from langchain.schema import SystemMessage
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.agents import OpenAIFunctionsAgent, AgentExecutor
from langchain.prompts import MessagesPlaceholder
from langchain.memory import ConversationBufferMemory
from langchain.chat_models import ChatOpenAI

from final_tools import custom_tools

# from dotenv import load_dotenv
# load_dotenv()

define_agent = """
You are Apollo, an AI music-player assistant, designed to provide a personalized and engaging listening experience through thoughtful interaction and intelligent tool usage.

Your Main Responsibilities:

1. **Play Music:** Utilize your specialized toolkit to fulfill music requests.
2. **Mood Monitoring:** Constantly gauge the user's mood and adapt the music accordingly. For example, if the mood shifts from 'Happy' to 'more upbeat,' select 'Energetic' music.
3. **Track and Artist Memory:** Be prepared to recall tracks and/or artists that the user has previously requested.
4. **Provide Guidance:** If the user appears indecisive or unsure about their selection, proactively offer suggestions based on their previous preferences or popular choices within the desired mood or genre.
5. **Seek Clarification:** If a user's request is ambiguous, don't hesitate to ask for more details.
"""

system_message = SystemMessage(content=define_agent)

MEMORY_KEY = "chat_history"

# System message plus a placeholder where the conversation history is injected on each turn.
prompt = OpenAIFunctionsAgent.create_prompt(
    system_message=system_message,
    extra_prompt_messages=[MessagesPlaceholder(variable_name=MEMORY_KEY)],
)

memory = ConversationBufferMemory(memory_key=MEMORY_KEY, return_messages=True)

# llm = ChatOpenAI(openai_api_key=openai_key, streaming=True, callbacks=[StreamingStdOutCallbackHandler()], max_retries=3, temperature=0, model_name="gpt-4")
# agent = OpenAIFunctionsAgent(llm=llm, tools=custom_tools, prompt=prompt)
# agent_executor = AgentExecutor(agent=agent, tools=custom_tools, memory=memory, verbose=True)

llm = None  # None until create_agent() is called


def create_agent(key):
    """Build the Apollo agent executor using the supplied OpenAI API key."""
    global llm
    llm = ChatOpenAI(
        openai_api_key=key,
        streaming=True,
        callbacks=[StreamingStdOutCallbackHandler()],
        max_retries=3,
        temperature=0,
        model_name="gpt-4",
    )
    agent = OpenAIFunctionsAgent(llm=llm, tools=custom_tools, prompt=prompt)
    agent_executor = AgentExecutor(agent=agent, tools=custom_tools, memory=memory, verbose=True)
    return agent_executor
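
# Usage sketch (an assumption, not part of this module's original entry point):
# the caller supplies the user's OpenAI key at runtime, e.g. from a UI or CLI.
# "YOUR_OPENAI_KEY" below is a placeholder, not a real credential.
#
#     agent_executor = create_agent("YOUR_OPENAI_KEY")
#     reply = agent_executor.run("Play something energetic by Daft Punk")
#     print(reply)
#
# Because `memory` is a module-level ConversationBufferMemory, successive
# .run() calls share the same chat history, which is what lets Apollo recall
# previously requested tracks and artists across turns.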