"""Agent.py |
|
|
|
Automatically generated by Colab. |
|
|
|
Original file is located at |
|
https://colab.research.google.com/drive/1aYmc6hcd6JKLK6sdFwPS-yC89WliBwhh |
|
""" |
|
|
|
# Install dependencies first (run these in a notebook cell or a shell; bare
# "pip install" lines are not valid Python in a .py script):
# !pip install langchain langchain-openai pydantic openai
# !pip install langchain-community

"""# Library and Framework imports |
|
- ChatOpenAI: This is the LLM backend (OpenAI’s GPT model). |
|
|
|
- ConversationBufferMemory: This stores past messages, enabling Memory (MCP). |
|
|
|
- initialize_agent: Initializes the agent, which decides how to use the LLM and tools. |
|
|
|
- Tool: Represents a single tool the agent can call |
|
""" |
|
|
|
from langchain_openai import ChatOpenAI  # preferred import now that langchain-openai is installed
from langchain.memory import ConversationBufferMemory
from langchain.agents import initialize_agent, AgentType
from langchain.tools import Tool
import os

"""# Build the Memory Layer (MCP) |
|
|
|
- The memory ensures the agent doesn’t “forget” previous context, which is crucial for MCP (Memory + Chain-of-Thought). |
|
|
|
- return_messages=True means the memory will store entire message objects (input and output). |
|
""" |
|
|
|
def build_memory() -> ConversationBufferMemory:
    """Create memory so the agent can remember past messages."""
    return ConversationBufferMemory(memory_key="chat_history", return_messages=True)

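"""A quick, illustrative check of the buffer (the exchange below is made up): save one turn, then read it back as message objects."""

demo_memory = build_memory()
demo_memory.save_context(
    {"input": "Our ETL runs as cron jobs on a single EC2 box."},
    {"output": "Noted. A scheduler such as EventBridge plus ECS tasks would remove that single point of failure."},
)
print(demo_memory.load_memory_variables({})["chat_history"])
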
"""# Prompt engineering: |
|
- SystemPrompt: Sets up the agent's persona as an experienced platform architect. This is the agent's guide. |
|
|
|
- HumanPrompt: Defines what the agent needs (modernization plan, resilience strategy). |
|
|
|
- CoT Behavior: The agent will reason step-by-step and expose only the reasoning summary at the end. |
|
""" |
|
|
|
from langchain.prompts import ChatPromptTemplate, SystemMessagePromptTemplate, HumanMessagePromptTemplate

def build_prompt() -> ChatPromptTemplate:
    """Create a system+human prompt that encourages explicit planning (CoT-style)."""
    system = SystemMessagePromptTemplate.from_template(
        "You are InfraResilience, a senior platform architect.\n"
        "Task: given a legacy stack, propose modernization options AND a resilience test plan.\n"
        "Be concrete, AWS-aware (Airflow/Step Functions/ECS/Lambda/S3/SQS/SNS), and security-conscious.\n"
        "Output concise, actionable bullet points.\n"
        "When planning, think step-by-step internally; expose only a brief reasoning summary."
    )

    human = HumanMessagePromptTemplate.from_template(
        "Legacy stack description:\n{legacy_stack}\n\n"
        "Outage scenario (optional): {outage_scenario}\n\n"
        "Provide:\n"
        "1) Modernization plan\n"
        "2) Resilience test strategy\n"
        "3) Short reasoning summary"
    )

    return ChatPromptTemplate.from_messages([system, human])

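"""A short demo of the template with illustrative inputs, printing the rendered messages before anything is sent to the model."""

demo_prompt = build_prompt()
for msg in demo_prompt.format_messages(
    legacy_stack="Monolithic Java app on EC2; cron-driven ETL; MySQL on the same host.",
    outage_scenario="Primary AZ unavailable for two hours.",
):
    print(f"[{msg.type}] {msg.content[:80]}...")
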
"""# GPT-4o-Mini LLM build |
|
- temperature=0.2: This controls the randomness of the LLM. A lower value makes the LLM more deterministic (less random). |
|
|
|
- LLM (OpenAI's GPT) that the agent will use for reasoning. |
|
""" |
|
|
|
from typing import Optional

def build_llm(model_name: Optional[str] = None, temperature: float = 0.2) -> ChatOpenAI:
    """Instantiate the chat LLM with sane defaults."""
    chosen = model_name or os.getenv("LLM_MODEL_NAME", "gpt-4o-mini")
    return ChatOpenAI(model=chosen, temperature=temperature)

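"""A minimal smoke test; assumes OPENAI_API_KEY is already set in the environment (never hard-code the key)."""

# os.environ["OPENAI_API_KEY"] = "..."  # set securely before running, e.g. via Colab secrets
llm_check = build_llm(temperature=0.0)
print(llm_check.invoke("Reply with exactly: ready").content)
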
"""So far, we have Memory, Prompts, LLM initialized. Now, let's build the agent with tools. |
|
|
|
The agent is now connected to tools (we'll define in tools.py), memory, and the prompt template. It's ready to think through problems. |
|
""" |
|
|
|
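"""The tool below is a hypothetical placeholder for what tools.py might define; the name, function, and description are illustrative, not the real tools.py contents."""

def estimate_blast_radius(service: str) -> str:
    """Toy lookup standing in for a real dependency-graph query."""
    known = {
        "mysql": "ETL jobs, reporting API",
        "cron-etl": "nightly warehouse load",
    }
    return known.get(service.strip().lower(), "no downstream dependencies recorded")

example_tools = [
    Tool(
        name="estimate_blast_radius",
        func=estimate_blast_radius,
        description="Given a service name, list the downstream systems an outage would affect.",
    )
]
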
from typing import List

def create_agent(tools: List[Tool], llm: Optional[ChatOpenAI] = None, memory: Optional[ConversationBufferMemory] = None):
    """Wire the tools, memory, and LLM into a conversational ReAct agent."""
    llm = llm or build_llm()
    memory = memory or build_memory()

    # CONVERSATIONAL_REACT_DESCRIPTION is the ReAct agent built for chat memory:
    # it reads exactly the "chat_history" key our ConversationBufferMemory
    # provides. (ZERO_SHOT_REACT_DESCRIPTION has no chat_history slot and its
    # constructor rejects extra prompt messages, so memory would be lost.)
    agent = initialize_agent(
        tools=tools,
        llm=llm,
        agent=AgentType.CONVERSATIONAL_REACT_DESCRIPTION,
        memory=memory,
        verbose=False,
    )

    return agent
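
"""# Usage sketch

A hypothetical end-to-end run: the stack description and outage scenario are made-up inputs, and example_tools is the placeholder tool list defined above."""

resilience_agent = create_agent(example_tools)
request = build_prompt().format(
    legacy_stack="Monolithic Java app on EC2; cron-driven ETL; MySQL on the same host.",
    outage_scenario="Primary AZ unavailable for two hours.",
)
print(resilience_agent.run(request))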