File size: 6,969 Bytes
fa2eb35
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
# Copyright (c) Microsoft. All rights reserved.

import datetime
from typing import Dict, List, Optional

import autogen
import semantic_kernel


class AutoGenPlanner:
    """(Demo) Semantic Kernel planner using Conversational Programming via AutoGen.

    AutoGenPlanner leverages OpenAI Function Calling and AutoGen agents to solve
    a task using only the Plugins loaded into Semantic Kernel. SK Plugins are
    automatically shared with AutoGen, so you only need to load the Plugins in SK
    with the usual `kernel.import_skill(...)` syntax. You can use native and
    semantic functions without any additional configuration. Currently the integration
    is limited to functions with a single string parameter. The planner has been
    tested with GPT 3.5 Turbo and GPT 4. It always used 3.5 Turbo with OpenAI,
    just for performance and cost reasons.
    """

    # Default system message for assistant agents. The f-string is evaluated
    # once, when the class body runs — i.e. the date is frozen at import time.
    ASSISTANT_PERSONA = f"""Only use the functions you have been provided with.
Do not ask the user to perform other actions than executing the functions.
Use the functions you have to find information not available.
Today's date is: {datetime.date.today().strftime("%B %d, %Y")}.
Reply TERMINATE when the task is done.
"""

    def __init__(self, kernel: semantic_kernel.Kernel, llm_config: Optional[Dict] = None):
        """
        Args:
            kernel: an instance of Semantic Kernel, with plugins loaded.
            llm_config: a dictionary with the following keys:
                - type: "openai" or "azure"
                - openai_api_key: OpenAI API key
                - azure_api_key: Azure API key
                - azure_deployment: Azure deployment name
                - azure_endpoint: Azure endpoint
        """
        super().__init__()
        self.kernel = kernel
        self.llm_config = llm_config

    def create_assistant_agent(self, name: str, persona: str = ASSISTANT_PERSONA) -> autogen.AssistantAgent:
        """
        Create a new AutoGen Assistant Agent.

        Args:
            name (str): the name of the agent
            persona (str): the LLM system message defining the agent persona,
                in case you want to customize it.
        """
        return autogen.AssistantAgent(name=name, system_message=persona, llm_config=self.__get_autogen_config())

    def create_user_agent(
        self, name: str, max_auto_reply: Optional[int] = None, human_input: Optional[str] = "ALWAYS"
    ) -> autogen.UserProxyAgent:
        """
        Create a new AutoGen User Proxy Agent.

        Args:
            name (str): the name of the agent
            max_auto_reply (int): the maximum number of consecutive auto replies.
                default to None (no limit provided).
            human_input (str): the human input mode. default to "ALWAYS".
                Possible values are "ALWAYS", "TERMINATE", "NEVER".
                (1) When "ALWAYS", the agent prompts for human input every time a message is received.
                    Under this mode, the conversation stops when the human input is "exit",
                    or when is_termination_msg is True and there is no human input.
                (2) When "TERMINATE", the agent only prompts for human input only when a termination message is received or
                    the number of auto reply reaches the max_consecutive_auto_reply.
                (3) When "NEVER", the agent will never prompt for human input. Under this mode, the conversation stops
                    when the number of auto reply reaches the max_consecutive_auto_reply or when is_termination_msg is True.
        """
        return autogen.UserProxyAgent(
            name=name,
            human_input_mode=human_input,
            max_consecutive_auto_reply=max_auto_reply,
            function_map=self.__get_function_map(),
        )

    def __get_autogen_config(self) -> Dict:
        """
        Get the AutoGen LLM and Function Calling configuration.

        Returns:
            a dict with "functions" (OpenAI function definitions) and
            "config_list" (AutoGen model configuration).

        Raises:
            Exception: if the configuration is missing, incomplete, or of an
                unknown type.
        """
        if self.llm_config:
            if self.llm_config.get("type") == "openai":
                # .get() so a missing key raises the intended config error,
                # not a KeyError; "sk-..." is the docs placeholder value.
                api_key = self.llm_config.get("openai_api_key")
                if not api_key or api_key == "sk-...":
                    raise Exception("OpenAI API key is not set")
                return {
                    "functions": self.__get_function_definitions(),
                    "config_list": [{"model": "gpt-3.5-turbo", "api_key": api_key}],
                }
            if self.llm_config.get("type") == "azure":
                azure_api_key = self.llm_config.get("azure_api_key")
                azure_deployment = self.llm_config.get("azure_deployment")
                azure_endpoint = self.llm_config.get("azure_endpoint")
                if not azure_api_key or not azure_deployment or not azure_endpoint:
                    raise Exception("Azure OpenAI API configuration is incomplete")
                return {
                    "functions": self.__get_function_definitions(),
                    "config_list": [
                        {
                            "model": azure_deployment,
                            "api_type": "azure",
                            "api_key": azure_api_key,
                            "api_base": azure_endpoint,
                            "api_version": "2023-08-01-preview",
                        }
                    ],
                }

        raise Exception("LLM type not provided, must be 'openai' or 'azure'")

    def __get_function_views(self):
        """
        Yield every SK function view, native and semantic.

        Bug fix: the original code iterated the merged *key set* of both dicts
        but then always read `native_functions[ns]`, so semantic-only skills
        were dropped (or raised KeyError). Walking both dicts' values covers
        native and semantic functions, as the class docstring promises.
        """
        sk_functions = self.kernel.skills.get_functions_view()
        for view in (sk_functions.native_functions, sk_functions.semantic_functions):
            for function_list in view.values():
                yield from function_list

    def __get_function_definitions(self) -> List:
        """
        Get the list of function definitions for OpenAI Function Calling.

        Only the first parameter of each function is exposed (the integration
        is limited to single-string-parameter functions, see class docstring).
        Parameterless functions get an empty schema instead of crashing.
        """
        functions = []
        for f in self.__get_function_views():
            if f.parameters:
                first = f.parameters[0]
                properties = {
                    first.name: {
                        "description": first.description,
                        "type": first.type_,
                    }
                }
                required = [first.name]
            else:
                properties = {}
                required = []
            functions.append(
                {
                    "name": f.name,
                    "description": f.description,
                    "parameters": {
                        "type": "object",
                        "properties": properties,
                        "required": required,
                    },
                }
            )
        return functions

    def __get_function_map(self) -> Dict:
        """
        Get the function map for AutoGen Function Calling.

        Returns:
            a dict mapping each SK function name to its callable, covering
            both native and semantic functions.
        """
        return {
            f.name: self.kernel.skills.get_function(f.skill_name, f.name)
            for f in self.__get_function_views()
        }