ag2 0.4b1-py3-none-any.whl → 0.4.2b1-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of ag2 might be problematic.
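A diff like this can be reproduced locally from the two wheels. Below is a minimal sketch, assuming both wheel files have already been downloaded; the filenames follow the standard wheel naming convention and are assumptions, not values taken from this page:

```python
import difflib
import zipfile

# Assumed local filenames (standard wheel naming); download them first, e.g.:
#   pip download --no-deps ag2==0.4b1 ag2==0.4.2b1
OLD = "ag2-0.4b1-py3-none-any.whl"
NEW = "ag2-0.4.2b1-py3-none-any.whl"


def wheel_text_files(path: str) -> dict:
    """Map each text file in the wheel to its lines; binary entries are skipped."""
    files = {}
    with zipfile.ZipFile(path) as zf:
        for name in zf.namelist():
            try:
                files[name] = zf.read(name).decode("utf-8").splitlines(keepends=True)
            except UnicodeDecodeError:
                continue  # skip non-text entries
    return files


old, new = wheel_text_files(OLD), wheel_text_files(NEW)
# Union of filenames across both wheels: added, removed, and changed files all appear.
for name in sorted(old.keys() | new.keys()):
    for line in difflib.unified_diff(
        old.get(name, []), new.get(name, []), fromfile=f"a/{name}", tofile=f"b/{name}"
    ):
        print(line, end="")
```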
- ag2-0.4.2b1.dist-info/METADATA +19 -0
- ag2-0.4.2b1.dist-info/RECORD +6 -0
- ag2-0.4.2b1.dist-info/top_level.txt +1 -0
- ag2-0.4b1.dist-info/METADATA +0 -496
- ag2-0.4b1.dist-info/RECORD +0 -115
- ag2-0.4b1.dist-info/top_level.txt +0 -1
- autogen/__init__.py +0 -17
- autogen/_pydantic.py +0 -116
- autogen/agentchat/__init__.py +0 -42
- autogen/agentchat/agent.py +0 -142
- autogen/agentchat/assistant_agent.py +0 -85
- autogen/agentchat/chat.py +0 -306
- autogen/agentchat/contrib/__init__.py +0 -0
- autogen/agentchat/contrib/agent_builder.py +0 -787
- autogen/agentchat/contrib/agent_optimizer.py +0 -450
- autogen/agentchat/contrib/capabilities/__init__.py +0 -0
- autogen/agentchat/contrib/capabilities/agent_capability.py +0 -21
- autogen/agentchat/contrib/capabilities/generate_images.py +0 -297
- autogen/agentchat/contrib/capabilities/teachability.py +0 -406
- autogen/agentchat/contrib/capabilities/text_compressors.py +0 -72
- autogen/agentchat/contrib/capabilities/transform_messages.py +0 -92
- autogen/agentchat/contrib/capabilities/transforms.py +0 -565
- autogen/agentchat/contrib/capabilities/transforms_util.py +0 -120
- autogen/agentchat/contrib/capabilities/vision_capability.py +0 -217
- autogen/agentchat/contrib/captainagent.py +0 -487
- autogen/agentchat/contrib/gpt_assistant_agent.py +0 -545
- autogen/agentchat/contrib/graph_rag/__init__.py +0 -0
- autogen/agentchat/contrib/graph_rag/document.py +0 -24
- autogen/agentchat/contrib/graph_rag/falkor_graph_query_engine.py +0 -76
- autogen/agentchat/contrib/graph_rag/graph_query_engine.py +0 -50
- autogen/agentchat/contrib/graph_rag/graph_rag_capability.py +0 -56
- autogen/agentchat/contrib/img_utils.py +0 -390
- autogen/agentchat/contrib/llamaindex_conversable_agent.py +0 -123
- autogen/agentchat/contrib/llava_agent.py +0 -176
- autogen/agentchat/contrib/math_user_proxy_agent.py +0 -471
- autogen/agentchat/contrib/multimodal_conversable_agent.py +0 -128
- autogen/agentchat/contrib/qdrant_retrieve_user_proxy_agent.py +0 -325
- autogen/agentchat/contrib/retrieve_assistant_agent.py +0 -56
- autogen/agentchat/contrib/retrieve_user_proxy_agent.py +0 -701
- autogen/agentchat/contrib/society_of_mind_agent.py +0 -203
- autogen/agentchat/contrib/swarm_agent.py +0 -414
- autogen/agentchat/contrib/text_analyzer_agent.py +0 -76
- autogen/agentchat/contrib/tool_retriever.py +0 -114
- autogen/agentchat/contrib/vectordb/__init__.py +0 -0
- autogen/agentchat/contrib/vectordb/base.py +0 -243
- autogen/agentchat/contrib/vectordb/chromadb.py +0 -326
- autogen/agentchat/contrib/vectordb/mongodb.py +0 -559
- autogen/agentchat/contrib/vectordb/pgvectordb.py +0 -958
- autogen/agentchat/contrib/vectordb/qdrant.py +0 -334
- autogen/agentchat/contrib/vectordb/utils.py +0 -126
- autogen/agentchat/contrib/web_surfer.py +0 -305
- autogen/agentchat/conversable_agent.py +0 -2908
- autogen/agentchat/groupchat.py +0 -1668
- autogen/agentchat/user_proxy_agent.py +0 -109
- autogen/agentchat/utils.py +0 -207
- autogen/browser_utils.py +0 -291
- autogen/cache/__init__.py +0 -10
- autogen/cache/abstract_cache_base.py +0 -78
- autogen/cache/cache.py +0 -182
- autogen/cache/cache_factory.py +0 -85
- autogen/cache/cosmos_db_cache.py +0 -150
- autogen/cache/disk_cache.py +0 -109
- autogen/cache/in_memory_cache.py +0 -61
- autogen/cache/redis_cache.py +0 -128
- autogen/code_utils.py +0 -745
- autogen/coding/__init__.py +0 -22
- autogen/coding/base.py +0 -113
- autogen/coding/docker_commandline_code_executor.py +0 -262
- autogen/coding/factory.py +0 -45
- autogen/coding/func_with_reqs.py +0 -203
- autogen/coding/jupyter/__init__.py +0 -22
- autogen/coding/jupyter/base.py +0 -32
- autogen/coding/jupyter/docker_jupyter_server.py +0 -164
- autogen/coding/jupyter/embedded_ipython_code_executor.py +0 -182
- autogen/coding/jupyter/jupyter_client.py +0 -224
- autogen/coding/jupyter/jupyter_code_executor.py +0 -161
- autogen/coding/jupyter/local_jupyter_server.py +0 -168
- autogen/coding/local_commandline_code_executor.py +0 -410
- autogen/coding/markdown_code_extractor.py +0 -44
- autogen/coding/utils.py +0 -57
- autogen/exception_utils.py +0 -46
- autogen/extensions/__init__.py +0 -0
- autogen/formatting_utils.py +0 -76
- autogen/function_utils.py +0 -362
- autogen/graph_utils.py +0 -148
- autogen/io/__init__.py +0 -15
- autogen/io/base.py +0 -105
- autogen/io/console.py +0 -43
- autogen/io/websockets.py +0 -213
- autogen/logger/__init__.py +0 -11
- autogen/logger/base_logger.py +0 -140
- autogen/logger/file_logger.py +0 -287
- autogen/logger/logger_factory.py +0 -29
- autogen/logger/logger_utils.py +0 -42
- autogen/logger/sqlite_logger.py +0 -459
- autogen/math_utils.py +0 -356
- autogen/oai/__init__.py +0 -33
- autogen/oai/anthropic.py +0 -428
- autogen/oai/bedrock.py +0 -600
- autogen/oai/cerebras.py +0 -264
- autogen/oai/client.py +0 -1148
- autogen/oai/client_utils.py +0 -167
- autogen/oai/cohere.py +0 -453
- autogen/oai/completion.py +0 -1216
- autogen/oai/gemini.py +0 -469
- autogen/oai/groq.py +0 -281
- autogen/oai/mistral.py +0 -279
- autogen/oai/ollama.py +0 -576
- autogen/oai/openai_utils.py +0 -810
- autogen/oai/together.py +0 -343
- autogen/retrieve_utils.py +0 -487
- autogen/runtime_logging.py +0 -163
- autogen/token_count_utils.py +0 -257
- autogen/types.py +0 -20
- autogen/version.py +0 -7
- {ag2-0.4b1.dist-info → ag2-0.4.2b1.dist-info}/LICENSE +0 -0
- {ag2-0.4b1.dist-info → ag2-0.4.2b1.dist-info}/NOTICE.md +0 -0
- {ag2-0.4b1.dist-info → ag2-0.4.2b1.dist-info}/WHEEL +0 -0
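Taken together, the file list suggests the 0.4.2b1 wheel ships only packaging metadata: every `autogen/*` module from 0.4b1 is removed, and the only additions are small `dist-info` files (a 19-line METADATA, a 6-entry RECORD, a 1-line top_level.txt). A quick check of that reading (a sketch, assuming the new wheel was downloaded under its conventional filename):

```python
import zipfile

# List every entry in the new wheel with its size; if the wheel is
# metadata-only, nothing outside *.dist-info/ should show up.
with zipfile.ZipFile("ag2-0.4.2b1-py3-none-any.whl") as zf:
    for info in zf.infolist():
        print(f"{info.file_size:>8}  {info.filename}")
```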
|
@@ -1,2908 +0,0 @@
|
|
|
1
|
-
# Copyright (c) 2023 - 2024, Owners of https://github.com/ag2ai
|
|
2
|
-
#
|
|
3
|
-
# SPDX-License-Identifier: Apache-2.0
|
|
4
|
-
#
|
|
5
|
-
# Portions derived from https://github.com/microsoft/autogen are under the MIT License.
|
|
6
|
-
# SPDX-License-Identifier: MIT
|
|
7
|
-
import asyncio
|
|
8
|
-
import copy
|
|
9
|
-
import functools
|
|
10
|
-
import inspect
|
|
11
|
-
import json
|
|
12
|
-
import logging
|
|
13
|
-
import re
|
|
14
|
-
import warnings
|
|
15
|
-
from collections import defaultdict
|
|
16
|
-
from typing import Any, Callable, Dict, List, Literal, Optional, Tuple, Type, TypeVar, Union
|
|
17
|
-
|
|
18
|
-
from openai import BadRequestError
|
|
19
|
-
|
|
20
|
-
from autogen.agentchat.chat import _post_process_carryover_item
|
|
21
|
-
from autogen.exception_utils import InvalidCarryOverType, SenderRequired
|
|
22
|
-
|
|
23
|
-
from .._pydantic import model_dump
|
|
24
|
-
from ..cache.cache import AbstractCache
|
|
25
|
-
from ..code_utils import (
|
|
26
|
-
PYTHON_VARIANTS,
|
|
27
|
-
UNKNOWN,
|
|
28
|
-
check_can_use_docker_or_throw,
|
|
29
|
-
content_str,
|
|
30
|
-
decide_use_docker,
|
|
31
|
-
execute_code,
|
|
32
|
-
extract_code,
|
|
33
|
-
infer_lang,
|
|
34
|
-
)
|
|
35
|
-
from ..coding.base import CodeExecutor
|
|
36
|
-
from ..coding.factory import CodeExecutorFactory
|
|
37
|
-
from ..formatting_utils import colored
|
|
38
|
-
from ..function_utils import get_function_schema, load_basemodels_if_needed, serialize_to_str
|
|
39
|
-
from ..io.base import IOStream
|
|
40
|
-
from ..oai.client import ModelClient, OpenAIWrapper
|
|
41
|
-
from ..runtime_logging import log_event, log_function_use, log_new_agent, logging_enabled
|
|
42
|
-
from .agent import Agent, LLMAgent
|
|
43
|
-
from .chat import ChatResult, a_initiate_chats, initiate_chats
|
|
44
|
-
from .utils import consolidate_chat_info, gather_usage_summary
|
|
45
|
-
|
|
46
|
-
__all__ = ("ConversableAgent",)
|
|
47
|
-
|
|
48
|
-
logger = logging.getLogger(__name__)
|
|
49
|
-
|
|
50
|
-
F = TypeVar("F", bound=Callable[..., Any])
|
|
51
|
-
|
|
52
|
-
|
|
53
|
-
class ConversableAgent(LLMAgent):
|
|
54
|
-
"""(In preview) A class for generic conversable agents which can be configured as assistant or user proxy.
|
|
55
|
-
|
|
56
|
-
After receiving each message, the agent will send a reply to the sender unless the msg is a termination msg.
|
|
57
|
-
For example, AssistantAgent and UserProxyAgent are subclasses of this class,
|
|
58
|
-
configured with different default settings.
|
|
59
|
-
|
|
60
|
-
To modify auto reply, override `generate_reply` method.
|
|
61
|
-
To disable/enable human response in every turn, set `human_input_mode` to "NEVER" or "ALWAYS".
|
|
62
|
-
To modify the way to get human input, override `get_human_input` method.
|
|
63
|
-
To modify the way to execute code blocks, single code block, or function call, override `execute_code_blocks`,
|
|
64
|
-
`run_code`, and `execute_function` methods respectively.
|
|
65
|
-
"""
|
|
66
|
-
|
|
67
|
-
DEFAULT_CONFIG = False # False or dict, the default config for llm inference
|
|
68
|
-
MAX_CONSECUTIVE_AUTO_REPLY = 100 # maximum number of consecutive auto replies (subject to future change)
|
|
69
|
-
|
|
70
|
-
DEFAULT_SUMMARY_PROMPT = "Summarize the takeaway from the conversation. Do not add any introductory phrases."
|
|
71
|
-
DEFAULT_SUMMARY_METHOD = "last_msg"
|
|
72
|
-
llm_config: Union[Dict, Literal[False]]
|
|
73
|
-
|
|
74
|
-
def __init__(
|
|
75
|
-
self,
|
|
76
|
-
name: str,
|
|
77
|
-
system_message: Optional[Union[str, List]] = "You are a helpful AI Assistant.",
|
|
78
|
-
is_termination_msg: Optional[Callable[[Dict], bool]] = None,
|
|
79
|
-
max_consecutive_auto_reply: Optional[int] = None,
|
|
80
|
-
human_input_mode: Literal["ALWAYS", "NEVER", "TERMINATE"] = "TERMINATE",
|
|
81
|
-
function_map: Optional[Dict[str, Callable]] = None,
|
|
82
|
-
code_execution_config: Union[Dict, Literal[False]] = False,
|
|
83
|
-
llm_config: Optional[Union[Dict, Literal[False]]] = None,
|
|
84
|
-
default_auto_reply: Union[str, Dict] = "",
|
|
85
|
-
description: Optional[str] = None,
|
|
86
|
-
chat_messages: Optional[Dict[Agent, List[Dict]]] = None,
|
|
87
|
-
silent: Optional[bool] = None,
|
|
88
|
-
):
|
|
89
|
-
"""
|
|
90
|
-
Args:
|
|
91
|
-
name (str): name of the agent.
|
|
92
|
-
system_message (str or list): system message for the ChatCompletion inference.
|
|
93
|
-
is_termination_msg (function): a function that takes a message in the form of a dictionary
|
|
94
|
-
and returns a boolean value indicating if this received message is a termination message.
|
|
95
|
-
The dict can contain the following keys: "content", "role", "name", "function_call".
|
|
96
|
-
max_consecutive_auto_reply (int): the maximum number of consecutive auto replies.
|
|
97
|
-
default to None (no limit provided, class attribute MAX_CONSECUTIVE_AUTO_REPLY will be used as the limit in this case).
|
|
98
|
-
When set to 0, no auto reply will be generated.
|
|
99
|
-
human_input_mode (str): whether to ask for human inputs every time a message is received.
|
|
100
|
-
Possible values are "ALWAYS", "TERMINATE", "NEVER".
|
|
101
|
-
(1) When "ALWAYS", the agent prompts for human input every time a message is received.
|
|
102
|
-
Under this mode, the conversation stops when the human input is "exit",
|
|
103
|
-
or when is_termination_msg is True and there is no human input.
|
|
104
|
-
(2) When "TERMINATE", the agent only prompts for human input only when a termination message is received or
|
|
105
|
-
the number of auto reply reaches the max_consecutive_auto_reply.
|
|
106
|
-
(3) When "NEVER", the agent will never prompt for human input. Under this mode, the conversation stops
|
|
107
|
-
when the number of auto reply reaches the max_consecutive_auto_reply or when is_termination_msg is True.
|
|
108
|
-
function_map (dict[str, callable]): Mapping function names (passed to openai) to callable functions, also used for tool calls.
|
|
109
|
-
code_execution_config (dict or False): config for the code execution.
|
|
110
|
-
To disable code execution, set to False. Otherwise, set to a dictionary with the following keys:
|
|
111
|
-
- work_dir (Optional, str): The working directory for the code execution.
|
|
112
|
-
If None, a default working directory will be used.
|
|
113
|
-
The default working directory is the "extensions" directory under
|
|
114
|
-
"path_to_autogen".
|
|
115
|
-
- use_docker (Optional, list, str or bool): The docker image to use for code execution.
|
|
116
|
-
Default is True, which means the code will be executed in a docker container. A default list of images will be used.
|
|
117
|
-
If a list or a str of image name(s) is provided, the code will be executed in a docker container
|
|
118
|
-
with the first image successfully pulled.
|
|
119
|
-
If False, the code will be executed in the current environment.
|
|
120
|
-
We strongly recommend using docker for code execution.
|
|
121
|
-
- timeout (Optional, int): The maximum execution time in seconds.
|
|
122
|
-
- last_n_messages (Experimental, int or str): The number of messages to look back for code execution.
|
|
123
|
-
If set to 'auto', it will scan backwards through all messages arriving since the agent last spoke, which is typically the last time execution was attempted. (Default: auto)
|
|
124
|
-
llm_config (dict or False or None): llm inference configuration.
|
|
125
|
-
Please refer to [OpenAIWrapper.create](/docs/reference/oai/client#create)
|
|
126
|
-
for available options.
|
|
127
|
-
When using OpenAI or Azure OpenAI endpoints, please specify a non-empty 'model' either in `llm_config` or in each config of 'config_list' in `llm_config`.
|
|
128
|
-
To disable llm-based auto reply, set to False.
|
|
129
|
-
When set to None, will use self.DEFAULT_CONFIG, which defaults to False.
|
|
130
|
-
default_auto_reply (str or dict): default auto reply when no code execution or llm-based reply is generated.
|
|
131
|
-
description (str): a short description of the agent. This description is used by other agents
|
|
132
|
-
(e.g. the GroupChatManager) to decide when to call upon this agent. (Default: system_message)
|
|
133
|
-
chat_messages (dict or None): the previous chat messages that this agent had in the past with other agents.
|
|
134
|
-
Can be used to give the agent a memory by providing the chat history. This will allow the agent to
|
|
135
|
-
resume previous had conversations. Defaults to an empty chat history.
|
|
136
|
-
silent (bool or None): (Experimental) whether to print the message sent. If None, will use the value of
|
|
137
|
-
silent in each function.
|
|
138
|
-
"""
|
|
139
|
-
# we change code_execution_config below and we have to make sure we don't change the input
|
|
140
|
-
# in case of UserProxyAgent, without this we could even change the default value {}
|
|
141
|
-
code_execution_config = (
|
|
142
|
-
code_execution_config.copy() if hasattr(code_execution_config, "copy") else code_execution_config
|
|
143
|
-
)
|
|
144
|
-
|
|
145
|
-
self._name = name
|
|
146
|
-
# a dictionary of conversations, default value is list
|
|
147
|
-
if chat_messages is None:
|
|
148
|
-
self._oai_messages = defaultdict(list)
|
|
149
|
-
else:
|
|
150
|
-
self._oai_messages = chat_messages
|
|
151
|
-
|
|
152
|
-
self._oai_system_message = [{"content": system_message, "role": "system"}]
|
|
153
|
-
self._description = description if description is not None else system_message
|
|
154
|
-
self._is_termination_msg = (
|
|
155
|
-
is_termination_msg
|
|
156
|
-
if is_termination_msg is not None
|
|
157
|
-
else (lambda x: content_str(x.get("content")) == "TERMINATE")
|
|
158
|
-
)
|
|
159
|
-
self.silent = silent
|
|
160
|
-
# Take a copy to avoid modifying the given dict
|
|
161
|
-
if isinstance(llm_config, dict):
|
|
162
|
-
try:
|
|
163
|
-
llm_config = copy.deepcopy(llm_config)
|
|
164
|
-
except TypeError as e:
|
|
165
|
-
raise TypeError(
|
|
166
|
-
"Please implement __deepcopy__ method for each value class in llm_config to support deepcopy."
|
|
167
|
-
" Refer to the docs for more details: https://ag2ai.github.io/ag2/docs/topics/llm_configuration#adding-http-client-in-llm_config-for-proxy"
|
|
168
|
-
) from e
|
|
169
|
-
|
|
170
|
-
self._validate_llm_config(llm_config)
|
|
171
|
-
|
|
172
|
-
if logging_enabled():
|
|
173
|
-
log_new_agent(self, locals())
|
|
174
|
-
|
|
175
|
-
# Initialize standalone client cache object.
|
|
176
|
-
self.client_cache = None
|
|
177
|
-
|
|
178
|
-
self.human_input_mode = human_input_mode
|
|
179
|
-
self._max_consecutive_auto_reply = (
|
|
180
|
-
max_consecutive_auto_reply if max_consecutive_auto_reply is not None else self.MAX_CONSECUTIVE_AUTO_REPLY
|
|
181
|
-
)
|
|
182
|
-
self._consecutive_auto_reply_counter = defaultdict(int)
|
|
183
|
-
self._max_consecutive_auto_reply_dict = defaultdict(self.max_consecutive_auto_reply)
|
|
184
|
-
self._function_map = (
|
|
185
|
-
{}
|
|
186
|
-
if function_map is None
|
|
187
|
-
else {name: callable for name, callable in function_map.items() if self._assert_valid_name(name)}
|
|
188
|
-
)
|
|
189
|
-
self._default_auto_reply = default_auto_reply
|
|
190
|
-
self._reply_func_list = []
|
|
191
|
-
self._human_input = []
|
|
192
|
-
self.reply_at_receive = defaultdict(bool)
|
|
193
|
-
self.register_reply([Agent, None], ConversableAgent.generate_oai_reply)
|
|
194
|
-
self.register_reply([Agent, None], ConversableAgent.a_generate_oai_reply, ignore_async_in_sync_chat=True)
|
|
195
|
-
|
|
196
|
-
# Setting up code execution.
|
|
197
|
-
# Do not register code execution reply if code execution is disabled.
|
|
198
|
-
if code_execution_config is not False:
|
|
199
|
-
# If code_execution_config is None, set it to an empty dict.
|
|
200
|
-
if code_execution_config is None:
|
|
201
|
-
warnings.warn(
|
|
202
|
-
"Using None to signal a default code_execution_config is deprecated. "
|
|
203
|
-
"Use {} to use default or False to disable code execution.",
|
|
204
|
-
stacklevel=2,
|
|
205
|
-
)
|
|
206
|
-
code_execution_config = {}
|
|
207
|
-
if not isinstance(code_execution_config, dict):
|
|
208
|
-
raise ValueError("code_execution_config must be a dict or False.")
|
|
209
|
-
|
|
210
|
-
# We have got a valid code_execution_config.
|
|
211
|
-
self._code_execution_config = code_execution_config
|
|
212
|
-
|
|
213
|
-
if self._code_execution_config.get("executor") is not None:
|
|
214
|
-
if "use_docker" in self._code_execution_config:
|
|
215
|
-
raise ValueError(
|
|
216
|
-
"'use_docker' in code_execution_config is not valid when 'executor' is set. Use the appropriate arg in the chosen executor instead."
|
|
217
|
-
)
|
|
218
|
-
|
|
219
|
-
if "work_dir" in self._code_execution_config:
|
|
220
|
-
raise ValueError(
|
|
221
|
-
"'work_dir' in code_execution_config is not valid when 'executor' is set. Use the appropriate arg in the chosen executor instead."
|
|
222
|
-
)
|
|
223
|
-
|
|
224
|
-
if "timeout" in self._code_execution_config:
|
|
225
|
-
raise ValueError(
|
|
226
|
-
"'timeout' in code_execution_config is not valid when 'executor' is set. Use the appropriate arg in the chosen executor instead."
|
|
227
|
-
)
|
|
228
|
-
|
|
229
|
-
# Use the new code executor.
|
|
230
|
-
self._code_executor = CodeExecutorFactory.create(self._code_execution_config)
|
|
231
|
-
self.register_reply([Agent, None], ConversableAgent._generate_code_execution_reply_using_executor)
|
|
232
|
-
else:
|
|
233
|
-
# Legacy code execution using code_utils.
|
|
234
|
-
use_docker = self._code_execution_config.get("use_docker", None)
|
|
235
|
-
use_docker = decide_use_docker(use_docker)
|
|
236
|
-
check_can_use_docker_or_throw(use_docker)
|
|
237
|
-
self._code_execution_config["use_docker"] = use_docker
|
|
238
|
-
self.register_reply([Agent, None], ConversableAgent.generate_code_execution_reply)
|
|
239
|
-
else:
|
|
240
|
-
# Code execution is disabled.
|
|
241
|
-
self._code_execution_config = False
|
|
242
|
-
|
|
243
|
-
self.register_reply([Agent, None], ConversableAgent.generate_tool_calls_reply)
|
|
244
|
-
self.register_reply([Agent, None], ConversableAgent.a_generate_tool_calls_reply, ignore_async_in_sync_chat=True)
|
|
245
|
-
self.register_reply([Agent, None], ConversableAgent.generate_function_call_reply)
|
|
246
|
-
self.register_reply(
|
|
247
|
-
[Agent, None], ConversableAgent.a_generate_function_call_reply, ignore_async_in_sync_chat=True
|
|
248
|
-
)
|
|
249
|
-
self.register_reply([Agent, None], ConversableAgent.check_termination_and_human_reply)
|
|
250
|
-
self.register_reply(
|
|
251
|
-
[Agent, None], ConversableAgent.a_check_termination_and_human_reply, ignore_async_in_sync_chat=True
|
|
252
|
-
)
|
|
253
|
-
|
|
254
|
-
# Registered hooks are kept in lists, indexed by hookable method, to be called in their order of registration.
|
|
255
|
-
# New hookable methods should be added to this list as required to support new agent capabilities.
|
|
256
|
-
self.hook_lists: Dict[str, List[Callable]] = {
|
|
257
|
-
"process_last_received_message": [],
|
|
258
|
-
"process_all_messages_before_reply": [],
|
|
259
|
-
"process_message_before_send": [],
|
|
260
|
-
}
|
|
261
|
-
|
|
262
|
-
def _validate_llm_config(self, llm_config):
|
|
263
|
-
assert llm_config in (None, False) or isinstance(
|
|
264
|
-
llm_config, dict
|
|
265
|
-
), "llm_config must be a dict or False or None."
|
|
266
|
-
if llm_config is None:
|
|
267
|
-
llm_config = self.DEFAULT_CONFIG
|
|
268
|
-
self.llm_config = self.DEFAULT_CONFIG if llm_config is None else llm_config
|
|
269
|
-
# TODO: more complete validity check
|
|
270
|
-
if self.llm_config in [{}, {"config_list": []}, {"config_list": [{"model": ""}]}]:
|
|
271
|
-
raise ValueError(
|
|
272
|
-
"When using OpenAI or Azure OpenAI endpoints, specify a non-empty 'model' either in 'llm_config' or in each config of 'config_list'."
|
|
273
|
-
)
|
|
274
|
-
self.client = None if self.llm_config is False else OpenAIWrapper(**self.llm_config)
|
|
275
|
-
|
|
276
|
-
@staticmethod
|
|
277
|
-
def _is_silent(agent: Agent, silent: Optional[bool] = False) -> bool:
|
|
278
|
-
return agent.silent if agent.silent is not None else silent
|
|
279
|
-
|
|
280
|
-
@property
|
|
281
|
-
def name(self) -> str:
|
|
282
|
-
"""Get the name of the agent."""
|
|
283
|
-
return self._name
|
|
284
|
-
|
|
285
|
-
@property
|
|
286
|
-
def description(self) -> str:
|
|
287
|
-
"""Get the description of the agent."""
|
|
288
|
-
return self._description
|
|
289
|
-
|
|
290
|
-
@description.setter
|
|
291
|
-
def description(self, description: str):
|
|
292
|
-
"""Set the description of the agent."""
|
|
293
|
-
self._description = description
|
|
294
|
-
|
|
295
|
-
@property
|
|
296
|
-
def code_executor(self) -> Optional[CodeExecutor]:
|
|
297
|
-
"""The code executor used by this agent. Returns None if code execution is disabled."""
|
|
298
|
-
if not hasattr(self, "_code_executor"):
|
|
299
|
-
return None
|
|
300
|
-
return self._code_executor
|
|
301
|
-
|
|
302
|
-
def register_reply(
|
|
303
|
-
self,
|
|
304
|
-
trigger: Union[Type[Agent], str, Agent, Callable[[Agent], bool], List],
|
|
305
|
-
reply_func: Callable,
|
|
306
|
-
position: int = 0,
|
|
307
|
-
config: Optional[Any] = None,
|
|
308
|
-
reset_config: Optional[Callable] = None,
|
|
309
|
-
*,
|
|
310
|
-
ignore_async_in_sync_chat: bool = False,
|
|
311
|
-
remove_other_reply_funcs: bool = False,
|
|
312
|
-
):
|
|
313
|
-
"""Register a reply function.
|
|
314
|
-
|
|
315
|
-
The reply function will be called when the trigger matches the sender.
|
|
316
|
-
The function registered later will be checked earlier by default.
|
|
317
|
-
To change the order, set the position to a positive integer.
|
|
318
|
-
|
|
319
|
-
Both sync and async reply functions can be registered. The sync reply function will be triggered
|
|
320
|
-
from both sync and async chats. However, an async reply function will only be triggered from async
|
|
321
|
-
chats (initiated with `ConversableAgent.a_initiate_chat`). If an `async` reply function is registered
|
|
322
|
-
and a chat is initialized with a sync function, `ignore_async_in_sync_chat` determines the behaviour as follows:
|
|
323
|
-
if `ignore_async_in_sync_chat` is set to `False` (default value), an exception will be raised, and
|
|
324
|
-
if `ignore_async_in_sync_chat` is set to `True`, the reply function will be ignored.
|
|
325
|
-
|
|
326
|
-
Args:
|
|
327
|
-
trigger (Agent class, str, Agent instance, callable, or list): the trigger.
|
|
328
|
-
If a class is provided, the reply function will be called when the sender is an instance of the class.
|
|
329
|
-
If a string is provided, the reply function will be called when the sender's name matches the string.
|
|
330
|
-
If an agent instance is provided, the reply function will be called when the sender is the agent instance.
|
|
331
|
-
If a callable is provided, the reply function will be called when the callable returns True.
|
|
332
|
-
If a list is provided, the reply function will be called when any of the triggers in the list is activated.
|
|
333
|
-
If None is provided, the reply function will be called only when the sender is None.
|
|
334
|
-
Note: Be sure to register `None` as a trigger if you would like to trigger an auto-reply function with non-empty messages and `sender=None`.
|
|
335
|
-
reply_func (Callable): the reply function.
|
|
336
|
-
The function takes a recipient agent, a list of messages, a sender agent and a config as input and returns a reply message.
|
|
337
|
-
|
|
338
|
-
```python
|
|
339
|
-
def reply_func(
|
|
340
|
-
recipient: ConversableAgent,
|
|
341
|
-
messages: Optional[List[Dict]] = None,
|
|
342
|
-
sender: Optional[Agent] = None,
|
|
343
|
-
config: Optional[Any] = None,
|
|
344
|
-
) -> Tuple[bool, Union[str, Dict, None]]:
|
|
345
|
-
```
|
|
346
|
-
position (int): the position of the reply function in the reply function list.
|
|
347
|
-
The function registered later will be checked earlier by default.
|
|
348
|
-
To change the order, set the position to a positive integer.
|
|
349
|
-
config (Any): the config to be passed to the reply function.
|
|
350
|
-
When an agent is reset, the config will be reset to the original value.
|
|
351
|
-
reset_config (Callable): the function to reset the config.
|
|
352
|
-
The function returns None. Signature: ```def reset_config(config: Any)```
|
|
353
|
-
ignore_async_in_sync_chat (bool): whether to ignore the async reply function in sync chats. If `False`, an exception
|
|
354
|
-
will be raised if an async reply function is registered and a chat is initialized with a sync
|
|
355
|
-
function.
|
|
356
|
-
remove_other_reply_funcs (bool): whether to remove other reply functions when registering this reply function.
|
|
357
|
-
"""
|
|
358
|
-
if not isinstance(trigger, (type, str, Agent, Callable, list)):
|
|
359
|
-
raise ValueError("trigger must be a class, a string, an agent, a callable or a list.")
|
|
360
|
-
if remove_other_reply_funcs:
|
|
361
|
-
self._reply_func_list.clear()
|
|
362
|
-
self._reply_func_list.insert(
|
|
363
|
-
position,
|
|
364
|
-
{
|
|
365
|
-
"trigger": trigger,
|
|
366
|
-
"reply_func": reply_func,
|
|
367
|
-
"config": copy.copy(config),
|
|
368
|
-
"init_config": config,
|
|
369
|
-
"reset_config": reset_config,
|
|
370
|
-
"ignore_async_in_sync_chat": ignore_async_in_sync_chat and inspect.iscoroutinefunction(reply_func),
|
|
371
|
-
},
|
|
372
|
-
)
|
|
373
|
-
|
|
374
|
-
def replace_reply_func(self, old_reply_func: Callable, new_reply_func: Callable):
|
|
375
|
-
"""Replace a registered reply function with a new one.
|
|
376
|
-
|
|
377
|
-
Args:
|
|
378
|
-
old_reply_func (Callable): the old reply function to be replaced.
|
|
379
|
-
new_reply_func (Callable): the new reply function to replace the old one.
|
|
380
|
-
"""
|
|
381
|
-
for f in self._reply_func_list:
|
|
382
|
-
if f["reply_func"] == old_reply_func:
|
|
383
|
-
f["reply_func"] = new_reply_func
|
|
384
|
-
|
|
385
|
-
@staticmethod
|
|
386
|
-
def _get_chats_to_run(
|
|
387
|
-
chat_queue: List[Dict[str, Any]], recipient: Agent, messages: Union[str, Callable], sender: Agent, config: Any
|
|
388
|
-
) -> List[Dict[str, Any]]:
|
|
389
|
-
"""A simple chat reply function.
|
|
390
|
-
This function initiate one or a sequence of chats between the "recipient" and the agents in the
|
|
391
|
-
chat_queue.
|
|
392
|
-
|
|
393
|
-
It extracts and returns a summary from the nested chat based on the "summary_method" in each chat in chat_queue.
|
|
394
|
-
|
|
395
|
-
Returns:
|
|
396
|
-
Tuple[bool, str]: A tuple where the first element indicates the completion of the chat, and the second element contains the summary of the last chat if any chats were initiated.
|
|
397
|
-
"""
|
|
398
|
-
last_msg = messages[-1].get("content")
|
|
399
|
-
chat_to_run = []
|
|
400
|
-
for i, c in enumerate(chat_queue):
|
|
401
|
-
current_c = c.copy()
|
|
402
|
-
if current_c.get("sender") is None:
|
|
403
|
-
current_c["sender"] = recipient
|
|
404
|
-
message = current_c.get("message")
|
|
405
|
-
# If message is not provided in chat_queue, we by default use the last message from the original chat history as the first message in this nested chat (for the first chat in the chat queue).
|
|
406
|
-
# NOTE: This setting is prone to change.
|
|
407
|
-
if message is None and i == 0:
|
|
408
|
-
message = last_msg
|
|
409
|
-
if callable(message):
|
|
410
|
-
message = message(recipient, messages, sender, config)
|
|
411
|
-
# We only run chat that has a valid message. NOTE: This is prone to change dependin on applications.
|
|
412
|
-
if message:
|
|
413
|
-
current_c["message"] = message
|
|
414
|
-
chat_to_run.append(current_c)
|
|
415
|
-
return chat_to_run
|
|
416
|
-
|
|
417
|
-
@staticmethod
|
|
418
|
-
def _summary_from_nested_chats(
|
|
419
|
-
chat_queue: List[Dict[str, Any]], recipient: Agent, messages: Union[str, Callable], sender: Agent, config: Any
|
|
420
|
-
) -> Tuple[bool, Union[str, None]]:
|
|
421
|
-
"""A simple chat reply function.
|
|
422
|
-
This function initiate one or a sequence of chats between the "recipient" and the agents in the
|
|
423
|
-
chat_queue.
|
|
424
|
-
|
|
425
|
-
It extracts and returns a summary from the nested chat based on the "summary_method" in each chat in chat_queue.
|
|
426
|
-
|
|
427
|
-
Returns:
|
|
428
|
-
Tuple[bool, str]: A tuple where the first element indicates the completion of the chat, and the second element contains the summary of the last chat if any chats were initiated.
|
|
429
|
-
"""
|
|
430
|
-
chat_to_run = ConversableAgent._get_chats_to_run(chat_queue, recipient, messages, sender, config)
|
|
431
|
-
if not chat_to_run:
|
|
432
|
-
return True, None
|
|
433
|
-
res = initiate_chats(chat_to_run)
|
|
434
|
-
return True, res[-1].summary
|
|
435
|
-
|
|
436
|
-
@staticmethod
|
|
437
|
-
async def _a_summary_from_nested_chats(
|
|
438
|
-
chat_queue: List[Dict[str, Any]], recipient: Agent, messages: Union[str, Callable], sender: Agent, config: Any
|
|
439
|
-
) -> Tuple[bool, Union[str, None]]:
|
|
440
|
-
"""A simple chat reply function.
|
|
441
|
-
This function initiate one or a sequence of chats between the "recipient" and the agents in the
|
|
442
|
-
chat_queue.
|
|
443
|
-
|
|
444
|
-
It extracts and returns a summary from the nested chat based on the "summary_method" in each chat in chat_queue.
|
|
445
|
-
|
|
446
|
-
Returns:
|
|
447
|
-
Tuple[bool, str]: A tuple where the first element indicates the completion of the chat, and the second element contains the summary of the last chat if any chats were initiated.
|
|
448
|
-
"""
|
|
449
|
-
chat_to_run = ConversableAgent._get_chats_to_run(chat_queue, recipient, messages, sender, config)
|
|
450
|
-
if not chat_to_run:
|
|
451
|
-
return True, None
|
|
452
|
-
res = await a_initiate_chats(chat_to_run)
|
|
453
|
-
index_of_last_chat = chat_to_run[-1]["chat_id"]
|
|
454
|
-
return True, res[index_of_last_chat].summary
|
|
455
|
-
|
|
456
|
-
def register_nested_chats(
|
|
457
|
-
self,
|
|
458
|
-
chat_queue: List[Dict[str, Any]],
|
|
459
|
-
trigger: Union[Type[Agent], str, Agent, Callable[[Agent], bool], List],
|
|
460
|
-
reply_func_from_nested_chats: Union[str, Callable] = "summary_from_nested_chats",
|
|
461
|
-
position: int = 2,
|
|
462
|
-
use_async: Union[bool, None] = None,
|
|
463
|
-
**kwargs,
|
|
464
|
-
) -> None:
|
|
465
|
-
"""Register a nested chat reply function.
|
|
466
|
-
Args:
|
|
467
|
-
chat_queue (list): a list of chat objects to be initiated. If use_async is used, then all messages in chat_queue must have a chat-id associated with them.
|
|
468
|
-
trigger (Agent class, str, Agent instance, callable, or list): refer to `register_reply` for details.
|
|
469
|
-
reply_func_from_nested_chats (Callable, str): the reply function for the nested chat.
|
|
470
|
-
The function takes a chat_queue for nested chat, recipient agent, a list of messages, a sender agent and a config as input and returns a reply message.
|
|
471
|
-
Default to "summary_from_nested_chats", which corresponds to a built-in reply function that get summary from the nested chat_queue.
|
|
472
|
-
```python
|
|
473
|
-
def reply_func_from_nested_chats(
|
|
474
|
-
chat_queue: List[Dict],
|
|
475
|
-
recipient: ConversableAgent,
|
|
476
|
-
messages: Optional[List[Dict]] = None,
|
|
477
|
-
sender: Optional[Agent] = None,
|
|
478
|
-
config: Optional[Any] = None,
|
|
479
|
-
) -> Tuple[bool, Union[str, Dict, None]]:
|
|
480
|
-
```
|
|
481
|
-
position (int): Ref to `register_reply` for details. Default to 2. It means we first check the termination and human reply, then check the registered nested chat reply.
|
|
482
|
-
use_async: Uses a_initiate_chats internally to start nested chats. If the original chat is initiated with a_initiate_chats, you may set this to true so nested chats do not run in sync.
|
|
483
|
-
kwargs: Ref to `register_reply` for details.
|
|
484
|
-
"""
|
|
485
|
-
if use_async:
|
|
486
|
-
for chat in chat_queue:
|
|
487
|
-
if chat.get("chat_id") is None:
|
|
488
|
-
raise ValueError("chat_id is required for async nested chats")
|
|
489
|
-
|
|
490
|
-
if use_async:
|
|
491
|
-
if reply_func_from_nested_chats == "summary_from_nested_chats":
|
|
492
|
-
reply_func_from_nested_chats = self._a_summary_from_nested_chats
|
|
493
|
-
if not callable(reply_func_from_nested_chats) or not inspect.iscoroutinefunction(
|
|
494
|
-
reply_func_from_nested_chats
|
|
495
|
-
):
|
|
496
|
-
raise ValueError("reply_func_from_nested_chats must be a callable and a coroutine")
|
|
497
|
-
|
|
498
|
-
async def wrapped_reply_func(recipient, messages=None, sender=None, config=None):
|
|
499
|
-
return await reply_func_from_nested_chats(chat_queue, recipient, messages, sender, config)
|
|
500
|
-
|
|
501
|
-
else:
|
|
502
|
-
if reply_func_from_nested_chats == "summary_from_nested_chats":
|
|
503
|
-
reply_func_from_nested_chats = self._summary_from_nested_chats
|
|
504
|
-
if not callable(reply_func_from_nested_chats):
|
|
505
|
-
raise ValueError("reply_func_from_nested_chats must be a callable")
|
|
506
|
-
|
|
507
|
-
def wrapped_reply_func(recipient, messages=None, sender=None, config=None):
|
|
508
|
-
return reply_func_from_nested_chats(chat_queue, recipient, messages, sender, config)
|
|
509
|
-
|
|
510
|
-
functools.update_wrapper(wrapped_reply_func, reply_func_from_nested_chats)
|
|
511
|
-
|
|
512
|
-
self.register_reply(
|
|
513
|
-
trigger,
|
|
514
|
-
wrapped_reply_func,
|
|
515
|
-
position,
|
|
516
|
-
kwargs.get("config"),
|
|
517
|
-
kwargs.get("reset_config"),
|
|
518
|
-
ignore_async_in_sync_chat=(
|
|
519
|
-
not use_async if use_async is not None else kwargs.get("ignore_async_in_sync_chat")
|
|
520
|
-
),
|
|
521
|
-
)
|
|
522
|
-
|
|
523
|
-
@property
|
|
524
|
-
def system_message(self) -> str:
|
|
525
|
-
"""Return the system message."""
|
|
526
|
-
return self._oai_system_message[0]["content"]
|
|
527
|
-
|
|
528
|
-
def update_system_message(self, system_message: str) -> None:
|
|
529
|
-
"""Update the system message.
|
|
530
|
-
|
|
531
|
-
Args:
|
|
532
|
-
system_message (str): system message for the ChatCompletion inference.
|
|
533
|
-
"""
|
|
534
|
-
self._oai_system_message[0]["content"] = system_message
|
|
535
|
-
|
|
536
|
-
def update_max_consecutive_auto_reply(self, value: int, sender: Optional[Agent] = None):
|
|
537
|
-
"""Update the maximum number of consecutive auto replies.
|
|
538
|
-
|
|
539
|
-
Args:
|
|
540
|
-
value (int): the maximum number of consecutive auto replies.
|
|
541
|
-
sender (Agent): when the sender is provided, only update the max_consecutive_auto_reply for that sender.
|
|
542
|
-
"""
|
|
543
|
-
if sender is None:
|
|
544
|
-
self._max_consecutive_auto_reply = value
|
|
545
|
-
for k in self._max_consecutive_auto_reply_dict:
|
|
546
|
-
self._max_consecutive_auto_reply_dict[k] = value
|
|
547
|
-
else:
|
|
548
|
-
self._max_consecutive_auto_reply_dict[sender] = value
|
|
549
|
-
|
|
550
|
-
def max_consecutive_auto_reply(self, sender: Optional[Agent] = None) -> int:
|
|
551
|
-
"""The maximum number of consecutive auto replies."""
|
|
552
|
-
return self._max_consecutive_auto_reply if sender is None else self._max_consecutive_auto_reply_dict[sender]
|
|
553
|
-
|
|
554
|
-
@property
|
|
555
|
-
def chat_messages(self) -> Dict[Agent, List[Dict]]:
|
|
556
|
-
"""A dictionary of conversations from agent to list of messages."""
|
|
557
|
-
return self._oai_messages
|
|
558
|
-
|
|
559
|
-
def chat_messages_for_summary(self, agent: Agent) -> List[Dict]:
|
|
560
|
-
"""A list of messages as a conversation to summarize."""
|
|
561
|
-
return self._oai_messages[agent]
|
|
562
|
-
|
|
563
|
-
def last_message(self, agent: Optional[Agent] = None) -> Optional[Dict]:
|
|
564
|
-
"""The last message exchanged with the agent.
|
|
565
|
-
|
|
566
|
-
Args:
|
|
567
|
-
agent (Agent): The agent in the conversation.
|
|
568
|
-
If None and more than one agent's conversations are found, an error will be raised.
|
|
569
|
-
If None and only one conversation is found, the last message of the only conversation will be returned.
|
|
570
|
-
|
|
571
|
-
Returns:
|
|
572
|
-
The last message exchanged with the agent.
|
|
573
|
-
"""
|
|
574
|
-
if agent is None:
|
|
575
|
-
n_conversations = len(self._oai_messages)
|
|
576
|
-
if n_conversations == 0:
|
|
577
|
-
return None
|
|
578
|
-
if n_conversations == 1:
|
|
579
|
-
for conversation in self._oai_messages.values():
|
|
580
|
-
return conversation[-1]
|
|
581
|
-
raise ValueError("More than one conversation is found. Please specify the sender to get the last message.")
|
|
582
|
-
if agent not in self._oai_messages.keys():
|
|
583
|
-
raise KeyError(
|
|
584
|
-
f"The agent '{agent.name}' is not present in any conversation. No history available for this agent."
|
|
585
|
-
)
|
|
586
|
-
return self._oai_messages[agent][-1]
|
|
587
|
-
|
|
588
|
-
@property
|
|
589
|
-
def use_docker(self) -> Union[bool, str, None]:
|
|
590
|
-
"""Bool value of whether to use docker to execute the code,
|
|
591
|
-
or str value of the docker image name to use, or None when code execution is disabled.
|
|
592
|
-
"""
|
|
593
|
-
return None if self._code_execution_config is False else self._code_execution_config.get("use_docker")
|
|
594
|
-
|
|
595
|
-
@staticmethod
|
|
596
|
-
def _message_to_dict(message: Union[Dict, str]) -> Dict:
|
|
597
|
-
"""Convert a message to a dictionary.
|
|
598
|
-
|
|
599
|
-
The message can be a string or a dictionary. The string will be put in the "content" field of the new dictionary.
|
|
600
|
-
"""
|
|
601
|
-
if isinstance(message, str):
|
|
602
|
-
return {"content": message}
|
|
603
|
-
elif isinstance(message, dict):
|
|
604
|
-
return message
|
|
605
|
-
else:
|
|
606
|
-
return dict(message)
|
|
607
|
-
|
|
608
|
-
@staticmethod
|
|
609
|
-
def _normalize_name(name):
|
|
610
|
-
"""
|
|
611
|
-
LLMs sometimes ask functions while ignoring their own format requirements, this function should be used to replace invalid characters with "_".
|
|
612
|
-
|
|
613
|
-
Prefer _assert_valid_name for validating user configuration or input
|
|
614
|
-
"""
|
|
615
|
-
return re.sub(r"[^a-zA-Z0-9_-]", "_", name)[:64]
|
|
616
|
-
|
|
617
|
-
@staticmethod
|
|
618
|
-
def _assert_valid_name(name):
|
|
619
|
-
"""
|
|
620
|
-
Ensure that configured names are valid, raises ValueError if not.
|
|
621
|
-
|
|
622
|
-
For munging LLM responses use _normalize_name to ensure LLM specified names don't break the API.
|
|
623
|
-
"""
|
|
624
|
-
if not re.match(r"^[a-zA-Z0-9_-]+$", name):
|
|
625
|
-
raise ValueError(f"Invalid name: {name}. Only letters, numbers, '_' and '-' are allowed.")
|
|
626
|
-
if len(name) > 64:
|
|
627
|
-
raise ValueError(f"Invalid name: {name}. Name must be less than 64 characters.")
|
|
628
|
-
return name
|
|
629
|
-
|
|
630
|
-
def _append_oai_message(self, message: Union[Dict, str], role, conversation_id: Agent, is_sending: bool) -> bool:
|
|
631
|
-
"""Append a message to the ChatCompletion conversation.
|
|
632
|
-
|
|
633
|
-
If the message received is a string, it will be put in the "content" field of the new dictionary.
|
|
634
|
-
If the message received is a dictionary but does not have any of the three fields "content", "function_call", or "tool_calls",
|
|
635
|
-
this message is not a valid ChatCompletion message.
|
|
636
|
-
If only "function_call" or "tool_calls" is provided, "content" will be set to None if not provided, and the role of the message will be forced "assistant".
|
|
637
|
-
|
|
638
|
-
Args:
|
|
639
|
-
message (dict or str): message to be appended to the ChatCompletion conversation.
|
|
640
|
-
role (str): role of the message, can be "assistant" or "function".
|
|
641
|
-
conversation_id (Agent): id of the conversation, should be the recipient or sender.
|
|
642
|
-
is_sending (bool): If the agent (aka self) is sending to the conversation_id agent, otherwise receiving.
|
|
643
|
-
|
|
644
|
-
Returns:
|
|
645
|
-
bool: whether the message is appended to the ChatCompletion conversation.
|
|
646
|
-
"""
|
|
647
|
-
message = self._message_to_dict(message)
|
|
648
|
-
# create oai message to be appended to the oai conversation that can be passed to oai directly.
|
|
649
|
-
oai_message = {
|
|
650
|
-
k: message[k]
|
|
651
|
-
for k in ("content", "function_call", "tool_calls", "tool_responses", "tool_call_id", "name", "context")
|
|
652
|
-
if k in message and message[k] is not None
|
|
653
|
-
}
|
|
654
|
-
if "content" not in oai_message:
|
|
655
|
-
if "function_call" in oai_message or "tool_calls" in oai_message:
|
|
656
|
-
oai_message["content"] = None # if only function_call is provided, content will be set to None.
|
|
657
|
-
else:
|
|
658
|
-
return False
|
|
659
|
-
|
|
660
|
-
if message.get("role") in ["function", "tool"]:
|
|
661
|
-
oai_message["role"] = message.get("role")
|
|
662
|
-
if "tool_responses" in oai_message:
|
|
663
|
-
for tool_response in oai_message["tool_responses"]:
|
|
664
|
-
tool_response["content"] = str(tool_response["content"])
|
|
665
|
-
elif "override_role" in message:
|
|
666
|
-
# If we have a direction to override the role then set the
|
|
667
|
-
# role accordingly. Used to customise the role for the
|
|
668
|
-
# select speaker prompt.
|
|
669
|
-
oai_message["role"] = message.get("override_role")
|
|
670
|
-
else:
|
|
671
|
-
oai_message["role"] = role
|
|
672
|
-
|
|
673
|
-
if oai_message.get("function_call", False) or oai_message.get("tool_calls", False):
|
|
674
|
-
oai_message["role"] = "assistant" # only messages with role 'assistant' can have a function call.
|
|
675
|
-
elif "name" not in oai_message:
|
|
676
|
-
# If we don't have a name field, append it
|
|
677
|
-
if is_sending:
|
|
678
|
-
oai_message["name"] = self.name
|
|
679
|
-
else:
|
|
680
|
-
oai_message["name"] = conversation_id.name
|
|
681
|
-
|
|
682
|
-
self._oai_messages[conversation_id].append(oai_message)
|
|
683
|
-
|
|
684
|
-
return True
|
|
685
|
-
|
|
686
|
-
def _process_message_before_send(
|
|
687
|
-
self, message: Union[Dict, str], recipient: Agent, silent: bool
|
|
688
|
-
) -> Union[Dict, str]:
|
|
689
|
-
"""Process the message before sending it to the recipient."""
|
|
690
|
-
hook_list = self.hook_lists["process_message_before_send"]
|
|
691
|
-
for hook in hook_list:
|
|
692
|
-
message = hook(
|
|
693
|
-
sender=self, message=message, recipient=recipient, silent=ConversableAgent._is_silent(self, silent)
|
|
694
|
-
)
|
|
695
|
-
return message
|
|
696
|
-
|
|
697
|
-
def send(
|
|
698
|
-
self,
|
|
699
|
-
message: Union[Dict, str],
|
|
700
|
-
recipient: Agent,
|
|
701
|
-
request_reply: Optional[bool] = None,
|
|
702
|
-
silent: Optional[bool] = False,
|
|
703
|
-
):
|
|
704
|
-
"""Send a message to another agent.
|
|
705
|
-
|
|
706
|
-
Args:
|
|
707
|
-
message (dict or str): message to be sent.
|
|
708
|
-
The message could contain the following fields:
|
|
709
|
-
- content (str or List): Required, the content of the message. (Can be None)
|
|
710
|
-
- function_call (str): the name of the function to be called.
|
|
711
|
-
- name (str): the name of the function to be called.
|
|
712
|
-
- role (str): the role of the message, any role that is not "function"
|
|
713
|
-
will be modified to "assistant".
|
|
714
|
-
- context (dict): the context of the message, which will be passed to
|
|
715
|
-
[OpenAIWrapper.create](../oai/client#create).
|
|
716
|
-
For example, one agent can send a message A as:
|
|
717
|
-
```python
|
|
718
|
-
{
|
|
719
|
-
"content": lambda context: context["use_tool_msg"],
|
|
720
|
-
"context": {
|
|
721
|
-
"use_tool_msg": "Use tool X if they are relevant."
|
|
722
|
-
}
|
|
723
|
-
}
|
|
724
|
-
```
|
|
725
|
-
Next time, one agent can send a message B with a different "use_tool_msg".
|
|
726
|
-
Then the content of message A will be refreshed to the new "use_tool_msg".
|
|
727
|
-
So effectively, this provides a way for an agent to send a "link" and modify
|
|
728
|
-
the content of the "link" later.
|
|
729
|
-
recipient (Agent): the recipient of the message.
|
|
730
|
-
request_reply (bool or None): whether to request a reply from the recipient.
|
|
731
|
-
silent (bool or None): (Experimental) whether to print the message sent.
|
|
732
|
-
|
|
733
|
-
Raises:
|
|
734
|
-
ValueError: if the message can't be converted into a valid ChatCompletion message.
|
|
735
|
-
"""
|
|
736
|
-
message = self._process_message_before_send(message, recipient, ConversableAgent._is_silent(self, silent))
|
|
737
|
-
# When the agent composes and sends the message, the role of the message is "assistant"
|
|
738
|
-
# unless it's "function".
|
|
739
|
-
valid = self._append_oai_message(message, "assistant", recipient, is_sending=True)
|
|
740
|
-
if valid:
|
|
741
|
-
recipient.receive(message, self, request_reply, silent)
|
|
742
|
-
else:
|
|
743
|
-
raise ValueError(
|
|
744
|
-
"Message can't be converted into a valid ChatCompletion message. Either content or function_call must be provided."
|
|
745
|
-
)
|
|
746
|
-
|
|
747
|
-
async def a_send(
|
|
748
|
-
self,
|
|
749
|
-
message: Union[Dict, str],
|
|
750
|
-
recipient: Agent,
|
|
751
|
-
request_reply: Optional[bool] = None,
|
|
752
|
-
silent: Optional[bool] = False,
|
|
753
|
-
):
|
|
754
|
-
"""(async) Send a message to another agent.
|
|
755
|
-
|
|
756
|
-
Args:
|
|
757
|
-
message (dict or str): message to be sent.
|
|
758
|
-
The message could contain the following fields:
|
|
759
|
-
- content (str or List): Required, the content of the message. (Can be None)
|
|
760
|
-
- function_call (str): the name of the function to be called.
|
|
761
|
-
- name (str): the name of the function to be called.
|
|
762
|
-
- role (str): the role of the message, any role that is not "function"
|
|
763
|
-
will be modified to "assistant".
|
|
764
|
-
- context (dict): the context of the message, which will be passed to
|
|
765
|
-
[OpenAIWrapper.create](../oai/client#create).
|
|
766
|
-
For example, one agent can send a message A as:
|
|
767
|
-
```python
|
|
768
|
-
{
|
|
769
|
-
"content": lambda context: context["use_tool_msg"],
|
|
770
|
-
"context": {
|
|
771
|
-
"use_tool_msg": "Use tool X if they are relevant."
|
|
772
|
-
}
|
|
773
|
-
}
|
|
774
|
-
```
|
|
775
|
-
Next time, one agent can send a message B with a different "use_tool_msg".
|
|
776
|
-
Then the content of message A will be refreshed to the new "use_tool_msg".
|
|
777
|
-
So effectively, this provides a way for an agent to send a "link" and modify
|
|
778
|
-
the content of the "link" later.
|
|
779
|
-
recipient (Agent): the recipient of the message.
|
|
780
|
-
request_reply (bool or None): whether to request a reply from the recipient.
|
|
781
|
-
silent (bool or None): (Experimental) whether to print the message sent.
|
|
782
|
-
|
|
783
|
-
Raises:
|
|
784
|
-
ValueError: if the message can't be converted into a valid ChatCompletion message.
|
|
785
|
-
"""
|
|
786
|
-
message = self._process_message_before_send(message, recipient, ConversableAgent._is_silent(self, silent))
|
|
787
|
-
# When the agent composes and sends the message, the role of the message is "assistant"
|
|
788
|
-
# unless it's "function".
|
|
789
|
-
valid = self._append_oai_message(message, "assistant", recipient, is_sending=True)
|
|
790
|
-
if valid:
|
|
791
|
-
await recipient.a_receive(message, self, request_reply, silent)
|
|
792
|
-
else:
|
|
793
|
-
raise ValueError(
|
|
794
|
-
"Message can't be converted into a valid ChatCompletion message. Either content or function_call must be provided."
|
|
795
|
-
)
|
|
796
|
-
|
|
797
|
-
def _print_received_message(self, message: Union[Dict, str], sender: Agent, skip_head: bool = False):
|
|
798
|
-
iostream = IOStream.get_default()
|
|
799
|
-
# print the message received
|
|
800
|
-
if not skip_head:
|
|
801
|
-
iostream.print(colored(sender.name, "yellow"), "(to", f"{self.name}):\n", flush=True)
|
|
802
|
-
message = self._message_to_dict(message)
|
|
803
|
-
|
|
804
|
-
if message.get("tool_responses"): # Handle tool multi-call responses
|
|
805
|
-
for tool_response in message["tool_responses"]:
|
|
806
|
-
self._print_received_message(tool_response, sender, skip_head=True)
|
|
807
|
-
if message.get("role") == "tool":
|
|
808
|
-
return # If role is tool, then content is just a concatenation of all tool_responses
|
|
809
|
-
|
|
810
|
-
if message.get("role") in ["function", "tool"]:
|
|
811
|
-
if message["role"] == "function":
|
|
812
|
-
id_key = "name"
|
|
813
|
-
else:
|
|
814
|
-
id_key = "tool_call_id"
|
|
815
|
-
id = message.get(id_key, "No id found")
|
|
816
|
-
func_print = f"***** Response from calling {message['role']} ({id}) *****"
|
|
817
|
-
iostream.print(colored(func_print, "green"), flush=True)
|
|
818
|
-
iostream.print(message["content"], flush=True)
|
|
819
|
-
iostream.print(colored("*" * len(func_print), "green"), flush=True)
|
|
820
|
-
else:
|
|
821
|
-
content = message.get("content")
|
|
822
|
-
if content is not None:
|
|
823
|
-
if "context" in message:
|
|
824
|
-
content = OpenAIWrapper.instantiate(
|
|
825
|
-
content,
|
|
826
|
-
message["context"],
|
|
827
|
-
self.llm_config and self.llm_config.get("allow_format_str_template", False),
|
|
828
|
-
)
|
|
829
|
-
iostream.print(content_str(content), flush=True)
|
|
830
|
-
if "function_call" in message and message["function_call"]:
|
|
831
|
-
function_call = dict(message["function_call"])
|
|
832
|
-
func_print = (
|
|
833
|
-
f"***** Suggested function call: {function_call.get('name', '(No function name found)')} *****"
|
|
834
|
-
)
|
|
835
|
-
iostream.print(colored(func_print, "green"), flush=True)
|
|
836
|
-
iostream.print(
|
|
837
|
-
"Arguments: \n",
|
|
838
|
-
function_call.get("arguments", "(No arguments found)"),
|
|
839
|
-
flush=True,
|
|
840
|
-
sep="",
|
|
841
|
-
)
|
|
842
|
-
iostream.print(colored("*" * len(func_print), "green"), flush=True)
|
|
843
|
-
if "tool_calls" in message and message["tool_calls"]:
|
|
844
|
-
for tool_call in message["tool_calls"]:
|
|
845
|
-
id = tool_call.get("id", "No tool call id found")
|
|
846
|
-
function_call = dict(tool_call.get("function", {}))
|
|
847
|
-
func_print = f"***** Suggested tool call ({id}): {function_call.get('name', '(No function name found)')} *****"
|
|
848
|
-
iostream.print(colored(func_print, "green"), flush=True)
|
|
849
|
-
iostream.print(
|
|
850
|
-
"Arguments: \n",
|
|
851
|
-
function_call.get("arguments", "(No arguments found)"),
|
|
852
|
-
flush=True,
|
|
853
|
-
sep="",
|
|
854
|
-
)
|
|
855
|
-
iostream.print(colored("*" * len(func_print), "green"), flush=True)
|
|
856
|
-
|
|
857
|
-
iostream.print("\n", "-" * 80, flush=True, sep="")
|
|
858
|
-
|
|
859
|
-
def _process_received_message(self, message: Union[Dict, str], sender: Agent, silent: bool):
|
|
860
|
-
# When the agent receives a message, the role of the message is "user". (If 'role' exists and is 'function', it will remain unchanged.)
|
|
861
|
-
valid = self._append_oai_message(message, "user", sender, is_sending=False)
|
|
862
|
-
if logging_enabled():
|
|
863
|
-
log_event(self, "received_message", message=message, sender=sender.name, valid=valid)
|
|
864
|
-
|
|
865
|
-
if not valid:
|
|
866
|
-
raise ValueError(
|
|
867
|
-
"Received message can't be converted into a valid ChatCompletion message. Either content or function_call must be provided."
|
|
868
|
-
)
|
|
869
|
-
|
|
870
|
-
if not ConversableAgent._is_silent(sender, silent):
|
|
871
|
-
self._print_received_message(message, sender)
|
|
872
|
-
|
|
873
|
-
def receive(
|
|
874
|
-
self,
|
|
875
|
-
message: Union[Dict, str],
|
|
876
|
-
sender: Agent,
|
|
877
|
-
request_reply: Optional[bool] = None,
|
|
878
|
-
silent: Optional[bool] = False,
|
|
879
|
-
):
|
|
880
|
-
"""Receive a message from another agent.
|
|
881
|
-
|
|
882
|
-
Once a message is received, this function sends a reply to the sender or stop.
|
|
883
|
-
The reply can be generated automatically or entered manually by a human.
|
|
884
|
-
|
|
885
|
-
Args:
|
|
886
|
-
message (dict or str): message from the sender. If the type is dict, it may contain the following reserved fields (either content or function_call need to be provided).
|
|
887
|
-
1. "content": content of the message, can be None.
|
|
888
|
-
2. "function_call": a dictionary containing the function name and arguments. (deprecated in favor of "tool_calls")
|
|
889
|
-
3. "tool_calls": a list of dictionaries containing the function name and arguments.
|
|
890
|
-
4. "role": role of the message, can be "assistant", "user", "function", "tool".
|
|
891
|
-
This field is only needed to distinguish between "function" or "assistant"/"user".
|
|
892
|
-
5. "name": In most cases, this field is not needed. When the role is "function", this field is needed to indicate the function name.
|
|
893
|
-
6. "context" (dict): the context of the message, which will be passed to
|
|
894
|
-
[OpenAIWrapper.create](../oai/client#create).
|
|
895
|
-
sender: sender of an Agent instance.
|
|
896
|
-
request_reply (bool or None): whether a reply is requested from the sender.
|
|
897
|
-
If None, the value is determined by `self.reply_at_receive[sender]`.
|
|
898
|
-
silent (bool or None): (Experimental) whether to print the message received.
|
|
899
|
-
|
|
900
|
-
Raises:
|
|
901
|
-
ValueError: if the message can't be converted into a valid ChatCompletion message.
|
|
902
|
-
"""
|
|
903
|
-
self._process_received_message(message, sender, silent)
|
|
904
|
-
if request_reply is False or request_reply is None and self.reply_at_receive[sender] is False:
|
|
905
|
-
return
|
|
906
|
-
reply = self.generate_reply(messages=self.chat_messages[sender], sender=sender)
|
|
907
|
-
if reply is not None:
|
|
908
|
-
self.send(reply, sender, silent=silent)
|
|
909
|
-
|
|
910
|
-
    async def a_receive(
        self,
        message: Union[Dict, str],
        sender: Agent,
        request_reply: Optional[bool] = None,
        silent: Optional[bool] = False,
    ):
        """(async) Receive a message from another agent.

        Once a message is received, this function sends a reply to the sender or stops.
        The reply can be generated automatically or entered manually by a human.

        Args:
            message (dict or str): message from the sender. If the type is dict, it may contain the following reserved fields (either content or function_call needs to be provided).
                1. "content": content of the message, can be None.
                2. "function_call": a dictionary containing the function name and arguments. (deprecated in favor of "tool_calls")
                3. "tool_calls": a list of dictionaries containing the function name and arguments.
                4. "role": role of the message, can be "assistant", "user", "function".
                    This field is only needed to distinguish between "function" or "assistant"/"user".
                5. "name": In most cases, this field is not needed. When the role is "function", this field is needed to indicate the function name.
                6. "context" (dict): the context of the message, which will be passed to
                    [OpenAIWrapper.create](../oai/client#create).
            sender: the sender Agent instance.
            request_reply (bool or None): whether a reply is requested from the sender.
                If None, the value is determined by `self.reply_at_receive[sender]`.
            silent (bool or None): (Experimental) whether to print the message received.

        Raises:
            ValueError: if the message can't be converted into a valid ChatCompletion message.
        """
        self._process_received_message(message, sender, silent)
        if request_reply is False or request_reply is None and self.reply_at_receive[sender] is False:
            return
        reply = await self.a_generate_reply(sender=sender)
        if reply is not None:
            await self.a_send(reply, sender, silent=silent)

    def _prepare_chat(
        self,
        recipient: "ConversableAgent",
        clear_history: bool,
        prepare_recipient: bool = True,
        reply_at_receive: bool = True,
    ) -> None:
        self.reset_consecutive_auto_reply_counter(recipient)
        self.reply_at_receive[recipient] = reply_at_receive
        if clear_history:
            self.clear_history(recipient)
            self._human_input = []
        if prepare_recipient:
            recipient._prepare_chat(self, clear_history, False, reply_at_receive)

    def _raise_exception_on_async_reply_functions(self) -> None:
        """Raise an exception if any async reply functions are registered.

        Raises:
            RuntimeError: if any async reply functions are registered.
        """
        reply_functions = {
            f["reply_func"] for f in self._reply_func_list if not f.get("ignore_async_in_sync_chat", False)
        }

        async_reply_functions = [f for f in reply_functions if inspect.iscoroutinefunction(f)]
        if async_reply_functions:
            msg = (
                "Async reply functions can only be used with ConversableAgent.a_initiate_chat(). The following async reply functions are found: "
                + ", ".join([f.__name__ for f in async_reply_functions])
            )

            raise RuntimeError(msg)

    def initiate_chat(
        self,
        recipient: "ConversableAgent",
        clear_history: bool = True,
        silent: Optional[bool] = False,
        cache: Optional[AbstractCache] = None,
        max_turns: Optional[int] = None,
        summary_method: Optional[Union[str, Callable]] = DEFAULT_SUMMARY_METHOD,
        summary_args: Optional[dict] = {},
        message: Optional[Union[Dict, str, Callable]] = None,
        **kwargs,
    ) -> ChatResult:
        """Initiate a chat with the recipient agent.

        Reset the consecutive auto reply counter.
        If `clear_history` is True, the chat history with the recipient agent will be cleared.

        Args:
            recipient: the recipient agent.
            clear_history (bool): whether to clear the chat history with the agent. Default is True.
            silent (bool or None): (Experimental) whether to print the messages for this conversation. Default is False.
            cache (AbstractCache or None): the cache client to be used for this conversation. Default is None.
            max_turns (int or None): the maximum number of turns for the chat between the two agents. One turn means one conversation round trip. Note that this is different from
                [max_consecutive_auto_reply](#max_consecutive_auto_reply) which is the maximum number of consecutive auto replies; and it is also different from [max_rounds in GroupChat](./groupchat#groupchat-objects) which is the maximum number of rounds in a group chat session.
                If max_turns is set to None, the chat will continue until a termination condition is met. Default is None.
            summary_method (str or callable): a method to get a summary from the chat. Default is DEFAULT_SUMMARY_METHOD, i.e., "last_msg".

                Supported strings are "last_msg" and "reflection_with_llm":
                    - when set to "last_msg", it returns the last message of the dialog as the summary.
                    - when set to "reflection_with_llm", it returns a summary extracted using an llm client.
                        `llm_config` must be set in either the recipient or sender.

                A callable summary_method should take the recipient and sender agent in a chat as input and return a string of summary. E.g.,

                ```python
                def my_summary_method(
                    sender: ConversableAgent,
                    recipient: ConversableAgent,
                    summary_args: dict,
                ):
                    return recipient.last_message(sender)["content"]
                ```
            summary_args (dict): a dictionary of arguments to be passed to the summary_method.
                One example key is "summary_prompt", and value is a string of text used to prompt an LLM-based agent (the sender or receiver agent) to reflect
                on the conversation and extract a summary when summary_method is "reflection_with_llm".
                The default summary_prompt is DEFAULT_SUMMARY_PROMPT, i.e., "Summarize takeaway from the conversation. Do not add any introductory phrases. If the intended request is NOT properly addressed, please point it out."
                Another available key is "summary_role", which is the role of the message sent to the agent in charge of summarizing. Default is "system".
            message (str, dict or Callable): the initial message to be sent to the recipient. Needs to be provided. Otherwise, input() will be called to get the initial message.
                - If a string or a dict is provided, it will be used as the initial message. `generate_init_message` is called to generate the initial message for the agent based on this string and the context.
                    If dict, it may contain the following reserved fields (either content or tool_calls need to be provided).

                    1. "content": content of the message, can be None.
                    2. "function_call": a dictionary containing the function name and arguments. (deprecated in favor of "tool_calls")
                    3. "tool_calls": a list of dictionaries containing the function name and arguments.
                    4. "role": role of the message, can be "assistant", "user", "function".
                        This field is only needed to distinguish between "function" or "assistant"/"user".
                    5. "name": In most cases, this field is not needed. When the role is "function", this field is needed to indicate the function name.
                    6. "context" (dict): the context of the message, which will be passed to
                        [OpenAIWrapper.create](../oai/client#create).

                - If a callable is provided, it will be called to get the initial message in the form of a string or a dict.
                    If the returned type is dict, it may contain the reserved fields mentioned above.

                    Example of a callable message (returning a string):

                    ```python
                    def my_message(sender: ConversableAgent, recipient: ConversableAgent, context: dict) -> Union[str, Dict]:
                        carryover = context.get("carryover", "")
                        if isinstance(message, list):
                            carryover = carryover[-1]
                        final_msg = "Write a blogpost." + "\\nContext: \\n" + carryover
                        return final_msg
                    ```

                    Example of a callable message (returning a dict):

                    ```python
                    def my_message(sender: ConversableAgent, recipient: ConversableAgent, context: dict) -> Union[str, Dict]:
                        final_msg = {}
                        carryover = context.get("carryover", "")
                        if isinstance(message, list):
                            carryover = carryover[-1]
                        final_msg["content"] = "Write a blogpost." + "\\nContext: \\n" + carryover
                        final_msg["context"] = {"prefix": "Today I feel"}
                        return final_msg
                    ```
            **kwargs: any additional information. It has the following reserved fields:
                - "carryover": a string or a list of strings to specify the carryover information to be passed to this chat.
                    If provided, we will combine this carryover (by attaching a "context: " string and the carryover content after the message content) with the "message" content when generating the initial chat
                    message in `generate_init_message`.
                - "verbose": a boolean to specify whether to print the message and carryover in a chat. Default is False.

        Raises:
            RuntimeError: if any async reply functions are registered and not ignored in sync chat.

        Returns:
            ChatResult: a ChatResult object.
        """
        _chat_info = locals().copy()
        _chat_info["sender"] = self
        consolidate_chat_info(_chat_info, uniform_sender=self)
        for agent in [self, recipient]:
            agent._raise_exception_on_async_reply_functions()
            agent.previous_cache = agent.client_cache
            agent.client_cache = cache
        if isinstance(max_turns, int):
            self._prepare_chat(recipient, clear_history, reply_at_receive=False)
            for _ in range(max_turns):
                if _ == 0:
                    if isinstance(message, Callable):
                        msg2send = message(_chat_info["sender"], _chat_info["recipient"], kwargs)
                    else:
                        msg2send = self.generate_init_message(message, **kwargs)
                else:
                    msg2send = self.generate_reply(messages=self.chat_messages[recipient], sender=recipient)
                if msg2send is None:
                    break
                self.send(msg2send, recipient, request_reply=True, silent=silent)
        else:
            self._prepare_chat(recipient, clear_history)
            if isinstance(message, Callable):
                msg2send = message(_chat_info["sender"], _chat_info["recipient"], kwargs)
            else:
                msg2send = self.generate_init_message(message, **kwargs)
            self.send(msg2send, recipient, silent=silent)
        summary = self._summarize_chat(
            summary_method,
            summary_args,
            recipient,
            cache=cache,
        )
        for agent in [self, recipient]:
            agent.client_cache = agent.previous_cache
            agent.previous_cache = None
        chat_result = ChatResult(
            chat_history=self.chat_messages[recipient],
            summary=summary,
            cost=gather_usage_summary([self, recipient]),
            human_input=self._human_input,
        )
        return chat_result

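    # --- Illustrative sketch (editor's addition, not part of the original module) ---
    # A minimal two-agent chat driven by `initiate_chat`. The `config_list`
    # value is a placeholder; the agent classes are part of this package.
    #
    #     import autogen
    #
    #     assistant = autogen.AssistantAgent("assistant", llm_config={"config_list": config_list})
    #     user = autogen.UserProxyAgent("user", human_input_mode="NEVER", code_execution_config=False)
    #     result = user.initiate_chat(
    #         assistant,
    #         message="What is 2**10?",
    #         max_turns=2,
    #         summary_method="last_msg",
    #     )
    #     print(result.summary)       # summary per `summary_method`
    #     print(result.chat_history)  # full message list
    #     print(result.cost)          # usage gathered from both agents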
    async def a_initiate_chat(
        self,
        recipient: "ConversableAgent",
        clear_history: bool = True,
        silent: Optional[bool] = False,
        cache: Optional[AbstractCache] = None,
        max_turns: Optional[int] = None,
        summary_method: Optional[Union[str, Callable]] = DEFAULT_SUMMARY_METHOD,
        summary_args: Optional[dict] = {},
        message: Optional[Union[str, Callable]] = None,
        **kwargs,
    ) -> ChatResult:
        """(async) Initiate a chat with the recipient agent.

        Reset the consecutive auto reply counter.
        If `clear_history` is True, the chat history with the recipient agent will be cleared.
        `a_generate_init_message` is called to generate the initial message for the agent.

        Args: Please refer to `initiate_chat`.

        Returns:
            ChatResult: a ChatResult object.
        """
        _chat_info = locals().copy()
        _chat_info["sender"] = self
        consolidate_chat_info(_chat_info, uniform_sender=self)
        for agent in [self, recipient]:
            agent.previous_cache = agent.client_cache
            agent.client_cache = cache
        if isinstance(max_turns, int):
            self._prepare_chat(recipient, clear_history, reply_at_receive=False)
            for _ in range(max_turns):
                if _ == 0:
                    if isinstance(message, Callable):
                        msg2send = message(_chat_info["sender"], _chat_info["recipient"], kwargs)
                    else:
                        msg2send = await self.a_generate_init_message(message, **kwargs)
                else:
                    msg2send = await self.a_generate_reply(messages=self.chat_messages[recipient], sender=recipient)
                if msg2send is None:
                    break
                await self.a_send(msg2send, recipient, request_reply=True, silent=silent)
        else:
            self._prepare_chat(recipient, clear_history)
            if isinstance(message, Callable):
                msg2send = message(_chat_info["sender"], _chat_info["recipient"], kwargs)
            else:
                msg2send = await self.a_generate_init_message(message, **kwargs)
            await self.a_send(msg2send, recipient, silent=silent)
        summary = self._summarize_chat(
            summary_method,
            summary_args,
            recipient,
            cache=cache,
        )
        for agent in [self, recipient]:
            agent.client_cache = agent.previous_cache
            agent.previous_cache = None
        chat_result = ChatResult(
            chat_history=self.chat_messages[recipient],
            summary=summary,
            cost=gather_usage_summary([self, recipient]),
            human_input=self._human_input,
        )
        return chat_result

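    # --- Illustrative sketch (editor's addition, not part of the original module) ---
    # The async variant is awaited inside an event loop; agent setup is assumed
    # to match the sync example above.
    #
    #     import asyncio
    #
    #     async def main():
    #         result = await user.a_initiate_chat(assistant, message="Ping?", max_turns=1)
    #         print(result.summary)
    #
    #     asyncio.run(main())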
    def _summarize_chat(
        self,
        summary_method,
        summary_args,
        recipient: Optional[Agent] = None,
        cache: Optional[AbstractCache] = None,
    ) -> str:
        """Get a chat summary from an agent participating in a chat.

        Args:
            summary_method (str or callable): the summary_method to get the summary.
                The callable summary_method should take the recipient and sender agent in a chat as input and return a string of summary. E.g.,
                ```python
                def my_summary_method(
                    sender: ConversableAgent,
                    recipient: ConversableAgent,
                    summary_args: dict,
                ):
                    return recipient.last_message(sender)["content"]
                ```
            summary_args (dict): a dictionary of arguments to be passed to the summary_method.
            recipient: the recipient agent in a chat.
            cache (AbstractCache or None): the cache client used when the summary requires an LLM call, e.g., when summary_method is "reflection_with_llm".

        Returns:
            str: a chat summary from the agent.
        """
        summary = ""
        if summary_method is None:
            return summary
        if "cache" not in summary_args:
            summary_args["cache"] = cache
        if summary_method == "reflection_with_llm":
            summary_method = self._reflection_with_llm_as_summary
        elif summary_method == "last_msg":
            summary_method = self._last_msg_as_summary

        if isinstance(summary_method, Callable):
            summary = summary_method(self, recipient, summary_args)
        else:
            raise ValueError(
                "If not None, the summary_method must be a string from [`reflection_with_llm`, `last_msg`] or a callable."
            )
        return summary

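    # --- Illustrative sketch (editor's addition, not part of the original module) ---
    # How the "reflection_with_llm" path is typically reached from
    # `initiate_chat`; the prompt text is a placeholder.
    #
    #     result = user.initiate_chat(
    #         assistant,
    #         message="Draft a release note.",
    #         summary_method="reflection_with_llm",
    #         summary_args={
    #             "summary_prompt": "List the concrete decisions made in this chat.",
    #             "summary_role": "system",  # role of the summarizing message
    #         },
    #     )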
    @staticmethod
    def _last_msg_as_summary(sender, recipient, summary_args) -> str:
        """Get a chat summary from the last message of the recipient."""
        summary = ""
        try:
            content = recipient.last_message(sender)["content"]
            if isinstance(content, str):
                summary = content.replace("TERMINATE", "")
            elif isinstance(content, list):
                # Remove the `TERMINATE` word in the content list.
                summary = "\n".join(
                    x["text"].replace("TERMINATE", "") for x in content if isinstance(x, dict) and "text" in x
                )
        except (IndexError, AttributeError) as e:
            warnings.warn(f"Cannot extract summary using last_msg: {e}. Using an empty str as summary.", UserWarning)
        return summary

    @staticmethod
    def _reflection_with_llm_as_summary(sender, recipient, summary_args):
        prompt = summary_args.get("summary_prompt")
        prompt = ConversableAgent.DEFAULT_SUMMARY_PROMPT if prompt is None else prompt
        if not isinstance(prompt, str):
            raise ValueError("The summary_prompt must be a string.")
        msg_list = recipient.chat_messages_for_summary(sender)
        agent = sender if recipient is None else recipient
        role = summary_args.get("summary_role", None)
        if role and not isinstance(role, str):
            raise ValueError("The summary_role in summary_args must be a string.")
        try:
            summary = sender._reflection_with_llm(
                prompt, msg_list, llm_agent=agent, cache=summary_args.get("cache"), role=role
            )
        except BadRequestError as e:
            warnings.warn(
                f"Cannot extract summary using reflection_with_llm: {e}. Using an empty str as summary.", UserWarning
            )
            summary = ""
        return summary

    def _reflection_with_llm(
        self,
        prompt,
        messages,
        llm_agent: Optional[Agent] = None,
        cache: Optional[AbstractCache] = None,
        role: Union[str, None] = None,
    ) -> str:
        """Get a chat summary using reflection with an llm client based on the conversation history.

        Args:
            prompt (str): The prompt (in this method it is used as system prompt) used to get the summary.
            messages (list): The messages generated as part of a chat conversation.
            llm_agent: the agent with an llm client.
            cache (AbstractCache or None): the cache client to be used for this conversation.
            role (str): the role of the message, usually "system" or "user". Default is "system".
        """
        if not role:
            role = "system"

        system_msg = [
            {
                "role": role,
                "content": prompt,
            }
        ]

        messages = messages + system_msg
        if llm_agent and llm_agent.client is not None:
            llm_client = llm_agent.client
        elif self.client is not None:
            llm_client = self.client
        else:
            raise ValueError("No OpenAIWrapper client is found.")
        response = self._generate_oai_reply_from_client(llm_client=llm_client, messages=messages, cache=cache)
        return response

    def _check_chat_queue_for_sender(self, chat_queue: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        """
        Check the chat queue and add the "sender" key if it's missing.

        Args:
            chat_queue (List[Dict[str, Any]]): A list of dictionaries containing chat information.

        Returns:
            List[Dict[str, Any]]: A new list of dictionaries with the "sender" key added if it was missing.
        """
        chat_queue_with_sender = []
        for chat_info in chat_queue:
            if chat_info.get("sender") is None:
                chat_info["sender"] = self
            chat_queue_with_sender.append(chat_info)
        return chat_queue_with_sender

    def initiate_chats(self, chat_queue: List[Dict[str, Any]]) -> List[ChatResult]:
        """(Experimental) Initiate chats with multiple agents.

        Args:
            chat_queue (List[Dict]): a list of dictionaries containing the information of the chats.
                Each dictionary should contain the input arguments for [`initiate_chat`](conversable_agent#initiate_chat).

        Returns: a list of ChatResult objects corresponding to the finished chats in the chat_queue.
        """
        _chat_queue = self._check_chat_queue_for_sender(chat_queue)
        self._finished_chats = initiate_chats(_chat_queue)
        return self._finished_chats

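    # --- Illustrative sketch (editor's addition, not part of the original module) ---
    # Each queue entry mirrors the keyword arguments of `initiate_chat`;
    # "recipient" and "message" are the usual minimum. The agent names here are
    # placeholders.
    #
    #     results = user.initiate_chats(
    #         [
    #             {"recipient": researcher, "message": "Collect three sources.", "max_turns": 2},
    #             {"recipient": writer, "message": "Draft a summary.", "summary_method": "reflection_with_llm"},
    #         ]
    #     )
    #     for r in results:
    #         print(r.summary)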
    async def a_initiate_chats(self, chat_queue: List[Dict[str, Any]]) -> Dict[int, ChatResult]:
        _chat_queue = self._check_chat_queue_for_sender(chat_queue)
        self._finished_chats = await a_initiate_chats(_chat_queue)
        return self._finished_chats

    def get_chat_results(self, chat_index: Optional[int] = None) -> Union[List[ChatResult], ChatResult]:
        """Return the result(s) of previously finished chats: a single ChatResult when chat_index is given, otherwise the full list."""
        if chat_index is not None:
            return self._finished_chats[chat_index]
        else:
            return self._finished_chats

    def reset(self):
        """Reset the agent."""
        self.clear_history()
        self.reset_consecutive_auto_reply_counter()
        self.stop_reply_at_receive()
        if self.client is not None:
            self.client.clear_usage_summary()
        for reply_func_tuple in self._reply_func_list:
            if reply_func_tuple["reset_config"] is not None:
                reply_func_tuple["reset_config"](reply_func_tuple["config"])
            else:
                reply_func_tuple["config"] = copy.copy(reply_func_tuple["init_config"])

    def stop_reply_at_receive(self, sender: Optional[Agent] = None):
        """Reset the reply_at_receive of the sender."""
        if sender is None:
            self.reply_at_receive.clear()
        else:
            self.reply_at_receive[sender] = False

    def reset_consecutive_auto_reply_counter(self, sender: Optional[Agent] = None):
        """Reset the consecutive_auto_reply_counter of the sender."""
        if sender is None:
            self._consecutive_auto_reply_counter.clear()
        else:
            self._consecutive_auto_reply_counter[sender] = 0

    def clear_history(self, recipient: Optional[Agent] = None, nr_messages_to_preserve: Optional[int] = None):
        """Clear the chat history of the agent.

        Args:
            recipient: the agent whose chat history with this agent to clear. If None, clear the chat history with all agents.
            nr_messages_to_preserve: the number of newest messages to preserve in the chat history.
        """
        iostream = IOStream.get_default()
        if recipient is None:
            if nr_messages_to_preserve:
                for key in self._oai_messages:
                    nr_messages_to_preserve_internal = nr_messages_to_preserve
                    # if breaking history between function call and function response, save function call message
                    # additionally, otherwise openai will return error
                    first_msg_to_save = self._oai_messages[key][-nr_messages_to_preserve_internal]
                    if "tool_responses" in first_msg_to_save:
                        nr_messages_to_preserve_internal += 1
                        iostream.print(
                            f"Preserving one more message for {self.name} to not divide history between tool call and "
                            f"tool response."
                        )
                    # Remove messages from history except last `nr_messages_to_preserve` messages.
                    self._oai_messages[key] = self._oai_messages[key][-nr_messages_to_preserve_internal:]
            else:
                self._oai_messages.clear()
        else:
            self._oai_messages[recipient].clear()
            if nr_messages_to_preserve:
                iostream.print(
                    colored(
                        "WARNING: `nr_messages_to_preserve` is ignored when clearing chat history with a specific agent.",
                        "yellow",
                    ),
                    flush=True,
                )

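    # --- Illustrative sketch (editor's addition, not part of the original module) ---
    # Trimming all chat histories down to the last two messages, versus clearing
    # the history with one specific counterpart:
    #
    #     user.clear_history(nr_messages_to_preserve=2)  # trim every history
    #     user.clear_history(recipient=assistant)        # drop history with `assistant` only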
    def generate_oai_reply(
        self,
        messages: Optional[List[Dict]] = None,
        sender: Optional[Agent] = None,
        config: Optional[OpenAIWrapper] = None,
    ) -> Tuple[bool, Union[str, Dict, None]]:
        """Generate a reply using autogen.oai."""
        client = self.client if config is None else config
        if client is None:
            return False, None
        if messages is None:
            messages = self._oai_messages[sender]
        extracted_response = self._generate_oai_reply_from_client(
            client, self._oai_system_message + messages, self.client_cache
        )
        return (False, None) if extracted_response is None else (True, extracted_response)

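    # --- Illustrative sketch (editor's addition, not part of the original module) ---
    # Reply functions follow the (final, reply) protocol used throughout this
    # class, so `generate_oai_reply` can be invoked directly with an explicit
    # message list (assuming `assistant` has an LLM client configured):
    #
    #     final, reply = assistant.generate_oai_reply(
    #         messages=[{"role": "user", "content": "Say hello in French."}]
    #     )
    #     if final:
    #         print(reply)  # the extracted completion (str or dict)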
    def _generate_oai_reply_from_client(self, llm_client, messages, cache) -> Union[str, Dict, None]:
        # unroll tool_responses
        all_messages = []
        for message in messages:
            tool_responses = message.get("tool_responses", [])
            if tool_responses:
                all_messages += tool_responses
                # tool role on the parent message means the content is just concatenation of all of the tool_responses
                if message.get("role") != "tool":
                    all_messages.append({key: message[key] for key in message if key != "tool_responses"})
            else:
                all_messages.append(message)

        # TODO: #1143 handle token limit exceeded error
        response = llm_client.create(
            context=messages[-1].pop("context", None), messages=all_messages, cache=cache, agent=self
        )
        extracted_response = llm_client.extract_text_or_completion_object(response)[0]

        if extracted_response is None:
            warnings.warn(f"Extracted_response from {response} is None.", UserWarning)
            return None
        # ensure function and tool calls will be accepted when sent back to the LLM
        if not isinstance(extracted_response, str) and hasattr(extracted_response, "model_dump"):
            extracted_response = model_dump(extracted_response)
        if isinstance(extracted_response, dict):
            if extracted_response.get("function_call"):
                extracted_response["function_call"]["name"] = self._normalize_name(
                    extracted_response["function_call"]["name"]
                )
            for tool_call in extracted_response.get("tool_calls") or []:
                tool_call["function"]["name"] = self._normalize_name(tool_call["function"]["name"])
                # Remove id and type if they are not present.
                # This is to make the tool call object compatible with Mistral API.
                if tool_call.get("id") is None:
                    tool_call.pop("id")
                if tool_call.get("type") is None:
                    tool_call.pop("type")
        return extracted_response

    async def a_generate_oai_reply(
        self,
        messages: Optional[List[Dict]] = None,
        sender: Optional[Agent] = None,
        config: Optional[Any] = None,
    ) -> Tuple[bool, Union[str, Dict, None]]:
        """Generate a reply using autogen.oai asynchronously."""
        iostream = IOStream.get_default()

        def _generate_oai_reply(
            self, iostream: IOStream, *args: Any, **kwargs: Any
        ) -> Tuple[bool, Union[str, Dict, None]]:
            with IOStream.set_default(iostream):
                return self.generate_oai_reply(*args, **kwargs)

        return await asyncio.get_event_loop().run_in_executor(
            None,
            functools.partial(
                _generate_oai_reply, self=self, iostream=iostream, messages=messages, sender=sender, config=config
            ),
        )

    def _generate_code_execution_reply_using_executor(
        self,
        messages: Optional[List[Dict]] = None,
        sender: Optional[Agent] = None,
        config: Optional[Union[Dict, Literal[False]]] = None,
    ):
        """Generate a reply using code executor."""
        iostream = IOStream.get_default()

        if config is not None:
            raise ValueError("config is not supported for _generate_code_execution_reply_using_executor.")
        if self._code_execution_config is False:
            return False, None
        if messages is None:
            messages = self._oai_messages[sender]
        last_n_messages = self._code_execution_config.get("last_n_messages", "auto")

        if not (isinstance(last_n_messages, (int, float)) and last_n_messages >= 0) and last_n_messages != "auto":
            raise ValueError("last_n_messages must be either a non-negative integer, or the string 'auto'.")

        num_messages_to_scan = last_n_messages
        if last_n_messages == "auto":
            # Find when the agent last spoke
            num_messages_to_scan = 0
            for message in reversed(messages):
                if "role" not in message:
                    break
                elif message["role"] != "user":
                    break
                else:
                    num_messages_to_scan += 1
        num_messages_to_scan = min(len(messages), num_messages_to_scan)
        messages_to_scan = messages[-num_messages_to_scan:]

        # iterate through the last n messages in reverse
        # if code blocks are found, execute the code blocks and return the output
        # if no code blocks are found, continue
        for message in reversed(messages_to_scan):
            if not message["content"]:
                continue
            code_blocks = self._code_executor.code_extractor.extract_code_blocks(message["content"])
            if len(code_blocks) == 0:
                continue

            num_code_blocks = len(code_blocks)
            if num_code_blocks == 1:
                iostream.print(
                    colored(
                        f"\n>>>>>>>> EXECUTING CODE BLOCK (inferred language is {code_blocks[0].language})...",
                        "red",
                    ),
                    flush=True,
                )
            else:
                iostream.print(
                    colored(
                        f"\n>>>>>>>> EXECUTING {num_code_blocks} CODE BLOCKS (inferred languages are [{', '.join([x.language for x in code_blocks])}])...",
                        "red",
                    ),
                    flush=True,
                )

            # found code blocks, execute code.
            code_result = self._code_executor.execute_code_blocks(code_blocks)
            exitcode2str = "execution succeeded" if code_result.exit_code == 0 else "execution failed"
            return True, f"exitcode: {code_result.exit_code} ({exitcode2str})\nCode output: {code_result.output}"

        return False, None

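    # --- Illustrative sketch (editor's addition, not part of the original module) ---
    # The executor-based path above is enabled by constructing the agent with a
    # `code_execution_config` that names an executor. A local command-line
    # executor is one option; the work_dir value is a placeholder.
    #
    #     from autogen.coding import LocalCommandLineCodeExecutor
    #
    #     executor_agent = autogen.ConversableAgent(
    #         "executor",
    #         llm_config=False,  # this agent only runs code, no LLM
    #         human_input_mode="NEVER",
    #         code_execution_config={"executor": LocalCommandLineCodeExecutor(work_dir="coding")},
    #     )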
    def generate_code_execution_reply(
        self,
        messages: Optional[List[Dict]] = None,
        sender: Optional[Agent] = None,
        config: Optional[Union[Dict, Literal[False]]] = None,
    ):
        """Generate a reply using code execution."""
        code_execution_config = config if config is not None else self._code_execution_config
        if code_execution_config is False:
            return False, None
        if messages is None:
            messages = self._oai_messages[sender]
        last_n_messages = code_execution_config.pop("last_n_messages", "auto")

        if not (isinstance(last_n_messages, (int, float)) and last_n_messages >= 0) and last_n_messages != "auto":
            raise ValueError("last_n_messages must be either a non-negative integer, or the string 'auto'.")

        messages_to_scan = last_n_messages
        if last_n_messages == "auto":
            # Find when the agent last spoke
            messages_to_scan = 0
            for i in range(len(messages)):
                message = messages[-(i + 1)]
                if "role" not in message:
                    break
                elif message["role"] != "user":
                    break
                else:
                    messages_to_scan += 1

        # iterate through the last n messages in reverse
        # if code blocks are found, execute the code blocks and return the output
        # if no code blocks are found, continue
        for i in range(min(len(messages), messages_to_scan)):
            message = messages[-(i + 1)]
            if not message["content"]:
                continue
            code_blocks = extract_code(message["content"])
            if len(code_blocks) == 1 and code_blocks[0][0] == UNKNOWN:
                continue

            # found code blocks, execute code and push "last_n_messages" back
            exitcode, logs = self.execute_code_blocks(code_blocks)
            code_execution_config["last_n_messages"] = last_n_messages
            exitcode2str = "execution succeeded" if exitcode == 0 else "execution failed"
            return True, f"exitcode: {exitcode} ({exitcode2str})\nCode output: {logs}"

        # no code blocks are found, push last_n_messages back and return.
        code_execution_config["last_n_messages"] = last_n_messages

        return False, None

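    # --- Illustrative sketch (editor's addition, not part of the original module) ---
    # The legacy dict-based configuration drives this path; "last_n_messages"
    # bounds how far back the method scans for code blocks. Values are
    # placeholders.
    #
    #     legacy_runner = autogen.UserProxyAgent(
    #         "runner",
    #         human_input_mode="NEVER",
    #         code_execution_config={"work_dir": "coding", "use_docker": False, "last_n_messages": 3},
    #     )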
    def generate_function_call_reply(
        self,
        messages: Optional[List[Dict]] = None,
        sender: Optional[Agent] = None,
        config: Optional[Any] = None,
    ) -> Tuple[bool, Union[Dict, None]]:
        """
        Generate a reply using function call.

        "function_call" replaced by "tool_calls" as of [OpenAI API v1.1.0](https://github.com/openai/openai-python/releases/tag/v1.1.0)
        See https://platform.openai.com/docs/api-reference/chat/create#chat-create-functions
        """
        if config is None:
            config = self
        if messages is None:
            messages = self._oai_messages[sender]
        message = messages[-1]
        if "function_call" in message and message["function_call"]:
            func_call = message["function_call"]
            func = self._function_map.get(func_call.get("name", None), None)
            if inspect.iscoroutinefunction(func):
                try:
                    # get the running loop if it was already created
                    loop = asyncio.get_running_loop()
                    close_loop = False
                except RuntimeError:
                    # create a loop if there is no running loop
                    loop = asyncio.new_event_loop()
                    close_loop = True

                _, func_return = loop.run_until_complete(self.a_execute_function(func_call))
                if close_loop:
                    loop.close()
            else:
                _, func_return = self.execute_function(message["function_call"])
            return True, func_return
        return False, None

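    # --- Illustrative sketch (editor's addition, not part of the original module) ---
    # `_function_map` is populated via `register_function`, after which an
    # incoming "function_call" message can be resolved and executed. The
    # function name and message below are placeholders.
    #
    #     def get_time() -> str:
    #         import datetime
    #         return datetime.datetime.now().isoformat()
    #
    #     user.register_function(function_map={"get_time": get_time})
    #     final, result = user.generate_function_call_reply(
    #         messages=[{"role": "assistant", "content": None,
    #                    "function_call": {"name": "get_time", "arguments": "{}"}}]
    #     )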
    async def a_generate_function_call_reply(
        self,
        messages: Optional[List[Dict]] = None,
        sender: Optional[Agent] = None,
        config: Optional[Any] = None,
    ) -> Tuple[bool, Union[Dict, None]]:
        """
        Generate a reply using async function call.

        "function_call" replaced by "tool_calls" as of [OpenAI API v1.1.0](https://github.com/openai/openai-python/releases/tag/v1.1.0)
        See https://platform.openai.com/docs/api-reference/chat/create#chat-create-functions
        """
        if config is None:
            config = self
        if messages is None:
            messages = self._oai_messages[sender]
        message = messages[-1]
        if "function_call" in message:
            func_call = message["function_call"]
            func_name = func_call.get("name", "")
            func = self._function_map.get(func_name, None)
            if func and inspect.iscoroutinefunction(func):
                _, func_return = await self.a_execute_function(func_call)
            else:
                _, func_return = self.execute_function(func_call)
            return True, func_return

        return False, None

    def _str_for_tool_response(self, tool_response):
        return str(tool_response.get("content", ""))

    def generate_tool_calls_reply(
        self,
        messages: Optional[List[Dict]] = None,
        sender: Optional[Agent] = None,
        config: Optional[Any] = None,
    ) -> Tuple[bool, Union[Dict, None]]:
        """Generate a reply using tool call."""
        if config is None:
            config = self
        if messages is None:
            messages = self._oai_messages[sender]
        message = messages[-1]
        tool_returns = []
        for tool_call in message.get("tool_calls", []):
            function_call = tool_call.get("function", {})
            func = self._function_map.get(function_call.get("name", None), None)
            if inspect.iscoroutinefunction(func):
                try:
                    # get the running loop if it was already created
                    loop = asyncio.get_running_loop()
                    close_loop = False
                except RuntimeError:
                    # create a loop if there is no running loop
                    loop = asyncio.new_event_loop()
                    close_loop = True

                _, func_return = loop.run_until_complete(self.a_execute_function(function_call))
                if close_loop:
                    loop.close()
            else:
                _, func_return = self.execute_function(function_call)
            content = func_return.get("content", "")
            if content is None:
                content = ""
            tool_call_id = tool_call.get("id", None)
            if tool_call_id is not None:
                tool_call_response = {
                    "tool_call_id": tool_call_id,
                    "role": "tool",
                    "content": content,
                }
            else:
                # Do not include tool_call_id if it is not present.
                # This is to make the tool call object compatible with Mistral API.
                tool_call_response = {
                    "role": "tool",
                    "content": content,
                }
            tool_returns.append(tool_call_response)
        if tool_returns:
            return True, {
                "role": "tool",
                "tool_responses": tool_returns,
                "content": "\n\n".join([self._str_for_tool_response(tool_return) for tool_return in tool_returns]),
            }
        return False, None

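    # --- Illustrative sketch (editor's addition, not part of the original module) ---
    # Shape of the tool-call message this method consumes, and of the aggregate
    # reply it produces (one "tool_responses" entry per call); the id and
    # function name are placeholders, and `get_time` is assumed registered as
    # in the earlier sketch.
    #
    #     incoming = {
    #         "role": "assistant",
    #         "content": None,
    #         "tool_calls": [
    #             {"id": "call_1", "type": "function",
    #              "function": {"name": "get_time", "arguments": "{}"}},
    #         ],
    #     }
    #     final, reply = user.generate_tool_calls_reply(messages=[incoming])
    #     # reply == {"role": "tool", "tool_responses": [...], "content": "..."}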
    async def _a_execute_tool_call(self, tool_call):
        id = tool_call["id"]
        function_call = tool_call.get("function", {})
        _, func_return = await self.a_execute_function(function_call)
        return {
            "tool_call_id": id,
            "role": "tool",
            "content": func_return.get("content", ""),
        }

    async def a_generate_tool_calls_reply(
        self,
        messages: Optional[List[Dict]] = None,
        sender: Optional[Agent] = None,
        config: Optional[Any] = None,
    ) -> Tuple[bool, Union[Dict, None]]:
        """Generate a reply using async function call."""
        if config is None:
            config = self
        if messages is None:
            messages = self._oai_messages[sender]
        message = messages[-1]
        async_tool_calls = []
        for tool_call in message.get("tool_calls", []):
            async_tool_calls.append(self._a_execute_tool_call(tool_call))
        if async_tool_calls:
            tool_returns = await asyncio.gather(*async_tool_calls)
            return True, {
                "role": "tool",
                "tool_responses": tool_returns,
                "content": "\n\n".join([self._str_for_tool_response(tool_return) for tool_return in tool_returns]),
            }

        return False, None

    def check_termination_and_human_reply(
        self,
        messages: Optional[List[Dict]] = None,
        sender: Optional[Agent] = None,
        config: Optional[Any] = None,
    ) -> Tuple[bool, Union[str, None]]:
        """Check if the conversation should be terminated, and if a human reply is provided.

        This method checks for conditions that require the conversation to be terminated, such as reaching
        a maximum number of consecutive auto-replies or encountering a termination message. Additionally,
        it prompts for and processes human input based on the configured human input mode, which can be
        'ALWAYS', 'NEVER', or 'TERMINATE'. The method also manages the consecutive auto-reply counter
        for the conversation and prints relevant messages based on the human input received.

        Args:
            - messages (Optional[List[Dict]]): A list of message dictionaries, representing the conversation history.
            - sender (Optional[Agent]): The agent object representing the sender of the message.
            - config (Optional[Any]): Configuration object, defaults to the current instance if not provided.

        Returns:
            - Tuple[bool, Union[str, Dict, None]]: A tuple containing a boolean indicating if the conversation
            should be terminated, and a human reply which can be a string, a dictionary, or None.
        """
        iostream = IOStream.get_default()

        if config is None:
            config = self
        if messages is None:
            messages = self._oai_messages[sender] if sender else []
        message = messages[-1]
        reply = ""
        no_human_input_msg = ""
        sender_name = "the sender" if sender is None else sender.name
        if self.human_input_mode == "ALWAYS":
            reply = self.get_human_input(
                f"Replying as {self.name}. Provide feedback to {sender_name}. Press enter to skip and use auto-reply, or type 'exit' to end the conversation: "
            )
            no_human_input_msg = "NO HUMAN INPUT RECEIVED." if not reply else ""
            # if the human input is empty, and the message is a termination message, then we will terminate the conversation
            reply = reply if reply or not self._is_termination_msg(message) else "exit"
        else:
            if self._consecutive_auto_reply_counter[sender] >= self._max_consecutive_auto_reply_dict[sender]:
                if self.human_input_mode == "NEVER":
                    reply = "exit"
                else:
                    # self.human_input_mode == "TERMINATE":
                    terminate = self._is_termination_msg(message)
                    reply = self.get_human_input(
                        f"Please give feedback to {sender_name}. Press enter or type 'exit' to stop the conversation: "
                        if terminate
                        else f"Please give feedback to {sender_name}. Press enter to skip and use auto-reply, or type 'exit' to stop the conversation: "
                    )
                    no_human_input_msg = "NO HUMAN INPUT RECEIVED." if not reply else ""
                    # if the human input is empty, and the message is a termination message, then we will terminate the conversation
                    reply = reply if reply or not terminate else "exit"
            elif self._is_termination_msg(message):
                if self.human_input_mode == "NEVER":
                    reply = "exit"
                else:
                    # self.human_input_mode == "TERMINATE":
                    reply = self.get_human_input(
                        f"Please give feedback to {sender_name}. Press enter or type 'exit' to stop the conversation: "
                    )
                    no_human_input_msg = "NO HUMAN INPUT RECEIVED." if not reply else ""
                    # if the human input is empty, and the message is a termination message, then we will terminate the conversation
                    reply = reply or "exit"

        # print the no_human_input_msg
        if no_human_input_msg:
            iostream.print(colored(f"\n>>>>>>>> {no_human_input_msg}", "red"), flush=True)

        # stop the conversation
        if reply == "exit":
            # reset the consecutive_auto_reply_counter
            self._consecutive_auto_reply_counter[sender] = 0
            return True, None

        # send the human reply
        if reply or self._max_consecutive_auto_reply_dict[sender] == 0:
            # reset the consecutive_auto_reply_counter
            self._consecutive_auto_reply_counter[sender] = 0
            # User provided a custom response, return function and tool failures indicating user interruption
            tool_returns = []
            if message.get("function_call", False):
                tool_returns.append(
                    {
                        "role": "function",
                        "name": message["function_call"].get("name", ""),
                        "content": "USER INTERRUPTED",
                    }
                )

            if message.get("tool_calls", False):
                tool_returns.extend(
                    [
                        {"role": "tool", "tool_call_id": tool_call.get("id", ""), "content": "USER INTERRUPTED"}
                        for tool_call in message["tool_calls"]
                    ]
                )

            response = {"role": "user", "content": reply}
            if tool_returns:
                response["tool_responses"] = tool_returns

            return True, response

        # increment the consecutive_auto_reply_counter
        self._consecutive_auto_reply_counter[sender] += 1
        if self.human_input_mode != "NEVER":
            iostream.print(colored("\n>>>>>>>> USING AUTO REPLY...", "red"), flush=True)

        return False, None

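    # --- Illustrative sketch (editor's addition, not part of the original module) ---
    # The three input modes checked above are set at construction time, and
    # `max_consecutive_auto_reply` bounds the auto-reply counter this method
    # manages. The values here are placeholders.
    #
    #     agent = autogen.ConversableAgent(
    #         "reviewer",
    #         llm_config={"config_list": config_list},
    #         human_input_mode="TERMINATE",  # ask a human only at termination points
    #         max_consecutive_auto_reply=5,  # then prompt for input (or exit if "NEVER")
    #         is_termination_msg=lambda m: "TERMINATE" in (m.get("content") or ""),
    #     )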
    async def a_check_termination_and_human_reply(
        self,
        messages: Optional[List[Dict]] = None,
        sender: Optional[Agent] = None,
        config: Optional[Any] = None,
    ) -> Tuple[bool, Union[str, None]]:
        """(async) Check if the conversation should be terminated, and if a human reply is provided.

        This method checks for conditions that require the conversation to be terminated, such as reaching
        a maximum number of consecutive auto-replies or encountering a termination message. Additionally,
        it prompts for and processes human input based on the configured human input mode, which can be
        'ALWAYS', 'NEVER', or 'TERMINATE'. The method also manages the consecutive auto-reply counter
        for the conversation and prints relevant messages based on the human input received.

        Args:
            - messages (Optional[List[Dict]]): A list of message dictionaries, representing the conversation history.
            - sender (Optional[Agent]): The agent object representing the sender of the message.
            - config (Optional[Any]): Configuration object, defaults to the current instance if not provided.

        Returns:
            - Tuple[bool, Union[str, Dict, None]]: A tuple containing a boolean indicating if the conversation
            should be terminated, and a human reply which can be a string, a dictionary, or None.
        """
        iostream = IOStream.get_default()

        if config is None:
            config = self
        if messages is None:
            messages = self._oai_messages[sender] if sender else []
        message = messages[-1] if messages else {}
        reply = ""
        no_human_input_msg = ""
        sender_name = "the sender" if sender is None else sender.name
        if self.human_input_mode == "ALWAYS":
            reply = await self.a_get_human_input(
                f"Replying as {self.name}. Provide feedback to {sender_name}. Press enter to skip and use auto-reply, or type 'exit' to end the conversation: "
            )
            no_human_input_msg = "NO HUMAN INPUT RECEIVED." if not reply else ""
            # if the human input is empty, and the message is a termination message, then we will terminate the conversation
            reply = reply if reply or not self._is_termination_msg(message) else "exit"
        else:
            if self._consecutive_auto_reply_counter[sender] >= self._max_consecutive_auto_reply_dict[sender]:
                if self.human_input_mode == "NEVER":
                    reply = "exit"
                else:
                    # self.human_input_mode == "TERMINATE":
                    terminate = self._is_termination_msg(message)
                    reply = await self.a_get_human_input(
                        f"Please give feedback to {sender_name}. Press enter or type 'exit' to stop the conversation: "
                        if terminate
                        else f"Please give feedback to {sender_name}. Press enter to skip and use auto-reply, or type 'exit' to stop the conversation: "
                    )
                    no_human_input_msg = "NO HUMAN INPUT RECEIVED." if not reply else ""
                    # if the human input is empty, and the message is a termination message, then we will terminate the conversation
                    reply = reply if reply or not terminate else "exit"
            elif self._is_termination_msg(message):
                if self.human_input_mode == "NEVER":
                    reply = "exit"
                else:
                    # self.human_input_mode == "TERMINATE":
                    reply = await self.a_get_human_input(
                        f"Please give feedback to {sender_name}. Press enter or type 'exit' to stop the conversation: "
                    )
                    no_human_input_msg = "NO HUMAN INPUT RECEIVED." if not reply else ""
                    # if the human input is empty, and the message is a termination message, then we will terminate the conversation
                    reply = reply or "exit"

        # print the no_human_input_msg
        if no_human_input_msg:
            iostream.print(colored(f"\n>>>>>>>> {no_human_input_msg}", "red"), flush=True)

        # stop the conversation
        if reply == "exit":
            # reset the consecutive_auto_reply_counter
            self._consecutive_auto_reply_counter[sender] = 0
            return True, None

        # send the human reply
        if reply or self._max_consecutive_auto_reply_dict[sender] == 0:
            # User provided a custom response, return function and tool results indicating user interruption
            # reset the consecutive_auto_reply_counter
            self._consecutive_auto_reply_counter[sender] = 0
            tool_returns = []
            if message.get("function_call", False):
                tool_returns.append(
                    {
                        "role": "function",
                        "name": message["function_call"].get("name", ""),
                        "content": "USER INTERRUPTED",
                    }
                )

            if message.get("tool_calls", False):
                tool_returns.extend(
                    [
                        {"role": "tool", "tool_call_id": tool_call.get("id", ""), "content": "USER INTERRUPTED"}
                        for tool_call in message["tool_calls"]
                    ]
                )

            response = {"role": "user", "content": reply}
            if tool_returns:
                response["tool_responses"] = tool_returns

            return True, response

        # increment the consecutive_auto_reply_counter
        self._consecutive_auto_reply_counter[sender] += 1
        if self.human_input_mode != "NEVER":
            iostream.print(colored("\n>>>>>>>> USING AUTO REPLY...", "red"), flush=True)

        return False, None

    def generate_reply(
        self,
        messages: Optional[List[Dict[str, Any]]] = None,
        sender: Optional["Agent"] = None,
        **kwargs: Any,
    ) -> Union[str, Dict, None]:
        """Reply based on the conversation history and the sender.

        Either messages or sender must be provided.
        Register a reply_func with `None` as one trigger for it to be activated when `messages` is non-empty and `sender` is `None`.
        Use registered auto reply functions to generate replies.
        By default, the following functions are checked in order:
        1. check_termination_and_human_reply
        2. generate_function_call_reply (deprecated in favor of tool_calls)
        3. generate_tool_calls_reply
        4. generate_code_execution_reply
        5. generate_oai_reply
        Every function returns a tuple (final, reply).
        When a function returns final=False, the next function will be checked.
        So by default, termination and human reply will be checked first.
        If not terminating and human reply is skipped, execute function or code and return the result.
        AI replies are generated only when no code execution is performed.

        Args:
            messages: a list of messages in the conversation history.
            sender: the Agent instance that sent the messages.

        Additional keyword arguments:
            exclude (List[Callable]): a list of reply functions to be excluded.

        Returns:
            str or dict or None: the generated reply. None if no reply is generated.
        """
        if all((messages is None, sender is None)):
            error_msg = f"Either {messages=} or {sender=} must be provided."
            logger.error(error_msg)
            raise AssertionError(error_msg)

        if messages is None:
            messages = self._oai_messages[sender]

        # Call the hookable method that gives registered hooks a chance to process the last message.
        # Message modifications do not affect the incoming messages or self._oai_messages.
        messages = self.process_last_received_message(messages)

        # Call the hookable method that gives registered hooks a chance to process all messages.
        # Message modifications do not affect the incoming messages or self._oai_messages.
        messages = self.process_all_messages_before_reply(messages)

        for reply_func_tuple in self._reply_func_list:
            reply_func = reply_func_tuple["reply_func"]
            if "exclude" in kwargs and reply_func in kwargs["exclude"]:
                continue
            # Skip asynchronous reply functions on this synchronous path; a_generate_reply handles them.
            if inspect.iscoroutinefunction(reply_func):
                continue
            if self._match_trigger(reply_func_tuple["trigger"], sender):
                final, reply = reply_func(self, messages=messages, sender=sender, config=reply_func_tuple["config"])
                if logging_enabled():
                    log_event(
                        self,
                        "reply_func_executed",
                        reply_func_module=reply_func.__module__,
                        reply_func_name=reply_func.__name__,
                        final=final,
                        reply=reply,
                    )
                if final:
                    return reply
        return self._default_auto_reply

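    # [Editor's example -- illustrative sketch, not part of the original source.]
    # Driving the reply chain directly with two locally constructed agents; the
    # names "alice" and "bob" are hypothetical. `llm_config=False` disables LLM
    # calls, so with no custom reply functions registered the call falls through
    # to `default_auto_reply`.
    #
    #     from autogen import ConversableAgent
    #
    #     alice = ConversableAgent(
    #         name="alice", llm_config=False, human_input_mode="NEVER", default_auto_reply="(no reply)"
    #     )
    #     bob = ConversableAgent(name="bob", llm_config=False, human_input_mode="NEVER")
    #     reply = alice.generate_reply(messages=[{"role": "user", "content": "hello"}], sender=bob)
    #     print(reply)  # "(no reply)" -- nothing earlier in the chain produced a final reply
    # [end example]
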
    async def a_generate_reply(
        self,
        messages: Optional[List[Dict[str, Any]]] = None,
        sender: Optional["Agent"] = None,
        **kwargs: Any,
    ) -> Union[str, Dict[str, Any], None]:
        """(async) Reply based on the conversation history and the sender.

        Either messages or sender must be provided.
        Register a reply_func with `None` as one trigger for it to be activated when `messages` is non-empty and `sender` is `None`.
        Use registered auto reply functions to generate replies.
        By default, the following functions are checked in order:
        1. check_termination_and_human_reply
        2. generate_function_call_reply
        3. generate_tool_calls_reply
        4. generate_code_execution_reply
        5. generate_oai_reply
        Every function returns a tuple (final, reply).
        When a function returns final=False, the next function will be checked.
        So by default, termination and human reply will be checked first.
        If not terminating and human reply is skipped, execute function or code and return the result.
        AI replies are generated only when no code execution is performed.

        Args:
            messages: a list of messages in the conversation history.
            sender: the Agent instance that sent the messages.

        Additional keyword arguments:
            exclude (List[Callable]): a list of reply functions to be excluded.

        Returns:
            str or dict or None: the generated reply. None if no reply is generated.
        """
        if all((messages is None, sender is None)):
            error_msg = f"Either {messages=} or {sender=} must be provided."
            logger.error(error_msg)
            raise AssertionError(error_msg)

        if messages is None:
            messages = self._oai_messages[sender]

        # Call the hookable method that gives registered hooks a chance to process all messages.
        # Message modifications do not affect the incoming messages or self._oai_messages.
        messages = self.process_all_messages_before_reply(messages)

        # Call the hookable method that gives registered hooks a chance to process the last message.
        # Message modifications do not affect the incoming messages or self._oai_messages.
        messages = self.process_last_received_message(messages)

        for reply_func_tuple in self._reply_func_list:
            reply_func = reply_func_tuple["reply_func"]
            if "exclude" in kwargs and reply_func in kwargs["exclude"]:
                continue

            if self._match_trigger(reply_func_tuple["trigger"], sender):
                if inspect.iscoroutinefunction(reply_func):
                    final, reply = await reply_func(
                        self, messages=messages, sender=sender, config=reply_func_tuple["config"]
                    )
                else:
                    final, reply = reply_func(self, messages=messages, sender=sender, config=reply_func_tuple["config"])
                if final:
                    return reply
        return self._default_auto_reply

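    # [Editor's example -- illustrative sketch, not part of the original source.]
    # The async variant awaits coroutine reply functions and calls sync ones
    # directly; agent names are hypothetical and no LLM is configured.
    #
    #     import asyncio
    #     from autogen import ConversableAgent
    #
    #     async def main() -> None:
    #         alice = ConversableAgent(
    #             name="alice", llm_config=False, human_input_mode="NEVER",
    #             default_auto_reply="(async no reply)",
    #         )
    #         bob = ConversableAgent(name="bob", llm_config=False, human_input_mode="NEVER")
    #         reply = await alice.a_generate_reply(
    #             messages=[{"role": "user", "content": "ping"}], sender=bob
    #         )
    #         print(reply)  # "(async no reply)"
    #
    #     asyncio.run(main())
    # [end example]
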
    def _match_trigger(self, trigger: Union[None, str, type, Agent, Callable, List], sender: Optional[Agent]) -> bool:
        """Check if the sender matches the trigger.

        Args:
            trigger (Union[None, str, type, Agent, Callable, List]): The condition to match against the sender.
                Can be `None`, a string, a type, an `Agent` instance, a callable, or a list of these.
            sender (Agent): The sender object or type to be matched against the trigger.

        Returns:
            bool: Returns `True` if the sender matches the trigger, otherwise `False`.

        Raises:
            ValueError: If the trigger type is unsupported.
        """
        if trigger is None:
            return sender is None
        elif isinstance(trigger, str):
            if sender is None:
                raise SenderRequired()
            return trigger == sender.name
        elif isinstance(trigger, type):
            # return True if the sender is an instance of the trigger type (class)
            return isinstance(sender, trigger)
        elif isinstance(trigger, Agent):
            # return True if the sender is the same agent instance as the trigger
            return trigger == sender
        elif isinstance(trigger, Callable):
            rst = trigger(sender)
            assert isinstance(rst, bool), f"trigger {trigger} must return a boolean value."
            return rst
        elif isinstance(trigger, list):
            return any(self._match_trigger(t, sender) for t in trigger)
        else:
            raise ValueError(f"Unsupported trigger type: {type(trigger)}")

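    # [Editor's example -- illustrative sketch, not part of the original source.]
    # Triggers are supplied via register_reply; a trigger may be a name, a type,
    # an agent instance, a predicate, or a list of these. The predicate and agent
    # names below are hypothetical.
    #
    #     from autogen import Agent, ConversableAgent
    #
    #     def from_planner(sender: Agent) -> bool:
    #         return sender is not None and sender.name == "planner"
    #
    #     def short_circuit(recipient, messages=None, sender=None, config=None):
    #         # reply functions return (final, reply); final=True stops the chain
    #         return True, "handled by a custom reply function"
    #
    #     responder = ConversableAgent(name="responder", llm_config=False, human_input_mode="NEVER")
    #     planner = ConversableAgent(name="planner", llm_config=False, human_input_mode="NEVER")
    #     responder.register_reply([from_planner, "reviewer"], short_circuit)
    #
    #     print(responder.generate_reply(messages=[{"role": "user", "content": "plan?"}], sender=planner))
    #     # -> "handled by a custom reply function"
    # [end example]
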
    def get_human_input(self, prompt: str) -> str:
        """Get human input.

        Override this method to customize the way to get human input.

        Args:
            prompt (str): prompt for the human input.

        Returns:
            str: human input.
        """
        iostream = IOStream.get_default()

        reply = iostream.input(prompt)
        self._human_input.append(reply)
        return reply

    async def a_get_human_input(self, prompt: str) -> str:
        """(Async) Get human input.

        Override this method to customize the way to get human input.

        Args:
            prompt (str): prompt for the human input.

        Returns:
            str: human input.
        """
        loop = asyncio.get_running_loop()
        reply = await loop.run_in_executor(None, functools.partial(self.get_human_input, prompt))
        return reply

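    # [Editor's example -- illustrative sketch, not part of the original source.]
    # A hypothetical subclass that feeds canned answers instead of prompting
    # stdin, keeping the same `_human_input` bookkeeping as the base class.
    #
    #     from autogen import ConversableAgent
    #
    #     class ScriptedHumanAgent(ConversableAgent):
    #         def __init__(self, scripted_replies, **kwargs):
    #             super().__init__(**kwargs)
    #             self._scripted_replies = list(scripted_replies)
    #
    #         def get_human_input(self, prompt: str) -> str:
    #             reply = self._scripted_replies.pop(0) if self._scripted_replies else "exit"
    #             self._human_input.append(reply)
    #             return reply
    #
    #     agent = ScriptedHumanAgent(["looks good", "exit"], name="scripted", llm_config=False)
    #     print(agent.get_human_input("> "))  # "looks good"
    # [end example]
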
    def run_code(self, code, **kwargs):
        """Run the code and return the result.

        Override this function to modify the way to run the code.

        Args:
            code (str): the code to be executed.
            **kwargs: other keyword arguments.

        Returns:
            A tuple of (exitcode, logs, image).
            exitcode (int): the exit code of the code execution.
            logs (str): the logs of the code execution.
            image (str or None): the docker image used for the code execution.
        """
        return execute_code(code, **kwargs)

    def execute_code_blocks(self, code_blocks):
        """Execute the code blocks and return the result."""
        iostream = IOStream.get_default()

        logs_all = ""
        for i, code_block in enumerate(code_blocks):
            lang, code = code_block
            if not lang:
                lang = infer_lang(code)
            iostream.print(
                colored(
                    f"\n>>>>>>>> EXECUTING CODE BLOCK {i} (inferred language is {lang})...",
                    "red",
                ),
                flush=True,
            )
            if lang in ["bash", "shell", "sh"]:
                exitcode, logs, image = self.run_code(code, lang=lang, **self._code_execution_config)
            elif lang in PYTHON_VARIANTS:
                if code.startswith("# filename: "):
                    # extract the filename that follows the "# filename: " prefix on the first line
                    filename = code[11 : code.find("\n")].strip()
                else:
                    filename = None
                exitcode, logs, image = self.run_code(
                    code,
                    lang="python",
                    filename=filename,
                    **self._code_execution_config,
                )
            else:
                # In case the language is not supported, we return an error message.
                exitcode, logs, image = (
                    1,
                    f"unknown language {lang}",
                    None,
                )
            if image is not None:
                self._code_execution_config["use_docker"] = image
            logs_all += "\n" + logs
            if exitcode != 0:
                # stop at the first failing block and return its exit code
                return exitcode, logs_all
        return exitcode, logs_all

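    # [Editor's example -- illustrative sketch, not part of the original source.]
    # code_blocks is a list of (language, code) pairs; an empty language string is
    # inferred via infer_lang. Execution requires a code_execution_config on the
    # agent, so the call is left commented; the work_dir and agent name are
    # illustrative assumptions.
    #
    #     from autogen import ConversableAgent
    #
    #     runner = ConversableAgent(
    #         name="runner",
    #         llm_config=False,
    #         human_input_mode="NEVER",
    #         code_execution_config={"work_dir": "coding", "use_docker": False},
    #     )
    #     blocks = [("python", "print('hello from a code block')"), ("sh", "echo done")]
    #     # exitcode, logs = runner.execute_code_blocks(blocks)  # stops at the first non-zero exit code
    # [end example]
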
    @staticmethod
    def _format_json_str(jstr):
        """Remove newlines outside of quotes, and handle JSON escape sequences.

        1. This function removes newlines outside of quotes; otherwise json.loads(s) would fail.
            Ex 1:
            "{\n"tool": "python",\n"query": "print('hello')\nprint('world')"\n}" -> "{"tool": "python","query": "print('hello')\nprint('world')"}"
            Ex 2:
            "{\n \"location\": \"Boston, MA\"\n}" -> "{"location": "Boston, MA"}"

        2. This function also escapes newlines and tabs inside quotes as JSON escape sequences.
            Ex 1:
            '{"args": "a\na\na\ta"}' -> '{"args": "a\\na\\na\\ta"}'
        """
        result = []
        inside_quotes = False
        last_char = " "
        for char in jstr:
            if last_char != "\\" and char == '"':
                inside_quotes = not inside_quotes
            last_char = char
            if not inside_quotes and char == "\n":
                continue
            if inside_quotes and char == "\n":
                char = "\\n"
            if inside_quotes and char == "\t":
                char = "\\t"
            result.append(char)
        return "".join(result)

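    # [Editor's example -- illustrative sketch, not part of the original source.]
    # _format_json_str is a private helper; shown here only to illustrate its
    # contract on a string with newlines both outside and inside quotes.
    #
    #     import json
    #     from autogen import ConversableAgent
    #
    #     raw = '{\n  "location": "Boston, MA",\n  "note": "line1\nline2"\n}'
    #     fixed = ConversableAgent._format_json_str(raw)
    #     print(json.loads(fixed))  # {'location': 'Boston, MA', 'note': 'line1\nline2'}
    # [end example]
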
    def execute_function(self, func_call, verbose: bool = False) -> Tuple[bool, Dict[str, Any]]:
        """Execute a function call and return the result.

        Override this function to modify the way to execute function and tool calls.

        Args:
            func_call: a dictionary extracted from openai message at "function_call" or "tool_calls" with keys "name" and "arguments".

        Returns:
            A tuple of (is_exec_success, result_dict).
            is_exec_success (boolean): whether the execution is successful.
            result_dict: a dictionary with keys "name", "role", and "content". Value of "role" is "function".

        "function_call" deprecated as of [OpenAI API v1.1.0](https://github.com/openai/openai-python/releases/tag/v1.1.0)
        See https://platform.openai.com/docs/api-reference/chat/create#chat-create-function_call
        """
        iostream = IOStream.get_default()

        func_name = func_call.get("name", "")
        func = self._function_map.get(func_name, None)

        is_exec_success = False
        arguments = None  # bound up front so the verbose print below cannot hit an undefined name
        if func is not None:
            # Extract arguments from a json-like string and put it into a dict.
            input_string = self._format_json_str(func_call.get("arguments", "{}"))
            try:
                arguments = json.loads(input_string)
            except json.JSONDecodeError as e:
                arguments = None
                content = f"Error: {e}\n The argument must be in JSON format."

            # Try to execute the function
            if arguments is not None:
                iostream.print(
                    colored(f"\n>>>>>>>> EXECUTING FUNCTION {func_name}...", "magenta"),
                    flush=True,
                )
                try:
                    content = func(**arguments)
                    is_exec_success = True
                except Exception as e:
                    content = f"Error: {e}"
        else:
            content = f"Error: Function {func_name} not found."

        if verbose:
            iostream.print(
                colored(f"\nInput arguments: {arguments}\nOutput:\n{content}", "magenta"),
                flush=True,
            )

        return is_exec_success, {
            "name": func_name,
            "role": "function",
            "content": content,
        }

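    # [Editor's example -- illustrative sketch, not part of the original source.]
    # Executing a registered function by name with JSON-encoded arguments; the
    # agent and function names are hypothetical.
    #
    #     import json
    #     from autogen import ConversableAgent
    #
    #     def add(a: int, b: int) -> int:
    #         return a + b
    #
    #     executor = ConversableAgent(name="fn_executor", llm_config=False, human_input_mode="NEVER")
    #     executor.register_function({"add": add})
    #
    #     ok, result = executor.execute_function({"name": "add", "arguments": json.dumps({"a": 2, "b": 3})})
    #     print(ok, result)  # True {'name': 'add', 'role': 'function', 'content': 5}
    # [end example]
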
    async def a_execute_function(self, func_call):
        """Execute an async function call and return the result.

        Override this function to modify the way async functions and tools are executed.

        Args:
            func_call: a dictionary extracted from openai message at key "function_call" or "tool_calls" with keys "name" and "arguments".

        Returns:
            A tuple of (is_exec_success, result_dict).
            is_exec_success (boolean): whether the execution is successful.
            result_dict: a dictionary with keys "name", "role", and "content". Value of "role" is "function".

        "function_call" deprecated as of [OpenAI API v1.1.0](https://github.com/openai/openai-python/releases/tag/v1.1.0)
        See https://platform.openai.com/docs/api-reference/chat/create#chat-create-function_call
        """
        iostream = IOStream.get_default()

        func_name = func_call.get("name", "")
        func = self._function_map.get(func_name, None)

        is_exec_success = False
        if func is not None:
            # Extract arguments from a json-like string and put it into a dict.
            input_string = self._format_json_str(func_call.get("arguments", "{}"))
            try:
                arguments = json.loads(input_string)
            except json.JSONDecodeError as e:
                arguments = None
                content = f"Error: {e}\n The argument must be in JSON format."

            # Try to execute the function
            if arguments is not None:
                iostream.print(
                    colored(f"\n>>>>>>>> EXECUTING ASYNC FUNCTION {func_name}...", "magenta"),
                    flush=True,
                )
                try:
                    if inspect.iscoroutinefunction(func):
                        content = await func(**arguments)
                    else:
                        # Fallback to sync function if the function is not async
                        content = func(**arguments)
                    is_exec_success = True
                except Exception as e:
                    content = f"Error: {e}"
        else:
            content = f"Error: Function {func_name} not found."

        return is_exec_success, {
            "name": func_name,
            "role": "function",
            "content": content,
        }

    def generate_init_message(self, message: Union[Dict, str, None], **kwargs) -> Union[str, Dict]:
        """Generate the initial message for the agent.
        If message is None, input() will be called to get the initial message.

        Args:
            message (str, dict, or None): the message to be processed.
            **kwargs: any additional information. It has the following reserved fields:
                "carryover": a string or a list of strings specifying the carryover information to be passed to this chat.
                    If provided, we will combine this carryover with the "message" content when generating the initial chat message.

        Returns:
            str or dict: the processed message.
        """
        if message is None:
            message = self.get_human_input(">")

        return self._handle_carryover(message, kwargs)

    def _handle_carryover(self, message: Union[str, Dict], kwargs: dict) -> Union[str, Dict]:
        if not kwargs.get("carryover"):
            return message

        if isinstance(message, str):
            return self._process_carryover(message, kwargs)

        elif isinstance(message, dict):
            if isinstance(message.get("content"), str):
                # Makes sure the original message is not mutated
                message = message.copy()
                message["content"] = self._process_carryover(message["content"], kwargs)
            elif isinstance(message.get("content"), list):
                # Makes sure the original message is not mutated
                message = message.copy()
                message["content"] = self._process_multimodal_carryover(message["content"], kwargs)
        else:
            raise InvalidCarryOverType("Carryover should be a string or a list of strings.")

        return message

    def _process_carryover(self, content: str, kwargs: dict) -> str:
        # Makes sure there's a carryover
        if not kwargs.get("carryover"):
            return content

        # if carryover is string
        if isinstance(kwargs["carryover"], str):
            content += "\nContext: \n" + kwargs["carryover"]
        elif isinstance(kwargs["carryover"], list):
            content += "\nContext: \n" + ("\n").join([_post_process_carryover_item(t) for t in kwargs["carryover"]])
        else:
            raise InvalidCarryOverType(
                "Carryover should be a string or a list of strings. Not adding carryover to the message."
            )
        return content

    def _process_multimodal_carryover(self, content: List[Dict], kwargs: dict) -> List[Dict]:
        """Prepends the context to a multimodal message."""
        # Makes sure there's a carryover
        if not kwargs.get("carryover"):
            return content

        return [{"type": "text", "text": self._process_carryover("", kwargs)}] + content

    async def a_generate_init_message(self, message: Union[Dict, str, None], **kwargs) -> Union[str, Dict]:
        """Generate the initial message for the agent.
        If message is None, input() will be called to get the initial message.

        Args:
            message (str, dict, or None): the message to be processed.
            **kwargs: please refer to `generate_init_message` for the description of the arguments.

        Returns:
            str or dict: the processed message.
        """
        if message is None:
            message = await self.a_get_human_input(">")

        return self._handle_carryover(message, kwargs)

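    # [Editor's example -- illustrative sketch, not part of the original source.]
    # The string-carryover path: prior context is appended under a "Context:"
    # header. The agent name and carryover strings are hypothetical.
    #
    #     from autogen import ConversableAgent
    #
    #     agent = ConversableAgent(name="carry", llm_config=False, human_input_mode="NEVER")
    #     msg = agent.generate_init_message(
    #         "Summarize the results.", carryover=["Run 1: ok", "Run 2: failed"]
    #     )
    #     print(msg)
    #     # Summarize the results.
    #     # Context:
    #     # Run 1: ok
    #     # Run 2: failed
    # [end example]
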
    def register_function(self, function_map: Dict[str, Union[Callable, None]]):
        """Register functions to the agent.

        Args:
            function_map: a dictionary mapping function names to functions. If function_map[name] is None,
                the function will be removed from the function_map.
        """
        for name, func in function_map.items():
            self._assert_valid_name(name)
            if func is None and name not in self._function_map.keys():
                warnings.warn(f"The function {name} to remove doesn't exist", UserWarning)
            if name in self._function_map:
                warnings.warn(f"Function '{name}' is being overridden.", UserWarning)
        self._function_map.update(function_map)
        self._function_map = {k: v for k, v in self._function_map.items() if v is not None}

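    # [Editor's example -- illustrative sketch, not part of the original source.]
    # Registering and then removing a function from the function map; names are
    # hypothetical.
    #
    #     from autogen import ConversableAgent
    #
    #     def greet(who: str) -> str:
    #         return f"hello, {who}"
    #
    #     agent = ConversableAgent(name="registry", llm_config=False, human_input_mode="NEVER")
    #     agent.register_function({"greet": greet})
    #     print(agent.can_execute_function("greet"))  # True
    #
    #     agent.register_function({"greet": None})   # mapping a name to None removes it
    #     print(agent.can_execute_function("greet"))  # False
    # [end example]
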
    def update_function_signature(self, func_sig: Union[str, Dict], is_remove: bool):
        """Update a function signature in the LLM configuration for function_call.

        Args:
            func_sig (str or dict): description/name of the function to add to (or remove from) the model.
                See: https://platform.openai.com/docs/api-reference/chat/create#chat/create-functions
            is_remove: whether to remove the function with name 'func_sig' from llm_config.

        Deprecated as of [OpenAI API v1.1.0](https://github.com/openai/openai-python/releases/tag/v1.1.0)
        See https://platform.openai.com/docs/api-reference/chat/create#chat-create-function_call
        """

        if not isinstance(self.llm_config, dict):
            error_msg = "To update a function signature, agent must have an llm_config"
            logger.error(error_msg)
            raise AssertionError(error_msg)

        if is_remove:
            if "functions" not in self.llm_config.keys():
                error_msg = "The agent config doesn't have function {name}.".format(name=func_sig)
                logger.error(error_msg)
                raise AssertionError(error_msg)
            else:
                self.llm_config["functions"] = [
                    func for func in self.llm_config["functions"] if func["name"] != func_sig
                ]
        else:
            if not isinstance(func_sig, dict):
                raise ValueError(
                    f"The function signature must be of the type dict. Received function signature type {type(func_sig)}"
                )

            self._assert_valid_name(func_sig["name"])
            if "functions" in self.llm_config.keys():
                if any(func["name"] == func_sig["name"] for func in self.llm_config["functions"]):
                    warnings.warn(f"Function '{func_sig['name']}' is being overridden.", UserWarning)

                self.llm_config["functions"] = [
                    func for func in self.llm_config["functions"] if func.get("name") != func_sig["name"]
                ] + [func_sig]
            else:
                self.llm_config["functions"] = [func_sig]

        if len(self.llm_config["functions"]) == 0:
            del self.llm_config["functions"]

        self.client = OpenAIWrapper(**self.llm_config)

    def update_tool_signature(self, tool_sig: Union[str, Dict], is_remove: bool):
        """Update a tool signature in the LLM configuration for tool_call.

        Args:
            tool_sig (str or dict): description/name of the tool to add to (or remove from) the model.
                See: https://platform.openai.com/docs/api-reference/chat/create#chat-create-tools
            is_remove: whether to remove the tool with name 'tool_sig' from llm_config.
        """

        if not self.llm_config:
            error_msg = "To update a tool signature, agent must have an llm_config"
            logger.error(error_msg)
            raise AssertionError(error_msg)

        if is_remove:
            if "tools" not in self.llm_config.keys():
                error_msg = "The agent config doesn't have tool {name}.".format(name=tool_sig)
                logger.error(error_msg)
                raise AssertionError(error_msg)
            else:
                self.llm_config["tools"] = [
                    tool for tool in self.llm_config["tools"] if tool["function"]["name"] != tool_sig
                ]
        else:
            if not isinstance(tool_sig, dict):
                raise ValueError(
                    f"The tool signature must be of the type dict. Received tool signature type {type(tool_sig)}"
                )
            self._assert_valid_name(tool_sig["function"]["name"])
            if "tools" in self.llm_config:
                if any(tool["function"]["name"] == tool_sig["function"]["name"] for tool in self.llm_config["tools"]):
                    warnings.warn(f"Function '{tool_sig['function']['name']}' is being overridden.", UserWarning)
                self.llm_config["tools"] = [
                    tool
                    for tool in self.llm_config["tools"]
                    if tool.get("function", {}).get("name") != tool_sig["function"]["name"]
                ] + [tool_sig]
            else:
                self.llm_config["tools"] = [tool_sig]

        if len(self.llm_config["tools"]) == 0:
            del self.llm_config["tools"]

        self.client = OpenAIWrapper(**self.llm_config)

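    # [Editor's example -- illustrative sketch, not part of the original source.]
    # An OpenAI-style tool signature dict of the shape update_tool_signature
    # expects. The api_key below is a placeholder (registration itself makes no
    # API call), and the tool name and schema are assumptions.
    #
    #     from autogen import ConversableAgent
    #
    #     assistant = ConversableAgent(
    #         name="assistant",
    #         llm_config={"config_list": [{"model": "gpt-4", "api_key": "PLACEHOLDER"}]},
    #     )
    #     weather_tool = {
    #         "type": "function",
    #         "function": {
    #             "name": "get_weather",
    #             "description": "Look up the current weather for a city.",
    #             "parameters": {
    #                 "type": "object",
    #                 "properties": {"city": {"type": "string"}},
    #                 "required": ["city"],
    #             },
    #         },
    #     }
    #     assistant.update_tool_signature(weather_tool, is_remove=False)
    #     assistant.update_tool_signature("get_weather", is_remove=True)  # remove by name
    # [end example]
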
    def can_execute_function(self, name: Union[List[str], str]) -> bool:
        """Whether the agent can execute the function."""
        names = name if isinstance(name, list) else [name]
        return all(n in self._function_map for n in names)

    @property
    def function_map(self) -> Dict[str, Callable]:
        """Return the function map."""
        return self._function_map

    def _wrap_function(self, func: F) -> F:
        """Wrap the function to dump the return value to json.

        Handles both sync and async functions.

        Args:
            func: the function to be wrapped.

        Returns:
            The wrapped function.
        """

        @load_basemodels_if_needed
        @functools.wraps(func)
        def _wrapped_func(*args, **kwargs):
            retval = func(*args, **kwargs)
            if logging_enabled():
                log_function_use(self, func, kwargs, retval)
            return serialize_to_str(retval)

        @load_basemodels_if_needed
        @functools.wraps(func)
        async def _a_wrapped_func(*args, **kwargs):
            retval = await func(*args, **kwargs)
            if logging_enabled():
                log_function_use(self, func, kwargs, retval)
            return serialize_to_str(retval)

        wrapped_func = _a_wrapped_func if inspect.iscoroutinefunction(func) else _wrapped_func

        # needed for testing
        wrapped_func._origin = func

        return wrapped_func

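    # [Editor's example -- illustrative sketch, not part of the original source.]
    # The wrapper returns serialize_to_str(retval) so tool results can travel as
    # message content. The function below is a rough stand-in for that behavior,
    # not the real helper (which lives in autogen's function utilities).
    #
    #     import json
    #
    #     def sketch_serialize(retval):
    #         # strings pass through; other values are JSON-dumped, falling back to str()
    #         return retval if isinstance(retval, str) else json.dumps(retval, default=str)
    #
    #     print(sketch_serialize("already a string"))  # already a string
    #     print(sketch_serialize({"total": 5}))        # {"total": 5}
    # [end example]
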
    def register_for_llm(
        self,
        *,
        name: Optional[str] = None,
        description: Optional[str] = None,
        api_style: Literal["function", "tool"] = "tool",
    ) -> Callable[[F], F]:
        """Decorator factory for registering a function to be used by an agent.

        Its return value is used to decorate a function to be registered to the agent. The function uses type hints to
        specify the arguments and return type. The function name is used as the default name for the function,
        but a custom name can be provided. The function description is used to describe the function in the
        agent's configuration.

        Args:
            name (optional(str)): name of the function. If None, the function name will be used (default: None).
            description (optional(str)): description of the function (default: None). It is mandatory
                for the initial decorator, but the following ones can omit it.
            api_style (literal): the API style for the function call.
                For the Azure OpenAI API, use version 2023-12-01-preview or later.
                `"function"` style will be deprecated. For earlier versions, use
                `"function"` if `"tool"` doesn't work.
                See [Azure OpenAI documentation](https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/function-calling?tabs=python) for details.

        Returns:
            The decorator for registering a function to be used by an agent.

        Examples:
            ```
            @user_proxy.register_for_execution()
            @agent2.register_for_llm()
            @agent1.register_for_llm(description="This is a very useful function")
            def my_function(a: Annotated[str, "description of a parameter"] = "a", b: int = 2, c: float = 3.14) -> str:
                return a + str(b * c)
            ```

            For Azure OpenAI versions prior to 2023-12-01-preview, set `api_style`
            to `"function"` if `"tool"` doesn't work:
            ```
            @agent2.register_for_llm(api_style="function")
            def my_function(a: Annotated[str, "description of a parameter"] = "a", b: int = 2, c: float = 3.14) -> str:
                return a + str(b * c)
            ```

        """

        def _decorator(func: F) -> F:
            """Decorator for registering a function to be used by an agent.

            Args:
                func: the function to be registered.

            Returns:
                The function to be registered, with the _description attribute set to the function description.

            Raises:
                ValueError: if the function description is not provided and not propagated by a previous decorator.
                RuntimeError: if the LLM config is not set up before registering a function.

            """
            # name can be overwritten by the parameter, by default it is the same as function name
            if name:
                func._name = name
            elif not hasattr(func, "_name"):
                func._name = func.__name__

            # description is propagated from the previous decorator, but it is mandatory for the first one
            if description:
                func._description = description
            else:
                if not hasattr(func, "_description"):
                    raise ValueError("Function description is required, none found.")

            # get JSON schema for the function
            f = get_function_schema(func, name=func._name, description=func._description)

            # register the function to the agent if there is LLM config, raise an exception otherwise
            if self.llm_config is None:
                raise RuntimeError("LLM config must be setup before registering a function for LLM.")

            if api_style == "function":
                f = f["function"]
                self.update_function_signature(f, is_remove=False)
            elif api_style == "tool":
                self.update_tool_signature(f, is_remove=False)
            else:
                raise ValueError(f"Unsupported API style: {api_style}")

            return func

        return _decorator

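    # [Editor's example -- illustrative sketch, not part of the original source.]
    # Registering a typed function for the LLM. The placeholder api_key only
    # satisfies client construction; registration itself makes no API call, and
    # the agent/function names are assumptions.
    #
    #     from typing import Annotated
    #     from autogen import ConversableAgent
    #
    #     caller = ConversableAgent(
    #         name="caller",
    #         llm_config={"config_list": [{"model": "gpt-4", "api_key": "PLACEHOLDER"}]},
    #     )
    #
    #     @caller.register_for_llm(description="Multiply two integers.")
    #     def multiply(a: Annotated[int, "first factor"], b: Annotated[int, "second factor"]) -> int:
    #         return a * b
    #
    #     print(caller.llm_config["tools"][0]["function"]["name"])  # "multiply"
    # [end example]
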
    def register_for_execution(
        self,
        name: Optional[str] = None,
    ) -> Callable[[F], F]:
        """Decorator factory for registering a function to be executed by an agent.

        Its return value is used to decorate a function to be registered to the agent.

        Args:
            name (optional(str)): name of the function. If None, the function name will be used (default: None).

        Returns:
            The decorator for registering a function to be used by an agent.

        Examples:
            ```
            @user_proxy.register_for_execution()
            @agent2.register_for_llm()
            @agent1.register_for_llm(description="This is a very useful function")
            def my_function(a: Annotated[str, "description of a parameter"] = "a", b: int = 2, c: float = 3.14):
                return a + str(b * c)
            ```

        """

        def _decorator(func: F) -> F:
            """Decorator for registering a function to be used by an agent.

            Args:
                func: the function to be registered.

            Returns:
                The function to be registered, with the _description attribute set to the function description.

            Raises:
                ValueError: if the function description is not provided and not propagated by a previous decorator.

            """
            # name can be overwritten by the parameter, by default it is the same as function name
            if name:
                func._name = name
            elif not hasattr(func, "_name"):
                func._name = func.__name__

            self.register_function({func._name: self._wrap_function(func)})

            return func

        return _decorator

    def register_model_client(self, model_client_cls: ModelClient, **kwargs):
        """Register a model client.

        Args:
            model_client_cls: A custom client class that follows the Client interface
            **kwargs: The kwargs for the custom client class to be initialized with
        """
        self.client.register_model_client(model_client_cls, **kwargs)

    def register_hook(self, hookable_method: str, hook: Callable):
        """
        Registers a hook to be called by a hookable method, in order to add a capability to the agent.
        Registered hooks are kept in lists (one per hookable method), and are called in their order of registration.

        Args:
            hookable_method: A hookable method name implemented by ConversableAgent.
            hook: A method implemented by a subclass of AgentCapability.
        """
        assert hookable_method in self.hook_lists, f"{hookable_method} is not a hookable method."
        hook_list = self.hook_lists[hookable_method]
        assert hook not in hook_list, f"{hook} is already registered as a hook."
        hook_list.append(hook)

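    # [Editor's example -- illustrative sketch, not part of the original source.]
    # A process_last_received_message hook receives the last message's content
    # (str or multimodal list) and returns the possibly modified content. The
    # hook and agent names are hypothetical.
    #
    #     from autogen import ConversableAgent
    #
    #     def redact_secrets(content):
    #         if isinstance(content, str):
    #             return content.replace("s3cr3t", "[REDACTED]")
    #         return content
    #
    #     agent = ConversableAgent(
    #         name="guarded", llm_config=False, human_input_mode="NEVER", default_auto_reply="(ok)"
    #     )
    #     agent.register_hook("process_last_received_message", redact_secrets)
    # [end example]
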
    def process_all_messages_before_reply(self, messages: List[Dict]) -> List[Dict]:
        """
        Calls any registered capability hooks to process all messages, potentially modifying the messages.
        """
        hook_list = self.hook_lists["process_all_messages_before_reply"]
        # If no hooks are registered, or if there are no messages to process, return the original message list.
        if len(hook_list) == 0 or messages is None:
            return messages

        # Call each hook (in order of registration) to process the messages.
        processed_messages = messages
        for hook in hook_list:
            processed_messages = hook(processed_messages)
        return processed_messages

    def process_last_received_message(self, messages: List[Dict]) -> List[Dict]:
        """
        Calls any registered capability hooks to use and potentially modify the text of the last message,
        as long as the last message is not a function call or exit command.
        """

        # If any required condition is not met, return the original message list.
        hook_list = self.hook_lists["process_last_received_message"]
        if len(hook_list) == 0:
            return messages  # No hooks registered.
        if messages is None:
            return None  # No message to process.
        if len(messages) == 0:
            return messages  # No message to process.
        last_message = messages[-1]
        if "function_call" in last_message:
            return messages  # Last message is a function call.
        if "context" in last_message:
            return messages  # Last message contains a context key.
        if "content" not in last_message:
            return messages  # Last message has no content.

        user_content = last_message["content"]
        if not isinstance(user_content, str) and not isinstance(user_content, list):
            # if the user_content is a string, it is for regular LLM
            # if the user_content is a list, it should follow the multimodal LMM format.
            return messages
        if user_content == "exit":
            return messages  # Last message is an exit command.

        # Call each hook (in order of registration) to process the user's message.
        processed_user_content = user_content
        for hook in hook_list:
            processed_user_content = hook(processed_user_content)

        if processed_user_content == user_content:
            return messages  # No hooks actually modified the user's message.

        # Replace the last user message with the expanded one.
        messages = messages.copy()
        messages[-1]["content"] = processed_user_content
        return messages

    def print_usage_summary(self, mode: Union[str, List[str]] = ["actual", "total"]) -> None:
        """Print the usage summary."""
        iostream = IOStream.get_default()

        if self.client is None:
            iostream.print(f"No cost incurred from agent '{self.name}'.")
        else:
            iostream.print(f"Agent '{self.name}':")
            self.client.print_usage_summary(mode)

    def get_actual_usage(self) -> Union[None, Dict[str, int]]:
        """Get the actual usage summary."""
        if self.client is None:
            return None
        else:
            return self.client.actual_usage_summary

    def get_total_usage(self) -> Union[None, Dict[str, int]]:
        """Get the total usage summary."""
        if self.client is None:
            return None
        else:
            return self.client.total_usage_summary


def register_function(
    f: Callable[..., Any],
    *,
    caller: ConversableAgent,
    executor: ConversableAgent,
    name: Optional[str] = None,
    description: str,
) -> None:
    """Register a function to be proposed by an agent and executed by an executor.

    This function can be used instead of the function decorators `@ConversableAgent.register_for_llm` and
    `@ConversableAgent.register_for_execution`.

    Args:
        f: the function to be registered.
        caller: the agent calling the function, typically an instance of ConversableAgent.
        executor: the agent executing the function, typically an instance of UserProxy.
        name: name of the function. If None, the function name will be used (default: None).
        description: description of the function. The description is used by the LLM to decide whether the function
            is called. Make sure the description properly describes what the function does, or it might not be
            called by the LLM when needed.

    """
    f = caller.register_for_llm(name=name, description=description)(f)
    executor.register_for_execution(name=name)(f)
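
# [Editor's example -- illustrative sketch, not part of the original source.]
# Pairing a caller (which proposes the tool call) with an executor (which runs
# it). The api_key is a placeholder and the function name is an assumption;
# registration performs no API call.
#
#     from autogen import ConversableAgent, register_function
#
#     def get_time() -> str:
#         from datetime import datetime
#         return datetime.now().isoformat()
#
#     assistant = ConversableAgent(
#         name="assistant",
#         llm_config={"config_list": [{"model": "gpt-4", "api_key": "PLACEHOLDER"}]},
#     )
#     user_proxy = ConversableAgent(name="user_proxy", llm_config=False, human_input_mode="NEVER")
#
#     register_function(
#         get_time,
#         caller=assistant,
#         executor=user_proxy,
#         description="Return the current time as an ISO-8601 string.",
#     )
#     print(user_proxy.can_execute_function("get_time"))  # True
# [end example]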