ag2 0.9.9__py3-none-any.whl → 0.10.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of ag2 might be problematic.
- {ag2-0.9.9.dist-info → ag2-0.10.0.dist-info}/METADATA +243 -214
- {ag2-0.9.9.dist-info → ag2-0.10.0.dist-info}/RECORD +113 -87
- autogen/_website/generate_mkdocs.py +3 -3
- autogen/_website/notebook_processor.py +1 -1
- autogen/_website/utils.py +1 -1
- autogen/a2a/__init__.py +36 -0
- autogen/a2a/agent_executor.py +105 -0
- autogen/a2a/client.py +280 -0
- autogen/a2a/errors.py +18 -0
- autogen/a2a/httpx_client_factory.py +79 -0
- autogen/a2a/server.py +221 -0
- autogen/a2a/utils.py +165 -0
- autogen/agentchat/__init__.py +3 -0
- autogen/agentchat/agent.py +0 -2
- autogen/agentchat/assistant_agent.py +15 -15
- autogen/agentchat/chat.py +57 -41
- autogen/agentchat/contrib/agent_eval/criterion.py +1 -1
- autogen/agentchat/contrib/capabilities/text_compressors.py +5 -5
- autogen/agentchat/contrib/capabilities/tools_capability.py +1 -1
- autogen/agentchat/contrib/capabilities/transforms.py +1 -1
- autogen/agentchat/contrib/captainagent/agent_builder.py +1 -1
- autogen/agentchat/contrib/captainagent/captainagent.py +20 -19
- autogen/agentchat/contrib/graph_rag/falkor_graph_query_engine.py +2 -5
- autogen/agentchat/contrib/graph_rag/graph_rag_capability.py +5 -5
- autogen/agentchat/contrib/graph_rag/neo4j_graph_query_engine.py +18 -17
- autogen/agentchat/contrib/llava_agent.py +1 -13
- autogen/agentchat/contrib/rag/mongodb_query_engine.py +2 -2
- autogen/agentchat/contrib/rag/query_engine.py +11 -11
- autogen/agentchat/contrib/retrieve_assistant_agent.py +3 -0
- autogen/agentchat/contrib/swarm_agent.py +3 -2
- autogen/agentchat/contrib/vectordb/couchbase.py +1 -1
- autogen/agentchat/contrib/vectordb/mongodb.py +1 -1
- autogen/agentchat/contrib/web_surfer.py +1 -1
- autogen/agentchat/conversable_agent.py +359 -150
- autogen/agentchat/group/context_expression.py +21 -21
- autogen/agentchat/group/group_tool_executor.py +46 -15
- autogen/agentchat/group/guardrails.py +41 -33
- autogen/agentchat/group/handoffs.py +11 -11
- autogen/agentchat/group/multi_agent_chat.py +56 -2
- autogen/agentchat/group/on_condition.py +11 -11
- autogen/agentchat/group/safeguards/__init__.py +21 -0
- autogen/agentchat/group/safeguards/api.py +241 -0
- autogen/agentchat/group/safeguards/enforcer.py +1158 -0
- autogen/agentchat/group/safeguards/events.py +119 -0
- autogen/agentchat/group/safeguards/validator.py +435 -0
- autogen/agentchat/groupchat.py +102 -49
- autogen/agentchat/realtime/experimental/clients/realtime_client.py +2 -2
- autogen/agentchat/realtime/experimental/function_observer.py +2 -3
- autogen/agentchat/realtime/experimental/realtime_agent.py +2 -3
- autogen/agentchat/realtime/experimental/realtime_swarm.py +22 -13
- autogen/agentchat/user_proxy_agent.py +55 -53
- autogen/agents/experimental/document_agent/document_agent.py +1 -10
- autogen/agents/experimental/document_agent/parser_utils.py +5 -1
- autogen/browser_utils.py +4 -4
- autogen/cache/abstract_cache_base.py +2 -6
- autogen/cache/disk_cache.py +1 -6
- autogen/cache/in_memory_cache.py +2 -6
- autogen/cache/redis_cache.py +1 -5
- autogen/coding/__init__.py +10 -2
- autogen/coding/base.py +2 -1
- autogen/coding/docker_commandline_code_executor.py +1 -6
- autogen/coding/factory.py +9 -0
- autogen/coding/jupyter/docker_jupyter_server.py +1 -7
- autogen/coding/jupyter/jupyter_client.py +2 -9
- autogen/coding/jupyter/jupyter_code_executor.py +2 -7
- autogen/coding/jupyter/local_jupyter_server.py +2 -6
- autogen/coding/local_commandline_code_executor.py +0 -65
- autogen/coding/yepcode_code_executor.py +197 -0
- autogen/environments/docker_python_environment.py +3 -3
- autogen/environments/system_python_environment.py +5 -5
- autogen/environments/venv_python_environment.py +5 -5
- autogen/events/agent_events.py +1 -1
- autogen/events/client_events.py +1 -1
- autogen/fast_depends/utils.py +10 -0
- autogen/graph_utils.py +5 -7
- autogen/import_utils.py +3 -1
- autogen/interop/pydantic_ai/pydantic_ai.py +8 -5
- autogen/io/processors/console_event_processor.py +8 -3
- autogen/llm_config/client.py +3 -2
- autogen/llm_config/config.py +168 -91
- autogen/llm_config/entry.py +38 -26
- autogen/llm_config/types.py +35 -0
- autogen/llm_config/utils.py +223 -0
- autogen/mcp/mcp_proxy/operation_grouping.py +48 -39
- autogen/messages/agent_messages.py +1 -1
- autogen/messages/client_messages.py +1 -1
- autogen/oai/__init__.py +8 -1
- autogen/oai/bedrock.py +0 -13
- autogen/oai/client.py +25 -11
- autogen/oai/client_utils.py +31 -1
- autogen/oai/cohere.py +4 -14
- autogen/oai/gemini.py +4 -6
- autogen/oai/gemini_types.py +1 -0
- autogen/oai/openai_utils.py +44 -115
- autogen/remote/__init__.py +18 -0
- autogen/remote/agent.py +199 -0
- autogen/remote/agent_service.py +142 -0
- autogen/remote/errors.py +17 -0
- autogen/remote/httpx_client_factory.py +131 -0
- autogen/remote/protocol.py +37 -0
- autogen/remote/retry.py +102 -0
- autogen/remote/runtime.py +96 -0
- autogen/testing/__init__.py +12 -0
- autogen/testing/messages.py +45 -0
- autogen/testing/test_agent.py +111 -0
- autogen/tools/dependency_injection.py +4 -8
- autogen/tools/experimental/reliable/reliable.py +3 -2
- autogen/tools/experimental/web_search_preview/web_search_preview.py +1 -1
- autogen/tools/function_utils.py +2 -1
- autogen/version.py +1 -1
- {ag2-0.9.9.dist-info → ag2-0.10.0.dist-info}/WHEEL +0 -0
- {ag2-0.9.9.dist-info → ag2-0.10.0.dist-info}/licenses/LICENSE +0 -0
- {ag2-0.9.9.dist-info → ag2-0.10.0.dist-info}/licenses/NOTICE.md +0 -0
autogen/agentchat/conversable_agent.py

@@ -14,7 +14,7 @@ import re
 import threading
 import warnings
 from collections import defaultdict
-from collections.abc import Callable, Generator, Iterable
+from collections.abc import Callable, Container, Generator, Iterable
 from contextlib import contextmanager
 from dataclasses import dataclass
 from inspect import signature
@@ -77,7 +77,7 @@ from .chat import (
     initiate_chats,
 )
 from .group.context_variables import ContextVariables
-from .group.guardrails import Guardrail
+from .group.guardrails import Guardrail, GuardrailResult
 from .group.handoffs import Handoffs
 from .utils import consolidate_chat_info, gather_usage_summary

@@ -132,11 +132,11 @@ class ConversableAgent(LLMAgent):
     For example, AssistantAgent and UserProxyAgent are subclasses of this class,
     configured with different default settings.

-    To modify auto reply, override `generate_reply` method.
-    To disable/enable human response in every turn, set `human_input_mode` to "NEVER" or "ALWAYS".
-    To modify the way to get human input, override `get_human_input` method.
-    To modify the way to execute code blocks, single code block, or function call, override `execute_code_blocks`,
-    `run_code`, and `execute_function` methods respectively.
+    To modify auto reply, override `generate_reply` method. \n
+    To disable/enable human response in every turn, set `human_input_mode` to "NEVER" or "ALWAYS". \n
+    To modify the way to get human input, override `get_human_input` method. \n
+    To modify the way to execute code blocks, single code block, or function call, override `execute_code_blocks`, \n
+    `run_code`, and `execute_function` methods respectively. \n
     """

     DEFAULT_CONFIG = False  # False or dict, the default config for llm inference
@@ -144,7 +144,7 @@ class ConversableAgent(LLMAgent):

     DEFAULT_SUMMARY_PROMPT = "Summarize the takeaway from the conversation. Do not add any introductory phrases."
     DEFAULT_SUMMARY_METHOD = "last_msg"
-    llm_config:
+    llm_config: LLMConfig | Literal[False]

     def __init__(
         self,
@@ -168,60 +168,60 @@ class ConversableAgent(LLMAgent):
         | None = None,
         handoffs: Handoffs | None = None,
     ):
-        """Args
-            name (str): name of the agent
-            system_message (str or list): system message for the ChatCompletion inference
-            is_termination_msg (function): a function that takes a message in the form of a dictionary
+        """Args:\n
+            1) name (str): name of the agent.\n
+            2) system_message (str or list): system message for the ChatCompletion inference.\n
+            3) is_termination_msg (function): a function that takes a message in the form of a dictionary
                 and returns a boolean value indicating if this received message is a termination message.
-                The dict can contain the following keys: "content", "role", "name", "function_call"
-            max_consecutive_auto_reply (int): the maximum number of consecutive auto replies.
+                The dict can contain the following keys: "content", "role", "name", "function_call".\n
+            4) max_consecutive_auto_reply (int): the maximum number of consecutive auto replies.
                 default to None (no limit provided, class attribute MAX_CONSECUTIVE_AUTO_REPLY will be used as the limit in this case).
-                When set to 0, no auto reply will be generated
-            human_input_mode (str): whether to ask for human inputs every time a message is received
-                Possible values are "ALWAYS", "TERMINATE", "NEVER"
+                When set to 0, no auto reply will be generated.\n
+            5) human_input_mode (str): whether to ask for human inputs every time a message is received.\n
+                Possible values are "ALWAYS", "TERMINATE", "NEVER".\n
                 (1) When "ALWAYS", the agent prompts for human input every time a message is received.
                     Under this mode, the conversation stops when the human input is "exit",
-                    or when is_termination_msg is True and there is no human input
+                    or when is_termination_msg is True and there is no human input.\n
                 (2) When "TERMINATE", the agent only prompts for human input only when a termination message is received or
-                    the number of auto reply reaches the max_consecutive_auto_reply
+                    the number of auto reply reaches the max_consecutive_auto_reply.\n
                 (3) When "NEVER", the agent will never prompt for human input. Under this mode, the conversation stops
-                    when the number of auto reply reaches the max_consecutive_auto_reply or when is_termination_msg is True.
-            function_map (dict[str, callable]): Mapping function names (passed to openai) to callable functions, also used for tool calls.
-            code_execution_config (dict or False): config for the code execution
-                To disable code execution, set to False. Otherwise, set to a dictionary with the following keys
-                - work_dir (Optional, str): The working directory for the code execution
-                    If None, a default working directory will be used
+                    when the number of auto reply reaches the max_consecutive_auto_reply or when is_termination_msg is True. \n
+            6) function_map (dict[str, callable]): Mapping function names (passed to openai) to callable functions, also used for tool calls. \n
+            7) code_execution_config (dict or False): config for the code execution.\n
+                To disable code execution, set to False. Otherwise, set to a dictionary with the following keys:\n
+                - work_dir (Optional, str): The working directory for the code execution.\n
+                    If None, a default working directory will be used.\n
                     The default working directory is the "extensions" directory under
-                    "path_to_autogen"
-                - use_docker (Optional, list, str or bool): The docker image to use for code execution
-                    Default is True, which means the code will be executed in a docker container. A default list of images will be used
-                    If a list or a str of image name(s) is provided, the code will be executed in a docker container
-                    with the first image successfully pulled
-                    If False, the code will be executed in the current environment
-                    We strongly recommend using docker for code execution
-                - timeout (Optional, int): The maximum execution time in seconds
+                    "path_to_autogen".\n
+                - use_docker (Optional, list, str or bool): The docker image to use for code execution.\n
+                    Default is True, which means the code will be executed in a docker container. A default list of images will be used.\n
+                    If a list or a str of image name(s) is provided, the code will be executed in a docker container\n
+                    with the first image successfully pulled.\n
+                    If False, the code will be executed in the current environment.\n
+                    We strongly recommend using docker for code execution.\n
+                - timeout (Optional, int): The maximum execution time in seconds.\n
                 - last_n_messages (Experimental, int or str): The number of messages to look back for code execution.
-                    If set to 'auto', it will scan backwards through all messages arriving since the agent last spoke, which is typically the last time execution was attempted. (Default: auto)
-            llm_config (LLMConfig or dict or False or None): llm inference configuration
-                Please refer to [OpenAIWrapper.create](https://docs.ag2.ai/latest/docs/api-reference/autogen/OpenAIWrapper/#autogen.OpenAIWrapper.create)
-                for available options
-                When using OpenAI or Azure OpenAI endpoints, please specify a non-empty 'model' either in `llm_config` or in each config of 'config_list' in `llm_config
-                To disable llm-based auto reply, set to False
-                When set to None, will use self.DEFAULT_CONFIG, which defaults to False
-            default_auto_reply (str or dict): default auto reply when no code execution or llm-based reply is generated
-            description (str): a short description of the agent. This description is used by other agents
-                (e.g. the GroupChatManager) to decide when to call upon this agent. (Default: system_message)
-            chat_messages (dict or None): the previous chat messages that this agent had in the past with other agents.
+                    If set to 'auto', it will scan backwards through all messages arriving since the agent last spoke, which is typically the last time execution was attempted. (Default: auto)\n
+            8) llm_config (LLMConfig or dict or False or None): llm inference configuration.\n
+                Please refer to [OpenAIWrapper.create](https://docs.ag2.ai/latest/docs/api-reference/autogen/OpenAIWrapper/#autogen.OpenAIWrapper.create)\n
+                for available options.\n
+                When using OpenAI or Azure OpenAI endpoints, please specify a non-empty 'model' either in `llm_config` or in each config of 'config_list' in `llm_config`.\n
+                To disable llm-based auto reply, set to False.\n
+                When set to None, will use self.DEFAULT_CONFIG, which defaults to False.\n
+            9) default_auto_reply (str or dict): default auto reply when no code execution or llm-based reply is generated.\n
+            10) description (str): a short description of the agent. This description is used by other agents
+                (e.g. the GroupChatManager) to decide when to call upon this agent. (Default: system_message)\n
+            11) chat_messages (dict or None): the previous chat messages that this agent had in the past with other agents.
                 Can be used to give the agent a memory by providing the chat history. This will allow the agent to
-                resume previous had conversations. Defaults to an empty chat history
-            silent (bool or None): (Experimental) whether to print the message sent. If None, will use the value of
-                silent in each function
-            context_variables (ContextVariables or None): Context variables that provide a persistent context for the agent.
-                Note: This will be a reference to a shared context for multi-agent chats
-                Behaves like a dictionary with keys and values (akin to dict[str, Any])
-            functions (List[Callable[..., Any]]): A list of functions to register with the agent, these will be wrapped up as tools and registered for LLM (not execution)
-            update_agent_state_before_reply (List[Callable[..., Any]]): A list of functions, including UpdateSystemMessage's, called to update the agent before it replies
-            handoffs (Handoffs): Handoffs object containing all handoff transition conditions
+                resume previous had conversations. Defaults to an empty chat history.\n
+            12) silent (bool or None): (Experimental) whether to print the message sent. If None, will use the value of
+                silent in each function.\n
+            13) context_variables (ContextVariables or None): Context variables that provide a persistent context for the agent.
+                Note: This will be a reference to a shared context for multi-agent chats.\n
+                Behaves like a dictionary with keys and values (akin to dict[str, Any]).\n
+            14) functions (List[Callable[..., Any]]): A list of functions to register with the agent, these will be wrapped up as tools and registered for LLM (not execution).\n
+            15) update_agent_state_before_reply (List[Callable[..., Any]]): A list of functions, including UpdateSystemMessage's, called to update the agent before it replies.\n
+            16) handoffs (Handoffs): Handoffs object containing all handoff transition conditions.\n
         """
         self.handoffs = handoffs if handoffs is not None else Handoffs()
         self.input_guardrails: list[Guardrail] = []
@@ -370,6 +370,12 @@ class ConversableAgent(LLMAgent):
             "process_all_messages_before_reply": [],
             "process_message_before_send": [],
             "update_agent_state": [],
+            # Safeguard hooks for monitoring agent interactions
+            "safeguard_tool_inputs": [],  # Hook for processing tool inputs before execution
+            "safeguard_tool_outputs": [],  # Hook for processing tool outputs after execution
+            "safeguard_llm_inputs": [],  # Hook for processing LLM inputs before sending
+            "safeguard_llm_outputs": [],  # Hook for processing LLM outputs after receiving
+            "safeguard_human_inputs": [],  # Hook for processing human inputs
         }

         # Associate agent update state hooks
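The five new safeguard hook lists plug into ConversableAgent's existing hook mechanism. A minimal sketch of wiring one up (not part of the diff; it assumes the long-standing `register_hook(hookable_method, hook)` API and a purely hypothetical logging policy):

    from autogen import ConversableAgent

    agent = ConversableAgent(name="worker", llm_config=False)

    def log_tool_input(tool_input):
        # tool_input is the tool call's "function" dict ({"name": ..., "arguments": ...});
        # returning it (possibly modified) lets execution proceed, returning None blocks it.
        print("tool call requested:", tool_input.get("name"))
        return tool_input

    agent.register_hook("safeguard_tool_inputs", log_tool_input)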
@@ -379,9 +385,7 @@ class ConversableAgent(LLMAgent):
         if not self.llm_config:
             return

-        if any(
-            entry for entry in self.llm_config.config_list if entry.api_type == "openai" and re.search(r"\s", name)
-        ]):
+        if any(entry for entry in self.llm_config.config_list if entry.api_type == "openai" and re.search(r"\s", name)):
             raise ValueError(f"The name of the agent cannot contain any whitespace. The name provided is: '{name}'")

     def _get_display_name(self):
@@ -485,25 +489,15 @@ class ConversableAgent(LLMAgent):
     def _validate_llm_config(
         cls, llm_config: LLMConfig | dict[str, Any] | Literal[False] | None
     ) -> LLMConfig | Literal[False]:
-        # if not(llm_config in (None, False) or isinstance(llm_config, [dict, LLMConfig])):
-        #     raise ValueError(
-        #         "llm_config must be a dict or False or None."
-        #     )
-
         if llm_config is None:
             llm_config = LLMConfig.get_current_llm_config()
             if llm_config is None:
-
-
-                llm_config = LLMConfig(**llm_config)
-        elif isinstance(llm_config, LLMConfig):
-            llm_config = llm_config.copy()
+                return cls.DEFAULT_CONFIG
+
         elif llm_config is False:
-
-        else:
-            raise ValueError("llm_config must be a LLMConfig, dict or False or None.")
+            return False

-        return llm_config
+        return LLMConfig.ensure_config(llm_config)

     @classmethod
     def _create_client(cls, llm_config: LLMConfig | Literal[False]) -> OpenAIWrapper | None:
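With the rewritten validator, `llm_config` may be an `LLMConfig`, a plain dict, `False` (disable LLM-based replies), or `None` (fall back to the context-scoped config, else `DEFAULT_CONFIG`), with the remaining cases delegated to `LLMConfig.ensure_config`. A sketch of the accepted forms (the `gpt-4o-mini` model name is only illustrative):

    from autogen import ConversableAgent, LLMConfig

    llm_config = LLMConfig(config_list=[{"api_type": "openai", "model": "gpt-4o-mini"}])
    assistant = ConversableAgent(name="assistant", llm_config=llm_config)
    executor = ConversableAgent(name="executor", llm_config=False)  # no LLM-based auto reply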
@@ -1050,7 +1044,11 @@ class ConversableAgent(LLMAgent):
         return name

     def _append_oai_message(
-        self,
+        self,
+        message: dict[str, Any] | str,
+        conversation_id: Agent,
+        role: str = "assistant",
+        name: str | None = None,
     ) -> bool:
         """Append a message to the ChatCompletion conversation.

@@ -1061,50 +1059,17 @@ class ConversableAgent(LLMAgent):

         Args:
             message (dict or str): message to be appended to the ChatCompletion conversation.
-            role (str): role of the message, can be "assistant" or "function".
             conversation_id (Agent): id of the conversation, should be the recipient or sender.
-
+            role (str): role of the message, can be "assistant" or "function".
+            name (str | None): name of the message author, can be the name of the agent. If not provided, the name of the currentagent will be used.

         Returns:
             bool: whether the message is appended to the ChatCompletion conversation.
         """
-        message = self.
-
-
-            k: message[k]
-            for k in ("content", "function_call", "tool_calls", "tool_responses", "tool_call_id", "name", "context")
-            if k in message and message[k] is not None
-        }
-        if "content" not in oai_message:
-            if "function_call" in oai_message or "tool_calls" in oai_message:
-                oai_message["content"] = None  # if only function_call is provided, content will be set to None.
-            else:
-                return False
-
-        if message.get("role") in ["function", "tool"]:
-            oai_message["role"] = message.get("role")
-            if "tool_responses" in oai_message:
-                for tool_response in oai_message["tool_responses"]:
-                    tool_response["content"] = str(tool_response["content"])
-        elif "override_role" in message:
-            # If we have a direction to override the role then set the
-            # role accordingly. Used to customise the role for the
-            # select speaker prompt.
-            oai_message["role"] = message.get("override_role")
-        else:
-            oai_message["role"] = role
-
-        if oai_message.get("function_call", False) or oai_message.get("tool_calls", False):
-            oai_message["role"] = "assistant"  # only messages with role 'assistant' can have a function call.
-        elif "name" not in oai_message:
-            # If we don't have a name field, append it
-            if is_sending:
-                oai_message["name"] = self.name
-            else:
-                oai_message["name"] = conversation_id.name
-
+        valid, oai_message = normilize_message_to_oai(message, role=role, name=name or self.name)
+        if not valid:
+            return False
         self._oai_messages[conversation_id].append(oai_message)
-
         return True

     def _process_message_before_send(
@@ -1158,7 +1123,7 @@ class ConversableAgent(LLMAgent):
         message = self._process_message_before_send(message, recipient, ConversableAgent._is_silent(self, silent))
         # When the agent composes and sends the message, the role of the message is "assistant"
         # unless it's "function".
-        valid = self._append_oai_message(message, "assistant",
+        valid = self._append_oai_message(message, recipient, role="assistant", name=self.name)
         if valid:
             recipient.receive(message, self, request_reply, silent)
         else:
@@ -1206,7 +1171,7 @@ class ConversableAgent(LLMAgent):
         message = self._process_message_before_send(message, recipient, ConversableAgent._is_silent(self, silent))
         # When the agent composes and sends the message, the role of the message is "assistant"
         # unless it's "function".
-        valid = self._append_oai_message(message, "assistant",
+        valid = self._append_oai_message(message, recipient, role="assistant", name=self.name)
         if valid:
             await recipient.a_receive(message, self, request_reply, silent)
         else:
@@ -1215,7 +1180,7 @@ class ConversableAgent(LLMAgent):
         )

     def _print_received_message(self, message: dict[str, Any] | str, sender: Agent, skip_head: bool = False):
-        message =
+        message = message_to_dict(message)
         message_model = create_received_event_model(event=message, sender=sender, recipient=self)
         iostream = IOStream.get_default()
         # message_model.print(iostream.print)
@@ -1223,7 +1188,7 @@ class ConversableAgent(LLMAgent):

     def _process_received_message(self, message: dict[str, Any] | str, sender: Agent, silent: bool):
         # When the agent receives a message, the role of the message is "user". (If 'role' exists and is 'function', it will remain unchanged.)
-        valid = self._append_oai_message(message, "user", sender
+        valid = self._append_oai_message(message, sender, role="user", name=sender.name)
         if logging_enabled():
             log_event(self, "received_message", message=message, sender=sender.name, valid=valid)

@@ -1355,9 +1320,10 @@ class ConversableAgent(LLMAgent):
         Returns:
             bool: True if the chat should be terminated, False otherwise.
         """
+        content = message.get("content")
         return (
             isinstance(recipient, ConversableAgent)
-            and
+            and content is not None
             and hasattr(recipient, "_is_termination_msg")
             and recipient._is_termination_msg(message)
         )
@@ -2181,6 +2147,7 @@ class ConversableAgent(LLMAgent):
         messages: list[dict[str, Any]] | None = None,
         sender: Agent | None = None,
         config: OpenAIWrapper | None = None,
+        **kwargs: Any,
     ) -> tuple[bool, str | dict[str, Any] | None]:
         """Generate a reply using autogen.oai."""
         client = self.client if config is None else config
@@ -2188,12 +2155,34 @@ class ConversableAgent(LLMAgent):
             return False, None
         if messages is None:
             messages = self._oai_messages[sender]
+
+        # Process messages before sending to LLM, hook point for llm input monitoring
+        processed_messages = self._process_llm_input(self._oai_system_message + messages)
+        if processed_messages is None:
+            return True, {"content": "LLM call blocked by safeguard", "role": "assistant"}
+
         extracted_response = self._generate_oai_reply_from_client(
-            client,
+            client,
+            self._oai_system_message + messages,
+            self.client_cache,
+            **kwargs,
         )
+
+        # Process LLM response
+        if extracted_response is not None:
+            processed_extracted_response = self._process_llm_output(extracted_response)
+            if processed_extracted_response is None:
+                raise ValueError("safeguard_llm_outputs hook returned None")
+
         return (False, None) if extracted_response is None else (True, extracted_response)

-    def _generate_oai_reply_from_client(
+    def _generate_oai_reply_from_client(
+        self,
+        llm_client,
+        messages,
+        cache,
+        **kwargs: Any,
+    ) -> str | dict[str, Any] | None:
         # unroll tool_responses
         all_messages = []
         for message in messages:
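Per this hunk, a `safeguard_llm_inputs` hook sees the full prompt (system message plus history) before the client call; if any hook returns None the model is never called and the agent replies with the fixed "LLM call blocked by safeguard" message. A sketch of such a hook, with a hypothetical blocking policy, registered via `register_hook`:

    def block_leaky_prompts(messages):
        # messages is the list of OpenAI-style dicts about to be sent to the model
        if any("API_KEY" in str(m.get("content", "")) for m in messages):
            return None  # block the LLM call entirely
        return messages

    agent.register_hook("safeguard_llm_inputs", block_leaky_prompts)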
@@ -2212,6 +2201,7 @@ class ConversableAgent(LLMAgent):
             messages=all_messages,
             cache=cache,
             agent=self,
+            **kwargs,
         )
         extracted_response = llm_client.extract_text_or_completion_object(response)[0]

@@ -2241,20 +2231,27 @@ class ConversableAgent(LLMAgent):
         messages: list[dict[str, Any]] | None = None,
         sender: Agent | None = None,
         config: Any | None = None,
+        **kwargs: Any,
     ) -> tuple[bool, str | dict[str, Any] | None]:
         """Generate a reply using autogen.oai asynchronously."""
         iostream = IOStream.get_default()

         def _generate_oai_reply(
-            self, iostream: IOStream, *args: Any, **
+            self, iostream: IOStream, *args: Any, **kw: Any
         ) -> tuple[bool, str | dict[str, Any] | None]:
             with IOStream.set_default(iostream):
-                return self.generate_oai_reply(*args, **
+                return self.generate_oai_reply(*args, **kw)

         return await asyncio.get_event_loop().run_in_executor(
             None,
             functools.partial(
-                _generate_oai_reply,
+                _generate_oai_reply,
+                self=self,
+                iostream=iostream,
+                messages=messages,
+                sender=sender,
+                config=config,
+                **kwargs,
             ),
         )

@@ -2418,7 +2415,7 @@ class ConversableAgent(LLMAgent):
         if messages is None:
             messages = self._oai_messages[sender]
         message = messages[-1]
-        if "function_call"
+        if message.get("function_call"):
             call_id = message.get("id", None)
             func_call = message["function_call"]
             func_name = func_call.get("name", "")
@@ -2449,14 +2446,26 @@ class ConversableAgent(LLMAgent):
         tool_returns = []
         for tool_call in message.get("tool_calls", []):
             function_call = tool_call.get("function", {})
+
+            # Hook: Process tool input before execution
+            processed_call = self._process_tool_input(function_call)
+            if processed_call is None:
+                raise ValueError("safeguard_tool_inputs hook returned None")
+
             tool_call_id = tool_call.get("id", None)
-            func = self._function_map.get(
-            if
-                coro = self.a_execute_function(
+            func = self._function_map.get(processed_call.get("name", None), None)
+            if is_coroutine_callable(func):
+                coro = self.a_execute_function(processed_call, call_id=tool_call_id)
                 _, func_return = self._run_async_in_thread(coro)
             else:
-                _, func_return = self.execute_function(
-
+                _, func_return = self.execute_function(processed_call, call_id=tool_call_id)
+
+            # Hook: Process tool output before returning
+            processed_return = self._process_tool_output(func_return)
+            if processed_return is None:
+                raise ValueError("safeguard_tool_outputs hook returned None")
+
+            content = processed_return.get("content", "")
             if content is None:
                 content = ""

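Tool execution now flows through `_process_tool_input` and `_process_tool_output`: whatever the input hooks return is what gets executed, and a None from either side raises. A sketch of a rewriting hook (hypothetical redaction rule; it assumes `function_call["arguments"]` carries the usual JSON-encoded argument string):

    import json

    def sandbox_paths(function_call):
        args = json.loads(function_call.get("arguments") or "{}")
        if str(args.get("path", "")).startswith("/etc"):
            args["path"] = "/tmp/sandbox"  # rewrite the argument instead of blocking the call
        return {**function_call, "arguments": json.dumps(args)}

    agent.register_hook("safeguard_tool_inputs", sandbox_paths)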
@@ -2805,7 +2814,7 @@ class ConversableAgent(LLMAgent):
         self,
         messages: list[dict[str, Any]] | None = None,
         sender: Optional["Agent"] = None,
-
+        exclude: Container[Any] = (),
     ) -> str | dict[str, Any] | None:
         """Reply based on the conversation history and the sender.

@@ -2827,8 +2836,7 @@ class ConversableAgent(LLMAgent):
         Args:
             messages: a list of messages in the conversation history.
             sender: sender of an Agent instance.
-
-            - exclude (List[Callable[..., Any]]): A list of reply functions to exclude from
+            exclude: A list of reply functions to exclude from
                 the reply generation process. Functions in this list will be skipped even if
                 they would normally be triggered.

@@ -2856,7 +2864,7 @@ class ConversableAgent(LLMAgent):

         for reply_func_tuple in self._reply_func_list:
             reply_func = reply_func_tuple["reply_func"]
-            if
+            if reply_func in exclude:
                 continue
             if inspect.iscoroutinefunction(reply_func):
                 continue
@@ -2879,7 +2887,7 @@ class ConversableAgent(LLMAgent):
         self,
         messages: list[dict[str, Any]] | None = None,
         sender: Optional["Agent"] = None,
-
+        exclude: Container[Any] = (),
     ) -> str | dict[str, Any] | None:
         """(async) Reply based on the conversation history and the sender.

@@ -2901,8 +2909,7 @@ class ConversableAgent(LLMAgent):
         Args:
             messages: a list of messages in the conversation history.
             sender: sender of an Agent instance.
-
-            - exclude (List[Callable[..., Any]]): A list of reply functions to exclude from
+            exclude: A list of reply functions to exclude from
                 the reply generation process. Functions in this list will be skipped even if
                 they would normally be triggered.

@@ -2930,13 +2937,16 @@ class ConversableAgent(LLMAgent):

         for reply_func_tuple in self._reply_func_list:
             reply_func = reply_func_tuple["reply_func"]
-            if
+            if reply_func in exclude:
                 continue

             if self._match_trigger(reply_func_tuple["trigger"], sender):
                 if inspect.iscoroutinefunction(reply_func):
                     final, reply = await reply_func(
-                        self,
+                        self,
+                        messages=messages,
+                        sender=sender,
+                        config=reply_func_tuple["config"],
                     )
                 else:
                     final, reply = reply_func(self, messages=messages, sender=sender, config=reply_func_tuple["config"])
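The `exclude` parameter of `generate_reply` / `a_generate_reply` is now a plain `Container` defaulting to an empty tuple, so membership is checked directly with `reply_func in exclude`. A sketch of skipping the LLM-based reply function (assuming `other_agent` is some existing agent in the conversation):

    reply = agent.generate_reply(
        messages=[{"role": "user", "content": "ping"}],
        sender=other_agent,
        exclude=[ConversableAgent.generate_oai_reply],
    )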
@@ -2992,8 +3002,14 @@ class ConversableAgent(LLMAgent):
         iostream = IOStream.get_default()

         reply = iostream.input(prompt)
-
-
+
+        # Process the human input through hooks
+        processed_reply = self._process_human_input(reply)
+        if processed_reply is None:
+            raise ValueError("safeguard_human_inputs hook returned None")
+
+        self._human_input.append(processed_reply)
+        return processed_reply

     async def a_get_human_input(self, prompt: str) -> str:
         """(Async) Get human input.
@@ -3425,13 +3441,30 @@ class ConversableAgent(LLMAgent):
             logger.error(error_msg)
             raise AssertionError(error_msg)

+        self.llm_config = self._update_tool_config(
+            self.llm_config,
+            tool_sig=tool_sig,
+            is_remove=is_remove,
+            silent_override=silent_override,
+        )
+
+        self.client = OpenAIWrapper(**self.llm_config)
+
+    def _update_tool_config(
+        self,
+        llm_config: dict[str, Any] | LLMConfig,
+        tool_sig: str | dict[str, Any],
+        is_remove: bool,
+        silent_override: bool = False,
+    ) -> dict[str, Any]:
         if is_remove:
-            if "tools" not in
+            if "tools" not in llm_config or len(llm_config["tools"]) == 0:
                 error_msg = f"The agent config doesn't have tool {tool_sig}."
                 logger.error(error_msg)
                 raise AssertionError(error_msg)
+
             else:
-                current_tools =
+                current_tools = llm_config["tools"]
                 filtered_tools = []

                 # Loop through and rebuild tools list without the tool to remove
@@ -3444,36 +3477,39 @@ class ConversableAgent(LLMAgent):
                 if is_different:
                     filtered_tools.append(tool)

-
+            llm_config["tools"] = filtered_tools
+
         else:
             if not isinstance(tool_sig, dict):
                 raise ValueError(
                     f"The tool signature must be of the type dict. Received tool signature type {type(tool_sig)}"
                 )
+
             self._assert_valid_name(tool_sig["function"]["name"])
-            if "tools" in
+            if "tools" in llm_config and len(llm_config["tools"]) > 0:
                 if not silent_override and any(
-                    tool["function"]["name"] == tool_sig["function"]["name"] for tool in
+                    tool["function"]["name"] == tool_sig["function"]["name"] for tool in llm_config["tools"]
                 ):
                     warnings.warn(f"Function '{tool_sig['function']['name']}' is being overridden.", UserWarning)
-
+
+                llm_config["tools"] = [
                     tool
-                    for tool in
+                    for tool in llm_config["tools"]
                     if tool.get("function", {}).get("name") != tool_sig["function"]["name"]
                 ] + [tool_sig]
             else:
-
+                llm_config["tools"] = [tool_sig]

         # Do this only if llm_config is a dict. If llm_config is LLMConfig, LLMConfig will handle this.
-        if len(
-            del
+        if len(llm_config["tools"]) == 0 and isinstance(llm_config, dict):
+            del llm_config["tools"]

-
+        return llm_config

     def can_execute_function(self, name: list[str] | str) -> bool:
         """Whether the agent can execute the function."""
         names = name if isinstance(name, list) else [name]
-        return all(
+        return all(n in self._function_map for n in names)

     @property
     def function_map(self) -> dict[str, Callable[..., Any]]:
@@ -3727,7 +3763,7 @@ class ConversableAgent(LLMAgent):
         """
         tool = self._create_tool_if_needed(func_or_tool, name, description)
         chat_context = ChatContext(self)
-        chat_context_params =
+        chat_context_params = dict.fromkeys(tool._chat_context_param_names, chat_context)

         self.register_function(
             {tool.name: self._wrap_function(tool.func, chat_context_params, serialize=serialize)},
@@ -3825,6 +3861,87 @@ class ConversableAgent(LLMAgent):
             messages[-1]["content"] = processed_user_content
         return messages

+    def _process_tool_input(self, tool_input: dict[str, Any]) -> dict[str, Any] | None:
+        """Process tool input through registered hooks."""
+        hook_list = self.hook_lists["safeguard_tool_inputs"]
+
+        # If no hooks are registered, allow the tool input
+        if len(hook_list) == 0:
+            return tool_input
+
+        # Process through each hook
+        processed_input = tool_input
+        for hook in hook_list:
+            processed_input = hook(processed_input)
+            if processed_input is None:
+                return None
+
+        return processed_input
+
+    def _process_tool_output(self, response: dict[str, Any]) -> dict[str, Any]:
+        """Process tool output through registered hooks"""
+        hook_list = self.hook_lists["safeguard_tool_outputs"]
+
+        # If no hooks are registered, return original response
+        if len(hook_list) == 0:
+            return response
+
+        # Process through each hook
+        processed_response = response
+        for hook in hook_list:
+            processed_response = hook(processed_response)
+
+        return processed_response
+
+    def _process_llm_input(self, messages: list[dict[str, Any]]) -> list[dict[str, Any]] | None:
+        """Process messages before sending to LLM through registered hooks."""
+        hook_list = self.hook_lists["safeguard_llm_inputs"]
+
+        # If no hooks registered, allow the messages through
+        if len(hook_list) == 0:
+            return messages
+
+        # Process through each hook
+        processed_messages = messages
+        for hook in hook_list:
+            processed_messages = hook(processed_messages)
+            if processed_messages is None:
+                return None
+
+        return processed_messages
+
+    def _process_llm_output(self, response: str | dict[str, Any]) -> str | dict[str, Any]:
+        """Process LLM response through registered hooks"""
+        hook_list = self.hook_lists["safeguard_llm_outputs"]
+
+        # If no hooks registered, return original response
+        if len(hook_list) == 0:
+            return response
+
+        # Process through each hook
+        processed_response = response
+        for hook in hook_list:
+            processed_response = hook(processed_response)
+
+        return processed_response
+
+    def _process_human_input(self, human_input: str) -> str | None:
+        """Process human input through registered hooks."""
+        hook_list = self.hook_lists["safeguard_human_inputs"]
+
+        # If no hooks registered, allow the input through
+        if len(hook_list) == 0:
+            return human_input
+
+        # Process through each hook
+        processed_input = human_input
+        for hook in hook_list:
+            processed_input = hook(processed_input)
+            if processed_input is None:
+                return None
+
+        return processed_input
+
     def print_usage_summary(self, mode: str | list[str] = ["actual", "total"]) -> None:
         """Print the usage summary."""
         iostream = IOStream.get_default()
@@ -3871,7 +3988,11 @@ class ConversableAgent(LLMAgent):
         if executor_kwargs is None:
             executor_kwargs = {}
         if "is_termination_msg" not in executor_kwargs:
-            executor_kwargs["is_termination_msg"] = lambda x:
+            executor_kwargs["is_termination_msg"] = lambda x: "TERMINATE" in (
+                content_str(x.get("content"))
+                if isinstance(x.get("content"), (str, list)) or x.get("content") is None
+                else str(x.get("content"))
+            )

         try:
             if not self.run_executor:
@@ -4056,6 +4177,32 @@ class ConversableAgent(LLMAgent):
         """
         self.output_guardrails.extend(guardrails)

+    def run_input_guardrails(self, messages: list[dict[str, Any]] | None = None) -> GuardrailResult | None:
+        """Run input guardrails for an agent before the reply is generated.
+
+        Args:
+            messages (Optional[list[dict[str, Any]]]): The messages to check against the guardrails.
+        """
+        for guardrail in self.input_guardrails:
+            guardrail_result = guardrail.check(context=messages)
+
+            if guardrail_result.activated:
+                return guardrail_result
+        return None
+
+    def run_output_guardrails(self, reply: str | dict[str, Any]) -> GuardrailResult | None:
+        """Run output guardrails for an agent after the reply is generated.
+
+        Args:
+            reply (str | dict[str, Any]): The reply generated by the agent.
+        """
+        for guardrail in self.output_guardrails:
+            guardrail_result = guardrail.check(context=reply)
+
+            if guardrail_result.activated:
+                return guardrail_result
+        return None
+

 @export_module("autogen")
 def register_function(
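A sketch of driving the new guardrail runners directly (assumption: `my_guardrail` is any `Guardrail` whose `check()` returns a `GuardrailResult` with an `activated` flag; normally these runners are invoked by the framework around reply generation):

    agent.output_guardrails.append(my_guardrail)
    result = agent.run_output_guardrails("draft reply to be screened")
    if result is not None:
        print("guardrail activated:", result)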
@@ -4083,3 +4230,65 @@ def register_function(
     """
     f = caller.register_for_llm(name=name, description=description)(f)
     executor.register_for_execution(name=name)(f)
+
+
+def normilize_message_to_oai(
+    message: dict[str, Any] | str,
+    name: str,
+    role: str = "assistant",
+) -> tuple[bool, dict[str, Any]]:
+    message = message_to_dict(message)
+    # create oai message to be appended to the oai conversation that can be passed to oai directly.
+    oai_message = {
+        k: message[k]
+        for k in ("content", "function_call", "tool_responses", "tool_call_id", "name", "context")
+        if k in message and message[k] is not None
+    }
+
+    if tools := message.get("tool_calls"):  # check for [], None and missed key
+        oai_message["tool_calls"] = tools
+
+    if "content" not in oai_message:
+        if "function_call" in oai_message or "tool_calls" in oai_message:
+            oai_message["content"] = None  # if only function_call is provided, content will be set to None.
+        else:
+            return False, oai_message
+
+    if message.get("role") in ["function", "tool"]:
+        oai_message["role"] = message.get("role")
+        if "tool_responses" in oai_message:
+            for tool_response in oai_message["tool_responses"]:
+                content_value = tool_response.get("content")
+                tool_response["content"] = (
+                    content_str(content_value)
+                    if isinstance(content_value, (str, list)) or content_value is None
+                    else str(content_value)
+                )
+    elif "override_role" in message:
+        # If we have a direction to override the role then set the
+        # role accordingly. Used to customise the role for the
+        # select speaker prompt.
+        oai_message["role"] = message.get("override_role")
+    else:
+        oai_message["role"] = role
+
+    if oai_message.get("function_call", False) or oai_message.get("tool_calls", False):
+        oai_message["role"] = "assistant"  # only messages with role 'assistant' can have a function call.
+    elif "name" not in oai_message:
+        # If we don't have a name field, append it
+        oai_message["name"] = name
+
+    return True, oai_message
+
+
+def message_to_dict(message: dict[str, Any] | str) -> dict:
+    """Convert a message to a dictionary.
+
+    The message can be a string or a dictionary. The string will be put in the "content" field of the new dictionary.
+    """
+    if isinstance(message, str):
+        return {"content": message}
+    elif isinstance(message, dict):
+        return message
+    else:
+        return dict(message)