ag2 0.9.9__py3-none-any.whl → 0.10.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
- {ag2-0.9.9.dist-info → ag2-0.10.0.dist-info}/METADATA +243 -214
- {ag2-0.9.9.dist-info → ag2-0.10.0.dist-info}/RECORD +113 -87
- autogen/_website/generate_mkdocs.py +3 -3
- autogen/_website/notebook_processor.py +1 -1
- autogen/_website/utils.py +1 -1
- autogen/a2a/__init__.py +36 -0
- autogen/a2a/agent_executor.py +105 -0
- autogen/a2a/client.py +280 -0
- autogen/a2a/errors.py +18 -0
- autogen/a2a/httpx_client_factory.py +79 -0
- autogen/a2a/server.py +221 -0
- autogen/a2a/utils.py +165 -0
- autogen/agentchat/__init__.py +3 -0
- autogen/agentchat/agent.py +0 -2
- autogen/agentchat/assistant_agent.py +15 -15
- autogen/agentchat/chat.py +57 -41
- autogen/agentchat/contrib/agent_eval/criterion.py +1 -1
- autogen/agentchat/contrib/capabilities/text_compressors.py +5 -5
- autogen/agentchat/contrib/capabilities/tools_capability.py +1 -1
- autogen/agentchat/contrib/capabilities/transforms.py +1 -1
- autogen/agentchat/contrib/captainagent/agent_builder.py +1 -1
- autogen/agentchat/contrib/captainagent/captainagent.py +20 -19
- autogen/agentchat/contrib/graph_rag/falkor_graph_query_engine.py +2 -5
- autogen/agentchat/contrib/graph_rag/graph_rag_capability.py +5 -5
- autogen/agentchat/contrib/graph_rag/neo4j_graph_query_engine.py +18 -17
- autogen/agentchat/contrib/llava_agent.py +1 -13
- autogen/agentchat/contrib/rag/mongodb_query_engine.py +2 -2
- autogen/agentchat/contrib/rag/query_engine.py +11 -11
- autogen/agentchat/contrib/retrieve_assistant_agent.py +3 -0
- autogen/agentchat/contrib/swarm_agent.py +3 -2
- autogen/agentchat/contrib/vectordb/couchbase.py +1 -1
- autogen/agentchat/contrib/vectordb/mongodb.py +1 -1
- autogen/agentchat/contrib/web_surfer.py +1 -1
- autogen/agentchat/conversable_agent.py +359 -150
- autogen/agentchat/group/context_expression.py +21 -21
- autogen/agentchat/group/group_tool_executor.py +46 -15
- autogen/agentchat/group/guardrails.py +41 -33
- autogen/agentchat/group/handoffs.py +11 -11
- autogen/agentchat/group/multi_agent_chat.py +56 -2
- autogen/agentchat/group/on_condition.py +11 -11
- autogen/agentchat/group/safeguards/__init__.py +21 -0
- autogen/agentchat/group/safeguards/api.py +241 -0
- autogen/agentchat/group/safeguards/enforcer.py +1158 -0
- autogen/agentchat/group/safeguards/events.py +119 -0
- autogen/agentchat/group/safeguards/validator.py +435 -0
- autogen/agentchat/groupchat.py +102 -49
- autogen/agentchat/realtime/experimental/clients/realtime_client.py +2 -2
- autogen/agentchat/realtime/experimental/function_observer.py +2 -3
- autogen/agentchat/realtime/experimental/realtime_agent.py +2 -3
- autogen/agentchat/realtime/experimental/realtime_swarm.py +22 -13
- autogen/agentchat/user_proxy_agent.py +55 -53
- autogen/agents/experimental/document_agent/document_agent.py +1 -10
- autogen/agents/experimental/document_agent/parser_utils.py +5 -1
- autogen/browser_utils.py +4 -4
- autogen/cache/abstract_cache_base.py +2 -6
- autogen/cache/disk_cache.py +1 -6
- autogen/cache/in_memory_cache.py +2 -6
- autogen/cache/redis_cache.py +1 -5
- autogen/coding/__init__.py +10 -2
- autogen/coding/base.py +2 -1
- autogen/coding/docker_commandline_code_executor.py +1 -6
- autogen/coding/factory.py +9 -0
- autogen/coding/jupyter/docker_jupyter_server.py +1 -7
- autogen/coding/jupyter/jupyter_client.py +2 -9
- autogen/coding/jupyter/jupyter_code_executor.py +2 -7
- autogen/coding/jupyter/local_jupyter_server.py +2 -6
- autogen/coding/local_commandline_code_executor.py +0 -65
- autogen/coding/yepcode_code_executor.py +197 -0
- autogen/environments/docker_python_environment.py +3 -3
- autogen/environments/system_python_environment.py +5 -5
- autogen/environments/venv_python_environment.py +5 -5
- autogen/events/agent_events.py +1 -1
- autogen/events/client_events.py +1 -1
- autogen/fast_depends/utils.py +10 -0
- autogen/graph_utils.py +5 -7
- autogen/import_utils.py +3 -1
- autogen/interop/pydantic_ai/pydantic_ai.py +8 -5
- autogen/io/processors/console_event_processor.py +8 -3
- autogen/llm_config/client.py +3 -2
- autogen/llm_config/config.py +168 -91
- autogen/llm_config/entry.py +38 -26
- autogen/llm_config/types.py +35 -0
- autogen/llm_config/utils.py +223 -0
- autogen/mcp/mcp_proxy/operation_grouping.py +48 -39
- autogen/messages/agent_messages.py +1 -1
- autogen/messages/client_messages.py +1 -1
- autogen/oai/__init__.py +8 -1
- autogen/oai/bedrock.py +0 -13
- autogen/oai/client.py +25 -11
- autogen/oai/client_utils.py +31 -1
- autogen/oai/cohere.py +4 -14
- autogen/oai/gemini.py +4 -6
- autogen/oai/gemini_types.py +1 -0
- autogen/oai/openai_utils.py +44 -115
- autogen/remote/__init__.py +18 -0
- autogen/remote/agent.py +199 -0
- autogen/remote/agent_service.py +142 -0
- autogen/remote/errors.py +17 -0
- autogen/remote/httpx_client_factory.py +131 -0
- autogen/remote/protocol.py +37 -0
- autogen/remote/retry.py +102 -0
- autogen/remote/runtime.py +96 -0
- autogen/testing/__init__.py +12 -0
- autogen/testing/messages.py +45 -0
- autogen/testing/test_agent.py +111 -0
- autogen/tools/dependency_injection.py +4 -8
- autogen/tools/experimental/reliable/reliable.py +3 -2
- autogen/tools/experimental/web_search_preview/web_search_preview.py +1 -1
- autogen/tools/function_utils.py +2 -1
- autogen/version.py +1 -1
- {ag2-0.9.9.dist-info → ag2-0.10.0.dist-info}/WHEEL +0 -0
- {ag2-0.9.9.dist-info → ag2-0.10.0.dist-info}/licenses/LICENSE +0 -0
- {ag2-0.9.9.dist-info → ag2-0.10.0.dist-info}/licenses/NOTICE.md +0 -0
autogen/agentchat/group/context_expression.py:
@@ -13,27 +13,27 @@ from .context_variables import ContextVariables
 @dataclass
 @export_module("autogen")
 class ContextExpression:
-    """A class to evaluate logical expressions using context variables
-    … [remaining original docstring lines not captured in the extraction]
+    """A class to evaluate logical expressions using context variables.
+
+    Args:
+        expression (str): A string containing a logical expression with context variable references.
+            - Variable references use ${var_name} syntax: ${logged_in}, ${attempts}
+            - String literals can use normal quotes: 'hello', "world"
+            - Supported operators:
+                - Logical: not/!, and/&, or/|
+                - Comparison: >, <, >=, <=, ==, !=
+            - Supported functions:
+                - len(${var_name}): Gets the length of a list, string, or other collection
+            - Parentheses can be used for grouping
+            - Examples:
+                - "not ${logged_in} and ${is_admin} or ${guest_checkout}"
+                - "!${logged_in} & ${is_admin} | ${guest_checkout}"
+                - "len(${orders}) > 0 & ${user_active}"
+                - "len(${cart_items}) == 0 | ${checkout_started}"
+
+    Raises:
+        SyntaxError: If the expression cannot be parsed
+        ValueError: If the expression contains disallowed operations
     """

     expression: str

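The new docstring spells out the expression grammar. A minimal usage sketch based on it (the `evaluate` method name and the `ContextVariables(data=...)` construction are assumptions inferred from the surrounding codebase, not confirmed by this diff):

from autogen import ContextExpression
from autogen.agentchat.group import ContextVariables

# Expression syntax per the docstring above: ${var} references are
# resolved against context variables at evaluation time.
expr = ContextExpression("len(${orders}) > 0 and ${user_active}")

# Assumed API: evaluate() resolves the ${...} references and returns a bool.
context = ContextVariables(data={"orders": ["o-1"], "user_active": True})
print(expr.evaluate(context))  # True under the assumed semantics
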
autogen/agentchat/group/group_tool_executor.py:
@@ -7,6 +7,7 @@ from collections.abc import Callable
 from copy import deepcopy
 from typing import Annotated, Any

+from ...code_utils import content_str
 from ...oai import OpenAIWrapper
 from ...tools import Depends, Tool
 from ...tools.dependency_injection import inject_params, on

@@ -33,6 +34,9 @@ class GroupToolExecutor(ConversableAgent):
         # Store the next target from a tool call
         self._group_next_target: TransitionTarget | None = None

+        # Track the original agent that initiated the tool call (for safeguards)
+        self._tool_call_originator: str | None = None
+
         # Primary tool reply function for handling the tool reply and the ReplyResult and TransitionTarget returns
         self.register_reply([Agent, None], self._generate_group_tool_reply, remove_other_reply_funcs=True)

@@ -57,6 +61,18 @@ class GroupToolExecutor(ConversableAgent):
         """Clears the next target to transition to."""
         self._group_next_target = None

+    def set_tool_call_originator(self, agent_name: str) -> None:
+        """Sets the original agent that initiated the tool call (for safeguard transparency)."""
+        self._tool_call_originator = agent_name
+
+    def get_tool_call_originator(self) -> str | None:
+        """Gets the original agent that initiated the tool call."""
+        return self._tool_call_originator
+
+    def clear_tool_call_originator(self) -> None:
+        """Clears the tool call originator."""
+        self._tool_call_originator = None
+
     def _modify_context_variables_param(
         self, f: Callable[..., Any], context_variables: ContextVariables
     ) -> Callable[..., Any]:

@@ -71,6 +87,9 @@ class GroupToolExecutor(ConversableAgent):
         """
         sig = inspect.signature(f)

+        def wrapper(*args: Any, **kwargs: Any) -> Any:
+            return f(*args, **kwargs)
+
         # Check if context_variables parameter exists and update it if so
         if __CONTEXT_VARIABLES_PARAM_NAME__ in sig.parameters:
             new_params = []

@@ -84,13 +103,13 @@ class GroupToolExecutor(ConversableAgent):

             # Update signature
             new_sig = sig.replace(parameters=new_params)
-            …
+            wrapper.__signature__ = new_sig  # type: ignore[attr-defined]

-        return
+        return wrapper

-    def _change_tool_context_variables_to_depends(
-        self, agent: ConversableAgent, current_tool: Tool, context_variables: ContextVariables
-    ) -> None:
+    def make_tool_copy_with_context_variables(
+        self, current_tool: Tool, context_variables: ContextVariables
+    ) -> Tool | None:
         """Checks for the context_variables parameter in the tool and updates it to use dependency injection."""
         # If the tool has a context_variables parameter, remove the tool and reregister it without the parameter
         if __CONTEXT_VARIABLES_PARAM_NAME__ in current_tool.tool_schema["function"]["parameters"]["properties"]:

@@ -100,16 +119,19 @@ class GroupToolExecutor(ConversableAgent):
             # Remove the Tool from the agent
             name = current_tool._name
             description = current_tool._description
-            agent.remove_tool_for_llm(current_tool)

             # Recreate the tool without the context_variables parameter
-            tool_func = self._modify_context_variables_param(
-                … [continuation arguments not captured in the extraction]
-            )
+            tool_func = self._modify_context_variables_param(tool_func, context_variables)
             tool_func = inject_params(tool_func)
+            return ConversableAgent._create_tool_if_needed(func_or_tool=tool_func, name=name, description=description)
+        return None

-
+    def _change_tool_context_variables_to_depends(
+        self, agent: ConversableAgent, current_tool: Tool, context_variables: ContextVariables
+    ) -> None:
+        """Checks for the context_variables parameter in the tool and updates it to use dependency injection."""
+        if new_tool := self.make_tool_copy_with_context_variables(current_tool, context_variables):
+            agent.remove_tool_for_llm(current_tool)
             agent.register_for_llm()(new_tool)

     def register_agents_functions(self, agents: list[ConversableAgent], context_variables: ContextVariables) -> None:

@@ -140,15 +162,22 @@ class GroupToolExecutor(ConversableAgent):
         2. Generates the tool calls reply.
         3. Updates context_variables and next_agent based on the tool call response.
         """
+
         if config is None:
             config = agent  # type: ignore[assignment]
         if messages is None:
             messages = agent._oai_messages[sender]

         message = messages[-1]
-
+        # Track the original agent that initiated this tool call (for safeguard transparency)
+        # Use sender.name as fallback when message doesn't have a name field (e.g., for tool_calls messages)
+        agent_name = message.get("name", sender.name if sender else "unknown")
+        self.set_tool_call_originator(agent_name)
+
         if message.get("tool_calls"):
             tool_call_count = len(message["tool_calls"])
+            tool_message = None
             # Loop through tool calls individually (so context can be updated after each function call)
             next_target: TransitionTarget | None = None
             tool_responses_inner = []

@@ -182,11 +211,13 @@ class GroupToolExecutor(ConversableAgent):
                    next_target = content

                # Serialize the content to a string
-                … [original serialization lines not captured in the extraction]
+                normalized_content = (
+                    content_str(content) if isinstance(content, (str, list)) or content is None else str(content)
+                )
+                tool_response["content"] = normalized_content

                tool_responses_inner.append(tool_response)
-                contents.append(…)
+                contents.append(normalized_content)

        self._group_next_target = next_target  # type: ignore[attr-defined]

autogen/agentchat/group/guardrails.py:
@@ -7,7 +7,7 @@ import re
 from abc import ABC, abstractmethod
 from typing import TYPE_CHECKING, Any

-from pydantic import BaseModel, Field
+from pydantic import BaseModel, ConfigDict, Field

 from ...oai.client import OpenAIWrapper

@@ -16,32 +16,6 @@ if TYPE_CHECKING:
     from .targets.transition_target import TransitionTarget


-class GuardrailResult(BaseModel):
-    """Represents the outcome of a guardrail check."""
-
-    activated: bool
-    justification: str = Field(default="No justification provided")
-
-    def __str__(self) -> str:
-        return f"Guardrail Result: {self.activated}\nJustification: {self.justification}"
-
-    @staticmethod
-    def parse(text: str) -> "GuardrailResult":
-        """Parses a JSON string into a GuardrailResult object.
-
-        Args:
-            text (str): The JSON string to parse.
-
-        Returns:
-            GuardrailResult: The parsed GuardrailResult object.
-        """
-        try:
-            data = json.loads(text)
-            return GuardrailResult(**data)
-        except (json.JSONDecodeError, ValueError) as e:
-            raise ValueError(f"Failed to parse GuardrailResult from text: {text}") from e
-
-
 class Guardrail(ABC):
     """Abstract base class for guardrails."""

@@ -59,7 +33,7 @@ class Guardrail(ABC):
     def check(
         self,
         context: str | list[dict[str, Any]],
-    ) -> GuardrailResult:
+    ) -> "GuardrailResult":
         """Checks the text against the guardrail and returns a GuardrailResult.

         Args:

@@ -99,7 +73,7 @@ You will activate the guardrail only if the condition is met.
     def check(
         self,
         context: str | list[dict[str, Any]],
-    ) -> GuardrailResult:
+    ) -> "GuardrailResult":
         """Checks the context against the guardrail using an LLM.

         Args:

@@ -120,7 +94,7 @@ You will activate the guardrail only if the condition is met.
             raise ValueError("Context must be a string or a list of messages.")
         # Call the LLM with the check messages
         response = self.client.create(messages=check_messages)
-        return GuardrailResult.parse(response.choices[0].message.content)  # type: ignore
+        return GuardrailResult.parse(response.choices[0].message.content, guardrail=self)  # type: ignore


 class RegexGuardrail(Guardrail):

@@ -143,7 +117,7 @@ class RegexGuardrail(Guardrail):
     def check(
         self,
         context: str | list[dict[str, Any]],
-    ) -> GuardrailResult:
+    ) -> "GuardrailResult":
         """Checks the context against the guardrail using a regular expression.

         Args:

@@ -167,5 +141,39 @@ class RegexGuardrail(Guardrail):
         if match:
             activated = True
             justification = f"Match found -> {match.group(0)}"
-            return GuardrailResult(activated=activated, justification=justification)
-        return GuardrailResult(activated=False, justification="No match found in the context.")
+            return GuardrailResult(activated=activated, justification=justification, guardrail=self)
+        return GuardrailResult(activated=False, justification="No match found in the context.", guardrail=self)
+
+
+class GuardrailResult(BaseModel):
+    """Represents the outcome of a guardrail check."""
+
+    activated: bool
+    guardrail: Guardrail
+    justification: str = Field(default="No justification provided")
+
+    model_config = ConfigDict(arbitrary_types_allowed=True)
+
+    def __str__(self) -> str:
+        return f"Guardrail Result: {self.activated}\nJustification: {self.justification}"
+
+    @property
+    def reply(self) -> str:
+        return f"{self.guardrail.activation_message}\nJustification: {self.justification}"
+
+    @staticmethod
+    def parse(text: str, guardrail: "Guardrail") -> "GuardrailResult":
+        """Parses a JSON string into a GuardrailResult object.
+
+        Args:
+            text (str): The JSON string to parse.
+            guardrail (Guardrail): The guardrail that the result is for.
+
+        Returns:
+            GuardrailResult: The parsed GuardrailResult object.
+        """
+        try:
+            data = json.loads(text)
+            return GuardrailResult(**data, guardrail=guardrail)
+        except (json.JSONDecodeError, ValueError) as e:
+            raise ValueError(f"Failed to parse GuardrailResult from text: {text}") from e

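With GuardrailResult now carrying a reference to its Guardrail, a result can render an agent-facing reply via the new `reply` property. A hedged sketch of the parse flow (`my_guardrail` is assumed to be an existing Guardrail instance with an `activation_message` attribute, as the `reply` property implies; the diff shows check() passing `self` in this position):

import json
from autogen.agentchat.group.guardrails import GuardrailResult

# Simulate the JSON an LLM-based check would return.
payload = json.dumps({"activated": True, "justification": "PII detected"})
result = GuardrailResult.parse(payload, guardrail=my_guardrail)

print(str(result))   # "Guardrail Result: True" plus the justification line
print(result.reply)  # activation_message + justification, per the new property
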
autogen/agentchat/group/handoffs.py:
@@ -14,17 +14,17 @@ __all__ = ["Handoffs"]


 class Handoffs(BaseModel):
-    """Container for all handoff transition conditions of a ConversableAgent
-    … [remaining original docstring lines not captured in the extraction]
+    """Container for all handoff transition conditions of a ConversableAgent.
+
+    Three types of conditions can be added, each with a different order and time of use:
+    1. OnContextConditions (evaluated without an LLM)
+    2. OnConditions (evaluated with an LLM)
+    3. After work TransitionTarget (if no other transition is triggered)
+
+    Supports method chaining:
+        agent.handoffs.add_context_conditions([condition1])
+            .add_llm_condition(condition2)
+            .set_after_work(after_work)
     """

     context_conditions: list[OnContextCondition] = Field(default_factory=list)

autogen/agentchat/group/multi_agent_chat.py:
@@ -11,6 +11,7 @@ from ...events.agent_events import ErrorEvent, RunCompletionEvent
 from ...io.base import IOStream
 from ...io.run_response import AsyncRunResponse, AsyncRunResponseProtocol, RunResponse, RunResponseProtocol
 from ...io.thread_io_stream import AsyncThreadIOStream, ThreadIOStream
+from ...llm_config import LLMConfig
 from ..chat import ChatResult
 from .context_variables import ContextVariables
 from .group_utils import cleanup_temp_user_messages

@@ -32,6 +33,9 @@ def initiate_group_chat(
     pattern: "Pattern",
     messages: list[dict[str, Any]] | str,
     max_rounds: int = 20,
+    safeguard_policy: dict[str, Any] | str | None = None,
+    safeguard_llm_config: LLMConfig | None = None,
+    mask_llm_config: LLMConfig | None = None,
 ) -> tuple[ChatResult, ContextVariables, "Agent"]:
     """Initialize and run a group chat using a pattern for configuration.

@@ -39,6 +43,9 @@ def initiate_group_chat(
         pattern: Pattern object that encapsulates the chat configuration.
         messages: Initial message(s).
         max_rounds: Maximum number of conversation rounds.
+        safeguard_policy: Optional safeguard policy dict or path to JSON file.
+        safeguard_llm_config: Optional LLM configuration for safeguard checks.
+        mask_llm_config: Optional LLM configuration for masking.

     Returns:
         ChatResult: Conversations chat history.

@@ -66,6 +73,17 @@ def initiate_group_chat(
         messages=messages,
     )

+    # Apply safeguards if provided
+    if safeguard_policy:
+        from .safeguards import apply_safeguard_policy
+
+        apply_safeguard_policy(
+            groupchat_manager=manager,
+            policy=safeguard_policy,
+            safeguard_llm_config=safeguard_llm_config,
+            mask_llm_config=mask_llm_config,
+        )
+
     # Start or resume the conversation
     if len(processed_messages) > 1:
         last_agent, last_message = manager.resume(messages=processed_messages)

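The three new keyword arguments thread a policy through to apply_safeguard_policy before the chat starts. A hedged call sketch (`pattern` and `llm_config` are assumed pre-existing objects; the policy-file schema lives in the new safeguards module and is not shown in this diff):

from autogen.agentchat.group.multi_agent_chat import initiate_group_chat

# The safeguard keywords mirror the new signature above.
chat_result, context_vars, last_agent = initiate_group_chat(
    pattern=pattern,
    messages="Process the refund request",
    max_rounds=20,
    safeguard_policy="safeguard_policy.json",  # dict or path to a JSON file
    safeguard_llm_config=llm_config,           # used for LLM-based safeguard checks
)
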
@@ -94,6 +112,9 @@ async def a_initiate_group_chat(
     pattern: "Pattern",
     messages: list[dict[str, Any]] | str,
     max_rounds: int = 20,
+    safeguard_policy: dict[str, Any] | str | None = None,
+    safeguard_llm_config: LLMConfig | None = None,
+    mask_llm_config: LLMConfig | None = None,
 ) -> tuple[ChatResult, ContextVariables, "Agent"]:
     """Initialize and run a group chat using a pattern for configuration, asynchronously.

@@ -101,6 +122,9 @@ async def a_initiate_group_chat(
         pattern: Pattern object that encapsulates the chat configuration.
         messages: Initial message(s).
         max_rounds: Maximum number of conversation rounds.
+        safeguard_policy: Optional safeguard policy dict or path to JSON file.
+        safeguard_llm_config: Optional LLM configuration for safeguard checks.
+        mask_llm_config: Optional LLM configuration for masking.

     Returns:
         ChatResult: Conversations chat history.

@@ -128,6 +152,17 @@ async def a_initiate_group_chat(
         messages=messages,
     )

+    # Apply safeguards if provided
+    if safeguard_policy:
+        from .safeguards import apply_safeguard_policy
+
+        apply_safeguard_policy(
+            groupchat_manager=manager,
+            policy=safeguard_policy,
+            safeguard_llm_config=safeguard_llm_config,
+            mask_llm_config=mask_llm_config,
+        )
+
     # Start or resume the conversation
     if len(processed_messages) > 1:
         last_agent, last_message = await manager.a_resume(messages=processed_messages)

@@ -156,6 +191,9 @@ def run_group_chat(
     pattern: "Pattern",
     messages: list[dict[str, Any]] | str,
     max_rounds: int = 20,
+    safeguard_policy: dict[str, Any] | str | None = None,
+    safeguard_llm_config: LLMConfig | None = None,
+    mask_llm_config: LLMConfig | None = None,
 ) -> RunResponseProtocol:
     iostream = ThreadIOStream()
     # todo: add agents

@@ -165,6 +203,9 @@ def run_group_chat(
         pattern: "Pattern" = pattern,
         messages: list[dict[str, Any]] | str = messages,
         max_rounds: int = max_rounds,
+        safeguard_policy: dict[str, Any] | str | None = safeguard_policy,
+        safeguard_llm_config: LLMConfig | None = safeguard_llm_config,
+        mask_llm_config: LLMConfig | None = mask_llm_config,
         iostream: ThreadIOStream = iostream,
         response: RunResponse = response,
     ) -> None:

@@ -174,6 +215,9 @@ def run_group_chat(
             pattern=pattern,
             messages=messages,
             max_rounds=max_rounds,
+            safeguard_policy=safeguard_policy,
+            safeguard_llm_config=safeguard_llm_config,
+            mask_llm_config=mask_llm_config,
         )

         IOStream.get_default().send(

@@ -200,6 +244,9 @@ async def a_run_group_chat(
     pattern: "Pattern",
     messages: list[dict[str, Any]] | str,
     max_rounds: int = 20,
+    safeguard_policy: dict[str, Any] | str | None = None,
+    safeguard_llm_config: LLMConfig | None = None,
+    mask_llm_config: LLMConfig | None = None,
 ) -> AsyncRunResponseProtocol:
     iostream = AsyncThreadIOStream()
     # todo: add agents

@@ -209,6 +256,9 @@ async def a_run_group_chat(
         pattern: "Pattern" = pattern,
         messages: list[dict[str, Any]] | str = messages,
         max_rounds: int = max_rounds,
+        safeguard_policy: dict[str, Any] | str | None = safeguard_policy,
+        safeguard_llm_config: LLMConfig | None = safeguard_llm_config,
+        mask_llm_config: LLMConfig | None = mask_llm_config,
         iostream: AsyncThreadIOStream = iostream,
         response: AsyncRunResponse = response,
     ) -> None:

@@ -218,6 +268,9 @@ async def a_run_group_chat(
             pattern=pattern,
             messages=messages,
             max_rounds=max_rounds,
+            safeguard_policy=safeguard_policy,
+            safeguard_llm_config=safeguard_llm_config,
+            mask_llm_config=mask_llm_config,
         )

         IOStream.get_default().send(

@@ -232,6 +285,7 @@ async def a_run_group_chat(
         except Exception as e:
             response.iostream.send(ErrorEvent(error=e))  # type: ignore[call-arg]

-    asyncio.create_task(_initiate_group_chat())
-
+    task = asyncio.create_task(_initiate_group_chat())
+    # prevent the task from being garbage collected
+    response._task_ref = task  # type: ignore[attr-defined]
     return response

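The `_task_ref` assignment addresses a standard asyncio pitfall: the event loop keeps only weak references to scheduled tasks, so a fire-and-forget task with no other referent can be garbage-collected before it completes. A standalone sketch of the usual pattern, independent of this codebase:

import asyncio

background_tasks: set[asyncio.Task] = set()

async def main() -> None:
    task = asyncio.create_task(asyncio.sleep(0.1))
    # Pin the task so it cannot be garbage-collected mid-flight,
    # then let it remove itself from the set once finished.
    background_tasks.add(task)
    task.add_done_callback(background_tasks.discard)
    await asyncio.sleep(0.2)

asyncio.run(main())
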
autogen/agentchat/group/on_condition.py:
@@ -17,17 +17,17 @@ __all__ = [

 @export_module("autogen")
 class OnCondition(BaseModel):  # noqa: N801
-    """Defines a condition for transitioning to another agent or nested chats
-    … [remaining original docstring lines not captured in the extraction]
+    """Defines a condition for transitioning to another agent or nested chats.
+
+    This is for LLM-based condition evaluation where these conditions are translated into tools and attached to the agent.
+
+    These are evaluated after the OnContextCondition conditions but before the after work condition.
+
+    Args:
+        target (TransitionTarget): The transition (essentially an agent) to hand off to.
+        condition (LLMCondition): The condition for transitioning to the target agent, evaluated by the LLM.
+        available (AvailableCondition): Optional condition to determine if this OnCondition is included for the LLM to evaluate based on context variables using classes like StringAvailableCondition and ContextExpressionAvailableCondition.
+        llm_function_name (Optional[str]): The name of the LLM function to use for this condition.
     """

     target: TransitionTarget

autogen/agentchat/group/safeguards/__init__.py (new file):
@@ -0,0 +1,21 @@
+# Copyright (c) 2023 - 2025, AG2ai, Inc., AG2ai open-source projects maintainers and core contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+
+"""Safeguards module for agent safety and compliance.
+
+This module provides functionality for applying, managing, and enforcing
+safeguards on agent interactions including inter-agent communication,
+tool interactions, LLM interactions, and user interactions.
+"""
+
+from .api import apply_safeguard_policy, reset_safeguard_policy
+from .enforcer import SafeguardEnforcer
+from .events import SafeguardEvent
+
+__all__ = [
+    "SafeguardEnforcer",
+    "SafeguardEvent",
+    "apply_safeguard_policy",
+    "reset_safeguard_policy",
+]
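
A hedged sketch of the module's public API as exported above (`manager` is assumed to be an existing GroupChatManager; the `reset_safeguard_policy` signature is an assumption, while the `apply_safeguard_policy` keywords match the call sites added in multi_agent_chat.py earlier in this diff):

from autogen.agentchat.group.safeguards import (
    apply_safeguard_policy,
    reset_safeguard_policy,
)

# Keywords match the call sites shown in the multi_agent_chat.py hunks.
apply_safeguard_policy(
    groupchat_manager=manager,
    policy={"version": "1.0"},  # illustrative dict; a JSON file path also works
    safeguard_llm_config=None,
    mask_llm_config=None,
)

# Assumed teardown counterpart exported in __all__ above.
reset_safeguard_policy(manager)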