ag2 0.9.8.post1__py3-none-any.whl → 0.9.10__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of ag2 might be problematic.

Files changed (88)
  1. {ag2-0.9.8.post1.dist-info → ag2-0.9.10.dist-info}/METADATA +232 -210
  2. {ag2-0.9.8.post1.dist-info → ag2-0.9.10.dist-info}/RECORD +88 -80
  3. autogen/_website/generate_mkdocs.py +3 -3
  4. autogen/_website/notebook_processor.py +1 -1
  5. autogen/_website/utils.py +1 -1
  6. autogen/agentchat/assistant_agent.py +15 -15
  7. autogen/agentchat/chat.py +52 -40
  8. autogen/agentchat/contrib/agent_eval/criterion.py +1 -1
  9. autogen/agentchat/contrib/capabilities/text_compressors.py +5 -5
  10. autogen/agentchat/contrib/capabilities/tools_capability.py +1 -1
  11. autogen/agentchat/contrib/capabilities/transforms.py +1 -1
  12. autogen/agentchat/contrib/captainagent/agent_builder.py +1 -1
  13. autogen/agentchat/contrib/captainagent/captainagent.py +20 -19
  14. autogen/agentchat/contrib/graph_rag/falkor_graph_query_engine.py +2 -5
  15. autogen/agentchat/contrib/graph_rag/graph_rag_capability.py +5 -5
  16. autogen/agentchat/contrib/graph_rag/neo4j_graph_query_engine.py +18 -17
  17. autogen/agentchat/contrib/rag/mongodb_query_engine.py +2 -2
  18. autogen/agentchat/contrib/rag/query_engine.py +11 -11
  19. autogen/agentchat/contrib/retrieve_assistant_agent.py +3 -0
  20. autogen/agentchat/contrib/swarm_agent.py +3 -2
  21. autogen/agentchat/contrib/vectordb/couchbase.py +1 -1
  22. autogen/agentchat/contrib/vectordb/mongodb.py +1 -1
  23. autogen/agentchat/contrib/web_surfer.py +1 -1
  24. autogen/agentchat/conversable_agent.py +184 -80
  25. autogen/agentchat/group/context_expression.py +21 -21
  26. autogen/agentchat/group/handoffs.py +11 -11
  27. autogen/agentchat/group/multi_agent_chat.py +3 -2
  28. autogen/agentchat/group/on_condition.py +11 -11
  29. autogen/agentchat/group/safeguards/__init__.py +21 -0
  30. autogen/agentchat/group/safeguards/api.py +224 -0
  31. autogen/agentchat/group/safeguards/enforcer.py +1064 -0
  32. autogen/agentchat/group/safeguards/events.py +119 -0
  33. autogen/agentchat/group/safeguards/validator.py +435 -0
  34. autogen/agentchat/groupchat.py +60 -19
  35. autogen/agentchat/realtime/experimental/clients/realtime_client.py +2 -2
  36. autogen/agentchat/realtime/experimental/function_observer.py +2 -3
  37. autogen/agentchat/realtime/experimental/realtime_agent.py +2 -3
  38. autogen/agentchat/realtime/experimental/realtime_swarm.py +21 -10
  39. autogen/agentchat/user_proxy_agent.py +55 -53
  40. autogen/agents/experimental/document_agent/document_agent.py +1 -10
  41. autogen/agents/experimental/document_agent/parser_utils.py +5 -1
  42. autogen/browser_utils.py +4 -4
  43. autogen/cache/abstract_cache_base.py +2 -6
  44. autogen/cache/disk_cache.py +1 -6
  45. autogen/cache/in_memory_cache.py +2 -6
  46. autogen/cache/redis_cache.py +1 -5
  47. autogen/coding/__init__.py +10 -2
  48. autogen/coding/base.py +2 -1
  49. autogen/coding/docker_commandline_code_executor.py +1 -6
  50. autogen/coding/factory.py +9 -0
  51. autogen/coding/jupyter/docker_jupyter_server.py +1 -7
  52. autogen/coding/jupyter/jupyter_client.py +2 -9
  53. autogen/coding/jupyter/jupyter_code_executor.py +2 -7
  54. autogen/coding/jupyter/local_jupyter_server.py +2 -6
  55. autogen/coding/local_commandline_code_executor.py +0 -65
  56. autogen/coding/yepcode_code_executor.py +197 -0
  57. autogen/environments/docker_python_environment.py +3 -3
  58. autogen/environments/system_python_environment.py +5 -5
  59. autogen/environments/venv_python_environment.py +5 -5
  60. autogen/events/agent_events.py +1 -1
  61. autogen/events/client_events.py +1 -1
  62. autogen/fast_depends/utils.py +10 -0
  63. autogen/graph_utils.py +5 -7
  64. autogen/import_utils.py +28 -15
  65. autogen/interop/pydantic_ai/pydantic_ai.py +8 -5
  66. autogen/io/processors/console_event_processor.py +8 -3
  67. autogen/llm_config/config.py +168 -91
  68. autogen/llm_config/entry.py +38 -26
  69. autogen/llm_config/types.py +35 -0
  70. autogen/llm_config/utils.py +223 -0
  71. autogen/mcp/mcp_proxy/operation_grouping.py +48 -39
  72. autogen/messages/agent_messages.py +1 -1
  73. autogen/messages/client_messages.py +1 -1
  74. autogen/oai/__init__.py +8 -1
  75. autogen/oai/client.py +10 -3
  76. autogen/oai/client_utils.py +1 -1
  77. autogen/oai/cohere.py +4 -4
  78. autogen/oai/gemini.py +4 -6
  79. autogen/oai/gemini_types.py +1 -0
  80. autogen/oai/openai_utils.py +44 -115
  81. autogen/tools/dependency_injection.py +4 -8
  82. autogen/tools/experimental/reliable/reliable.py +3 -2
  83. autogen/tools/experimental/web_search_preview/web_search_preview.py +1 -1
  84. autogen/tools/function_utils.py +2 -1
  85. autogen/version.py +1 -1
  86. {ag2-0.9.8.post1.dist-info → ag2-0.9.10.dist-info}/WHEEL +0 -0
  87. {ag2-0.9.8.post1.dist-info → ag2-0.9.10.dist-info}/licenses/LICENSE +0 -0
  88. {ag2-0.9.8.post1.dist-info → ag2-0.9.10.dist-info}/licenses/NOTICE.md +0 -0
@@ -132,11 +132,11 @@ class ConversableAgent(LLMAgent):
     For example, AssistantAgent and UserProxyAgent are subclasses of this class,
     configured with different default settings.

-    To modify auto reply, override `generate_reply` method.
-    To disable/enable human response in every turn, set `human_input_mode` to "NEVER" or "ALWAYS".
-    To modify the way to get human input, override `get_human_input` method.
-    To modify the way to execute code blocks, single code block, or function call, override `execute_code_blocks`,
-    `run_code`, and `execute_function` methods respectively.
+    To modify auto reply, override `generate_reply` method. \n
+    To disable/enable human response in every turn, set `human_input_mode` to "NEVER" or "ALWAYS". \n
+    To modify the way to get human input, override `get_human_input` method. \n
+    To modify the way to execute code blocks, single code block, or function call, override `execute_code_blocks`, \n
+    `run_code`, and `execute_function` methods respectively. \n
     """

     DEFAULT_CONFIG = False  # False or dict, the default config for llm inference
@@ -144,7 +144,7 @@ class ConversableAgent(LLMAgent):

     DEFAULT_SUMMARY_PROMPT = "Summarize the takeaway from the conversation. Do not add any introductory phrases."
     DEFAULT_SUMMARY_METHOD = "last_msg"
-    llm_config: dict[str, Any] | Literal[False]
+    llm_config: LLMConfig | Literal[False]

     def __init__(
         self,
@@ -168,60 +168,60 @@ class ConversableAgent(LLMAgent):
         | None = None,
         handoffs: Handoffs | None = None,
     ):
-        """Args:
-            name (str): name of the agent.
-            system_message (str or list): system message for the ChatCompletion inference.
-            is_termination_msg (function): a function that takes a message in the form of a dictionary
+        """Args:\n
+            1) name (str): name of the agent.\n
+            2) system_message (str or list): system message for the ChatCompletion inference.\n
+            3) is_termination_msg (function): a function that takes a message in the form of a dictionary
                 and returns a boolean value indicating if this received message is a termination message.
-                The dict can contain the following keys: "content", "role", "name", "function_call".
-            max_consecutive_auto_reply (int): the maximum number of consecutive auto replies.
+                The dict can contain the following keys: "content", "role", "name", "function_call".\n
+            4) max_consecutive_auto_reply (int): the maximum number of consecutive auto replies.
                 default to None (no limit provided, class attribute MAX_CONSECUTIVE_AUTO_REPLY will be used as the limit in this case).
-                When set to 0, no auto reply will be generated.
-            human_input_mode (str): whether to ask for human inputs every time a message is received.
-                Possible values are "ALWAYS", "TERMINATE", "NEVER".
+                When set to 0, no auto reply will be generated.\n
+            5) human_input_mode (str): whether to ask for human inputs every time a message is received.\n
+                Possible values are "ALWAYS", "TERMINATE", "NEVER".\n
                 (1) When "ALWAYS", the agent prompts for human input every time a message is received.
                     Under this mode, the conversation stops when the human input is "exit",
-                    or when is_termination_msg is True and there is no human input.
+                    or when is_termination_msg is True and there is no human input.\n
                 (2) When "TERMINATE", the agent only prompts for human input only when a termination message is received or
-                    the number of auto reply reaches the max_consecutive_auto_reply.
+                    the number of auto reply reaches the max_consecutive_auto_reply.\n
                 (3) When "NEVER", the agent will never prompt for human input. Under this mode, the conversation stops
-                    when the number of auto reply reaches the max_consecutive_auto_reply or when is_termination_msg is True.
-            function_map (dict[str, callable]): Mapping function names (passed to openai) to callable functions, also used for tool calls.
-            code_execution_config (dict or False): config for the code execution.
-                To disable code execution, set to False. Otherwise, set to a dictionary with the following keys:
-                - work_dir (Optional, str): The working directory for the code execution.
-                    If None, a default working directory will be used.
+                    when the number of auto reply reaches the max_consecutive_auto_reply or when is_termination_msg is True. \n
+            6) function_map (dict[str, callable]): Mapping function names (passed to openai) to callable functions, also used for tool calls. \n
+            7) code_execution_config (dict or False): config for the code execution.\n
+                To disable code execution, set to False. Otherwise, set to a dictionary with the following keys:\n
+                - work_dir (Optional, str): The working directory for the code execution.\n
+                    If None, a default working directory will be used.\n
                     The default working directory is the "extensions" directory under
-                    "path_to_autogen".
-                - use_docker (Optional, list, str or bool): The docker image to use for code execution.
-                    Default is True, which means the code will be executed in a docker container. A default list of images will be used.
-                    If a list or a str of image name(s) is provided, the code will be executed in a docker container
-                    with the first image successfully pulled.
-                    If False, the code will be executed in the current environment.
-                    We strongly recommend using docker for code execution.
-                - timeout (Optional, int): The maximum execution time in seconds.
+                    "path_to_autogen".\n
+                - use_docker (Optional, list, str or bool): The docker image to use for code execution.\n
+                    Default is True, which means the code will be executed in a docker container. A default list of images will be used.\n
+                    If a list or a str of image name(s) is provided, the code will be executed in a docker container\n
+                    with the first image successfully pulled.\n
+                    If False, the code will be executed in the current environment.\n
+                    We strongly recommend using docker for code execution.\n
+                - timeout (Optional, int): The maximum execution time in seconds.\n
                 - last_n_messages (Experimental, int or str): The number of messages to look back for code execution.
-                    If set to 'auto', it will scan backwards through all messages arriving since the agent last spoke, which is typically the last time execution was attempted. (Default: auto)
-            llm_config (LLMConfig or dict or False or None): llm inference configuration.
-                Please refer to [OpenAIWrapper.create](https://docs.ag2.ai/latest/docs/api-reference/autogen/OpenAIWrapper/#autogen.OpenAIWrapper.create)
-                for available options.
-                When using OpenAI or Azure OpenAI endpoints, please specify a non-empty 'model' either in `llm_config` or in each config of 'config_list' in `llm_config`.
-                To disable llm-based auto reply, set to False.
-                When set to None, will use self.DEFAULT_CONFIG, which defaults to False.
-            default_auto_reply (str or dict): default auto reply when no code execution or llm-based reply is generated.
-            description (str): a short description of the agent. This description is used by other agents
-                (e.g. the GroupChatManager) to decide when to call upon this agent. (Default: system_message)
-            chat_messages (dict or None): the previous chat messages that this agent had in the past with other agents.
+                    If set to 'auto', it will scan backwards through all messages arriving since the agent last spoke, which is typically the last time execution was attempted. (Default: auto)\n
+            8) llm_config (LLMConfig or dict or False or None): llm inference configuration.\n
+                Please refer to [OpenAIWrapper.create](https://docs.ag2.ai/latest/docs/api-reference/autogen/OpenAIWrapper/#autogen.OpenAIWrapper.create)\n
+                for available options.\n
+                When using OpenAI or Azure OpenAI endpoints, please specify a non-empty 'model' either in `llm_config` or in each config of 'config_list' in `llm_config`.\n
+                To disable llm-based auto reply, set to False.\n
+                When set to None, will use self.DEFAULT_CONFIG, which defaults to False.\n
+            9) default_auto_reply (str or dict): default auto reply when no code execution or llm-based reply is generated.\n
+            10) description (str): a short description of the agent. This description is used by other agents
+                (e.g. the GroupChatManager) to decide when to call upon this agent. (Default: system_message)\n
+            11) chat_messages (dict or None): the previous chat messages that this agent had in the past with other agents.
                 Can be used to give the agent a memory by providing the chat history. This will allow the agent to
-                resume previous had conversations. Defaults to an empty chat history.
-            silent (bool or None): (Experimental) whether to print the message sent. If None, will use the value of
-                silent in each function.
-            context_variables (ContextVariables or None): Context variables that provide a persistent context for the agent.
-                Note: This will be a reference to a shared context for multi-agent chats.
-                Behaves like a dictionary with keys and values (akin to dict[str, Any]).
-            functions (List[Callable[..., Any]]): A list of functions to register with the agent, these will be wrapped up as tools and registered for LLM (not execution).
-            update_agent_state_before_reply (List[Callable[..., Any]]): A list of functions, including UpdateSystemMessage's, called to update the agent before it replies.
-            handoffs (Handoffs): Handoffs object containing all handoff transition conditions.
+                resume previous had conversations. Defaults to an empty chat history.\n
+            12) silent (bool or None): (Experimental) whether to print the message sent. If None, will use the value of
+                silent in each function.\n
+            13) context_variables (ContextVariables or None): Context variables that provide a persistent context for the agent.
+                Note: This will be a reference to a shared context for multi-agent chats.\n
+                Behaves like a dictionary with keys and values (akin to dict[str, Any]).\n
+            14) functions (List[Callable[..., Any]]): A list of functions to register with the agent, these will be wrapped up as tools and registered for LLM (not execution).\n
+            15) update_agent_state_before_reply (List[Callable[..., Any]]): A list of functions, including UpdateSystemMessage's, called to update the agent before it replies.\n
+            16) handoffs (Handoffs): Handoffs object containing all handoff transition conditions.\n
         """
         self.handoffs = handoffs if handoffs is not None else Handoffs()
         self.input_guardrails: list[Guardrail] = []
@@ -370,6 +370,12 @@ class ConversableAgent(LLMAgent):
             "process_all_messages_before_reply": [],
             "process_message_before_send": [],
             "update_agent_state": [],
+            # Safeguard hooks for monitoring agent interactions
+            "safeguard_tool_inputs": [],  # Hook for processing tool inputs before execution
+            "safeguard_tool_outputs": [],  # Hook for processing tool outputs after execution
+            "safeguard_llm_inputs": [],  # Hook for processing LLM inputs before sending
+            "safeguard_llm_outputs": [],  # Hook for processing LLM outputs after receiving
+            "safeguard_human_inputs": [],  # Hook for processing human inputs
         }

         # Associate agent update state hooks
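The five new hook lists plug into ConversableAgent's existing register_hook mechanism. A minimal sketch of wiring one up; the agent name and the redaction rule are illustrative, not part of this release:

    import re

    from autogen import ConversableAgent

    agent = ConversableAgent(name="worker", llm_config=False)

    def redact_tool_input(tool_input: dict) -> dict | None:
        # Illustrative safeguard: mask anything resembling an OpenAI-style key
        # in the tool-call arguments. Returning None instead would block the
        # call entirely (see _process_tool_input later in this diff).
        args = tool_input.get("arguments", "") or ""
        tool_input["arguments"] = re.sub(r"sk-[A-Za-z0-9-]+", "[REDACTED]", args)
        return tool_input

    agent.register_hook("safeguard_tool_inputs", redact_tool_input)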
@@ -379,9 +385,7 @@ class ConversableAgent(LLMAgent):
         if not self.llm_config:
             return

-        if any([
-            entry for entry in self.llm_config.config_list if entry.api_type == "openai" and re.search(r"\s", name)
-        ]):
+        if any(entry for entry in self.llm_config.config_list if entry.api_type == "openai" and re.search(r"\s", name)):
             raise ValueError(f"The name of the agent cannot contain any whitespace. The name provided is: '{name}'")

     def _get_display_name(self):
@@ -485,25 +489,15 @@ class ConversableAgent(LLMAgent):
     def _validate_llm_config(
         cls, llm_config: LLMConfig | dict[str, Any] | Literal[False] | None
     ) -> LLMConfig | Literal[False]:
-        # if not(llm_config in (None, False) or isinstance(llm_config, [dict, LLMConfig])):
-        #     raise ValueError(
-        #         "llm_config must be a dict or False or None."
-        #     )
-
         if llm_config is None:
             llm_config = LLMConfig.get_current_llm_config()
             if llm_config is None:
-                llm_config = cls.DEFAULT_CONFIG
-        elif isinstance(llm_config, dict):
-            llm_config = LLMConfig(**llm_config)
-        elif isinstance(llm_config, LLMConfig):
-            llm_config = llm_config.copy()
+                return cls.DEFAULT_CONFIG
+
         elif llm_config is False:
-            pass
-        else:
-            raise ValueError("llm_config must be a LLMConfig, dict or False or None.")
+            return False

-        return llm_config
+        return LLMConfig.ensure_config(llm_config)

     @classmethod
     def _create_client(cls, llm_config: LLMConfig | Literal[False]) -> OpenAIWrapper | None:
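The refactor delegates normalization to the new LLMConfig.ensure_config helper (added alongside autogen/llm_config/utils.py in this release). Assuming ensure_config accepts the same dict and LLMConfig inputs the removed branches handled, the resulting behavior is roughly:

    from autogen import ConversableAgent
    from autogen.llm_config import LLMConfig

    # Sketch only; the exact ensure_config signature lives in this
    # release's autogen/llm_config sources.
    assert ConversableAgent._validate_llm_config(False) is False
    cfg = ConversableAgent._validate_llm_config(
        {"config_list": [{"api_type": "openai", "model": "gpt-4o"}]}
    )
    assert isinstance(cfg, LLMConfig)  # dict input normalized via ensure_config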
@@ -2188,9 +2182,20 @@ class ConversableAgent(LLMAgent):
             return False, None
         if messages is None:
             messages = self._oai_messages[sender]
-        extracted_response = self._generate_oai_reply_from_client(
-            client, self._oai_system_message + messages, self.client_cache
-        )
+
+        # Process messages before sending to LLM, hook point for llm input monitoring
+        processed_messages = self._process_llm_input(self._oai_system_message + messages)
+        if processed_messages is None:
+            return True, {"content": "LLM call blocked by safeguard", "role": "assistant"}
+
+        extracted_response = self._generate_oai_reply_from_client(client, processed_messages, self.client_cache)
+
+        # Process LLM response
+        if extracted_response is not None:
+            processed_extracted_response = self._process_llm_output(extracted_response)
+            if processed_extracted_response is None:
+                raise ValueError("safeguard_llm_outputs hook returned None")
+
         return (False, None) if extracted_response is None else (True, extracted_response)

     def _generate_oai_reply_from_client(self, llm_client, messages, cache) -> str | dict[str, Any] | None:
@@ -2449,14 +2454,26 @@ class ConversableAgent(LLMAgent):
         tool_returns = []
         for tool_call in message.get("tool_calls", []):
             function_call = tool_call.get("function", {})
+
+            # Hook: Process tool input before execution
+            processed_call = self._process_tool_input(function_call)
+            if processed_call is None:
+                raise ValueError("safeguard_tool_inputs hook returned None")
+
             tool_call_id = tool_call.get("id", None)
-            func = self._function_map.get(function_call.get("name", None), None)
-            if inspect.iscoroutinefunction(func):
-                coro = self.a_execute_function(function_call, call_id=tool_call_id)
+            func = self._function_map.get(processed_call.get("name", None), None)
+            if is_coroutine_callable(func):
+                coro = self.a_execute_function(processed_call, call_id=tool_call_id)
                 _, func_return = self._run_async_in_thread(coro)
             else:
-                _, func_return = self.execute_function(function_call, call_id=tool_call_id)
-            content = func_return.get("content", "")
+                _, func_return = self.execute_function(processed_call, call_id=tool_call_id)
+
+            # Hook: Process tool output before returning
+            processed_return = self._process_tool_output(func_return)
+            if processed_return is None:
+                raise ValueError("safeguard_tool_outputs hook returned None")
+
+            content = processed_return.get("content", "")
             if content is None:
                 content = ""

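Note the swap from inspect.iscoroutinefunction to is_coroutine_callable (a helper surfaced in autogen/fast_depends/utils.py per the file list). The standard-library check only recognizes coroutine functions, so async callables registered as tool objects were previously routed to the sync branch. A quick illustration of the gap, on the assumption that the new helper also inspects __call__:

    import inspect

    class AsyncTool:
        async def __call__(self, query: str) -> str:
            return f"result for {query}"

    tool = AsyncTool()
    print(inspect.iscoroutinefunction(tool))           # False: the instance is not a coroutine function
    print(inspect.iscoroutinefunction(tool.__call__))  # True: its bound __call__ is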
@@ -2992,8 +3009,14 @@ class ConversableAgent(LLMAgent):
         iostream = IOStream.get_default()

         reply = iostream.input(prompt)
-        self._human_input.append(reply)
-        return reply
+
+        # Process the human input through hooks
+        processed_reply = self._process_human_input(reply)
+        if processed_reply is None:
+            raise ValueError("safeguard_human_inputs hook returned None")
+
+        self._human_input.append(processed_reply)
+        return processed_reply

     async def a_get_human_input(self, prompt: str) -> str:
         """(Async) Get human input.
@@ -3473,7 +3496,7 @@ class ConversableAgent(LLMAgent):
     def can_execute_function(self, name: list[str] | str) -> bool:
         """Whether the agent can execute the function."""
         names = name if isinstance(name, list) else [name]
-        return all([n in self._function_map for n in names])
+        return all(n in self._function_map for n in names)

     @property
     def function_map(self) -> dict[str, Callable[..., Any]]:
@@ -3727,7 +3750,7 @@ class ConversableAgent(LLMAgent):
         """
         tool = self._create_tool_if_needed(func_or_tool, name, description)
         chat_context = ChatContext(self)
-        chat_context_params = {param: chat_context for param in tool._chat_context_param_names}
+        chat_context_params = dict.fromkeys(tool._chat_context_param_names, chat_context)

         self.register_function(
             {tool.name: self._wrap_function(tool.func, chat_context_params, serialize=serialize)},
@@ -3825,6 +3848,87 @@ class ConversableAgent(LLMAgent):
             messages[-1]["content"] = processed_user_content
         return messages

+    def _process_tool_input(self, tool_input: dict[str, Any]) -> dict[str, Any] | None:
+        """Process tool input through registered hooks."""
+        hook_list = self.hook_lists["safeguard_tool_inputs"]
+
+        # If no hooks are registered, allow the tool input
+        if len(hook_list) == 0:
+            return tool_input
+
+        # Process through each hook
+        processed_input = tool_input
+        for hook in hook_list:
+            processed_input = hook(processed_input)
+            if processed_input is None:
+                return None
+
+        return processed_input
+
+    def _process_tool_output(self, response: dict[str, Any]) -> dict[str, Any]:
+        """Process tool output through registered hooks"""
+        hook_list = self.hook_lists["safeguard_tool_outputs"]
+
+        # If no hooks are registered, return original response
+        if len(hook_list) == 0:
+            return response
+
+        # Process through each hook
+        processed_response = response
+        for hook in hook_list:
+            processed_response = hook(processed_response)
+
+        return processed_response
+
+    def _process_llm_input(self, messages: list[dict[str, Any]]) -> list[dict[str, Any]] | None:
+        """Process messages before sending to LLM through registered hooks."""
+        hook_list = self.hook_lists["safeguard_llm_inputs"]
+
+        # If no hooks registered, allow the messages through
+        if len(hook_list) == 0:
+            return messages
+
+        # Process through each hook
+        processed_messages = messages
+        for hook in hook_list:
+            processed_messages = hook(processed_messages)
+            if processed_messages is None:
+                return None
+
+        return processed_messages
+
+    def _process_llm_output(self, response: str | dict[str, Any]) -> str | dict[str, Any]:
+        """Process LLM response through registered hooks"""
+        hook_list = self.hook_lists["safeguard_llm_outputs"]
+
+        # If no hooks registered, return original response
+        if len(hook_list) == 0:
+            return response
+
+        # Process through each hook
+        processed_response = response
+        for hook in hook_list:
+            processed_response = hook(processed_response)
+
+        return processed_response
+
+    def _process_human_input(self, human_input: str) -> str | None:
+        """Process human input through registered hooks."""
+        hook_list = self.hook_lists["safeguard_human_inputs"]
+
+        # If no hooks registered, allow the input through
+        if len(hook_list) == 0:
+            return human_input
+
+        # Process through each hook
+        processed_input = human_input
+        for hook in hook_list:
+            processed_input = hook(processed_input)
+            if processed_input is None:
+                return None
+
+        return processed_input
+
     def print_usage_summary(self, mode: str | list[str] = ["actual", "total"]) -> None:
         """Print the usage summary."""
         iostream = IOStream.get_default()
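The input-side pipelines (_process_tool_input, _process_llm_input, _process_human_input) treat a None return as a veto, while the output-side ones simply pass along whatever the last hook returns. A sketch of a blocking LLM-input hook, reusing the agent from the earlier example; the PII check is illustrative:

    def block_pii(messages: list[dict]) -> list[dict] | None:
        # Returning None makes _generate_oai_reply answer with
        # "LLM call blocked by safeguard" instead of calling the model.
        if any("ssn" in str(m.get("content", "")).lower() for m in messages):
            return None
        return messages

    agent.register_hook("safeguard_llm_inputs", block_pii)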
@@ -13,27 +13,27 @@ from .context_variables import ContextVariables
 @dataclass
 @export_module("autogen")
 class ContextExpression:
-    """A class to evaluate logical expressions using context variables.
-
-    Args:
-        expression (str): A string containing a logical expression with context variable references.
-            - Variable references use ${var_name} syntax: ${logged_in}, ${attempts}
-            - String literals can use normal quotes: 'hello', "world"
-            - Supported operators:
-                - Logical: not/!, and/&, or/|
-                - Comparison: >, <, >=, <=, ==, !=
-            - Supported functions:
-                - len(${var_name}): Gets the length of a list, string, or other collection
-            - Parentheses can be used for grouping
-            - Examples:
-                - "not ${logged_in} and ${is_admin} or ${guest_checkout}"
-                - "!${logged_in} & ${is_admin} | ${guest_checkout}"
-                - "len(${orders}) > 0 & ${user_active}"
-                - "len(${cart_items}) == 0 | ${checkout_started}"
-
-    Raises:
-        SyntaxError: If the expression cannot be parsed
-        ValueError: If the expression contains disallowed operations
+    """A class to evaluate logical expressions using context variables.\n
+    \n
+    Args:\n
+        expression (str): A string containing a logical expression with context variable references.\n
+            - Variable references use ${var_name} syntax: ${logged_in}, ${attempts}\n
+            - String literals can use normal quotes: 'hello', "world"\n
+            - Supported operators:\n
+                - Logical: not/!, and/&, or/|\n
+                - Comparison: >, <, >=, <=, ==, !=\n
+            - Supported functions:\n
+                - len(${var_name}): Gets the length of a list, string, or other collection\n
+            - Parentheses can be used for grouping\n
+            - Examples:\n
+                - "not ${logged_in} and ${is_admin} or ${guest_checkout}"\n
+                - "!${logged_in} & ${is_admin} | ${guest_checkout}"\n
+                - "len(${orders}) > 0 & ${user_active}"\n
+                - "len(${cart_items}) == 0 | ${checkout_started}"\n
+    \n
+    Raises:\n
+        SyntaxError: If the expression cannot be parsed\n
+        ValueError: If the expression contains disallowed operations\n
     """

     expression: str
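The docstring documents the expression grammar; a usage sketch, assuming the evaluate method and the ContextVariables constructor from the surrounding autogen.agentchat.group module:

    from autogen.agentchat.group import ContextExpression, ContextVariables

    expr = ContextExpression("not ${logged_in} and len(${cart_items}) > 0")
    ctx = ContextVariables(data={"logged_in": False, "cart_items": ["book"]})
    print(expr.evaluate(ctx))  # True: not logged in and the cart is non-empty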
@@ -14,17 +14,17 @@ __all__ = ["Handoffs"]


 class Handoffs(BaseModel):
-    """Container for all handoff transition conditions of a ConversableAgent.
-
-    Three types of conditions can be added, each with a different order and time of use:
-    1. OnContextConditions (evaluated without an LLM)
-    2. OnConditions (evaluated with an LLM)
-    3. After work TransitionTarget (if no other transition is triggered)
-
-    Supports method chaining:
-    agent.handoffs.add_context_conditions([condition1]) \
-        .add_llm_condition(condition2) \
-        .set_after_work(after_work)
+    """Container for all handoff transition conditions of a ConversableAgent.\n
+    \n
+    Three types of conditions can be added, each with a different order and time of use:\n
+    1. OnContextConditions (evaluated without an LLM)\n
+    2. OnConditions (evaluated with an LLM)\n
+    3. After work TransitionTarget (if no other transition is triggered)\n
+    \n
+    Supports method chaining:\n
+    agent.handoffs.add_context_conditions([condition1])\n
+        .add_llm_condition(condition2)\n
+        .set_after_work(after_work)\n
     """

     context_conditions: list[OnContextCondition] = Field(default_factory=list)
@@ -232,6 +232,7 @@ async def a_run_group_chat(
         except Exception as e:
             response.iostream.send(ErrorEvent(error=e))  # type: ignore[call-arg]

-    asyncio.create_task(_initiate_group_chat())
-
+    task = asyncio.create_task(_initiate_group_chat())
+    # prevent the task from being garbage collected
+    response._task_ref = task  # type: ignore[attr-defined]
     return response
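This fixes a standard asyncio pitfall: the event loop holds only a weak reference to tasks, so a fire-and-forget asyncio.create_task(...) result can be garbage collected before it finishes. For comparison, the pattern recommended by the asyncio documentation:

    import asyncio

    background_tasks: set[asyncio.Task] = set()

    async def fire_and_forget(coro) -> None:
        task = asyncio.create_task(coro)
        background_tasks.add(task)  # strong reference keeps the task alive
        task.add_done_callback(background_tasks.discard)  # drop it once finished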
@@ -17,17 +17,17 @@ __all__ = [

 @export_module("autogen")
 class OnCondition(BaseModel):  # noqa: N801
-    """Defines a condition for transitioning to another agent or nested chats.
-
-    This is for LLM-based condition evaluation where these conditions are translated into tools and attached to the agent.
-
-    These are evaluated after the OnCondition conditions but before the after work condition.
-
-    Args:
-        target (TransitionTarget): The transition (essentially an agent) to hand off to.
-        condition (LLMCondition): The condition for transitioning to the target agent, evaluated by the LLM.
-        available (AvailableCondition): Optional condition to determine if this OnCondition is included for the LLM to evaluate based on context variables using classes like StringAvailableCondition and ContextExpressionAvailableCondition.
-        llm_function_name (Optional[str]): The name of the LLM function to use for this condition.
+    """Defines a condition for transitioning to another agent or nested chats.\n
+    \n
+    This is for LLM-based condition evaluation where these conditions are translated into tools and attached to the agent.\n
+    \n
+    These are evaluated after the OnCondition conditions but before the after work condition.\n
+    \n
+    Args:\n
+        target (TransitionTarget): The transition (essentially an agent) to hand off to.\n
+        condition (LLMCondition): The condition for transitioning to the target agent, evaluated by the LLM.\n
+        available (AvailableCondition): Optional condition to determine if this OnCondition is included for the LLM to evaluate based on context variables using classes like StringAvailableCondition and ContextExpressionAvailableCondition.\n
+        llm_function_name (Optional[str]): The name of the LLM function to use for this condition.\n
     """

     target: TransitionTarget
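A construction sketch; the triage_agent and the prompt are hypothetical, and AgentTarget and StringLLMCondition are taken to be the concrete target/condition classes from the same group module:

    from autogen.agentchat.group import AgentTarget, OnCondition, StringLLMCondition

    handoff = OnCondition(
        target=AgentTarget(triage_agent),  # hand off to a hypothetical triage agent
        condition=StringLLMCondition(prompt="The user is reporting a software bug."),
    )
    agent.handoffs.add_llm_condition(handoff)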
@@ -0,0 +1,21 @@
+# Copyright (c) 2023 - 2025, AG2ai, Inc., AG2ai open-source projects maintainers and core contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+
+"""Safeguards module for agent safety and compliance.
+
+This module provides functionality for applying, managing, and enforcing
+safeguards on agent interactions including inter-agent communication,
+tool interactions, LLM interactions, and user interactions.
+"""
+
+from .api import apply_safeguard_policy, reset_safeguard_policy
+from .enforcer import SafeguardEnforcer
+from .events import SafeguardEvent
+
+__all__ = [
+    "SafeguardEnforcer",
+    "SafeguardEvent",
+    "apply_safeguard_policy",
+    "reset_safeguard_policy",
+]
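The public import surface of the new module, as declared above. Only the names are visible in this diff; the signatures of apply_safeguard_policy and reset_safeguard_policy live in safeguards/api.py:

    from autogen.agentchat.group.safeguards import (
        SafeguardEnforcer,
        SafeguardEvent,
        apply_safeguard_policy,
        reset_safeguard_policy,
    )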