ag2 0.9.2__py3-none-any.whl → 0.9.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This release of ag2 has been flagged as potentially problematic.

Files changed (35)
  1. {ag2-0.9.2.dist-info → ag2-0.9.4.dist-info}/METADATA +14 -10
  2. {ag2-0.9.2.dist-info → ag2-0.9.4.dist-info}/RECORD +35 -29
  3. autogen/agentchat/contrib/agent_optimizer.py +6 -3
  4. autogen/agentchat/contrib/capabilities/transforms.py +22 -9
  5. autogen/agentchat/conversable_agent.py +51 -5
  6. autogen/agentchat/group/group_utils.py +81 -27
  7. autogen/agentchat/group/guardrails.py +171 -0
  8. autogen/agentchat/group/handoffs.py +81 -5
  9. autogen/agentchat/group/on_context_condition.py +2 -2
  10. autogen/agentchat/group/patterns/pattern.py +7 -1
  11. autogen/agentchat/group/targets/transition_target.py +10 -0
  12. autogen/agentchat/groupchat.py +95 -8
  13. autogen/agentchat/realtime/experimental/realtime_swarm.py +12 -4
  14. autogen/agents/experimental/document_agent/document_agent.py +232 -40
  15. autogen/agents/experimental/websurfer/websurfer.py +9 -1
  16. autogen/events/agent_events.py +6 -0
  17. autogen/events/helpers.py +8 -0
  18. autogen/mcp/helpers.py +45 -0
  19. autogen/mcp/mcp_proxy/mcp_proxy.py +2 -3
  20. autogen/messages/agent_messages.py +1 -1
  21. autogen/oai/gemini.py +41 -17
  22. autogen/oai/gemini_types.py +2 -1
  23. autogen/oai/oai_models/chat_completion.py +1 -1
  24. autogen/tools/experimental/__init__.py +4 -0
  25. autogen/tools/experimental/browser_use/browser_use.py +4 -11
  26. autogen/tools/experimental/firecrawl/__init__.py +7 -0
  27. autogen/tools/experimental/firecrawl/firecrawl_tool.py +853 -0
  28. autogen/tools/experimental/searxng/__init__.py +7 -0
  29. autogen/tools/experimental/searxng/searxng_search.py +141 -0
  30. autogen/version.py +1 -1
  31. templates/client_template/main.jinja2 +5 -2
  32. templates/main.jinja2 +1 -1
  33. {ag2-0.9.2.dist-info → ag2-0.9.4.dist-info}/WHEEL +0 -0
  34. {ag2-0.9.2.dist-info → ag2-0.9.4.dist-info}/licenses/LICENSE +0 -0
  35. {ag2-0.9.2.dist-info → ag2-0.9.4.dist-info}/licenses/NOTICE.md +0 -0

autogen/agentchat/group/group_utils.py

@@ -82,6 +82,46 @@ def link_agents_to_group_manager(agents: list[Agent], group_chat_manager: Agent)
         agent._group_manager = group_chat_manager  # type: ignore[attr-defined]


+def _evaluate_after_works_conditions(
+    agent: "ConversableAgent",
+    groupchat: GroupChat,
+    user_agent: Optional["ConversableAgent"],
+) -> Optional[Union[Agent, str]]:
+    """Evaluate after_works context conditions for an agent.
+
+    Args:
+        agent: The agent to evaluate after_works conditions for
+        groupchat: The current group chat
+        user_agent: Optional user proxy agent
+
+    Returns:
+        The resolved speaker selection result if a condition matches, None otherwise
+    """
+    if not hasattr(agent, "handoffs") or not agent.handoffs.after_works:  # type: ignore[attr-defined]
+        return None
+
+    for after_work_condition in agent.handoffs.after_works:  # type: ignore[attr-defined]
+        # Check if condition is available
+        is_available = (
+            after_work_condition.available.is_available(agent, groupchat.messages)
+            if after_work_condition.available
+            else True
+        )
+
+        # Evaluate the condition (None condition means always true)
+        if is_available and (
+            after_work_condition.condition is None or after_work_condition.condition.evaluate(agent.context_variables)
+        ):
+            # Condition matched, resolve and return
+            return after_work_condition.target.resolve(
+                groupchat,
+                agent,
+                user_agent,
+            ).get_speaker_selection_result(groupchat)
+
+    return None
+
+
 def _run_oncontextconditions(
     agent: "ConversableAgent",
     messages: Optional[list[dict[str, Any]]] = None,
@@ -94,14 +134,10 @@ def _run_oncontextconditions(
             on_condition.available.is_available(agent, messages if messages else []) if on_condition.available else True
         )

-        if is_available and on_condition.condition.evaluate(agent.context_variables):
-            # Condition has been met, we'll set the Tool Executor's next target
-            # attribute and that will be picked up on the next iteration when
-            # _determine_next_agent is called
-            for agent in agent._group_manager.groupchat.agents:  # type: ignore[attr-defined]
-                if isinstance(agent, GroupToolExecutor):
-                    agent.set_next_target(on_condition.target)
-                    break
+        if is_available and (
+            on_condition.condition is None or on_condition.condition.evaluate(agent.context_variables)
+        ):
+            on_condition.target.activate_target(agent._group_manager.groupchat)  # type: ignore[attr-defined]

             transfer_name = on_condition.target.display_name()

@@ -161,12 +197,25 @@ def ensure_handoff_agents_in_group(agents: list["ConversableAgent"]) -> None:
                 and context_conditions.target.agent_name not in agent_names
             ):
                 raise ValueError("Agent in OnContextCondition Hand-offs must be in the agents list")
-        if (
-            agent.handoffs.after_work is not None
-            and isinstance(agent.handoffs.after_work, (AgentTarget, AgentNameTarget))
-            and agent.handoffs.after_work.agent_name not in agent_names
-        ):
-            raise ValueError("Agent in after work target Hand-offs must be in the agents list")
+        # Check after_works targets
+        for after_work_condition in agent.handoffs.after_works:
+            if (
+                isinstance(after_work_condition.target, (AgentTarget, AgentNameTarget))
+                and after_work_condition.target.agent_name not in agent_names
+            ):
+                raise ValueError("Agent in after work target Hand-offs must be in the agents list")
+
+
+def ensure_guardrail_agents_in_group(agents: list["ConversableAgent"]) -> None:
+    """Ensure the agents in handoffs are in the group chat."""
+    agent_names = [agent.name for agent in agents]
+    for agent in agents:
+        for guardrail in agent.input_guardrails + agent.output_guardrails:
+            if (
+                isinstance(guardrail.target, (AgentTarget, AgentNameTarget))
+                and guardrail.target.agent_name not in agent_names
+            ):
+                raise ValueError("Agent in guardrail's target must be in the agents list")


 def prepare_exclude_transit_messages(agents: list["ConversableAgent"]) -> None:
@@ -211,6 +260,9 @@ def prepare_group_agents(
     # Ensure all agents in hand-off after-works are in the passed in agents list
     ensure_handoff_agents_in_group(agents)

+    # Ensure all agents in guardrails are in the passed in agents list
+    ensure_guardrail_agents_in_group(agents)
+
     # Create Tool Executor for the group
     tool_execution = GroupToolExecutor()

@@ -320,17 +372,19 @@ def setup_context_variables(
     tool_execution: "ConversableAgent",
     agents: list["ConversableAgent"],
     manager: GroupChatManager,
+    user_agent: Optional["ConversableAgent"],
     context_variables: ContextVariables,
 ) -> None:
-    """Assign a common context_variables reference to all agents in the group, including the tool executor and group chat manager.
+    """Assign a common context_variables reference to all agents in the group, including the tool executor, group chat manager, and user proxy agent.

     Args:
         tool_execution: The tool execution agent.
         agents: List of all agents in the conversation.
         manager: GroupChatManager instance.
+        user_agent: Optional user proxy agent.
         context_variables: Context variables to assign to all agents.
     """
-    for agent in agents + [tool_execution] + [manager]:
+    for agent in agents + [tool_execution] + [manager] + ([user_agent] if user_agent else []):
         agent.context_variables = context_variables


@@ -426,22 +480,25 @@ def determine_next_agent(

     # If the user last spoke, return to the agent prior to them (if they don't have an after work, otherwise it's treated like any other agent)
     if user_agent and last_speaker == user_agent:
-        if user_agent.handoffs.after_work is None:
+        if not user_agent.handoffs.after_works:
             return last_agent_speaker
         else:
             last_agent_speaker = user_agent

     # AFTER WORK:

-    # Get the appropriate After Work condition (from the agent if they have one, otherwise the group level one)
-    after_work_condition = (
-        last_agent_speaker.handoffs.after_work  # type: ignore[attr-defined]
-        if last_agent_speaker.handoffs.after_work is not None  # type: ignore[attr-defined]
-        else group_after_work
+    # First, try to evaluate after_works context conditions
+    after_works_result = _evaluate_after_works_conditions(
+        last_agent_speaker,  # type: ignore[arg-type]
+        groupchat,
+        user_agent,
     )
+    if after_works_result is not None:
+        return after_works_result

+    # If no after_works conditions matched, use the group-level after_work
     # Resolve the next agent, termination, or speaker selection method
-    resolved_speaker_selection_result = after_work_condition.resolve(
+    resolved_speaker_selection_result = group_after_work.resolve(
         groupchat,
         last_agent_speaker,  # type: ignore[arg-type]
         user_agent,
@@ -525,10 +582,7 @@ def create_group_manager(
         if (
             len(agent.handoffs.get_context_conditions_by_target_type(GroupManagerTarget)) > 0
             or len(agent.handoffs.get_llm_conditions_by_target_type(GroupManagerTarget)) > 0
-            or (
-                agent.handoffs.after_work is not None
-                and isinstance(agent.handoffs.after_work, GroupManagerTarget)
-            )
+            or any(isinstance(aw.target, GroupManagerTarget) for aw in agent.handoffs.after_works)
         ):
             has_group_manager_target = True
             break

autogen/agentchat/group/guardrails.py (new file)

@@ -0,0 +1,171 @@
+# Copyright (c) 2023 - 2025, AG2ai, Inc., AG2ai open-source projects maintainers and core contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import json
+import re
+from abc import ABC, abstractmethod
+from typing import TYPE_CHECKING, Any, Optional, Union
+
+from pydantic import BaseModel, Field
+
+from ...oai.client import OpenAIWrapper
+
+if TYPE_CHECKING:
+    from ...llm_config import LLMConfig
+    from .targets.transition_target import TransitionTarget
+
+
+class GuardrailResult(BaseModel):
+    """Represents the outcome of a guardrail check."""
+
+    activated: bool
+    justification: str = Field(default="No justification provided")
+
+    def __str__(self) -> str:
+        return f"Guardrail Result: {self.activated}\nJustification: {self.justification}"
+
+    @staticmethod
+    def parse(text: str) -> "GuardrailResult":
+        """Parses a JSON string into a GuardrailResult object.
+
+        Args:
+            text (str): The JSON string to parse.
+
+        Returns:
+            GuardrailResult: The parsed GuardrailResult object.
+        """
+        try:
+            data = json.loads(text)
+            return GuardrailResult(**data)
+        except (json.JSONDecodeError, ValueError) as e:
+            raise ValueError(f"Failed to parse GuardrailResult from text: {text}") from e
+
+
+class Guardrail(ABC):
+    """Abstract base class for guardrails."""
+
+    def __init__(
+        self, name: str, condition: str, target: "TransitionTarget", activation_message: Optional[str] = None
+    ) -> None:
+        self.name = name
+        self.condition = condition
+        self.target = target
+        self.activation_message = (
+            activation_message if activation_message else f"Guardrail '{name}' has been activated."
+        )
+
+    @abstractmethod
+    def check(
+        self,
+        context: Union[str, list[dict[str, Any]]],
+    ) -> GuardrailResult:
+        """Checks the text against the guardrail and returns a GuardrailResult.
+
+        Args:
+            context (Union[str, list[dict[str, Any]]]): The context to check against the guardrail.
+
+        Returns:
+            GuardrailResult: The result of the guardrail check.
+        """
+        pass
+
+
+class LLMGuardrail(Guardrail):
+    """Guardrail that uses an LLM to check the context."""
+
+    def __init__(
+        self,
+        name: str,
+        condition: str,
+        target: "TransitionTarget",
+        llm_config: "LLMConfig",
+        activation_message: Optional[str] = None,
+    ) -> None:
+        super().__init__(name, condition, target, activation_message)
+
+        if not llm_config:
+            raise ValueError("LLMConfig is required.")
+
+        self.llm_config = llm_config.deepcopy()
+        setattr(self.llm_config, "response_format", GuardrailResult)
+        self.client = OpenAIWrapper(**self.llm_config.model_dump())
+
+        self.check_prompt = f"""You are a guardrail that checks if a condition is met in the conversation you are given.
+You will activate the guardrail only if the condition is met.
+
+**Condition: {self.condition}**"""
+
+    def check(
+        self,
+        context: Union[str, list[dict[str, Any]]],
+    ) -> GuardrailResult:
+        """Checks the context against the guardrail using an LLM.
+
+        Args:
+            context (Union[str, list[dict[str, Any]]]): The context to check against the guardrail.
+
+        Returns:
+            GuardrailResult: The result of the guardrail check.
+        """
+        # Set the check prompt as the system message
+        check_messages = [{"role": "system", "content": self.check_prompt}]
+        # If context is a string, wrap it in a user message and append it
+        if isinstance(context, str):
+            check_messages.append({"role": "user", "content": context})
+        # If context is a list of messages, append them
+        elif isinstance(context, list):
+            check_messages.extend(context)
+        else:
+            raise ValueError("Context must be a string or a list of messages.")
+        # Call the LLM with the check messages
+        response = self.client.create(messages=check_messages)
+        return GuardrailResult.parse(response.choices[0].message.content)  # type: ignore
+
+
+class RegexGuardrail(Guardrail):
+    """Guardrail that checks the context against a regular expression."""
+
+    def __init__(
+        self,
+        name: str,
+        condition: str,
+        target: "TransitionTarget",
+        activation_message: Optional[str] = None,
+    ) -> None:
+        super().__init__(name, condition, target, activation_message)
+        # Compile the regular expression condition
+        try:
+            self.regex = re.compile(condition)
+        except re.error as e:
+            raise ValueError(f"Invalid regex pattern '{condition}': {str(e)}")
+
+    def check(
+        self,
+        context: Union[str, list[dict[str, Any]]],
+    ) -> GuardrailResult:
+        """Checks the context against the guardrail using a regular expression.
+
+        Args:
+            context (Union[str, list[dict[str, Any]]]): The context to check against the guardrail.
+
+        Returns:
+            GuardrailResult: The result of the guardrail check.
+        """
+        # Create a list of the messages to check
+        if isinstance(context, str):
+            messages = [context]
+        elif isinstance(context, list):
+            messages = [message.get("content", "") for message in context]
+        else:
+            raise ValueError("Context must be a string or a list of messages.")
+
+        # Check each message against the regex
+        for message in messages:
+            match = self.regex.search(message)
+            # If a match is found, activate the guardrail and return the result
+            if match:
+                activated = True
+                justification = f"Match found -> {match.group(0)}"
+                return GuardrailResult(activated=activated, justification=justification)
+        return GuardrailResult(activated=False, justification="No match found in the context.")
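
The guardrail classes added above can be exercised on their own; a minimal sketch follows. The import paths and the AgentNameTarget keyword argument are assumptions inferred from the files listed in this diff (guardrails.py and targets/transition_target.py), not a verified ag2 0.9.4 API reference. LLMGuardrail follows the same check() contract but additionally requires an LLMConfig.

    # Minimal sketch (assumed imports) of the RegexGuardrail added in this release.
    from autogen.agentchat.group.guardrails import RegexGuardrail
    from autogen.agentchat.group.targets.transition_target import AgentNameTarget

    # Activates whenever a message matches the pattern; the target names the agent
    # to hand off to (it must be in the group, see ensure_guardrail_agents_in_group above).
    ssn_guardrail = RegexGuardrail(
        name="ssn_detector",
        condition=r"\d{3}-\d{2}-\d{4}",
        target=AgentNameTarget(agent_name="compliance_agent"),
        activation_message="Possible SSN detected in the conversation.",
    )

    # check() accepts either a plain string or a list of message dicts.
    result = ssn_guardrail.check("My number is 123-45-6789")
    print(result.activated)      # True
    print(result.justification)  # Match found -> 123-45-6789

    result = ssn_guardrail.check([{"role": "user", "content": "nothing sensitive here"}])
    print(result.activated)      # False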

autogen/agentchat/group/handoffs.py

@@ -2,7 +2,7 @@
 #
 # SPDX-License-Identifier: Apache-2.0

-from typing import Optional, Union, overload
+from typing import Union, overload

 from pydantic import BaseModel, Field

@@ -30,7 +30,7 @@ class Handoffs(BaseModel):

     context_conditions: list[OnContextCondition] = Field(default_factory=list)
     llm_conditions: list[OnCondition] = Field(default_factory=list)
-    after_work: Optional[TransitionTarget] = None
+    after_works: list[OnContextCondition] = Field(default_factory=list)

     def add_context_condition(self, condition: OnContextCondition) -> "Handoffs":
         """
@@ -102,7 +102,9 @@ class Handoffs(BaseModel):

     def set_after_work(self, target: TransitionTarget) -> "Handoffs":
         """
-        Set the after work target (only one allowed).
+        Set the after work target (replaces all after_works with single entry).
+
+        For backward compatibility, this creates an OnContextCondition with no condition (always true).

         Args:
             target: The after work TransitionTarget to set
@@ -113,7 +115,81 @@ class Handoffs(BaseModel):
         if not isinstance(target, TransitionTarget):
             raise TypeError(f"Expected a TransitionTarget instance, got {type(target).__name__}")

-        self.after_work = target
+        # Create OnContextCondition with no condition (always true)
+        after_work_condition = OnContextCondition(target=target, condition=None)
+        self.after_works = [after_work_condition]
+        return self
+
+    def add_after_work(self, condition: OnContextCondition) -> "Handoffs":
+        """
+        Add a single after-work condition.
+
+        If the condition has condition=None, it will replace any existing
+        condition=None entry and be placed at the end.
+
+        Args:
+            condition: The OnContextCondition to add
+
+        Returns:
+            Self for method chaining
+        """
+        if not isinstance(condition, OnContextCondition):
+            raise TypeError(f"Expected an OnContextCondition instance, got {type(condition).__name__}")
+
+        if condition.condition is None:
+            # Remove any existing condition=None entries
+            self.after_works = [c for c in self.after_works if c.condition is not None]
+            # Add the new one at the end
+            self.after_works.append(condition)
+        else:
+            # For regular conditions, check if we need to move condition=None to the end
+            none_conditions = [c for c in self.after_works if c.condition is None]
+            if none_conditions:
+                # Remove the None condition temporarily
+                self.after_works = [c for c in self.after_works if c.condition is not None]
+                # Add the new regular condition
+                self.after_works.append(condition)
+                # Re-add the None condition at the end
+                self.after_works.append(none_conditions[0])
+            else:
+                # No None condition exists, just append
+                self.after_works.append(condition)
+
+        return self
+
+    def add_after_works(self, conditions: list[OnContextCondition]) -> "Handoffs":
+        """
+        Add multiple after-work conditions.
+
+        Special handling for condition=None entries:
+        - Only one condition=None entry is allowed (the fallback)
+        - It will always be placed at the end of the list
+        - If multiple condition=None entries are provided, only the last one is kept
+
+        Args:
+            conditions: List of OnContextConditions to add
+
+        Returns:
+            Self for method chaining
+        """
+        # Validate that it is a list of OnContextConditions
+        if not all(isinstance(condition, OnContextCondition) for condition in conditions):
+            raise TypeError("All conditions must be of type OnContextCondition")
+
+        # Separate conditions with None and without None
+        none_conditions = [c for c in conditions if c.condition is None]
+        regular_conditions = [c for c in conditions if c.condition is not None]
+
+        # Remove any existing condition=None entries
+        self.after_works = [c for c in self.after_works if c.condition is not None]
+
+        # Add regular conditions
+        self.after_works.extend(regular_conditions)
+
+        # Add at most one None condition at the end
+        if none_conditions:
+            self.after_works.append(none_conditions[-1])  # Use the last one if multiple provided
+
         return self

     @overload
@@ -186,7 +262,7 @@ class Handoffs(BaseModel):
         """
         self.context_conditions.clear()
         self.llm_conditions.clear()
-        self.after_work = None
+        self.after_works.clear()
         return self

     def get_llm_conditions_by_target_type(self, target_type: type) -> list[OnCondition]:
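
Since the list-based after_works API above replaces the old single after_work attribute, a short usage sketch may help. It is a sketch under stated assumptions: StringContextCondition, AgentNameTarget and TerminateTarget (and their import paths) are taken from the wider ag2 group module, not from this diff.

    from autogen.agentchat.group.context_conditions import StringContextCondition
    from autogen.agentchat.group.handoffs import Handoffs
    from autogen.agentchat.group.on_context_condition import OnContextCondition
    from autogen.agentchat.group.targets.transition_target import AgentNameTarget, TerminateTarget

    handoffs = Handoffs()

    # Conditional entries are kept in order; the condition=None entry is the
    # always-true fallback and add_after_works moves it to the end of the list.
    handoffs.add_after_works([
        OnContextCondition(
            target=AgentNameTarget(agent_name="reviewer"),
            condition=StringContextCondition(variable_name="needs_review"),
        ),
        OnContextCondition(target=TerminateTarget(), condition=None),
    ])

    # Legacy call: replaces after_works with a single unconditional entry,
    # mirroring the behaviour of the removed after_work attribute.
    handoffs.set_after_work(AgentNameTarget(agent_name="coordinator"))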

autogen/agentchat/group/on_context_condition.py

@@ -24,12 +24,12 @@ class OnContextCondition(BaseModel):  # noqa: N801

     Args:
         target (TransitionTarget): The transition (essentially an agent) to hand off to.
-        condition (ContextCondition): The context variable based condition for transitioning to the target agent.
+        condition (Optional[ContextCondition]): The context variable based condition for transitioning to the target agent. If None, the condition always evaluates to True.
         available (AvailableCondition): Optional condition to determine if this OnCondition is included for the LLM to evaluate based on context variables using classes like StringAvailableCondition and ContextExpressionAvailableCondition.
     """

     target: TransitionTarget
-    condition: ContextCondition
+    condition: Optional[ContextCondition] = None
     available: Optional[AvailableCondition] = None

     def has_target_type(self, target_type: type) -> bool:

autogen/agentchat/group/patterns/pattern.py

@@ -152,7 +152,13 @@ class Pattern(ABC):
         manager = create_group_manager(groupchat, self.group_manager_args, self.agents, self.group_after_work)

         # Point all agent's context variables to this function's context_variables
-        setup_context_variables(tool_executor, self.agents, manager, self.context_variables)
+        setup_context_variables(
+            tool_execution=tool_executor,
+            agents=self.agents,
+            manager=manager,
+            user_agent=self.user_agent,
+            context_variables=self.context_variables,
+        )

         # Link all agents with the GroupChatManager to allow access to the group chat
         link_agents_to_group_manager(groupchat.agents, manager)

autogen/agentchat/group/targets/transition_target.py

@@ -68,6 +68,16 @@ class TransitionTarget(BaseModel):
         """Create a wrapper agent for the target if needed."""
         raise NotImplementedError("Requires subclasses to implement.")

+    def activate_target(self, groupchat: "GroupChat") -> None:
+        """Activate the target in the groupchat, setting the next target for GroupToolExecutor.
+
+        The Tool Executor's next target attribute will be picked up on the next iteration when _determine_next_agent is called"""
+        for agent in groupchat.agents:  # type: ignore[attr-defined]
+            # get the GroupToolExecutor agent
+            if type(agent).__name__ == "GroupToolExecutor":
+                agent.set_next_target(self)  # type: ignore[attr-defined]
+                return
+

 class AgentTarget(TransitionTarget):
     """Target that represents a direct agent reference."""

autogen/agentchat/groupchat.py

@@ -1023,6 +1023,38 @@ class GroupChat:
                     mentions[agent.name] = count
         return mentions

+    def _run_input_guardrails(
+        self,
+        agent: "ConversableAgent",
+        messages: Optional[list[dict[str, Any]]] = None,
+    ) -> Optional[str]:
+        """Run input guardrails for an agent before the reply is generated.
+        Args:
+            agent (ConversableAgent): The agent whose input guardrails to run.
+            messages (Optional[list[dict[str, Any]]]): The messages to check against the guardrails.
+        """
+        for guardrail in agent.input_guardrails:
+            guardrail_result = guardrail.check(context=messages)
+
+            if guardrail_result.activated:
+                guardrail.target.activate_target(self)
+                return f"{guardrail.activation_message}\nJustification: {guardrail_result.justification}"
+        return None
+
+    def _run_output_guardrails(self, agent: "ConversableAgent", reply: str) -> None:
+        """Run output guardrails for an agent after the reply is generated.
+        Args:
+            agent (ConversableAgent): The agent whose output guardrails to run.
+            reply (str): The reply generated by the agent.
+        """
+        for guardrail in agent.output_guardrails:
+            guardrail_result = guardrail.check(context=reply)
+
+            if guardrail_result.activated:
+                guardrail.target.activate_target(self)
+                return f"{guardrail.activation_message}\nJustification: {guardrail_result.justification}"
+        return None
+

 @export_module("autogen")
 class GroupChatManager(ConversableAgent):
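
The run loops changed below only consult guardrails attached to the speaking agent via its input_guardrails and output_guardrails lists. A minimal attachment sketch, assuming that appending to these lists directly is supported (the conversable_agent.py changes that introduce them are not shown in this diff):

    from autogen import ConversableAgent
    from autogen.agentchat.group.guardrails import RegexGuardrail
    from autogen.agentchat.group.targets.transition_target import AgentNameTarget

    writer = ConversableAgent(name="writer", llm_config=False)

    # Reroute the conversation to a privacy reviewer if the writer is about to
    # emit something that looks like an email address.
    writer.output_guardrails.append(
        RegexGuardrail(
            name="no_emails",
            condition=r"[\w.+-]+@[\w-]+\.[\w.]+",
            target=AgentNameTarget(agent_name="privacy_officer"),
            activation_message="An email address was about to be shared.",
        )
    )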
@@ -1194,8 +1226,17 @@ class GroupChatManager(ConversableAgent):
                 if not silent:
                     iostream = IOStream.get_default()
                     iostream.send(GroupChatRunChatEvent(speaker=speaker, silent=silent))
-                # let the speaker speak
-                reply = speaker.generate_reply(sender=self)
+
+                guardrails_activated = False
+                guardrails_reply = groupchat._run_input_guardrails(speaker, speaker._oai_messages[self])
+
+                if guardrails_reply is not None:
+                    # if a guardrail has been activated, then the next target has been set and the guardrail reply will be sent
+                    guardrails_activated = True
+                    reply = guardrails_reply
+                else:
+                    # let the speaker speak
+                    reply = speaker.generate_reply(sender=self)
             except KeyboardInterrupt:
                 # let the admin agent speak if interrupted
                 if groupchat.admin_name in groupchat.agent_names:
@@ -1215,6 +1256,15 @@ class GroupChatManager(ConversableAgent):
                 termination_reason = "No reply generated"
                 break

+            if not guardrails_activated:
+                # if the input guardrails were not activated, and the agent returned a reply
+                guardrails_reply = groupchat._run_output_guardrails(speaker, reply)
+
+                if guardrails_reply is not None:
+                    # if a guardrail has been activated, then the next target has been set and the guardrail reply will be sent
+                    guardrails_activated = True
+                    reply = guardrails_reply
+
             # check for "clear history" phrase in reply and activate clear history function if found
             if (
                 groupchat.enable_clear_history
@@ -1233,7 +1283,11 @@ class GroupChatManager(ConversableAgent):
                 a.previous_cache = None

         if termination_reason:
-            iostream.send(TerminationEvent(termination_reason=termination_reason))
+            iostream.send(
+                TerminationEvent(
+                    termination_reason=termination_reason, sender=self, recipient=speaker if speaker else None
+                )
+            )

         return True, None

@@ -1287,8 +1341,19 @@ class GroupChatManager(ConversableAgent):
             try:
                 # select the next speaker
                 speaker = await groupchat.a_select_speaker(speaker, self)
-                # let the speaker speak
-                reply = await speaker.a_generate_reply(sender=self)
+                if not silent:
+                    iostream.send(GroupChatRunChatEvent(speaker=speaker, silent=silent))
+
+                guardrails_activated = False
+                guardrails_reply = groupchat._run_input_guardrails(speaker, speaker._oai_messages[self])
+
+                if guardrails_reply is not None:
+                    # if a guardrail has been activated, then the next target has been set and the guardrail reply will be sent
+                    guardrails_activated = True
+                    reply = guardrails_reply
+                else:
+                    # let the speaker speak
+                    reply = await speaker.a_generate_reply(sender=self)
             except KeyboardInterrupt:
                 # let the admin agent speak if interrupted
                 if groupchat.admin_name in groupchat.agent_names:
@@ -1308,6 +1373,24 @@ class GroupChatManager(ConversableAgent):
                 termination_reason = "No reply generated"
                 break

+            if not guardrails_activated:
+                # if the input guardrails were not activated, and the agent returned a reply
+                guardrails_reply = groupchat._run_output_guardrails(speaker, reply)
+
+                if guardrails_reply is not None:
+                    # if a guardrail has been activated, then the next target has been set and the guardrail reply will be sent
+                    guardrails_activated = True
+                    reply = guardrails_reply
+
+            # check for "clear history" phrase in reply and activate clear history function if found
+            if (
+                groupchat.enable_clear_history
+                and isinstance(reply, dict)
+                and reply["content"]
+                and "CLEAR HISTORY" in reply["content"].upper()
+            ):
+                reply["content"] = self.clear_agents_history(reply, groupchat)
+
             # The speaker sends the message without requesting a reply
             await speaker.a_send(reply, self, request_reply=False, silent=silent)
             message = self.last_message(speaker)
@@ -1317,7 +1400,11 @@ class GroupChatManager(ConversableAgent):
                 a.previous_cache = None

         if termination_reason:
-            iostream.send(TerminationEvent(termination_reason=termination_reason))
+            iostream.send(
+                TerminationEvent(
+                    termination_reason=termination_reason, sender=self, recipient=speaker if speaker else None
+                )
+            )

         return True, None

@@ -1489,10 +1576,10 @@ class GroupChatManager(ConversableAgent):
             for agent in self._groupchat.agents:
                 if agent.name == message["name"]:
                     # An agent`s message is sent to the Group Chat Manager
-                    agent.a_send(message, self, request_reply=False, silent=True)
+                    await agent.a_send(message, self, request_reply=False, silent=True)
                 else:
                     # Otherwise, messages are sent from the Group Chat Manager to the agent
-                    self.a_send(message, agent, request_reply=False, silent=True)
+                    await self.a_send(message, agent, request_reply=False, silent=True)

             # Add previous message to the new groupchat, if it's an admin message the name may not match so add the message directly
             if message_speaker_agent: