ag2 0.9.10__py3-none-any.whl → 0.10.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of ag2 has been flagged as possibly problematic.

Files changed (42)
  1. {ag2-0.9.10.dist-info → ag2-0.10.0.dist-info}/METADATA +14 -7
  2. {ag2-0.9.10.dist-info → ag2-0.10.0.dist-info}/RECORD +42 -24
  3. autogen/a2a/__init__.py +36 -0
  4. autogen/a2a/agent_executor.py +105 -0
  5. autogen/a2a/client.py +280 -0
  6. autogen/a2a/errors.py +18 -0
  7. autogen/a2a/httpx_client_factory.py +79 -0
  8. autogen/a2a/server.py +221 -0
  9. autogen/a2a/utils.py +165 -0
  10. autogen/agentchat/__init__.py +3 -0
  11. autogen/agentchat/agent.py +0 -2
  12. autogen/agentchat/chat.py +5 -1
  13. autogen/agentchat/contrib/llava_agent.py +1 -13
  14. autogen/agentchat/conversable_agent.py +178 -73
  15. autogen/agentchat/group/group_tool_executor.py +46 -15
  16. autogen/agentchat/group/guardrails.py +41 -33
  17. autogen/agentchat/group/multi_agent_chat.py +53 -0
  18. autogen/agentchat/group/safeguards/api.py +19 -2
  19. autogen/agentchat/group/safeguards/enforcer.py +134 -40
  20. autogen/agentchat/groupchat.py +45 -33
  21. autogen/agentchat/realtime/experimental/realtime_swarm.py +1 -3
  22. autogen/interop/pydantic_ai/pydantic_ai.py +1 -1
  23. autogen/llm_config/client.py +3 -2
  24. autogen/oai/bedrock.py +0 -13
  25. autogen/oai/client.py +15 -8
  26. autogen/oai/client_utils.py +30 -0
  27. autogen/oai/cohere.py +0 -10
  28. autogen/remote/__init__.py +18 -0
  29. autogen/remote/agent.py +199 -0
  30. autogen/remote/agent_service.py +142 -0
  31. autogen/remote/errors.py +17 -0
  32. autogen/remote/httpx_client_factory.py +131 -0
  33. autogen/remote/protocol.py +37 -0
  34. autogen/remote/retry.py +102 -0
  35. autogen/remote/runtime.py +96 -0
  36. autogen/testing/__init__.py +12 -0
  37. autogen/testing/messages.py +45 -0
  38. autogen/testing/test_agent.py +111 -0
  39. autogen/version.py +1 -1
  40. {ag2-0.9.10.dist-info → ag2-0.10.0.dist-info}/WHEEL +0 -0
  41. {ag2-0.9.10.dist-info → ag2-0.10.0.dist-info}/licenses/LICENSE +0 -0
  42. {ag2-0.9.10.dist-info → ag2-0.10.0.dist-info}/licenses/NOTICE.md +0 -0
--- a/autogen/agentchat/conversable_agent.py
+++ b/autogen/agentchat/conversable_agent.py
@@ -14,7 +14,7 @@ import re
 import threading
 import warnings
 from collections import defaultdict
-from collections.abc import Callable, Generator, Iterable
+from collections.abc import Callable, Container, Generator, Iterable
 from contextlib import contextmanager
 from dataclasses import dataclass
 from inspect import signature
@@ -77,7 +77,7 @@ from .chat import (
     initiate_chats,
 )
 from .group.context_variables import ContextVariables
-from .group.guardrails import Guardrail
+from .group.guardrails import Guardrail, GuardrailResult
 from .group.handoffs import Handoffs
 from .utils import consolidate_chat_info, gather_usage_summary
 
@@ -1044,7 +1044,11 @@ class ConversableAgent(LLMAgent):
         return name
 
     def _append_oai_message(
-        self, message: dict[str, Any] | str, role, conversation_id: Agent, is_sending: bool
+        self,
+        message: dict[str, Any] | str,
+        conversation_id: Agent,
+        role: str = "assistant",
+        name: str | None = None,
     ) -> bool:
         """Append a message to the ChatCompletion conversation.
 
@@ -1055,50 +1059,17 @@ class ConversableAgent(LLMAgent):
 
         Args:
             message (dict or str): message to be appended to the ChatCompletion conversation.
-            role (str): role of the message, can be "assistant" or "function".
             conversation_id (Agent): id of the conversation, should be the recipient or sender.
-            is_sending (bool): If the agent (aka self) is sending to the conversation_id agent, otherwise receiving.
+            role (str): role of the message, can be "assistant" or "function".
+            name (str | None): name of the message author, can be the name of the agent. If not provided, the name of the current agent will be used.
 
         Returns:
             bool: whether the message is appended to the ChatCompletion conversation.
         """
-        message = self._message_to_dict(message)
-        # create oai message to be appended to the oai conversation that can be passed to oai directly.
-        oai_message = {
-            k: message[k]
-            for k in ("content", "function_call", "tool_calls", "tool_responses", "tool_call_id", "name", "context")
-            if k in message and message[k] is not None
-        }
-        if "content" not in oai_message:
-            if "function_call" in oai_message or "tool_calls" in oai_message:
-                oai_message["content"] = None  # if only function_call is provided, content will be set to None.
-            else:
-                return False
-
-        if message.get("role") in ["function", "tool"]:
-            oai_message["role"] = message.get("role")
-            if "tool_responses" in oai_message:
-                for tool_response in oai_message["tool_responses"]:
-                    tool_response["content"] = str(tool_response["content"])
-        elif "override_role" in message:
-            # If we have a direction to override the role then set the
-            # role accordingly. Used to customise the role for the
-            # select speaker prompt.
-            oai_message["role"] = message.get("override_role")
-        else:
-            oai_message["role"] = role
-
-        if oai_message.get("function_call", False) or oai_message.get("tool_calls", False):
-            oai_message["role"] = "assistant"  # only messages with role 'assistant' can have a function call.
-        elif "name" not in oai_message:
-            # If we don't have a name field, append it
-            if is_sending:
-                oai_message["name"] = self.name
-            else:
-                oai_message["name"] = conversation_id.name
-
+        valid, oai_message = normilize_message_to_oai(message, role=role, name=name or self.name)
+        if not valid:
+            return False
         self._oai_messages[conversation_id].append(oai_message)
-
         return True
 
     def _process_message_before_send(
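The same reordering shows up at every internal call site below. A minimal sketch of the migration (`_append_oai_message` is a private helper and the agent names are hypothetical, so this is illustrative only):

```python
from autogen import ConversableAgent

alice = ConversableAgent("alice", llm_config=False)
bob = ConversableAgent("bob", llm_config=False)

# 0.9.x: role was positional and the author was picked via is_sending:
#   alice._append_oai_message("hi", "assistant", bob, is_sending=True)

# 0.10.0: the conversation partner moves up; role and name are keywords,
# and name defaults to the calling agent's own name when omitted.
alice._append_oai_message("hi", bob, role="assistant", name=alice.name)
```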
@@ -1152,7 +1123,7 @@ class ConversableAgent(LLMAgent):
         message = self._process_message_before_send(message, recipient, ConversableAgent._is_silent(self, silent))
         # When the agent composes and sends the message, the role of the message is "assistant"
         # unless it's "function".
-        valid = self._append_oai_message(message, "assistant", recipient, is_sending=True)
+        valid = self._append_oai_message(message, recipient, role="assistant", name=self.name)
         if valid:
             recipient.receive(message, self, request_reply, silent)
         else:
@@ -1200,7 +1171,7 @@ class ConversableAgent(LLMAgent):
         message = self._process_message_before_send(message, recipient, ConversableAgent._is_silent(self, silent))
         # When the agent composes and sends the message, the role of the message is "assistant"
         # unless it's "function".
-        valid = self._append_oai_message(message, "assistant", recipient, is_sending=True)
+        valid = self._append_oai_message(message, recipient, role="assistant", name=self.name)
         if valid:
             await recipient.a_receive(message, self, request_reply, silent)
         else:
@@ -1209,7 +1180,7 @@ class ConversableAgent(LLMAgent):
         )
 
     def _print_received_message(self, message: dict[str, Any] | str, sender: Agent, skip_head: bool = False):
-        message = self._message_to_dict(message)
+        message = message_to_dict(message)
         message_model = create_received_event_model(event=message, sender=sender, recipient=self)
         iostream = IOStream.get_default()
         # message_model.print(iostream.print)
@@ -1217,7 +1188,7 @@ class ConversableAgent(LLMAgent):
 
     def _process_received_message(self, message: dict[str, Any] | str, sender: Agent, silent: bool):
         # When the agent receives a message, the role of the message is "user". (If 'role' exists and is 'function', it will remain unchanged.)
-        valid = self._append_oai_message(message, "user", sender, is_sending=False)
+        valid = self._append_oai_message(message, sender, role="user", name=sender.name)
         if logging_enabled():
             log_event(self, "received_message", message=message, sender=sender.name, valid=valid)
 
@@ -1349,9 +1320,10 @@ class ConversableAgent(LLMAgent):
         Returns:
             bool: True if the chat should be terminated, False otherwise.
         """
+        content = message.get("content")
         return (
             isinstance(recipient, ConversableAgent)
-            and isinstance(message.get("content"), str)
+            and content is not None
             and hasattr(recipient, "_is_termination_msg")
             and recipient._is_termination_msg(message)
         )
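The termination gate previously required plain-string content, so multimodal replies could never end a chat; now any non-`None` content is handed to the recipient's `_is_termination_msg`. For instance:

```python
message = {"content": [{"type": "text", "text": "All done. TERMINATE"}]}

# 0.9.x:  isinstance(message.get("content"), str) -> False, check never runs
# 0.10.0: message.get("content") is not None      -> True, _is_termination_msg runs
```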
@@ -2175,6 +2147,7 @@ class ConversableAgent(LLMAgent):
         messages: list[dict[str, Any]] | None = None,
         sender: Agent | None = None,
         config: OpenAIWrapper | None = None,
+        **kwargs: Any,
     ) -> tuple[bool, str | dict[str, Any] | None]:
         """Generate a reply using autogen.oai."""
         client = self.client if config is None else config
@@ -2188,7 +2161,12 @@ class ConversableAgent(LLMAgent):
         if processed_messages is None:
             return True, {"content": "LLM call blocked by safeguard", "role": "assistant"}
 
-        extracted_response = self._generate_oai_reply_from_client(client, processed_messages, self.client_cache)
+        extracted_response = self._generate_oai_reply_from_client(
+            client,
+            self._oai_system_message + messages,
+            self.client_cache,
+            **kwargs,
+        )
 
         # Process LLM response
         if extracted_response is not None:
@@ -2198,7 +2176,13 @@ class ConversableAgent(LLMAgent):
 
         return (False, None) if extracted_response is None else (True, extracted_response)
 
-    def _generate_oai_reply_from_client(self, llm_client, messages, cache) -> str | dict[str, Any] | None:
+    def _generate_oai_reply_from_client(
+        self,
+        llm_client,
+        messages,
+        cache,
+        **kwargs: Any,
+    ) -> str | dict[str, Any] | None:
         # unroll tool_responses
         all_messages = []
         for message in messages:
@@ -2217,6 +2201,7 @@ class ConversableAgent(LLMAgent):
             messages=all_messages,
             cache=cache,
             agent=self,
+            **kwargs,
         )
         extracted_response = llm_client.extract_text_or_completion_object(response)[0]
 
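`generate_oai_reply` (and, below, its async counterpart) now accepts arbitrary keyword arguments and forwards them through `_generate_oai_reply_from_client` into `llm_client.create(...)`. A sketch, assuming `agent` is a `ConversableAgent` with a configured LLM client and that the configured client honours the override shown (a hypothetical example, not confirmed by this diff):

```python
final, reply = agent.generate_oai_reply(
    messages=[{"role": "user", "content": "Summarize the release."}],
    temperature=0.0,  # hypothetical per-call override, forwarded to llm_client.create()
)
```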
@@ -2246,20 +2231,27 @@ class ConversableAgent(LLMAgent):
         messages: list[dict[str, Any]] | None = None,
         sender: Agent | None = None,
         config: Any | None = None,
+        **kwargs: Any,
     ) -> tuple[bool, str | dict[str, Any] | None]:
         """Generate a reply using autogen.oai asynchronously."""
         iostream = IOStream.get_default()
 
         def _generate_oai_reply(
-            self, iostream: IOStream, *args: Any, **kwargs: Any
+            self, iostream: IOStream, *args: Any, **kw: Any
         ) -> tuple[bool, str | dict[str, Any] | None]:
             with IOStream.set_default(iostream):
-                return self.generate_oai_reply(*args, **kwargs)
+                return self.generate_oai_reply(*args, **kw)
 
         return await asyncio.get_event_loop().run_in_executor(
             None,
             functools.partial(
-                _generate_oai_reply, self=self, iostream=iostream, messages=messages, sender=sender, config=config
+                _generate_oai_reply,
+                self=self,
+                iostream=iostream,
+                messages=messages,
+                sender=sender,
+                config=config,
+                **kwargs,
             ),
         )
 
@@ -2423,7 +2415,7 @@ class ConversableAgent(LLMAgent):
         if messages is None:
             messages = self._oai_messages[sender]
         message = messages[-1]
-        if "function_call" in message:
+        if message.get("function_call"):
             call_id = message.get("id", None)
             func_call = message["function_call"]
             func_name = func_call.get("name", "")
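The guard here changed from key presence to truthiness because messages are commonly serialized with an explicit `"function_call": None`, which the old check treated as a function call. Compare:

```python
message = {"content": "plain reply", "function_call": None}

print("function_call" in message)          # True  -- old check: enters the branch
print(bool(message.get("function_call")))  # False -- new check: skips it
```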
@@ -2822,7 +2814,7 @@ class ConversableAgent(LLMAgent):
         self,
         messages: list[dict[str, Any]] | None = None,
         sender: Optional["Agent"] = None,
-        **kwargs: Any,
+        exclude: Container[Any] = (),
     ) -> str | dict[str, Any] | None:
         """Reply based on the conversation history and the sender.
 
@@ -2844,8 +2836,7 @@ class ConversableAgent(LLMAgent):
         Args:
             messages: a list of messages in the conversation history.
             sender: sender of an Agent instance.
-            **kwargs (Any): Additional arguments to customize reply generation. Supported kwargs:
-                - exclude (List[Callable[..., Any]]): A list of reply functions to exclude from
+            exclude: A list of reply functions to exclude from
                 the reply generation process. Functions in this list will be skipped even if
                 they would normally be triggered.
 
@@ -2873,7 +2864,7 @@ class ConversableAgent(LLMAgent):
 
         for reply_func_tuple in self._reply_func_list:
             reply_func = reply_func_tuple["reply_func"]
-            if "exclude" in kwargs and reply_func in kwargs["exclude"]:
+            if reply_func in exclude:
                 continue
             if inspect.iscoroutinefunction(reply_func):
                 continue
@@ -2896,7 +2887,7 @@ class ConversableAgent(LLMAgent):
         self,
         messages: list[dict[str, Any]] | None = None,
         sender: Optional["Agent"] = None,
-        **kwargs: Any,
+        exclude: Container[Any] = (),
     ) -> str | dict[str, Any] | None:
         """(async) Reply based on the conversation history and the sender.
 
@@ -2918,8 +2909,7 @@ class ConversableAgent(LLMAgent):
         Args:
             messages: a list of messages in the conversation history.
             sender: sender of an Agent instance.
-            **kwargs (Any): Additional arguments to customize reply generation. Supported kwargs:
-                - exclude (List[Callable[..., Any]]): A list of reply functions to exclude from
+            exclude: A list of reply functions to exclude from
                 the reply generation process. Functions in this list will be skipped even if
                 they would normally be triggered.
 
@@ -2947,13 +2937,16 @@ class ConversableAgent(LLMAgent):
 
         for reply_func_tuple in self._reply_func_list:
             reply_func = reply_func_tuple["reply_func"]
-            if "exclude" in kwargs and reply_func in kwargs["exclude"]:
+            if reply_func in exclude:
                 continue
 
             if self._match_trigger(reply_func_tuple["trigger"], sender):
                 if inspect.iscoroutinefunction(reply_func):
                     final, reply = await reply_func(
-                        self, messages=messages, sender=sender, config=reply_func_tuple["config"]
+                        self,
+                        messages=messages,
+                        sender=sender,
+                        config=reply_func_tuple["config"],
                     )
                 else:
                     final, reply = reply_func(self, messages=messages, sender=sender, config=reply_func_tuple["config"])
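The previously undocumented `**kwargs`-borne `exclude` option is now an explicit, typed keyword on both `generate_reply` and `a_generate_reply`. A sketch of the call pattern (agent setup is hypothetical; with no LLM configured the call simply falls through to the default reply):

```python
from autogen import ConversableAgent

agent = ConversableAgent("assistant", llm_config=False)

# 0.10.0: exclude is a real keyword argument typed as Container[Any];
# reply functions listed here are skipped even if their trigger matches.
reply = agent.generate_reply(
    messages=[{"role": "user", "content": "ping"}],
    exclude=(ConversableAgent.generate_oai_reply,),
)
```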
@@ -3448,13 +3441,30 @@ class ConversableAgent(LLMAgent):
             logger.error(error_msg)
             raise AssertionError(error_msg)
 
+        self.llm_config = self._update_tool_config(
+            self.llm_config,
+            tool_sig=tool_sig,
+            is_remove=is_remove,
+            silent_override=silent_override,
+        )
+
+        self.client = OpenAIWrapper(**self.llm_config)
+
+    def _update_tool_config(
+        self,
+        llm_config: dict[str, Any] | LLMConfig,
+        tool_sig: str | dict[str, Any],
+        is_remove: bool,
+        silent_override: bool = False,
+    ) -> dict[str, Any]:
         if is_remove:
-            if "tools" not in self.llm_config or len(self.llm_config["tools"]) == 0:
+            if "tools" not in llm_config or len(llm_config["tools"]) == 0:
                 error_msg = f"The agent config doesn't have tool {tool_sig}."
                 logger.error(error_msg)
                 raise AssertionError(error_msg)
+
             else:
-                current_tools = self.llm_config["tools"]
+                current_tools = llm_config["tools"]
                 filtered_tools = []
 
                 # Loop through and rebuild tools list without the tool to remove
@@ -3467,31 +3477,34 @@ class ConversableAgent(LLMAgent):
                     if is_different:
                         filtered_tools.append(tool)
 
-                self.llm_config["tools"] = filtered_tools
+                llm_config["tools"] = filtered_tools
+
         else:
             if not isinstance(tool_sig, dict):
                 raise ValueError(
                     f"The tool signature must be of the type dict. Received tool signature type {type(tool_sig)}"
                 )
+
             self._assert_valid_name(tool_sig["function"]["name"])
-            if "tools" in self.llm_config and len(self.llm_config["tools"]) > 0:
+            if "tools" in llm_config and len(llm_config["tools"]) > 0:
                 if not silent_override and any(
-                    tool["function"]["name"] == tool_sig["function"]["name"] for tool in self.llm_config["tools"]
+                    tool["function"]["name"] == tool_sig["function"]["name"] for tool in llm_config["tools"]
                 ):
                     warnings.warn(f"Function '{tool_sig['function']['name']}' is being overridden.", UserWarning)
-                self.llm_config["tools"] = [
+
+                llm_config["tools"] = [
                     tool
-                    for tool in self.llm_config["tools"]
+                    for tool in llm_config["tools"]
                     if tool.get("function", {}).get("name") != tool_sig["function"]["name"]
                 ] + [tool_sig]
             else:
-                self.llm_config["tools"] = [tool_sig]
+                llm_config["tools"] = [tool_sig]
 
         # Do this only if llm_config is a dict. If llm_config is LLMConfig, LLMConfig will handle this.
-        if len(self.llm_config["tools"]) == 0 and isinstance(self.llm_config, dict):
-            del self.llm_config["tools"]
+        if len(llm_config["tools"]) == 0 and isinstance(llm_config, dict):
+            del llm_config["tools"]
 
-        self.client = OpenAIWrapper(**self.llm_config)
+        return llm_config
 
     def can_execute_function(self, name: list[str] | str) -> bool:
         """Whether the agent can execute the function."""
@@ -3975,7 +3988,11 @@ class ConversableAgent(LLMAgent):
         if executor_kwargs is None:
             executor_kwargs = {}
         if "is_termination_msg" not in executor_kwargs:
-            executor_kwargs["is_termination_msg"] = lambda x: (x["content"] is not None) and "TERMINATE" in x["content"]
+            executor_kwargs["is_termination_msg"] = lambda x: "TERMINATE" in (
+                content_str(x.get("content"))
+                if isinstance(x.get("content"), (str, list)) or x.get("content") is None
+                else str(x.get("content"))
+            )
 
         try:
             if not self.run_executor:
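The default executor termination check now normalizes content before searching for "TERMINATE", so list-form multimodal content is handled instead of silently failing the membership test. For example:

```python
from autogen.code_utils import content_str

msg = {"content": [{"type": "text", "text": "All done. TERMINATE"}]}

# 0.9.x default: "TERMINATE" in msg["content"] -> False (membership test on a list)
# 0.10.0 default flattens multimodal content first:
print("TERMINATE" in content_str(msg.get("content")))  # True
```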
@@ -4160,6 +4177,32 @@ class ConversableAgent(LLMAgent):
         """
         self.output_guardrails.extend(guardrails)
 
+    def run_input_guardrails(self, messages: list[dict[str, Any]] | None = None) -> GuardrailResult | None:
+        """Run input guardrails for an agent before the reply is generated.
+
+        Args:
+            messages (Optional[list[dict[str, Any]]]): The messages to check against the guardrails.
+        """
+        for guardrail in self.input_guardrails:
+            guardrail_result = guardrail.check(context=messages)
+
+            if guardrail_result.activated:
+                return guardrail_result
+        return None
+
+    def run_output_guardrails(self, reply: str | dict[str, Any]) -> GuardrailResult | None:
+        """Run output guardrails for an agent after the reply is generated.
+
+        Args:
+            reply (str | dict[str, Any]): The reply generated by the agent.
+        """
+        for guardrail in self.output_guardrails:
+            guardrail_result = guardrail.check(context=reply)
+
+            if guardrail_result.activated:
+                return guardrail_result
+        return None
+
 
 @export_module("autogen")
 def register_function(
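The two runners walk the registered guardrails in order and return the first activated `GuardrailResult`, or `None` if nothing fires. A minimal sketch (no guardrails registered here, so both calls fall through):

```python
from autogen import ConversableAgent

agent = ConversableAgent("writer", llm_config=False)

assert agent.run_input_guardrails([{"role": "user", "content": "hello"}]) is None
assert agent.run_output_guardrails("draft reply") is None
# With guardrails registered on agent.input_guardrails / agent.output_guardrails,
# the first result whose .activated flag is True would be returned instead.
```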
@@ -4187,3 +4230,65 @@ def register_function
     """
     f = caller.register_for_llm(name=name, description=description)(f)
     executor.register_for_execution(name=name)(f)
+
+
+def normilize_message_to_oai(
+    message: dict[str, Any] | str,
+    name: str,
+    role: str = "assistant",
+) -> tuple[bool, dict[str, Any]]:
+    message = message_to_dict(message)
+    # create oai message to be appended to the oai conversation that can be passed to oai directly.
+    oai_message = {
+        k: message[k]
+        for k in ("content", "function_call", "tool_responses", "tool_call_id", "name", "context")
+        if k in message and message[k] is not None
+    }
+
+    if tools := message.get("tool_calls"):  # check for [], None and missed key
+        oai_message["tool_calls"] = tools
+
+    if "content" not in oai_message:
+        if "function_call" in oai_message or "tool_calls" in oai_message:
+            oai_message["content"] = None  # if only function_call is provided, content will be set to None.
+        else:
+            return False, oai_message
+
+    if message.get("role") in ["function", "tool"]:
+        oai_message["role"] = message.get("role")
+        if "tool_responses" in oai_message:
+            for tool_response in oai_message["tool_responses"]:
+                content_value = tool_response.get("content")
+                tool_response["content"] = (
+                    content_str(content_value)
+                    if isinstance(content_value, (str, list)) or content_value is None
+                    else str(content_value)
+                )
+    elif "override_role" in message:
+        # If we have a direction to override the role then set the
+        # role accordingly. Used to customise the role for the
+        # select speaker prompt.
+        oai_message["role"] = message.get("override_role")
+    else:
+        oai_message["role"] = role
+
+    if oai_message.get("function_call", False) or oai_message.get("tool_calls", False):
+        oai_message["role"] = "assistant"  # only messages with role 'assistant' can have a function call.
+    elif "name" not in oai_message:
+        # If we don't have a name field, append it
+        oai_message["name"] = name
+
+    return True, oai_message
+
+
+def message_to_dict(message: dict[str, Any] | str) -> dict:
+    """Convert a message to a dictionary.
+
+    The message can be a string or a dictionary. The string will be put in the "content" field of the new dictionary.
+    """
+    if isinstance(message, str):
+        return {"content": message}
+    elif isinstance(message, dict):
+        return message
+    else:
+        return dict(message)
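Both helpers are module-level, so the normalization that used to be buried in `_append_oai_message` can be exercised directly; the import path below assumes they stay in `conversable_agent` (and note the function name really is spelled `normilize` in this release):

```python
from autogen.agentchat.conversable_agent import message_to_dict, normilize_message_to_oai

print(message_to_dict("hi"))  # {'content': 'hi'}

valid, oai_message = normilize_message_to_oai("hello", name="alice")
print(valid, oai_message)  # True {'content': 'hello', 'role': 'assistant', 'name': 'alice'}

valid, _ = normilize_message_to_oai({"context": {"k": "v"}}, name="alice")
print(valid)  # False -- no content, function_call, or tool_calls
```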
--- a/autogen/agentchat/group/group_tool_executor.py
+++ b/autogen/agentchat/group/group_tool_executor.py
@@ -7,6 +7,7 @@ from collections.abc import Callable
 from copy import deepcopy
 from typing import Annotated, Any
 
+from ...code_utils import content_str
 from ...oai import OpenAIWrapper
 from ...tools import Depends, Tool
 from ...tools.dependency_injection import inject_params, on
@@ -33,6 +34,9 @@ class GroupToolExecutor(ConversableAgent):
         # Store the next target from a tool call
         self._group_next_target: TransitionTarget | None = None
 
+        # Track the original agent that initiated the tool call (for safeguards)
+        self._tool_call_originator: str | None = None
+
         # Primary tool reply function for handling the tool reply and the ReplyResult and TransitionTarget returns
         self.register_reply([Agent, None], self._generate_group_tool_reply, remove_other_reply_funcs=True)
 
@@ -57,6 +61,18 @@ class GroupToolExecutor(ConversableAgent):
         """Clears the next target to transition to."""
         self._group_next_target = None
 
+    def set_tool_call_originator(self, agent_name: str) -> None:
+        """Sets the original agent that initiated the tool call (for safeguard transparency)."""
+        self._tool_call_originator = agent_name
+
+    def get_tool_call_originator(self) -> str | None:
+        """Gets the original agent that initiated the tool call."""
+        return self._tool_call_originator
+
+    def clear_tool_call_originator(self) -> None:
+        """Clears the tool call originator."""
+        self._tool_call_originator = None
+
     def _modify_context_variables_param(
         self, f: Callable[..., Any], context_variables: ContextVariables
     ) -> Callable[..., Any]:
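The originator-tracking API lets safeguard checks attribute a tool call to the agent that actually issued it rather than to the executor itself; `_generate_group_tool_reply` (further down) records it automatically. A sketch (the no-argument constructor matches how the executor appears to be created internally; treat that as an assumption):

```python
from autogen.agentchat.group.group_tool_executor import GroupToolExecutor

executor = GroupToolExecutor()

executor.set_tool_call_originator("planner")
assert executor.get_tool_call_originator() == "planner"
executor.clear_tool_call_originator()
assert executor.get_tool_call_originator() is None
```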
@@ -71,6 +87,9 @@ class GroupToolExecutor(ConversableAgent):
         """
         sig = inspect.signature(f)
 
+        def wrapper(*args: Any, **kwargs: Any) -> Any:
+            return f(*args, **kwargs)
+
         # Check if context_variables parameter exists and update it if so
         if __CONTEXT_VARIABLES_PARAM_NAME__ in sig.parameters:
             new_params = []
@@ -84,13 +103,13 @@ class GroupToolExecutor(ConversableAgent):
 
             # Update signature
             new_sig = sig.replace(parameters=new_params)
-            f.__signature__ = new_sig  # type: ignore[attr-defined]
+            wrapper.__signature__ = new_sig  # type: ignore[attr-defined]
 
-        return f
+        return wrapper
 
-    def _change_tool_context_variables_to_depends(
-        self, agent: ConversableAgent, current_tool: Tool, context_variables: ContextVariables
-    ) -> None:
+    def make_tool_copy_with_context_variables(
+        self, current_tool: Tool, context_variables: ContextVariables
+    ) -> Tool | None:
         """Checks for the context_variables parameter in the tool and updates it to use dependency injection."""
         # If the tool has a context_variables parameter, remove the tool and reregister it without the parameter
         if __CONTEXT_VARIABLES_PARAM_NAME__ in current_tool.tool_schema["function"]["parameters"]["properties"]:
@@ -100,16 +119,19 @@ class GroupToolExecutor(ConversableAgent):
             # Remove the Tool from the agent
             name = current_tool._name
             description = current_tool._description
-            agent.remove_tool_for_llm(current_tool)
 
             # Recreate the tool without the context_variables parameter
-            tool_func = self._modify_context_variables_param(current_tool._func, context_variables)
+            tool_func = self._modify_context_variables_param(tool_func, context_variables)
             tool_func = inject_params(tool_func)
-            new_tool = ConversableAgent._create_tool_if_needed(
-                func_or_tool=tool_func, name=name, description=description
-            )
+            return ConversableAgent._create_tool_if_needed(func_or_tool=tool_func, name=name, description=description)
+        return None
 
-            # Re-register with the agent
+    def _change_tool_context_variables_to_depends(
+        self, agent: ConversableAgent, current_tool: Tool, context_variables: ContextVariables
+    ) -> None:
+        """Checks for the context_variables parameter in the tool and updates it to use dependency injection."""
+        if new_tool := self.make_tool_copy_with_context_variables(current_tool, context_variables):
+            agent.remove_tool_for_llm(current_tool)
             agent.register_for_llm()(new_tool)
 
     def register_agents_functions(self, agents: list[ConversableAgent], context_variables: ContextVariables) -> None:
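The new `wrapper` appears to exist because the old code assigned `f.__signature__` on the tool's original function, mutating shared state; attaching the rewritten signature to a fresh wrapper leaves the original callable untouched. A standalone illustration of the same trick (not the library code itself):

```python
import inspect

def tool(a: int, context_variables: dict | None = None) -> int:
    return a

def copy_without_context(f):
    def wrapper(*args, **kwargs):
        return f(*args, **kwargs)

    # Rebuild the signature minus context_variables on the wrapper only.
    params = [p for p in inspect.signature(f).parameters.values() if p.name != "context_variables"]
    wrapper.__signature__ = inspect.Signature(params)  # original f is left unmodified
    return wrapper

stripped = copy_without_context(tool)
print(inspect.signature(tool))      # (a: int, context_variables: dict | None = None) -> int
print(inspect.signature(stripped))  # (a: int)
```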
@@ -140,15 +162,22 @@ class GroupToolExecutor(ConversableAgent):
         2. Generates the tool calls reply.
         3. Updates context_variables and next_agent based on the tool call response.
         """
+
         if config is None:
             config = agent  # type: ignore[assignment]
         if messages is None:
             messages = agent._oai_messages[sender]
 
         message = messages[-1]
-        if "tool_calls" in message:
+        # Track the original agent that initiated this tool call (for safeguard transparency)
+        # Use sender.name as fallback when message doesn't have a name field (e.g., for tool_calls messages)
+        agent_name = message.get("name", sender.name if sender else "unknown")
+        self.set_tool_call_originator(agent_name)
+
+        if message.get("tool_calls"):
             tool_call_count = len(message["tool_calls"])
 
+            tool_message = None
             # Loop through tool calls individually (so context can be updated after each function call)
             next_target: TransitionTarget | None = None
             tool_responses_inner = []
@@ -182,11 +211,13 @@ class GroupToolExecutor(ConversableAgent):
                     next_target = content
 
                 # Serialize the content to a string
-                if content is not None:
-                    tool_response["content"] = str(content)
+                normalized_content = (
+                    content_str(content) if isinstance(content, (str, list)) or content is None else str(content)
+                )
+                tool_response["content"] = normalized_content
 
                 tool_responses_inner.append(tool_response)
-                contents.append(str(tool_response["content"]))
+                contents.append(normalized_content)
 
             self._group_next_target = next_target  # type: ignore[attr-defined]
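The executor now serializes tool results with the same rule `ConversableAgent` applies elsewhere in this release: `content_str` for `str`/`list`/`None`, `str()` for anything else. The helper below restates that rule for illustration:

```python
from autogen.code_utils import content_str

def normalize(content):
    return content_str(content) if isinstance(content, (str, list)) or content is None else str(content)

print(repr(normalize(None)))                        # ''
print(normalize([{"type": "text", "text": "42"}]))  # 42
print(normalize({"answer": 42}))                    # {'answer': 42}
```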