ag2 0.9.10__py3-none-any.whl → 0.10.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of ag2 might be problematic.

Files changed (42)
  1. {ag2-0.9.10.dist-info → ag2-0.10.0.dist-info}/METADATA +14 -7
  2. {ag2-0.9.10.dist-info → ag2-0.10.0.dist-info}/RECORD +42 -24
  3. autogen/a2a/__init__.py +36 -0
  4. autogen/a2a/agent_executor.py +105 -0
  5. autogen/a2a/client.py +280 -0
  6. autogen/a2a/errors.py +18 -0
  7. autogen/a2a/httpx_client_factory.py +79 -0
  8. autogen/a2a/server.py +221 -0
  9. autogen/a2a/utils.py +165 -0
  10. autogen/agentchat/__init__.py +3 -0
  11. autogen/agentchat/agent.py +0 -2
  12. autogen/agentchat/chat.py +5 -1
  13. autogen/agentchat/contrib/llava_agent.py +1 -13
  14. autogen/agentchat/conversable_agent.py +178 -73
  15. autogen/agentchat/group/group_tool_executor.py +46 -15
  16. autogen/agentchat/group/guardrails.py +41 -33
  17. autogen/agentchat/group/multi_agent_chat.py +53 -0
  18. autogen/agentchat/group/safeguards/api.py +19 -2
  19. autogen/agentchat/group/safeguards/enforcer.py +134 -40
  20. autogen/agentchat/groupchat.py +45 -33
  21. autogen/agentchat/realtime/experimental/realtime_swarm.py +1 -3
  22. autogen/interop/pydantic_ai/pydantic_ai.py +1 -1
  23. autogen/llm_config/client.py +3 -2
  24. autogen/oai/bedrock.py +0 -13
  25. autogen/oai/client.py +15 -8
  26. autogen/oai/client_utils.py +30 -0
  27. autogen/oai/cohere.py +0 -10
  28. autogen/remote/__init__.py +18 -0
  29. autogen/remote/agent.py +199 -0
  30. autogen/remote/agent_service.py +142 -0
  31. autogen/remote/errors.py +17 -0
  32. autogen/remote/httpx_client_factory.py +131 -0
  33. autogen/remote/protocol.py +37 -0
  34. autogen/remote/retry.py +102 -0
  35. autogen/remote/runtime.py +96 -0
  36. autogen/testing/__init__.py +12 -0
  37. autogen/testing/messages.py +45 -0
  38. autogen/testing/test_agent.py +111 -0
  39. autogen/version.py +1 -1
  40. {ag2-0.9.10.dist-info → ag2-0.10.0.dist-info}/WHEEL +0 -0
  41. {ag2-0.9.10.dist-info → ag2-0.10.0.dist-info}/licenses/LICENSE +0 -0
  42. {ag2-0.9.10.dist-info → ag2-0.10.0.dist-info}/licenses/NOTICE.md +0 -0
autogen/agentchat/groupchat.py CHANGED
@@ -1021,9 +1021,7 @@ class GroupChat:
         return mentions
 
     def _run_input_guardrails(
-        self,
-        agent: "ConversableAgent",
-        messages: list[dict[str, Any]] | None = None,
+        self, agent: "ConversableAgent", messages: list[dict[str, Any]] | None = None
     ) -> str | None:
         """Run input guardrails for an agent before the reply is generated.
 
@@ -1031,27 +1029,21 @@ class GroupChat:
             agent (ConversableAgent): The agent whose input guardrails to run.
             messages (Optional[list[dict[str, Any]]]): The messages to check against the guardrails.
         """
-        for guardrail in agent.input_guardrails:
-            guardrail_result = guardrail.check(context=messages)
-
-            if guardrail_result.activated:
-                guardrail.target.activate_target(self)
-                return f"{guardrail.activation_message}\nJustification: {guardrail_result.justification}"
+        if guardrail_result := agent.run_input_guardrails(messages):
+            guardrail_result.guardrail.target.activate_target(self)
+            return guardrail_result.reply
 
         return None
 
-    def _run_output_guardrails(self, agent: "ConversableAgent", reply: str) -> None:
+    def _run_output_guardrails(self, agent: "ConversableAgent", reply: str | dict[str, Any]) -> str | None:
         """Run output guardrails for an agent after the reply is generated.
 
         Args:
             agent (ConversableAgent): The agent whose output guardrails to run.
-            reply (str): The reply generated by the agent.
+            reply (str | dict[str, Any]): The reply generated by the agent.
         """
-        for guardrail in agent.output_guardrails:
-            guardrail_result = guardrail.check(context=reply)
-
-            if guardrail_result.activated:
-                guardrail.target.activate_target(self)
-                return f"{guardrail.activation_message}\nJustification: {guardrail_result.justification}"
+        if guardrail_result := agent.run_output_guardrails(reply):
+            guardrail_result.guardrail.target.activate_target(self)
+            return guardrail_result.reply
 
         return None
 
     def _run_inter_agent_guardrails(
@@ -1303,13 +1295,16 @@ class GroupChatManager(ConversableAgent):
                 reply = guardrails_reply
 
             # check for "clear history" phrase in reply and activate clear history function if found
-            if (
-                groupchat.enable_clear_history
-                and isinstance(reply, dict)
-                and reply["content"]
-                and "CLEAR HISTORY" in reply["content"].upper()
-            ):
-                reply["content"] = self.clear_agents_history(reply, groupchat)
+            if groupchat.enable_clear_history and isinstance(reply, dict) and reply.get("content"):
+                raw_content = reply.get("content")
+                normalized_content = (
+                    content_str(raw_content)
+                    if isinstance(raw_content, (str, list)) or raw_content is None
+                    else str(raw_content)
+                )
+                if "CLEAR HISTORY" in normalized_content.upper():
+                    reply["content"] = normalized_content
+                    reply["content"] = self.clear_agents_history(reply, groupchat)
 
             # The speaker sends the message without requesting a reply
             speaker.send(reply, self, request_reply=False, silent=silent)
@@ -1420,13 +1415,16 @@
                 reply = guardrails_reply
 
             # check for "clear history" phrase in reply and activate clear history function if found
-            if (
-                groupchat.enable_clear_history
-                and isinstance(reply, dict)
-                and reply["content"]
-                and "CLEAR HISTORY" in reply["content"].upper()
-            ):
-                reply["content"] = self.clear_agents_history(reply, groupchat)
+            if groupchat.enable_clear_history and isinstance(reply, dict) and reply.get("content"):
+                raw_content = reply.get("content")
+                normalized_content = (
+                    content_str(raw_content)
+                    if isinstance(raw_content, (str, list)) or raw_content is None
+                    else str(raw_content)
+                )
+                if "CLEAR HISTORY" in normalized_content.upper():
+                    reply["content"] = normalized_content
+                    reply["content"] = self.clear_agents_history(reply, groupchat)
 
             # The speaker sends the message without requesting a reply
             await speaker.a_send(reply, self, request_reply=False, silent=silent)
@@ -1701,7 +1699,13 @@ class GroupChatManager(ConversableAgent):
             _remove_termination_string = remove_termination_string
 
         if _remove_termination_string and messages[-1].get("content"):
-            messages[-1]["content"] = _remove_termination_string(messages[-1]["content"])
+            content_value = messages[-1]["content"]
+            if isinstance(content_value, str):
+                messages[-1]["content"] = _remove_termination_string(content_value)
+            elif isinstance(content_value, list):
+                messages[-1]["content"] = _remove_termination_string(content_str(content_value))
+            else:
+                messages[-1]["content"] = _remove_termination_string(str(content_value))
 
         # Check if the last message meets termination (if it has one)
         if self._is_termination_msg and self._is_termination_msg(last_message):
@@ -1764,7 +1768,15 @@ class GroupChatManager(ConversableAgent):
         """
         iostream = IOStream.get_default()
 
-        reply_content = reply["content"]
+        raw_reply_content = reply.get("content")
+        if isinstance(raw_reply_content, str):
+            reply_content = raw_reply_content
+        elif isinstance(raw_reply_content, (list, type(None))):
+            reply_content = content_str(raw_reply_content)
+            reply["content"] = reply_content
+        else:
+            reply_content = str(raw_reply_content)
+            reply["content"] = reply_content
         # Split the reply into words
         words = reply_content.split()
         # Find the position of "clear" to determine where to start processing
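The GroupChatManager hunks above all funnel non-string message content through content_str before doing substring checks. A minimal sketch of what that relies on (content_str already lives in autogen.code_utils; the reply dict below is illustrative):

    from autogen.code_utils import content_str

    # content_str accepts a plain string, a list of content parts, or None,
    # and always returns a plain string, so checks like the "CLEAR HISTORY"
    # trigger keep working for multimodal replies.
    reply = {"role": "assistant", "content": [{"type": "text", "text": "CLEAR HISTORY 3"}]}

    normalized = content_str(reply["content"])  # -> "CLEAR HISTORY 3"
    assert "CLEAR HISTORY" in normalized.upper()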
autogen/agentchat/realtime/experimental/realtime_swarm.py CHANGED
@@ -185,7 +185,6 @@ class SwarmableAgent(Agent):
         self,
         messages: list[dict[str, Any]] | None = None,
         sender: Optional["Agent"] = None,
-        **kwargs: Any,
     ) -> str | dict[str, Any] | None:
         if messages is None:
             if sender is None:
@@ -242,9 +241,8 @@ class SwarmableAgent(Agent):
         self,
         messages: list[dict[str, Any]] | None = None,
         sender: Optional["Agent"] = None,
-        **kwargs: Any,
    ) -> str | dict[str, Any] | None:
-        return self.generate_reply(messages=messages, sender=sender, **kwargs)
+        return self.generate_reply(messages=messages, sender=sender)
 
     async def a_receive(
         self,
autogen/interop/pydantic_ai/pydantic_ai.py CHANGED
@@ -78,7 +78,7 @@ class PydanticAIInteroperability:
                 result = f(**kwargs)  # type: ignore[call-arg]
             except Exception as e:
                 if ctx_typed is not None:
-                    ctx_typed.retries[tool_typed.name] += 1
+                    ctx_typed.retries[tool_typed.name] = ctx_typed.retries.get(tool_typed.name, 0) + 1
                 raise e
 
             return result
autogen/llm_config/client.py CHANGED
@@ -4,6 +4,7 @@
 #
 # Portions derived from https://github.com/microsoft/autogen are under the MIT License.
 # SPDX-License-Identifier: MIT
+from collections.abc import Sequence
 from typing import Any, Protocol
 
 from ..doc_utils import export_module
@@ -31,11 +32,11 @@ class ModelClient(Protocol):
     class ModelClientResponseProtocol(Protocol):
         class Choice(Protocol):
             class Message(Protocol):
-                content: str | dict[str, Any]
+                content: str | dict[str, Any] | list[dict[str, Any]]
 
             message: Message
 
-        choices: list[Choice]
+        choices: Sequence[Choice]
         model: str
 
     def create(self, params: dict[str, Any]) -> ModelClientResponseProtocol: ...  # pragma: no cover
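With the protocol widened as above, a custom model client's response only needs to expose list-capable message content and any Sequence of choices. A minimal sketch with hypothetical class names (not part of ag2):

    from dataclasses import dataclass, field
    from typing import Any

    @dataclass
    class MyMessage:
        # content may now be a list of content parts, not just a string or dict
        content: str | dict[str, Any] | list[dict[str, Any]]

    @dataclass
    class MyChoice:
        message: MyMessage

    @dataclass
    class MyResponse:
        choices: tuple[MyChoice, ...] = field(default_factory=tuple)  # a tuple satisfies Sequence[Choice]
        model: str = "my-custom-model"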
autogen/oai/bedrock.py CHANGED
@@ -198,22 +198,9 @@ class BedrockClient:
         if "top_p" in params:
             base_params["topP"] = validate_parameter(params, "top_p", (float, int), False, None, None, None)
 
-        if "topP" in params:
-            warnings.warn(
-                ("topP is deprecated, use top_p instead. Scheduled for removal in 0.10.0 version."), DeprecationWarning
-            )
-            base_params["topP"] = validate_parameter(params, "topP", (float, int), False, None, None, None)
-
         if "max_tokens" in params:
             base_params["maxTokens"] = validate_parameter(params, "max_tokens", (int,), False, None, None, None)
 
-        if "maxTokens" in params:
-            warnings.warn(
-                ("maxTokens is deprecated, use max_tokens instead. Scheduled for removal in 0.10.0 version."),
-                DeprecationWarning,
-            )
-            base_params["maxTokens"] = validate_parameter(params, "maxTokens", (int,), False, None, None, None)
-
         # Here are the possible "model-specific" parameters and their suitable types, known as additional parameters
         additional_params = {}
 
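Since the camelCase aliases are gone, Bedrock settings should use the OpenAI-style names, which the client maps to Bedrock's topP/maxTokens internally. A partial, hypothetical config sketch (model id and region are placeholders):

    # topP and maxTokens are no longer read from the config after this release.
    bedrock_llm_config = {
        "api_type": "bedrock",
        "model": "anthropic.claude-3-sonnet-20240229-v1:0",  # placeholder model id
        "aws_region": "us-east-1",  # placeholder region
        "top_p": 0.9,
        "max_tokens": 1024,
    }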
autogen/oai/client.py CHANGED
@@ -20,6 +20,7 @@ from pydantic import BaseModel, Field, HttpUrl
 from pydantic.type_adapter import TypeAdapter
 
 from ..cache import Cache
+from ..code_utils import content_str
 from ..doc_utils import export_module
 from ..events.client_events import StreamEvent, UsageSummaryEvent
 from ..exception_utils import ModelToolNotSupportedError
@@ -30,7 +31,7 @@ from ..llm_config.entry import LLMConfigEntry, LLMConfigEntryDict
 from ..logger.logger_utils import get_current_ts
 from ..runtime_logging import log_chat_completion, log_new_client, log_new_wrapper, logging_enabled
 from ..token_count_utils import count_token
-from .client_utils import FormatterProtocol, logging_formatter
+from .client_utils import FormatterProtocol, logging_formatter, merge_config_with_tools
 from .openai_utils import OAI_PRICE1K, get_key, is_valid_api_key
 
 TOOL_ENABLED = False
@@ -365,11 +366,12 @@ class OpenAIClient:
         if isinstance(response, Completion):
             return [choice.text for choice in choices]  # type: ignore [union-attr]
 
-        def _format_content(content: str) -> str:
+        def _format_content(content: str | list[dict[str, Any]] | None) -> str:
+            normalized_content = content_str(content)
             return (
-                self.response_format.model_validate_json(content).format()
+                self.response_format.model_validate_json(normalized_content).format()
                 if isinstance(self.response_format, FormatterProtocol)
-                else content
+                else normalized_content
             )
 
         if TOOL_ENABLED:
@@ -637,8 +639,11 @@
                 warnings.warn(
                     f"The {params.get('model')} model does not support streaming. The stream will be set to False."
                 )
-            if params.get("tools", False):
-                raise ModelToolNotSupportedError(params.get("model"))
+            if "tools" in params:
+                if params["tools"]:  # If tools exist, raise as unsupported
+                    raise ModelToolNotSupportedError(params.get("model"))
+                else:
+                    params.pop("tools")  # Remove empty tools list
             self._process_reasoning_model_params(params)
             params["stream"] = False
             response = create_or_parse(**params)
@@ -1079,9 +1084,10 @@
         self._round_robin_index = (self._round_robin_index + 1) % len(self._clients)
 
         for i in ordered_clients_indices:
-            client = self._clients[i]
             # merge the input config with the i-th config in the config list
-            full_config = {**config, **self._config_list[i]}
+            client_config = self._config_list[i]
+            full_config = merge_config_with_tools(config, client_config)
+
             # separate the config into create_config and extra_kwargs
             create_config, extra_kwargs = self._separate_create_config(full_config)
             # construct the create params
@@ -1112,6 +1118,7 @@
                 # Legacy cache behavior, if cache_seed is given, use DiskCache.
                 cache_client = Cache.disk(cache_seed, LEGACY_CACHE_DIR)
 
+            client = self._clients[i]
             log_cache_seed_value(cache if cache is not None else cache_seed, client=client)
 
             if cache_client is not None:
autogen/oai/client_utils.py CHANGED
@@ -110,6 +110,36 @@ def validate_parameter(
     return param_value
 
 
+def merge_config_with_tools(config: dict[str, Any], client_config: dict[str, Any]) -> dict[str, Any]:
+    """Merge configuration dictionaries with proper tools and functions handling.
+
+    This function merges two configuration dictionaries while ensuring that:
+    1. Empty 'tools' arrays are not added unnecessarily
+    2. 'tools' and deprecated 'functions' parameters are not both present
+    3. Actual tool configurations are properly merged
+
+    Args:
+        config: The base configuration dictionary (e.g., from create() call)
+        client_config: The client-specific configuration dictionary (e.g., from config_list)
+
+    Returns:
+        dict[str, Any]: The merged configuration with proper tools/functions handling
+    """
+    # Start with a clean merge of both configs
+    full_config = {**config, **client_config}
+
+    # Add tools if tools contains something AND are not using deprecated functions
+    config_tools = config.get("tools", [])
+    client_tools = client_config.get("tools", [])
+
+    if config_tools or client_tools:
+        # Don't add tools if functions parameter is present (deprecated API)
+        if "functions" not in full_config:
+            full_config["tools"] = config_tools + client_tools
+
+    return full_config
+
+
 def should_hide_tools(messages: list[dict[str, Any]], tools: list[dict[str, Any]], hide_tools_param: str) -> bool:
     """Determines if tools should be hidden. This function is used to hide tools when they have been run, minimising the chance of the LLM choosing them when they shouldn't.
     Parameters:
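A minimal sketch of the new helper in isolation, importing it from the module defined above:

    from autogen.oai.client_utils import merge_config_with_tools

    # Tools from the create() call and the per-client config are concatenated;
    # an empty client-side list does not clobber the caller's tools, and tools
    # are skipped entirely when the deprecated "functions" parameter is present.
    base = {"temperature": 0.2, "tools": [{"type": "function", "function": {"name": "search"}}]}
    client_cfg = {"model": "gpt-4o", "tools": []}

    merged = merge_config_with_tools(base, client_cfg)
    assert merged["tools"] == base["tools"]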
autogen/oai/cohere.py CHANGED
@@ -217,16 +217,6 @@ class CohereClient:
         if "top_p" in params:
             cohere_params["p"] = validate_parameter(params, "top_p", (int, float), False, 0.75, (0.01, 0.99), None)
 
-        if "p" in params:
-            warnings.warn(
-                (
-                    "parameter 'p' is deprecated, use 'top_p' instead for consistency with OpenAI API spec. "
-                    "Scheduled for removal in 0.10.0 version."
-                ),
-                DeprecationWarning,
-            )
-            cohere_params["p"] = validate_parameter(params, "p", (int, float), False, 0.75, (0.01, 0.99), None)
-
         if "seed" in params:
             cohere_params["seed"] = validate_parameter(params, "seed", int, True, None, None, None)
 
autogen/remote/__init__.py ADDED
@@ -0,0 +1,18 @@
+# Copyright (c) 2023 - 2025, AG2ai, Inc., AG2ai open-source projects maintainers and core contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+
+try:
+    import httpx  # noqa: F401
+except ImportError as e:
+    raise ImportError("httpx is not installed. Please install it with:\npip install httpx") from e
+
+from .agent import HTTPRemoteAgent
+from .httpx_client_factory import HttpxClientFactory
+from .runtime import HTTPAgentBus
+
+__all__ = (
+    "HTTPAgentBus",
+    "HTTPRemoteAgent",
+    "HttpxClientFactory",
+)
autogen/remote/agent.py ADDED
@@ -0,0 +1,199 @@
+# Copyright (c) 2023 - 2025, AG2ai, Inc., AG2ai open-source projects maintainers and core contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from typing import Any
+
+import httpx
+
+from autogen import ConversableAgent
+from autogen.agentchat.group import ContextVariables
+from autogen.oai.client import OpenAIWrapper
+
+from .errors import RemoteAgentError, RemoteAgentNotFoundError
+from .httpx_client_factory import ClientFactory, EmptyClientFactory
+from .protocol import RequestMessage, ResponseMessage
+from .retry import NoRetryPolicy, RetryPolicy
+
+
+class HTTPRemoteAgent(ConversableAgent):
+    """A remote agent that communicates with other agents via HTTP long-polling.
+
+    This agent forwards messages to a remote endpoint and handles the response
+    through HTTP requests. It supports both synchronous and asynchronous operations.
+
+    Example:
+        >>> remote_agent = HTTPRemoteAgent(url="http://api.example.com/agents", name="my_remote_agent")
+    """
+
+    def __init__(
+        self,
+        url: str,
+        name: str,
+        *,
+        silent: bool = False,
+        client: ClientFactory | None = None,
+        retry_policy: RetryPolicy | None = None,
+    ) -> None:
+        """Initialize the HTTPRemoteAgent.
+
+        Args:
+            url (str): The base URL of the remote agent service.
+            name (str): The name of this agent.
+            silent (bool): If True, suppresses logging output.
+            client (ClientFactory | None): HTTP client factory. If None, uses EmptyClientFactory.
+            retry_policy (RetryPolicy | None): Retry policy for HTTP requests. If None, uses NoRetryPolicy.
+        """
+
+        self.url = url
+        self.retry_policy: RetryPolicy = retry_policy or NoRetryPolicy
+
+        self._httpx_client_factory = client or EmptyClientFactory()
+
+        super().__init__(name, silent=silent)
+
+        self.__llm_config: dict[str, Any] = {}
+
+        self.replace_reply_func(
+            ConversableAgent.generate_oai_reply,
+            HTTPRemoteAgent.generate_remote_reply,
+        )
+        self.replace_reply_func(
+            ConversableAgent.a_generate_oai_reply,
+            HTTPRemoteAgent.a_generate_remote_reply,
+        )
+
+    def generate_remote_reply(
+        self,
+        messages: list[dict[str, Any]] | None = None,
+        sender: ConversableAgent | None = None,
+        config: OpenAIWrapper | None = None,
+    ) -> tuple[bool, dict[str, Any] | None]:
+        if messages is None:
+            messages = self._oai_messages[sender]
+
+        retry_policy = self.retry_policy()
+
+        task_id: Any = None
+        with self._httpx_client_factory.make_sync() as client:
+            while True:
+                with retry_policy:
+                    if task_id is None:
+                        # initiate remote procedure
+                        task_id = self._process_create_remote_task_response(
+                            client.post(
+                                f"{self.url}/{self.name}",
+                                content=RequestMessage(
+                                    messages=messages,
+                                    context=self.context_variables.data,
+                                    client_tools=self.__llm_config.get("tools", []),
+                                ).model_dump_json(),
+                            )
+                        )
+
+                    reply_response = client.get(f"{self.url}/{self.name}/{task_id}")
+
+                    if reply_response.status_code in (200, 204):  # valid answer codes
+                        break
+
+                    if reply_response.status_code == 425:  # task still in progress
+                        continue
+
+                    if reply_response.status_code == 404:
+                        task_id = None  # recreate task due remote agent lost it
+                        continue
+
+                    raise RemoteAgentError(f"Remote client error: {reply_response}, {reply_response.content!r}")
+
+        if reply := self._process_remote_reply(reply_response):
+            if sender:
+                context_variables = ContextVariables(reply.context)
+                sender.context_variables.update(context_variables.to_dict())
+            # TODO: support multiple messages response for remote chat history
+            return True, reply.messages[-1]
+
+        return True, None
+
+    async def a_generate_remote_reply(
+        self,
+        messages: list[dict[str, Any]] | None = None,
+        sender: ConversableAgent | None = None,
+        config: OpenAIWrapper | None = None,
+    ) -> tuple[bool, dict[str, Any] | None]:
+        if messages is None:
+            messages = self._oai_messages[sender]
+
+        retry_policy = self.retry_policy()
+
+        task_id: Any = None
+        async with self._httpx_client_factory() as client:
+            while True:
+                with retry_policy:
+                    if task_id is None:
+                        # initiate remote procedure
+                        task_id = self._process_create_remote_task_response(
+                            await client.post(
+                                f"{self.url}/{self.name}",
+                                content=RequestMessage(
+                                    messages=messages,
+                                    context=self.context_variables.data,
+                                    client_tools=self.__llm_config.get("tools", []),
+                                ).model_dump_json(),
+                            )
+                        )
+
+                    reply_response = await client.get(f"{self.url}/{self.name}/{task_id}")
+
+                    if reply_response.status_code in (200, 204):  # valid answer codes
+                        break
+
+                    if reply_response.status_code == 425:  # task still in progress
+                        continue
+
+                    if reply_response.status_code == 404:
+                        task_id = None  # recreate task due remote agent lost it
+                        continue
+
+                    raise RemoteAgentError(f"Remote client error: {reply_response}, {reply_response.content!r}")
+
+        if reply := self._process_remote_reply(reply_response):
+            if sender:
+                context_variables = ContextVariables(reply.context)
+                sender.context_variables.update(context_variables.to_dict())
+            # TODO: support multiple messages response for remote chat history
+            return True, reply.messages[-1]
+
+        return True, None
+
+    def _process_create_remote_task_response(self, response: httpx.Response) -> Any:
+        if response.status_code == 404:
+            raise RemoteAgentNotFoundError(self.name)
+
+        if response.status_code != 202:
+            raise RemoteAgentError(f"Remote client error: {response}, {response.content!r}")
+
+        return response.json()
+
+    def _process_remote_reply(self, reply_response: httpx.Response) -> ResponseMessage | None:
+        if reply_response.status_code == 204:
+            return None
+
+        try:
+            serialized_message = ResponseMessage.model_validate_json(reply_response.content)
+
+        except Exception as e:
+            raise RemoteAgentError(f"Remote client error: {reply_response}, {reply_response.content!r}") from e
+
+        return serialized_message
+
+    def update_tool_signature(
+        self,
+        tool_sig: str | dict[str, Any],
+        is_remove: bool,
+        silent_override: bool = False,
+    ) -> None:
+        self.__llm_config = self._update_tool_config(
+            self.__llm_config,
+            tool_sig=tool_sig,
+            is_remove=is_remove,
+            silent_override=silent_override,
+        )
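A minimal usage sketch for the new remote agent, assuming a compatible agent service (see agent_service.py and runtime.py in the file list) is already reachable; the URL and agent names are placeholders:

    from autogen import ConversableAgent
    from autogen.remote import HTTPRemoteAgent

    # The remote agent long-polls the service for task results, so locally it
    # behaves like any other ConversableAgent in a two-agent chat.
    remote = HTTPRemoteAgent(url="http://localhost:8000/agents", name="assistant")  # placeholder URL
    local = ConversableAgent(name="user", human_input_mode="NEVER", llm_config=False)

    local.initiate_chat(remote, message="Summarize the 0.10.0 changes.", max_turns=1)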