ag2 0.9.10__py3-none-any.whl → 0.10.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of ag2 might be problematic. Click here for more details.
- {ag2-0.9.10.dist-info → ag2-0.10.0.dist-info}/METADATA +14 -7
- {ag2-0.9.10.dist-info → ag2-0.10.0.dist-info}/RECORD +42 -24
- autogen/a2a/__init__.py +36 -0
- autogen/a2a/agent_executor.py +105 -0
- autogen/a2a/client.py +280 -0
- autogen/a2a/errors.py +18 -0
- autogen/a2a/httpx_client_factory.py +79 -0
- autogen/a2a/server.py +221 -0
- autogen/a2a/utils.py +165 -0
- autogen/agentchat/__init__.py +3 -0
- autogen/agentchat/agent.py +0 -2
- autogen/agentchat/chat.py +5 -1
- autogen/agentchat/contrib/llava_agent.py +1 -13
- autogen/agentchat/conversable_agent.py +178 -73
- autogen/agentchat/group/group_tool_executor.py +46 -15
- autogen/agentchat/group/guardrails.py +41 -33
- autogen/agentchat/group/multi_agent_chat.py +53 -0
- autogen/agentchat/group/safeguards/api.py +19 -2
- autogen/agentchat/group/safeguards/enforcer.py +134 -40
- autogen/agentchat/groupchat.py +45 -33
- autogen/agentchat/realtime/experimental/realtime_swarm.py +1 -3
- autogen/interop/pydantic_ai/pydantic_ai.py +1 -1
- autogen/llm_config/client.py +3 -2
- autogen/oai/bedrock.py +0 -13
- autogen/oai/client.py +15 -8
- autogen/oai/client_utils.py +30 -0
- autogen/oai/cohere.py +0 -10
- autogen/remote/__init__.py +18 -0
- autogen/remote/agent.py +199 -0
- autogen/remote/agent_service.py +142 -0
- autogen/remote/errors.py +17 -0
- autogen/remote/httpx_client_factory.py +131 -0
- autogen/remote/protocol.py +37 -0
- autogen/remote/retry.py +102 -0
- autogen/remote/runtime.py +96 -0
- autogen/testing/__init__.py +12 -0
- autogen/testing/messages.py +45 -0
- autogen/testing/test_agent.py +111 -0
- autogen/version.py +1 -1
- {ag2-0.9.10.dist-info → ag2-0.10.0.dist-info}/WHEEL +0 -0
- {ag2-0.9.10.dist-info → ag2-0.10.0.dist-info}/licenses/LICENSE +0 -0
- {ag2-0.9.10.dist-info → ag2-0.10.0.dist-info}/licenses/NOTICE.md +0 -0
autogen/a2a/errors.py
ADDED
|
@@ -0,0 +1,18 @@
|
|
|
1
|
+
# Copyright (c) 2023 - 2025, AG2ai, Inc., AG2ai open-source projects maintainers and core contributors
|
|
2
|
+
#
|
|
3
|
+
# SPDX-License-Identifier: Apache-2.0
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
from autogen.remote.errors import RemoteAgentError, RemoteAgentNotFoundError
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
class A2aClientError(RemoteAgentError):
    """Base exception for errors raised by the A2A client integration."""
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
class A2aAgentNotFoundError(A2aClientError, RemoteAgentNotFoundError):
    """Raised when an A2A agent cannot be located."""
|
|
@@ -0,0 +1,79 @@
|
|
|
1
|
+
# Copyright (c) 2023 - 2025, AG2ai, Inc., AG2ai open-source projects maintainers and core contributors
|
|
2
|
+
#
|
|
3
|
+
# SPDX-License-Identifier: Apache-2.0
|
|
4
|
+
|
|
5
|
+
from typing import Any
|
|
6
|
+
from uuid import uuid4
|
|
7
|
+
|
|
8
|
+
from a2a.types import AgentCapabilities, AgentCard, DataPart, Message, Part, Role, SendMessageSuccessResponse, TextPart
|
|
9
|
+
from a2a.utils.constants import AGENT_CARD_WELL_KNOWN_PATH, EXTENDED_AGENT_CARD_PATH, PREV_AGENT_CARD_WELL_KNOWN_PATH
|
|
10
|
+
from httpx import MockTransport, Request, Response
|
|
11
|
+
|
|
12
|
+
from autogen.doc_utils import export_module
|
|
13
|
+
from autogen.remote.httpx_client_factory import HttpxClientFactory
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
@export_module("autogen.a2a")
def MockClient(  # noqa: N802
    response_message: str | dict[str, Any] | TextPart | DataPart | Part,
) -> HttpxClientFactory:
    """Create a mock HTTP client for testing A2A agent interactions.

    The returned factory carries a ``MockTransport`` that answers two kinds of
    requests: agent-card lookups (well-known / extended card paths) get a fixed
    ``AgentCard``, and every other request gets a ``SendMessageSuccessResponse``
    wrapping the configured reply.

    Args:
        response_message: The reply returned for SendMessage requests. A plain
            ``str`` or ``dict`` is wrapped in a ``DataPart`` with an assistant
            role; ``TextPart``/``DataPart``/``Part`` instances are used as-is.

    Returns:
        An HttpxClientFactory configured with a mock transport that handles
        agent-card and message-sending endpoints.

    Raises:
        ValueError: If ``response_message`` is none of the supported types.

    Example:
        >>> client = MockClient("Hello, world!")
        >>> agent = A2aRemoteAgent(name="remote", url="http://fake", client=client)
    """
    # Normalize the configured reply into a single-element list of A2A Parts.
    if isinstance(response_message, Part):
        reply_parts = [response_message]
    elif isinstance(response_message, (DataPart, TextPart)):
        reply_parts = [Part(root=response_message)]
    elif isinstance(response_message, str):
        reply_parts = [Part(root=DataPart(data={"role": "assistant", "content": response_message}))]
    elif isinstance(response_message, dict):
        reply_parts = [Part(root=DataPart(data={"role": "assistant", **response_message}))]
    else:
        raise ValueError(f"Invalid message type: {type(response_message)}")

    async def mock_handler(request: Request) -> Response:
        # Card discovery endpoints get a static, minimal agent card.
        card_paths = (
            AGENT_CARD_WELL_KNOWN_PATH,
            EXTENDED_AGENT_CARD_PATH,
            PREV_AGENT_CARD_WELL_KNOWN_PATH,
        )
        if request.url.path in card_paths:
            card = AgentCard(
                capabilities=AgentCapabilities(streaming=False),
                default_input_modes=["text"],
                default_output_modes=["text"],
                name="mock_agent",
                description="mock_agent",
                url="http://localhost:8000",
                supports_authenticated_extended_card=False,
                version="0.1.0",
                skills=[],
            )
            return Response(status_code=200, content=card.model_dump_json())

        # Any other request is treated as a SendMessage call.
        reply = SendMessageSuccessResponse(
            result=Message(
                message_id=str(uuid4()),
                role=Role.agent,
                parts=reply_parts,
            ),
        )
        return Response(status_code=200, content=reply.model_dump_json())

    return HttpxClientFactory(transport=MockTransport(handler=mock_handler))
|
autogen/a2a/server.py
ADDED
|
@@ -0,0 +1,221 @@
|
|
|
1
|
+
# Copyright (c) 2023 - 2025, AG2ai, Inc., AG2ai open-source projects maintainers and core contributors
|
|
2
|
+
#
|
|
3
|
+
# SPDX-License-Identifier: Apache-2.0
|
|
4
|
+
|
|
5
|
+
import warnings
|
|
6
|
+
from collections.abc import Callable
|
|
7
|
+
from typing import TYPE_CHECKING
|
|
8
|
+
|
|
9
|
+
from a2a.server.request_handlers import DefaultRequestHandler
|
|
10
|
+
from a2a.server.tasks import InMemoryTaskStore
|
|
11
|
+
from a2a.types import AgentCapabilities, AgentCard, AgentSkill
|
|
12
|
+
from pydantic import Field
|
|
13
|
+
|
|
14
|
+
from autogen import ConversableAgent
|
|
15
|
+
from autogen.doc_utils import export_module
|
|
16
|
+
|
|
17
|
+
from .agent_executor import AutogenAgentExecutor
|
|
18
|
+
|
|
19
|
+
if TYPE_CHECKING:
|
|
20
|
+
from a2a.server.agent_execution import RequestContextBuilder
|
|
21
|
+
from a2a.server.apps import CallContextBuilder
|
|
22
|
+
from a2a.server.context import ServerCallContext
|
|
23
|
+
from a2a.server.events import QueueManager
|
|
24
|
+
from a2a.server.request_handlers import RequestHandler
|
|
25
|
+
from a2a.server.tasks import PushNotificationConfigStore, PushNotificationSender, TaskStore
|
|
26
|
+
from starlette.applications import Starlette
|
|
27
|
+
|
|
28
|
+
from autogen import ConversableAgent
|
|
29
|
+
|
|
30
|
+
|
|
31
|
+
@export_module("autogen.a2a")
class CardSettings(AgentCard):
    """A2A ``AgentCard`` subclass that relaxes some required fields.

    Fields left as ``None`` are filled in from the wrapped agent (name,
    description) or from the server (url) when the final card is built.
    """

    name: str | None = None  # type: ignore[assignment]
    """
    A human-readable name for the agent. Uses the original agent name if not set.
    """

    description: str | None = None  # type: ignore[assignment]
    """
    A human-readable description of the agent, assisting users and other agents
    in understanding its purpose. Uses the original agent description if not set.
    """

    url: str | None = None  # type: ignore[assignment]
    """
    The preferred endpoint URL for interacting with the agent.
    This URL MUST support the transport specified by 'preferredTransport'.
    Uses the original A2aAgentServer url if not set.
    """

    version: str = "0.1.0"
    """
    The agent's own version number. The format is defined by the provider.
    """

    default_input_modes: list[str] = Field(default_factory=lambda: ["text"])
    """
    Default set of supported input MIME types for all skills; may be
    overridden per skill.
    """

    default_output_modes: list[str] = Field(default_factory=lambda: ["text"])
    """
    Default set of supported output MIME types for all skills; may be
    overridden per skill.
    """

    capabilities: AgentCapabilities = Field(default_factory=lambda: AgentCapabilities(streaming=True))
    """
    A declaration of optional capabilities supported by the agent.
    """

    skills: list[AgentSkill] = Field(default_factory=list)
    """
    The set of skills, or distinct capabilities, that the agent can perform.
    """
|
|
79
|
+
|
|
80
|
+
|
|
81
|
+
@export_module("autogen.a2a")
class A2aAgentServer:
    """A server wrapper for running an AG2 agent via the A2A protocol.

    This class provides functionality to wrap an AG2 ConversableAgent into an A2A server
    that can be used to interact with the agent through A2A requests.
    """

    def __init__(
        self,
        agent: "ConversableAgent",
        *,
        url: str | None = "http://localhost:8000",
        agent_card: CardSettings | None = None,
        card_modifier: Callable[["AgentCard"], "AgentCard"] | None = None,
        extended_agent_card: CardSettings | None = None,
        extended_card_modifier: Callable[["AgentCard", "ServerCallContext"], "AgentCard"] | None = None,
    ) -> None:
        """Initialize the A2aAgentServer.

        Args:
            agent: The Autogen ConversableAgent to serve.
            url: The base URL for the A2A server.
            agent_card: Configuration for the base agent card.
            card_modifier: Function to modify the base agent card.
            extended_agent_card: Configuration for the extended agent card.
            extended_card_modifier: Function to modify the extended agent card.
        """
        self.agent = agent

        if not agent_card:
            agent_card = CardSettings()

        # `url` keeps its default value when the caller did not pass it; warn
        # only when both an explicit `url` and `agent_card.url` were given.
        if agent_card.url and url != "http://localhost:8000":
            warnings.warn(
                (
                    "You can't use `agent_card.url` and `url` options in the same time. "
                    f"`agent_card.url` has a higher priority, so `{agent_card.url}` will be used."
                ),
                RuntimeWarning,
                stacklevel=2,
            )

        self.card = AgentCard.model_validate({
            # use agent options by default
            "name": agent.name,
            "description": agent.description,
            "url": url,
            "supports_authenticated_extended_card": extended_agent_card is not None,
            # agent_card values override the defaults above; None fields are
            # excluded so agent name/description/url survive when unset
            **agent_card.model_dump(exclude_none=True),
        })

        self.extended_agent_card: AgentCard | None = None
        if extended_agent_card:
            if extended_agent_card.url and url != "http://localhost:8000":
                # BUGFIX: this warning previously claimed `agent_card.url` takes
                # priority, but it is `extended_agent_card.url` that wins here.
                warnings.warn(
                    (
                        "You can't use `extended_agent_card.url` and `url` options in the same time. "
                        f"`extended_agent_card.url` has a higher priority, so `{extended_agent_card.url}` will be used."
                    ),
                    RuntimeWarning,
                    stacklevel=2,
                )

            self.extended_agent_card = AgentCard.model_validate({
                "name": agent.name,
                "description": agent.description,
                "url": url,
                **extended_agent_card.model_dump(exclude_none=True),
            })

        self.card_modifier = card_modifier
        self.extended_card_modifier = extended_card_modifier

    @property
    def executor(self) -> AutogenAgentExecutor:
        """Get the A2A agent executor (a fresh instance per access)."""
        return AutogenAgentExecutor(self.agent)

    def build_request_handler(
        self,
        *,
        task_store: "TaskStore | None" = None,
        queue_manager: "QueueManager | None" = None,
        push_config_store: "PushNotificationConfigStore | None" = None,
        push_sender: "PushNotificationSender | None" = None,
        request_context_builder: "RequestContextBuilder | None" = None,
    ) -> "RequestHandler":
        """Build a request handler for A2A application.

        Args:
            task_store: The task store to use. Defaults to an in-memory store.
            queue_manager: The queue manager to use.
            push_config_store: The push notification config store to use.
            push_sender: The push notification sender to use.
            request_context_builder: The request context builder to use.

        Returns:
            A configured RequestHandler instance.
        """
        return DefaultRequestHandler(
            agent_executor=self.executor,
            task_store=task_store or InMemoryTaskStore(),
            queue_manager=queue_manager,
            push_config_store=push_config_store,
            push_sender=push_sender,
            request_context_builder=request_context_builder,
        )

    def build_starlette_app(
        self,
        *,
        request_handler: "RequestHandler | None" = None,
        context_builder: "CallContextBuilder | None" = None,
    ) -> "Starlette":
        """Build a Starlette A2A application for ASGI server.

        Args:
            request_handler: The request handler to use. Defaults to a
                DefaultRequestHandler backed by an in-memory task store.
            context_builder: The context builder to use.

        Returns:
            A configured Starlette application instance.
        """
        # imported lazily to keep starlette an optional dependency
        from a2a.server.apps import A2AStarletteApplication

        return A2AStarletteApplication(
            agent_card=self.card,
            extended_agent_card=self.extended_agent_card,
            http_handler=request_handler
            or DefaultRequestHandler(
                agent_executor=self.executor,
                task_store=InMemoryTaskStore(),
            ),
            context_builder=context_builder,
            card_modifier=self.card_modifier,
            extended_card_modifier=self.extended_card_modifier,
        ).build()

    build = build_starlette_app  # default alias for build_starlette_app
|
autogen/a2a/utils.py
ADDED
|
@@ -0,0 +1,165 @@
|
|
|
1
|
+
# Copyright (c) 2023 - 2025, AG2ai, Inc., AG2ai open-source projects maintainers and core contributors
|
|
2
|
+
#
|
|
3
|
+
# SPDX-License-Identifier: Apache-2.0
|
|
4
|
+
|
|
5
|
+
from typing import Any, cast
|
|
6
|
+
from uuid import uuid4
|
|
7
|
+
|
|
8
|
+
from a2a.types import Artifact, DataPart, Message, Part, Role, TextPart
|
|
9
|
+
from a2a.utils import new_agent_parts_message, new_artifact
|
|
10
|
+
|
|
11
|
+
from autogen.remote.protocol import RequestMessage, ResponseMessage
|
|
12
|
+
|
|
13
|
+
CLIENT_TOOLS_KEY = "ag2_client_tools"
|
|
14
|
+
CONTEXT_KEY = "ag2_context_update"
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
def request_message_to_a2a(
    request_message: RequestMessage,
    context_id: str,
) -> Message:
    """Convert an AG2 ``RequestMessage`` into an A2A user ``Message``.

    Client tools and context updates travel in the message metadata under
    ``CLIENT_TOOLS_KEY`` / ``CONTEXT_KEY``.
    """
    meta: dict[str, Any] = {}
    if request_message.client_tools:
        meta[CLIENT_TOOLS_KEY] = request_message.client_tools
    if request_message.context:
        meta[CONTEXT_KEY] = request_message.context

    converted_parts = [message_to_part(m) for m in request_message.messages]
    return Message(
        role=Role.user,
        parts=converted_parts,
        message_id=uuid4().hex,
        context_id=context_id,
        metadata=meta,
    )
|
|
34
|
+
|
|
35
|
+
|
|
36
|
+
def request_message_from_a2a(message: Message) -> RequestMessage:
    """Convert an inbound A2A ``Message`` into an AG2 ``RequestMessage``.

    Context and client tools are recovered from the message metadata.
    """
    meta = message.metadata if message.metadata is not None else {}
    converted = [message_from_part(p) for p in message.parts]
    return RequestMessage(
        messages=converted,
        context=meta.get(CONTEXT_KEY),
        client_tools=meta.get(CLIENT_TOOLS_KEY, []),
    )
|
|
43
|
+
|
|
44
|
+
|
|
45
|
+
def response_message_from_a2a_artifacts(artifacts: list[Artifact] | None) -> ResponseMessage | None:
    """Build a ``ResponseMessage`` from A2A task artifacts.

    Returns ``None`` when there are no artifacts or the single artifact has
    no parts. Exactly one artifact with exactly one part is supported.

    Raises:
        NotImplementedError: For multiple artifacts or multiple parts.
    """
    if not artifacts:
        return None
    if len(artifacts) > 1:
        raise NotImplementedError("Multiple artifacts are not supported")

    artifact = artifacts[-1]
    parts = artifact.parts
    if not parts:
        return None
    if len(parts) > 1:
        raise NotImplementedError("Multiple parts are not supported")

    metadata = artifact.metadata or {}
    return ResponseMessage(
        messages=[message_from_part(parts[-1])],
        context=metadata.get(CONTEXT_KEY),
    )
|
|
64
|
+
|
|
65
|
+
|
|
66
|
+
def response_message_from_a2a_message(message: Message) -> ResponseMessage | None:
    """Build a ``ResponseMessage`` from a direct A2A agent ``Message``.

    A single data part OR text parts are supported, not both. With several
    text parts, their texts are joined with newlines into one message.

    Raises:
        NotImplementedError: For unsupported part types, multiple data parts,
            or a mix of data and text parts.
    """
    text_parts: list[Part] = []
    data_parts: list[Part] = []
    for part in message.parts:
        root = part.root
        if isinstance(root, TextPart):
            text_parts.append(part)
        elif isinstance(root, DataPart):
            data_parts.append(part)
        else:
            raise NotImplementedError(f"Unsupported part type: {type(part.root)}")

    n_text = len(text_parts)
    n_data = len(data_parts)
    if n_data:
        if n_data > 1:
            raise NotImplementedError("Multiple data parts are not supported")
        if n_text:
            raise NotImplementedError("Data parts and text parts are not supported together")
        messages = [message_from_part(data_parts[0])]
    elif n_text == 1:
        messages = [message_from_part(text_parts[0])]
    else:
        # zero or several text parts: merge all texts into one content string
        messages = [{"content": "\n".join(cast(TextPart, t.root).text for t in text_parts)}]

    return ResponseMessage(
        messages=messages,
        context=(message.metadata or {}).get(CONTEXT_KEY),
    )
|
|
95
|
+
|
|
96
|
+
|
|
97
|
+
def response_message_to_a2a(
    result: ResponseMessage | None,
    context_id: str | None,
    task_id: str | None,
) -> tuple[Artifact, list[Message]]:
    """Convert an AG2 ``ResponseMessage`` into an A2A artifact plus messages.

    The artifact carries only the final message; the message list carries all
    of them. Context updates are attached as artifact metadata.
    """
    # mypy ignores could be removed after
    # https://github.com/a2aproject/a2a-python/pull/503

    if not result:
        empty_artifact = new_artifact(
            name="result",
            parts=[],
            description=None,  # type: ignore[arg-type]
        )
        return empty_artifact, []

    artifact = new_artifact(
        name="result",
        parts=[message_to_part(result.messages[-1])],
        description=None,  # type: ignore[arg-type]
    )
    if result.context:
        artifact.metadata = {CONTEXT_KEY: result.context}

    status_message = new_agent_parts_message(
        parts=[message_to_part(m) for m in result.messages],
        context_id=context_id,
        task_id=task_id,
    )
    return artifact, [status_message]
|
|
131
|
+
|
|
132
|
+
|
|
133
|
+
def message_to_part(message: dict[str, Any]) -> Part:
    """Wrap an AG2 message dict into an A2A ``TextPart``.

    The "content" value becomes the part text; every remaining key is kept
    as part metadata (or ``None`` when nothing is left). The input dict is
    not mutated.
    """
    remainder = dict(message)
    text = remainder.pop("content", "") or ""
    extra = remainder or None
    return Part(root=TextPart(text=text, metadata=extra))
|
|
142
|
+
|
|
143
|
+
|
|
144
|
+
def message_from_part(part: Part) -> dict[str, Any]:
    """Unwrap an A2A ``Part`` back into an AG2 message dict.

    TextPart: metadata keys plus a "content" entry holding the text.
    DataPart: the raw data dict, with a special case for pydantic-ai style
    ``{"result": {...}}`` payloads carrying a json_schema in metadata.

    Raises:
        NotImplementedError: For any other part type.
    """
    root = part.root

    if isinstance(root, TextPart):
        payload = dict(root.metadata or {})
        payload["content"] = root.text
        return payload

    if isinstance(root, DataPart):
        if (  # pydantic-ai specific
            set(root.data.keys()) == {"result"}
            and root.metadata
            and "json_schema" in root.metadata
            and isinstance(data := root.data["result"], dict)
        ):
            return data

        return root.data

    raise NotImplementedError(f"Unsupported part type: {type(part.root)}")
|
autogen/agentchat/__init__.py
CHANGED
|
@@ -15,6 +15,7 @@ from .contrib.swarm_agent import (
|
|
|
15
15
|
run_swarm,
|
|
16
16
|
)
|
|
17
17
|
from .conversable_agent import ConversableAgent, UpdateSystemMessage, register_function
|
|
18
|
+
from .group import ContextVariables, ReplyResult
|
|
18
19
|
from .group.multi_agent_chat import a_initiate_group_chat, a_run_group_chat, initiate_group_chat, run_group_chat
|
|
19
20
|
from .groupchat import GroupChat, GroupChatManager
|
|
20
21
|
from .user_proxy_agent import UserProxyAgent
|
|
@@ -24,10 +25,12 @@ __all__ = [
|
|
|
24
25
|
"Agent",
|
|
25
26
|
"AssistantAgent",
|
|
26
27
|
"ChatResult",
|
|
28
|
+
"ContextVariables",
|
|
27
29
|
"ConversableAgent",
|
|
28
30
|
"GroupChat",
|
|
29
31
|
"GroupChatManager",
|
|
30
32
|
"LLMAgent",
|
|
33
|
+
"ReplyResult",
|
|
31
34
|
"UpdateSystemMessage",
|
|
32
35
|
"UserProxyAgent",
|
|
33
36
|
"a_initiate_chats",
|
autogen/agentchat/agent.py
CHANGED
|
@@ -105,7 +105,6 @@ class Agent(Protocol):
|
|
|
105
105
|
self,
|
|
106
106
|
messages: list[dict[str, Any]] | None = None,
|
|
107
107
|
sender: Optional["Agent"] = None,
|
|
108
|
-
**kwargs: Any,
|
|
109
108
|
) -> str | dict[str, Any] | None:
|
|
110
109
|
"""Generate a reply based on the received messages.
|
|
111
110
|
|
|
@@ -124,7 +123,6 @@ class Agent(Protocol):
|
|
|
124
123
|
self,
|
|
125
124
|
messages: list[dict[str, Any]] | None = None,
|
|
126
125
|
sender: Optional["Agent"] = None,
|
|
127
|
-
**kwargs: Any,
|
|
128
126
|
) -> str | dict[str, Any] | None:
|
|
129
127
|
"""(Async) Generate a reply based on the received messages.
|
|
130
128
|
|
autogen/agentchat/chat.py
CHANGED
|
@@ -14,6 +14,7 @@ from dataclasses import dataclass, field
|
|
|
14
14
|
from functools import partial
|
|
15
15
|
from typing import Any, TypedDict
|
|
16
16
|
|
|
17
|
+
from ..code_utils import content_str
|
|
17
18
|
from ..doc_utils import export_module
|
|
18
19
|
from ..events.agent_events import PostCarryoverProcessingEvent
|
|
19
20
|
from ..io.base import IOStream
|
|
@@ -132,7 +133,10 @@ def _post_process_carryover_item(carryover_item):
|
|
|
132
133
|
if isinstance(carryover_item, str):
|
|
133
134
|
return carryover_item
|
|
134
135
|
elif isinstance(carryover_item, dict) and "content" in carryover_item:
|
|
135
|
-
|
|
136
|
+
content_value = carryover_item.get("content")
|
|
137
|
+
if isinstance(content_value, (str, list)) or content_value is None:
|
|
138
|
+
return content_str(content_value)
|
|
139
|
+
return str(content_value)
|
|
136
140
|
else:
|
|
137
141
|
return str(carryover_item)
|
|
138
142
|
|
|
@@ -6,7 +6,6 @@
|
|
|
6
6
|
# SPDX-License-Identifier: MIT
|
|
7
7
|
import json
|
|
8
8
|
import logging
|
|
9
|
-
import warnings
|
|
10
9
|
from typing import Any
|
|
11
10
|
|
|
12
11
|
import requests
|
|
@@ -85,18 +84,7 @@ class LLaVAAgent(MultimodalConversableAgent):
|
|
|
85
84
|
retry = 10
|
|
86
85
|
while len(out) == 0 and retry > 0:
|
|
87
86
|
# image names will be inferred automatically from llava_call
|
|
88
|
-
|
|
89
|
-
warnings.warn(
|
|
90
|
-
(
|
|
91
|
-
"`max_new_tokens` is deprecated in `llm_config` for llava agents. "
|
|
92
|
-
"Use `max_tokens` instead. "
|
|
93
|
-
"Scheduled for removal in 0.10.0 version."
|
|
94
|
-
),
|
|
95
|
-
DeprecationWarning,
|
|
96
|
-
)
|
|
97
|
-
max_tokens = self.llm_config["max_new_tokens"]
|
|
98
|
-
else:
|
|
99
|
-
max_tokens = self.llm_config.get("max_tokens")
|
|
87
|
+
max_tokens = self.llm_config.get("max_tokens")
|
|
100
88
|
|
|
101
89
|
out = llava_call_binary(
|
|
102
90
|
prompt=prompt,
|