fast-agent-mcp 0.1.8__py3-none-any.whl → 0.1.10__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {fast_agent_mcp-0.1.8.dist-info → fast_agent_mcp-0.1.10.dist-info}/METADATA +27 -4
- {fast_agent_mcp-0.1.8.dist-info → fast_agent_mcp-0.1.10.dist-info}/RECORD +51 -30
- {fast_agent_mcp-0.1.8.dist-info → fast_agent_mcp-0.1.10.dist-info}/entry_points.txt +1 -0
- mcp_agent/agents/agent.py +114 -8
- mcp_agent/context.py +0 -2
- mcp_agent/core/agent_app.py +89 -13
- mcp_agent/core/factory.py +14 -13
- mcp_agent/core/fastagent.py +15 -5
- mcp_agent/core/mcp_content.py +222 -0
- mcp_agent/core/prompt.py +132 -0
- mcp_agent/core/proxies.py +79 -36
- mcp_agent/logging/listeners.py +3 -6
- mcp_agent/logging/transport.py +30 -3
- mcp_agent/mcp/mcp_agent_client_session.py +21 -145
- mcp_agent/mcp/mcp_aggregator.py +61 -12
- mcp_agent/mcp/mcp_connection_manager.py +0 -1
- mcp_agent/mcp/mime_utils.py +69 -0
- mcp_agent/mcp/prompt_message_multipart.py +64 -0
- mcp_agent/mcp/prompt_serialization.py +447 -0
- mcp_agent/mcp/prompts/__init__.py +0 -0
- mcp_agent/mcp/prompts/__main__.py +10 -0
- mcp_agent/mcp/prompts/prompt_server.py +509 -0
- mcp_agent/mcp/prompts/prompt_template.py +469 -0
- mcp_agent/mcp/resource_utils.py +223 -0
- mcp_agent/mcp/stdio.py +23 -15
- mcp_agent/mcp_server_registry.py +5 -2
- mcp_agent/resources/examples/internal/agent.py +1 -1
- mcp_agent/resources/examples/internal/fastagent.config.yaml +2 -2
- mcp_agent/resources/examples/internal/sizer.py +0 -5
- mcp_agent/resources/examples/prompting/__init__.py +3 -0
- mcp_agent/resources/examples/prompting/agent.py +23 -0
- mcp_agent/resources/examples/prompting/fastagent.config.yaml +44 -0
- mcp_agent/resources/examples/prompting/image_server.py +56 -0
- mcp_agent/resources/examples/workflows/orchestrator.py +3 -3
- mcp_agent/workflows/llm/anthropic_utils.py +101 -0
- mcp_agent/workflows/llm/augmented_llm.py +139 -66
- mcp_agent/workflows/llm/augmented_llm_anthropic.py +127 -251
- mcp_agent/workflows/llm/augmented_llm_openai.py +149 -305
- mcp_agent/workflows/llm/augmented_llm_passthrough.py +99 -1
- mcp_agent/workflows/llm/augmented_llm_playback.py +109 -0
- mcp_agent/workflows/llm/model_factory.py +20 -3
- mcp_agent/workflows/llm/openai_utils.py +65 -0
- mcp_agent/workflows/llm/providers/__init__.py +8 -0
- mcp_agent/workflows/llm/providers/multipart_converter_anthropic.py +348 -0
- mcp_agent/workflows/llm/providers/multipart_converter_openai.py +426 -0
- mcp_agent/workflows/llm/providers/openai_multipart.py +197 -0
- mcp_agent/workflows/llm/providers/sampling_converter_anthropic.py +258 -0
- mcp_agent/workflows/llm/providers/sampling_converter_openai.py +229 -0
- mcp_agent/workflows/llm/sampling_format_converter.py +39 -0
- mcp_agent/core/server_validation.py +0 -44
- mcp_agent/core/simulator_registry.py +0 -22
- mcp_agent/workflows/llm/enhanced_passthrough.py +0 -70
- {fast_agent_mcp-0.1.8.dist-info → fast_agent_mcp-0.1.10.dist-info}/WHEEL +0 -0
- {fast_agent_mcp-0.1.8.dist-info → fast_agent_mcp-0.1.10.dist-info}/licenses/LICENSE +0 -0
mcp_agent/mcp/mcp_agent_client_session.py
CHANGED
@@ -7,7 +7,6 @@ from typing import Optional
 
 from mcp import ClientSession
 from mcp.shared.session import (
-    RequestResponder,
     ReceiveResultT,
     ReceiveNotificationT,
     RequestId,
@@ -16,18 +15,11 @@ from mcp.shared.session import (
     SendResultT,
 )
 from mcp.types import (
-    ClientResult,
-    CreateMessageRequest,
-    CreateMessageResult,
     ErrorData,
-    JSONRPCNotification,
-    JSONRPCRequest,
-    ServerRequest,
-    TextContent,
-    ListRootsRequest,
     ListRootsResult,
     Root,
 )
+from pydantic import AnyUrl
 
 from mcp_agent.config import MCPServerSettings
 from mcp_agent.context_dependent import ContextDependent
@@ -36,6 +28,24 @@ from mcp_agent.logging.logger import get_logger
 logger = get_logger(__name__)
 
 
+async def list_roots(ctx: ClientSession) -> ListRootsResult:
+    """List roots callback that will be called by the MCP library."""
+
+    roots = []
+    if (
+        hasattr(ctx, "session")
+        and hasattr(ctx.session, "server_config")
+        and ctx.session.server_config
+        and hasattr(ctx.session.server_config, "roots")
+        and ctx.session.server_config.roots
+    ):
+        roots = [
+            Root(uri=AnyUrl(root.uri), name=root.name)
+            for root in ctx.session.server_config.roots
+        ]
+    return ListRootsResult(roots=roots or [])
+
+
 class MCPAgentClientSession(ClientSession, ContextDependent):
     """
     MCP Agent framework acts as a client to the servers providing tools/resources/prompts for the agent workloads.
@@ -48,36 +58,9 @@ class MCPAgentClientSession(ClientSession, ContextDependent):
     """
 
     def __init__(self, *args, **kwargs):
-        super().__init__(*args, **kwargs)
+        super().__init__(*args, **kwargs, list_roots_callback=list_roots)
         self.server_config: Optional[MCPServerSettings] = None
 
-    async def _received_request(
-        self, responder: RequestResponder[ServerRequest, ClientResult]
-    ) -> None:
-        logger.debug("Received request:", data=responder.request.model_dump())
-        request = responder.request.root
-
-        if isinstance(request, CreateMessageRequest):
-            return await self.handle_sampling_request(request, responder)
-        elif isinstance(request, ListRootsRequest):
-            # Handle list_roots request by returning configured roots
-            if hasattr(self, "server_config") and self.server_config.roots:
-                roots = [
-                    Root(
-                        uri=root.server_uri_alias or root.uri,
-                        name=root.name,
-                    )
-                    for root in self.server_config.roots
-                ]
-
-                await responder.respond(ListRootsResult(roots=roots))
-            else:
-                await responder.respond(ListRootsResult(roots=[]))
-            return
-
-        # Handle other requests as usual
-        await super()._received_request(responder)
-
     async def send_request(
         self,
         request: SendRequestT,
@@ -89,7 +72,7 @@ class MCPAgentClientSession(ClientSession, ContextDependent):
             logger.debug("send_request: response=", data=result.model_dump())
             return result
         except Exception as e:
-            logger.error(f"send_request failed: {e}")
+            logger.error(f"send_request failed: {str(e)}")
             raise
 
     async def send_notification(self, notification: SendNotificationT) -> None:
@@ -133,110 +116,3 @@ class MCPAgentClientSession(ClientSession, ContextDependent):
         return await super().send_progress_notification(
             progress_token=progress_token, progress=progress, total=total
         )
-
-    async def _receive_loop(self) -> None:
-        async with (
-            self._read_stream,
-            self._write_stream,
-            self._incoming_message_stream_writer,
-        ):
-            async for message in self._read_stream:
-                if isinstance(message, Exception):
-                    await self._incoming_message_stream_writer.send(message)
-                elif isinstance(message.root, JSONRPCRequest):
-                    validated_request = self._receive_request_type.model_validate(
-                        message.root.model_dump(
-                            by_alias=True, mode="json", exclude_none=True
-                        )
-                    )
-                    responder = RequestResponder(
-                        request_id=message.root.id,
-                        request_meta=validated_request.root.params.meta
-                        if validated_request.root.params
-                        else None,
-                        request=validated_request,
-                        session=self,
-                    )
-
-                    await self._received_request(responder)
-                    if not responder._responded:
-                        await self._incoming_message_stream_writer.send(responder)
-                elif isinstance(message.root, JSONRPCNotification):
-                    notification = self._receive_notification_type.model_validate(
-                        message.root.model_dump(
-                            by_alias=True, mode="json", exclude_none=True
-                        )
-                    )
-
-                    await self._received_notification(notification)
-                    await self._incoming_message_stream_writer.send(notification)
-                else:  # Response or error
-                    stream = self._response_streams.pop(message.root.id, None)
-                    if stream:
-                        await stream.send(message.root)
-                    else:
-                        await self._incoming_message_stream_writer.send(
-                            RuntimeError(
-                                "Received response with an unknown "
-                                f"request ID: {message}"
-                            )
-                        )
-
-    async def handle_sampling_request(
-        self,
-        request: CreateMessageRequest,
-        responder: RequestResponder[ServerRequest, ClientResult],
-    ):
-        logger.info("Handling sampling request: %s", request)
-        config = self.context.config
-        session = self.context.upstream_session
-        if session is None:
-            # TODO: saqadri - consider whether we should be handling the sampling request here as a client
-            logger.warning(
-                "Error: No upstream client available for sampling requests. Request:",
-                data=request,
-            )
-            try:
-                from anthropic import AsyncAnthropic
-
-                client = AsyncAnthropic(api_key=config.anthropic.api_key)
-
-                params = request.params
-                response = await client.messages.create(
-                    model="claude-3-sonnet-20240229",
-                    max_tokens=params.maxTokens,
-                    messages=[
-                        {
-                            "role": m.role,
-                            "content": m.content.text
-                            if hasattr(m.content, "text")
-                            else m.content.data,
-                        }
-                        for m in params.messages
-                    ],
-                    system=getattr(params, "systemPrompt", None),
-                    temperature=getattr(params, "temperature", 0.7),
-                    stop_sequences=getattr(params, "stopSequences", None),
-                )
-
-                await responder.respond(
-                    CreateMessageResult(
-                        model="claude-3-sonnet-20240229",
-                        role="assistant",
-                        content=TextContent(type="text", text=response.content[0].text),
-                    )
-                )
-            except Exception as e:
-                logger.error(f"Error handling sampling request: {e}")
-                await responder.respond(ErrorData(code=-32603, message=str(e)))
-        else:
-            try:
-                # If a session is available, we'll pass-through the sampling request to the upstream client
-                result = await session.send_request(
-                    request=ServerRequest(request), result_type=CreateMessageResult
-                )
-
-                # Pass the result from the upstream client back to the server. We just act as a pass-through client here.
-                await responder.send_result(result)
-            except Exception as e:
-                await responder.send_error(code=-32603, message=str(e))
mcp_agent/mcp/mcp_aggregator.py
CHANGED
@@ -8,8 +8,8 @@ from typing import (
     Callable,
     TypeVar,
 )
-from mcp import GetPromptResult
-from pydantic import BaseModel, ConfigDict
+from mcp import GetPromptResult, ReadResourceResult
+from pydantic import AnyUrl, BaseModel, ConfigDict
 from mcp.client.session import ClientSession
 from mcp.server.lowlevel.server import Server
 from mcp.server.stdio import stdio_server
@@ -210,6 +210,7 @@ class MCPAggregator(ContextDependent):
                 "agent_name": self.agent_name,
             },
         )
+
         await self._persistent_connection_manager.get_server(
             server_name, client_session_factory=MCPAgentClientSession
         )
@@ -370,20 +371,11 @@ class MCPAggregator(ContextDependent):
         Returns:
             Result from the operation or an error result
         """
-        logger.info(
-            f"Requesting {operation_type}",
-            data={
-                "progress_action": ProgressAction.STARTING,
-                f"{operation_type}_name": operation_name,
-                "server_name": server_name,
-                "agent_name": self.agent_name,
-            },
-        )
 
         async def try_execute(client: ClientSession):
             try:
                 method = getattr(client, method_name)
-                return await method(**method_args)
+                return await method(**method_args)
             except Exception as e:
                 error_msg = f"Failed to {method_name} '{operation_name}' on server '{server_name}': {e}"
                 logger.error(error_msg)
@@ -469,6 +461,16 @@ class MCPAggregator(ContextDependent):
             logger.error(f"Error: Tool '{name}' not found")
             return CallToolResult(isError=True, message=f"Tool '{name}' not found")
 
+        logger.info(
+            "Requesting tool call",
+            data={
+                "progress_action": ProgressAction.CALLING_TOOL,
+                "tool_name": local_tool_name,
+                "server_name": server_name,
+                "agent_name": self.agent_name,
+            },
+        )
+
         return await self._execute_on_server(
             server_name=server_name,
             operation_type="tool",
@@ -820,6 +822,53 @@ class MCPAggregator(ContextDependent):
         logger.debug(f"Available prompts across servers: {results}")
         return results
 
+    async def get_resource(
+        self, server_name: str, resource_uri: str
+    ) -> ReadResourceResult:
+        """
+        Get a resource directly from an MCP server by URI.
+
+        Args:
+            server_name: Name of the MCP server to retrieve the resource from
+            resource_uri: URI of the resource to retrieve
+
+        Returns:
+            ReadResourceResult object containing the resource content
+
+        Raises:
+            ValueError: If the server doesn't exist or the resource couldn't be found
+        """
+        if not self.initialized:
+            await self.load_servers()
+
+        if server_name not in self.server_names:
+            raise ValueError(f"Server '{server_name}' not found")
+
+        logger.info(
+            "Requesting resource",
+            data={
+                "progress_action": ProgressAction.CALLING_TOOL,
+                "resource_uri": resource_uri,
+                "server_name": server_name,
+                "agent_name": self.agent_name,
+            },
+        )
+
+        try:
+            uri = AnyUrl(resource_uri)
+        except Exception as e:
+            raise ValueError(f"Invalid resource URI: {resource_uri}. Error: {e}")
+
+        # Use the _execute_on_server method to call read_resource on the server
+        return await self._execute_on_server(
+            server_name=server_name,
+            operation_type="resource",
+            operation_name=resource_uri,
+            method_name="read_resource",
+            method_args={"uri": uri},
+            error_factory=lambda msg: ValueError(f"Failed to retrieve resource: {msg}"),
+        )
+
 
 class MCPCompoundServer(Server):
     """
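For context, a hedged usage sketch of the new `MCPAggregator.get_resource` helper added above; `aggregator` is assumed to be an already-constructed `MCPAggregator`, and the server name and URI are placeholders.

```python
from mcp import ReadResourceResult


async def fetch_readme(aggregator) -> ReadResourceResult:
    # Raises ValueError for an unknown server or an invalid URI,
    # per the docstring in the diff above.
    return await aggregator.get_resource("docs_server", "resource://docs/readme")
```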
mcp_agent/mcp/mcp_connection_manager.py
CHANGED
@@ -163,7 +163,6 @@ async def _server_lifecycle_task(server_conn: ServerConnection) -> None:
     async with transport_context as (read_stream, write_stream):
         # try:
         server_conn.create_session(read_stream, write_stream)
-        # except FileNotFoundError as e:
 
         async with server_conn.session:
             await server_conn.initialize_session()
mcp_agent/mcp/mime_utils.py
ADDED
@@ -0,0 +1,69 @@
+# mime_utils.py
+
+import mimetypes
+
+# Initialize mimetypes database
+mimetypes.init()
+
+# Extend with additional types that might be missing
+mimetypes.add_type("text/x-python", ".py")
+mimetypes.add_type("image/webp", ".webp")
+
+# Known text-based MIME types not starting with "text/"
+TEXT_MIME_TYPES = {
+    "application/json",
+    "application/javascript",
+    "application/xml",
+    "application/ld+json",
+    "application/xhtml+xml",
+    "application/x-httpd-php",
+    "application/x-sh",
+    "application/ecmascript",
+    "application/graphql",
+    "application/x-www-form-urlencoded",
+    "application/yaml",
+    "application/toml",
+    "application/x-python-code",
+    "application/vnd.api+json",
+}
+
+# Common text-based MIME type patterns
+TEXT_MIME_PATTERNS = ("+xml", "+json", "+yaml", "+text")
+
+
+def guess_mime_type(file_path: str) -> str:
+    """
+    Guess the MIME type of a file based on its extension.
+    """
+    mime_type, _ = mimetypes.guess_type(file_path)
+    return mime_type or "application/octet-stream"
+
+
+def is_text_mime_type(mime_type: str) -> bool:
+    """Determine if a MIME type represents text content."""
+    if not mime_type:
+        return False
+
+    # Standard text types
+    if mime_type.startswith("text/"):
+        return True
+
+    # Known text types
+    if mime_type in TEXT_MIME_TYPES:
+        return True
+
+    # Common text patterns
+    if any(mime_type.endswith(pattern) for pattern in TEXT_MIME_PATTERNS):
+        return True
+
+    return False
+
+
+def is_binary_content(mime_type: str) -> bool:
+    """Check if content should be treated as binary."""
+    return not is_text_mime_type(mime_type)
+
+
+def is_image_mime_type(mime_type: str) -> bool:
+    """Check if a MIME type represents an image."""
+    return mime_type.startswith("image/") and mime_type != "image/svg+xml"
mcp_agent/mcp/prompt_message_multipart.py
ADDED
@@ -0,0 +1,64 @@
+from typing import List, Union
+from pydantic import BaseModel
+
+from mcp.types import (
+    PromptMessage,
+    TextContent,
+    ImageContent,
+    EmbeddedResource,
+    Role,
+    GetPromptResult,
+)
+
+
+class PromptMessageMultipart(BaseModel):
+    """
+    Extension of PromptMessage that handles multiple content parts.
+    Internally converts to/from a sequence of standard PromptMessages.
+    """
+
+    role: Role
+    content: List[Union[TextContent, ImageContent, EmbeddedResource]]
+
+    @classmethod
+    def from_prompt_messages(
+        cls, messages: List[PromptMessage]
+    ) -> List["PromptMessageMultipart"]:
+        """Convert a sequence of PromptMessages into PromptMessageMultipart objects."""
+        if not messages:
+            return []
+
+        result = []
+        current_group = None
+        current_role = None
+
+        for msg in messages:
+            if msg.role != current_role:
+                # Role changed, start new message
+                if current_group is not None:
+                    result.append(current_group)
+                current_role = msg.role
+                current_group = cls(role=msg.role, content=[msg.content])
+            else:
+                # Same role, add to current message
+                current_group.content.append(msg.content)
+
+        # Add the last group
+        if current_group is not None:
+            result.append(current_group)
+
+        return result
+
+    def to_prompt_messages(self) -> List[PromptMessage]:
+        """Convert this PromptMessageMultipart to a sequence of standard PromptMessages."""
+        return [
+            PromptMessage(role=self.role, content=content_part)
+            for content_part in self.content
+        ]
+
+    @classmethod
+    def parse_get_prompt_result(
+        cls, result: GetPromptResult
+    ) -> List["PromptMessageMultipart"]:
+        """Parse a GetPromptResult into PromptMessageMultipart objects."""
+        return cls.from_prompt_messages(result.messages)