fast-agent-mcp 0.1.8__py3-none-any.whl → 0.1.10__py3-none-any.whl
This diff compares publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
- {fast_agent_mcp-0.1.8.dist-info → fast_agent_mcp-0.1.10.dist-info}/METADATA +27 -4
- {fast_agent_mcp-0.1.8.dist-info → fast_agent_mcp-0.1.10.dist-info}/RECORD +51 -30
- {fast_agent_mcp-0.1.8.dist-info → fast_agent_mcp-0.1.10.dist-info}/entry_points.txt +1 -0
- mcp_agent/agents/agent.py +114 -8
- mcp_agent/context.py +0 -2
- mcp_agent/core/agent_app.py +89 -13
- mcp_agent/core/factory.py +14 -13
- mcp_agent/core/fastagent.py +15 -5
- mcp_agent/core/mcp_content.py +222 -0
- mcp_agent/core/prompt.py +132 -0
- mcp_agent/core/proxies.py +79 -36
- mcp_agent/logging/listeners.py +3 -6
- mcp_agent/logging/transport.py +30 -3
- mcp_agent/mcp/mcp_agent_client_session.py +21 -145
- mcp_agent/mcp/mcp_aggregator.py +61 -12
- mcp_agent/mcp/mcp_connection_manager.py +0 -1
- mcp_agent/mcp/mime_utils.py +69 -0
- mcp_agent/mcp/prompt_message_multipart.py +64 -0
- mcp_agent/mcp/prompt_serialization.py +447 -0
- mcp_agent/mcp/prompts/__init__.py +0 -0
- mcp_agent/mcp/prompts/__main__.py +10 -0
- mcp_agent/mcp/prompts/prompt_server.py +509 -0
- mcp_agent/mcp/prompts/prompt_template.py +469 -0
- mcp_agent/mcp/resource_utils.py +223 -0
- mcp_agent/mcp/stdio.py +23 -15
- mcp_agent/mcp_server_registry.py +5 -2
- mcp_agent/resources/examples/internal/agent.py +1 -1
- mcp_agent/resources/examples/internal/fastagent.config.yaml +2 -2
- mcp_agent/resources/examples/internal/sizer.py +0 -5
- mcp_agent/resources/examples/prompting/__init__.py +3 -0
- mcp_agent/resources/examples/prompting/agent.py +23 -0
- mcp_agent/resources/examples/prompting/fastagent.config.yaml +44 -0
- mcp_agent/resources/examples/prompting/image_server.py +56 -0
- mcp_agent/resources/examples/workflows/orchestrator.py +3 -3
- mcp_agent/workflows/llm/anthropic_utils.py +101 -0
- mcp_agent/workflows/llm/augmented_llm.py +139 -66
- mcp_agent/workflows/llm/augmented_llm_anthropic.py +127 -251
- mcp_agent/workflows/llm/augmented_llm_openai.py +149 -305
- mcp_agent/workflows/llm/augmented_llm_passthrough.py +99 -1
- mcp_agent/workflows/llm/augmented_llm_playback.py +109 -0
- mcp_agent/workflows/llm/model_factory.py +20 -3
- mcp_agent/workflows/llm/openai_utils.py +65 -0
- mcp_agent/workflows/llm/providers/__init__.py +8 -0
- mcp_agent/workflows/llm/providers/multipart_converter_anthropic.py +348 -0
- mcp_agent/workflows/llm/providers/multipart_converter_openai.py +426 -0
- mcp_agent/workflows/llm/providers/openai_multipart.py +197 -0
- mcp_agent/workflows/llm/providers/sampling_converter_anthropic.py +258 -0
- mcp_agent/workflows/llm/providers/sampling_converter_openai.py +229 -0
- mcp_agent/workflows/llm/sampling_format_converter.py +39 -0
- mcp_agent/core/server_validation.py +0 -44
- mcp_agent/core/simulator_registry.py +0 -22
- mcp_agent/workflows/llm/enhanced_passthrough.py +0 -70
- {fast_agent_mcp-0.1.8.dist-info → fast_agent_mcp-0.1.10.dist-info}/WHEEL +0 -0
- {fast_agent_mcp-0.1.8.dist-info → fast_agent_mcp-0.1.10.dist-info}/licenses/LICENSE +0 -0
mcp_agent/core/mcp_content.py
ADDED
@@ -0,0 +1,222 @@
+"""
+Helper functions for creating MCP content types with minimal code.
+
+This module provides simple functions to create TextContent, ImageContent,
+EmbeddedResource, and other MCP content types with minimal boilerplate.
+"""
+
+import base64
+from pathlib import Path
+from typing import Literal, Optional, Union, List, Any
+
+from mcp.types import (
+    TextContent,
+    ImageContent,
+    EmbeddedResource,
+    TextResourceContents,
+    BlobResourceContents,
+)
+
+from mcp_agent.mcp.mime_utils import (
+    guess_mime_type,
+    is_binary_content,
+    is_image_mime_type,
+)
+
+
+def MCPText(
+    text: str,
+    role: Literal["user", "assistant"] = "user",
+    annotations: Optional[dict] = None,
+) -> dict:
+    """
+    Create a message with text content.
+
+    Args:
+        text: The text content
+        role: Role of the message, defaults to "user"
+        annotations: Optional annotations
+
+    Returns:
+        A dictionary with role and content that can be used in a prompt
+    """
+    return {
+        "role": role,
+        "content": TextContent(type="text", text=text, annotations=annotations),
+    }
+
+
+def MCPImage(
+    path: Union[str, Path] = None,
+    data: bytes = None,
+    mime_type: Optional[str] = None,
+    role: Literal["user", "assistant"] = "user",
+    annotations: Optional[dict] = None,
+) -> dict:
+    """
+    Create a message with image content.
+
+    Args:
+        path: Path to the image file
+        data: Raw image data bytes (alternative to path)
+        mime_type: Optional mime type, will be guessed from path if not provided
+        role: Role of the message, defaults to "user"
+        annotations: Optional annotations
+
+    Returns:
+        A dictionary with role and content that can be used in a prompt
+    """
+    if path is None and data is None:
+        raise ValueError("Either path or data must be provided")
+
+    if path is not None and data is not None:
+        raise ValueError("Only one of path or data can be provided")
+
+    if path is not None:
+        path = Path(path)
+        if not mime_type:
+            mime_type = guess_mime_type(str(path))
+        with open(path, "rb") as f:
+            data = f.read()
+
+    if not mime_type:
+        mime_type = "image/png"  # Default
+
+    b64_data = base64.b64encode(data).decode("ascii")
+
+    return {
+        "role": role,
+        "content": ImageContent(
+            type="image", data=b64_data, mimeType=mime_type, annotations=annotations
+        ),
+    }
+
+
+def MCPFile(
+    path: Union[str, Path],
+    mime_type: Optional[str] = None,
+    role: Literal["user", "assistant"] = "user",
+    annotations: Optional[dict] = None,
+) -> dict:
+    """
+    Create a message with an embedded resource from a file.
+
+    Args:
+        path: Path to the resource file
+        mime_type: Optional mime type, will be guessed from path if not provided
+        role: Role of the message, defaults to "user"
+        annotations: Optional annotations
+
+    Returns:
+        A dictionary with role and content that can be used in a prompt
+    """
+    path = Path(path)
+    uri = f"file://{path.absolute()}"
+
+    if not mime_type:
+        mime_type = guess_mime_type(str(path))
+
+    # Determine if this is text or binary content
+    is_binary = is_binary_content(mime_type)
+
+    if is_binary:
+        # Read as binary
+        binary_data = path.read_bytes()
+        b64_data = base64.b64encode(binary_data).decode("ascii")
+
+        resource = BlobResourceContents(uri=uri, blob=b64_data, mimeType=mime_type)
+    else:
+        # Read as text
+        try:
+            text_data = path.read_text(encoding="utf-8")
+            resource = TextResourceContents(uri=uri, text=text_data, mimeType=mime_type)
+        except UnicodeDecodeError:
+            # Fallback to binary if text read fails
+            binary_data = path.read_bytes()
+            b64_data = base64.b64encode(binary_data).decode("ascii")
+            resource = BlobResourceContents(
+                uri=uri, blob=b64_data, mimeType=mime_type or "application/octet-stream"
+            )
+
+    return {
+        "role": role,
+        "content": EmbeddedResource(
+            type="resource", resource=resource, annotations=annotations
+        ),
+    }
+
+
+
+def MCPPrompt(
+    *content_items, role: Literal["user", "assistant"] = "user"
+) -> List[dict]:
+    """
+    Create one or more prompt messages with various content types.
+
+    This function intelligently creates different content types:
+    - Strings become TextContent
+    - File paths with image mime types become ImageContent
+    - File paths with text mime types or other mime types become EmbeddedResource
+    - Dicts with role and content are passed through unchanged
+    - Raw bytes become ImageContent
+
+    Args:
+        *content_items: Content items of various types
+        role: Role for all items (user or assistant)
+
+    Returns:
+        List of messages that can be used in a prompt
+    """
+    result = []
+
+    for item in content_items:
+        if isinstance(item, dict) and "role" in item and "content" in item:
+            # Already a fully formed message
+            result.append(item)
+        elif isinstance(item, str) and not Path(item).exists():
+            # Simple text content (that's not a file path)
+            result.append(MCPText(item, role=role))
+        elif isinstance(item, Path) or isinstance(item, str):
+            # File path - determine the content type based on mime type
+            path_str = str(item)
+            mime_type = guess_mime_type(path_str)
+
+            if is_image_mime_type(mime_type):
+                # Image files (except SVG which is handled as text)
+                result.append(MCPImage(path=item, role=role))
+            else:
+                # All other file types (text documents, PDFs, SVGs, etc.)
+                result.append(MCPFile(path=item, role=role))
+        elif isinstance(item, bytes):
+            # Raw binary data, assume image
+            result.append(MCPImage(data=item, role=role))
+        else:
+            # Try to convert to string
+            result.append(MCPText(str(item), role=role))
+
+    return result
+
+
+def User(*content_items) -> List[dict]:
+    """Create user message(s) with various content types."""
+    return MCPPrompt(*content_items, role="user")
+
+
+def Assistant(*content_items) -> List[dict]:
+    """Create assistant message(s) with various content types."""
+    return MCPPrompt(*content_items, role="assistant")
+
+
+def create_message(content: Any, role: Literal["user", "assistant"] = "user") -> dict:
+    """
+    Create a single prompt message from content of various types.

+    Args:
+        content: Content of various types (str, Path, bytes, etc.)
+        role: Role of the message
+
+    Returns:
+        A dictionary with role and content that can be used in a prompt
+    """
+    messages = MCPPrompt(content, role=role)
+    return messages[0] if messages else {}
mcp_agent/core/prompt.py
ADDED
@@ -0,0 +1,132 @@
+"""
+Prompt class for easily creating and working with MCP prompt content.
+"""
+
+from typing import List, Literal
+
+from mcp.types import PromptMessage
+from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart
+
+# Import our content helper functions
+from .mcp_content import User, Assistant, MCPPrompt
+
+
+class Prompt:
+    """
+    A helper class for working with MCP prompt content.
+
+    This class provides static methods to create:
+    - PromptMessage instances
+    - PromptMessageMultipart instances
+    - Lists of messages for conversations
+
+    All methods intelligently handle various content types:
+    - Strings become TextContent
+    - Image file paths become ImageContent
+    - Other file paths become EmbeddedResource
+    - Pre-formatted messages pass through unchanged
+    """
+
+    @classmethod
+    def user(cls, *content_items) -> PromptMessageMultipart:
+        """
+        Create a user PromptMessageMultipart with various content items.
+
+        Args:
+            *content_items: Content items (strings, file paths, etc.)
+
+        Returns:
+            A PromptMessageMultipart with user role and the specified content
+        """
+        messages = User(*content_items)
+        return PromptMessageMultipart(
+            role="user", content=[msg["content"] for msg in messages]
+        )
+
+    @classmethod
+    def assistant(cls, *content_items) -> PromptMessageMultipart:
+        """
+        Create an assistant PromptMessageMultipart with various content items.
+
+        Args:
+            *content_items: Content items (strings, file paths, etc.)
+
+        Returns:
+            A PromptMessageMultipart with assistant role and the specified content
+        """
+        messages = Assistant(*content_items)
+        return PromptMessageMultipart(
+            role="assistant", content=[msg["content"] for msg in messages]
+        )
+
+    @classmethod
+    def message(
+        cls, *content_items, role: Literal["user", "assistant"] = "user"
+    ) -> PromptMessageMultipart:
+        """
+        Create a PromptMessageMultipart with the specified role and content items.
+
+        Args:
+            *content_items: Content items (strings, file paths, etc.)
+            role: Role for the message (user or assistant)
+
+        Returns:
+            A PromptMessageMultipart with the specified role and content
+        """
+        messages = MCPPrompt(*content_items, role=role)
+        return PromptMessageMultipart(
+            role=messages[0]["role"] if messages else role,
+            content=[msg["content"] for msg in messages],
+        )
+
+    @classmethod
+    def conversation(cls, *messages) -> List[PromptMessage]:
+        """
+        Create a list of PromptMessages from various inputs.
+
+        This method accepts:
+        - PromptMessageMultipart instances
+        - Dictionaries with role and content
+        - Lists of dictionaries with role and content
+
+        Args:
+            *messages: Messages to include in the conversation
+
+        Returns:
+            A list of PromptMessage objects for the conversation
+        """
+        result = []
+
+        for item in messages:
+            if isinstance(item, PromptMessageMultipart):
+                # Convert PromptMessageMultipart to a list of PromptMessages
+                result.extend(item.to_prompt_messages())
+            elif isinstance(item, dict) and "role" in item and "content" in item:
+                # Convert a single message dict to PromptMessage
+                result.append(PromptMessage(**item))
+            elif isinstance(item, list):
+                # Process each item in the list
+                for msg in item:
+                    if isinstance(msg, dict) and "role" in msg and "content" in msg:
+                        result.append(PromptMessage(**msg))
+            # Ignore other types
+
+        return result
+
+    @classmethod
+    def from_multipart(
+        cls, multipart: List[PromptMessageMultipart]
+    ) -> List[PromptMessage]:
+        """
+        Convert a list of PromptMessageMultipart objects to PromptMessages.
+
+        Args:
+            multipart: List of PromptMessageMultipart objects
+
+        Returns:
+            A flat list of PromptMessage objects
+        """
+        result = []
+        for mp in multipart:
+            result.extend(mp.to_prompt_messages())
+        return result
mcp_agent/core/proxies.py
CHANGED
@@ -3,10 +3,12 @@ Proxy classes for agent interactions.
 These proxies provide a consistent interface for interacting with different types of agents.
 """
 
-from typing import List, Optional, Dict, TYPE_CHECKING
+from typing import List, Optional, Dict, Union, TYPE_CHECKING
 
 from mcp_agent.agents.agent import Agent
 from mcp_agent.app import MCPApp
+from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart
+from mcp.types import EmbeddedResource
 
 # Handle circular imports
 if TYPE_CHECKING:
@@ -34,27 +36,43 @@ class BaseAgentProxy:
             return await self.prompt()
         return await self.send(message)
 
-    async def send(
-
+    async def send(
+        self, message: Optional[Union[str, PromptMessageMultipart]] = None
+    ) -> str:
+        """
+        Allow: agent.researcher.send('message') or agent.researcher.send(Prompt.user('message'))
+
+        Args:
+            message: Either a string message or a PromptMessageMultipart object
+
+        Returns:
+            The agent's response as a string
+        """
         if message is None:
             # For consistency with agent(), use prompt() to open the interactive interface
             return await self.prompt()
+
+        # If a PromptMessageMultipart is passed, use send_prompt
+        if isinstance(message, PromptMessageMultipart):
+            return await self.send_prompt(message)
+
+        # For string messages, use generate_str (traditional behavior)
        return await self.generate_str(message)
 
     async def prompt(self, default_prompt: str = "") -> str:
         """Allow: agent.researcher.prompt()"""
         from mcp_agent.core.agent_app import AgentApp
-
+
         # First check if _app is directly an AgentApp
         if isinstance(self._app, AgentApp):
             return await self._app.prompt(self._name, default_prompt)
-
+
         # If not, check if it's an MCPApp with an _agent_app attribute
         if hasattr(self._app, "_agent_app"):
             agent_app = self._app._agent_app
             if agent_app:
                 return await agent_app.prompt(self._name, default_prompt)
-
+
         # If we can't find an AgentApp, return an error message
         return "ERROR: Cannot prompt() - AgentApp not found"
 
@@ -62,23 +80,18 @@
         """Generate response for a message - must be implemented by subclasses"""
         raise NotImplementedError("Subclasses must implement generate_str")
 
-    async def
-        """
-
-
-
-
-
-            arguments: Optional dictionary of string arguments for prompt templating
-        """
-        raise NotImplementedError("Subclasses must implement mcp-prompt")
-
-    async def apply_prompt(self, prompt_name: str = None, arguments: dict[str, str] = None) -> str:
+    async def send_prompt(self, prompt: PromptMessageMultipart) -> str:
+        """Send a message to the agent and return the response"""
+        raise NotImplementedError("Subclasses must implement send(prompt)")
+
+    async def apply_prompt(
+        self, prompt_name: str = None, arguments: dict[str, str] = None
+    ) -> str:
         """
         Apply a Prompt from an MCP Server - implemented by subclasses.
         This is the preferred method for applying prompts.
         Always returns an Assistant message.
-
+
         Args:
             prompt_name: Name of the prompt to apply
             arguments: Optional dictionary of string arguments for prompt templating
@@ -97,33 +110,63 @@ class LLMAgentProxy(BaseAgentProxy):
         """Forward message and all kwargs to the agent's LLM"""
         return await self._agent._llm.generate_str(message, **kwargs)
 
-    async def
-        """
-
-
-
-
-
-
-        Returns:
-            The assistant's response
-        """
-        return await self._agent.load_prompt(prompt_name, arguments)
-
-    async def apply_prompt(self, prompt_name: str = None, arguments: dict[str, str] = None) -> str:
+    async def send_prompt(self, prompt: PromptMessageMultipart) -> str:
+        """Send a message to the agent and return the response"""
+        return await self._agent._llm.generate_prompt(prompt, None)
+
+    async def apply_prompt(
+        self, prompt_name: str = None, arguments: dict[str, str] = None
+    ) -> str:
         """
         Apply a prompt from an MCP server.
         This is the preferred method for applying prompts.
-
+
         Args:
             prompt_name: Name of the prompt to apply
            arguments: Optional dictionary of string arguments for prompt templating
-
+
         Returns:
             The assistant's response
         """
         return await self._agent.apply_prompt(prompt_name, arguments)
 
+    # Add the new methods
+    async def get_embedded_resources(
+        self, server_name: str, resource_name: str
+    ) -> List[EmbeddedResource]:
+        """
+        Get a resource from an MCP server and return it as a list of embedded resources ready for use in prompts.
+
+        Args:
+            server_name: Name of the MCP server to retrieve the resource from
+            resource_name: Name or URI of the resource to retrieve
+
+        Returns:
+            List of EmbeddedResource objects ready to use in a PromptMessageMultipart
+        """
+        return await self._agent.get_embedded_resources(server_name, resource_name)
+
+    async def with_resource(
+        self,
+        prompt_content: Union[str, PromptMessageMultipart],
+        server_name: str,
+        resource_name: str,
+    ) -> str:
+        """
+        Create a prompt with the given content and resource, then send it to the agent.
+
+        Args:
+            prompt_content: Either a string message or an existing PromptMessageMultipart
+            server_name: Name of the MCP server to retrieve the resource from
+            resource_name: Name or URI of the resource to retrieve
+
+        Returns:
+            The agent's response as a string
+        """
+        return await self._agent.with_resource(
+            prompt_content, server_name, resource_name
+        )
+
 
 class WorkflowProxy(BaseAgentProxy):
     """Proxy for workflow types that implement generate_str() directly"""
@@ -220,4 +263,4 @@ class ChainProxy(BaseAgentProxy):
             proxy = self._agent_proxies[agent_name]
             current_message = await proxy.generate_str(current_message)
 
-        return current_message
+        return current_message
mcp_agent/logging/listeners.py
CHANGED
@@ -177,10 +177,7 @@ class BatchingListener(FilteredListener):
 
         if self._flush_task and not self._flush_task.done():
             self._flush_task.cancel()
-
-                await self._flush_task
-            except asyncio.CancelledError:
-                pass
+            await self._flush_task
         self._flush_task = None
         await self.flush()
 
@@ -193,8 +190,8 @@
                     )
                 except asyncio.TimeoutError:
                     await self.flush()
-                except asyncio.CancelledError:
-
+                # except asyncio.CancelledError:
+                #     break
         finally:
             await self.flush()  # Final flush
 
mcp_agent/logging/transport.py
CHANGED
@@ -290,6 +290,21 @@ class AsyncEventBus:
             # Update transport if provided
             cls._instance.transport = transport
         return cls._instance
+
+    @classmethod
+    def reset(cls) -> None:
+        """
+        Reset the singleton instance.
+        This is primarily useful for testing scenarios where you need to ensure
+        a clean state between tests.
+        """
+        if cls._instance:
+            # Signal shutdown
+            cls._instance._running = False
+            cls._instance._stop_event.set()
+
+            # Clear the singleton instance
+            cls._instance = None
 
     async def start(self):
         """Start the event bus and all lifecycle-aware listeners."""
@@ -383,11 +398,19 @@
     async def _process_events(self):
         """Process events from the queue until stopped."""
         while self._running:
+            event = None
             try:
                 # Use wait_for with a timeout to allow checking running state
                 try:
+                    # Check if we should be stopping first
+                    if not self._running or self._stop_event.is_set():
+                        break
+
                     event = await asyncio.wait_for(self._queue.get(), timeout=0.1)
                 except asyncio.TimeoutError:
+                    # Check again before continuing
+                    if not self._running or self._stop_event.is_set():
+                        break
                     continue
 
                 # Process the event through all listeners
@@ -407,13 +430,17 @@
                            f"Stacktrace: {''.join(traceback.format_exception(type(r), r, r.__traceback__))}"
                        )
 
-                self._queue.task_done()
-
             except asyncio.CancelledError:
+                # If we have a current event, mark it done before breaking
+                if event is not None:
+                    self._queue.task_done()
                 break
             except Exception as e:
                 print(f"Error in event processing loop: {e}")
-
+            finally:
+                # Always mark the task as done if we got an event
+                if event is not None:
+                    self._queue.task_done()
 
         # Process remaining events in queue
         while not self._queue.empty():