fast-agent-mcp 0.3.6__py3-none-any.whl → 0.3.7__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of fast-agent-mcp might be problematic.
- fast_agent/__init__.py +9 -1
- fast_agent/agents/agent_types.py +11 -11
- fast_agent/agents/llm_agent.py +61 -39
- fast_agent/agents/llm_decorator.py +355 -6
- fast_agent/agents/mcp_agent.py +85 -62
- fast_agent/agents/tool_agent.py +50 -4
- fast_agent/cli/commands/go.py +3 -3
- fast_agent/constants.py +2 -0
- fast_agent/core/agent_app.py +2 -0
- fast_agent/core/direct_factory.py +39 -120
- fast_agent/core/fastagent.py +2 -2
- fast_agent/history/history_exporter.py +3 -3
- fast_agent/llm/fastagent_llm.py +3 -3
- fast_agent/llm/provider/openai/llm_openai.py +57 -8
- fast_agent/mcp/__init__.py +1 -2
- fast_agent/mcp/mcp_aggregator.py +6 -3
- fast_agent/mcp/prompt_message_extended.py +2 -0
- fast_agent/mcp/prompt_serialization.py +124 -39
- fast_agent/mcp/prompts/prompt_load.py +34 -32
- fast_agent/mcp/prompts/prompt_server.py +26 -11
- fast_agent/resources/setup/fastagent.config.yaml +2 -2
- fast_agent/types/__init__.py +3 -1
- fast_agent/ui/enhanced_prompt.py +111 -64
- fast_agent/ui/interactive_prompt.py +13 -41
- fast_agent/ui/rich_progress.py +12 -8
- {fast_agent_mcp-0.3.6.dist-info → fast_agent_mcp-0.3.7.dist-info}/METADATA +3 -3
- {fast_agent_mcp-0.3.6.dist-info → fast_agent_mcp-0.3.7.dist-info}/RECORD +30 -30
- {fast_agent_mcp-0.3.6.dist-info → fast_agent_mcp-0.3.7.dist-info}/WHEEL +0 -0
- {fast_agent_mcp-0.3.6.dist-info → fast_agent_mcp-0.3.7.dist-info}/entry_points.txt +0 -0
- {fast_agent_mcp-0.3.6.dist-info → fast_agent_mcp-0.3.7.dist-info}/licenses/LICENSE +0 -0
fast_agent/llm/provider/openai/llm_openai.py
CHANGED

@@ -7,7 +7,7 @@ from mcp.types import (
     ContentBlock,
     TextContent,
 )
-from openai import AsyncOpenAI, AuthenticationError
+from openai import APIError, AsyncOpenAI, AuthenticationError
 from openai.lib.streaming.chat import ChatCompletionStreamState
 
 # from openai.types.beta.chat import

@@ -19,19 +19,17 @@ from openai.types.chat import (
 )
 from pydantic_core import from_json
 
+from fast_agent.constants import FAST_AGENT_ERROR_CHANNEL
 from fast_agent.core.exceptions import ProviderKeyError
 from fast_agent.core.logging.logger import get_logger
 from fast_agent.core.prompt import Prompt
 from fast_agent.event_progress import ProgressAction
-from fast_agent.llm.fastagent_llm import (
-    FastAgentLLM,
-    RequestParams,
-)
+from fast_agent.llm.fastagent_llm import FastAgentLLM, RequestParams
 from fast_agent.llm.provider.openai.multipart_converter_openai import OpenAIConverter, OpenAIMessage
 from fast_agent.llm.provider_types import Provider
 from fast_agent.llm.usage_tracking import TurnUsage
-from fast_agent.
-from fast_agent.types
+from fast_agent.mcp.helpers.content_helpers import text_content
+from fast_agent.types import LlmStopReason, PromptMessageExtended
 
 _logger = get_logger(__name__)
 

@@ -348,7 +346,11 @@ class OpenAILLM(FastAgentLLM[ChatCompletionMessageParam, ChatCompletionMessage])
         # Use basic streaming API
         stream = await self._openai_client().chat.completions.create(**arguments)
         # Process the stream
-        response = await self._process_stream(stream, model_name)
+        try:
+            response = await self._process_stream(stream, model_name)
+        except APIError as error:
+            self.logger.error("Streaming APIError during OpenAI completion", exc_info=error)
+            return self._stream_failure_response(error, model_name)
         # Track usage if response is valid and has usage data
         if (
             hasattr(response, "usage")
@@ -438,6 +440,53 @@ class OpenAILLM(FastAgentLLM[ChatCompletionMessageParam, ChatCompletionMessage])
         *response_content_blocks, stop_reason=stop_reason, tool_calls=requested_tool_calls
     )
 
+    def _stream_failure_response(self, error: APIError, model_name: str) -> PromptMessageExtended:
+        """Convert streaming API errors into a graceful assistant reply."""
+
+        provider_label = (
+            self.provider.value if isinstance(self.provider, Provider) else str(self.provider)
+        )
+        detail = getattr(error, "message", None) or str(error)
+        detail = detail.strip() if isinstance(detail, str) else ""
+
+        parts: list[str] = [f"{provider_label} request failed"]
+        if model_name:
+            parts.append(f"for model '{model_name}'")
+        code = getattr(error, "code", None)
+        if code:
+            parts.append(f"(code: {code})")
+        status = getattr(error, "status_code", None)
+        if status:
+            parts.append(f"(status={status})")
+
+        message = " ".join(parts)
+        if detail:
+            message = f"{message}: {detail}"
+
+        user_summary = " ".join(message.split()) if message else ""
+        if user_summary and len(user_summary) > 280:
+            user_summary = user_summary[:277].rstrip() + "..."
+
+        if user_summary:
+            assistant_text = f"I hit an internal error while calling the model: {user_summary}"
+            if not assistant_text.endswith((".", "!", "?")):
+                assistant_text += "."
+            assistant_text += " See fast-agent-error for additional details."
+        else:
+            assistant_text = (
+                "I hit an internal error while calling the model; see fast-agent-error for details."
+            )
+
+        assistant_block = text_content(assistant_text)
+        error_block = text_content(message)
+
+        return PromptMessageExtended(
+            role="assistant",
+            content=[assistant_block],
+            channels={FAST_AGENT_ERROR_CHANNEL: [error_block]},
+            stop_reason=LlmStopReason.ERROR,
+        )
+
     async def _is_tool_stop_reason(self, finish_reason: str) -> bool:
         return True
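Taken together, these two hunks change the failure mode: a streaming `APIError` no longer propagates out of the completion call. Instead the caller receives a well-formed assistant message with `stop_reason=LlmStopReason.ERROR` and the full diagnostic text on the `fast-agent-error` channel. A minimal sketch of how downstream code might check for that state, using only names visible in this diff (the `summarize_failure` helper itself is hypothetical):

```python
# Hypothetical consumer of the new error-fallback reply shape.
from fast_agent.constants import FAST_AGENT_ERROR_CHANNEL
from fast_agent.mcp.helpers.content_helpers import get_text
from fast_agent.types import LlmStopReason, PromptMessageExtended


def summarize_failure(reply: PromptMessageExtended) -> str | None:
    """Return the detailed error text if the reply came from _stream_failure_response."""
    if reply.stop_reason != LlmStopReason.ERROR:
        return None  # normal completion, nothing to report
    blocks = (reply.channels or {}).get(FAST_AGENT_ERROR_CHANNEL, [])
    # Each block is a content block; get_text extracts its plain text
    return "\n".join(get_text(block) or "" for block in blocks)
```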
fast_agent/mcp/__init__.py
CHANGED

@@ -24,11 +24,9 @@ from .helpers import (
     split_thinking_content,
     text_content,
 )
-from .prompt_message_extended import PromptMessageExtended
 
 __all__ = [
     "Prompt",
-    "PromptMessageExtended",
     # Helpers
     "get_text",
     "get_image_data",

@@ -51,4 +49,5 @@ def __getattr__(name: str):
         from .prompt import Prompt  # local import
 
         return Prompt
+
     raise AttributeError(f"module '{__name__}' has no attribute '{name}'")
fast_agent/mcp/mcp_aggregator.py
CHANGED

@@ -470,12 +470,13 @@ class MCPAggregator(ContextDependent):
         """
        instructions = {}
 
-        if self.connection_persistence and hasattr(self,
+        if self.connection_persistence and hasattr(self, "_persistent_connection_manager"):
            # Get instructions from persistent connections
            for server_name in self.server_names:
                try:
                    server_conn = await self._persistent_connection_manager.get_server(
-                        server_name,
+                        server_name,
+                        client_session_factory=self._create_session_factory(server_name),
                    )
                    # Always include server, even if no instructions
                    # Get tool names for this server

@@ -1074,7 +1075,9 @@ class MCPAggregator(ContextDependent):
            logger.debug(f"Server '{server_name}' does not support tools")
            return
 
-        await self.display.show_tool_update(
+        await self.display.show_tool_update(
+            updated_server=server_name, agent_name="Tool List Change Notification"
+        )
 
        async with self._refresh_lock:
            try:
fast_agent/mcp/prompt_message_extended.py
CHANGED

@@ -12,6 +12,8 @@ from mcp.types (
 from pydantic import BaseModel
 
 from fast_agent.mcp.helpers.content_helpers import get_text
+
+# Import directly to avoid circular dependency with types/__init__.py
 from fast_agent.types.llm_stop_reason import LlmStopReason
 
fast_agent/mcp/prompt_serialization.py
CHANGED

@@ -23,6 +23,7 @@ from mcp.types (
     EmbeddedResource,
     GetPromptResult,
     ImageContent,
+    PromptMessage,
     TextContent,
     TextResourceContents,
 )

@@ -34,12 +35,30 @@ from fast_agent.mcp.prompts.prompt_constants import (
 )
 from fast_agent.types import PromptMessageExtended
 
+# -------------------------------------------------------------------------
+# Serialization Helpers
+# -------------------------------------------------------------------------
+
+
+def serialize_to_dict(obj, exclude_none: bool = True):
+    """Standardized Pydantic serialization to dictionary.
+
+    Args:
+        obj: Pydantic model object to serialize
+        exclude_none: Whether to exclude None values (default: True)
+
+    Returns:
+        Dictionary representation suitable for JSON serialization
+    """
+    return obj.model_dump(by_alias=True, mode="json", exclude_none=exclude_none)
+
+
 # -------------------------------------------------------------------------
 # JSON Serialization Functions
 # -------------------------------------------------------------------------
 
 
-def multipart_messages_to_get_prompt_result(
+def to_get_prompt_result(
     messages: List[PromptMessageExtended],
 ) -> GetPromptResult:
     """
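The new `serialize_to_dict` helper pins down the Pydantic dump options (`by_alias=True`, `mode="json"`, `exclude_none=True`) that every serializer in this module now shares. A quick illustration with a plain `mcp.types.TextContent` block; the printed result assumes standard Pydantic v2 behavior for those options:

```python
# Sketch: what the shared model_dump options buy us.
from mcp.types import TextContent

from fast_agent.mcp.prompt_serialization import serialize_to_dict

block = TextContent(type="text", text="hello")

# mode="json" coerces values to JSON-safe types; exclude_none drops unset
# optional fields (e.g. annotations) rather than emitting explicit nulls.
print(serialize_to_dict(block))  # expected: {'type': 'text', 'text': 'hello'}
```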
@@ -60,35 +79,58 @@ def multipart_messages_to_get_prompt_result(
     return GetPromptResult(messages=flat_messages)
 
 
-
+
+def to_get_prompt_result_json(messages: List[PromptMessageExtended]) -> str:
+    """
+    Convert PromptMessageExtended objects to MCP-compatible GetPromptResult JSON.
+
+    This is a lossy conversion that flattens multipart messages and loses extended fields
+    like tool_calls, channels, and stop_reason. Use for MCP server compatibility.
+
+    Args:
+        messages: List of PromptMessageExtended objects
+
+    Returns:
+        JSON string in GetPromptResult format
+    """
+    result = to_get_prompt_result(messages)
+    result_dict = serialize_to_dict(result)
+    return json.dumps(result_dict, indent=2)
+
+
+def to_json(messages: List[PromptMessageExtended]) -> str:
     """
-    Convert PromptMessageExtended objects to
+    Convert PromptMessageExtended objects directly to JSON, preserving all extended fields.
 
-    This
-    the
+    This preserves tool_calls, tool_results, channels, and stop_reason that would be lost
+    in the standard GetPromptResult conversion.
 
     Args:
         messages: List of PromptMessageExtended objects
 
     Returns:
-        JSON string representation
+        JSON string representation preserving all PromptMessageExtended data
    """
-    #
-
+    # Convert each message to dict using standardized serialization
+    messages_dicts = [serialize_to_dict(msg) for msg in messages]
 
-    #
-    result_dict =
+    # Wrap in a container similar to GetPromptResult for consistency
+    result_dict = {"messages": messages_dicts}
 
     # Convert to JSON string
     return json.dumps(result_dict, indent=2)
 
 
-def json_to_extended_messages(json_str: str) -> List[PromptMessageExtended]:
+def from_json(json_str: str) -> List[PromptMessageExtended]:
     """
-    Parse a JSON string
+    Parse a JSON string into PromptMessageExtended objects.
+
+    Handles both:
+    - Enhanced format with full PromptMessageExtended data
+    - Legacy GetPromptResult format (missing extended fields default to None)
 
     Args:
-        json_str: JSON string representation
+        json_str: JSON string representation
 
     Returns:
         List of PromptMessageExtended objects

@@ -96,31 +138,66 @@ def json_to_extended_messages(json_str: str) -> List[PromptMessageExtended]:
     # Parse JSON to dictionary
     result_dict = json.loads(json_str)
 
-    #
-
+    # Extract messages array
+    messages_data = result_dict.get("messages", [])
+
+    extended_messages: List[PromptMessageExtended] = []
+    basic_buffer: List[PromptMessage] = []
+
+    def flush_basic_buffer() -> None:
+        nonlocal basic_buffer
+        if not basic_buffer:
+            return
+        extended_messages.extend(PromptMessageExtended.to_extended(basic_buffer))
+        basic_buffer = []
+
+    for msg_data in messages_data:
+        content = msg_data.get("content")
+        is_enhanced = isinstance(content, list)
+        if is_enhanced:
+            try:
+                msg = PromptMessageExtended.model_validate(msg_data)
+            except Exception:
+                is_enhanced = False
+            else:
+                flush_basic_buffer()
+                extended_messages.append(msg)
+                continue
+
+        try:
+            basic_msg = PromptMessage.model_validate(msg_data)
+        except Exception:
+            continue
+        basic_buffer.append(basic_msg)
 
-
-    return PromptMessageExtended.to_extended(result.messages)
+    flush_basic_buffer()
 
+    return extended_messages
 
-
+
+def save_json(messages: List[PromptMessageExtended], file_path: str) -> None:
     """
-    Save PromptMessageExtended objects to a JSON file.
+    Save PromptMessageExtended objects to a JSON file using enhanced format.
+
+    Uses the enhanced format that preserves tool_calls, tool_results, channels,
+    and stop_reason data.
 
     Args:
         messages: List of PromptMessageExtended objects
         file_path: Path to save the JSON file
     """
-    json_str =
+    json_str = to_json(messages)
 
     with open(file_path, "w", encoding="utf-8") as f:
         f.write(json_str)
 
 
-def load_messages_from_json_file(file_path: str) -> List[PromptMessageExtended]:
+def load_json(file_path: str) -> List[PromptMessageExtended]:
     """
     Load PromptMessageExtended objects from a JSON file.
 
+    Handles both enhanced format and legacy GetPromptResult format.
+
     Args:
         file_path: Path to the JSON file
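Beyond the shorter names, `from_json` now sniffs each entry's `content` shape: a list means the enhanced `PromptMessageExtended` format, anything else falls back to plain `PromptMessage` validation, with consecutive plain messages batched through `to_extended`. A round-trip sketch, assuming the imports behave as shown in the hunks above:

```python
# Round-trip through the enhanced JSON format.
from mcp.types import TextContent

from fast_agent.mcp.prompt_serialization import from_json, to_json
from fast_agent.types import PromptMessageExtended

messages = [
    PromptMessageExtended(role="user", content=[TextContent(type="text", text="hi")]),
    PromptMessageExtended(role="assistant", content=[TextContent(type="text", text="hello!")]),
]

payload = to_json(messages)    # {"messages": [...]}, extended fields preserved
restored = from_json(payload)  # content is a list -> enhanced path taken

assert [m.role for m in restored] == ["user", "assistant"]
```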
@@ -130,14 +207,14 @@ def load_messages_from_json_file(file_path: str) -> List[PromptMessageExtended]:
     with open(file_path, "r", encoding="utf-8") as f:
         json_str = f.read()
 
-    return
+    return from_json(json_str)
 
 
-def save_messages_to_file(messages: List[PromptMessageExtended], file_path: str) -> None:
+def save_messages(messages: List[PromptMessageExtended], file_path: str) -> None:
     """
     Save PromptMessageExtended objects to a file, with format determined by file extension.
 
-    Uses
+    Uses enhanced JSON format for .json files (preserves all fields) and
     delimited text format for other extensions.
 
     Args:

@@ -147,19 +224,16 @@ def save_messages_to_file(messages: List[PromptMessageExtended], file_path: str)
     path_str = str(file_path).lower()
 
     if path_str.endswith(".json"):
-
-        save_messages_to_json_file(messages, file_path)
+        save_json(messages, file_path)
     else:
-
-        save_messages_to_delimited_file(messages, file_path)
+        save_delimited(messages, file_path)
 
 
-def load_messages_from_file(file_path: str) -> List[PromptMessageExtended]:
+def load_messages(file_path: str) -> List[PromptMessageExtended]:
     """
     Load PromptMessageExtended objects from a file, with format determined by file extension.
 
-    Uses
-    delimited text format for other extensions.
+    Uses JSON format for .json files and delimited text format for other extensions.
 
     Args:
         file_path: Path to the file

@@ -170,11 +244,9 @@ def load_messages_from_file(file_path: str) -> List[PromptMessageExtended]:
     path_str = str(file_path).lower()
 
     if path_str.endswith(".json"):
-
-        return load_messages_from_json_file(file_path)
+        return load_json(file_path)
     else:
-
-        return load_messages_from_delimited_file(file_path)
+        return load_delimited(file_path)
 
 
 # -------------------------------------------------------------------------
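With the renames in place, the extension dispatch pair reads as a small façade over the two formats. A usage sketch (file names are illustrative):

```python
# Extension-driven persistence via the renamed dispatch helpers.
from mcp.types import TextContent

from fast_agent.mcp.prompt_serialization import load_messages, save_messages
from fast_agent.types import PromptMessageExtended

history = [
    PromptMessageExtended(role="user", content=[TextContent(type="text", text="ping")]),
]

save_messages(history, "history.json")  # enhanced JSON: keeps tool_calls/channels/stop_reason
save_messages(history, "history.txt")   # delimited text: human-readable but lossy

assert load_messages("history.json")[0].role == "user"
```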
@@ -238,7 +310,7 @@ def multipart_messages_to_delimited_format(
             delimited_content.append(resource_delimiter)
 
             # Convert to dictionary using proper JSON mode
-            content_dict = content
+            content_dict = serialize_to_dict(content)
 
             # Add to delimited content as JSON
             delimited_content.append(json.dumps(content_dict, indent=2))

@@ -253,7 +325,7 @@ def multipart_messages_to_delimited_format(
             delimited_content.append(resource_delimiter)
 
             # Convert to dictionary using proper JSON mode
-            content_dict = content
+            content_dict = serialize_to_dict(content)
 
             # Add to delimited content as JSON
             delimited_content.append(json.dumps(content_dict, indent=2))

@@ -281,6 +353,17 @@ def delimited_format_to_extended_messages(
     Returns:
         List of PromptMessageExtended objects
     """
+    if user_delimiter not in content and assistant_delimiter not in content:
+        stripped = content.strip()
+        if not stripped:
+            return []
+        return [
+            PromptMessageExtended(
+                role="user",
+                content=[TextContent(type="text", text=stripped)],
+            )
+        ]
+
     lines = content.split("\n")
     messages = []
 

@@ -365,11 +448,13 @@ def delimited_format_to_extended_messages(
             resource_uri = f"resource://fast-agent/{resource_uri}"
 
         # Create a simple resource with just the URI
+        # For legacy format, we don't have the actual content, just the reference
         resource = EmbeddedResource(
             type="resource",
             resource=TextResourceContents(
                 uri=resource_uri,
                 mimeType="text/plain",
+                text="",  # Legacy format doesn't include content
             ),
         )
         resource_contents.append(resource)

@@ -436,7 +521,7 @@ def delimited_format_to_extended_messages(
     return messages
 
 
-def save_messages_to_delimited_file(
+def save_delimited(
     messages: List[PromptMessageExtended],
     file_path: str,
     user_delimiter: str = USER_DELIMITER,

@@ -467,7 +552,7 @@ def save_messages_to_delimited_file(
         f.write("\n".join(delimited_content))
 
 
-def load_messages_from_delimited_file(
+def load_delimited(
     file_path: str,
     user_delimiter: str = USER_DELIMITER,
     assistant_delimiter: str = ASSISTANT_DELIMITER,
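The new guard at the top of `delimited_format_to_extended_messages` means plain text containing neither delimiter now loads as a single user message rather than falling through the line parser, and whitespace-only input yields an empty list. A sketch relying on the default delimiters from `prompt_constants` (their literal values are not shown in this diff):

```python
# Plain text with no role delimiters -> one user message.
from fast_agent.mcp.prompt_serialization import delimited_format_to_extended_messages

msgs = delimited_format_to_extended_messages("Summarize the attached report.")
assert len(msgs) == 1 and msgs[0].role == "user"

# Whitespace-only content produces no messages at all.
assert delimited_format_to_extended_messages("   \n  ") == []
```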
fast_agent/mcp/prompts/prompt_load.py
CHANGED

@@ -12,8 +12,6 @@ from fast_agent.core.logging.logger import get_logger
 from fast_agent.mcp import mime_utils, resource_utils
 from fast_agent.mcp.prompts.prompt_template import (
     PromptContent,
-    PromptTemplate,
-    PromptTemplateLoader,
 )
 from fast_agent.types import PromptMessageExtended
 

@@ -100,58 +98,62 @@ def create_resource_message(
     return message_class(content=embedded_resource)
 
 
-def load_prompt(file: Path) -> List[
+def load_prompt(file: Path) -> List[PromptMessageExtended]:
     """
-    Load a prompt from a file and return as
+    Load a prompt from a file and return as PromptMessageExtended objects.
 
     The loader uses file extension to determine the format:
-    - .json files are loaded
-    - All other files are loaded using the template-based delimited format
+    - .json files are loaded using enhanced format that preserves tool_calls, channels, etc.
+    - All other files are loaded using the template-based delimited format with resource loading
 
     Args:
         file: Path to the prompt file
 
     Returns:
-        List of
+        List of PromptMessageExtended objects with full conversation state
     """
-
+    path_str = str(file).lower()
 
-    if
-        #
-        import
+    if path_str.endswith(".json"):
+        # JSON files use the serialization module directly
+        from fast_agent.mcp.prompt_serialization import load_messages
+        return load_messages(str(file))
+    else:
+        # Non-JSON files need template processing for resource loading
+        from fast_agent.mcp.prompts.prompt_template import PromptTemplateLoader
 
-
+        loader = PromptTemplateLoader()
+        template = loader.load_from_file(file)
 
-    #
-
-
+        # Render the template without arguments to get the messages
+        messages = create_messages_with_resources(
+            template.content_sections,
+            [file]  # Pass the file path for resource resolution
+        )
+
+        # Convert to PromptMessageExtended
+        return PromptMessageExtended.to_extended(messages)
 
-    # Parse as GetPromptResult object
-    result = GetPromptResult.model_validate(json_data)
 
-    # Return the messages directly
-    return result.messages
-    else:
-        # Template-based format (delimited text)
-        template: PromptTemplate = PromptTemplateLoader().load_from_file(file)
-        return create_messages_with_resources(template.content_sections, [file])
 
-def
+def load_prompt_as_get_prompt_result(file: Path):
     """
-    Load a prompt from a file and
+    Load a prompt from a file and convert to GetPromptResult format for MCP compatibility.
 
-
-
-    - All other files are loaded using the template-based delimited format
+    This loses extended fields (tool_calls, channels, etc.) but provides
+    compatibility with MCP prompt servers.
 
     Args:
         file: Path to the prompt file
 
     Returns:
-
+        GetPromptResult object for MCP compatibility
     """
-
+    from fast_agent.mcp.prompt_serialization import to_get_prompt_result
+
+    # Load with full data
     messages = load_prompt(file)
-
-
+
+    # Convert to GetPromptResult (loses extended fields)
+    return to_get_prompt_result(messages)
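`load_prompt` now splits cleanly by extension: `.json` goes through the serialization module (lazily imported to avoid cycles), everything else through the template loader so resource references still resolve. A usage sketch with illustrative file names:

```python
# Loading saved conversations vs. prompt templates.
from pathlib import Path

from fast_agent.mcp.prompts.prompt_load import load_prompt

conversation = load_prompt(Path("saved_session.json"))  # enhanced JSON path
templated = load_prompt(Path("onboarding_prompt.txt"))  # template + resource path

for msg in conversation + templated:
    print(msg.role)
```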
fast_agent/mcp/prompts/prompt_server.py
CHANGED

@@ -11,7 +11,7 @@ import base64
 import logging
 import sys
 from pathlib import Path
-from typing import Any, Awaitable, Callable, Dict, List, Optional
+from typing import Any, Awaitable, Callable, Dict, List, Optional, Union
 
 from mcp.server.fastmcp import FastMCP
 from mcp.server.fastmcp.prompts.base import (

@@ -38,6 +38,7 @@ from fast_agent.mcp.prompts.prompt_template import (
     PromptMetadata,
     PromptTemplateLoader,
 )
+from fast_agent.types import PromptMessageExtended
 
 # Configure logging
 logging.basicConfig(level=logging.ERROR)

@@ -47,13 +48,13 @@ logger = logging.getLogger("prompt_server")
 mcp = FastMCP("Prompt Server")
 
 
-def convert_to_fastmcp_messages(prompt_messages: List[PromptMessage]) -> List[Message]:
+def convert_to_fastmcp_messages(prompt_messages: List[Union[PromptMessage, PromptMessageExtended]]) -> List[Message]:
     """
-    Convert PromptMessage
-    This adapter prevents double-wrapping of messages.
+    Convert PromptMessage or PromptMessageExtended objects to FastMCP Message objects.
+    This adapter prevents double-wrapping of messages and handles both types.
 
     Args:
-        prompt_messages: List of PromptMessage
+        prompt_messages: List of PromptMessage or PromptMessageExtended objects
 
     Returns:
         List of FastMCP Message objects

@@ -61,13 +62,27 @@ def convert_to_fastmcp_messages(prompt_messages: List[PromptMessage]) -> List[Me
     result = []
 
     for msg in prompt_messages:
-
-
-
-
+        # Handle both PromptMessage and PromptMessageExtended
+        if hasattr(msg, 'from_multipart'):
+            # PromptMessageExtended - convert to regular PromptMessage format
+            flat_messages = msg.from_multipart()
+            for flat_msg in flat_messages:
+                if flat_msg.role == "user":
+                    result.append(UserMessage(content=flat_msg.content))
+                elif flat_msg.role == "assistant":
+                    result.append(AssistantMessage(content=flat_msg.content))
+                else:
+                    logger.warning(f"Unknown message role: {flat_msg.role}, defaulting to user")
+                    result.append(UserMessage(content=flat_msg.content))
         else:
-
-
+            # Regular PromptMessage - use directly
+            if msg.role == "user":
+                result.append(UserMessage(content=msg.content))
+            elif msg.role == "assistant":
+                result.append(AssistantMessage(content=msg.content))
+            else:
+                logger.warning(f"Unknown message role: {msg.role}, defaulting to user")
+                result.append(UserMessage(content=msg.content))
 
     return result
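`convert_to_fastmcp_messages` now duck-types on `from_multipart`, so extended messages are flattened before wrapping while plain `PromptMessage` objects pass straight through. A sketch (note that importing `prompt_server` instantiates its `FastMCP` app as a side effect):

```python
# Mixed plain/extended input normalizes to FastMCP Message objects.
from mcp.types import PromptMessage, TextContent

from fast_agent.mcp.prompts.prompt_server import convert_to_fastmcp_messages
from fast_agent.types import PromptMessageExtended

mixed = [
    PromptMessage(role="user", content=TextContent(type="text", text="plain")),
    PromptMessageExtended(role="assistant", content=[TextContent(type="text", text="extended")]),
]

for message in convert_to_fastmcp_messages(mixed):
    print(type(message).__name__)  # e.g. UserMessage, then AssistantMessage
```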
fast_agent/resources/setup/fastagent.config.yaml
CHANGED

@@ -7,10 +7,10 @@
 # Accepts aliases for Anthropic Models: haiku, haiku3, sonnet, sonnet35, opus, opus3
 # and OpenAI Models: gpt-4.1, gpt-4.1-mini, o1, o1-mini, o3-mini
 #
-# If not specified, defaults to "
+# If not specified, defaults to "gpt-5-mini.low".
 # Can be overriden with a command line switch --model=<model>, or within the Agent constructor.
 
-default_model:
+default_model: gpt-5-mini.low
 # mcp-ui support: disabled, enabled or auto. "auto" opens the web browser on the asset automatically
 # mcp_ui_output_dir: ".fast-agent/ui" # Where to write MCP-UI HTML files (relative to CWD if not absolute)
 # mcp_ui_mode: enabled
fast_agent/types/__init__.py
CHANGED

@@ -18,7 +18,9 @@ from fast_agent.mcp.helpers.content_helpers import (
 
 # Public message model used across providers and MCP integration
 from fast_agent.mcp.prompt_message_extended import PromptMessageExtended
-
+
+# Stop reason enum - imported directly to avoid circular dependency
+from .llm_stop_reason import LlmStopReason
 
 __all__ = [
     # Enums / types