fast-agent-mcp 0.3.6__py3-none-any.whl → 0.3.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of fast-agent-mcp might be problematic.
Files changed (40)
  1. fast_agent/__init__.py +9 -1
  2. fast_agent/agents/agent_types.py +11 -11
  3. fast_agent/agents/llm_agent.py +60 -40
  4. fast_agent/agents/llm_decorator.py +351 -7
  5. fast_agent/agents/mcp_agent.py +87 -65
  6. fast_agent/agents/tool_agent.py +50 -4
  7. fast_agent/cli/commands/auth.py +14 -1
  8. fast_agent/cli/commands/go.py +3 -3
  9. fast_agent/constants.py +2 -0
  10. fast_agent/core/agent_app.py +2 -0
  11. fast_agent/core/direct_factory.py +39 -120
  12. fast_agent/core/fastagent.py +2 -2
  13. fast_agent/core/logging/listeners.py +2 -1
  14. fast_agent/history/history_exporter.py +3 -3
  15. fast_agent/interfaces.py +2 -2
  16. fast_agent/llm/fastagent_llm.py +3 -3
  17. fast_agent/llm/model_database.py +7 -1
  18. fast_agent/llm/model_factory.py +2 -3
  19. fast_agent/llm/provider/bedrock/llm_bedrock.py +1 -1
  20. fast_agent/llm/provider/google/llm_google_native.py +1 -3
  21. fast_agent/llm/provider/openai/llm_azure.py +1 -1
  22. fast_agent/llm/provider/openai/llm_openai.py +57 -8
  23. fast_agent/llm/provider/openai/llm_tensorzero_openai.py +1 -1
  24. fast_agent/llm/request_params.py +1 -1
  25. fast_agent/mcp/__init__.py +1 -2
  26. fast_agent/mcp/mcp_aggregator.py +6 -3
  27. fast_agent/mcp/prompt_message_extended.py +2 -0
  28. fast_agent/mcp/prompt_serialization.py +124 -39
  29. fast_agent/mcp/prompts/prompt_load.py +34 -32
  30. fast_agent/mcp/prompts/prompt_server.py +26 -11
  31. fast_agent/resources/setup/fastagent.config.yaml +2 -2
  32. fast_agent/types/__init__.py +3 -1
  33. fast_agent/ui/enhanced_prompt.py +111 -64
  34. fast_agent/ui/interactive_prompt.py +13 -41
  35. fast_agent/ui/rich_progress.py +12 -8
  36. {fast_agent_mcp-0.3.6.dist-info → fast_agent_mcp-0.3.8.dist-info}/METADATA +4 -4
  37. {fast_agent_mcp-0.3.6.dist-info → fast_agent_mcp-0.3.8.dist-info}/RECORD +40 -40
  38. {fast_agent_mcp-0.3.6.dist-info → fast_agent_mcp-0.3.8.dist-info}/WHEEL +0 -0
  39. {fast_agent_mcp-0.3.6.dist-info → fast_agent_mcp-0.3.8.dist-info}/entry_points.txt +0 -0
  40. {fast_agent_mcp-0.3.6.dist-info → fast_agent_mcp-0.3.8.dist-info}/licenses/LICENSE +0 -0

fast_agent/llm/fastagent_llm.py
@@ -198,7 +198,7 @@ class FastAgentLLM(ContextDependent, FastAgentLLMProtocol, Generic[MessageParamT
         if messages[-1].first_text().startswith("***SAVE_HISTORY"):
             parts: list[str] = messages[-1].first_text().split(" ", 1)
             filename: str = (
-                parts[1].strip() if len(parts) > 1 else f"{self.name or 'assistant'}_prompts.txt"
+                parts[1].strip() if len(parts) > 1 else f"{self.name or 'assistant'}.json"
             )
             await self._save_history(filename)
             return Prompt.assistant(f"History saved to {filename}")
@@ -589,10 +589,10 @@ class FastAgentLLM(ContextDependent, FastAgentLLMProtocol, Generic[MessageParamT
         Uses JSON format for .json files (MCP SDK compatible format) and
         delimited text format for other extensions.
         """
-        from fast_agent.mcp.prompt_serialization import save_messages_to_file
+        from fast_agent.mcp.prompt_serialization import save_messages

         # Save messages using the unified save function that auto-detects format
-        save_messages_to_file(self._message_history, filename)
+        save_messages(self._message_history, filename)

     @property
     def message_history(self) -> List[PromptMessageExtended]:
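
Net effect of the two hunks above: the ***SAVE_HISTORY chat command now defaults to a .json filename and writes through the renamed save_messages() helper. A minimal sketch of the new default-naming behavior (the command text and helper name are taken from the diff; the standalone snippet is illustrative):

    text = "***SAVE_HISTORY"          # typed without an explicit filename
    parts = text.split(" ", 1)
    # default changed from "<name>_prompts.txt" to "<name>.json"
    filename = parts[1].strip() if len(parts) > 1 else "assistant.json"
    # the conversation is then written via save_messages(message_history, filename)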

fast_agent/llm/model_database.py
@@ -164,7 +164,11 @@ class ModelDatabase:
     )

     # FIXME: xAI has not documented the max output tokens for Grok 4. Using Grok 3 as a placeholder. Will need to update when available (if ever)
-    GROK_4 = ModelParameters(context_window=256000, max_output_tokens=16385, tokenizes=XAI_VISION)
+    GROK_4 = ModelParameters(context_window=256000, max_output_tokens=16385, tokenizes=TEXT_ONLY)
+
+    GROK_4_VLM = ModelParameters(
+        context_window=2000000, max_output_tokens=16385, tokenizes=XAI_VISION
+    )

     # Source for Grok 3 max output: https://www.reddit.com/r/grok/comments/1j7209p/exploring_grok_3_beta_output_capacity_a_simple/
     # xAI does not document Grok 3 max output tokens, using the above source as a reference.
@@ -240,6 +244,8 @@ class ModelDatabase:
         "gemini-2.5-flash-preview-05-20": GEMINI_FLASH,
         "gemini-2.5-pro-preview-05-06": GEMINI_PRO,
         # xAI Grok Models
+        "grok-4-fast-reasoning": GROK_4_VLM,
+        "grok-4-fast-non-reasoning": GROK_4_VLM,
         "grok-4": GROK_4,
         "grok-4-0709": GROK_4,
         "grok-3": GROK_3,

fast_agent/llm/model_factory.py
@@ -12,9 +12,6 @@ from fast_agent.llm.internal.slow import SlowLLM
 from fast_agent.llm.provider_types import Provider
 from fast_agent.types import RequestParams

-# from fast_agent.workflows.llm.augmented_llm_deepseek import DeekSeekAugmentedLLM
-
-
 # Type alias for LLM classes
 LLMClass = Union[Type[PassthroughLLM], Type[PlaybackLLM], Type[SilentLLM], Type[SlowLLM], type]

@@ -123,6 +120,8 @@ class ModelFactory:
         "kimi": "groq.moonshotai/kimi-k2-instruct-0905",
         "gpt-oss": "groq.openai/gpt-oss-120b",
         "gpt-oss-20b": "groq.openai/gpt-oss-20b",
+        "grok-4-fast": "xai.grok-4-fast-non-reasoning",
+        "grok-4-fast-reasoning": "xai.grok-4-fast-reasoning",
     }

     @staticmethod
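
The two aliases above expand to the full xai.* model strings registered in the model database hunks. A minimal usage sketch, assuming FastAgent is importable from fast_agent.core.fastagent and that the agent decorator accepts a model string; only the alias names and the xai.* identifiers are taken from the diff:

    import asyncio
    from fast_agent.core.fastagent import FastAgent  # assumed import path

    fast = FastAgent("grok-fast-example")

    # "grok-4-fast" resolves to "xai.grok-4-fast-non-reasoning" via the alias table above
    @fast.agent(instruction="You are a helpful assistant", model="grok-4-fast")
    async def main():
        async with fast.run() as agent:
            await agent.send("hello")  # assumed helper for a one-shot message

    if __name__ == "__main__":
        asyncio.run(main())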

fast_agent/llm/provider/bedrock/llm_bedrock.py
@@ -126,7 +126,7 @@ class ModelCapabilities:

 class BedrockLLM(FastAgentLLM[BedrockMessageParam, BedrockMessage]):
     """
-    AWS Bedrock implementation of AugmentedLLM using the Converse API.
+    AWS Bedrock implementation of FastAgentLLM using the Converse API.
     Supports all Bedrock models including Nova, Claude, Meta, etc.
     """


fast_agent/llm/provider/google/llm_google_native.py
@@ -36,10 +36,8 @@ GOOGLE_EXCLUDE_FIELDS = {
     FastAgentLLM.PARAM_MESSAGES,  # Handled by contents
     FastAgentLLM.PARAM_MODEL,  # Handled during client/call setup
     FastAgentLLM.PARAM_SYSTEM_PROMPT,  # Handled by system_instruction in config
-    # AugmentedLLM.PARAM_PARALLEL_TOOL_CALLS, # Handled by tool_config in config
-    FastAgentLLM.PARAM_USE_HISTORY,  # Handled by AugmentedLLM base / this class's logic
+    FastAgentLLM.PARAM_USE_HISTORY,  # Handled by FastAgentLLM base / this class's logic
     FastAgentLLM.PARAM_MAX_ITERATIONS,  # Handled by this class's loop
-    # Add any other OpenAI-specific params not applicable to google.genai
     FastAgentLLM.PARAM_MCP_METADATA,
 }.union(FastAgentLLM.BASE_EXCLUDE_FIELDS)


fast_agent/llm/provider/openai/llm_azure.py
@@ -23,7 +23,7 @@ DEFAULT_AZURE_API_VERSION = "2024-10-21"

 class AzureOpenAILLM(OpenAILLM):
     """
-    Azure OpenAI implementation extending OpenAIAugmentedLLM.
+    Azure OpenAI implementation extending OpenAILLM.
     Handles both API Key and DefaultAzureCredential authentication.
     """


fast_agent/llm/provider/openai/llm_openai.py
@@ -7,7 +7,7 @@ from mcp.types import (
     ContentBlock,
     TextContent,
 )
-from openai import AsyncOpenAI, AuthenticationError
+from openai import APIError, AsyncOpenAI, AuthenticationError
 from openai.lib.streaming.chat import ChatCompletionStreamState

 # from openai.types.beta.chat import
@@ -19,19 +19,17 @@ from openai.types.chat import (
 )
 from pydantic_core import from_json

+from fast_agent.constants import FAST_AGENT_ERROR_CHANNEL
 from fast_agent.core.exceptions import ProviderKeyError
 from fast_agent.core.logging.logger import get_logger
 from fast_agent.core.prompt import Prompt
 from fast_agent.event_progress import ProgressAction
-from fast_agent.llm.fastagent_llm import (
-    FastAgentLLM,
-    RequestParams,
-)
+from fast_agent.llm.fastagent_llm import FastAgentLLM, RequestParams
 from fast_agent.llm.provider.openai.multipart_converter_openai import OpenAIConverter, OpenAIMessage
 from fast_agent.llm.provider_types import Provider
 from fast_agent.llm.usage_tracking import TurnUsage
-from fast_agent.types import PromptMessageExtended
-from fast_agent.types.llm_stop_reason import LlmStopReason
+from fast_agent.mcp.helpers.content_helpers import text_content
+from fast_agent.types import LlmStopReason, PromptMessageExtended

 _logger = get_logger(__name__)

@@ -348,7 +346,11 @@ class OpenAILLM(FastAgentLLM[ChatCompletionMessageParam, ChatCompletionMessage])
             # Use basic streaming API
             stream = await self._openai_client().chat.completions.create(**arguments)
             # Process the stream
-            response = await self._process_stream(stream, model_name)
+            try:
+                response = await self._process_stream(stream, model_name)
+            except APIError as error:
+                self.logger.error("Streaming APIError during OpenAI completion", exc_info=error)
+                return self._stream_failure_response(error, model_name)
         # Track usage if response is valid and has usage data
         if (
             hasattr(response, "usage")
@@ -438,6 +440,53 @@ class OpenAILLM(FastAgentLLM[ChatCompletionMessageParam, ChatCompletionMessage])
             *response_content_blocks, stop_reason=stop_reason, tool_calls=requested_tool_calls
         )

+    def _stream_failure_response(self, error: APIError, model_name: str) -> PromptMessageExtended:
+        """Convert streaming API errors into a graceful assistant reply."""
+
+        provider_label = (
+            self.provider.value if isinstance(self.provider, Provider) else str(self.provider)
+        )
+        detail = getattr(error, "message", None) or str(error)
+        detail = detail.strip() if isinstance(detail, str) else ""
+
+        parts: list[str] = [f"{provider_label} request failed"]
+        if model_name:
+            parts.append(f"for model '{model_name}'")
+        code = getattr(error, "code", None)
+        if code:
+            parts.append(f"(code: {code})")
+        status = getattr(error, "status_code", None)
+        if status:
+            parts.append(f"(status={status})")
+
+        message = " ".join(parts)
+        if detail:
+            message = f"{message}: {detail}"
+
+        user_summary = " ".join(message.split()) if message else ""
+        if user_summary and len(user_summary) > 280:
+            user_summary = user_summary[:277].rstrip() + "..."
+
+        if user_summary:
+            assistant_text = f"I hit an internal error while calling the model: {user_summary}"
+            if not assistant_text.endswith((".", "!", "?")):
+                assistant_text += "."
+            assistant_text += " See fast-agent-error for additional details."
+        else:
+            assistant_text = (
+                "I hit an internal error while calling the model; see fast-agent-error for details."
+            )
+
+        assistant_block = text_content(assistant_text)
+        error_block = text_content(message)
+
+        return PromptMessageExtended(
+            role="assistant",
+            content=[assistant_block],
+            channels={FAST_AGENT_ERROR_CHANNEL: [error_block]},
+            stop_reason=LlmStopReason.ERROR,
+        )
+
     async def _is_tool_stop_reason(self, finish_reason: str) -> bool:
         return True
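
With this change a streaming APIError no longer escapes the completion call: the caller gets back an assistant message whose stop reason is LlmStopReason.ERROR and whose full error text is carried on the fast-agent-error channel. A sketch of how a caller might inspect that reply; the channel constant and stop-reason names come from the hunks above, while the generate() entry point is an assumption:

    from fast_agent.constants import FAST_AGENT_ERROR_CHANNEL
    from fast_agent.types import LlmStopReason

    response = await llm.generate(messages)  # assumed call returning PromptMessageExtended
    if response.stop_reason == LlmStopReason.ERROR:
        print(response.first_text())  # "I hit an internal error while calling the model: ..."
        for block in (response.channels or {}).get(FAST_AGENT_ERROR_CHANNEL, []):
            print(block.text)  # provider, model, code/status and error detail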
 

fast_agent/llm/provider/openai/llm_tensorzero_openai.py
@@ -26,7 +26,7 @@ class TensorZeroOpenAILLM(OpenAILLM):
         self._t0_function_name = kwargs.get("model", "")

         super().__init__(*args, provider=Provider.TENSORZERO, **kwargs)
-        self.logger.info("TensorZeroOpenAIAugmentedLLM initialized.")
+        self.logger.info("TensorZeroOpenAILLM initialized.")

     def _initialize_default_params(self, kwargs: dict) -> RequestParams:
         """

fast_agent/llm/request_params.py
@@ -11,7 +11,7 @@ from pydantic import Field

 class RequestParams(CreateMessageRequestParams):
     """
-    Parameters to configure the AugmentedLLM 'generate' requests.
+    Parameters to configure the FastAgentLLM 'generate' requests.
     """

     messages: List[SamplingMessage] = Field(exclude=True, default=[])

fast_agent/mcp/__init__.py
@@ -24,11 +24,9 @@ from .helpers import (
     split_thinking_content,
     text_content,
 )
-from .prompt_message_extended import PromptMessageExtended

 __all__ = [
     "Prompt",
-    "PromptMessageExtended",
     # Helpers
     "get_text",
     "get_image_data",
@@ -51,4 +49,5 @@ def __getattr__(name: str):
         from .prompt import Prompt  # local import

         return Prompt
+
     raise AttributeError(f"module '{__name__}' has no attribute '{name}'")

fast_agent/mcp/mcp_aggregator.py
@@ -470,12 +470,13 @@ class MCPAggregator(ContextDependent):
         """
         instructions = {}

-        if self.connection_persistence and hasattr(self, '_persistent_connection_manager'):
+        if self.connection_persistence and hasattr(self, "_persistent_connection_manager"):
             # Get instructions from persistent connections
             for server_name in self.server_names:
                 try:
                     server_conn = await self._persistent_connection_manager.get_server(
-                        server_name, client_session_factory=self._create_session_factory(server_name)
+                        server_name,
+                        client_session_factory=self._create_session_factory(server_name),
                     )
                     # Always include server, even if no instructions
                     # Get tool names for this server
@@ -1074,7 +1075,9 @@
             logger.debug(f"Server '{server_name}' does not support tools")
             return

-        await self.display.show_tool_update(aggregator=self, updated_server=server_name)
+        await self.display.show_tool_update(
+            updated_server=server_name, agent_name="Tool List Change Notification"
+        )

         async with self._refresh_lock:
             try:

fast_agent/mcp/prompt_message_extended.py
@@ -12,6 +12,8 @@ from mcp.types import (
 from pydantic import BaseModel

 from fast_agent.mcp.helpers.content_helpers import get_text
+
+# Import directly to avoid circular dependency with types/__init__.py
 from fast_agent.types.llm_stop_reason import LlmStopReason



fast_agent/mcp/prompt_serialization.py
@@ -23,6 +23,7 @@ from mcp.types import (
     EmbeddedResource,
     GetPromptResult,
     ImageContent,
+    PromptMessage,
     TextContent,
     TextResourceContents,
 )
@@ -34,12 +35,30 @@ from fast_agent.mcp.prompts.prompt_constants import (
 )
 from fast_agent.types import PromptMessageExtended

+# -------------------------------------------------------------------------
+# Serialization Helpers
+# -------------------------------------------------------------------------
+
+
+def serialize_to_dict(obj, exclude_none: bool = True):
+    """Standardized Pydantic serialization to dictionary.
+
+    Args:
+        obj: Pydantic model object to serialize
+        exclude_none: Whether to exclude None values (default: True)
+
+    Returns:
+        Dictionary representation suitable for JSON serialization
+    """
+    return obj.model_dump(by_alias=True, mode="json", exclude_none=exclude_none)
+
+
 # -------------------------------------------------------------------------
 # JSON Serialization Functions
 # -------------------------------------------------------------------------


-def multipart_messages_to_get_prompt_result(
+def to_get_prompt_result(
     messages: List[PromptMessageExtended],
 ) -> GetPromptResult:
     """
@@ -60,35 +79,58 @@ def multipart_messages_to_get_prompt_result(
     return GetPromptResult(messages=flat_messages)


-def multipart_messages_to_json(messages: List[PromptMessageExtended]) -> str:
+
+def to_get_prompt_result_json(messages: List[PromptMessageExtended]) -> str:
+    """
+    Convert PromptMessageExtended objects to MCP-compatible GetPromptResult JSON.
+
+    This is a lossy conversion that flattens multipart messages and loses extended fields
+    like tool_calls, channels, and stop_reason. Use for MCP server compatibility.
+
+    Args:
+        messages: List of PromptMessageExtended objects
+
+    Returns:
+        JSON string in GetPromptResult format
+    """
+    result = to_get_prompt_result(messages)
+    result_dict = serialize_to_dict(result)
+    return json.dumps(result_dict, indent=2)
+
+
+def to_json(messages: List[PromptMessageExtended]) -> str:
     """
-    Convert PromptMessageExtended objects to a pure JSON string in GetPromptResult format.
+    Convert PromptMessageExtended objects directly to JSON, preserving all extended fields.

-    This approach preserves all data and structure exactly as is, compatible with
-    the MCP GetPromptResult type.
+    This preserves tool_calls, tool_results, channels, and stop_reason that would be lost
+    in the standard GetPromptResult conversion.

     Args:
         messages: List of PromptMessageExtended objects

     Returns:
-        JSON string representation with GetPromptResult container
+        JSON string representation preserving all PromptMessageExtended data
     """
-    # First convert to GetPromptResult
-    result = multipart_messages_to_get_prompt_result(messages)
+    # Convert each message to dict using standardized serialization
+    messages_dicts = [serialize_to_dict(msg) for msg in messages]

-    # Convert to dictionary using model_dump with proper JSON mode
-    result_dict = result.model_dump(by_alias=True, mode="json", exclude_none=True)
+    # Wrap in a container similar to GetPromptResult for consistency
+    result_dict = {"messages": messages_dicts}

     # Convert to JSON string
     return json.dumps(result_dict, indent=2)


-def json_to_extended_messages(json_str: str) -> List[PromptMessageExtended]:
+def from_json(json_str: str) -> List[PromptMessageExtended]:
     """
-    Parse a JSON string in GetPromptResult format into PromptMessageExtended objects.
+    Parse a JSON string into PromptMessageExtended objects.
+
+    Handles both:
+    - Enhanced format with full PromptMessageExtended data
+    - Legacy GetPromptResult format (missing extended fields default to None)

     Args:
-        json_str: JSON string representation of GetPromptResult
+        json_str: JSON string representation

     Returns:
         List of PromptMessageExtended objects
@@ -96,31 +138,66 @@ def json_to_extended_messages(json_str: str) -> List[PromptMessageExtended]:
     # Parse JSON to dictionary
     result_dict = json.loads(json_str)

-    # Parse as GetPromptResult
-    result = GetPromptResult.model_validate(result_dict)
+    # Extract messages array
+    messages_data = result_dict.get("messages", [])
+
+    extended_messages: List[PromptMessageExtended] = []
+    basic_buffer: List[PromptMessage] = []
+
+    def flush_basic_buffer() -> None:
+        nonlocal basic_buffer
+        if not basic_buffer:
+            return
+        extended_messages.extend(PromptMessageExtended.to_extended(basic_buffer))
+        basic_buffer = []
+
+    for msg_data in messages_data:
+        content = msg_data.get("content")
+        is_enhanced = isinstance(content, list)
+        if is_enhanced:
+            try:
+                msg = PromptMessageExtended.model_validate(msg_data)
+            except Exception:
+                is_enhanced = False
+            else:
+                flush_basic_buffer()
+                extended_messages.append(msg)
+                continue
+
+        try:
+            basic_msg = PromptMessage.model_validate(msg_data)
+        except Exception:
+            continue
+        basic_buffer.append(basic_msg)

-    # Convert to multipart messages
-    return PromptMessageExtended.to_extended(result.messages)
+    flush_basic_buffer()

+    return extended_messages

-def save_messages_to_json_file(messages: List[PromptMessageExtended], file_path: str) -> None:
+
+def save_json(messages: List[PromptMessageExtended], file_path: str) -> None:
     """
-    Save PromptMessageExtended objects to a JSON file.
+    Save PromptMessageExtended objects to a JSON file using enhanced format.
+
+    Uses the enhanced format that preserves tool_calls, tool_results, channels,
+    and stop_reason data.

     Args:
         messages: List of PromptMessageExtended objects
         file_path: Path to save the JSON file
     """
-    json_str = multipart_messages_to_json(messages)
+    json_str = to_json(messages)

     with open(file_path, "w", encoding="utf-8") as f:
         f.write(json_str)


-def load_messages_from_json_file(file_path: str) -> List[PromptMessageExtended]:
+def load_json(file_path: str) -> List[PromptMessageExtended]:
     """
     Load PromptMessageExtended objects from a JSON file.

+    Handles both enhanced format and legacy GetPromptResult format.
+
     Args:
         file_path: Path to the JSON file
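
Taken together, the renamed helpers give a lossless in-memory round trip: to_json() keeps the extended fields and from_json() accepts both the new format and legacy GetPromptResult JSON. A small round-trip sketch; the serialization function names come from the diff, while Prompt.user()/Prompt.assistant() returning PromptMessageExtended is an assumption:

    from fast_agent.core.prompt import Prompt
    from fast_agent.mcp.prompt_serialization import from_json, to_json

    messages = [
        Prompt.user("What changed in 0.3.8?"),
        Prompt.assistant("Prompt serialization was reworked."),
    ]
    json_str = to_json(messages)    # {"messages": [...]} with tool_calls/channels/stop_reason kept
    restored = from_json(json_str)  # also parses legacy GetPromptResult JSON
    assert restored[0].first_text() == "What changed in 0.3.8?"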
 
@@ -130,14 +207,14 @@ def load_messages_from_json_file(file_path: str) -> List[PromptMessageExtended]:
     with open(file_path, "r", encoding="utf-8") as f:
         json_str = f.read()

-    return json_to_extended_messages(json_str)
+    return from_json(json_str)


-def save_messages_to_file(messages: List[PromptMessageExtended], file_path: str) -> None:
+def save_messages(messages: List[PromptMessageExtended], file_path: str) -> None:
     """
     Save PromptMessageExtended objects to a file, with format determined by file extension.

-    Uses GetPromptResult JSON format for .json files (fully MCP compatible) and
+    Uses enhanced JSON format for .json files (preserves all fields) and
     delimited text format for other extensions.

     Args:
@@ -147,19 +224,16 @@ def save_messages_to_file(messages: List[PromptMessageExtended], file_path: str)
     path_str = str(file_path).lower()

     if path_str.endswith(".json"):
-        # Use GetPromptResult JSON format for .json files (fully MCP compatible)
-        save_messages_to_json_file(messages, file_path)
+        save_json(messages, file_path)
     else:
-        # Use delimited text format for other extensions
-        save_messages_to_delimited_file(messages, file_path)
+        save_delimited(messages, file_path)


-def load_messages_from_file(file_path: str) -> List[PromptMessageExtended]:
+def load_messages(file_path: str) -> List[PromptMessageExtended]:
     """
     Load PromptMessageExtended objects from a file, with format determined by file extension.

-    Uses GetPromptResult JSON format for .json files (fully MCP compatible) and
-    delimited text format for other extensions.
+    Uses JSON format for .json files and delimited text format for other extensions.

     Args:
         file_path: Path to the file
@@ -170,11 +244,9 @@ def load_messages_from_file(file_path: str) -> List[PromptMessageExtended]:
     path_str = str(file_path).lower()

     if path_str.endswith(".json"):
-        # Use GetPromptResult JSON format for .json files (fully MCP compatible)
-        return load_messages_from_json_file(file_path)
+        return load_json(file_path)
     else:
-        # Use delimited text format for other extensions
-        return load_messages_from_delimited_file(file_path)
+        return load_delimited(file_path)


 # -------------------------------------------------------------------------
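
The file-level helpers keep the same extension-based dispatch under shorter names: .json paths go through save_json()/load_json(), everything else through the delimited text format. A usage sketch; the function names come from the diff, the file paths are illustrative, and Prompt.user()/Prompt.assistant() returning PromptMessageExtended is an assumption:

    from fast_agent.core.prompt import Prompt
    from fast_agent.mcp.prompt_serialization import load_messages, save_messages

    messages = [Prompt.user("hello"), Prompt.assistant("hi there")]
    save_messages(messages, "history.json")  # enhanced JSON, extended fields preserved
    save_messages(messages, "history.txt")   # delimited text format
    restored = load_messages("history.json")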

@@ -238,7 +310,7 @@ def multipart_messages_to_delimited_format(
             delimited_content.append(resource_delimiter)

             # Convert to dictionary using proper JSON mode
-            content_dict = content.model_dump(by_alias=True, mode="json", exclude_none=True)
+            content_dict = serialize_to_dict(content)

             # Add to delimited content as JSON
             delimited_content.append(json.dumps(content_dict, indent=2))
@@ -253,7 +325,7 @@ def multipart_messages_to_delimited_format(
             delimited_content.append(resource_delimiter)

             # Convert to dictionary using proper JSON mode
-            content_dict = content.model_dump(by_alias=True, mode="json", exclude_none=True)
+            content_dict = serialize_to_dict(content)

             # Add to delimited content as JSON
             delimited_content.append(json.dumps(content_dict, indent=2))
@@ -281,6 +353,17 @@ def delimited_format_to_extended_messages(
     Returns:
         List of PromptMessageExtended objects
     """
+    if user_delimiter not in content and assistant_delimiter not in content:
+        stripped = content.strip()
+        if not stripped:
+            return []
+        return [
+            PromptMessageExtended(
+                role="user",
+                content=[TextContent(type="text", text=stripped)],
+            )
+        ]
+
     lines = content.split("\n")
     messages = []
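
The new guard means plain text with no user/assistant delimiters now loads as a single user message instead of producing nothing. A small sketch of that behavior; the function name is taken from the diff, and calling it with only the content argument assumes the delimiter parameters keep their USER_DELIMITER/ASSISTANT_DELIMITER defaults:

    from fast_agent.mcp.prompt_serialization import delimited_format_to_extended_messages

    msgs = delimited_format_to_extended_messages("Just plain text, no delimiters.")
    assert len(msgs) == 1 and msgs[0].role == "user"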
 
@@ -365,11 +448,13 @@ def delimited_format_to_extended_messages(
                     resource_uri = f"resource://fast-agent/{resource_uri}"

                 # Create a simple resource with just the URI
+                # For legacy format, we don't have the actual content, just the reference
                 resource = EmbeddedResource(
                     type="resource",
                     resource=TextResourceContents(
                         uri=resource_uri,
                         mimeType="text/plain",
+                        text="",  # Legacy format doesn't include content
                     ),
                 )
                 resource_contents.append(resource)
@@ -436,7 +521,7 @@ def delimited_format_to_extended_messages(
     return messages


-def save_messages_to_delimited_file(
+def save_delimited(
     messages: List[PromptMessageExtended],
     file_path: str,
     user_delimiter: str = USER_DELIMITER,
@@ -467,7 +552,7 @@ def save_messages_to_delimited_file(
         f.write("\n".join(delimited_content))


-def load_messages_from_delimited_file(
+def load_delimited(
     file_path: str,
     user_delimiter: str = USER_DELIMITER,
     assistant_delimiter: str = ASSISTANT_DELIMITER,