fast-agent-mcp 0.3.6__py3-none-any.whl → 0.3.7__py3-none-any.whl

This diff shows the published contents of fast-agent-mcp 0.3.6 and 0.3.7 exactly as they appear in the public registry, and is provided for informational purposes only.

This version of fast-agent-mcp has been flagged as potentially problematic.

@@ -16,8 +16,6 @@ from typing (
     Mapping,
     Optional,
     Sequence,
-    Tuple,
-    Type,
     TypeVar,
    Union,
 )
@@ -195,8 +193,9 @@ class McpAgent(ABC, ToolAgent):
            server_instructions = ""

        # Replace the template variable
-        self.instruction = self.instruction.replace("{{serverInstructions}}", server_instructions)
-
+        self.instruction = self.instruction.replace(
+            "{{serverInstructions}}", server_instructions
+        )

        # Update default request params to match
        if self._default_request_params:
@@ -204,7 +203,9 @@ class McpAgent(ABC, ToolAgent):

        self.logger.debug(f"Applied instruction templates for agent {self._name}")

-    def _format_server_instructions(self, instructions_data: Dict[str, tuple[str | None, List[str]]]) -> str:
+    def _format_server_instructions(
+        self, instructions_data: Dict[str, tuple[str | None, List[str]]]
+    ) -> str:
        """
        Format server instructions with XML tags and tool lists.

@@ -228,7 +229,7 @@ class McpAgent(ABC, ToolAgent):
            tools_list = ", ".join(prefixed_tools) if prefixed_tools else "No tools available"

            formatted_parts.append(
-                f"<mcp-server name=\"{server_name}\">\n"
+                f'<mcp-server name="{server_name}">\n'
                f"<tools>{tools_list}</tools>\n"
                f"<instructions>\n{instructions}\n</instructions>\n"
                f"</mcp-server>"
@@ -249,31 +250,31 @@ class McpAgent(ABC, ToolAgent):
    ) -> str:
        return await self.send(message)

-    async def send(
-        self,
-        message: Union[
-            str,
-            PromptMessage,
-            PromptMessageExtended,
-            Sequence[Union[str, PromptMessage, PromptMessageExtended]],
-        ],
-        request_params: RequestParams | None = None,
-    ) -> str:
-        """
-        Send a message to the agent and get a response.
-
-        Args:
-            message: Message content in various formats:
-                - String: Converted to a user PromptMessageExtended
-                - PromptMessage: Converted to PromptMessageExtended
-                - PromptMessageExtended: Used directly
-            request_params: Optional request parameters
-
-        Returns:
-            The agent's response as a string
-        """
-        response = await self.generate(message, request_params)
-        return response.last_text() or ""
+    # async def send(
+    #     self,
+    #     message: Union[
+    #         str,
+    #         PromptMessage,
+    #         PromptMessageExtended,
+    #         Sequence[Union[str, PromptMessage, PromptMessageExtended]],
+    #     ],
+    #     request_params: RequestParams | None = None,
+    # ) -> str:
+    #     """
+    #     Send a message to the agent and get a response.
+
+    #     Args:
+    #         message: Message content in various formats:
+    #             - String: Converted to a user PromptMessageExtended
+    #             - PromptMessage: Converted to PromptMessageExtended
+    #             - PromptMessageExtended: Used directly
+    #         request_params: Optional request parameters
+
+    #     Returns:
+    #         The agent's response as a string
+    #     """
+    #     response = await self.generate(message, request_params)
+    #     return response.last_text() or ""

    def _matches_pattern(self, name: str, pattern: str, server_name: str) -> bool:
        """
@@ -597,6 +598,7 @@ class McpAgent(ABC, ToolAgent):
            return PromptMessageExtended(role="user", tool_results={})

        tool_results: dict[str, CallToolResult] = {}
+        self._tool_loop_error = None

        # Cache available tool names (original, not namespaced) for display
        available_tools = [
@@ -613,6 +615,27 @@ class McpAgent(ABC, ToolAgent):
            namespaced_tool = self._aggregator._namespaced_tool_map.get(tool_name)
            display_tool_name = namespaced_tool.tool.name if namespaced_tool else tool_name

+            tool_available = False
+            if tool_name == HUMAN_INPUT_TOOL_NAME:
+                tool_available = True
+            elif namespaced_tool:
+                tool_available = True
+            else:
+                tool_available = any(
+                    candidate.tool.name == tool_name
+                    for candidate in self._aggregator._namespaced_tool_map.values()
+                )
+
+            if not tool_available:
+                error_message = f"Tool '{display_tool_name}' is not available"
+                self.logger.error(error_message)
+                self._mark_tool_loop_error(
+                    correlation_id=correlation_id,
+                    error_message=error_message,
+                    tool_results=tool_results,
+                )
+                break
+
            # Find the index of the current tool in available_tools for highlighting
            highlight_index = None
            try:
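
The new guard covers three cases before dispatching a tool call: the built-in human-input tool, an exact namespaced match, and a fallback scan for the original (un-namespaced) tool name. A condensed sketch of that lookup, using the names from the diff (HUMAN_INPUT_TOOL_NAME is imported from fast_agent.constants; the map values are assumed to carry a .tool.name as above):

```python
from fast_agent.constants import HUMAN_INPUT_TOOL_NAME

def tool_is_available(tool_name, namespaced_tool, namespaced_tool_map) -> bool:
    """Condensed form of the availability check added above (a sketch, not library API)."""
    if tool_name == HUMAN_INPUT_TOOL_NAME:  # human input is always callable
        return True
    if namespaced_tool is not None:         # exact namespaced match
        return True
    # Fall back to matching the original, un-namespaced tool name.
    return any(entry.tool.name == tool_name for entry in namespaced_tool_map.values())
```
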
@@ -650,7 +673,7 @@ class McpAgent(ABC, ToolAgent):
                # Show error result too
                self.display.show_tool_result(name=self._name, result=error_result)

-        return PromptMessageExtended(role="user", tool_results=tool_results)
+        return self._finalize_tool_results(tool_results)

    async def apply_prompt_template(self, prompt_result: GetPromptResult, prompt_name: str) -> str:
        """
@@ -668,36 +691,36 @@ class McpAgent(ABC, ToolAgent):
        with self._tracer.start_as_current_span(f"Agent: '{self._name}' apply_prompt_template"):
            return await self._llm.apply_prompt_template(prompt_result, prompt_name)

-    async def structured(
-        self,
-        messages: Union[
-            str,
-            PromptMessage,
-            PromptMessageExtended,
-            List[Union[str, PromptMessage, PromptMessageExtended]],
-        ],
-        model: Type[ModelT],
-        request_params: RequestParams | None = None,
-    ) -> Tuple[ModelT | None, PromptMessageExtended]:
-        """
-        Apply the prompt and return the result as a Pydantic model.
-        Normalizes input messages and delegates to the attached LLM.
-
-        Args:
-            messages: Message(s) in various formats:
-                - String: Converted to a user PromptMessageExtended
-                - PromptMessage: Converted to PromptMessageExtended
-                - PromptMessageExtended: Used directly
-                - List of any combination of the above
-            model: The Pydantic model class to parse the result into
-            request_params: Optional parameters to configure the LLM request
-
-        Returns:
-            An instance of the specified model, or None if coercion fails
-        """
-
-        with self._tracer.start_as_current_span(f"Agent: '{self._name}' structured"):
-            return await super().structured(messages, model, request_params)
+    # async def structured(
+    #     self,
+    #     messages: Union[
+    #         str,
+    #         PromptMessage,
+    #         PromptMessageExtended,
+    #         Sequence[Union[str, PromptMessage, PromptMessageExtended]],
+    #     ],
+    #     model: Type[ModelT],
+    #     request_params: RequestParams | None = None,
+    # ) -> Tuple[ModelT | None, PromptMessageExtended]:
+    #     """
+    #     Apply the prompt and return the result as a Pydantic model.
+    #     Normalizes input messages and delegates to the attached LLM.
+
+    #     Args:
+    #         messages: Message(s) in various formats:
+    #             - String: Converted to a user PromptMessageExtended
+    #             - PromptMessage: Converted to PromptMessageExtended
+    #             - PromptMessageExtended: Used directly
+    #             - List of any combination of the above
+    #         model: The Pydantic model class to parse the result into
+    #         request_params: Optional parameters to configure the LLM request
+
+    #     Returns:
+    #         An instance of the specified model, or None if coercion fails
+    #     """
+
+    #     with self._tracer.start_as_current_span(f"Agent: '{self._name}' structured"):
+    #         return await super().structured(messages, model, request_params)

    async def apply_prompt_messages(
        self, prompts: List[PromptMessageExtended], request_params: RequestParams | None = None
@@ -5,7 +5,7 @@ from mcp.types import CallToolResult, ListToolsResult, Tool

 from fast_agent.agents.agent_types import AgentConfig
 from fast_agent.agents.llm_agent import LlmAgent
-from fast_agent.constants import HUMAN_INPUT_TOOL_NAME
+from fast_agent.constants import FAST_AGENT_ERROR_CHANNEL, HUMAN_INPUT_TOOL_NAME
 from fast_agent.context import Context
 from fast_agent.core.logging.logger import get_logger
 from fast_agent.mcp.helpers.content_helpers import text_content
@@ -42,6 +42,7 @@ class ToolAgent(LlmAgent):

        self._execution_tools: dict[str, FastMCPTool] = {}
        self._tool_schemas: list[Tool] = []
+        self._tool_loop_error: str | None = None

        # Build a working list of tools and auto-inject human-input tool if missing
        working_tools: list[FastMCPTool | Callable] = list(tools) if tools else []
@@ -97,10 +98,19 @@ class ToolAgent(LlmAgent):
            )

            if LlmStopReason.TOOL_USE == result.stop_reason:
+                self._tool_loop_error = None
                if self.config.use_history:
-                    messages = [await self.run_tools(result)]
+                    tool_message = await self.run_tools(result)
+                    if self._tool_loop_error:
+                        result.stop_reason = LlmStopReason.ERROR
+                        break
+                    messages = [tool_message]
                else:
-                    messages.extend([result, await self.run_tools(result)])
+                    tool_message = await self.run_tools(result)
+                    if self._tool_loop_error:
+                        result.stop_reason = LlmStopReason.ERROR
+                        break
+                    messages.extend([result, tool_message])
            else:
                break

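The effect of this hunk: after each round of tool execution the loop consults _tool_loop_error, and a rejected tool call now flips the stop reason to ERROR and exits instead of re-prompting the model. A hypothetical driver illustrating just that control flow (this is not the library's actual loop; LlmStopReason's import path is not shown in this diff):

```python
async def drive_until_stop(agent, result, messages: list) -> None:
    """Hypothetical driver showing the early-exit behavior added above."""
    while result.stop_reason == LlmStopReason.TOOL_USE:
        tool_message = await agent.run_tools(result)  # may set agent._tool_loop_error
        if agent._tool_loop_error:
            result.stop_reason = LlmStopReason.ERROR  # surface the failure to callers
            break
        messages.append(tool_message)
        result = await agent.generate(messages)       # assumed next-turn call
```
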
@@ -123,12 +133,23 @@ class ToolAgent(LlmAgent):
            return PromptMessageExtended(role="user", tool_results={})

        tool_results: dict[str, CallToolResult] = {}
+        self._tool_loop_error = None
        # TODO -- use gather() for parallel results, update display
        available_tools = [t.name for t in (await self.list_tools()).tools]
        for correlation_id, tool_request in request.tool_calls.items():
            tool_name = tool_request.params.name
            tool_args = tool_request.params.arguments or {}

+            if tool_name not in self._execution_tools:
+                error_message = f"Tool '{tool_name}' is not available"
+                logger.error(error_message)
+                self._mark_tool_loop_error(
+                    correlation_id=correlation_id,
+                    error_message=error_message,
+                    tool_results=tool_results,
+                )
+                break
+
            # Find the index of the current tool in available_tools for highlighting
            highlight_index = None
            try:
@@ -151,7 +172,32 @@ class ToolAgent(LlmAgent):
            tool_results[correlation_id] = result
            self.display.show_tool_result(name=self.name, result=result)

-        return PromptMessageExtended(role="user", tool_results=tool_results)
+        return self._finalize_tool_results(tool_results)
+
+    def _mark_tool_loop_error(
+        self,
+        *,
+        correlation_id: str,
+        error_message: str,
+        tool_results: dict[str, CallToolResult],
+    ) -> None:
+        error_result = CallToolResult(
+            content=[text_content(error_message)],
+            isError=True,
+        )
+        tool_results[correlation_id] = error_result
+        self.display.show_tool_result(name=self.name, result=error_result)
+        self._tool_loop_error = error_message
+
+    def _finalize_tool_results(
+        self, tool_results: dict[str, CallToolResult]
+    ) -> PromptMessageExtended:
+        channels = None
+        if self._tool_loop_error:
+            channels = {
+                FAST_AGENT_ERROR_CHANNEL: [text_content(self._tool_loop_error)],
+            }
+        return PromptMessageExtended(role="user", tool_results=tool_results, channels=channels)

    async def list_tools(self) -> ListToolsResult:
        """Return available tools for this agent. Overridable by subclasses."""
@@ -40,7 +40,7 @@ async def _run_agent(
    """Async implementation to run an interactive agent."""
    from pathlib import Path

-    from fast_agent.mcp.prompts.prompt_load import load_prompt_multipart
+    from fast_agent.mcp.prompts.prompt_load import load_prompt

    # Create the FastAgent instance

@@ -110,7 +110,7 @@ async def _run_agent(
        display = ConsoleDisplay(config=None)
        display.show_parallel_results(agent.parallel)
    elif prompt_file:
-        prompt = load_prompt_multipart(Path(prompt_file))
+        prompt = load_prompt(Path(prompt_file))
        await agent.parallel.generate(prompt)
        display = ConsoleDisplay(config=None)
        display.show_parallel_results(agent.parallel)
@@ -135,7 +135,7 @@ async def _run_agent(
        # Print the response and exit
        print(response)
    elif prompt_file:
-        prompt = load_prompt_multipart(Path(prompt_file))
+        prompt = load_prompt(Path(prompt_file))
        response = await agent.agent.generate(prompt)
        print(f"\nLoaded {len(prompt)} messages from prompt file '{prompt_file}'")
        await agent.interactive()
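
load_prompt replaces load_prompt_multipart at every call site in this release. Judging by the call sites in this diff, it takes a Path and returns a list of PromptMessageExtended messages; a minimal usage sketch (the filename is illustrative):

```python
from pathlib import Path

from fast_agent.mcp.prompts.prompt_load import load_prompt

messages = load_prompt(Path("conversation.json"))  # illustrative filename
print(f"Loaded {len(messages)} messages")          # mirrors the CLI's own output above
```
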
fast_agent/constants.py CHANGED
@@ -6,3 +6,5 @@ Global constants for fast_agent with minimal dependencies to avoid circular impo
 HUMAN_INPUT_TOOL_NAME = "__human_input"
 MCP_UI = "mcp-ui"
 REASONING = "reasoning"
+FAST_AGENT_ERROR_CHANNEL = "fast-agent-error"
+FAST_AGENT_REMOVED_METADATA_CHANNEL = "fast-agent-removed-meta"
@@ -34,6 +34,8 @@ class AgentApp:
        Args:
            agents: Dictionary of agent instances keyed by name
        """
+        if len(agents) == 0:
+            raise ValueError("No agents provided!")
        self._agents = agents

    def __getitem__(self, key: str) -> AgentProtocol:
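
With this check, constructing an AgentApp over an empty mapping fails fast instead of yielding an app with no agents. A tiny illustration (AgentApp's import path is not shown in this diff, so it is assumed to be in scope):

```python
# AgentApp is assumed imported; its module path is not part of this diff.
try:
    AgentApp({})
except ValueError as exc:
    print(exc)  # -> No agents provided!
```
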
@@ -3,7 +3,8 @@ Direct factory functions for creating agent and workflow instances without proxi
 Implements type-safe factories with improved error handling.
 """

-from typing import Any, Dict, Optional, Protocol, TypeVar
+from functools import partial
+from typing import Any, Dict, List, Optional, Protocol, TypeVar

 from fast_agent.agents import McpAgent
 from fast_agent.agents.agent_types import AgentConfig, AgentType
@@ -379,6 +380,35 @@ async def create_agents_by_type(
    return result_agents


+async def active_agents_in_dependency_group(
+    app_instance: Core,
+    agents_dict: AgentConfigDict,
+    model_factory_func: ModelFactoryFunctionProtocol,
+    group: List[str],
+    active_agents: AgentDict,
+):
+    """
+    For each of the possible agent types, create agents and update the active agents dictionary.
+
+    Notice: This function modifies the active_agents dictionary in-place which is a feature (no copies).
+    """
+    type_of_agents = list(map(lambda c: (c, c.value), AgentType))
+    for agent_type, agent_type_value in type_of_agents:
+        agents_dict_local = {
+            name: agents_dict[name]
+            for name in group
+            if agents_dict[name]["type"] == agent_type_value
+        }
+        agents = await create_agents_by_type(
+            app_instance,
+            agents_dict_local,
+            agent_type,
+            model_factory_func,
+            active_agents,
+        )
+        active_agents.update(agents)
+
+
 async def create_agents_in_dependency_order(
    app_instance: Core,
    agents_dict: AgentConfigDict,
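
The new helper iterates over every AgentType, filters the group's configs down to that type, and hands each subset to create_agents_by_type. The filtering step in isolation, with made-up config entries (the real type strings are the AgentType enum values, which this diff does not spell out):

```python
# Illustrative only: config shapes and type strings are invented for the demo.
group = ["researcher", "writer", "router"]
agents_dict = {
    "researcher": {"type": "basic"},
    "writer": {"type": "basic"},
    "router": {"type": "router"},
}
for type_value in {cfg["type"] for cfg in agents_dict.values()}:
    subset = {name: agents_dict[name] for name in group if agents_dict[name]["type"] == type_value}
    print(type_value, sorted(subset))  # each subset would feed one create_agents_by_type call
```
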
@@ -403,127 +433,16 @@ async def create_agents_in_dependency_order(
    # Create a dictionary to store all active agents/workflows
    active_agents: AgentDict = {}

+    active_agents_in_dependency_group_partial = partial(
+        active_agents_in_dependency_group,
+        app_instance,
+        agents_dict,
+        model_factory_func,
+    )
+
    # Create agent proxies for each group in dependency order
    for group in dependencies:
-        # Create basic agents first
-        # Note: We compare string values from config with the Enum's string value
-        if AgentType.BASIC.value in [agents_dict[name]["type"] for name in group]:
-            basic_agents = await create_agents_by_type(
-                app_instance,
-                {
-                    name: agents_dict[name]
-                    for name in group
-                    if agents_dict[name]["type"] == AgentType.BASIC.value
-                },
-                AgentType.BASIC,
-                model_factory_func,
-                active_agents,
-            )
-            active_agents.update(basic_agents)
-
-        # Create custom agents first
-        if AgentType.CUSTOM.value in [agents_dict[name]["type"] for name in group]:
-            basic_agents = await create_agents_by_type(
-                app_instance,
-                {
-                    name: agents_dict[name]
-                    for name in group
-                    if agents_dict[name]["type"] == AgentType.CUSTOM.value
-                },
-                AgentType.CUSTOM,
-                model_factory_func,
-                active_agents,
-            )
-            active_agents.update(basic_agents)
-
-        # Create parallel agents
-        if AgentType.PARALLEL.value in [agents_dict[name]["type"] for name in group]:
-            parallel_agents = await create_agents_by_type(
-                app_instance,
-                {
-                    name: agents_dict[name]
-                    for name in group
-                    if agents_dict[name]["type"] == AgentType.PARALLEL.value
-                },
-                AgentType.PARALLEL,
-                model_factory_func,
-                active_agents,
-            )
-            active_agents.update(parallel_agents)
-
-        # Create router agents
-        if AgentType.ROUTER.value in [agents_dict[name]["type"] for name in group]:
-            router_agents = await create_agents_by_type(
-                app_instance,
-                {
-                    name: agents_dict[name]
-                    for name in group
-                    if agents_dict[name]["type"] == AgentType.ROUTER.value
-                },
-                AgentType.ROUTER,
-                model_factory_func,
-                active_agents,
-            )
-            active_agents.update(router_agents)
-
-        # Create chain agents
-        if AgentType.CHAIN.value in [agents_dict[name]["type"] for name in group]:
-            chain_agents = await create_agents_by_type(
-                app_instance,
-                {
-                    name: agents_dict[name]
-                    for name in group
-                    if agents_dict[name]["type"] == AgentType.CHAIN.value
-                },
-                AgentType.CHAIN,
-                model_factory_func,
-                active_agents,
-            )
-            active_agents.update(chain_agents)
-
-        # Create evaluator-optimizer agents
-        if AgentType.EVALUATOR_OPTIMIZER.value in [agents_dict[name]["type"] for name in group]:
-            evaluator_agents = await create_agents_by_type(
-                app_instance,
-                {
-                    name: agents_dict[name]
-                    for name in group
-                    if agents_dict[name]["type"] == AgentType.EVALUATOR_OPTIMIZER.value
-                },
-                AgentType.EVALUATOR_OPTIMIZER,
-                model_factory_func,
-                active_agents,
-            )
-            active_agents.update(evaluator_agents)
-
-        if AgentType.ORCHESTRATOR.value in [agents_dict[name]["type"] for name in group]:
-            orchestrator_agents = await create_agents_by_type(
-                app_instance,
-                {
-                    name: agents_dict[name]
-                    for name in group
-                    if agents_dict[name]["type"] == AgentType.ORCHESTRATOR.value
-                },
-                AgentType.ORCHESTRATOR,
-                model_factory_func,
-                active_agents,
-            )
-            active_agents.update(orchestrator_agents)
-
-        # Create orchestrator2 agents last since they might depend on other agents
-        if AgentType.ITERATIVE_PLANNER.value in [agents_dict[name]["type"] for name in group]:
-            orchestrator2_agents = await create_agents_by_type(
-                app_instance,
-                {
-                    name: agents_dict[name]
-                    for name in group
-                    if agents_dict[name]["type"] == AgentType.ITERATIVE_PLANNER.value
-                },
-                AgentType.ITERATIVE_PLANNER,
-                model_factory_func,
-                active_agents,
-            )
-            active_agents.update(orchestrator2_agents)
+        await active_agents_in_dependency_group_partial(group, active_agents)

    return active_agents

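This refactor collapses eight near-identical per-type blocks into one loop, using functools.partial to freeze the three arguments that never change across groups. A self-contained demonstration of the pattern (names here are generic, not the library's):

```python
import asyncio
from functools import partial

async def build_group(app: str, configs: dict, factory: str, group: list, registry: dict) -> None:
    # Stand-in for active_agents_in_dependency_group: mutates registry in place.
    registry.update({name: f"{factory}:{configs[name]}" for name in group})

async def main() -> None:
    registry: dict = {}
    build = partial(build_group, "app", {"a": 1, "b": 2, "c": 3}, "factory")
    for group in [["a", "b"], ["c"]]:  # dependency-ordered groups
        await build(group, registry)   # only the per-group arguments remain
    print(registry)                    # {'a': 'factory:1', 'b': 'factory:2', 'c': 'factory:3'}

asyncio.run(main())
```
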
@@ -75,7 +75,7 @@ from fast_agent.core.validation (
    validate_server_references,
    validate_workflow_references,
 )
-from fast_agent.mcp.prompts.prompt_load import load_prompt_multipart
+from fast_agent.mcp.prompts.prompt_load import load_prompt
 from fast_agent.ui.usage_display import display_usage_report

 if TYPE_CHECKING:
@@ -543,7 +543,7 @@ class FastAgent:

        if hasattr(self.args, "prompt_file") and self.args.prompt_file:
            agent_name = self.args.agent
-            prompt: List[PromptMessageExtended] = load_prompt_multipart(
+            prompt: List[PromptMessageExtended] = load_prompt(
                Path(self.args.prompt_file)
            )
            if agent_name not in active_agents:
@@ -10,7 +10,7 @@ from __future__ import annotations

 from typing import TYPE_CHECKING, Optional

-from fast_agent.mcp.prompt_serialization import save_messages_to_file
+from fast_agent.mcp.prompt_serialization import save_messages

 if TYPE_CHECKING:
    from fast_agent.interfaces import AgentProtocol
@@ -35,10 +35,10 @@ class HistoryExporter:
            The path that was written to.
        """
        # Determine a default filename when not provided
-        target = filename or f"{getattr(agent, 'name', 'assistant')}_prompts.txt"
+        target = filename or f"{getattr(agent, 'name', 'assistant')}.json"

        messages = agent.message_history
-        save_messages_to_file(messages, target)
+        save_messages(messages, target)

        # Return and optionally print a small confirmation
        return target
@@ -198,7 +198,7 @@ class FastAgentLLM(ContextDependent, FastAgentLLMProtocol, Generic[MessageParamT
        if messages[-1].first_text().startswith("***SAVE_HISTORY"):
            parts: list[str] = messages[-1].first_text().split(" ", 1)
            filename: str = (
-                parts[1].strip() if len(parts) > 1 else f"{self.name or 'assistant'}_prompts.txt"
+                parts[1].strip() if len(parts) > 1 else f"{self.name or 'assistant'}.json"
            )
            await self._save_history(filename)
            return Prompt.assistant(f"History saved to {filename}")
@@ -589,10 +589,10 @@ class FastAgentLLM(ContextDependent, FastAgentLLMProtocol, Generic[MessageParamT
        Uses JSON format for .json files (MCP SDK compatible format) and
        delimited text format for other extensions.
        """
-        from fast_agent.mcp.prompt_serialization import save_messages_to_file
+        from fast_agent.mcp.prompt_serialization import save_messages

        # Save messages using the unified save function that auto-detects format
-        save_messages_to_file(self._message_history, filename)
+        save_messages(self._message_history, filename)

    @property
    def message_history(self) -> List[PromptMessageExtended]:
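
Taken together, these last hunks rename save_messages_to_file to save_messages and move the default history filename from <name>_prompts.txt to <name>.json. Per the docstring above, the on-disk format follows the extension: JSON (MCP SDK compatible) for .json, delimited text otherwise. A usage sketch (agent is assumed to be any object exposing message_history):

```python
from fast_agent.mcp.prompt_serialization import save_messages

# `agent` is assumed in scope, e.g. obtained from a running FastAgent app.
save_messages(agent.message_history, "assistant.json")  # JSON, MCP SDK compatible
save_messages(agent.message_history, "assistant.txt")   # delimited text format
```
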