fast_agent_mcp-0.4.7-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (261)
  1. fast_agent/__init__.py +183 -0
  2. fast_agent/acp/__init__.py +19 -0
  3. fast_agent/acp/acp_aware_mixin.py +304 -0
  4. fast_agent/acp/acp_context.py +437 -0
  5. fast_agent/acp/content_conversion.py +136 -0
  6. fast_agent/acp/filesystem_runtime.py +427 -0
  7. fast_agent/acp/permission_store.py +269 -0
  8. fast_agent/acp/server/__init__.py +5 -0
  9. fast_agent/acp/server/agent_acp_server.py +1472 -0
  10. fast_agent/acp/slash_commands.py +1050 -0
  11. fast_agent/acp/terminal_runtime.py +408 -0
  12. fast_agent/acp/tool_permission_adapter.py +125 -0
  13. fast_agent/acp/tool_permissions.py +474 -0
  14. fast_agent/acp/tool_progress.py +814 -0
  15. fast_agent/agents/__init__.py +85 -0
  16. fast_agent/agents/agent_types.py +64 -0
  17. fast_agent/agents/llm_agent.py +350 -0
  18. fast_agent/agents/llm_decorator.py +1139 -0
  19. fast_agent/agents/mcp_agent.py +1337 -0
  20. fast_agent/agents/tool_agent.py +271 -0
  21. fast_agent/agents/workflow/agents_as_tools_agent.py +849 -0
  22. fast_agent/agents/workflow/chain_agent.py +212 -0
  23. fast_agent/agents/workflow/evaluator_optimizer.py +380 -0
  24. fast_agent/agents/workflow/iterative_planner.py +652 -0
  25. fast_agent/agents/workflow/maker_agent.py +379 -0
  26. fast_agent/agents/workflow/orchestrator_models.py +218 -0
  27. fast_agent/agents/workflow/orchestrator_prompts.py +248 -0
  28. fast_agent/agents/workflow/parallel_agent.py +250 -0
  29. fast_agent/agents/workflow/router_agent.py +353 -0
  30. fast_agent/cli/__init__.py +0 -0
  31. fast_agent/cli/__main__.py +73 -0
  32. fast_agent/cli/commands/acp.py +159 -0
  33. fast_agent/cli/commands/auth.py +404 -0
  34. fast_agent/cli/commands/check_config.py +783 -0
  35. fast_agent/cli/commands/go.py +514 -0
  36. fast_agent/cli/commands/quickstart.py +557 -0
  37. fast_agent/cli/commands/serve.py +143 -0
  38. fast_agent/cli/commands/server_helpers.py +114 -0
  39. fast_agent/cli/commands/setup.py +174 -0
  40. fast_agent/cli/commands/url_parser.py +190 -0
  41. fast_agent/cli/constants.py +40 -0
  42. fast_agent/cli/main.py +115 -0
  43. fast_agent/cli/terminal.py +24 -0
  44. fast_agent/config.py +798 -0
  45. fast_agent/constants.py +41 -0
  46. fast_agent/context.py +279 -0
  47. fast_agent/context_dependent.py +50 -0
  48. fast_agent/core/__init__.py +92 -0
  49. fast_agent/core/agent_app.py +448 -0
  50. fast_agent/core/core_app.py +137 -0
  51. fast_agent/core/direct_decorators.py +784 -0
  52. fast_agent/core/direct_factory.py +620 -0
  53. fast_agent/core/error_handling.py +27 -0
  54. fast_agent/core/exceptions.py +90 -0
  55. fast_agent/core/executor/__init__.py +0 -0
  56. fast_agent/core/executor/executor.py +280 -0
  57. fast_agent/core/executor/task_registry.py +32 -0
  58. fast_agent/core/executor/workflow_signal.py +324 -0
  59. fast_agent/core/fastagent.py +1186 -0
  60. fast_agent/core/logging/__init__.py +5 -0
  61. fast_agent/core/logging/events.py +138 -0
  62. fast_agent/core/logging/json_serializer.py +164 -0
  63. fast_agent/core/logging/listeners.py +309 -0
  64. fast_agent/core/logging/logger.py +278 -0
  65. fast_agent/core/logging/transport.py +481 -0
  66. fast_agent/core/prompt.py +9 -0
  67. fast_agent/core/prompt_templates.py +183 -0
  68. fast_agent/core/validation.py +326 -0
  69. fast_agent/event_progress.py +62 -0
  70. fast_agent/history/history_exporter.py +49 -0
  71. fast_agent/human_input/__init__.py +47 -0
  72. fast_agent/human_input/elicitation_handler.py +123 -0
  73. fast_agent/human_input/elicitation_state.py +33 -0
  74. fast_agent/human_input/form_elements.py +59 -0
  75. fast_agent/human_input/form_fields.py +256 -0
  76. fast_agent/human_input/simple_form.py +113 -0
  77. fast_agent/human_input/types.py +40 -0
  78. fast_agent/interfaces.py +310 -0
  79. fast_agent/llm/__init__.py +9 -0
  80. fast_agent/llm/cancellation.py +22 -0
  81. fast_agent/llm/fastagent_llm.py +931 -0
  82. fast_agent/llm/internal/passthrough.py +161 -0
  83. fast_agent/llm/internal/playback.py +129 -0
  84. fast_agent/llm/internal/silent.py +41 -0
  85. fast_agent/llm/internal/slow.py +38 -0
  86. fast_agent/llm/memory.py +275 -0
  87. fast_agent/llm/model_database.py +490 -0
  88. fast_agent/llm/model_factory.py +388 -0
  89. fast_agent/llm/model_info.py +102 -0
  90. fast_agent/llm/prompt_utils.py +155 -0
  91. fast_agent/llm/provider/anthropic/anthropic_utils.py +84 -0
  92. fast_agent/llm/provider/anthropic/cache_planner.py +56 -0
  93. fast_agent/llm/provider/anthropic/llm_anthropic.py +796 -0
  94. fast_agent/llm/provider/anthropic/multipart_converter_anthropic.py +462 -0
  95. fast_agent/llm/provider/bedrock/bedrock_utils.py +218 -0
  96. fast_agent/llm/provider/bedrock/llm_bedrock.py +2207 -0
  97. fast_agent/llm/provider/bedrock/multipart_converter_bedrock.py +84 -0
  98. fast_agent/llm/provider/google/google_converter.py +466 -0
  99. fast_agent/llm/provider/google/llm_google_native.py +681 -0
  100. fast_agent/llm/provider/openai/llm_aliyun.py +31 -0
  101. fast_agent/llm/provider/openai/llm_azure.py +143 -0
  102. fast_agent/llm/provider/openai/llm_deepseek.py +76 -0
  103. fast_agent/llm/provider/openai/llm_generic.py +35 -0
  104. fast_agent/llm/provider/openai/llm_google_oai.py +32 -0
  105. fast_agent/llm/provider/openai/llm_groq.py +42 -0
  106. fast_agent/llm/provider/openai/llm_huggingface.py +85 -0
  107. fast_agent/llm/provider/openai/llm_openai.py +1195 -0
  108. fast_agent/llm/provider/openai/llm_openai_compatible.py +138 -0
  109. fast_agent/llm/provider/openai/llm_openrouter.py +45 -0
  110. fast_agent/llm/provider/openai/llm_tensorzero_openai.py +128 -0
  111. fast_agent/llm/provider/openai/llm_xai.py +38 -0
  112. fast_agent/llm/provider/openai/multipart_converter_openai.py +561 -0
  113. fast_agent/llm/provider/openai/openai_multipart.py +169 -0
  114. fast_agent/llm/provider/openai/openai_utils.py +67 -0
  115. fast_agent/llm/provider/openai/responses.py +133 -0
  116. fast_agent/llm/provider_key_manager.py +139 -0
  117. fast_agent/llm/provider_types.py +34 -0
  118. fast_agent/llm/request_params.py +61 -0
  119. fast_agent/llm/sampling_converter.py +98 -0
  120. fast_agent/llm/stream_types.py +9 -0
  121. fast_agent/llm/usage_tracking.py +445 -0
  122. fast_agent/mcp/__init__.py +56 -0
  123. fast_agent/mcp/common.py +26 -0
  124. fast_agent/mcp/elicitation_factory.py +84 -0
  125. fast_agent/mcp/elicitation_handlers.py +164 -0
  126. fast_agent/mcp/gen_client.py +83 -0
  127. fast_agent/mcp/helpers/__init__.py +36 -0
  128. fast_agent/mcp/helpers/content_helpers.py +352 -0
  129. fast_agent/mcp/helpers/server_config_helpers.py +25 -0
  130. fast_agent/mcp/hf_auth.py +147 -0
  131. fast_agent/mcp/interfaces.py +92 -0
  132. fast_agent/mcp/logger_textio.py +108 -0
  133. fast_agent/mcp/mcp_agent_client_session.py +411 -0
  134. fast_agent/mcp/mcp_aggregator.py +2175 -0
  135. fast_agent/mcp/mcp_connection_manager.py +723 -0
  136. fast_agent/mcp/mcp_content.py +262 -0
  137. fast_agent/mcp/mime_utils.py +108 -0
  138. fast_agent/mcp/oauth_client.py +509 -0
  139. fast_agent/mcp/prompt.py +159 -0
  140. fast_agent/mcp/prompt_message_extended.py +155 -0
  141. fast_agent/mcp/prompt_render.py +84 -0
  142. fast_agent/mcp/prompt_serialization.py +580 -0
  143. fast_agent/mcp/prompts/__init__.py +0 -0
  144. fast_agent/mcp/prompts/__main__.py +7 -0
  145. fast_agent/mcp/prompts/prompt_constants.py +18 -0
  146. fast_agent/mcp/prompts/prompt_helpers.py +238 -0
  147. fast_agent/mcp/prompts/prompt_load.py +186 -0
  148. fast_agent/mcp/prompts/prompt_server.py +552 -0
  149. fast_agent/mcp/prompts/prompt_template.py +438 -0
  150. fast_agent/mcp/resource_utils.py +215 -0
  151. fast_agent/mcp/sampling.py +200 -0
  152. fast_agent/mcp/server/__init__.py +4 -0
  153. fast_agent/mcp/server/agent_server.py +613 -0
  154. fast_agent/mcp/skybridge.py +44 -0
  155. fast_agent/mcp/sse_tracking.py +287 -0
  156. fast_agent/mcp/stdio_tracking_simple.py +59 -0
  157. fast_agent/mcp/streamable_http_tracking.py +309 -0
  158. fast_agent/mcp/tool_execution_handler.py +137 -0
  159. fast_agent/mcp/tool_permission_handler.py +88 -0
  160. fast_agent/mcp/transport_tracking.py +634 -0
  161. fast_agent/mcp/types.py +24 -0
  162. fast_agent/mcp/ui_agent.py +48 -0
  163. fast_agent/mcp/ui_mixin.py +209 -0
  164. fast_agent/mcp_server_registry.py +89 -0
  165. fast_agent/py.typed +0 -0
  166. fast_agent/resources/examples/data-analysis/analysis-campaign.py +189 -0
  167. fast_agent/resources/examples/data-analysis/analysis.py +68 -0
  168. fast_agent/resources/examples/data-analysis/fastagent.config.yaml +41 -0
  169. fast_agent/resources/examples/data-analysis/mount-point/WA_Fn-UseC_-HR-Employee-Attrition.csv +1471 -0
  170. fast_agent/resources/examples/mcp/elicitations/elicitation_account_server.py +88 -0
  171. fast_agent/resources/examples/mcp/elicitations/elicitation_forms_server.py +297 -0
  172. fast_agent/resources/examples/mcp/elicitations/elicitation_game_server.py +164 -0
  173. fast_agent/resources/examples/mcp/elicitations/fastagent.config.yaml +35 -0
  174. fast_agent/resources/examples/mcp/elicitations/fastagent.secrets.yaml.example +17 -0
  175. fast_agent/resources/examples/mcp/elicitations/forms_demo.py +107 -0
  176. fast_agent/resources/examples/mcp/elicitations/game_character.py +65 -0
  177. fast_agent/resources/examples/mcp/elicitations/game_character_handler.py +256 -0
  178. fast_agent/resources/examples/mcp/elicitations/tool_call.py +21 -0
  179. fast_agent/resources/examples/mcp/state-transfer/agent_one.py +18 -0
  180. fast_agent/resources/examples/mcp/state-transfer/agent_two.py +18 -0
  181. fast_agent/resources/examples/mcp/state-transfer/fastagent.config.yaml +27 -0
  182. fast_agent/resources/examples/mcp/state-transfer/fastagent.secrets.yaml.example +15 -0
  183. fast_agent/resources/examples/researcher/fastagent.config.yaml +61 -0
  184. fast_agent/resources/examples/researcher/researcher-eval.py +53 -0
  185. fast_agent/resources/examples/researcher/researcher-imp.py +189 -0
  186. fast_agent/resources/examples/researcher/researcher.py +36 -0
  187. fast_agent/resources/examples/tensorzero/.env.sample +2 -0
  188. fast_agent/resources/examples/tensorzero/Makefile +31 -0
  189. fast_agent/resources/examples/tensorzero/README.md +56 -0
  190. fast_agent/resources/examples/tensorzero/agent.py +35 -0
  191. fast_agent/resources/examples/tensorzero/demo_images/clam.jpg +0 -0
  192. fast_agent/resources/examples/tensorzero/demo_images/crab.png +0 -0
  193. fast_agent/resources/examples/tensorzero/demo_images/shrimp.png +0 -0
  194. fast_agent/resources/examples/tensorzero/docker-compose.yml +105 -0
  195. fast_agent/resources/examples/tensorzero/fastagent.config.yaml +19 -0
  196. fast_agent/resources/examples/tensorzero/image_demo.py +67 -0
  197. fast_agent/resources/examples/tensorzero/mcp_server/Dockerfile +25 -0
  198. fast_agent/resources/examples/tensorzero/mcp_server/entrypoint.sh +35 -0
  199. fast_agent/resources/examples/tensorzero/mcp_server/mcp_server.py +31 -0
  200. fast_agent/resources/examples/tensorzero/mcp_server/pyproject.toml +11 -0
  201. fast_agent/resources/examples/tensorzero/simple_agent.py +25 -0
  202. fast_agent/resources/examples/tensorzero/tensorzero_config/system_schema.json +29 -0
  203. fast_agent/resources/examples/tensorzero/tensorzero_config/system_template.minijinja +11 -0
  204. fast_agent/resources/examples/tensorzero/tensorzero_config/tensorzero.toml +35 -0
  205. fast_agent/resources/examples/workflows/agents_as_tools_extended.py +73 -0
  206. fast_agent/resources/examples/workflows/agents_as_tools_simple.py +50 -0
  207. fast_agent/resources/examples/workflows/chaining.py +37 -0
  208. fast_agent/resources/examples/workflows/evaluator.py +77 -0
  209. fast_agent/resources/examples/workflows/fastagent.config.yaml +26 -0
  210. fast_agent/resources/examples/workflows/graded_report.md +89 -0
  211. fast_agent/resources/examples/workflows/human_input.py +28 -0
  212. fast_agent/resources/examples/workflows/maker.py +156 -0
  213. fast_agent/resources/examples/workflows/orchestrator.py +70 -0
  214. fast_agent/resources/examples/workflows/parallel.py +56 -0
  215. fast_agent/resources/examples/workflows/router.py +69 -0
  216. fast_agent/resources/examples/workflows/short_story.md +13 -0
  217. fast_agent/resources/examples/workflows/short_story.txt +19 -0
  218. fast_agent/resources/setup/.gitignore +30 -0
  219. fast_agent/resources/setup/agent.py +28 -0
  220. fast_agent/resources/setup/fastagent.config.yaml +65 -0
  221. fast_agent/resources/setup/fastagent.secrets.yaml.example +38 -0
  222. fast_agent/resources/setup/pyproject.toml.tmpl +23 -0
  223. fast_agent/skills/__init__.py +9 -0
  224. fast_agent/skills/registry.py +235 -0
  225. fast_agent/tools/elicitation.py +369 -0
  226. fast_agent/tools/shell_runtime.py +402 -0
  227. fast_agent/types/__init__.py +59 -0
  228. fast_agent/types/conversation_summary.py +294 -0
  229. fast_agent/types/llm_stop_reason.py +78 -0
  230. fast_agent/types/message_search.py +249 -0
  231. fast_agent/ui/__init__.py +38 -0
  232. fast_agent/ui/console.py +59 -0
  233. fast_agent/ui/console_display.py +1080 -0
  234. fast_agent/ui/elicitation_form.py +946 -0
  235. fast_agent/ui/elicitation_style.py +59 -0
  236. fast_agent/ui/enhanced_prompt.py +1400 -0
  237. fast_agent/ui/history_display.py +734 -0
  238. fast_agent/ui/interactive_prompt.py +1199 -0
  239. fast_agent/ui/markdown_helpers.py +104 -0
  240. fast_agent/ui/markdown_truncator.py +1004 -0
  241. fast_agent/ui/mcp_display.py +857 -0
  242. fast_agent/ui/mcp_ui_utils.py +235 -0
  243. fast_agent/ui/mermaid_utils.py +169 -0
  244. fast_agent/ui/message_primitives.py +50 -0
  245. fast_agent/ui/notification_tracker.py +205 -0
  246. fast_agent/ui/plain_text_truncator.py +68 -0
  247. fast_agent/ui/progress_display.py +10 -0
  248. fast_agent/ui/rich_progress.py +195 -0
  249. fast_agent/ui/streaming.py +774 -0
  250. fast_agent/ui/streaming_buffer.py +449 -0
  251. fast_agent/ui/tool_display.py +422 -0
  252. fast_agent/ui/usage_display.py +204 -0
  253. fast_agent/utils/__init__.py +5 -0
  254. fast_agent/utils/reasoning_stream_parser.py +77 -0
  255. fast_agent/utils/time.py +22 -0
  256. fast_agent/workflow_telemetry.py +261 -0
  257. fast_agent_mcp-0.4.7.dist-info/METADATA +788 -0
  258. fast_agent_mcp-0.4.7.dist-info/RECORD +261 -0
  259. fast_agent_mcp-0.4.7.dist-info/WHEEL +4 -0
  260. fast_agent_mcp-0.4.7.dist-info/entry_points.txt +7 -0
  261. fast_agent_mcp-0.4.7.dist-info/licenses/LICENSE +201 -0
fast_agent/agents/llm_decorator.py (new file)
@@ -0,0 +1,1139 @@
+ """
+ Decorator for LlmAgent, normalizes PromptMessageExtended, allows easy extension of Agents
+ """
+
+ import json
+ from collections import Counter, defaultdict
+ from copy import deepcopy
+ from dataclasses import dataclass
+ from typing import (
+     TYPE_CHECKING,
+     Any,
+     Callable,
+     Mapping,
+     Sequence,
+     Type,
+     TypeVar,
+     Union,
+ )
+
+ if TYPE_CHECKING:
+     from rich.text import Text
+
+     from fast_agent.agents.llm_agent import LlmAgent
+
+ from a2a.types import AgentCard
+ from mcp import ListToolsResult, Tool
+ from mcp.types import (
+     CallToolResult,
+     ContentBlock,
+     EmbeddedResource,
+     GetPromptResult,
+     ImageContent,
+     Prompt,
+     PromptMessage,
+     ReadResourceResult,
+     ResourceLink,
+     TextContent,
+     TextResourceContents,
+ )
+ from opentelemetry import trace
+ from pydantic import BaseModel
+
+ from fast_agent.agents.agent_types import AgentConfig, AgentType
+ from fast_agent.constants import (
+     CONTROL_MESSAGE_SAVE_HISTORY,
+     FAST_AGENT_ERROR_CHANNEL,
+     FAST_AGENT_REMOVED_METADATA_CHANNEL,
+ )
+ from fast_agent.context import Context
+ from fast_agent.core.logging.logger import get_logger
+ from fast_agent.interfaces import (
+     AgentProtocol,
+     FastAgentLLMProtocol,
+     LLMFactoryProtocol,
+     StreamingAgentProtocol,
+ )
+ from fast_agent.llm.model_database import ModelDatabase
+ from fast_agent.llm.provider_types import Provider
+ from fast_agent.llm.stream_types import StreamChunk
+ from fast_agent.llm.usage_tracking import UsageAccumulator
+ from fast_agent.mcp.helpers.content_helpers import normalize_to_extended_list, text_content
+ from fast_agent.mcp.mime_utils import is_text_mime_type
+ from fast_agent.types import PromptMessageExtended, RequestParams
+
+ # Define a TypeVar for models
+ ModelT = TypeVar("ModelT", bound=BaseModel)
+
+ LLM = TypeVar("LLM", bound=FastAgentLLMProtocol)
+
+
+ class StreamingNotAvailableError(RuntimeError):
+     """Raised when streaming APIs are accessed before an LLM is attached."""
+
+
+ logger = get_logger(__name__)
+
+
+ class StreamingAgentMixin(StreamingAgentProtocol):
+     """Mixin that forwards streaming listener registration to the attached LLM."""
+
+     def add_stream_listener(self, listener: Callable[[StreamChunk], None]) -> Callable[[], None]:
+         llm = getattr(self, "_llm", None)
+         if not llm:
+             logger.debug(
+                 "Skipping stream listener registration because no LLM is attached",
+                 name=getattr(self, "_name", "unknown"),
+             )
+
+             def remove_listener() -> None:
+                 return None
+
+             return remove_listener
+         return llm.add_stream_listener(listener)
+
+     def add_structured_stream_listener(
+         self, listener: Callable[[str, bool], None]
+     ) -> Callable[[], None]:
+         llm = getattr(self, "_llm", None)
+         if not llm:
+             logger.debug(
+                 "Skipping structured stream listener registration because no LLM is attached",
+                 name=getattr(self, "_name", "unknown"),
+             )
+
+             def remove_listener() -> None:
+                 return None
+
+             return remove_listener
+         return llm.add_structured_stream_listener(listener)
+
+     def add_tool_stream_listener(
+         self, listener: Callable[[str, dict[str, Any] | None], None]
+     ) -> Callable[[], None]:
+         llm = getattr(self, "_llm", None)
+         if not llm:
+             logger.debug(
+                 "Skipping tool stream listener registration because no LLM is attached",
+                 name=getattr(self, "_name", "unknown"),
+             )
+
+             def remove_listener() -> None:
+                 return None
+
+             return remove_listener
+         return llm.add_tool_stream_listener(listener)
+
+
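
A minimal sketch of the listener contract above, assuming StreamingAgentProtocol declares only the three listener hooks shown here; _StubLLM and _DemoAgent are hypothetical stand-ins, not fast-agent classes. When no LLM is attached, the mixin hands back a no-op remover instead of raising.

from fast_agent.agents.llm_decorator import StreamingAgentMixin

class _StubLLM:
    """Hypothetical stand-in for an attached LLM."""
    def add_stream_listener(self, listener):
        return lambda: None  # real LLMs also return an unsubscribe callable

class _DemoAgent(StreamingAgentMixin):
    _llm = None    # the mixin reads these via getattr, so plain attributes suffice
    _name = "demo"

agent = _DemoAgent()
remove = agent.add_stream_listener(lambda chunk: None)  # no LLM yet: no-op remover
remove()
agent._llm = _StubLLM()
remove = agent.add_stream_listener(lambda chunk: None)  # now delegates to the LLM
remove()
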
+ @dataclass
+ class _RemovedBlock:
+     """Internal representation of a removed content block."""
+
+     category: str
+     mime_type: str | None
+     source: str
+     tool_id: str | None
+     block: ContentBlock
+
+
+ @dataclass(frozen=True)
+ class RemovedContentSummary:
+     """Summary information about removed content for the last turn."""
+
+     model_name: str | None
+     counts: dict[str, int]
+     category_mimes: dict[str, tuple[str, ...]]
+     alert_flags: frozenset[str]
+     message: str
+
+
+ @dataclass
+ class _CallContext:
+     """Internal helper for assembling an LLM call."""
+
+     full_history: list[PromptMessageExtended]
+     call_params: RequestParams | None
+     persist_history: bool
+     sanitized_messages: list[PromptMessageExtended]
+     summary: RemovedContentSummary | None
+
+
+ class LlmDecorator(StreamingAgentMixin, AgentProtocol):
+     """
+     A pure delegation wrapper around LlmAgent instances.
+
+     This class provides simple delegation to an attached LLM without adding
+     any LLM interaction behaviors. Subclasses can add specialized logic
+     for stop reason handling, UI display, tool execution, etc.
+
+     Stub implementations of advanced convenience methods are supplied.
+     """
+
+     def __init__(
+         self,
+         config: AgentConfig,
+         context: Context | None = None,
+     ) -> None:
+         self.config = config
+
+         self._context = context
+         self._name = self.config.name
+         self._tracer = trace.get_tracer(__name__)
+         self.instruction = self.config.instruction
+
+         # Agent-owned conversation state (PromptMessageExtended only)
+         self._message_history: list[PromptMessageExtended] = []
+
+         # Store the default request params from config
+         self._default_request_params = self.config.default_request_params
+
+         # Initialize the LLM to None (will be set by attach_llm)
+         self._llm: FastAgentLLMProtocol | None = None
+         self._initialized = False
+         self._llm_factory_ref: LLMFactoryProtocol | None = None
+         self._llm_attach_kwargs: dict[str, Any] | None = None
+
+     @property
+     def context(self) -> Context | None:
+         """Optional execution context supplied at construction time."""
+         return self._context
+
+     @property
+     def initialized(self) -> bool:
+         """Check if the agent is initialized."""
+         return self._initialized
+
+     @initialized.setter
+     def initialized(self, value: bool) -> None:
+         """Set the initialized state."""
+         self._initialized = value
+
+     async def initialize(self) -> None:
+         self.initialized = True
+
+     async def shutdown(self) -> None:
+         self.initialized = False
+
+     @property
+     def agent_type(self) -> AgentType:
+         """
+         Return the type of this agent.
+         """
+         return AgentType.LLM
+
+     @property
+     def name(self) -> str:
+         """
+         Return the name of this agent.
+         """
+         return self._name
+
+     async def attach_llm(
+         self,
+         llm_factory: LLMFactoryProtocol,
+         model: str | None = None,
+         request_params: RequestParams | None = None,
+         **additional_kwargs,
+     ) -> FastAgentLLMProtocol:
+         """
+         Create and attach an LLM instance to this agent.
+
+         Parameters have the following precedence (highest to lowest):
+         1. Explicitly passed parameters to this method
+         2. Agent's default_request_params
+         3. LLM's default values
+
+         Args:
+             llm_factory: A factory function that constructs an AugmentedLLM
+             model: Optional model name override
+             request_params: Optional request parameters override
+             **additional_kwargs: Additional parameters passed to the LLM constructor
+
+         Returns:
+             The created LLM instance
+         """
+         # Merge parameters with proper precedence
+         effective_params = self._merge_request_params(
+             self._default_request_params, request_params, model
+         )
+
+         # Create the LLM instance
+         self._llm = llm_factory(
+             agent=self, request_params=effective_params, context=self._context, **additional_kwargs
+         )
+
+         # Store attachment details for future cloning
+         self._llm_factory_ref = llm_factory
+         attach_kwargs: dict[str, Any] = dict(additional_kwargs)
+         attach_kwargs["request_params"] = deepcopy(effective_params)
+         self._llm_attach_kwargs = attach_kwargs
+
+         return self._llm
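
A hedged sketch of the precedence rules above; `my_factory` is a hypothetical LLMFactoryProtocol callable, and AgentConfig is assumed to accept a `name` keyword.

from fast_agent.agents.agent_types import AgentConfig
from fast_agent.agents.llm_decorator import LlmDecorator
from fast_agent.types import RequestParams

async def attach_demo(my_factory):
    agent = LlmDecorator(AgentConfig(name="writer"))
    # explicit arguments beat the agent's default_request_params, which in
    # turn beat the LLM's own defaults; `model` overrides any model in params
    llm = await agent.attach_llm(
        my_factory,
        model="some-model-id",
        request_params=RequestParams(use_history=True),
    )
    return llm
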
+
+     def _clone_constructor_kwargs(self) -> dict[str, Any]:
+         """Hook for subclasses/mixins to supply constructor kwargs when cloning."""
+         return {}
+
+     async def spawn_detached_instance(self, *, name: str | None = None) -> "LlmAgent":
+         """Create a fresh agent instance with its own MCP/LLM stack."""
+
+         new_config = deepcopy(self.config)
+         if name:
+             new_config.name = name
+
+         constructor_kwargs = self._clone_constructor_kwargs()
+         clone = type(self)(config=new_config, context=self.context, **constructor_kwargs)
+         await clone.initialize()
+
+         if self._llm_factory_ref is not None:
+             if self._llm_attach_kwargs is None:
+                 raise RuntimeError(
+                     "LLM attachment parameters missing despite factory being available"
+                 )
+
+             attach_kwargs = dict(self._llm_attach_kwargs)
+             request_params = attach_kwargs.pop("request_params", None)
+             if request_params is not None:
+                 request_params = deepcopy(request_params)
+
+             await clone.attach_llm(
+                 self._llm_factory_ref,
+                 request_params=request_params,
+                 **attach_kwargs,
+             )
+
+         return clone
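
A sketch of fanning work out to an independent clone and folding its token usage back in afterwards (names hypothetical; `merge_usage_from` is defined just below).

async def fan_out(agent):
    worker = await agent.spawn_detached_instance(name="worker-1")
    await worker("Summarize chapter one")  # separate history and LLM stack
    agent.merge_usage_from(worker)         # accumulate the clone's usage turns
    await worker.shutdown()
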
+
+     def merge_usage_from(self, other: "LlmAgent") -> None:
+         """Merge LLM usage metrics from another agent instance into this one."""
+
+         if not hasattr(self, "_llm") or not hasattr(other, "_llm"):
+             return
+
+         source_llm = getattr(other, "_llm", None)
+         target_llm = getattr(self, "_llm", None)
+         if not source_llm or not target_llm:
+             return
+
+         source_usage = getattr(source_llm, "usage_accumulator", None)
+         target_usage = getattr(target_llm, "usage_accumulator", None)
+         if not source_usage or not target_usage:
+             return
+
+         for turn in source_usage.turns:
+             try:
+                 target_usage.add_turn(turn.model_copy(deep=True))
+             except AttributeError:
+                 # Fallback if turn doesn't provide model_copy
+                 target_usage.add_turn(turn)
+
+     async def __call__(
+         self,
+         message: Union[
+             str,
+             PromptMessage,
+             PromptMessageExtended,
+             Sequence[Union[str, PromptMessage, PromptMessageExtended]],
+         ],
+     ) -> str:
+         """
+         Make the agent callable to send messages.
+
+         Args:
+             message: Optional message to send to the agent
+
+         Returns:
+             The agent's response as a string
+         """
+         return await self.send(message)
+
+     async def send(
+         self,
+         message: Union[
+             str,
+             PromptMessage,
+             PromptMessageExtended,
+             Sequence[Union[str, PromptMessage, PromptMessageExtended]],
+         ],
+         request_params: RequestParams | None = None,
+     ) -> str:
+         """
+         Convenience method to generate and return a string directly
+         """
+         response = await self.generate(message, request_params)
+         return response.last_text() or ""
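
The two call forms are equivalent, since __call__ simply forwards to send(); a minimal sketch:

async def ask(agent) -> str:
    reply = await agent("Name three MCP transports")  # same as agent.send(...)
    return reply  # last text block of the response, or "" if there is none
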
+
+     async def generate(
+         self,
+         messages: Union[
+             str,
+             PromptMessage,
+             PromptMessageExtended,
+             Sequence[Union[str, PromptMessage, PromptMessageExtended]],
+         ],
+         request_params: RequestParams | None = None,
+         tools: list[Tool] | None = None,
+     ) -> PromptMessageExtended:
+         """
+         Create a completion with the LLM using the provided messages.
+
+         This method provides the friendly agent interface by normalizing inputs
+         and delegating to generate_impl.
+
+         Args:
+             messages: Message(s) in various formats:
+                 - String: Converted to a user PromptMessageExtended
+                 - PromptMessage: Converted to PromptMessageExtended
+                 - PromptMessageExtended: Used directly
+                 - List of any combination of the above
+             request_params: Optional parameters to configure the request
+             tools: Optional list of tools available to the LLM
+
+         Returns:
+             The LLM's response as a PromptMessageExtended
+         """
+         # Normalize all input types to a list of PromptMessageExtended
+         multipart_messages = normalize_to_extended_list(messages)
+         final_request_params = (
+             self.llm.get_request_params(request_params) if self.llm else request_params
+         )
+
+         with self._tracer.start_as_current_span(f"Agent: '{self._name}' generate"):
+             return await self.generate_impl(
+                 multipart_messages, final_request_params, tools
+             )
+
+     async def generate_impl(
+         self,
+         messages: list[PromptMessageExtended],
+         request_params: RequestParams | None = None,
+         tools: list[Tool] | None = None,
+     ) -> PromptMessageExtended:
+         """
+         Implementation method for generate.
+
+         Default implementation delegates to the attached LLM.
+         Subclasses can override this to customize behavior while still
+         benefiting from the message normalization in generate().
+
+         Args:
+             messages: Normalized list of PromptMessageExtended objects
+             request_params: Optional parameters to configure the request
+             tools: Optional list of tools available to the LLM
+
+         Returns:
+             The LLM's response as a PromptMessageExtended
+         """
+         response, _ = await self._generate_with_summary(
+             messages, request_params, tools
+         )
+         return response
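
A sketch of the input shapes generate() accepts, all normalized to PromptMessageExtended before the call; the PromptMessageExtended constructor keywords are assumed, not confirmed by this diff.

from mcp.types import PromptMessage, TextContent
from fast_agent.types import PromptMessageExtended

async def forms(agent):
    await agent.generate("plain string")
    await agent.generate(
        PromptMessage(role="user", content=TextContent(type="text", text="hi"))
    )
    await agent.generate([
        "first turn text",
        PromptMessageExtended(role="user", content=[TextContent(type="text", text="second")]),
    ])
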
+
+     async def apply_prompt_template(self, prompt_result: GetPromptResult, prompt_name: str) -> str:
+         """
+         Apply a prompt template as persistent context that will be included in all future conversations.
+         Delegates to the attached LLM.
+
+         Args:
+             prompt_result: The GetPromptResult containing prompt messages
+             prompt_name: The name of the prompt being applied
+
+         Returns:
+             String representation of the assistant's response if generated
+         """
+         from fast_agent.types import PromptMessageExtended
+
+         assert self._llm
+
+         multipart_messages = PromptMessageExtended.parse_get_prompt_result(prompt_result)
+         for msg in multipart_messages:
+             msg.is_template = True
+
+         self._message_history = [msg.model_copy(deep=True) for msg in multipart_messages]
+
+         return await self._llm.apply_prompt_template(prompt_result, prompt_name)
+
+     async def apply_prompt(
+         self,
+         prompt: Union[str, GetPromptResult],
+         arguments: dict[str, str] | None = None,
+         as_template: bool = False,
+         namespace: str | None = None,
+     ) -> str:
+         """
+         Default, provider-agnostic apply_prompt implementation.
+
+         - If given a GetPromptResult, optionally store as template or generate once.
+         - If given a string, treat it as plain user text and generate.
+
+         Subclasses that integrate MCP servers should override this.
+         """
+         # If a prompt template object is provided
+         if isinstance(prompt, GetPromptResult):
+             namespaced_name = getattr(prompt, "namespaced_name", "template")
+             if as_template:
+                 return await self.apply_prompt_template(prompt, namespaced_name)
+
+             messages = PromptMessageExtended.from_get_prompt_result(prompt)
+             response = await self.generate_impl(messages, None)
+             return response.first_text()
+
+         # Otherwise treat the string as plain content (ignore arguments here)
+         return await self.send(prompt)
+
+     def clear(self, *, clear_prompts: bool = False) -> None:
+         """Reset conversation state while optionally retaining applied prompt templates."""
+
+         if not self._llm:
+             return
+         self._llm.clear(clear_prompts=clear_prompts)
+         if clear_prompts:
+             self._message_history = []
+         else:
+             template_prefix = self._template_prefix_messages()
+             self._message_history = [msg.model_copy(deep=True) for msg in template_prefix]
+
+     async def structured(
+         self,
+         messages: Union[
+             str,
+             PromptMessage,
+             PromptMessageExtended,
+             Sequence[Union[str, PromptMessage, PromptMessageExtended]],
+         ],
+         model: Type[ModelT],
+         request_params: RequestParams | None = None,
+     ) -> tuple[ModelT | None, PromptMessageExtended]:
+         """
+         Apply the prompt and return the result as a Pydantic model.
+
+         This method provides the friendly agent interface by normalizing inputs
+         and delegating to structured_impl.
+
+         Args:
+             messages: Message(s) in various formats:
+                 - String: Converted to a user PromptMessageExtended
+                 - PromptMessage: Converted to PromptMessageExtended
+                 - PromptMessageExtended: Used directly
+                 - List of any combination of the above
+             model: The Pydantic model class to parse the result into
+             request_params: Optional parameters to configure the LLM request
+
+         Returns:
+             A tuple of (parsed model instance or None, assistant response message)
+         """
+         # Normalize all input types to a list of PromptMessageExtended
+         multipart_messages = normalize_to_extended_list(messages)
+         final_request_params = (
+             self.llm.get_request_params(request_params) if self.llm else request_params
+         )
+
+         with self._tracer.start_as_current_span(f"Agent: '{self._name}' structured"):
+             return await self.structured_impl(multipart_messages, model, final_request_params)
+
+     async def structured_impl(
+         self,
+         messages: list[PromptMessageExtended],
+         model: Type[ModelT],
+         request_params: RequestParams | None = None,
+     ) -> tuple[ModelT | None, PromptMessageExtended]:
+         """
+         Implementation method for structured.
+
+         Default implementation delegates to the attached LLM.
+         Subclasses can override this to customize behavior while still
+         benefiting from the message normalization in structured().
+
+         Args:
+             messages: Normalized list of PromptMessageExtended objects
+             model: The Pydantic model class to parse the result into
+             request_params: Optional parameters to configure the LLM request
+
+         Returns:
+             A tuple of (parsed model instance or None, assistant response message)
+         """
+         result, _ = await self._structured_with_summary(messages, model, request_params)
+         return result
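
A sketch of structured output into a Pydantic model; per the return contract above, the first tuple element is None when parsing fails and the raw assistant message is still returned.

from pydantic import BaseModel

class Verdict(BaseModel):
    score: int
    rationale: str

async def grade(agent):
    verdict, message = await agent.structured("Rate this essay from 1 to 10", Verdict)
    return verdict.score if verdict is not None else message.last_text()
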
+
+     async def _generate_with_summary(
+         self,
+         messages: list[PromptMessageExtended],
+         request_params: RequestParams | None = None,
+         tools: list[Tool] | None = None,
+     ) -> tuple[PromptMessageExtended, RemovedContentSummary | None]:
+         assert self._llm, "LLM is not attached"
+         call_ctx = self._prepare_llm_call(messages, request_params)
+
+         response = await self._llm.generate(
+             call_ctx.full_history, call_ctx.call_params, tools
+         )
+
+         if call_ctx.persist_history:
+             self._persist_history(call_ctx.sanitized_messages, response)
+
+         return response, call_ctx.summary
+
+     async def _structured_with_summary(
+         self,
+         messages: list[PromptMessageExtended],
+         model: Type[ModelT],
+         request_params: RequestParams | None = None,
+     ) -> tuple[tuple[ModelT | None, PromptMessageExtended], RemovedContentSummary | None]:
+         assert self._llm, "LLM is not attached"
+         call_ctx = self._prepare_llm_call(messages, request_params)
+
+         structured_result = await self._llm.structured(
+             call_ctx.full_history, model, call_ctx.call_params
+         )
+
+         if call_ctx.persist_history:
+             try:
+                 _, assistant_message = structured_result
+                 self._persist_history(call_ctx.sanitized_messages, assistant_message)
+             except Exception:
+                 pass
+         return structured_result, call_ctx.summary
+
+     def _prepare_llm_call(
+         self, messages: list[PromptMessageExtended], request_params: RequestParams | None = None
+     ) -> _CallContext:
+         """Normalize template/history handling for both generate and structured."""
+         sanitized_messages, summary = self._sanitize_messages_for_llm(messages)
+         final_request_params = self._llm.get_request_params(request_params)
+
+         use_history = final_request_params.use_history if final_request_params else True
+         call_params = final_request_params.model_copy() if final_request_params else None
+         if call_params and not call_params.use_history:
+             call_params.use_history = True
+
+         base_history = self._message_history if use_history else self._template_prefix_messages()
+         full_history = [msg.model_copy(deep=True) for msg in base_history]
+         full_history.extend(sanitized_messages)
+
+         return _CallContext(
+             full_history=full_history,
+             call_params=call_params,
+             persist_history=use_history,
+             sanitized_messages=sanitized_messages,
+             summary=summary,
+         )
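
A sketch of the use_history semantics above: with use_history=False the agent sends only the template prefix plus the new message and skips persisting the exchange, while the outgoing call_params are forced to use_history=True because the agent has already assembled the full transcript itself.

from fast_agent.types import RequestParams

async def one_shot(agent) -> str:
    return await agent.send("Classify: 'great product'", RequestParams(use_history=False))
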
+
+     def _persist_history(
+         self,
+         sanitized_messages: list[PromptMessageExtended],
+         assistant_message: PromptMessageExtended,
+     ) -> None:
+         """Persist the last turn unless explicitly disabled by control text."""
+         if not sanitized_messages:
+             return
+         if sanitized_messages[-1].first_text().startswith(CONTROL_MESSAGE_SAVE_HISTORY):
+             return
+
+         history_messages = [self._strip_removed_metadata(msg) for msg in sanitized_messages]
+         self._message_history.extend(history_messages)
+         self._message_history.append(assistant_message)
+
+     @staticmethod
+     def _strip_removed_metadata(message: PromptMessageExtended) -> PromptMessageExtended:
+         """Remove per-turn removed-content metadata before persisting to history."""
+         msg_copy = message.model_copy(deep=True)
+         if msg_copy.channels and FAST_AGENT_REMOVED_METADATA_CHANNEL in msg_copy.channels:
+             channels = dict(msg_copy.channels)
+             channels.pop(FAST_AGENT_REMOVED_METADATA_CHANNEL, None)
+             msg_copy.channels = channels if channels else None
+         return msg_copy
+
+     def _sanitize_messages_for_llm(
+         self, messages: list[PromptMessageExtended]
+     ) -> tuple[list[PromptMessageExtended], RemovedContentSummary | None]:
+         """Filter out content blocks that the current model cannot tokenize."""
+         if not messages:
+             return [], None
+
+         removed_blocks: list[_RemovedBlock] = []
+         sanitized_messages: list[PromptMessageExtended] = []
+
+         for message in messages:
+             sanitized, removed = self._sanitize_message_for_llm(message)
+             sanitized_messages.append(sanitized)
+             removed_blocks.extend(removed)
+
+         summary = self._build_removed_summary(removed_blocks)
+         if summary:
+             # Attach metadata to the last user message for downstream UI usage
+             for msg in reversed(sanitized_messages):
+                 if msg.role == "user":
+                     channels = dict(msg.channels or {})
+                     meta_entries = list(channels.get(FAST_AGENT_REMOVED_METADATA_CHANNEL, []))
+                     meta_entries.extend(self._build_metadata_entries(removed_blocks))
+                     channels[FAST_AGENT_REMOVED_METADATA_CHANNEL] = meta_entries
+                     msg.channels = channels
+                     break
+
+         return sanitized_messages, summary
+
+     def _sanitize_message_for_llm(
+         self, message: PromptMessageExtended
+     ) -> tuple[PromptMessageExtended, list[_RemovedBlock]]:
+         """Return a sanitized copy of a message and any removed content blocks."""
+         msg_copy = message.model_copy(deep=True)
+         removed: list[_RemovedBlock] = []
+
+         msg_copy.content = self._filter_block_list(
+             list(msg_copy.content or []), removed, source="message"
+         )
+
+         if msg_copy.tool_results:
+             new_tool_results: dict[str, CallToolResult] = {}
+             for tool_id, tool_result in msg_copy.tool_results.items():
+                 original_blocks = list(tool_result.content or [])
+                 filtered_blocks = self._filter_block_list(
+                     original_blocks,
+                     removed,
+                     source="tool_result",
+                     tool_id=tool_id,
+                 )
+
+                 if filtered_blocks != original_blocks:
+                     try:
+                         updated_result = tool_result.model_copy(update={"content": filtered_blocks})
+                     except AttributeError:
+                         updated_result = CallToolResult(
+                             content=filtered_blocks, isError=getattr(tool_result, "isError", False)
+                         )
+                 else:
+                     updated_result = tool_result
+
+                 new_tool_results[tool_id] = updated_result
+
+             msg_copy.tool_results = new_tool_results
+
+         if removed:
+             channels = dict(msg_copy.channels or {})
+             error_entries = list(channels.get(FAST_AGENT_ERROR_CHANNEL, []))
+             error_entries.extend(self._build_error_channel_entries(removed))
+             channels[FAST_AGENT_ERROR_CHANNEL] = error_entries
+             msg_copy.channels = channels
+
+         return msg_copy, removed
+
+     def _filter_block_list(
+         self,
+         blocks: Sequence[ContentBlock],
+         removed: list[_RemovedBlock],
+         *,
+         source: str,
+         tool_id: str | None = None,
+     ) -> list[ContentBlock]:
+         kept: list[ContentBlock] = []
+         removed_in_this_call: list[_RemovedBlock] = []
+         model_name = self.llm.model_name if self.llm else None
+         model_display = model_name or "current model"
+
+         for block in blocks or []:
+             mime_type, category = self._extract_block_metadata(block)
+             if self._block_supported(mime_type, category):
+                 kept.append(block)
+             else:
+                 removed_block = _RemovedBlock(
+                     category=category,
+                     mime_type=mime_type,
+                     source=source,
+                     tool_id=tool_id,
+                     block=block,
+                 )
+                 removed.append(removed_block)
+                 removed_in_this_call.append(removed_block)
+
+         # Only add placeholder if ALL content was removed (kept is empty)
+         # This prevents ACP client hangs when content would be empty
+         if not kept and removed_in_this_call:
+             # Summarize what was removed
+             categories = set(r.category for r in removed_in_this_call)
+             category_label = ", ".join(self._category_label(c) for c in sorted(categories))
+             placeholder = text_content(
+                 f"[{category_label} content was removed - "
+                 f"{model_display} does not support this content type]"
+             )
+             kept.append(placeholder)
+
+         return kept
+
+     def _block_supported(self, mime_type: str | None, category: str) -> bool:
+         """Determine if the current model can process a content block."""
+         if category == "text":
+             return True
+
+         model_name = self.llm.model_name if self.llm else None
+         if not model_name:
+             return False
+
+         if mime_type:
+             return ModelDatabase.supports_mime(model_name, mime_type)
+
+         if category == "vision":
+             return ModelDatabase.supports_any_mime(
+                 model_name, ["image/jpeg", "image/png", "image/webp"]
+             )
+
+         if category == "document":
+             return ModelDatabase.supports_mime(model_name, "application/pdf")
+
+         return False
+
+     def _extract_block_metadata(self, block: ContentBlock) -> tuple[str | None, str]:
+         """Infer the MIME type and high-level category for a content block."""
+         if isinstance(block, TextContent):
+             return "text/plain", "text"
+
+         if isinstance(block, TextResourceContents):
+             mime = getattr(block, "mimeType", None) or "text/plain"
+             return mime, "text"
+
+         if isinstance(block, ImageContent):
+             mime = getattr(block, "mimeType", None) or "image/*"
+             return mime, "vision"
+
+         if isinstance(block, EmbeddedResource):
+             resource = getattr(block, "resource", None)
+             mime = getattr(resource, "mimeType", None)
+             if isinstance(resource, TextResourceContents) or (mime and is_text_mime_type(mime)):
+                 return mime or "text/plain", "text"
+             if mime and mime.startswith("image/"):
+                 return mime, "vision"
+             return mime, "document"
+
+         if isinstance(block, ResourceLink):
+             mime = getattr(block, "mimeType", None)
+             if mime and mime.startswith("image/"):
+                 return mime, "vision"
+             if mime and is_text_mime_type(mime):
+                 return mime, "text"
+             return mime, "document"
+
+         return None, "document"
+
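
A sketch of the capability gate used above, reusing the two ModelDatabase calls referenced in _block_supported; the model name is a placeholder.

from fast_agent.llm.model_database import ModelDatabase

def can_see_images(model_name: str) -> bool:
    return ModelDatabase.supports_any_mime(
        model_name, ["image/jpeg", "image/png", "image/webp"]
    )

def can_read_pdf(model_name: str) -> bool:
    return ModelDatabase.supports_mime(model_name, "application/pdf")
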
+     def _build_error_channel_entries(self, removed: list[_RemovedBlock]) -> list[ContentBlock]:
+         """Create informative entries for the error channel."""
+         entries: list[ContentBlock] = []
+         model_name = self.llm.model_name if self.llm else None
+         model_display = model_name or "current model"
+
+         for item in removed:
+             mime_display = item.mime_type or "unknown"
+             category_label = self._category_label(item.category)
+             if item.source == "message":
+                 source_label = "user content"
+             elif item.tool_id:
+                 source_label = f"tool result '{item.tool_id}'"
+             else:
+                 source_label = "tool result"
+
+             message = (
+                 f"Removed unsupported {category_label} {source_label} ({mime_display}) "
+                 f"before sending to {model_display}."
+             )
+             entries.append(text_content(message))
+             entries.append(item.block)
+
+         return entries
+
+     def _build_metadata_entries(self, removed: list[_RemovedBlock]) -> list[ContentBlock]:
+         entries: list[ContentBlock] = []
+         for item in removed:
+             metadata_text = text_content(
+                 json.dumps(
+                     {
+                         "type": "fast-agent-removed",
+                         "category": item.category,
+                         "mime_type": item.mime_type,
+                         "source": item.source,
+                         "tool_id": item.tool_id,
+                     }
+                 )
+             )
+             entries.append(metadata_text)
+         return entries
+
+     def _build_removed_summary(self, removed: list[_RemovedBlock]) -> RemovedContentSummary | None:
+         if not removed:
+             return None
+
+         counts = Counter(item.category for item in removed)
+         category_mimes: dict[str, tuple[str, ...]] = {}
+         mime_accumulator: dict[str, set[str]] = defaultdict(set)
+
+         for item in removed:
+             mime_accumulator[item.category].add(item.mime_type or "unknown")
+
+         for category, mimes in mime_accumulator.items():
+             category_mimes[category] = tuple(sorted(mimes))
+
+         alert_flags = frozenset(
+             flag
+             for category in counts
+             for flag in (self._category_to_flag(category),)
+             if flag is not None
+         )
+
+         model_name = self.llm.model_name if self.llm else None
+         model_display = model_name or "current model"
+
+         category_order = ["vision", "document", "other", "text"]
+         segments: list[str] = []
+         for category in category_order:
+             if category not in counts:
+                 continue
+             count = counts[category]
+             mime_list = ", ".join(category_mimes.get(category, ()))
+             label = self._category_label(category)
+             plural = "s" if count != 1 else ""
+             if mime_list:
+                 segments.append(f"{count} {label} block{plural} ({mime_list})")
+             else:
+                 segments.append(f"{count} {label} block{plural}")
+
+         # Append any remaining categories not covered in the preferred order
+         for category, count in counts.items():
+             if category in category_order:
+                 continue
+             mime_list = ", ".join(category_mimes.get(category, ()))
+             label = self._category_label(category)
+             plural = "s" if count != 1 else ""
+             if mime_list:
+                 segments.append(f"{count} {label} block{plural} ({mime_list})")
+             else:
+                 segments.append(f"{count} {label} block{plural}")
+
+         detail = "; ".join(segments) if segments else "unknown content"
+
+         capability_labels = []
+         for flag in alert_flags:
+             match flag:
+                 case "V":
+                     capability_labels.append("vision")
+                 case "D":
+                     capability_labels.append("document")
+                 case "T":
+                     capability_labels.append("text")
+
+         capability_note = ""
+         if capability_labels:
+             unique_caps = ", ".join(sorted(set(capability_labels)))
+             capability_note = f" Missing capability: {unique_caps}."
+
+         message = (
+             f"Removed unsupported content before sending to {model_display}: {detail}."
+             f"{capability_note} Stored original content in '{FAST_AGENT_ERROR_CHANNEL}'."
+         )
+
+         return RemovedContentSummary(
+             model_name=model_name,
+             counts=dict(counts),
+             category_mimes=category_mimes,
+             alert_flags=alert_flags,
+             message=message,
+         )
+
+     @staticmethod
+     def _category_to_flag(category: str) -> str | None:
+         mapping = {"text": "T", "document": "D", "vision": "V"}
+         return mapping.get(category)
+
+     @staticmethod
+     def _category_label(category: str) -> str:
+         if category == "vision":
+             return "vision"
+         if category == "document":
+             return "document"
+         if category == "text":
+             return "text"
+         return "content"
+
+     @property
+     def message_history(self) -> list[PromptMessageExtended]:
+         """
+         Return the agent's message history as PromptMessageExtended objects.
+
+         This history can be used to transfer state between agents or for
+         analysis and debugging purposes.
+
+         Returns:
+             List of PromptMessageExtended objects representing the conversation history
+         """
+         return self._message_history
+
+     @property
+     def template_messages(self) -> list[PromptMessageExtended]:
+         """
+         Return the template prefix of the message history.
+
+         Templates are identified via the is_template flag and are expected to
+         appear as a contiguous prefix of the history.
+         """
+         return [msg.model_copy(deep=True) for msg in self._template_prefix_messages()]
+
+     def _template_prefix_messages(self) -> list[PromptMessageExtended]:
+         """Return the leading messages marked as templates (non-copy)."""
+         prefix: list[PromptMessageExtended] = []
+         for msg in self._message_history:
+             if msg.is_template:
+                 prefix.append(msg)
+             else:
+                 break
+         return prefix
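
A sketch tying this to clear() above: templates form a contiguous prefix of the history, so a default clear() keeps them while dropping the conversational turns that follow.

def reset_keeping_templates(agent):
    agent.clear()                   # clear_prompts defaults to False
    return agent.template_messages  # deep copies of the retained template prefix
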
+
+     def load_message_history(self, messages: list[PromptMessageExtended] | None) -> None:
+         """Replace message history with a deep copy of supplied messages (or empty list)."""
+         msgs = messages or []
+         self._message_history = [
+             msg.model_copy(deep=True) if hasattr(msg, "model_copy") else msg for msg in msgs
+         ]
+
+     def append_history(self, messages: list[PromptMessageExtended] | None) -> None:
+         """Append messages to history as deep copies."""
+         if not messages:
+             return
+         for msg in messages:
+             self._message_history.append(
+                 msg.model_copy(deep=True) if hasattr(msg, "model_copy") else msg
+             )
+
+     def pop_last_message(self) -> PromptMessageExtended | None:
+         """Remove and return the most recent message from the conversation history."""
+         if self.llm:
+             return self.llm.pop_last_message()
+         return None
+
+     @property
+     def usage_accumulator(self) -> UsageAccumulator | None:
+         """
+         Return the usage accumulator for tracking token usage across turns.
+
+         Returns:
+             UsageAccumulator object if LLM is attached, None otherwise
+         """
+         if self.llm:
+             return self.llm.usage_accumulator
+         return None
+
+     @property
+     def llm(self) -> FastAgentLLMProtocol | None:
+         return self._llm
+
+     # --- Default MCP-facing convenience methods (no-op for plain LLM agents) ---
+
+     async def list_prompts(self, namespace: str | None = None) -> Mapping[str, list[Prompt]]:
+         """Default: no prompts; return empty mapping."""
+         return {}
+
+     async def get_prompt(
+         self,
+         prompt_name: str,
+         arguments: dict[str, str] | None = None,
+         namespace: str | None = None,
+     ) -> GetPromptResult:
+         """Default: prompts unsupported; return empty GetPromptResult."""
+         return GetPromptResult(description="", messages=[])
+
+     async def list_resources(self, namespace: str | None = None) -> Mapping[str, list[str]]:
+         """Default: no resources; return empty mapping."""
+         return {}
+
+     async def list_tools(self) -> ListToolsResult:
+         """Default: no tools; return empty ListToolsResult."""
+         return ListToolsResult(tools=[])
+
+     async def list_mcp_tools(self, namespace: str | None = None) -> Mapping[str, list[Tool]]:
+         """Default: no tools; return empty mapping."""
+         return {}
+
+     async def get_resource(
+         self, resource_uri: str, namespace: str | None = None
+     ) -> ReadResourceResult:
+         """Default: resources unsupported; raise capability error."""
+         raise NotImplementedError("Resources are not supported by this agent")
+
+     async def with_resource(
+         self,
+         prompt_content: Union[str, PromptMessage, PromptMessageExtended],
+         resource_uri: str,
+         namespace: str | None = None,
+     ) -> str:
+         """Default: ignore resource, just send the prompt content."""
+         return await self.send(prompt_content)
+
+     @property
+     def provider(self) -> Provider:
+         return self.llm.provider
+
+     def _merge_request_params(
+         self,
+         base_params: RequestParams | None,
+         override_params: RequestParams | None,
+         model_override: str | None = None,
+     ) -> RequestParams | None:
+         """
+         Merge request parameters with proper precedence.
+
+         Args:
+             base_params: Base parameters (lower precedence)
+             override_params: Override parameters (higher precedence)
+             model_override: Optional model name to override
+
+         Returns:
+             Merged RequestParams or None if both inputs are None
+         """
+         if not base_params and not override_params:
+             return None
+
+         if not base_params:
+             result = override_params.model_copy() if override_params else None
+         else:
+             result = base_params.model_copy()
+             if override_params:
+                 # Merge only the explicitly set values from override_params
+                 for k, v in override_params.model_dump(exclude_unset=True).items():
+                     if v is not None:
+                         setattr(result, k, v)
+
+         # Apply model override if specified
+         if model_override and result:
+             result.model = model_override
+
+         return result
+
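
A worked example of the merge precedence, limited to fields this file itself references; the AgentConfig keyword is an assumption, and the private method is called directly only for illustration.

from fast_agent.agents.agent_types import AgentConfig
from fast_agent.agents.llm_decorator import LlmDecorator
from fast_agent.types import RequestParams

agent = LlmDecorator(AgentConfig(name="merge-demo"))
merged = agent._merge_request_params(
    RequestParams(use_history=True),        # base: lower precedence
    RequestParams(model="override-model"),  # override: only explicitly set fields win
    model_override="final-model",           # applied last
)
assert merged is not None and merged.model == "final-model"
assert merged.use_history is True
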
+     async def agent_card(self) -> AgentCard:
+         """
+         Return an A2A card describing this Agent
+         """
+         from fast_agent.agents.llm_agent import DEFAULT_CAPABILITIES
+
+         return AgentCard(
+             skills=[],
+             name=self._name,
+             description=self.instruction,
+             url=f"fast-agent://agents/{self._name}/",
+             version="0.1",
+             capabilities=DEFAULT_CAPABILITIES,
+             # TODO -- get these from the _llm
+             default_input_modes=["text/plain"],
+             default_output_modes=["text/plain"],
+             provider=None,
+             documentation_url=None,
+         )
+
+     async def run_tools(self, request: PromptMessageExtended) -> PromptMessageExtended:
+         return request
+
+     async def show_assistant_message(
+         self,
+         message: PromptMessageExtended,
+         bottom_items: list[str] | None = None,
+         highlight_items: str | list[str] | None = None,
+         max_item_length: int | None = None,
+         name: str | None = None,
+         model: str | None = None,
+         additional_message: Union["Text", None] = None,
+     ) -> None:
+         pass