fast-agent-mcp 0.3.6__py3-none-any.whl → 0.3.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This version of fast-agent-mcp has been flagged as potentially problematic.
Files changed (40)
  1. fast_agent/__init__.py +9 -1
  2. fast_agent/agents/agent_types.py +11 -11
  3. fast_agent/agents/llm_agent.py +60 -40
  4. fast_agent/agents/llm_decorator.py +351 -7
  5. fast_agent/agents/mcp_agent.py +87 -65
  6. fast_agent/agents/tool_agent.py +50 -4
  7. fast_agent/cli/commands/auth.py +14 -1
  8. fast_agent/cli/commands/go.py +3 -3
  9. fast_agent/constants.py +2 -0
  10. fast_agent/core/agent_app.py +2 -0
  11. fast_agent/core/direct_factory.py +39 -120
  12. fast_agent/core/fastagent.py +2 -2
  13. fast_agent/core/logging/listeners.py +2 -1
  14. fast_agent/history/history_exporter.py +3 -3
  15. fast_agent/interfaces.py +2 -2
  16. fast_agent/llm/fastagent_llm.py +3 -3
  17. fast_agent/llm/model_database.py +7 -1
  18. fast_agent/llm/model_factory.py +2 -3
  19. fast_agent/llm/provider/bedrock/llm_bedrock.py +1 -1
  20. fast_agent/llm/provider/google/llm_google_native.py +1 -3
  21. fast_agent/llm/provider/openai/llm_azure.py +1 -1
  22. fast_agent/llm/provider/openai/llm_openai.py +57 -8
  23. fast_agent/llm/provider/openai/llm_tensorzero_openai.py +1 -1
  24. fast_agent/llm/request_params.py +1 -1
  25. fast_agent/mcp/__init__.py +1 -2
  26. fast_agent/mcp/mcp_aggregator.py +6 -3
  27. fast_agent/mcp/prompt_message_extended.py +2 -0
  28. fast_agent/mcp/prompt_serialization.py +124 -39
  29. fast_agent/mcp/prompts/prompt_load.py +34 -32
  30. fast_agent/mcp/prompts/prompt_server.py +26 -11
  31. fast_agent/resources/setup/fastagent.config.yaml +2 -2
  32. fast_agent/types/__init__.py +3 -1
  33. fast_agent/ui/enhanced_prompt.py +111 -64
  34. fast_agent/ui/interactive_prompt.py +13 -41
  35. fast_agent/ui/rich_progress.py +12 -8
  36. {fast_agent_mcp-0.3.6.dist-info → fast_agent_mcp-0.3.8.dist-info}/METADATA +4 -4
  37. {fast_agent_mcp-0.3.6.dist-info → fast_agent_mcp-0.3.8.dist-info}/RECORD +40 -40
  38. {fast_agent_mcp-0.3.6.dist-info → fast_agent_mcp-0.3.8.dist-info}/WHEEL +0 -0
  39. {fast_agent_mcp-0.3.6.dist-info → fast_agent_mcp-0.3.8.dist-info}/entry_points.txt +0 -0
  40. {fast_agent_mcp-0.3.6.dist-info → fast_agent_mcp-0.3.8.dist-info}/licenses/LICENSE +0 -0
fast_agent/__init__.py CHANGED
@@ -27,8 +27,11 @@ from fast_agent.config import (
     XAISettings,
 )
 
+# Prompt helpers (safe - no heavy dependencies)
+from fast_agent.mcp.prompt import Prompt
+
 # Type definitions and enums (safe - no dependencies)
-from fast_agent.types import LlmStopReason, RequestParams
+from fast_agent.types import LlmStopReason, PromptMessageExtended, RequestParams
 
 
 def __getattr__(name: str):
@@ -91,6 +94,8 @@ def __getattr__(name: str):
 if TYPE_CHECKING:  # pragma: no cover - typing aid only
     # Provide a concrete import path for type checkers/IDEs
     from fast_agent.core.fastagent import FastAgent as FastAgent  # noqa: F401
+    from fast_agent.mcp.prompt import Prompt as Prompt  # noqa: F401
+    from fast_agent.types import PromptMessageExtended as PromptMessageExtended  # noqa: F401
 
 
 __all__ = [
@@ -127,6 +132,9 @@ __all__ = [
     # Type definitions and enums (eagerly loaded)
     "LlmStopReason",
     "RequestParams",
+    "PromptMessageExtended",
+    # Prompt helpers (eagerly loaded)
+    "Prompt",
    # Agents (lazy loaded)
     "LlmAgent",
     "LlmDecorator",
fast_agent/agents/agent_types.py CHANGED
@@ -3,7 +3,7 @@ Type definitions for agents and agent configurations.
 """
 
 from dataclasses import dataclass, field
-from enum import Enum
+from enum import StrEnum, auto
 from typing import Dict, List, Optional
 
 from mcp.client.session import ElicitationFnT
@@ -12,18 +12,18 @@ from mcp.client.session import ElicitationFnT
 from fast_agent.types import RequestParams
 
 
-class AgentType(Enum):
+class AgentType(StrEnum):
     """Enumeration of supported agent types."""
 
-    LLM = "llm"  # simple llm delegator
-    BASIC = "agent"
-    CUSTOM = "custom"
-    ORCHESTRATOR = "orchestrator"
-    PARALLEL = "parallel"
-    EVALUATOR_OPTIMIZER = "evaluator_optimizer"
-    ROUTER = "router"
-    CHAIN = "chain"
-    ITERATIVE_PLANNER = "iterative_planner"
+    LLM = auto()
+    BASIC = auto()
+    CUSTOM = auto()
+    ORCHESTRATOR = auto()
+    PARALLEL = auto()
+    EVALUATOR_OPTIMIZER = auto()
+    ROUTER = auto()
+    CHAIN = auto()
+    ITERATIVE_PLANNER = auto()
 
 
 @dataclass
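
Note the behavioral edge of moving to `StrEnum` with `auto()`: each member's value becomes its lowercased name, so `AgentType.BASIC` is now the string "basic" where 0.3.6 used "agent" (every other member's value is unchanged). A standard-library illustration (Python 3.11+):

    from enum import StrEnum, auto

    class AgentType(StrEnum):
        LLM = auto()
        BASIC = auto()

    # auto() on a StrEnum lowercases the member name to produce the value.
    assert AgentType.LLM == "llm"
    assert AgentType.BASIC == "basic"       # was "agent" in 0.3.6
    assert f"{AgentType.BASIC}" == "basic"  # StrEnum members format as plain strings
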
fast_agent/agents/llm_agent.py CHANGED
@@ -45,7 +45,7 @@ class LlmAgent(LlmDecorator):
 
     This class provides LLM-specific functionality including UI display methods,
     tool call tracking, and chat interaction patterns while delegating core
-    LLM operations to the attached AugmentedLLMProtocol.
+    LLM operations to the attached FastAgentLLMProtocol.
     """
 
     def __init__(
@@ -81,56 +81,71 @@ class LlmAgent(LlmDecorator):
         """
 
         # Determine display content based on stop reason if not provided
-        if additional_message is None:
-            # Generate additional message based on stop reason
-            match message.stop_reason:
-                case LlmStopReason.END_TURN:
-                    # No additional message needed for normal end turn
-                    additional_message_text = None
-
-                case LlmStopReason.MAX_TOKENS:
-                    additional_message_text = Text(
+        additional_segments: List[Text] = []
+
+        # Generate additional message based on stop reason
+        match message.stop_reason:
+            case LlmStopReason.END_TURN:
+                pass
+
+            case LlmStopReason.MAX_TOKENS:
+                additional_segments.append(
+                    Text(
                         "\n\nMaximum output tokens reached - generation stopped.",
                         style="dim red italic",
                     )
+                )
 
-                case LlmStopReason.SAFETY:
-                    additional_message_text = Text(
-                        "\n\nContent filter activated - generation stopped.", style="dim red italic"
+            case LlmStopReason.SAFETY:
+                additional_segments.append(
+                    Text(
+                        "\n\nContent filter activated - generation stopped.",
+                        style="dim red italic",
                     )
+                )
 
-                case LlmStopReason.PAUSE:
-                    additional_message_text = Text(
-                        "\n\nLLM has requested a pause.", style="dim green italic"
-                    )
+            case LlmStopReason.PAUSE:
+                additional_segments.append(
+                    Text("\n\nLLM has requested a pause.", style="dim green italic")
+                )
 
-                case LlmStopReason.STOP_SEQUENCE:
-                    additional_message_text = Text(
-                        "\n\nStop Sequence activated - generation stopped.", style="dim red italic"
+            case LlmStopReason.STOP_SEQUENCE:
+                additional_segments.append(
+                    Text(
+                        "\n\nStop Sequence activated - generation stopped.",
+                        style="dim red italic",
                     )
+                )
 
-                case LlmStopReason.TOOL_USE:
-                    if None is message.last_text():
-                        additional_message_text = Text(
-                            "The assistant requested tool calls", style="dim green italic"
-                        )
-                    else:
-                        additional_message_text = None
+            case LlmStopReason.TOOL_USE:
+                if None is message.last_text():
+                    additional_segments.append(
+                        Text("The assistant requested tool calls", style="dim green italic")
+                    )
 
-                case _:
-                    if message.stop_reason:
-                        additional_message_text = Text(
+            case _:
+                if message.stop_reason:
+                    additional_segments.append(
+                        Text(
                             f"\n\nGeneration stopped for an unhandled reason ({message.stop_reason})",
                             style="dim red italic",
                         )
-                    else:
-                        additional_message_text = None
-        else:
-            # Use provided additional message
-            additional_message_text = (
-                additional_message if isinstance(additional_message, Text) else None
+                    )
+
+        if additional_message is not None:
+            additional_segments.append(
+                additional_message
+                if isinstance(additional_message, Text)
+                else Text(str(additional_message))
             )
 
+        additional_message_text = None
+        if additional_segments:
+            combined = Text()
+            for segment in additional_segments:
+                combined += segment
+            additional_message_text = combined
+
         message_text = message.last_text() or ""
 
         # Use provided name/model or fall back to defaults
@@ -182,9 +197,11 @@ class LlmAgent(LlmDecorator):
 
         # TODO -- we should merge the request parameters here with the LLM defaults?
         # TODO - manage error catch, recovery, pause
-        result = await super().generate_impl(messages, request_params, tools)
+        result, summary = await self._generate_with_summary(messages, request_params, tools)
+
+        summary_text = Text(f"\n\n{summary.message}", style="dim red italic") if summary else None
 
-        await self.show_assistant_message(result)
+        await self.show_assistant_message(result, additional_message=summary_text)
         return result
 
     async def structured_impl(
@@ -196,8 +213,11 @@
         if "user" == messages[-1].role:
             self.show_user_message(message=messages[-1])
 
-        result, message = await super().structured_impl(messages, model, request_params)
-        await self.show_assistant_message(message=message)
+        (result, message), summary = await self._structured_with_summary(
+            messages, model, request_params
+        )
+        summary_text = Text(f"\n\n{summary.message}", style="dim red italic") if summary else None
+        await self.show_assistant_message(message=message, additional_message=summary_text)
         return result, message
 
     # async def show_prompt_loaded(
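
The display rework replaces a single optional notice with a list of segments concatenated into one `rich.text.Text`, so a stop-reason notice and a caller-supplied message can both appear while keeping their individual styles. A standalone sketch of the concatenation pattern the diff relies on (the segment strings are illustrative):

    from rich.console import Console
    from rich.text import Text

    # Text supports +=, which appends content while preserving each
    # segment's style spans; joining plain strings would lose the styles.
    segments = [
        Text("\n\nMaximum output tokens reached - generation stopped.", style="dim red italic"),
        Text("\n\nLLM has requested a pause.", style="dim green italic"),
    ]

    combined = Text()
    for segment in segments:
        combined += segment

    Console().print(combined)
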
fast_agent/agents/llm_decorator.py CHANGED
@@ -2,6 +2,9 @@
 Decorator for LlmAgent, normalizes PromptMessageExtended, allows easy extension of Agents
 """
 
+import json
+from collections import Counter, defaultdict
+from dataclasses import dataclass
 from typing import (
     TYPE_CHECKING,
     Dict,
@@ -21,35 +24,64 @@ if TYPE_CHECKING:
     from a2a.types import AgentCard
     from mcp import Tool
 from mcp.types import (
+    CallToolResult,
+    ContentBlock,
+    EmbeddedResource,
     GetPromptResult,
+    ImageContent,
     Prompt,
     PromptMessage,
     ReadResourceResult,
+    ResourceLink,
+    TextContent,
+    TextResourceContents,
 )
 from opentelemetry import trace
 from pydantic import BaseModel
 
 from fast_agent.agents.agent_types import AgentConfig, AgentType
+from fast_agent.constants import FAST_AGENT_ERROR_CHANNEL, FAST_AGENT_REMOVED_METADATA_CHANNEL
 from fast_agent.context import Context
-from fast_agent.core.logging.logger import get_logger
 from fast_agent.interfaces import (
     AgentProtocol,
     FastAgentLLMProtocol,
     LLMFactoryProtocol,
 )
+from fast_agent.llm.model_database import ModelDatabase
 from fast_agent.llm.provider_types import Provider
 from fast_agent.llm.usage_tracking import UsageAccumulator
-from fast_agent.mcp.helpers.content_helpers import normalize_to_extended_list
+from fast_agent.mcp.helpers.content_helpers import normalize_to_extended_list, text_content
+from fast_agent.mcp.mime_utils import is_text_mime_type
 from fast_agent.types import PromptMessageExtended, RequestParams
 
-logger = get_logger(__name__)
 # Define a TypeVar for models
 ModelT = TypeVar("ModelT", bound=BaseModel)
 
-# Define a TypeVar for AugmentedLLM and its subclasses
 LLM = TypeVar("LLM", bound=FastAgentLLMProtocol)
 
 
+@dataclass
+class _RemovedBlock:
+    """Internal representation of a removed content block."""
+
+    category: str
+    mime_type: str | None
+    source: str
+    tool_id: str | None
+    block: ContentBlock
+
+
+@dataclass(frozen=True)
+class RemovedContentSummary:
+    """Summary information about removed content for the last turn."""
+
+    model_name: str | None
+    counts: Dict[str, int]
+    category_mimes: Dict[str, Tuple[str, ...]]
+    alert_flags: frozenset[str]
+    message: str
+
+
 class LlmDecorator(AgentProtocol):
     """
     A pure delegation wrapper around LlmAgent instances.
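
Since `RemovedContentSummary` is a frozen dataclass, each turn's removal report is an immutable value. A hand-built example of its shape (field values are illustrative, not real library output, and the import path is assumed to mirror this file's location):

    # Illustrative only; assumes the class is importable from this module.
    from fast_agent.agents.llm_decorator import RemovedContentSummary

    summary = RemovedContentSummary(
        model_name="example-model",  # hypothetical model id
        counts={"vision": 2, "document": 1},
        category_mimes={
            "vision": ("image/png", "image/webp"),
            "document": ("application/pdf",),
        },
        alert_flags=frozenset({"V", "D"}),  # per _category_to_flag below: V=vision, D=document
        message="Removed unsupported content before sending to example-model: ...",
    )
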
@@ -236,8 +268,8 @@ class LlmDecorator(AgentProtocol):
         Returns:
             The LLM's response as a PromptMessageExtended
         """
-        assert self._llm, "LLM is not attached"
-        return await self._llm.generate(messages, request_params, tools)
+        response, _ = await self._generate_with_summary(messages, request_params, tools)
+        return response
 
     async def apply_prompt_template(self, prompt_result: GetPromptResult, prompt_name: str) -> str:
         """
@@ -338,8 +370,320 @@
         Returns:
             A tuple of (parsed model instance or None, assistant response message)
         """
+        result, _ = await self._structured_with_summary(messages, model, request_params)
+        return result
+
+    async def _generate_with_summary(
+        self,
+        messages: List[PromptMessageExtended],
+        request_params: RequestParams | None = None,
+        tools: List[Tool] | None = None,
+    ) -> Tuple[PromptMessageExtended, RemovedContentSummary | None]:
         assert self._llm, "LLM is not attached"
-        return await self._llm.structured(messages, model, request_params)
+        sanitized_messages, summary = self._sanitize_messages_for_llm(messages)
+        response = await self._llm.generate(sanitized_messages, request_params, tools)
+        return response, summary
+
+    async def _structured_with_summary(
+        self,
+        messages: List[PromptMessageExtended],
+        model: Type[ModelT],
+        request_params: RequestParams | None = None,
+    ) -> Tuple[Tuple[ModelT | None, PromptMessageExtended], RemovedContentSummary | None]:
+        assert self._llm, "LLM is not attached"
+        sanitized_messages, summary = self._sanitize_messages_for_llm(messages)
+        structured_result = await self._llm.structured(sanitized_messages, model, request_params)
+        return structured_result, summary
+
+    def _sanitize_messages_for_llm(
+        self, messages: List[PromptMessageExtended]
+    ) -> Tuple[List[PromptMessageExtended], RemovedContentSummary | None]:
+        """Filter out content blocks that the current model cannot tokenize."""
+        if not messages:
+            return [], None
+
+        removed_blocks: List[_RemovedBlock] = []
+        sanitized_messages: List[PromptMessageExtended] = []
+
+        for message in messages:
+            sanitized, removed = self._sanitize_message_for_llm(message)
+            sanitized_messages.append(sanitized)
+            removed_blocks.extend(removed)
+
+        summary = self._build_removed_summary(removed_blocks)
+        if summary:
+            # Attach metadata to the last user message for downstream UI usage
+            for msg in reversed(sanitized_messages):
+                if msg.role == "user":
+                    channels = dict(msg.channels or {})
+                    meta_entries = list(channels.get(FAST_AGENT_REMOVED_METADATA_CHANNEL, []))
+                    meta_entries.extend(self._build_metadata_entries(removed_blocks))
+                    channels[FAST_AGENT_REMOVED_METADATA_CHANNEL] = meta_entries
+                    msg.channels = channels
+                    break
+
+        return sanitized_messages, summary
+
+    def _sanitize_message_for_llm(
+        self, message: PromptMessageExtended
+    ) -> Tuple[PromptMessageExtended, List[_RemovedBlock]]:
+        """Return a sanitized copy of a message and any removed content blocks."""
+        msg_copy = message.model_copy(deep=True)
+        removed: List[_RemovedBlock] = []
+
+        msg_copy.content = self._filter_block_list(
+            list(msg_copy.content or []), removed, source="message"
+        )
+
+        if msg_copy.tool_results:
+            new_tool_results: Dict[str, CallToolResult] = {}
+            for tool_id, tool_result in msg_copy.tool_results.items():
+                original_blocks = list(tool_result.content or [])
+                filtered_blocks = self._filter_block_list(
+                    original_blocks,
+                    removed,
+                    source="tool_result",
+                    tool_id=tool_id,
+                )
+
+                if filtered_blocks != original_blocks:
+                    try:
+                        updated_result = tool_result.model_copy(update={"content": filtered_blocks})
+                    except AttributeError:
+                        updated_result = CallToolResult(
+                            content=filtered_blocks, isError=getattr(tool_result, "isError", False)
+                        )
+                else:
+                    updated_result = tool_result
+
+                new_tool_results[tool_id] = updated_result
+
+            msg_copy.tool_results = new_tool_results
+
+        if removed:
+            channels = dict(msg_copy.channels or {})
+            error_entries = list(channels.get(FAST_AGENT_ERROR_CHANNEL, []))
+            error_entries.extend(self._build_error_channel_entries(removed))
+            channels[FAST_AGENT_ERROR_CHANNEL] = error_entries
+            msg_copy.channels = channels
+
+        return msg_copy, removed
+
+    def _filter_block_list(
+        self,
+        blocks: Sequence[ContentBlock],
+        removed: List[_RemovedBlock],
+        *,
+        source: str,
+        tool_id: str | None = None,
+    ) -> List[ContentBlock]:
+        kept: List[ContentBlock] = []
+        for block in blocks or []:
+            mime_type, category = self._extract_block_metadata(block)
+            if self._block_supported(mime_type, category):
+                kept.append(block)
+            else:
+                removed.append(
+                    _RemovedBlock(
+                        category=category,
+                        mime_type=mime_type,
+                        source=source,
+                        tool_id=tool_id,
+                        block=block,
+                    )
+                )
+        return kept
+
+    def _block_supported(self, mime_type: str | None, category: str) -> bool:
+        """Determine if the current model can process a content block."""
+        if category == "text":
+            return True
+
+        model_name = self._llm.model_name if self._llm else None
+        if not model_name:
+            return False
+
+        if mime_type:
+            return ModelDatabase.supports_mime(model_name, mime_type)
+
+        if category == "vision":
+            return ModelDatabase.supports_any_mime(
+                model_name, ["image/jpeg", "image/png", "image/webp"]
+            )
+
+        if category == "document":
+            return ModelDatabase.supports_mime(model_name, "application/pdf")
+
+        return False
+
+    def _extract_block_metadata(self, block: ContentBlock) -> Tuple[str | None, str]:
+        """Infer the MIME type and high-level category for a content block."""
+        if isinstance(block, TextContent):
+            return "text/plain", "text"
+
+        if isinstance(block, TextResourceContents):
+            mime = getattr(block, "mimeType", None) or "text/plain"
+            return mime, "text"
+
+        if isinstance(block, ImageContent):
+            mime = getattr(block, "mimeType", None) or "image/*"
+            return mime, "vision"
+
+        if isinstance(block, EmbeddedResource):
+            resource = getattr(block, "resource", None)
+            mime = getattr(resource, "mimeType", None)
+            if isinstance(resource, TextResourceContents) or (mime and is_text_mime_type(mime)):
+                return mime or "text/plain", "text"
+            if mime and mime.startswith("image/"):
+                return mime, "vision"
+            return mime, "document"
+
+        if isinstance(block, ResourceLink):
+            mime = getattr(block, "mimeType", None)
+            if mime and mime.startswith("image/"):
+                return mime, "vision"
+            if mime and is_text_mime_type(mime):
+                return mime, "text"
+            return mime, "document"
+
+        return None, "document"
+
+    def _build_error_channel_entries(self, removed: List[_RemovedBlock]) -> List[ContentBlock]:
+        """Create informative entries for the error channel."""
+        entries: List[ContentBlock] = []
+        model_name = self._llm.model_name if self._llm else None
+        model_display = model_name or "current model"
+
+        for item in removed:
+            mime_display = item.mime_type or "unknown"
+            category_label = self._category_label(item.category)
+            if item.source == "message":
+                source_label = "user content"
+            elif item.tool_id:
+                source_label = f"tool result '{item.tool_id}'"
+            else:
+                source_label = "tool result"
+
+            message = (
+                f"Removed unsupported {category_label} {source_label} ({mime_display}) "
+                f"before sending to {model_display}."
+            )
+            entries.append(text_content(message))
+            entries.append(item.block)
+
+        return entries
+
+    def _build_metadata_entries(self, removed: List[_RemovedBlock]) -> List[ContentBlock]:
+        entries: List[ContentBlock] = []
+        for item in removed:
+            metadata_text = text_content(
+                json.dumps(
+                    {
+                        "type": "fast-agent-removed",
+                        "category": item.category,
+                        "mime_type": item.mime_type,
+                        "source": item.source,
+                        "tool_id": item.tool_id,
+                    }
+                )
+            )
+            entries.append(metadata_text)
+        return entries
+
+    def _build_removed_summary(self, removed: List[_RemovedBlock]) -> RemovedContentSummary | None:
+        if not removed:
+            return None
+
+        counts = Counter(item.category for item in removed)
+        category_mimes: Dict[str, Tuple[str, ...]] = {}
+        mime_accumulator: Dict[str, set[str]] = defaultdict(set)
+
+        for item in removed:
+            mime_accumulator[item.category].add(item.mime_type or "unknown")
+
+        for category, mimes in mime_accumulator.items():
+            category_mimes[category] = tuple(sorted(mimes))
+
+        alert_flags = frozenset(
+            flag
+            for category in counts
+            for flag in (self._category_to_flag(category),)
+            if flag is not None
+        )
+
+        model_name = self._llm.model_name if self._llm else None
+        model_display = model_name or "current model"
+
+        category_order = ["vision", "document", "other", "text"]
+        segments: List[str] = []
+        for category in category_order:
+            if category not in counts:
+                continue
+            count = counts[category]
+            mime_list = ", ".join(category_mimes.get(category, ()))
+            label = self._category_label(category)
+            plural = "s" if count != 1 else ""
+            if mime_list:
+                segments.append(f"{count} {label} block{plural} ({mime_list})")
+            else:
+                segments.append(f"{count} {label} block{plural}")
+
+        # Append any remaining categories not covered in the preferred order
+        for category, count in counts.items():
+            if category in category_order:
+                continue
+            mime_list = ", ".join(category_mimes.get(category, ()))
+            label = self._category_label(category)
+            plural = "s" if count != 1 else ""
+            if mime_list:
+                segments.append(f"{count} {label} block{plural} ({mime_list})")
+            else:
+                segments.append(f"{count} {label} block{plural}")
+
+        detail = "; ".join(segments) if segments else "unknown content"
+
+        capability_labels = []
+        for flag in alert_flags:
+            match flag:
+                case "V":
+                    capability_labels.append("vision")
+                case "D":
+                    capability_labels.append("document")
+                case "T":
+                    capability_labels.append("text")
+
+        capability_note = ""
+        if capability_labels:
+            unique_caps = ", ".join(sorted(set(capability_labels)))
+            capability_note = f" Missing capability: {unique_caps}."
+
+        message = (
+            f"Removed unsupported content before sending to {model_display}: {detail}."
+            f"{capability_note} Stored original content in '{FAST_AGENT_ERROR_CHANNEL}'."
+        )
+
+        return RemovedContentSummary(
+            model_name=model_name,
+            counts=dict(counts),
+            category_mimes=category_mimes,
+            alert_flags=alert_flags,
+            message=message,
+        )
+
+    @staticmethod
+    def _category_to_flag(category: str) -> str | None:
+        mapping = {"text": "T", "document": "D", "vision": "V"}
+        return mapping.get(category)
+
+    @staticmethod
+    def _category_label(category: str) -> str:
+        if category == "vision":
+            return "vision"
+        if category == "document":
+            return "document"
+        if category == "text":
+            return "text"
+        return "content"
 
     @property
     def message_history(self) -> List[PromptMessageExtended]:
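
Taken together, the public `generate()`/`structured()` paths now discard the summary, while `LlmAgent` (above) uses the `_with_summary` variants to render the notice next to the assistant message. A hedged sketch of inspecting a summary directly, with agent construction and message assembly elided:

    # Sketch: `agent` is assumed to be an LlmDecorator-derived instance whose
    # model lacks vision support, and `messages` a List[PromptMessageExtended]
    # containing an ImageContent block. _generate_with_summary is internal API.
    async def inspect_removed(agent, messages) -> None:
        response, summary = await agent._generate_with_summary(messages)
        if summary:
            print(summary.message)              # human-readable removal notice
            print(summary.counts)               # e.g. {"vision": 1}
            print(sorted(summary.alert_flags))  # e.g. ["V"]
        # The removed blocks themselves are preserved on the sanitized copies
        # under FAST_AGENT_ERROR_CHANNEL (see _sanitize_message_for_llm above).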