fast-agent-mcp 0.3.6__py3-none-any.whl → 0.3.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of fast-agent-mcp might be problematic.

fast_agent/__init__.py CHANGED
@@ -27,8 +27,11 @@ from fast_agent.config import (
     XAISettings,
 )
 
+# Prompt helpers (safe - no heavy dependencies)
+from fast_agent.mcp.prompt import Prompt
+
 # Type definitions and enums (safe - no dependencies)
-from fast_agent.types import LlmStopReason, RequestParams
+from fast_agent.types import LlmStopReason, PromptMessageExtended, RequestParams
 
 
 def __getattr__(name: str):
@@ -91,6 +94,8 @@ def __getattr__(name: str):
 if TYPE_CHECKING:  # pragma: no cover - typing aid only
     # Provide a concrete import path for type checkers/IDEs
     from fast_agent.core.fastagent import FastAgent as FastAgent  # noqa: F401
+    from fast_agent.mcp.prompt import Prompt as Prompt  # noqa: F401
+    from fast_agent.types import PromptMessageExtended as PromptMessageExtended  # noqa: F401
 
 
 __all__ = [
@@ -127,6 +132,9 @@ __all__ = [
     # Type definitions and enums (eagerly loaded)
     "LlmStopReason",
     "RequestParams",
+    "PromptMessageExtended",
+    # Prompt helpers (eagerly loaded)
+    "Prompt",
     # Agents (lazy loaded)
     "LlmAgent",
     "LlmDecorator",
@@ -3,7 +3,7 @@ Type definitions for agents and agent configurations.
 """
 
 from dataclasses import dataclass, field
-from enum import Enum
+from enum import StrEnum, auto
 from typing import Dict, List, Optional
 
 from mcp.client.session import ElicitationFnT
@@ -12,18 +12,18 @@ from mcp.client.session import ElicitationFnT
 from fast_agent.types import RequestParams
 
 
-class AgentType(Enum):
+class AgentType(StrEnum):
     """Enumeration of supported agent types."""
 
-    LLM = "llm"  # simple llm delegator
-    BASIC = "agent"
-    CUSTOM = "custom"
-    ORCHESTRATOR = "orchestrator"
-    PARALLEL = "parallel"
-    EVALUATOR_OPTIMIZER = "evaluator_optimizer"
-    ROUTER = "router"
-    CHAIN = "chain"
-    ITERATIVE_PLANNER = "iterative_planner"
+    LLM = auto()
+    BASIC = auto()
+    CUSTOM = auto()
+    ORCHESTRATOR = auto()
+    PARALLEL = auto()
+    EVALUATOR_OPTIMIZER = auto()
+    ROUTER = auto()
+    CHAIN = auto()
+    ITERATIVE_PLANNER = auto()
 
 
 @dataclass
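
Note the semantic shift hiding in this hunk: on a StrEnum, auto() yields the lower-cased member name, so AgentType.BASIC now compares equal to "basic" where the old literal was "agent". A standalone illustration of the Python 3.11+ behavior (not code from the package):

    from enum import StrEnum, auto

    class AgentType(StrEnum):
        LLM = auto()
        BASIC = auto()

    assert AgentType.LLM == "llm"      # same value as the old literal
    assert AgentType.BASIC == "basic"  # old literal was "agent"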
@@ -81,56 +81,69 @@ class LlmAgent(LlmDecorator):
         """
 
         # Determine display content based on stop reason if not provided
-        if additional_message is None:
-            # Generate additional message based on stop reason
-            match message.stop_reason:
-                case LlmStopReason.END_TURN:
-                    # No additional message needed for normal end turn
-                    additional_message_text = None
-
-                case LlmStopReason.MAX_TOKENS:
-                    additional_message_text = Text(
+        additional_segments: List[Text] = []
+
+        # Generate additional message based on stop reason
+        match message.stop_reason:
+            case LlmStopReason.END_TURN:
+                pass
+
+            case LlmStopReason.MAX_TOKENS:
+                additional_segments.append(
+                    Text(
                         "\n\nMaximum output tokens reached - generation stopped.",
                         style="dim red italic",
                     )
+                )
 
-                case LlmStopReason.SAFETY:
-                    additional_message_text = Text(
-                        "\n\nContent filter activated - generation stopped.", style="dim red italic"
+            case LlmStopReason.SAFETY:
+                additional_segments.append(
+                    Text(
+                        "\n\nContent filter activated - generation stopped.",
+                        style="dim red italic",
                     )
+                )
 
-                case LlmStopReason.PAUSE:
-                    additional_message_text = Text(
-                        "\n\nLLM has requested a pause.", style="dim green italic"
-                    )
+            case LlmStopReason.PAUSE:
+                additional_segments.append(
+                    Text("\n\nLLM has requested a pause.", style="dim green italic")
+                )
 
-                case LlmStopReason.STOP_SEQUENCE:
-                    additional_message_text = Text(
-                        "\n\nStop Sequence activated - generation stopped.", style="dim red italic"
+            case LlmStopReason.STOP_SEQUENCE:
+                additional_segments.append(
+                    Text(
+                        "\n\nStop Sequence activated - generation stopped.",
+                        style="dim red italic",
                     )
+                )
 
-                case LlmStopReason.TOOL_USE:
-                    if None is message.last_text():
-                        additional_message_text = Text(
-                            "The assistant requested tool calls", style="dim green italic"
-                        )
-                    else:
-                        additional_message_text = None
+            case LlmStopReason.TOOL_USE:
+                if None is message.last_text():
+                    additional_segments.append(
+                        Text("The assistant requested tool calls", style="dim green italic")
+                    )
 
-                case _:
-                    if message.stop_reason:
-                        additional_message_text = Text(
+            case _:
+                if message.stop_reason:
+                    additional_segments.append(
+                        Text(
                             f"\n\nGeneration stopped for an unhandled reason ({message.stop_reason})",
                             style="dim red italic",
                         )
-                    else:
-                        additional_message_text = None
-        else:
-            # Use provided additional message
-            additional_message_text = (
-                additional_message if isinstance(additional_message, Text) else None
+                    )
+
+        if additional_message is not None:
+            additional_segments.append(
+                additional_message if isinstance(additional_message, Text) else Text(str(additional_message))
             )
 
+        additional_message_text = None
+        if additional_segments:
+            combined = Text()
+            for segment in additional_segments:
+                combined += segment
+            additional_message_text = combined
+
         message_text = message.last_text() or ""
 
         # Use provided name/model or fall back to defaults
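
The refactor collects Rich Text segments in a list and concatenates them at the end, so a stop-reason notice and a caller-supplied additional_message can both appear. A small sketch of the concatenation pattern (rich.text.Text supports + and +=, keeping each segment's style):

    from rich.text import Text

    segments = [
        Text("\n\nMaximum output tokens reached - generation stopped.", style="dim red italic"),
        Text("\n\nextra caller note", style="dim green italic"),
    ]

    combined = Text()
    for segment in segments:
        combined += segment  # per-segment styles are preserved as spans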
@@ -182,9 +195,13 @@ class LlmAgent(LlmDecorator):
 
         # TODO -- we should merge the request parameters here with the LLM defaults?
         # TODO - manage error catch, recovery, pause
-        result = await super().generate_impl(messages, request_params, tools)
+        result, summary = await self._generate_with_summary(messages, request_params, tools)
 
-        await self.show_assistant_message(result)
+        summary_text = (
+            Text(f"\n\n{summary.message}", style="dim red italic") if summary else None
+        )
+
+        await self.show_assistant_message(result, additional_message=summary_text)
         return result
 
     async def structured_impl(
@@ -196,8 +213,13 @@ class LlmAgent(LlmDecorator):
         if "user" == messages[-1].role:
             self.show_user_message(message=messages[-1])
 
-        result, message = await super().structured_impl(messages, model, request_params)
-        await self.show_assistant_message(message=message)
+        (result, message), summary = await self._structured_with_summary(
+            messages, model, request_params
+        )
+        summary_text = (
+            Text(f"\n\n{summary.message}", style="dim red italic") if summary else None
+        )
+        await self.show_assistant_message(message=message, additional_message=summary_text)
         return result, message
 
     # async def show_prompt_loaded(
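
Both public paths now route through the *_with_summary helpers added to LlmDecorator below, so a sanitization notice is rendered with the assistant output rather than silently dropped. Sketch of the structured call shape this backs (hypothetical usage; agent stands in for an attached LlmAgent):

    from pydantic import BaseModel

    class Weather(BaseModel):
        city: str
        temperature_c: float

    # Returns (parsed model or None, assistant message); a removal summary,
    # if any, is shown alongside the assistant output.
    result, message = await agent.structured([Prompt.user("Weather in Paris?")], Weather)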
@@ -2,6 +2,9 @@
 Decorator for LlmAgent, normalizes PromptMessageExtended, allows easy extension of Agents
 """
 
+import json
+from collections import Counter, defaultdict
+from dataclasses import dataclass
 from typing import (
     TYPE_CHECKING,
     Dict,
@@ -21,28 +24,36 @@ if TYPE_CHECKING:
     from a2a.types import AgentCard
 from mcp import Tool
 from mcp.types import (
+    CallToolResult,
+    ContentBlock,
+    EmbeddedResource,
     GetPromptResult,
+    ImageContent,
     Prompt,
     PromptMessage,
     ReadResourceResult,
+    ResourceLink,
+    TextContent,
+    TextResourceContents,
 )
 from opentelemetry import trace
 from pydantic import BaseModel
 
 from fast_agent.agents.agent_types import AgentConfig, AgentType
+from fast_agent.constants import FAST_AGENT_ERROR_CHANNEL, FAST_AGENT_REMOVED_METADATA_CHANNEL
 from fast_agent.context import Context
-from fast_agent.core.logging.logger import get_logger
 from fast_agent.interfaces import (
     AgentProtocol,
     FastAgentLLMProtocol,
     LLMFactoryProtocol,
 )
+from fast_agent.llm.model_database import ModelDatabase
 from fast_agent.llm.provider_types import Provider
 from fast_agent.llm.usage_tracking import UsageAccumulator
-from fast_agent.mcp.helpers.content_helpers import normalize_to_extended_list
+from fast_agent.mcp.helpers.content_helpers import normalize_to_extended_list, text_content
+from fast_agent.mcp.mime_utils import is_text_mime_type
 from fast_agent.types import PromptMessageExtended, RequestParams
 
-logger = get_logger(__name__)
 # Define a TypeVar for models
 ModelT = TypeVar("ModelT", bound=BaseModel)
 
@@ -50,6 +61,28 @@ ModelT = TypeVar("ModelT", bound=BaseModel)
 LLM = TypeVar("LLM", bound=FastAgentLLMProtocol)
 
 
+@dataclass
+class _RemovedBlock:
+    """Internal representation of a removed content block."""
+
+    category: str
+    mime_type: str | None
+    source: str
+    tool_id: str | None
+    block: ContentBlock
+
+
+@dataclass(frozen=True)
+class RemovedContentSummary:
+    """Summary information about removed content for the last turn."""
+
+    model_name: str | None
+    counts: Dict[str, int]
+    category_mimes: Dict[str, Tuple[str, ...]]
+    alert_flags: frozenset[str]
+    message: str
+
+
 class LlmDecorator(AgentProtocol):
     """
     A pure delegation wrapper around LlmAgent instances.
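
The Counter/defaultdict imports added above feed the aggregation in _build_removed_summary further down. The same pattern in isolation, with made-up data:

    from collections import Counter, defaultdict

    removed = [("vision", "image/png"), ("vision", "image/jpeg"), ("document", "application/pdf")]

    counts = Counter(category for category, _ in removed)
    mimes: dict[str, set[str]] = defaultdict(set)
    for category, mime in removed:
        mimes[category].add(mime)

    assert counts == {"vision": 2, "document": 1}
    assert {k: tuple(sorted(v)) for k, v in mimes.items()} == {
        "vision": ("image/jpeg", "image/png"),
        "document": ("application/pdf",),
    }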
@@ -236,8 +269,8 @@ class LlmDecorator(AgentProtocol):
         Returns:
             The LLM's response as a PromptMessageExtended
         """
-        assert self._llm, "LLM is not attached"
-        return await self._llm.generate(messages, request_params, tools)
+        response, _ = await self._generate_with_summary(messages, request_params, tools)
+        return response
 
     async def apply_prompt_template(self, prompt_result: GetPromptResult, prompt_name: str) -> str:
         """
@@ -338,8 +371,324 @@
         Returns:
             A tuple of (parsed model instance or None, assistant response message)
         """
+        result, _ = await self._structured_with_summary(messages, model, request_params)
+        return result
+
+    async def _generate_with_summary(
+        self,
+        messages: List[PromptMessageExtended],
+        request_params: RequestParams | None = None,
+        tools: List[Tool] | None = None,
+    ) -> Tuple[PromptMessageExtended, RemovedContentSummary | None]:
         assert self._llm, "LLM is not attached"
-        return await self._llm.structured(messages, model, request_params)
+        sanitized_messages, summary = self._sanitize_messages_for_llm(messages)
+        response = await self._llm.generate(sanitized_messages, request_params, tools)
+        return response, summary
+
+    async def _structured_with_summary(
+        self,
+        messages: List[PromptMessageExtended],
+        model: Type[ModelT],
+        request_params: RequestParams | None = None,
+    ) -> Tuple[Tuple[ModelT | None, PromptMessageExtended], RemovedContentSummary | None]:
+        assert self._llm, "LLM is not attached"
+        sanitized_messages, summary = self._sanitize_messages_for_llm(messages)
+        structured_result = await self._llm.structured(sanitized_messages, model, request_params)
+        return structured_result, summary
+
+    def _sanitize_messages_for_llm(
+        self, messages: List[PromptMessageExtended]
+    ) -> Tuple[List[PromptMessageExtended], RemovedContentSummary | None]:
+        """Filter out content blocks that the current model cannot tokenize."""
+        if not messages:
+            return [], None
+
+        removed_blocks: List[_RemovedBlock] = []
+        sanitized_messages: List[PromptMessageExtended] = []
+
+        for message in messages:
+            sanitized, removed = self._sanitize_message_for_llm(message)
+            sanitized_messages.append(sanitized)
+            removed_blocks.extend(removed)
+
+        summary = self._build_removed_summary(removed_blocks)
+        if summary:
+            # Attach metadata to the last user message for downstream UI usage
+            for msg in reversed(sanitized_messages):
+                if msg.role == "user":
+                    channels = dict(msg.channels or {})
+                    meta_entries = list(channels.get(FAST_AGENT_REMOVED_METADATA_CHANNEL, []))
+                    meta_entries.extend(self._build_metadata_entries(removed_blocks))
+                    channels[FAST_AGENT_REMOVED_METADATA_CHANNEL] = meta_entries
+                    msg.channels = channels
+                    break
+
+        return sanitized_messages, summary
+
+    def _sanitize_message_for_llm(
+        self, message: PromptMessageExtended
+    ) -> Tuple[PromptMessageExtended, List[_RemovedBlock]]:
+        """Return a sanitized copy of a message and any removed content blocks."""
+        msg_copy = message.model_copy(deep=True)
+        removed: List[_RemovedBlock] = []
+
+        msg_copy.content = self._filter_block_list(
+            list(msg_copy.content or []), removed, source="message"
+        )
+
+        if msg_copy.tool_results:
+            new_tool_results: Dict[str, CallToolResult] = {}
+            for tool_id, tool_result in msg_copy.tool_results.items():
+                original_blocks = list(tool_result.content or [])
+                filtered_blocks = self._filter_block_list(
+                    original_blocks,
+                    removed,
+                    source="tool_result",
+                    tool_id=tool_id,
+                )
+
+                if filtered_blocks != original_blocks:
+                    try:
+                        updated_result = tool_result.model_copy(update={"content": filtered_blocks})
+                    except AttributeError:
+                        updated_result = CallToolResult(
+                            content=filtered_blocks, isError=getattr(tool_result, "isError", False)
+                        )
+                else:
+                    updated_result = tool_result
+
+                new_tool_results[tool_id] = updated_result
+
+            msg_copy.tool_results = new_tool_results
+
+        if removed:
+            channels = dict(msg_copy.channels or {})
+            error_entries = list(channels.get(FAST_AGENT_ERROR_CHANNEL, []))
+            error_entries.extend(self._build_error_channel_entries(removed))
+            channels[FAST_AGENT_ERROR_CHANNEL] = error_entries
+            msg_copy.channels = channels
+
+        return msg_copy, removed
+
+    def _filter_block_list(
+        self,
+        blocks: Sequence[ContentBlock],
+        removed: List[_RemovedBlock],
+        *,
+        source: str,
+        tool_id: str | None = None,
+    ) -> List[ContentBlock]:
+        kept: List[ContentBlock] = []
+        for block in blocks or []:
+            mime_type, category = self._extract_block_metadata(block)
+            if self._block_supported(mime_type, category):
+                kept.append(block)
+            else:
+                removed.append(
+                    _RemovedBlock(
+                        category=category,
+                        mime_type=mime_type,
+                        source=source,
+                        tool_id=tool_id,
+                        block=block,
+                    )
+                )
+        return kept
+
+    def _block_supported(self, mime_type: str | None, category: str) -> bool:
+        """Determine if the current model can process a content block."""
+        if category == "text":
+            return True
+
+        model_name = self._llm.model_name if self._llm else None
+        if not model_name:
+            return False
+
+        if mime_type:
+            return ModelDatabase.supports_mime(model_name, mime_type)
+
+        if category == "vision":
+            return ModelDatabase.supports_any_mime(
+                model_name, ["image/jpeg", "image/png", "image/webp"]
+            )
+
+        if category == "document":
+            return ModelDatabase.supports_mime(model_name, "application/pdf")
+
+        return False
+
+    def _extract_block_metadata(self, block: ContentBlock) -> Tuple[str | None, str]:
+        """Infer the MIME type and high-level category for a content block."""
+        if isinstance(block, TextContent):
+            return "text/plain", "text"
+
+        if isinstance(block, TextResourceContents):
+            mime = getattr(block, "mimeType", None) or "text/plain"
+            return mime, "text"
+
+        if isinstance(block, ImageContent):
+            mime = getattr(block, "mimeType", None) or "image/*"
+            return mime, "vision"
+
+        if isinstance(block, EmbeddedResource):
+            resource = getattr(block, "resource", None)
+            mime = getattr(resource, "mimeType", None)
+            if isinstance(resource, TextResourceContents) or (
+                mime and is_text_mime_type(mime)
+            ):
+                return mime or "text/plain", "text"
+            if mime and mime.startswith("image/"):
+                return mime, "vision"
+            return mime, "document"
+
+        if isinstance(block, ResourceLink):
+            mime = getattr(block, "mimeType", None)
+            if mime and mime.startswith("image/"):
+                return mime, "vision"
+            if mime and is_text_mime_type(mime):
+                return mime, "text"
+            return mime, "document"
+
+        return None, "document"
+
+    def _build_error_channel_entries(self, removed: List[_RemovedBlock]) -> List[ContentBlock]:
+        """Create informative entries for the error channel."""
+        entries: List[ContentBlock] = []
+        model_name = self._llm.model_name if self._llm else None
+        model_display = model_name or "current model"
+
+        for item in removed:
+            mime_display = item.mime_type or "unknown"
+            category_label = self._category_label(item.category)
+            if item.source == "message":
+                source_label = "user content"
+            elif item.tool_id:
+                source_label = f"tool result '{item.tool_id}'"
+            else:
+                source_label = "tool result"
+
+            message = (
+                f"Removed unsupported {category_label} {source_label} ({mime_display}) "
+                f"before sending to {model_display}."
+            )
+            entries.append(text_content(message))
+            entries.append(item.block)
+
+        return entries
+
+    def _build_metadata_entries(self, removed: List[_RemovedBlock]) -> List[ContentBlock]:
+        entries: List[ContentBlock] = []
+        for item in removed:
+            metadata_text = text_content(
+                json.dumps(
+                    {
+                        "type": "fast-agent-removed",
+                        "category": item.category,
+                        "mime_type": item.mime_type,
+                        "source": item.source,
+                        "tool_id": item.tool_id,
+                    }
+                )
+            )
+            entries.append(metadata_text)
+        return entries
+
+    def _build_removed_summary(
+        self, removed: List[_RemovedBlock]
+    ) -> RemovedContentSummary | None:
+        if not removed:
+            return None
+
+        counts = Counter(item.category for item in removed)
+        category_mimes: Dict[str, Tuple[str, ...]] = {}
+        mime_accumulator: Dict[str, set[str]] = defaultdict(set)
+
+        for item in removed:
+            mime_accumulator[item.category].add(item.mime_type or "unknown")
+
+        for category, mimes in mime_accumulator.items():
+            category_mimes[category] = tuple(sorted(mimes))
+
+        alert_flags = frozenset(
+            flag
+            for category in counts
+            for flag in (self._category_to_flag(category),)
+            if flag is not None
+        )
+
+        model_name = self._llm.model_name if self._llm else None
+        model_display = model_name or "current model"
+
+        category_order = ["vision", "document", "other", "text"]
+        segments: List[str] = []
+        for category in category_order:
+            if category not in counts:
+                continue
+            count = counts[category]
+            mime_list = ", ".join(category_mimes.get(category, ()))
+            label = self._category_label(category)
+            plural = "s" if count != 1 else ""
+            if mime_list:
+                segments.append(f"{count} {label} block{plural} ({mime_list})")
+            else:
+                segments.append(f"{count} {label} block{plural}")
+
+        # Append any remaining categories not covered in the preferred order
+        for category, count in counts.items():
+            if category in category_order:
+                continue
+            mime_list = ", ".join(category_mimes.get(category, ()))
+            label = self._category_label(category)
+            plural = "s" if count != 1 else ""
+            if mime_list:
+                segments.append(f"{count} {label} block{plural} ({mime_list})")
+            else:
+                segments.append(f"{count} {label} block{plural}")
+
+        detail = "; ".join(segments) if segments else "unknown content"
+
+        capability_labels = []
+        for flag in alert_flags:
+            match flag:
+                case "V":
+                    capability_labels.append("vision")
+                case "D":
+                    capability_labels.append("document")
+                case "T":
+                    capability_labels.append("text")
+
+        capability_note = ""
+        if capability_labels:
+            unique_caps = ", ".join(sorted(set(capability_labels)))
+            capability_note = f" Missing capability: {unique_caps}."
+
+        message = (
+            f"Removed unsupported content before sending to {model_display}: {detail}."
+            f"{capability_note} Stored original content in '{FAST_AGENT_ERROR_CHANNEL}'."
+        )
+
+        return RemovedContentSummary(
+            model_name=model_name,
+            counts=dict(counts),
+            category_mimes=category_mimes,
+            alert_flags=alert_flags,
+            message=message,
+        )
+
+    @staticmethod
+    def _category_to_flag(category: str) -> str | None:
+        mapping = {"text": "T", "document": "D", "vision": "V"}
+        return mapping.get(category)
+
+    @staticmethod
+    def _category_label(category: str) -> str:
+        if category == "vision":
+            return "vision"
+        if category == "document":
+            return "document"
+        if category == "text":
+            return "text"
+        return "content"
 
     @property
     def message_history(self) -> List[PromptMessageExtended]:
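
Taken together, the new helpers filter each outgoing message against the model's MIME support, stash the originals in the error channel, and summarize what was dropped for the turn. A standalone sketch of the filtering pass with a stubbed support check (the real code consults ModelDatabase per model; the SUPPORTED set here is hypothetical):

    from collections import Counter

    SUPPORTED = {"text/plain", "image/png"}  # hypothetical model capabilities

    blocks = [
        ("text/plain", "text"),
        ("image/png", "vision"),
        ("application/pdf", "document"),
    ]

    # Text always passes; other categories need MIME support
    kept = [b for b in blocks if b[1] == "text" or b[0] in SUPPORTED]
    removed = [b for b in blocks if b not in kept]

    counts = Counter(category for _, category in removed)
    assert kept == [("text/plain", "text"), ("image/png", "vision")]
    assert counts == {"document": 1}

The removed PDF would then surface in the turn summary as something like "1 document block (application/pdf)", with the original block preserved under the FAST_AGENT_ERROR_CHANNEL channel.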