fast-agent-mcp 0.1.12__py3-none-any.whl → 0.2.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (169)
  1. {fast_agent_mcp-0.1.12.dist-info → fast_agent_mcp-0.2.0.dist-info}/METADATA +3 -4
  2. fast_agent_mcp-0.2.0.dist-info/RECORD +123 -0
  3. mcp_agent/__init__.py +75 -0
  4. mcp_agent/agents/agent.py +61 -415
  5. mcp_agent/agents/base_agent.py +522 -0
  6. mcp_agent/agents/workflow/__init__.py +1 -0
  7. mcp_agent/agents/workflow/chain_agent.py +173 -0
  8. mcp_agent/agents/workflow/evaluator_optimizer.py +362 -0
  9. mcp_agent/agents/workflow/orchestrator_agent.py +591 -0
  10. mcp_agent/{workflows/orchestrator → agents/workflow}/orchestrator_models.py +11 -21
  11. mcp_agent/agents/workflow/parallel_agent.py +182 -0
  12. mcp_agent/agents/workflow/router_agent.py +307 -0
  13. mcp_agent/app.py +15 -19
  14. mcp_agent/cli/commands/bootstrap.py +19 -38
  15. mcp_agent/cli/commands/config.py +4 -4
  16. mcp_agent/cli/commands/setup.py +7 -14
  17. mcp_agent/cli/main.py +7 -10
  18. mcp_agent/cli/terminal.py +3 -3
  19. mcp_agent/config.py +25 -40
  20. mcp_agent/context.py +12 -21
  21. mcp_agent/context_dependent.py +3 -5
  22. mcp_agent/core/agent_types.py +10 -7
  23. mcp_agent/core/direct_agent_app.py +179 -0
  24. mcp_agent/core/direct_decorators.py +443 -0
  25. mcp_agent/core/direct_factory.py +476 -0
  26. mcp_agent/core/enhanced_prompt.py +23 -55
  27. mcp_agent/core/exceptions.py +8 -8
  28. mcp_agent/core/fastagent.py +145 -371
  29. mcp_agent/core/interactive_prompt.py +424 -0
  30. mcp_agent/core/mcp_content.py +17 -17
  31. mcp_agent/core/prompt.py +6 -9
  32. mcp_agent/core/request_params.py +6 -3
  33. mcp_agent/core/validation.py +92 -18
  34. mcp_agent/executor/decorator_registry.py +9 -17
  35. mcp_agent/executor/executor.py +8 -17
  36. mcp_agent/executor/task_registry.py +2 -4
  37. mcp_agent/executor/temporal.py +19 -41
  38. mcp_agent/executor/workflow.py +3 -5
  39. mcp_agent/executor/workflow_signal.py +15 -21
  40. mcp_agent/human_input/handler.py +4 -7
  41. mcp_agent/human_input/types.py +2 -3
  42. mcp_agent/llm/__init__.py +2 -0
  43. mcp_agent/llm/augmented_llm.py +450 -0
  44. mcp_agent/llm/augmented_llm_passthrough.py +162 -0
  45. mcp_agent/llm/augmented_llm_playback.py +83 -0
  46. mcp_agent/llm/memory.py +103 -0
  47. mcp_agent/{workflows/llm → llm}/model_factory.py +22 -16
  48. mcp_agent/{workflows/llm → llm}/prompt_utils.py +1 -3
  49. mcp_agent/llm/providers/__init__.py +8 -0
  50. mcp_agent/{workflows/llm → llm/providers}/anthropic_utils.py +8 -25
  51. mcp_agent/{workflows/llm → llm/providers}/augmented_llm_anthropic.py +56 -194
  52. mcp_agent/llm/providers/augmented_llm_deepseek.py +53 -0
  53. mcp_agent/{workflows/llm → llm/providers}/augmented_llm_openai.py +99 -190
  54. mcp_agent/{workflows/llm → llm}/providers/multipart_converter_anthropic.py +72 -71
  55. mcp_agent/{workflows/llm → llm}/providers/multipart_converter_openai.py +65 -71
  56. mcp_agent/{workflows/llm → llm}/providers/openai_multipart.py +16 -44
  57. mcp_agent/{workflows/llm → llm/providers}/openai_utils.py +4 -4
  58. mcp_agent/{workflows/llm → llm}/providers/sampling_converter_anthropic.py +9 -11
  59. mcp_agent/{workflows/llm → llm}/providers/sampling_converter_openai.py +8 -12
  60. mcp_agent/{workflows/llm → llm}/sampling_converter.py +3 -31
  61. mcp_agent/llm/sampling_format_converter.py +37 -0
  62. mcp_agent/logging/events.py +1 -5
  63. mcp_agent/logging/json_serializer.py +7 -6
  64. mcp_agent/logging/listeners.py +20 -23
  65. mcp_agent/logging/logger.py +17 -19
  66. mcp_agent/logging/rich_progress.py +10 -8
  67. mcp_agent/logging/tracing.py +4 -6
  68. mcp_agent/logging/transport.py +22 -22
  69. mcp_agent/mcp/gen_client.py +1 -3
  70. mcp_agent/mcp/interfaces.py +117 -110
  71. mcp_agent/mcp/logger_textio.py +97 -0
  72. mcp_agent/mcp/mcp_agent_client_session.py +7 -7
  73. mcp_agent/mcp/mcp_agent_server.py +8 -8
  74. mcp_agent/mcp/mcp_aggregator.py +102 -143
  75. mcp_agent/mcp/mcp_connection_manager.py +20 -27
  76. mcp_agent/mcp/prompt_message_multipart.py +68 -16
  77. mcp_agent/mcp/prompt_render.py +77 -0
  78. mcp_agent/mcp/prompt_serialization.py +30 -48
  79. mcp_agent/mcp/prompts/prompt_constants.py +18 -0
  80. mcp_agent/mcp/prompts/prompt_helpers.py +327 -0
  81. mcp_agent/mcp/prompts/prompt_load.py +109 -0
  82. mcp_agent/mcp/prompts/prompt_server.py +155 -195
  83. mcp_agent/mcp/prompts/prompt_template.py +35 -66
  84. mcp_agent/mcp/resource_utils.py +7 -14
  85. mcp_agent/mcp/sampling.py +17 -17
  86. mcp_agent/mcp_server/agent_server.py +13 -17
  87. mcp_agent/mcp_server_registry.py +13 -22
  88. mcp_agent/resources/examples/{workflows → in_dev}/agent_build.py +3 -2
  89. mcp_agent/resources/examples/in_dev/slides.py +110 -0
  90. mcp_agent/resources/examples/internal/agent.py +6 -3
  91. mcp_agent/resources/examples/internal/fastagent.config.yaml +8 -2
  92. mcp_agent/resources/examples/internal/job.py +2 -1
  93. mcp_agent/resources/examples/internal/prompt_category.py +1 -1
  94. mcp_agent/resources/examples/internal/prompt_sizing.py +3 -5
  95. mcp_agent/resources/examples/internal/sizer.py +2 -1
  96. mcp_agent/resources/examples/internal/social.py +2 -1
  97. mcp_agent/resources/examples/prompting/agent.py +2 -1
  98. mcp_agent/resources/examples/prompting/image_server.py +4 -8
  99. mcp_agent/resources/examples/prompting/work_with_image.py +19 -0
  100. mcp_agent/ui/console_display.py +16 -20
  101. fast_agent_mcp-0.1.12.dist-info/RECORD +0 -161
  102. mcp_agent/core/agent_app.py +0 -646
  103. mcp_agent/core/agent_utils.py +0 -71
  104. mcp_agent/core/decorators.py +0 -455
  105. mcp_agent/core/factory.py +0 -463
  106. mcp_agent/core/proxies.py +0 -269
  107. mcp_agent/core/types.py +0 -24
  108. mcp_agent/eval/__init__.py +0 -0
  109. mcp_agent/mcp/stdio.py +0 -111
  110. mcp_agent/resources/examples/data-analysis/analysis-campaign.py +0 -188
  111. mcp_agent/resources/examples/data-analysis/analysis.py +0 -65
  112. mcp_agent/resources/examples/data-analysis/fastagent.config.yaml +0 -41
  113. mcp_agent/resources/examples/data-analysis/mount-point/WA_Fn-UseC_-HR-Employee-Attrition.csv +0 -1471
  114. mcp_agent/resources/examples/mcp_researcher/researcher-eval.py +0 -53
  115. mcp_agent/resources/examples/researcher/fastagent.config.yaml +0 -66
  116. mcp_agent/resources/examples/researcher/researcher-eval.py +0 -53
  117. mcp_agent/resources/examples/researcher/researcher-imp.py +0 -190
  118. mcp_agent/resources/examples/researcher/researcher.py +0 -38
  119. mcp_agent/resources/examples/workflows/chaining.py +0 -44
  120. mcp_agent/resources/examples/workflows/evaluator.py +0 -78
  121. mcp_agent/resources/examples/workflows/fastagent.config.yaml +0 -24
  122. mcp_agent/resources/examples/workflows/human_input.py +0 -25
  123. mcp_agent/resources/examples/workflows/orchestrator.py +0 -73
  124. mcp_agent/resources/examples/workflows/parallel.py +0 -78
  125. mcp_agent/resources/examples/workflows/router.py +0 -53
  126. mcp_agent/resources/examples/workflows/sse.py +0 -23
  127. mcp_agent/telemetry/__init__.py +0 -0
  128. mcp_agent/telemetry/usage_tracking.py +0 -18
  129. mcp_agent/workflows/__init__.py +0 -0
  130. mcp_agent/workflows/embedding/__init__.py +0 -0
  131. mcp_agent/workflows/embedding/embedding_base.py +0 -61
  132. mcp_agent/workflows/embedding/embedding_cohere.py +0 -49
  133. mcp_agent/workflows/embedding/embedding_openai.py +0 -46
  134. mcp_agent/workflows/evaluator_optimizer/__init__.py +0 -0
  135. mcp_agent/workflows/evaluator_optimizer/evaluator_optimizer.py +0 -481
  136. mcp_agent/workflows/intent_classifier/__init__.py +0 -0
  137. mcp_agent/workflows/intent_classifier/intent_classifier_base.py +0 -120
  138. mcp_agent/workflows/intent_classifier/intent_classifier_embedding.py +0 -134
  139. mcp_agent/workflows/intent_classifier/intent_classifier_embedding_cohere.py +0 -45
  140. mcp_agent/workflows/intent_classifier/intent_classifier_embedding_openai.py +0 -45
  141. mcp_agent/workflows/intent_classifier/intent_classifier_llm.py +0 -161
  142. mcp_agent/workflows/intent_classifier/intent_classifier_llm_anthropic.py +0 -60
  143. mcp_agent/workflows/intent_classifier/intent_classifier_llm_openai.py +0 -60
  144. mcp_agent/workflows/llm/__init__.py +0 -0
  145. mcp_agent/workflows/llm/augmented_llm.py +0 -753
  146. mcp_agent/workflows/llm/augmented_llm_passthrough.py +0 -241
  147. mcp_agent/workflows/llm/augmented_llm_playback.py +0 -109
  148. mcp_agent/workflows/llm/providers/__init__.py +0 -8
  149. mcp_agent/workflows/llm/sampling_format_converter.py +0 -22
  150. mcp_agent/workflows/orchestrator/__init__.py +0 -0
  151. mcp_agent/workflows/orchestrator/orchestrator.py +0 -578
  152. mcp_agent/workflows/parallel/__init__.py +0 -0
  153. mcp_agent/workflows/parallel/fan_in.py +0 -350
  154. mcp_agent/workflows/parallel/fan_out.py +0 -187
  155. mcp_agent/workflows/parallel/parallel_llm.py +0 -166
  156. mcp_agent/workflows/router/__init__.py +0 -0
  157. mcp_agent/workflows/router/router_base.py +0 -368
  158. mcp_agent/workflows/router/router_embedding.py +0 -240
  159. mcp_agent/workflows/router/router_embedding_cohere.py +0 -59
  160. mcp_agent/workflows/router/router_embedding_openai.py +0 -59
  161. mcp_agent/workflows/router/router_llm.py +0 -320
  162. mcp_agent/workflows/swarm/__init__.py +0 -0
  163. mcp_agent/workflows/swarm/swarm.py +0 -320
  164. mcp_agent/workflows/swarm/swarm_anthropic.py +0 -42
  165. mcp_agent/workflows/swarm/swarm_openai.py +0 -41
  166. {fast_agent_mcp-0.1.12.dist-info → fast_agent_mcp-0.2.0.dist-info}/WHEEL +0 -0
  167. {fast_agent_mcp-0.1.12.dist-info → fast_agent_mcp-0.2.0.dist-info}/entry_points.txt +0 -0
  168. {fast_agent_mcp-0.1.12.dist-info → fast_agent_mcp-0.2.0.dist-info}/licenses/LICENSE +0 -0
  169. /mcp_agent/{workflows/orchestrator → agents/workflow}/orchestrator_prompts.py +0 -0
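
The headline change in this list is structural: the old mcp_agent/workflows/llm package is folded into a new top-level mcp_agent/llm package, with provider code under mcp_agent/llm/providers, and the workflow patterns are rebuilt as agents under mcp_agent/agents/workflow. A minimal sketch of what that means for downstream imports, assuming the class names survive the move unchanged (the hunks below confirm this for AugmentedLLM and PassthroughLLM):

# Sketch of the import migration implied by the renames above (inferred from
# the file list, not from package docs).

# 0.1.12 layout (removed in 0.2.0):
#   from mcp_agent.workflows.llm.augmented_llm import AugmentedLLM
#   from mcp_agent.workflows.llm.augmented_llm_passthrough import PassthroughLLM

# 0.2.0 layout:
from mcp_agent.llm.augmented_llm import AugmentedLLM
from mcp_agent.llm.augmented_llm_passthrough import PassthroughLLM
from mcp_agent.llm.providers import sampling_converter_anthropic  # providers now live here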
mcp_agent/human_input/handler.py
@@ -1,13 +1,14 @@
 import asyncio
+
 from rich.panel import Panel
 
 from mcp_agent.console import console
+from mcp_agent.core.enhanced_prompt import get_enhanced_input, handle_special_commands
 from mcp_agent.human_input.types import (
     HumanInputRequest,
     HumanInputResponse,
 )
 from mcp_agent.progress_display import progress_display
-from mcp_agent.core.enhanced_prompt import get_enhanced_input, handle_special_commands
 
 
 async def console_input_callback(request: HumanInputRequest) -> HumanInputResponse:
@@ -30,9 +31,7 @@ async def console_input_callback(request: HumanInputRequest) -> HumanInputResponse:
 
     # Extract agent name from metadata dictionary
     agent_name = (
-        request.metadata.get("agent_name", "Unknown Agent")
-        if request.metadata
-        else "Unknown Agent"
+        request.metadata.get("agent_name", "Unknown Agent") if request.metadata else "Unknown Agent"
     )
 
     # Use the context manager to pause the progress display while getting input
@@ -70,9 +69,7 @@ async def console_input_callback(request: HumanInputRequest) -> HumanInputResponse:
         if isinstance(command_result, dict) and "list_prompts" in command_result:
             from rich import print as rich_print
 
-            rich_print(
-                "[yellow]Prompt listing not available in human input context[/yellow]"
-            )
+            rich_print("[yellow]Prompt listing not available in human input context[/yellow]")
 
     except KeyboardInterrupt:
         console.print("\n[yellow]Input interrupted[/yellow]")
mcp_agent/human_input/types.py
@@ -1,4 +1,5 @@
 from typing import Any, AsyncIterator, Protocol
+
 from pydantic import BaseModel
 
 HUMAN_INPUT_SIGNAL_NAME = "__human_input__"
@@ -42,9 +43,7 @@ class HumanInputResponse(BaseModel):
 class HumanInputCallback(Protocol):
     """Protocol for callbacks that handle human input requests."""
 
-    async def __call__(
-        self, request: HumanInputRequest
-    ) -> AsyncIterator[HumanInputResponse]:
+    async def __call__(self, request: HumanInputRequest) -> AsyncIterator[HumanInputResponse]:
         """
         Handle a human input request.
 
mcp_agent/llm/__init__.py
@@ -0,0 +1,2 @@
+# LLM module
+# Contains code for working with large language models
mcp_agent/llm/augmented_llm.py
@@ -0,0 +1,450 @@
+from abc import abstractmethod
+from typing import (
+    TYPE_CHECKING,
+    Any,
+    Generic,
+    List,
+    Optional,
+    Type,
+    TypeVar,
+    cast,
+)
+
+from mcp.types import (
+    CallToolRequest,
+    CallToolResult,
+    GetPromptResult,
+    PromptMessage,
+    TextContent,
+)
+from pydantic_core import from_json
+from rich.text import Text
+
+from mcp_agent.context_dependent import ContextDependent
+from mcp_agent.core.exceptions import PromptExitError
+from mcp_agent.core.prompt import Prompt
+from mcp_agent.core.request_params import RequestParams
+from mcp_agent.event_progress import ProgressAction
+from mcp_agent.llm.memory import Memory, SimpleMemory
+from mcp_agent.llm.sampling_format_converter import (
+    BasicFormatConverter,
+    ProviderFormatConverter,
+)
+from mcp_agent.logging.logger import get_logger
+from mcp_agent.mcp.interfaces import (
+    AugmentedLLMProtocol,
+    ModelT,
+)
+from mcp_agent.mcp.mcp_aggregator import MCPAggregator
+from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart
+from mcp_agent.mcp.prompt_render import render_multipart_message
+from mcp_agent.mcp.prompt_serialization import multipart_messages_to_delimited_format
+from mcp_agent.ui.console_display import ConsoleDisplay
+
+# Define type variables locally
+MessageParamT = TypeVar("MessageParamT")
+MessageT = TypeVar("MessageT")
+
+# Forward reference for type annotations
+if TYPE_CHECKING:
+    from mcp_agent.agents.agent import Agent
+    from mcp_agent.context import Context
+
+
+# TODO -- move this to a constant
+HUMAN_INPUT_TOOL_NAME = "__human_input__"
+
+
+class AugmentedLLM(ContextDependent, AugmentedLLMProtocol, Generic[MessageParamT, MessageT]):
+    """
+    The basic building block of agentic systems is an LLM enhanced with augmentations
+    such as retrieval, tools, and memory provided from a collection of MCP servers.
+    Our current models can actively use these capabilities—generating their own search queries,
+    selecting appropriate tools, and determining what information to retain.
+    """
+
+    provider: str | None = None
+
+    def __init__(
+        self,
+        agent: Optional["Agent"] = None,
+        server_names: List[str] | None = None,
+        instruction: str | None = None,
+        name: str | None = None,
+        request_params: RequestParams | None = None,
+        type_converter: Type[
+            ProviderFormatConverter[MessageParamT, MessageT]
+        ] = BasicFormatConverter,
+        context: Optional["Context"] = None,
+        **kwargs: dict[str, Any],
+    ) -> None:
+        """
+        Initialize the LLM with a list of server names and an instruction.
+        If a name is provided, it will be used to identify the LLM.
+        If an agent is provided, all other properties are optional.
+        """
+        # Extract request_params before super() call
+        self._init_request_params = request_params
+        super().__init__(context=context, **kwargs)
+        self.logger = get_logger(__name__)
+        self.executor = self.context.executor
+        self.aggregator = agent if agent is not None else MCPAggregator(server_names or [])
+        self.name = agent.name if agent else name
+        self.instruction = agent.instruction if agent else instruction
+
+        # memory contains provider specific API types.
+        self.history: Memory[MessageParamT] = SimpleMemory[MessageParamT]()
+
+        self.message_history: List[PromptMessageMultipart] = []
+
+        # Initialize the display component
+        self.display = ConsoleDisplay(config=self.context.config)
+
+        # Initialize default parameters
+        self.default_request_params = self._initialize_default_params(kwargs)
+
+        # Merge with provided params if any
+        if self._init_request_params:
+            self.default_request_params = self._merge_request_params(
+                self.default_request_params, self._init_request_params
+            )
+
+        self.type_converter = type_converter
+        self.verb = kwargs.get("verb")
+
+    def _initialize_default_params(self, kwargs: dict) -> RequestParams:
+        """Initialize default parameters for the LLM.
+        Should be overridden by provider implementations to set provider-specific defaults."""
+        return RequestParams(
+            systemPrompt=self.instruction,
+            parallel_tool_calls=True,
+            max_iterations=10,
+            use_history=True,
+        )
+
+    async def structured(
+        self,
+        prompt: List[PromptMessageMultipart],
+        model: Type[ModelT],
+        request_params: RequestParams | None = None,
+    ) -> ModelT | None:
+        """Apply the prompt and return the result as a Pydantic model, or None if coercion fails"""
+        try:
+            result: PromptMessageMultipart = await self.generate(prompt, request_params)
+            json_data = from_json(result.first_text(), allow_partial=True)
+            validated_model = model.model_validate(json_data)
+            return cast("ModelT", validated_model)
+        except Exception as e:
+            logger = get_logger(__name__)
+            logger.error(f"Failed to parse structured response: {str(e)}")
+            return None
+
+    async def generate(
+        self,
+        multipart_messages: List[PromptMessageMultipart],
+        request_params: RequestParams | None = None,
+    ) -> PromptMessageMultipart:
+        """
+        Create a completion with the LLM using the provided messages.
+        """
+        if multipart_messages[-1].first_text().startswith("***SAVE_HISTORY"):
+            parts: list[str] = multipart_messages[-1].first_text().split(" ", 1)
+            filename: str = (
+                parts[1].strip() if len(parts) > 1 else f"{self.name or 'assistant'}_prompts.txt"
+            )
+            await self._save_history(filename)
+            self.show_user_message(
+                f"History saved to {filename}", model=self.default_request_params.model, chat_turn=0
+            )
+            return Prompt.assistant(f"History saved to {filename}")
+
+        self.message_history.extend(multipart_messages)
+
+        if multipart_messages[-1].role == "user":
+            self.show_user_message(
+                render_multipart_message(multipart_messages[-1]),
+                model=self.default_request_params.model,
+                chat_turn=self.chat_turn(),
+            )
+
+        assistant_response: PromptMessageMultipart = await self._apply_prompt_provider_specific(
+            multipart_messages, request_params
+        )
+
+        self.message_history.append(assistant_response)
+        return assistant_response
+
+    def chat_turn(self) -> int:
+        """Return the current chat turn number"""
+        return 1 + sum(1 for message in self.message_history if message.role == "assistant")
+
+    def _merge_request_params(
+        self, default_params: RequestParams, provided_params: RequestParams
+    ) -> RequestParams:
+        """Merge default and provided request parameters"""
+
+        merged = default_params.model_dump()
+        merged.update(provided_params.model_dump(exclude_unset=True))
+        final_params = RequestParams(**merged)
+
+        return final_params
+
+    def get_request_params(
+        self,
+        request_params: RequestParams | None = None,
+        default: RequestParams | None = None,
+    ) -> RequestParams:
+        """
+        Get request parameters with merged-in defaults and overrides.
+        Args:
+            request_params: The request parameters to use as overrides.
+            default: The default request parameters to use as the base.
+                If unspecified, self.default_request_params will be used.
+        """
+        # Start with the defaults
+        default_request_params = default or self.default_request_params
+
+        if not default_request_params:
+            default_request_params = self._initialize_default_params({})
+
+        # If user provides overrides, merge them with defaults
+        if request_params:
+            return self._merge_request_params(default_request_params, request_params)
+
+        return default_request_params
+
+    @classmethod
+    def convert_message_to_message_param(
+        cls, message: MessageT, **kwargs: dict[str, Any]
+    ) -> MessageParamT:
+        """Convert a response object to an input parameter object to allow LLM calls to be chained."""
+        # Many LLM implementations will allow the same type for input and output messages
+        return cast("MessageParamT", message)
+
+    def show_tool_result(self, result: CallToolResult) -> None:
+        """Display a tool result in a formatted panel."""
+        self.display.show_tool_result(result)
+
+    def show_oai_tool_result(self, result: str) -> None:
+        """Display a tool result in a formatted panel."""
+        self.display.show_oai_tool_result(result)
+
+    def show_tool_call(self, available_tools, tool_name, tool_args) -> None:
+        """Display a tool call in a formatted panel."""
+        self.display.show_tool_call(available_tools, tool_name, tool_args)
+
+    async def show_assistant_message(
+        self,
+        message_text: str | Text | None,
+        highlight_namespaced_tool: str = "",
+        title: str = "ASSISTANT",
+    ) -> None:
+        """Display an assistant message in a formatted panel."""
+        if message_text is None:
+            message_text = Text("No content to display", style="dim green italic")
+        await self.display.show_assistant_message(
+            message_text,
+            aggregator=self.aggregator,
+            highlight_namespaced_tool=highlight_namespaced_tool,
+            title=title,
+            name=self.name,
+        )
+
+    def show_user_message(self, message, model: str | None, chat_turn: int) -> None:
+        """Display a user message in a formatted panel."""
+        self.display.show_user_message(message, model, chat_turn, name=self.name)
+
+    async def pre_tool_call(
+        self, tool_call_id: str | None, request: CallToolRequest
+    ) -> CallToolRequest | bool:
+        """Called before a tool is executed. Return False to prevent execution."""
+        return request
+
+    async def post_tool_call(
+        self, tool_call_id: str | None, request: CallToolRequest, result: CallToolResult
+    ) -> CallToolResult:
+        """Called after a tool execution. Can modify the result before it's returned."""
+        return result
+
+    async def call_tool(
+        self,
+        request: CallToolRequest,
+        tool_call_id: str | None = None,
+    ) -> CallToolResult:
+        """Call a tool with the given parameters and optional ID"""
+
+        try:
+            preprocess = await self.pre_tool_call(
+                tool_call_id=tool_call_id,
+                request=request,
+            )
+
+            if isinstance(preprocess, bool):
+                if not preprocess:
+                    return CallToolResult(
+                        isError=True,
+                        content=[
+                            TextContent(
+                                type="text",
+                                text=f"Error: Tool '{request.params.name}' was not allowed to run.",
+                            )
+                        ],
+                    )
+            else:
+                request = preprocess
+
+            tool_name = request.params.name
+            tool_args = request.params.arguments
+            result = await self.aggregator.call_tool(tool_name, tool_args)
+
+            postprocess = await self.post_tool_call(
+                tool_call_id=tool_call_id, request=request, result=result
+            )
+
+            if isinstance(postprocess, CallToolResult):
+                result = postprocess
+
+            return result
+        except PromptExitError:
+            raise
+        except Exception as e:
+            return CallToolResult(
+                isError=True,
+                content=[
+                    TextContent(
+                        type="text",
+                        text=f"Error executing tool '{request.params.name}': {str(e)}",
+                    )
+                ],
+            )
+
+    def _log_chat_progress(
+        self, chat_turn: Optional[int] = None, model: Optional[str] = None
+    ) -> None:
+        """Log a chat progress event"""
+        # Determine action type based on verb
+        if hasattr(self, "verb") and self.verb:
+            # Use verb directly regardless of type
+            act = self.verb
+        else:
+            act = ProgressAction.CHATTING
+
+        data = {
+            "progress_action": act,
+            "model": model,
+            "agent_name": self.name,
+            "chat_turn": chat_turn if chat_turn is not None else None,
+        }
+        self.logger.debug("Chat in progress", data=data)
+
+    def _log_chat_finished(self, model: Optional[str] = None) -> None:
+        """Log a chat finished event"""
+        data = {
+            "progress_action": ProgressAction.READY,
+            "model": model,
+            "agent_name": self.name,
+        }
+        self.logger.debug("Chat finished", data=data)
+
+    def _convert_prompt_messages(self, prompt_messages: List[PromptMessage]) -> List[MessageParamT]:
+        """
+        Convert prompt messages to this LLM's specific message format.
+        To be implemented by concrete LLM classes.
+        """
+        raise NotImplementedError("Must be implemented by subclass")
+
+    async def show_prompt_loaded(
+        self,
+        prompt_name: str,
+        description: Optional[str] = None,
+        message_count: int = 0,
+        arguments: Optional[dict[str, str]] = None,
+    ) -> None:
+        """
+        Display information about a loaded prompt template.
+
+        Args:
+            prompt_name: The name of the prompt
+            description: Optional description of the prompt
+            message_count: Number of messages in the prompt
+            arguments: Optional dictionary of arguments passed to the prompt
+        """
+        await self.display.show_prompt_loaded(
+            prompt_name=prompt_name,
+            description=description,
+            message_count=message_count,
+            agent_name=self.name,
+            aggregator=self.aggregator,
+            arguments=arguments,
+        )
+
+    async def apply_prompt_template(self, prompt_result: GetPromptResult, prompt_name: str) -> str:
+        """
+        Apply a prompt template by adding it to the conversation history.
+        If the last message in the prompt is from a user, automatically
+        generate an assistant response.
+
+        Args:
+            prompt_result: The GetPromptResult containing prompt messages
+            prompt_name: The name of the prompt being applied
+
+        Returns:
+            String representation of the assistant's response if generated,
+            or the last assistant message in the prompt
+        """
+        from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart
+
+        # Check if we have any messages
+        if not prompt_result.messages:
+            return "Prompt contains no messages"
+
+        # Extract arguments if they were stored in the result
+        arguments = getattr(prompt_result, "arguments", None)
+
+        # Display information about the loaded prompt
+        await self.show_prompt_loaded(
+            prompt_name=prompt_name,
+            description=prompt_result.description,
+            message_count=len(prompt_result.messages),
+            arguments=arguments,
+        )
+
+        # Convert to PromptMessageMultipart objects
+        multipart_messages = PromptMessageMultipart.parse_get_prompt_result(prompt_result)
+
+        # Delegate to the provider-specific implementation
+        result = await self._apply_prompt_provider_specific(multipart_messages, None)
+        return result.first_text()
+
+    async def _save_history(self, filename: str) -> None:
+        """
+        Save the message history to a file in a simple delimited format.
+        """
+        # Convert to delimited format
+        delimited_content = multipart_messages_to_delimited_format(
+            self.message_history,
+        )
+
+        # Write to file
+        with open(filename, "w", encoding="utf-8") as f:
+            f.write("\n\n".join(delimited_content))
+
+    @abstractmethod
+    async def _apply_prompt_provider_specific(
+        self,
+        multipart_messages: List["PromptMessageMultipart"],
+        request_params: RequestParams | None = None,
+    ) -> PromptMessageMultipart:
+        """
+        Provider-specific implementation of apply_prompt_template.
+        Concrete subclasses implement this method to generate a completion,
+        handling text (and, where supported, multimodal) content
+        appropriately for their provider API.
+
+        Args:
+            multipart_messages: List of PromptMessageMultipart objects parsed from the prompt template
+
+        Returns:
+            The assistant's response as a PromptMessageMultipart, either newly
+            generated or the last assistant message in the prompt
+        """
mcp_agent/llm/augmented_llm_passthrough.py
@@ -0,0 +1,162 @@
+import json  # Import at the module level
+from typing import Any, List, Optional, Union
+
+from mcp.types import PromptMessage
+
+from mcp_agent.core.prompt import Prompt
+from mcp_agent.llm.augmented_llm import (
+    AugmentedLLM,
+    MessageParamT,
+    RequestParams,
+)
+from mcp_agent.logging.logger import get_logger
+from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart
+
+CALL_TOOL_INDICATOR = "***CALL_TOOL"
+FIXED_RESPONSE_INDICATOR = "***FIXED_RESPONSE"
+
+
+class PassthroughLLM(AugmentedLLM):
+    """
+    A specialized LLM implementation that simply passes through input messages without modification.
+
+    This is useful for cases where you need an object with the AugmentedLLM interface
+    but want to preserve the original message without any processing, such as in a
+    parallel workflow where no fan-in aggregation is needed.
+    """
+
+    def __init__(self, name: str = "Passthrough", **kwargs: dict[str, Any]) -> None:
+        super().__init__(name=name, **kwargs)
+        self.provider = "fast-agent"
+        self.logger = get_logger(__name__)
+        self._messages = [PromptMessage]
+        self._fixed_response: str | None = None
+
+    async def generate_str(
+        self,
+        message: Union[str, MessageParamT, List[MessageParamT]],
+        request_params: Optional[RequestParams] = None,
+    ) -> str:
+        """Return the input message as a string."""
+        # Check if this is a special command to call a tool
+        if isinstance(message, str) and message.startswith("***CALL_TOOL "):
+            return await self._call_tool_and_return_result(message)
+
+        self.show_user_message(message, model="fastagent-passthrough", chat_turn=0)
+        await self.show_assistant_message(message, title="ASSISTANT/PASSTHROUGH")
+
+        # Handle PromptMessage by concatenating all parts
+        if isinstance(message, PromptMessage):
+            parts_text = []
+            for part in message.content:
+                parts_text.append(str(part))
+            return "\n".join(parts_text)
+
+        return str(message)
+
+    async def initialize(self) -> None:
+        pass
+
+    async def _call_tool_and_return_result(self, command: str) -> str:
+        """
+        Call a tool based on the command and return its result as a string.
+
+        Args:
+            command: The command string, expected format: "***CALL_TOOL <server>-<tool_name> [arguments_json]"
+
+        Returns:
+            Tool result as a string
+        """
+        try:
+            tool_name, arguments = self._parse_tool_command(command)
+            result = await self.aggregator.call_tool(tool_name, arguments)
+            return self._format_tool_result(tool_name, result)
+        except Exception as e:
+            self.logger.error(f"Error calling tool: {str(e)}")
+            return f"Error calling tool: {str(e)}"
+
+    def _parse_tool_command(self, command: str) -> tuple[str, Optional[dict]]:
+        """
+        Parse a tool command string into tool name and arguments.
+
+        Args:
+            command: The command string in format "***CALL_TOOL <tool_name> [arguments_json]"
+
+        Returns:
+            Tuple of (tool_name, arguments_dict)
+
+        Raises:
+            ValueError: If command format is invalid
+        """
+        parts = command.split(" ", 2)
+        if len(parts) < 2:
+            raise ValueError("Invalid format. Expected '***CALL_TOOL <tool_name> [arguments_json]'")
+
+        tool_name = parts[1].strip()
+        arguments = None
+
+        if len(parts) > 2:
+            try:
+                arguments = json.loads(parts[2])
+            except json.JSONDecodeError:
+                raise ValueError(f"Invalid JSON arguments: {parts[2]}")
+
+        self.logger.info(f"Calling tool {tool_name} with arguments {arguments}")
+        return tool_name, arguments
+
+    def _format_tool_result(self, tool_name: str, result) -> str:
+        """
+        Format tool execution result as a string.
+
+        Args:
+            tool_name: The name of the tool that was called
+            result: The result returned from the tool
+
+        Returns:
+            Formatted result as a string
+        """
+        if result.isError:
+            error_text = []
+            for content_item in result.content:
+                if hasattr(content_item, "text"):
+                    error_text.append(content_item.text)
+                else:
+                    error_text.append(str(content_item))
+            error_message = "\n".join(error_text) if error_text else "Unknown error"
+            return f"Error calling tool '{tool_name}': {error_message}"
+
+        result_text = []
+        for content_item in result.content:
+            if hasattr(content_item, "text"):
+                result_text.append(content_item.text)
+            else:
+                result_text.append(str(content_item))
+
+        return "\n".join(result_text)
+
+    async def _apply_prompt_provider_specific(
+        self,
+        multipart_messages: List["PromptMessageMultipart"],
+        request_params: RequestParams | None = None,
+    ) -> PromptMessageMultipart:
+        last_message = multipart_messages[-1]
+
+        # TODO -- improve when we support Audio/Multimodal gen
+        if self.is_tool_call(last_message):
+            return Prompt.assistant(await self.generate_str(last_message.first_text()))
+
+        if last_message.first_text().startswith(FIXED_RESPONSE_INDICATOR):
+            self._fixed_response = (
+                last_message.first_text().split(FIXED_RESPONSE_INDICATOR, 1)[1].strip()
+            )
+
+        if self._fixed_response:
+            await self.show_assistant_message(self._fixed_response)
+            return Prompt.assistant(self._fixed_response)
+        else:
+            concatenated: str = "\n".join(message.all_text() for message in multipart_messages)
+            await self.show_assistant_message(concatenated)
+            return Prompt.assistant(concatenated)
+
+    def is_tool_call(self, message: PromptMessageMultipart) -> bool:
+        return message.first_text().startswith(CALL_TOOL_INDICATOR)
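
Taken together, generate and _apply_prompt_provider_specific give PassthroughLLM three behaviors: plain text echoes straight back, ***FIXED_RESPONSE <text> pins this and subsequent replies to a canned string, and ***CALL_TOOL <tool_name> [arguments_json] routes through the MCP aggregator. A hedged usage sketch, assuming an llm instance already constructed with a valid context and aggregator (the tool name my_server-echo is hypothetical):

# Illustrative only: exercising PassthroughLLM's sentinel commands, assuming an
# instance `llm` wired to a context/aggregator as defined in the class above.
from mcp_agent.core.prompt import Prompt
from mcp_agent.llm.augmented_llm_passthrough import PassthroughLLM


async def demo(llm: PassthroughLLM) -> None:
    # Plain text is passed through unchanged as the "assistant" reply.
    reply = await llm.generate([Prompt.user("hello world")])
    assert reply.first_text() == "hello world"

    # ***FIXED_RESPONSE pins this and every later reply to a canned string.
    await llm.generate([Prompt.user("***FIXED_RESPONSE pong")])
    reply = await llm.generate([Prompt.user("ping")])
    assert reply.first_text() == "pong"

    # ***CALL_TOOL <tool_name> [arguments_json] is dispatched via the
    # aggregator; "my_server-echo" is a hypothetical namespaced MCP tool.
    print(await llm.generate_str('***CALL_TOOL my_server-echo {"text": "hi"}'))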