fast-agent-mcp 0.1.12__py3-none-any.whl → 0.2.0__py3-none-any.whl

This diff compares the contents of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between versions as they appear in the public registry.
Files changed (169)
  1. {fast_agent_mcp-0.1.12.dist-info → fast_agent_mcp-0.2.0.dist-info}/METADATA +3 -4
  2. fast_agent_mcp-0.2.0.dist-info/RECORD +123 -0
  3. mcp_agent/__init__.py +75 -0
  4. mcp_agent/agents/agent.py +61 -415
  5. mcp_agent/agents/base_agent.py +522 -0
  6. mcp_agent/agents/workflow/__init__.py +1 -0
  7. mcp_agent/agents/workflow/chain_agent.py +173 -0
  8. mcp_agent/agents/workflow/evaluator_optimizer.py +362 -0
  9. mcp_agent/agents/workflow/orchestrator_agent.py +591 -0
  10. mcp_agent/{workflows/orchestrator → agents/workflow}/orchestrator_models.py +11 -21
  11. mcp_agent/agents/workflow/parallel_agent.py +182 -0
  12. mcp_agent/agents/workflow/router_agent.py +307 -0
  13. mcp_agent/app.py +15 -19
  14. mcp_agent/cli/commands/bootstrap.py +19 -38
  15. mcp_agent/cli/commands/config.py +4 -4
  16. mcp_agent/cli/commands/setup.py +7 -14
  17. mcp_agent/cli/main.py +7 -10
  18. mcp_agent/cli/terminal.py +3 -3
  19. mcp_agent/config.py +25 -40
  20. mcp_agent/context.py +12 -21
  21. mcp_agent/context_dependent.py +3 -5
  22. mcp_agent/core/agent_types.py +10 -7
  23. mcp_agent/core/direct_agent_app.py +179 -0
  24. mcp_agent/core/direct_decorators.py +443 -0
  25. mcp_agent/core/direct_factory.py +476 -0
  26. mcp_agent/core/enhanced_prompt.py +23 -55
  27. mcp_agent/core/exceptions.py +8 -8
  28. mcp_agent/core/fastagent.py +145 -371
  29. mcp_agent/core/interactive_prompt.py +424 -0
  30. mcp_agent/core/mcp_content.py +17 -17
  31. mcp_agent/core/prompt.py +6 -9
  32. mcp_agent/core/request_params.py +6 -3
  33. mcp_agent/core/validation.py +92 -18
  34. mcp_agent/executor/decorator_registry.py +9 -17
  35. mcp_agent/executor/executor.py +8 -17
  36. mcp_agent/executor/task_registry.py +2 -4
  37. mcp_agent/executor/temporal.py +19 -41
  38. mcp_agent/executor/workflow.py +3 -5
  39. mcp_agent/executor/workflow_signal.py +15 -21
  40. mcp_agent/human_input/handler.py +4 -7
  41. mcp_agent/human_input/types.py +2 -3
  42. mcp_agent/llm/__init__.py +2 -0
  43. mcp_agent/llm/augmented_llm.py +450 -0
  44. mcp_agent/llm/augmented_llm_passthrough.py +162 -0
  45. mcp_agent/llm/augmented_llm_playback.py +83 -0
  46. mcp_agent/llm/memory.py +103 -0
  47. mcp_agent/{workflows/llm → llm}/model_factory.py +22 -16
  48. mcp_agent/{workflows/llm → llm}/prompt_utils.py +1 -3
  49. mcp_agent/llm/providers/__init__.py +8 -0
  50. mcp_agent/{workflows/llm → llm/providers}/anthropic_utils.py +8 -25
  51. mcp_agent/{workflows/llm → llm/providers}/augmented_llm_anthropic.py +56 -194
  52. mcp_agent/llm/providers/augmented_llm_deepseek.py +53 -0
  53. mcp_agent/{workflows/llm → llm/providers}/augmented_llm_openai.py +99 -190
  54. mcp_agent/{workflows/llm → llm}/providers/multipart_converter_anthropic.py +72 -71
  55. mcp_agent/{workflows/llm → llm}/providers/multipart_converter_openai.py +65 -71
  56. mcp_agent/{workflows/llm → llm}/providers/openai_multipart.py +16 -44
  57. mcp_agent/{workflows/llm → llm/providers}/openai_utils.py +4 -4
  58. mcp_agent/{workflows/llm → llm}/providers/sampling_converter_anthropic.py +9 -11
  59. mcp_agent/{workflows/llm → llm}/providers/sampling_converter_openai.py +8 -12
  60. mcp_agent/{workflows/llm → llm}/sampling_converter.py +3 -31
  61. mcp_agent/llm/sampling_format_converter.py +37 -0
  62. mcp_agent/logging/events.py +1 -5
  63. mcp_agent/logging/json_serializer.py +7 -6
  64. mcp_agent/logging/listeners.py +20 -23
  65. mcp_agent/logging/logger.py +17 -19
  66. mcp_agent/logging/rich_progress.py +10 -8
  67. mcp_agent/logging/tracing.py +4 -6
  68. mcp_agent/logging/transport.py +22 -22
  69. mcp_agent/mcp/gen_client.py +1 -3
  70. mcp_agent/mcp/interfaces.py +117 -110
  71. mcp_agent/mcp/logger_textio.py +97 -0
  72. mcp_agent/mcp/mcp_agent_client_session.py +7 -7
  73. mcp_agent/mcp/mcp_agent_server.py +8 -8
  74. mcp_agent/mcp/mcp_aggregator.py +102 -143
  75. mcp_agent/mcp/mcp_connection_manager.py +20 -27
  76. mcp_agent/mcp/prompt_message_multipart.py +68 -16
  77. mcp_agent/mcp/prompt_render.py +77 -0
  78. mcp_agent/mcp/prompt_serialization.py +30 -48
  79. mcp_agent/mcp/prompts/prompt_constants.py +18 -0
  80. mcp_agent/mcp/prompts/prompt_helpers.py +327 -0
  81. mcp_agent/mcp/prompts/prompt_load.py +109 -0
  82. mcp_agent/mcp/prompts/prompt_server.py +155 -195
  83. mcp_agent/mcp/prompts/prompt_template.py +35 -66
  84. mcp_agent/mcp/resource_utils.py +7 -14
  85. mcp_agent/mcp/sampling.py +17 -17
  86. mcp_agent/mcp_server/agent_server.py +13 -17
  87. mcp_agent/mcp_server_registry.py +13 -22
  88. mcp_agent/resources/examples/{workflows → in_dev}/agent_build.py +3 -2
  89. mcp_agent/resources/examples/in_dev/slides.py +110 -0
  90. mcp_agent/resources/examples/internal/agent.py +6 -3
  91. mcp_agent/resources/examples/internal/fastagent.config.yaml +8 -2
  92. mcp_agent/resources/examples/internal/job.py +2 -1
  93. mcp_agent/resources/examples/internal/prompt_category.py +1 -1
  94. mcp_agent/resources/examples/internal/prompt_sizing.py +3 -5
  95. mcp_agent/resources/examples/internal/sizer.py +2 -1
  96. mcp_agent/resources/examples/internal/social.py +2 -1
  97. mcp_agent/resources/examples/prompting/agent.py +2 -1
  98. mcp_agent/resources/examples/prompting/image_server.py +4 -8
  99. mcp_agent/resources/examples/prompting/work_with_image.py +19 -0
  100. mcp_agent/ui/console_display.py +16 -20
  101. fast_agent_mcp-0.1.12.dist-info/RECORD +0 -161
  102. mcp_agent/core/agent_app.py +0 -646
  103. mcp_agent/core/agent_utils.py +0 -71
  104. mcp_agent/core/decorators.py +0 -455
  105. mcp_agent/core/factory.py +0 -463
  106. mcp_agent/core/proxies.py +0 -269
  107. mcp_agent/core/types.py +0 -24
  108. mcp_agent/eval/__init__.py +0 -0
  109. mcp_agent/mcp/stdio.py +0 -111
  110. mcp_agent/resources/examples/data-analysis/analysis-campaign.py +0 -188
  111. mcp_agent/resources/examples/data-analysis/analysis.py +0 -65
  112. mcp_agent/resources/examples/data-analysis/fastagent.config.yaml +0 -41
  113. mcp_agent/resources/examples/data-analysis/mount-point/WA_Fn-UseC_-HR-Employee-Attrition.csv +0 -1471
  114. mcp_agent/resources/examples/mcp_researcher/researcher-eval.py +0 -53
  115. mcp_agent/resources/examples/researcher/fastagent.config.yaml +0 -66
  116. mcp_agent/resources/examples/researcher/researcher-eval.py +0 -53
  117. mcp_agent/resources/examples/researcher/researcher-imp.py +0 -190
  118. mcp_agent/resources/examples/researcher/researcher.py +0 -38
  119. mcp_agent/resources/examples/workflows/chaining.py +0 -44
  120. mcp_agent/resources/examples/workflows/evaluator.py +0 -78
  121. mcp_agent/resources/examples/workflows/fastagent.config.yaml +0 -24
  122. mcp_agent/resources/examples/workflows/human_input.py +0 -25
  123. mcp_agent/resources/examples/workflows/orchestrator.py +0 -73
  124. mcp_agent/resources/examples/workflows/parallel.py +0 -78
  125. mcp_agent/resources/examples/workflows/router.py +0 -53
  126. mcp_agent/resources/examples/workflows/sse.py +0 -23
  127. mcp_agent/telemetry/__init__.py +0 -0
  128. mcp_agent/telemetry/usage_tracking.py +0 -18
  129. mcp_agent/workflows/__init__.py +0 -0
  130. mcp_agent/workflows/embedding/__init__.py +0 -0
  131. mcp_agent/workflows/embedding/embedding_base.py +0 -61
  132. mcp_agent/workflows/embedding/embedding_cohere.py +0 -49
  133. mcp_agent/workflows/embedding/embedding_openai.py +0 -46
  134. mcp_agent/workflows/evaluator_optimizer/__init__.py +0 -0
  135. mcp_agent/workflows/evaluator_optimizer/evaluator_optimizer.py +0 -481
  136. mcp_agent/workflows/intent_classifier/__init__.py +0 -0
  137. mcp_agent/workflows/intent_classifier/intent_classifier_base.py +0 -120
  138. mcp_agent/workflows/intent_classifier/intent_classifier_embedding.py +0 -134
  139. mcp_agent/workflows/intent_classifier/intent_classifier_embedding_cohere.py +0 -45
  140. mcp_agent/workflows/intent_classifier/intent_classifier_embedding_openai.py +0 -45
  141. mcp_agent/workflows/intent_classifier/intent_classifier_llm.py +0 -161
  142. mcp_agent/workflows/intent_classifier/intent_classifier_llm_anthropic.py +0 -60
  143. mcp_agent/workflows/intent_classifier/intent_classifier_llm_openai.py +0 -60
  144. mcp_agent/workflows/llm/__init__.py +0 -0
  145. mcp_agent/workflows/llm/augmented_llm.py +0 -753
  146. mcp_agent/workflows/llm/augmented_llm_passthrough.py +0 -241
  147. mcp_agent/workflows/llm/augmented_llm_playback.py +0 -109
  148. mcp_agent/workflows/llm/providers/__init__.py +0 -8
  149. mcp_agent/workflows/llm/sampling_format_converter.py +0 -22
  150. mcp_agent/workflows/orchestrator/__init__.py +0 -0
  151. mcp_agent/workflows/orchestrator/orchestrator.py +0 -578
  152. mcp_agent/workflows/parallel/__init__.py +0 -0
  153. mcp_agent/workflows/parallel/fan_in.py +0 -350
  154. mcp_agent/workflows/parallel/fan_out.py +0 -187
  155. mcp_agent/workflows/parallel/parallel_llm.py +0 -166
  156. mcp_agent/workflows/router/__init__.py +0 -0
  157. mcp_agent/workflows/router/router_base.py +0 -368
  158. mcp_agent/workflows/router/router_embedding.py +0 -240
  159. mcp_agent/workflows/router/router_embedding_cohere.py +0 -59
  160. mcp_agent/workflows/router/router_embedding_openai.py +0 -59
  161. mcp_agent/workflows/router/router_llm.py +0 -320
  162. mcp_agent/workflows/swarm/__init__.py +0 -0
  163. mcp_agent/workflows/swarm/swarm.py +0 -320
  164. mcp_agent/workflows/swarm/swarm_anthropic.py +0 -42
  165. mcp_agent/workflows/swarm/swarm_openai.py +0 -41
  166. {fast_agent_mcp-0.1.12.dist-info → fast_agent_mcp-0.2.0.dist-info}/WHEEL +0 -0
  167. {fast_agent_mcp-0.1.12.dist-info → fast_agent_mcp-0.2.0.dist-info}/entry_points.txt +0 -0
  168. {fast_agent_mcp-0.1.12.dist-info → fast_agent_mcp-0.2.0.dist-info}/licenses/LICENSE +0 -0
  169. /mcp_agent/{workflows/orchestrator → agents/workflow}/orchestrator_prompts.py +0 -0
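The renames above reflect the 0.2.0 package reorganization: mcp_agent/workflows/llm moves to mcp_agent/llm (with provider implementations under mcp_agent/llm/providers), and orchestrator code moves from mcp_agent/workflows/orchestrator to mcp_agent/agents/workflow. A minimal import-migration sketch, inferred only from the renames listed above (symbol names in the relocated modules are assumed unchanged; verify against the 0.2.0 RECORD before relying on it):

# 0.1.12 layout (left side of the renames above):
from mcp_agent.workflows.llm.augmented_llm import AugmentedLLM
from mcp_agent.workflows.llm.model_factory import ModelFactory  # symbol name assumed

# 0.2.0 layout (right side of the renames above):
from mcp_agent.llm.augmented_llm import AugmentedLLM  # class name assumed unchanged
from mcp_agent.llm.model_factory import ModelFactory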
--- a/mcp_agent/workflows/llm/augmented_llm.py
+++ /dev/null
@@ -1,753 +0,0 @@
-from abc import abstractmethod
-
-from typing import (
-    Generic,
-    List,
-    Optional,
-    Protocol,
-    Type,
-    TypeVar,
-    TYPE_CHECKING,
-)
-
-from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart
-from mcp_agent.workflows.llm.sampling_format_converter import (
-    SamplingFormatConverter,
-    MessageParamT,
-    MessageT,
-)
-
-# Forward reference for type annotations
-if TYPE_CHECKING:
-    from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart
-    from mcp_agent.agents.agent import Agent
-    from mcp_agent.context import Context
-
-
-
-from mcp.types import (
-    CallToolRequest,
-    CallToolResult,
-    PromptMessage,
-    TextContent,
-    GetPromptResult,
-)
-
-from mcp_agent.context_dependent import ContextDependent
-from mcp_agent.core.exceptions import ModelConfigError, PromptExitError
-from mcp_agent.core.request_params import RequestParams
-from mcp_agent.event_progress import ProgressAction
-
-try:
-    from mcp_agent.mcp.mcp_aggregator import MCPAggregator
-except ImportError:
-    # For testing purposes
-    class MCPAggregator:
-        pass
-
-
-from mcp_agent.ui.console_display import ConsoleDisplay
-from rich.text import Text
-
-
-ModelT = TypeVar("ModelT")
-"""A type representing a structured output message from an LLM."""
-
-
-# TODO -- move this to a constant
-HUMAN_INPUT_TOOL_NAME = "__human_input__"
-
-
-class Memory(Protocol, Generic[MessageParamT]):
-    """
-    Simple memory management for storing past interactions in-memory.
-    """
-
-    # TODO: saqadri - add checkpointing and other advanced memory capabilities
-
-    def __init__(self): ...
-
-    def extend(
-        self, messages: List[MessageParamT], is_prompt: bool = False
-    ) -> None: ...
-
-    def set(self, messages: List[MessageParamT], is_prompt: bool = False) -> None: ...
-
-    def append(self, message: MessageParamT, is_prompt: bool = False) -> None: ...
-
-    def get(self, include_history: bool = True) -> List[MessageParamT]: ...
-
-    def clear(self, clear_prompts: bool = False) -> None: ...
-
-
-class SimpleMemory(Memory, Generic[MessageParamT]):
-    """
-    Simple memory management for storing past interactions in-memory.
-
-    Maintains both prompt messages (which are always included) and
-    generated conversation history (which is included based on use_history setting).
-    """
-
-    def __init__(self):
-        self.history: List[MessageParamT] = []
-        self.prompt_messages: List[MessageParamT] = []  # Always included
-
-    def extend(self, messages: List[MessageParamT], is_prompt: bool = False):
-        """
-        Add multiple messages to history.
-
-        Args:
-            messages: Messages to add
-            is_prompt: If True, add to prompt_messages instead of regular history
-        """
-        if is_prompt:
-            self.prompt_messages.extend(messages)
-        else:
-            self.history.extend(messages)
-
-    def set(self, messages: List[MessageParamT], is_prompt: bool = False):
-        """
-        Replace messages in history.
-
-        Args:
-            messages: Messages to set
-            is_prompt: If True, replace prompt_messages instead of regular history
-        """
-        if is_prompt:
-            self.prompt_messages = messages.copy()
-        else:
-            self.history = messages.copy()
-
-    def append(self, message: MessageParamT, is_prompt: bool = False):
-        """
-        Add a single message to history.
-
-        Args:
-            message: Message to add
-            is_prompt: If True, add to prompt_messages instead of regular history
-        """
-        if is_prompt:
-            self.prompt_messages.append(message)
-        else:
-            self.history.append(message)
-
-    def get(self, include_history: bool = True) -> List[MessageParamT]:
-        """
-        Get all messages in memory.
-
-        Args:
-            include_history: If True, include regular history messages
-                If False, only return prompt messages
-
-        Returns:
-            Combined list of prompt messages and optionally history messages
-        """
-        if include_history:
-            return self.prompt_messages + self.history
-        else:
-            return self.prompt_messages.copy()
-
-    def clear(self, clear_prompts: bool = False):
-        """
-        Clear history and optionally prompt messages.
-
-        Args:
-            clear_prompts: If True, also clear prompt messages
-        """
-        self.history = []
-        if clear_prompts:
-            self.prompt_messages = []
-
-
-class AugmentedLLMProtocol(Protocol, Generic[MessageParamT, MessageT]):
-    """Protocol defining the interface for augmented LLMs"""
-
-    async def generate(
-        self,
-        message: str | MessageParamT | List[MessageParamT],
-        request_params: RequestParams | None = None,
-    ) -> List[MessageT]:
-        """Request an LLM generation, which may run multiple iterations, and return the result"""
-
-    async def generate_str(
-        self,
-        message: str | MessageParamT | List[MessageParamT],
-        request_params: RequestParams | None = None,
-    ) -> str:
-        """Request an LLM generation and return the string representation of the result"""
-
-    async def generate_structured(
-        self,
-        message: str | MessageParamT | List[MessageParamT],
-        response_model: Type[ModelT],
-        request_params: RequestParams | None = None,
-    ) -> ModelT:
-        """Request a structured LLM generation and return the result as a Pydantic model."""
-
-    async def generate_prompt(
-        self, prompt: PromptMessageMultipart, request_params: RequestParams | None
-    ) -> str:
-        """Request an LLM generation and return a string representation of the result"""
-
-
-class AugmentedLLM(ContextDependent, AugmentedLLMProtocol[MessageParamT, MessageT]):
-    """
-    The basic building block of agentic systems is an LLM enhanced with augmentations
-    such as retrieval, tools, and memory provided from a collection of MCP servers.
-    Our current models can actively use these capabilities—generating their own search queries,
-    selecting appropriate tools, and determining what information to retain.
-    """
-
-    # TODO: saqadri - add streaming support (e.g. generate_stream)
-    # TODO: saqadri - consider adding middleware patterns for pre/post processing of messages, for now we have pre/post_tool_call
-
-    provider: str | None = None
-
-    def __init__(
-        self,
-        agent: Optional["Agent"] = None,
-        server_names: List[str] | None = None,
-        instruction: str | None = None,
-        name: str | None = None,
-        request_params: RequestParams | None = None,
-        type_converter: Type[SamplingFormatConverter[MessageParamT, MessageT]] = None,
-        context: Optional["Context"] = None,
-        **kwargs,
-    ):
-        """
-        Initialize the LLM with a list of server names and an instruction.
-        If a name is provided, it will be used to identify the LLM.
-        If an agent is provided, all other properties are optional
-        """
-        # Extract request_params before super() call
-        self._init_request_params = request_params
-        super().__init__(context=context, **kwargs)
-
-        self.executor = self.context.executor
-        self.aggregator = (
-            agent if agent is not None else MCPAggregator(server_names or [])
-        )
-        self.name = name or (agent.name if agent else None)
-        self.instruction = instruction or (
-            agent.instruction if agent and isinstance(agent.instruction, str) else None
-        )
-        self.history: Memory[MessageParamT] = SimpleMemory[MessageParamT]()
-
-        # Initialize the display component
-        self.display = ConsoleDisplay(config=self.context.config)
-
-        # Initialize default parameters
-        self.default_request_params = self._initialize_default_params(kwargs)
-
-        # Merge with provided params if any
-        if self._init_request_params:
-            self.default_request_params = self._merge_request_params(
-                self.default_request_params, self._init_request_params
-            )
-
-        self.type_converter = type_converter
-        self.verb = kwargs.get("verb")
-
-    @abstractmethod
-    async def generate(
-        self,
-        message: str | MessageParamT | List[MessageParamT],
-        request_params: RequestParams | None = None,
-    ) -> List[MessageT]:
-        """Request an LLM generation, which may run multiple iterations, and return the result"""
-
-    @abstractmethod
-    async def generate_str(
-        self,
-        message: str | MessageParamT | List[MessageParamT],
-        request_params: RequestParams | None = None,
-    ) -> str:
-        """Request an LLM generation and return the string representation of the result"""
-
-    @abstractmethod
-    async def generate_structured(
-        self,
-        message: str | MessageParamT | List[MessageParamT],
-        response_model: Type[ModelT],
-        request_params: RequestParams | None = None,
-    ) -> ModelT:
-        """Request a structured LLM generation and return the result as a Pydantic model."""
-
-    async def select_model(
-        self, request_params: RequestParams | None = None
-    ) -> str | None:
-        """
-        Return the configured model (legacy support)
-        """
-        if request_params.model:
-            return request_params.model
-
-        raise ModelConfigError("Internal Error: Model is not configured correctly")
-
-    def _initialize_default_params(self, kwargs: dict) -> RequestParams:
-        """Initialize default parameters for the LLM.
-        Should be overridden by provider implementations to set provider-specific defaults."""
-        return RequestParams(
-            systemPrompt=self.instruction,
-            parallel_tool_calls=True,
-            max_iterations=10,
-            use_history=True,
-        )
-
-    def _merge_request_params(
-        self, default_params: RequestParams, provided_params: RequestParams
-    ) -> RequestParams:
-        """Merge default and provided request parameters"""
-
-        merged = default_params.model_dump()
-        merged.update(provided_params.model_dump(exclude_unset=True))
-        final_params = RequestParams(**merged)
-
-        return final_params
-
-    def get_request_params(
-        self,
-        request_params: RequestParams | None = None,
-        default: RequestParams | None = None,
-    ) -> RequestParams:
-        """
-        Get request parameters with merged-in defaults and overrides.
-        Args:
-            request_params: The request parameters to use as overrides.
-            default: The default request parameters to use as the base.
-                If unspecified, self.default_request_params will be used.
-        """
-        # Start with the defaults
-        default_request_params = default or self.default_request_params
-
-        if not default_request_params:
-            default_request_params = self._initialize_default_params({})
-
-        # If user provides overrides, merge them with defaults
-        if request_params:
-            return self._merge_request_params(default_request_params, request_params)
-
-        return default_request_params
-
-    @classmethod
-    def convert_message_to_message_param(
-        cls, message: MessageT, **kwargs
-    ) -> MessageParamT:
-        """Convert a response object to an input parameter object to allow LLM calls to be chained."""
-        # Many LLM implementations will allow the same type for input and output messages
-        return message
-
-    async def get_last_message(self) -> MessageParamT | None:
-        """
-        Return the last message generated by the LLM or None if history is empty.
-        This is useful for prompt chaining workflows where the last message from one LLM is used as input to another.
-        """
-        history = self.history.get()
-        return history[-1] if history else None
-
-    async def get_last_message_str(self) -> str | None:
-        """Return the string representation of the last message generated by the LLM or None if history is empty."""
-        last_message = await self.get_last_message()
-        return self.message_param_str(last_message) if last_message else None
-
-    def show_tool_result(self, result: CallToolResult):
-        """Display a tool result in a formatted panel."""
-        self.display.show_tool_result(result)
-
-    def show_oai_tool_result(self, result):
-        """Display a tool result in a formatted panel."""
-        self.display.show_oai_tool_result(result)
-
-    def show_tool_call(self, available_tools, tool_name, tool_args):
-        """Display a tool call in a formatted panel."""
-        self.display.show_tool_call(available_tools, tool_name, tool_args)
-
-    async def show_assistant_message(
-        self,
-        message_text: str | Text,
-        highlight_namespaced_tool: str = "",
-        title: str = "ASSISTANT",
-    ):
-        """Display an assistant message in a formatted panel."""
-        await self.display.show_assistant_message(
-            message_text,
-            aggregator=self.aggregator,
-            highlight_namespaced_tool=highlight_namespaced_tool,
-            title=title,
-            name=self.name,
-        )
-
-    def show_user_message(self, message, model: str | None, chat_turn: int):
-        """Display a user message in a formatted panel."""
-        self.display.show_user_message(message, model, chat_turn, name=self.name)
-
-    async def pre_tool_call(
-        self, tool_call_id: str | None, request: CallToolRequest
-    ) -> CallToolRequest | bool:
-        """Called before a tool is executed. Return False to prevent execution."""
-        return request
-
-    async def post_tool_call(
-        self, tool_call_id: str | None, request: CallToolRequest, result: CallToolResult
-    ) -> CallToolResult:
-        """Called after a tool execution. Can modify the result before it's returned."""
-        return result
-
-    async def call_tool(
-        self,
-        request: CallToolRequest,
-        tool_call_id: str | None = None,
-    ) -> CallToolResult:
-        """Call a tool with the given parameters and optional ID"""
-
-        try:
-            preprocess = await self.pre_tool_call(
-                tool_call_id=tool_call_id,
-                request=request,
-            )
-
-            if isinstance(preprocess, bool):
-                if not preprocess:
-                    return CallToolResult(
-                        isError=True,
-                        content=[
-                            TextContent(
-                                text=f"Error: Tool '{request.params.name}' was not allowed to run."
-                            )
-                        ],
-                    )
-            else:
-                request = preprocess
-
-            tool_name = request.params.name
-            tool_args = request.params.arguments
-            result = await self.aggregator.call_tool(tool_name, tool_args)
-
-            postprocess = await self.post_tool_call(
-                tool_call_id=tool_call_id, request=request, result=result
-            )
-
-            if isinstance(postprocess, CallToolResult):
-                result = postprocess
-
-            return result
-        except PromptExitError:
-            raise
-        except Exception as e:
-            return CallToolResult(
-                isError=True,
-                content=[
-                    TextContent(
-                        type="text",
-                        text=f"Error executing tool '{request.params.name}': {str(e)}",
-                    )
-                ],
-            )
-
-    def message_param_str(self, message: MessageParamT) -> str:
-        """
-        Convert an input message to a string representation.
-        Tries to extract just the content when possible.
-        """
-        if isinstance(message, dict):
-            # For dictionary format messages
-            if "content" in message:
-                content = message["content"]
-                # Handle both string and structured content formats
-                if isinstance(content, str):
-                    return content
-                elif isinstance(content, list) and content:
-                    # Try to extract text from content parts
-                    text_parts = []
-                    for part in content:
-                        if isinstance(part, dict) and "text" in part:
-                            text_parts.append(part["text"])
-                        elif hasattr(part, "text"):
-                            text_parts.append(part.text)
-                    if text_parts:
-                        return "\n".join(text_parts)
-
-        # For objects with content attribute
-        if hasattr(message, "content"):
-            content = message.content
-            if isinstance(content, str):
-                return content
-            elif hasattr(content, "text"):
-                return content.text
-
-        # Default fallback
-        return str(message)
-
-    def message_str(self, message: MessageT) -> str:
-        """
-        Convert an output message to a string representation.
-        Tries to extract just the content when possible.
-        """
-        # First try to use the same method for consistency
-        result = self.message_param_str(message)
-        if result != str(message):
-            return result
-
-        # Additional handling for output-specific formats
-        if hasattr(message, "content"):
-            content = message.content
-            if isinstance(content, list):
-                # Extract text from content blocks
-                text_parts = []
-                for block in content:
-                    if hasattr(block, "text") and block.text:
-                        text_parts.append(block.text)
-                if text_parts:
-                    return "\n".join(text_parts)
-
-        # Default fallback
-        return str(message)
-
-    def _log_chat_progress(
-        self, chat_turn: Optional[int] = None, model: Optional[str] = None
-    ):
-        """Log a chat progress event"""
-        # Determine action type based on verb
-        if hasattr(self, "verb") and self.verb:
-            # Use verb directly regardless of type
-            act = self.verb
-        else:
-            act = ProgressAction.CHATTING
-
-        data = {
-            "progress_action": act,
-            "model": model,
-            "agent_name": self.name,
-            "chat_turn": chat_turn if chat_turn is not None else None,
-        }
-        self.logger.debug("Chat in progress", data=data)
-
-    def _log_chat_finished(self, model: Optional[str] = None):
-        """Log a chat finished event"""
-        data = {
-            "progress_action": ProgressAction.READY,
-            "model": model,
-            "agent_name": self.name,
-        }
-        self.logger.debug("Chat finished", data=data)
-
-    def _convert_prompt_messages(
-        self, prompt_messages: List[PromptMessage]
-    ) -> List[MessageParamT]:
-        """
-        Convert prompt messages to this LLM's specific message format.
-        To be implemented by concrete LLM classes.
-        """
-        raise NotImplementedError("Must be implemented by subclass")
-
-    async def show_prompt_loaded(
-        self,
-        prompt_name: str,
-        description: Optional[str] = None,
-        message_count: int = 0,
-        arguments: Optional[dict[str, str]] = None,
-    ):
-        """
-        Display information about a loaded prompt template.
-
-        Args:
-            prompt_name: The name of the prompt
-            description: Optional description of the prompt
-            message_count: Number of messages in the prompt
-            arguments: Optional dictionary of arguments passed to the prompt
-        """
-        await self.display.show_prompt_loaded(
-            prompt_name=prompt_name,
-            description=description,
-            message_count=message_count,
-            agent_name=self.name,
-            aggregator=self.aggregator,
-            arguments=arguments,
-        )
-
-    async def apply_prompt_template(
-        self, prompt_result: GetPromptResult, prompt_name: str
-    ) -> str:
-        """
-        Apply a prompt template by adding it to the conversation history.
-        If the last message in the prompt is from a user, automatically
-        generate an assistant response.
-
-        Args:
-            prompt_result: The GetPromptResult containing prompt messages
-            prompt_name: The name of the prompt being applied
-
-        Returns:
-            String representation of the assistant's response if generated,
-            or the last assistant message in the prompt
-        """
-        from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart
-
-        # Check if we have any messages
-        if not prompt_result.messages:
-            return "Prompt contains no messages"
-
-        # Extract arguments if they were stored in the result
-        arguments = getattr(prompt_result, "arguments", None)
-
-        # Display information about the loaded prompt
-        await self.show_prompt_loaded(
-            prompt_name=prompt_name,
-            description=prompt_result.description,
-            message_count=len(prompt_result.messages),
-            arguments=arguments,
-        )
-
-        # Convert to PromptMessageMultipart objects
-        multipart_messages = PromptMessageMultipart.parse_get_prompt_result(
-            prompt_result
-        )
-
-        # Delegate to the provider-specific implementation
-        return await self._apply_prompt_template_provider_specific(
-            multipart_messages, None
-        )
-
-    async def apply_prompt(
-        self,
-        multipart_messages: List["PromptMessageMultipart"],
-        request_params: RequestParams | None = None,
-    ) -> str:
-        """
-        Apply a list of PromptMessageMultipart messages directly to the LLM.
-        This is a cleaner interface to _apply_prompt_template_provider_specific.
-
-        Args:
-            multipart_messages: List of PromptMessageMultipart objects
-            request_params: Optional parameters to configure the LLM request
-
-        Returns:
-            String representation of the assistant's response
-        """
-        # Delegate to the provider-specific implementation
-        return await self._apply_prompt_template_provider_specific(
-            multipart_messages, request_params
-        )
-
-    async def _apply_prompt_template_provider_specific(
-        self,
-        multipart_messages: List["PromptMessageMultipart"],
-        request_params: RequestParams | None = None,
-    ) -> str:
-        """
-        Provider-specific implementation of apply_prompt_template.
-        This default implementation handles basic text content for any LLM type.
-        Provider-specific subclasses should override this method to handle
-        multimodal content appropriately.
-
-        Args:
-            multipart_messages: List of PromptMessageMultipart objects parsed from the prompt template
-
-        Returns:
-            String representation of the assistant's response if generated,
-            or the last assistant message in the prompt
-        """
-        # Check the last message role
-        last_message = multipart_messages[-1]
-
-        if last_message.role == "user":
-            # For user messages: Add all previous messages to history, then generate response to the last one
-            self.logger.debug(
-                "Last message in prompt is from user, generating assistant response"
-            )
-
-            # Add all but the last message to history
-            if len(multipart_messages) > 1:
-                previous_messages = multipart_messages[:-1]
-                converted = []
-
-                # Fallback generic method for all LLM types
-                for msg in previous_messages:
-                    # Convert each PromptMessageMultipart to individual PromptMessages
-                    prompt_messages = msg.to_prompt_messages()
-                    for prompt_msg in prompt_messages:
-                        converted.append(
-                            self.type_converter.from_prompt_message(prompt_msg)
-                        )
-
-                self.history.extend(converted, is_prompt=True)
-
-            # For generic LLMs, extract text and describe non-text content
-            user_text_parts = []
-            for content in last_message.content:
-                if content.type == "text":
-                    user_text_parts.append(content.text)
-                elif content.type == "resource" and hasattr(content.resource, "text"):
-                    user_text_parts.append(content.resource.text)
-                elif content.type == "image":
-                    # Add a placeholder for images
-                    mime_type = getattr(content, "mimeType", "image/unknown")
-                    user_text_parts.append(f"[Image: {mime_type}]")
-
-            user_text = "\n".join(user_text_parts) if user_text_parts else ""
-            if not user_text:
-                # Fallback to original method if we couldn't extract text
-                user_text = str(last_message.content)
-
-            return await self.generate_str(user_text)
-        else:
-            # For assistant messages: Add all messages to history and return the last one
-            self.logger.debug(
-                "Last message in prompt is from assistant, returning it directly"
-            )
-
-            # Convert and add all messages to history
-            converted = []
-
-            # Fallback to the original method for all LLM types
-            for msg in multipart_messages:
-                # Convert each PromptMessageMultipart to individual PromptMessages
-                prompt_messages = msg.to_prompt_messages()
-                for prompt_msg in prompt_messages:
-                    converted.append(
-                        self.type_converter.from_prompt_message(prompt_msg)
-                    )
-
-            self.history.extend(converted, is_prompt=True)
-
-            # Return the assistant's message with proper handling of different content types
-            assistant_text_parts = []
-            has_non_text_content = False
-
-            for content in last_message.content:
-                if content.type == "text":
-                    assistant_text_parts.append(content.text)
-                elif content.type == "resource" and hasattr(content.resource, "text"):
-                    # Add resource text with metadata
-                    mime_type = getattr(content.resource, "mimeType", "text/plain")
-                    uri = getattr(content.resource, "uri", "")
-                    if uri:
-                        assistant_text_parts.append(
-                            f"[Resource: {uri}, Type: {mime_type}]\n{content.resource.text}"
-                        )
-                    else:
-                        assistant_text_parts.append(
-                            f"[Resource Type: {mime_type}]\n{content.resource.text}"
-                        )
-                elif content.type == "image":
-                    # Note the presence of images
-                    mime_type = getattr(content, "mimeType", "image/unknown")
-                    assistant_text_parts.append(f"[Image: {mime_type}]")
-                    has_non_text_content = True
-                else:
-                    # Other content types
-                    assistant_text_parts.append(f"[Content of type: {content.type}]")
-                    has_non_text_content = True
-
-            # Join all parts with double newlines for better readability
-            result = (
-                "\n\n".join(assistant_text_parts)
-                if assistant_text_parts
-                else str(last_message.content)
-            )
-
-            # Add a note if non-text content was present
-            if has_non_text_content:
-                result += "\n\n[Note: This message contained non-text content that may not be fully represented in text format]"
-
-            return result
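For reference, the SimpleMemory class removed above kept prompt messages separate from generated conversation history. A minimal usage sketch against the 0.1.12 module shown in this hunk (in 0.2.0 the memory code appears to live in mcp_agent/llm/memory.py, per the file list above):

from mcp_agent.workflows.llm.augmented_llm import SimpleMemory  # 0.1.12 path

memory = SimpleMemory[str]()
memory.extend(["You are a helpful agent."], is_prompt=True)  # prompt messages: always retained
memory.append("user: hello")                                 # regular conversation history

assert memory.get() == ["You are a helpful agent.", "user: hello"]
assert memory.get(include_history=False) == ["You are a helpful agent."]

memory.clear()                    # clears history but keeps prompt messages
assert memory.get() == ["You are a helpful agent."]
memory.clear(clear_prompts=True)  # clears prompt messages as well
assert memory.get() == []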