fast-agent-mcp 0.0.7 (py3-none-any.whl)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of fast-agent-mcp might be problematic.

Files changed (100)
  1. fast_agent_mcp-0.0.7.dist-info/METADATA +322 -0
  2. fast_agent_mcp-0.0.7.dist-info/RECORD +100 -0
  3. fast_agent_mcp-0.0.7.dist-info/WHEEL +4 -0
  4. fast_agent_mcp-0.0.7.dist-info/entry_points.txt +5 -0
  5. fast_agent_mcp-0.0.7.dist-info/licenses/LICENSE +201 -0
  6. mcp_agent/__init__.py +0 -0
  7. mcp_agent/agents/__init__.py +0 -0
  8. mcp_agent/agents/agent.py +277 -0
  9. mcp_agent/app.py +303 -0
  10. mcp_agent/cli/__init__.py +0 -0
  11. mcp_agent/cli/__main__.py +4 -0
  12. mcp_agent/cli/commands/bootstrap.py +221 -0
  13. mcp_agent/cli/commands/config.py +11 -0
  14. mcp_agent/cli/commands/setup.py +229 -0
  15. mcp_agent/cli/main.py +68 -0
  16. mcp_agent/cli/terminal.py +24 -0
  17. mcp_agent/config.py +334 -0
  18. mcp_agent/console.py +28 -0
  19. mcp_agent/context.py +251 -0
  20. mcp_agent/context_dependent.py +48 -0
  21. mcp_agent/core/fastagent.py +1013 -0
  22. mcp_agent/eval/__init__.py +0 -0
  23. mcp_agent/event_progress.py +88 -0
  24. mcp_agent/executor/__init__.py +0 -0
  25. mcp_agent/executor/decorator_registry.py +120 -0
  26. mcp_agent/executor/executor.py +293 -0
  27. mcp_agent/executor/task_registry.py +34 -0
  28. mcp_agent/executor/temporal.py +405 -0
  29. mcp_agent/executor/workflow.py +197 -0
  30. mcp_agent/executor/workflow_signal.py +325 -0
  31. mcp_agent/human_input/__init__.py +0 -0
  32. mcp_agent/human_input/handler.py +49 -0
  33. mcp_agent/human_input/types.py +58 -0
  34. mcp_agent/logging/__init__.py +0 -0
  35. mcp_agent/logging/events.py +123 -0
  36. mcp_agent/logging/json_serializer.py +163 -0
  37. mcp_agent/logging/listeners.py +216 -0
  38. mcp_agent/logging/logger.py +365 -0
  39. mcp_agent/logging/rich_progress.py +120 -0
  40. mcp_agent/logging/tracing.py +140 -0
  41. mcp_agent/logging/transport.py +461 -0
  42. mcp_agent/mcp/__init__.py +0 -0
  43. mcp_agent/mcp/gen_client.py +85 -0
  44. mcp_agent/mcp/mcp_activity.py +18 -0
  45. mcp_agent/mcp/mcp_agent_client_session.py +242 -0
  46. mcp_agent/mcp/mcp_agent_server.py +56 -0
  47. mcp_agent/mcp/mcp_aggregator.py +394 -0
  48. mcp_agent/mcp/mcp_connection_manager.py +330 -0
  49. mcp_agent/mcp/stdio.py +104 -0
  50. mcp_agent/mcp_server_registry.py +275 -0
  51. mcp_agent/progress_display.py +10 -0
  52. mcp_agent/resources/examples/decorator/main.py +26 -0
  53. mcp_agent/resources/examples/decorator/optimizer.py +78 -0
  54. mcp_agent/resources/examples/decorator/orchestrator.py +68 -0
  55. mcp_agent/resources/examples/decorator/parallel.py +81 -0
  56. mcp_agent/resources/examples/decorator/router.py +56 -0
  57. mcp_agent/resources/examples/decorator/tiny.py +22 -0
  58. mcp_agent/resources/examples/mcp_researcher/main-evalopt.py +53 -0
  59. mcp_agent/resources/examples/mcp_researcher/main.py +38 -0
  60. mcp_agent/telemetry/__init__.py +0 -0
  61. mcp_agent/telemetry/usage_tracking.py +18 -0
  62. mcp_agent/workflows/__init__.py +0 -0
  63. mcp_agent/workflows/embedding/__init__.py +0 -0
  64. mcp_agent/workflows/embedding/embedding_base.py +61 -0
  65. mcp_agent/workflows/embedding/embedding_cohere.py +49 -0
  66. mcp_agent/workflows/embedding/embedding_openai.py +46 -0
  67. mcp_agent/workflows/evaluator_optimizer/__init__.py +0 -0
  68. mcp_agent/workflows/evaluator_optimizer/evaluator_optimizer.py +359 -0
  69. mcp_agent/workflows/intent_classifier/__init__.py +0 -0
  70. mcp_agent/workflows/intent_classifier/intent_classifier_base.py +120 -0
  71. mcp_agent/workflows/intent_classifier/intent_classifier_embedding.py +134 -0
  72. mcp_agent/workflows/intent_classifier/intent_classifier_embedding_cohere.py +45 -0
  73. mcp_agent/workflows/intent_classifier/intent_classifier_embedding_openai.py +45 -0
  74. mcp_agent/workflows/intent_classifier/intent_classifier_llm.py +161 -0
  75. mcp_agent/workflows/intent_classifier/intent_classifier_llm_anthropic.py +60 -0
  76. mcp_agent/workflows/intent_classifier/intent_classifier_llm_openai.py +60 -0
  77. mcp_agent/workflows/llm/__init__.py +0 -0
  78. mcp_agent/workflows/llm/augmented_llm.py +645 -0
  79. mcp_agent/workflows/llm/augmented_llm_anthropic.py +539 -0
  80. mcp_agent/workflows/llm/augmented_llm_openai.py +615 -0
  81. mcp_agent/workflows/llm/llm_selector.py +345 -0
  82. mcp_agent/workflows/llm/model_factory.py +175 -0
  83. mcp_agent/workflows/orchestrator/__init__.py +0 -0
  84. mcp_agent/workflows/orchestrator/orchestrator.py +407 -0
  85. mcp_agent/workflows/orchestrator/orchestrator_models.py +154 -0
  86. mcp_agent/workflows/orchestrator/orchestrator_prompts.py +113 -0
  87. mcp_agent/workflows/parallel/__init__.py +0 -0
  88. mcp_agent/workflows/parallel/fan_in.py +350 -0
  89. mcp_agent/workflows/parallel/fan_out.py +187 -0
  90. mcp_agent/workflows/parallel/parallel_llm.py +141 -0
  91. mcp_agent/workflows/router/__init__.py +0 -0
  92. mcp_agent/workflows/router/router_base.py +276 -0
  93. mcp_agent/workflows/router/router_embedding.py +240 -0
  94. mcp_agent/workflows/router/router_embedding_cohere.py +59 -0
  95. mcp_agent/workflows/router/router_embedding_openai.py +59 -0
  96. mcp_agent/workflows/router/router_llm.py +301 -0
  97. mcp_agent/workflows/swarm/__init__.py +0 -0
  98. mcp_agent/workflows/swarm/swarm.py +320 -0
  99. mcp_agent/workflows/swarm/swarm_anthropic.py +42 -0
  100. mcp_agent/workflows/swarm/swarm_openai.py +41 -0
mcp_agent/workflows/llm/augmented_llm.py
@@ -0,0 +1,645 @@
+ from abc import abstractmethod
+
+ from typing import Generic, List, Optional, Protocol, Type, TypeVar, TYPE_CHECKING
+
+ from pydantic import Field
+
+ from mcp.types import (
+     CallToolRequest,
+     CallToolResult,
+     CreateMessageRequestParams,
+     CreateMessageResult,
+     ModelPreferences,
+     SamplingMessage,
+     TextContent,
+ )
+
+ from mcp_agent.context_dependent import ContextDependent
+ from mcp_agent.event_progress import ProgressAction
+ from mcp_agent.mcp.mcp_aggregator import MCPAggregator, SEP
+ from mcp_agent.workflows.llm.llm_selector import ModelSelector
+ from rich.panel import Panel
+ from rich.text import Text
+ from mcp_agent import console
+
+ if TYPE_CHECKING:
+     from mcp_agent.agents.agent import Agent
+     from mcp_agent.context import Context
+
+ MessageParamT = TypeVar("MessageParamT")
+ """A type representing an input message to an LLM."""
+
+ MessageT = TypeVar("MessageT")
+ """A type representing an output message from an LLM."""
+
+ ModelT = TypeVar("ModelT")
+ """A type representing a structured output message from an LLM."""
+
+ # TODO: saqadri - SamplingMessage is fairly limiting - consider extending
+ MCPMessageParam = SamplingMessage
+ MCPMessageResult = CreateMessageResult
+
+ # TODO -- move this to a constant
+ HUMAN_INPUT_TOOL_NAME = "__human_input__"
+
+
+ class Memory(Protocol, Generic[MessageParamT]):
+     """
+     Simple memory management for storing past interactions in-memory.
+     """
+
+     # TODO: saqadri - add checkpointing and other advanced memory capabilities
+
+     def __init__(self): ...
+
+     def extend(self, messages: List[MessageParamT]) -> None: ...
+
+     def set(self, messages: List[MessageParamT]) -> None: ...
+
+     def append(self, message: MessageParamT) -> None: ...
+
+     def get(self) -> List[MessageParamT]: ...
+
+     def clear(self) -> None: ...
+
+
+ class SimpleMemory(Memory, Generic[MessageParamT]):
+     """
+     Simple memory management for storing past interactions in-memory.
+     """
+
+     def __init__(self):
+         self.history: List[MessageParamT] = []
+
+     def extend(self, messages: List[MessageParamT]):
+         self.history.extend(messages)
+
+     def set(self, messages: List[MessageParamT]):
+         self.history = messages.copy()
+
+     def append(self, message: MessageParamT):
+         self.history.append(message)
+
+     def get(self) -> List[MessageParamT]:
+         return self.history
+
+     def clear(self):
+         self.history = []
+
+
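# Illustrative sketch, not part of the diff above: exercising the SimpleMemory
# API defined just before this point, with str assumed as the MessageParamT
# type parameter for brevity.
memory: SimpleMemory[str] = SimpleMemory()
memory.append("user: hello")
memory.extend(["assistant: hi there", "user: and who are you?"])
assert memory.get()[-1] == "user: and who are you?"
memory.set(["fresh start"])  # replaces history with a copy of the given list
memory.clear()               # history is now empty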
90
+ class RequestParams(CreateMessageRequestParams):
+     """
+     Parameters to configure the AugmentedLLM 'generate' requests.
+     """
+
+     messages: None = Field(exclude=True, default=None)
+     """
+     Ignored. 'messages' are removed from CreateMessageRequestParams
+     to avoid confusion with the 'message' parameter on the 'generate' method.
+     """
+
+     maxTokens: int = 2048
+     """The maximum number of tokens to sample, as requested by the server."""
+
+     model: str | None = None
+     """
+     The model to use for the LLM generation.
+     If specified, this overrides the 'modelPreferences' selection criteria.
+     """
+
+     use_history: bool = True
+     """
+     Include the message history in the generate request.
+     """
+
+     max_iterations: int = 10
+     """
+     The maximum number of iterations to run the LLM for.
+     """
+
+     parallel_tool_calls: bool = True
+     """
+     Whether to allow multiple tool calls per iteration.
+     Also known as multi-step tool use.
+     """
+
+
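# Illustrative sketch, not part of the diff above: constructing RequestParams
# with a few overrides. "example-model-id" is a placeholder, not a real model.
params = RequestParams(
    model="example-model-id",  # overrides modelPreferences-based selection
    maxTokens=1024,            # down from the 2048 default
    use_history=False,         # stateless request; history is not replayed
)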
127
+ class AugmentedLLMProtocol(Protocol, Generic[MessageParamT, MessageT]):
+     """Protocol defining the interface for augmented LLMs"""
+
+     async def generate(
+         self,
+         message: str | MessageParamT | List[MessageParamT],
+         request_params: RequestParams | None = None,
+     ) -> List[MessageT]:
+         """Request an LLM generation, which may run multiple iterations, and return the result"""
+
+     async def generate_str(
+         self,
+         message: str | MessageParamT | List[MessageParamT],
+         request_params: RequestParams | None = None,
+     ) -> str:
+         """Request an LLM generation and return the string representation of the result"""
+
+     async def generate_structured(
+         self,
+         message: str | MessageParamT | List[MessageParamT],
+         response_model: Type[ModelT],
+         request_params: RequestParams | None = None,
+     ) -> ModelT:
+         """Request a structured LLM generation and return the result as a Pydantic model."""
+
+
+ class ProviderToMCPConverter(Protocol, Generic[MessageParamT, MessageT]):
+     """Conversions between LLM provider and MCP types"""
+
+     @classmethod
+     def to_mcp_message_result(cls, result: MessageT) -> MCPMessageResult:
+         """Convert an LLM response to an MCP message result type."""
+
+     @classmethod
+     def from_mcp_message_result(cls, result: MCPMessageResult) -> MessageT:
+         """Convert an MCP message result to an LLM response type."""
+
+     @classmethod
+     def to_mcp_message_param(cls, param: MessageParamT) -> MCPMessageParam:
+         """Convert an LLM input to an MCP message (SamplingMessage) type."""
+
+     @classmethod
+     def from_mcp_message_param(cls, param: MCPMessageParam) -> MessageParamT:
+         """Convert an MCP message (SamplingMessage) to an LLM input type."""
+
+
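# Illustrative sketch, not part of the diff above: the degenerate converter for
# a hypothetical provider whose native types are already the MCP sampling types
# (MessageParamT = SamplingMessage, MessageT = CreateMessageResult).
class PassthroughConverter:
    @classmethod
    def to_mcp_message_result(cls, result: MCPMessageResult) -> MCPMessageResult:
        return result  # provider output is already an MCP CreateMessageResult

    @classmethod
    def from_mcp_message_result(cls, result: MCPMessageResult) -> MCPMessageResult:
        return result

    @classmethod
    def to_mcp_message_param(cls, param: MCPMessageParam) -> MCPMessageParam:
        return param  # provider input is already an MCP SamplingMessage

    @classmethod
    def from_mcp_message_param(cls, param: MCPMessageParam) -> MCPMessageParam:
        return param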
173
+ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol[MessageParamT, MessageT]):
+     """
+     The basic building block of agentic systems is an LLM enhanced with augmentations
+     such as retrieval, tools, and memory provided from a collection of MCP servers.
+     Our current models can actively use these capabilities: generating their own search queries,
+     selecting appropriate tools, and determining what information to retain.
+     """
+
+     # TODO: saqadri - add streaming support (e.g. generate_stream)
+     # TODO: saqadri - consider adding middleware patterns for pre/post processing of messages, for now we have pre/post_tool_call
+
+     provider: str | None = None
+
+     def __init__(
+         self,
+         agent: Optional["Agent"] = None,
+         server_names: List[str] | None = None,
+         instruction: str | None = None,
+         name: str | None = None,
+         request_params: RequestParams | None = None,
+         type_converter: Type[ProviderToMCPConverter[MessageParamT, MessageT]] = None,
+         context: Optional["Context"] = None,
+         **kwargs,
+     ):
+         """
+         Initialize the LLM with a list of server names and an instruction.
+         If a name is provided, it will be used to identify the LLM.
+         If an agent is provided, all other properties are optional.
+         """
+         # Extract request_params before super() call
+         self._init_request_params = request_params
+         super().__init__(context=context, **kwargs)
+
+         self.executor = self.context.executor
+         self.aggregator = (
+             agent if agent is not None else MCPAggregator(server_names or [])
+         )
+         self.name = name or (agent.name if agent else None)
+         self.instruction = instruction or (
+             agent.instruction if agent and isinstance(agent.instruction, str) else None
+         )
+         self.history: Memory[MessageParamT] = SimpleMemory[MessageParamT]()
+
+         # Set initial model preferences
+         self.model_preferences = ModelPreferences(
+             costPriority=0.3,
+             speedPriority=0.4,
+             intelligencePriority=0.3,
+         )
+
+         # Initialize default parameters
+         self.default_request_params = self._initialize_default_params(kwargs)
+
+         # Update model preferences from default params
+         if self.default_request_params and self.default_request_params.modelPreferences:
+             self.model_preferences = self.default_request_params.modelPreferences
+
+         # Merge with provided params if any
+         if self._init_request_params:
+             self.default_request_params = self._merge_request_params(
+                 self.default_request_params, self._init_request_params
+             )
+             # Update model preferences again if they changed in the merge
+             if self.default_request_params.modelPreferences:
+                 self.model_preferences = self.default_request_params.modelPreferences
+
+         self.model_selector = self.context.model_selector
+         self.type_converter = type_converter
+
+     @abstractmethod
+     async def generate(
+         self,
+         message: str | MessageParamT | List[MessageParamT],
+         request_params: RequestParams | None = None,
+     ) -> List[MessageT]:
+         """Request an LLM generation, which may run multiple iterations, and return the result"""
+
+     @abstractmethod
+     async def generate_str(
+         self,
+         message: str | MessageParamT | List[MessageParamT],
+         request_params: RequestParams | None = None,
+     ) -> str:
+         """Request an LLM generation and return the string representation of the result"""
+
+     @abstractmethod
+     async def generate_structured(
+         self,
+         message: str | MessageParamT | List[MessageParamT],
+         response_model: Type[ModelT],
+         request_params: RequestParams | None = None,
+     ) -> ModelT:
+         """Request a structured LLM generation and return the result as a Pydantic model."""
+
+     async def select_model(
+         self, request_params: RequestParams | None = None
+     ) -> str | None:
+         """
+         Select an LLM based on the request parameters.
+         If a model is specified in the request, it will override the model selection criteria.
+         """
+         model_preferences = self.model_preferences
+         if request_params is not None:
+             model_preferences = request_params.modelPreferences or model_preferences
+             model = request_params.model
+             if model:
+                 return model
+
+         ## TODO -- can't have been tested, returns invalid model strings (e.g. claude-35-sonnet)
+         if not self.model_selector:
+             self.model_selector = ModelSelector()
+
+         model_info = self.model_selector.select_best_model(
+             model_preferences=model_preferences, provider=self.provider
+         )
+
+         return model_info.name
+
+     def _initialize_default_params(self, kwargs: dict) -> RequestParams:
+         """Initialize default parameters for the LLM.
+         Should be overridden by provider implementations to set provider-specific defaults."""
+         return RequestParams(
+             modelPreferences=self.model_preferences,
+             systemPrompt=self.instruction,
+             parallel_tool_calls=True,
+             max_iterations=10,
+             use_history=True,
+         )
+
+     def _merge_request_params(
+         self, default_params: RequestParams, provided_params: RequestParams
+     ) -> RequestParams:
+         """Merge default and provided request parameters"""
+         # Log parameter merging if debug logging is enabled
+         # self.context.config.logger.debug(
+         #     "Merging provided request params with defaults",
+         #     extra={
+         #         "defaults": default_params.model_dump(),
+         #         "provided": provided_params.model_dump(),
+         #     },
+         # )
+
+         merged = default_params.model_dump()
+         merged.update(provided_params.model_dump(exclude_unset=True))
+         final_params = RequestParams(**merged)
+
+         # self.logger.debug(
+         #     "Final merged params:", extra={"params": final_params.model_dump()}
+         # )
+
+         return final_params
+
+     def get_request_params(
+         self,
+         request_params: RequestParams | None = None,
+         default: RequestParams | None = None,
+     ) -> RequestParams:
+         """
+         Get request parameters with merged-in defaults and overrides.
+         Args:
+             request_params: The request parameters to use as overrides.
+             default: The default request parameters to use as the base.
+                 If unspecified, self.default_request_params will be used.
+         """
+         # Start with the defaults
+         default_request_params = default or self.default_request_params
+
+         if not default_request_params:
+             default_request_params = self._initialize_default_params({})
+
+         # If user provides overrides, merge them with defaults
+         if request_params:
+             return self._merge_request_params(default_request_params, request_params)
+
+         return default_request_params
+
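# Illustrative sketch, not part of the diff above: the merge semantics behind
# get_request_params. Because _merge_request_params calls
# model_dump(exclude_unset=True) on the override object, only fields the caller
# explicitly set replace the defaults.
defaults = RequestParams(maxTokens=2048, max_iterations=10)
overrides = RequestParams(maxTokens=512)  # only maxTokens explicitly set
# merged = llm.get_request_params(overrides, default=defaults)  # on an AugmentedLLM instance
# merged.maxTokens == 512; merged.max_iterations == 10 (default preserved)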
349
+     def to_mcp_message_result(self, result: MessageT) -> MCPMessageResult:
+         """Convert an LLM response to an MCP message result type."""
+         return self.type_converter.to_mcp_message_result(result)
+
+     def from_mcp_message_result(self, result: MCPMessageResult) -> MessageT:
+         """Convert an MCP message result to an LLM response type."""
+         return self.type_converter.from_mcp_message_result(result)
+
+     def to_mcp_message_param(self, param: MessageParamT) -> MCPMessageParam:
+         """Convert an LLM input to an MCP message (SamplingMessage) type."""
+         return self.type_converter.to_mcp_message_param(param)
+
+     def from_mcp_message_param(self, param: MCPMessageParam) -> MessageParamT:
+         """Convert an MCP message (SamplingMessage) to an LLM input type."""
+         return self.type_converter.from_mcp_message_param(param)
+
+     @classmethod
+     def convert_message_to_message_param(
+         cls, message: MessageT, **kwargs
+     ) -> MessageParamT:
+         """Convert a response object to an input parameter object to allow LLM calls to be chained."""
+         # Many LLM implementations will allow the same type for input and output messages
+         return message
+
+     async def get_last_message(self) -> MessageParamT | None:
+         """
+         Return the last message generated by the LLM or None if history is empty.
+         This is useful for prompt chaining workflows where the last message from one LLM is used as input to another.
+         """
+         history = self.history.get()
+         return history[-1] if history else None
+
+     async def get_last_message_str(self) -> str | None:
+         """Return the string representation of the last message generated by the LLM or None if history is empty."""
+         last_message = await self.get_last_message()
+         return self.message_param_str(last_message) if last_message else None
+
+     def show_tool_result(self, result: CallToolResult):
+         """Display a tool result in a formatted panel."""
+
+         if not self.context.config.logger.show_tools:
+             return
+
+         if result.isError:
+             style = "red"
+         else:
+             style = "magenta"
+
+         panel = Panel(
+             Text(
+                 str(result.content), overflow="ellipsis"
+             ),  # TODO support multi-modal/multi-part responses
+             title="[TOOL RESULT]",
+             title_align="right",
+             style=style,
+             border_style="bold white",
+             padding=(1, 2),
+         )
+
+         if self.context.config.logger.truncate_tools:
+             if len(str(result.content)) > 360:
+                 panel.height = 8
+
+         console.console.print(panel)
+         console.console.print("\n")
+
+     def show_oai_tool_result(self, result):
+         """Display a tool result in a formatted panel."""
+
+         if not self.context.config.logger.show_tools:
+             return
+
+         panel = Panel(
+             Text(str(result), overflow="ellipsis"),  # TODO update openai support
+             title="[TOOL RESULT]",
+             title_align="right",
+             style="magenta",
+             border_style="bold white",
+             padding=(1, 2),
+         )
+
+         if self.context.config.logger.truncate_tools:
+             if len(str(result)) > 360:
+                 panel.height = 8
+
+         console.console.print(panel)
+         console.console.print("\n")
+
+     def show_tool_call(self, available_tools, tool_name, tool_args):
+         """Display a tool call in a formatted panel."""
+
+         if not self.context.config.logger.show_tools:
+             return
+
+         display_tool_list = Text()
+         for display_tool in available_tools:
+             # Handle both OpenAI and Anthropic tool formats
+             # TODO -- this should really be using the ToolCall abstraction and converting at the concrete layer??
+             if isinstance(display_tool, dict):
+                 if "function" in display_tool:
+                     # OpenAI format
+                     tool_call_name = display_tool["function"]["name"]
+                 else:
+                     # Anthropic format
+                     tool_call_name = display_tool["name"]
+             else:
+                 # Handle potential object format (e.g., Pydantic models)
+                 tool_call_name = (
+                     display_tool.function.name
+                     if hasattr(display_tool, "function")
+                     else display_tool.name
+                 )
+
+             parts = (
+                 tool_call_name.split(SEP)
+                 if SEP in tool_call_name
+                 else [tool_call_name, tool_call_name]
+             )
+             if tool_name.split(SEP)[0] == parts[0]:
+                 if tool_call_name == tool_name:
+                     style = "magenta"
+                 else:
+                     style = "dim white"
+
+                 shortened_name = (
+                     parts[1] if len(parts[1]) <= 12 else parts[1][:11] + "…"
+                 )
+                 display_tool_list.append(f"[{shortened_name}] ", style)
+
+         panel = Panel(
+             Text(str(tool_args), overflow="ellipsis"),
+             title="[TOOL CALL]",
+             title_align="left",
+             style="magenta",
+             border_style="bold white",
+             subtitle=display_tool_list,
+             subtitle_align="left",
+             padding=(1, 2),
+         )
+
+         if self.context.config.logger.truncate_tools:
+             if len(str(tool_args)) > 360:
+                 panel.height = 8
+
+         console.console.print(panel)
+         console.console.print("\n")
+
+     async def show_assistant_message(
+         self, message_text: str | Text, highlight_namespaced_tool: str = ""
+     ):
+         """Display an assistant message in a formatted panel."""
+
+         if not self.context.config.logger.show_chat:
+             return
+
+         mcp_server_name = (
+             highlight_namespaced_tool.split(SEP)
+             if SEP in highlight_namespaced_tool
+             else [highlight_namespaced_tool]
+         )
+
+         display_server_list = Text()
+
+         tools = await self.aggregator.list_tools()
+         if any(tool.name == HUMAN_INPUT_TOOL_NAME for tool in tools.tools):
+             style = (
+                 "green"
+                 if highlight_namespaced_tool == HUMAN_INPUT_TOOL_NAME
+                 else "dim white"
+             )
+             display_server_list.append("[human] ", style)
+
+         for server_name in await self.aggregator.list_servers():
+             style = "green" if server_name == mcp_server_name[0] else "dim white"
+             display_server_list.append(f"[{server_name}] ", style)
+
+         panel = Panel(
+             message_text,
+             title=f"[ASSISTANT]{f' ({self.name})' if self.name else ''}",
+             title_align="left",
+             style="green",
+             border_style="bold white",
+             padding=(1, 2),
+             subtitle=display_server_list,
+             subtitle_align="left",
+         )
+         console.console.print(panel)
+         console.console.print("\n")
+
+     def show_user_message(self, message, model: str | None, chat_turn: int):
+         """Display a user message in a formatted panel."""
+
+         if not self.context.config.logger.show_chat:
+             return
+
+         panel = Panel(
+             message,
+             title=f"({self.name}) [USER]" if self.name else "[USER]",
+             title_align="right",
+             style="blue",
+             border_style="bold white",
+             padding=(1, 2),
+             subtitle=Text(f"{model} turn {chat_turn}", style="dim white"),
+             subtitle_align="left",
+         )
+         console.console.print(panel)
+         console.console.print("\n")
+
+     async def pre_tool_call(
+         self, tool_call_id: str | None, request: CallToolRequest
+     ) -> CallToolRequest | bool:
+         """Called before a tool is executed. Return False to prevent execution."""
+         return request
+
+     async def post_tool_call(
+         self, tool_call_id: str | None, request: CallToolRequest, result: CallToolResult
+     ) -> CallToolResult:
+         """Called after a tool execution. Can modify the result before it's returned."""
+         return result
+
+     async def call_tool(
+         self,
+         request: CallToolRequest,
+         tool_call_id: str | None = None,
+     ) -> CallToolResult:
+         """Call a tool with the given parameters and optional ID"""
+
+         try:
+             preprocess = await self.pre_tool_call(
+                 tool_call_id=tool_call_id,
+                 request=request,
+             )
+
+             if isinstance(preprocess, bool):
+                 if not preprocess:
+                     return CallToolResult(
+                         isError=True,
+                         content=[
+                             TextContent(
+                                 type="text",
+                                 text=f"Error: Tool '{request.params.name}' was not allowed to run.",
+                             )
+                         ],
+                     )
+             else:
+                 request = preprocess
+
+             tool_name = request.params.name
+             tool_args = request.params.arguments
+             result = await self.aggregator.call_tool(tool_name, tool_args)
+
+             postprocess = await self.post_tool_call(
+                 tool_call_id=tool_call_id, request=request, result=result
+             )
+
+             if isinstance(postprocess, CallToolResult):
+                 result = postprocess
+
+             return result
+         except Exception as e:
+             return CallToolResult(
+                 isError=True,
+                 content=[
+                     TextContent(
+                         type="text",
+                         text=f"Error executing tool '{request.params.name}': {str(e)}",
+                     )
+                 ],
+             )
+
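# Illustrative sketch, not part of the diff above: vetoing a tool via the
# pre_tool_call hook. "delete_everything" is a hypothetical tool name, and the
# abstract generate methods are omitted for brevity.
class GuardedLLM(AugmentedLLM):
    async def pre_tool_call(
        self, tool_call_id: str | None, request: CallToolRequest
    ) -> CallToolRequest | bool:
        if request.params.name == "delete_everything":
            return False  # call_tool short-circuits with an isError CallToolResult
        return request  # any other tool proceeds unchanged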
618
+     def message_param_str(self, message: MessageParamT) -> str:
+         """Convert an input message to a string representation."""
+         return str(message)
+
+     def message_str(self, message: MessageT) -> str:
+         """Convert an output message to a string representation."""
+         return str(message)
+
+     def _log_chat_progress(
+         self, chat_turn: Optional[int] = None, model: Optional[str] = None
+     ):
+         """Log a chat progress event"""
+         data = {
+             "progress_action": ProgressAction.CHATTING,
+             "model": model,
+             "agent_name": self.name,
+             "chat_turn": chat_turn,
+         }
+         self.logger.debug("Chat in progress", data=data)
+
+     def _log_chat_finished(self, model: Optional[str] = None):
+         """Log a chat finished event"""
+         data = {
+             "progress_action": ProgressAction.READY,
+             "model": model,
+             "agent_name": self.name,
+         }
+         self.logger.debug("Chat finished", data=data)
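
Taken together, provider implementations (see augmented_llm_anthropic.py and augmented_llm_openai.py in this release) subclass AugmentedLLM and supply the three abstract generate methods plus a ProviderToMCPConverter. A minimal, hypothetical echo-style subclass is sketched below; a real provider would call its API where the comment indicates.

    class EchoLLM(AugmentedLLM[str, str]):
        async def generate(
            self,
            message: str | List[str],
            request_params: RequestParams | None = None,
        ) -> List[str]:
            params = self.get_request_params(request_params)
            response = f"echo: {message}"  # a real provider would call its API here
            if params.use_history:
                self.history.append(str(message))
                self.history.append(response)
            return [response]

        async def generate_str(
            self, message: str | List[str], request_params: RequestParams | None = None
        ) -> str:
            return (await self.generate(message, request_params))[0]

        async def generate_structured(
            self,
            message: str | List[str],
            response_model: Type[ModelT],
            request_params: RequestParams | None = None,
        ) -> ModelT:
            raise NotImplementedError("structured output requires a real provider")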