fast-agent-mcp 0.4.7__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- fast_agent/__init__.py +183 -0
- fast_agent/acp/__init__.py +19 -0
- fast_agent/acp/acp_aware_mixin.py +304 -0
- fast_agent/acp/acp_context.py +437 -0
- fast_agent/acp/content_conversion.py +136 -0
- fast_agent/acp/filesystem_runtime.py +427 -0
- fast_agent/acp/permission_store.py +269 -0
- fast_agent/acp/server/__init__.py +5 -0
- fast_agent/acp/server/agent_acp_server.py +1472 -0
- fast_agent/acp/slash_commands.py +1050 -0
- fast_agent/acp/terminal_runtime.py +408 -0
- fast_agent/acp/tool_permission_adapter.py +125 -0
- fast_agent/acp/tool_permissions.py +474 -0
- fast_agent/acp/tool_progress.py +814 -0
- fast_agent/agents/__init__.py +85 -0
- fast_agent/agents/agent_types.py +64 -0
- fast_agent/agents/llm_agent.py +350 -0
- fast_agent/agents/llm_decorator.py +1139 -0
- fast_agent/agents/mcp_agent.py +1337 -0
- fast_agent/agents/tool_agent.py +271 -0
- fast_agent/agents/workflow/agents_as_tools_agent.py +849 -0
- fast_agent/agents/workflow/chain_agent.py +212 -0
- fast_agent/agents/workflow/evaluator_optimizer.py +380 -0
- fast_agent/agents/workflow/iterative_planner.py +652 -0
- fast_agent/agents/workflow/maker_agent.py +379 -0
- fast_agent/agents/workflow/orchestrator_models.py +218 -0
- fast_agent/agents/workflow/orchestrator_prompts.py +248 -0
- fast_agent/agents/workflow/parallel_agent.py +250 -0
- fast_agent/agents/workflow/router_agent.py +353 -0
- fast_agent/cli/__init__.py +0 -0
- fast_agent/cli/__main__.py +73 -0
- fast_agent/cli/commands/acp.py +159 -0
- fast_agent/cli/commands/auth.py +404 -0
- fast_agent/cli/commands/check_config.py +783 -0
- fast_agent/cli/commands/go.py +514 -0
- fast_agent/cli/commands/quickstart.py +557 -0
- fast_agent/cli/commands/serve.py +143 -0
- fast_agent/cli/commands/server_helpers.py +114 -0
- fast_agent/cli/commands/setup.py +174 -0
- fast_agent/cli/commands/url_parser.py +190 -0
- fast_agent/cli/constants.py +40 -0
- fast_agent/cli/main.py +115 -0
- fast_agent/cli/terminal.py +24 -0
- fast_agent/config.py +798 -0
- fast_agent/constants.py +41 -0
- fast_agent/context.py +279 -0
- fast_agent/context_dependent.py +50 -0
- fast_agent/core/__init__.py +92 -0
- fast_agent/core/agent_app.py +448 -0
- fast_agent/core/core_app.py +137 -0
- fast_agent/core/direct_decorators.py +784 -0
- fast_agent/core/direct_factory.py +620 -0
- fast_agent/core/error_handling.py +27 -0
- fast_agent/core/exceptions.py +90 -0
- fast_agent/core/executor/__init__.py +0 -0
- fast_agent/core/executor/executor.py +280 -0
- fast_agent/core/executor/task_registry.py +32 -0
- fast_agent/core/executor/workflow_signal.py +324 -0
- fast_agent/core/fastagent.py +1186 -0
- fast_agent/core/logging/__init__.py +5 -0
- fast_agent/core/logging/events.py +138 -0
- fast_agent/core/logging/json_serializer.py +164 -0
- fast_agent/core/logging/listeners.py +309 -0
- fast_agent/core/logging/logger.py +278 -0
- fast_agent/core/logging/transport.py +481 -0
- fast_agent/core/prompt.py +9 -0
- fast_agent/core/prompt_templates.py +183 -0
- fast_agent/core/validation.py +326 -0
- fast_agent/event_progress.py +62 -0
- fast_agent/history/history_exporter.py +49 -0
- fast_agent/human_input/__init__.py +47 -0
- fast_agent/human_input/elicitation_handler.py +123 -0
- fast_agent/human_input/elicitation_state.py +33 -0
- fast_agent/human_input/form_elements.py +59 -0
- fast_agent/human_input/form_fields.py +256 -0
- fast_agent/human_input/simple_form.py +113 -0
- fast_agent/human_input/types.py +40 -0
- fast_agent/interfaces.py +310 -0
- fast_agent/llm/__init__.py +9 -0
- fast_agent/llm/cancellation.py +22 -0
- fast_agent/llm/fastagent_llm.py +931 -0
- fast_agent/llm/internal/passthrough.py +161 -0
- fast_agent/llm/internal/playback.py +129 -0
- fast_agent/llm/internal/silent.py +41 -0
- fast_agent/llm/internal/slow.py +38 -0
- fast_agent/llm/memory.py +275 -0
- fast_agent/llm/model_database.py +490 -0
- fast_agent/llm/model_factory.py +388 -0
- fast_agent/llm/model_info.py +102 -0
- fast_agent/llm/prompt_utils.py +155 -0
- fast_agent/llm/provider/anthropic/anthropic_utils.py +84 -0
- fast_agent/llm/provider/anthropic/cache_planner.py +56 -0
- fast_agent/llm/provider/anthropic/llm_anthropic.py +796 -0
- fast_agent/llm/provider/anthropic/multipart_converter_anthropic.py +462 -0
- fast_agent/llm/provider/bedrock/bedrock_utils.py +218 -0
- fast_agent/llm/provider/bedrock/llm_bedrock.py +2207 -0
- fast_agent/llm/provider/bedrock/multipart_converter_bedrock.py +84 -0
- fast_agent/llm/provider/google/google_converter.py +466 -0
- fast_agent/llm/provider/google/llm_google_native.py +681 -0
- fast_agent/llm/provider/openai/llm_aliyun.py +31 -0
- fast_agent/llm/provider/openai/llm_azure.py +143 -0
- fast_agent/llm/provider/openai/llm_deepseek.py +76 -0
- fast_agent/llm/provider/openai/llm_generic.py +35 -0
- fast_agent/llm/provider/openai/llm_google_oai.py +32 -0
- fast_agent/llm/provider/openai/llm_groq.py +42 -0
- fast_agent/llm/provider/openai/llm_huggingface.py +85 -0
- fast_agent/llm/provider/openai/llm_openai.py +1195 -0
- fast_agent/llm/provider/openai/llm_openai_compatible.py +138 -0
- fast_agent/llm/provider/openai/llm_openrouter.py +45 -0
- fast_agent/llm/provider/openai/llm_tensorzero_openai.py +128 -0
- fast_agent/llm/provider/openai/llm_xai.py +38 -0
- fast_agent/llm/provider/openai/multipart_converter_openai.py +561 -0
- fast_agent/llm/provider/openai/openai_multipart.py +169 -0
- fast_agent/llm/provider/openai/openai_utils.py +67 -0
- fast_agent/llm/provider/openai/responses.py +133 -0
- fast_agent/llm/provider_key_manager.py +139 -0
- fast_agent/llm/provider_types.py +34 -0
- fast_agent/llm/request_params.py +61 -0
- fast_agent/llm/sampling_converter.py +98 -0
- fast_agent/llm/stream_types.py +9 -0
- fast_agent/llm/usage_tracking.py +445 -0
- fast_agent/mcp/__init__.py +56 -0
- fast_agent/mcp/common.py +26 -0
- fast_agent/mcp/elicitation_factory.py +84 -0
- fast_agent/mcp/elicitation_handlers.py +164 -0
- fast_agent/mcp/gen_client.py +83 -0
- fast_agent/mcp/helpers/__init__.py +36 -0
- fast_agent/mcp/helpers/content_helpers.py +352 -0
- fast_agent/mcp/helpers/server_config_helpers.py +25 -0
- fast_agent/mcp/hf_auth.py +147 -0
- fast_agent/mcp/interfaces.py +92 -0
- fast_agent/mcp/logger_textio.py +108 -0
- fast_agent/mcp/mcp_agent_client_session.py +411 -0
- fast_agent/mcp/mcp_aggregator.py +2175 -0
- fast_agent/mcp/mcp_connection_manager.py +723 -0
- fast_agent/mcp/mcp_content.py +262 -0
- fast_agent/mcp/mime_utils.py +108 -0
- fast_agent/mcp/oauth_client.py +509 -0
- fast_agent/mcp/prompt.py +159 -0
- fast_agent/mcp/prompt_message_extended.py +155 -0
- fast_agent/mcp/prompt_render.py +84 -0
- fast_agent/mcp/prompt_serialization.py +580 -0
- fast_agent/mcp/prompts/__init__.py +0 -0
- fast_agent/mcp/prompts/__main__.py +7 -0
- fast_agent/mcp/prompts/prompt_constants.py +18 -0
- fast_agent/mcp/prompts/prompt_helpers.py +238 -0
- fast_agent/mcp/prompts/prompt_load.py +186 -0
- fast_agent/mcp/prompts/prompt_server.py +552 -0
- fast_agent/mcp/prompts/prompt_template.py +438 -0
- fast_agent/mcp/resource_utils.py +215 -0
- fast_agent/mcp/sampling.py +200 -0
- fast_agent/mcp/server/__init__.py +4 -0
- fast_agent/mcp/server/agent_server.py +613 -0
- fast_agent/mcp/skybridge.py +44 -0
- fast_agent/mcp/sse_tracking.py +287 -0
- fast_agent/mcp/stdio_tracking_simple.py +59 -0
- fast_agent/mcp/streamable_http_tracking.py +309 -0
- fast_agent/mcp/tool_execution_handler.py +137 -0
- fast_agent/mcp/tool_permission_handler.py +88 -0
- fast_agent/mcp/transport_tracking.py +634 -0
- fast_agent/mcp/types.py +24 -0
- fast_agent/mcp/ui_agent.py +48 -0
- fast_agent/mcp/ui_mixin.py +209 -0
- fast_agent/mcp_server_registry.py +89 -0
- fast_agent/py.typed +0 -0
- fast_agent/resources/examples/data-analysis/analysis-campaign.py +189 -0
- fast_agent/resources/examples/data-analysis/analysis.py +68 -0
- fast_agent/resources/examples/data-analysis/fastagent.config.yaml +41 -0
- fast_agent/resources/examples/data-analysis/mount-point/WA_Fn-UseC_-HR-Employee-Attrition.csv +1471 -0
- fast_agent/resources/examples/mcp/elicitations/elicitation_account_server.py +88 -0
- fast_agent/resources/examples/mcp/elicitations/elicitation_forms_server.py +297 -0
- fast_agent/resources/examples/mcp/elicitations/elicitation_game_server.py +164 -0
- fast_agent/resources/examples/mcp/elicitations/fastagent.config.yaml +35 -0
- fast_agent/resources/examples/mcp/elicitations/fastagent.secrets.yaml.example +17 -0
- fast_agent/resources/examples/mcp/elicitations/forms_demo.py +107 -0
- fast_agent/resources/examples/mcp/elicitations/game_character.py +65 -0
- fast_agent/resources/examples/mcp/elicitations/game_character_handler.py +256 -0
- fast_agent/resources/examples/mcp/elicitations/tool_call.py +21 -0
- fast_agent/resources/examples/mcp/state-transfer/agent_one.py +18 -0
- fast_agent/resources/examples/mcp/state-transfer/agent_two.py +18 -0
- fast_agent/resources/examples/mcp/state-transfer/fastagent.config.yaml +27 -0
- fast_agent/resources/examples/mcp/state-transfer/fastagent.secrets.yaml.example +15 -0
- fast_agent/resources/examples/researcher/fastagent.config.yaml +61 -0
- fast_agent/resources/examples/researcher/researcher-eval.py +53 -0
- fast_agent/resources/examples/researcher/researcher-imp.py +189 -0
- fast_agent/resources/examples/researcher/researcher.py +36 -0
- fast_agent/resources/examples/tensorzero/.env.sample +2 -0
- fast_agent/resources/examples/tensorzero/Makefile +31 -0
- fast_agent/resources/examples/tensorzero/README.md +56 -0
- fast_agent/resources/examples/tensorzero/agent.py +35 -0
- fast_agent/resources/examples/tensorzero/demo_images/clam.jpg +0 -0
- fast_agent/resources/examples/tensorzero/demo_images/crab.png +0 -0
- fast_agent/resources/examples/tensorzero/demo_images/shrimp.png +0 -0
- fast_agent/resources/examples/tensorzero/docker-compose.yml +105 -0
- fast_agent/resources/examples/tensorzero/fastagent.config.yaml +19 -0
- fast_agent/resources/examples/tensorzero/image_demo.py +67 -0
- fast_agent/resources/examples/tensorzero/mcp_server/Dockerfile +25 -0
- fast_agent/resources/examples/tensorzero/mcp_server/entrypoint.sh +35 -0
- fast_agent/resources/examples/tensorzero/mcp_server/mcp_server.py +31 -0
- fast_agent/resources/examples/tensorzero/mcp_server/pyproject.toml +11 -0
- fast_agent/resources/examples/tensorzero/simple_agent.py +25 -0
- fast_agent/resources/examples/tensorzero/tensorzero_config/system_schema.json +29 -0
- fast_agent/resources/examples/tensorzero/tensorzero_config/system_template.minijinja +11 -0
- fast_agent/resources/examples/tensorzero/tensorzero_config/tensorzero.toml +35 -0
- fast_agent/resources/examples/workflows/agents_as_tools_extended.py +73 -0
- fast_agent/resources/examples/workflows/agents_as_tools_simple.py +50 -0
- fast_agent/resources/examples/workflows/chaining.py +37 -0
- fast_agent/resources/examples/workflows/evaluator.py +77 -0
- fast_agent/resources/examples/workflows/fastagent.config.yaml +26 -0
- fast_agent/resources/examples/workflows/graded_report.md +89 -0
- fast_agent/resources/examples/workflows/human_input.py +28 -0
- fast_agent/resources/examples/workflows/maker.py +156 -0
- fast_agent/resources/examples/workflows/orchestrator.py +70 -0
- fast_agent/resources/examples/workflows/parallel.py +56 -0
- fast_agent/resources/examples/workflows/router.py +69 -0
- fast_agent/resources/examples/workflows/short_story.md +13 -0
- fast_agent/resources/examples/workflows/short_story.txt +19 -0
- fast_agent/resources/setup/.gitignore +30 -0
- fast_agent/resources/setup/agent.py +28 -0
- fast_agent/resources/setup/fastagent.config.yaml +65 -0
- fast_agent/resources/setup/fastagent.secrets.yaml.example +38 -0
- fast_agent/resources/setup/pyproject.toml.tmpl +23 -0
- fast_agent/skills/__init__.py +9 -0
- fast_agent/skills/registry.py +235 -0
- fast_agent/tools/elicitation.py +369 -0
- fast_agent/tools/shell_runtime.py +402 -0
- fast_agent/types/__init__.py +59 -0
- fast_agent/types/conversation_summary.py +294 -0
- fast_agent/types/llm_stop_reason.py +78 -0
- fast_agent/types/message_search.py +249 -0
- fast_agent/ui/__init__.py +38 -0
- fast_agent/ui/console.py +59 -0
- fast_agent/ui/console_display.py +1080 -0
- fast_agent/ui/elicitation_form.py +946 -0
- fast_agent/ui/elicitation_style.py +59 -0
- fast_agent/ui/enhanced_prompt.py +1400 -0
- fast_agent/ui/history_display.py +734 -0
- fast_agent/ui/interactive_prompt.py +1199 -0
- fast_agent/ui/markdown_helpers.py +104 -0
- fast_agent/ui/markdown_truncator.py +1004 -0
- fast_agent/ui/mcp_display.py +857 -0
- fast_agent/ui/mcp_ui_utils.py +235 -0
- fast_agent/ui/mermaid_utils.py +169 -0
- fast_agent/ui/message_primitives.py +50 -0
- fast_agent/ui/notification_tracker.py +205 -0
- fast_agent/ui/plain_text_truncator.py +68 -0
- fast_agent/ui/progress_display.py +10 -0
- fast_agent/ui/rich_progress.py +195 -0
- fast_agent/ui/streaming.py +774 -0
- fast_agent/ui/streaming_buffer.py +449 -0
- fast_agent/ui/tool_display.py +422 -0
- fast_agent/ui/usage_display.py +204 -0
- fast_agent/utils/__init__.py +5 -0
- fast_agent/utils/reasoning_stream_parser.py +77 -0
- fast_agent/utils/time.py +22 -0
- fast_agent/workflow_telemetry.py +261 -0
- fast_agent_mcp-0.4.7.dist-info/METADATA +788 -0
- fast_agent_mcp-0.4.7.dist-info/RECORD +261 -0
- fast_agent_mcp-0.4.7.dist-info/WHEEL +4 -0
- fast_agent_mcp-0.4.7.dist-info/entry_points.txt +7 -0
- fast_agent_mcp-0.4.7.dist-info/licenses/LICENSE +201 -0
|
@@ -0,0 +1,1337 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Base Agent class that implements the AgentProtocol interface.
|
|
3
|
+
|
|
4
|
+
This class provides default implementations of the standard agent methods
|
|
5
|
+
and delegates operations to an attached FastAgentLLMProtocol instance.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
import asyncio
|
|
9
|
+
import fnmatch
|
|
10
|
+
from abc import ABC
|
|
11
|
+
from typing import (
|
|
12
|
+
TYPE_CHECKING,
|
|
13
|
+
Any,
|
|
14
|
+
Callable,
|
|
15
|
+
Iterable,
|
|
16
|
+
Mapping,
|
|
17
|
+
Sequence,
|
|
18
|
+
TypeVar,
|
|
19
|
+
Union,
|
|
20
|
+
)
|
|
21
|
+
|
|
22
|
+
import mcp
|
|
23
|
+
from a2a.types import AgentCard, AgentSkill
|
|
24
|
+
from mcp.types import (
|
|
25
|
+
CallToolResult,
|
|
26
|
+
EmbeddedResource,
|
|
27
|
+
GetPromptResult,
|
|
28
|
+
ListToolsResult,
|
|
29
|
+
PromptMessage,
|
|
30
|
+
ReadResourceResult,
|
|
31
|
+
TextContent,
|
|
32
|
+
Tool,
|
|
33
|
+
)
|
|
34
|
+
from pydantic import BaseModel
|
|
35
|
+
|
|
36
|
+
from fast_agent.agents.agent_types import AgentConfig, AgentType
|
|
37
|
+
from fast_agent.agents.llm_agent import DEFAULT_CAPABILITIES
|
|
38
|
+
from fast_agent.agents.tool_agent import ToolAgent
|
|
39
|
+
from fast_agent.constants import HUMAN_INPUT_TOOL_NAME
|
|
40
|
+
from fast_agent.core.exceptions import PromptExitError
|
|
41
|
+
from fast_agent.core.logging.logger import get_logger
|
|
42
|
+
from fast_agent.interfaces import FastAgentLLMProtocol
|
|
43
|
+
from fast_agent.mcp.common import (
|
|
44
|
+
create_namespaced_name,
|
|
45
|
+
get_resource_name,
|
|
46
|
+
get_server_name,
|
|
47
|
+
is_namespaced_name,
|
|
48
|
+
)
|
|
49
|
+
from fast_agent.mcp.mcp_aggregator import MCPAggregator, NamespacedTool, ServerStatus
|
|
50
|
+
from fast_agent.skills.registry import format_skills_for_prompt
|
|
51
|
+
from fast_agent.tools.elicitation import (
|
|
52
|
+
get_elicitation_tool,
|
|
53
|
+
run_elicitation_form,
|
|
54
|
+
set_elicitation_input_callback,
|
|
55
|
+
)
|
|
56
|
+
from fast_agent.tools.shell_runtime import ShellRuntime
|
|
57
|
+
from fast_agent.types import PromptMessageExtended, RequestParams
|
|
58
|
+
from fast_agent.ui import console
|
|
59
|
+
|
|
60
|
+
# Define a TypeVar for models
|
|
61
|
+
ModelT = TypeVar("ModelT", bound=BaseModel)
|
|
62
|
+
ItemT = TypeVar("ItemT")
|
|
63
|
+
|
|
64
|
+
LLM = TypeVar("LLM", bound=FastAgentLLMProtocol)
|
|
65
|
+
|
|
66
|
+
if TYPE_CHECKING:
|
|
67
|
+
from rich.text import Text
|
|
68
|
+
|
|
69
|
+
from fast_agent.context import Context
|
|
70
|
+
from fast_agent.llm.usage_tracking import UsageAccumulator
|
|
71
|
+
from fast_agent.skills import SkillManifest
|
|
72
|
+
|
|
73
|
+
|
|
74
|
+
class McpAgent(ABC, ToolAgent):
|
|
75
|
+
"""
|
|
76
|
+
A base Agent class that implements the AgentProtocol interface.
|
|
77
|
+
|
|
78
|
+
This class provides default implementations of the standard agent methods
|
|
79
|
+
and delegates LLM operations to an attached FastAgentLLMProtocol instance.
|
|
80
|
+
"""
|
|
81
|
+
|
|
82
|
+
    def __init__(
        self,
        config: AgentConfig,
        connection_persistence: bool = True,
        context: "Context | None" = None,
        **kwargs,
    ) -> None:
        """
        Build the MCP-capable agent around a ToolAgent core.

        Args:
            config: Agent configuration (servers, instruction, tool filters, ...).
            connection_persistence: Whether the aggregator keeps MCP server
                connections open between calls.
            context: Optional application context supplying executor, config,
                skill registry and shell-runtime flag.
            **kwargs: Forwarded to both ToolAgent and MCPAggregator.
        """
        super().__init__(
            config=config,
            context=context,
            **kwargs,
        )

        # Create aggregator with composition (not inheritance): it owns all
        # MCP server connections for this agent.
        self._aggregator = MCPAggregator(
            server_names=self.config.servers,
            connection_persistence=connection_persistence,
            name=self.config.name,
            context=context,
            config=self.config,  # Pass the full config for access to elicitation_handler
            **kwargs,
        )

        self.instruction = self.config.instruction
        self.executor = context.executor if context else None
        self.logger = get_logger(f"{__name__}.{self._name}")
        # Skill manifests: per-agent config wins; otherwise fall back to the
        # context-wide skill registry (best-effort — failures yield no skills).
        manifests: list[SkillManifest] = list(getattr(self.config, "skill_manifests", []) or [])
        if not manifests and context and getattr(context, "skill_registry", None):
            try:
                manifests = list(context.skill_registry.load_manifests())  # type: ignore[assignment]
            except Exception:
                manifests = []

        self._skill_manifests = list(manifests)
        self._skill_map: dict[str, SkillManifest] = {
            manifest.name: manifest for manifest in manifests
        }
        # Guards the one-time "{{agentSkills}} missing" warning in
        # _apply_instruction_templates.
        self._agent_skills_warning_shown = False
        shell_flag_requested = bool(context and getattr(context, "shell_runtime", False))
        skills_configured = bool(self._skill_manifests)
        # Human-readable reason shown when the shell runtime is active;
        # None means the shell runtime stays disabled.
        self._shell_runtime_activation_reason: str | None = None

        if shell_flag_requested and skills_configured:
            self._shell_runtime_activation_reason = (
                "via --shell flag and agent skills configuration"
            )
        elif shell_flag_requested:
            self._shell_runtime_activation_reason = "via --shell flag"
        elif skills_configured:
            self._shell_runtime_activation_reason = "because agent skills are configured"

        # Get timeout configuration from context (defaults used when absent)
        timeout_seconds = 90  # default
        warning_interval_seconds = 30  # default
        if context and context.config:
            shell_config = getattr(context.config, "shell_execution", None)
            if shell_config:
                timeout_seconds = getattr(shell_config, "timeout_seconds", 90)
                warning_interval_seconds = getattr(shell_config, "warning_interval_seconds", 30)

        # Derive skills directory from this agent's manifests (respects per-agent config)
        skills_directory = None
        if self._skill_manifests:
            # Get the skills directory from the first manifest's path
            # Path structure: .fast-agent/skills/skill-name/SKILL.md
            # So we need parent.parent of the manifest path
            first_manifest = self._skill_manifests[0]
            if first_manifest.path:
                skills_directory = first_manifest.path.parent.parent

        self._shell_runtime = ShellRuntime(
            self._shell_runtime_activation_reason,
            self.logger,
            timeout_seconds=timeout_seconds,
            warning_interval_seconds=warning_interval_seconds,
            skills_directory=skills_directory,
        )
        self._shell_runtime_enabled = self._shell_runtime.enabled
        # UI labels describing how shell access was granted (empty when off).
        self._shell_access_modes: tuple[str, ...] = ()
        if self._shell_runtime_enabled:
            modes: list[str] = ["[red]direct[/red]"]
            if skills_configured:
                modes.append("skills")
            if shell_flag_requested:
                modes.append("switch")
            self._shell_access_modes = tuple(modes)
        self._bash_tool = self._shell_runtime.tool
        if self._shell_runtime_enabled:
            self._shell_runtime.announce()

        # Allow external runtime injection (e.g., for ACP terminal support)
        self._external_runtime = None

        # Allow filesystem runtime injection (e.g., for ACP filesystem support)
        self._filesystem_runtime = None

        # Store the default request params from config
        self._default_request_params = self.config.default_request_params

        # set with the "attach" method
        self._llm: FastAgentLLMProtocol | None = None

        # Instantiate human input tool once if enabled in config
        # (best-effort: failures simply leave the tool unavailable).
        self._human_input_tool: Tool | None = None
        if self.config.human_input:
            try:
                self._human_input_tool = get_elicitation_tool()
            except Exception:
                self._human_input_tool = None

        # Register the MCP UI handler as the elicitation callback so fast_agent.tools can call it
        # without importing MCP types. This avoids circular imports and ensures the callback is ready.
        try:
            from fast_agent.human_input.elicitation_handler import elicitation_input_callback
            from fast_agent.human_input.types import HumanInputRequest

            async def _mcp_elicitation_adapter(
                request_payload: dict,
                agent_name: str | None = None,
                server_name: str | None = None,
                server_info: dict | None = None,
            ) -> str:
                # Adapter: dict payload -> typed request -> string response.
                req = HumanInputRequest(**request_payload)
                resp = await elicitation_input_callback(
                    request=req,
                    agent_name=agent_name,
                    server_name=server_name,
                    server_info=server_info,
                )
                return resp.response if isinstance(resp.response, str) else str(resp.response)

            set_elicitation_input_callback(_mcp_elicitation_adapter)
        except Exception:
            # If UI handler import fails, leave callback unset; tool will error with a clear message
            pass
|
217
|
+
|
|
218
|
+
async def __aenter__(self):
|
|
219
|
+
"""Initialize the agent and its MCP aggregator."""
|
|
220
|
+
await self._aggregator.__aenter__()
|
|
221
|
+
return self
|
|
222
|
+
|
|
223
|
+
async def __aexit__(self, exc_type, exc_val, exc_tb):
|
|
224
|
+
"""Clean up the agent and its MCP aggregator."""
|
|
225
|
+
await self._aggregator.__aexit__(exc_type, exc_val, exc_tb)
|
|
226
|
+
|
|
227
|
+
    async def initialize(self) -> None:
        """
        Initialize the agent and connect to the MCP servers.
        NOTE: This method is called automatically when the agent is used as an async context manager.
        """
        # Connect servers first; template substitution below needs live servers
        # to fetch their instructions.
        await self.__aenter__()

        # Apply template substitution to the instruction with server instructions
        await self._apply_instruction_templates()
|
|
236
|
+
|
|
237
|
+
async def shutdown(self) -> None:
|
|
238
|
+
"""
|
|
239
|
+
Shutdown the agent and close all MCP server connections.
|
|
240
|
+
NOTE: This method is called automatically when the agent is used as an async context manager.
|
|
241
|
+
"""
|
|
242
|
+
await self._aggregator.close()
|
|
243
|
+
|
|
244
|
+
async def get_server_status(self) -> dict[str, ServerStatus]:
|
|
245
|
+
"""Expose server status details for UI and diagnostics consumers."""
|
|
246
|
+
if not self._aggregator:
|
|
247
|
+
return {}
|
|
248
|
+
return await self._aggregator.collect_server_status()
|
|
249
|
+
|
|
250
|
+
    @property
    def aggregator(self) -> MCPAggregator:
        """Expose the MCP aggregator for UI integrations.

        Read-only access to the aggregator created in __init__.
        """
        return self._aggregator
|
|
254
|
+
|
|
255
|
+
    @property
    def initialized(self) -> bool:
        """Check if both the agent and aggregator are initialized."""
        # Note: `and` short-circuits, so the aggregator flag is only read
        # when the agent's own flag is truthy.
        return self._initialized and self._aggregator.initialized
|
|
259
|
+
|
|
260
|
+
@initialized.setter
|
|
261
|
+
def initialized(self, value: bool) -> None:
|
|
262
|
+
"""Set the initialized state of both agent and aggregator."""
|
|
263
|
+
self._initialized = value
|
|
264
|
+
self._aggregator.initialized = value
|
|
265
|
+
|
|
266
|
+
    async def _apply_instruction_templates(self) -> None:
        """
        Apply template substitution to the instruction, including server instructions.
        This is called during initialization after servers are connected.

        Handles two placeholders:
          * {{serverInstructions}} — replaced with formatted per-server
            instruction blocks fetched from the aggregator.
          * {{agentSkills}} — replaced with the formatted skill list; if skills
            are configured but the placeholder is absent, a one-time warning is
            logged and (best-effort) printed to the console.
        Finally syncs the resolved instruction into the default request params.
        """
        if not self.instruction:
            return

        # Gather server instructions if the template includes {{serverInstructions}}
        if "{{serverInstructions}}" in self.instruction:
            try:
                instructions_data = await self._aggregator.get_server_instructions()
                server_instructions = self._format_server_instructions(instructions_data)
            except Exception as e:
                # Degrade gracefully: a failed fetch just removes the placeholder.
                self.logger.warning(f"Failed to get server instructions: {e}")
                server_instructions = ""

            # Replace the template variable
            self.instruction = self.instruction.replace(
                "{{serverInstructions}}", server_instructions
            )

        skills_placeholder_present = "{{agentSkills}}" in self.instruction

        if skills_placeholder_present:
            agent_skills = format_skills_for_prompt(self._skill_manifests)
            self.instruction = self.instruction.replace("{{agentSkills}}", agent_skills)
            self._agent_skills_warning_shown = True
        elif self._skill_manifests and not self._agent_skills_warning_shown:
            # Skills configured but no placeholder: warn exactly once.
            warning_message = (
                "Agent skills are configured but the system prompt does not include {{agentSkills}}. "
                "Skill descriptions will not be added to the system prompt."
            )
            self.logger.warning(warning_message)
            try:
                console.console.print(f"[yellow]{warning_message}[/yellow]")
            except Exception:  # pragma: no cover - console fallback
                pass
            self._agent_skills_warning_shown = True

        # Update default request params to match
        if self._default_request_params:
            self._default_request_params.systemPrompt = self.instruction

        self.logger.debug(f"Applied instruction templates for agent {self._name}")
|
|
311
|
+
|
|
312
|
+
def _format_server_instructions(
|
|
313
|
+
self, instructions_data: dict[str, tuple[str | None, list[str]]]
|
|
314
|
+
) -> str:
|
|
315
|
+
"""
|
|
316
|
+
Format server instructions with XML tags and tool lists.
|
|
317
|
+
|
|
318
|
+
Args:
|
|
319
|
+
instructions_data: Dict mapping server name to (instructions, tool_names)
|
|
320
|
+
|
|
321
|
+
Returns:
|
|
322
|
+
Formatted string with server instructions
|
|
323
|
+
"""
|
|
324
|
+
if not instructions_data:
|
|
325
|
+
return ""
|
|
326
|
+
|
|
327
|
+
formatted_parts = []
|
|
328
|
+
for server_name, (instructions, tool_names) in instructions_data.items():
|
|
329
|
+
# Skip servers with no instructions
|
|
330
|
+
if instructions is None:
|
|
331
|
+
continue
|
|
332
|
+
|
|
333
|
+
# Format tool names with server prefix using the new namespacing convention
|
|
334
|
+
prefixed_tools = [create_namespaced_name(server_name, tool) for tool in tool_names]
|
|
335
|
+
tools_list = ", ".join(prefixed_tools) if prefixed_tools else "No tools available"
|
|
336
|
+
|
|
337
|
+
formatted_parts.append(
|
|
338
|
+
f'<mcp-server name="{server_name}">\n'
|
|
339
|
+
f"<tools>{tools_list}</tools>\n"
|
|
340
|
+
f"<instructions>\n{instructions}\n</instructions>\n"
|
|
341
|
+
f"</mcp-server>"
|
|
342
|
+
)
|
|
343
|
+
|
|
344
|
+
if formatted_parts:
|
|
345
|
+
return "\n\n".join(formatted_parts)
|
|
346
|
+
return ""
|
|
347
|
+
|
|
348
|
+
async def __call__(
|
|
349
|
+
self,
|
|
350
|
+
message: Union[
|
|
351
|
+
str,
|
|
352
|
+
PromptMessage,
|
|
353
|
+
PromptMessageExtended,
|
|
354
|
+
Sequence[Union[str, PromptMessage, PromptMessageExtended]],
|
|
355
|
+
],
|
|
356
|
+
) -> str:
|
|
357
|
+
return await self.send(message)
|
|
358
|
+
|
|
359
|
+
def _matches_pattern(self, name: str, pattern: str) -> bool:
|
|
360
|
+
"""
|
|
361
|
+
Check if a name matches a pattern for a specific server.
|
|
362
|
+
|
|
363
|
+
Args:
|
|
364
|
+
name: The name to match (could be tool name, resource URI, or prompt name)
|
|
365
|
+
pattern: The pattern to match against (e.g., "add", "math*", "resource://math/*")
|
|
366
|
+
|
|
367
|
+
Returns:
|
|
368
|
+
True if the name matches the pattern
|
|
369
|
+
"""
|
|
370
|
+
|
|
371
|
+
# For resources and prompts, match directly against the pattern
|
|
372
|
+
return fnmatch.fnmatch(name, pattern)
|
|
373
|
+
|
|
374
|
+
def _filter_namespaced_tools(self, tools: Sequence[Tool] | None) -> list[Tool]:
|
|
375
|
+
"""
|
|
376
|
+
Apply configuration-based filtering to a collection of tools.
|
|
377
|
+
"""
|
|
378
|
+
if not tools:
|
|
379
|
+
return []
|
|
380
|
+
|
|
381
|
+
return [
|
|
382
|
+
tool
|
|
383
|
+
for tool in tools
|
|
384
|
+
if is_namespaced_name(tool.name) and self._tool_matches_filter(tool.name)
|
|
385
|
+
]
|
|
386
|
+
|
|
387
|
+
def _filter_server_collections(
|
|
388
|
+
self,
|
|
389
|
+
items_by_server: Mapping[str, Sequence[ItemT]],
|
|
390
|
+
filters: Mapping[str, Sequence[str]] | None,
|
|
391
|
+
value_getter: Callable[[ItemT], str],
|
|
392
|
+
) -> dict[str, list[ItemT]]:
|
|
393
|
+
"""
|
|
394
|
+
Apply server-specific filters to a mapping of collections.
|
|
395
|
+
"""
|
|
396
|
+
if not items_by_server:
|
|
397
|
+
return {}
|
|
398
|
+
|
|
399
|
+
if not filters:
|
|
400
|
+
return {server: list(items) for server, items in items_by_server.items()}
|
|
401
|
+
|
|
402
|
+
filtered: dict[str, list[ItemT]] = {}
|
|
403
|
+
for server, items in items_by_server.items():
|
|
404
|
+
patterns = filters.get(server)
|
|
405
|
+
if patterns is None:
|
|
406
|
+
filtered[server] = list(items)
|
|
407
|
+
continue
|
|
408
|
+
|
|
409
|
+
matches = [
|
|
410
|
+
item
|
|
411
|
+
for item in items
|
|
412
|
+
if any(self._matches_pattern(value_getter(item), pattern) for pattern in patterns)
|
|
413
|
+
]
|
|
414
|
+
if matches:
|
|
415
|
+
filtered[server] = matches
|
|
416
|
+
|
|
417
|
+
return filtered
|
|
418
|
+
|
|
419
|
+
def _filter_server_tools(self, tools: list[Tool] | None, namespace: str) -> list[Tool]:
|
|
420
|
+
"""
|
|
421
|
+
Filter items for a Server (not namespaced)
|
|
422
|
+
"""
|
|
423
|
+
if not tools:
|
|
424
|
+
return []
|
|
425
|
+
|
|
426
|
+
filters = self.config.tools
|
|
427
|
+
if not filters:
|
|
428
|
+
return list(tools)
|
|
429
|
+
|
|
430
|
+
if namespace not in filters:
|
|
431
|
+
return list(tools)
|
|
432
|
+
|
|
433
|
+
filtered = self._filter_server_collections({namespace: tools}, filters, lambda tool: tool.name)
|
|
434
|
+
return filtered.get(namespace, [])
|
|
435
|
+
|
|
436
|
+
async def _get_filtered_mcp_tools(self) -> list[Tool]:
    """Fetch the aggregator's tool list and apply this agent's configured filters.

    Returns:
        List of Tool objects that pass the namespaced-tool filter.
    """
    listing = await self._aggregator.list_tools()
    return self._filter_namespaced_tools(listing.tools)
|
|
445
|
+
|
|
446
|
+
def _tool_matches_filter(self, packed_name: str) -> bool:
    """Check whether a namespaced tool name passes the agent's tool filters.

    Args:
        packed_name: Fully namespaced tool name (server + tool).

    Returns:
        True when the tool's server has no filter configured, or when the
        tool's local name matches at least one configured pattern.
    """
    server = get_server_name(packed_name)
    filters = self.config.tools or {}
    if server not in filters:
        # Servers with no configured filter expose every tool.
        return True
    local_name = get_resource_name(packed_name)
    tool_patterns = filters.get(server, [])
    return any(self._matches_pattern(local_name, pat) for pat in tool_patterns)
|
|
460
|
+
|
|
461
|
+
def set_external_runtime(self, runtime) -> None:
    """
    Set an external runtime (e.g., ACPTerminalRuntime) to replace ShellRuntime.

    This allows ACP mode to inject terminal support that uses the client's
    terminal capabilities instead of local process execution.

    Args:
        runtime: Runtime instance with tool and execute() method
    """
    self._external_runtime = runtime
    runtime_cls = type(runtime).__name__
    self.logger.info(
        f"External runtime injected: {runtime_cls}",
        runtime_type=runtime_cls,
    )
|
|
476
|
+
|
|
477
|
+
def set_filesystem_runtime(self, runtime) -> None:
    """
    Set a filesystem runtime (e.g., ACPFilesystemRuntime) to add filesystem tools.

    This allows ACP mode to inject filesystem support that uses the client's
    filesystem capabilities for reading and writing files.

    Args:
        runtime: Runtime instance with tools property and read_text_file/write_text_file methods
    """
    self._filesystem_runtime = runtime
    runtime_cls = type(runtime).__name__
    self.logger.info(
        f"Filesystem runtime injected: {runtime_cls}",
        runtime_type=runtime_cls,
    )
|
|
492
|
+
|
|
493
|
+
async def call_tool(
    self, name: str, arguments: dict[str, Any] | None = None, tool_use_id: str | None = None
) -> CallToolResult:
    """
    Call a tool by name with the given arguments.

    Routing priority: external runtime (e.g. ACP terminal) -> filesystem
    runtime -> shell runtime -> human input tool -> locally registered
    execution tools -> MCP aggregator.

    Args:
        name: Name of the tool to call
        arguments: Arguments to pass to the tool
        tool_use_id: LLM's tool use ID (for matching with stream events)

    Returns:
        Result of the tool call
    """
    # Check external runtime first (e.g., ACP terminal); it takes priority
    # over the local shell runtime when injected.
    if self._external_runtime and hasattr(self._external_runtime, "tool"):
        if self._external_runtime.tool and name == self._external_runtime.tool.name:
            return await self._external_runtime.execute(arguments, tool_use_id)

    # Check filesystem runtime (e.g., ACP filesystem)
    if self._filesystem_runtime and hasattr(self._filesystem_runtime, "tools"):
        for tool in self._filesystem_runtime.tools:
            if tool.name == name:
                # Route to the appropriate method based on tool name.
                # NOTE(review): a matching filesystem tool with any other name
                # falls through to the shell/aggregator routing below — confirm
                # that is intentional.
                if name == "read_text_file":
                    return await self._filesystem_runtime.read_text_file(arguments, tool_use_id)
                elif name == "write_text_file":
                    return await self._filesystem_runtime.write_text_file(arguments, tool_use_id)

    # Fall back to shell runtime (local process execution; no tool_use_id).
    if self._shell_runtime.tool and name == self._shell_runtime.tool.name:
        return await self._shell_runtime.execute(arguments)

    if name == HUMAN_INPUT_TOOL_NAME:
        # Call the elicitation-backed human input tool
        return await self._call_human_input_tool(arguments)

    if name in self._execution_tools:
        # Locally registered (non-MCP) tool: delegate to the parent class dispatch.
        return await super().call_tool(name, arguments)
    else:
        # Everything else is treated as an MCP tool and routed via the aggregator.
        return await self._aggregator.call_tool(name, arguments, tool_use_id)
|
|
534
|
+
|
|
535
|
+
async def _call_human_input_tool(
    self, arguments: dict[str, Any] | None = None
) -> CallToolResult:
    """
    Handle human input via an elicitation form.

    Expected inputs:
    - Either an object with optional 'message' and a 'schema' JSON Schema (object), or
    - The JSON Schema (object) itself as the arguments.

    Constraints:
    - No more than 7 top-level properties are allowed in the schema.

    Args:
        arguments: Form specification as described above.

    Returns:
        CallToolResult carrying the user's JSON response, a decline/cancel
        notice, or an error description (isError=True on timeout/failure).

    Raises:
        PromptExitError: Propagated so the caller can stop the prompt loop.
    """
    try:
        # Run via shared tool runner
        resp_text = await run_elicitation_form(arguments, agent_name=self._name)
        # Sentinel strings encode the user's disposition rather than form data.
        if resp_text == "__DECLINED__":
            return CallToolResult(
                isError=False,
                content=[TextContent(type="text", text="The Human declined the input request")],
            )
        if resp_text in ("__CANCELLED__", "__DISABLE_SERVER__"):
            return CallToolResult(
                isError=False,
                content=[
                    TextContent(type="text", text="The Human cancelled the input request")
                ],
            )
        # Success path: return the (JSON) response as-is
        return CallToolResult(
            isError=False,
            content=[TextContent(type="text", text=resp_text)],
        )

    except PromptExitError:
        # A deliberate user exit must propagate, not be converted to a result.
        raise
    except asyncio.TimeoutError as e:
        return CallToolResult(
            isError=True,
            content=[
                TextContent(
                    type="text",
                    text=f"Error: Human input request timed out: {str(e)}",
                )
            ],
        )
    except Exception as e:
        import traceback

        # Use the logger rather than print(): stdout may carry a protocol
        # stream (e.g. ACP over stdio), so stray prints can corrupt it.
        self.logger.error(f"Error in _call_human_input_tool: {traceback.format_exc()}")
        return CallToolResult(
            isError=True,
            content=[TextContent(type="text", text=f"Error requesting human input: {str(e)}")],
        )
|
|
589
|
+
|
|
590
|
+
async def get_prompt(
    self,
    prompt_name: str,
    arguments: dict[str, str] | None = None,
    namespace: str | None = None,
    server_name: str | None = None,
) -> GetPromptResult:
    """
    Get a prompt from a server.

    Args:
        prompt_name: Name of the prompt, optionally namespaced
        arguments: Optional dictionary of arguments to pass to the prompt template
        namespace: Optional namespace (server) to get the prompt from
        server_name: Fallback used only when ``namespace`` is not given

    Returns:
        GetPromptResult containing the prompt information
    """
    resolved = server_name if namespace is None else namespace
    return await self._aggregator.get_prompt(prompt_name, arguments, resolved)
|
|
610
|
+
|
|
611
|
+
async def apply_prompt(
    self,
    prompt: Union[str, GetPromptResult],
    arguments: dict[str, str] | None = None,
    as_template: bool = False,
    namespace: str | None = None,
    **_: Any,
) -> str:
    """
    Apply an MCP Server Prompt by name or GetPromptResult and return the assistant's response.
    Will search all available servers for the prompt if not namespaced and no server_name provided.

    If the last message in the prompt is from a user, this will automatically
    generate an assistant response to ensure we always end with an assistant message.

    Args:
        prompt: The name of the prompt to apply OR a GetPromptResult object
        arguments: Optional dictionary of string arguments to pass to the prompt template
        as_template: If True, store as persistent template (always included in context)
        namespace: Optional namespace/server to resolve the prompt from
        **_: Extra keyword arguments are accepted and ignored for compatibility.

    Returns:
        The assistant's response or error message
    """

    # Handle both string and GetPromptResult inputs
    if isinstance(prompt, str):
        prompt_name = prompt
        # Get the prompt - this will search all servers if needed
        self.logger.debug(f"Loading prompt '{prompt_name}'")
        prompt_result: GetPromptResult = await self.get_prompt(
            prompt_name, arguments, namespace
        )

        # Lookup failures are reported as a string, not raised.
        if not prompt_result or not prompt_result.messages:
            error_msg = f"Prompt '{prompt_name}' could not be found or contains no messages"
            self.logger.warning(error_msg)
            return error_msg

        # Get the display name (namespaced version)
        namespaced_name = getattr(prompt_result, "namespaced_name", prompt_name)
    else:
        # prompt is a GetPromptResult object; `arguments` is unused on this path.
        prompt_result = prompt
        if not prompt_result or not prompt_result.messages:
            error_msg = "Provided GetPromptResult contains no messages"
            self.logger.warning(error_msg)
            return error_msg

        # Use a reasonable display name
        namespaced_name = getattr(prompt_result, "namespaced_name", "provided_prompt")

    self.logger.debug(f"Using prompt '{namespaced_name}'")

    # Convert prompt messages to multipart format using the safer method
    multipart_messages = PromptMessageExtended.from_get_prompt_result(prompt_result)

    if as_template:
        # Use apply_prompt_template to store as persistent prompt messages
        return await self.apply_prompt_template(prompt_result, namespaced_name)
    else:
        # Always call generate to ensure LLM implementations can handle prompt templates
        # This is critical for stateful LLMs like PlaybackLLM
        response = await self.generate(multipart_messages, None)
        return response.first_text()
|
|
676
|
+
|
|
677
|
+
async def get_embedded_resources(
    self, resource_uri: str, server_name: str | None = None
) -> list[EmbeddedResource]:
    """
    Fetch a resource from an MCP server and wrap each of its content parts
    as an EmbeddedResource ready for use in prompts.

    Args:
        resource_uri: URI of the resource to retrieve
        server_name: Optional name of the MCP server to retrieve the resource from

    Returns:
        List of EmbeddedResource objects ready to use in a PromptMessageExtended

    Raises:
        ValueError: If the server doesn't exist or the resource couldn't be found
    """
    fetched: ReadResourceResult = await self._aggregator.get_resource(resource_uri, server_name)

    # One EmbeddedResource per content part returned by the server.
    return [
        EmbeddedResource(type="resource", resource=part, annotations=None)
        for part in fetched.contents
    ]
|
|
705
|
+
|
|
706
|
+
async def get_resource(
    self, resource_uri: str, namespace: str | None = None, server_name: str | None = None
) -> ReadResourceResult:
    """
    Get a resource from an MCP server.

    Args:
        resource_uri: URI of the resource to retrieve
        namespace: Optional namespace (server) to retrieve the resource from
        server_name: Fallback used only when ``namespace`` is not given

    Returns:
        ReadResourceResult containing the resource data

    Raises:
        ValueError: If the server doesn't exist or the resource couldn't be found
    """
    resolved = server_name if namespace is None else namespace
    return await self._aggregator.get_resource(resource_uri, resolved)
|
|
726
|
+
|
|
727
|
+
async def with_resource(
    self,
    prompt_content: Union[str, PromptMessage, PromptMessageExtended],
    resource_uri: str,
    namespace: str | None = None,
    server_name: str | None = None,
) -> str:
    """
    Build a prompt from the given content plus an MCP resource and send it to the agent.

    Args:
        prompt_content: Content in various formats:
            - String: becomes a user message followed by the resources
            - PromptMessage: converted to PromptMessageExtended
            - PromptMessageExtended: resources are appended to it in place
        resource_uri: URI of the resource to retrieve
        namespace: Optional namespace (server) to retrieve the resource from
        server_name: Fallback used only when ``namespace`` is not given

    Returns:
        The agent's response as a string
    """
    resolved_server = namespace if namespace is not None else server_name
    resources: list[EmbeddedResource] = await self.get_embedded_resources(
        resource_uri, resolved_server
    )

    message: PromptMessageExtended
    if isinstance(prompt_content, str):
        # Wrap plain text as a user message followed by the resources.
        message = PromptMessageExtended(
            role="user",
            content=[TextContent(type="text", text=prompt_content), *resources],
        )
    elif isinstance(prompt_content, PromptMessage):
        # Promote to the extended type, keeping the original role.
        message = PromptMessageExtended(
            role=prompt_content.role,
            content=[prompt_content.content, *resources],
        )
    elif isinstance(prompt_content, PromptMessageExtended):
        # Append resources to the caller's message in place.
        message = prompt_content
        message.content.extend(resources)
    else:
        raise TypeError(
            "prompt_content must be a string, PromptMessage, or PromptMessageExtended"
        )

    reply: PromptMessageExtended = await self.generate([message], None)
    return reply.first_text()
|
|
776
|
+
|
|
777
|
+
async def run_tools(self, request: PromptMessageExtended) -> PromptMessageExtended:
    """Override ToolAgent's run_tools to use MCP tools via aggregator.

    Executes every tool call on the request (in order), collecting per-call
    results and timing/transport metadata, and stops early if a requested
    tool is unavailable.
    """
    import time

    if not request.tool_calls:
        self.logger.warning("No tool calls found in request", data=request)
        return PromptMessageExtended(role="user", tool_results={})

    tool_results: dict[str, CallToolResult] = {}
    # Per-call metadata: each value holds "timing_ms" and "transport_channel"
    # (see where entries are stored below), hence dict values — not bare floats.
    tool_timings: dict[str, dict[str, Any]] = {}
    tool_loop_error: str | None = None

    # Cache available tool names exactly as advertised to the LLM for display/highlighting
    try:
        listed_tools = await self.list_tools()
    except Exception as exc:  # pragma: no cover - defensive guard, should not happen
        self.logger.warning(f"Failed to list tools before execution: {exc}")
        listed_tools = ListToolsResult(tools=[])

    available_tools: list[str] = []
    seen_tool_names: set[str] = set()
    for tool_schema in listed_tools.tools:
        if tool_schema.name in seen_tool_names:
            continue
        available_tools.append(tool_schema.name)
        seen_tool_names.add(tool_schema.name)

    # Cache namespaced tools for routing/metadata
    namespaced_tools = self._aggregator._namespaced_tool_map

    # Process each tool call using our aggregator
    for correlation_id, tool_request in request.tool_calls.items():
        tool_name = tool_request.params.name
        tool_args = tool_request.params.arguments or {}
        # correlation_id is the tool_use_id from the LLM

        # Determine which tool we are calling (namespaced MCP, local, etc.)
        namespaced_tool = namespaced_tools.get(tool_name)
        local_tool = self._execution_tools.get(tool_name)
        candidate_namespaced_tool = None
        if namespaced_tool is None and local_tool is None:
            # Fall back to matching the bare (un-namespaced) tool name against
            # every known namespaced tool.
            candidate_namespaced_tool = next(
                (
                    candidate
                    for candidate in namespaced_tools.values()
                    if candidate.tool.name == tool_name
                ),
                None,
            )

        # Select display/highlight names
        display_tool_name = (
            (namespaced_tool or candidate_namespaced_tool).namespaced_tool_name
            if (namespaced_tool or candidate_namespaced_tool) is not None
            else tool_name
        )

        # Check if tool is available from various sources
        is_external_runtime_tool = (
            self._external_runtime
            and hasattr(self._external_runtime, "tool")
            and self._external_runtime.tool
            and tool_name == self._external_runtime.tool.name
        )
        is_filesystem_runtime_tool = (
            self._filesystem_runtime
            and hasattr(self._filesystem_runtime, "tools")
            and any(tool.name == tool_name for tool in self._filesystem_runtime.tools)
        )

        tool_available = (
            tool_name == HUMAN_INPUT_TOOL_NAME
            or (self._shell_runtime.tool and tool_name == self._shell_runtime.tool.name)
            or is_external_runtime_tool
            or is_filesystem_runtime_tool
            or namespaced_tool is not None
            or local_tool is not None
            or candidate_namespaced_tool is not None
        )

        if not tool_available:
            # Abort the whole loop on the first unknown tool; remaining calls
            # are not attempted.
            error_message = f"Tool '{display_tool_name}' is not available"
            self.logger.error(error_message)
            tool_loop_error = self._mark_tool_loop_error(
                correlation_id=correlation_id,
                error_message=error_message,
                tool_results=tool_results,
            )
            break

        # Runtime-specific metadata shown alongside the tool call (if any).
        metadata: dict[str, Any] | None = None
        if (
            self._shell_runtime_enabled
            and self._shell_runtime.tool
            and tool_name == self._shell_runtime.tool.name
        ):
            metadata = self._shell_runtime.metadata(tool_args.get("command"))
        elif is_external_runtime_tool and hasattr(self._external_runtime, "metadata"):
            metadata = self._external_runtime.metadata()
        elif is_filesystem_runtime_tool and hasattr(self._filesystem_runtime, "metadata"):
            metadata = self._filesystem_runtime.metadata()

        display_tool_name, bottom_items, highlight_index = self._prepare_tool_display(
            tool_name=tool_name,
            namespaced_tool=namespaced_tool,
            candidate_namespaced_tool=candidate_namespaced_tool,
            local_tool=local_tool,
            fallback_order=self._unique_preserving_order(available_tools),
        )

        self.display.show_tool_call(
            name=self._name,
            tool_args=tool_args,
            bottom_items=bottom_items,
            tool_name=display_tool_name,
            highlight_index=highlight_index,
            max_item_length=12,
            metadata=metadata,
        )

        try:
            # Track timing for tool execution
            start_time = time.perf_counter()
            result = await self.call_tool(tool_name, tool_args, correlation_id)
            end_time = time.perf_counter()
            duration_ms = round((end_time - start_time) * 1000, 2)

            tool_results[correlation_id] = result
            # Store timing and transport channel info
            tool_timings[correlation_id] = {
                "timing_ms": duration_ms,
                "transport_channel": getattr(result, "transport_channel", None)
            }

            # Show tool result (like ToolAgent does)
            skybridge_config = None
            skybridge_tool = namespaced_tool or candidate_namespaced_tool
            if skybridge_tool:
                skybridge_config = await self._aggregator.get_skybridge_config(
                    skybridge_tool.server_name
                )

            if not getattr(result, "_suppress_display", False):
                self.display.show_tool_result(
                    name=self._name,
                    result=result,
                    tool_name=display_tool_name,
                    skybridge_config=skybridge_config,
                    timing_ms=duration_ms,  # Use local duration_ms variable for display
                )

            self.logger.debug(f"MCP tool {display_tool_name} executed successfully")
        except Exception as e:
            # A failing tool is converted into an error result; the loop
            # continues with the remaining tool calls.
            self.logger.error(f"MCP tool {display_tool_name} failed: {e}")
            error_result = CallToolResult(
                content=[TextContent(type="text", text=f"Error: {str(e)}")],
                isError=True,
            )
            tool_results[correlation_id] = error_result

            # Show error result too (no need for skybridge config on errors)
            self.display.show_tool_result(name=self._name, result=error_result)

    return self._finalize_tool_results(tool_results, tool_timings=tool_timings, tool_loop_error=tool_loop_error)
|
|
941
|
+
|
|
942
|
+
def _prepare_tool_display(
    self,
    *,
    tool_name: str,
    namespaced_tool: "NamespacedTool | None",
    candidate_namespaced_tool: "NamespacedTool | None",
    local_tool: Any | None,
    fallback_order: list[str],
) -> tuple[str, list[str] | None, int | None]:
    """
    Determine how we present tool metadata for the console display.

    Args:
        tool_name: The name requested by the LLM (may be un-namespaced).
        namespaced_tool: Exact namespaced match, if any.
        candidate_namespaced_tool: Bare-name fallback match, if any.
        local_tool: Locally registered execution tool, if any.
        fallback_order: De-duplicated advertised tool names used when no
            server/local listing applies.

    Returns a tuple of (display_tool_name, bottom_items, highlight_index).
    """
    # Prefer the exact namespaced match over the bare-name candidate.
    active_namespaced = namespaced_tool or candidate_namespaced_tool
    display_tool_name = (
        active_namespaced.namespaced_tool_name if active_namespaced is not None else tool_name
    )

    bottom_items: list[str] | None = None
    highlight_target: str | None = None

    if active_namespaced is not None:
        # Show all of the owning server's tools, highlighting the one called.
        server_tools = self._aggregator._server_to_tool_map.get(
            active_namespaced.server_name, []
        )
        if server_tools:
            bottom_items = self._unique_preserving_order(
                tool_entry.tool.name for tool_entry in server_tools
            )
            highlight_target = active_namespaced.tool.name
    elif local_tool is not None:
        # Local tool: show the registered execution tools.
        bottom_items = self._unique_preserving_order(self._execution_tools.keys())
        highlight_target = tool_name
    elif tool_name == HUMAN_INPUT_TOOL_NAME:
        bottom_items = [HUMAN_INPUT_TOOL_NAME]
        highlight_target = HUMAN_INPUT_TOOL_NAME

    highlight_index: int | None = None
    if bottom_items and highlight_target:
        try:
            highlight_index = bottom_items.index(highlight_target)
        except ValueError:
            # Target not in the list: show the bar without a highlight.
            highlight_index = None

    if bottom_items is None and fallback_order:
        # Nothing matched above: fall back to the advertised tool list.
        bottom_items = fallback_order
        fallback_target = display_tool_name if display_tool_name in bottom_items else tool_name
        try:
            highlight_index = bottom_items.index(fallback_target)
        except ValueError:
            highlight_index = None

    return display_tool_name, bottom_items, highlight_index
|
|
996
|
+
|
|
997
|
+
@staticmethod
|
|
998
|
+
def _unique_preserving_order(items: Iterable[str]) -> list[str]:
|
|
999
|
+
"""Return a list of unique items while preserving original order."""
|
|
1000
|
+
seen: set[str] = set()
|
|
1001
|
+
result: list[str] = []
|
|
1002
|
+
for item in items:
|
|
1003
|
+
if item in seen:
|
|
1004
|
+
continue
|
|
1005
|
+
seen.add(item)
|
|
1006
|
+
result.append(item)
|
|
1007
|
+
return result
|
|
1008
|
+
|
|
1009
|
+
async def apply_prompt_template(self, prompt_result: GetPromptResult, prompt_name: str) -> str:
    """
    Apply a prompt template as persistent context that will be included in all
    future conversations. Delegates to the attached LLM.

    Args:
        prompt_result: The GetPromptResult containing prompt messages
        prompt_name: The name of the prompt being applied

    Returns:
        String representation of the assistant's response if generated
    """
    assert self._llm
    span_name = f"Agent: '{self._name}' apply_prompt_template"
    with self._tracer.start_as_current_span(span_name):
        return await self._llm.apply_prompt_template(prompt_result, prompt_name)
|
|
1024
|
+
|
|
1025
|
+
async def apply_prompt_messages(
    self, prompts: list[PromptMessageExtended], request_params: RequestParams | None = None
) -> str:
    """
    Send a list of prompt messages through the LLM and return its text reply.

    Args:
        prompts: List of PromptMessageExtended messages
        request_params: Optional request parameters

    Returns:
        The text response from the LLM
    """
    reply = await self.generate(prompts, request_params)
    return reply.first_text()
|
|
1041
|
+
|
|
1042
|
+
async def list_prompts(
    self, namespace: str | None = None, server_name: str | None = None
) -> Mapping[str, list[mcp.types.Prompt]]:
    """
    List all prompts available to this agent, filtered by configuration.

    Args:
        namespace: Optional namespace (server) to list prompts from
        server_name: Fallback used only when ``namespace`` is not given

    Returns:
        Dictionary mapping server names to lists of Prompt objects
    """
    selected = server_name if namespace is None else namespace
    prompts_by_server = await self._aggregator.list_prompts(selected)

    return self._filter_server_collections(
        prompts_by_server,
        self.config.prompts,
        lambda entry: entry.name,
    )
|
|
1063
|
+
|
|
1064
|
+
async def list_resources(
    self, namespace: str | None = None, server_name: str | None = None
) -> dict[str, list[str]]:
    """
    List all resources available to this agent, filtered by configuration.

    Args:
        namespace: Optional namespace (server) to list resources from
        server_name: Fallback used only when ``namespace`` is not given

    Returns:
        Dictionary mapping server names to lists of resource URIs
    """
    selected = server_name if namespace is None else namespace
    resources_by_server = await self._aggregator.list_resources(selected)

    return self._filter_server_collections(
        resources_by_server,
        self.config.resources,
        lambda uri: uri,
    )
|
|
1085
|
+
|
|
1086
|
+
async def list_mcp_tools(self, namespace: str | None = None) -> Mapping[str, list[Tool]]:
    """
    List tools grouped by server (with original names, not namespaced),
    filtered by configuration.

    Args:
        namespace: Optional namespace (server) to list tools from

    Returns:
        Dictionary mapping server names to lists of Tool objects
    """
    raw = await self._aggregator.list_mcp_tools(namespace)
    grouped: dict[str, list[Tool]] = {
        server: self._filter_server_tools(server_tools, server)
        for server, server_tools in raw.items()
    }

    # Surface the elicitation-backed human input tool under a synthetic
    # server name when human input is enabled and the tool exists.
    if self.config.human_input and self._human_input_tool:
        grouped.setdefault("__human_input__", []).append(self._human_input_tool)

    return grouped
|
|
1109
|
+
|
|
1110
|
+
async def list_tools(self) -> ListToolsResult:
    """
    List all tools available to this agent, filtered by configuration.

    Merge order (first occurrence of a name wins): filtered MCP tools,
    subclass/local tools, external-runtime tool (or bash tool), filesystem
    runtime tools, and finally the human input tool.

    Returns:
        ListToolsResult with available tools
    """
    # Start with filtered aggregator tools and merge in subclass/local tools
    merged_tools: list[Tool] = await self._get_filtered_mcp_tools()
    existing_names = {tool.name for tool in merged_tools}

    local_tools = (await super().list_tools()).tools
    for tool in local_tools:
        if tool.name not in existing_names:
            merged_tools.append(tool)
            existing_names.add(tool.name)

    # Add external runtime tool (e.g., ACP terminal) if available, otherwise bash tool
    # (the two are mutually exclusive: an injected runtime replaces local shell).
    if self._external_runtime and hasattr(self._external_runtime, "tool"):
        external_tool = self._external_runtime.tool
        if external_tool and external_tool.name not in existing_names:
            merged_tools.append(external_tool)
            existing_names.add(external_tool.name)
    elif self._bash_tool and self._bash_tool.name not in existing_names:
        merged_tools.append(self._bash_tool)
        existing_names.add(self._bash_tool.name)

    # Add filesystem runtime tools (e.g., ACP filesystem) if available
    if self._filesystem_runtime and hasattr(self._filesystem_runtime, "tools"):
        for fs_tool in self._filesystem_runtime.tools:
            if fs_tool and fs_tool.name not in existing_names:
                merged_tools.append(fs_tool)
                existing_names.add(fs_tool.name)

    # Human input tool is only advertised when enabled in config.
    if self.config.human_input:
        human_tool = getattr(self, "_human_input_tool", None)
        if human_tool and human_tool.name not in existing_names:
            merged_tools.append(human_tool)
            existing_names.add(human_tool.name)

    return ListToolsResult(tools=merged_tools)
|
|
1151
|
+
|
|
1152
|
+
@property
def agent_type(self) -> AgentType:
    """
    Return the type of this agent.

    This class always reports AgentType.BASIC.
    """
    return AgentType.BASIC
|
|
1158
|
+
|
|
1159
|
+
async def agent_card(self) -> AgentCard:
    """
    Build an A2A ``AgentCard`` describing this agent.

    Every tool currently listed by the agent is converted to an
    ``AgentSkill`` and advertised on the card.
    """
    listed: ListToolsResult = await self.list_tools()
    skills: list[AgentSkill] = [await self.convert(tool) for tool in listed.tools]

    return AgentCard(
        skills=skills,
        name=self._name,
        description=self.instruction,
        url=f"fast-agent://agents/{self._name}/",
        version="0.1",
        capabilities=DEFAULT_CAPABILITIES,
        default_input_modes=["text/plain"],
        default_output_modes=["text/plain"],
        provider=None,
        documentation_url=None,
    )
|
|
1181
|
+
|
|
1182
|
+
async def show_assistant_message(
    self,
    message: PromptMessageExtended,
    bottom_items: list[str] | None = None,
    highlight_items: str | list[str] | None = None,
    max_item_length: int | None = None,
    name: str | None = None,
    model: str | None = None,
    additional_message: Union["Text", None] = None,
) -> None:
    """
    Display an assistant message with MCP servers in the bottom bar.

    This override populates the bottom bar with the connected MCP servers
    (plus the local shell runtime, when active) and highlights the servers
    whose tools were called in this message.
    """
    # Resolve the bottom-bar items: caller-supplied list wins, otherwise
    # fall back to the aggregator's connected servers (or nothing).
    if bottom_items is not None:
        server_names = list(bottom_items)
    elif self._aggregator and self._aggregator.server_names:
        server_names = list(self._aggregator.server_names)
    else:
        server_names = []

    server_names = self._unique_preserving_order(server_names)

    # The shell runtime label, when present, is always pinned to the front.
    shell_label = self._shell_server_label()
    if shell_label:
        server_names = [shell_label] + [item for item in server_names if item != shell_label]

    # Resolve highlights: derive from the message's tool calls unless the
    # caller supplied them explicitly.
    if highlight_items is None:
        highlight_servers = self._extract_servers_from_message(message)
    elif isinstance(highlight_items, str):
        highlight_servers = [highlight_items]
    else:
        highlight_servers = highlight_items

    await super().show_assistant_message(
        message=message,
        bottom_items=server_names,
        highlight_items=highlight_servers,
        max_item_length=max_item_length or 12,
        name=name,
        model=model,
        additional_message=additional_message,
    )
|
|
1233
|
+
|
|
1234
|
+
def _extract_servers_from_message(self, message: PromptMessageExtended) -> list[str]:
|
|
1235
|
+
"""
|
|
1236
|
+
Extract server names from tool calls in the message.
|
|
1237
|
+
|
|
1238
|
+
Args:
|
|
1239
|
+
message: The message containing potential tool calls
|
|
1240
|
+
|
|
1241
|
+
Returns:
|
|
1242
|
+
List of server names that were called
|
|
1243
|
+
"""
|
|
1244
|
+
servers: list[str] = []
|
|
1245
|
+
|
|
1246
|
+
# Check if message has tool calls
|
|
1247
|
+
if message.tool_calls:
|
|
1248
|
+
for tool_request in message.tool_calls.values():
|
|
1249
|
+
tool_name = tool_request.params.name
|
|
1250
|
+
|
|
1251
|
+
if (
|
|
1252
|
+
self._shell_runtime_enabled
|
|
1253
|
+
and self._shell_runtime.tool
|
|
1254
|
+
and tool_name == self._shell_runtime.tool.name
|
|
1255
|
+
):
|
|
1256
|
+
shell_label = self._shell_server_label()
|
|
1257
|
+
if shell_label and shell_label not in servers:
|
|
1258
|
+
servers.append(shell_label)
|
|
1259
|
+
continue
|
|
1260
|
+
|
|
1261
|
+
# Use aggregator's mapping to find the server for this tool
|
|
1262
|
+
if tool_name in self._aggregator._namespaced_tool_map:
|
|
1263
|
+
namespaced_tool = self._aggregator._namespaced_tool_map[tool_name]
|
|
1264
|
+
if namespaced_tool.server_name not in servers:
|
|
1265
|
+
servers.append(namespaced_tool.server_name)
|
|
1266
|
+
|
|
1267
|
+
return servers
|
|
1268
|
+
|
|
1269
|
+
def _shell_server_label(self) -> str | None:
|
|
1270
|
+
"""Return the display label for the local shell runtime."""
|
|
1271
|
+
if not self._shell_runtime_enabled or not self._shell_runtime.tool:
|
|
1272
|
+
return None
|
|
1273
|
+
|
|
1274
|
+
runtime_info = self._shell_runtime.runtime_info()
|
|
1275
|
+
runtime_name = runtime_info.get("name")
|
|
1276
|
+
return runtime_name or "shell"
|
|
1277
|
+
|
|
1278
|
+
async def _parse_resource_name(self, name: str, resource_type: str) -> tuple[str, str]:
|
|
1279
|
+
"""Delegate resource name parsing to the aggregator."""
|
|
1280
|
+
return await self._aggregator._parse_resource_name(name, resource_type)
|
|
1281
|
+
|
|
1282
|
+
async def convert(self, tool: Tool) -> AgentSkill:
    """
    Convert a Tool to an AgentSkill.

    Tools backed by a skill manifest are tagged "skill" and keep the
    manifest's name; all other tools are tagged "tool" with their
    namespace stripped from the display name.
    """
    manifest = self._skill_map.get(tool.name)
    if manifest is not None:
        return AgentSkill(
            id=f"skill:{manifest.name}",
            name=manifest.name,
            description=manifest.description or "",
            tags=["skill"],
            examples=None,
            input_modes=None,
            output_modes=None,
        )

    _, tool_without_namespace = await self._parse_resource_name(tool.name, "tool")
    return AgentSkill(
        id=tool.name,
        name=tool_without_namespace,
        description=tool.description or "",
        tags=["tool"],
        examples=None,
        input_modes=None,  # ["text/plain"]
        # cover TextContent | ImageContent ->
        # https://github.com/modelcontextprotocol/modelcontextprotocol/pull/223
        # https://github.com/modelcontextprotocol/modelcontextprotocol/pull/93
        output_modes=None,  # ["text/plain", "image/*"]
    )
|
|
1312
|
+
|
|
1313
|
+
@property
def message_history(self) -> list[PromptMessageExtended]:
    """
    The agent's conversation history as PromptMessageExtended objects.

    Useful for transferring state between agents and for analysis or
    debugging. The authoritative history lives at the agent layer; any
    history kept on the LLM is diagnostic only.

    Returns:
        List of PromptMessageExtended objects representing the conversation history
    """
    return super().message_history
|
|
1326
|
+
|
|
1327
|
+
@property
def usage_accumulator(self) -> Union["UsageAccumulator", None]:
    """
    The accumulator tracking token usage across turns.

    Returns:
        UsageAccumulator object if LLM is attached, None otherwise
    """
    llm = self.llm
    return llm.usage_accumulator if llm else None
|