fast-agent-mcp 0.4.7 (fast_agent_mcp-0.4.7-py3-none-any.whl)
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- fast_agent/__init__.py +183 -0
- fast_agent/acp/__init__.py +19 -0
- fast_agent/acp/acp_aware_mixin.py +304 -0
- fast_agent/acp/acp_context.py +437 -0
- fast_agent/acp/content_conversion.py +136 -0
- fast_agent/acp/filesystem_runtime.py +427 -0
- fast_agent/acp/permission_store.py +269 -0
- fast_agent/acp/server/__init__.py +5 -0
- fast_agent/acp/server/agent_acp_server.py +1472 -0
- fast_agent/acp/slash_commands.py +1050 -0
- fast_agent/acp/terminal_runtime.py +408 -0
- fast_agent/acp/tool_permission_adapter.py +125 -0
- fast_agent/acp/tool_permissions.py +474 -0
- fast_agent/acp/tool_progress.py +814 -0
- fast_agent/agents/__init__.py +85 -0
- fast_agent/agents/agent_types.py +64 -0
- fast_agent/agents/llm_agent.py +350 -0
- fast_agent/agents/llm_decorator.py +1139 -0
- fast_agent/agents/mcp_agent.py +1337 -0
- fast_agent/agents/tool_agent.py +271 -0
- fast_agent/agents/workflow/agents_as_tools_agent.py +849 -0
- fast_agent/agents/workflow/chain_agent.py +212 -0
- fast_agent/agents/workflow/evaluator_optimizer.py +380 -0
- fast_agent/agents/workflow/iterative_planner.py +652 -0
- fast_agent/agents/workflow/maker_agent.py +379 -0
- fast_agent/agents/workflow/orchestrator_models.py +218 -0
- fast_agent/agents/workflow/orchestrator_prompts.py +248 -0
- fast_agent/agents/workflow/parallel_agent.py +250 -0
- fast_agent/agents/workflow/router_agent.py +353 -0
- fast_agent/cli/__init__.py +0 -0
- fast_agent/cli/__main__.py +73 -0
- fast_agent/cli/commands/acp.py +159 -0
- fast_agent/cli/commands/auth.py +404 -0
- fast_agent/cli/commands/check_config.py +783 -0
- fast_agent/cli/commands/go.py +514 -0
- fast_agent/cli/commands/quickstart.py +557 -0
- fast_agent/cli/commands/serve.py +143 -0
- fast_agent/cli/commands/server_helpers.py +114 -0
- fast_agent/cli/commands/setup.py +174 -0
- fast_agent/cli/commands/url_parser.py +190 -0
- fast_agent/cli/constants.py +40 -0
- fast_agent/cli/main.py +115 -0
- fast_agent/cli/terminal.py +24 -0
- fast_agent/config.py +798 -0
- fast_agent/constants.py +41 -0
- fast_agent/context.py +279 -0
- fast_agent/context_dependent.py +50 -0
- fast_agent/core/__init__.py +92 -0
- fast_agent/core/agent_app.py +448 -0
- fast_agent/core/core_app.py +137 -0
- fast_agent/core/direct_decorators.py +784 -0
- fast_agent/core/direct_factory.py +620 -0
- fast_agent/core/error_handling.py +27 -0
- fast_agent/core/exceptions.py +90 -0
- fast_agent/core/executor/__init__.py +0 -0
- fast_agent/core/executor/executor.py +280 -0
- fast_agent/core/executor/task_registry.py +32 -0
- fast_agent/core/executor/workflow_signal.py +324 -0
- fast_agent/core/fastagent.py +1186 -0
- fast_agent/core/logging/__init__.py +5 -0
- fast_agent/core/logging/events.py +138 -0
- fast_agent/core/logging/json_serializer.py +164 -0
- fast_agent/core/logging/listeners.py +309 -0
- fast_agent/core/logging/logger.py +278 -0
- fast_agent/core/logging/transport.py +481 -0
- fast_agent/core/prompt.py +9 -0
- fast_agent/core/prompt_templates.py +183 -0
- fast_agent/core/validation.py +326 -0
- fast_agent/event_progress.py +62 -0
- fast_agent/history/history_exporter.py +49 -0
- fast_agent/human_input/__init__.py +47 -0
- fast_agent/human_input/elicitation_handler.py +123 -0
- fast_agent/human_input/elicitation_state.py +33 -0
- fast_agent/human_input/form_elements.py +59 -0
- fast_agent/human_input/form_fields.py +256 -0
- fast_agent/human_input/simple_form.py +113 -0
- fast_agent/human_input/types.py +40 -0
- fast_agent/interfaces.py +310 -0
- fast_agent/llm/__init__.py +9 -0
- fast_agent/llm/cancellation.py +22 -0
- fast_agent/llm/fastagent_llm.py +931 -0
- fast_agent/llm/internal/passthrough.py +161 -0
- fast_agent/llm/internal/playback.py +129 -0
- fast_agent/llm/internal/silent.py +41 -0
- fast_agent/llm/internal/slow.py +38 -0
- fast_agent/llm/memory.py +275 -0
- fast_agent/llm/model_database.py +490 -0
- fast_agent/llm/model_factory.py +388 -0
- fast_agent/llm/model_info.py +102 -0
- fast_agent/llm/prompt_utils.py +155 -0
- fast_agent/llm/provider/anthropic/anthropic_utils.py +84 -0
- fast_agent/llm/provider/anthropic/cache_planner.py +56 -0
- fast_agent/llm/provider/anthropic/llm_anthropic.py +796 -0
- fast_agent/llm/provider/anthropic/multipart_converter_anthropic.py +462 -0
- fast_agent/llm/provider/bedrock/bedrock_utils.py +218 -0
- fast_agent/llm/provider/bedrock/llm_bedrock.py +2207 -0
- fast_agent/llm/provider/bedrock/multipart_converter_bedrock.py +84 -0
- fast_agent/llm/provider/google/google_converter.py +466 -0
- fast_agent/llm/provider/google/llm_google_native.py +681 -0
- fast_agent/llm/provider/openai/llm_aliyun.py +31 -0
- fast_agent/llm/provider/openai/llm_azure.py +143 -0
- fast_agent/llm/provider/openai/llm_deepseek.py +76 -0
- fast_agent/llm/provider/openai/llm_generic.py +35 -0
- fast_agent/llm/provider/openai/llm_google_oai.py +32 -0
- fast_agent/llm/provider/openai/llm_groq.py +42 -0
- fast_agent/llm/provider/openai/llm_huggingface.py +85 -0
- fast_agent/llm/provider/openai/llm_openai.py +1195 -0
- fast_agent/llm/provider/openai/llm_openai_compatible.py +138 -0
- fast_agent/llm/provider/openai/llm_openrouter.py +45 -0
- fast_agent/llm/provider/openai/llm_tensorzero_openai.py +128 -0
- fast_agent/llm/provider/openai/llm_xai.py +38 -0
- fast_agent/llm/provider/openai/multipart_converter_openai.py +561 -0
- fast_agent/llm/provider/openai/openai_multipart.py +169 -0
- fast_agent/llm/provider/openai/openai_utils.py +67 -0
- fast_agent/llm/provider/openai/responses.py +133 -0
- fast_agent/llm/provider_key_manager.py +139 -0
- fast_agent/llm/provider_types.py +34 -0
- fast_agent/llm/request_params.py +61 -0
- fast_agent/llm/sampling_converter.py +98 -0
- fast_agent/llm/stream_types.py +9 -0
- fast_agent/llm/usage_tracking.py +445 -0
- fast_agent/mcp/__init__.py +56 -0
- fast_agent/mcp/common.py +26 -0
- fast_agent/mcp/elicitation_factory.py +84 -0
- fast_agent/mcp/elicitation_handlers.py +164 -0
- fast_agent/mcp/gen_client.py +83 -0
- fast_agent/mcp/helpers/__init__.py +36 -0
- fast_agent/mcp/helpers/content_helpers.py +352 -0
- fast_agent/mcp/helpers/server_config_helpers.py +25 -0
- fast_agent/mcp/hf_auth.py +147 -0
- fast_agent/mcp/interfaces.py +92 -0
- fast_agent/mcp/logger_textio.py +108 -0
- fast_agent/mcp/mcp_agent_client_session.py +411 -0
- fast_agent/mcp/mcp_aggregator.py +2175 -0
- fast_agent/mcp/mcp_connection_manager.py +723 -0
- fast_agent/mcp/mcp_content.py +262 -0
- fast_agent/mcp/mime_utils.py +108 -0
- fast_agent/mcp/oauth_client.py +509 -0
- fast_agent/mcp/prompt.py +159 -0
- fast_agent/mcp/prompt_message_extended.py +155 -0
- fast_agent/mcp/prompt_render.py +84 -0
- fast_agent/mcp/prompt_serialization.py +580 -0
- fast_agent/mcp/prompts/__init__.py +0 -0
- fast_agent/mcp/prompts/__main__.py +7 -0
- fast_agent/mcp/prompts/prompt_constants.py +18 -0
- fast_agent/mcp/prompts/prompt_helpers.py +238 -0
- fast_agent/mcp/prompts/prompt_load.py +186 -0
- fast_agent/mcp/prompts/prompt_server.py +552 -0
- fast_agent/mcp/prompts/prompt_template.py +438 -0
- fast_agent/mcp/resource_utils.py +215 -0
- fast_agent/mcp/sampling.py +200 -0
- fast_agent/mcp/server/__init__.py +4 -0
- fast_agent/mcp/server/agent_server.py +613 -0
- fast_agent/mcp/skybridge.py +44 -0
- fast_agent/mcp/sse_tracking.py +287 -0
- fast_agent/mcp/stdio_tracking_simple.py +59 -0
- fast_agent/mcp/streamable_http_tracking.py +309 -0
- fast_agent/mcp/tool_execution_handler.py +137 -0
- fast_agent/mcp/tool_permission_handler.py +88 -0
- fast_agent/mcp/transport_tracking.py +634 -0
- fast_agent/mcp/types.py +24 -0
- fast_agent/mcp/ui_agent.py +48 -0
- fast_agent/mcp/ui_mixin.py +209 -0
- fast_agent/mcp_server_registry.py +89 -0
- fast_agent/py.typed +0 -0
- fast_agent/resources/examples/data-analysis/analysis-campaign.py +189 -0
- fast_agent/resources/examples/data-analysis/analysis.py +68 -0
- fast_agent/resources/examples/data-analysis/fastagent.config.yaml +41 -0
- fast_agent/resources/examples/data-analysis/mount-point/WA_Fn-UseC_-HR-Employee-Attrition.csv +1471 -0
- fast_agent/resources/examples/mcp/elicitations/elicitation_account_server.py +88 -0
- fast_agent/resources/examples/mcp/elicitations/elicitation_forms_server.py +297 -0
- fast_agent/resources/examples/mcp/elicitations/elicitation_game_server.py +164 -0
- fast_agent/resources/examples/mcp/elicitations/fastagent.config.yaml +35 -0
- fast_agent/resources/examples/mcp/elicitations/fastagent.secrets.yaml.example +17 -0
- fast_agent/resources/examples/mcp/elicitations/forms_demo.py +107 -0
- fast_agent/resources/examples/mcp/elicitations/game_character.py +65 -0
- fast_agent/resources/examples/mcp/elicitations/game_character_handler.py +256 -0
- fast_agent/resources/examples/mcp/elicitations/tool_call.py +21 -0
- fast_agent/resources/examples/mcp/state-transfer/agent_one.py +18 -0
- fast_agent/resources/examples/mcp/state-transfer/agent_two.py +18 -0
- fast_agent/resources/examples/mcp/state-transfer/fastagent.config.yaml +27 -0
- fast_agent/resources/examples/mcp/state-transfer/fastagent.secrets.yaml.example +15 -0
- fast_agent/resources/examples/researcher/fastagent.config.yaml +61 -0
- fast_agent/resources/examples/researcher/researcher-eval.py +53 -0
- fast_agent/resources/examples/researcher/researcher-imp.py +189 -0
- fast_agent/resources/examples/researcher/researcher.py +36 -0
- fast_agent/resources/examples/tensorzero/.env.sample +2 -0
- fast_agent/resources/examples/tensorzero/Makefile +31 -0
- fast_agent/resources/examples/tensorzero/README.md +56 -0
- fast_agent/resources/examples/tensorzero/agent.py +35 -0
- fast_agent/resources/examples/tensorzero/demo_images/clam.jpg +0 -0
- fast_agent/resources/examples/tensorzero/demo_images/crab.png +0 -0
- fast_agent/resources/examples/tensorzero/demo_images/shrimp.png +0 -0
- fast_agent/resources/examples/tensorzero/docker-compose.yml +105 -0
- fast_agent/resources/examples/tensorzero/fastagent.config.yaml +19 -0
- fast_agent/resources/examples/tensorzero/image_demo.py +67 -0
- fast_agent/resources/examples/tensorzero/mcp_server/Dockerfile +25 -0
- fast_agent/resources/examples/tensorzero/mcp_server/entrypoint.sh +35 -0
- fast_agent/resources/examples/tensorzero/mcp_server/mcp_server.py +31 -0
- fast_agent/resources/examples/tensorzero/mcp_server/pyproject.toml +11 -0
- fast_agent/resources/examples/tensorzero/simple_agent.py +25 -0
- fast_agent/resources/examples/tensorzero/tensorzero_config/system_schema.json +29 -0
- fast_agent/resources/examples/tensorzero/tensorzero_config/system_template.minijinja +11 -0
- fast_agent/resources/examples/tensorzero/tensorzero_config/tensorzero.toml +35 -0
- fast_agent/resources/examples/workflows/agents_as_tools_extended.py +73 -0
- fast_agent/resources/examples/workflows/agents_as_tools_simple.py +50 -0
- fast_agent/resources/examples/workflows/chaining.py +37 -0
- fast_agent/resources/examples/workflows/evaluator.py +77 -0
- fast_agent/resources/examples/workflows/fastagent.config.yaml +26 -0
- fast_agent/resources/examples/workflows/graded_report.md +89 -0
- fast_agent/resources/examples/workflows/human_input.py +28 -0
- fast_agent/resources/examples/workflows/maker.py +156 -0
- fast_agent/resources/examples/workflows/orchestrator.py +70 -0
- fast_agent/resources/examples/workflows/parallel.py +56 -0
- fast_agent/resources/examples/workflows/router.py +69 -0
- fast_agent/resources/examples/workflows/short_story.md +13 -0
- fast_agent/resources/examples/workflows/short_story.txt +19 -0
- fast_agent/resources/setup/.gitignore +30 -0
- fast_agent/resources/setup/agent.py +28 -0
- fast_agent/resources/setup/fastagent.config.yaml +65 -0
- fast_agent/resources/setup/fastagent.secrets.yaml.example +38 -0
- fast_agent/resources/setup/pyproject.toml.tmpl +23 -0
- fast_agent/skills/__init__.py +9 -0
- fast_agent/skills/registry.py +235 -0
- fast_agent/tools/elicitation.py +369 -0
- fast_agent/tools/shell_runtime.py +402 -0
- fast_agent/types/__init__.py +59 -0
- fast_agent/types/conversation_summary.py +294 -0
- fast_agent/types/llm_stop_reason.py +78 -0
- fast_agent/types/message_search.py +249 -0
- fast_agent/ui/__init__.py +38 -0
- fast_agent/ui/console.py +59 -0
- fast_agent/ui/console_display.py +1080 -0
- fast_agent/ui/elicitation_form.py +946 -0
- fast_agent/ui/elicitation_style.py +59 -0
- fast_agent/ui/enhanced_prompt.py +1400 -0
- fast_agent/ui/history_display.py +734 -0
- fast_agent/ui/interactive_prompt.py +1199 -0
- fast_agent/ui/markdown_helpers.py +104 -0
- fast_agent/ui/markdown_truncator.py +1004 -0
- fast_agent/ui/mcp_display.py +857 -0
- fast_agent/ui/mcp_ui_utils.py +235 -0
- fast_agent/ui/mermaid_utils.py +169 -0
- fast_agent/ui/message_primitives.py +50 -0
- fast_agent/ui/notification_tracker.py +205 -0
- fast_agent/ui/plain_text_truncator.py +68 -0
- fast_agent/ui/progress_display.py +10 -0
- fast_agent/ui/rich_progress.py +195 -0
- fast_agent/ui/streaming.py +774 -0
- fast_agent/ui/streaming_buffer.py +449 -0
- fast_agent/ui/tool_display.py +422 -0
- fast_agent/ui/usage_display.py +204 -0
- fast_agent/utils/__init__.py +5 -0
- fast_agent/utils/reasoning_stream_parser.py +77 -0
- fast_agent/utils/time.py +22 -0
- fast_agent/workflow_telemetry.py +261 -0
- fast_agent_mcp-0.4.7.dist-info/METADATA +788 -0
- fast_agent_mcp-0.4.7.dist-info/RECORD +261 -0
- fast_agent_mcp-0.4.7.dist-info/WHEEL +4 -0
- fast_agent_mcp-0.4.7.dist-info/entry_points.txt +7 -0
- fast_agent_mcp-0.4.7.dist-info/licenses/LICENSE +201 -0
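
The diff below shows fast_agent/mcp/mcp_aggregator.py (+2175 lines). For orientation only, here is a minimal, hypothetical usage sketch of the MCPAggregator class that file defines. This sketch is not part of the package; the server names "fetch" and "filesystem" are assumptions and would need to resolve through an initialized fast-agent context and server registry.

# Illustrative sketch only; not part of the packaged sources.
import asyncio

from fast_agent.mcp.mcp_aggregator import MCPAggregator


async def main() -> None:
    # Server names are hypothetical; they must exist in the configured server registry.
    async with MCPAggregator(server_names=["fetch", "filesystem"], name="demo") as aggregator:
        tools = await aggregator.list_tools()  # tools renamed to server-namespaced names
        print([tool.name for tool in tools.tools])

        status = await aggregator.collect_server_status()  # per-server ServerStatus models
        print({name: s.is_connected for name, s in status.items()})


asyncio.run(main())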
|
@@ -0,0 +1,2175 @@
|
|
|
1
|
+
from asyncio import Lock, gather
|
|
2
|
+
from collections import Counter
|
|
3
|
+
from dataclasses import dataclass, field
|
|
4
|
+
from datetime import datetime, timezone
|
|
5
|
+
from typing import (
|
|
6
|
+
TYPE_CHECKING,
|
|
7
|
+
Any,
|
|
8
|
+
Callable,
|
|
9
|
+
Mapping,
|
|
10
|
+
TypeVar,
|
|
11
|
+
Union,
|
|
12
|
+
cast,
|
|
13
|
+
)
|
|
14
|
+
|
|
15
|
+
from mcp import GetPromptResult, ReadResourceResult
|
|
16
|
+
from mcp.client.session import ClientSession
|
|
17
|
+
from mcp.shared.session import ProgressFnT
|
|
18
|
+
from mcp.types import (
|
|
19
|
+
CallToolResult,
|
|
20
|
+
ListToolsResult,
|
|
21
|
+
Prompt,
|
|
22
|
+
ServerCapabilities,
|
|
23
|
+
TextContent,
|
|
24
|
+
Tool,
|
|
25
|
+
)
|
|
26
|
+
from opentelemetry import trace
|
|
27
|
+
from pydantic import AnyUrl, BaseModel, ConfigDict, Field
|
|
28
|
+
|
|
29
|
+
from fast_agent.context_dependent import ContextDependent
|
|
30
|
+
from fast_agent.core.exceptions import ServerSessionTerminatedError
|
|
31
|
+
from fast_agent.core.logging.logger import get_logger
|
|
32
|
+
from fast_agent.event_progress import ProgressAction
|
|
33
|
+
from fast_agent.mcp.common import SEP, create_namespaced_name, is_namespaced_name
|
|
34
|
+
from fast_agent.mcp.gen_client import gen_client
|
|
35
|
+
from fast_agent.mcp.mcp_agent_client_session import MCPAgentClientSession
|
|
36
|
+
from fast_agent.mcp.mcp_connection_manager import MCPConnectionManager
|
|
37
|
+
from fast_agent.mcp.skybridge import (
|
|
38
|
+
SKYBRIDGE_MIME_TYPE,
|
|
39
|
+
SkybridgeResourceConfig,
|
|
40
|
+
SkybridgeServerConfig,
|
|
41
|
+
SkybridgeToolConfig,
|
|
42
|
+
)
|
|
43
|
+
from fast_agent.mcp.tool_execution_handler import NoOpToolExecutionHandler, ToolExecutionHandler
|
|
44
|
+
from fast_agent.mcp.tool_permission_handler import NoOpToolPermissionHandler, ToolPermissionHandler
|
|
45
|
+
from fast_agent.mcp.transport_tracking import TransportSnapshot
|
|
46
|
+
|
|
47
|
+
if TYPE_CHECKING:
|
|
48
|
+
from fast_agent.context import Context
|
|
49
|
+
|
|
50
|
+
|
|
51
|
+
logger = get_logger(__name__) # This will be replaced per-instance when agent_name is available
|
|
52
|
+
|
|
53
|
+
# Define type variables for the generalized method
|
|
54
|
+
T = TypeVar("T")
|
|
55
|
+
R = TypeVar("R")
|
|
56
|
+
|
|
57
|
+
|
|
58
|
+
class NamespacedTool(BaseModel):
|
|
59
|
+
"""
|
|
60
|
+
A tool that is namespaced by server name.
|
|
61
|
+
"""
|
|
62
|
+
|
|
63
|
+
tool: Tool
|
|
64
|
+
server_name: str
|
|
65
|
+
namespaced_tool_name: str
|
|
66
|
+
|
|
67
|
+
|
|
68
|
+
@dataclass
|
|
69
|
+
class ServerStats:
|
|
70
|
+
call_counts: Counter = field(default_factory=Counter)
|
|
71
|
+
last_call_at: datetime | None = None
|
|
72
|
+
last_error_at: datetime | None = None
|
|
73
|
+
reconnect_count: int = 0
|
|
74
|
+
|
|
75
|
+
def record(self, operation_type: str, success: bool) -> None:
|
|
76
|
+
self.call_counts[operation_type] += 1
|
|
77
|
+
now = datetime.now(timezone.utc)
|
|
78
|
+
self.last_call_at = now
|
|
79
|
+
if not success:
|
|
80
|
+
self.last_error_at = now
|
|
81
|
+
|
|
82
|
+
def record_reconnect(self) -> None:
|
|
83
|
+
"""Record a successful reconnection."""
|
|
84
|
+
self.reconnect_count += 1
|
|
85
|
+
|
|
86
|
+
|
|
87
|
+
class ServerStatus(BaseModel):
|
|
88
|
+
server_name: str
|
|
89
|
+
implementation_name: str | None = None
|
|
90
|
+
implementation_version: str | None = None
|
|
91
|
+
server_capabilities: ServerCapabilities | None = None
|
|
92
|
+
client_capabilities: Mapping[str, Any] | None = None
|
|
93
|
+
client_info_name: str | None = None
|
|
94
|
+
client_info_version: str | None = None
|
|
95
|
+
transport: str | None = None
|
|
96
|
+
is_connected: bool | None = None
|
|
97
|
+
last_call_at: datetime | None = None
|
|
98
|
+
last_error_at: datetime | None = None
|
|
99
|
+
staleness_seconds: float | None = None
|
|
100
|
+
call_counts: dict[str, int] = Field(default_factory=dict)
|
|
101
|
+
error_message: str | None = None
|
|
102
|
+
instructions_available: bool | None = None
|
|
103
|
+
instructions_enabled: bool | None = None
|
|
104
|
+
instructions_included: bool | None = None
|
|
105
|
+
roots_configured: bool | None = None
|
|
106
|
+
roots_count: int | None = None
|
|
107
|
+
elicitation_mode: str | None = None
|
|
108
|
+
sampling_mode: str | None = None
|
|
109
|
+
spoofing_enabled: bool | None = None
|
|
110
|
+
session_id: str | None = None
|
|
111
|
+
transport_channels: TransportSnapshot | None = None
|
|
112
|
+
skybridge: SkybridgeServerConfig | None = None
|
|
113
|
+
reconnect_count: int = 0
|
|
114
|
+
|
|
115
|
+
model_config = ConfigDict(arbitrary_types_allowed=True)
|
|
116
|
+
|
|
117
|
+
|
|
118
|
+
class MCPAggregator(ContextDependent):
|
|
119
|
+
"""
|
|
120
|
+
Aggregates multiple MCP servers. When a developer calls, e.g. call_tool(...),
|
|
121
|
+
the aggregator searches all servers in its list for a server that provides that tool.
|
|
122
|
+
"""
|
|
123
|
+
|
|
124
|
+
initialized: bool = False
|
|
125
|
+
"""Whether the aggregator has been initialized with tools and resources from all servers."""
|
|
126
|
+
|
|
127
|
+
connection_persistence: bool = False
|
|
128
|
+
"""Whether to maintain a persistent connection to the server."""
|
|
129
|
+
|
|
130
|
+
server_names: list[str]
|
|
131
|
+
"""A list of server names to connect to."""
|
|
132
|
+
|
|
133
|
+
model_config = ConfigDict(extra="allow", arbitrary_types_allowed=True)
|
|
134
|
+
|
|
135
|
+
async def __aenter__(self):
|
|
136
|
+
if self.initialized:
|
|
137
|
+
return self
|
|
138
|
+
|
|
139
|
+
# Keep a connection manager to manage persistent connections for this aggregator
|
|
140
|
+
if self.connection_persistence:
|
|
141
|
+
# Try to get existing connection manager from context
|
|
142
|
+
context = self.context
|
|
143
|
+
if not hasattr(context, "_connection_manager") or context._connection_manager is None:
|
|
144
|
+
server_registry = context.server_registry
|
|
145
|
+
if server_registry is None:
|
|
146
|
+
raise RuntimeError("Context is missing server registry for MCP connections")
|
|
147
|
+
manager = MCPConnectionManager(server_registry, context=context)
|
|
148
|
+
await manager.__aenter__()
|
|
149
|
+
context._connection_manager = manager
|
|
150
|
+
self._owns_connection_manager = True
|
|
151
|
+
self._persistent_connection_manager = cast(
|
|
152
|
+
"MCPConnectionManager", context._connection_manager
|
|
153
|
+
)
|
|
154
|
+
else:
|
|
155
|
+
self._persistent_connection_manager = None
|
|
156
|
+
|
|
157
|
+
# Import the display component here to avoid circular imports
|
|
158
|
+
from fast_agent.ui.console_display import ConsoleDisplay
|
|
159
|
+
|
|
160
|
+
# Initialize the display component
|
|
161
|
+
self.display = ConsoleDisplay(config=self.context.config)
|
|
162
|
+
|
|
163
|
+
await self.load_servers()
|
|
164
|
+
|
|
165
|
+
return self
|
|
166
|
+
|
|
167
|
+
async def __aexit__(self, exc_type, exc_val, exc_tb):
|
|
168
|
+
await self.close()
|
|
169
|
+
|
|
170
|
+
def __init__(
|
|
171
|
+
self,
|
|
172
|
+
server_names: list[str],
|
|
173
|
+
connection_persistence: bool = True,
|
|
174
|
+
context: Union["Context", None] = None,
|
|
175
|
+
name: str | None = None,
|
|
176
|
+
config: Any | None = None, # Accept the agent config for elicitation_handler access
|
|
177
|
+
tool_handler: ToolExecutionHandler | None = None,
|
|
178
|
+
permission_handler: ToolPermissionHandler | None = None,
|
|
179
|
+
**kwargs,
|
|
180
|
+
) -> None:
|
|
181
|
+
"""
|
|
182
|
+
:param server_names: A list of server names to connect to.
|
|
183
|
+
:param connection_persistence: Whether to maintain persistent connections to servers (default: True).
|
|
184
|
+
:param config: Optional agent config containing elicitation_handler and other settings.
|
|
185
|
+
:param tool_handler: Optional handler for tool execution lifecycle events (e.g., for ACP notifications).
|
|
186
|
+
:param permission_handler: Optional handler for tool permission checks (e.g., for ACP permissions).
|
|
187
|
+
Note: The server names must be resolvable by the gen_client function, and specified in the server registry.
|
|
188
|
+
"""
|
|
189
|
+
super().__init__(
|
|
190
|
+
context=context,
|
|
191
|
+
**kwargs,
|
|
192
|
+
)
|
|
193
|
+
|
|
194
|
+
self.server_names = server_names
|
|
195
|
+
self.connection_persistence = connection_persistence
|
|
196
|
+
self.agent_name = name
|
|
197
|
+
self.config = config # Store the config for access in session factory
|
|
198
|
+
self._persistent_connection_manager: MCPConnectionManager | None = None
|
|
199
|
+
self._owns_connection_manager = False
|
|
200
|
+
|
|
201
|
+
# Store tool execution handler for integration with ACP or other protocols
|
|
202
|
+
# Default to NoOpToolExecutionHandler if none provided
|
|
203
|
+
self._tool_handler = tool_handler or NoOpToolExecutionHandler()
|
|
204
|
+
|
|
205
|
+
# Store tool permission handler for ACP or other permission systems
|
|
206
|
+
# Default to NoOpToolPermissionHandler if none provided (allows all)
|
|
207
|
+
self._permission_handler = permission_handler or NoOpToolPermissionHandler()
|
|
208
|
+
|
|
209
|
+
# Set up logger with agent name in namespace if available
|
|
210
|
+
global logger
|
|
211
|
+
logger_name = f"{__name__}.{name}" if name else __name__
|
|
212
|
+
logger = get_logger(logger_name)
|
|
213
|
+
|
|
214
|
+
# Maps namespaced_tool_name -> namespaced tool info
|
|
215
|
+
self._namespaced_tool_map: dict[str, NamespacedTool] = {}
|
|
216
|
+
# Maps server_name -> list of tools
|
|
217
|
+
self._server_to_tool_map: dict[str, list[NamespacedTool]] = {}
|
|
218
|
+
self._tool_map_lock = Lock()
|
|
219
|
+
|
|
220
|
+
# Cache for prompt objects, maps server_name -> list of prompt objects
|
|
221
|
+
self._prompt_cache: dict[str, list[Prompt]] = {}
|
|
222
|
+
self._prompt_cache_lock = Lock()
|
|
223
|
+
|
|
224
|
+
# Lock for refreshing tools from a server
|
|
225
|
+
self._refresh_lock = Lock()
|
|
226
|
+
|
|
227
|
+
# Track runtime stats per server
|
|
228
|
+
self._server_stats: dict[str, ServerStats] = {}
|
|
229
|
+
self._stats_lock = Lock()
|
|
230
|
+
|
|
231
|
+
# Track discovered Skybridge configurations per server
|
|
232
|
+
self._skybridge_configs: dict[str, SkybridgeServerConfig] = {}
|
|
233
|
+
|
|
234
|
+
def _create_progress_callback(
|
|
235
|
+
self, server_name: str, tool_name: str, tool_call_id: str
|
|
236
|
+
) -> "ProgressFnT":
|
|
237
|
+
"""Create a progress callback function for tool execution."""
|
|
238
|
+
|
|
239
|
+
async def progress_callback(
|
|
240
|
+
progress: float, total: float | None, message: str | None
|
|
241
|
+
) -> None:
|
|
242
|
+
"""Handle progress notifications from MCP tool execution."""
|
|
243
|
+
logger.info(
|
|
244
|
+
"Tool progress update",
|
|
245
|
+
data={
|
|
246
|
+
"progress_action": ProgressAction.TOOL_PROGRESS,
|
|
247
|
+
"tool_name": tool_name,
|
|
248
|
+
"server_name": server_name,
|
|
249
|
+
"agent_name": self.agent_name,
|
|
250
|
+
"progress": progress,
|
|
251
|
+
"total": total,
|
|
252
|
+
"details": message or "", # Put the message in details column
|
|
253
|
+
},
|
|
254
|
+
)
|
|
255
|
+
|
|
256
|
+
# Forward progress to tool handler (e.g., for ACP notifications)
|
|
257
|
+
try:
|
|
258
|
+
await self._tool_handler.on_tool_progress(tool_call_id, progress, total, message)
|
|
259
|
+
except Exception as e:
|
|
260
|
+
logger.error(f"Error in tool progress handler: {e}", exc_info=True)
|
|
261
|
+
|
|
262
|
+
return progress_callback
|
|
263
|
+
|
|
264
|
+
async def close(self) -> None:
|
|
265
|
+
"""
|
|
266
|
+
Close all persistent connections when the aggregator is deleted.
|
|
267
|
+
"""
|
|
268
|
+
if self.connection_persistence and self._persistent_connection_manager:
|
|
269
|
+
try:
|
|
270
|
+
# Only attempt cleanup if we own the connection manager
|
|
271
|
+
if self._owns_connection_manager and (
|
|
272
|
+
hasattr(self.context, "_connection_manager")
|
|
273
|
+
and self.context._connection_manager == self._persistent_connection_manager
|
|
274
|
+
):
|
|
275
|
+
logger.info("Shutting down all persistent connections...")
|
|
276
|
+
await self._persistent_connection_manager.disconnect_all()
|
|
277
|
+
await self._persistent_connection_manager.__aexit__(None, None, None)
|
|
278
|
+
delattr(self.context, "_connection_manager")
|
|
279
|
+
self.initialized = False
|
|
280
|
+
except Exception as e:
|
|
281
|
+
logger.error(f"Error during connection manager cleanup: {e}")
|
|
282
|
+
|
|
283
|
+
@classmethod
|
|
284
|
+
async def create(
|
|
285
|
+
cls,
|
|
286
|
+
server_names: list[str],
|
|
287
|
+
connection_persistence: bool = False,
|
|
288
|
+
) -> "MCPAggregator":
|
|
289
|
+
"""
|
|
290
|
+
Factory method to create and initialize an MCPAggregator.
|
|
291
|
+
"""
|
|
292
|
+
|
|
293
|
+
logger.info(f"Creating MCPAggregator with servers: {server_names}")
|
|
294
|
+
|
|
295
|
+
instance = cls(
|
|
296
|
+
server_names=server_names,
|
|
297
|
+
connection_persistence=connection_persistence,
|
|
298
|
+
)
|
|
299
|
+
|
|
300
|
+
try:
|
|
301
|
+
await instance.__aenter__()
|
|
302
|
+
|
|
303
|
+
logger.debug("Loading servers...")
|
|
304
|
+
await instance.load_servers()
|
|
305
|
+
|
|
306
|
+
logger.debug("MCPAggregator created and initialized.")
|
|
307
|
+
return instance
|
|
308
|
+
except Exception as e:
|
|
309
|
+
logger.error(f"Error creating MCPAggregator: {e}")
|
|
310
|
+
await instance.__aexit__(None, None, None)
|
|
311
|
+
|
|
312
|
+
def _create_session_factory(self, server_name: str):
|
|
313
|
+
"""
|
|
314
|
+
Create a session factory function for the given server.
|
|
315
|
+
This centralizes the logic for creating MCPAgentClientSession instances.
|
|
316
|
+
|
|
317
|
+
Args:
|
|
318
|
+
server_name: The name of the server to create a session for
|
|
319
|
+
|
|
320
|
+
Returns:
|
|
321
|
+
A factory function that creates MCPAgentClientSession instances
|
|
322
|
+
"""
|
|
323
|
+
|
|
324
|
+
def session_factory(read_stream, write_stream, read_timeout, **kwargs):
|
|
325
|
+
# Get agent's model and name from config if available
|
|
326
|
+
agent_model: str | None = None
|
|
327
|
+
agent_name: str | None = None
|
|
328
|
+
elicitation_handler = None
|
|
329
|
+
api_key: str | None = None
|
|
330
|
+
|
|
331
|
+
# Access config directly if it was passed from BaseAgent
|
|
332
|
+
if self.config:
|
|
333
|
+
agent_model = self.config.model
|
|
334
|
+
agent_name = self.config.name
|
|
335
|
+
elicitation_handler = self.config.elicitation_handler
|
|
336
|
+
api_key = self.config.api_key
|
|
337
|
+
|
|
338
|
+
return MCPAgentClientSession(
|
|
339
|
+
read_stream,
|
|
340
|
+
write_stream,
|
|
341
|
+
read_timeout,
|
|
342
|
+
server_name=server_name,
|
|
343
|
+
agent_model=agent_model,
|
|
344
|
+
agent_name=agent_name,
|
|
345
|
+
api_key=api_key,
|
|
346
|
+
elicitation_handler=elicitation_handler,
|
|
347
|
+
tool_list_changed_callback=self._handle_tool_list_changed,
|
|
348
|
+
**kwargs, # Pass through any additional kwargs like server_config
|
|
349
|
+
)
|
|
350
|
+
|
|
351
|
+
return session_factory
|
|
352
|
+
|
|
353
|
+
async def load_servers(self, *, force_connect: bool = False) -> None:
|
|
354
|
+
"""
|
|
355
|
+
Discover tools from each server in parallel and build an index of namespaced tool names.
|
|
356
|
+
Also populate the prompt cache.
|
|
357
|
+
|
|
358
|
+
Set force_connect=True to override load_on_start guards (e.g., when a user issues /connect).
|
|
359
|
+
"""
|
|
360
|
+
if self.initialized and not force_connect:
|
|
361
|
+
logger.debug("MCPAggregator already initialized.")
|
|
362
|
+
return
|
|
363
|
+
|
|
364
|
+
async with self._tool_map_lock:
|
|
365
|
+
self._namespaced_tool_map.clear()
|
|
366
|
+
self._server_to_tool_map.clear()
|
|
367
|
+
|
|
368
|
+
async with self._prompt_cache_lock:
|
|
369
|
+
self._prompt_cache.clear()
|
|
370
|
+
|
|
371
|
+
self._skybridge_configs.clear()
|
|
372
|
+
|
|
373
|
+
servers_to_load: list[str] = []
|
|
374
|
+
skipped_servers: list[str] = []
|
|
375
|
+
|
|
376
|
+
for server_name in self.server_names:
|
|
377
|
+
# Check if server should be loaded on start
|
|
378
|
+
if self.context and getattr(self.context, "server_registry", None):
|
|
379
|
+
server_config = self.context.server_registry.get_server_config(server_name)
|
|
380
|
+
if (
|
|
381
|
+
server_config
|
|
382
|
+
and not getattr(server_config, "load_on_start", True)
|
|
383
|
+
and not force_connect
|
|
384
|
+
):
|
|
385
|
+
logger.debug(f"Skipping server '{server_name}' - load_on_start=False")
|
|
386
|
+
skipped_servers.append(server_name)
|
|
387
|
+
continue
|
|
388
|
+
|
|
389
|
+
servers_to_load.append(server_name)
|
|
390
|
+
|
|
391
|
+
if self.connection_persistence:
|
|
392
|
+
logger.info(
|
|
393
|
+
f"Creating persistent connection to server: {server_name}",
|
|
394
|
+
data={
|
|
395
|
+
"progress_action": ProgressAction.STARTING,
|
|
396
|
+
"server_name": server_name,
|
|
397
|
+
"agent_name": self.agent_name,
|
|
398
|
+
},
|
|
399
|
+
)
|
|
400
|
+
|
|
401
|
+
await self._persistent_connection_manager.get_server(
|
|
402
|
+
server_name, client_session_factory=self._create_session_factory(server_name)
|
|
403
|
+
)
|
|
404
|
+
|
|
405
|
+
# Record the initialize call that happened during connection setup
|
|
406
|
+
await self._record_server_call(server_name, "initialize", True)
|
|
407
|
+
|
|
408
|
+
logger.info(
|
|
409
|
+
f"MCP Servers initialized for agent '{self.agent_name}'",
|
|
410
|
+
data={
|
|
411
|
+
"progress_action": ProgressAction.INITIALIZED,
|
|
412
|
+
"agent_name": self.agent_name,
|
|
413
|
+
},
|
|
414
|
+
)
|
|
415
|
+
|
|
416
|
+
if skipped_servers:
|
|
417
|
+
logger.debug(
|
|
418
|
+
"Deferred MCP servers due to load_on_start=False",
|
|
419
|
+
data={
|
|
420
|
+
"agent_name": self.agent_name,
|
|
421
|
+
"servers": skipped_servers,
|
|
422
|
+
},
|
|
423
|
+
)
|
|
424
|
+
|
|
425
|
+
async def fetch_tools(server_name: str) -> list[Tool]:
|
|
426
|
+
# Only fetch tools if the server supports them
|
|
427
|
+
if not await self.server_supports_feature(server_name, "tools"):
|
|
428
|
+
logger.debug(f"Server '{server_name}' does not support tools")
|
|
429
|
+
return []
|
|
430
|
+
|
|
431
|
+
try:
|
|
432
|
+
result: ListToolsResult = await self._execute_on_server(
|
|
433
|
+
server_name=server_name,
|
|
434
|
+
operation_type="tools/list",
|
|
435
|
+
operation_name="",
|
|
436
|
+
method_name="list_tools",
|
|
437
|
+
method_args={},
|
|
438
|
+
)
|
|
439
|
+
return result.tools or []
|
|
440
|
+
except Exception as e:
|
|
441
|
+
logger.error(f"Error loading tools from server '{server_name}'", data=e)
|
|
442
|
+
return []
|
|
443
|
+
|
|
444
|
+
async def fetch_prompts(server_name: str) -> list[Prompt]:
|
|
445
|
+
# Only fetch prompts if the server supports them
|
|
446
|
+
if not await self.server_supports_feature(server_name, "prompts"):
|
|
447
|
+
logger.debug(f"Server '{server_name}' does not support prompts")
|
|
448
|
+
return []
|
|
449
|
+
|
|
450
|
+
try:
|
|
451
|
+
result = await self._execute_on_server(
|
|
452
|
+
server_name=server_name,
|
|
453
|
+
operation_type="prompts/list",
|
|
454
|
+
operation_name="",
|
|
455
|
+
method_name="list_prompts",
|
|
456
|
+
method_args={},
|
|
457
|
+
)
|
|
458
|
+
return getattr(result, "prompts", [])
|
|
459
|
+
except Exception as e:
|
|
460
|
+
logger.debug(f"Error loading prompts from server '{server_name}': {e}")
|
|
461
|
+
return []
|
|
462
|
+
|
|
463
|
+
async def load_server_data(server_name: str):
|
|
464
|
+
tools: list[Tool] = []
|
|
465
|
+
prompts: list[Prompt] = []
|
|
466
|
+
|
|
467
|
+
# Use _execute_on_server for consistent tracking regardless of connection mode
|
|
468
|
+
tools = await fetch_tools(server_name)
|
|
469
|
+
prompts = await fetch_prompts(server_name)
|
|
470
|
+
|
|
471
|
+
return server_name, tools, prompts
|
|
472
|
+
|
|
473
|
+
if not servers_to_load:
|
|
474
|
+
self.initialized = True
|
|
475
|
+
return
|
|
476
|
+
|
|
477
|
+
# Gather data from all servers concurrently
|
|
478
|
+
results = await gather(
|
|
479
|
+
*(load_server_data(server_name) for server_name in servers_to_load),
|
|
480
|
+
return_exceptions=True,
|
|
481
|
+
)
|
|
482
|
+
|
|
483
|
+
total_tool_count = 0
|
|
484
|
+
total_prompt_count = 0
|
|
485
|
+
|
|
486
|
+
for result in results:
|
|
487
|
+
if isinstance(result, BaseException):
|
|
488
|
+
logger.error(f"Error loading server data: {result}")
|
|
489
|
+
continue
|
|
490
|
+
|
|
491
|
+
server_name, tools, prompts = result
|
|
492
|
+
|
|
493
|
+
# Process tools
|
|
494
|
+
self._server_to_tool_map[server_name] = []
|
|
495
|
+
for tool in tools:
|
|
496
|
+
namespaced_tool_name = create_namespaced_name(server_name, tool.name)
|
|
497
|
+
namespaced_tool = NamespacedTool(
|
|
498
|
+
tool=tool,
|
|
499
|
+
server_name=server_name,
|
|
500
|
+
namespaced_tool_name=namespaced_tool_name,
|
|
501
|
+
)
|
|
502
|
+
|
|
503
|
+
self._namespaced_tool_map[namespaced_tool_name] = namespaced_tool
|
|
504
|
+
self._server_to_tool_map[server_name].append(namespaced_tool)
|
|
505
|
+
|
|
506
|
+
total_tool_count += len(tools)
|
|
507
|
+
|
|
508
|
+
# Process prompts
|
|
509
|
+
async with self._prompt_cache_lock:
|
|
510
|
+
self._prompt_cache[server_name] = prompts
|
|
511
|
+
|
|
512
|
+
total_prompt_count += len(prompts)
|
|
513
|
+
|
|
514
|
+
logger.debug(
|
|
515
|
+
f"MCP Aggregator initialized for server '{server_name}'",
|
|
516
|
+
data={
|
|
517
|
+
"progress_action": ProgressAction.INITIALIZED,
|
|
518
|
+
"server_name": server_name,
|
|
519
|
+
"agent_name": self.agent_name,
|
|
520
|
+
"tool_count": len(tools),
|
|
521
|
+
"prompt_count": len(prompts),
|
|
522
|
+
},
|
|
523
|
+
)
|
|
524
|
+
|
|
525
|
+
await self._initialize_skybridge_configs(servers_to_load)
|
|
526
|
+
|
|
527
|
+
self._display_startup_state(total_tool_count, total_prompt_count)
|
|
528
|
+
|
|
529
|
+
self.initialized = True
|
|
530
|
+
|
|
531
|
+
async def _initialize_skybridge_configs(self, server_names: list[str] | None = None) -> None:
|
|
532
|
+
"""Discover Skybridge resources across servers."""
|
|
533
|
+
target_servers = server_names if server_names is not None else self.server_names
|
|
534
|
+
if not target_servers:
|
|
535
|
+
return
|
|
536
|
+
|
|
537
|
+
tasks = [
|
|
538
|
+
self._evaluate_skybridge_for_server(server_name) for server_name in target_servers
|
|
539
|
+
]
|
|
540
|
+
results = await gather(*tasks, return_exceptions=True)
|
|
541
|
+
|
|
542
|
+
for result in results:
|
|
543
|
+
if isinstance(result, BaseException):
|
|
544
|
+
logger.debug("Skybridge discovery failed: %s", str(result))
|
|
545
|
+
continue
|
|
546
|
+
|
|
547
|
+
server_name, config = result
|
|
548
|
+
self._skybridge_configs[server_name] = config
|
|
549
|
+
|
|
550
|
+
async def _evaluate_skybridge_for_server(
|
|
551
|
+
self, server_name: str
|
|
552
|
+
) -> tuple[str, SkybridgeServerConfig]:
|
|
553
|
+
"""Inspect a single server for Skybridge-compatible resources."""
|
|
554
|
+
config = SkybridgeServerConfig(server_name=server_name)
|
|
555
|
+
|
|
556
|
+
tool_entries = self._server_to_tool_map.get(server_name, [])
|
|
557
|
+
tool_configs: list[SkybridgeToolConfig] = []
|
|
558
|
+
|
|
559
|
+
for namespaced_tool in tool_entries:
|
|
560
|
+
tool_meta = getattr(namespaced_tool.tool, "meta", None) or {}
|
|
561
|
+
template_value = tool_meta.get("openai/outputTemplate")
|
|
562
|
+
if not template_value:
|
|
563
|
+
continue
|
|
564
|
+
|
|
565
|
+
try:
|
|
566
|
+
template_uri = AnyUrl(template_value)
|
|
567
|
+
except Exception as exc:
|
|
568
|
+
warning = (
|
|
569
|
+
f"Tool '{namespaced_tool.namespaced_tool_name}' outputTemplate "
|
|
570
|
+
f"'{template_value}' is invalid: {exc}"
|
|
571
|
+
)
|
|
572
|
+
config.warnings.append(warning)
|
|
573
|
+
logger.error(warning)
|
|
574
|
+
tool_configs.append(
|
|
575
|
+
SkybridgeToolConfig(
|
|
576
|
+
tool_name=namespaced_tool.tool.name,
|
|
577
|
+
namespaced_tool_name=namespaced_tool.namespaced_tool_name,
|
|
578
|
+
warning=warning,
|
|
579
|
+
)
|
|
580
|
+
)
|
|
581
|
+
continue
|
|
582
|
+
|
|
583
|
+
tool_configs.append(
|
|
584
|
+
SkybridgeToolConfig(
|
|
585
|
+
tool_name=namespaced_tool.tool.name,
|
|
586
|
+
namespaced_tool_name=namespaced_tool.namespaced_tool_name,
|
|
587
|
+
template_uri=template_uri,
|
|
588
|
+
)
|
|
589
|
+
)
|
|
590
|
+
|
|
591
|
+
raw_resources_capability = await self.server_supports_feature(server_name, "resources")
|
|
592
|
+
supports_resources = bool(raw_resources_capability)
|
|
593
|
+
config.supports_resources = supports_resources
|
|
594
|
+
config.tools = tool_configs
|
|
595
|
+
|
|
596
|
+
if not supports_resources:
|
|
597
|
+
return server_name, config
|
|
598
|
+
|
|
599
|
+
try:
|
|
600
|
+
resources = await self._list_resources_from_server(server_name, check_support=False)
|
|
601
|
+
except Exception as exc: # noqa: BLE001 - logging and surfacing gracefully
|
|
602
|
+
config.warnings.append(f"Failed to list resources: {exc}")
|
|
603
|
+
return server_name, config
|
|
604
|
+
|
|
605
|
+
for resource_entry in resources:
|
|
606
|
+
uri = getattr(resource_entry, "uri", None)
|
|
607
|
+
if not uri:
|
|
608
|
+
continue
|
|
609
|
+
|
|
610
|
+
uri_str = str(uri)
|
|
611
|
+
if not uri_str.startswith("ui://"):
|
|
612
|
+
continue
|
|
613
|
+
|
|
614
|
+
try:
|
|
615
|
+
uri_value = AnyUrl(uri_str)
|
|
616
|
+
except Exception as exc: # noqa: BLE001
|
|
617
|
+
warning = f"Ignoring Skybridge candidate '{uri_str}': invalid URI ({exc})"
|
|
618
|
+
config.warnings.append(warning)
|
|
619
|
+
logger.debug(warning)
|
|
620
|
+
continue
|
|
621
|
+
|
|
622
|
+
sky_resource = SkybridgeResourceConfig(uri=uri_value)
|
|
623
|
+
config.ui_resources.append(sky_resource)
|
|
624
|
+
|
|
625
|
+
try:
|
|
626
|
+
read_result: ReadResourceResult = await self._get_resource_from_server(
|
|
627
|
+
server_name, uri_str
|
|
628
|
+
)
|
|
629
|
+
except Exception as exc: # noqa: BLE001
|
|
630
|
+
warning = f"Failed to read resource '{uri_str}': {exc}"
|
|
631
|
+
sky_resource.warning = warning
|
|
632
|
+
config.warnings.append(warning)
|
|
633
|
+
continue
|
|
634
|
+
|
|
635
|
+
contents = getattr(read_result, "contents", []) or []
|
|
636
|
+
seen_mime_types: list[str] = []
|
|
637
|
+
|
|
638
|
+
for content in contents:
|
|
639
|
+
mime_type = getattr(content, "mimeType", None)
|
|
640
|
+
if mime_type:
|
|
641
|
+
seen_mime_types.append(mime_type)
|
|
642
|
+
if mime_type == SKYBRIDGE_MIME_TYPE:
|
|
643
|
+
sky_resource.mime_type = mime_type
|
|
644
|
+
sky_resource.is_skybridge = True
|
|
645
|
+
break
|
|
646
|
+
|
|
647
|
+
if sky_resource.mime_type is None and seen_mime_types:
|
|
648
|
+
sky_resource.mime_type = seen_mime_types[0]
|
|
649
|
+
|
|
650
|
+
if not sky_resource.is_skybridge:
|
|
651
|
+
observed_type = sky_resource.mime_type or "unknown MIME type"
|
|
652
|
+
warning = f"served as '{observed_type}' instead of '{SKYBRIDGE_MIME_TYPE}'"
|
|
653
|
+
sky_resource.warning = warning
|
|
654
|
+
config.warnings.append(f"{uri_str}: {warning}")
|
|
655
|
+
|
|
656
|
+
resource_lookup = {str(resource.uri): resource for resource in config.ui_resources}
|
|
657
|
+
for tool_config in tool_configs:
|
|
658
|
+
if tool_config.template_uri is None:
|
|
659
|
+
continue
|
|
660
|
+
|
|
661
|
+
resource_match = resource_lookup.get(str(tool_config.template_uri))
|
|
662
|
+
if not resource_match:
|
|
663
|
+
warning = (
|
|
664
|
+
f"Tool '{tool_config.namespaced_tool_name}' references missing "
|
|
665
|
+
f"Skybridge resource '{tool_config.template_uri}'"
|
|
666
|
+
)
|
|
667
|
+
tool_config.warning = warning
|
|
668
|
+
config.warnings.append(warning)
|
|
669
|
+
logger.error(warning)
|
|
670
|
+
continue
|
|
671
|
+
|
|
672
|
+
tool_config.resource_uri = resource_match.uri
|
|
673
|
+
tool_config.is_valid = resource_match.is_skybridge
|
|
674
|
+
|
|
675
|
+
if not resource_match.is_skybridge:
|
|
676
|
+
warning = (
|
|
677
|
+
f"Tool '{tool_config.namespaced_tool_name}' references resource "
|
|
678
|
+
f"'{resource_match.uri}' served as '{resource_match.mime_type or 'unknown'}' "
|
|
679
|
+
f"instead of '{SKYBRIDGE_MIME_TYPE}'"
|
|
680
|
+
)
|
|
681
|
+
tool_config.warning = warning
|
|
682
|
+
config.warnings.append(warning)
|
|
683
|
+
logger.warning(warning)
|
|
684
|
+
|
|
685
|
+
config.tools = tool_configs
|
|
686
|
+
|
|
687
|
+
valid_tool_count = sum(1 for tool in tool_configs if tool.is_valid)
|
|
688
|
+
if config.enabled and valid_tool_count == 0:
|
|
689
|
+
warning = (
|
|
690
|
+
f"Skybridge resources detected on server '{server_name}' but no tools expose them"
|
|
691
|
+
)
|
|
692
|
+
config.warnings.append(warning)
|
|
693
|
+
logger.warning(warning)
|
|
694
|
+
|
|
695
|
+
return server_name, config
|
|
696
|
+
|
|
697
|
+
def _display_startup_state(self, total_tool_count: int, total_prompt_count: int) -> None:
|
|
698
|
+
"""Display startup summary and Skybridge status information."""
|
|
699
|
+
# In interactive contexts the UI helper will render both the agent summary and the
|
|
700
|
+
# Skybridge status. For non-interactive contexts, the warnings collected during
|
|
701
|
+
# discovery are emitted through the logger, so we don't need to duplicate output here.
|
|
702
|
+
if not self._skybridge_configs:
|
|
703
|
+
return
|
|
704
|
+
|
|
705
|
+
logger.debug(
|
|
706
|
+
"Skybridge discovery completed",
|
|
707
|
+
data={
|
|
708
|
+
"agent_name": self.agent_name,
|
|
709
|
+
"server_count": len(self._skybridge_configs),
|
|
710
|
+
},
|
|
711
|
+
)
|
|
712
|
+
|
|
713
|
+
async def get_capabilities(self, server_name: str):
|
|
714
|
+
"""Get server capabilities if available."""
|
|
715
|
+
if not self.connection_persistence:
|
|
716
|
+
# For non-persistent connections, we can't easily check capabilities
|
|
717
|
+
return None
|
|
718
|
+
|
|
719
|
+
try:
|
|
720
|
+
server_conn = await self._persistent_connection_manager.get_server(
|
|
721
|
+
server_name, client_session_factory=self._create_session_factory(server_name)
|
|
722
|
+
)
|
|
723
|
+
# server_capabilities is a property, not a coroutine
|
|
724
|
+
return server_conn.server_capabilities
|
|
725
|
+
except Exception as e:
|
|
726
|
+
logger.debug(f"Error getting capabilities for server '{server_name}': {e}")
|
|
727
|
+
return None
|
|
728
|
+
|
|
729
|
+
async def validate_server(self, server_name: str) -> bool:
|
|
730
|
+
"""
|
|
731
|
+
Validate that a server exists in our server list.
|
|
732
|
+
|
|
733
|
+
Args:
|
|
734
|
+
server_name: Name of the server to validate
|
|
735
|
+
|
|
736
|
+
Returns:
|
|
737
|
+
True if the server exists, False otherwise
|
|
738
|
+
"""
|
|
739
|
+
valid = server_name in self.server_names
|
|
740
|
+
if not valid:
|
|
741
|
+
logger.debug(f"Server '{server_name}' not found")
|
|
742
|
+
return valid
|
|
743
|
+
|
|
744
|
+
async def server_supports_feature(self, server_name: str, feature: str) -> bool:
|
|
745
|
+
"""
|
|
746
|
+
Check if a server supports a specific feature.
|
|
747
|
+
|
|
748
|
+
Args:
|
|
749
|
+
server_name: Name of the server to check
|
|
750
|
+
feature: Feature to check for (e.g., "prompts", "resources")
|
|
751
|
+
|
|
752
|
+
Returns:
|
|
753
|
+
True if the server supports the feature, False otherwise
|
|
754
|
+
"""
|
|
755
|
+
if not await self.validate_server(server_name):
|
|
756
|
+
return False
|
|
757
|
+
|
|
758
|
+
capabilities = await self.get_capabilities(server_name)
|
|
759
|
+
if not capabilities:
|
|
760
|
+
return False
|
|
761
|
+
|
|
762
|
+
feature_value = getattr(capabilities, feature, False)
|
|
763
|
+
if isinstance(feature_value, bool):
|
|
764
|
+
return feature_value
|
|
765
|
+
if feature_value is None:
|
|
766
|
+
return False
|
|
767
|
+
try:
|
|
768
|
+
return bool(feature_value)
|
|
769
|
+
except Exception: # noqa: BLE001
|
|
770
|
+
return True
|
|
771
|
+
|
|
772
|
+
async def list_servers(self) -> list[str]:
|
|
773
|
+
"""Return the list of server names aggregated by this agent."""
|
|
774
|
+
if not self.initialized:
|
|
775
|
+
await self.load_servers()
|
|
776
|
+
|
|
777
|
+
return self.server_names
|
|
778
|
+
|
|
779
|
+
async def list_tools(self) -> ListToolsResult:
|
|
780
|
+
"""
|
|
781
|
+
:return: Tools from all servers aggregated, and renamed to be dot-namespaced by server name.
|
|
782
|
+
"""
|
|
783
|
+
if not self.initialized:
|
|
784
|
+
await self.load_servers()
|
|
785
|
+
|
|
786
|
+
tools: list[Tool] = []
|
|
787
|
+
|
|
788
|
+
for namespaced_tool_name, namespaced_tool in self._namespaced_tool_map.items():
|
|
789
|
+
tool_copy = namespaced_tool.tool.model_copy(
|
|
790
|
+
deep=True, update={"name": namespaced_tool_name}
|
|
791
|
+
)
|
|
792
|
+
skybridge_config = self._skybridge_configs.get(namespaced_tool.server_name)
|
|
793
|
+
if skybridge_config:
|
|
794
|
+
matching_tool = next(
|
|
795
|
+
(
|
|
796
|
+
tool
|
|
797
|
+
for tool in skybridge_config.tools
|
|
798
|
+
if tool.namespaced_tool_name == namespaced_tool_name and tool.is_valid
|
|
799
|
+
),
|
|
800
|
+
None,
|
|
801
|
+
)
|
|
802
|
+
if matching_tool:
|
|
803
|
+
meta = dict(tool_copy.meta or {})
|
|
804
|
+
meta["openai/skybridgeEnabled"] = True
|
|
805
|
+
meta["openai/skybridgeTemplate"] = str(matching_tool.template_uri)
|
|
806
|
+
tool_copy.meta = meta
|
|
807
|
+
tools.append(tool_copy)
|
|
808
|
+
|
|
809
|
+
return ListToolsResult(tools=tools)
|
|
810
|
+
|
|
811
|
+
async def refresh_all_tools(self) -> None:
|
|
812
|
+
"""
|
|
813
|
+
Refresh the tools for all servers.
|
|
814
|
+
This is useful when you know tools have changed but haven't received notifications.
|
|
815
|
+
"""
|
|
816
|
+
logger.info("Refreshing tools for all servers")
|
|
817
|
+
for server_name in self.server_names:
|
|
818
|
+
await self._refresh_server_tools(server_name)
|
|
819
|
+
|
|
820
|
+
async def _record_server_call(
|
|
821
|
+
self, server_name: str, operation_type: str, success: bool
|
|
822
|
+
) -> None:
|
|
823
|
+
async with self._stats_lock:
|
|
824
|
+
stats = self._server_stats.setdefault(server_name, ServerStats())
|
|
825
|
+
stats.record(operation_type, success)
|
|
826
|
+
|
|
827
|
+
# For stdio servers, also emit synthetic transport events to create activity timeline
|
|
828
|
+
await self._notify_stdio_transport_activity(server_name, operation_type, success)
|
|
829
|
+
|
|
830
|
+
async def _record_reconnect(self, server_name: str) -> None:
|
|
831
|
+
"""Record a successful server reconnection."""
|
|
832
|
+
async with self._stats_lock:
|
|
833
|
+
stats = self._server_stats.setdefault(server_name, ServerStats())
|
|
834
|
+
stats.record_reconnect()
|
|
835
|
+
|
|
836
|
+
async def _notify_stdio_transport_activity(
|
|
837
|
+
self, server_name: str, operation_type: str, success: bool
|
|
838
|
+
) -> None:
|
|
839
|
+
"""Notify transport metrics of activity for stdio servers to create activity timeline."""
|
|
840
|
+
if not self._persistent_connection_manager:
|
|
841
|
+
return
|
|
842
|
+
|
|
843
|
+
try:
|
|
844
|
+
# Get the server connection and check if it's stdio transport
|
|
845
|
+
server_conn = self._persistent_connection_manager.running_servers.get(server_name)
|
|
846
|
+
if not server_conn:
|
|
847
|
+
return
|
|
848
|
+
|
|
849
|
+
server_config = getattr(server_conn, "server_config", None)
|
|
850
|
+
if not server_config or server_config.transport != "stdio":
|
|
851
|
+
return
|
|
852
|
+
|
|
853
|
+
# Get transport metrics and emit synthetic message event
|
|
854
|
+
transport_metrics = getattr(server_conn, "transport_metrics", None)
|
|
855
|
+
if transport_metrics:
|
|
856
|
+
# Import here to avoid circular imports
|
|
857
|
+
from fast_agent.mcp.transport_tracking import ChannelEvent
|
|
858
|
+
|
|
859
|
+
# Create a synthetic message event to represent the MCP operation
|
|
860
|
+
event = ChannelEvent(
|
|
861
|
+
channel="stdio",
|
|
862
|
+
event_type="message",
|
|
863
|
+
detail=f"{operation_type} ({'success' if success else 'error'})",
|
|
864
|
+
)
|
|
865
|
+
transport_metrics.record_event(event)
|
|
866
|
+
except Exception:
|
|
867
|
+
# Don't let transport tracking errors break normal operation
|
|
868
|
+
logger.debug(
|
|
869
|
+
"Failed to notify stdio transport activity for %s", server_name, exc_info=True
|
|
870
|
+
)
|
|
871
|
+
|
|
872
|
+
async def get_server_instructions(self) -> dict[str, tuple[str, list[str]]]:
|
|
873
|
+
"""
|
|
874
|
+
Get instructions from all connected servers along with their tool names.
|
|
875
|
+
|
|
876
|
+
Returns:
|
|
877
|
+
Dict mapping server name to tuple of (instructions, list of tool names)
|
|
878
|
+
"""
|
|
879
|
+
instructions = {}
|
|
880
|
+
|
|
881
|
+
if self.connection_persistence and hasattr(self, "_persistent_connection_manager"):
|
|
882
|
+
# Get instructions from persistent connections
|
|
883
|
+
for server_name in self.server_names:
|
|
884
|
+
try:
|
|
885
|
+
server_conn = await self._persistent_connection_manager.get_server(
|
|
886
|
+
server_name,
|
|
887
|
+
client_session_factory=self._create_session_factory(server_name),
|
|
888
|
+
)
|
|
889
|
+
# Always include server, even if no instructions
|
|
890
|
+
# Get tool names for this server
|
|
891
|
+
tool_names = [
|
|
892
|
+
namespaced_tool.tool.name
|
|
893
|
+
for namespaced_tool_name, namespaced_tool in self._namespaced_tool_map.items()
|
|
894
|
+
if namespaced_tool.server_name == server_name
|
|
895
|
+
]
|
|
896
|
+
# Include server even if instructions is None
|
|
897
|
+
instructions[server_name] = (server_conn.server_instructions, tool_names)
|
|
898
|
+
except Exception as e:
|
|
899
|
+
logger.debug(f"Failed to get instructions from server {server_name}: {e}")
|
|
900
|
+
|
|
901
|
+
return instructions
|
|
902
|
+
|
|
903
|
+
async def collect_server_status(self) -> dict[str, ServerStatus]:
|
|
904
|
+
"""Return aggregated status information for each configured server."""
|
|
905
|
+
if not self.initialized:
|
|
906
|
+
await self.load_servers()
|
|
907
|
+
|
|
908
|
+
now = datetime.now(timezone.utc)
|
|
909
|
+
status_map: dict[str, ServerStatus] = {}
|
|
910
|
+
|
|
911
|
+
for server_name in self.server_names:
|
|
912
|
+
stats = self._server_stats.get(server_name)
|
|
913
|
+
last_call = stats.last_call_at if stats else None
|
|
914
|
+
last_error = stats.last_error_at if stats else None
|
|
915
|
+
staleness = (now - last_call).total_seconds() if last_call else None
|
|
916
|
+
call_counts = dict(stats.call_counts) if stats else {}
|
|
917
|
+
reconnect_count = stats.reconnect_count if stats else 0
|
|
918
|
+
|
|
919
|
+
implementation_name = None
|
|
920
|
+
implementation_version = None
|
|
921
|
+
capabilities: ServerCapabilities | None = None
|
|
922
|
+
client_capabilities: Mapping[str, Any] | None = None
|
|
923
|
+
client_info_name = None
|
|
924
|
+
client_info_version = None
|
|
925
|
+
is_connected = None
|
|
926
|
+
error_message = None
|
|
927
|
+
instructions_available = None
|
|
928
|
+
instructions_enabled = None
|
|
929
|
+
instructions_included = None
|
|
930
|
+
roots_configured = None
|
|
931
|
+
roots_count = None
|
|
932
|
+
elicitation_mode = None
|
|
933
|
+
sampling_mode = None
|
|
934
|
+
spoofing_enabled = None
|
|
935
|
+
server_cfg = None
|
|
936
|
+
session_id = None
|
|
937
|
+
server_conn = None
|
|
938
|
+
transport: str | None = None
|
|
939
|
+
transport_snapshot: TransportSnapshot | None = None
|
|
940
|
+
|
|
941
|
+
manager = getattr(self, "_persistent_connection_manager", None)
|
|
942
|
+
if self.connection_persistence and manager is not None:
|
|
943
|
+            try:
+                server_conn = await manager.get_server(
+                    server_name,
+                    client_session_factory=self._create_session_factory(server_name),
+                )
+                implementation = getattr(server_conn, "server_implementation", None)
+                if implementation:
+                    implementation_name = getattr(implementation, "name", None)
+                    implementation_version = getattr(implementation, "version", None)
+                capabilities = getattr(server_conn, "server_capabilities", None)
+                client_capabilities = getattr(server_conn, "client_capabilities", None)
+                session = server_conn.session
+                client_info = getattr(session, "client_info", None) if session else None
+                if client_info:
+                    client_info_name = getattr(client_info, "name", None)
+                    client_info_version = getattr(client_info, "version", None)
+                is_connected = server_conn.is_healthy()
+                error_message = getattr(server_conn, "_error_message", None)
+                instructions_available = getattr(
+                    server_conn, "server_instructions_available", None
+                )
+                instructions_enabled = getattr(server_conn, "server_instructions_enabled", None)
+                instructions_included = bool(getattr(server_conn, "server_instructions", None))
+                server_cfg = getattr(server_conn, "server_config", None)
+                if session:
+                    elicitation_mode = getattr(
+                        session, "effective_elicitation_mode", elicitation_mode
+                    )
+                session_id = getattr(server_conn, "session_id", None)
+                if not session_id and getattr(server_conn, "_get_session_id_cb", None):
+                    try:
+                        session_id = server_conn._get_session_id_cb()  # type: ignore[attr-defined]
+                    except Exception:
+                        session_id = None
+                metrics = getattr(server_conn, "transport_metrics", None)
+                if metrics is not None:
+                    try:
+                        transport_snapshot = metrics.snapshot()
+                    except Exception:
+                        logger.debug(
+                            "Failed to snapshot transport metrics for server '%s'",
+                            server_name,
+                            exc_info=True,
+                        )
+            except Exception as exc:
+                logger.debug(
+                    f"Failed to collect status for server '{server_name}'",
+                    data={"error": str(exc)},
+                )
+
+            if (
+                server_cfg is None
+                and self.context
+                and getattr(self.context, "server_registry", None)
+            ):
+                try:
+                    server_cfg = self.context.server_registry.get_server_config(server_name)
+                except Exception:
+                    server_cfg = None
+
+            if server_cfg is not None:
+                instructions_enabled = (
+                    instructions_enabled
+                    if instructions_enabled is not None
+                    else server_cfg.include_instructions
+                )
+                roots = getattr(server_cfg, "roots", None)
+                roots_configured = bool(roots)
+                roots_count = len(roots) if roots else 0
+                transport = getattr(server_cfg, "transport", transport)
+                elicitation = getattr(server_cfg, "elicitation", None)
+                elicitation_mode = (
+                    getattr(elicitation, "mode", None) if elicitation else elicitation_mode
+                )
+                sampling_cfg = getattr(server_cfg, "sampling", None)
+                spoofing_enabled = bool(getattr(server_cfg, "implementation", None))
+                if implementation_name is None and getattr(server_cfg, "implementation", None):
+                    implementation_name = server_cfg.implementation.name
+                    implementation_version = getattr(server_cfg.implementation, "version", None)
+                if session_id is None:
+                    if server_cfg.transport == "stdio":
+                        session_id = "local"
+                    elif server_conn and getattr(server_conn, "_get_session_id_cb", None):
+                        try:
+                            session_id = server_conn._get_session_id_cb()  # type: ignore[attr-defined]
+                        except Exception:
+                            session_id = None
+
+                if sampling_cfg is not None:
+                    sampling_mode = "configured"
+                else:
+                    auto_sampling = True
+                    if self.context and getattr(self.context, "config", None):
+                        auto_sampling = getattr(self.context.config, "auto_sampling", True)
+                    sampling_mode = "auto" if auto_sampling else "off"
+            else:
+                # Fall back to defaults when config missing
+                auto_sampling = True
+                if self.context and getattr(self.context, "config", None):
+                    auto_sampling = getattr(self.context.config, "auto_sampling", True)
+                sampling_mode = sampling_mode or ("auto" if auto_sampling else "off")
+
+            status_map[server_name] = ServerStatus(
+                server_name=server_name,
+                implementation_name=implementation_name,
+                implementation_version=implementation_version,
+                server_capabilities=capabilities,
+                client_capabilities=client_capabilities,
+                client_info_name=client_info_name,
+                client_info_version=client_info_version,
+                transport=transport,
+                is_connected=is_connected,
+                last_call_at=last_call,
+                last_error_at=last_error,
+                staleness_seconds=staleness,
+                call_counts=call_counts,
+                error_message=error_message,
+                instructions_available=instructions_available,
+                instructions_enabled=instructions_enabled,
+                instructions_included=instructions_included,
+                roots_configured=roots_configured,
+                roots_count=roots_count,
+                elicitation_mode=elicitation_mode,
+                sampling_mode=sampling_mode,
+                spoofing_enabled=spoofing_enabled,
+                session_id=session_id,
+                transport_channels=transport_snapshot,
+                skybridge=self._skybridge_configs.get(server_name),
+                reconnect_count=reconnect_count,
+            )
+
+        return status_map
+
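The sampling-mode resolution above follows a fixed precedence: an explicit per-server sampling config yields "configured"; otherwise the global auto_sampling flag (defaulting to True) selects "auto" or "off". A minimal standalone sketch of that precedence; resolve_sampling_mode is a hypothetical helper and not part of the package:

async def resolve_sampling_mode(sampling_cfg, auto_sampling: bool = True, current: str | None = None) -> str:
    # Hypothetical helper mirroring the precedence used in the block above.
    if sampling_cfg is not None:
        return "configured"              # explicit per-server sampling config wins
    if current:
        return current                   # keep a previously resolved mode, if any
    return "auto" if auto_sampling else "off"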
+    async def get_skybridge_configs(self) -> dict[str, SkybridgeServerConfig]:
+        """Expose discovered Skybridge configurations keyed by server."""
+        if not self.initialized:
+            await self.load_servers()
+        return dict(self._skybridge_configs)
+
+    async def get_skybridge_config(self, server_name: str) -> SkybridgeServerConfig | None:
+        """Return the Skybridge configuration for a specific server, loading if necessary."""
+        if not self.initialized:
+            await self.load_servers()
+        return self._skybridge_configs.get(server_name)
+
+    async def _execute_on_server(
+        self,
+        server_name: str,
+        operation_type: str,
+        operation_name: str,
+        method_name: str,
+        method_args: dict[str, Any] | None = None,
+        error_factory: Callable[[str], R] | None = None,
+        progress_callback: ProgressFnT | None = None,
+    ) -> R:
+        """
+        Generic method to execute operations on a specific server.
+
+        Args:
+            server_name: Name of the server to execute the operation on
+            operation_type: Type of operation (for logging) e.g., "tool", "prompt"
+            operation_name: Name of the specific operation being called (for logging)
+            method_name: Name of the method to call on the client session
+            method_args: Arguments to pass to the method
+            error_factory: Function to create an error return value if the operation fails
+            progress_callback: Optional progress callback for operations that support it
+
+        Returns:
+            Result from the operation or an error result
+        """
+
+        async def try_execute(client: ClientSession):
+            try:
+                method = getattr(client, method_name)
+
+                # Get metadata from context for tool, resource, and prompt calls
+                metadata = None
+                if method_name in ["call_tool", "read_resource", "get_prompt"]:
+                    from fast_agent.llm.fastagent_llm import _mcp_metadata_var
+
+                    metadata = _mcp_metadata_var.get()
+
+                # Prepare kwargs
+                kwargs = method_args or {}
+                if metadata:
+                    kwargs["_meta"] = metadata
+
+                # For call_tool method, check if we need to add progress_callback
+                if method_name == "call_tool" and progress_callback:
+                    # The call_tool method signature includes progress_callback parameter
+                    return await method(progress_callback=progress_callback, **kwargs)
+                else:
+                    return await method(**(kwargs or {}))
+            except ConnectionError:
+                # Let ConnectionError pass through for reconnection logic
+                raise
+            except ServerSessionTerminatedError:
+                # Let ServerSessionTerminatedError pass through for reconnection logic
+                raise
+            except Exception as e:
+                error_msg = (
+                    f"Failed to {method_name} '{operation_name}' on server '{server_name}': {e}"
+                )
+                logger.error(error_msg)
+                if error_factory:
+                    return error_factory(error_msg)
+                else:
+                    # Re-raise the original exception to propagate it
+                    raise e
+
+        success_flag: bool | None = None
+        result: R | None = None
+
+        # Try initial execution
+        try:
+            if self.connection_persistence:
+                server_connection = await self._persistent_connection_manager.get_server(
+                    server_name, client_session_factory=self._create_session_factory(server_name)
+                )
+                result = await try_execute(server_connection.session)
+                success_flag = True
+            else:
+                logger.debug(
+                    f"Creating temporary connection to server: {server_name}",
+                    data={
+                        "progress_action": ProgressAction.STARTING,
+                        "server_name": server_name,
+                        "agent_name": self.agent_name,
+                    },
+                )
+                async with gen_client(
+                    server_name, server_registry=self.context.server_registry
+                ) as client:
+                    result = await try_execute(client)
+                    logger.debug(
+                        f"Closing temporary connection to server: {server_name}",
+                        data={
+                            "progress_action": ProgressAction.SHUTDOWN,
+                            "server_name": server_name,
+                            "agent_name": self.agent_name,
+                        },
+                    )
+                success_flag = True
+        except ConnectionError:
+            # Server offline - attempt reconnection
+            result, success_flag = await self._handle_connection_error(
+                server_name, try_execute, error_factory
+            )
+        except ServerSessionTerminatedError as exc:
+            # Session terminated (e.g., 404 from restarted server)
+            result, success_flag = await self._handle_session_terminated(
+                server_name, try_execute, error_factory, exc
+            )
+        except Exception:
+            success_flag = False
+            raise
+        finally:
+            if success_flag is not None:
+                await self._record_server_call(server_name, operation_type, success_flag)
+
+        return result
+
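_execute_on_server is the single funnel used by every call later in this file (tools/call, prompts/get and list, resources/read and list), so reconnection handling and per-server call accounting live in one place. A rough usage sketch, assuming `aggregator` is an initialized instance of the class these methods belong to; the server name is a placeholder:

async def list_tools_via_funnel(aggregator):
    # Mirrors how the listing methods below drive the funnel.
    return await aggregator._execute_on_server(
        server_name="fetch",             # placeholder server name
        operation_type="tools/list",     # recorded for logging and call statistics
        operation_name="",
        method_name="list_tools",        # attribute looked up on the ClientSession
        method_args={},
        error_factory=lambda msg: None,  # return None on failure instead of raising
    )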
+    async def _handle_connection_error(
+        self,
+        server_name: str,
+        try_execute: Callable,
+        error_factory: Callable[[str], R] | None,
+    ) -> tuple[R | None, bool]:
+        """Handle ConnectionError by attempting to reconnect to the server."""
+        from fast_agent.ui import console
+
+        console.console.print(f"[dim yellow]MCP server {server_name} reconnecting...[/dim yellow]")
+
+        try:
+            if self.connection_persistence:
+                # Force disconnect and create fresh connection
+                server_connection = await self._persistent_connection_manager.reconnect_server(
+                    server_name,
+                    client_session_factory=self._create_session_factory(server_name),
+                )
+                result = await try_execute(server_connection.session)
+            else:
+                # For non-persistent connections, just try again
+                async with gen_client(
+                    server_name, server_registry=self.context.server_registry
+                ) as client:
+                    result = await try_execute(client)
+
+            # Success!
+            console.console.print(f"[dim green]MCP server {server_name} online[/dim green]")
+            return result, True
+
+        except ServerSessionTerminatedError:
+            # After reconnecting for connection error, we got session terminated
+            # Don't loop - just report the error
+            console.console.print(
+                f"[dim red]MCP server {server_name} session terminated after reconnect[/dim red]"
+            )
+            error_msg = (
+                f"MCP server {server_name} reconnected but session was immediately terminated. "
+                "Please check server status."
+            )
+            if error_factory:
+                return error_factory(error_msg), False
+            else:
+                raise Exception(error_msg)
+
+        except Exception as e:
+            # Reconnection failed
+            console.console.print(
+                f"[dim red]MCP server {server_name} offline - failed to reconnect: {e}[/dim red]"
+            )
+            error_msg = f"MCP server {server_name} offline - failed to reconnect"
+            if error_factory:
+                return error_factory(error_msg), False
+            else:
+                raise Exception(error_msg)
+
+    async def _handle_session_terminated(
+        self,
+        server_name: str,
+        try_execute: Callable,
+        error_factory: Callable[[str], R] | None,
+        exc: ServerSessionTerminatedError,
+    ) -> tuple[R | None, bool]:
+        """Handle ServerSessionTerminatedError by attempting to reconnect if configured."""
+        from fast_agent.ui import console
+
+        # Check if reconnect_on_disconnect is enabled for this server
+        server_config = None
+        if self.context and getattr(self.context, "server_registry", None):
+            server_config = self.context.server_registry.get_server_config(server_name)
+
+        reconnect_enabled = server_config and server_config.reconnect_on_disconnect
+
+        if not reconnect_enabled:
+            # Reconnection not enabled - inform user and fail
+            console.console.print(
+                f"[dim red]MCP server {server_name} session terminated (404)[/dim red]"
+            )
+            console.console.print(
+                "[dim]Tip: Enable 'reconnect_on_disconnect: true' in config to auto-reconnect[/dim]"
+            )
+            error_msg = f"MCP server {server_name} session terminated - reconnection not enabled"
+            if error_factory:
+                return error_factory(error_msg), False
+            else:
+                raise exc
+
+        # Attempt reconnection
+        console.console.print(
+            f"[dim yellow]MCP server {server_name} session terminated - reconnecting...[/dim yellow]"
+        )
+
+        try:
+            if self.connection_persistence:
+                server_connection = await self._persistent_connection_manager.reconnect_server(
+                    server_name,
+                    client_session_factory=self._create_session_factory(server_name),
+                )
+                result = await try_execute(server_connection.session)
+            else:
+                # For non-persistent connections, just try again
+                async with gen_client(
+                    server_name, server_registry=self.context.server_registry
+                ) as client:
+                    result = await try_execute(client)
+
+            # Success! Record the reconnection
+            await self._record_reconnect(server_name)
+            console.console.print(
+                f"[dim green]MCP server {server_name} reconnected successfully[/dim green]"
+            )
+            return result, True
+
+        except ServerSessionTerminatedError:
+            # Retry after reconnection ALSO failed with session terminated
+            # Do NOT attempt another reconnection - this would cause an infinite loop
+            console.console.print(
+                f"[dim red]MCP server {server_name} session terminated again after reconnect[/dim red]"
+            )
+            error_msg = (
+                f"MCP server {server_name} session terminated even after reconnection. "
+                "The server may be persistently rejecting this session. "
+                "Please check server status or try again later."
+            )
+            if error_factory:
+                return error_factory(error_msg), False
+            else:
+                raise Exception(error_msg)
+
+        except Exception as e:
+            # Other reconnection failure
+            console.console.print(
+                f"[dim red]MCP server {server_name} failed to reconnect: {e}[/dim red]"
+            )
+            error_msg = f"MCP server {server_name} failed to reconnect: {e}"
+            if error_factory:
+                return error_factory(error_msg), False
+            else:
+                raise Exception(error_msg)
+
+    async def _parse_resource_name(self, name: str, resource_type: str) -> tuple[str, str]:
+        """
+        Parse a possibly namespaced resource name into server name and local resource name.
+
+        Args:
+            name: The resource name, possibly namespaced
+            resource_type: Type of resource (for error messages), e.g. "tool", "prompt"
+
+        Returns:
+            Tuple of (server_name, local_resource_name)
+        """
+        # First, check if this is a direct hit in our namespaced tool map
+        # This handles both namespaced and non-namespaced direct lookups
+        if resource_type == "tool" and name in self._namespaced_tool_map:
+            namespaced_tool = self._namespaced_tool_map[name]
+            return namespaced_tool.server_name, namespaced_tool.tool.name
+
+        # Next, attempt to interpret as a namespaced name
+        if is_namespaced_name(name):
+            # Try to match against known server names, handling server names with hyphens
+            for server_name in self.server_names:
+                if name.startswith(f"{server_name}{SEP}"):
+                    local_name = name[len(server_name) + len(SEP) :]
+                    return server_name, local_name
+
+            # If no server name matched, it might be a tool with a hyphen in its name
+            # Fall through to the next checks
+
+        # For tools, search all servers for the tool by exact name match
+        if resource_type == "tool":
+            for server_name, tools in self._server_to_tool_map.items():
+                for namespaced_tool in tools:
+                    if namespaced_tool.tool.name == name:
+                        return server_name, name
+
+        # For all other resource types, use the first server
+        return (self.server_names[0] if self.server_names else None, name)
+
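Because server names may contain the same character as the namespace separator, resolution prefers an exact entry in the namespaced tool map, then a known-server prefix, and finally a plain tool-name scan across servers. A hedged illustration that reuses the module's own create_namespaced_name helper; the server and tool names are invented, and _parse_resource_name is internal to the class:

async def show_resolution(aggregator):
    # "code-search" / "find_symbol" are invented names for illustration only.
    name = create_namespaced_name("code-search", "find_symbol")
    server, local = await aggregator._parse_resource_name(name, "tool")
    # Expected: ("code-search", "find_symbol"); a bare "find_symbol" resolves
    # the same way through the per-server tool map scan.
    return server, local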
+    async def call_tool(
+        self, name: str, arguments: dict | None = None, tool_use_id: str | None = None
+    ) -> CallToolResult:
+        """
+        Call a namespaced tool, e.g., 'server_name__tool_name'.
+
+        Args:
+            name: Tool name (possibly namespaced)
+            arguments: Tool arguments
+            tool_use_id: LLM's tool use ID (for matching with stream events)
+        """
+        if not self.initialized:
+            await self.load_servers()
+
+        # Use the common parser to get server and tool name
+        server_name, local_tool_name = await self._parse_resource_name(name, "tool")
+
+        if server_name is None:
+            logger.error(f"Error: Tool '{name}' not found")
+            return CallToolResult(
+                isError=True,
+                content=[TextContent(type="text", text=f"Tool '{name}' not found")],
+            )
+
+        namespaced_tool_name = create_namespaced_name(server_name, local_tool_name)
+
+        # Check tool permission before execution
+        try:
+            permission_result = await self._permission_handler.check_permission(
+                tool_name=local_tool_name,
+                server_name=server_name,
+                arguments=arguments,
+                tool_use_id=tool_use_id,
+            )
+            if not permission_result.allowed:
+                error_msg = permission_result.error_message
+                if error_msg is None:
+                    if permission_result.remember:
+                        error_msg = (
+                            f"The user has permanently declined permission to use this tool: "
+                            f"{namespaced_tool_name}"
+                        )
+                    else:
+                        error_msg = f"The user has declined permission to use this tool: {namespaced_tool_name}"
+
+                # Notify tool handler so ACP clients can reflect the cancellation/denial
+                if hasattr(self._tool_handler, "on_tool_permission_denied"):
+                    try:
+                        await self._tool_handler.on_tool_permission_denied(
+                            local_tool_name, server_name, tool_use_id, error_msg
+                        )
+                    except Exception as e:
+                        logger.error(f"Error notifying permission denial: {e}", exc_info=True)
+                logger.info(
+                    "Tool execution denied by permission handler",
+                    data={
+                        "tool_name": local_tool_name,
+                        "server_name": server_name,
+                        "cancelled": permission_result.is_cancelled,
+                    },
+                )
+                return CallToolResult(
+                    isError=True,
+                    content=[TextContent(type="text", text=error_msg)],
+                )
+        except Exception as e:
+            logger.error(f"Error checking tool permission: {e}", exc_info=True)
+            # Fail-safe: deny on permission check error
+            return CallToolResult(
+                isError=True,
+                content=[TextContent(type="text", text=f"Permission check failed: {e}")],
+            )
+
+        logger.info(
+            "Requesting tool call",
+            data={
+                "progress_action": ProgressAction.CALLING_TOOL,
+                "tool_name": local_tool_name,
+                "server_name": server_name,
+                "agent_name": self.agent_name,
+            },
+        )
+
+        # Notify tool handler that execution is starting
+        try:
+            tool_call_id = await self._tool_handler.on_tool_start(
+                local_tool_name, server_name, arguments, tool_use_id
+            )
+        except Exception as e:
+            logger.error(f"Error in tool start handler: {e}", exc_info=True)
+            # Generate fallback ID if handler fails
+            import uuid
+
+            tool_call_id = str(uuid.uuid4())
+
+        tracer = trace.get_tracer(__name__)
+        with tracer.start_as_current_span(f"MCP Tool: {namespaced_tool_name}"):
+            trace.get_current_span().set_attribute("tool_name", local_tool_name)
+            trace.get_current_span().set_attribute("server_name", server_name)
+            trace.get_current_span().set_attribute("namespaced_tool_name", namespaced_tool_name)
+
+            # Create progress callback for this tool execution
+            progress_callback = self._create_progress_callback(
+                server_name, local_tool_name, tool_call_id
+            )
+
+            try:
+                result = await self._execute_on_server(
+                    server_name=server_name,
+                    operation_type="tools/call",
+                    operation_name=local_tool_name,
+                    method_name="call_tool",
+                    method_args={
+                        "name": local_tool_name,
+                        "arguments": arguments,
+                    },
+                    error_factory=lambda msg: CallToolResult(
+                        isError=True, content=[TextContent(type="text", text=msg)]
+                    ),
+                    progress_callback=progress_callback,
+                )
+
+                # Notify tool handler of completion
+                try:
+                    # Pass the full content blocks to the handler
+                    content = result.content if result.content else None
+
+                    logger.debug(
+                        f"Tool execution completed, notifying handler: {tool_call_id}",
+                        name="mcp_tool_complete_notify",
+                        tool_call_id=tool_call_id,
+                        has_content=content is not None,
+                        content_count=len(content) if content else 0,
+                        is_error=result.isError,
+                    )
+
+                    # If there's an error, extract error text
+                    error_text = None
+                    if result.isError and content:
+                        # Extract text from content for error message
+                        text_parts = [c.text for c in content if hasattr(c, "text") and c.text]
+                        error_text = "\n".join(text_parts) if text_parts else None
+                        content = None  # Don't send content when there's an error
+
+                    await self._tool_handler.on_tool_complete(
+                        tool_call_id, not result.isError, content, error_text
+                    )
+
+                    logger.debug(
+                        f"Tool handler notified successfully: {tool_call_id}",
+                        name="mcp_tool_complete_done",
+                    )
+                except Exception as e:
+                    logger.error(f"Error in tool complete handler: {e}", exc_info=True)
+
+                return result
+
+            except Exception as e:
+                # Notify tool handler of error
+                try:
+                    await self._tool_handler.on_tool_complete(tool_call_id, False, None, str(e))
+                except Exception as handler_error:
+                    logger.error(f"Error in tool complete handler: {handler_error}", exc_info=True)
+                raise
+
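A short usage sketch for call_tool, assuming `aggregator` is an initialized instance of this class; the tool name and arguments are placeholders. Permission checks, tool-handler notifications, progress callbacks and tracing all happen inside the call, so callers only inspect the returned CallToolResult:

from mcp.types import CallToolResult

async def demo_call_tool(aggregator) -> None:
    result: CallToolResult = await aggregator.call_tool(
        name="fetch_url",                          # bare or namespaced tool name (placeholder)
        arguments={"url": "https://example.com"},  # placeholder arguments
    )
    if result.isError:
        print("tool call failed:", result.content)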
+    async def get_prompt(
+        self,
+        prompt_name: str,
+        arguments: dict[str, str] | None = None,
+        server_name: str | None = None,
+    ) -> GetPromptResult:
+        """
+        Get a prompt from a server.
+
+        :param prompt_name: Name of the prompt, optionally namespaced with server name
+            using the format 'server_name-prompt_name'
+        :param arguments: Optional dictionary of string arguments to pass to the prompt template
+            for templating
+        :param server_name: Optional name of the server to get the prompt from. If not provided
+            and prompt_name is not namespaced, will search all servers.
+        :return: GetPromptResult containing the prompt description and messages
+            with a namespaced_name property for display purposes
+        """
+        if not self.initialized:
+            await self.load_servers()
+
+        # If server_name is explicitly provided, use it
+        if server_name:
+            local_prompt_name = prompt_name
+        # Otherwise, check if prompt_name is namespaced and validate the server exists
+        elif is_namespaced_name(prompt_name):
+            parts = prompt_name.split(SEP, 1)
+            potential_server = parts[0]
+
+            # Only treat as namespaced if the server part is valid
+            if potential_server in self.server_names:
+                server_name = potential_server
+                local_prompt_name = parts[1]
+            else:
+                # The hyphen is part of the prompt name, not a namespace separator
+                local_prompt_name = prompt_name
+        # Otherwise, use prompt_name as-is for searching
+        else:
+            local_prompt_name = prompt_name
+            # We'll search all servers below
+
+        # If we have a specific server to check
+        if server_name:
+            if not await self.validate_server(server_name):
+                logger.error(f"Error: Server '{server_name}' not found")
+                return GetPromptResult(
+                    description=f"Error: Server '{server_name}' not found",
+                    messages=[],
+                )
+
+            # Check if server supports prompts
+            if not await self.server_supports_feature(server_name, "prompts"):
+                logger.debug(f"Server '{server_name}' does not support prompts")
+                return GetPromptResult(
+                    description=f"Server '{server_name}' does not support prompts",
+                    messages=[],
+                )
+
+            # Check the prompt cache to avoid unnecessary errors
+            if local_prompt_name:
+                async with self._prompt_cache_lock:
+                    if server_name in self._prompt_cache:
+                        # Check if any prompt in the cache has this name
+                        prompt_names = [prompt.name for prompt in self._prompt_cache[server_name]]
+                        if local_prompt_name not in prompt_names:
+                            logger.debug(
+                                f"Prompt '{local_prompt_name}' not found in cache for server '{server_name}'"
+                            )
+                            return GetPromptResult(
+                                description=f"Prompt '{local_prompt_name}' not found on server '{server_name}'",
+                                messages=[],
+                            )
+
+            # Try to get the prompt from the specified server
+            method_args = {"name": local_prompt_name} if local_prompt_name else {}
+            if arguments:
+                method_args["arguments"] = arguments
+
+            result = await self._execute_on_server(
+                server_name=server_name,
+                operation_type="prompts/get",
+                operation_name=local_prompt_name or "default",
+                method_name="get_prompt",
+                method_args=method_args,
+                error_factory=lambda msg: GetPromptResult(description=msg, messages=[]),
+            )
+
+            # Add namespaced name and source server to the result
+            if result and result.messages:
+                result.namespaced_name = create_namespaced_name(server_name, local_prompt_name)
+
+                # Store the arguments in the result for display purposes
+                if arguments:
+                    result.arguments = arguments
+
+            return result
+
+        # No specific server - use the cache to find servers that have this prompt
+        logger.debug(f"Searching for prompt '{local_prompt_name}' using cache")
+
+        # Find potential servers from the cache
+        potential_servers = []
+        async with self._prompt_cache_lock:
+            for s_name, prompt_list in self._prompt_cache.items():
+                prompt_names = [prompt.name for prompt in prompt_list]
+                if local_prompt_name in prompt_names:
+                    potential_servers.append(s_name)
+
+        if potential_servers:
+            logger.debug(
+                f"Found prompt '{local_prompt_name}' in cache for servers: {potential_servers}"
+            )
+
+            # Try each server from the cache
+            for s_name in potential_servers:
+                # Check if this server supports prompts
+                capabilities = await self.get_capabilities(s_name)
+                if not capabilities or not capabilities.prompts:
+                    logger.debug(f"Server '{s_name}' does not support prompts, skipping")
+                    continue
+
+                try:
+                    method_args = {"name": local_prompt_name}
+                    if arguments:
+                        method_args["arguments"] = arguments
+
+                    result = await self._execute_on_server(
+                        server_name=s_name,
+                        operation_type="prompts/get",
+                        operation_name=local_prompt_name,
+                        method_name="get_prompt",
+                        method_args=method_args,
+                        error_factory=lambda _: None,  # Return None instead of an error
+                    )
+
+                    # If we got a successful result with messages, return it
+                    if result and result.messages:
+                        logger.debug(
+                            f"Successfully retrieved prompt '{local_prompt_name}' from server '{s_name}'"
+                        )
+                        # Add namespaced name using the actual server where found
+                        result.namespaced_name = create_namespaced_name(s_name, local_prompt_name)
+
+                        # Store the arguments in the result for display purposes
+                        if arguments:
+                            result.arguments = arguments
+
+                        return result
+
+                except Exception as e:
+                    logger.debug(f"Error retrieving prompt from server '{s_name}': {e}")
+        else:
+            logger.debug(f"Prompt '{local_prompt_name}' not found in any server's cache")
+
+        # If not in cache, perform a full search as fallback (cache might be outdated)
+        # First identify servers that support prompts
+        supported_servers = []
+        for s_name in self.server_names:
+            capabilities = await self.get_capabilities(s_name)
+            if capabilities and capabilities.prompts:
+                supported_servers.append(s_name)
+            else:
+                logger.debug(
+                    f"Server '{s_name}' does not support prompts, skipping from fallback search"
+                )
+
+        # Try all supported servers in order
+        for s_name in supported_servers:
+            try:
+                # Use a quiet approach - don't log errors if not found
+                method_args = {"name": local_prompt_name}
+                if arguments:
+                    method_args["arguments"] = arguments
+
+                result = await self._execute_on_server(
+                    server_name=s_name,
+                    operation_type="prompts/get",
+                    operation_name=local_prompt_name,
+                    method_name="get_prompt",
+                    method_args=method_args,
+                    error_factory=lambda _: None,  # Return None instead of an error
+                )
+
+                # If we got a successful result with messages, return it
+                if result and result.messages:
+                    logger.debug(
+                        f"Found prompt '{local_prompt_name}' on server '{s_name}' (not in cache)"
+                    )
+                    # Add namespaced name using the actual server where found
+                    result.namespaced_name = create_namespaced_name(s_name, local_prompt_name)
+
+                    # Store the arguments in the result for display purposes
+                    if arguments:
+                        result.arguments = arguments
+
+                    # Update the cache - need to fetch the prompt object to store in cache
+                    try:
+                        prompt_list_result = await self._execute_on_server(
+                            server_name=s_name,
+                            operation_type="prompts/list",
+                            operation_name="",
+                            method_name="list_prompts",
+                            error_factory=lambda _: None,
+                        )
+
+                        prompts = getattr(prompt_list_result, "prompts", [])
+                        matching_prompts = [p for p in prompts if p.name == local_prompt_name]
+                        if matching_prompts:
+                            async with self._prompt_cache_lock:
+                                if s_name not in self._prompt_cache:
+                                    self._prompt_cache[s_name] = []
+                                # Add if not already in the cache
+                                prompt_names_in_cache = [
+                                    p.name for p in self._prompt_cache[s_name]
+                                ]
+                                if local_prompt_name not in prompt_names_in_cache:
+                                    self._prompt_cache[s_name].append(matching_prompts[0])
+                    except Exception:
+                        # Ignore errors when updating cache
+                        pass
+
+                    return result
+
+            except Exception:
+                # Don't log errors during fallback search
+                pass
+
+        # If we get here, we couldn't find the prompt on any server
+        logger.info(f"Prompt '{local_prompt_name}' not found on any server")
+        return GetPromptResult(
+            description=f"Prompt '{local_prompt_name}' not found on any server",
+            messages=[],
+        )
+
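get_prompt resolves its target in three steps: an explicit server_name argument, then a namespaced prompt whose prefix matches a known server, then a cache-guided search across all servers with a slower fallback scan. A minimal sketch, again assuming an initialized `aggregator` and a placeholder prompt name:

async def demo_get_prompt(aggregator) -> None:
    result = await aggregator.get_prompt(
        prompt_name="summarise",            # placeholder; may also be namespaced
        arguments={"style": "bullets"},     # template arguments, echoed back on the result
        server_name=None,                   # None triggers the cache-guided search
    )
    if result.messages:
        print("served by:", result.namespaced_name)
    else:
        print(result.description)           # e.g. "... not found on any server"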
+    async def list_prompts(
+        self, server_name: str | None = None, agent_name: str | None = None
+    ) -> Mapping[str, list[Prompt]]:
+        """
+        List available prompts from one or all servers.
+
+        :param server_name: Optional server name to list prompts from. If not provided,
+            lists prompts from all servers.
+        :param agent_name: Optional agent name (ignored at this level, used by multi-agent apps)
+        :return: Dictionary mapping server names to lists of Prompt objects
+        """
+        if not self.initialized:
+            await self.load_servers()
+
+        results: dict[str, list[Prompt]] = {}
+
+        # If specific server requested
+        if server_name:
+            if server_name not in self.server_names:
+                logger.error(f"Server '{server_name}' not found")
+                return results
+
+            # Check cache first
+            async with self._prompt_cache_lock:
+                if server_name in self._prompt_cache:
+                    results[server_name] = self._prompt_cache[server_name]
+                    logger.debug(f"Returning cached prompts for server '{server_name}'")
+                    return results
+
+            # Check if server supports prompts
+            capabilities = await self.get_capabilities(server_name)
+            if not capabilities or not capabilities.prompts:
+                logger.debug(f"Server '{server_name}' does not support prompts")
+                results[server_name] = []
+                return results
+
+            # Fetch from server
+            result = await self._execute_on_server(
+                server_name=server_name,
+                operation_type="prompts/list",
+                operation_name="",
+                method_name="list_prompts",
+                error_factory=lambda _: None,
+            )
+
+            # Get prompts from result
+            prompts = getattr(result, "prompts", [])
+
+            # Update cache
+            async with self._prompt_cache_lock:
+                self._prompt_cache[server_name] = prompts
+
+            results[server_name] = prompts
+            return results
+
+        # No specific server - check if we can use the cache for all servers
+        async with self._prompt_cache_lock:
+            if all(s_name in self._prompt_cache for s_name in self.server_names):
+                for s_name, prompt_list in self._prompt_cache.items():
+                    results[s_name] = prompt_list
+                logger.debug("Returning cached prompts for all servers")
+                return results
+
+        # Identify servers that support prompts
+        supported_servers = []
+        for s_name in self.server_names:
+            capabilities = await self.get_capabilities(s_name)
+            if capabilities and capabilities.prompts:
+                supported_servers.append(s_name)
+            else:
+                logger.debug(f"Server '{s_name}' does not support prompts, skipping")
+                results[s_name] = []
+
+        # Fetch prompts from supported servers
+        for s_name in supported_servers:
+            try:
+                result = await self._execute_on_server(
+                    server_name=s_name,
+                    operation_type="prompts/list",
+                    operation_name="",
+                    method_name="list_prompts",
+                    error_factory=lambda _: None,
+                )
+
+                prompts = getattr(result, "prompts", [])
+
+                # Update cache and results
+                async with self._prompt_cache_lock:
+                    self._prompt_cache[s_name] = prompts
+
+                results[s_name] = prompts
+            except Exception as e:
+                logger.debug(f"Error fetching prompts from {s_name}: {e}")
+                results[s_name] = []
+
+        logger.debug(f"Available prompts across servers: {results}")
+        return results
+
+    async def _handle_tool_list_changed(self, server_name: str) -> None:
+        """
+        Callback handler for ToolListChangedNotification.
+        This will refresh the tools for the specified server.
+
+        Args:
+            server_name: The name of the server whose tools have changed
+        """
+        logger.info(f"Tool list changed for server '{server_name}', refreshing tools")
+
+        # Refresh the tools for this server
+        await self._refresh_server_tools(server_name)
+
+    async def _refresh_server_tools(self, server_name: str) -> None:
+        """
+        Refresh the tools for a specific server.
+
+        Args:
+            server_name: The name of the server to refresh tools for
+        """
+        if not await self.validate_server(server_name):
+            logger.error(f"Cannot refresh tools for unknown server '{server_name}'")
+            return
+
+        # Check if server supports tools capability
+        if not await self.server_supports_feature(server_name, "tools"):
+            logger.debug(f"Server '{server_name}' does not support tools")
+            return
+
+        await self.display.show_tool_update(
+            updated_server=server_name, agent_name="Tool List Change Notification"
+        )
+
+        async with self._refresh_lock:
+            try:
+                # Fetch new tools from the server using _execute_on_server to properly record stats
+                tools_result = await self._execute_on_server(
+                    server_name=server_name,
+                    operation_type="tools/list",
+                    operation_name="",
+                    method_name="list_tools",
+                    method_args={},
+                )
+                new_tools = tools_result.tools or []
+
+                # Update tool maps
+                async with self._tool_map_lock:
+                    # Remove old tools for this server
+                    old_tools = self._server_to_tool_map.get(server_name, [])
+                    for old_tool in old_tools:
+                        if old_tool.namespaced_tool_name in self._namespaced_tool_map:
+                            del self._namespaced_tool_map[old_tool.namespaced_tool_name]
+
+                    # Add new tools
+                    self._server_to_tool_map[server_name] = []
+                    for tool in new_tools:
+                        namespaced_tool_name = create_namespaced_name(server_name, tool.name)
+                        namespaced_tool = NamespacedTool(
+                            tool=tool,
+                            server_name=server_name,
+                            namespaced_tool_name=namespaced_tool_name,
+                        )
+
+                        self._namespaced_tool_map[namespaced_tool_name] = namespaced_tool
+                        self._server_to_tool_map[server_name].append(namespaced_tool)
+
+                logger.info(
+                    f"Successfully refreshed tools for server '{server_name}'",
+                    data={
+                        "progress_action": ProgressAction.UPDATED,
+                        "server_name": server_name,
+                        "agent_name": self.agent_name,
+                        "tool_count": len(new_tools),
+                    },
+                )
+            except Exception as e:
+                logger.error(f"Failed to refresh tools for server '{server_name}': {e}")
+
+    async def get_resource(
+        self, resource_uri: str, server_name: str | None = None
+    ) -> ReadResourceResult:
+        """
+        Get a resource directly from an MCP server by URI.
+        If server_name is None, will search all available servers.
+
+        Args:
+            resource_uri: URI of the resource to retrieve
+            server_name: Optional name of the MCP server to retrieve the resource from
+
+        Returns:
+            ReadResourceResult object containing the resource content
+
+        Raises:
+            ValueError: If the server doesn't exist or the resource couldn't be found
+        """
+        if not self.initialized:
+            await self.load_servers()
+
+        # If specific server requested, use only that server
+        if server_name is not None:
+            if server_name not in self.server_names:
+                raise ValueError(f"Server '{server_name}' not found")
+
+            # Get the resource from the specified server
+            return await self._get_resource_from_server(server_name, resource_uri)
+
+        # If no server specified, search all servers
+        if not self.server_names:
+            raise ValueError("No servers available to get resource from")
+
+        # Try each server in order - simply attempt to get the resource
+        for s_name in self.server_names:
+            try:
+                return await self._get_resource_from_server(s_name, resource_uri)
+            except Exception:
+                # Continue to next server if not found
+                continue
+
+        # If we reach here, we couldn't find the resource on any server
+        raise ValueError(f"Resource '{resource_uri}' not found on any server")
+
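get_resource either targets one named server or simply tries every configured server in order, raising ValueError only after all of them fail. A small sketch with an invented URI and an initialized `aggregator` instance:

async def demo_get_resource(aggregator) -> None:
    try:
        result = await aggregator.get_resource(
            "file:///tmp/example.txt",      # placeholder URI
            server_name=None,               # search every configured server
        )
        print(f"{len(result.contents)} content block(s) returned")
    except ValueError as exc:
        print("resource not found:", exc)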
+    async def _get_resource_from_server(
+        self, server_name: str, resource_uri: str
+    ) -> ReadResourceResult:
+        """
+        Internal helper method to get a resource from a specific server.
+
+        Args:
+            server_name: Name of the server to get the resource from
+            resource_uri: URI of the resource to retrieve
+
+        Returns:
+            ReadResourceResult containing the resource
+
+        Raises:
+            Exception: If the resource couldn't be found or other error occurs
+        """
+        # Check if server supports resources capability
+        if not await self.server_supports_feature(server_name, "resources"):
+            raise ValueError(f"Server '{server_name}' does not support resources")
+
+        logger.info(
+            "Requesting resource",
+            data={
+                "progress_action": ProgressAction.CALLING_TOOL,
+                "resource_uri": resource_uri,
+                "server_name": server_name,
+                "agent_name": self.agent_name,
+            },
+        )
+
+        try:
+            uri = AnyUrl(resource_uri)
+        except Exception as e:
+            raise ValueError(f"Invalid resource URI: {resource_uri}. Error: {e}")
+
+        # Use the _execute_on_server method to call read_resource on the server
+        result = await self._execute_on_server(
+            server_name=server_name,
+            operation_type="resources/read",
+            operation_name=resource_uri,
+            method_name="read_resource",
+            method_args={"uri": uri},
+            # Don't create ValueError, just return None on error so we can catch it
+            # error_factory=lambda _: None,
+        )
+
+        # If result is None, the resource was not found
+        if result is None:
+            raise ValueError(f"Resource '{resource_uri}' not found on server '{server_name}'")
+
+        return result
+
+    async def _list_resources_from_server(
+        self, server_name: str, *, check_support: bool = True
+    ) -> list[Any]:
+        """
+        Internal helper method to list resources from a specific server.
+
+        Args:
+            server_name: Name of the server whose resources to list
+            check_support: Whether to verify the server supports resources before listing
+
+        Returns:
+            A list of resources as returned by the MCP server
+        """
+        if check_support and not await self.server_supports_feature(server_name, "resources"):
+            return []
+
+        result = await self._execute_on_server(
+            server_name=server_name,
+            operation_type="resources/list",
+            operation_name="",
+            method_name="list_resources",
+            method_args={},
+        )
+
+        return getattr(result, "resources", []) or []
+
+    async def list_resources(self, server_name: str | None = None) -> dict[str, list[str]]:
+        """
+        List available resources from one or all servers.
+
+        Args:
+            server_name: Optional server name to list resources from. If not provided,
+                lists resources from all servers.
+
+        Returns:
+            Dictionary mapping server names to lists of resource URIs
+        """
+        if not self.initialized:
+            await self.load_servers()
+
+        results: dict[str, list[str]] = {}
+
+        # Get the list of servers to check
+        servers_to_check = [server_name] if server_name else self.server_names
+
+        # For each server, try to list its resources
+        for s_name in servers_to_check:
+            if s_name not in self.server_names:
+                logger.error(f"Server '{s_name}' not found")
+                continue
+
+            # Initialize empty list for this server
+            results[s_name] = []
+
+            # Check if server supports resources capability
+            if not await self.server_supports_feature(s_name, "resources"):
+                logger.debug(f"Server '{s_name}' does not support resources")
+                continue
+
+            try:
+                resources = await self._list_resources_from_server(s_name, check_support=False)
+                formatted_resources: list[str] = []
+                for resource in resources:
+                    uri = getattr(resource, "uri", None)
+                    if uri is not None:
+                        formatted_resources.append(str(uri))
+                results[s_name] = formatted_resources
+            except Exception as e:
+                logger.error(f"Error fetching resources from {s_name}: {e}")
+
+        return results
+
+    async def list_mcp_tools(self, server_name: str | None = None) -> dict[str, list[Tool]]:
+        """
+        List available tools from one or all servers, grouped by server name.
+
+        Args:
+            server_name: Optional server name to list tools from. If not provided,
+                lists tools from all servers.
+
+        Returns:
+            Dictionary mapping server names to lists of Tool objects (with original names, not namespaced)
+        """
+        if not self.initialized:
+            await self.load_servers()
+
+        results: dict[str, list[Tool]] = {}
+
+        # Get the list of servers to check
+        servers_to_check = [server_name] if server_name else self.server_names
+
+        # For each server, try to list its tools
+        for s_name in servers_to_check:
+            if s_name not in self.server_names:
+                logger.error(f"Server '{s_name}' not found")
+                continue
+
+            # Initialize empty list for this server
+            results[s_name] = []
+
+            # Check if server supports tools capability
+            if not await self.server_supports_feature(s_name, "tools"):
+                logger.debug(f"Server '{s_name}' does not support tools")
+                continue
+
+            try:
+                # Use the _execute_on_server method to call list_tools on the server
+                result = await self._execute_on_server(
+                    server_name=s_name,
+                    operation_type="tools/list",
+                    operation_name="",
+                    method_name="list_tools",
+                    method_args={},
+                )
+
+                # Get tools from result (these have original names, not namespaced)
+                tools = getattr(result, "tools", [])
+                results[s_name] = tools
+
+            except Exception as e:
+                logger.error(f"Error fetching tools from {s_name}: {e}")
+
+        return results
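The listing helpers return per-server mappings rather than flat lists, which keeps name collisions between servers visible to callers. A closing sketch that inventories an initialized `aggregator` instance:

async def inventory(aggregator) -> None:
    tools = await aggregator.list_mcp_tools()        # {server: [Tool, ...]}
    resources = await aggregator.list_resources()    # {server: ["uri", ...]}
    prompts = await aggregator.list_prompts()        # {server: [Prompt, ...]}
    for server, server_tools in tools.items():
        print(
            server,
            [tool.name for tool in server_tools],
            f"{len(resources.get(server, []))} resources",
            f"{len(prompts.get(server, []))} prompts",
        )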