fast-agent-mcp 0.2.19__py3-none-any.whl → 0.2.20__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: fast-agent-mcp
-Version: 0.2.19
+Version: 0.2.20
 Summary: Define, Prompt and Test MCP enabled Agents and Workflows
 Author-email: Shaun Smith <fastagent@llmindset.co.uk>, Sarmad Qadri <sarmad@lastmileai.dev>
 License: Apache License
@@ -1,11 +1,11 @@
-mcp_agent/__init__.py,sha256=-AIoeL4c9UAp_P4U0z-uIWTTmQWdihOis5nbQ5L_eao,1664
+mcp_agent/__init__.py,sha256=18T0AG0W9sJhTY38O9GFFOzliDhxx9p87CvRyti9zbw,1620
 mcp_agent/app.py,sha256=WRsiUdwy_9IAnaGRDwuLm7pzgQpt2wgsg10vBOpfcwM,5539
-mcp_agent/config.py,sha256=4MB8QBwGb6MPPRc85p-xdCzRloGwpWsPfEgxsoS_4N0,12159
+mcp_agent/config.py,sha256=_b5JeS2nWHScSUUTu6wYxXzdfKefoqII305ecKcw7Gs,12248
 mcp_agent/console.py,sha256=Gjf2QLFumwG1Lav__c07X_kZxxEUSkzV-1_-YbAwcwo,813
-mcp_agent/context.py,sha256=fHyDjeZpHYRBOCVY58hVcFQxybOXSJmuui2o51kLbuA,7307
+mcp_agent/context.py,sha256=Kb3s_0MolHx7AeTs1NVcY3ly-xFBd35o8LT7Srpx9is,7334
 mcp_agent/context_dependent.py,sha256=QXfhw3RaQCKfscEEBRGuZ3sdMWqkgShz2jJ1ivGGX1I,1455
 mcp_agent/event_progress.py,sha256=3dqk5Pn1tAG_m_wn4IPNwLWLyzm7CyKIidqHN-4l-JY,2736
-mcp_agent/mcp_server_registry.py,sha256=pSD3euU-Oc2LAVenqkLU7UmutAzk6A9liYVLjCj4J70,10068
+mcp_agent/mcp_server_registry.py,sha256=jUmCdfcpTitXm1-3TxpWsdRWY_8phdKNYgXwB16ZSVU,10100
 mcp_agent/progress_display.py,sha256=GeJU9VUt6qKsFVymG688hCMVCsAygG9ifiiEb5IcbN4,361
 mcp_agent/agents/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 mcp_agent/agents/agent.py,sha256=GgaUHoilgqzh9PQYr5k2WiPj4pagwicf9-ZLFsHkNNo,3848
@@ -34,7 +34,7 @@ mcp_agent/core/direct_factory.py,sha256=d96OM1yS3eIocIiaA9FQt6C2zr6VDUyCJBTZCp_D
 mcp_agent/core/enhanced_prompt.py,sha256=bzvcengS7XzHWB7NWhyxHM3hhO2HI4zP5DbGXAOw0Jw,19155
 mcp_agent/core/error_handling.py,sha256=xoyS2kLe0eG0bj2eSJCJ2odIhGUve2SbDR7jP-A-uRw,624
 mcp_agent/core/exceptions.py,sha256=ENAD_qGG67foxy6vDkIvc-lgopIUQy6O7zvNPpPXaQg,2289
-mcp_agent/core/fastagent.py,sha256=OkS1mazgMUJyA02RbW-9z2nI6XQ4kKCLfyjgAkgv0O8,22708
+mcp_agent/core/fastagent.py,sha256=WEEGz2WBAddDGNeWJwqwFIPLiQnLjaNxZLoMR0peyyU,22884
 mcp_agent/core/interactive_prompt.py,sha256=w3VyRzW4hzn0xhWZRwo_qRRAD5WVSrJYe8QDe1XZ55Y,24252
 mcp_agent/core/mcp_content.py,sha256=2D7KHY9mG_vxoDwFLKvsPQV9VRIzHItM7V-jcEnACh8,8878
 mcp_agent/core/prompt.py,sha256=qnintOUGEoDPYLI9bu9G2OlgVMCe5ZPUZilgMzydXhc,7919
@@ -48,7 +48,7 @@ mcp_agent/human_input/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3h
 mcp_agent/human_input/handler.py,sha256=s712Z5ssTCwjL9-VKoIdP5CtgMh43YvepynYisiWTTA,3144
 mcp_agent/human_input/types.py,sha256=RtWBOVzy8vnYoQrc36jRLn8z8N3C4pDPMBN5vF6qM5Y,1476
 mcp_agent/llm/__init__.py,sha256=d8zgwG-bRFuwiMNMYkywg_qytk4P8lawyld_meuUmHI,68
-mcp_agent/llm/augmented_llm.py,sha256=fP2uWIFY9qaEuY0mehti4A3NjhvGuj-TebLI0FVTbcM,23380
+mcp_agent/llm/augmented_llm.py,sha256=ASe604OhrMZ9dVoGEUEpUQaY6fFamz4gL8ttzWP_9m0,24212
 mcp_agent/llm/augmented_llm_passthrough.py,sha256=zHcctNpwg4EFJvD1x9Eg443SVX-uyzFphLikwF_yVE0,6288
 mcp_agent/llm/augmented_llm_playback.py,sha256=6L_RWIK__R67oZK7u3Xt3hWy1T2LnHXIO-efqgP3tPw,4177
 mcp_agent/llm/memory.py,sha256=HQ_c1QemOUjrkY6Z2omE6BG5fXga7y4jN7KCMOuGjPs,3345
@@ -60,11 +60,11 @@ mcp_agent/llm/sampling_converter.py,sha256=C7wPBlmT0eD90XWabC22zkxsrVHKCrjwIwg6c
 mcp_agent/llm/sampling_format_converter.py,sha256=xGz4odHpOcP7--eFaJaFtUR8eR9jxZS7MnLH6J7n0EU,1263
 mcp_agent/llm/providers/__init__.py,sha256=heVxtmuqFJOnjjxHz4bWSqTAxXoN1E8twC_gQ_yJpHk,265
 mcp_agent/llm/providers/anthropic_utils.py,sha256=vYDN5G5jKMhD2CQg8veJYab7tvvzYkDMq8M1g_hUAQg,3275
-mcp_agent/llm/providers/augmented_llm_anthropic.py,sha256=poouQMsDoZSH-5a_TL2Z2EFPSqFlsAgXuKuYcqge-Gg,15468
+mcp_agent/llm/providers/augmented_llm_anthropic.py,sha256=RQ4r5Q84VJ_dyuNo23b-EMzvq6RrpspzIQWtfVUfw6M,15468
 mcp_agent/llm/providers/augmented_llm_deepseek.py,sha256=NiZK5nv91ZS2VgVFXpbsFNFYLsLcppcbo_RstlRMd7I,1145
 mcp_agent/llm/providers/augmented_llm_generic.py,sha256=5Uq8ZBhcFuQTt7koP_5ykolREh2iWu8zKhNbh3pM9lQ,1210
 mcp_agent/llm/providers/augmented_llm_google.py,sha256=N0a2fphVtkvNYxKQpEX6J4tlO1C_mRw4sw3LBXnrOeI,1130
-mcp_agent/llm/providers/augmented_llm_openai.py,sha256=XFFoIMmXCoigC98zrR0_1c7DsyS4ep7hLvklmdn4jqU,14085
+mcp_agent/llm/providers/augmented_llm_openai.py,sha256=TumZs1y678IvyvYIehf8xSDqYWqC44dWrIbqFGtz03g,14085
 mcp_agent/llm/providers/augmented_llm_openrouter.py,sha256=V_TlVKm92GHBxYIo6gpvH_6cAaIdppS25Tz6x5T7LW0,2341
 mcp_agent/llm/providers/multipart_converter_anthropic.py,sha256=t5lHYGfFUacJldnrVtMNW-8gEMoto8Y7hJkDrnyZR-Y,16650
 mcp_agent/llm/providers/multipart_converter_openai.py,sha256=XPIulWntNpZWNGWrc240StPzok2RqrDAV7OigDwQ1uU,15850
@@ -84,8 +84,8 @@ mcp_agent/mcp/gen_client.py,sha256=fAVwFVCgSamw4PwoWOV4wrK9TABx1S_zZv8BctRyF2k,3
 mcp_agent/mcp/interfaces.py,sha256=PAou8znAl2HgtvfCpLQOZFbKra9F72OcVRfBJbboNX8,6965
 mcp_agent/mcp/logger_textio.py,sha256=vljC1BtNTCxBAda9ExqNB-FwVNUZIuJT3h1nWmCjMws,3172
 mcp_agent/mcp/mcp_agent_client_session.py,sha256=RMYNltc2pDIzxwEJSS5589RbvPO0KWV4Y3jSyAmhKf0,4181
-mcp_agent/mcp/mcp_aggregator.py,sha256=RjRcYHMKt5Wn85JWVar6X0hZLYtBeHrctiBBAK5AYcc,40584
-mcp_agent/mcp/mcp_connection_manager.py,sha256=R_oGvFkolZJ_i3SizIIlKS_NPjXscsWLSOf1x9Zu0dM,14008
+mcp_agent/mcp/mcp_aggregator.py,sha256=c3UDWsTgHMcpHPx1p-vVru4y3eVO1jBQyLzwEMH2RHU,40237
+mcp_agent/mcp/mcp_connection_manager.py,sha256=L5Dk4cyarN_v2rfktkrfZJR4xUuD3yN_hUyQnKHBWgM,14044
 mcp_agent/mcp/mime_utils.py,sha256=difepNR_gpb4MpMLkBRAoyhDk-AjXUHTiqKvT_VwS1o,1805
 mcp_agent/mcp/prompt_message_multipart.py,sha256=BDwRdNwyWHb2q2bccDb2iR2VlORqVvkvoG3xYzcMpCE,4403
 mcp_agent/mcp/prompt_render.py,sha256=k3v4BZDThGE2gGiOYVQtA6x8WTEdOuXIEnRafANhN1U,2996
@@ -102,7 +102,7 @@ mcp_agent/mcp/prompts/prompt_load.py,sha256=Zo0FogqWFEG5FtF1d9ZH-RWsCSSMsi5FIEQH
 mcp_agent/mcp/prompts/prompt_server.py,sha256=SiUR2xYfd3vEpghnYRdzz2rFEMtAbCKx2xzUXgvz1g8,18501
 mcp_agent/mcp/prompts/prompt_template.py,sha256=EejiqGkau8OizORNyKTUwUjrPof5V-hH1H_MBQoQfXw,15732
 mcp_agent/mcp_server/__init__.py,sha256=zBU51ITHIEPScd9nRafnhEddsWqXRPAAvHhkrbRI2_4,155
-mcp_agent/mcp_server/agent_server.py,sha256=yne6wdc2sHwzOGuLFikLxUnHEuPEr3l8cfPlT_jbzBI,19811
+mcp_agent/mcp_server/agent_server.py,sha256=s-nI0uTNWx4nYDDM_5GmuY5x6ZeFkymfNoCSuwuBRd8,19891
 mcp_agent/resources/examples/data-analysis/analysis-campaign.py,sha256=16gxrQ5kM8fb8tPwSCMXaitonk3PSEhz28njWwPxXrw,7269
 mcp_agent/resources/examples/data-analysis/analysis.py,sha256=M9z8Q4YC5OGuqSa5uefYmmfmctqMn-WqCSfg5LI407o,2609
 mcp_agent/resources/examples/data-analysis/fastagent.config.yaml,sha256=ini94PHyJCfgpjcjHKMMbGuHs6LIj46F1NwY0ll5HVk,1609
@@ -143,8 +143,8 @@ mcp_agent/resources/examples/workflows/parallel.py,sha256=DQ5vY5-h8Qa5QHcYjsWXhZ
 mcp_agent/resources/examples/workflows/router.py,sha256=E4x_-c3l4YW9w1i4ARcDtkdeqIdbWEGfsMzwLYpdbVc,1677
 mcp_agent/resources/examples/workflows/short_story.txt,sha256=X3y_1AyhLFN2AKzCKvucJtDgAFIJfnlbsbGZO5bBWu0,1187
 mcp_agent/ui/console_display.py,sha256=TVGDtJ37hc6UG0ei9g7ZPZZfFNeS1MYozt-Mx8HsPCk,9752
-fast_agent_mcp-0.2.19.dist-info/METADATA,sha256=slU0l8N_MAEEmGn3bla4jeHibkCq3DGTtngvii6xrro,30142
-fast_agent_mcp-0.2.19.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-fast_agent_mcp-0.2.19.dist-info/entry_points.txt,sha256=bRniFM5zk3Kix5z7scX0gf9VnmGQ2Cz_Q1Gh7Ir4W00,186
-fast_agent_mcp-0.2.19.dist-info/licenses/LICENSE,sha256=cN3FxDURL9XuzE5mhK9L2paZo82LTfjwCYVT7e3j0e4,10939
-fast_agent_mcp-0.2.19.dist-info/RECORD,,
+fast_agent_mcp-0.2.20.dist-info/METADATA,sha256=RlU6MHHAJoP4xuuA8QsIspMEZfGdSKDo76so374wzA4,30142
+fast_agent_mcp-0.2.20.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+fast_agent_mcp-0.2.20.dist-info/entry_points.txt,sha256=bRniFM5zk3Kix5z7scX0gf9VnmGQ2Cz_Q1Gh7Ir4W00,186
+fast_agent_mcp-0.2.20.dist-info/licenses/LICENSE,sha256=cN3FxDURL9XuzE5mhK9L2paZo82LTfjwCYVT7e3j0e4,10939
+fast_agent_mcp-0.2.20.dist-info/RECORD,,
mcp_agent/__init__.py CHANGED
@@ -36,7 +36,7 @@ from mcp_agent.core.request_params import RequestParams
 
 # Core protocol interfaces
 from mcp_agent.mcp.interfaces import AgentProtocol, AugmentedLLMProtocol
-from mcp_agent.mcp.mcp_aggregator import MCPAggregator, MCPCompoundServer
+from mcp_agent.mcp.mcp_aggregator import MCPAggregator
 from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart
 
 __all__ = [
@@ -58,7 +58,6 @@ __all__ = [
     "Agent",
     "AgentConfig",
     "MCPAggregator",
-    "MCPCompoundServer",
     "PromptMessageMultipart",
     # FastAgent components
     "FastAgent",
mcp_agent/config.py CHANGED
@@ -93,6 +93,9 @@ class MCPServerSettings(BaseModel):
     sampling: MCPSamplingSettings | None = None
     """Sampling settings for this Client/Server pair"""
 
+    cwd: str | None = None
+    """Working directory for the executed server command."""
+
 
 class MCPSettings(BaseModel):
     """Configuration for all MCP servers."""
mcp_agent/context.py CHANGED
@@ -4,6 +4,7 @@ A central context object to store global state that is shared across the applica
 
 import asyncio
 import concurrent.futures
+import uuid
 from typing import TYPE_CHECKING, Any, Optional, Union
 
 from mcp import ServerSession
@@ -79,12 +80,12 @@ async def configure_otel(config: "Settings") -> None:
     except: # noqa: E722
         app_version = "unknown"
 
-    # Create resource identifying this service
     resource = Resource.create(
         attributes={
             key: value
             for key, value in {
                 "service.name": service_name,
+                "service.instance.id": str(uuid.uuid4())[:6],
                 "service.version": app_version,
             }.items()
             if value is not None
mcp_agent/core/fastagent.py CHANGED
@@ -324,13 +324,16 @@ class FastAgent:
                 print("\nServer stopped by user (Ctrl+C)")
             except Exception as e:
                 if not quiet_mode:
+                    import traceback
+
+                    traceback.print_exc()
                     print(f"\nServer stopped with error: {e}")
 
             # Exit after server shutdown
             raise SystemExit(0)
 
         # Handle direct message sending if --message is provided
-        if self.args.message:
+        if hasattr(self.args, "message") and self.args.message:
             agent_name = self.args.agent
             message = self.args.message
 
@@ -356,7 +359,7 @@
                 print(f"\n\nError sending message to agent '{agent_name}': {str(e)}")
                 raise SystemExit(1)
 
-        if self.args.prompt_file:
+        if hasattr(self.args, "prompt_file") and self.args.prompt_file:
             agent_name = self.args.agent
             prompt: List[PromptMessageMultipart] = load_prompt_multipart(
                 Path(self.args.prompt_file)
mcp_agent/llm/augmented_llm.py CHANGED
@@ -2,6 +2,7 @@ from abc import abstractmethod
 from typing import (
     TYPE_CHECKING,
     Any,
+    Dict,
     Generic,
     List,
     Optional,
@@ -59,7 +60,36 @@ if TYPE_CHECKING:
 HUMAN_INPUT_TOOL_NAME = "__human_input__"
 
 
-class AugmentedLLM(ContextDependent, AugmentedLLMProtocol, Generic[MessageParamT, MessageT]):
+def deep_merge(dict1: Dict[Any, Any], dict2: Dict[Any, Any]) -> Dict[Any, Any]:
+    """
+    Recursively merges `dict2` into `dict1` in place.
+
+    If a key exists in both dictionaries and their values are dictionaries,
+    the function merges them recursively. Otherwise, the value from `dict2`
+    overwrites or is added to `dict1`.
+
+    Args:
+        dict1 (Dict): The dictionary to be updated.
+        dict2 (Dict): The dictionary to merge into `dict1`.
+
+    Returns:
+        Dict: The updated `dict1`.
+    """
+    for key in dict2:
+        if (
+            key in dict1
+            and isinstance(dict1[key], dict)
+            and isinstance(dict2[key], dict)
+        ):
+            deep_merge(dict1[key], dict2[key])
+        else:
+            dict1[key] = dict2[key]
+    return dict1
+
+
+class AugmentedLLM(
+    ContextDependent, AugmentedLLMProtocol, Generic[MessageParamT, MessageT]
+):
     # Common parameter names used across providers
     PARAM_MESSAGES = "messages"
     PARAM_MODEL = "model"
@@ -357,8 +387,10 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol, Generic[MessageParamT
     ) -> RequestParams:
         """Merge default and provided request parameters"""
 
-        merged = default_params.model_dump()
-        merged.update(provided_params.model_dump(exclude_unset=True))
+        merged = deep_merge(
+            default_params.model_dump(),
+            provided_params.model_dump(exclude_unset=True),
+        )
         final_params = RequestParams(**merged)
 
         return final_params
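The practical effect of replacing the shallow `dict.update` with `deep_merge` when combining default and provided request parameters: nested dictionaries are merged key by key instead of being replaced wholesale. A self-contained sketch; the field names are illustrative, not actual `RequestParams` fields:

```python
# Illustrative comparison of the old shallow update and the new recursive merge.
def deep_merge(dict1, dict2):
    for key in dict2:
        if key in dict1 and isinstance(dict1[key], dict) and isinstance(dict2[key], dict):
            deep_merge(dict1[key], dict2[key])
        else:
            dict1[key] = dict2[key]
    return dict1

defaults = {"model": "haiku", "metadata": {"temperature": 0.7, "top_p": 0.9}}
provided = {"metadata": {"temperature": 0.2}}

# Old behaviour: update() replaces the whole nested dict, so "top_p" is lost.
shallow = dict(defaults)
shallow.update(provided)
assert shallow["metadata"] == {"temperature": 0.2}

# New behaviour: only the overridden nested key changes, "top_p" survives.
merged = deep_merge({"model": "haiku", "metadata": {"temperature": 0.7, "top_p": 0.9}}, provided)
assert merged["metadata"] == {"temperature": 0.2, "top_p": 0.9}
```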
mcp_agent/llm/providers/augmented_llm_anthropic.py CHANGED
@@ -79,7 +79,7 @@ class AnthropicAugmentedLLM(AugmentedLLM[MessageParam, Message]):
             maxTokens=4096, # default haiku3
             systemPrompt=self.instruction,
             parallel_tool_calls=True,
-            max_iterations=10,
+            max_iterations=20,
             use_history=True,
         )
 
mcp_agent/llm/providers/augmented_llm_openai.py CHANGED
@@ -95,7 +95,7 @@ class OpenAIAugmentedLLM(AugmentedLLM[ChatCompletionMessageParam, ChatCompletion
             model=chosen_model,
             systemPrompt=self.instruction,
             parallel_tool_calls=True,
-            max_iterations=10,
+            max_iterations=20,
             use_history=True,
         )
 
mcp_agent/mcp/mcp_aggregator.py CHANGED
@@ -12,8 +12,6 @@ from typing import (
 
 from mcp import GetPromptResult, ReadResourceResult
 from mcp.client.session import ClientSession
-from mcp.server.lowlevel.server import Server
-from mcp.server.stdio import stdio_server
 from mcp.types import (
     CallToolResult,
     ListToolsResult,
@@ -43,6 +41,14 @@ SEP = "-"
 T = TypeVar("T")
 R = TypeVar("R")
 
+def create_namespaced_name(server_name: str, resource_name: str) -> str:
+    """Create a namespaced resource name from server and resource names"""
+    return f"{server_name}{SEP}{resource_name}"
+
+def is_namespaced_name(name: str) -> bool:
+    """Check if a name is already namespaced"""
+    return SEP in name
+
 
 
 class NamespacedTool(BaseModel):
@@ -231,8 +237,7 @@ class MCPAggregator(ContextDependent):
 
         async def fetch_prompts(client: ClientSession, server_name: str) -> List[Prompt]:
             # Only fetch prompts if the server supports them
-            capabilities = await self.get_capabilities(server_name)
-            if not capabilities or not capabilities.prompts:
+            if not await self.server_supports_feature(server_name, "prompts"):
                 logger.debug(f"Server '{server_name}' does not support prompts")
                 return []
 
@@ -278,7 +283,7 @@
             # Process tools
             self._server_to_tool_map[server_name] = []
             for tool in tools:
-                namespaced_tool_name = f"{server_name}{SEP}{tool.name}"
+                namespaced_tool_name = create_namespaced_name(server_name, tool.name)
                 namespaced_tool = NamespacedTool(
                     tool=tool,
                     server_name=server_name,
@@ -320,6 +325,41 @@
         except Exception as e:
             logger.debug(f"Error getting capabilities for server '{server_name}': {e}")
             return None
+
+    async def validate_server(self, server_name: str) -> bool:
+        """
+        Validate that a server exists in our server list.
+
+        Args:
+            server_name: Name of the server to validate
+
+        Returns:
+            True if the server exists, False otherwise
+        """
+        valid = server_name in self.server_names
+        if not valid:
+            logger.debug(f"Server '{server_name}' not found")
+        return valid
+
+    async def server_supports_feature(self, server_name: str, feature: str) -> bool:
+        """
+        Check if a server supports a specific feature.
+
+        Args:
+            server_name: Name of the server to check
+            feature: Feature to check for (e.g., "prompts", "resources")
+
+        Returns:
+            True if the server supports the feature, False otherwise
+        """
+        if not await self.validate_server(server_name):
+            return False
+
+        capabilities = await self.get_capabilities(server_name)
+        if not capabilities:
+            return False
+
+        return getattr(capabilities, feature, False)
 
     async def list_servers(self) -> List[str]:
         """Return the list of server names aggregated by this agent."""
@@ -420,40 +460,45 @@
         Returns:
             Tuple of (server_name, local_resource_name)
         """
-        server_name = None
-        local_name = None
-
-        if SEP in name: # Namespaced resource name
-            server_name, local_name = name.split(SEP, 1)
-        else:
-            # For tools, search all servers for the tool
-            if resource_type == "tool":
-                for _, tools in self._server_to_tool_map.items():
-                    for namespaced_tool in tools:
-                        if namespaced_tool.tool.name == name:
-                            server_name = namespaced_tool.server_name
-                            local_name = name
-                            break
-                    if server_name:
-                        break
-            # For all other resource types, use the first server
-            # (prompt resource type is specially handled in get_prompt)
-            else:
-                local_name = name
-                server_name = self.server_names[0] if self.server_names else None
-
-        return server_name, local_name
+        # First, check if this is a direct hit in our namespaced tool map
+        # This handles both namespaced and non-namespaced direct lookups
+        if resource_type == "tool" and name in self._namespaced_tool_map:
+            namespaced_tool = self._namespaced_tool_map[name]
+            return namespaced_tool.server_name, namespaced_tool.tool.name
+
+        # Next, attempt to interpret as a namespaced name
+        if is_namespaced_name(name):
+            parts = name.split(SEP, 1)
+            server_name, local_name = parts[0], parts[1]
+
+            # Validate that the parsed server actually exists
+            if server_name in self.server_names:
+                return server_name, local_name
+
+            # If the server name doesn't exist, it might be a tool with a hyphen in its name
+            # Fall through to the next checks
+
+        # For tools, search all servers for the tool by exact name match
+        if resource_type == "tool":
+            for server_name, tools in self._server_to_tool_map.items():
+                for namespaced_tool in tools:
+                    if namespaced_tool.tool.name == name:
+                        return server_name, name
+
+        # For all other resource types, use the first server
+        return (self.server_names[0] if self.server_names else None, name)
 
     async def call_tool(self, name: str, arguments: dict | None = None) -> CallToolResult:
         """
-        Call a namespaced tool, e.g., 'server_name.tool_name'.
+        Call a namespaced tool, e.g., 'server_name-tool_name'.
         """
         if not self.initialized:
             await self.load_servers()
 
+        # Use the common parser to get server and tool name
         server_name, local_tool_name = await self._parse_resource_name(name, "tool")
 
-        if server_name is None or local_tool_name is None:
+        if server_name is None:
            logger.error(f"Error: Tool '{name}' not found")
            return CallToolResult(
                isError=True,
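A simplified, synchronous sketch of the lookup order the rewritten `_parse_resource_name` now follows for tools: exact hit in the namespaced map, then a namespaced parse whose server prefix actually exists, then an exact tool-name search across servers, and finally the first server as a fallback. Server and tool names here are placeholders:

```python
# Illustrative only; the real method is async and lives on MCPAggregator.
SEP = "-"
server_names = ["weather"]
server_to_tools = {"weather": ["get-forecast"]}  # placeholder data
namespaced_tool_map = {"weather-get-forecast": ("weather", "get-forecast")}

def parse_tool_name(name: str):
    # 1. Exact hit on the fully namespaced key
    if name in namespaced_tool_map:
        return namespaced_tool_map[name]
    # 2. Looks namespaced and the prefix is a known server
    if SEP in name:
        server, local = name.split(SEP, 1)
        if server in server_names:
            return server, local
        # otherwise the hyphen is part of the tool name; fall through
    # 3. Exact tool-name match across all servers
    for server, tools in server_to_tools.items():
        if name in tools:
            return server, name
    # 4. Fall back to the first server
    return (server_names[0] if server_names else None, name)

assert parse_tool_name("weather-get-forecast") == ("weather", "get-forecast")
assert parse_tool_name("get-forecast") == ("weather", "get-forecast")
```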
@@ -506,27 +551,37 @@
         if not self.initialized:
             await self.load_servers()
 
-        # Handle the case where prompt_name is None
-        if SEP in prompt_name and server_name is None:
-            server_name, local_prompt_name = prompt_name.split(SEP, 1)
-            namespaced_name = prompt_name # Already namespaced
-        # Plain prompt name - use provided server or search
+        # If server_name is explicitly provided, use it
+        if server_name:
+            local_prompt_name = prompt_name
+        # Otherwise, check if prompt_name is namespaced and validate the server exists
+        elif is_namespaced_name(prompt_name):
+            parts = prompt_name.split(SEP, 1)
+            potential_server = parts[0]
+
+            # Only treat as namespaced if the server part is valid
+            if potential_server in self.server_names:
+                server_name = potential_server
+                local_prompt_name = parts[1]
+            else:
+                # The hyphen is part of the prompt name, not a namespace separator
+                local_prompt_name = prompt_name
+        # Otherwise, use prompt_name as-is for searching
         else:
             local_prompt_name = prompt_name
-            namespaced_name = None # Will be set when server is found
-
+            # We'll search all servers below
+
 
         # If we have a specific server to check
         if server_name:
-            if server_name not in self.server_names:
+            if not await self.validate_server(server_name):
                 logger.error(f"Error: Server '{server_name}' not found")
                 return GetPromptResult(
                     description=f"Error: Server '{server_name}' not found",
                     messages=[],
                 )
-
+
             # Check if server supports prompts
-            capabilities = await self.get_capabilities(server_name)
-            if not capabilities or not capabilities.prompts:
+            if not await self.server_supports_feature(server_name, "prompts"):
                 logger.debug(f"Server '{server_name}' does not support prompts")
                 return GetPromptResult(
                     description=f"Server '{server_name}' does not support prompts",
@@ -564,7 +619,7 @@
 
             # Add namespaced name and source server to the result
             if result and result.messages:
-                result.namespaced_name = namespaced_name or f"{server_name}{SEP}{local_prompt_name}"
+                result.namespaced_name = create_namespaced_name(server_name, local_prompt_name)
 
             # Store the arguments in the result for display purposes
             if arguments:
@@ -616,7 +671,7 @@
                     f"Successfully retrieved prompt '{local_prompt_name}' from server '{s_name}'"
                 )
                 # Add namespaced name using the actual server where found
-                result.namespaced_name = f"{s_name}{SEP}{local_prompt_name}"
+                result.namespaced_name = create_namespaced_name(s_name, local_prompt_name)
 
                 # Store the arguments in the result for display purposes
                 if arguments:
@@ -664,7 +719,7 @@
                     f"Found prompt '{local_prompt_name}' on server '{s_name}' (not in cache)"
                 )
                 # Add namespaced name using the actual server where found
-                result.namespaced_name = f"{s_name}{SEP}{local_prompt_name}"
+                result.namespaced_name = create_namespaced_name(s_name, local_prompt_name)
 
                 # Store the arguments in the result for display purposes
                 if arguments:
@@ -942,68 +997,3 @@
                 logger.error(f"Error fetching resources from {s_name}: {e}")
 
         return results
-
-
-class MCPCompoundServer(Server):
-    """
-    A compound server (server-of-servers) that aggregates multiple MCP servers and is itself an MCP server
-    """
-
-    def __init__(self, server_names: List[str], name: str = "MCPCompoundServer") -> None:
-        super().__init__(name)
-        self.aggregator = MCPAggregator(server_names)
-
-        # Register handlers for tools, prompts, and resources
-        self.list_tools()(self._list_tools)
-        self.call_tool()(self._call_tool)
-        self.get_prompt()(self._get_prompt)
-        self.list_prompts()(self._list_prompts)
-
-    async def _list_tools(self) -> List[Tool]:
-        """List all tools aggregated from connected MCP servers."""
-        tools_result = await self.aggregator.list_tools()
-        return tools_result.tools
-
-    async def _call_tool(self, name: str, arguments: dict | None = None) -> CallToolResult:
-        """Call a specific tool from the aggregated servers."""
-        try:
-            result = await self.aggregator.call_tool(name=name, arguments=arguments)
-            return result.content
-        except Exception as e:
-            return CallToolResult(
-                isError=True,
-                content=[TextContent(type="text", text=f"Error calling tool: {e}")],
-            )
-
-    async def _get_prompt(
-        self, name: str = None, arguments: dict[str, str] = None
-    ) -> GetPromptResult:
-        """
-        Get a prompt from the aggregated servers.
-
-        Args:
-            name: Name of the prompt to get (optionally namespaced)
-            arguments: Optional dictionary of string arguments for prompt templating
-        """
-        try:
-            result = await self.aggregator.get_prompt(prompt_name=name, arguments=arguments)
-            return result
-        except Exception as e:
-            return GetPromptResult(description=f"Error getting prompt: {e}", messages=[])
-
-    async def _list_prompts(self, server_name: str = None) -> Dict[str, List[Prompt]]:
-        """List available prompts from the aggregated servers."""
-        try:
-            return await self.aggregator.list_prompts(server_name=server_name)
-        except Exception as e:
-            logger.error(f"Error listing prompts: {e}")
-            return {}
-
-    async def run_stdio_async(self) -> None:
-        """Run the server using stdio transport."""
-        async with stdio_server() as (read_stream, write_stream):
-            await self.run(
-                read_stream=read_stream,
-                write_stream=write_stream,
-                initialization_options=self.create_initialization_options(),
-            )
mcp_agent/mcp/mcp_connection_manager.py CHANGED
@@ -264,6 +264,7 @@ class MCPConnectionManager(ContextDependent):
                 command=config.command,
                 args=config.args if config.args is not None else [],
                 env={**get_default_environment(), **(config.env or {})},
+                cwd=config.cwd,
             )
             # Create custom error handler to ensure all output is captured
             error_handler = get_stderr_handler(server_name)
mcp_agent/mcp_server/agent_server.py CHANGED
@@ -110,7 +110,10 @@ class AgentMCPServer:
 
         # Register handlers for SIGINT (Ctrl+C) and SIGTERM
         for sig, is_term in [(signal.SIGINT, False), (signal.SIGTERM, True)]:
-            loop.add_signal_handler(sig, lambda term=is_term: handle_signal(term))
+            import platform
+
+            if platform.system() != "Windows":
+                loop.add_signal_handler(sig, lambda term=is_term: handle_signal(term))
 
         logger.debug("Signal handlers installed")
 
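For background, asyncio's `loop.add_signal_handler` is not implemented on Windows event loops (it raises `NotImplementedError`), which is what the new platform guard avoids. A standalone sketch of the same pattern; the function name and callback are illustrative:

```python
# Illustrative only: install POSIX signal handlers where asyncio supports them.
import asyncio
import platform
import signal


def install_signal_handlers(loop: asyncio.AbstractEventLoop, handle_signal) -> None:
    if platform.system() == "Windows":
        return  # add_signal_handler is unavailable; rely on KeyboardInterrupt instead
    for sig, is_term in [(signal.SIGINT, False), (signal.SIGTERM, True)]:
        loop.add_signal_handler(sig, lambda term=is_term: handle_signal(term))
```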
mcp_agent/mcp_server_registry.py CHANGED
@@ -128,6 +128,7 @@ class ServerRegistry:
                 command=config.command,
                 args=config.args,
                 env={**get_default_environment(), **(config.env or {})},
+                cwd=config.cwd,
             )
 
             # Create a stderr handler that logs to our application logger