fast-agent-mcp 0.0.7__py3-none-any.whl → 0.0.9__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {fast_agent_mcp-0.0.7.dist-info → fast_agent_mcp-0.0.9.dist-info}/METADATA +24 -57
- {fast_agent_mcp-0.0.7.dist-info → fast_agent_mcp-0.0.9.dist-info}/RECORD +31 -24
- mcp_agent/agents/agent.py +8 -4
- mcp_agent/app.py +5 -1
- mcp_agent/cli/commands/bootstrap.py +183 -121
- mcp_agent/cli/commands/setup.py +20 -16
- mcp_agent/core/__init__.py +0 -0
- mcp_agent/core/exceptions.py +47 -0
- mcp_agent/core/fastagent.py +250 -124
- mcp_agent/core/server_validation.py +44 -0
- mcp_agent/event_progress.py +4 -1
- mcp_agent/logging/rich_progress.py +11 -0
- mcp_agent/mcp/mcp_connection_manager.py +11 -2
- mcp_agent/resources/examples/data-analysis/analysis.py +35 -0
- mcp_agent/resources/examples/data-analysis/fastagent.config.yaml +22 -0
- mcp_agent/resources/examples/data-analysis/mount-point/WA_Fn-UseC_-HR-Employee-Attrition.csv +1471 -0
- mcp_agent/resources/examples/workflows/chaining.py +31 -0
- mcp_agent/resources/examples/{decorator/optimizer.py → workflows/evaluator.py} +7 -10
- mcp_agent/resources/examples/workflows/fastagent.config.yaml +9 -0
- mcp_agent/resources/examples/workflows/human_input.py +25 -0
- mcp_agent/resources/examples/{decorator → workflows}/orchestrator.py +20 -17
- mcp_agent/resources/examples/{decorator → workflows}/parallel.py +14 -18
- mcp_agent/resources/examples/{decorator → workflows}/router.py +9 -10
- mcp_agent/workflows/llm/augmented_llm_anthropic.py +54 -14
- mcp_agent/workflows/llm/augmented_llm_openai.py +38 -9
- mcp_agent/workflows/orchestrator/orchestrator.py +53 -108
- mcp_agent/resources/examples/decorator/main.py +0 -26
- mcp_agent/resources/examples/decorator/tiny.py +0 -22
- {fast_agent_mcp-0.0.7.dist-info → fast_agent_mcp-0.0.9.dist-info}/WHEEL +0 -0
- {fast_agent_mcp-0.0.7.dist-info → fast_agent_mcp-0.0.9.dist-info}/entry_points.txt +0 -0
- {fast_agent_mcp-0.0.7.dist-info → fast_agent_mcp-0.0.9.dist-info}/licenses/LICENSE +0 -0
- /mcp_agent/resources/examples/mcp_researcher/{main-evalopt.py → researcher-eval.py} +0 -0
- /mcp_agent/resources/examples/mcp_researcher/{main.py → researcher.py} +0 -0
mcp_agent/core/fastagent.py  CHANGED

@@ -9,6 +9,13 @@ import yaml
 import argparse
 from contextlib import asynccontextmanager
 
+from mcp_agent.core.exceptions import (
+    AgentConfigError,
+    ServerConfigError,
+    ProviderKeyError,
+    ServerInitializationError,
+)
+
 from mcp_agent.app import MCPApp
 from mcp_agent.agents.agent import Agent, AgentConfig
 from mcp_agent.context_dependent import ContextDependent
@@ -24,10 +31,10 @@ from rich.prompt import Prompt
 from rich import print
 from mcp_agent.progress_display import progress_display
 from mcp_agent.workflows.llm.model_factory import ModelFactory
-from mcp_agent.workflows.llm.augmented_llm import RequestParams
-
-import readline  # noqa: F401
+from mcp_agent.workflows.llm.augmented_llm import AugmentedLLM, RequestParams
 
+# TODO -- resintate once Windows&Python 3.13 platform issues are fixed
+# import readline  # noqa: F401
 
 # Type aliases for better readability
 WorkflowType: TypeAlias = Union[
@@ -304,6 +311,98 @@ class FastAgent(ContextDependent):
             with open(self.config_path) as f:
                 self.config = yaml.safe_load(f)
 
+    def _validate_server_references(self) -> None:
+        """
+        Validate that all server references in agent configurations exist in config.
+        Raises ServerConfigError if any referenced servers are not defined.
+        """
+        if not self.context.config.mcp or not self.context.config.mcp.servers:
+            available_servers = set()
+        else:
+            available_servers = set(self.context.config.mcp.servers.keys())
+
+        # Check each agent's server references
+        for name, agent_data in self.agents.items():
+            config = agent_data["config"]
+            if config.servers:
+                missing = [s for s in config.servers if s not in available_servers]
+                if missing:
+                    raise ServerConfigError(
+                        f"Missing server configuration for agent '{name}'",
+                        f"The following servers are referenced but not defined in config: {', '.join(missing)}",
+                    )
+
+    def _validate_workflow_references(self) -> None:
+        """
+        Validate that all workflow references point to valid agents/workflows.
+        Also validates that referenced agents have required configuration.
+        Raises AgentConfigError if any validation fails.
+        """
+        available_components = set(self.agents.keys())
+
+        for name, agent_data in self.agents.items():
+            agent_type = agent_data["type"]
+
+            if agent_type == AgentType.PARALLEL.value:
+                # Check fan_in exists
+                fan_in = agent_data["fan_in"]
+                if fan_in not in available_components:
+                    raise AgentConfigError(
+                        f"Parallel workflow '{name}' references non-existent fan_in component: {fan_in}"
+                    )
+
+                # Check fan_out agents exist
+                fan_out = agent_data["fan_out"]
+                missing = [a for a in fan_out if a not in available_components]
+                if missing:
+                    raise AgentConfigError(
+                        f"Parallel workflow '{name}' references non-existent fan_out components: {', '.join(missing)}"
+                    )
+
+            elif agent_type == AgentType.ORCHESTRATOR.value:
+                # Check all child agents exist and are properly configured
+                child_agents = agent_data["child_agents"]
+                missing = [a for a in child_agents if a not in available_components]
+                if missing:
+                    raise AgentConfigError(
+                        f"Orchestrator '{name}' references non-existent agents: {', '.join(missing)}"
+                    )
+
+                # Validate child agents have required LLM configuration
+                for agent_name in child_agents:
+                    child_data = self.agents[agent_name]
+                    if child_data["type"] == AgentType.BASIC.value:
+                        # For basic agents, we'll validate LLM config during creation
+                        continue
+                    elif not isinstance(child_data["func"], AugmentedLLM):
+                        raise AgentConfigError(
+                            f"Agent '{agent_name}' used by orchestrator '{name}' lacks LLM capability",
+                            "All agents used by orchestrators must be LLM-capable",
+                        )
+
+            elif agent_type == AgentType.ROUTER.value:
+                # Check all referenced agents exist
+                router_agents = agent_data["agents"]
+                missing = [a for a in router_agents if a not in available_components]
+                if missing:
+                    raise AgentConfigError(
+                        f"Router '{name}' references non-existent agents: {', '.join(missing)}"
+                    )
+
+            elif agent_type == AgentType.EVALUATOR_OPTIMIZER.value:
+                # Check both evaluator and optimizer exist
+                evaluator = agent_data["evaluator"]
+                optimizer = agent_data["optimizer"]
+                missing = []
+                if evaluator not in available_components:
+                    missing.append(f"evaluator: {evaluator}")
+                if optimizer not in available_components:
+                    missing.append(f"optimizer: {optimizer}")
+                if missing:
+                    raise AgentConfigError(
+                        f"Evaluator-Optimizer '{name}' references non-existent components: {', '.join(missing)}"
+                    )
+
     def _get_model_factory(
         self,
         model: Optional[str] = None,
@@ -347,9 +446,10 @@ class FastAgent(ContextDependent):
         *,
         instruction: str = "You are a helpful agent.",
         servers: List[str] = [],
-        model:
+        model: str | None = None,
         use_history: bool = True,
         request_params: Optional[Dict] = None,
+        human_input: bool = False,
     ) -> Callable:
         """
         Decorator to create and register an agent with configuration.
@@ -362,9 +462,9 @@ class FastAgent(ContextDependent):
             use_history: Whether to maintain conversation history
             request_params: Additional request parameters for the LLM
         """
-        # print(f"\nDecorating agent {name} with model={model}")
 
         def decorator(func: Callable) -> Callable:
+            # Create base request params
             base_params = RequestParams(
                 use_history=use_history,
                 model=model,  # Include model in initial params
@@ -380,6 +480,7 @@ class FastAgent(ContextDependent):
                 model=model,  # Highest precedence
                 use_history=use_history,
                 default_request_params=base_params,
+                human_input=human_input,
             )
 
             # Store the agent configuration
@@ -389,21 +490,20 @@ class FastAgent(ContextDependent):
                 "func": func,
             }
 
-
-                return await func(*args, **kwargs)
-
-            return wrapper
+            return func  # Don't wrap the function, just return it
 
         return decorator
 
     def orchestrator(
         self,
-        name: str,
-
+        name: str = "Orchestrator",
+        *,
+        instruction: str | None = None,
         agents: List[str],
         model: str | None = None,
-        use_history: bool =
+        use_history: bool = False,
         request_params: Optional[Dict] = None,
+        human_input: bool = False,
     ) -> Callable:
         """
         Decorator to create and register an orchestrator.
@@ -413,14 +513,14 @@ class FastAgent(ContextDependent):
             instruction: Base instruction for the orchestrator
             agents: List of agent names this orchestrator can use
             model: Model specification string (highest precedence)
-            use_history: Whether to maintain conversation history
+            use_history: Whether to maintain conversation history (forced false)
             request_params: Additional request parameters for the LLM
         """
 
         def decorator(func: Callable) -> Callable:
             # Create base request params
             base_params = RequestParams(
-                use_history=use_history, **(request_params or {})
+                use_history=use_history, model=model, **(request_params or {})
             )
 
             # Create agent configuration
@@ -431,6 +531,7 @@ class FastAgent(ContextDependent):
                 model=model,  # Highest precedence
                 use_history=use_history,
                 default_request_params=base_params,
+                human_input=human_input,
             )
 
             # Store the orchestrator configuration
@@ -441,10 +542,7 @@ class FastAgent(ContextDependent):
                 "func": func,
             }
 
-
-                return await func(*args, **kwargs)
-
-            return wrapper
+            return func
 
         return decorator
 
@@ -495,10 +593,7 @@ class FastAgent(ContextDependent):
                 "func": func,
             }
 
-
-                return await func(*args, **kwargs)
-
-            return wrapper
+            return func
 
         return decorator
 
@@ -557,10 +652,11 @@ class FastAgent(ContextDependent):
         self,
         name: str,
         agents: List[str],
-        servers: List[str] = [],
+        # servers: List[str] = [],
         model: Optional[str] = None,
         use_history: bool = True,
         request_params: Optional[Dict] = None,
+        human_input: bool = False,
     ) -> Callable:
         """
         Decorator to create and register a router.
@@ -584,10 +680,11 @@ class FastAgent(ContextDependent):
             config = AgentConfig(
                 name=name,
                 instruction="",  # Router uses its own routing instruction
-                servers=servers
+                servers=[],  # , servers are not supported now
                 model=model,
                 use_history=use_history,
                 default_request_params=base_params,
+                human_input=human_input,
             )
 
             # Store the router configuration
@@ -639,7 +736,7 @@ class FastAgent(ContextDependent):
 
         return active_agents
 
-    def _create_orchestrators(
+    async def _create_orchestrators(
         self, agent_app: MCPApp, active_agents: ProxyDict
     ) -> ProxyDict:
         """
@@ -657,44 +754,57 @@ class FastAgent(ContextDependent):
             if agent_data["type"] == AgentType.ORCHESTRATOR.value:
                 config = agent_data["config"]
 
-                #
-
-
-
-
-                # Start with existing params if available
-                if config.default_request_params:
-                    base_params = config.default_request_params.model_copy()
-                    # Update with orchestrator-specific settings
-                    base_params.use_history = config.use_history
-                    base_params.model = resolved_model
-                else:
-                    base_params = RequestParams(
-                        use_history=config.use_history, model=resolved_model
-                    )
-
-                llm_factory = self._get_model_factory(
-                    model=config.model,  # Use original model string for factory creation
-                    request_params=base_params,
+                # Get base params configured with model settings
+                base_params = (
+                    config.default_request_params.model_copy()
+                    if config.default_request_params
+                    else RequestParams()
                 )
+                base_params.use_history = False  # Force no history for orchestrator
 
-                # Get the child agents - need to unwrap proxies
+                # Get the child agents - need to unwrap proxies and validate LLM config
                 child_agents = []
                 for agent_name in agent_data["child_agents"]:
                     proxy = active_agents[agent_name]
-
-
-
-
-
+                    instance = self._unwrap_proxy(proxy)
+                    # Validate basic agents have LLM
+                    if isinstance(instance, Agent):
+                        if not hasattr(instance, "_llm") or not instance._llm:
+                            raise AgentConfigError(
+                                f"Agent '{agent_name}' used by orchestrator '{name}' missing LLM configuration",
+                                "All agents must be fully configured with LLMs before being used in an orchestrator",
+                            )
+                    child_agents.append(instance)
+
+                # Create a properly configured planner agent
+                planner_config = AgentConfig(
+                    name=f"{name}",  # Use orchestrator name as prefix
+                    instruction=config.instruction
+                    or """
+                    You are an expert planner. Given an objective task and a list of MCP servers (which are collections of tools)
+                    or Agents (which are collections of servers), your job is to break down the objective into a series of steps,
+                    which can be performed by LLMs with access to the servers or agents.
+                    """,
+                    servers=[],  # Planner doesn't need server access
+                    model=config.model,  # Use same model as orchestrator
+                    default_request_params=base_params,
+                )
+                planner_agent = Agent(config=planner_config, context=agent_app.context)
+                planner_factory = self._get_model_factory(
+                    model=config.model,
+                    request_params=config.default_request_params,
+                )
 
+                async with planner_agent:
+                    planner = await planner_agent.attach_llm(planner_factory)
+
+                # Create the orchestrator with pre-configured planner
                 orchestrator = Orchestrator(
                     name=config.name,
-
+                    planner=planner,  # Pass pre-configured planner
                     available_agents=child_agents,
                     context=agent_app.context,
-
-                    request_params=base_params,  # Use our base params that include model
+                    request_params=planner.default_request_params,  # Base params already include model settings
                     plan_type="full",
                 )
 
@@ -702,6 +812,7 @@ class FastAgent(ContextDependent):
                 orchestrators[name] = self._create_proxy(
                     name, orchestrator, AgentType.ORCHESTRATOR.value
                 )
+
         return orchestrators
 
     async def _create_evaluator_optimizers(
@@ -796,7 +907,7 @@ class FastAgent(ContextDependent):
 
         return deps
 
-    def _create_parallel_agents(
+    async def _create_parallel_agents(
         self, agent_app: MCPApp, active_agents: ProxyDict
     ) -> ProxyDict:
         """
@@ -864,7 +975,9 @@ class FastAgent(ContextDependent):
 
         return parallel_agents
 
-    def _create_routers(
+    async def _create_routers(
+        self, agent_app: MCPApp, active_agents: ProxyDict
+    ) -> ProxyDict:
         """
         Create router agents.
 
@@ -937,77 +1050,90 @@ class FastAgent(ContextDependent):
     async def run(self):
         """
         Context manager for running the application.
-
-
-
-
-
-
-
-
-
-
-
-            agent_app
-
-
+        Performs validation and provides user-friendly error messages.
+        """
+        active_agents = {}
+        had_error = False
+        try:
+            async with self.app.run() as agent_app:
+                # Pre-flight validation
+                self._validate_server_references()
+                self._validate_workflow_references()
+
+                # Create all types of agents in dependency order
+                active_agents = await self._create_basic_agents(agent_app)
+                orchestrators = await self._create_orchestrators(
+                    agent_app, active_agents
+                )
+                parallel_agents = await self._create_parallel_agents(
+                    agent_app, active_agents
+                )
+                evaluator_optimizers = await self._create_evaluator_optimizers(
+                    agent_app, active_agents
+                )
+                routers = await self._create_routers(agent_app, active_agents)
 
-
-
-
-
-
+                # Merge all agents into active_agents
+                active_agents.update(orchestrators)
+                active_agents.update(parallel_agents)
+                active_agents.update(evaluator_optimizers)
+                active_agents.update(routers)
 
-
-
-            try:
+                # Create wrapper with all agents
+                wrapper = AgentApp(agent_app, active_agents)
                 yield wrapper
-
-
+
+        except ServerConfigError as e:
+            had_error = True
+            print("\n[bold red]Server Configuration Error:")
+            print(e.message)
+            if e.details:
+                print("\nDetails:")
+                print(e.details)
+            print(
+                "\nPlease check your 'fastagent.config.yaml' configuration file and add the missing server definitions."
+            )
+            raise SystemExit(1)
+
+        except ProviderKeyError as e:
+            had_error = True
+            print("\n[bold red]Provider Configuration Error:")
+            print(e.message)
+            if e.details:
+                print("\nDetails:")
+                print(e.details)
+            print(
+                "\nPlease check your 'fastagent.secrets.yaml' configuration file and ensure all required API keys are set."
+            )
+            raise SystemExit(1)
+
+        except AgentConfigError as e:
+            had_error = True
+            print("\n[bold red]Workflow or Agent Configuration Error:")
+            print(e.message)
+            if e.details:
+                print("\nDetails:")
+                print(e.details)
+            print(
+                "\nPlease check your agent definition and ensure names and references are correct."
+            )
+            raise SystemExit(1)
+
+        except ServerInitializationError as e:
+            had_error = True
+            print("\n[bold red]Server Startup Error:")
+            print(e.message)
+            if e.details:
+                print("\nDetails:")
+                print(e.details)
+            print("\nThere was an error starting up the MCP Server.")
+            raise SystemExit(1)
+        finally:
+            # Clean up any active agents without re-raising errors
+            if active_agents and not had_error:
                 for name, proxy in active_agents.items():
                     if isinstance(proxy, LLMAgentProxy):
-
-
-
-
-        # Send a message to a specific agent and get the response.
-
-        # Args:
-        #     agent_name: Name of the target agent
-        #     message: Message to send
-
-        # Returns:
-        #     Agent's response
-
-        # Raises:
-        #     ValueError: If agent not found
-        #     RuntimeError: If agent has no LLM attached
-        # """
-        # if agent_name not in self.agents:
-        #     raise ValueError(f"Agent {agent_name} not found")
-
-        # agent = self.agents[agent_name]
-
-        # # Special handling for routers
-        # if isinstance(agent._llm, LLMRouter):
-        #     # Route the message and get results
-        #     results = await agent._llm.route(message)
-        #     if not results:
-        #         return "No appropriate route found for the request."
-
-        #     # Get the top result
-        #     top_result = results[0]
-        #     if isinstance(top_result.result, Agent):
-        #         # Agent route - delegate to the agent
-        #         return await top_result.result._llm.generate_str(message)
-        #     elif isinstance(top_result.result, str):
-        #         # Server route - use the router directly
-        #         return "Tool call requested by router - not yet supported"
-        #     else:
-        #         return f"Routed to: {top_result.result} ({top_result.confidence}): {top_result.reasoning}"
-
-        # # Normal agent handling
-        # if not hasattr(agent, "_llm") or agent._llm is None:
-        #     raise RuntimeError(f"Agent {agent_name} has no LLM attached")
-
-        # return await agent._llm.generate_str(message)
+                        try:
+                            await proxy._agent.__aexit__(None, None, None)
+                        except Exception:
+                            pass  # Ignore cleanup errors
mcp_agent/core/server_validation.py  ADDED

@@ -0,0 +1,44 @@
+"""FastAgent validation methods."""
+
+from mcp_agent.core.exceptions import ServerConfigError
+
+
+def _validate_server_references(self) -> None:
+    """
+    Validate that all server references in agent configurations exist in config.
+    Raises ServerConfigError if any referenced servers are not defined.
+    """
+    # First check if any agents need servers
+    agents_needing_servers = {
+        name: agent_data["config"].servers
+        for name, agent_data in self.agents.items()
+        if agent_data["config"].servers
+    }
+
+    if not agents_needing_servers:
+        return  # No validation needed
+
+    # If we need servers, verify MCP config exists
+    if not hasattr(self.context.config, "mcp"):
+        raise ServerConfigError(
+            "MCP configuration missing",
+            "Agents require server access but no MCP configuration found.\n"
+            "Add an 'mcp' section to your configuration file.",
+        )
+
+    if not self.context.config.mcp.servers:
+        raise ServerConfigError(
+            "No MCP servers configured",
+            "Agents require server access but no servers are defined.\n"
+            "Add server definitions under mcp.servers in your configuration file.",
+        )
+
+    # Now check each agent's servers exist
+    available_servers = set(self.context.config.mcp.servers.keys())
+    for name, servers in agents_needing_servers.items():
+        missing = [s for s in servers if s not in available_servers]
+        if missing:
+            raise ServerConfigError(
+                f"Missing server configuration for agent '{name}'",
+                f"The following servers are referenced but not defined in config: {', '.join(missing)}",
+            )
mcp_agent/event_progress.py  CHANGED

@@ -19,6 +19,7 @@ class ProgressAction(str, Enum):
     SHUTDOWN = "Shutdown"
     AGGREGATOR_INITIALIZED = "Running"
     ROUTING = "Routing"
+    FATAL_ERROR = "Error"
 
 
 @dataclass
@@ -61,7 +62,9 @@ def convert_log_event(event: Event) -> Optional[ProgressEvent]:
     agent_name = event_data.get("agent_name")
     target = agent_name
     details = ""
-    if
+    if progress_action == ProgressAction.FATAL_ERROR:
+        details = event_data.get("error_message", "An error occurred")
+    elif "mcp_aggregator" in namespace:
         server_name = event_data.get("server_name", "")
         tool_name = event_data.get("tool_name")
         if tool_name:
mcp_agent/logging/rich_progress.py  CHANGED

@@ -76,6 +76,7 @@ class RichProgressDisplay:
             ProgressAction.FINISHED: "black on green",
             ProgressAction.SHUTDOWN: "black on red",
             ProgressAction.AGGREGATOR_INITIALIZED: "bold green",
+            ProgressAction.FATAL_ERROR: "black on red",
         }.get(action, "white")
 
     def update(self, event: ProgressEvent) -> None:
@@ -116,5 +117,15 @@ class RichProgressDisplay:
            for task in self._progress.tasks:
                if task.id != task_id:
                    task.visible = False
+        elif event.action == ProgressAction.FATAL_ERROR:
+            self._progress.update(
+                task_id,
+                completed=100,
+                total=100,
+                details=f" / {event.details}",
+            )
+            for task in self._progress.tasks:
+                if task.id != task_id:
+                    task.visible = False
         else:
             self._progress.reset(task_id)
mcp_agent/mcp/mcp_connection_manager.py  CHANGED

@@ -24,6 +24,8 @@ from mcp.client.sse import sse_client
 from mcp.types import JSONRPCMessage
 
 from mcp_agent.config import MCPServerSettings
+from mcp_agent.core.exceptions import ServerInitializationError
+from mcp_agent.event_progress import ProgressAction
 from mcp_agent.logging.logger import get_logger
 from mcp_agent.mcp.stdio import stdio_client_with_rich_stderr
 from mcp_agent.context_dependent import ContextDependent
@@ -144,7 +146,9 @@ async def _server_lifecycle_task(server_conn: ServerConnection) -> None:
         transport_context = server_conn._transport_context_factory()
 
         async with transport_context as (read_stream, write_stream):
+            # try:
             server_conn.create_session(read_stream, write_stream)
+            # except FileNotFoundError as e:
 
             async with server_conn.session:
                 await server_conn.initialize_session()
@@ -153,7 +157,12 @@ async def _server_lifecycle_task(server_conn: ServerConnection) -> None:
 
     except Exception as exc:
         logger.error(
-            f"{server_name}: Lifecycle task encountered an error: {exc}",
+            f"{server_name}: Lifecycle task encountered an error: {exc}",
+            exc_info=True,
+            data={
+                "progress_action": ProgressAction.FATAL_ERROR,
+                "server_name": server_name,
+            },
         )
         # If there's an error, we should also set the event so that
         # 'get_server' won't hang
@@ -295,7 +304,7 @@ class MCPConnectionManager(ContextDependent):
 
         # If the session is still None, it means the lifecycle task crashed
         if not server_conn or not server_conn.session:
-            raise
+            raise ServerInitializationError(
                 f"{server_name}: Failed to initialize server; check logs for errors."
             )
         return server_conn
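The error types used throughout these diffs come from the new mcp_agent/core/exceptions.py (+47 lines, not expanded here). The sketch below illustrates how they appear to be used: the two-argument (message, details) constructor shape and the .message/.details attributes are inferred from the raise sites and the error handling in FastAgent.run() above, and check_servers is a hypothetical helper, not part of the package.

# Illustrative only: mirrors the pre-flight check style of _validate_server_references().
# The (message, details) constructor and .message/.details attributes are assumptions
# inferred from the diff above, not a documented exceptions.py API.
from mcp_agent.core.exceptions import ServerConfigError


def check_servers(referenced: set[str], configured: set[str]) -> None:
    """Hypothetical helper: fail early when an agent references unknown servers."""
    missing = sorted(referenced - configured)
    if missing:
        raise ServerConfigError(
            "Missing server configuration",
            f"The following servers are referenced but not defined in config: {', '.join(missing)}",
        )


try:
    check_servers({"fetch", "filesystem"}, {"fetch"})
except ServerConfigError as e:
    print(e.message)
    if e.details:
        print(e.details)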