fast-agent-mcp 0.0.8__py3-none-any.whl → 0.0.9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of fast-agent-mcp might be problematic.

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: fast-agent-mcp
-Version: 0.0.8
+Version: 0.0.9
 Summary: Define, Prompt and Test MCP enabled Agents and Workflows
 Author-email: Shaun Smith <fastagent@llmindset.co.uk>, Sarmad Qadri <sarmad@lastmileai.dev>
 License: Apache License
@@ -252,13 +252,15 @@ Description-Content-Type: text/markdown
 
 Install the [uv package manager](https://docs.astral.sh/uv/).
 
-`uv pip install fast-agent-mcp` - download fast-agent
+`uv pip install fast-agent-mcp` - download and install fast-agent
 
 `fast-agent setup` - setup an agent and configuration files.
 
 `uv run agent.py` - run and interact with your first agent.
 
-`fast-agent bootstrap workflow` - generate example agents demonstrating each of the workflows from Anthropic's "[Building Effective Agents](https://www.anthropic.com/research/building-effective-agents)" paper.
+`fast-agent bootstrap workflow` - generate example agents and workflows demonstrating each of the patterns from Anthropic's "[Building Effective Agents](https://www.anthropic.com/research/building-effective-agents)" paper.
+
+`fast-agent bootstrap` -
 
 It's built on top of [mcp-agent](todo).
 
@@ -13,12 +13,12 @@ mcp_agent/cli/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 mcp_agent/cli/__main__.py,sha256=bhxe66GYqy0q78OQhi7dkuubY1Tn0bQL6hU5Nn47E34,73
 mcp_agent/cli/main.py,sha256=wyOvUg0BihD1NpoiFcIaOruevgaHxs1-Xy_bnwb1Ik4,2449
 mcp_agent/cli/terminal.py,sha256=5fqrKlJvIpKEuvpvZ653OueQSYFFktBEbosjr2ucMUc,1026
-mcp_agent/cli/commands/bootstrap.py,sha256=mVng2bZPKfVEULrOPI7sQQaENAJwU-3SsGZb10mbu6Q,10614
+mcp_agent/cli/commands/bootstrap.py,sha256=5OAxs1IsWarFNX1IoTRbVyjMqktxJ3pweM3LwGOVAhk,10768
 mcp_agent/cli/commands/config.py,sha256=32YTS5jmsYAs9QzAhjkG70_daAHqOemf4XbZBBSMz6g,204
 mcp_agent/cli/commands/setup.py,sha256=Km4-EFJljjMU5XjsSa-srg_7lbPuH5e40sx4lQDkia4,6198
 mcp_agent/core/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 mcp_agent/core/exceptions.py,sha256=xDdhYh83ni3t0NiXQTEL0_Yyx0qQxBPQL1gSwRToeaw,1469
-mcp_agent/core/fastagent.py,sha256=WuT0B9XpGM7XowJivNkRcIm-0BthlObVtoNwcBaNKCA,40641
+mcp_agent/core/fastagent.py,sha256=THy6lDg3CjgF_nMKq8GtWkFT03r6-8VKFFhjjECx6aE,42699
 mcp_agent/core/server_validation.py,sha256=_59cn16nNT4HGPwg19HgxMtHK4MsdWYDUw_CuL-5xek,1696
 mcp_agent/eval/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 mcp_agent/executor/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -47,15 +47,16 @@ mcp_agent/mcp/mcp_agent_server.py,sha256=xP09HZTeguJi4Fq0p3fjLBP55uSYe5AdqM90xCg
 mcp_agent/mcp/mcp_aggregator.py,sha256=SQNErk20jTT_5cHTQ9zQBYEsAedd6QEweQj-YSzkdOo,14602
 mcp_agent/mcp/mcp_connection_manager.py,sha256=tdz2B2BxmO2gBDPe4r6eEnUp4QpwD0p0-ag5J_hle-w,11750
 mcp_agent/mcp/stdio.py,sha256=tW075R5rQ-UlflXWFKIFDgCbWbuhKqxhiYolWvyEkFs,3985
-mcp_agent/resources/examples/data-analysis/analysis.py,sha256=LrfbuK68JqcB9Y21NMedJ8RiEVU5Kr6jOIRLGmwz524,1379
-mcp_agent/resources/examples/data-analysis/fastagent.config.yaml,sha256=VwoXyaxAuLNUm7dtRj5rTxeFDSM_wRhhX_HZunb8jUI,422
+mcp_agent/resources/examples/data-analysis/analysis.py,sha256=Sp-umPPfwVjG3yNrHdQA6blGtG6jc5of1e_0oS4njYc,1379
+mcp_agent/resources/examples/data-analysis/fastagent.config.yaml,sha256=eTKGbjnTHhDTeNRPQvG_fr9OQpEZ5Y9v7X2NyCj0V70,530
 mcp_agent/resources/examples/data-analysis/mount-point/WA_Fn-UseC_-HR-Employee-Attrition.csv,sha256=pcMeOL1_r8m8MziE6xgbBrQbjl5Ijo98yycZn7O-dlk,227977
 mcp_agent/resources/examples/mcp_researcher/researcher-eval.py,sha256=NI1vujVuLeTrcF8dM_ipZZ3Tg-1AL35CaltmuzxWrU4,1807
 mcp_agent/resources/examples/mcp_researcher/researcher.py,sha256=jPRafm7jbpHKkX_dQiYGG3Sw-e1Dm86q-JZT-WZDhM0,1425
 mcp_agent/resources/examples/workflows/chaining.py,sha256=o9vf45BtJP6PT7kCYfIyNTcJrVGnMeKCUD37vI0cepw,770
 mcp_agent/resources/examples/workflows/evaluator.py,sha256=ByILFY7PsA8UXtmNa4YtLIGSsnVfZVjKlHGH9G0ie2I,3069
-mcp_agent/resources/examples/workflows/human_input.py,sha256=RlUch5A8N9pb3REusiIAZKfAGTxWWE82s1Npmkfbg1Y,625
-mcp_agent/resources/examples/workflows/orchestrator.py,sha256=pt6HTyJl5r5zR6FUICi05-Eaulyh_W-PAfh9XCbuuRc,2986
+mcp_agent/resources/examples/workflows/fastagent.config.yaml,sha256=s8USBUpEymJbOLVp-NiFuo86h4sCxL9TB5H-Ub8i0hQ,234
+mcp_agent/resources/examples/workflows/human_input.py,sha256=c8cBdLEPbaMXddFwsfN3Z7RFs5PZXsdrjANfvq1VTPM,605
+mcp_agent/resources/examples/workflows/orchestrator.py,sha256=LaV5rzBy6NvnlFd91f96bKFrle6voxDA1TFa8hVxQ9Y,2550
 mcp_agent/resources/examples/workflows/parallel.py,sha256=cNYcIcsdo0-KK-S7KEPCc11aWELeVlQJdJ2LIC9xgDs,3090
 mcp_agent/resources/examples/workflows/router.py,sha256=XT_ewCrxPxdUTMCYQGw34qZQ3GGu8TYY_v5Lige8By4,1707
 mcp_agent/telemetry/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -77,12 +78,12 @@ mcp_agent/workflows/intent_classifier/intent_classifier_llm_anthropic.py,sha256=
 mcp_agent/workflows/intent_classifier/intent_classifier_llm_openai.py,sha256=zj76WlTYnSCYjBQ_IDi5vFBQGmNwYaoUq1rT730sY98,1940
 mcp_agent/workflows/llm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 mcp_agent/workflows/llm/augmented_llm.py,sha256=o5Vdn1sAgVBhmetilbiYJuVz5BAtEhNcwGMEBGNzU_A,22960
-mcp_agent/workflows/llm/augmented_llm_anthropic.py,sha256=jowYvWBdVspxQhcvZt14iL_1kEZK2t4f6Wzgn_8zLaY,21048
+mcp_agent/workflows/llm/augmented_llm_anthropic.py,sha256=rcaFn4ZhxJgBuTsRCKCcCx4U8UYqAhJNh3EK5M5hgew,21118
 mcp_agent/workflows/llm/augmented_llm_openai.py,sha256=RqsbX0Fc5By1AvQ2N85hxzz0d84mVwuPggslxwqSJVM,24190
 mcp_agent/workflows/llm/llm_selector.py,sha256=G7pIybuBDwtmyxUDov_QrNYH2FoI0qFRu2JfoxWUF5Y,11045
 mcp_agent/workflows/llm/model_factory.py,sha256=cAjG8UxzUC_wcB6mPaMfEhwnGnVS4-IOmOXDabO1_0g,6119
 mcp_agent/workflows/orchestrator/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-mcp_agent/workflows/orchestrator/orchestrator.py,sha256=tSRMCRFT_gAQApRRiXXznCbk_x11o86oi5LfKLnXzdk,15146
+mcp_agent/workflows/orchestrator/orchestrator.py,sha256=BMGToWE-C2WiL74U5s0oT5wKoHWWxhWZ_lRfHm-8ryg,12494
 mcp_agent/workflows/orchestrator/orchestrator_models.py,sha256=UWn7_HFLcqFGlcjZ1Rn2SYQfm5k9seS6QJN_FRST5Kc,4513
 mcp_agent/workflows/orchestrator/orchestrator_prompts.py,sha256=AQ9-1WyMIl6l36yAMh1dtJ6Vhi1jPrvGXF2CNNAtlpA,3946
 mcp_agent/workflows/parallel/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -99,8 +100,8 @@ mcp_agent/workflows/swarm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJW
 mcp_agent/workflows/swarm/swarm.py,sha256=-lAIeSWDqbGHGRPTvjiP9nIKWvxxy9DAojl9yQzO1Pw,11050
 mcp_agent/workflows/swarm/swarm_anthropic.py,sha256=pW8zFx5baUWGd5Vw3nIDF2oVOOGNorij4qvGJKdYPcs,1624
 mcp_agent/workflows/swarm/swarm_openai.py,sha256=wfteywvAGkT5bLmIxX_StHJq8144whYmCRnJASAjOes,1596
-fast_agent_mcp-0.0.8.dist-info/METADATA,sha256=tPypBpbGkwNNIRHnJJF7Nn68O1bJHIdV3WFvVup3xyI,15914
-fast_agent_mcp-0.0.8.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-fast_agent_mcp-0.0.8.dist-info/entry_points.txt,sha256=2IXtSmDK9XjWN__RWuRIJTgWyW17wJnJ_h-pb0pZAxo,174
-fast_agent_mcp-0.0.8.dist-info/licenses/LICENSE,sha256=cN3FxDURL9XuzE5mhK9L2paZo82LTfjwCYVT7e3j0e4,10939
-fast_agent_mcp-0.0.8.dist-info/RECORD,,
+fast_agent_mcp-0.0.9.dist-info/METADATA,sha256=KBbUbJ7Sx0c9ZvVd9j2zJCdgr1BIDUwvfqN6nAzMjew,15966
+fast_agent_mcp-0.0.9.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+fast_agent_mcp-0.0.9.dist-info/entry_points.txt,sha256=2IXtSmDK9XjWN__RWuRIJTgWyW17wJnJ_h-pb0pZAxo,174
+fast_agent_mcp-0.0.9.dist-info/licenses/LICENSE,sha256=cN3FxDURL9XuzE5mhK9L2paZo82LTfjwCYVT7e3j0e4,10939
+fast_agent_mcp-0.0.9.dist-info/RECORD,,
@@ -269,6 +269,9 @@ def _show_completion_message(example_type: str, created: list[str]):
         )
         console.print("2. The dataset is available in the mount-point directory:")
         console.print(" - mount-point/WA_Fn-UseC_-HR-Employee-Attrition.csv")
+        console.print(
+            "On Windows platforms, please edit the fastagent.config.yaml and adjust the volume mount point."
+        )
     else:
         console.print("\n[yellow]No files were created.[/yellow]")
 
@@ -31,9 +31,10 @@ from rich.prompt import Prompt
 from rich import print
 from mcp_agent.progress_display import progress_display
 from mcp_agent.workflows.llm.model_factory import ModelFactory
-from mcp_agent.workflows.llm.augmented_llm import RequestParams
+from mcp_agent.workflows.llm.augmented_llm import AugmentedLLM, RequestParams
 
-import readline  # noqa: F401
+# TODO -- reinstate once Windows & Python 3.13 platform issues are fixed
+# import readline  # noqa: F401
 
 # Type aliases for better readability
 WorkflowType: TypeAlias = Union[
@@ -334,7 +335,8 @@ class FastAgent(ContextDependent):
     def _validate_workflow_references(self) -> None:
         """
         Validate that all workflow references point to valid agents/workflows.
-        Raises ValueError if any referenced components are not defined.
+        Also validates that referenced agents have required configuration.
+        Raises AgentConfigError if any validation fails.
         """
         available_components = set(self.agents.keys())
 
@@ -358,7 +360,7 @@
                     )
 
             elif agent_type == AgentType.ORCHESTRATOR.value:
-                # Check all child agents exist
+                # Check all child agents exist and are properly configured
                 child_agents = agent_data["child_agents"]
                 missing = [a for a in child_agents if a not in available_components]
                 if missing:
@@ -366,6 +368,18 @@
                         f"Orchestrator '{name}' references non-existent agents: {', '.join(missing)}"
                     )
 
+                # Validate child agents have required LLM configuration
+                for agent_name in child_agents:
+                    child_data = self.agents[agent_name]
+                    if child_data["type"] == AgentType.BASIC.value:
+                        # For basic agents, we'll validate LLM config during creation
+                        continue
+                    elif not isinstance(child_data["func"], AugmentedLLM):
+                        raise AgentConfigError(
+                            f"Agent '{agent_name}' used by orchestrator '{name}' lacks LLM capability",
+                            "All agents used by orchestrators must be LLM-capable",
+                        )
+
             elif agent_type == AgentType.ROUTER.value:
                 # Check all referenced agents exist
                 router_agents = agent_data["agents"]
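
For context, a minimal sketch of what this stricter validation enforces, assuming the decorator API shown elsewhere in this diff (agent names and instructions here are hypothetical):

```python
from mcp_agent.core.fastagent import FastAgent  # module path as listed in the RECORD section

fast = FastAgent("Validation Demo")

@fast.agent(name="writer", instruction="Write short articles.", model="sonnet")
async def writer():
    ...

# As of 0.0.9, every name listed in `agents` must resolve to an LLM-capable
# component; a missing or non-LLM reference raises AgentConfigError during
# validation instead of ValueError (or a later runtime failure).
@fast.orchestrator(name="orchestrate", agents=["writer"], model="sonnet")
async def main():
    async with fast.run() as agent:
        ...  # interact with the orchestrator here
```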
@@ -432,7 +446,7 @@
         *,
         instruction: str = "You are a helpful agent.",
         servers: List[str] = [],
-        model: Optional[str] = None,
+        model: str | None = None,
         use_history: bool = True,
         request_params: Optional[Dict] = None,
         human_input: bool = False,
@@ -482,11 +496,12 @@
 
     def orchestrator(
         self,
-        name: str,
-        instruction: str,
+        name: str = "Orchestrator",
+        *,
+        instruction: str | None = None,
         agents: List[str],
         model: str | None = None,
-        use_history: bool = True,
+        use_history: bool = False,
         request_params: Optional[Dict] = None,
         human_input: bool = False,
     ) -> Callable:
@@ -498,14 +513,14 @@
             instruction: Base instruction for the orchestrator
             agents: List of agent names this orchestrator can use
             model: Model specification string (highest precedence)
-            use_history: Whether to maintain conversation history
+            use_history: Whether to maintain conversation history (forced false)
             request_params: Additional request parameters for the LLM
         """
 
         def decorator(func: Callable) -> Callable:
             # Create base request params
             base_params = RequestParams(
-                use_history=use_history, **(request_params or {})
+                use_history=use_history, model=model, **(request_params or {})
             )
 
             # Create agent configuration
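
Taken together, the two hunks above change the decorator's calling convention: `name` now defaults to `"Orchestrator"`, the remaining parameters are keyword-only, history is off by default (and forced off), and the `model` string is threaded into the `RequestParams`. A hedged sketch of a call site under the new signature (agent names are illustrative):

```python
from mcp_agent.core.fastagent import FastAgent  # path per the RECORD section above

fast = FastAgent("Orchestrator Demo")

@fast.orchestrator(
    name="orchestrate",           # optional; defaults to "Orchestrator"
    agents=["finder", "writer"],  # keyword-only after the new `*`
    model="sonnet",               # now merged into RequestParams(model=...)
    # use_history defaults to False and is forced off for orchestrators
)
async def main():
    async with fast.run() as agent:
        ...  # interact with the orchestrator here
```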
@@ -721,7 +736,7 @@
 
         return active_agents
 
-    def _create_orchestrators(
+    async def _create_orchestrators(
         self, agent_app: MCPApp, active_agents: ProxyDict
     ) -> ProxyDict:
         """
@@ -739,44 +754,57 @@
             if agent_data["type"] == AgentType.ORCHESTRATOR.value:
                 config = agent_data["config"]
 
-                # TODO: Remove legacy - This model/params setup should be in Agent class
-                # Resolve model alias if present
-                model_config = ModelFactory.parse_model_string(config.model)
-                resolved_model = model_config.model_name
-
-                # Start with existing params if available
-                if config.default_request_params:
-                    base_params = config.default_request_params.model_copy()
-                    # Update with orchestrator-specific settings
-                    base_params.use_history = config.use_history
-                    base_params.model = resolved_model
-                else:
-                    base_params = RequestParams(
-                        use_history=config.use_history, model=resolved_model
-                    )
-
-                llm_factory = self._get_model_factory(
-                    model=config.model,  # Use original model string for factory creation
-                    request_params=base_params,
+                # Get base params configured with model settings
+                base_params = (
+                    config.default_request_params.model_copy()
+                    if config.default_request_params
+                    else RequestParams()
                 )
+                base_params.use_history = False  # Force no history for orchestrator
 
-                # Get the child agents - need to unwrap proxies
+                # Get the child agents - need to unwrap proxies and validate LLM config
                 child_agents = []
                 for agent_name in agent_data["child_agents"]:
                     proxy = active_agents[agent_name]
-                    if isinstance(proxy, LLMAgentProxy):
-                        child_agents.append(proxy._agent)  # Get the actual Agent
-                    else:
-                        # Handle case where it might be another workflow
-                        child_agents.append(proxy._workflow)
+                    instance = self._unwrap_proxy(proxy)
+                    # Validate basic agents have LLM
+                    if isinstance(instance, Agent):
+                        if not hasattr(instance, "_llm") or not instance._llm:
+                            raise AgentConfigError(
+                                f"Agent '{agent_name}' used by orchestrator '{name}' missing LLM configuration",
+                                "All agents must be fully configured with LLMs before being used in an orchestrator",
+                            )
+                    child_agents.append(instance)
+
+                # Create a properly configured planner agent
+                planner_config = AgentConfig(
+                    name=f"{name}",  # Use orchestrator name as prefix
+                    instruction=config.instruction
+                    or """
+                    You are an expert planner. Given an objective task and a list of MCP servers (which are collections of tools)
+                    or Agents (which are collections of servers), your job is to break down the objective into a series of steps,
+                    which can be performed by LLMs with access to the servers or agents.
+                    """,
+                    servers=[],  # Planner doesn't need server access
+                    model=config.model,  # Use same model as orchestrator
+                    default_request_params=base_params,
+                )
+                planner_agent = Agent(config=planner_config, context=agent_app.context)
+                planner_factory = self._get_model_factory(
+                    model=config.model,
+                    request_params=config.default_request_params,
+                )
 
+                async with planner_agent:
+                    planner = await planner_agent.attach_llm(planner_factory)
+
+                # Create the orchestrator with pre-configured planner
                 orchestrator = Orchestrator(
                     name=config.name,
-                    instruction=config.instruction,
+                    planner=planner,  # Pass pre-configured planner
                     available_agents=child_agents,
                     context=agent_app.context,
-                    llm_factory=llm_factory,
-                    request_params=base_params,  # Use our base params that include model
+                    request_params=planner.default_request_params,  # Base params already include model settings
                     plan_type="full",
                 )
 
@@ -784,6 +812,7 @@
             orchestrators[name] = self._create_proxy(
                 name, orchestrator, AgentType.ORCHESTRATOR.value
             )
+
         return orchestrators
 
     async def _create_evaluator_optimizers(
@@ -878,7 +907,7 @@
 
         return deps
 
-    def _create_parallel_agents(
+    async def _create_parallel_agents(
         self, agent_app: MCPApp, active_agents: ProxyDict
     ) -> ProxyDict:
         """
@@ -946,7 +975,9 @@
 
         return parallel_agents
 
-    def _create_routers(self, agent_app: MCPApp, active_agents: ProxyDict) -> ProxyDict:
+    async def _create_routers(
+        self, agent_app: MCPApp, active_agents: ProxyDict
+    ) -> ProxyDict:
         """
         Create router agents.
 
@@ -1029,14 +1060,18 @@
         self._validate_server_references()
         self._validate_workflow_references()
 
-        # Create all types of agents
+        # Create all types of agents in dependency order
         active_agents = await self._create_basic_agents(agent_app)
-        orchestrators = self._create_orchestrators(agent_app, active_agents)
-        parallel_agents = self._create_parallel_agents(agent_app, active_agents)
+        orchestrators = await self._create_orchestrators(
+            agent_app, active_agents
+        )
+        parallel_agents = await self._create_parallel_agents(
+            agent_app, active_agents
+        )
         evaluator_optimizers = await self._create_evaluator_optimizers(
             agent_app, active_agents
         )
-        routers = self._create_routers(agent_app, active_agents)
+        routers = await self._create_routers(agent_app, active_agents)
 
         # Merge all agents into active_agents
         active_agents.update(orchestrators)
@@ -7,7 +7,7 @@ fast = FastAgent("Data Analysis (Roots)")
 
 
 @fast.agent(
-    name="Data_Analysis",
+    name="data_analysis",
     instruction="""
 You have access to a Python 3.12 interpreter and you can use this to analyse and process data.
 Common analysis packages such as Pandas, Seaborn and Matplotlib are already installed.
@@ -1,5 +1,7 @@
 default_model: sonnet
 
+# on windows, adjust the mount point to be the full path e.g. x:/temp/data-analysis/mount-point:/mnt/data/
+
 mcp:
   servers:
     interpreter:
@@ -0,0 +1,9 @@
+default_model: sonnet
+mcp:
+  servers:
+    fetch:
+      command: "uvx"
+      args: ["mcp-server-fetch"]
+    filesystem:
+      command: "npx"
+      args: ["@modelcontextprotocol/server-filesystem","."]
@@ -13,7 +13,6 @@ fast = FastAgent("Human Input")
 @fast.agent(
     instruction="An AI agent that assists with basic tasks. Request Human Input when needed.",
     human_input=True,
-    model="gpt-4o",
 )
 async def main():
     async with fast.run() as agent:
@@ -44,12 +44,6 @@ fast = FastAgent("Orchestrator-Workers")
 # Define the orchestrator to coordinate the other agents
 @fast.orchestrator(
     name="orchestrate",
-    instruction="""Load the student's short story from short_story.md,
-    and generate a report with feedback across proofreading,
-    factuality/logical consistency and style adherence. Use the style rules from
-    https://apastyle.apa.org/learn/quick-guide-on-formatting and
-    https://apastyle.apa.org/learn/quick-guide-on-references.
-    Write the graded report to graded_report.md in the same directory as short_story.md""",
     agents=["finder", "writer", "proofreader"],
     model="sonnet",
 )
@@ -79,25 +79,8 @@ class AnthropicAugmentedLLM(AugmentedLLM[MessageParam, Message]):
         Process a query using an LLM and available tools.
         Override this method to use a different LLM.
         """
-        config = self.context.config
 
-        api_key = None
-
-        if hasattr(config, "anthropic") and config.anthropic:
-            api_key = config.anthropic.api_key
-            if api_key == "<your-api-key-here>":
-                api_key = None
-
-        if api_key is None:
-            api_key = os.getenv("ANTHROPIC_API_KEY")
-
-        if not api_key:
-            raise ProviderKeyError(
-                "Anthropic API key not configured",
-                "The Anthropic API key is required but not set.\n"
-                "Add it to your configuration file under anthropic.api_key "
-                "or set the ANTHROPIC_API_KEY environment variable.",
-            )
+        api_key = self._api_key(self.context.config)
         try:
             anthropic = Anthropic(api_key=api_key)
             messages: List[MessageParam] = []
@@ -264,6 +247,27 @@ class AnthropicAugmentedLLM(AugmentedLLM[MessageParam, Message]):
 
         return responses
 
+    def _api_key(self, config):
+        api_key = None
+
+        if hasattr(config, "anthropic") and config.anthropic:
+            api_key = config.anthropic.api_key
+            if api_key == "<your-api-key-here>":
+                api_key = None
+
+        if api_key is None:
+            api_key = os.getenv("ANTHROPIC_API_KEY")
+
+        if not api_key:
+            raise ProviderKeyError(
+                "Anthropic API key not configured",
+                "The Anthropic API key is required but not set.\n"
+                "Add it to your configuration file under anthropic.api_key "
+                "or set the ANTHROPIC_API_KEY environment variable.",
+            )
+
+        return api_key
+
     async def generate_str(
         self,
         message,
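
The extracted helper preserves the original resolution order and is reused by `generate_structured` in the next hunk. A standalone replica of the lookup, for illustration only (the package raises `ProviderKeyError` where this sketch raises `RuntimeError`):

```python
import os

def resolve_anthropic_key(config) -> str:
    """Illustrative replica of the _api_key lookup order shown above."""
    api_key = getattr(getattr(config, "anthropic", None), "api_key", None)
    if api_key == "<your-api-key-here>":  # template placeholder counts as unset
        api_key = None
    api_key = api_key or os.getenv("ANTHROPIC_API_KEY")  # env var is the fallback
    if not api_key:
        raise RuntimeError("Anthropic API key not configured")
    return api_key
```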
@@ -313,7 +317,7 @@ class AnthropicAugmentedLLM(AugmentedLLM[MessageParam, Message]):
 
         # Next we pass the text through instructor to extract structured data
         client = instructor.from_anthropic(
-            Anthropic(api_key=self.context.config.anthropic.api_key),
+            Anthropic(api_key=self._api_key(self.context.config)),
         )
 
         params = self.get_request_params(request_params)
@@ -2,9 +2,7 @@
 Orchestrator implementation for MCP Agent applications.
 """
 
-import contextlib
 from typing import (
-    Callable,
     List,
     Literal,
     Optional,
@@ -12,7 +10,7 @@ from typing import (
     TYPE_CHECKING,
 )
 
-from mcp_agent.agents.agent import Agent, AgentConfig
+from mcp_agent.agents.agent import Agent
 from mcp_agent.workflows.llm.augmented_llm import (
     AugmentedLLM,
     MessageParamT,
@@ -20,7 +18,6 @@ from mcp_agent.workflows.llm.augmented_llm import (
     ModelT,
     RequestParams,
 )
-from mcp_agent.workflows.llm.model_factory import ModelFactory
 from mcp_agent.workflows.orchestrator.orchestrator_models import (
     format_plan_result,
     format_step_result,
@@ -47,9 +44,9 @@ logger = get_logger(__name__)
 
 class Orchestrator(AugmentedLLM[MessageParamT, MessageT]):
     """
-    In the orchestrator-workers workflow, a central LLM dynamically breaks down tasks,
-    delegates them to worker LLMs, and synthesizes their results. It does this
-    in a loop until the task is complete.
+    In the orchestrator-workers workflow, a central planner LLM dynamically breaks down tasks and
+    delegates them to pre-configured worker LLMs. The planner synthesizes their results in a loop
+    until the task is complete.
 
     When to use this workflow:
     - This workflow is well-suited for complex tasks where you can't predict the
@@ -60,65 +57,59 @@ class Orchestrator(AugmentedLLM[MessageParamT, MessageT]):
     - Coding products that make complex changes to multiple files each time.
     - Search tasks that involve gathering and analyzing information from multiple sources
       for possible relevant information.
+
+    Note:
+        All agents must be pre-configured with LLMs before being passed to the orchestrator.
+        This ensures consistent model behavior and configuration across all components.
     """
 
     def __init__(
         self,
-        llm_factory: Callable[[Agent], AugmentedLLM[MessageParamT, MessageT]],
-        planner: AugmentedLLM | None = None,
-        available_agents: List[Agent | AugmentedLLM] | None = None,
+        name: str,
+        planner: AugmentedLLM,  # Pre-configured planner
+        available_agents: List[Agent | AugmentedLLM],
         plan_type: Literal["full", "iterative"] = "full",
        context: Optional["Context"] = None,
        **kwargs,
    ):
        """
        Args:
-            llm_factory: Factory function to create an LLM for a given agent
-            planner: LLM to use for planning steps (if not provided, a default planner will be used)
-            plan_type: "full" planning generates the full plan first, then executes. "iterative" plans the next step, and loops until success.
-            available_agents: List of agents available to tasks executed by this orchestrator
+            name: Name of the orchestrator workflow
+            planner: Pre-configured planner LLM to use for planning steps
+            available_agents: List of pre-configured agents available to this orchestrator
+            plan_type: "full" planning generates the full plan first, then executes. "iterative" plans next step and loops.
            context: Application context
        """
        # Initialize with orchestrator-specific defaults
        orchestrator_params = RequestParams(
            use_history=False,  # Orchestrator doesn't support history
            max_iterations=30,  # Higher default for complex tasks
-            maxTokens=8192,  # Higher default for planning TODO this will break some models - make configurable.
+            maxTokens=8192,  # Higher default for planning
            parallel_tool_calls=True,
        )
 
-        # If kwargs contains request_params, merge with our defaults but force use_history False
+        # If kwargs contains request_params, merge our defaults while preserving the model config
        if "request_params" in kwargs:
            base_params = kwargs["request_params"]
-            merged = base_params.model_copy()
-            merged.use_history = False  # Force this setting
+            # Create merged params starting with our defaults
+            merged = orchestrator_params.model_copy()
+            # Update with base params to get model config
+            if isinstance(base_params, dict):
+                merged = merged.model_copy(update=base_params)
+            else:
+                merged = merged.model_copy(update=base_params.model_dump())
+            # Force specific settings
+            merged.use_history = False
            kwargs["request_params"] = merged
        else:
            kwargs["request_params"] = orchestrator_params
 
        super().__init__(context=context, **kwargs)
 
-        self.llm_factory = llm_factory
-
-        # Create default planner with AgentConfig
-        request_params = self.get_request_params(kwargs.get("request_params"))
-        planner_config = AgentConfig(
-            name="LLM Orchestrator",
-            instruction="""
-            You are an expert planner. Given an objective task and a list of MCP servers (which are collections of tools)
-            or Agents (which are collections of servers), your job is to break down the objective into a series of steps,
-            which can be performed by LLMs with access to the servers or agents.
-            """,
-            servers=[],  # Planner doesn't need direct server access
-            default_request_params=request_params,
-            model=request_params.model if request_params else None,
-        )
-
-        self.planner = planner or llm_factory(agent=Agent(config=planner_config))
-
-        self.plan_type: Literal["full", "iterative"] = plan_type
+        self.planner = planner
+        self.plan_type = plan_type
        self.server_registry = self.context.server_registry
-        self.agents = {agent.name: agent for agent in available_agents or []}
+        self.agents = {agent.name: agent for agent in available_agents}
 
    async def generate(
        self,
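
With the factory and default planner gone, callers own planner creation; the `_create_orchestrators` hunk earlier in this diff builds a planner agent, attaches an LLM, and passes it in. A hedged sketch of direct construction mirroring that flow (variable names are illustrative):

```python
from mcp_agent.workflows.orchestrator.orchestrator import Orchestrator  # path per RECORD

async def build_orchestrator(planner_agent, worker_agents, planner_factory, app_context):
    # worker_agents must already have LLMs attached; the planner gets one here.
    async with planner_agent:
        planner = await planner_agent.attach_llm(planner_factory)

    return Orchestrator(
        name="orchestrate",
        planner=planner,                 # pre-configured planner is now required
        available_agents=worker_agents,  # the llm_factory parameter no longer exists
        plan_type="full",
        context=app_context,
        request_params=planner.default_request_params,
    )
```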
@@ -155,25 +146,15 @@
     ) -> ModelT:
         """Request a structured LLM generation and return the result as a Pydantic model."""
         params = self.get_request_params(request_params)
-
         result_str = await self.generate_str(message=message, request_params=params)
 
-        structured_config = AgentConfig(
-            name="Structured Output",
-            instruction="Produce a structured output given a message",
-            servers=[],  # No server access needed for structured output
-        )
-
-        llm = self.llm_factory(agent=Agent(config=structured_config))
-
-        structured_result = await llm.generate_structured(
+        # Use AugmentedLLM's structured output handling
+        return await super().generate_structured(
             message=result_str,
             response_model=response_model,
             request_params=params,
         )
 
-        return structured_result
-
     async def execute(
         self, objective: str, request_params: RequestParams | None = None
     ) -> PlanResult:
@@ -233,8 +214,6 @@
 
             plan_result.add_step_result(step_result)
 
-            plan_result.add_step_result(step_result)
-
             logger.debug(
                 f"Iteration {iterations}: Intermediate plan result:", data=plan_result
             )
@@ -251,70 +230,36 @@
         request_params: RequestParams | None = None,
     ) -> StepResult:
         """Execute a step's subtasks in parallel and synthesize results"""
-        params = self.get_request_params(request_params)
 
         step_result = StepResult(step=step, task_results=[])
         context = format_plan_result(previous_result)
 
-        # Prepare tasks and LLMs
-        task_llms = []
-        async with contextlib.AsyncExitStack() as stack:
-            for task in step.tasks:
-                agent = self.agents.get(task.agent)
-                if not agent:
-                    raise ValueError(f"No agent found matching {task.agent}")
-
-                if isinstance(agent, AugmentedLLM):
-                    llm = agent
-                else:
-                    # Use existing LLM if agent has one
-                    if hasattr(agent, "_llm") and agent._llm:
-                        llm = agent._llm
-                    else:
-                        # Only create new context if needed
-                        ctx_agent = await stack.enter_async_context(agent)
-                        # Create factory with agent's own configuration
-                        agent_factory = ModelFactory.create_factory(
-                            model_string=agent.config.model,
-                            request_params=agent.config.default_request_params,
-                        )
-                        llm = await ctx_agent.attach_llm(agent_factory)
-
-                task_llms.append((task, llm))
-
-            # Execute all tasks within the same context
-            futures = []
-            for task, llm in task_llms:
-                task_description = TASK_PROMPT_TEMPLATE.format(
-                    objective=previous_result.objective,
-                    task=task.description,
-                    context=context,
-                )
-                # Get the agent's config for task execution
-                agent = self.agents.get(task.agent)
-                task_params = (
-                    agent.config.default_request_params
-                    if hasattr(agent, "config")
-                    else params
-                )
-                futures.append(
-                    llm.generate_str(
-                        message=task_description, request_params=task_params
-                    )
-                )
+        # Execute tasks
+        futures = []
+        for task in step.tasks:
+            agent = self.agents.get(task.agent)
+            if not agent:
+                raise ValueError(f"No agent found matching {task.agent}")
+
+            task_description = TASK_PROMPT_TEMPLATE.format(
+                objective=previous_result.objective,
+                task=task.description,
+                context=context,
+            )
 
-            # Wait for all tasks, including any tool calls they make
-            results = await self.executor.execute(*futures)
+            # All agents should now be LLM-capable
+            futures.append(agent._llm.generate_str(message=task_description))
 
-            # Process results while contexts are still active
-            for (task, _), result in zip(task_llms, results):
-                step_result.add_task_result(
-                    TaskWithResult(**task.model_dump(), result=str(result))
-                )
+        # Wait for all tasks
+        results = await self.executor.execute(*futures)
 
-            # Format final result while contexts are still active
-            step_result.result = format_step_result(step_result)
+        # Process results
+        for task, result in zip(step.tasks, results):
+            step_result.add_task_result(
+                TaskWithResult(**task.model_dump(), result=str(result))
+            )
 
+        step_result.result = format_step_result(step_result)
         return step_result
 
     async def _get_full_plan(