fast-agent-mcp 0.0.7__py3-none-any.whl → 0.0.9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of fast-agent-mcp might be problematic.
Files changed (33)
  1. {fast_agent_mcp-0.0.7.dist-info → fast_agent_mcp-0.0.9.dist-info}/METADATA +24 -57
  2. {fast_agent_mcp-0.0.7.dist-info → fast_agent_mcp-0.0.9.dist-info}/RECORD +31 -24
  3. mcp_agent/agents/agent.py +8 -4
  4. mcp_agent/app.py +5 -1
  5. mcp_agent/cli/commands/bootstrap.py +183 -121
  6. mcp_agent/cli/commands/setup.py +20 -16
  7. mcp_agent/core/__init__.py +0 -0
  8. mcp_agent/core/exceptions.py +47 -0
  9. mcp_agent/core/fastagent.py +250 -124
  10. mcp_agent/core/server_validation.py +44 -0
  11. mcp_agent/event_progress.py +4 -1
  12. mcp_agent/logging/rich_progress.py +11 -0
  13. mcp_agent/mcp/mcp_connection_manager.py +11 -2
  14. mcp_agent/resources/examples/data-analysis/analysis.py +35 -0
  15. mcp_agent/resources/examples/data-analysis/fastagent.config.yaml +22 -0
  16. mcp_agent/resources/examples/data-analysis/mount-point/WA_Fn-UseC_-HR-Employee-Attrition.csv +1471 -0
  17. mcp_agent/resources/examples/workflows/chaining.py +31 -0
  18. mcp_agent/resources/examples/{decorator/optimizer.py → workflows/evaluator.py} +7 -10
  19. mcp_agent/resources/examples/workflows/fastagent.config.yaml +9 -0
  20. mcp_agent/resources/examples/workflows/human_input.py +25 -0
  21. mcp_agent/resources/examples/{decorator → workflows}/orchestrator.py +20 -17
  22. mcp_agent/resources/examples/{decorator → workflows}/parallel.py +14 -18
  23. mcp_agent/resources/examples/{decorator → workflows}/router.py +9 -10
  24. mcp_agent/workflows/llm/augmented_llm_anthropic.py +54 -14
  25. mcp_agent/workflows/llm/augmented_llm_openai.py +38 -9
  26. mcp_agent/workflows/orchestrator/orchestrator.py +53 -108
  27. mcp_agent/resources/examples/decorator/main.py +0 -26
  28. mcp_agent/resources/examples/decorator/tiny.py +0 -22
  29. {fast_agent_mcp-0.0.7.dist-info → fast_agent_mcp-0.0.9.dist-info}/WHEEL +0 -0
  30. {fast_agent_mcp-0.0.7.dist-info → fast_agent_mcp-0.0.9.dist-info}/entry_points.txt +0 -0
  31. {fast_agent_mcp-0.0.7.dist-info → fast_agent_mcp-0.0.9.dist-info}/licenses/LICENSE +0 -0
  32. /mcp_agent/resources/examples/mcp_researcher/{main-evalopt.py → researcher-eval.py} +0 -0
  33. /mcp_agent/resources/examples/mcp_researcher/{main.py → researcher.py} +0 -0
mcp_agent/workflows/orchestrator/orchestrator.py
@@ -2,9 +2,7 @@
 Orchestrator implementation for MCP Agent applications.
 """
 
-import contextlib
 from typing import (
-    Callable,
     List,
     Literal,
     Optional,
@@ -12,7 +10,7 @@ from typing import (
     TYPE_CHECKING,
 )
 
-from mcp_agent.agents.agent import Agent, AgentConfig
+from mcp_agent.agents.agent import Agent
 from mcp_agent.workflows.llm.augmented_llm import (
     AugmentedLLM,
     MessageParamT,
@@ -20,7 +18,6 @@ from mcp_agent.workflows.llm.augmented_llm import (
     ModelT,
     RequestParams,
 )
-from mcp_agent.workflows.llm.model_factory import ModelFactory
 from mcp_agent.workflows.orchestrator.orchestrator_models import (
     format_plan_result,
     format_step_result,
@@ -47,9 +44,9 @@ logger = get_logger(__name__)
 
 class Orchestrator(AugmentedLLM[MessageParamT, MessageT]):
     """
-    In the orchestrator-workers workflow, a central LLM dynamically breaks down tasks,
-    delegates them to worker LLMs, and synthesizes their results. It does this
-    in a loop until the task is complete.
+    In the orchestrator-workers workflow, a central planner LLM dynamically breaks down tasks and
+    delegates them to pre-configured worker LLMs. The planner synthesizes their results in a loop
+    until the task is complete.
 
     When to use this workflow:
     - This workflow is well-suited for complex tasks where you can't predict the
@@ -60,65 +57,59 @@ class Orchestrator(AugmentedLLM[MessageParamT, MessageT]):
     - Coding products that make complex changes to multiple files each time.
     - Search tasks that involve gathering and analyzing information from multiple sources
       for possible relevant information.
+
+    Note:
+        All agents must be pre-configured with LLMs before being passed to the orchestrator.
+        This ensures consistent model behavior and configuration across all components.
     """
 
     def __init__(
         self,
-        llm_factory: Callable[[Agent], AugmentedLLM[MessageParamT, MessageT]],
-        planner: AugmentedLLM | None = None,
-        available_agents: List[Agent | AugmentedLLM] | None = None,
+        name: str,
+        planner: AugmentedLLM,  # Pre-configured planner
+        available_agents: List[Agent | AugmentedLLM],
         plan_type: Literal["full", "iterative"] = "full",
         context: Optional["Context"] = None,
         **kwargs,
     ):
         """
        Args:
-            llm_factory: Factory function to create an LLM for a given agent
-            planner: LLM to use for planning steps (if not provided, a default planner will be used)
-            plan_type: "full" planning generates the full plan first, then executes. "iterative" plans the next step, and loops until success.
-            available_agents: List of agents available to tasks executed by this orchestrator
+            name: Name of the orchestrator workflow
+            planner: Pre-configured planner LLM to use for planning steps
+            available_agents: List of pre-configured agents available to this orchestrator
+            plan_type: "full" planning generates the full plan first, then executes. "iterative" plans next step and loops.
            context: Application context
        """
        # Initialize with orchestrator-specific defaults
        orchestrator_params = RequestParams(
            use_history=False,  # Orchestrator doesn't support history
            max_iterations=30,  # Higher default for complex tasks
-            maxTokens=8192,  # Higher default for planning TODO this will break some models - make configurable.
+            maxTokens=8192,  # Higher default for planning
            parallel_tool_calls=True,
        )
 
-        # If kwargs contains request_params, merge with our defaults but force use_history False
+        # If kwargs contains request_params, merge our defaults while preserving the model config
        if "request_params" in kwargs:
            base_params = kwargs["request_params"]
-            merged = base_params.model_copy()
-            merged.use_history = False  # Force this setting
+            # Create merged params starting with our defaults
+            merged = orchestrator_params.model_copy()
+            # Update with base params to get model config
+            if isinstance(base_params, dict):
+                merged = merged.model_copy(update=base_params)
+            else:
+                merged = merged.model_copy(update=base_params.model_dump())
+            # Force specific settings
+            merged.use_history = False
            kwargs["request_params"] = merged
        else:
            kwargs["request_params"] = orchestrator_params
 
        super().__init__(context=context, **kwargs)
 
-        self.llm_factory = llm_factory
-
-        # Create default planner with AgentConfig
-        request_params = self.get_request_params(kwargs.get("request_params"))
-        planner_config = AgentConfig(
-            name="LLM Orchestrator",
-            instruction="""
-            You are an expert planner. Given an objective task and a list of MCP servers (which are collections of tools)
-            or Agents (which are collections of servers), your job is to break down the objective into a series of steps,
-            which can be performed by LLMs with access to the servers or agents.
-            """,
-            servers=[],  # Planner doesn't need direct server access
-            default_request_params=request_params,
-            model=request_params.model if request_params else None,
-        )
-
-        self.planner = planner or llm_factory(agent=Agent(config=planner_config))
-
-        self.plan_type: Literal["full", "iterative"] = plan_type
+        self.planner = planner
+        self.plan_type = plan_type
        self.server_registry = self.context.server_registry
-        self.agents = {agent.name: agent for agent in available_agents or []}
+        self.agents = {agent.name: agent for agent in available_agents}
 
    async def generate(
        self,
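
Note on the constructor change: 0.0.9 drops the llm_factory argument and the internally built "LLM Orchestrator" planner; callers now pass a required name plus a pre-configured planner and agent list. A minimal usage sketch of the new signature follows — the planner_llm, researcher, and writer objects are hypothetical placeholders, and only the Orchestrator(...) parameters and execute() come from this diff:

from mcp_agent.workflows.orchestrator.orchestrator import Orchestrator

async def run_orchestrator(planner_llm, researcher, writer):
    # name is now a required argument; planner and available_agents
    # must already have LLMs attached (no factory is called internally).
    orchestrator = Orchestrator(
        name="report_orchestrator",
        planner=planner_llm,
        available_agents=[researcher, writer],
        plan_type="full",  # or "iterative" to plan one step at a time
    )
    # execute() still drives the plan loop, as before.
    return await orchestrator.execute(objective="Draft a short report")

Also worth noting from the request_params block: the merge now starts from the orchestrator defaults and overlays the caller's params (dict or model alike), so a caller-supplied maxTokens now wins over the 8192 default, while use_history is still forced to False afterwards.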
@@ -155,25 +146,15 @@
     ) -> ModelT:
         """Request a structured LLM generation and return the result as a Pydantic model."""
         params = self.get_request_params(request_params)
-
         result_str = await self.generate_str(message=message, request_params=params)
 
-        structured_config = AgentConfig(
-            name="Structured Output",
-            instruction="Produce a structured output given a message",
-            servers=[],  # No server access needed for structured output
-        )
-
-        llm = self.llm_factory(agent=Agent(config=structured_config))
-
-        structured_result = await llm.generate_structured(
+        # Use AugmentedLLM's structured output handling
+        return await super().generate_structured(
             message=result_str,
             response_model=response_model,
             request_params=params,
         )
 
-        return structured_result
-
     async def execute(
         self, objective: str, request_params: RequestParams | None = None
     ) -> PlanResult:
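
generate_structured no longer builds a throwaway "Structured Output" agent through the factory; it generates a string and hands parsing to super().generate_structured. From the caller's side the contract is unchanged. A hedged sketch, with TaskSummary as a hypothetical Pydantic response model:

from pydantic import BaseModel

class TaskSummary(BaseModel):  # hypothetical response model
    title: str
    completed: bool

async def get_summary(orchestrator):
    # One generation pass produces text; the AugmentedLLM base class
    # then coerces it into the response model (no second agent is created).
    return await orchestrator.generate_structured(
        message="Summarize the last task as a TaskSummary",
        response_model=TaskSummary,
    )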
@@ -233,8 +214,6 @@
 
             plan_result.add_step_result(step_result)
 
-            plan_result.add_step_result(step_result)
-
             logger.debug(
                 f"Iteration {iterations}: Intermediate plan result:", data=plan_result
             )
@@ -251,70 +230,36 @@
         request_params: RequestParams | None = None,
     ) -> StepResult:
         """Execute a step's subtasks in parallel and synthesize results"""
-        params = self.get_request_params(request_params)
 
         step_result = StepResult(step=step, task_results=[])
         context = format_plan_result(previous_result)
 
-        # Prepare tasks and LLMs
-        task_llms = []
-        async with contextlib.AsyncExitStack() as stack:
-            for task in step.tasks:
-                agent = self.agents.get(task.agent)
-                if not agent:
-                    raise ValueError(f"No agent found matching {task.agent}")
-
-                if isinstance(agent, AugmentedLLM):
-                    llm = agent
-                else:
-                    # Use existing LLM if agent has one
-                    if hasattr(agent, "_llm") and agent._llm:
-                        llm = agent._llm
-                    else:
-                        # Only create new context if needed
-                        ctx_agent = await stack.enter_async_context(agent)
-                        # Create factory with agent's own configuration
-                        agent_factory = ModelFactory.create_factory(
-                            model_string=agent.config.model,
-                            request_params=agent.config.default_request_params,
-                        )
-                        llm = await ctx_agent.attach_llm(agent_factory)
-
-                task_llms.append((task, llm))
-
-            # Execute all tasks within the same context
-            futures = []
-            for task, llm in task_llms:
-                task_description = TASK_PROMPT_TEMPLATE.format(
-                    objective=previous_result.objective,
-                    task=task.description,
-                    context=context,
-                )
-                # Get the agent's config for task execution
-                agent = self.agents.get(task.agent)
-                task_params = (
-                    agent.config.default_request_params
-                    if hasattr(agent, "config")
-                    else params
-                )
-                futures.append(
-                    llm.generate_str(
-                        message=task_description, request_params=task_params
-                    )
-                )
+        # Execute tasks
+        futures = []
+        for task in step.tasks:
+            agent = self.agents.get(task.agent)
+            if not agent:
+                raise ValueError(f"No agent found matching {task.agent}")
+
+            task_description = TASK_PROMPT_TEMPLATE.format(
+                objective=previous_result.objective,
+                task=task.description,
+                context=context,
+            )
 
-            # Wait for all tasks, including any tool calls they make
-            results = await self.executor.execute(*futures)
+            # All agents should now be LLM-capable
+            futures.append(agent._llm.generate_str(message=task_description))
 
-            # Process results while contexts are still active
-            for (task, _), result in zip(task_llms, results):
-                step_result.add_task_result(
-                    TaskWithResult(**task.model_dump(), result=str(result))
-                )
+        # Wait for all tasks
+        results = await self.executor.execute(*futures)
 
-            # Format final result while contexts are still active
-            step_result.result = format_step_result(step_result)
+        # Process results
+        for task, result in zip(step.tasks, results):
+            step_result.add_task_result(
+                TaskWithResult(**task.model_dump(), result=str(result))
+            )
 
+        step_result.result = format_step_result(step_result)
         return step_result
 
     async def _get_full_plan(
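
The rewritten _execute_step relies on every agent exposing a pre-attached LLM at agent._llm, so the AsyncExitStack lifetime management and per-task ModelFactory calls disappear. One behavioral change to watch: generate_str is now called with the message only, so per-task request_params are no longer threaded through and each worker falls back to its own defaults. The fan-out shape, sketched with asyncio.gather standing in for self.executor.execute (an assumption; the executor's semantics are not shown in this diff):

import asyncio

async def fan_out(agents: dict, tasks: list) -> list:
    # Mirror of the new _execute_step loop: one coroutine per task,
    # each running on the agent's already-attached LLM.
    futures = [
        agents[task.agent]._llm.generate_str(message=task.description)
        for task in tasks
    ]
    # Awaits all workers concurrently, like executor.execute(*futures).
    return await asyncio.gather(*futures)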
mcp_agent/resources/examples/decorator/main.py (deleted)
@@ -1,26 +0,0 @@
-"""
-Example MCP Agent application showing simplified agent access.
-"""
-
-import asyncio
-from mcp_agent.core.fastagent import FastAgent
-
-# Create the application
-agent_app = FastAgent("Interactive Agent Example")
-
-
-# Define the agent
-@agent_app.agent(
-    instruction="A simple agent that helps with basic tasks. Request Human Input when needed.",
-    servers=["mcp_root"],
-    # model="gpt-4o", model override here takes precedence
-)
-async def main():
-    # use the --model= command line switch to specify model
-    async with agent_app.run() as agent:
-        await agent("print the next number in the sequence")
-        await agent.prompt(default="STOP")
-
-
-if __name__ == "__main__":
-    asyncio.run(main())
mcp_agent/resources/examples/decorator/tiny.py (deleted)
@@ -1,22 +0,0 @@
-"""
-Example MCP Agent application showing simplified agent access.
-"""
-
-import asyncio
-from mcp_agent.core.fastagent import FastAgent
-
-# Create the application
-agent_app = FastAgent("Interactive Agent Example")
-# agent_app.app._human_input_callback = None
-
-
-# Define the agent
-@agent_app.agent()
-async def main():
-    # use the --model= command line switch to specify model
-    async with agent_app.run() as agent:
-        await agent()
-
-
-if __name__ == "__main__":
-    asyncio.run(main())