fast-agent-mcp 0.0.9__py3-none-any.whl → 0.0.12__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This release of fast-agent-mcp has been flagged as potentially problematic.

Files changed (37)
  1. {fast_agent_mcp-0.0.9.dist-info → fast_agent_mcp-0.0.12.dist-info}/METADATA +17 -11
  2. {fast_agent_mcp-0.0.9.dist-info → fast_agent_mcp-0.0.12.dist-info}/RECORD +36 -28
  3. mcp_agent/app.py +4 -4
  4. mcp_agent/cli/commands/bootstrap.py +2 -5
  5. mcp_agent/cli/commands/setup.py +1 -1
  6. mcp_agent/cli/main.py +4 -4
  7. mcp_agent/core/enhanced_prompt.py +315 -0
  8. mcp_agent/core/fastagent.py +520 -388
  9. mcp_agent/event_progress.py +5 -2
  10. mcp_agent/human_input/handler.py +6 -2
  11. mcp_agent/logging/rich_progress.py +10 -5
  12. mcp_agent/mcp/mcp_aggregator.py +2 -1
  13. mcp_agent/mcp/mcp_connection_manager.py +67 -37
  14. mcp_agent/resources/examples/internal/agent.py +17 -0
  15. mcp_agent/resources/examples/internal/job.py +83 -0
  16. mcp_agent/resources/examples/mcp_researcher/researcher-eval.py +1 -1
  17. mcp_agent/resources/examples/researcher/fastagent.config.yaml +53 -0
  18. mcp_agent/resources/examples/researcher/researcher-eval.py +53 -0
  19. mcp_agent/resources/examples/researcher/researcher.py +38 -0
  20. mcp_agent/resources/examples/workflows/agent.py +17 -0
  21. mcp_agent/resources/examples/workflows/agent_build.py +61 -0
  22. mcp_agent/resources/examples/workflows/chaining.py +0 -1
  23. mcp_agent/resources/examples/workflows/evaluator.py +6 -3
  24. mcp_agent/resources/examples/workflows/fastagent.py +22 -0
  25. mcp_agent/resources/examples/workflows/orchestrator.py +1 -1
  26. mcp_agent/workflows/evaluator_optimizer/evaluator_optimizer.py +91 -92
  27. mcp_agent/workflows/llm/augmented_llm.py +14 -3
  28. mcp_agent/workflows/llm/augmented_llm_anthropic.py +8 -5
  29. mcp_agent/workflows/llm/augmented_llm_openai.py +20 -9
  30. mcp_agent/workflows/llm/model_factory.py +25 -11
  31. mcp_agent/workflows/orchestrator/orchestrator.py +68 -7
  32. mcp_agent/workflows/orchestrator/orchestrator_prompts.py +11 -6
  33. mcp_agent/workflows/router/router_llm.py +13 -2
  34. mcp_agent/resources/examples/workflows/fastagent.config.yaml +0 -9
  35. {fast_agent_mcp-0.0.9.dist-info → fast_agent_mcp-0.0.12.dist-info}/WHEEL +0 -0
  36. {fast_agent_mcp-0.0.9.dist-info → fast_agent_mcp-0.0.12.dist-info}/entry_points.txt +0 -0
  37. {fast_agent_mcp-0.0.9.dist-info → fast_agent_mcp-0.0.12.dist-info}/licenses/LICENSE +0 -0
--- a/mcp_agent/workflows/orchestrator/orchestrator.py
+++ b/mcp_agent/workflows/orchestrator/orchestrator.py
@@ -11,6 +11,7 @@ from typing import (
 )
 
 from mcp_agent.agents.agent import Agent
+from mcp_agent.event_progress import ProgressAction
 from mcp_agent.workflows.llm.augmented_llm import (
     AugmentedLLM,
     MessageParamT,
@@ -80,6 +81,12 @@ class Orchestrator(AugmentedLLM[MessageParamT, MessageT]):
             plan_type: "full" planning generates the full plan first, then executes. "iterative" plans next step and loops.
             context: Application context
         """
+        # Initialize logger early so we can log
+        self.logger = logger
+
+        # Set a fixed verb - always use PLANNING for all orchestrator activities
+        self.verb = ProgressAction.PLANNING
+
         # Initialize with orchestrator-specific defaults
         orchestrator_params = RequestParams(
             use_history=False,  # Orchestrator doesn't support history
@@ -104,13 +111,24 @@ class Orchestrator(AugmentedLLM[MessageParamT, MessageT]):
         else:
             kwargs["request_params"] = orchestrator_params
 
+        # Pass verb to AugmentedLLM
+        kwargs["verb"] = self.verb
+
         super().__init__(context=context, **kwargs)
 
         self.planner = planner
+
+        if hasattr(self.planner, "verb"):
+            self.planner.verb = self.verb
+
         self.plan_type = plan_type
         self.server_registry = self.context.server_registry
         self.agents = {agent.name: agent for agent in available_agents}
 
+        # Initialize logger
+        self.logger = logger
+        self.name = name
+
     async def generate(
         self,
         message: str | MessageParamT | List[MessageParamT],
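
The changes above pin a single progress verb on the orchestrator and thread it through to both the base `AugmentedLLM` and the planner. A minimal sketch of that propagation pattern, assuming a simplified base class that accepts a `verb` keyword (the `ProgressAction` values mirror the ones used in this diff; everything else is illustrative):

```python
from enum import Enum


class ProgressAction(str, Enum):
    # Values are illustrative; the real enum lives in mcp_agent.event_progress
    PLANNING = "Planning"
    ROUTING = "Routing"


class BaseLLM:
    def __init__(self, verb: ProgressAction | None = None, **kwargs):
        self.verb = verb  # later surfaces in progress events


class Orchestrator(BaseLLM):
    def __init__(self, planner, **kwargs):
        self.verb = ProgressAction.PLANNING  # fixed verb for all orchestrator activity
        kwargs["verb"] = self.verb           # propagate to the base class
        super().__init__(**kwargs)
        self.planner = planner
        if hasattr(self.planner, "verb"):    # keep planner events consistent too
            self.planner.verb = self.verb
```
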
@@ -130,7 +148,7 @@ class Orchestrator(AugmentedLLM[MessageParamT, MessageT]):
     ) -> str:
         """Request an LLM generation and return the string representation of the result"""
         params = self.get_request_params(request_params)
-
+        # TODO -- properly incorporate this in to message display etc.
         result = await self.generate(
             message=message,
             request_params=params,
@@ -163,6 +181,20 @@ class Orchestrator(AugmentedLLM[MessageParamT, MessageT]):
 
         params = self.get_request_params(request_params)
 
+        # Single progress event for orchestration start
+        model = await self.select_model(params) or "unknown-model"
+
+        # Log the progress with minimal required fields
+        self.logger.info(
+            "Planning task execution",
+            data={
+                "progress_action": self.verb,
+                "model": model,
+                "agent_name": self.name,
+                "target": self.name,
+            },
+        )
+
         plan_result = PlanResult(objective=objective, step_results=[])
 
         while iterations < params.max_iterations:
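
The structured `data` payload above is what drives the progress display. A sketch of a consumer that formats one of these events, under the assumption that handlers receive the dict as-is (field names are taken from the diff; the renderer itself is hypothetical):

```python
from typing import Any


def render_progress(event_data: dict[str, Any]) -> str:
    """Format a one-line progress entry from the fields logged above."""
    verb = event_data.get("progress_action", "")
    target = event_data.get("target", "")
    model = event_data.get("model", "")
    return f"{verb:<12} {target} ({model})"


print(render_progress({
    "progress_action": "Planning",
    "model": "unknown-model",
    "agent_name": "orchestrator",
    "target": "orchestrator",
}))
# Planning     orchestrator (unknown-model)
```
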
@@ -193,6 +225,7 @@ class Orchestrator(AugmentedLLM[MessageParamT, MessageT]):
                 plan_result=format_plan_result(plan_result)
             )
 
+            # Use planner directly - planner already has PLANNING verb
             plan_result.result = await self.planner.generate_str(
                 message=synthesis_prompt,
                 request_params=params.model_copy(update={"max_iterations": 1}),
@@ -236,10 +269,22 @@ class Orchestrator(AugmentedLLM[MessageParamT, MessageT]):
 
         # Execute tasks
         futures = []
+        error_tasks = []
+
         for task in step.tasks:
             agent = self.agents.get(task.agent)
             if not agent:
-                raise ValueError(f"No agent found matching {task.agent}")
+                # Instead of failing the entire step, track this as an error task
+                self.logger.error(
+                    f"No agent found matching '{task.agent}'. Available agents: {list(self.agents.keys())}"
+                )
+                error_tasks.append(
+                    (
+                        task,
+                        f"Error: Agent '{task.agent}' not found. Available agents: {', '.join(self.agents.keys())}",
+                    )
+                )
+                continue
 
             task_description = TASK_PROMPT_TEMPLATE.format(
                 objective=previous_result.objective,
@@ -250,13 +295,27 @@ class Orchestrator(AugmentedLLM[MessageParamT, MessageT]):
             # All agents should now be LLM-capable
             futures.append(agent._llm.generate_str(message=task_description))
 
-        # Wait for all tasks
-        results = await self.executor.execute(*futures)
+        # Wait for all tasks (only if we have valid futures)
+        results = await self.executor.execute(*futures) if futures else []
+
+        # Process successful results
+        task_index = 0
+        for task in step.tasks:
+            # Skip tasks that had agent errors (they're in error_tasks)
+            if any(et[0] == task for et in error_tasks):
+                continue
+
+            if task_index < len(results):
+                result = results[task_index]
+                step_result.add_task_result(
+                    TaskWithResult(**task.model_dump(), result=str(result))
+                )
+            task_index += 1
 
-        # Process results
-        for task, result in zip(step.tasks, results):
+        # Add error task results
+        for task, error_message in error_tasks:
             step_result.add_task_result(
-                TaskWithResult(**task.model_dump(), result=str(result))
+                TaskWithResult(**task.model_dump(), result=error_message)
             )
 
         step_result.result = format_step_result(step_result)
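
Because futures are only appended for tasks whose agent resolved, the results list lines up positionally with the non-error tasks; the `task_index` counter walks the two sequences in step. A self-contained sketch of that pairing logic, with plain tuples standing in for the Pydantic task models:

```python
# Tasks as (description, agent) tuples; one of them references a missing agent.
tasks = [("summarize", "writer"), ("lookup", "missing"), ("review", "critic")]
error_tasks = [(("lookup", "missing"), "Error: Agent 'missing' not found")]
results = ["summary text", "review text"]  # one result per valid future

paired = []
task_index = 0
for task in tasks:
    if any(et[0] == task for et in error_tasks):
        continue  # agent was missing; its error result is appended below
    if task_index < len(results):
        paired.append((task, results[task_index]))
    task_index += 1

paired.extend(error_tasks)  # error results come last, mirroring the diff
assert [result for _, result in paired] == [
    "summary text", "review text", "Error: Agent 'missing' not found"
]
```
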
@@ -285,6 +344,7 @@ class Orchestrator(AugmentedLLM[MessageParamT, MessageT]):
             agents=agents,
         )
 
+        # Use planner directly - no verb manipulation needed
         plan = await self.planner.generate_structured(
             message=prompt,
             response_model=Plan,
@@ -316,6 +376,7 @@ class Orchestrator(AugmentedLLM[MessageParamT, MessageT]):
             agents=agents,
         )
 
+        # Use planner directly - no verb manipulation needed
         next_step = await self.planner.generate_structured(
             message=prompt,
             response_model=NextStep,
--- a/mcp_agent/workflows/orchestrator/orchestrator_prompts.py
+++ b/mcp_agent/workflows/orchestrator/orchestrator_prompts.py
@@ -30,12 +30,15 @@ and Agents (which are collections of servers):
 Agents:
 {agents}
 
+IMPORTANT: You can ONLY use the agents listed above. Do not invent or reference agents that are not in the list.
+The plan will fail if you reference agents that are not available.
+
 Generate a plan with all remaining steps needed.
 Steps are sequential, but each Step can have parallel subtasks.
 For each Step, specify a description of the step and independent subtasks that can run in parallel.
 For each subtask specify:
     1. Clear description of the task that an LLM can execute
-    2. Name of 1 Agent OR List of MCP server names to use for the task
+    2. Name of 1 Agent from the available agents list above
 
 Return your response in the following JSON structure:
     {{
@@ -45,11 +48,11 @@ Return your response in the following JSON structure:
         "tasks": [
             {{
                 "description": "Description of task 1",
-                "agent": "agent_name"  # For AgentTask
+                "agent": "agent_name"  # For AgentTask - MUST be one of the available agents
             }},
             {{
                 "description": "Description of task 2",
-                "agent": "agent_name2"
+                "agent": "agent_name2"  # MUST be one of the available agents
             }}
         ]
     }}
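
`generate_structured` parses this JSON into Pydantic models (`Plan`, `NextStep`, and the task types appear elsewhere in this diff). A sketch of model definitions matching the prompt's shape; the field layout is inferred from the prompt text, not copied from the package:

```python
from pydantic import BaseModel


class AgentTask(BaseModel):
    description: str
    agent: str  # must name one of the available agents


class Step(BaseModel):
    description: str
    tasks: list[AgentTask]


class Plan(BaseModel):
    steps: list[Step]


class NextStep(Step):
    is_complete: bool
```
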
@@ -79,19 +82,21 @@ and Agents (which are collections of servers):
 Agents:
 {agents}
 
+IMPORTANT: You can ONLY use the agents listed above. Do not invent or reference agents that are not in the list.
+The plan will fail if you reference agents that are not available.
+
 Generate the next step, by specifying a description of the step and independent subtasks that can run in parallel:
 For each subtask specify:
     1. Clear description of the task that an LLM can execute
-    2. Name of 1 Agent OR List of MCP server names to use for the task
+    2. Name of 1 Agent from the available agents list above
 
 Return your response in the following JSON structure:
     {{
-
         "description": "Description of step 1",
         "tasks": [
             {{
                 "description": "Description of task 1",
-                "agent": "agent_name"  # For AgentTask
+                "agent": "agent_name"  # For AgentTask - MUST be one of the available agents
             }}
         ],
         "is_complete": false
--- a/mcp_agent/workflows/router/router_llm.py
+++ b/mcp_agent/workflows/router/router_llm.py
@@ -6,6 +6,7 @@ from mcp_agent.agents.agent import Agent
 from mcp_agent.workflows.llm.augmented_llm import AugmentedLLM, RequestParams
 from mcp_agent.workflows.router.router_base import ResultT, Router, RouterResult
 from mcp_agent.logging.logger import get_logger
+from mcp_agent.event_progress import ProgressAction
 
 if TYPE_CHECKING:
     from mcp_agent.context import Context
@@ -100,6 +101,9 @@ class LLMRouter(Router):
         default_request_params: Optional[RequestParams] = None,
         **kwargs,
     ):
+        # Extract verb from kwargs to avoid passing it up the inheritance chain
+        self._llm_verb = kwargs.pop("verb", None)
+
         super().__init__(
             server_names=server_names,
             agents=agents,
@@ -161,10 +165,18 @@ class LLMRouter(Router):
         router_params = RequestParams(**params_dict)
         # Set up router-specific request params with routing instruction
         router_params.use_history = False
+        # Use the stored verb if available, otherwise default to ROUTING
+        verb_param = (
+            self._llm_verb
+            if hasattr(self, "_llm_verb") and self._llm_verb
+            else ProgressAction.ROUTING
+        )
+
         self.llm = self.llm_factory(
             agent=None,  # Router doesn't need an agent context
-            name="LLM Router",
+            name=self.name,  # Use the name provided during initialization
             default_request_params=router_params,
+            verb=verb_param,  # Use stored verb parameter or default to ROUTING
         )
         self.initialized = True
 
@@ -243,7 +255,6 @@ class LLMRouter(Router):
             context=context, request=request, top_k=top_k
         )
 
-        # Get routes from LLM
         response = await self.llm.generate_structured(
             message=prompt,
             response_model=StructuredResponse,
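
Earlier in this file, `kwargs.pop("verb", None)` intercepts the keyword before `super().__init__` sees it, which matters when a parent class rejects unexpected keyword arguments. A generic sketch of that pattern (class names are illustrative, not the package's):

```python
class Parent:
    def __init__(self, name: str):
        # Would raise TypeError if handed an unexpected keyword such as `verb`
        self.name = name


class Child(Parent):
    def __init__(self, **kwargs):
        # Pop the keyword this class owns before delegating the rest upward
        self._verb = kwargs.pop("verb", None)
        super().__init__(**kwargs)


child = Child(name="router", verb="Routing")
assert child._verb == "Routing" and child.name == "router"
```
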
@@ -1,9 +0,0 @@
1
- default_model: sonnet
2
- mcp:
3
- servers:
4
- fetch:
5
- command: "uvx"
6
- args: ["mcp-server-fetch"]
7
- filesystem:
8
- command: "npx"
9
- args: ["@modelcontextprotocol/server-filesystem","."]