fast-agent-mcp 0.1.4__py3-none-any.whl → 0.1.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (28)
  1. {fast_agent_mcp-0.1.4.dist-info → fast_agent_mcp-0.1.6.dist-info}/METADATA +7 -1
  2. {fast_agent_mcp-0.1.4.dist-info → fast_agent_mcp-0.1.6.dist-info}/RECORD +28 -17
  3. mcp_agent/agents/agent.py +46 -0
  4. mcp_agent/core/agent_app.py +373 -9
  5. mcp_agent/core/decorators.py +455 -0
  6. mcp_agent/core/enhanced_prompt.py +71 -5
  7. mcp_agent/core/factory.py +501 -0
  8. mcp_agent/core/fastagent.py +143 -1059
  9. mcp_agent/core/proxies.py +71 -14
  10. mcp_agent/core/validation.py +221 -0
  11. mcp_agent/human_input/handler.py +5 -2
  12. mcp_agent/mcp/mcp_aggregator.py +537 -47
  13. mcp_agent/mcp/mcp_connection_manager.py +13 -2
  14. mcp_agent/mcp_server/__init__.py +4 -0
  15. mcp_agent/mcp_server/agent_server.py +121 -0
  16. mcp_agent/resources/examples/internal/fastagent.config.yaml +52 -0
  17. mcp_agent/resources/examples/internal/prompt_category.py +21 -0
  18. mcp_agent/resources/examples/internal/prompt_sizing.py +53 -0
  19. mcp_agent/resources/examples/internal/sizer.py +24 -0
  20. mcp_agent/resources/examples/researcher/fastagent.config.yaml +14 -1
  21. mcp_agent/resources/examples/workflows/sse.py +23 -0
  22. mcp_agent/ui/console_display.py +278 -0
  23. mcp_agent/workflows/llm/augmented_llm.py +245 -179
  24. mcp_agent/workflows/llm/augmented_llm_anthropic.py +49 -3
  25. mcp_agent/workflows/llm/augmented_llm_openai.py +52 -4
  26. {fast_agent_mcp-0.1.4.dist-info → fast_agent_mcp-0.1.6.dist-info}/WHEEL +0 -0
  27. {fast_agent_mcp-0.1.4.dist-info → fast_agent_mcp-0.1.6.dist-info}/entry_points.txt +0 -0
  28. {fast_agent_mcp-0.1.4.dist-info → fast_agent_mcp-0.1.6.dist-info}/licenses/LICENSE +0 -0
mcp_agent/core/factory.py (new file)
@@ -0,0 +1,501 @@
+"""
+Factory functions for creating agent and workflow instances.
+"""
+
+from typing import Dict, Any, Optional, TypeVar, Callable
+
+from mcp_agent.app import MCPApp
+from mcp_agent.agents.agent import Agent, AgentConfig
+from mcp_agent.event_progress import ProgressAction
+from mcp_agent.workflows.evaluator_optimizer.evaluator_optimizer import (
+    EvaluatorOptimizerLLM,
+    QualityRating,
+)
+from mcp_agent.workflows.llm.augmented_llm import RequestParams
+from mcp_agent.workflows.llm.model_factory import ModelFactory
+from mcp_agent.workflows.orchestrator.orchestrator import Orchestrator
+from mcp_agent.workflows.parallel.parallel_llm import ParallelLLM
+from mcp_agent.workflows.router.router_llm import LLMRouter
+
+from mcp_agent.core.agent_types import AgentType
+from mcp_agent.core.exceptions import AgentConfigError
+from mcp_agent.core.proxies import (
+    BaseAgentProxy,
+    LLMAgentProxy,
+    WorkflowProxy,
+    RouterProxy,
+    ChainProxy,
+)
+from mcp_agent.core.types import AgentOrWorkflow, ProxyDict
+from mcp_agent.core.agent_utils import log_agent_load, unwrap_proxy, get_agent_instances
+from mcp_agent.core.validation import get_dependencies
+
+T = TypeVar("T")  # For the wrapper classes
+
+
+def create_proxy(
+    app: MCPApp,
+    name: str,
+    instance: AgentOrWorkflow,
+    agent_type: str
+) -> BaseAgentProxy:
+    """Create appropriate proxy type based on agent type and validate instance type
+
+    Args:
+        app: The MCPApp instance
+        name: Name of the agent/workflow
+        instance: The agent or workflow instance
+        agent_type: Type from AgentType enum values
+
+    Returns:
+        Appropriate proxy type wrapping the instance
+
+    Raises:
+        TypeError: If instance type doesn't match expected type for agent_type
+    """
+    if agent_type not in [
+        AgentType.PARALLEL.value,
+        AgentType.EVALUATOR_OPTIMIZER.value,
+        AgentType.CHAIN.value,
+    ]:
+        log_agent_load(app, name)
+    if agent_type == AgentType.BASIC.value:
+        if not isinstance(instance, Agent):
+            raise TypeError(
+                f"Expected Agent instance for {name}, got {type(instance)}"
+            )
+        return LLMAgentProxy(app, name, instance)
+    elif agent_type == AgentType.ORCHESTRATOR.value:
+        if not isinstance(instance, Orchestrator):
+            raise TypeError(
+                f"Expected Orchestrator instance for {name}, got {type(instance)}"
+            )
+        return WorkflowProxy(app, name, instance)
+    elif agent_type == AgentType.PARALLEL.value:
+        if not isinstance(instance, ParallelLLM):
+            raise TypeError(
+                f"Expected ParallelLLM instance for {name}, got {type(instance)}"
+            )
+        return WorkflowProxy(app, name, instance)
+    elif agent_type == AgentType.EVALUATOR_OPTIMIZER.value:
+        if not isinstance(instance, EvaluatorOptimizerLLM):
+            raise TypeError(
+                f"Expected EvaluatorOptimizerLLM instance for {name}, got {type(instance)}"
+            )
+        return WorkflowProxy(app, name, instance)
+    elif agent_type == AgentType.ROUTER.value:
+        if not isinstance(instance, LLMRouter):
+            raise TypeError(
+                f"Expected LLMRouter instance for {name}, got {type(instance)}"
+            )
+        return RouterProxy(app, name, instance)
+    elif agent_type == AgentType.CHAIN.value:
+        # Chain proxy is directly returned from _create_agents_by_type
+        # No need for type checking as it's already a ChainProxy
+        return instance
+    else:
+        raise ValueError(f"Unknown agent type: {agent_type}")
+
+
+def get_model_factory(
+    context,
+    model: Optional[str] = None,
+    request_params: Optional[RequestParams] = None,
+    default_model: Optional[str] = None,
+    cli_model: Optional[str] = None,
+) -> Any:
+    """
+    Get model factory using specified or default model.
+    Model string is parsed by ModelFactory to determine provider and reasoning effort.
+
+    Args:
+        context: Application context
+        model: Optional model specification string (highest precedence)
+        request_params: Optional RequestParams to configure LLM behavior
+        default_model: Default model from configuration
+        cli_model: Model specified via command line
+
+    Returns:
+        ModelFactory instance for the specified or default model
+    """
+    # Config has lowest precedence
+    model_spec = default_model or context.config.default_model
+
+    # Command line override has next precedence
+    if cli_model:
+        model_spec = cli_model
+
+    # Model from decorator has highest precedence
+    if model:
+        model_spec = model
+
+    # Update or create request_params with the final model choice
+    if request_params:
+        request_params = request_params.model_copy(update={"model": model_spec})
+    else:
+        request_params = RequestParams(model=model_spec)
+
+    # Let model factory handle the model string parsing and setup
+    return ModelFactory.create_factory(model_spec, request_params=request_params)
+
+
+async def create_agents_by_type(
+    app_instance: MCPApp,
+    agents_dict: Dict[str, Dict[str, Any]],
+    agent_type: AgentType,
+    active_agents: ProxyDict = None,
+    model_factory_func: Callable = None,
+    **kwargs,
+) -> ProxyDict:
+    """
+    Generic method to create agents of a specific type.
+
+    Args:
+        app_instance: The main application instance
+        agents_dict: Dictionary of agent configurations
+        agent_type: Type of agents to create
+        active_agents: Dictionary of already created agents/proxies (for dependencies)
+        model_factory_func: Function for creating model factories
+        **kwargs: Additional type-specific parameters
+
+    Returns:
+        Dictionary of initialized agents wrapped in appropriate proxies
+    """
+    if active_agents is None:
+        active_agents = {}
+
+    # Create a dictionary to store the initialized agents
+    result_agents = {}
+
+    # Get all agents of the specified type
+    for name, agent_data in agents_dict.items():
+        if agent_data["type"] == agent_type.value:
+            # Get common configuration
+            config = agent_data["config"]
+
+            # Type-specific initialization
+            if agent_type == AgentType.BASIC:
+                # Get the agent name for special handling
+                agent_name = agent_data["config"].name
+
+                # Check if this is an agent that should use the PassthroughLLM
+                if agent_name.endswith("_fan_in") or agent_name.startswith(
+                    "passthrough"
+                ):
+                    # Import here to avoid circular imports
+                    from mcp_agent.workflows.llm.augmented_llm import PassthroughLLM
+
+                    # Create basic agent with configuration
+                    agent = Agent(config=config, context=app_instance.context)
+
+                    # Set up a PassthroughLLM directly
+                    async with agent:
+                        agent._llm = PassthroughLLM(
+                            name=f"{config.name}_llm",
+                            context=app_instance.context,
+                            agent=agent,
+                            default_request_params=config.default_request_params,
+                        )
+
+                    # Store the agent
+                    instance = agent
+                else:
+                    # Standard basic agent with LLM
+                    agent = Agent(config=config, context=app_instance.context)
+
+                    # Set up LLM with proper configuration
+                    async with agent:
+                        llm_factory = model_factory_func(
+                            model=config.model,
+                            request_params=config.default_request_params,
+                        )
+                        agent._llm = await agent.attach_llm(llm_factory)
+
+                    # Store the agent
+                    instance = agent
+
+            elif agent_type == AgentType.ORCHESTRATOR:
+                # Get base params configured with model settings
+                base_params = (
+                    config.default_request_params.model_copy()
+                    if config.default_request_params
+                    else RequestParams()
+                )
+                base_params.use_history = False  # Force no history for orchestrator
+
+                # Get the child agents - need to unwrap proxies and validate LLM config
+                child_agents = []
+                for agent_name in agent_data["child_agents"]:
+                    proxy = active_agents[agent_name]
+                    instance = unwrap_proxy(proxy)
+                    # Validate basic agents have LLM
+                    if isinstance(instance, Agent):
+                        if not hasattr(instance, "_llm") or not instance._llm:
+                            raise AgentConfigError(
+                                f"Agent '{agent_name}' used by orchestrator '{name}' missing LLM configuration",
+                                "All agents must be fully configured with LLMs before being used in an orchestrator",
+                            )
+                    child_agents.append(instance)
+
+                # Create a properly configured planner agent
+                planner_config = AgentConfig(
+                    name=f"{name}",  # Use orchestrator name as prefix
+                    instruction=config.instruction
+                    or """
+                    You are an expert planner. Given an objective task and a list of MCP servers (which are collections of tools)
+                    or Agents (which are collections of servers), your job is to break down the objective into a series of steps,
+                    which can be performed by LLMs with access to the servers or agents.
+                    """,
+                    servers=[],  # Planner doesn't need server access
+                    model=config.model,
+                    default_request_params=base_params,
+                )
+                planner_agent = Agent(
+                    config=planner_config, context=app_instance.context
+                )
+                planner_factory = model_factory_func(
+                    model=config.model,
+                    request_params=config.default_request_params,
+                )
+
+                async with planner_agent:
+                    planner = await planner_agent.attach_llm(planner_factory)
+
+                # Create the orchestrator with pre-configured planner
+                instance = Orchestrator(
+                    name=config.name,
+                    planner=planner,  # Pass pre-configured planner
+                    available_agents=child_agents,
+                    context=app_instance.context,
+                    request_params=planner.default_request_params,  # Base params already include model settings
+                    plan_type=agent_data.get(
+                        "plan_type", "full"
+                    ),  # Get plan_type from agent_data
+                    verb=ProgressAction.PLANNING,
+                )
+
+            elif agent_type == AgentType.EVALUATOR_OPTIMIZER:
+                # Get the referenced agents - unwrap from proxies
+                generator = unwrap_proxy(
+                    active_agents[agent_data["generator"]]
+                )
+                evaluator = unwrap_proxy(
+                    active_agents[agent_data["evaluator"]]
+                )
+
+                if not generator or not evaluator:
+                    raise ValueError(
+                        f"Missing agents for workflow {name}: "
+                        f"generator={agent_data['generator']}, "
+                        f"evaluator={agent_data['evaluator']}"
+                    )
+
+                # Get model from generator if it's an Agent, or from config otherwise
+                optimizer_model = None
+                if isinstance(generator, Agent):
+                    optimizer_model = generator.config.model
+                elif hasattr(generator, '_sequence') and hasattr(generator, '_agent_proxies'):
+                    # For ChainProxy, use the config model directly
+                    optimizer_model = config.model
+
+                instance = EvaluatorOptimizerLLM(
+                    name=config.name,  # Pass name from config
+                    generator=generator,
+                    evaluator=evaluator,
+                    min_rating=QualityRating[agent_data["min_rating"]],
+                    max_refinements=agent_data["max_refinements"],
+                    llm_factory=model_factory_func(model=optimizer_model),
+                    context=app_instance.context,
+                    instruction=config.instruction,  # Pass any custom instruction
+                )
+
+            elif agent_type == AgentType.ROUTER:
+                # Get the router's agents - unwrap proxies
+                router_agents = get_agent_instances(
+                    agent_data["agents"], active_agents
+                )
+
+                # Create the router with proper configuration
+                llm_factory = model_factory_func(
+                    model=config.model,
+                    request_params=config.default_request_params,
+                )
+
+                instance = LLMRouter(
+                    name=config.name,
+                    llm_factory=llm_factory,
+                    agents=router_agents,
+                    server_names=config.servers,
+                    context=app_instance.context,
+                    default_request_params=config.default_request_params,
+                    verb=ProgressAction.ROUTING,  # Set verb for progress display
+                )
+
+            elif agent_type == AgentType.CHAIN:
+                # Get the sequence from either parameter
+                sequence = agent_data.get("sequence", agent_data.get("agents", []))
+
+                # Auto-generate instruction if not provided or if it's just the default
+                default_instruction = f"Chain of agents: {', '.join(sequence)}"
+
+                # If user provided a custom instruction, use that
+                # Otherwise, generate a description based on the sequence and their servers
+                if config.instruction == default_instruction:
+                    # Get all agent names in the sequence
+                    agent_names = []
+                    all_servers = set()
+
+                    # Collect information about the agents and their servers
+                    for agent_name in sequence:
+                        if agent_name in active_agents:
+                            agent_proxy = active_agents[agent_name]
+                            if hasattr(agent_proxy, "_agent"):
+                                # For LLMAgentProxy
+                                agent_instance = agent_proxy._agent
+                                agent_names.append(agent_name)
+                                if hasattr(agent_instance, "server_names"):
+                                    all_servers.update(agent_instance.server_names)
+                            elif hasattr(agent_proxy, "_workflow"):
+                                # For WorkflowProxy
+                                agent_names.append(agent_name)
+
+                    # Generate a better description
+                    if agent_names:
+                        server_part = (
+                            f" with access to servers: {', '.join(sorted(all_servers))}"
+                            if all_servers
+                            else ""
+                        )
+                        config.instruction = f"Sequence of agents: {', '.join(agent_names)}{server_part}."
+
+                # Create a ChainProxy without needing a new instance
+                # Just pass the agent proxies and sequence
+                instance = ChainProxy(app_instance, name, sequence, active_agents)
+                # Set continue_with_final behavior from configuration
+                instance._continue_with_final = agent_data.get(
+                    "continue_with_final", True
+                )
+                # Set cumulative behavior from configuration
+                instance._cumulative = agent_data.get(
+                    "cumulative", False
+                )
+
+            elif agent_type == AgentType.PARALLEL:
+                # Get fan-out agents (could be basic agents or other parallels)
+                fan_out_agents = get_agent_instances(
+                    agent_data["fan_out"], active_agents
+                )
+
+                # Get fan-in agent - unwrap proxy
+                fan_in_agent = unwrap_proxy(
+                    active_agents[agent_data["fan_in"]]
+                )
+
+                # Create the parallel workflow
+                llm_factory = model_factory_func(config.model)
+                instance = ParallelLLM(
+                    name=config.name,
+                    instruction=config.instruction,
+                    fan_out_agents=fan_out_agents,
+                    fan_in_agent=fan_in_agent,
+                    context=app_instance.context,
+                    llm_factory=llm_factory,
+                    default_request_params=config.default_request_params,
+                    include_request=agent_data.get("include_request", True),
+                )
+
+            else:
+                raise ValueError(f"Unsupported agent type: {agent_type}")
+
+            # Create the appropriate proxy and store in results
+            result_agents[name] = create_proxy(
+                app_instance, name, instance, agent_type.value
+            )
+
+    return result_agents
+
+
+async def create_basic_agents(
+    app_instance: MCPApp,
+    agents_dict: Dict[str, Dict[str, Any]],
+    model_factory_func: Callable,
+) -> ProxyDict:
+    """
+    Create and initialize basic agents with their configurations.
+
+    Args:
+        app_instance: The main application instance
+        agents_dict: Dictionary of agent configurations
+        model_factory_func: Function for creating model factories
+
+    Returns:
+        Dictionary of initialized basic agents wrapped in appropriate proxies
+    """
+    return await create_agents_by_type(
+        app_instance,
+        agents_dict,
+        AgentType.BASIC,
+        model_factory_func=model_factory_func
+    )
+
+
+async def create_agents_in_dependency_order(
+    app_instance: MCPApp,
+    agents_dict: Dict[str, Dict[str, Any]],
+    active_agents: ProxyDict,
+    agent_type: AgentType,
+    model_factory_func: Callable,
+) -> ProxyDict:
+    """
+    Create agents in dependency order to avoid circular references.
+    Works for both Parallel and Chain workflows.
+
+    Args:
+        app_instance: The main application instance
+        agents_dict: Dictionary of agent configurations
+        active_agents: Dictionary of already created agents/proxies
+        agent_type: Type of agents to create (AgentType.PARALLEL or AgentType.CHAIN)
+        model_factory_func: Function for creating model factories
+
+    Returns:
+        Dictionary of initialized agents
+    """
+    result_agents = {}
+    visited = set()
+
+    # Get all agents of the specified type
+    agent_names = [
+        name
+        for name, agent_data in agents_dict.items()
+        if agent_data["type"] == agent_type.value
+    ]
+
+    # Create agents in dependency order
+    for name in agent_names:
+        # Get ordered dependencies if not already processed
+        if name not in visited:
+            try:
+                ordered_agents = get_dependencies(
+                    name, agents_dict, visited, set(), agent_type
+                )
+            except ValueError as e:
+                raise ValueError(
+                    f"Error creating {agent_type.name.lower()} agent {name}: {str(e)}"
+                )
+
+            # Create each agent in order
+            for agent_name in ordered_agents:
+                if agent_name not in result_agents:
+                    # Create one agent at a time using the generic method
+                    agent_result = await create_agents_by_type(
+                        app_instance,
+                        agents_dict,
+                        agent_type,
+                        active_agents,
+                        model_factory_func=model_factory_func,
+                        agent_name=agent_name,
+                    )
+                    if agent_name in agent_result:
+                        result_agents[agent_name] = agent_result[agent_name]
+
+    return result_agents
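
For readers reviewing this hunk, the precedence chain documented in get_model_factory (configuration default, then command-line override, then the model supplied at the decorator call site) can be illustrated with a minimal standalone sketch. This is not code from the package: it only mirrors the ordering shown in the diff above, and the helper name resolve_model_spec and the sample model strings are illustrative assumptions.

from typing import Optional


def resolve_model_spec(
    default_model: Optional[str] = None,
    cli_model: Optional[str] = None,
    decorator_model: Optional[str] = None,
) -> Optional[str]:
    # Mirrors get_model_factory's ordering: the config default has lowest
    # precedence, a command-line override comes next, and a model passed
    # on the decorator wins.
    model_spec = default_model
    if cli_model:
        model_spec = cli_model
    if decorator_model:
        model_spec = decorator_model
    return model_spec


# Illustrative model strings only; in the package the winning string is
# handed to ModelFactory.create_factory for provider/effort parsing.
assert resolve_model_spec("haiku") == "haiku"
assert resolve_model_spec("haiku", cli_model="sonnet") == "sonnet"
assert resolve_model_spec("haiku", cli_model="sonnet", decorator_model="gpt-4o") == "gpt-4o"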