fast_agent_mcp-0.0.7-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of fast-agent-mcp might be problematic.

Files changed (100)
  1. fast_agent_mcp-0.0.7.dist-info/METADATA +322 -0
  2. fast_agent_mcp-0.0.7.dist-info/RECORD +100 -0
  3. fast_agent_mcp-0.0.7.dist-info/WHEEL +4 -0
  4. fast_agent_mcp-0.0.7.dist-info/entry_points.txt +5 -0
  5. fast_agent_mcp-0.0.7.dist-info/licenses/LICENSE +201 -0
  6. mcp_agent/__init__.py +0 -0
  7. mcp_agent/agents/__init__.py +0 -0
  8. mcp_agent/agents/agent.py +277 -0
  9. mcp_agent/app.py +303 -0
  10. mcp_agent/cli/__init__.py +0 -0
  11. mcp_agent/cli/__main__.py +4 -0
  12. mcp_agent/cli/commands/bootstrap.py +221 -0
  13. mcp_agent/cli/commands/config.py +11 -0
  14. mcp_agent/cli/commands/setup.py +229 -0
  15. mcp_agent/cli/main.py +68 -0
  16. mcp_agent/cli/terminal.py +24 -0
  17. mcp_agent/config.py +334 -0
  18. mcp_agent/console.py +28 -0
  19. mcp_agent/context.py +251 -0
  20. mcp_agent/context_dependent.py +48 -0
  21. mcp_agent/core/fastagent.py +1013 -0
  22. mcp_agent/eval/__init__.py +0 -0
  23. mcp_agent/event_progress.py +88 -0
  24. mcp_agent/executor/__init__.py +0 -0
  25. mcp_agent/executor/decorator_registry.py +120 -0
  26. mcp_agent/executor/executor.py +293 -0
  27. mcp_agent/executor/task_registry.py +34 -0
  28. mcp_agent/executor/temporal.py +405 -0
  29. mcp_agent/executor/workflow.py +197 -0
  30. mcp_agent/executor/workflow_signal.py +325 -0
  31. mcp_agent/human_input/__init__.py +0 -0
  32. mcp_agent/human_input/handler.py +49 -0
  33. mcp_agent/human_input/types.py +58 -0
  34. mcp_agent/logging/__init__.py +0 -0
  35. mcp_agent/logging/events.py +123 -0
  36. mcp_agent/logging/json_serializer.py +163 -0
  37. mcp_agent/logging/listeners.py +216 -0
  38. mcp_agent/logging/logger.py +365 -0
  39. mcp_agent/logging/rich_progress.py +120 -0
  40. mcp_agent/logging/tracing.py +140 -0
  41. mcp_agent/logging/transport.py +461 -0
  42. mcp_agent/mcp/__init__.py +0 -0
  43. mcp_agent/mcp/gen_client.py +85 -0
  44. mcp_agent/mcp/mcp_activity.py +18 -0
  45. mcp_agent/mcp/mcp_agent_client_session.py +242 -0
  46. mcp_agent/mcp/mcp_agent_server.py +56 -0
  47. mcp_agent/mcp/mcp_aggregator.py +394 -0
  48. mcp_agent/mcp/mcp_connection_manager.py +330 -0
  49. mcp_agent/mcp/stdio.py +104 -0
  50. mcp_agent/mcp_server_registry.py +275 -0
  51. mcp_agent/progress_display.py +10 -0
  52. mcp_agent/resources/examples/decorator/main.py +26 -0
  53. mcp_agent/resources/examples/decorator/optimizer.py +78 -0
  54. mcp_agent/resources/examples/decorator/orchestrator.py +68 -0
  55. mcp_agent/resources/examples/decorator/parallel.py +81 -0
  56. mcp_agent/resources/examples/decorator/router.py +56 -0
  57. mcp_agent/resources/examples/decorator/tiny.py +22 -0
  58. mcp_agent/resources/examples/mcp_researcher/main-evalopt.py +53 -0
  59. mcp_agent/resources/examples/mcp_researcher/main.py +38 -0
  60. mcp_agent/telemetry/__init__.py +0 -0
  61. mcp_agent/telemetry/usage_tracking.py +18 -0
  62. mcp_agent/workflows/__init__.py +0 -0
  63. mcp_agent/workflows/embedding/__init__.py +0 -0
  64. mcp_agent/workflows/embedding/embedding_base.py +61 -0
  65. mcp_agent/workflows/embedding/embedding_cohere.py +49 -0
  66. mcp_agent/workflows/embedding/embedding_openai.py +46 -0
  67. mcp_agent/workflows/evaluator_optimizer/__init__.py +0 -0
  68. mcp_agent/workflows/evaluator_optimizer/evaluator_optimizer.py +359 -0
  69. mcp_agent/workflows/intent_classifier/__init__.py +0 -0
  70. mcp_agent/workflows/intent_classifier/intent_classifier_base.py +120 -0
  71. mcp_agent/workflows/intent_classifier/intent_classifier_embedding.py +134 -0
  72. mcp_agent/workflows/intent_classifier/intent_classifier_embedding_cohere.py +45 -0
  73. mcp_agent/workflows/intent_classifier/intent_classifier_embedding_openai.py +45 -0
  74. mcp_agent/workflows/intent_classifier/intent_classifier_llm.py +161 -0
  75. mcp_agent/workflows/intent_classifier/intent_classifier_llm_anthropic.py +60 -0
  76. mcp_agent/workflows/intent_classifier/intent_classifier_llm_openai.py +60 -0
  77. mcp_agent/workflows/llm/__init__.py +0 -0
  78. mcp_agent/workflows/llm/augmented_llm.py +645 -0
  79. mcp_agent/workflows/llm/augmented_llm_anthropic.py +539 -0
  80. mcp_agent/workflows/llm/augmented_llm_openai.py +615 -0
  81. mcp_agent/workflows/llm/llm_selector.py +345 -0
  82. mcp_agent/workflows/llm/model_factory.py +175 -0
  83. mcp_agent/workflows/orchestrator/__init__.py +0 -0
  84. mcp_agent/workflows/orchestrator/orchestrator.py +407 -0
  85. mcp_agent/workflows/orchestrator/orchestrator_models.py +154 -0
  86. mcp_agent/workflows/orchestrator/orchestrator_prompts.py +113 -0
  87. mcp_agent/workflows/parallel/__init__.py +0 -0
  88. mcp_agent/workflows/parallel/fan_in.py +350 -0
  89. mcp_agent/workflows/parallel/fan_out.py +187 -0
  90. mcp_agent/workflows/parallel/parallel_llm.py +141 -0
  91. mcp_agent/workflows/router/__init__.py +0 -0
  92. mcp_agent/workflows/router/router_base.py +276 -0
  93. mcp_agent/workflows/router/router_embedding.py +240 -0
  94. mcp_agent/workflows/router/router_embedding_cohere.py +59 -0
  95. mcp_agent/workflows/router/router_embedding_openai.py +59 -0
  96. mcp_agent/workflows/router/router_llm.py +301 -0
  97. mcp_agent/workflows/swarm/__init__.py +0 -0
  98. mcp_agent/workflows/swarm/swarm.py +320 -0
  99. mcp_agent/workflows/swarm/swarm_anthropic.py +42 -0
  100. mcp_agent/workflows/swarm/swarm_openai.py +41 -0
mcp_agent/core/fastagent.py (new file)
@@ -0,0 +1,1013 @@
"""
Decorator-based interface for MCP Agent applications.
Provides a simplified way to create and manage agents using decorators.
"""

from typing import List, Optional, Dict, Callable, TypeVar, Any, Union, TypeAlias
from enum import Enum
import yaml
import argparse
from contextlib import asynccontextmanager

from mcp_agent.app import MCPApp
from mcp_agent.agents.agent import Agent, AgentConfig
from mcp_agent.context_dependent import ContextDependent
from mcp_agent.workflows.orchestrator.orchestrator import Orchestrator
from mcp_agent.workflows.parallel.parallel_llm import ParallelLLM
from mcp_agent.workflows.evaluator_optimizer.evaluator_optimizer import (
    EvaluatorOptimizerLLM,
    QualityRating,
)
from mcp_agent.workflows.router.router_llm import LLMRouter
from mcp_agent.config import Settings
from rich.prompt import Prompt
from rich import print
from mcp_agent.progress_display import progress_display
from mcp_agent.workflows.llm.model_factory import ModelFactory
from mcp_agent.workflows.llm.augmented_llm import RequestParams

import readline  # noqa: F401


# Type aliases for better readability
WorkflowType: TypeAlias = Union[
    Orchestrator, ParallelLLM, EvaluatorOptimizerLLM, LLMRouter
]
AgentOrWorkflow: TypeAlias = Union[Agent, WorkflowType]
ProxyDict: TypeAlias = Dict[str, "BaseAgentProxy"]


class AgentType(Enum):
    """Enumeration of supported agent types."""

    BASIC = "agent"
    ORCHESTRATOR = "orchestrator"
    PARALLEL = "parallel"
    EVALUATOR_OPTIMIZER = "evaluator_optimizer"
    ROUTER = "router"


T = TypeVar("T")  # For the wrapper classes


class BaseAgentProxy:
    """Base class for all proxy types"""

    def __init__(self, app: MCPApp, name: str):
        self._app = app
        self._name = name

    async def __call__(self, message: Optional[str] = None) -> str:
        """Allow: agent.researcher('message')"""
        return await self.send(message)

    async def send(self, message: Optional[str] = None) -> str:
        """Allow: agent.researcher.send('message')"""
        if message is None:
            return await self.prompt()
        return await self.generate_str(message)

    async def prompt(self, default_prompt: str = "") -> str:
        """Allow: agent.researcher.prompt()"""
        return await self._app.prompt(self._name, default_prompt)

    async def generate_str(self, message: str) -> str:
        """Generate response for a message - must be implemented by subclasses"""
        raise NotImplementedError("Subclasses must implement generate_str")


class AgentProxy(BaseAgentProxy):
    """Legacy proxy for individual agent operations"""

    async def generate_str(self, message: str) -> str:
        return await self._app.send(self._name, message)


class LLMAgentProxy(BaseAgentProxy):
    """Proxy for regular agents that use _llm.generate_str()"""

    def __init__(self, app: MCPApp, name: str, agent: Agent):
        super().__init__(app, name)
        self._agent = agent

    async def generate_str(self, message: str) -> str:
        return await self._agent._llm.generate_str(message)


class WorkflowProxy(BaseAgentProxy):
    """Proxy for workflow types that implement generate_str() directly"""

    def __init__(self, app: MCPApp, name: str, workflow: WorkflowType):
        super().__init__(app, name)
        self._workflow = workflow

    async def generate_str(self, message: str) -> str:
        return await self._workflow.generate_str(message)


class RouterProxy(BaseAgentProxy):
    """Proxy for LLM Routers"""

    def __init__(self, app: MCPApp, name: str, workflow: WorkflowType):
        super().__init__(app, name)
        self._workflow = workflow

    async def generate_str(self, message: str) -> str:
        results = await self._workflow.route(message)
        if not results:
            return "No appropriate route found for the request."

        # Get the top result
        top_result = results[0]
        if isinstance(top_result.result, Agent):
            # Agent route - delegate to the agent
            return await top_result.result._llm.generate_str(message)
        elif isinstance(top_result.result, str):
            # Server route - direct tool calls are not yet supported
            return "Tool call requested by router - not yet supported"

        return f"Routed to: {top_result.result} ({top_result.confidence}): {top_result.reasoning}"


class AgentApp:
    """Main application wrapper"""

    def __init__(self, app: MCPApp, agents: ProxyDict):
        self._app = app
        self._agents = agents
        # Optional: set default agent for direct calls
        self._default = next(iter(agents)) if agents else None

    async def send(self, agent_name: str, message: Optional[str]) -> str:
        """Core message sending"""
        if agent_name not in self._agents:
            raise ValueError(f"No agent named '{agent_name}'")

        if not message:
            return await self.prompt(agent_name)

        proxy = self._agents[agent_name]
        return await proxy.generate_str(message)

    async def prompt(self, agent_name: Optional[str] = None, default: str = "") -> str:
        """
        Interactive prompt for sending messages.

        Args:
            agent_name: Optional target agent name (uses the default agent if not specified)
            default: Default message to use when the user presses enter
        """

        agent = agent_name or self._default

        if agent not in self._agents:
            raise ValueError(f"No agent named '{agent}'")
        result = ""
        while True:
            with progress_display.paused():
                if default == "STOP":
                    print("Press <ENTER> to finish.")
                elif default != "":
                    print("Enter a prompt, or [red]STOP[/red] to finish.")
                    print(
                        f"Press <ENTER> to use the default prompt:\n[cyan]{default}[/cyan]"
                    )
                else:
                    print("Enter a prompt, or [red]STOP[/red] to finish")

                prompt_text = f"[blue]{agent}[/blue] >"
                user_input = Prompt.ask(
                    prompt=prompt_text, default=default, show_default=False
                )
                if user_input.upper() == "STOP":
                    return result  # Return the last result instead of None
                if user_input == "":
                    continue

            result = await self.send(agent, user_input)

        return result

    def __getattr__(self, name: str) -> AgentProxy:
        """Support: agent.researcher"""
        if name not in self._agents:
            raise AttributeError(f"No agent named '{name}'")
        # AgentApp itself provides the send() interface AgentProxy expects
        return AgentProxy(self, name)

    def __getitem__(self, name: str) -> AgentProxy:
        """Support: agent['researcher']"""
        if name not in self._agents:
            raise KeyError(f"No agent named '{name}'")
        return AgentProxy(self, name)

    async def __call__(
        self, message: Optional[str] = "", agent_name: Optional[str] = None
    ) -> str:
        """Support: agent('message')"""
        target = agent_name or self._default
        if not target:
            raise ValueError("No default agent available")
        return await self.send(target, message)
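AgentApp deliberately exposes several equivalent call styles, all funnelling into send(). A minimal sketch of how they line up, assuming an app obtained from FastAgent.run() (defined later in this file) and an illustrative agent named "researcher":

async def demo(app: AgentApp) -> None:
    print(await app("hello"))                     # __call__ on the default agent
    print(await app.send("researcher", "hello"))  # explicit agent name
    print(await app.researcher("hello"))          # attribute access via __getattr__
    print(await app["researcher"]("hello"))       # item access via __getitem__
    await app.prompt("researcher")                # interactive loop until STOP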

class FastAgent(ContextDependent):
    """
    A decorator-based interface for MCP Agent applications.
    Provides a simplified way to create and manage agents using decorators.
    """

    def _create_proxy(
        self, name: str, instance: AgentOrWorkflow, agent_type: str
    ) -> BaseAgentProxy:
        """Create the appropriate proxy type based on agent type and validate the instance type

        Args:
            name: Name of the agent/workflow
            instance: The agent or workflow instance
            agent_type: Type from AgentType enum values

        Returns:
            Appropriate proxy type wrapping the instance

        Raises:
            TypeError: If the instance type doesn't match the expected type for agent_type
        """
        if agent_type == AgentType.BASIC.value:
            if not isinstance(instance, Agent):
                raise TypeError(
                    f"Expected Agent instance for {name}, got {type(instance)}"
                )
            return LLMAgentProxy(self.app, name, instance)
        elif agent_type == AgentType.ORCHESTRATOR.value:
            if not isinstance(instance, Orchestrator):
                raise TypeError(
                    f"Expected Orchestrator instance for {name}, got {type(instance)}"
                )
            return WorkflowProxy(self.app, name, instance)
        elif agent_type == AgentType.PARALLEL.value:
            if not isinstance(instance, ParallelLLM):
                raise TypeError(
                    f"Expected ParallelLLM instance for {name}, got {type(instance)}"
                )
            return WorkflowProxy(self.app, name, instance)
        elif agent_type == AgentType.EVALUATOR_OPTIMIZER.value:
            if not isinstance(instance, EvaluatorOptimizerLLM):
                raise TypeError(
                    f"Expected EvaluatorOptimizerLLM instance for {name}, got {type(instance)}"
                )
            return WorkflowProxy(self.app, name, instance)
        elif agent_type == AgentType.ROUTER.value:
            if not isinstance(instance, LLMRouter):
                raise TypeError(
                    f"Expected LLMRouter instance for {name}, got {type(instance)}"
                )
            return RouterProxy(self.app, name, instance)
        else:
            raise ValueError(f"Unknown agent type: {agent_type}")

    def __init__(self, name: str, config_path: Optional[str] = None):
        """
        Initialize the decorator interface.

        Args:
            name: Name of the application
            config_path: Optional path to a config file
        """
        # Initialize ContextDependent
        super().__init__()

        # Set up command line argument parsing
        parser = argparse.ArgumentParser(description="MCP Agent Application")
        parser.add_argument(
            "--model",
            help="Override the default model for all agents. Precedence is default < config_file < command line < constructor",
        )
        self.args = parser.parse_args()

        self.name = name
        self.config_path = config_path
        self._load_config()
        self.app = MCPApp(
            name=name,
            settings=Settings(**self.config) if hasattr(self, "config") else None,
        )
        self.agents: Dict[str, Dict[str, Any]] = {}

    @property
    def context(self):
        """Access the application context"""
        return self.app.context

    def _load_config(self) -> None:
        """Load configuration from a YAML file, without dotenv processing"""
        if self.config_path:
            with open(self.config_path) as f:
                self.config = yaml.safe_load(f)

    def _get_model_factory(
        self,
        model: Optional[str] = None,
        request_params: Optional[RequestParams] = None,
    ) -> Any:
        """
        Get a model factory using the specified or default model.
        The model string is parsed by ModelFactory to determine provider and reasoning effort.

        Args:
            model: Optional model specification string
            request_params: Optional RequestParams to configure LLM behavior

        Returns:
            ModelFactory instance for the specified or default model
        """

        # Config has lowest precedence
        model_spec = self.context.config.default_model

        # Command line override has next precedence
        if self.args.model:
            model_spec = self.args.model

        # Model from the decorator has highest precedence
        if model:
            model_spec = model

        # Update or create request_params with the final model choice
        if request_params:
            request_params = request_params.model_copy(update={"model": model_spec})
        else:
            request_params = RequestParams(model=model_spec)

        # Let the model factory handle model string parsing and setup
        return ModelFactory.create_factory(model_spec, request_params=request_params)
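The precedence chain is easy to misread, so here is a standalone sketch of just the model-selection logic (resolve_model and its argument names are illustrative, not part of the package; the model strings are placeholders):

from typing import Optional

def resolve_model(
    config_default: Optional[str],
    cli_override: Optional[str],
    decorator_model: Optional[str],
) -> Optional[str]:
    # Mirrors _get_model_factory: config < command line < decorator
    model_spec = config_default
    if cli_override:
        model_spec = cli_override
    if decorator_model:
        model_spec = decorator_model
    return model_spec

assert resolve_model("model-a", None, None) == "model-a"
assert resolve_model("model-a", "model-b", None) == "model-b"
assert resolve_model("model-a", "model-b", "model-c") == "model-c"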

    def agent(
        self,
        name: str = "Agent",
        *,
        instruction: str = "You are a helpful agent.",
        servers: List[str] = [],
        model: Optional[str] = None,
        use_history: bool = True,
        request_params: Optional[Dict] = None,
    ) -> Callable:
        """
        Decorator to create and register an agent with configuration.

        Args:
            name: Name of the agent
            instruction: Base instruction for the agent
            servers: List of server names the agent should connect to
            model: Model specification string (highest precedence)
            use_history: Whether to maintain conversation history
            request_params: Additional request parameters for the LLM
        """
        # print(f"\nDecorating agent {name} with model={model}")

        def decorator(func: Callable) -> Callable:
            base_params = RequestParams(
                use_history=use_history,
                model=model,  # Include the model in the initial params
                maxTokens=4096,  # Default to a larger context for agents - TODO: make configurable
                **(request_params or {}),
            )

            # Create agent configuration
            config = AgentConfig(
                name=name,
                instruction=instruction,
                servers=servers,
                model=model,  # Highest precedence
                use_history=use_history,
                default_request_params=base_params,
            )

            # Store the agent configuration
            self.agents[name] = {
                "config": config,
                "type": AgentType.BASIC.value,
                "func": func,
            }

            async def wrapper(*args, **kwargs):
                return await func(*args, **kwargs)

            return wrapper

        return decorator
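A minimal sketch of the decorator in use (the agent name and instruction are illustrative; note that in this version run() only reads the stored configuration, so the decorated function body is never invoked):

import asyncio
from mcp_agent.core.fastagent import FastAgent

fast = FastAgent("example-app")

@fast.agent(name="greeter", instruction="You are a terse, friendly greeter.")
async def greeter():
    pass  # Registration happens in the decorator; run() does not call this body

async def main():
    async with fast.run() as app:
        print(await app.greeter("Say hello in five words."))

if __name__ == "__main__":
    asyncio.run(main())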

    def orchestrator(
        self,
        name: str,
        instruction: str,
        agents: List[str],
        model: str | None = None,
        use_history: bool = True,
        request_params: Optional[Dict] = None,
    ) -> Callable:
        """
        Decorator to create and register an orchestrator.

        Args:
            name: Name of the orchestrator
            instruction: Base instruction for the orchestrator
            agents: List of agent names this orchestrator can use
            model: Model specification string (highest precedence)
            use_history: Whether to maintain conversation history
            request_params: Additional request parameters for the LLM
        """

        def decorator(func: Callable) -> Callable:
            # Create base request params
            base_params = RequestParams(
                use_history=use_history, **(request_params or {})
            )

            # Create agent configuration
            config = AgentConfig(
                name=name,
                instruction=instruction,
                servers=[],  # Orchestrators don't need servers
                model=model,  # Highest precedence
                use_history=use_history,
                default_request_params=base_params,
            )

            # Store the orchestrator configuration
            self.agents[name] = {
                "config": config,
                "child_agents": agents,
                "type": AgentType.ORCHESTRATOR.value,
                "func": func,
            }

            async def wrapper(*args, **kwargs):
                return await func(*args, **kwargs)

            return wrapper

        return decorator
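An orchestrator references previously declared agents by name; _create_orchestrators (below) resolves those names into the actual Agent instances. Continuing the sketch above:

@fast.agent(name="researcher", instruction="Research the topic.")
async def researcher():
    pass

@fast.agent(name="writer", instruction="Write up the findings.")
async def writer():
    pass

@fast.orchestrator(
    name="lead",
    instruction="Break the request into steps and delegate to the available agents.",
    agents=["researcher", "writer"],
)
async def lead():
    pass

# Later: async with fast.run() as app: print(await app.lead("Report on X"))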

    def parallel(
        self,
        name: str,
        fan_in: str,
        fan_out: List[str],
        instruction: str = "",
        model: str | None = None,
        use_history: bool = True,
        request_params: Optional[Dict] = None,
    ) -> Callable:
        """
        Decorator to create and register a parallel executing agent.

        Args:
            name: Name of the parallel executing agent
            fan_in: Name of the collecting agent
            fan_out: List of parallel execution agents
            instruction: Optional instruction for the parallel agent
            model: Model specification string
            use_history: Whether to maintain conversation history
            request_params: Additional request parameters for the LLM
        """

        def decorator(func: Callable) -> Callable:
            # Create request params with the history setting
            params = RequestParams(**(request_params or {}))
            params.use_history = use_history

            # Create agent configuration
            config = AgentConfig(
                name=name,
                instruction=instruction,
                servers=[],  # Parallel agents don't need servers
                model=model,
                use_history=use_history,
                default_request_params=params,
            )

            # Store the parallel configuration
            self.agents[name] = {
                "config": config,
                "fan_out": fan_out,
                "fan_in": fan_in,
                "type": AgentType.PARALLEL.value,
                "func": func,
            }

            async def wrapper(*args, **kwargs):
                return await func(*args, **kwargs)

            return wrapper

        return decorator
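Each fan-out agent receives the same message and the fan-in agent aggregates their outputs; _create_parallel_agents (below) wires the names up at run time. A sketch with illustrative names, continuing from the examples above:

@fast.agent(name="to_french", instruction="Translate the text to French.")
async def to_french():
    pass

@fast.agent(name="to_german", instruction="Translate the text to German.")
async def to_german():
    pass

@fast.agent(name="combine", instruction="Merge the translations into one reply.")
async def combine():
    pass

@fast.parallel(name="translator", fan_in="combine", fan_out=["to_french", "to_german"])
async def translator():
    pass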

    def evaluator_optimizer(
        self,
        name: str,
        optimizer: str,
        evaluator: str,
        min_rating: str = "GOOD",
        max_refinements: int = 3,
        use_history: bool = True,
        request_params: Optional[Dict] = None,
    ) -> Callable:
        """
        Decorator to create and register an evaluator-optimizer workflow.

        Args:
            name: Name of the workflow
            optimizer: Name of the optimizer agent
            evaluator: Name of the evaluator agent
            min_rating: Minimum acceptable quality rating (EXCELLENT, GOOD, FAIR, POOR)
            max_refinements: Maximum number of refinement iterations
            use_history: Whether to maintain conversation history
            request_params: Additional request parameters for the LLM
        """

        def decorator(func: Callable) -> Callable:
            # Build RequestParams from the raw dict, as the other decorators do
            # (AgentConfig expects a RequestParams instance)
            base_params = RequestParams(
                use_history=use_history, **(request_params or {})
            )

            # Create workflow configuration
            config = AgentConfig(
                name=name,
                instruction="",  # Uses the optimizer's instruction
                servers=[],  # Uses the agents' server access
                use_history=use_history,
                default_request_params=base_params,
            )

            # Store the workflow configuration
            self.agents[name] = {
                "config": config,
                "optimizer": optimizer,
                "evaluator": evaluator,
                "min_rating": min_rating,
                "max_refinements": max_refinements,
                "type": AgentType.EVALUATOR_OPTIMIZER.value,
                "func": func,
            }

            async def wrapper(*args, **kwargs):
                return await func(*args, **kwargs)

            return wrapper

        return decorator
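The optimizer drafts, the evaluator grades, and the loop repeats until min_rating is reached or max_refinements is exhausted; min_rating is later used as a key into QualityRating in _create_evaluator_optimizers. Continuing the sketch:

@fast.agent(name="author", instruction="Draft the requested text.")
async def author():
    pass

@fast.agent(name="critic", instruction="Rate the draft EXCELLENT, GOOD, FAIR or POOR, with feedback.")
async def critic():
    pass

@fast.evaluator_optimizer(
    name="polished_author",
    optimizer="author",
    evaluator="critic",
    min_rating="EXCELLENT",
    max_refinements=2,
)
async def polished_author():
    pass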

    def router(
        self,
        name: str,
        agents: List[str],
        servers: List[str] = [],
        model: Optional[str] = None,
        use_history: bool = True,
        request_params: Optional[Dict] = None,
    ) -> Callable:
        """
        Decorator to create and register a router.

        Args:
            name: Name of the router
            agents: List of agent names this router can delegate to
            servers: List of server names the router can use directly
            model: Model specification string
            use_history: Whether to maintain conversation history
            request_params: Additional request parameters for the LLM
        """

        def decorator(func: Callable) -> Callable:
            # Create base request params
            base_params = RequestParams(
                use_history=use_history, **(request_params or {})
            )

            # Create agent configuration
            config = AgentConfig(
                name=name,
                instruction="",  # The router uses its own routing instruction
                servers=servers,
                model=model,
                use_history=use_history,
                default_request_params=base_params,
            )

            # Store the router configuration
            self.agents[name] = {
                "config": config,
                "agents": agents,
                "type": AgentType.ROUTER.value,
                "func": func,
            }

            async def wrapper(*args, **kwargs):
                return await func(*args, **kwargs)

            return wrapper

        return decorator
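A router inspects the incoming message and forwards it to the best match; per RouterProxy above, agent routes delegate to that agent's LLM, while server (tool) routes are not yet supported in this version. A sketch reusing the illustrative agents declared earlier:

@fast.router(name="front_door", agents=["researcher", "writer"])
async def front_door():
    pass

# async with fast.run() as app: print(await app.front_door("Find sources on X"))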

    async def _create_basic_agents(self, agent_app: MCPApp) -> ProxyDict:
        """
        Create and initialize basic agents with their configurations.

        Args:
            agent_app: The main application instance

        Returns:
            Dictionary of initialized basic agents wrapped in appropriate proxies
        """
        active_agents = {}

        for name, agent_data in self.agents.items():
            if agent_data["type"] == AgentType.BASIC.value:
                config = agent_data["config"]

                # Create the agent with its configuration
                agent = Agent(config=config, context=agent_app.context)

                # Set up the LLM with proper configuration
                async with agent:
                    llm_factory = self._get_model_factory(
                        model=config.model,
                        request_params=config.default_request_params,
                    )
                    agent._llm = await agent.attach_llm(llm_factory)

                # Create a proxy for the agent
                active_agents[name] = self._create_proxy(
                    name, agent, AgentType.BASIC.value
                )

        return active_agents

    def _create_orchestrators(
        self, agent_app: MCPApp, active_agents: ProxyDict
    ) -> ProxyDict:
        """
        Create orchestrator agents.

        Args:
            agent_app: The main application instance
            active_agents: Dictionary of already created agents/proxies

        Returns:
            Dictionary of initialized orchestrator agents wrapped in appropriate proxies
        """
        orchestrators = {}
        for name, agent_data in self.agents.items():
            if agent_data["type"] == AgentType.ORCHESTRATOR.value:
                config = agent_data["config"]

                # TODO: Remove legacy - this model/params setup should live in the Agent class
                # Resolve the model alias if present
                model_config = ModelFactory.parse_model_string(config.model)
                resolved_model = model_config.model_name

                # Start with existing params if available
                if config.default_request_params:
                    base_params = config.default_request_params.model_copy()
                    # Update with orchestrator-specific settings
                    base_params.use_history = config.use_history
                    base_params.model = resolved_model
                else:
                    base_params = RequestParams(
                        use_history=config.use_history, model=resolved_model
                    )

                llm_factory = self._get_model_factory(
                    model=config.model,  # Use the original model string for factory creation
                    request_params=base_params,
                )

                # Get the child agents - need to unwrap the proxies
                child_agents = []
                for agent_name in agent_data["child_agents"]:
                    proxy = active_agents[agent_name]
                    if isinstance(proxy, LLMAgentProxy):
                        child_agents.append(proxy._agent)  # Get the actual Agent
                    else:
                        # Handle the case where it might be another workflow
                        child_agents.append(proxy._workflow)

                orchestrator = Orchestrator(
                    name=config.name,
                    instruction=config.instruction,
                    available_agents=child_agents,
                    context=agent_app.context,
                    llm_factory=llm_factory,
                    request_params=base_params,  # Use our base params that include the model
                    plan_type="full",
                )

                # Use the factory to create the appropriate proxy
                orchestrators[name] = self._create_proxy(
                    name, orchestrator, AgentType.ORCHESTRATOR.value
                )
        return orchestrators

    async def _create_evaluator_optimizers(
        self, agent_app: MCPApp, active_agents: ProxyDict
    ) -> ProxyDict:
        """
        Create evaluator-optimizer workflows.

        Args:
            agent_app: The main application instance
            active_agents: Dictionary of already created agents/proxies

        Returns:
            Dictionary of initialized evaluator-optimizer workflows
        """
        workflows = {}
        for name, agent_data in self.agents.items():
            if agent_data["type"] == AgentType.EVALUATOR_OPTIMIZER.value:
                # Get the referenced agents - unwrap them from proxies
                optimizer = self._unwrap_proxy(active_agents[agent_data["optimizer"]])
                evaluator = self._unwrap_proxy(active_agents[agent_data["evaluator"]])

                if not optimizer or not evaluator:
                    raise ValueError(
                        f"Missing agents for workflow {name}: "
                        f"optimizer={agent_data['optimizer']}, "
                        f"evaluator={agent_data['evaluator']}"
                    )

                # TODO: Remove legacy - factory usage is only needed for str evaluators
                # Later this should only be passed when the evaluator is a string
                optimizer_model = (
                    optimizer.config.model if isinstance(optimizer, Agent) else None
                )
                workflow = EvaluatorOptimizerLLM(
                    optimizer=optimizer,
                    evaluator=evaluator,
                    min_rating=QualityRating[agent_data["min_rating"]],
                    max_refinements=agent_data["max_refinements"],
                    llm_factory=self._get_model_factory(model=optimizer_model),
                    context=agent_app.context,
                )

                workflows[name] = self._create_proxy(
                    name, workflow, AgentType.EVALUATOR_OPTIMIZER.value
                )

        return workflows

    def _get_parallel_dependencies(
        self, name: str, visited: set, path: set
    ) -> List[str]:
        """
        Get dependencies for a parallel agent in topological order.

        Args:
            name: Name of the parallel agent
            visited: Set of already visited agents
            path: Current path, used for cycle detection

        Returns:
            List of agent names in dependency order

        Raises:
            ValueError: If a circular dependency is detected
        """
        if name in path:
            path_str = " -> ".join(path)
            raise ValueError(f"Circular dependency detected: {path_str} -> {name}")

        if name in visited:
            return []

        if name not in self.agents:
            return []

        config = self.agents[name]
        if config["type"] != AgentType.PARALLEL.value:
            return []

        path.add(name)
        deps = []

        # Get dependencies from the fan-out agents
        for fan_out in config["fan_out"]:
            deps.extend(self._get_parallel_dependencies(fan_out, visited, path))

        # Add this agent after its dependencies
        deps.append(name)
        visited.add(name)
        path.remove(name)

        return deps
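The walk above is a depth-first topological sort with cycle detection: fan-out dependencies are emitted before their parent, visited prunes repeats, and path catches cycles. A standalone sketch of the same shape (the graph and names are illustrative):

from typing import Dict, List, Set

def topo_order(
    graph: Dict[str, List[str]], name: str, visited: Set[str], path: Set[str]
) -> List[str]:
    if name in path:
        raise ValueError(f"Circular dependency: {' -> '.join(path)} -> {name}")
    if name in visited or name not in graph:
        return []
    path.add(name)
    order: List[str] = []
    for child in graph[name]:
        order.extend(topo_order(graph, child, visited, path))
    order.append(name)  # The parent comes after its dependencies
    visited.add(name)
    path.remove(name)
    return order

graph = {"outer": ["inner_a", "inner_b"], "inner_a": ["inner_b"], "inner_b": []}
print(topo_order(graph, "outer", set(), set()))  # ['inner_b', 'inner_a', 'outer']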

    def _create_parallel_agents(
        self, agent_app: MCPApp, active_agents: ProxyDict
    ) -> ProxyDict:
        """
        Create parallel execution agents in dependency order.

        Args:
            agent_app: The main application instance
            active_agents: Dictionary of already created agents/proxies

        Returns:
            Dictionary of initialized parallel agents
        """
        parallel_agents = {}
        visited = set()

        # Get all parallel agents
        parallel_names = [
            name
            for name, agent_data in self.agents.items()
            if agent_data["type"] == AgentType.PARALLEL.value
        ]

        # Create agents in dependency order
        for name in parallel_names:
            # Get ordered dependencies if not already processed
            if name not in visited:
                try:
                    ordered_agents = self._get_parallel_dependencies(
                        name, visited, set()
                    )
                except ValueError as e:
                    raise ValueError(
                        f"Error creating parallel agent {name}: {str(e)}"
                    ) from e

                # Create each agent in order
                for agent_name in ordered_agents:
                    if agent_name not in parallel_agents:
                        agent_data = self.agents[agent_name]
                        config = agent_data["config"]

                        # Get fan-out agents (could be basic agents or other parallels)
                        fan_out_agents = self._get_agent_instances(
                            agent_data["fan_out"], active_agents
                        )

                        # Get the fan-in agent - unwrap the proxy
                        fan_in_agent = self._unwrap_proxy(
                            active_agents[agent_data["fan_in"]]
                        )

                        # Create the parallel workflow
                        llm_factory = self._get_model_factory(config.model)
                        parallel = ParallelLLM(
                            name=config.name,
                            instruction=config.instruction,
                            fan_out_agents=fan_out_agents,
                            fan_in_agent=fan_in_agent,
                            context=agent_app.context,
                            llm_factory=llm_factory,
                            default_request_params=config.default_request_params,
                        )

                        # Name the proxy after the agent being created (agent_name),
                        # matching its dictionary key
                        parallel_agents[agent_name] = self._create_proxy(
                            agent_name, parallel, AgentType.PARALLEL.value
                        )

        return parallel_agents

    def _create_routers(self, agent_app: MCPApp, active_agents: ProxyDict) -> ProxyDict:
        """
        Create router agents.

        Args:
            agent_app: The main application instance
            active_agents: Dictionary of already created agents

        Returns:
            Dictionary of initialized router agents
        """
        routers = {}
        for name, agent_data in self.agents.items():
            if agent_data["type"] == AgentType.ROUTER.value:
                config = agent_data["config"]

                # Get the router's agents - unwrap the proxies
                router_agents = self._get_agent_instances(
                    agent_data["agents"], active_agents
                )

                # Create the router with proper configuration
                llm_factory = self._get_model_factory(
                    model=config.model,
                    request_params=config.default_request_params,
                )

                router = LLMRouter(
                    name=config.name,
                    llm_factory=llm_factory,
                    agents=router_agents,
                    server_names=config.servers,
                    context=agent_app.context,
                    default_request_params=config.default_request_params,
                )

                routers[name] = self._create_proxy(name, router, AgentType.ROUTER.value)

        return routers

    def _unwrap_proxy(self, proxy: BaseAgentProxy) -> AgentOrWorkflow:
        """
        Unwrap a proxy to get the underlying agent or workflow instance.

        Args:
            proxy: The proxy object to unwrap

        Returns:
            The underlying Agent or workflow instance
        """
        if isinstance(proxy, LLMAgentProxy):
            return proxy._agent
        return proxy._workflow

    def _get_agent_instances(
        self, agent_names: List[str], active_agents: ProxyDict
    ) -> List[AgentOrWorkflow]:
        """
        Get a list of actual agent/workflow instances from a list of names.

        Args:
            agent_names: List of agent names to look up
            active_agents: Dictionary of active agent proxies

        Returns:
            List of unwrapped agent/workflow instances
        """
        return [self._unwrap_proxy(active_agents[name]) for name in agent_names]
935
+
936
+ @asynccontextmanager
937
+ async def run(self):
938
+ """
939
+ Context manager for running the application.
940
+ Handles setup and teardown of the app and agents.
941
+
942
+ Yields:
943
+ AgentAppWrapper instance with all initialized agents
944
+ """
945
+ async with self.app.run() as agent_app:
946
+ # Create all types of agents
947
+ active_agents = await self._create_basic_agents(agent_app)
948
+ orchestrators = self._create_orchestrators(agent_app, active_agents)
949
+ parallel_agents = self._create_parallel_agents(agent_app, active_agents)
950
+ evaluator_optimizers = await self._create_evaluator_optimizers(
951
+ agent_app, active_agents
952
+ )
953
+ routers = self._create_routers(agent_app, active_agents)
954
+
955
+ # Merge all agents into active_agents
956
+ active_agents.update(orchestrators)
957
+ active_agents.update(parallel_agents)
958
+ active_agents.update(evaluator_optimizers)
959
+ active_agents.update(routers)
960
+
961
+ # Create wrapper with all agents
962
+ wrapper = AgentApp(agent_app, active_agents)
963
+ try:
964
+ yield wrapper
965
+ finally:
966
+ # Clean up basic agents - need to get the actual agent from the proxy
967
+ for name, proxy in active_agents.items():
968
+ if isinstance(proxy, LLMAgentProxy):
969
+ await proxy._agent.__aexit__(None, None, None)

    # async def send(self, agent_name: str, message: str) -> Any:
    #     """
    #     Send a message to a specific agent and get the response.
    #
    #     Args:
    #         agent_name: Name of the target agent
    #         message: Message to send
    #
    #     Returns:
    #         Agent's response
    #
    #     Raises:
    #         ValueError: If agent not found
    #         RuntimeError: If agent has no LLM attached
    #     """
    #     if agent_name not in self.agents:
    #         raise ValueError(f"Agent {agent_name} not found")
    #
    #     agent = self.agents[agent_name]
    #
    #     # Special handling for routers
    #     if isinstance(agent._llm, LLMRouter):
    #         # Route the message and get results
    #         results = await agent._llm.route(message)
    #         if not results:
    #             return "No appropriate route found for the request."
    #
    #         # Get the top result
    #         top_result = results[0]
    #         if isinstance(top_result.result, Agent):
    #             # Agent route - delegate to the agent
    #             return await top_result.result._llm.generate_str(message)
    #         elif isinstance(top_result.result, str):
    #             # Server route - use the router directly
    #             return "Tool call requested by router - not yet supported"
    #         else:
    #             return f"Routed to: {top_result.result} ({top_result.confidence}): {top_result.reasoning}"
    #
    #     # Normal agent handling
    #     if not hasattr(agent, "_llm") or agent._llm is None:
    #         raise RuntimeError(f"Agent {agent_name} has no LLM attached")
    #
    #     return await agent._llm.generate_str(message)