fast-agent-mcp 0.0.8__py3-none-any.whl → 0.0.11__py3-none-any.whl
This diff shows the published contents of two package versions as they appear in their public registry and is provided for informational purposes only.
Potentially problematic release: this version of fast-agent-mcp has been flagged in its registry.
- {fast_agent_mcp-0.0.8.dist-info → fast_agent_mcp-0.0.11.dist-info}/METADATA +15 -9
- {fast_agent_mcp-0.0.8.dist-info → fast_agent_mcp-0.0.11.dist-info}/RECORD +28 -26
- mcp_agent/app.py +4 -4
- mcp_agent/cli/commands/bootstrap.py +4 -0
- mcp_agent/cli/commands/setup.py +1 -1
- mcp_agent/core/fastagent.py +498 -369
- mcp_agent/event_progress.py +5 -2
- mcp_agent/human_input/handler.py +6 -2
- mcp_agent/logging/rich_progress.py +10 -5
- mcp_agent/mcp/mcp_aggregator.py +2 -1
- mcp_agent/mcp/mcp_connection_manager.py +67 -37
- mcp_agent/resources/examples/data-analysis/analysis.py +1 -1
- mcp_agent/resources/examples/data-analysis/fastagent.config.yaml +2 -0
- mcp_agent/resources/examples/internal/job.py +83 -0
- mcp_agent/resources/examples/workflows/agent_build.py +61 -0
- mcp_agent/resources/examples/workflows/chaining.py +0 -1
- mcp_agent/resources/examples/workflows/human_input.py +0 -1
- mcp_agent/resources/examples/workflows/orchestrator.py +1 -7
- mcp_agent/workflows/evaluator_optimizer/evaluator_optimizer.py +63 -65
- mcp_agent/workflows/llm/augmented_llm.py +9 -1
- mcp_agent/workflows/llm/augmented_llm_anthropic.py +28 -23
- mcp_agent/workflows/llm/model_factory.py +25 -11
- mcp_agent/workflows/orchestrator/orchestrator.py +106 -100
- mcp_agent/workflows/orchestrator/orchestrator_prompts.py +11 -6
- mcp_agent/workflows/router/router_llm.py +13 -2
- {fast_agent_mcp-0.0.8.dist-info → fast_agent_mcp-0.0.11.dist-info}/WHEEL +0 -0
- {fast_agent_mcp-0.0.8.dist-info → fast_agent_mcp-0.0.11.dist-info}/entry_points.txt +0 -0
- {fast_agent_mcp-0.0.8.dist-info → fast_agent_mcp-0.0.11.dist-info}/licenses/LICENSE +0 -0
mcp_agent/workflows/llm/augmented_llm.py

@@ -238,6 +238,7 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol[MessageParamT, MessageT]):
 
         self.model_selector = self.context.model_selector
         self.type_converter = type_converter
+        self.verb = kwargs.get("verb")
 
     @abstractmethod
     async def generate(
@@ -627,8 +628,15 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol[MessageParamT, MessageT]):
         self, chat_turn: Optional[int] = None, model: Optional[str] = None
     ):
         """Log a chat progress event"""
+        # Determine action type based on verb
+        if hasattr(self, "verb") and self.verb:
+            # Use verb directly regardless of type
+            act = self.verb
+        else:
+            act = ProgressAction.CHATTING
+
        data = {
-            "progress_action": ProgressAction.CHATTING,
+            "progress_action": act,
             "model": model,
             "agent_name": self.name,
             "chat_turn": chat_turn if chat_turn is not None else None,
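The two hunks above wire an optional "verb" through AugmentedLLM so subclasses can relabel their progress events instead of always reporting CHATTING. A minimal standalone sketch of the mechanism (the ProgressAction values shown are an illustrative subset, not the package's full enum):

    from enum import Enum


    class ProgressAction(str, Enum):
        # Illustrative subset of mcp_agent.event_progress.ProgressAction
        CHATTING = "Chatting"
        PLANNING = "Planning"
        ROUTING = "Routing"


    class DemoLLM:
        def __init__(self, **kwargs):
            # Mirrors the change at new line 241: an optional verb set at construction
            self.verb = kwargs.get("verb")

        def progress_action(self):
            # Mirrors _log_chat_progress: the verb wins, CHATTING is the fallback
            if getattr(self, "verb", None):
                return self.verb
            return ProgressAction.CHATTING


    assert DemoLLM().progress_action() == ProgressAction.CHATTING
    assert DemoLLM(verb=ProgressAction.PLANNING).progress_action() == ProgressAction.PLANNING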
mcp_agent/workflows/llm/augmented_llm_anthropic.py

@@ -52,11 +52,12 @@ class AnthropicAugmentedLLM(AugmentedLLM[MessageParam, Message]):
     """
 
     def __init__(self, *args, **kwargs):
-        super().__init__(*args, type_converter=AnthropicMCPTypeConverter, **kwargs)
-
         self.provider = "Anthropic"
-        # Initialize logger
-        self.logger = get_logger(
+        # Initialize logger - keep it simple without name reference
+        self.logger = get_logger(__name__)
+
+        # Now call super().__init__
+        super().__init__(*args, type_converter=AnthropicMCPTypeConverter, **kwargs)
 
     def _initialize_default_params(self, kwargs: dict) -> RequestParams:
         """Initialize Anthropic-specific default parameters"""
@@ -79,25 +80,8 @@ class AnthropicAugmentedLLM(AugmentedLLM[MessageParam, Message]):
         Process a query using an LLM and available tools.
         Override this method to use a different LLM.
         """
-        config = self.context.config
-
-        api_key = None
-
-        if hasattr(config, "anthropic") and config.anthropic:
-            api_key = config.anthropic.api_key
-            if api_key == "<your-api-key-here>":
-                api_key = None
 
-        if api_key is None:
-            api_key = os.getenv("ANTHROPIC_API_KEY")
-
-        if not api_key:
-            raise ProviderKeyError(
-                "Anthropic API key not configured",
-                "The Anthropic API key is required but not set.\n"
-                "Add it to your configuration file under anthropic.api_key "
-                "or set the ANTHROPIC_API_KEY environment variable.",
-            )
+        api_key = self._api_key(self.context.config)
         try:
             anthropic = Anthropic(api_key=api_key)
             messages: List[MessageParam] = []
@@ -264,6 +248,27 @@ class AnthropicAugmentedLLM(AugmentedLLM[MessageParam, Message]):
 
         return responses
 
+    def _api_key(self, config):
+        api_key = None
+
+        if hasattr(config, "anthropic") and config.anthropic:
+            api_key = config.anthropic.api_key
+            if api_key == "<your-api-key-here>":
+                api_key = None
+
+        if api_key is None:
+            api_key = os.getenv("ANTHROPIC_API_KEY")
+
+        if not api_key:
+            raise ProviderKeyError(
+                "Anthropic API key not configured",
+                "The Anthropic API key is required but not set.\n"
+                "Add it to your configuration file under anthropic.api_key "
+                "or set the ANTHROPIC_API_KEY environment variable.",
+            )
+
+        return api_key
+
     async def generate_str(
         self,
         message,
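The new _api_key helper centralizes key resolution: the config value wins unless it is missing or still the placeholder, then the environment is consulted, then the call fails loudly. The same precedence in a self-contained sketch (resolve_api_key and the SimpleNamespace config are stand-ins for illustration; the real code raises ProviderKeyError):

    import os
    from types import SimpleNamespace


    def resolve_api_key(config) -> str:
        # Config first, placeholder treated as unset, environment as fallback.
        api_key = None
        if getattr(config, "anthropic", None):
            api_key = config.anthropic.api_key
            if api_key == "<your-api-key-here>":
                api_key = None
        if api_key is None:
            api_key = os.getenv("ANTHROPIC_API_KEY")
        if not api_key:
            raise RuntimeError("Anthropic API key not configured")
        return api_key


    config = SimpleNamespace(anthropic=SimpleNamespace(api_key="<your-api-key-here>"))
    os.environ["ANTHROPIC_API_KEY"] = "sk-test"
    assert resolve_api_key(config) == "sk-test"  # placeholder falls through to the env var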
@@ -313,7 +318,7 @@ class AnthropicAugmentedLLM(AugmentedLLM[MessageParam, Message]):
 
         # Next we pass the text through instructor to extract structured data
         client = instructor.from_anthropic(
-            Anthropic(api_key=self.context.config.anthropic.api_key),
+            Anthropic(api_key=self._api_key(self.context.config)),
         )
 
         params = self.get_request_params(request_params)
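For context, instructor.from_anthropic wraps the Anthropic client so that messages.create accepts a response_model and returns a validated Pydantic object. A sketch of that call shape (requires the instructor and anthropic packages plus a real key; the model name and prompt are arbitrary):

    import instructor
    from anthropic import Anthropic
    from pydantic import BaseModel


    class Sentiment(BaseModel):
        label: str
        confidence: float


    # Wrap the raw client; instructor layers response_model handling on top
    # of the normal messages API.
    client = instructor.from_anthropic(Anthropic(api_key="sk-..."))

    result = client.messages.create(
        model="claude-3-5-haiku-latest",
        max_tokens=256,
        messages=[{"role": "user", "content": "Classify: 'great product!'"}],
        response_model=Sentiment,
    )
    print(result.label, result.confidence)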
mcp_agent/workflows/llm/model_factory.py

@@ -62,18 +62,23 @@ class ModelFactory:
         "o1-preview": Provider.OPENAI,
         "o3-mini": Provider.OPENAI,
         "claude-3-haiku-20240307": Provider.ANTHROPIC,
+        "claude-3-5-haiku-20241022": Provider.ANTHROPIC,
+        "claude-3-5-haiku-latest": Provider.ANTHROPIC,
         "claude-3-5-sonnet-20240620": Provider.ANTHROPIC,
         "claude-3-5-sonnet-20241022": Provider.ANTHROPIC,
         "claude-3-5-sonnet-latest": Provider.ANTHROPIC,
+        "claude-3-7-sonnet-20250219": Provider.ANTHROPIC,
+        "claude-3-7-sonnet-latest": Provider.ANTHROPIC,
         "claude-3-opus-20240229": Provider.ANTHROPIC,
         "claude-3-opus-latest": Provider.ANTHROPIC,
     }
 
     MODEL_ALIASES = {
-        "sonnet": "claude-3-5-sonnet-latest",
+        "sonnet": "claude-3-7-sonnet-latest",
         "sonnet35": "claude-3-5-sonnet-latest",
+        "sonnet37": "claude-3-7-sonnet-latest",
         "claude": "claude-3-5-sonnet-latest",
-        "haiku": "claude-3-haiku-20240307",
+        "haiku": "claude-3-5-haiku-latest",
         "haiku3": "claude-3-haiku-20240307",
         "opus": "claude-3-opus-latest",
         "opus3": "claude-3-opus-latest",
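Aliases are resolved with a plain dictionary lookup before the provider table is consulted, so short names like "sonnet" and "haiku" track the newest models while dated ids stay pinned. A condensed sketch using two entries from the table above:

    MODEL_ALIASES = {
        "sonnet": "claude-3-7-sonnet-latest",
        "haiku": "claude-3-5-haiku-latest",
    }


    def resolve_model(name: str) -> str:
        # An alias expands to a concrete model id; anything else passes through.
        return MODEL_ALIASES.get(name, name)


    assert resolve_model("sonnet") == "claude-3-7-sonnet-latest"
    assert resolve_model("claude-3-opus-latest") == "claude-3-opus-latest"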
@@ -161,15 +166,24 @@ class ModelFactory:
                 config.model_name
             )  # Ensure parsed model name isn't overwritten
 
-
-
-
-
-
-
-
-
-
+        # Forward all keyword arguments to LLM constructor
+        llm_args = {
+            "agent": agent,
+            "model": config.model_name,
+            "request_params": factory_params,
+            "name": kwargs.get("name"),
+        }
+
+        # Add reasoning effort if available
+        if config.reasoning_effort:
+            llm_args["reasoning_effort"] = config.reasoning_effort.value
+
+        # Forward all other kwargs (including verb)
+        for key, value in kwargs.items():
+            if key not in ["agent", "default_request_params", "name"]:
+                llm_args[key] = value
+
+        llm = llm_class(**llm_args)
         return llm
 
     return factory
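The rewritten factory body builds an explicit argument dict, then forwards any remaining kwargs (such as verb) that the factory did not consume itself. The same pattern in isolation (build_llm_args is a hypothetical stand-in for the closure above):

    def build_llm_args(agent, model_name, factory_params, reasoning_effort=None, **kwargs):
        # Explicit arguments first, then forward extras the factory does not own.
        llm_args = {
            "agent": agent,
            "model": model_name,
            "request_params": factory_params,
            "name": kwargs.get("name"),
        }
        if reasoning_effort is not None:
            llm_args["reasoning_effort"] = reasoning_effort
        for key, value in kwargs.items():
            if key not in ("agent", "default_request_params", "name"):
                llm_args[key] = value
        return llm_args


    args = build_llm_args(None, "claude-3-7-sonnet-latest", None, verb="Planning")
    assert args["verb"] == "Planning"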
mcp_agent/workflows/orchestrator/orchestrator.py

@@ -2,9 +2,7 @@
 Orchestrator implementation for MCP Agent applications.
 """
 
-import contextlib
 from typing import (
-    Callable,
     List,
     Literal,
     Optional,
@@ -12,7 +10,8 @@ from typing import (
     TYPE_CHECKING,
 )
 
-from mcp_agent.agents.agent import Agent
+from mcp_agent.agents.agent import Agent
+from mcp_agent.event_progress import ProgressAction
 from mcp_agent.workflows.llm.augmented_llm import (
     AugmentedLLM,
     MessageParamT,
@@ -20,7 +19,6 @@ from mcp_agent.workflows.llm.augmented_llm import (
     ModelT,
     RequestParams,
 )
-from mcp_agent.workflows.llm.model_factory import ModelFactory
 from mcp_agent.workflows.orchestrator.orchestrator_models import (
     format_plan_result,
     format_step_result,
@@ -47,9 +45,9 @@ logger = get_logger(__name__)
 
 class Orchestrator(AugmentedLLM[MessageParamT, MessageT]):
     """
-    In the orchestrator-workers workflow, a central LLM dynamically breaks down tasks
-    delegates them to worker LLMs
-
+    In the orchestrator-workers workflow, a central planner LLM dynamically breaks down tasks and
+    delegates them to pre-configured worker LLMs. The planner synthesizes their results in a loop
+    until the task is complete.
 
     When to use this workflow:
     - This workflow is well-suited for complex tasks where you can't predict the
@@ -60,65 +58,76 @@ class Orchestrator(AugmentedLLM[MessageParamT, MessageT]):
     - Coding products that make complex changes to multiple files each time.
     - Search tasks that involve gathering and analyzing information from multiple sources
       for possible relevant information.
+
+    Note:
+        All agents must be pre-configured with LLMs before being passed to the orchestrator.
+        This ensures consistent model behavior and configuration across all components.
     """
 
     def __init__(
         self,
-
-        planner: AugmentedLLM
-        available_agents: List[Agent | AugmentedLLM]
+        name: str,
+        planner: AugmentedLLM,  # Pre-configured planner
+        available_agents: List[Agent | AugmentedLLM],
         plan_type: Literal["full", "iterative"] = "full",
         context: Optional["Context"] = None,
         **kwargs,
     ):
         """
         Args:
-
-            planner: LLM to use for planning steps
-
-
+            name: Name of the orchestrator workflow
+            planner: Pre-configured planner LLM to use for planning steps
+            available_agents: List of pre-configured agents available to this orchestrator
+            plan_type: "full" planning generates the full plan first, then executes. "iterative" plans next step and loops.
             context: Application context
         """
+        # Initialize logger early so we can log
+        self.logger = logger
+
+        # Set a fixed verb - always use PLANNING for all orchestrator activities
+        self.verb = ProgressAction.PLANNING
+
         # Initialize with orchestrator-specific defaults
         orchestrator_params = RequestParams(
             use_history=False,  # Orchestrator doesn't support history
             max_iterations=30,  # Higher default for complex tasks
-            maxTokens=8192, # Higher default for planning
+            maxTokens=8192,  # Higher default for planning
             parallel_tool_calls=True,
         )
 
-        # If kwargs contains request_params, merge
+        # If kwargs contains request_params, merge our defaults while preserving the model config
         if "request_params" in kwargs:
             base_params = kwargs["request_params"]
-            merged
-            merged
+            # Create merged params starting with our defaults
+            merged = orchestrator_params.model_copy()
+            # Update with base params to get model config
+            if isinstance(base_params, dict):
+                merged = merged.model_copy(update=base_params)
+            else:
+                merged = merged.model_copy(update=base_params.model_dump())
+            # Force specific settings
+            merged.use_history = False
             kwargs["request_params"] = merged
         else:
             kwargs["request_params"] = orchestrator_params
 
+        # Pass verb to AugmentedLLM
+        kwargs["verb"] = self.verb
+
         super().__init__(context=context, **kwargs)
 
-        self.
-
-        # Create default planner with AgentConfig
-        request_params = self.get_request_params(kwargs.get("request_params"))
-        planner_config = AgentConfig(
-            name="LLM Orchestrator",
-            instruction="""
-            You are an expert planner. Given an objective task and a list of MCP servers (which are collections of tools)
-            or Agents (which are collections of servers), your job is to break down the objective into a series of steps,
-            which can be performed by LLMs with access to the servers or agents.
-            """,
-            servers=[],  # Planner doesn't need direct server access
-            default_request_params=request_params,
-            model=request_params.model if request_params else None,
-        )
+        self.planner = planner
 
-        self.planner
+        if hasattr(self.planner, "verb"):
+            self.planner.verb = self.verb
 
-        self.plan_type
+        self.plan_type = plan_type
         self.server_registry = self.context.server_registry
-        self.agents = {agent.name: agent for agent in available_agents
+        self.agents = {agent.name: agent for agent in available_agents}
+
+        # Initialize logger
+        self.logger = logger
+        self.name = name
 
     async def generate(
         self,
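The merge logic above leans on Pydantic v2's model_copy(update=...): start from the orchestrator defaults, overlay the caller's params to pick up the model configuration, then force the settings the orchestrator requires. A runnable sketch with a stand-in RequestParams:

    from pydantic import BaseModel


    class RequestParams(BaseModel):
        use_history: bool = True
        max_iterations: int = 10
        maxTokens: int = 2048
        model: str | None = None


    defaults = RequestParams(use_history=False, max_iterations=30, maxTokens=8192)
    caller = RequestParams(model="claude-3-7-sonnet-latest", use_history=True)

    # Overlay the caller's fields onto the defaults, then re-force what the
    # orchestrator insists on (history stays off).
    merged = defaults.model_copy(update=caller.model_dump())
    merged.use_history = False

    assert merged.model == "claude-3-7-sonnet-latest"
    assert merged.use_history is False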
@@ -139,7 +148,7 @@ class Orchestrator(AugmentedLLM[MessageParamT, MessageT]):
     ) -> str:
         """Request an LLM generation and return the string representation of the result"""
         params = self.get_request_params(request_params)
-
+        # TODO -- properly incorporate this in to message display etc.
         result = await self.generate(
             message=message,
             request_params=params,
@@ -155,25 +164,15 @@ class Orchestrator(AugmentedLLM[MessageParamT, MessageT]):
     ) -> ModelT:
         """Request a structured LLM generation and return the result as a Pydantic model."""
         params = self.get_request_params(request_params)
-
         result_str = await self.generate_str(message=message, request_params=params)
 
-
-
-            instruction="Produce a structured output given a message",
-            servers=[],  # No server access needed for structured output
-        )
-
-        llm = self.llm_factory(agent=Agent(config=structured_config))
-
-        structured_result = await llm.generate_structured(
+        # Use AugmentedLLM's structured output handling
+        return await super().generate_structured(
             message=result_str,
             response_model=response_model,
             request_params=params,
         )
 
-        return structured_result
-
     async def execute(
         self, objective: str, request_params: RequestParams | None = None
     ) -> PlanResult:
@@ -182,6 +181,20 @@ class Orchestrator(AugmentedLLM[MessageParamT, MessageT]):
 
         params = self.get_request_params(request_params)
 
+        # Single progress event for orchestration start
+        model = await self.select_model(params) or "unknown-model"
+
+        # Log the progress with minimal required fields
+        self.logger.info(
+            "Planning task execution",
+            data={
+                "progress_action": self.verb,
+                "model": model,
+                "agent_name": self.name,
+                "target": self.name,
+            },
+        )
+
         plan_result = PlanResult(objective=objective, step_results=[])
 
         while iterations < params.max_iterations:
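The progress display keys off the structured fields attached to the log record rather than the message text. A stdlib-logging approximation of the event emitted above (the package's logger takes a data keyword; extra is the closest stdlib equivalent, and the field values here are illustrative):

    import logging

    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger("orchestrator-demo")

    # Consumers read these keys off the record, not the human-readable message.
    logger.info(
        "Planning task execution",
        extra={
            "data": {
                "progress_action": "Planning",
                "model": "claude-3-7-sonnet-latest",
                "agent_name": "orchestrator",
                "target": "orchestrator",
            }
        },
    )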
@@ -212,6 +225,7 @@ class Orchestrator(AugmentedLLM[MessageParamT, MessageT]):
                 plan_result=format_plan_result(plan_result)
             )
 
+            # Use planner directly - planner already has PLANNING verb
             plan_result.result = await self.planner.generate_str(
                 message=synthesis_prompt,
                 request_params=params.model_copy(update={"max_iterations": 1}),
@@ -233,8 +247,6 @@ class Orchestrator(AugmentedLLM[MessageParamT, MessageT]):
 
             plan_result.add_step_result(step_result)
 
-            plan_result.add_step_result(step_result)
-
             logger.debug(
                 f"Iteration {iterations}: Intermediate plan result:", data=plan_result
             )
@@ -251,70 +263,62 @@ class Orchestrator(AugmentedLLM[MessageParamT, MessageT]):
         request_params: RequestParams | None = None,
     ) -> StepResult:
         """Execute a step's subtasks in parallel and synthesize results"""
-        params = self.get_request_params(request_params)
 
         step_result = StepResult(step=step, task_results=[])
         context = format_plan_result(previous_result)
 
-        #
-
-
-        for task in step.tasks:
-            agent = self.agents.get(task.agent)
-            if not agent:
-                raise ValueError(f"No agent found matching {task.agent}")
+        # Execute tasks
+        futures = []
+        error_tasks = []
 
-
-
-
-
-
-
-            else:
-                # Only create new context if needed
-                ctx_agent = await stack.enter_async_context(agent)
-                # Create factory with agent's own configuration
-                agent_factory = ModelFactory.create_factory(
-                    model_string=agent.config.model,
-                    request_params=agent.config.default_request_params,
-                )
-                llm = await ctx_agent.attach_llm(agent_factory)
-
-                task_llms.append((task, llm))
-
-            # Execute all tasks within the same context
-            futures = []
-            for task, llm in task_llms:
-                task_description = TASK_PROMPT_TEMPLATE.format(
-                    objective=previous_result.objective,
-                    task=task.description,
-                    context=context,
+        for task in step.tasks:
+            agent = self.agents.get(task.agent)
+            if not agent:
+                # Instead of failing the entire step, track this as an error task
+                self.logger.error(
+                    f"No agent found matching '{task.agent}'. Available agents: {list(self.agents.keys())}"
                 )
-
-
-
-
-                if hasattr(agent, "config")
-                else params
-            )
-                futures.append(
-                    llm.generate_str(
-                        message=task_description, request_params=task_params
+                error_tasks.append(
+                    (
+                        task,
+                        f"Error: Agent '{task.agent}' not found. Available agents: {', '.join(self.agents.keys())}",
                     )
                 )
+                continue
+
+            task_description = TASK_PROMPT_TEMPLATE.format(
+                objective=previous_result.objective,
+                task=task.description,
+                context=context,
+            )
 
-            #
-
+            # All agents should now be LLM-capable
+            futures.append(agent._llm.generate_str(message=task_description))
 
-
-
+        # Wait for all tasks (only if we have valid futures)
+        results = await self.executor.execute(*futures) if futures else []
+
+        # Process successful results
+        task_index = 0
+        for task in step.tasks:
+            # Skip tasks that had agent errors (they're in error_tasks)
+            if any(et[0] == task for et in error_tasks):
+                continue
+
+            if task_index < len(results):
+                result = results[task_index]
                 step_result.add_task_result(
                     TaskWithResult(**task.model_dump(), result=str(result))
                 )
+                task_index += 1
 
-
-
+        # Add error task results
+        for task, error_message in error_tasks:
+            step_result.add_task_result(
+                TaskWithResult(**task.model_dump(), result=error_message)
+            )
 
+        step_result.result = format_step_result(step_result)
         return step_result
 
     async def _get_full_plan(
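Two things are going on in the rewritten step executor: unknown agents become error records instead of aborting the step, and because the executor returns results positionally, a separate index skips tasks that never produced a future. A condensed, runnable sketch of both patterns (run_tasks, echo, and the dict-based tasks are stand-ins, not the package's types):

    import asyncio


    async def run_tasks(tasks, agents):
        futures, error_tasks = [], []
        for task in tasks:
            agent = agents.get(task["agent"])
            if not agent:
                # Record the failure instead of aborting the whole step.
                error_tasks.append((task, f"Error: agent '{task['agent']}' not found"))
                continue
            futures.append(agent(task["description"]))

        results = await asyncio.gather(*futures) if futures else []

        # Positional alignment: skip errored tasks, consume results in order.
        out, i = [], 0
        for task in tasks:
            if any(et[0] is task for et in error_tasks):
                continue
            if i < len(results):
                out.append((task, results[i]))
                i += 1
        out.extend(error_tasks)
        return out


    async def echo(desc):
        return f"done: {desc}"


    agents = {"writer": echo}
    tasks = [
        {"agent": "writer", "description": "draft"},
        {"agent": "ghost", "description": "review"},
    ]
    print(asyncio.run(run_tasks(tasks, agents)))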
@@ -340,6 +344,7 @@ class Orchestrator(AugmentedLLM[MessageParamT, MessageT]):
             agents=agents,
         )
 
+        # Use planner directly - no verb manipulation needed
         plan = await self.planner.generate_structured(
             message=prompt,
             response_model=Plan,
@@ -371,6 +376,7 @@ class Orchestrator(AugmentedLLM[MessageParamT, MessageT]):
             agents=agents,
         )
 
+        # Use planner directly - no verb manipulation needed
         next_step = await self.planner.generate_structured(
             message=prompt,
             response_model=NextStep,
mcp_agent/workflows/orchestrator/orchestrator_prompts.py

@@ -30,12 +30,15 @@ and Agents (which are collections of servers):
 Agents:
 {agents}
 
+IMPORTANT: You can ONLY use the agents listed above. Do not invent or reference agents that are not in the list.
+The plan will fail if you reference agents that are not available.
+
 Generate a plan with all remaining steps needed.
 Steps are sequential, but each Step can have parallel subtasks.
 For each Step, specify a description of the step and independent subtasks that can run in parallel.
 For each subtask specify:
     1. Clear description of the task that an LLM can execute
-    2. Name of 1 Agent
+    2. Name of 1 Agent from the available agents list above
 
 Return your response in the following JSON structure:
 {{
@@ -45,11 +48,11 @@ Return your response in the following JSON structure:
     "tasks": [
         {{
             "description": "Description of task 1",
-            "agent": "agent_name"  # For AgentTask
+            "agent": "agent_name"  # For AgentTask - MUST be one of the available agents
         }},
         {{
             "description": "Description of task 2",
-            "agent": "agent_name2"
+            "agent": "agent_name2"  # MUST be one of the available agents
         }}
     ]
 }}
@@ -79,19 +82,21 @@ and Agents (which are collections of servers):
 Agents:
 {agents}
 
+IMPORTANT: You can ONLY use the agents listed above. Do not invent or reference agents that are not in the list.
+The plan will fail if you reference agents that are not available.
+
 Generate the next step, by specifying a description of the step and independent subtasks that can run in parallel:
 For each subtask specify:
     1. Clear description of the task that an LLM can execute
-    2. Name of 1 Agent
+    2. Name of 1 Agent from the available agents list above
 
 Return your response in the following JSON structure:
 {{
-
     "description": "Description of step 1",
     "tasks": [
         {{
             "description": "Description of task 1",
-            "agent": "agent_name"  # For AgentTask
+            "agent": "agent_name"  # For AgentTask - MUST be one of the available agents
         }}
     ],
     "is_complete": false
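The prompt changes attack hallucinated agent names from the instruction side, and the executor changes above degrade gracefully when the planner ignores them anyway. A pre-flight check could catch invented agents before any task runs; this helper is hypothetical, not part of the diff:

    def unknown_agents(plan_steps, available):
        # Collect any agent names the planner invented.
        requested = {task["agent"] for step in plan_steps for task in step["tasks"]}
        return sorted(requested - set(available))


    steps = [{"tasks": [{"agent": "researcher"}, {"agent": "made_up_agent"}]}]
    assert unknown_agents(steps, {"researcher", "writer"}) == ["made_up_agent"]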
mcp_agent/workflows/router/router_llm.py

@@ -6,6 +6,7 @@ from mcp_agent.agents.agent import Agent
 from mcp_agent.workflows.llm.augmented_llm import AugmentedLLM, RequestParams
 from mcp_agent.workflows.router.router_base import ResultT, Router, RouterResult
 from mcp_agent.logging.logger import get_logger
+from mcp_agent.event_progress import ProgressAction
 
 if TYPE_CHECKING:
     from mcp_agent.context import Context
@@ -100,6 +101,9 @@ class LLMRouter(Router):
         default_request_params: Optional[RequestParams] = None,
         **kwargs,
     ):
+        # Extract verb from kwargs to avoid passing it up the inheritance chain
+        self._llm_verb = kwargs.pop("verb", None)
+
         super().__init__(
             server_names=server_names,
             agents=agents,
@@ -161,10 +165,18 @@ class LLMRouter(Router):
         router_params = RequestParams(**params_dict)
         # Set up router-specific request params with routing instruction
         router_params.use_history = False
+        # Use the stored verb if available, otherwise default to ROUTING
+        verb_param = (
+            self._llm_verb
+            if hasattr(self, "_llm_verb") and self._llm_verb
+            else ProgressAction.ROUTING
+        )
+
         self.llm = self.llm_factory(
             agent=None,  # Router doesn't need an agent context
-            name=
+            name=self.name,  # Use the name provided during initialization
             default_request_params=router_params,
+            verb=verb_param,  # Use stored verb parameter or default to ROUTING
         )
         self.initialized = True
 
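Popping "verb" out of kwargs before delegating keeps the base class from receiving an argument it does not accept, while the router keeps the value for the LLM it builds later. A minimal sketch of the pattern:

    class Base:
        def __init__(self, **kwargs):
            # A stricter base would raise if an unexpected 'verb' kwarg reached it.
            assert "verb" not in kwargs


    class Router(Base):
        def __init__(self, **kwargs):
            # Claim the kwarg for ourselves, then pass the rest up the chain.
            self._llm_verb = kwargs.pop("verb", None)
            super().__init__(**kwargs)


    r = Router(verb="Routing")
    assert r._llm_verb == "Routing"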
@@ -243,7 +255,6 @@ class LLMRouter(Router):
             context=context, request=request, top_k=top_k
         )
 
-        # Get routes from LLM
         response = await self.llm.generate_structured(
             message=prompt,
             response_model=StructuredResponse,