fast-agent-mcp 0.0.7__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of fast-agent-mcp might be problematic.
- fast_agent_mcp-0.0.7.dist-info/METADATA +322 -0
- fast_agent_mcp-0.0.7.dist-info/RECORD +100 -0
- fast_agent_mcp-0.0.7.dist-info/WHEEL +4 -0
- fast_agent_mcp-0.0.7.dist-info/entry_points.txt +5 -0
- fast_agent_mcp-0.0.7.dist-info/licenses/LICENSE +201 -0
- mcp_agent/__init__.py +0 -0
- mcp_agent/agents/__init__.py +0 -0
- mcp_agent/agents/agent.py +277 -0
- mcp_agent/app.py +303 -0
- mcp_agent/cli/__init__.py +0 -0
- mcp_agent/cli/__main__.py +4 -0
- mcp_agent/cli/commands/bootstrap.py +221 -0
- mcp_agent/cli/commands/config.py +11 -0
- mcp_agent/cli/commands/setup.py +229 -0
- mcp_agent/cli/main.py +68 -0
- mcp_agent/cli/terminal.py +24 -0
- mcp_agent/config.py +334 -0
- mcp_agent/console.py +28 -0
- mcp_agent/context.py +251 -0
- mcp_agent/context_dependent.py +48 -0
- mcp_agent/core/fastagent.py +1013 -0
- mcp_agent/eval/__init__.py +0 -0
- mcp_agent/event_progress.py +88 -0
- mcp_agent/executor/__init__.py +0 -0
- mcp_agent/executor/decorator_registry.py +120 -0
- mcp_agent/executor/executor.py +293 -0
- mcp_agent/executor/task_registry.py +34 -0
- mcp_agent/executor/temporal.py +405 -0
- mcp_agent/executor/workflow.py +197 -0
- mcp_agent/executor/workflow_signal.py +325 -0
- mcp_agent/human_input/__init__.py +0 -0
- mcp_agent/human_input/handler.py +49 -0
- mcp_agent/human_input/types.py +58 -0
- mcp_agent/logging/__init__.py +0 -0
- mcp_agent/logging/events.py +123 -0
- mcp_agent/logging/json_serializer.py +163 -0
- mcp_agent/logging/listeners.py +216 -0
- mcp_agent/logging/logger.py +365 -0
- mcp_agent/logging/rich_progress.py +120 -0
- mcp_agent/logging/tracing.py +140 -0
- mcp_agent/logging/transport.py +461 -0
- mcp_agent/mcp/__init__.py +0 -0
- mcp_agent/mcp/gen_client.py +85 -0
- mcp_agent/mcp/mcp_activity.py +18 -0
- mcp_agent/mcp/mcp_agent_client_session.py +242 -0
- mcp_agent/mcp/mcp_agent_server.py +56 -0
- mcp_agent/mcp/mcp_aggregator.py +394 -0
- mcp_agent/mcp/mcp_connection_manager.py +330 -0
- mcp_agent/mcp/stdio.py +104 -0
- mcp_agent/mcp_server_registry.py +275 -0
- mcp_agent/progress_display.py +10 -0
- mcp_agent/resources/examples/decorator/main.py +26 -0
- mcp_agent/resources/examples/decorator/optimizer.py +78 -0
- mcp_agent/resources/examples/decorator/orchestrator.py +68 -0
- mcp_agent/resources/examples/decorator/parallel.py +81 -0
- mcp_agent/resources/examples/decorator/router.py +56 -0
- mcp_agent/resources/examples/decorator/tiny.py +22 -0
- mcp_agent/resources/examples/mcp_researcher/main-evalopt.py +53 -0
- mcp_agent/resources/examples/mcp_researcher/main.py +38 -0
- mcp_agent/telemetry/__init__.py +0 -0
- mcp_agent/telemetry/usage_tracking.py +18 -0
- mcp_agent/workflows/__init__.py +0 -0
- mcp_agent/workflows/embedding/__init__.py +0 -0
- mcp_agent/workflows/embedding/embedding_base.py +61 -0
- mcp_agent/workflows/embedding/embedding_cohere.py +49 -0
- mcp_agent/workflows/embedding/embedding_openai.py +46 -0
- mcp_agent/workflows/evaluator_optimizer/__init__.py +0 -0
- mcp_agent/workflows/evaluator_optimizer/evaluator_optimizer.py +359 -0
- mcp_agent/workflows/intent_classifier/__init__.py +0 -0
- mcp_agent/workflows/intent_classifier/intent_classifier_base.py +120 -0
- mcp_agent/workflows/intent_classifier/intent_classifier_embedding.py +134 -0
- mcp_agent/workflows/intent_classifier/intent_classifier_embedding_cohere.py +45 -0
- mcp_agent/workflows/intent_classifier/intent_classifier_embedding_openai.py +45 -0
- mcp_agent/workflows/intent_classifier/intent_classifier_llm.py +161 -0
- mcp_agent/workflows/intent_classifier/intent_classifier_llm_anthropic.py +60 -0
- mcp_agent/workflows/intent_classifier/intent_classifier_llm_openai.py +60 -0
- mcp_agent/workflows/llm/__init__.py +0 -0
- mcp_agent/workflows/llm/augmented_llm.py +645 -0
- mcp_agent/workflows/llm/augmented_llm_anthropic.py +539 -0
- mcp_agent/workflows/llm/augmented_llm_openai.py +615 -0
- mcp_agent/workflows/llm/llm_selector.py +345 -0
- mcp_agent/workflows/llm/model_factory.py +175 -0
- mcp_agent/workflows/orchestrator/__init__.py +0 -0
- mcp_agent/workflows/orchestrator/orchestrator.py +407 -0
- mcp_agent/workflows/orchestrator/orchestrator_models.py +154 -0
- mcp_agent/workflows/orchestrator/orchestrator_prompts.py +113 -0
- mcp_agent/workflows/parallel/__init__.py +0 -0
- mcp_agent/workflows/parallel/fan_in.py +350 -0
- mcp_agent/workflows/parallel/fan_out.py +187 -0
- mcp_agent/workflows/parallel/parallel_llm.py +141 -0
- mcp_agent/workflows/router/__init__.py +0 -0
- mcp_agent/workflows/router/router_base.py +276 -0
- mcp_agent/workflows/router/router_embedding.py +240 -0
- mcp_agent/workflows/router/router_embedding_cohere.py +59 -0
- mcp_agent/workflows/router/router_embedding_openai.py +59 -0
- mcp_agent/workflows/router/router_llm.py +301 -0
- mcp_agent/workflows/swarm/__init__.py +0 -0
- mcp_agent/workflows/swarm/swarm.py +320 -0
- mcp_agent/workflows/swarm/swarm_anthropic.py +42 -0
- mcp_agent/workflows/swarm/swarm_openai.py +41 -0
mcp_agent/workflows/orchestrator/orchestrator.py
@@ -0,0 +1,407 @@
"""
Orchestrator implementation for MCP Agent applications.
"""

import contextlib
from typing import (
    Callable,
    List,
    Literal,
    Optional,
    Type,
    TYPE_CHECKING,
)

from mcp_agent.agents.agent import Agent, AgentConfig
from mcp_agent.workflows.llm.augmented_llm import (
    AugmentedLLM,
    MessageParamT,
    MessageT,
    ModelT,
    RequestParams,
)
from mcp_agent.workflows.llm.model_factory import ModelFactory
from mcp_agent.workflows.orchestrator.orchestrator_models import (
    format_plan_result,
    format_step_result,
    NextStep,
    Plan,
    PlanResult,
    Step,
    StepResult,
    TaskWithResult,
)
from mcp_agent.workflows.orchestrator.orchestrator_prompts import (
    FULL_PLAN_PROMPT_TEMPLATE,
    ITERATIVE_PLAN_PROMPT_TEMPLATE,
    SYNTHESIZE_PLAN_PROMPT_TEMPLATE,
    TASK_PROMPT_TEMPLATE,
)
from mcp_agent.logging.logger import get_logger

if TYPE_CHECKING:
    from mcp_agent.context import Context

logger = get_logger(__name__)


class Orchestrator(AugmentedLLM[MessageParamT, MessageT]):
    """
    In the orchestrator-workers workflow, a central LLM dynamically breaks down tasks,
    delegates them to worker LLMs, and synthesizes their results. It does this
    in a loop until the task is complete.

    When to use this workflow:
    - This workflow is well-suited for complex tasks where you can't predict the
      subtasks needed (in coding, for example, the number of files that need to be
      changed and the nature of the change in each file likely depend on the task).

    Examples where orchestrator-workers is useful:
    - Coding products that make complex changes to multiple files each time.
    - Search tasks that involve gathering and analyzing information from multiple
      sources for possible relevant information.
    """

    def __init__(
        self,
        llm_factory: Callable[[Agent], AugmentedLLM[MessageParamT, MessageT]],
        planner: AugmentedLLM | None = None,
        available_agents: List[Agent | AugmentedLLM] | None = None,
        plan_type: Literal["full", "iterative"] = "full",
        context: Optional["Context"] = None,
        **kwargs,
    ):
        """
        Args:
            llm_factory: Factory function to create an LLM for a given agent
            planner: LLM to use for planning steps (if not provided, a default planner will be used)
            plan_type: "full" planning generates the full plan first, then executes it.
                "iterative" plans only the next step, and loops until the objective is complete.
            available_agents: List of agents available to tasks executed by this orchestrator
            context: Application context
        """
        # Initialize with orchestrator-specific defaults
        orchestrator_params = RequestParams(
            use_history=False,  # The orchestrator doesn't support history
            max_iterations=30,  # Higher default for complex tasks
            maxTokens=8192,  # Higher default for planning. TODO: this will break some models - make configurable.
            parallel_tool_calls=True,
        )

        # If kwargs contains request_params, merge with our defaults but force use_history=False
        if "request_params" in kwargs:
            base_params = kwargs["request_params"]
            merged = base_params.model_copy()
            merged.use_history = False  # Force this setting
            kwargs["request_params"] = merged
        else:
            kwargs["request_params"] = orchestrator_params

        super().__init__(context=context, **kwargs)

        self.llm_factory = llm_factory

        # Create the default planner with an AgentConfig
        request_params = self.get_request_params(kwargs.get("request_params"))
        planner_config = AgentConfig(
            name="LLM Orchestrator",
            instruction="""
            You are an expert planner. Given an objective task and a list of MCP servers (which are collections of tools)
            or Agents (which are collections of servers), your job is to break down the objective into a series of steps,
            which can be performed by LLMs with access to the servers or agents.
            """,
            servers=[],  # The planner doesn't need direct server access
            default_request_params=request_params,
            model=request_params.model if request_params else None,
        )

        self.planner = planner or llm_factory(agent=Agent(config=planner_config))

        self.plan_type: Literal["full", "iterative"] = plan_type
        self.server_registry = self.context.server_registry
        self.agents = {agent.name: agent for agent in available_agents or []}

    async def generate(
        self,
        message: str | MessageParamT | List[MessageParamT],
        request_params: RequestParams | None = None,
    ) -> List[MessageT]:
        """Request an LLM generation, which may run multiple iterations, and return the result"""
        params = self.get_request_params(request_params)
        objective = str(message)
        plan_result = await self.execute(objective=objective, request_params=params)

        return [plan_result.result]

    async def generate_str(
        self,
        message: str | MessageParamT | List[MessageParamT],
        request_params: RequestParams | None = None,
    ) -> str:
        """Request an LLM generation and return the string representation of the result"""
        params = self.get_request_params(request_params)

        result = await self.generate(
            message=message,
            request_params=params,
        )

        return str(result[0])

    async def generate_structured(
        self,
        message: str | MessageParamT | List[MessageParamT],
        response_model: Type[ModelT],
        request_params: RequestParams | None = None,
    ) -> ModelT:
        """Request a structured LLM generation and return the result as a Pydantic model."""
        params = self.get_request_params(request_params)

        result_str = await self.generate_str(message=message, request_params=params)

        structured_config = AgentConfig(
            name="Structured Output",
            instruction="Produce a structured output given a message",
            servers=[],  # No server access needed for structured output
        )

        llm = self.llm_factory(agent=Agent(config=structured_config))

        structured_result = await llm.generate_structured(
            message=result_str,
            response_model=response_model,
            request_params=params,
        )

        return structured_result

    async def execute(
        self, objective: str, request_params: RequestParams | None = None
    ) -> PlanResult:
        """Execute the task with result chaining between steps"""
        iterations = 0

        params = self.get_request_params(request_params)

        plan_result = PlanResult(objective=objective, step_results=[])

        while iterations < params.max_iterations:
            if self.plan_type == "iterative":
                # Get the next plan/step
                next_step = await self._get_next_step(
                    objective=objective, plan_result=plan_result, request_params=params
                )
                logger.debug(f"Iteration {iterations}: Iterative plan:", data=next_step)
                plan = Plan(steps=[next_step], is_complete=next_step.is_complete)
            elif self.plan_type == "full":
                plan = await self._get_full_plan(
                    objective=objective, plan_result=plan_result, request_params=params
                )
                logger.debug(f"Iteration {iterations}: Full Plan:", data=plan)
            else:
                raise ValueError(f"Invalid plan type {self.plan_type}")

            plan_result.plan = plan

            if plan.is_complete:
                # Only mark as complete if we have actually executed some steps
                if len(plan_result.step_results) > 0:
                    plan_result.is_complete = True

                    # Synthesize the final result into a single message
                    synthesis_prompt = SYNTHESIZE_PLAN_PROMPT_TEMPLATE.format(
                        plan_result=format_plan_result(plan_result)
                    )

                    plan_result.result = await self.planner.generate_str(
                        message=synthesis_prompt,
                        request_params=params.model_copy(update={"max_iterations": 1}),
                    )

                    return plan_result
                else:
                    # Don't allow completion without executing steps
                    plan.is_complete = False

            # Execute each step, collecting results
            # Note that in iterative mode this will only be a single step
            for step in plan.steps:
                step_result = await self._execute_step(
                    step=step,
                    previous_result=plan_result,
                    request_params=params,
                )

                plan_result.add_step_result(step_result)

            logger.debug(
                f"Iteration {iterations}: Intermediate plan result:", data=plan_result
            )
            iterations += 1

        raise RuntimeError(
            f"Task failed to complete in {params.max_iterations} iterations"
        )

    async def _execute_step(
        self,
        step: Step,
        previous_result: PlanResult,
        request_params: RequestParams | None = None,
    ) -> StepResult:
        """Execute a step's subtasks in parallel and synthesize results"""
        params = self.get_request_params(request_params)

        step_result = StepResult(step=step, task_results=[])
        context = format_plan_result(previous_result)

        # Prepare tasks and LLMs
        task_llms = []
        async with contextlib.AsyncExitStack() as stack:
            for task in step.tasks:
                agent = self.agents.get(task.agent)
                if not agent:
                    raise ValueError(f"No agent found matching {task.agent}")

                if isinstance(agent, AugmentedLLM):
                    llm = agent
                else:
                    # Use the existing LLM if the agent has one
                    if hasattr(agent, "_llm") and agent._llm:
                        llm = agent._llm
                    else:
                        # Only create a new context if needed
                        ctx_agent = await stack.enter_async_context(agent)
                        # Create a factory with the agent's own configuration
                        agent_factory = ModelFactory.create_factory(
                            model_string=agent.config.model,
                            request_params=agent.config.default_request_params,
                        )
                        llm = await ctx_agent.attach_llm(agent_factory)

                task_llms.append((task, llm))

            # Execute all tasks within the same context
            futures = []
            for task, llm in task_llms:
                task_description = TASK_PROMPT_TEMPLATE.format(
                    objective=previous_result.objective,
                    task=task.description,
                    context=context,
                )
                # Get the agent's config for task execution
                agent = self.agents.get(task.agent)
                task_params = (
                    agent.config.default_request_params
                    if hasattr(agent, "config")
                    else params
                )
                futures.append(
                    llm.generate_str(
                        message=task_description, request_params=task_params
                    )
                )

            # Wait for all tasks, including any tool calls they make
            results = await self.executor.execute(*futures)

            # Process results while contexts are still active
            for (task, _), result in zip(task_llms, results):
                step_result.add_task_result(
                    TaskWithResult(**task.model_dump(), result=str(result))
                )

            # Format the final result while contexts are still active
            step_result.result = format_step_result(step_result)

        return step_result

    async def _get_full_plan(
        self,
        objective: str,
        plan_result: PlanResult,
        request_params: RequestParams | None = None,
    ) -> Plan:
        """Generate a full plan considering previous results"""
        params = self.get_request_params(request_params)
        params = params.model_copy(update={"use_history": False})

        agents = "\n".join(
            [
                f"{idx}. {self._format_agent_info(agent)}"
                for idx, agent in enumerate(self.agents, 1)
            ]
        )

        prompt = FULL_PLAN_PROMPT_TEMPLATE.format(
            objective=objective,
            plan_result=format_plan_result(plan_result),
            agents=agents,
        )

        plan = await self.planner.generate_structured(
            message=prompt,
            response_model=Plan,
            request_params=params,
        )

        return plan

    async def _get_next_step(
        self,
        objective: str,
        plan_result: PlanResult,
        request_params: RequestParams | None = None,
    ) -> NextStep:
        """Generate just the next needed step"""
        params = self.get_request_params(request_params)
        params = params.model_copy(update={"use_history": False})

        agents = "\n".join(
            [
                f"{idx}. {self._format_agent_info(agent)}"
                for idx, agent in enumerate(self.agents, 1)
            ]
        )

        prompt = ITERATIVE_PLAN_PROMPT_TEMPLATE.format(
            objective=objective,
            plan_result=format_plan_result(plan_result),
            agents=agents,
        )

        next_step = await self.planner.generate_structured(
            message=prompt,
            response_model=NextStep,
            request_params=params,
        )
        return next_step

    def _format_server_info(self, server_name: str) -> str:
        """Format server information for display to planners"""
        server_config = self.server_registry.get_server_config(server_name)
        server_str = f"Server Name: {server_name}"
        if not server_config:
            return server_str

        description = server_config.description
        if description:
            server_str = f"{server_str}\nDescription: {description}"

        return server_str

    def _format_agent_info(self, agent_name: str) -> str:
        """Format Agent information for display to planners"""
        agent = self.agents.get(agent_name)
        if not agent:
            return ""

        servers = "\n".join(
            [
                f"- {self._format_server_info(server_name)}"
                for server_name in agent.server_names
            ]
        )

        return f"Agent Name: {agent.name}\nDescription: {agent.instruction}\nServers in Agent: {servers}"

mcp_agent/workflows/orchestrator/orchestrator_models.py
@@ -0,0 +1,154 @@
from typing import List

from pydantic import BaseModel, ConfigDict, Field

from mcp_agent.workflows.orchestrator.orchestrator_prompts import (
    PLAN_RESULT_TEMPLATE,
    STEP_RESULT_TEMPLATE,
    TASK_RESULT_TEMPLATE,
)


class Task(BaseModel):
    """An individual task that needs to be executed"""

    description: str = Field(description="Description of the task")


class ServerTask(Task):
    """An individual task that can be accomplished by one or more MCP servers"""

    servers: List[str] = Field(
        description="Names of MCP servers that the LLM has access to for this task",
        default_factory=list,
    )


class AgentTask(Task):
    """An individual task that can be accomplished by an Agent."""

    agent: str = Field(
        description="Name of Agent from given list of agents that the LLM has access to for this task",
    )


class Step(BaseModel):
    """A step containing independent tasks that can be executed in parallel"""

    description: str = Field(description="Description of the step")

    tasks: List[AgentTask] = Field(
        description="Subtasks that can be executed in parallel",
        default_factory=list,
    )


class Plan(BaseModel):
    """Plan generated by the orchestrator planner."""

    steps: List[Step] = Field(
        description="List of steps to execute sequentially",
        default_factory=list,
    )
    is_complete: bool = Field(
        description="Whether the overall plan objective is complete"
    )


class TaskWithResult(Task):
    """An individual task with its result"""

    result: str = Field(
        description="Result of executing the task", default="Task completed"
    )

    model_config = ConfigDict(extra="allow", arbitrary_types_allowed=True)


class StepResult(BaseModel):
    """Result of executing a step"""

    step: Step = Field(description="The step that was executed", default_factory=Step)
    task_results: List[TaskWithResult] = Field(
        description="Results of executing each task", default_factory=list
    )
    result: str = Field(
        description="Result of executing the step", default="Step completed"
    )

    def add_task_result(self, task_result: TaskWithResult):
        """Add a task result to this step"""
        if not isinstance(self.task_results, list):
            self.task_results = []
        self.task_results.append(task_result)


class PlanResult(BaseModel):
    """Results of executing a plan"""

    objective: str
    """Objective of the plan"""

    plan: Plan | None = None
    """The plan that was executed"""

    step_results: List[StepResult]
    """Results of executing each step"""

    is_complete: bool = False
    """Whether the overall plan objective is complete"""

    result: str | None = None
    """Result of executing the plan"""

    def add_step_result(self, step_result: StepResult):
        """Add a step result to this plan"""
        if not isinstance(self.step_results, list):
            self.step_results = []
        self.step_results.append(step_result)


class NextStep(Step):
    """Single next step in iterative planning"""

    is_complete: bool = Field(
        description="Whether the overall plan objective is complete"
    )


def format_task_result(task_result: TaskWithResult) -> str:
    """Format a task result for display to planners"""
    return TASK_RESULT_TEMPLATE.format(
        task_description=task_result.description, task_result=task_result.result
    )


def format_step_result(step_result: StepResult) -> str:
    """Format a step result for display to planners"""
    tasks_str = "\n".join(
        f" - {format_task_result(task)}" for task in step_result.task_results
    )
    return STEP_RESULT_TEMPLATE.format(
        step_description=step_result.step.description,
        step_result=step_result.result,
        tasks_str=tasks_str,
    )


def format_plan_result(plan_result: PlanResult) -> str:
    """Format the full plan execution state for display to planners"""
    steps_str = (
        "\n\n".join(
            f"{i + 1}:\n{format_step_result(step)}"
            for i, step in enumerate(plan_result.step_results)
        )
        if plan_result.step_results
        else "No steps executed yet"
    )

    return PLAN_RESULT_TEMPLATE.format(
        plan_objective=plan_result.objective,
        steps_str=steps_str,
        plan_status="Complete" if plan_result.is_complete else "In Progress",
        plan_result=plan_result.result if plan_result.is_complete else "In Progress",
    )
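
Because these models are plain Pydantic objects, the planner's data flow can be exercised directly. A short sketch (the objective, step, and task strings are made-up values) showing how step results accumulate and how format_plan_result renders the state the planner sees on its next iteration:

from mcp_agent.workflows.orchestrator.orchestrator_models import (
    AgentTask,
    Plan,
    PlanResult,
    Step,
    StepResult,
    TaskWithResult,
    format_plan_result,
)

step = Step(
    description="Gather the source material",
    tasks=[AgentTask(description="Fetch the README", agent="finder")],
)
plan = Plan(steps=[step], is_complete=False)

plan_result = PlanResult(objective="Summarize the project README", step_results=[])
plan_result.plan = plan

step_result = StepResult(step=step)
step_result.add_task_result(
    TaskWithResult(description="Fetch the README", result="README contents...")
)
plan_result.add_step_result(step_result)

# Renders the PLAN_RESULT_TEMPLATE view; with is_complete=False the status
# and result fields both read "In Progress".
print(format_plan_result(plan_result))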

mcp_agent/workflows/orchestrator/orchestrator_prompts.py
@@ -0,0 +1,113 @@
TASK_RESULT_TEMPLATE = """Task: {task_description}
Result: {task_result}"""

STEP_RESULT_TEMPLATE = """Step: {step_description}
Step Subtasks:
{tasks_str}"""

PLAN_RESULT_TEMPLATE = """Plan Objective: {plan_objective}

Progress So Far (steps completed):
{steps_str}

Plan Current Status: {plan_status}
Plan Current Result: {plan_result}"""

FULL_PLAN_PROMPT_TEMPLATE = """You are tasked with orchestrating a plan to complete an objective.
You can analyze results from the previous steps already executed to decide if the objective is complete.
Your plan must be structured in sequential steps, with each step containing independent parallel subtasks.

Objective: {objective}

{plan_result}

If the previous results achieve the objective, return is_complete=True.
Otherwise, generate remaining steps needed.

You have access to the following MCP Servers (which are collections of tools/functions),
and Agents (which are collections of servers):

Agents:
{agents}

Generate a plan with all remaining steps needed.
Steps are sequential, but each Step can have parallel subtasks.
For each Step, specify a description of the step and independent subtasks that can run in parallel.
For each subtask specify:
1. Clear description of the task that an LLM can execute
2. Name of 1 Agent OR List of MCP server names to use for the task

Return your response in the following JSON structure:
{{
    "steps": [
        {{
            "description": "Description of step 1",
            "tasks": [
                {{
                    "description": "Description of task 1",
                    "agent": "agent_name"  # For AgentTask
                }},
                {{
                    "description": "Description of task 2",
                    "agent": "agent_name2"
                }}
            ]
        }}
    ],
    "is_complete": false
}}

You must respond with valid JSON only, with no triple backticks. No markdown formatting.
No extra text. Do not wrap in ```json code fences."""

ITERATIVE_PLAN_PROMPT_TEMPLATE = """You are tasked with determining only the next step in a plan
needed to complete an objective. You must analyze the current state and progress from previous steps
to decide what to do next.

A Step must be sequential in the plan, but can have independent parallel subtasks. Only return a single Step.

Objective: {objective}

{plan_result}

If the previous results achieve the objective, return is_complete=True.
Otherwise, generate the next Step.

You have access to the following MCP Servers (which are collections of tools/functions),
and Agents (which are collections of servers):

Agents:
{agents}

Generate the next step, by specifying a description of the step and independent subtasks that can run in parallel:
For each subtask specify:
1. Clear description of the task that an LLM can execute
2. Name of 1 Agent OR List of MCP server names to use for the task

Return your response in the following JSON structure:
{{
    "description": "Description of step 1",
    "tasks": [
        {{
            "description": "Description of task 1",
            "agent": "agent_name"  # For AgentTask
        }}
    ],
    "is_complete": false
}}

You must respond with valid JSON only, with no triple backticks. No markdown formatting.
No extra text. Do not wrap in ```json code fences."""

TASK_PROMPT_TEMPLATE = """You are part of a larger workflow to achieve the objective: {objective}.
Your job is to accomplish only the following task: {task}.

Results so far that may provide helpful context:
{context}"""

SYNTHESIZE_STEP_PROMPT_TEMPLATE = """Synthesize the results of these parallel tasks into a cohesive result:
{step_result}"""

SYNTHESIZE_PLAN_PROMPT_TEMPLATE = """Synthesize the results of executing all steps in the plan into a cohesive result:
{plan_result}"""
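
These templates are rendered with str.format, so the doubled braces around the JSON schemas are escapes that survive as literal single braces. A short sketch (the placeholder values are made up) confirming the rendered prompt contains the intended JSON example:

from mcp_agent.workflows.orchestrator.orchestrator_prompts import (
    ITERATIVE_PLAN_PROMPT_TEMPLATE,
)

prompt = ITERATIVE_PLAN_PROMPT_TEMPLATE.format(
    objective="Summarize the project README",
    plan_result="No steps executed yet",
    agents="1. Agent Name: finder\nDescription: Find and read the requested files.",
)

# {{ and }} survive .format() as literal braces, so the model sees a
# valid-looking JSON example rather than unresolved placeholders.
assert '"is_complete": false' in prompt
print(prompt)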