fast-agent-mcp 0.0.16__py3-none-any.whl → 0.1.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {fast_agent_mcp-0.0.16.dist-info → fast_agent_mcp-0.1.1.dist-info}/METADATA +30 -13
- {fast_agent_mcp-0.0.16.dist-info → fast_agent_mcp-0.1.1.dist-info}/RECORD +21 -20
- mcp_agent/cli/commands/bootstrap.py +1 -1
- mcp_agent/cli/commands/setup.py +4 -1
- mcp_agent/cli/main.py +13 -3
- mcp_agent/core/agent_app.py +1 -1
- mcp_agent/core/enhanced_prompt.py +3 -3
- mcp_agent/core/fastagent.py +96 -49
- mcp_agent/resources/examples/data-analysis/analysis-campaign.py +188 -0
- mcp_agent/resources/examples/data-analysis/analysis.py +36 -32
- mcp_agent/resources/examples/workflows/agent_build.py +48 -28
- mcp_agent/resources/examples/workflows/evaluator.py +3 -1
- mcp_agent/resources/examples/workflows/orchestrator.py +2 -2
- mcp_agent/workflows/evaluator_optimizer/evaluator_optimizer.py +120 -63
- mcp_agent/workflows/llm/augmented_llm_anthropic.py +4 -3
- mcp_agent/workflows/orchestrator/orchestrator.py +170 -70
- mcp_agent/workflows/orchestrator/orchestrator_models.py +3 -0
- mcp_agent/workflows/orchestrator/orchestrator_prompts.py +48 -0
- {fast_agent_mcp-0.0.16.dist-info → fast_agent_mcp-0.1.1.dist-info}/WHEEL +0 -0
- {fast_agent_mcp-0.0.16.dist-info → fast_agent_mcp-0.1.1.dist-info}/entry_points.txt +0 -0
- {fast_agent_mcp-0.0.16.dist-info → fast_agent_mcp-0.1.1.dist-info}/licenses/LICENSE +0 -0
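Note on the orchestrator changes below: the planner now gets an explicit iteration and step budget. A minimal sketch of how the new defaults and the derived step limit fit together; the RequestParams import path is assumed (only the AugmentedLLM module appears in this diff), and the standalone usage is illustrative rather than code from the package:

```python
# Sketch only: the RequestParams import path is an assumption, not shown in this diff.
from mcp_agent.workflows.llm.augmented_llm import RequestParams  # assumed location

# Defaults the orchestrator now applies (see the @@ -90,7 +91,7 @@ hunk below)
params = RequestParams(
    use_history=False,  # the orchestrator doesn't support history
    max_iterations=10,  # higher default for complex tasks
    maxTokens=8192,  # higher default for planning
    parallel_tool_calls=True,
)

# Step budget derived during plan execution: 5x max_iterations unless max_steps is set
max_steps = getattr(params, "max_steps", params.max_iterations * 5)
print(max_steps)  # 50 with the defaults above
```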
mcp_agent/workflows/orchestrator/orchestrator.py

@@ -32,6 +32,7 @@ from mcp_agent.workflows.orchestrator.orchestrator_models import (
 from mcp_agent.workflows.orchestrator.orchestrator_prompts import (
     FULL_PLAN_PROMPT_TEMPLATE,
     ITERATIVE_PLAN_PROMPT_TEMPLATE,
+    SYNTHESIZE_INCOMPLETE_PLAN_TEMPLATE,  # Add the missing import
     SYNTHESIZE_PLAN_PROMPT_TEMPLATE,
     TASK_PROMPT_TEMPLATE,
 )
@@ -90,7 +91,7 @@ class Orchestrator(AugmentedLLM[MessageParamT, MessageT]):
         # Initialize with orchestrator-specific defaults
         orchestrator_params = RequestParams(
             use_history=False,  # Orchestrator doesn't support history
-            max_iterations=
+            max_iterations=10,  # Higher default for complex tasks
             maxTokens=8192,  # Higher default for planning
             parallel_tool_calls=True,
         )
@@ -126,9 +127,29 @@ class Orchestrator(AugmentedLLM[MessageParamT, MessageT]):
         self.agents = {agent.name: agent for agent in available_agents}

         # Initialize logger
-        self.logger = logger
         self.name = name

+        # Store agents by name - COMPLETE REWRITE OF AGENT STORAGE
+        self.agents = {}
+        for agent in available_agents:
+            # Fix: Remove all special handling of agent names and store them exactly as they are
+            agent_name = agent.name
+
+            # Verify if the name is actually "None" (string) or None (NoneType)
+            if agent_name == "None":
+                # Try to get a better name from config if available
+                if hasattr(agent, "config") and agent.config and agent.config.name:
+                    agent_name = agent.config.name
+            elif agent_name is None:
+                # Try to get a better name from config if available
+                if hasattr(agent, "config") and agent.config and agent.config.name:
+                    agent_name = agent.config.name
+                else:
+                    agent_name = f"unnamed_agent_{len(self.agents)}"
+
+            self.logger.info(f"Adding agent '{agent_name}' to orchestrator")
+            self.agents[agent_name] = agent
+
     async def generate(
         self,
         message: str | MessageParamT | List[MessageParamT],
@@ -165,19 +186,21 @@ class Orchestrator(AugmentedLLM[MessageParamT, MessageT]):
         """Request a structured LLM generation and return the result as a Pydantic model."""
         import json
         from pydantic import ValidationError
-
+
         params = self.get_request_params(request_params)
         result_str = await self.generate_str(message=message, request_params=params)
-
+
         try:
             # Directly parse JSON and create model instance
             parsed_data = json.loads(result_str)
             return response_model(**parsed_data)
         except (json.JSONDecodeError, ValidationError) as e:
             # Log the error and fall back to the original method if direct parsing fails
-            self.logger.error(
+            self.logger.error(
+                f"Direct JSON parsing failed: {str(e)}. Falling back to standard method."
+            )
             self.logger.debug(f"Failed JSON content: {result_str}")
-
+
             # Use AugmentedLLM's structured output handling as fallback
             return await super().generate_structured(
                 message=result_str,
@@ -190,8 +213,12 @@ class Orchestrator(AugmentedLLM[MessageParamT, MessageT]):
     ) -> PlanResult:
         """Execute task with result chaining between steps"""
         iterations = 0
+        total_steps_executed = 0

         params = self.get_request_params(request_params)
+        max_steps = getattr(
+            params, "max_steps", params.max_iterations * 5
+        )  # Default to 5× max_iterations

         # Single progress event for orchestration start
         model = await self.select_model(params) or "unknown-model"
@@ -208,6 +235,9 @@ class Orchestrator(AugmentedLLM[MessageParamT, MessageT]):
         )

         plan_result = PlanResult(objective=objective, step_results=[])
+        plan_result.max_iterations_reached = (
+            False  # Add a flag to track if we hit the limit
+        )

         while iterations < params.max_iterations:
             if self.plan_type == "iterative":
@@ -256,6 +286,14 @@ class Orchestrator(AugmentedLLM[MessageParamT, MessageT]):
             # Execute each step, collecting results
             # Note that in iterative mode this will only be a single step
             for step in plan.steps:
+                # Check if we've hit the step limit
+                if total_steps_executed >= max_steps:
+                    self.logger.warning(
+                        f"Reached maximum step limit ({max_steps}) without completing objective."
+                    )
+                    plan_result.max_steps_reached = True
+                    break
+
                 step_result = await self._execute_step(
                     step=step,
                     previous_result=plan_result,
@@ -263,16 +301,40 @@ class Orchestrator(AugmentedLLM[MessageParamT, MessageT]):
                 )

                 plan_result.add_step_result(step_result)
+                total_steps_executed += 1
+
+            # Check for step limit after executing steps
+            if total_steps_executed >= max_steps:
+                plan_result.max_iterations_reached = True
+                break

             logger.debug(
                 f"Iteration {iterations}: Intermediate plan result:", data=plan_result
             )
             iterations += 1

-
-
+        # If we get here, we've hit the iteration limit without completing
+        self.logger.warning(
+            f"Failed to complete in {params.max_iterations} iterations."
+        )
+
+        # Mark that we hit the iteration limit
+        plan_result.max_iterations_reached = True
+
+        # Synthesize what we have so far, but use a different prompt that explains the incomplete status
+        synthesis_prompt = SYNTHESIZE_INCOMPLETE_PLAN_TEMPLATE.format(
+            plan_result=format_plan_result(plan_result),
+            max_iterations=params.max_iterations,
+        )
+
+        # Generate a final synthesis that acknowledges the incomplete status
+        plan_result.result = await self.planner.generate_str(
+            message=synthesis_prompt,
+            request_params=params.model_copy(update={"max_iterations": 1}),
         )

+        return plan_result
+
     async def _execute_step(
         self,
         step: Step,
@@ -312,8 +374,14 @@ class Orchestrator(AugmentedLLM[MessageParamT, MessageT]):
                 context=context,
             )

-            #
-
+            # Handle both Agent objects and AugmentedLLM objects
+            from mcp_agent.workflows.llm.augmented_llm import AugmentedLLM
+
+            if isinstance(agent, AugmentedLLM):
+                futures.append(agent.generate_str(message=task_description))
+            else:
+                # Traditional Agent objects with _llm property
+                futures.append(agent._llm.generate_str(message=task_description))

         # Wait for all tasks (only if we have valid futures)
         results = await self.executor.execute(*futures) if futures else []
@@ -332,7 +400,7 @@ class Orchestrator(AugmentedLLM[MessageParamT, MessageT]):
                 task_result = TaskWithResult(
                     description=task_model["description"],
                     agent=task_model["agent"],  # Track which agent produced this result
-                    result=str(result)
+                    result=str(result),
                 )
                 step_result.add_task_result(task_result)
                 task_index += 1
@@ -344,7 +412,7 @@ class Orchestrator(AugmentedLLM[MessageParamT, MessageT]):
                     TaskWithResult(
                         description=task_model["description"],
                         agent=task_model["agent"],
-                        result=f"ERROR: {error_message}"
+                        result=f"ERROR: {error_message}",
                     )
                 )

@@ -361,25 +429,49 @@ class Orchestrator(AugmentedLLM[MessageParamT, MessageT]):
         """Generate full plan considering previous results"""
         import json
         from pydantic import ValidationError
-        from mcp_agent.workflows.orchestrator.orchestrator_models import
+        from mcp_agent.workflows.orchestrator.orchestrator_models import (
+            Plan,
+            Step,
+            AgentTask,
+        )

         params = self.get_request_params(request_params)
         params = params.model_copy(update={"use_history": False})

         # Format agents without numeric prefixes for cleaner XML
-
-
-
+        agent_formats = []
+        for agent_name in self.agents.keys():
+            formatted = self._format_agent_info(agent_name)
+            agent_formats.append(formatted)
+
+        agents = "\n".join(agent_formats)

         # Create clear plan status indicator for the template
         plan_status = "Plan Status: Not Started"
         if hasattr(plan_result, "is_complete"):
-            plan_status =
-
+            plan_status = (
+                "Plan Status: Complete"
+                if plan_result.is_complete
+                else "Plan Status: In Progress"
+            )
+
+        # Fix the iteration counting display
+        max_iterations = params.max_iterations
+        # Get the actual iteration number we're on (0-based → 1-based for display)
+        current_iteration = len(plan_result.step_results) // (
+            1 if self.plan_type == "iterative" else len(plan_result.step_results) or 1
+        )
+        current_iteration = min(current_iteration, max_iterations - 1)  # Cap at max-1
+        iterations_remaining = max(
+            0, max_iterations - current_iteration - 1
+        )  # Ensure non-negative
+        iterations_info = f"Planning Budget: Iteration {current_iteration + 1} of {max_iterations} (with {iterations_remaining} remaining)"
+
         prompt = FULL_PLAN_PROMPT_TEMPLATE.format(
             objective=objective,
             plan_result=format_plan_result(plan_result),
             plan_status=plan_status,
+            iterations_info=iterations_info,
             agents=agents,
         )

@@ -388,50 +480,44 @@ class Orchestrator(AugmentedLLM[MessageParamT, MessageT]):
             message=prompt,
             request_params=params,
         )
-
+
         try:
             # Parse JSON directly
             data = json.loads(result_str)
-
+
             # Create models manually to ensure agent names are preserved exactly as returned
             steps = []
-            for step_data in data.get(
+            for step_data in data.get("steps", []):
                 tasks = []
-                for task_data in step_data.get(
+                for task_data in step_data.get("tasks", []):
                     # Create AgentTask directly from dict, preserving exact agent string
                     task = AgentTask(
-                        description=task_data.get(
-                        agent=task_data.get(
+                        description=task_data.get("description", ""),
+                        agent=task_data.get("agent", ""),  # Preserve exact agent name
                     )
                     tasks.append(task)
-
+
                 # Create Step with the exact task objects we created
-                step = Step(
-                    description=step_data.get('description', ''),
-                    tasks=tasks
-                )
+                step = Step(description=step_data.get("description", ""), tasks=tasks)
                 steps.append(step)
-
+
             # Create final Plan
-            plan = Plan(
-
-                is_complete=data.get('is_complete', False)
-            )
-
+            plan = Plan(steps=steps, is_complete=data.get("is_complete", False))
+
             return plan
-
+
         except (json.JSONDecodeError, ValidationError, KeyError) as e:
             # Log detailed error and fall back to the original method as last resort
             self.logger.error(f"Error parsing plan JSON: {str(e)}")
             self.logger.debug(f"Failed JSON content: {result_str}")
-
+
             # Use the normal structured parsing as fallback
             plan = await self.planner.generate_structured(
                 message=result_str,
                 response_model=Plan,
                 request_params=params,
             )
-
+
         return plan

     async def _get_next_step(
@@ -443,25 +529,40 @@ class Orchestrator(AugmentedLLM[MessageParamT, MessageT]):
         """Generate just the next needed step"""
         import json
         from pydantic import ValidationError
-        from mcp_agent.workflows.orchestrator.orchestrator_models import
-
+        from mcp_agent.workflows.orchestrator.orchestrator_models import (
+            NextStep,
+            AgentTask,
+        )
+
         params = self.get_request_params(request_params)
         params = params.model_copy(update={"use_history": False})

         # Format agents without numeric prefixes for cleaner XML
+        # FIX: Iterate over agent names instead of agent objects
         agents = "\n".join(
-            [self._format_agent_info(
+            [self._format_agent_info(agent_name) for agent_name in self.agents.keys()]
         )

         # Create clear plan status indicator for the template
         plan_status = "Plan Status: Not Started"
         if hasattr(plan_result, "is_complete"):
-            plan_status =
-
+            plan_status = (
+                "Plan Status: Complete"
+                if plan_result.is_complete
+                else "Plan Status: In Progress"
+            )
+
+        # Add max_iterations info for the LLM
+        max_iterations = params.max_iterations
+        current_iteration = len(plan_result.step_results)
+        iterations_remaining = max_iterations - current_iteration
+        iterations_info = f"Planning Budget: {iterations_remaining} of {max_iterations} iterations remaining"
+
         prompt = ITERATIVE_PLAN_PROMPT_TEMPLATE.format(
             objective=objective,
             plan_result=format_plan_result(plan_result),
             plan_status=plan_status,
+            iterations_info=iterations_info,
             agents=agents,
         )

@@ -470,55 +571,55 @@ class Orchestrator(AugmentedLLM[MessageParamT, MessageT]):
             message=prompt,
             request_params=params,
         )
-
+
         try:
             # Parse JSON directly
             data = json.loads(result_str)
-
+
             # Create task objects manually to preserve exact agent names
             tasks = []
-            for task_data in data.get(
+            for task_data in data.get("tasks", []):
                 # Preserve the exact agent name as specified in the JSON
                 task = AgentTask(
-                    description=task_data.get(
-                    agent=task_data.get(
+                    description=task_data.get("description", ""),
+                    agent=task_data.get("agent", ""),
                 )
                 tasks.append(task)
-
+
             # Create step with manually constructed tasks
             next_step = NextStep(
-                description=data.get(
+                description=data.get("description", ""),
                 tasks=tasks,
-                is_complete=data.get(
+                is_complete=data.get("is_complete", False),
             )
-
+
             return next_step
-
+
         except (json.JSONDecodeError, ValidationError, KeyError) as e:
             # Log detailed error and fall back to the original method
             self.logger.error(f"Error parsing next step JSON: {str(e)}")
             self.logger.debug(f"Failed JSON content: {result_str}")
-
+
             # Use the normal structured parsing as fallback
             next_step = await self.planner.generate_structured(
                 message=result_str,
                 response_model=NextStep,
                 request_params=params,
             )
-
+
         return next_step

     def _format_server_info(self, server_name: str) -> str:
         """Format server information for display to planners using XML tags"""
         from mcp_agent.workflows.llm.prompt_utils import format_server_info
-
+
         server_config = self.server_registry.get_server_config(server_name)
-
+
         # Get description or empty string if not available
         description = ""
         if server_config and server_config.description:
             description = server_config.description
-
+
         return format_server_info(server_name, description)

     def _validate_agent_names(self, plan: Plan) -> None:
@@ -527,12 +628,12 @@ class Orchestrator(AugmentedLLM[MessageParamT, MessageT]):
         This helps catch invalid agent references early.
         """
         invalid_agents = []
-
+
         for step in plan.steps:
             for task in step.tasks:
                 if task.agent not in self.agents:
                     invalid_agents.append(task.agent)
-
+
         if invalid_agents:
             available_agents = ", ".join(self.agents.keys())
             invalid_list = ", ".join(invalid_agents)
@@ -540,20 +641,17 @@ class Orchestrator(AugmentedLLM[MessageParamT, MessageT]):
             self.logger.error(error_msg)
             # We don't raise an exception here as the execution will handle invalid agents
             # by logging errors for individual tasks
-
+
     def _format_agent_info(self, agent_name: str) -> str:
         """Format Agent information for display to planners using XML tags"""
         from mcp_agent.workflows.llm.prompt_utils import format_agent_info
-
+
         agent = self.agents.get(agent_name)
         if not agent:
+            self.logger.error(f"Agent '{agent_name}' not found in orchestrator agents")
             return ""
-
-        # Get agent instruction as string
         instruction = agent.instruction
-
-        instruction = instruction({})
-
+
         # Get servers information
         server_info = []
         for server_name in agent.server_names:
@@ -561,7 +659,9 @@ class Orchestrator(AugmentedLLM[MessageParamT, MessageT]):
             description = ""
             if server_config and server_config.description:
                 description = server_config.description
-
+
             server_info.append({"name": server_name, "description": description})
-
-        return format_agent_info(
+
+        return format_agent_info(
+            agent.name, instruction, server_info if server_info else None
+        )
mcp_agent/workflows/orchestrator/orchestrator_models.py

@@ -102,6 +102,9 @@ class PlanResult(BaseModel):
     is_complete: bool = False
     """Whether the overall plan objective is complete"""

+    max_iterations_reached: bool = False
+    """Whether the plan execution reached the maximum number of iterations without completing"""
+
     result: str | None = None
     """Result of executing the plan"""

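PlanResult gains an explicit flag for the iteration-limit case. A minimal sketch of how a caller might inspect it, assuming the fields shown here and in the orchestrator.py hunks above; the construction is illustrative, since the package builds this object internally:

```python
from mcp_agent.workflows.orchestrator.orchestrator_models import PlanResult

# Illustrative construction; inside the package the orchestrator builds this object itself.
plan_result = PlanResult(objective="Summarise the quarterly reports", step_results=[])
plan_result.max_iterations_reached = True  # set by the orchestrator when the budget runs out

if plan_result.max_iterations_reached and not plan_result.is_complete:
    # plan_result.result holds the synthesis produced with SYNTHESIZE_INCOMPLETE_PLAN_TEMPLATE
    print("Objective not completed within the iteration budget:")
    print(plan_result.result)
```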
mcp_agent/workflows/orchestrator/orchestrator_prompts.py

@@ -1,3 +1,8 @@
+"""
+Prompt templates used by the Orchestrator workflow.
+"""
+
+# Templates for formatting results
 TASK_RESULT_TEMPLATE = """Task: {task_description}
 Result: {task_result}"""

@@ -30,6 +35,7 @@ You can analyze results from the previous steps already executed to decide if th

 <fastagent:status>
 {plan_status}
+{iterations_info}
 </fastagent:status>
 </fastagent:data>

@@ -38,6 +44,10 @@ If the previous results achieve the objective, return is_complete=True.
 Otherwise, generate remaining steps needed.

 <fastagent:instruction>
+You are operating in "full plan" mode, where you generate a complete plan with ALL remaining steps needed.
+After receiving your plan, the system will execute ALL steps in your plan before asking for your input again.
+If the plan needs multiple iterations, you'll be called again with updated results.
+
 Generate a plan with all remaining steps needed.
 Steps are sequential, but each Step can have parallel subtasks.
 For each Step, specify a description of the step and independent subtasks that can run in parallel.
@@ -68,6 +78,8 @@ Return your response in the following JSON structure:
     "is_complete": false
 }}

+Set "is_complete" to true ONLY if you are confident the objective has been fully achieved based on work completed so far.
+
 You must respond with valid JSON only, with no triple backticks. No markdown formatting.
 No extra text. Do not wrap in ```json code fences.
 </fastagent:instruction>
@@ -92,6 +104,7 @@ to decide what to do next.

 <fastagent:status>
 {plan_status}
+{iterations_info}
 </fastagent:status>
 </fastagent:data>

@@ -100,6 +113,9 @@ If the previous results achieve the objective, return is_complete=True.
 Otherwise, generate the next Step.

 <fastagent:instruction>
+You are operating in "iterative plan" mode, where you generate ONLY ONE STEP at a time.
+After each step is executed, you'll be called again to determine the next step based on updated results.
+
 Generate the next step, by specifying a description of the step and independent subtasks that can run in parallel:
 For each subtask specify:
 1. Clear description of the task that an LLM can execute
@@ -120,6 +136,8 @@ Return your response in the following JSON structure:
     "is_complete": false
 }}

+Set "is_complete" to true ONLY if you are confident the objective has been fully achieved based on work completed so far.
+
 You must respond with valid JSON only, with no triple backticks. No markdown formatting.
 No extra text. Do not wrap in ```json code fences.
 </fastagent:instruction>
@@ -184,5 +202,35 @@ Create a comprehensive final response that addresses the original objective.
 Integrate all the information gathered across all plan steps.
 Provide a clear, complete answer that achieves the objective.
 Focus on delivering value through your synthesis, not just summarizing.
+
+If the plan was marked as incomplete but the maximum number of iterations was reached,
+make sure to state clearly what was accomplished and what remains to be done.
+</fastagent:instruction>
+"""
+
+# New template for incomplete plans due to iteration limits
+SYNTHESIZE_INCOMPLETE_PLAN_TEMPLATE = """You need to synthesize the results of all completed plan steps into a final response.
+
+<fastagent:data>
+<fastagent:plan-results>
+{plan_result}
+</fastagent:plan-results>
+</fastagent:data>
+
+<fastagent:status>
+The maximum number of iterations ({max_iterations}) was reached before the objective could be completed.
+</fastagent:status>
+
+<fastagent:instruction>
+Create a comprehensive response that summarizes what was accomplished so far.
+The objective was NOT fully completed due to reaching the maximum number of iterations.
+
+In your response:
+1. Clearly state that the objective was not fully completed
+2. Summarize what WAS accomplished across all the executed steps
+3. Identify what remains to be done to complete the objective
+4. Organize the information to provide maximum value despite being incomplete
+
+Focus on being transparent about the incomplete status while providing as much value as possible.
 </fastagent:instruction>
 """
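The new SYNTHESIZE_INCOMPLETE_PLAN_TEMPLATE takes two placeholders, {plan_result} and {max_iterations}, and the orchestrator renders it in the iteration-limit branch shown earlier. A minimal sketch of that rendering, with a stand-in string where the orchestrator would pass format_plan_result(plan_result):

```python
from mcp_agent.workflows.orchestrator.orchestrator_prompts import (
    SYNTHESIZE_INCOMPLETE_PLAN_TEMPLATE,
)

# Stand-in for format_plan_result(plan_result); the real call formats the executed steps.
formatted_steps = "Step 1: gather data - done\nStep 2: write summary - not started"

synthesis_prompt = SYNTHESIZE_INCOMPLETE_PLAN_TEMPLATE.format(
    plan_result=formatted_steps,
    max_iterations=10,
)
# The orchestrator then sends this prompt to the planner with max_iterations capped at 1.
print(synthesis_prompt)
```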
{fast_agent_mcp-0.0.16.dist-info → fast_agent_mcp-0.1.1.dist-info}/WHEEL: file without changes
{fast_agent_mcp-0.0.16.dist-info → fast_agent_mcp-0.1.1.dist-info}/entry_points.txt: file without changes
{fast_agent_mcp-0.0.16.dist-info → fast_agent_mcp-0.1.1.dist-info}/licenses/LICENSE: file without changes