fast-agent-mcp 0.1.11__py3-none-any.whl → 0.1.13__py3-none-any.whl

This diff compares the contents of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registry.
Files changed (131)
  1. {fast_agent_mcp-0.1.11.dist-info → fast_agent_mcp-0.1.13.dist-info}/METADATA +1 -1
  2. fast_agent_mcp-0.1.13.dist-info/RECORD +164 -0
  3. mcp_agent/agents/agent.py +37 -102
  4. mcp_agent/app.py +16 -27
  5. mcp_agent/cli/commands/bootstrap.py +22 -52
  6. mcp_agent/cli/commands/config.py +4 -4
  7. mcp_agent/cli/commands/setup.py +11 -26
  8. mcp_agent/cli/main.py +6 -9
  9. mcp_agent/cli/terminal.py +2 -2
  10. mcp_agent/config.py +1 -5
  11. mcp_agent/context.py +13 -26
  12. mcp_agent/context_dependent.py +3 -7
  13. mcp_agent/core/agent_app.py +46 -122
  14. mcp_agent/core/agent_types.py +29 -2
  15. mcp_agent/core/agent_utils.py +3 -5
  16. mcp_agent/core/decorators.py +6 -14
  17. mcp_agent/core/enhanced_prompt.py +25 -52
  18. mcp_agent/core/error_handling.py +1 -1
  19. mcp_agent/core/exceptions.py +8 -8
  20. mcp_agent/core/factory.py +30 -72
  21. mcp_agent/core/fastagent.py +48 -88
  22. mcp_agent/core/mcp_content.py +10 -19
  23. mcp_agent/core/prompt.py +8 -15
  24. mcp_agent/core/proxies.py +34 -25
  25. mcp_agent/core/request_params.py +46 -0
  26. mcp_agent/core/types.py +6 -6
  27. mcp_agent/core/validation.py +16 -16
  28. mcp_agent/executor/decorator_registry.py +11 -23
  29. mcp_agent/executor/executor.py +8 -17
  30. mcp_agent/executor/task_registry.py +2 -4
  31. mcp_agent/executor/temporal.py +28 -74
  32. mcp_agent/executor/workflow.py +3 -5
  33. mcp_agent/executor/workflow_signal.py +17 -29
  34. mcp_agent/human_input/handler.py +4 -9
  35. mcp_agent/human_input/types.py +2 -3
  36. mcp_agent/logging/events.py +1 -5
  37. mcp_agent/logging/json_serializer.py +7 -6
  38. mcp_agent/logging/listeners.py +20 -23
  39. mcp_agent/logging/logger.py +15 -17
  40. mcp_agent/logging/rich_progress.py +10 -8
  41. mcp_agent/logging/tracing.py +4 -6
  42. mcp_agent/logging/transport.py +24 -24
  43. mcp_agent/mcp/gen_client.py +4 -12
  44. mcp_agent/mcp/interfaces.py +107 -88
  45. mcp_agent/mcp/mcp_agent_client_session.py +11 -19
  46. mcp_agent/mcp/mcp_agent_server.py +8 -10
  47. mcp_agent/mcp/mcp_aggregator.py +49 -122
  48. mcp_agent/mcp/mcp_connection_manager.py +16 -37
  49. mcp_agent/mcp/prompt_message_multipart.py +12 -18
  50. mcp_agent/mcp/prompt_serialization.py +13 -38
  51. mcp_agent/mcp/prompts/prompt_load.py +99 -0
  52. mcp_agent/mcp/prompts/prompt_server.py +21 -128
  53. mcp_agent/mcp/prompts/prompt_template.py +20 -42
  54. mcp_agent/mcp/resource_utils.py +8 -17
  55. mcp_agent/mcp/sampling.py +62 -64
  56. mcp_agent/mcp/stdio.py +11 -8
  57. mcp_agent/mcp_server/__init__.py +1 -1
  58. mcp_agent/mcp_server/agent_server.py +10 -17
  59. mcp_agent/mcp_server_registry.py +13 -35
  60. mcp_agent/resources/examples/data-analysis/analysis-campaign.py +1 -1
  61. mcp_agent/resources/examples/data-analysis/analysis.py +1 -1
  62. mcp_agent/resources/examples/data-analysis/slides.py +110 -0
  63. mcp_agent/resources/examples/internal/agent.py +2 -1
  64. mcp_agent/resources/examples/internal/job.py +2 -1
  65. mcp_agent/resources/examples/internal/prompt_category.py +1 -1
  66. mcp_agent/resources/examples/internal/prompt_sizing.py +3 -5
  67. mcp_agent/resources/examples/internal/sizer.py +2 -1
  68. mcp_agent/resources/examples/internal/social.py +2 -1
  69. mcp_agent/resources/examples/mcp_researcher/researcher-eval.py +1 -1
  70. mcp_agent/resources/examples/prompting/__init__.py +1 -1
  71. mcp_agent/resources/examples/prompting/agent.py +2 -1
  72. mcp_agent/resources/examples/prompting/image_server.py +5 -11
  73. mcp_agent/resources/examples/researcher/researcher-eval.py +1 -1
  74. mcp_agent/resources/examples/researcher/researcher-imp.py +3 -4
  75. mcp_agent/resources/examples/researcher/researcher.py +2 -1
  76. mcp_agent/resources/examples/workflows/agent_build.py +2 -1
  77. mcp_agent/resources/examples/workflows/chaining.py +2 -1
  78. mcp_agent/resources/examples/workflows/evaluator.py +2 -1
  79. mcp_agent/resources/examples/workflows/human_input.py +2 -1
  80. mcp_agent/resources/examples/workflows/orchestrator.py +2 -1
  81. mcp_agent/resources/examples/workflows/parallel.py +2 -1
  82. mcp_agent/resources/examples/workflows/router.py +2 -1
  83. mcp_agent/resources/examples/workflows/sse.py +1 -1
  84. mcp_agent/telemetry/usage_tracking.py +2 -1
  85. mcp_agent/ui/console_display.py +17 -41
  86. mcp_agent/workflows/embedding/embedding_base.py +1 -4
  87. mcp_agent/workflows/embedding/embedding_cohere.py +2 -2
  88. mcp_agent/workflows/embedding/embedding_openai.py +4 -13
  89. mcp_agent/workflows/evaluator_optimizer/evaluator_optimizer.py +23 -57
  90. mcp_agent/workflows/intent_classifier/intent_classifier_base.py +5 -8
  91. mcp_agent/workflows/intent_classifier/intent_classifier_embedding.py +7 -11
  92. mcp_agent/workflows/intent_classifier/intent_classifier_embedding_cohere.py +4 -8
  93. mcp_agent/workflows/intent_classifier/intent_classifier_embedding_openai.py +4 -8
  94. mcp_agent/workflows/intent_classifier/intent_classifier_llm.py +11 -22
  95. mcp_agent/workflows/intent_classifier/intent_classifier_llm_anthropic.py +3 -3
  96. mcp_agent/workflows/intent_classifier/intent_classifier_llm_openai.py +4 -6
  97. mcp_agent/workflows/llm/anthropic_utils.py +8 -29
  98. mcp_agent/workflows/llm/augmented_llm.py +94 -332
  99. mcp_agent/workflows/llm/augmented_llm_anthropic.py +43 -76
  100. mcp_agent/workflows/llm/augmented_llm_openai.py +46 -100
  101. mcp_agent/workflows/llm/augmented_llm_passthrough.py +42 -20
  102. mcp_agent/workflows/llm/augmented_llm_playback.py +8 -6
  103. mcp_agent/workflows/llm/memory.py +103 -0
  104. mcp_agent/workflows/llm/model_factory.py +9 -21
  105. mcp_agent/workflows/llm/openai_utils.py +1 -1
  106. mcp_agent/workflows/llm/prompt_utils.py +39 -27
  107. mcp_agent/workflows/llm/providers/multipart_converter_anthropic.py +246 -184
  108. mcp_agent/workflows/llm/providers/multipart_converter_openai.py +212 -202
  109. mcp_agent/workflows/llm/providers/openai_multipart.py +19 -61
  110. mcp_agent/workflows/llm/providers/sampling_converter_anthropic.py +11 -212
  111. mcp_agent/workflows/llm/providers/sampling_converter_openai.py +13 -215
  112. mcp_agent/workflows/llm/sampling_converter.py +117 -0
  113. mcp_agent/workflows/llm/sampling_format_converter.py +12 -29
  114. mcp_agent/workflows/orchestrator/orchestrator.py +24 -67
  115. mcp_agent/workflows/orchestrator/orchestrator_models.py +14 -40
  116. mcp_agent/workflows/parallel/fan_in.py +17 -47
  117. mcp_agent/workflows/parallel/fan_out.py +6 -12
  118. mcp_agent/workflows/parallel/parallel_llm.py +9 -26
  119. mcp_agent/workflows/router/router_base.py +29 -59
  120. mcp_agent/workflows/router/router_embedding.py +11 -25
  121. mcp_agent/workflows/router/router_embedding_cohere.py +2 -2
  122. mcp_agent/workflows/router/router_embedding_openai.py +2 -2
  123. mcp_agent/workflows/router/router_llm.py +12 -28
  124. mcp_agent/workflows/swarm/swarm.py +20 -48
  125. mcp_agent/workflows/swarm/swarm_anthropic.py +2 -2
  126. mcp_agent/workflows/swarm/swarm_openai.py +2 -2
  127. fast_agent_mcp-0.1.11.dist-info/RECORD +0 -160
  128. mcp_agent/workflows/llm/llm_selector.py +0 -345
  129. {fast_agent_mcp-0.1.11.dist-info → fast_agent_mcp-0.1.13.dist-info}/WHEEL +0 -0
  130. {fast_agent_mcp-0.1.11.dist-info → fast_agent_mcp-0.1.13.dist-info}/entry_points.txt +0 -0
  131. {fast_agent_mcp-0.1.11.dist-info → fast_agent_mcp-0.1.13.dist-info}/licenses/LICENSE +0 -0

mcp_agent/workflows/llm/sampling_converter.py
@@ -0,0 +1,117 @@
+ """
+ Simplified converter between MCP sampling types and PromptMessageMultipart.
+ This replaces the more complex provider-specific converters with direct conversions.
+ """
+
+ from typing import List, Optional
+
+ from mcp.types import (
+     CreateMessageRequestParams,
+     CreateMessageResult,
+     SamplingMessage,
+     StopReason,
+     TextContent,
+ )
+
+ from mcp_agent.mcp.interfaces import RequestParams
+ from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart
+
+
+ class SamplingConverter:
+     """
+     Simplified converter between MCP sampling types and internal LLM types.
+
+     This handles converting between:
+     - SamplingMessage and PromptMessageMultipart
+     - CreateMessageRequestParams and RequestParams
+     - LLM responses and CreateMessageResult
+     """
+
+     @staticmethod
+     def sampling_message_to_prompt_message(
+         message: SamplingMessage,
+     ) -> PromptMessageMultipart:
+         """
+         Convert a SamplingMessage to a PromptMessageMultipart.
+
+         Args:
+             message: MCP SamplingMessage to convert
+
+         Returns:
+             PromptMessageMultipart suitable for use with LLMs
+         """
+         return PromptMessageMultipart(role=message.role, content=[message.content])
+
+     @staticmethod
+     def extract_request_params(params: CreateMessageRequestParams) -> RequestParams:
+         """
+         Extract parameters from CreateMessageRequestParams into RequestParams.
+
+         Args:
+             params: MCP request parameters
+
+         Returns:
+             RequestParams suitable for use with LLM.generate_prompt
+         """
+         return RequestParams(
+             maxTokens=params.maxTokens,
+             systemPrompt=params.systemPrompt,
+             temperature=params.temperature,
+             stopSequences=params.stopSequences,
+             modelPreferences=params.modelPreferences,
+             # Add any other parameters needed
+         )
+
+     @staticmethod
+     def create_message_result(response: str, model: str, stop_reason: StopReason = "endTurn") -> CreateMessageResult:
+         """
+         Create a CreateMessageResult from an LLM response.
+
+         Args:
+             response: Text response from the LLM
+             model: Model identifier
+             stop_reason: Reason generation stopped
+
+         Returns:
+             CreateMessageResult suitable for returning to MCP
+         """
+         return CreateMessageResult(
+             role="assistant",
+             content=TextContent(type="text", text=response),
+             model=model,
+             stopReason=stop_reason,
+         )
+
+     @staticmethod
+     def error_result(error_message: str, model: Optional[str] = None) -> CreateMessageResult:
+         """
+         Create an error result.
+
+         Args:
+             error_message: Error message text
+             model: Optional model identifier
+
+         Returns:
+             CreateMessageResult with error information
+         """
+         return CreateMessageResult(
+             role="assistant",
+             content=TextContent(type="text", text=error_message),
+             model=model or "unknown",
+             stopReason="error",
+         )
+
+     @staticmethod
+     def convert_messages(
+         messages: List[SamplingMessage],
+     ) -> List[PromptMessageMultipart]:
+         """
+         Convert multiple SamplingMessages to PromptMessageMultipart objects.
+
+         Args:
+             messages: List of SamplingMessages to convert
+
+         Returns:
+             List of PromptMessageMultipart objects, each with a single content item
+         """
+         return [SamplingConverter.sampling_message_to_prompt_message(msg) for msg in messages]
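
As a rough sketch of how the new converter is meant to be used in a sampling handler (the handler itself and the llm object are illustrative assumptions; only the SamplingConverter calls come from this file):

    # Hypothetical handler showing the SamplingConverter call pattern; the `llm`
    # object and its generate_str call are assumptions, not code from this diff.
    from mcp.types import CreateMessageRequestParams, CreateMessageResult

    from mcp_agent.workflows.llm.sampling_converter import SamplingConverter


    async def handle_sampling(llm, params: CreateMessageRequestParams) -> CreateMessageResult:
        # MCP sampling messages -> internal multipart prompt messages
        messages = SamplingConverter.convert_messages(params.messages)
        # MCP request parameters -> internal RequestParams
        request_params = SamplingConverter.extract_request_params(params)
        try:
            text = await llm.generate_str(message=messages, request_params=request_params)
            return SamplingConverter.create_message_result(response=text, model="example-model")
        except Exception as exc:
            # Surface failures as an MCP error result rather than raising
            return SamplingConverter.error_result(error_message=str(exc))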

mcp_agent/workflows/llm/sampling_format_converter.py
@@ -1,39 +1,22 @@
- from typing import Generic, List, Protocol, TypeVar
+ from typing import Generic, Protocol, TypeVar
 
- from mcp import CreateMessageResult, SamplingMessage
+ from mcp.types import PromptMessage
 
- # Define type variables here instead of importing from augmented_llm
- MessageParamT = TypeVar("MessageParamT")
- """A type representing an input message to an LLM."""
+ # Define covariant type variables
+ MessageParamT_co = TypeVar("MessageParamT_co", covariant=True)
+ MessageT_co = TypeVar("MessageT_co", covariant=True)
 
- MessageT = TypeVar("MessageT")
- """A type representing an output message from an LLM."""
 
-
- class SamplingFormatConverter(Protocol, Generic[MessageParamT, MessageT]):
+ class ProviderFormatConverter(Protocol, Generic[MessageParamT_co, MessageT_co]):
      """Conversions between LLM provider and MCP types"""
 
      @classmethod
-     def to_sampling_result(cls, result: MessageT) -> CreateMessageResult:
-         """Convert an LLM response to an MCP message result type."""
-
-     @classmethod
-     def from_sampling_result(cls, result: CreateMessageResult) -> MessageT:
-         """Convert an MCP message result to an LLM response type."""
-
-     @classmethod
-     def to_sampling_message(cls, param: MessageParamT) -> SamplingMessage:
-         """Convert an LLM input to an MCP message (SamplingMessage) type."""
-
-     @classmethod
-     def from_sampling_message(cls, param: SamplingMessage) -> MessageParamT:
-         """Convert an MCP message (SamplingMessage) to an LLM input type."""
-
-     @classmethod
-     def from_prompt_message(cls, message) -> MessageParamT:
+     def from_prompt_message(cls, message: PromptMessage) -> MessageParamT_co:
          """Convert an MCP PromptMessage to a provider-specific message parameter."""
+         ...
 
 
- def typed_dict_extras(d: dict, exclude: List[str]):
-     extras = {k: v for k, v in d.items() if k not in exclude}
-     return extras
+ class BasicFormatConverter(ProviderFormatConverter[PromptMessage, PromptMessage]):
+     @classmethod
+     def from_prompt_message(cls, message: PromptMessage) -> PromptMessage:
+         return message
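
For illustration, a minimal sketch of what a provider-side implementation of the new protocol could look like; the chat-completions-style dict shape is an assumption for the example, not code from this release (the shipped converters live under mcp_agent/workflows/llm/providers/):

    # Hypothetical provider converter following the new ProviderFormatConverter protocol.
    # The dict message shape below is an assumption for the sketch only.
    from mcp.types import PromptMessage, TextContent

    from mcp_agent.workflows.llm.sampling_format_converter import ProviderFormatConverter


    class ChatDictConverter(ProviderFormatConverter[dict, dict]):
        @classmethod
        def from_prompt_message(cls, message: PromptMessage) -> dict:
            # Keep only the text portion; a real converter would also handle images/resources
            text = message.content.text if isinstance(message.content, TextContent) else ""
            return {"role": message.role, "content": text}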

mcp_agent/workflows/orchestrator/orchestrator.py
@@ -3,15 +3,16 @@ Orchestrator implementation for MCP Agent applications.
  """
 
  from typing import (
+     TYPE_CHECKING,
      List,
      Literal,
      Optional,
-     TYPE_CHECKING,
      Type,
  )
 
  from mcp_agent.agents.agent import Agent
  from mcp_agent.event_progress import ProgressAction
+ from mcp_agent.logging.logger import get_logger
  from mcp_agent.workflows.llm.augmented_llm import (
      AugmentedLLM,
      MessageParamT,
@@ -20,14 +21,14 @@ from mcp_agent.workflows.llm.augmented_llm import (
      RequestParams,
  )
  from mcp_agent.workflows.orchestrator.orchestrator_models import (
-     format_plan_result,
-     format_step_result_text,
      NextStep,
      Plan,
      PlanResult,
      Step,
      StepResult,
      TaskWithResult,
+     format_plan_result,
+     format_step_result_text,
  )
  from mcp_agent.workflows.orchestrator.orchestrator_prompts import (
      FULL_PLAN_PROMPT_TEMPLATE,
@@ -36,7 +37,6 @@ from mcp_agent.workflows.orchestrator.orchestrator_prompts import (
      SYNTHESIZE_PLAN_PROMPT_TEMPLATE,
      TASK_PROMPT_TEMPLATE,
  )
- from mcp_agent.logging.logger import get_logger
 
  if TYPE_CHECKING:
      from mcp_agent.context import Context
@@ -73,7 +73,7 @@ class Orchestrator(AugmentedLLM[MessageParamT, MessageT]):
          plan_type: Literal["full", "iterative"] = "full",
          context: Optional["Context"] = None,
          **kwargs,
-     ):
+     ) -> None:
          """
          Args:
              name: Name of the orchestrator workflow
@@ -171,17 +171,13 @@ class Orchestrator(AugmentedLLM[MessageParamT, MessageT]):
      ) -> ModelT:
          return None
 
-     async def execute(
-         self, objective: str, request_params: RequestParams | None = None
-     ) -> PlanResult:
+     async def execute(self, objective: str, request_params: RequestParams | None = None) -> PlanResult:
          """Execute task with result chaining between steps"""
          iterations = 0
          total_steps_executed = 0
 
          params = self.get_request_params(request_params)
-         max_steps = getattr(
-             params, "max_steps", params.max_iterations * 5
-         )  # Default to 5× max_iterations
+         max_steps = getattr(params, "max_steps", params.max_iterations * 5)  # Default to 5× max_iterations
 
          # Single progress event for orchestration start
          model = await self.select_model(params) or "unknown-model"
@@ -198,24 +194,18 @@ class Orchestrator(AugmentedLLM[MessageParamT, MessageT]):
          )
 
          plan_result = PlanResult(objective=objective, step_results=[])
-         plan_result.max_iterations_reached = (
-             False  # Add a flag to track if we hit the limit
-         )
+         plan_result.max_iterations_reached = False  # Add a flag to track if we hit the limit
 
          while iterations < params.max_iterations:
              if self.plan_type == "iterative":
                  # Get next plan/step
-                 next_step = await self._get_next_step(
-                     objective=objective, plan_result=plan_result, request_params=params
-                 )
+                 next_step = await self._get_next_step(objective=objective, plan_result=plan_result, request_params=params)
                  logger.debug(f"Iteration {iterations}: Iterative plan:", data=next_step)
                  plan = Plan(steps=[next_step], is_complete=next_step.is_complete)
                  # Validate agent names in the plan early
                  self._validate_agent_names(plan)
              elif self.plan_type == "full":
-                 plan = await self._get_full_plan(
-                     objective=objective, plan_result=plan_result, request_params=params
-                 )
+                 plan = await self._get_full_plan(objective=objective, plan_result=plan_result, request_params=params)
                  logger.debug(f"Iteration {iterations}: Full Plan:", data=plan)
                  # Validate agent names in the plan early
                  self._validate_agent_names(plan)
@@ -230,9 +220,7 @@ class Orchestrator(AugmentedLLM[MessageParamT, MessageT]):
 
                  # Synthesize final result into a single message
                  # Use the structured XML format for better context
-                 synthesis_prompt = SYNTHESIZE_PLAN_PROMPT_TEMPLATE.format(
-                     plan_result=format_plan_result(plan_result)
-                 )
+                 synthesis_prompt = SYNTHESIZE_PLAN_PROMPT_TEMPLATE.format(plan_result=format_plan_result(plan_result))
 
                  # Use planner directly - planner already has PLANNING verb
                  plan_result.result = await self.planner.generate_str(
@@ -247,9 +235,7 @@ class Orchestrator(AugmentedLLM[MessageParamT, MessageT]):
              for step in plan.steps:
                  # Check if we've hit the step limit
                  if total_steps_executed >= max_steps:
-                     self.logger.warning(
-                         f"Reached maximum step limit ({max_steps}) without completing objective."
-                     )
+                     self.logger.warning(f"Reached maximum step limit ({max_steps}) without completing objective.")
                      plan_result.max_steps_reached = True
                      break
 
@@ -263,15 +249,10 @@ class Orchestrator(AugmentedLLM[MessageParamT, MessageT]):
                  total_steps_executed += 1
 
              # Check if we need to break from the main loop due to hitting max_steps
-             if (
-                 hasattr(plan_result, "max_steps_reached")
-                 and plan_result.max_steps_reached
-             ):
+             if hasattr(plan_result, "max_steps_reached") and plan_result.max_steps_reached:
                  break
 
-             logger.debug(
-                 f"Iteration {iterations}: Intermediate plan result:", data=plan_result
-             )
+             logger.debug(f"Iteration {iterations}: Intermediate plan result:", data=plan_result)
 
              # Check for diminishing returns
              if iterations > 2 and len(plan.steps) <= 1:
@@ -289,9 +270,7 @@ class Orchestrator(AugmentedLLM[MessageParamT, MessageT]):
 
          # Check if we hit iteration limits without completing
          if iterations >= params.max_iterations and not plan_result.is_complete:
-             self.logger.warning(
-                 f"Failed to complete in {params.max_iterations} iterations."
-             )
+             self.logger.warning(f"Failed to complete in {params.max_iterations} iterations.")
              # Mark that we hit the iteration limit
              plan_result.max_iterations_reached = True
 
@@ -303,15 +282,11 @@ class Orchestrator(AugmentedLLM[MessageParamT, MessageT]):
          else:
              # Either plan is complete or we had diminishing returns (which we mark as complete)
              if not plan_result.is_complete:
-                 self.logger.info(
-                     "Plan terminated due to diminishing returns, marking as complete"
-                 )
+                 self.logger.info("Plan terminated due to diminishing returns, marking as complete")
                  plan_result.is_complete = True
 
              # Use standard template for complete plans
-             synthesis_prompt = SYNTHESIZE_PLAN_PROMPT_TEMPLATE.format(
-                 plan_result=format_plan_result(plan_result)
-             )
+             synthesis_prompt = SYNTHESIZE_PLAN_PROMPT_TEMPLATE.format(plan_result=format_plan_result(plan_result))
 
              # Generate the final synthesis with the appropriate template
              plan_result.result = await self.planner.generate_str(
@@ -341,9 +316,7 @@ class Orchestrator(AugmentedLLM[MessageParamT, MessageT]):
              # Make sure we're using a valid agent name
              agent = self.agents.get(task.agent)
              if not agent:
-                 self.logger.error(
-                     f"AGENT VALIDATION ERROR: No agent found matching '{task.agent}'. Available agents: {list(self.agents.keys())}"
-                 )
+                 self.logger.error(f"AGENT VALIDATION ERROR: No agent found matching '{task.agent}'. Available agents: {list(self.agents.keys())}")
                  error_tasks.append(
                      (
                          task,
@@ -425,20 +398,14 @@ class Orchestrator(AugmentedLLM[MessageParamT, MessageT]):
          # Create clear plan status indicator for the template
          plan_status = "Plan Status: Not Started"
          if plan_result.is_complete:
-             plan_status = (
-                 "Plan Status: Complete"
-                 if plan_result.is_complete
-                 else "Plan Status: In Progress"
-             )
+             plan_status = "Plan Status: Complete" if plan_result.is_complete else "Plan Status: In Progress"
 
          # Fix the iteration counting display
          max_iterations = params.max_iterations
          # Simplified iteration counting logic
          current_iteration = len(plan_result.step_results)
          current_iteration = min(current_iteration, max_iterations - 1)  # Cap at max-1
-         iterations_remaining = max(
-             0, max_iterations - current_iteration - 1
-         )  # Ensure non-negative
+         iterations_remaining = max(0, max_iterations - current_iteration - 1)  # Ensure non-negative
          iterations_info = f"Planning Budget: Iteration {current_iteration + 1} of {max_iterations} (with {iterations_remaining} remaining)"
 
          prompt = FULL_PLAN_PROMPT_TEMPLATE.format(
@@ -488,18 +455,12 @@ class Orchestrator(AugmentedLLM[MessageParamT, MessageT]):
 
          # Format agents without numeric prefixes for cleaner XML
          # FIX: Iterate over agent names instead of agent objects
-         agents = "\n".join(
-             [self._format_agent_info(agent_name) for agent_name in self.agents.keys()]
-         )
+         agents = "\n".join([self._format_agent_info(agent_name) for agent_name in self.agents.keys()])
 
          # Create clear plan status indicator for the template
          plan_status = "Plan Status: Not Started"
          if plan_result:
-             plan_status = (
-                 "Plan Status: Complete"
-                 if plan_result.is_complete
-                 else "Plan Status: In Progress"
-             )
+             plan_status = "Plan Status: Complete" if plan_result.is_complete else "Plan Status: In Progress"
 
          # Add max_iterations info for the LLM
          max_iterations = params.max_iterations
@@ -516,9 +477,7 @@ class Orchestrator(AugmentedLLM[MessageParamT, MessageT]):
          )
 
          # Get raw JSON response from LLM
-         return await self.planner.generate_structured(
-             message=prompt, request_params=params, response_model=NextStep
-         )
+         return await self.planner.generate_structured(message=prompt, request_params=params, response_model=NextStep)
 
      def _format_server_info(self, server_name: str) -> str:
          """Format server information for display to planners using XML tags"""
@@ -573,6 +532,4 @@ class Orchestrator(AugmentedLLM[MessageParamT, MessageT]):
 
              server_info.append({"name": server_name, "description": description})
 
-         return format_agent_info(
-             agent.name, instruction, server_info if server_info else None
-         )
+         return format_agent_info(agent.name, instruction, server_info if server_info else None)
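
As a quick standalone check of the planning-budget arithmetic shown in the _get_full_plan hunk above (an illustration, not package code):

    # Reproduces the iteration-budget lines from the diff with sample values.
    max_iterations = 5
    for steps_completed in (0, 3, 7):  # stand-in for len(plan_result.step_results)
        current_iteration = min(steps_completed, max_iterations - 1)           # cap at max-1
        iterations_remaining = max(0, max_iterations - current_iteration - 1)  # never negative
        print(f"Planning Budget: Iteration {current_iteration + 1} of {max_iterations} "
              f"(with {iterations_remaining} remaining)")
    # -> Iteration 1 of 5 (with 4 remaining)
    # -> Iteration 4 of 5 (with 1 remaining)
    # -> Iteration 5 of 5 (with 0 remaining)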

mcp_agent/workflows/orchestrator/orchestrator_models.py
@@ -50,21 +50,15 @@ class Plan(BaseModel):
          description="List of steps to execute sequentially",
          default_factory=list,
      )
-     is_complete: bool = Field(
-         description="Whether the overall plan objective is complete"
-     )
+     is_complete: bool = Field(description="Whether the overall plan objective is complete")
 
 
  class TaskWithResult(Task):
      """An individual task with its result"""
 
-     result: str = Field(
-         description="Result of executing the task", default="Task completed"
-     )
+     result: str = Field(description="Result of executing the task", default="Task completed")
 
-     agent: str = Field(
-         description="Name of the agent that executed this task", default=""
-     )
+     agent: str = Field(description="Name of the agent that executed this task", default="")
 
      model_config = ConfigDict(extra="allow", arbitrary_types_allowed=True)
 
@@ -73,14 +67,10 @@ class StepResult(BaseModel):
      """Result of executing a step"""
 
      step: Step = Field(description="The step that was executed", default_factory=Step)
-     task_results: List[TaskWithResult] = Field(
-         description="Results of executing each task", default_factory=list
-     )
-     result: str = Field(
-         description="Result of executing the step", default="Step completed"
-     )
+     task_results: List[TaskWithResult] = Field(description="Results of executing each task", default_factory=list)
+     result: str = Field(description="Result of executing the step", default="Step completed")
 
-     def add_task_result(self, task_result: TaskWithResult):
+     def add_task_result(self, task_result: TaskWithResult) -> None:
          """Add a task result to this step"""
          if not isinstance(self.task_results, list):
              self.task_results = []
@@ -108,7 +98,7 @@ class PlanResult(BaseModel):
      result: str | None = None
      """Result of executing the plan"""
 
-     def add_step_result(self, step_result: StepResult):
+     def add_step_result(self, step_result: StepResult) -> None:
          """Add a step result to this plan"""
          if not isinstance(self.step_results, list):
              self.step_results = []
@@ -118,23 +108,17 @@
  class NextStep(Step):
      """Single next step in iterative planning"""
 
-     is_complete: bool = Field(
-         description="Whether the overall plan objective is complete"
-     )
+     is_complete: bool = Field(description="Whether the overall plan objective is complete")
 
 
  def format_task_result_text(task_result: TaskWithResult) -> str:
      """Format a task result as plain text for display"""
-     return TASK_RESULT_TEMPLATE.format(
-         task_description=task_result.description, task_result=task_result.result
-     )
+     return TASK_RESULT_TEMPLATE.format(task_description=task_result.description, task_result=task_result.result)
 
 
  def format_step_result_text(step_result: StepResult) -> str:
      """Format a step result as plain text for display"""
-     tasks_str = "\n".join(
-         f" - {format_task_result_text(task)}" for task in step_result.task_results
-     )
+     tasks_str = "\n".join(f" - {format_task_result_text(task)}" for task in step_result.task_results)
      return STEP_RESULT_TEMPLATE.format(
          step_description=step_result.step.description,
          step_result=step_result.result,
@@ -145,10 +129,7 @@ def format_step_result_text(step_result: StepResult) -> str:
  def format_plan_result_text(plan_result: PlanResult) -> str:
      """Format the full plan execution state as plain text for display"""
      steps_str = (
-         "\n\n".join(
-             f"{i + 1}:\n{format_step_result_text(step)}"
-             for i, step in enumerate(plan_result.step_results)
-         )
+         "\n\n".join(f"{i + 1}:\n{format_step_result_text(step)}" for i, step in enumerate(plan_result.step_results))
          if plan_result.step_results
          else "No steps executed yet"
      )
@@ -166,13 +147,8 @@ def format_task_result_xml(task_result: TaskWithResult) -> str:
 
      return format_fastagent_tag(
          "task-result",
-         f"\n<fastagent:description>{task_result.description}</fastagent:description>\n"
-         f"<fastagent:result>{task_result.result}</fastagent:result>\n",
-         {
-             "description": task_result.description[:50] + "..."
-             if len(task_result.description) > 50
-             else task_result.description
-         },
+         f"\n<fastagent:description>{task_result.description}</fastagent:description>\n" f"<fastagent:result>{task_result.result}</fastagent:result>\n",
+         {"description": task_result.description[:50] + "..." if len(task_result.description) > 50 else task_result.description},
      )
 
 
@@ -221,9 +197,7 @@ def format_plan_result(plan_result: PlanResult) -> str:
      else:
          # No steps executed yet
          progress_content = (
-             f"{objective_tag}\n"
-             f"<fastagent:steps>No steps executed yet</fastagent:steps>\n"
-             f"<fastagent:status>Not Started</fastagent:status>\n"
+             f"{objective_tag}\n" f"<fastagent:steps>No steps executed yet</fastagent:steps>\n" f"<fastagent:status>Not Started</fastagent:status>\n"
          )
 
      return format_fastagent_tag("progress", progress_content)
 
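
To show how the result models above fit together, here is a hedged sketch; the description field passed to TaskWithResult (inherited from Task) is an assumption based on the formatting helpers, while the other fields and methods come from the definitions shown in this diff:

    # Sketch only: exercises the orchestrator result models and text formatter.
    # Assumption: Task (the TaskWithResult base) accepts a `description` field.
    from mcp_agent.workflows.orchestrator.orchestrator_models import (
        PlanResult,
        StepResult,
        TaskWithResult,
        format_plan_result_text,
    )

    plan_result = PlanResult(objective="Summarise the quarterly report", step_results=[])

    step_result = StepResult()  # step, task_results, and result all have defaults
    step_result.add_task_result(
        TaskWithResult(description="Extract key figures", agent="analyst", result="3 figures extracted")
    )

    plan_result.add_step_result(step_result)
    print(format_plan_result_text(plan_result))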