fast-agent-mcp 0.0.15__py3-none-any.whl → 0.1.0__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only and reflects the package contents as they appear in their respective public registries.
Files changed (27)
  1. {fast_agent_mcp-0.0.15.dist-info → fast_agent_mcp-0.1.0.dist-info}/METADATA +121 -21
  2. {fast_agent_mcp-0.0.15.dist-info → fast_agent_mcp-0.1.0.dist-info}/RECORD +27 -25
  3. mcp_agent/cli/__main__.py +3 -0
  4. mcp_agent/cli/commands/bootstrap.py +1 -1
  5. mcp_agent/cli/commands/setup.py +4 -1
  6. mcp_agent/cli/main.py +13 -3
  7. mcp_agent/config.py +19 -11
  8. mcp_agent/core/agent_app.py +1 -1
  9. mcp_agent/core/enhanced_prompt.py +13 -5
  10. mcp_agent/core/fastagent.py +87 -49
  11. mcp_agent/resources/examples/data-analysis/analysis-campaign.py +188 -0
  12. mcp_agent/resources/examples/data-analysis/analysis.py +26 -0
  13. mcp_agent/resources/examples/workflows/evaluator.py +3 -3
  14. mcp_agent/resources/examples/workflows/orchestrator.py +1 -1
  15. mcp_agent/resources/examples/workflows/parallel.py +0 -4
  16. mcp_agent/workflows/evaluator_optimizer/evaluator_optimizer.py +229 -91
  17. mcp_agent/workflows/llm/augmented_llm_anthropic.py +16 -2
  18. mcp_agent/workflows/llm/augmented_llm_openai.py +13 -1
  19. mcp_agent/workflows/llm/prompt_utils.py +137 -0
  20. mcp_agent/workflows/orchestrator/orchestrator.py +252 -50
  21. mcp_agent/workflows/orchestrator/orchestrator_models.py +81 -9
  22. mcp_agent/workflows/orchestrator/orchestrator_prompts.py +112 -42
  23. mcp_agent/workflows/router/router_base.py +113 -21
  24. mcp_agent/workflows/router/router_llm.py +19 -5
  25. {fast_agent_mcp-0.0.15.dist-info → fast_agent_mcp-0.1.0.dist-info}/WHEEL +0 -0
  26. {fast_agent_mcp-0.0.15.dist-info → fast_agent_mcp-0.1.0.dist-info}/entry_points.txt +0 -0
  27. {fast_agent_mcp-0.0.15.dist-info → fast_agent_mcp-0.1.0.dist-info}/licenses/LICENSE +0 -0
mcp_agent/workflows/evaluator_optimizer/evaluator_optimizer.py
@@ -10,7 +10,7 @@ from mcp_agent.workflows.llm.augmented_llm import (
     ModelT,
     RequestParams,
 )
-from mcp_agent.agents.agent import Agent
+from mcp_agent.agents.agent import Agent, AgentConfig
 from mcp_agent.logging.logger import get_logger
 
 if TYPE_CHECKING:
@@ -64,6 +64,25 @@ class EvaluatorOptimizerLLM(AugmentedLLM[MessageParamT, MessageT]):
     - Document writing requiring multiple revisions
     """
 
+    def _initialize_default_params(self, kwargs: dict) -> RequestParams:
+        """Initialize default parameters using the workflow's settings."""
+        return RequestParams(
+            modelPreferences=self.model_preferences,
+            systemPrompt=self.instruction,
+            parallel_tool_calls=True,
+            max_iterations=10,
+            use_history=self.generator_use_history,  # Use generator's history setting
+        )
+
+    def _init_request_params(self):
+        """Initialize request parameters for both generator and evaluator components."""
+        # Set up workflow's default params based on generator's history setting
+        self.default_request_params = self._initialize_default_params({})
+
+        # Ensure evaluator's request params have history disabled
+        if hasattr(self.evaluator_llm, "default_request_params"):
+            self.evaluator_llm.default_request_params.use_history = False
+
     def __init__(
         self,
         generator: Agent | AugmentedLLM,
@@ -73,6 +92,8 @@ class EvaluatorOptimizerLLM(AugmentedLLM[MessageParamT, MessageT]):
         llm_factory: Callable[[Agent], AugmentedLLM]
         | None = None,  # TODO: Remove legacy - factory should only be needed for str evaluator
         context: Optional["Context"] = None,
+        name: Optional[str] = None,  # Allow overriding the name
+        instruction: Optional[str] = None,  # Allow overriding the instruction
     ):
         """
         Initialize the evaluator-optimizer workflow.
@@ -87,16 +108,50 @@ class EvaluatorOptimizerLLM(AugmentedLLM[MessageParamT, MessageT]):
             min_rating: Minimum acceptable quality rating
             max_refinements: Maximum refinement iterations
             llm_factory: Optional factory to create LLMs from agents
+            name: Optional name for the workflow (defaults to generator's name)
+            instruction: Optional instruction (defaults to generator's instruction)
+
+        Note on History Management:
+            This workflow manages two distinct history contexts:
+            1. Generator History: Controlled by the generator's use_history setting. When False,
+               each refinement iteration starts fresh without previous context.
+            2. Evaluator History: Always disabled as each evaluation should be independent
+               and based solely on the current response.
         """
-        super().__init__(context=context)
-
-        # Set up the optimizer
-        self.name = generator.name
+        # Set up initial instance attributes - allow name override
+        self.name = name or generator.name
         self.llm_factory = llm_factory
         self.generator = generator
         self.evaluator = evaluator
+        self.min_rating = min_rating
+        self.max_refinements = max_refinements
+
+        # Determine generator's history setting before super().__init__
+        if isinstance(generator, Agent):
+            self.generator_use_history = generator.config.use_history
+        elif isinstance(generator, AugmentedLLM):
+            if hasattr(generator, "aggregator") and isinstance(
+                generator.aggregator, Agent
+            ):
+                self.generator_use_history = generator.aggregator.config.use_history
+            else:
+                self.generator_use_history = getattr(
+                    generator,
+                    "use_history",
+                    getattr(generator.default_request_params, "use_history", False),
+                )
+        else:
+            raise ValueError(f"Unsupported optimizer type: {type(generator)}")
+
+        # Now we can call super().__init__ which will use generator_use_history
+        super().__init__(context=context, name=name or generator.name)
 
-        # TODO: Remove legacy - optimizer should always be an AugmentedLLM, no conversion needed
+        # Add a PassthroughLLM as _llm property for compatibility with Orchestrator
+        from mcp_agent.workflows.llm.augmented_llm import PassthroughLLM
+
+        self._llm = PassthroughLLM(name=f"{self.name}_passthrough", context=context)
+
+        # Set up the generator
         if isinstance(generator, Agent):
             if not llm_factory:
                 raise ValueError("llm_factory is required when using an Agent")
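The new name and instruction parameters decouple the workflow's identity from its generator. A minimal usage sketch, assuming a pre-built writer_agent (an Agent) and my_llm_factory (a Callable[[Agent], AugmentedLLM]) that are illustrative rather than part of the package:

    from mcp_agent.workflows.evaluator_optimizer.evaluator_optimizer import (
        EvaluatorOptimizerLLM,
    )

    # writer_agent and my_llm_factory are assumed to exist; a str evaluator
    # requires llm_factory so an evaluator agent can be built from it.
    workflow = EvaluatorOptimizerLLM(
        generator=writer_agent,
        evaluator="Rate clarity, accuracy, and completeness.",
        max_refinements=3,
        llm_factory=my_llm_factory,
        name="report_writer",  # overrides generator.name
        instruction="Produce polished, well-structured reports.",  # overrides the generator's instruction
    )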
@@ -109,9 +164,12 @@ class EvaluatorOptimizerLLM(AugmentedLLM[MessageParamT, MessageT]):
 
             self.aggregator = generator
             self.instruction = (
-                generator.instruction
-                if isinstance(generator.instruction, str)
-                else None
+                instruction  # Use provided instruction if any
+                or (
+                    generator.instruction
+                    if isinstance(generator.instruction, str)
+                    else None
+                )  # Fallback to generator's
             )
 
         elif isinstance(generator, AugmentedLLM):
@@ -119,46 +177,58 @@ class EvaluatorOptimizerLLM(AugmentedLLM[MessageParamT, MessageT]):
             self.aggregator = generator.aggregator
             self.instruction = generator.instruction
 
-        else:
-            raise ValueError(f"Unsupported optimizer type: {type(generator)}")
-
-        self.history = self.generator_llm.history
-
-        # Set up the evaluator
+        # Set up the evaluator - evaluations should be independent, so history is always disabled
         if isinstance(evaluator, AugmentedLLM):
             self.evaluator_llm = evaluator
-            # TODO: Remove legacy - evaluator should be either AugmentedLLM or str
+            # Override evaluator's history setting
+            if hasattr(evaluator, "default_request_params"):
+                evaluator.default_request_params.use_history = False
         elif isinstance(evaluator, Agent):
            if not llm_factory:
                raise ValueError(
                    "llm_factory is required when using an Agent evaluator"
                )
 
-            # Only create new LLM if agent doesn't have one
+            # Create evaluator with history disabled
            if hasattr(evaluator, "_llm") and evaluator._llm:
                self.evaluator_llm = evaluator._llm
+                if hasattr(self.evaluator_llm, "default_request_params"):
+                    self.evaluator_llm.default_request_params.use_history = False
            else:
+                # Force history off in config before creating LLM
+                evaluator.config.use_history = False
                self.evaluator_llm = llm_factory(agent=evaluator)
         elif isinstance(evaluator, str):
-            # If a string is passed as the evaluator, we use it as the evaluation criteria
-            # and create an evaluator agent with that instruction
            if not llm_factory:
                raise ValueError(
                    "llm_factory is required when using a string evaluator"
                )
 
-            self.evaluator_llm = llm_factory(
-                agent=Agent(name="Evaluator", instruction=evaluator)
+            # Create evaluator agent with history disabled
+            evaluator_agent = Agent(
+                name="Evaluator",
+                instruction=evaluator,
+                config=AgentConfig(
+                    name="Evaluator",
+                    instruction=evaluator,
+                    servers=[],
+                    use_history=False,  # Force history off for evaluator
+                ),
             )
+            self.evaluator_llm = llm_factory(agent=evaluator_agent)
         else:
             raise ValueError(f"Unsupported evaluator type: {type(evaluator)}")
 
-        self.min_rating = min_rating
-        self.max_refinements = max_refinements
-
-        # Track iteration history
+        # Track iteration history (for the workflow itself)
         self.refinement_history = []
 
+        # Set up workflow's default params based on generator's history setting
+        self.default_request_params = self._initialize_default_params({})
+
+        # Ensure evaluator's request params have history disabled
+        if hasattr(self.evaluator_llm, "default_request_params"):
+            self.evaluator_llm.default_request_params.use_history = False
+
     async def generate(
         self,
         message: str | MessageParamT | List[MessageParamT],
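Because the workflow now decides history handling before super().__init__ runs, the precedence is worth spelling out. A condensed sketch of the same resolution logic as a free function (illustrative only; the real code lives inline in __init__):

    def resolve_use_history(generator) -> bool:
        # Mirrors the precedence in __init__: an Agent's config first, then an
        # AugmentedLLM's aggregator config, then a use_history attribute,
        # then default_request_params, defaulting to False.
        if isinstance(generator, Agent):
            return generator.config.use_history
        if isinstance(generator, AugmentedLLM):
            if isinstance(getattr(generator, "aggregator", None), Agent):
                return generator.aggregator.config.use_history
            return getattr(
                generator,
                "use_history",
                getattr(generator.default_request_params, "use_history", False),
            )
        raise ValueError(f"Unsupported optimizer type: {type(generator)}")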
@@ -171,6 +241,9 @@ class EvaluatorOptimizerLLM(AugmentedLLM[MessageParamT, MessageT]):
         best_rating = QualityRating.POOR
         self.refinement_history = []
 
+        # Get request params with proper use_history setting
+        params = self.get_request_params(request_params)
+
         # Use a single AsyncExitStack for the entire method to maintain connections
         async with contextlib.AsyncExitStack() as stack:
             # Enter all agent contexts once at the beginning
@@ -180,22 +253,20 @@ class EvaluatorOptimizerLLM(AugmentedLLM[MessageParamT, MessageT]):
                 await stack.enter_async_context(self.evaluator)
 
             # Initial generation
-            response = await self.generator_llm.generate(
+            response = await self.generator_llm.generate_str(
                 message=message,
-                request_params=request_params,
+                request_params=params,  # Pass params which may override use_history
             )
 
             best_response = response
 
             while refinement_count < self.max_refinements:
-                logger.debug("Optimizer result:", data=response)
+                logger.debug("Generator result:", data=response)
 
                 # Evaluate current response
                 eval_prompt = self._build_eval_prompt(
                     original_request=str(message),
-                    current_response="\n".join(str(r) for r in response)
-                    if isinstance(response, list)
-                    else str(response),
+                    current_response=response,  # response is already a string
                     iteration=refinement_count,
                 )
@@ -244,22 +315,23 @@ class EvaluatorOptimizerLLM(AugmentedLLM[MessageParamT, MessageT]):
                 # Generate refined response
                 refinement_prompt = self._build_refinement_prompt(
                     original_request=str(message),
-                    current_response="\n".join(str(r) for r in response)
-                    if isinstance(response, list)
-                    else str(response),
+                    current_response=response,
                     feedback=evaluation_result,
                     iteration=refinement_count,
+                    use_history=self.generator_use_history,  # Use the generator's history setting
                 )
 
-                # No nested AsyncExitStack here either
-                response = await self.generator_llm.generate(
+                response = await self.generator_llm.generate_str(
                     message=refinement_prompt,
-                    request_params=request_params,
+                    request_params=params,  # Pass params which may override use_history
                 )
 
                 refinement_count += 1
 
-        return best_response
+        # Return the best response as a list with a single string element
+        # This makes it consistent with other AugmentedLLM implementations
+        # that return List[MessageT]
+        return [best_response]
 
     async def generate_str(
         self,
@@ -271,28 +343,8 @@ class EvaluatorOptimizerLLM(AugmentedLLM[MessageParamT, MessageT]):
             message=message,
             request_params=request_params,
         )
-
-        # Handle case where response is a single message
-        if not isinstance(response, list):
-            return str(response)
-
-        # Convert all messages to strings, handling different message types
-        result_strings = []
-        for r in response:
-            if hasattr(r, "text"):
-                result_strings.append(r.text)
-            elif hasattr(r, "content"):
-                # Handle ToolUseBlock and similar
-                if isinstance(r.content, list):
-                    # Typically content is a list of blocks
-                    result_strings.extend(str(block) for block in r.content)
-                else:
-                    result_strings.append(str(r.content))
-            else:
-                # Fallback to string representation
-                result_strings.append(str(r))
-
-        return "\n".join(result_strings)
+        # Since generate now returns [best_response], just return the first element
+        return str(response[0])
 
     async def generate_structured(
         self,
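With this change generate returns a single-element list and generate_str simply unwraps it, so both entry points expose the same best response. A hedged sketch of the caller-visible contract (workflow construction elided):

    async def demo(workflow: EvaluatorOptimizerLLM) -> None:
        # generate() now returns [best_response] for consistency with other
        # AugmentedLLM implementations that return List[MessageT].
        responses = await workflow.generate("Draft a summary of Q3 results.")
        best = responses[0]

        # generate_str() unwraps the same single-element list internally.
        text = await workflow.generate_str("Draft a summary of Q3 results.")
        print(best, text)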
@@ -316,23 +368,50 @@ class EvaluatorOptimizerLLM(AugmentedLLM[MessageParamT, MessageT]):
     ) -> str:
         """Build the evaluation prompt for the evaluator"""
         return f"""
-        Evaluate the following response based on these criteria:
-        {self.evaluator.instruction}
-
-        Original Request: {original_request}
-        Current Response (Iteration {iteration + 1}): {current_response}
-
-        Provide your evaluation as a structured response with:
-        1. A quality rating (EXCELLENT, GOOD, FAIR, or POOR)
-        2. Specific feedback and suggestions
-        3. Whether improvement is needed (true/false)
-        4. Focus areas for improvement
-
-        Rate as EXCELLENT only if no improvements are needed.
-        Rate as GOOD if only minor improvements are possible.
-        Rate as FAIR if several improvements are needed.
-        Rate as POOR if major improvements are needed.
-        """
+        You are an expert evaluator for content quality. Your task is to evaluate a response against the user's original request.
+
+        Evaluate the response for iteration {iteration + 1} and provide structured feedback on its quality and areas for improvement.
+
+        <fastagent:data>
+        <fastagent:request>
+        {original_request}
+        </fastagent:request>
+
+        <fastagent:response>
+        {current_response}
+        </fastagent:response>
+
+        <fastagent:evaluation-criteria>
+        {self.evaluator.instruction}
+        </fastagent:evaluation-criteria>
+        </fastagent:data>
+
+        <fastagent:instruction>
+        Provide a structured evaluation with the following components:
+
+        <rating>
+        Choose one: EXCELLENT, GOOD, FAIR, or POOR
+        - EXCELLENT: No improvements needed
+        - GOOD: Only minor improvements possible
+        - FAIR: Several improvements needed
+        - POOR: Major improvements needed
+        </rating>
+
+        <details>
+        Provide specific, actionable feedback and suggestions for improvement.
+        Be precise about what works well and what could be improved.
+        </details>
+
+        <needs_improvement>
+        Indicate true/false whether further improvement is needed.
+        </needs_improvement>
+
+        <focus-areas>
+        List 1-3 specific areas to focus on in the next iteration.
+        Be concrete and actionable in your recommendations.
+        </focus-areas>
+        </fastagent:instruction>
+        """
 
     def _build_refinement_prompt(
         self,
@@ -340,19 +419,78 @@ class EvaluatorOptimizerLLM(AugmentedLLM[MessageParamT, MessageT]):
         current_response: str,
         feedback: EvaluationResult,
         iteration: int,
+        use_history: bool = None,
     ) -> str:
         """Build the refinement prompt for the optimizer"""
-        return f"""
-        Improve your previous response based on the evaluation feedback.
-
-        Original Request: {original_request}
-
-        Previous Response (Iteration {iteration + 1}):
-        {current_response}
-
-        Quality Rating: {feedback.rating}
-        Feedback: {feedback.feedback}
-        Areas to Focus On: {", ".join(feedback.focus_areas)}
-
-        Generate an improved version addressing the feedback while maintaining accuracy and relevance.
-        """
+        # Get the correct history setting - use param if provided, otherwise class default
+        if use_history is None:
+            use_history = (
+                self.generator_use_history
+            )  # Use generator's setting as default
+
+        # Start with clear non-delimited instructions
+        prompt = f"""
+        You are tasked with improving a response based on expert feedback. This is iteration {iteration + 1} of the refinement process.
+
+        Your goal is to address all feedback points while maintaining accuracy and relevance to the original request.
+        """
+
+        # Add data section with all relevant information
+        prompt += """
+        <fastagent:data>
+        """
+
+        # Add request
+        prompt += f"""
+        <fastagent:request>
+        {original_request}
+        </fastagent:request>
+        """
+
+        # Only include previous response if history is not enabled
+        if not use_history:
+            prompt += f"""
+        <fastagent:previous-response>
+        {current_response}
+        </fastagent:previous-response>
+        """
+
+        # Always include the feedback
+        prompt += f"""
+        <fastagent:feedback>
+        <rating>{feedback.rating}</rating>
+        <details>{feedback.feedback}</details>
+        <focus-areas>{", ".join(feedback.focus_areas) if feedback.focus_areas else "None specified"}</focus-areas>
+        </fastagent:feedback>
+        </fastagent:data>
+        """
+
+        # Customize instruction based on history availability
+        if not use_history:
+            prompt += """
+        <fastagent:instruction>
+        Create an improved version of the response that:
+        1. Directly addresses each point in the feedback
+        2. Focuses on the specific areas mentioned for improvement
+        3. Maintains all the strengths of the original response
+        4. Remains accurate and relevant to the original request
+
+        Provide your complete improved response without explanations or commentary.
+        </fastagent:instruction>
+        """
+        else:
+            prompt += """
+        <fastagent:instruction>
+        Your previous response is available in your conversation history.
+
+        Create an improved version that:
+        1. Directly addresses each point in the feedback
+        2. Focuses on the specific areas mentioned for improvement
+        3. Maintains all the strengths of your original response
+        4. Remains accurate and relevant to the original request
+
+        Provide your complete improved response without explanations or commentary.
+        </fastagent:instruction>
+        """
+
+        return prompt
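The refinement prompt now branches on the history setting: with history enabled, the previous response is omitted from the prompt (it already sits in the conversation) and the instruction says so. A hypothetical call showing the knob (draft and evaluation are placeholders; the builder is an internal method):

    # evaluation is an EvaluationResult (rating, feedback, focus_areas);
    # draft is the generator's previous output.
    prompt = workflow._build_refinement_prompt(
        original_request="Summarize the attached report.",
        current_response=draft,  # embedded only when use_history=False
        feedback=evaluation,
        iteration=0,
        use_history=False,  # include <fastagent:previous-response> in the prompt
    )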
mcp_agent/workflows/llm/augmented_llm_anthropic.py
@@ -117,10 +117,11 @@ class AnthropicAugmentedLLM(AugmentedLLM[MessageParam, Message]):
         responses: List[Message] = []
         model = await self.select_model(params)
         chat_turn = (len(messages) + 1) // 2
-        self._log_chat_progress(chat_turn, model=model)
         self.show_user_message(str(message), model, chat_turn)
 
         for i in range(params.max_iterations):
+            chat_turn = (len(messages) + 1) // 2
+            self._log_chat_progress(chat_turn, model=model)
             arguments = {
                 "model": model,
                 "messages": messages,
@@ -208,10 +209,23 @@ class AnthropicAugmentedLLM(AugmentedLLM[MessageParam, Message]):
                 break
             elif response.stop_reason == "max_tokens":
                 # We have reached the max tokens limit
+
                 self.logger.debug(
                     f"Iteration {i}: Stopping because finish_reason is 'max_tokens'"
                 )
-                # TODO: saqadri - would be useful to return the reason for stopping to the caller
+                if params.maxTokens is not None:
+                    message_text = Text(
+                        f"the assistant has reached the maximum token limit ({params.maxTokens})",
+                        style="dim green italic",
+                    )
+                else:
+                    message_text = Text(
+                        "the assistant has reached the maximum token limit",
+                        style="dim green italic",
+                    )
+
+                await self.show_assistant_message(message_text)
+
                 break
             else:
                 message_text = ""
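Returning to the chat_turn change at the top of this file: recomputing (len(messages) + 1) // 2 inside the loop keeps the progress log accurate as tool-call iterations append messages. A worked example of the arithmetic, assuming strict user/assistant alternation:

    # message count -> chat turn
    for n in (1, 2, 3, 4, 5):
        print(n, (n + 1) // 2)
    # 1 -> 1  (first user message opens turn 1)
    # 2 -> 1  (assistant reply completes turn 1)
    # 3 -> 2, 4 -> 2, 5 -> 3, and so on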
mcp_agent/workflows/llm/augmented_llm_openai.py
@@ -254,7 +254,7 @@ class OpenAIAugmentedLLM(
                         message_text,
                         message.tool_calls[
                             0
-                        ].function.name,  # TODO support multiple tool calls
+                        ].function.name,  # TODO support displaying multiple tool calls
                     )
                 else:
                     await self.show_assistant_message(
@@ -294,6 +294,18 @@ class OpenAIAugmentedLLM(
                 self.logger.debug(
                     f"Iteration {i}: Stopping because finish_reason is 'length'"
                 )
+                if request_params and request_params.maxTokens is not None:
+                    message_text = Text(
+                        f"the assistant has reached the maximum token limit ({request_params.maxTokens})",
+                        style="dim green italic",
+                    )
+                else:
+                    message_text = Text(
+                        "the assistant has reached the maximum token limit",
+                        style="dim green italic",
+                    )
+
+                await self.show_assistant_message(message_text)
                 # TODO: saqadri - would be useful to return the reason for stopping to the caller
                 break
             elif choice.finish_reason == "content_filter":
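Both providers now surface the max-token stop with the same styled notice. The Text type here is presumably rich.text.Text (already imported by these modules); a minimal sketch of the pattern, with max_tokens standing in for params.maxTokens / request_params.maxTokens:

    from rich.text import Text

    max_tokens = 4096  # stand-in for the request's maxTokens; may be None
    if max_tokens is not None:
        notice = Text(
            f"the assistant has reached the maximum token limit ({max_tokens})",
            style="dim green italic",
        )
    else:
        notice = Text(
            "the assistant has reached the maximum token limit",
            style="dim green italic",
        )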
mcp_agent/workflows/llm/prompt_utils.py (new file)
@@ -0,0 +1,137 @@
+"""
+XML formatting utilities for consistent prompt engineering across components.
+"""
+
+from typing import Dict, List, Optional, Union
+
+
+def format_xml_tag(tag_name: str, content: Optional[str] = None,
+                   attributes: Optional[Dict[str, str]] = None) -> str:
+    """
+    Format an XML tag with optional content and attributes.
+    Uses self-closing tag when content is None or empty.
+
+    Args:
+        tag_name: Name of the XML tag
+        content: Content to include inside the tag (None for self-closing)
+        attributes: Dictionary of attribute name-value pairs
+
+    Returns:
+        Formatted XML tag as string
+    """
+    # Format attributes if provided
+    attrs_str = ""
+    if attributes:
+        attrs_str = " " + " ".join(f'{k}="{v}"' for k, v in attributes.items())
+
+    # Use self-closing tag if no content
+    if content is None or content == "":
+        return f"<{tag_name}{attrs_str} />"
+
+    # Full tag with content
+    return f"<{tag_name}{attrs_str}>{content}</{tag_name}>"
+
+
+def format_fastagent_tag(tag_type: str, content: Optional[str] = None,
+                         attributes: Optional[Dict[str, str]] = None) -> str:
+    """
+    Format a fastagent-namespaced XML tag with consistent formatting.
+
+    Args:
+        tag_type: Type of fastagent tag (without namespace prefix)
+        content: Content to include inside the tag
+        attributes: Dictionary of attribute name-value pairs
+
+    Returns:
+        Formatted fastagent XML tag as string
+    """
+    return format_xml_tag(f"fastagent:{tag_type}", content, attributes)
+
+
+def format_server_info(server_name: str, description: Optional[str] = None,
+                       tools: Optional[List[Dict[str, str]]] = None) -> str:
+    """
+    Format server information consistently across router and orchestrator modules.
+
+    Args:
+        server_name: Name of the server
+        description: Optional server description
+        tools: Optional list of tool dictionaries with 'name' and 'description' keys
+
+    Returns:
+        Formatted server XML as string
+    """
+    # Use self-closing tag if no description or tools
+    if not description and not tools:
+        return format_fastagent_tag("server", None, {"name": server_name})
+
+    # Start building components
+    components = []
+
+    # Add description if present
+    if description:
+        desc_tag = format_fastagent_tag("description", description)
+        components.append(desc_tag)
+
+    # Add tools section if tools exist
+    if tools and len(tools) > 0:
+        tool_tags = []
+        for tool in tools:
+            tool_name = tool.get("name", "")
+            tool_desc = tool.get("description", "")
+            tool_tag = format_fastagent_tag("tool", tool_desc, {"name": tool_name})
+            tool_tags.append(tool_tag)
+
+        tools_content = "\n".join(tool_tags)
+        tools_tag = format_fastagent_tag("tools", f"\n{tools_content}\n")
+        components.append(tools_tag)
+
+    # Combine all components
+    server_content = "\n".join(components)
+    return format_fastagent_tag("server", f"\n{server_content}\n", {"name": server_name})
+
+
+def format_agent_info(agent_name: str, description: Optional[str] = None,
+                      servers: Optional[List[Dict[str, Union[str, List[Dict[str, str]]]]]] = None) -> str:
+    """
+    Format agent information consistently across router and orchestrator modules.
+
+    Args:
+        agent_name: Name of the agent
+        description: Optional agent description/instruction
+        servers: Optional list of server dictionaries with 'name', 'description', and 'tools' keys
+
+    Returns:
+        Formatted agent XML as string
+    """
+    # Start building components
+    components = []
+
+    # Add description if present
+    if description:
+        desc_tag = format_fastagent_tag("description", description)
+        components.append(desc_tag)
+
+    # If no description or servers, use self-closing tag
+    if not description and not servers:
+        return format_fastagent_tag("agent", None, {"name": agent_name})
+
+    # If has servers, format them
+    if servers and len(servers) > 0:
+        server_tags = []
+        for server in servers:
+            server_name = server.get("name", "")
+            server_desc = server.get("description", "")
+            server_tools = server.get("tools", [])
+            server_tag = format_server_info(server_name, server_desc, server_tools)
+            server_tags.append(server_tag)
+
+        # Only add servers section if we have servers
+        if server_tags:
+            servers_content = "\n".join(server_tags)
+            servers_tag = format_fastagent_tag("servers", f"\n{servers_content}\n")
+            components.append(servers_tag)
+
+    # Combine all components
+    agent_content = "\n".join(components)
+    return format_fastagent_tag("agent", f"\n{agent_content}\n", {"name": agent_name})
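The new helpers compose: format_server_info and format_agent_info both bottom out in format_xml_tag, so self-closing and full forms stay consistent. An illustrative session (expected output shown in comments, derived from the code above):

    from mcp_agent.workflows.llm.prompt_utils import (
        format_fastagent_tag,
        format_server_info,
    )

    print(format_fastagent_tag("request", "Summarize the report"))
    # <fastagent:request>Summarize the report</fastagent:request>

    print(format_server_info("files"))
    # <fastagent:server name="files" />

    print(format_server_info(
        "files",
        description="Local filesystem access",
        tools=[{"name": "read_file", "description": "Read a file from disk"}],
    ))
    # <fastagent:server name="files">
    # <fastagent:description>Local filesystem access</fastagent:description>
    # <fastagent:tools>
    # <fastagent:tool name="read_file">Read a file from disk</fastagent:tool>
    # </fastagent:tools>
    # </fastagent:server>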