fast-agent-mcp 0.1.10__py3-none-any.whl → 0.1.12__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {fast_agent_mcp-0.1.10.dist-info → fast_agent_mcp-0.1.12.dist-info}/METADATA +36 -38
- {fast_agent_mcp-0.1.10.dist-info → fast_agent_mcp-0.1.12.dist-info}/RECORD +45 -42
- mcp_agent/agents/agent.py +1 -24
- mcp_agent/app.py +0 -5
- mcp_agent/config.py +9 -0
- mcp_agent/context.py +0 -2
- mcp_agent/core/agent_app.py +29 -0
- mcp_agent/core/agent_types.py +29 -2
- mcp_agent/core/decorators.py +1 -2
- mcp_agent/core/error_handling.py +1 -1
- mcp_agent/core/factory.py +2 -3
- mcp_agent/core/mcp_content.py +2 -3
- mcp_agent/core/proxies.py +3 -0
- mcp_agent/core/request_params.py +43 -0
- mcp_agent/core/types.py +4 -2
- mcp_agent/core/validation.py +14 -15
- mcp_agent/logging/transport.py +2 -2
- mcp_agent/mcp/gen_client.py +4 -4
- mcp_agent/mcp/interfaces.py +186 -0
- mcp_agent/mcp/mcp_agent_client_session.py +10 -2
- mcp_agent/mcp/mcp_aggregator.py +12 -3
- mcp_agent/mcp/sampling.py +140 -0
- mcp_agent/mcp/stdio.py +1 -2
- mcp_agent/mcp_server/__init__.py +1 -1
- mcp_agent/resources/examples/internal/agent.py +1 -1
- mcp_agent/resources/examples/internal/fastagent.config.yaml +3 -0
- mcp_agent/resources/examples/prompting/__init__.py +1 -1
- mcp_agent/ui/console_display.py +2 -2
- mcp_agent/workflows/evaluator_optimizer/evaluator_optimizer.py +2 -2
- mcp_agent/workflows/llm/augmented_llm.py +42 -102
- mcp_agent/workflows/llm/augmented_llm_anthropic.py +4 -3
- mcp_agent/workflows/llm/augmented_llm_openai.py +4 -3
- mcp_agent/workflows/llm/augmented_llm_passthrough.py +119 -37
- mcp_agent/workflows/llm/model_factory.py +1 -1
- mcp_agent/workflows/llm/prompt_utils.py +42 -28
- mcp_agent/workflows/llm/providers/multipart_converter_anthropic.py +244 -140
- mcp_agent/workflows/llm/providers/multipart_converter_openai.py +230 -185
- mcp_agent/workflows/llm/providers/sampling_converter_anthropic.py +5 -204
- mcp_agent/workflows/llm/providers/sampling_converter_openai.py +9 -207
- mcp_agent/workflows/llm/sampling_converter.py +124 -0
- mcp_agent/workflows/llm/sampling_format_converter.py +0 -17
- mcp_agent/workflows/router/router_base.py +10 -10
- mcp_agent/workflows/llm/llm_selector.py +0 -345
- {fast_agent_mcp-0.1.10.dist-info → fast_agent_mcp-0.1.12.dist-info}/WHEEL +0 -0
- {fast_agent_mcp-0.1.10.dist-info → fast_agent_mcp-0.1.12.dist-info}/entry_points.txt +0 -0
- {fast_agent_mcp-0.1.10.dist-info → fast_agent_mcp-0.1.12.dist-info}/licenses/LICENSE +0 -0
mcp_agent/workflows/llm/augmented_llm.py

@@ -10,7 +10,6 @@ from typing import (
     TYPE_CHECKING,
 )
 
-from mcp import CreateMessageResult, SamplingMessage
 from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart
 from mcp_agent.workflows.llm.sampling_format_converter import (
     SamplingFormatConverter,
@@ -25,23 +24,28 @@ if TYPE_CHECKING:
     from mcp_agent.context import Context
 
 
-from pydantic import Field
 
 from mcp.types import (
     CallToolRequest,
     CallToolResult,
-    CreateMessageRequestParams,
-    ModelPreferences,
     PromptMessage,
     TextContent,
     GetPromptResult,
 )
 
 from mcp_agent.context_dependent import ContextDependent
-from mcp_agent.core.exceptions import PromptExitError
+from mcp_agent.core.exceptions import ModelConfigError, PromptExitError
+from mcp_agent.core.request_params import RequestParams
 from mcp_agent.event_progress import ProgressAction
-
-
+
+try:
+    from mcp_agent.mcp.mcp_aggregator import MCPAggregator
+except ImportError:
+    # For testing purposes
+    class MCPAggregator:
+        pass
+
+
 from mcp_agent.ui.console_display import ConsoleDisplay
 from rich.text import Text
 
@@ -155,43 +159,6 @@ class SimpleMemory(Memory, Generic[MessageParamT]):
         self.prompt_messages = []
 
 
-class RequestParams(CreateMessageRequestParams):
-    """
-    Parameters to configure the AugmentedLLM 'generate' requests.
-    """
-
-    messages: None = Field(exclude=True, default=None)
-    """
-    Ignored. 'messages' are removed from CreateMessageRequestParams
-    to avoid confusion with the 'message' parameter on 'generate' method.
-    """
-
-    maxTokens: int = 2048
-    """The maximum number of tokens to sample, as requested by the server."""
-
-    model: str | None = None
-    """
-    The model to use for the LLM generation.
-    If specified, this overrides the 'modelPreferences' selection criteria.
-    """
-
-    use_history: bool = True
-    """
-    Include the message history in the generate request.
-    """
-
-    max_iterations: int = 10
-    """
-    The maximum number of iterations to run the LLM for.
-    """
-
-    parallel_tool_calls: bool = True
-    """
-    Whether to allow multiple tool calls per iteration.
-    Also known as multi-step tool use.
-    """
-
-
 class AugmentedLLMProtocol(Protocol, Generic[MessageParamT, MessageT]):
     """Protocol defining the interface for augmented LLMs"""
 
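The RequestParams class deleted above has not disappeared: the file list shows a new mcp_agent/core/request_params.py (+43 lines), and the import hunk earlier in this file now pulls RequestParams from mcp_agent.core.request_params. As a rough sketch only, reconstructed from the fields removed here rather than from the new module itself, the relocated definition presumably looks something like this:

# Hypothetical sketch of mcp_agent/core/request_params.py, assembled from the
# fields deleted in the hunk above; the real module is not shown in this diff.
from mcp.types import CreateMessageRequestParams
from pydantic import Field


class RequestParams(CreateMessageRequestParams):
    """Parameters to configure AugmentedLLM 'generate' requests."""

    messages: None = Field(exclude=True, default=None)
    """Ignored; excluded to avoid confusion with the 'message' argument of 'generate'."""

    maxTokens: int = 2048
    """The maximum number of tokens to sample, as requested by the server."""

    model: str | None = None
    """Overrides 'modelPreferences' selection criteria when set."""

    use_history: bool = True
    """Include the message history in the generate request."""

    max_iterations: int = 10
    """The maximum number of iterations to run the LLM for."""

    parallel_tool_calls: bool = True
    """Whether to allow multiple tool calls per iteration (multi-step tool use)."""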
@@ -269,30 +236,15 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol[MessageParamT, Message
         # Initialize the display component
         self.display = ConsoleDisplay(config=self.context.config)
 
-        # Set initial model preferences
-        self.model_preferences = ModelPreferences(
-            costPriority=0.3,
-            speedPriority=0.4,
-            intelligencePriority=0.3,
-        )
-
         # Initialize default parameters
         self.default_request_params = self._initialize_default_params(kwargs)
 
-        # Update model preferences from default params
-        if self.default_request_params and self.default_request_params.modelPreferences:
-            self.model_preferences = self.default_request_params.modelPreferences
-
         # Merge with provided params if any
         if self._init_request_params:
             self.default_request_params = self._merge_request_params(
                 self.default_request_params, self._init_request_params
             )
-            # Update model preferences again if they changed in the merge
-            if self.default_request_params.modelPreferences:
-                self.model_preferences = self.default_request_params.modelPreferences
 
-        self.model_selector = self.context.model_selector
         self.type_converter = type_converter
         self.verb = kwargs.get("verb")
 
@@ -321,39 +273,21 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol[MessageParamT, Message
     ) -> ModelT:
         """Request a structured LLM generation and return the result as a Pydantic model."""
 
-    # aysnc def generate2_str(self, prompt: PromptMessageMultipart, request_params: RequestParams | None = None) -> List[MessageT]:
-    #     """Request an LLM generation, which may run multiple iterations, and return the result"""
-    #     return None
-
     async def select_model(
         self, request_params: RequestParams | None = None
     ) -> str | None:
         """
-
-        If a model is specified in the request, it will override the model selection criteria.
+        Return the configured model (legacy support)
         """
-
-
-        model_preferences = request_params.modelPreferences or model_preferences
-        model = request_params.model
-        if model:
-            return model
+        if request_params.model:
+            return request_params.model
 
-
-        if not self.model_selector:
-            self.model_selector = ModelSelector()
-
-        model_info = self.model_selector.select_best_model(
-            model_preferences=model_preferences, provider=self.provider
-        )
-
-        return model_info.name
+        raise ModelConfigError("Internal Error: Model is not configured correctly")
 
     def _initialize_default_params(self, kwargs: dict) -> RequestParams:
         """Initialize default parameters for the LLM.
         Should be overridden by provider implementations to set provider-specific defaults."""
         return RequestParams(
-            modelPreferences=self.model_preferences,
             systemPrompt=self.instruction,
             parallel_tool_calls=True,
             max_iterations=10,
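Note the behaviour change above: select_model no longer consults ModelPreferences or a ModelSelector; it simply returns the explicitly configured model, or raises ModelConfigError when none is set. An illustrative sketch of the new contract (the 'llm' fixture and model name below are made up):

# Illustrative only: exercising the new select_model contract shown above.
# 'llm' stands for any AugmentedLLM subclass instance (hypothetical fixture).
from mcp_agent.core.exceptions import ModelConfigError
from mcp_agent.core.request_params import RequestParams


async def demo(llm) -> None:
    # An explicitly configured model is returned unchanged.
    assert await llm.select_model(RequestParams(model="haiku")) == "haiku"

    # With no model configured, the method now raises instead of falling
    # back to ModelPreferences-based selection.
    try:
        await llm.select_model(RequestParams())
    except ModelConfigError:
        pass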
@@ -395,25 +329,6 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol[MessageParamT, Message
 
         return default_request_params
 
-    def to_mcp_message_result(self, result: MessageT) -> CreateMessageResult:
-        """Convert an LLM response to an MCP message result type."""
-        return self.type_converter.to_sampling_result(result)
-
-    def from_mcp_message_result(self, result: CreateMessageResult) -> MessageT:
-        """Convert an MCP message result to an LLM response type."""
-        return self.type_converter.from_sampling_result(result)
-
-    def to_mcp_message_param(self, param: MessageParamT) -> SamplingMessage:
-        """Convert an LLM input to an MCP message (SamplingMessage) type."""
-        return self.type_converter.to_sampling_message(param)
-
-    def from_mcp_message_param(self, param: SamplingMessage) -> MessageParamT:
-        """Convert an MCP message (SamplingMessage) to an LLM input type."""
-        return self.type_converter.from_sampling_message(param)
-
-    def from_mcp_prompt_message(self, message: PromptMessage) -> MessageParamT:
-        return self.type_converter.from_prompt_message(message)
-
     @classmethod
     def convert_message_to_message_param(
         cls, message: MessageT, **kwargs
@@ -689,10 +604,35 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol[MessageParamT, Message
         )
 
         # Delegate to the provider-specific implementation
-        return await self._apply_prompt_template_provider_specific(
+        return await self._apply_prompt_template_provider_specific(
+            multipart_messages, None
+        )
+
+    async def apply_prompt(
+        self,
+        multipart_messages: List["PromptMessageMultipart"],
+        request_params: RequestParams | None = None,
+    ) -> str:
+        """
+        Apply a list of PromptMessageMultipart messages directly to the LLM.
+        This is a cleaner interface to _apply_prompt_template_provider_specific.
+
+        Args:
+            multipart_messages: List of PromptMessageMultipart objects
+            request_params: Optional parameters to configure the LLM request
+
+        Returns:
+            String representation of the assistant's response
+        """
+        # Delegate to the provider-specific implementation
+        return await self._apply_prompt_template_provider_specific(
+            multipart_messages, request_params
+        )
 
     async def _apply_prompt_template_provider_specific(
-        self,
+        self,
+        multipart_messages: List["PromptMessageMultipart"],
+        request_params: RequestParams | None = None,
     ) -> str:
         """
         Provider-specific implementation of apply_prompt_template.
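The new apply_prompt method gives callers a direct way to run a list of PromptMessageMultipart messages through the provider-specific template handling. A hedged usage sketch follows; the PromptMessageMultipart constructor arguments (role, content) are assumed here and are not part of this diff:

# Sketch only: calling the new AugmentedLLM.apply_prompt added in this release.
# The PromptMessageMultipart(role=..., content=...) shape is an assumption.
from mcp.types import TextContent
from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart


async def summarise(llm) -> str:
    messages = [
        PromptMessageMultipart(
            role="user",
            content=[TextContent(type="text", text="Summarise the design notes")],
        )
    ]
    # request_params is optional; None falls back to the LLM's defaults.
    return await llm.apply_prompt(messages, request_params=None)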
mcp_agent/workflows/llm/augmented_llm_anthropic.py

@@ -60,7 +60,6 @@ class AnthropicAugmentedLLM(AugmentedLLM[MessageParam, Message]):
         """Initialize Anthropic-specific default parameters"""
         return RequestParams(
             model=kwargs.get("model", DEFAULT_ANTHROPIC_MODEL),
-            modelPreferences=self.model_preferences,
             maxTokens=4096,  # default haiku3
             systemPrompt=self.instruction,
             parallel_tool_calls=True,
@@ -360,7 +359,9 @@ class AnthropicAugmentedLLM(AugmentedLLM[MessageParam, Message]):
         )
 
     async def _apply_prompt_template_provider_specific(
-        self,
+        self,
+        multipart_messages: List["PromptMessageMultipart"],
+        request_params: RequestParams | None = None,
     ) -> str:
         """
         Anthropic-specific implementation of apply_prompt_template that handles
@@ -393,7 +394,7 @@ class AnthropicAugmentedLLM(AugmentedLLM[MessageParam, Message]):
                 "Last message in prompt is from user, generating assistant response"
             )
             message_param = AnthropicConverter.convert_to_anthropic(last_message)
-            return await self.generate_str(message_param)
+            return await self.generate_str(message_param, request_params)
         else:
             # For assistant messages: Return the last message content as text
             self.logger.debug(
mcp_agent/workflows/llm/augmented_llm_openai.py

@@ -92,7 +92,6 @@ class OpenAIAugmentedLLM(
 
         return RequestParams(
             model=chosen_model,
-            modelPreferences=self.model_preferences,
             systemPrompt=self.instruction,
             parallel_tool_calls=True,
             max_iterations=10,
@@ -395,7 +394,9 @@ class OpenAIAugmentedLLM(
         return "\n".join(final_text)
 
     async def _apply_prompt_template_provider_specific(
-        self,
+        self,
+        multipart_messages: List["PromptMessageMultipart"],
+        request_params: RequestParams | None = None,
     ) -> str:
         """
         OpenAI-specific implementation of apply_prompt_template that handles
@@ -431,7 +432,7 @@ class OpenAIAugmentedLLM(
                 "Last message in prompt is from user, generating assistant response"
             )
             message_param = OpenAIConverter.convert_to_openai(last_message)
-            return await self.generate_str(message_param)
+            return await self.generate_str(message_param, request_params)
         else:
             # For assistant messages: Return the last message content as text
             self.logger.debug(
mcp_agent/workflows/llm/augmented_llm_passthrough.py

@@ -1,5 +1,5 @@
 from typing import Any, List, Optional, Type, Union
-import json
+import json  # Import at the module level
 from mcp import GetPromptResult
 from mcp.types import PromptMessage
 from pydantic_core import from_json
@@ -52,6 +52,13 @@ class PassthroughLLM(AugmentedLLM):
         self.show_user_message(message, model="fastagent-passthrough", chat_turn=0)
         await self.show_assistant_message(message, title="ASSISTANT/PASSTHROUGH")
 
+        # Handle PromptMessage by concatenating all parts
+        if isinstance(message, PromptMessage):
+            parts_text = []
+            for part in message.content:
+                parts_text.append(str(part))
+            return "\n".join(parts_text)
+
         return str(message)
 
     async def _call_tool_and_return_result(self, command: str) -> str:
@@ -65,42 +72,73 @@ class PassthroughLLM(AugmentedLLM):
             Tool result as a string
         """
         try:
-
-            parts = command.split(" ", 2)
-            if len(parts) < 2:
-                return "Error: Invalid format. Expected '***CALL_TOOL <tool_name> [arguments_json]'"
-
-            tool_name = parts[1].strip()
-            arguments = None
-
-            # Parse optional JSON arguments if provided
-            if len(parts) > 2:
-                try:
-                    arguments = json.loads(parts[2])
-                except json.JSONDecodeError:
-                    return f"Error: Invalid JSON arguments: {parts[2]}"
-
-            # Call the tool and get the result
-            self.logger.info(f"Calling tool {tool_name} with arguments {arguments}")
+            tool_name, arguments = self._parse_tool_command(command)
             result = await self.aggregator.call_tool(tool_name, arguments)
+            return self._format_tool_result(tool_name, result)
+        except Exception as e:
+            self.logger.error(f"Error calling tool: {str(e)}")
+            return f"Error calling tool: {str(e)}"
+
+    def _parse_tool_command(self, command: str) -> tuple[str, Optional[dict]]:
+        """
+        Parse a tool command string into tool name and arguments.
 
-
-
-            return f"Error calling tool '{tool_name}': {result.message}"
+        Args:
+            command: The command string in format "***CALL_TOOL <tool_name> [arguments_json]"
 
-
-
+        Returns:
+            Tuple of (tool_name, arguments_dict)
+
+        Raises:
+            ValueError: If command format is invalid
+        """
+        parts = command.split(" ", 2)
+        if len(parts) < 2:
+            raise ValueError(
+                "Invalid format. Expected '***CALL_TOOL <tool_name> [arguments_json]'"
+            )
+
+        tool_name = parts[1].strip()
+        arguments = None
+
+        if len(parts) > 2:
+            try:
+                arguments = json.loads(parts[2])
+            except json.JSONDecodeError:
+                raise ValueError(f"Invalid JSON arguments: {parts[2]}")
+
+        self.logger.info(f"Calling tool {tool_name} with arguments {arguments}")
+        return tool_name, arguments
+
+    def _format_tool_result(self, tool_name: str, result) -> str:
+        """
+        Format tool execution result as a string.
+
+        Args:
+            tool_name: The name of the tool that was called
+            result: The result returned from the tool
+
+        Returns:
+            Formatted result as a string
+        """
+        if result.isError:
+            error_text = []
             for content_item in result.content:
                 if hasattr(content_item, "text"):
-
+                    error_text.append(content_item.text)
                 else:
-
+                    error_text.append(str(content_item))
+            error_message = "\n".join(error_text) if error_text else "Unknown error"
+            return f"Error calling tool '{tool_name}': {error_message}"
 
-
+        result_text = []
+        for content_item in result.content:
+            if hasattr(content_item, "text"):
+                result_text.append(content_item.text)
+            else:
+                result_text.append(str(content_item))
 
-
-            self.logger.error(f"Error calling tool: {str(e)}")
-            return f"Error calling tool: {str(e)}"
+        return "\n".join(result_text)
 
     async def generate_structured(
         self,
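For reference, the command syntax handled by the new _parse_tool_command helper is unchanged from the inline code it replaces: ***CALL_TOOL <tool_name> [arguments_json]. A small sketch of what the parsing amounts to (the tool name and JSON payload below are invented for illustration):

# Equivalent of what _parse_tool_command does, shown standalone for clarity.
# The tool name and arguments are made-up examples.
import json

command = '***CALL_TOOL fetch_url {"url": "https://example.com"}'

parts = command.split(" ", 2)      # ['***CALL_TOOL', 'fetch_url', '{"url": ...}']
tool_name = parts[1].strip()       # 'fetch_url'
arguments = json.loads(parts[2])   # {'url': 'https://example.com'}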
@@ -123,26 +161,62 @@ class PassthroughLLM(AugmentedLLM):
     async def generate_prompt(
         self, prompt: "PromptMessageMultipart", request_params: RequestParams | None
     ) -> str:
-
-        if
-
-
+        # Check if this prompt contains a tool call command
+        if (
+            prompt.content
+            and prompt.content[0].text
+            and prompt.content[0].text.startswith("***CALL_TOOL ")
+        ):
+            return await self._call_tool_and_return_result(prompt.content[0].text)
+
+        # Process all parts of the PromptMessageMultipart
+        parts_text = []
+        for part in prompt.content:
+            parts_text.append(str(part))
+
+        # If no parts found, return empty string
+        if not parts_text:
+            return ""
+
+        # Join all parts and process with generate_str
+        return await self.generate_str("\n".join(parts_text), request_params)
+
+    async def apply_prompt(
+        self,
+        multipart_messages: List["PromptMessageMultipart"],
+        request_params: Optional[RequestParams] = None,
+    ) -> str:
+        """
+        Apply a list of PromptMessageMultipart messages directly to the LLM.
+        In PassthroughLLM, this returns a concatenated string of all message content.
+
+        Args:
+            multipart_messages: List of PromptMessageMultipart objects
+            request_params: Optional parameters to configure the LLM request
+
+        Returns:
+            String representation of all message content concatenated together
+        """
+        # Generate and concatenate result from all messages
+        result = ""
+        for prompt in multipart_messages:
+            result += await self.generate_prompt(prompt, request_params) + "\n"
+
+        return result
 
     async def apply_prompt_template(
         self, prompt_result: GetPromptResult, prompt_name: str
     ) -> str:
         """
         Apply a prompt template by adding it to the conversation history.
-
-        generate an assistant response.
+        For PassthroughLLM, this returns all content concatenated together.
 
         Args:
             prompt_result: The GetPromptResult containing prompt messages
             prompt_name: The name of the prompt being applied
 
         Returns:
-            String representation of
-            or the last assistant message in the prompt
+            String representation of all message content concatenated together
         """
         prompt_messages: List[PromptMessage] = prompt_result.messages
 
@@ -157,3 +231,11 @@ class PassthroughLLM(AugmentedLLM):
             arguments=arguments,
         )
         self._messages = prompt_messages
+
+        # Convert prompt messages to multipart format
+        multipart_messages = PromptMessageMultipart.from_prompt_messages(
+            prompt_messages
+        )
+
+        # Use apply_prompt to handle the multipart messages
+        return await self.apply_prompt(multipart_messages)
mcp_agent/workflows/llm/model_factory.py

@@ -4,9 +4,9 @@ from typing import Optional, Type, Dict, Union, Callable
 
 from mcp_agent.agents.agent import Agent
 from mcp_agent.core.exceptions import ModelConfigError
+from mcp_agent.core.request_params import RequestParams
 from mcp_agent.workflows.llm.augmented_llm_anthropic import AnthropicAugmentedLLM
 from mcp_agent.workflows.llm.augmented_llm_openai import OpenAIAugmentedLLM
-from mcp_agent.workflows.llm.augmented_llm import RequestParams
 from mcp_agent.workflows.llm.augmented_llm_passthrough import PassthroughLLM
 from mcp_agent.workflows.llm.augmented_llm_playback import PlaybackLLM
 
mcp_agent/workflows/llm/prompt_utils.py

@@ -5,8 +5,11 @@ XML formatting utilities for consistent prompt engineering across components.
 from typing import Dict, List, Optional, Union
 
 
-def format_xml_tag(
-
+def format_xml_tag(
+    tag_name: str,
+    content: Optional[str] = None,
+    attributes: Optional[Dict[str, str]] = None,
+) -> str:
     """
     Format an XML tag with optional content and attributes.
     Uses self-closing tag when content is None or empty.
@@ -23,56 +26,62 @@ def format_xml_tag(tag_name: str, content: Optional[str] = None,
     attrs_str = ""
     if attributes:
         attrs_str = " " + " ".join(f'{k}="{v}"' for k, v in attributes.items())
-
+
     # Use self-closing tag if no content
     if content is None or content == "":
         return f"<{tag_name}{attrs_str} />"
-
+
     # Full tag with content
     return f"<{tag_name}{attrs_str}>{content}</{tag_name}>"
 
 
-def format_fastagent_tag(
-
+def format_fastagent_tag(
+    tag_type: str,
+    content: Optional[str] = None,
+    attributes: Optional[Dict[str, str]] = None,
+) -> str:
     """
     Format a fastagent-namespaced XML tag with consistent formatting.
-
+
     Args:
         tag_type: Type of fastagent tag (without namespace prefix)
         content: Content to include inside the tag
         attributes: Dictionary of attribute name-value pairs
-
+
     Returns:
         Formatted fastagent XML tag as string
     """
     return format_xml_tag(f"fastagent:{tag_type}", content, attributes)
 
 
-def format_server_info(
-
+def format_server_info(
+    server_name: str,
+    description: Optional[str] = None,
+    tools: Optional[List[Dict[str, str]]] = None,
+) -> str:
     """
     Format server information consistently across router and orchestrator modules.
-
+
     Args:
         server_name: Name of the server
         description: Optional server description
         tools: Optional list of tool dictionaries with 'name' and 'description' keys
-
+
     Returns:
         Formatted server XML as string
     """
     # Use self-closing tag if no description or tools
     if not description and not tools:
         return format_fastagent_tag("server", None, {"name": server_name})
-
+
     # Start building components
     components = []
-
+
     # Add description if present
     if description:
         desc_tag = format_fastagent_tag("description", description)
         components.append(desc_tag)
-
+
     # Add tools section if tools exist
     if tools and len(tools) > 0:
         tool_tags = []
@@ -81,41 +90,46 @@ def format_server_info(server_name: str, description: Optional[str] = None,
             tool_desc = tool.get("description", "")
             tool_tag = format_fastagent_tag("tool", tool_desc, {"name": tool_name})
             tool_tags.append(tool_tag)
-
+
         tools_content = "\n".join(tool_tags)
         tools_tag = format_fastagent_tag("tools", f"\n{tools_content}\n")
         components.append(tools_tag)
-
+
     # Combine all components
     server_content = "\n".join(components)
-    return format_fastagent_tag(
+    return format_fastagent_tag(
+        "server", f"\n{server_content}\n", {"name": server_name}
+    )
 
 
-def format_agent_info(
-
+def format_agent_info(
+    agent_name: str,
+    description: Optional[str] = None,
+    servers: Optional[List[Dict[str, Union[str, List[Dict[str, str]]]]]] = None,
+) -> str:
     """
     Format agent information consistently across router and orchestrator modules.
-
+
     Args:
         agent_name: Name of the agent
         description: Optional agent description/instruction
         servers: Optional list of server dictionaries with 'name', 'description', and 'tools' keys
-
+
     Returns:
         Formatted agent XML as string
     """
     # Start building components
     components = []
-
+
     # Add description if present
     if description:
         desc_tag = format_fastagent_tag("description", description)
         components.append(desc_tag)
-
+
     # If no description or servers, use self-closing tag
     if not description and not servers:
         return format_fastagent_tag("agent", None, {"name": agent_name})
-
+
     # If has servers, format them
     if servers and len(servers) > 0:
         server_tags = []
@@ -125,13 +139,13 @@ def format_agent_info(agent_name: str, description: Optional[str] = None,
             server_tools = server.get("tools", [])
             server_tag = format_server_info(server_name, server_desc, server_tools)
             server_tags.append(server_tag)
-
+
         # Only add servers section if we have servers
         if server_tags:
             servers_content = "\n".join(server_tags)
            servers_tag = format_fastagent_tag("servers", f"\n{servers_content}\n")
             components.append(servers_tag)
-
+
     # Combine all components
     agent_content = "\n".join(components)
-    return format_fastagent_tag("agent", f"\n{agent_content}\n", {"name": agent_name})
+    return format_fastagent_tag("agent", f"\n{agent_content}\n", {"name": agent_name})