fast-agent-mcp 0.1.12__py3-none-any.whl → 0.2.0__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package, as published to their public registry. It is provided for informational purposes only.
- {fast_agent_mcp-0.1.12.dist-info → fast_agent_mcp-0.2.0.dist-info}/METADATA +3 -4
- fast_agent_mcp-0.2.0.dist-info/RECORD +123 -0
- mcp_agent/__init__.py +75 -0
- mcp_agent/agents/agent.py +61 -415
- mcp_agent/agents/base_agent.py +522 -0
- mcp_agent/agents/workflow/__init__.py +1 -0
- mcp_agent/agents/workflow/chain_agent.py +173 -0
- mcp_agent/agents/workflow/evaluator_optimizer.py +362 -0
- mcp_agent/agents/workflow/orchestrator_agent.py +591 -0
- mcp_agent/{workflows/orchestrator → agents/workflow}/orchestrator_models.py +11 -21
- mcp_agent/agents/workflow/parallel_agent.py +182 -0
- mcp_agent/agents/workflow/router_agent.py +307 -0
- mcp_agent/app.py +15 -19
- mcp_agent/cli/commands/bootstrap.py +19 -38
- mcp_agent/cli/commands/config.py +4 -4
- mcp_agent/cli/commands/setup.py +7 -14
- mcp_agent/cli/main.py +7 -10
- mcp_agent/cli/terminal.py +3 -3
- mcp_agent/config.py +25 -40
- mcp_agent/context.py +12 -21
- mcp_agent/context_dependent.py +3 -5
- mcp_agent/core/agent_types.py +10 -7
- mcp_agent/core/direct_agent_app.py +179 -0
- mcp_agent/core/direct_decorators.py +443 -0
- mcp_agent/core/direct_factory.py +476 -0
- mcp_agent/core/enhanced_prompt.py +23 -55
- mcp_agent/core/exceptions.py +8 -8
- mcp_agent/core/fastagent.py +145 -371
- mcp_agent/core/interactive_prompt.py +424 -0
- mcp_agent/core/mcp_content.py +17 -17
- mcp_agent/core/prompt.py +6 -9
- mcp_agent/core/request_params.py +6 -3
- mcp_agent/core/validation.py +92 -18
- mcp_agent/executor/decorator_registry.py +9 -17
- mcp_agent/executor/executor.py +8 -17
- mcp_agent/executor/task_registry.py +2 -4
- mcp_agent/executor/temporal.py +19 -41
- mcp_agent/executor/workflow.py +3 -5
- mcp_agent/executor/workflow_signal.py +15 -21
- mcp_agent/human_input/handler.py +4 -7
- mcp_agent/human_input/types.py +2 -3
- mcp_agent/llm/__init__.py +2 -0
- mcp_agent/llm/augmented_llm.py +450 -0
- mcp_agent/llm/augmented_llm_passthrough.py +162 -0
- mcp_agent/llm/augmented_llm_playback.py +83 -0
- mcp_agent/llm/memory.py +103 -0
- mcp_agent/{workflows/llm → llm}/model_factory.py +22 -16
- mcp_agent/{workflows/llm → llm}/prompt_utils.py +1 -3
- mcp_agent/llm/providers/__init__.py +8 -0
- mcp_agent/{workflows/llm → llm/providers}/anthropic_utils.py +8 -25
- mcp_agent/{workflows/llm → llm/providers}/augmented_llm_anthropic.py +56 -194
- mcp_agent/llm/providers/augmented_llm_deepseek.py +53 -0
- mcp_agent/{workflows/llm → llm/providers}/augmented_llm_openai.py +99 -190
- mcp_agent/{workflows/llm → llm}/providers/multipart_converter_anthropic.py +72 -71
- mcp_agent/{workflows/llm → llm}/providers/multipart_converter_openai.py +65 -71
- mcp_agent/{workflows/llm → llm}/providers/openai_multipart.py +16 -44
- mcp_agent/{workflows/llm → llm/providers}/openai_utils.py +4 -4
- mcp_agent/{workflows/llm → llm}/providers/sampling_converter_anthropic.py +9 -11
- mcp_agent/{workflows/llm → llm}/providers/sampling_converter_openai.py +8 -12
- mcp_agent/{workflows/llm → llm}/sampling_converter.py +3 -31
- mcp_agent/llm/sampling_format_converter.py +37 -0
- mcp_agent/logging/events.py +1 -5
- mcp_agent/logging/json_serializer.py +7 -6
- mcp_agent/logging/listeners.py +20 -23
- mcp_agent/logging/logger.py +17 -19
- mcp_agent/logging/rich_progress.py +10 -8
- mcp_agent/logging/tracing.py +4 -6
- mcp_agent/logging/transport.py +22 -22
- mcp_agent/mcp/gen_client.py +1 -3
- mcp_agent/mcp/interfaces.py +117 -110
- mcp_agent/mcp/logger_textio.py +97 -0
- mcp_agent/mcp/mcp_agent_client_session.py +7 -7
- mcp_agent/mcp/mcp_agent_server.py +8 -8
- mcp_agent/mcp/mcp_aggregator.py +102 -143
- mcp_agent/mcp/mcp_connection_manager.py +20 -27
- mcp_agent/mcp/prompt_message_multipart.py +68 -16
- mcp_agent/mcp/prompt_render.py +77 -0
- mcp_agent/mcp/prompt_serialization.py +30 -48
- mcp_agent/mcp/prompts/prompt_constants.py +18 -0
- mcp_agent/mcp/prompts/prompt_helpers.py +327 -0
- mcp_agent/mcp/prompts/prompt_load.py +109 -0
- mcp_agent/mcp/prompts/prompt_server.py +155 -195
- mcp_agent/mcp/prompts/prompt_template.py +35 -66
- mcp_agent/mcp/resource_utils.py +7 -14
- mcp_agent/mcp/sampling.py +17 -17
- mcp_agent/mcp_server/agent_server.py +13 -17
- mcp_agent/mcp_server_registry.py +13 -22
- mcp_agent/resources/examples/{workflows → in_dev}/agent_build.py +3 -2
- mcp_agent/resources/examples/in_dev/slides.py +110 -0
- mcp_agent/resources/examples/internal/agent.py +6 -3
- mcp_agent/resources/examples/internal/fastagent.config.yaml +8 -2
- mcp_agent/resources/examples/internal/job.py +2 -1
- mcp_agent/resources/examples/internal/prompt_category.py +1 -1
- mcp_agent/resources/examples/internal/prompt_sizing.py +3 -5
- mcp_agent/resources/examples/internal/sizer.py +2 -1
- mcp_agent/resources/examples/internal/social.py +2 -1
- mcp_agent/resources/examples/prompting/agent.py +2 -1
- mcp_agent/resources/examples/prompting/image_server.py +4 -8
- mcp_agent/resources/examples/prompting/work_with_image.py +19 -0
- mcp_agent/ui/console_display.py +16 -20
- fast_agent_mcp-0.1.12.dist-info/RECORD +0 -161
- mcp_agent/core/agent_app.py +0 -646
- mcp_agent/core/agent_utils.py +0 -71
- mcp_agent/core/decorators.py +0 -455
- mcp_agent/core/factory.py +0 -463
- mcp_agent/core/proxies.py +0 -269
- mcp_agent/core/types.py +0 -24
- mcp_agent/eval/__init__.py +0 -0
- mcp_agent/mcp/stdio.py +0 -111
- mcp_agent/resources/examples/data-analysis/analysis-campaign.py +0 -188
- mcp_agent/resources/examples/data-analysis/analysis.py +0 -65
- mcp_agent/resources/examples/data-analysis/fastagent.config.yaml +0 -41
- mcp_agent/resources/examples/data-analysis/mount-point/WA_Fn-UseC_-HR-Employee-Attrition.csv +0 -1471
- mcp_agent/resources/examples/mcp_researcher/researcher-eval.py +0 -53
- mcp_agent/resources/examples/researcher/fastagent.config.yaml +0 -66
- mcp_agent/resources/examples/researcher/researcher-eval.py +0 -53
- mcp_agent/resources/examples/researcher/researcher-imp.py +0 -190
- mcp_agent/resources/examples/researcher/researcher.py +0 -38
- mcp_agent/resources/examples/workflows/chaining.py +0 -44
- mcp_agent/resources/examples/workflows/evaluator.py +0 -78
- mcp_agent/resources/examples/workflows/fastagent.config.yaml +0 -24
- mcp_agent/resources/examples/workflows/human_input.py +0 -25
- mcp_agent/resources/examples/workflows/orchestrator.py +0 -73
- mcp_agent/resources/examples/workflows/parallel.py +0 -78
- mcp_agent/resources/examples/workflows/router.py +0 -53
- mcp_agent/resources/examples/workflows/sse.py +0 -23
- mcp_agent/telemetry/__init__.py +0 -0
- mcp_agent/telemetry/usage_tracking.py +0 -18
- mcp_agent/workflows/__init__.py +0 -0
- mcp_agent/workflows/embedding/__init__.py +0 -0
- mcp_agent/workflows/embedding/embedding_base.py +0 -61
- mcp_agent/workflows/embedding/embedding_cohere.py +0 -49
- mcp_agent/workflows/embedding/embedding_openai.py +0 -46
- mcp_agent/workflows/evaluator_optimizer/__init__.py +0 -0
- mcp_agent/workflows/evaluator_optimizer/evaluator_optimizer.py +0 -481
- mcp_agent/workflows/intent_classifier/__init__.py +0 -0
- mcp_agent/workflows/intent_classifier/intent_classifier_base.py +0 -120
- mcp_agent/workflows/intent_classifier/intent_classifier_embedding.py +0 -134
- mcp_agent/workflows/intent_classifier/intent_classifier_embedding_cohere.py +0 -45
- mcp_agent/workflows/intent_classifier/intent_classifier_embedding_openai.py +0 -45
- mcp_agent/workflows/intent_classifier/intent_classifier_llm.py +0 -161
- mcp_agent/workflows/intent_classifier/intent_classifier_llm_anthropic.py +0 -60
- mcp_agent/workflows/intent_classifier/intent_classifier_llm_openai.py +0 -60
- mcp_agent/workflows/llm/__init__.py +0 -0
- mcp_agent/workflows/llm/augmented_llm.py +0 -753
- mcp_agent/workflows/llm/augmented_llm_passthrough.py +0 -241
- mcp_agent/workflows/llm/augmented_llm_playback.py +0 -109
- mcp_agent/workflows/llm/providers/__init__.py +0 -8
- mcp_agent/workflows/llm/sampling_format_converter.py +0 -22
- mcp_agent/workflows/orchestrator/__init__.py +0 -0
- mcp_agent/workflows/orchestrator/orchestrator.py +0 -578
- mcp_agent/workflows/parallel/__init__.py +0 -0
- mcp_agent/workflows/parallel/fan_in.py +0 -350
- mcp_agent/workflows/parallel/fan_out.py +0 -187
- mcp_agent/workflows/parallel/parallel_llm.py +0 -166
- mcp_agent/workflows/router/__init__.py +0 -0
- mcp_agent/workflows/router/router_base.py +0 -368
- mcp_agent/workflows/router/router_embedding.py +0 -240
- mcp_agent/workflows/router/router_embedding_cohere.py +0 -59
- mcp_agent/workflows/router/router_embedding_openai.py +0 -59
- mcp_agent/workflows/router/router_llm.py +0 -320
- mcp_agent/workflows/swarm/__init__.py +0 -0
- mcp_agent/workflows/swarm/swarm.py +0 -320
- mcp_agent/workflows/swarm/swarm_anthropic.py +0 -42
- mcp_agent/workflows/swarm/swarm_openai.py +0 -41
- {fast_agent_mcp-0.1.12.dist-info → fast_agent_mcp-0.2.0.dist-info}/WHEEL +0 -0
- {fast_agent_mcp-0.1.12.dist-info → fast_agent_mcp-0.2.0.dist-info}/entry_points.txt +0 -0
- {fast_agent_mcp-0.1.12.dist-info → fast_agent_mcp-0.2.0.dist-info}/licenses/LICENSE +0 -0
- /mcp_agent/{workflows/orchestrator → agents/workflow}/orchestrator_prompts.py +0 -0
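
The restructure is the headline change in 0.2.0: the `mcp_agent/workflows` tree (embedding, intent-classifier, swarm, and the old orchestrator/parallel/router implementations) is removed, the LLM layer moves to a flatter `mcp_agent/llm` package with provider code under `mcp_agent/llm/providers`, and the workflow agents are reimplemented under `mcp_agent/agents/workflow`. A minimal import-migration sketch for downstream code, assuming the class names survive the move unchanged (this diff lists only file paths, not exported symbols):

```python
# 0.1.12 paths, removed in 0.2.0:
#   from mcp_agent.workflows.llm.augmented_llm import AugmentedLLM
#   from mcp_agent.workflows.llm.augmented_llm_passthrough import PassthroughLLM
#   from mcp_agent.workflows.llm.providers.sampling_converter_anthropic import (
#       AnthropicSamplingConverter,
#   )

# 0.2.0 paths, per the renames/additions listed above (class names assumed unchanged):
from mcp_agent.llm.augmented_llm import AugmentedLLM
from mcp_agent.llm.augmented_llm_passthrough import PassthroughLLM
from mcp_agent.llm.providers.sampling_converter_anthropic import AnthropicSamplingConverter
```

The four deleted files that the viewer expands in full are reconstructed below.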
```diff
--- a/mcp_agent/workflows/llm/augmented_llm_passthrough.py
+++ /dev/null
@@ -1,241 +0,0 @@
-from typing import Any, List, Optional, Type, Union
-import json  # Import at the module level
-from mcp import GetPromptResult
-from mcp.types import PromptMessage
-from pydantic_core import from_json
-from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart
-from mcp_agent.workflows.llm.augmented_llm import (
-    AugmentedLLM,
-    MessageParamT,
-    MessageT,
-    ModelT,
-    RequestParams,
-)
-from mcp_agent.logging.logger import get_logger
-
-
-class PassthroughLLM(AugmentedLLM):
-    """
-    A specialized LLM implementation that simply passes through input messages without modification.
-
-    This is useful for cases where you need an object with the AugmentedLLM interface
-    but want to preserve the original message without any processing, such as in a
-    parallel workflow where no fan-in aggregation is needed.
-    """
-
-    def __init__(self, name: str = "Passthrough", context=None, **kwargs):
-        super().__init__(name=name, context=context, **kwargs)
-        self.provider = "fast-agent"
-        # Initialize logger - keep it simple without name reference
-        self.logger = get_logger(__name__)
-        self._messages = [PromptMessage]
-
-    async def generate(
-        self,
-        message: Union[str, MessageParamT, List[MessageParamT]],
-        request_params: Optional[RequestParams] = None,
-    ) -> Union[List[MessageT], Any]:
-        """Simply return the input message as is."""
-        # Return in the format expected by the caller
-        return [message] if isinstance(message, list) else message
-
-    async def generate_str(
-        self,
-        message: Union[str, MessageParamT, List[MessageParamT]],
-        request_params: Optional[RequestParams] = None,
-    ) -> str:
-        """Return the input message as a string."""
-        # Check if this is a special command to call a tool
-        if isinstance(message, str) and message.startswith("***CALL_TOOL "):
-            return await self._call_tool_and_return_result(message)
-
-        self.show_user_message(message, model="fastagent-passthrough", chat_turn=0)
-        await self.show_assistant_message(message, title="ASSISTANT/PASSTHROUGH")
-
-        # Handle PromptMessage by concatenating all parts
-        if isinstance(message, PromptMessage):
-            parts_text = []
-            for part in message.content:
-                parts_text.append(str(part))
-            return "\n".join(parts_text)
-
-        return str(message)
-
-    async def _call_tool_and_return_result(self, command: str) -> str:
-        """
-        Call a tool based on the command and return its result as a string.
-
-        Args:
-            command: The command string, expected format: "***CALL_TOOL <server>-<tool_name> [arguments_json]"
-
-        Returns:
-            Tool result as a string
-        """
-        try:
-            tool_name, arguments = self._parse_tool_command(command)
-            result = await self.aggregator.call_tool(tool_name, arguments)
-            return self._format_tool_result(tool_name, result)
-        except Exception as e:
-            self.logger.error(f"Error calling tool: {str(e)}")
-            return f"Error calling tool: {str(e)}"
-
-    def _parse_tool_command(self, command: str) -> tuple[str, Optional[dict]]:
-        """
-        Parse a tool command string into tool name and arguments.
-
-        Args:
-            command: The command string in format "***CALL_TOOL <tool_name> [arguments_json]"
-
-        Returns:
-            Tuple of (tool_name, arguments_dict)
-
-        Raises:
-            ValueError: If command format is invalid
-        """
-        parts = command.split(" ", 2)
-        if len(parts) < 2:
-            raise ValueError(
-                "Invalid format. Expected '***CALL_TOOL <tool_name> [arguments_json]'"
-            )
-
-        tool_name = parts[1].strip()
-        arguments = None
-
-        if len(parts) > 2:
-            try:
-                arguments = json.loads(parts[2])
-            except json.JSONDecodeError:
-                raise ValueError(f"Invalid JSON arguments: {parts[2]}")
-
-        self.logger.info(f"Calling tool {tool_name} with arguments {arguments}")
-        return tool_name, arguments
-
-    def _format_tool_result(self, tool_name: str, result) -> str:
-        """
-        Format tool execution result as a string.
-
-        Args:
-            tool_name: The name of the tool that was called
-            result: The result returned from the tool
-
-        Returns:
-            Formatted result as a string
-        """
-        if result.isError:
-            error_text = []
-            for content_item in result.content:
-                if hasattr(content_item, "text"):
-                    error_text.append(content_item.text)
-                else:
-                    error_text.append(str(content_item))
-            error_message = "\n".join(error_text) if error_text else "Unknown error"
-            return f"Error calling tool '{tool_name}': {error_message}"
-
-        result_text = []
-        for content_item in result.content:
-            if hasattr(content_item, "text"):
-                result_text.append(content_item.text)
-            else:
-                result_text.append(str(content_item))
-
-        return "\n".join(result_text)
-
-    async def generate_structured(
-        self,
-        message: Union[str, MessageParamT, List[MessageParamT]],
-        response_model: Type[ModelT],
-        request_params: Optional[RequestParams] = None,
-    ) -> ModelT:
-        """
-        Return the input message as the requested model type.
-        This is a best-effort implementation - it may fail if the
-        message cannot be converted to the requested model.
-        """
-        if isinstance(message, response_model):
-            return message
-        elif isinstance(message, dict):
-            return response_model(**message)
-        elif isinstance(message, str):
-            return response_model.model_validate(from_json(message, allow_partial=True))
-
-    async def generate_prompt(
-        self, prompt: "PromptMessageMultipart", request_params: RequestParams | None
-    ) -> str:
-        # Check if this prompt contains a tool call command
-        if (
-            prompt.content
-            and prompt.content[0].text
-            and prompt.content[0].text.startswith("***CALL_TOOL ")
-        ):
-            return await self._call_tool_and_return_result(prompt.content[0].text)
-
-        # Process all parts of the PromptMessageMultipart
-        parts_text = []
-        for part in prompt.content:
-            parts_text.append(str(part))
-
-        # If no parts found, return empty string
-        if not parts_text:
-            return ""
-
-        # Join all parts and process with generate_str
-        return await self.generate_str("\n".join(parts_text), request_params)
-
-    async def apply_prompt(
-        self,
-        multipart_messages: List["PromptMessageMultipart"],
-        request_params: Optional[RequestParams] = None,
-    ) -> str:
-        """
-        Apply a list of PromptMessageMultipart messages directly to the LLM.
-        In PassthroughLLM, this returns a concatenated string of all message content.
-
-        Args:
-            multipart_messages: List of PromptMessageMultipart objects
-            request_params: Optional parameters to configure the LLM request
-
-        Returns:
-            String representation of all message content concatenated together
-        """
-        # Generate and concatenate result from all messages
-        result = ""
-        for prompt in multipart_messages:
-            result += await self.generate_prompt(prompt, request_params) + "\n"
-
-        return result
-
-    async def apply_prompt_template(
-        self, prompt_result: GetPromptResult, prompt_name: str
-    ) -> str:
-        """
-        Apply a prompt template by adding it to the conversation history.
-        For PassthroughLLM, this returns all content concatenated together.
-
-        Args:
-            prompt_result: The GetPromptResult containing prompt messages
-            prompt_name: The name of the prompt being applied
-
-        Returns:
-            String representation of all message content concatenated together
-        """
-        prompt_messages: List[PromptMessage] = prompt_result.messages
-
-        # Extract arguments if they were stored in the result
-        arguments = getattr(prompt_result, "arguments", None)
-
-        # Display information about the loaded prompt
-        await self.show_prompt_loaded(
-            prompt_name=prompt_name,
-            description=prompt_result.description,
-            message_count=len(prompt_messages),
-            arguments=arguments,
-        )
-        self._messages = prompt_messages
-
-        # Convert prompt messages to multipart format
-        multipart_messages = PromptMessageMultipart.from_prompt_messages(
-            prompt_messages
-        )
-
-        # Use apply_prompt to handle the multipart messages
-        return await self.apply_prompt(multipart_messages)
```
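
The removed PassthroughLLM is mostly an echo implementation, with one quirk worth noting: `generate_str` intercepts strings beginning with `***CALL_TOOL ` and dispatches them through the server aggregator. A behavior sketch of that 0.1.12 code (`llm` is a hypothetical, already-initialized PassthroughLLM, and the tool name is invented; whether the 0.2.0 replacement in `mcp_agent/llm/augmented_llm_passthrough.py` keeps this convention is not shown in this diff):

```python
# Behavior sketch of the removed 0.1.12 PassthroughLLM (not a 0.2.0 API).
async def demo(llm) -> None:
    # Ordinary input is echoed back unchanged.
    assert await llm.generate_str("hello") == "hello"

    # "***CALL_TOOL <server>-<tool_name> [arguments_json]" is parsed by
    # _parse_tool_command() and routed to llm.aggregator.call_tool();
    # the tool's text content comes back as the reply string.
    reply = await llm.generate_str(
        '***CALL_TOOL my_server-fetch {"url": "https://example.com"}'
    )
    print(reply)
```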
```diff
--- a/mcp_agent/workflows/llm/augmented_llm_playback.py
+++ /dev/null
@@ -1,109 +0,0 @@
-from typing import List, Optional, Union
-from mcp import GetPromptResult
-from mcp.types import PromptMessage
-from mcp_agent.workflows.llm.augmented_llm import MessageParamT, RequestParams
-from mcp_agent.workflows.llm.augmented_llm_passthrough import PassthroughLLM
-
-
-# TODO -- support tool calling
-class PlaybackLLM(PassthroughLLM):
-    """
-    A specialized LLM implementation that plays back assistant messages when loaded with prompts.
-
-    Unlike the PassthroughLLM which simply passes through messages without modification,
-    PlaybackLLM is designed to simulate a conversation by playing back prompt messages
-    in sequence when loaded with prompts through apply_prompt_template.
-
-    After apply_prompts has been called, each call to generate_str returns the next
-    "ASSISTANT" message in the loaded messages. If no messages are set or all messages have
-    been played back, it returns a message indicating that messages are exhausted.
-    """
-
-    def __init__(self, name: str = "Playback", **kwargs):
-        super().__init__(name=name, **kwargs)
-        self._messages: List[PromptMessage] = []
-        self._current_index = 0
-
-    async def generate_str(
-        self,
-        message: Union[str, MessageParamT, List[MessageParamT]],
-        request_params: Optional[RequestParams] = None,
-    ) -> str:
-        """
-        Return the next ASSISTANT message in the loaded messages list.
-        If no messages are available or all have been played back,
-        returns a message indicating messages are exhausted.
-
-        Note: Only assistant messages are returned; user messages are skipped.
-        """
-        self.show_user_message(message, model="fastagent-playback", chat_turn=0)
-
-        if not self._messages or self._current_index >= len(self._messages):
-            size = len(self._messages) if self._messages else 0
-            response = f"MESSAGES EXHAUSTED (list size {size})"
-        else:
-            response = self._get_next_assistant_message()
-
-        await self.show_assistant_message(response, title="ASSISTANT/PLAYBACK")
-        return response
-
-    def _get_next_assistant_message(self) -> str:
-        """
-        Get the next assistant message from the loaded messages.
-        Increments the current message index and skips user messages.
-        """
-        # Find next assistant message
-        while self._current_index < len(self._messages):
-            message = self._messages[self._current_index]
-            self._current_index += 1
-
-            # Skip non-assistant messages
-            if getattr(message, "role", None) != "assistant":
-                continue
-
-            # Get content as string
-            content = message.content
-            if hasattr(content, "text"):
-                return content.text
-            return str(content)
-
-        # If we get here, we've run out of assistant messages
-        return f"MESSAGES EXHAUSTED (list size {len(self._messages)})"
-
-    async def apply_prompt_template(
-        self, prompt_result: GetPromptResult, prompt_name: str
-    ) -> str:
-        """
-        Apply a prompt template by adding its messages to the playback queue.
-
-        Args:
-            prompt_result: The GetPromptResult containing prompt messages
-            prompt_name: The name of the prompt being applied
-
-        Returns:
-            String representation of the first message or an indication that no messages were added
-        """
-        prompt_messages: List[PromptMessage] = prompt_result.messages
-
-        # Extract arguments if they were stored in the result
-        arguments = getattr(prompt_result, "arguments", None)
-
-        # Display information about the loaded prompt
-        await self.show_prompt_loaded(
-            prompt_name=prompt_name,
-            description=prompt_result.description,
-            message_count=len(prompt_messages),
-            arguments=arguments,
-        )
-
-        # Add new messages to the end of the existing messages list
-        self._messages.extend(prompt_messages)
-
-        if not prompt_messages:
-            return "Prompt contains no messages"
-
-        # Reset current index if this is the first time loading messages
-        if len(self._messages) == len(prompt_messages):
-            self._current_index = 0
-
-        return f"Added {len(prompt_messages)} messages to playback queue"
```
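
PlaybackLLM, by contrast, queues the messages from each applied prompt template and replays only the assistant turns, one per `generate_str` call, returning a `MESSAGES EXHAUSTED (list size N)` sentinel once drained. A short sketch of that removed 0.1.12 behavior (`playback` and `prompt_result` are hypothetical, pre-constructed objects):

```python
async def replay(playback, prompt_result) -> None:
    # Queue the template's messages; user and assistant turns are both stored.
    status = await playback.apply_prompt_template(prompt_result, "demo-prompt")
    print(status)  # e.g. "Added 4 messages to playback queue"

    # Each call returns the next assistant turn; user turns are skipped.
    print(await playback.generate_str("ignored"))  # 1st assistant message
    print(await playback.generate_str("ignored"))  # 2nd assistant message

    # After the queue is drained, the sentinel comes back:
    print(await playback.generate_str("ignored"))  # "MESSAGES EXHAUSTED (list size 4)"
```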
```diff
--- a/mcp_agent/workflows/llm/providers/__init__.py
+++ /dev/null
@@ -1,8 +0,0 @@
-from mcp_agent.workflows.llm.providers.sampling_converter_anthropic import (
-    AnthropicSamplingConverter,
-)
-from mcp_agent.workflows.llm.providers.sampling_converter_openai import (
-    OpenAISamplingConverter,
-)
-
-__all__ = ["AnthropicSamplingConverter", "OpenAISamplingConverter"]
```
```diff
--- a/mcp_agent/workflows/llm/sampling_format_converter.py
+++ /dev/null
@@ -1,22 +0,0 @@
-from typing import Generic, List, Protocol, TypeVar
-
-
-# Define type variables here instead of importing from augmented_llm
-MessageParamT = TypeVar("MessageParamT")
-"""A type representing an input message to an LLM."""
-
-MessageT = TypeVar("MessageT")
-"""A type representing an output message from an LLM."""
-
-
-class SamplingFormatConverter(Protocol, Generic[MessageParamT, MessageT]):
-    """Conversions between LLM provider and MCP types"""
-
-    @classmethod
-    def from_prompt_message(cls, message) -> MessageParamT:
-        """Convert an MCP PromptMessage to a provider-specific message parameter."""
-
-
-def typed_dict_extras(d: dict, exclude: List[str]):
-    extras = {k: v for k, v in d.items() if k not in exclude}
-    return extras
```
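
The removed SamplingFormatConverter protocol required only a `from_prompt_message` classmethod; its successor, the new `mcp_agent/llm/sampling_format_converter.py` (+37), is not expanded in this diff. For reference, a toy converter that structurally satisfies the removed protocol (`ToyMessage` is invented for illustration):

```python
from dataclasses import dataclass

from mcp.types import PromptMessage


@dataclass
class ToyMessage:
    """Invented provider-side message type, for illustration only."""
    role: str
    text: str


class ToySamplingConverter:
    """Structurally satisfies the removed SamplingFormatConverter protocol."""

    @classmethod
    def from_prompt_message(cls, message: PromptMessage) -> ToyMessage:
        # PromptMessage.content may expose .text (text content) or not;
        # fall back to str(), as the removed passthrough code did.
        text = getattr(message.content, "text", str(message.content))
        return ToyMessage(role=message.role, text=text)
```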