fast-agent-mcp 0.1.13__py3-none-any.whl → 0.2.0__py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.
- {fast_agent_mcp-0.1.13.dist-info → fast_agent_mcp-0.2.0.dist-info}/METADATA +3 -4
- fast_agent_mcp-0.2.0.dist-info/RECORD +123 -0
- mcp_agent/__init__.py +75 -0
- mcp_agent/agents/agent.py +59 -371
- mcp_agent/agents/base_agent.py +522 -0
- mcp_agent/agents/workflow/__init__.py +1 -0
- mcp_agent/agents/workflow/chain_agent.py +173 -0
- mcp_agent/agents/workflow/evaluator_optimizer.py +362 -0
- mcp_agent/agents/workflow/orchestrator_agent.py +591 -0
- mcp_agent/{workflows/orchestrator → agents/workflow}/orchestrator_models.py +27 -11
- mcp_agent/agents/workflow/parallel_agent.py +182 -0
- mcp_agent/agents/workflow/router_agent.py +307 -0
- mcp_agent/app.py +3 -1
- mcp_agent/cli/commands/bootstrap.py +18 -7
- mcp_agent/cli/commands/setup.py +12 -4
- mcp_agent/cli/main.py +1 -1
- mcp_agent/cli/terminal.py +1 -1
- mcp_agent/config.py +24 -35
- mcp_agent/context.py +3 -1
- mcp_agent/context_dependent.py +3 -1
- mcp_agent/core/agent_types.py +10 -7
- mcp_agent/core/direct_agent_app.py +179 -0
- mcp_agent/core/direct_decorators.py +443 -0
- mcp_agent/core/direct_factory.py +476 -0
- mcp_agent/core/enhanced_prompt.py +15 -20
- mcp_agent/core/fastagent.py +151 -337
- mcp_agent/core/interactive_prompt.py +424 -0
- mcp_agent/core/mcp_content.py +19 -11
- mcp_agent/core/prompt.py +6 -2
- mcp_agent/core/validation.py +89 -16
- mcp_agent/executor/decorator_registry.py +6 -2
- mcp_agent/executor/temporal.py +35 -11
- mcp_agent/executor/workflow_signal.py +8 -2
- mcp_agent/human_input/handler.py +3 -1
- mcp_agent/llm/__init__.py +2 -0
- mcp_agent/{workflows/llm → llm}/augmented_llm.py +131 -256
- mcp_agent/{workflows/llm → llm}/augmented_llm_passthrough.py +35 -107
- mcp_agent/llm/augmented_llm_playback.py +83 -0
- mcp_agent/{workflows/llm → llm}/model_factory.py +26 -8
- mcp_agent/llm/providers/__init__.py +8 -0
- mcp_agent/{workflows/llm → llm/providers}/anthropic_utils.py +5 -1
- mcp_agent/{workflows/llm → llm/providers}/augmented_llm_anthropic.py +37 -141
- mcp_agent/llm/providers/augmented_llm_deepseek.py +53 -0
- mcp_agent/{workflows/llm → llm/providers}/augmented_llm_openai.py +112 -148
- mcp_agent/{workflows/llm → llm}/providers/multipart_converter_anthropic.py +78 -35
- mcp_agent/{workflows/llm → llm}/providers/multipart_converter_openai.py +73 -44
- mcp_agent/{workflows/llm → llm}/providers/openai_multipart.py +18 -4
- mcp_agent/{workflows/llm → llm/providers}/openai_utils.py +3 -3
- mcp_agent/{workflows/llm → llm}/providers/sampling_converter_anthropic.py +3 -3
- mcp_agent/{workflows/llm → llm}/providers/sampling_converter_openai.py +3 -3
- mcp_agent/{workflows/llm → llm}/sampling_converter.py +0 -21
- mcp_agent/{workflows/llm → llm}/sampling_format_converter.py +16 -1
- mcp_agent/logging/logger.py +2 -2
- mcp_agent/mcp/gen_client.py +9 -3
- mcp_agent/mcp/interfaces.py +67 -45
- mcp_agent/mcp/logger_textio.py +97 -0
- mcp_agent/mcp/mcp_agent_client_session.py +12 -4
- mcp_agent/mcp/mcp_agent_server.py +3 -1
- mcp_agent/mcp/mcp_aggregator.py +124 -93
- mcp_agent/mcp/mcp_connection_manager.py +21 -7
- mcp_agent/mcp/prompt_message_multipart.py +59 -1
- mcp_agent/mcp/prompt_render.py +77 -0
- mcp_agent/mcp/prompt_serialization.py +20 -13
- mcp_agent/mcp/prompts/prompt_constants.py +18 -0
- mcp_agent/mcp/prompts/prompt_helpers.py +327 -0
- mcp_agent/mcp/prompts/prompt_load.py +15 -5
- mcp_agent/mcp/prompts/prompt_server.py +154 -87
- mcp_agent/mcp/prompts/prompt_template.py +26 -35
- mcp_agent/mcp/resource_utils.py +3 -1
- mcp_agent/mcp/sampling.py +24 -15
- mcp_agent/mcp_server/agent_server.py +8 -5
- mcp_agent/mcp_server_registry.py +22 -9
- mcp_agent/resources/examples/{workflows → in_dev}/agent_build.py +1 -1
- mcp_agent/resources/examples/{data-analysis → in_dev}/slides.py +1 -1
- mcp_agent/resources/examples/internal/agent.py +4 -2
- mcp_agent/resources/examples/internal/fastagent.config.yaml +8 -2
- mcp_agent/resources/examples/prompting/image_server.py +3 -1
- mcp_agent/resources/examples/prompting/work_with_image.py +19 -0
- mcp_agent/ui/console_display.py +27 -7
- fast_agent_mcp-0.1.13.dist-info/RECORD +0 -164
- mcp_agent/core/agent_app.py +0 -570
- mcp_agent/core/agent_utils.py +0 -69
- mcp_agent/core/decorators.py +0 -448
- mcp_agent/core/factory.py +0 -422
- mcp_agent/core/proxies.py +0 -278
- mcp_agent/core/types.py +0 -22
- mcp_agent/eval/__init__.py +0 -0
- mcp_agent/mcp/stdio.py +0 -114
- mcp_agent/resources/examples/data-analysis/analysis-campaign.py +0 -188
- mcp_agent/resources/examples/data-analysis/analysis.py +0 -65
- mcp_agent/resources/examples/data-analysis/fastagent.config.yaml +0 -41
- mcp_agent/resources/examples/data-analysis/mount-point/WA_Fn-UseC_-HR-Employee-Attrition.csv +0 -1471
- mcp_agent/resources/examples/mcp_researcher/researcher-eval.py +0 -53
- mcp_agent/resources/examples/researcher/fastagent.config.yaml +0 -66
- mcp_agent/resources/examples/researcher/researcher-eval.py +0 -53
- mcp_agent/resources/examples/researcher/researcher-imp.py +0 -189
- mcp_agent/resources/examples/researcher/researcher.py +0 -39
- mcp_agent/resources/examples/workflows/chaining.py +0 -45
- mcp_agent/resources/examples/workflows/evaluator.py +0 -79
- mcp_agent/resources/examples/workflows/fastagent.config.yaml +0 -24
- mcp_agent/resources/examples/workflows/human_input.py +0 -26
- mcp_agent/resources/examples/workflows/orchestrator.py +0 -74
- mcp_agent/resources/examples/workflows/parallel.py +0 -79
- mcp_agent/resources/examples/workflows/router.py +0 -54
- mcp_agent/resources/examples/workflows/sse.py +0 -23
- mcp_agent/telemetry/__init__.py +0 -0
- mcp_agent/telemetry/usage_tracking.py +0 -19
- mcp_agent/workflows/__init__.py +0 -0
- mcp_agent/workflows/embedding/__init__.py +0 -0
- mcp_agent/workflows/embedding/embedding_base.py +0 -58
- mcp_agent/workflows/embedding/embedding_cohere.py +0 -49
- mcp_agent/workflows/embedding/embedding_openai.py +0 -37
- mcp_agent/workflows/evaluator_optimizer/__init__.py +0 -0
- mcp_agent/workflows/evaluator_optimizer/evaluator_optimizer.py +0 -447
- mcp_agent/workflows/intent_classifier/__init__.py +0 -0
- mcp_agent/workflows/intent_classifier/intent_classifier_base.py +0 -117
- mcp_agent/workflows/intent_classifier/intent_classifier_embedding.py +0 -130
- mcp_agent/workflows/intent_classifier/intent_classifier_embedding_cohere.py +0 -41
- mcp_agent/workflows/intent_classifier/intent_classifier_embedding_openai.py +0 -41
- mcp_agent/workflows/intent_classifier/intent_classifier_llm.py +0 -150
- mcp_agent/workflows/intent_classifier/intent_classifier_llm_anthropic.py +0 -60
- mcp_agent/workflows/intent_classifier/intent_classifier_llm_openai.py +0 -58
- mcp_agent/workflows/llm/__init__.py +0 -0
- mcp_agent/workflows/llm/augmented_llm_playback.py +0 -111
- mcp_agent/workflows/llm/providers/__init__.py +0 -8
- mcp_agent/workflows/orchestrator/__init__.py +0 -0
- mcp_agent/workflows/orchestrator/orchestrator.py +0 -535
- mcp_agent/workflows/parallel/__init__.py +0 -0
- mcp_agent/workflows/parallel/fan_in.py +0 -320
- mcp_agent/workflows/parallel/fan_out.py +0 -181
- mcp_agent/workflows/parallel/parallel_llm.py +0 -149
- mcp_agent/workflows/router/__init__.py +0 -0
- mcp_agent/workflows/router/router_base.py +0 -338
- mcp_agent/workflows/router/router_embedding.py +0 -226
- mcp_agent/workflows/router/router_embedding_cohere.py +0 -59
- mcp_agent/workflows/router/router_embedding_openai.py +0 -59
- mcp_agent/workflows/router/router_llm.py +0 -304
- mcp_agent/workflows/swarm/__init__.py +0 -0
- mcp_agent/workflows/swarm/swarm.py +0 -292
- mcp_agent/workflows/swarm/swarm_anthropic.py +0 -42
- mcp_agent/workflows/swarm/swarm_openai.py +0 -41
- {fast_agent_mcp-0.1.13.dist-info → fast_agent_mcp-0.2.0.dist-info}/WHEEL +0 -0
- {fast_agent_mcp-0.1.13.dist-info → fast_agent_mcp-0.2.0.dist-info}/entry_points.txt +0 -0
- {fast_agent_mcp-0.1.13.dist-info → fast_agent_mcp-0.2.0.dist-info}/licenses/LICENSE +0 -0
- /mcp_agent/{workflows/orchestrator → agents/workflow}/orchestrator_prompts.py +0 -0
- /mcp_agent/{workflows/llm → llm}/memory.py +0 -0
- /mcp_agent/{workflows/llm → llm}/prompt_utils.py +0 -0
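Most of the churn above is a package reorganisation: the workflows/llm tree splits into llm and llm/providers, and the workflow orchestration modules move under agents/workflow. For downstream code this is chiefly an import-path migration. A small compatibility sketch follows; the AugmentedLLMProtocol move is taken from the agent.py diff below, and other symbols would need the same treatment at their own new paths:

# Sketch: resolve an import across the 0.1.x -> 0.2.0 module moves listed above.
try:
    # 0.2.0 layout: the protocol now lives in mcp_agent.mcp.interfaces
    from mcp_agent.mcp.interfaces import AugmentedLLMProtocol
except ImportError:
    # 0.1.x layout: old home under the removed workflows package
    from mcp_agent.workflows.llm.augmented_llm import AugmentedLLMProtocol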
mcp_agent/agents/agent.py
CHANGED
@@ -1,408 +1,96 @@
-
-
-from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, TypeVar, Union
+"""
+Agent implementation using the clean BaseAgent adapter.
 
-
-
-
-    EmbeddedResource,
-    ListToolsResult,
-    ReadResourceResult,
-    TextContent,
-    Tool,
-)
+This provides a streamlined implementation that adheres to AgentProtocol
+while delegating LLM operations to an attached AugmentedLLMProtocol instance.
+"""
 
+from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, TypeVar
+
+from mcp_agent.agents.base_agent import BaseAgent
 from mcp_agent.core.agent_types import AgentConfig
-from mcp_agent.core.
-from mcp_agent.
-from mcp_agent.human_input.types import (
-    HUMAN_INPUT_SIGNAL_NAME,
-    HumanInputCallback,
-    HumanInputRequest,
-    HumanInputResponse,
-)
+from mcp_agent.core.interactive_prompt import InteractivePrompt
+from mcp_agent.human_input.types import HumanInputCallback
 from mcp_agent.logging.logger import get_logger
-from mcp_agent.mcp.
-from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart
-from mcp_agent.workflows.llm.augmented_llm import AugmentedLLMProtocol
+from mcp_agent.mcp.interfaces import AugmentedLLMProtocol
 
 if TYPE_CHECKING:
     from mcp_agent.context import Context
-import traceback
 
 logger = get_logger(__name__)
 
 # Define a TypeVar for AugmentedLLM and its subclasses
 LLM = TypeVar("LLM", bound=AugmentedLLMProtocol)
 
-HUMAN_INPUT_TOOL_NAME = "__human_input__"
-
 
-class Agent(
+class Agent(BaseAgent):
     """
     An Agent is an entity that has access to a set of MCP servers and can interact with them.
     Each agent should have a purpose defined by its instruction.
+
+    This implementation provides a clean adapter that adheres to AgentProtocol
+    while delegating LLM operations to an attached AugmentedLLMProtocol instance.
     """
 
     def __init__(
         self,
-        config:
-        instruction: Optional[Union[str, Callable[[Dict], str]]] = None,
-        server_names: Optional[List[str]] = None,
+        config: AgentConfig,  # Can be AgentConfig or backward compatible str name
         functions: Optional[List[Callable]] = None,
         connection_persistence: bool = True,
         human_input_callback: Optional[HumanInputCallback] = None,
        context: Optional["Context"] = None,
-        **kwargs:
+        **kwargs: Dict[str, Any],
     ) -> None:
-        #
-        if isinstance(config, str):
-            self.config = AgentConfig(
-                name=config,
-                instruction=instruction or "You are a helpful agent.",
-                servers=server_names or [],
-            )
-        else:
-            self.config = config
-
+        # Initialize with BaseAgent constructor
         super().__init__(
-
-
+            config=config,
+            functions=functions,
             connection_persistence=connection_persistence,
-
+            human_input_callback=human_input_callback,
+            context=context,
            **kwargs,
         )
 
-
-        self.instruction = self.config.instruction
-        self.functions = functions or []
-        self.executor = self.context.executor
-        self.logger = get_logger(f"{__name__}.{self.name}")
-
-        # Store the default request params from config
-        self._default_request_params = self.config.default_request_params
-
-        # Map function names to tools
-        self._function_tool_map: Dict[str, FastTool] = {}
-
-        if not self.config.human_input:
-            self.human_input_callback = None
-        else:
-            self.human_input_callback: HumanInputCallback | None = human_input_callback
-            if not human_input_callback:
-                if self.context.human_input_handler:
-                    self.human_input_callback = self.context.human_input_handler
-
-    async def initialize(self) -> None:
-        """
-        Initialize the agent and connect to the MCP servers.
-        NOTE: This method is called automatically when the agent is used as an async context manager.
-        """
-        await self.__aenter__()  # This initializes the connection manager and loads the servers
-
-        for function in self.functions:
-            tool: FastTool = FastTool.from_function(function)
-            self._function_tool_map[tool.name] = tool
-
-    async def attach_llm(self, llm_factory: Callable[..., LLM]) -> LLM:
-        """
-        Create an LLM instance for the agent.
-
-        Args:
-            llm_factory: A callable that constructs an AugmentedLLM or its subclass.
-                The factory should accept keyword arguments matching the
-                AugmentedLLM constructor parameters.
-
-        Returns:
-            An instance of AugmentedLLM or one of its subclasses.
-        """
-        return llm_factory(agent=self, default_request_params=self._default_request_params)
-
-    async def shutdown(self) -> None:
-        """
-        Shutdown the agent and close all MCP server connections.
-        NOTE: This method is called automatically when the agent is used as an async context manager.
-        """
-        await super().close()
-
-    async def request_human_input(self, request: HumanInputRequest) -> str:
+    async def prompt(self, default_prompt: str = "", agent_name: Optional[str] = None) -> str:
         """
-
+        Start an interactive prompt session with this agent.
 
         Args:
-
+            default: Default message to use when user presses enter
+            agent_name: Ignored for single agents, included for API compatibility
 
         Returns:
-            The
[... 33 removed lines not preserved in this rendering ...]
-        self.logger.debug("Waiting for human input signal")
-
-        # Wait for signal (workflow is paused here)
-        result = await self.executor.wait_for_signal(
-            signal_name=request_id,
-            request_id=request_id,
-            workflow_id=request.workflow_id,
-            signal_description=request.description or request.prompt,
-            timeout_seconds=request.timeout_seconds,
-            signal_type=HumanInputResponse,  # TODO: saqadri - should this be HumanInputResponse?
+            The result of the interactive session
+        """
+        # Use the agent name as a string - ensure it's not the object itself
+        agent_name_str = str(self.name)
+
+        # Create agent_types dictionary with just this agent
+        agent_types = {agent_name_str: getattr(self.config, "agent_type", "Agent")}
+
+        # Create the interactive prompt
+        prompt = InteractivePrompt(agent_types=agent_types)
+
+        # Define wrapper for send function
+        async def send_wrapper(message, agent_name):
+            return await self.send(message)
+
+        # Define wrapper for apply_prompt function
+        async def apply_prompt_wrapper(prompt_name, args, agent_name):
+            # Just apply the prompt directly
+            return await self.apply_prompt(prompt_name, args)
+
+        # Define wrapper for list_prompts function
+        async def list_prompts_wrapper(agent_name):
+            # Always call list_prompts on this agent regardless of agent_name
+            return await self.list_prompts()
+
+        # Start the prompt loop with just this agent
+        return await prompt.prompt_loop(
+            send_func=send_wrapper,
+            default_agent=agent_name_str,
+            available_agents=[agent_name_str],  # Only this agent
+            apply_prompt_func=apply_prompt_wrapper,
+            list_prompts_func=list_prompts_wrapper,
+            default=default_prompt,
         )
-
-        if isinstance(result, dict) and result.get("exit_requested", False):
-            raise PromptExitError(result.get("error", "User requested to exit FastAgent session"))
-        self.logger.debug("Received human input signal", data=result)
-        return result
-
-    async def list_tools(self) -> ListToolsResult:
-        if not self.initialized:
-            await self.initialize()
-
-        result = await super().list_tools()
-
-        # Add function tools
-        for tool in self._function_tool_map.values():
-            result.tools.append(
-                Tool(
-                    name=tool.name,
-                    description=tool.description,
-                    inputSchema=tool.parameters,
-                )
-            )
-
-        # Add a human_input_callback as a tool
-        if not self.human_input_callback:
-            self.logger.debug("Human input callback not set")
-            return result
-
-        # Add a human_input_callback as a tool
-        human_input_tool: FastTool = FastTool.from_function(self.request_human_input)
-        result.tools.append(
-            Tool(
-                name=HUMAN_INPUT_TOOL_NAME,
-                description=human_input_tool.description,
-                inputSchema=human_input_tool.parameters,
-            )
-        )
-
-        return result
-
-    # todo would prefer to use tool_name to disambiguate agent name
-    async def call_tool(self, name: str, arguments: dict | None = None) -> CallToolResult:
-        if name == HUMAN_INPUT_TOOL_NAME:
-            # Call the human input tool
-            return await self._call_human_input_tool(arguments)
-        elif name in self._function_tool_map:
-            # Call local function and return the result as a text response
-            tool = self._function_tool_map[name]
-            result = await tool.run(arguments)
-            return CallToolResult(content=[TextContent(type="text", text=str(result))])
-        else:
-            return await super().call_tool(name, arguments)
-
-    async def _call_human_input_tool(self, arguments: dict | None = None) -> CallToolResult:
-        # Handle human input request
-        try:
-            # Make sure arguments is not None
-            if arguments is None:
-                arguments = {}
-
-            # Extract request data
-            request_data = arguments.get("request")
-
-            # Handle both string and dict request formats
-            if isinstance(request_data, str):
-                request = HumanInputRequest(prompt=request_data)
-            elif isinstance(request_data, dict):
-                request = HumanInputRequest(**request_data)
-            else:
-                # Fallback for invalid or missing request data
-                request = HumanInputRequest(prompt="Please provide input:")
-
-            result = await self.request_human_input(request=request)
-
-            # Use response attribute if available, otherwise use the result directly
-            response_text = result.response if isinstance(result, HumanInputResponse) else str(result)
-
-            return CallToolResult(content=[TextContent(type="text", text=f"Human response: {response_text}")])
-
-        except PromptExitError:
-            raise
-        except TimeoutError as e:
-            return CallToolResult(
-                isError=True,
-                content=[
-                    TextContent(
-                        type="text",
-                        text=f"Error: Human input request timed out: {str(e)}",
-                    )
-                ],
-            )
-        except Exception as e:
-            print(f"Error in _call_human_input_tool: {traceback.format_exc()}")
-
-            return CallToolResult(
-                isError=True,
-                content=[TextContent(type="text", text=f"Error requesting human input: {str(e)}")],
-            )
-
-    async def apply_prompt(self, prompt_name: str, arguments: dict[str, str] | None) -> str:
-        """
-        Apply an MCP Server Prompt by name and return the assistant's response.
-        Will search all available servers for the prompt if not namespaced.
-
-        If the last message in the prompt is from a user, this will automatically
-        generate an assistant response to ensure we always end with an assistant message.
-
-        Args:
-            prompt_name: The name of the prompt to apply
-            arguments: Optional dictionary of string arguments to pass to the prompt template
-
-        Returns:
-            The assistant's response or error message
-        """
-        # If we don't have an LLM, we can't apply the prompt
-        if not hasattr(self, "_llm") or not self._llm:
-            raise RuntimeError("Agent has no attached LLM")
-
-        # Get the prompt - this will search all servers if needed
-        self.logger.debug(f"Loading prompt '{prompt_name}'")
-        prompt_result = await self.get_prompt(prompt_name, arguments)
-
-        if not prompt_result or not prompt_result.messages:
-            error_msg = f"Prompt '{prompt_name}' could not be found or contains no messages"
-            self.logger.warning(error_msg)
-            return error_msg
-
-        # Get the display name (namespaced version)
-        display_name = getattr(prompt_result, "namespaced_name", prompt_name)
-
-        # Apply the prompt template and get the result
-        # The LLM will automatically generate a response if needed
-        result = await self._llm.apply_prompt_template(prompt_result, display_name)
-        return result
-
-    async def get_resource(self, server_name: str, resource_name: str):
-        """
-        Get a resource directly from an MCP server by name.
-
-        Args:
-            server_name: Name of the MCP server to retrieve the resource from
-            resource_name: Name of the resource to retrieve
-
-        Returns:
-            The resource object from the MCP server
-
-        Raises:
-            ValueError: If the server doesn't exist or the resource couldn't be found
-        """
-        if not self.initialized:
-            await self.initialize()
-
-        # Get the specified server connection
-        server = self.get_server(server_name)
-        if not server:
-            raise ValueError(f"Server '{server_name}' not found or not connected")
-
-        # Request the resource directly from the server
-        try:
-            resource_result = await server.get_resource(resource_name)
-            return resource_result
-        except Exception as e:
-            self.logger.error(f"Error retrieving resource '{resource_name}' from server '{server_name}': {str(e)}")
-            raise ValueError(f"Failed to retrieve resource '{resource_name}' from server '{server_name}': {str(e)}")
-
-    async def get_embedded_resources(self, server_name: str, resource_name: str) -> List[EmbeddedResource]:
-        """
-        Get a resource from an MCP server and return it as a list of embedded resources ready for use in prompts.
-
-        Args:
-            server_name: Name of the MCP server to retrieve the resource from
-            resource_name: Name or URI of the resource to retrieve
-
-        Returns:
-            List of EmbeddedResource objects ready to use in a PromptMessageMultipart
-
-        Raises:
-            ValueError: If the server doesn't exist or the resource couldn't be found
-        """
-        # Get the raw resource result
-        result: ReadResourceResult = await super().get_resource(server_name, resource_name)
-
-        # Convert each resource content to an EmbeddedResource
-        embedded_resources: List[EmbeddedResource] = []
-        for resource_content in result.contents:
-            embedded_resource = EmbeddedResource(type="resource", resource=resource_content, annotations=None)
-            embedded_resources.append(embedded_resource)
-
-        return embedded_resources
-
-    async def apply_prompt_messages(self, prompts: List[PromptMessageMultipart], request_params: RequestParams | None) -> str:
-        return self._llm.apply_prompt_messages(prompts, request_params)
-
-    async def with_resource(
-        self,
-        prompt_content: Union[str, PromptMessageMultipart],
-        server_name: str,
-        resource_name: str,
-    ) -> str:
-        """
-        Create a prompt with the given content and resource, then send it to the agent.
-
-        Args:
-            prompt_content: Either a string message or an existing PromptMessageMultipart
-            server_name: Name of the MCP server to retrieve the resource from
-            resource_name: Name or URI of the resource to retrieve
-
-        Returns:
-            The agent's response as a string
-        """
-        # Get the embedded resources
-        embedded_resources: List[EmbeddedResource] = await self.get_embedded_resources(server_name, resource_name)
-
-        # Create or update the prompt message
-        prompt: PromptMessageMultipart
-        if isinstance(prompt_content, str):
-            # Create a new prompt with the text and resources
-            content = [TextContent(type="text", text=prompt_content)]
-            content.extend(embedded_resources)
-            prompt = PromptMessageMultipart(role="user", content=content)
-        elif isinstance(prompt_content, PromptMessageMultipart):
-            # Add resources to the existing prompt
-            prompt = prompt_content
-            prompt.content.extend(embedded_resources)
-        else:
-            raise TypeError("prompt_content must be a string or PromptMessageMultipart")
-
-        # Send the prompt to the agent and return the response
-        return await self._llm.generate_prompt(prompt, None)