fast-agent-mcp 0.2.3__py3-none-any.whl → 0.2.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {fast_agent_mcp-0.2.3.dist-info → fast_agent_mcp-0.2.5.dist-info}/METADATA +13 -9
- {fast_agent_mcp-0.2.3.dist-info → fast_agent_mcp-0.2.5.dist-info}/RECORD +42 -40
- mcp_agent/__init__.py +2 -2
- mcp_agent/agents/agent.py +5 -0
- mcp_agent/agents/base_agent.py +152 -36
- mcp_agent/agents/workflow/chain_agent.py +9 -13
- mcp_agent/agents/workflow/evaluator_optimizer.py +3 -3
- mcp_agent/agents/workflow/orchestrator_agent.py +9 -7
- mcp_agent/agents/workflow/parallel_agent.py +2 -2
- mcp_agent/agents/workflow/router_agent.py +7 -5
- mcp_agent/cli/main.py +11 -0
- mcp_agent/config.py +29 -7
- mcp_agent/context.py +2 -0
- mcp_agent/core/{direct_agent_app.py → agent_app.py} +115 -15
- mcp_agent/core/direct_factory.py +9 -18
- mcp_agent/core/enhanced_prompt.py +3 -3
- mcp_agent/core/fastagent.py +218 -49
- mcp_agent/core/mcp_content.py +38 -5
- mcp_agent/core/prompt.py +70 -8
- mcp_agent/core/validation.py +1 -1
- mcp_agent/llm/augmented_llm.py +44 -16
- mcp_agent/llm/augmented_llm_passthrough.py +3 -1
- mcp_agent/llm/model_factory.py +16 -28
- mcp_agent/llm/providers/augmented_llm_openai.py +3 -3
- mcp_agent/llm/providers/multipart_converter_anthropic.py +8 -8
- mcp_agent/llm/providers/multipart_converter_openai.py +9 -9
- mcp_agent/mcp/helpers/__init__.py +3 -0
- mcp_agent/mcp/helpers/content_helpers.py +116 -0
- mcp_agent/mcp/interfaces.py +39 -16
- mcp_agent/mcp/mcp_aggregator.py +117 -13
- mcp_agent/mcp/prompt_message_multipart.py +29 -22
- mcp_agent/mcp/prompt_render.py +18 -15
- mcp_agent/mcp/prompt_serialization.py +42 -0
- mcp_agent/mcp/prompts/prompt_helpers.py +22 -112
- mcp_agent/mcp/prompts/prompt_load.py +51 -3
- mcp_agent/mcp_server/agent_server.py +62 -13
- mcp_agent/resources/examples/internal/agent.py +2 -2
- mcp_agent/resources/examples/internal/fastagent.config.yaml +5 -0
- mcp_agent/resources/examples/internal/history_transfer.py +35 -0
- mcp_agent/mcp/mcp_agent_server.py +0 -56
- {fast_agent_mcp-0.2.3.dist-info → fast_agent_mcp-0.2.5.dist-info}/WHEEL +0 -0
- {fast_agent_mcp-0.2.3.dist-info → fast_agent_mcp-0.2.5.dist-info}/entry_points.txt +0 -0
- {fast_agent_mcp-0.2.3.dist-info → fast_agent_mcp-0.2.5.dist-info}/licenses/LICENSE +0 -0
mcp_agent/agents/workflow/evaluator_optimizer.py
CHANGED
@@ -8,7 +8,7 @@ or a maximum number of refinements is attempted.
 """
 
 from enum import Enum
-from typing import Any, List, Optional, Type
+from typing import Any, List, Optional, Tuple, Type
 
 from pydantic import BaseModel, Field
 
@@ -139,7 +139,7 @@ class EvaluatorOptimizerAgent(BaseAgent):
 
         # Create evaluation message and get structured evaluation result
        eval_message = Prompt.user(eval_prompt)
-        evaluation_result = await self.evaluator_agent.structured(
+        evaluation_result, _ = await self.evaluator_agent.structured(
            [eval_message], EvaluationResult, request_params
        )
 
@@ -202,7 +202,7 @@ class EvaluatorOptimizerAgent(BaseAgent):
        prompt: List[PromptMessageMultipart],
        model: Type[ModelT],
        request_params: Optional[RequestParams] = None,
-    ) ->
+    ) -> Tuple[ModelT | None, PromptMessageMultipart]:
        """
        Generate an optimized response and parse it into a structured format.
 
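The recurring change across the workflow agents is that structured() now returns a tuple of the parsed model (or None) and the assistant message, rather than the model alone. Below is a minimal caller sketch of unpacking that tuple; the WeatherReport model, fetch_report function, and agent fixture are illustrative and not part of the package.

```python
# Hypothetical caller for the 0.2.5 tuple-returning structured() API.
from typing import Optional

from pydantic import BaseModel

from mcp_agent.core.prompt import Prompt


class WeatherReport(BaseModel):
    city: str
    temperature_c: float
    summary: Optional[str] = None


async def fetch_report(agent) -> WeatherReport | None:
    # structured() now returns (parsed_model_or_None, assistant_message)
    report, message = await agent.structured(
        [Prompt.user("Report the weather for Paris as JSON")],
        WeatherReport,
    )
    if report is None:
        # The raw assistant reply is still available for logging/debugging
        print("Parsing failed; raw assistant message:", message)
    return report
```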
mcp_agent/agents/workflow/orchestrator_agent.py
CHANGED
@@ -5,7 +5,7 @@ This workflow provides an implementation that manages complex tasks by
 dynamically planning, delegating to specialized agents, and synthesizing results.
 """
 
-from typing import Any, Dict, List, Literal, Optional, Type
+from typing import Any, Dict, List, Literal, Optional, Tuple, Type
 
 from mcp.types import TextContent
 
@@ -29,6 +29,7 @@ from mcp_agent.agents.workflow.orchestrator_prompts import (
 )
 from mcp_agent.core.agent_types import AgentConfig
 from mcp_agent.core.exceptions import AgentConfigError
+from mcp_agent.core.prompt import Prompt
 from mcp_agent.core.request_params import RequestParams
 from mcp_agent.logging.logger import get_logger
 from mcp_agent.mcp.interfaces import ModelT
@@ -117,7 +118,7 @@ class OrchestratorAgent(BaseAgent):
        prompt: List[PromptMessageMultipart],
        model: Type[ModelT],
        request_params: Optional[RequestParams] = None,
-    ) ->
+    ) -> Tuple[ModelT | None, PromptMessageMultipart]:
        """
        Execute an orchestration plan and parse the result into a structured format.
 
@@ -138,12 +139,11 @@ class OrchestratorAgent(BaseAgent):
            prompt_message = PromptMessageMultipart(
                role="user", content=[TextContent(type="text", text=result_text)]
            )
-
-            # Use the LLM's structured parsing capability
+            assert self._llm
            return await self._llm.structured([prompt_message], model, request_params)
        except Exception as e:
            self.logger.warning(f"Failed to parse orchestration result: {str(e)}")
-            return None
+            return None, Prompt.assistant(f"Failed to parse orchestration result: {str(e)}")
 
    async def initialize(self) -> None:
        """Initialize the orchestrator agent and worker agents."""
@@ -429,7 +429,8 @@ class OrchestratorAgent(BaseAgent):
            plan_msg = PromptMessageMultipart(
                role="user", content=[TextContent(type="text", text=prompt)]
            )
-
+            plan, _ = await self._llm.structured([plan_msg], Plan, request_params)
+            return plan
        except Exception as e:
            self.logger.error(f"Failed to parse plan: {str(e)}")
            return None
@@ -483,7 +484,8 @@ class OrchestratorAgent(BaseAgent):
            plan_msg = PromptMessageMultipart(
                role="user", content=[TextContent(type="text", text=prompt)]
            )
-
+            next_step, _ = await self._llm.structured([plan_msg], NextStep, request_params)
+            return next_step
        except Exception as e:
            self.logger.error(f"Failed to parse next step: {str(e)}")
            return None
mcp_agent/agents/workflow/parallel_agent.py
CHANGED
@@ -1,5 +1,5 @@
 import asyncio
-from typing import Any, List, Optional
+from typing import Any, List, Optional, Tuple
 
 from mcp.types import TextContent
 
@@ -113,7 +113,7 @@ class ParallelAgent(BaseAgent):
        prompt: List[PromptMessageMultipart],
        model: type[ModelT],
        request_params: Optional[RequestParams] = None,
-    ) ->
+    ) -> Tuple[ModelT | None, PromptMessageMultipart]:
        """
        Apply the prompt and return the result as a Pydantic model.
 
mcp_agent/agents/workflow/router_agent.py
CHANGED
@@ -5,7 +5,7 @@ This provides a simplified implementation that routes messages to agents
 by determining the best agent for a request and dispatching to it.
 """
 
-from typing import TYPE_CHECKING, List, Optional, Type
+from typing import TYPE_CHECKING, List, Optional, Tuple, Type
 
 from mcp.types import TextContent
 from pydantic import BaseModel
@@ -14,6 +14,7 @@ from mcp_agent.agents.agent import Agent
 from mcp_agent.agents.base_agent import BaseAgent
 from mcp_agent.core.agent_types import AgentConfig
 from mcp_agent.core.exceptions import AgentConfigError
+from mcp_agent.core.prompt import Prompt
 from mcp_agent.core.request_params import RequestParams
 from mcp_agent.logging.logger import get_logger
 from mcp_agent.mcp.interfaces import ModelT
@@ -73,7 +74,7 @@ class RoutingResponse(BaseModel):
 class RouterResult(BaseModel):
    """Router result with agent reference and confidence rating."""
 
-    result:
+    result: BaseAgent
    confidence: str
    reasoning: Optional[str] = None
 
@@ -221,7 +222,7 @@ class RouterAgent(BaseAgent):
        prompt: List[PromptMessageMultipart],
        model: Type[ModelT],
        request_params: Optional[RequestParams] = None,
-    ) ->
+    ) -> Tuple[ModelT | None, PromptMessageMultipart]:
        """
        Route the request to the most appropriate agent and parse its response.
 
@@ -236,7 +237,7 @@ class RouterAgent(BaseAgent):
        routing_result = await self._get_routing_result(prompt)
 
        if not routing_result:
-            return None
+            return None, Prompt.assistant("No routing result")
 
        # Get the selected agent
        selected_agent = routing_result.result
@@ -287,7 +288,8 @@ class RouterAgent(BaseAgent):
        )
 
        # Get structured response from LLM
-
+        assert self._llm
+        response, _ = await self._llm.structured(
            [prompt], RoutingResponse, self._default_request_params
        )
 
mcp_agent/cli/main.py
CHANGED
@@ -62,6 +62,7 @@ def main(
    verbose: bool = typer.Option(False, "--verbose", "-v", help="Enable verbose mode"),
    quiet: bool = typer.Option(False, "--quiet", "-q", help="Disable output"),
    color: bool = typer.Option(True, "--color/--no-color", help="Enable/disable color output"),
+    version: bool = typer.Option(False, "--version", help="Show version and exit"),
 ) -> None:
    """FastAgent CLI - Build effective agents using Model Context Protocol (MCP).
 
@@ -70,6 +71,16 @@ def main(
    application.verbosity = 1 if verbose else 0 if not quiet else -1
    application.console = application.console if color else None
 
+    # Handle version flag
+    if version:
+        from importlib.metadata import version as get_version
+        try:
+            app_version = get_version("fast-agent-mcp")
+        except:  # noqa: E722
+            app_version = "unknown"
+        console.print(f"fast-agent-mcp v{app_version}")
+        raise typer.Exit()
+
    # Show welcome message if no command was invoked
    if ctx.invoked_subcommand is None:
        show_welcome()
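The new --version flag resolves the installed version through importlib.metadata. A standalone sketch of the same lookup is shown below; it catches PackageNotFoundError rather than the bare except used in the CLI, and the fallback string is illustrative.

```python
# Sketch of the version lookup performed by the new --version flag.
from importlib.metadata import PackageNotFoundError, version

try:
    app_version = version("fast-agent-mcp")
except PackageNotFoundError:
    app_version = "unknown"  # package not installed in this environment

print(f"fast-agent-mcp v{app_version}")
```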
mcp_agent/config.py
CHANGED
@@ -40,7 +40,7 @@ class MCPRootSettings(BaseModel):
    @classmethod
    def validate_uri(cls, v: str) -> str:
        """Validate that the URI starts with file:// (required by specification 2024-11-05)"""
-        if not v.startswith("file://"):
+        if v and not v.startswith("file://"):
            raise ValueError("Root URI must start with file://")
        return v
 
@@ -276,9 +276,9 @@ class Settings(BaseSettings):
        # Check current directory and parent directories
        while current_dir != current_dir.parent:
            for filename in [
+                "fastagent.config.yaml",
                "mcp-agent.config.yaml",
                "mcp_agent.config.yaml",
-                "fastagent.config.yaml",
            ]:
                config_path = current_dir / filename
                if config_path.exists():
@@ -306,15 +306,33 @@ def get_settings(config_path: str | None = None) -> Settings:
        return merged
 
    global _settings
-
+
+    # If we have a specific config path, always reload settings
+    # This ensures each test gets its own config
+    if config_path:
+        # Reset for the new path
+        _settings = None
+    elif _settings:
+        # Use cached settings only for no specific path
        return _settings
 
-
+    # Handle config path - convert string to Path if needed
+    if config_path:
+        config_file = Path(config_path)
+        # If it's a relative path and doesn't exist, try finding it
+        if not config_file.is_absolute() and not config_file.exists():
+            # Try resolving against current directory first
+            resolved_path = Path.cwd() / config_file.name
+            if resolved_path.exists():
+                config_file = resolved_path
+    else:
+        config_file = Settings.find_config()
+
    merged_settings = {}
 
    if config_file:
        if not config_file.exists():
-
+            print(f"Warning: Specified config file does not exist: {config_file}")
        else:
            import yaml  # pylint: disable=C0415
 
@@ -326,11 +344,14 @@ def get_settings(config_path: str | None = None) -> Settings:
            # but stop after finding the first one
            current_dir = config_file.parent
            found_secrets = False
+            # Start with the absolute path of the config file's directory
+            current_dir = config_file.parent.resolve()
+
            while current_dir != current_dir.parent and not found_secrets:
                for secrets_filename in [
+                    "fastagent.secrets.yaml",
                    "mcp-agent.secrets.yaml",
                    "mcp_agent.secrets.yaml",
-                    "fastagent.secrets.yaml",
                ]:
                    secrets_file = current_dir / secrets_filename
                    if secrets_file.exists():
@@ -340,7 +361,8 @@ def get_settings(config_path: str | None = None) -> Settings:
                        found_secrets = True
                        break
                if not found_secrets:
-
+                    # Get the absolute path of the parent directory
+                    current_dir = current_dir.parent.resolve()
 
    _settings = Settings(**merged_settings)
    return _settings
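The config and secrets search lists now try the fastagent.* filenames before the legacy mcp-agent.* / mcp_agent.* names while still walking up parent directories. A minimal sketch of that directory walk, assuming the same filename precedence (the find_config helper and CONFIG_FILENAMES constant here are illustrative, not the package's API):

```python
# Hypothetical helper mirroring the 0.2.5 config search order:
# walk up from start_dir and return the first matching config file.
from pathlib import Path
from typing import Optional

CONFIG_FILENAMES = [
    "fastagent.config.yaml",   # preferred name, now checked first
    "mcp-agent.config.yaml",
    "mcp_agent.config.yaml",
]


def find_config(start_dir: Path) -> Optional[Path]:
    current = start_dir.resolve()
    while current != current.parent:
        for filename in CONFIG_FILENAMES:
            candidate = current / filename
            if candidate.exists():
                return candidate
        current = current.parent
    return None
```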
mcp_agent/context.py
CHANGED
@@ -234,5 +234,7 @@ def get_current_context() -> Context:
 def get_current_config():
    """
    Get the current application config.
+
+    Returns the context config if available, otherwise falls back to global settings.
    """
    return get_current_context().config or get_settings()
mcp_agent/core/{direct_agent_app.py → agent_app.py}
CHANGED
@@ -2,14 +2,17 @@
 Direct AgentApp implementation for interacting with agents without proxies.
 """
 
-from typing import Dict, Optional, Union
+from typing import Dict, List, Optional, Union
+
+from deprecated import deprecated
+from mcp.types import PromptMessage
 
 from mcp_agent.agents.agent import Agent
 from mcp_agent.core.interactive_prompt import InteractivePrompt
 from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart
 
 
-class DirectAgentApp:
+class AgentApp:
    """
    Container for active agents that provides a simple API for interacting with them.
    This implementation works directly with Agent instances without proxies.
@@ -44,7 +47,7 @@ class DirectAgentApp:
 
    async def __call__(
        self,
-        message: Union[str, PromptMessageMultipart] | None = None,
+        message: Union[str, PromptMessage, PromptMessageMultipart] | None = None,
        agent_name: str | None = None,
        default_prompt: str = "",
    ) -> str:
@@ -53,9 +56,12 @@ class DirectAgentApp:
        This mirrors the FastAgent implementation that allowed agent("message").
 
        Args:
-            message:
+            message: Message content in various formats:
+                - String: Converted to a user PromptMessageMultipart
+                - PromptMessage: Converted to PromptMessageMultipart
+                - PromptMessageMultipart: Used directly
            agent_name: Optional name of the agent to send to (defaults to first agent)
-
+            default_prompt: Default message to use in interactive prompt mode
 
        Returns:
            The agent's response as a string or the result of the interactive session
@@ -63,14 +69,21 @@ class DirectAgentApp:
        if message:
            return await self._agent(agent_name).send(message)
 
-        return await self.
+        return await self.interactive(agent_name=agent_name, default_prompt=default_prompt)
 
-    async def send(
+    async def send(
+        self,
+        message: Union[str, PromptMessage, PromptMessageMultipart],
+        agent_name: Optional[str] = None,
+    ) -> str:
        """
        Send a message to the specified agent (or to all agents).
 
        Args:
-            message:
+            message: Message content in various formats:
+                - String: Converted to a user PromptMessageMultipart
+                - PromptMessage: Converted to PromptMessageMultipart
+                - PromptMessageMultipart: Used directly
            agent_name: Optional name of the agent to send to
 
        Returns:
@@ -97,32 +110,119 @@ class DirectAgentApp:
 
        Args:
            prompt_name: Name of the prompt template to apply
-            agent_name: Name of the agent to send to
            arguments: Optional arguments for the prompt template
+            agent_name: Name of the agent to send to
 
        Returns:
            The agent's response as a string
        """
        return await self._agent(agent_name).apply_prompt(prompt_name, arguments)
 
-    async def list_prompts(self, agent_name: str | None = None):
+    async def list_prompts(self, server_name: str | None = None, agent_name: str | None = None):
        """
        List available prompts for an agent.
 
        Args:
+            server_name: Optional name of the server to list prompts from
            agent_name: Name of the agent to list prompts for
 
        Returns:
            Dictionary mapping server names to lists of available prompts
        """
-        return await self._agent(agent_name).list_prompts()
+        return await self._agent(agent_name).list_prompts(server_name=server_name)
+
+    async def get_prompt(
+        self,
+        prompt_name: str,
+        arguments: Dict[str, str] | None = None,
+        server_name: str | None = None,
+        agent_name: str | None = None,
+    ):
+        """
+        Get a prompt from a server.
+
+        Args:
+            prompt_name: Name of the prompt, optionally namespaced
+            arguments: Optional dictionary of arguments to pass to the prompt template
+            server_name: Optional name of the server to get the prompt from
+            agent_name: Name of the agent to use
 
-
-
-
+        Returns:
+            GetPromptResult containing the prompt information
+        """
+        return await self._agent(agent_name).get_prompt(
+            prompt_name=prompt_name, arguments=arguments, server_name=server_name
        )
 
-    async def
+    async def with_resource(
+        self,
+        prompt_content: Union[str, PromptMessage, PromptMessageMultipart],
+        resource_uri: str,
+        server_name: str | None = None,
+        agent_name: str | None = None,
+    ) -> str:
+        """
+        Send a message with an attached MCP resource.
+
+        Args:
+            prompt_content: Content in various formats (String, PromptMessage, or PromptMessageMultipart)
+            resource_uri: URI of the resource to retrieve
+            server_name: Optional name of the MCP server to retrieve the resource from
+            agent_name: Name of the agent to use
+
+        Returns:
+            The agent's response as a string
+        """
+        return await self._agent(agent_name).with_resource(
+            prompt_content=prompt_content, resource_uri=resource_uri, server_name=server_name
+        )
+
+    async def list_resources(
+        self,
+        server_name: str | None = None,
+        agent_name: str | None = None,
+    ) -> Dict[str, List[str]]:
+        """
+        List available resources from one or all servers.
+
+        Args:
+            server_name: Optional server name to list resources from
+            agent_name: Name of the agent to use
+
+        Returns:
+            Dictionary mapping server names to lists of resource URIs
+        """
+        return await self._agent(agent_name).list_resources(server_name=server_name)
+
+    async def get_resource(
+        self,
+        resource_uri: str,
+        server_name: str | None = None,
+        agent_name: str | None = None,
+    ):
+        """
+        Get a resource from an MCP server.
+
+        Args:
+            resource_uri: URI of the resource to retrieve
+            server_name: Optional name of the MCP server to retrieve the resource from
+            agent_name: Name of the agent to use
+
+        Returns:
+            ReadResourceResult object containing the resource content
+        """
+        return await self._agent(agent_name).get_resource(
+            resource_uri=resource_uri, server_name=server_name
+        )
+
+    @deprecated
+    async def prompt(self, agent_name: str | None = None, default_prompt: str = "") -> str:
+        """
+        Deprecated - use interactive() instead.
+        """
+        return await self.interactive(agent_name=agent_name, default_prompt=default_prompt)
+
+    async def interactive(self, agent_name: str | None = None, default_prompt: str = "") -> str:
        """
        Interactive prompt for sending messages with advanced features.
 
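Alongside the DirectAgentApp → AgentApp rename, the prompt and resource helpers gain an optional server_name and several new methods (get_prompt, with_resource, list_resources, get_resource). A minimal usage sketch follows; it assumes an already-initialized app, and the agent name "default", server name "docs", and resource URI are placeholders.

```python
# Illustrative use of the new AgentApp helpers; names and URIs are placeholders.
async def explore(app) -> None:
    # Prompt and resource listings can now be scoped to a single MCP server
    prompts = await app.list_prompts(server_name="docs", agent_name="default")
    resources = await app.list_resources(server_name="docs", agent_name="default")
    print(prompts, resources)

    # Attach an MCP resource to an outgoing message
    answer = await app.with_resource(
        prompt_content="Summarise the attached document",
        resource_uri="resource://docs/readme",
        server_name="docs",
        agent_name="default",
    )
    print(answer)
```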
mcp_agent/core/direct_factory.py
CHANGED
@@ -147,11 +147,8 @@ async def create_agents_by_type(
            await agent.initialize()
 
            # Attach LLM to the agent
-            llm_factory = model_factory_func(
-
-                request_params=config.default_request_params,
-            )
-            await agent.attach_llm(llm_factory)
+            llm_factory = model_factory_func(model=config.model)
+            await agent.attach_llm(llm_factory, request_params=config.default_request_params)
            result_agents[name] = agent
 
        elif agent_type == AgentType.ORCHESTRATOR:
@@ -183,11 +180,10 @@ async def create_agents_by_type(
            await orchestrator.initialize()
 
            # Attach LLM to the orchestrator
-            llm_factory = model_factory_func(
-
-                request_params=config.default_request_params
+            llm_factory = model_factory_func(model=config.model)
+            await orchestrator.attach_llm(
+                llm_factory, request_params=config.default_request_params
            )
-            await orchestrator.attach_llm(llm_factory)
 
            result_agents[name] = orchestrator
 
@@ -201,9 +197,7 @@ async def create_agents_by_type(
            # Create default fan-in agent with auto-generated name
            fan_in_name = f"{name}_fan_in"
            fan_in_agent = await _create_default_fan_in_agent(
-                fan_in_name,
-                app_instance.context,
-                model_factory_func
+                fan_in_name, app_instance.context, model_factory_func
            )
            # Add to result_agents so it's registered properly
            result_agents[fan_in_name] = fan_in_agent
@@ -247,11 +241,8 @@ async def create_agents_by_type(
            await router.initialize()
 
            # Attach LLM to the router
-            llm_factory = model_factory_func(
-
-                request_params=config.default_request_params,
-            )
-            await router.attach_llm(llm_factory)
+            llm_factory = model_factory_func(model=config.model)
+            await router.attach_llm(llm_factory, request_params=config.default_request_params)
            result_agents[name] = router
 
        elif agent_type == AgentType.CHAIN:
@@ -459,7 +450,7 @@ async def _create_default_fan_in_agent(
    default_config = AgentConfig(
        name=fan_in_name,
        model="passthrough",
-        instruction="You are a passthrough agent that combines outputs from parallel agents."
+        instruction="You are a passthrough agent that combines outputs from parallel agents.",
    )
 
    # Create and initialize the default agent
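The factory changes above all follow one pattern: the model factory is now called with only the model id, and request_params moves to attach_llm. A minimal sketch of the new wiring, assuming an existing agent, config, and model_factory_func as in the surrounding code (the attach helper itself is illustrative):

```python
# Sketch of the 0.2.5 attachment pattern used throughout direct_factory.py:
# the factory gets only the model, request_params are passed to attach_llm.
async def attach(agent, config, model_factory_func):
    llm_factory = model_factory_func(model=config.model)
    await agent.attach_llm(llm_factory, request_params=config.default_request_params)
    return agent
```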
mcp_agent/core/enhanced_prompt.py
CHANGED
@@ -291,7 +291,7 @@ async def get_enhanced_input(
                return f"SELECT_PROMPT:{cmd_parts[1].strip()}"
            elif cmd == "exit":
                return "EXIT"
-            elif cmd == "stop":
+            elif cmd.lower() == "stop":
                return "STOP"
 
        # Agent switching
@@ -420,7 +420,7 @@ async def get_argument_input(
        prompt_session.app.exit()
 
 
-async def handle_special_commands(command, agent_app=None):
+async def handle_special_commands(command: str, agent_app=None):
    """Handle special input commands."""
    # Quick guard for empty or None commands
    if not command:
@@ -450,7 +450,7 @@ async def handle_special_commands(command, agent_app=None):
        print("\033c", end="")
        return True
 
-    elif command == "EXIT":
+    elif command.upper() == "EXIT":
        raise PromptExitError("User requested to exit fast-agent session")
 
    elif command == "LIST_AGENTS":