fast-agent-mcp 0.2.57__py3-none-any.whl → 0.3.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of fast-agent-mcp has been flagged as potentially problematic; review the release details below before upgrading.
- fast_agent/__init__.py +127 -0
- fast_agent/agents/__init__.py +36 -0
- {mcp_agent/core → fast_agent/agents}/agent_types.py +2 -1
- fast_agent/agents/llm_agent.py +217 -0
- fast_agent/agents/llm_decorator.py +486 -0
- mcp_agent/agents/base_agent.py → fast_agent/agents/mcp_agent.py +377 -385
- fast_agent/agents/tool_agent.py +168 -0
- {mcp_agent → fast_agent}/agents/workflow/chain_agent.py +43 -33
- {mcp_agent → fast_agent}/agents/workflow/evaluator_optimizer.py +31 -35
- {mcp_agent → fast_agent}/agents/workflow/iterative_planner.py +56 -47
- {mcp_agent → fast_agent}/agents/workflow/orchestrator_models.py +4 -4
- {mcp_agent → fast_agent}/agents/workflow/parallel_agent.py +34 -41
- {mcp_agent → fast_agent}/agents/workflow/router_agent.py +54 -39
- {mcp_agent → fast_agent}/cli/__main__.py +5 -3
- {mcp_agent → fast_agent}/cli/commands/check_config.py +95 -66
- {mcp_agent → fast_agent}/cli/commands/go.py +20 -11
- {mcp_agent → fast_agent}/cli/commands/quickstart.py +4 -4
- {mcp_agent → fast_agent}/cli/commands/server_helpers.py +1 -1
- {mcp_agent → fast_agent}/cli/commands/setup.py +64 -134
- {mcp_agent → fast_agent}/cli/commands/url_parser.py +9 -8
- {mcp_agent → fast_agent}/cli/main.py +36 -16
- {mcp_agent → fast_agent}/cli/terminal.py +2 -2
- {mcp_agent → fast_agent}/config.py +13 -2
- fast_agent/constants.py +8 -0
- {mcp_agent → fast_agent}/context.py +24 -19
- {mcp_agent → fast_agent}/context_dependent.py +9 -5
- fast_agent/core/__init__.py +17 -0
- {mcp_agent → fast_agent}/core/agent_app.py +39 -36
- fast_agent/core/core_app.py +135 -0
- {mcp_agent → fast_agent}/core/direct_decorators.py +12 -26
- {mcp_agent → fast_agent}/core/direct_factory.py +95 -73
- {mcp_agent → fast_agent/core}/executor/executor.py +4 -5
- {mcp_agent → fast_agent}/core/fastagent.py +32 -32
- fast_agent/core/logging/__init__.py +5 -0
- {mcp_agent → fast_agent/core}/logging/events.py +3 -3
- {mcp_agent → fast_agent/core}/logging/json_serializer.py +1 -1
- {mcp_agent → fast_agent/core}/logging/listeners.py +85 -7
- {mcp_agent → fast_agent/core}/logging/logger.py +7 -7
- {mcp_agent → fast_agent/core}/logging/transport.py +10 -11
- fast_agent/core/prompt.py +9 -0
- {mcp_agent → fast_agent}/core/validation.py +4 -4
- fast_agent/event_progress.py +61 -0
- fast_agent/history/history_exporter.py +44 -0
- {mcp_agent → fast_agent}/human_input/__init__.py +9 -12
- {mcp_agent → fast_agent}/human_input/elicitation_handler.py +26 -8
- {mcp_agent → fast_agent}/human_input/elicitation_state.py +7 -7
- {mcp_agent → fast_agent}/human_input/simple_form.py +6 -4
- {mcp_agent → fast_agent}/human_input/types.py +1 -18
- fast_agent/interfaces.py +228 -0
- fast_agent/llm/__init__.py +9 -0
- mcp_agent/llm/augmented_llm.py → fast_agent/llm/fastagent_llm.py +128 -218
- fast_agent/llm/internal/passthrough.py +137 -0
- mcp_agent/llm/augmented_llm_playback.py → fast_agent/llm/internal/playback.py +29 -25
- mcp_agent/llm/augmented_llm_silent.py → fast_agent/llm/internal/silent.py +10 -17
- fast_agent/llm/internal/slow.py +38 -0
- {mcp_agent → fast_agent}/llm/memory.py +40 -30
- {mcp_agent → fast_agent}/llm/model_database.py +35 -2
- {mcp_agent → fast_agent}/llm/model_factory.py +103 -77
- fast_agent/llm/model_info.py +126 -0
- {mcp_agent/llm/providers → fast_agent/llm/provider/anthropic}/anthropic_utils.py +7 -7
- fast_agent/llm/provider/anthropic/llm_anthropic.py +603 -0
- {mcp_agent/llm/providers → fast_agent/llm/provider/anthropic}/multipart_converter_anthropic.py +79 -86
- fast_agent/llm/provider/bedrock/bedrock_utils.py +218 -0
- fast_agent/llm/provider/bedrock/llm_bedrock.py +2192 -0
- {mcp_agent/llm/providers → fast_agent/llm/provider/google}/google_converter.py +66 -14
- fast_agent/llm/provider/google/llm_google_native.py +431 -0
- mcp_agent/llm/providers/augmented_llm_aliyun.py → fast_agent/llm/provider/openai/llm_aliyun.py +6 -7
- mcp_agent/llm/providers/augmented_llm_azure.py → fast_agent/llm/provider/openai/llm_azure.py +4 -4
- mcp_agent/llm/providers/augmented_llm_deepseek.py → fast_agent/llm/provider/openai/llm_deepseek.py +10 -11
- mcp_agent/llm/providers/augmented_llm_generic.py → fast_agent/llm/provider/openai/llm_generic.py +4 -4
- mcp_agent/llm/providers/augmented_llm_google_oai.py → fast_agent/llm/provider/openai/llm_google_oai.py +4 -4
- mcp_agent/llm/providers/augmented_llm_groq.py → fast_agent/llm/provider/openai/llm_groq.py +14 -16
- mcp_agent/llm/providers/augmented_llm_openai.py → fast_agent/llm/provider/openai/llm_openai.py +133 -206
- mcp_agent/llm/providers/augmented_llm_openrouter.py → fast_agent/llm/provider/openai/llm_openrouter.py +6 -6
- mcp_agent/llm/providers/augmented_llm_tensorzero_openai.py → fast_agent/llm/provider/openai/llm_tensorzero_openai.py +17 -16
- mcp_agent/llm/providers/augmented_llm_xai.py → fast_agent/llm/provider/openai/llm_xai.py +6 -6
- {mcp_agent/llm/providers → fast_agent/llm/provider/openai}/multipart_converter_openai.py +125 -63
- {mcp_agent/llm/providers → fast_agent/llm/provider/openai}/openai_multipart.py +12 -12
- {mcp_agent/llm/providers → fast_agent/llm/provider/openai}/openai_utils.py +18 -16
- {mcp_agent → fast_agent}/llm/provider_key_manager.py +2 -2
- {mcp_agent → fast_agent}/llm/provider_types.py +2 -0
- {mcp_agent → fast_agent}/llm/sampling_converter.py +15 -12
- {mcp_agent → fast_agent}/llm/usage_tracking.py +23 -5
- fast_agent/mcp/__init__.py +43 -0
- {mcp_agent → fast_agent}/mcp/elicitation_factory.py +3 -3
- {mcp_agent → fast_agent}/mcp/elicitation_handlers.py +19 -10
- {mcp_agent → fast_agent}/mcp/gen_client.py +3 -3
- fast_agent/mcp/helpers/__init__.py +36 -0
- fast_agent/mcp/helpers/content_helpers.py +183 -0
- {mcp_agent → fast_agent}/mcp/helpers/server_config_helpers.py +8 -8
- {mcp_agent → fast_agent}/mcp/hf_auth.py +25 -23
- fast_agent/mcp/interfaces.py +93 -0
- {mcp_agent → fast_agent}/mcp/logger_textio.py +4 -4
- {mcp_agent → fast_agent}/mcp/mcp_agent_client_session.py +49 -44
- {mcp_agent → fast_agent}/mcp/mcp_aggregator.py +66 -115
- {mcp_agent → fast_agent}/mcp/mcp_connection_manager.py +16 -23
- {mcp_agent/core → fast_agent/mcp}/mcp_content.py +23 -15
- {mcp_agent → fast_agent}/mcp/mime_utils.py +39 -0
- fast_agent/mcp/prompt.py +159 -0
- mcp_agent/mcp/prompt_message_multipart.py → fast_agent/mcp/prompt_message_extended.py +27 -20
- {mcp_agent → fast_agent}/mcp/prompt_render.py +21 -19
- {mcp_agent → fast_agent}/mcp/prompt_serialization.py +46 -46
- fast_agent/mcp/prompts/__main__.py +7 -0
- {mcp_agent → fast_agent}/mcp/prompts/prompt_helpers.py +31 -30
- {mcp_agent → fast_agent}/mcp/prompts/prompt_load.py +8 -8
- {mcp_agent → fast_agent}/mcp/prompts/prompt_server.py +11 -19
- {mcp_agent → fast_agent}/mcp/prompts/prompt_template.py +18 -18
- {mcp_agent → fast_agent}/mcp/resource_utils.py +1 -1
- {mcp_agent → fast_agent}/mcp/sampling.py +31 -26
- {mcp_agent/mcp_server → fast_agent/mcp/server}/__init__.py +1 -1
- {mcp_agent/mcp_server → fast_agent/mcp/server}/agent_server.py +5 -6
- fast_agent/mcp/ui_agent.py +48 -0
- fast_agent/mcp/ui_mixin.py +209 -0
- fast_agent/mcp_server_registry.py +90 -0
- {mcp_agent → fast_agent}/resources/examples/data-analysis/analysis-campaign.py +5 -4
- {mcp_agent → fast_agent}/resources/examples/data-analysis/analysis.py +1 -1
- {mcp_agent → fast_agent}/resources/examples/mcp/elicitations/elicitation_forms_server.py +25 -3
- {mcp_agent → fast_agent}/resources/examples/mcp/elicitations/forms_demo.py +3 -3
- {mcp_agent → fast_agent}/resources/examples/mcp/elicitations/game_character.py +2 -2
- {mcp_agent → fast_agent}/resources/examples/mcp/elicitations/game_character_handler.py +1 -1
- {mcp_agent → fast_agent}/resources/examples/mcp/elicitations/tool_call.py +1 -1
- {mcp_agent → fast_agent}/resources/examples/mcp/state-transfer/agent_one.py +1 -1
- {mcp_agent → fast_agent}/resources/examples/mcp/state-transfer/agent_two.py +1 -1
- {mcp_agent → fast_agent}/resources/examples/researcher/researcher-eval.py +1 -1
- {mcp_agent → fast_agent}/resources/examples/researcher/researcher-imp.py +1 -1
- {mcp_agent → fast_agent}/resources/examples/researcher/researcher.py +1 -1
- {mcp_agent → fast_agent}/resources/examples/tensorzero/agent.py +2 -2
- {mcp_agent → fast_agent}/resources/examples/tensorzero/image_demo.py +3 -3
- {mcp_agent → fast_agent}/resources/examples/tensorzero/simple_agent.py +1 -1
- {mcp_agent → fast_agent}/resources/examples/workflows/chaining.py +1 -1
- {mcp_agent → fast_agent}/resources/examples/workflows/evaluator.py +3 -3
- {mcp_agent → fast_agent}/resources/examples/workflows/human_input.py +5 -3
- {mcp_agent → fast_agent}/resources/examples/workflows/orchestrator.py +1 -1
- {mcp_agent → fast_agent}/resources/examples/workflows/parallel.py +2 -2
- {mcp_agent → fast_agent}/resources/examples/workflows/router.py +5 -2
- fast_agent/resources/setup/.gitignore +24 -0
- fast_agent/resources/setup/agent.py +18 -0
- fast_agent/resources/setup/fastagent.config.yaml +44 -0
- fast_agent/resources/setup/fastagent.secrets.yaml.example +38 -0
- fast_agent/tools/elicitation.py +369 -0
- fast_agent/types/__init__.py +32 -0
- fast_agent/types/llm_stop_reason.py +77 -0
- fast_agent/ui/__init__.py +38 -0
- fast_agent/ui/console_display.py +1005 -0
- {mcp_agent/human_input → fast_agent/ui}/elicitation_form.py +56 -39
- mcp_agent/human_input/elicitation_forms.py → fast_agent/ui/elicitation_style.py +1 -1
- {mcp_agent/core → fast_agent/ui}/enhanced_prompt.py +96 -25
- {mcp_agent/core → fast_agent/ui}/interactive_prompt.py +330 -125
- fast_agent/ui/mcp_ui_utils.py +224 -0
- {mcp_agent → fast_agent/ui}/progress_display.py +2 -2
- {mcp_agent/logging → fast_agent/ui}/rich_progress.py +4 -4
- {mcp_agent/core → fast_agent/ui}/usage_display.py +3 -8
- {fast_agent_mcp-0.2.57.dist-info → fast_agent_mcp-0.3.0.dist-info}/METADATA +7 -7
- fast_agent_mcp-0.3.0.dist-info/RECORD +202 -0
- fast_agent_mcp-0.3.0.dist-info/entry_points.txt +5 -0
- fast_agent_mcp-0.2.57.dist-info/RECORD +0 -192
- fast_agent_mcp-0.2.57.dist-info/entry_points.txt +0 -6
- mcp_agent/__init__.py +0 -114
- mcp_agent/agents/agent.py +0 -92
- mcp_agent/agents/workflow/__init__.py +0 -1
- mcp_agent/agents/workflow/orchestrator_agent.py +0 -597
- mcp_agent/app.py +0 -175
- mcp_agent/core/__init__.py +0 -26
- mcp_agent/core/prompt.py +0 -191
- mcp_agent/event_progress.py +0 -134
- mcp_agent/human_input/handler.py +0 -81
- mcp_agent/llm/__init__.py +0 -2
- mcp_agent/llm/augmented_llm_passthrough.py +0 -232
- mcp_agent/llm/augmented_llm_slow.py +0 -53
- mcp_agent/llm/providers/__init__.py +0 -8
- mcp_agent/llm/providers/augmented_llm_anthropic.py +0 -717
- mcp_agent/llm/providers/augmented_llm_bedrock.py +0 -1788
- mcp_agent/llm/providers/augmented_llm_google_native.py +0 -495
- mcp_agent/llm/providers/sampling_converter_anthropic.py +0 -57
- mcp_agent/llm/providers/sampling_converter_openai.py +0 -26
- mcp_agent/llm/sampling_format_converter.py +0 -37
- mcp_agent/logging/__init__.py +0 -0
- mcp_agent/mcp/__init__.py +0 -50
- mcp_agent/mcp/helpers/__init__.py +0 -25
- mcp_agent/mcp/helpers/content_helpers.py +0 -187
- mcp_agent/mcp/interfaces.py +0 -266
- mcp_agent/mcp/prompts/__init__.py +0 -0
- mcp_agent/mcp/prompts/__main__.py +0 -10
- mcp_agent/mcp_server_registry.py +0 -343
- mcp_agent/tools/tool_definition.py +0 -14
- mcp_agent/ui/console_display.py +0 -790
- mcp_agent/ui/console_display_legacy.py +0 -401
- {mcp_agent → fast_agent}/agents/workflow/orchestrator_prompts.py +0 -0
- {mcp_agent/agents → fast_agent/cli}/__init__.py +0 -0
- {mcp_agent → fast_agent}/cli/constants.py +0 -0
- {mcp_agent → fast_agent}/core/error_handling.py +0 -0
- {mcp_agent → fast_agent}/core/exceptions.py +0 -0
- {mcp_agent/cli → fast_agent/core/executor}/__init__.py +0 -0
- {mcp_agent → fast_agent/core}/executor/task_registry.py +0 -0
- {mcp_agent → fast_agent/core}/executor/workflow_signal.py +0 -0
- {mcp_agent → fast_agent}/human_input/form_fields.py +0 -0
- {mcp_agent → fast_agent}/llm/prompt_utils.py +0 -0
- {mcp_agent/core → fast_agent/llm}/request_params.py +0 -0
- {mcp_agent → fast_agent}/mcp/common.py +0 -0
- {mcp_agent/executor → fast_agent/mcp/prompts}/__init__.py +0 -0
- {mcp_agent → fast_agent}/mcp/prompts/prompt_constants.py +0 -0
- {mcp_agent → fast_agent}/py.typed +0 -0
- {mcp_agent → fast_agent}/resources/examples/data-analysis/fastagent.config.yaml +0 -0
- {mcp_agent → fast_agent}/resources/examples/data-analysis/mount-point/WA_Fn-UseC_-HR-Employee-Attrition.csv +0 -0
- {mcp_agent → fast_agent}/resources/examples/mcp/elicitations/elicitation_account_server.py +0 -0
- {mcp_agent → fast_agent}/resources/examples/mcp/elicitations/elicitation_game_server.py +0 -0
- {mcp_agent → fast_agent}/resources/examples/mcp/elicitations/fastagent.config.yaml +0 -0
- {mcp_agent → fast_agent}/resources/examples/mcp/elicitations/fastagent.secrets.yaml.example +0 -0
- {mcp_agent → fast_agent}/resources/examples/mcp/state-transfer/fastagent.config.yaml +0 -0
- {mcp_agent → fast_agent}/resources/examples/mcp/state-transfer/fastagent.secrets.yaml.example +0 -0
- {mcp_agent → fast_agent}/resources/examples/researcher/fastagent.config.yaml +0 -0
- {mcp_agent → fast_agent}/resources/examples/tensorzero/.env.sample +0 -0
- {mcp_agent → fast_agent}/resources/examples/tensorzero/Makefile +0 -0
- {mcp_agent → fast_agent}/resources/examples/tensorzero/README.md +0 -0
- {mcp_agent → fast_agent}/resources/examples/tensorzero/demo_images/clam.jpg +0 -0
- {mcp_agent → fast_agent}/resources/examples/tensorzero/demo_images/crab.png +0 -0
- {mcp_agent → fast_agent}/resources/examples/tensorzero/demo_images/shrimp.png +0 -0
- {mcp_agent → fast_agent}/resources/examples/tensorzero/docker-compose.yml +0 -0
- {mcp_agent → fast_agent}/resources/examples/tensorzero/fastagent.config.yaml +0 -0
- {mcp_agent → fast_agent}/resources/examples/tensorzero/mcp_server/Dockerfile +0 -0
- {mcp_agent → fast_agent}/resources/examples/tensorzero/mcp_server/entrypoint.sh +0 -0
- {mcp_agent → fast_agent}/resources/examples/tensorzero/mcp_server/mcp_server.py +0 -0
- {mcp_agent → fast_agent}/resources/examples/tensorzero/mcp_server/pyproject.toml +0 -0
- {mcp_agent → fast_agent}/resources/examples/tensorzero/tensorzero_config/system_schema.json +0 -0
- {mcp_agent → fast_agent}/resources/examples/tensorzero/tensorzero_config/system_template.minijinja +0 -0
- {mcp_agent → fast_agent}/resources/examples/tensorzero/tensorzero_config/tensorzero.toml +0 -0
- {mcp_agent → fast_agent}/resources/examples/workflows/fastagent.config.yaml +0 -0
- {mcp_agent → fast_agent}/resources/examples/workflows/graded_report.md +0 -0
- {mcp_agent → fast_agent}/resources/examples/workflows/short_story.md +0 -0
- {mcp_agent → fast_agent}/resources/examples/workflows/short_story.txt +0 -0
- {mcp_agent → fast_agent/ui}/console.py +0 -0
- {mcp_agent/core → fast_agent/ui}/mermaid_utils.py +0 -0
- {fast_agent_mcp-0.2.57.dist-info → fast_agent_mcp-0.3.0.dist-info}/WHEEL +0 -0
- {fast_agent_mcp-0.2.57.dist-info → fast_agent_mcp-0.3.0.dist-info}/licenses/LICENSE +0 -0
|
@@ -0,0 +1,168 @@
|
|
|
1
|
+
from typing import Any, Callable, Dict, List
|
|
2
|
+
|
|
3
|
+
from mcp.server.fastmcp.tools.base import Tool as FastMCPTool
|
|
4
|
+
from mcp.types import CallToolResult, ListToolsResult, Tool
|
|
5
|
+
|
|
6
|
+
from fast_agent.agents.agent_types import AgentConfig
|
|
7
|
+
from fast_agent.agents.llm_agent import LlmAgent
|
|
8
|
+
from fast_agent.constants import HUMAN_INPUT_TOOL_NAME
|
|
9
|
+
from fast_agent.context import Context
|
|
10
|
+
from fast_agent.core.logging.logger import get_logger
|
|
11
|
+
from fast_agent.mcp.helpers.content_helpers import text_content
|
|
12
|
+
from fast_agent.tools.elicitation import get_elicitation_fastmcp_tool
|
|
13
|
+
from fast_agent.types import PromptMessageExtended, RequestParams
|
|
14
|
+
from fast_agent.types.llm_stop_reason import LlmStopReason
|
|
15
|
+
|
|
16
|
+
# Module-level logger for tool execution diagnostics.
logger = get_logger(__name__)

# NOTE(review): this constant is not referenced anywhere in this module —
# presumably reserved for a future per-tool-call cap; confirm before removing.
DEFAULT_MAX_TOOL_CALLS = 20


# should we have MAX_TOOL_CALLS instead to constrain by number of tools rather than turns...?
# Maximum number of User/Assistant turns to take before the tool loop is
# force-stopped (see ToolAgent.generate_impl).
DEFAULT_MAX_ITERATIONS = 20
|
|
24
|
+
|
|
25
|
+
|
|
26
|
+
class ToolAgent(LlmAgent):
    """
    A Tool Calling agent that uses FastMCP Tools for execution.

    Pass either:
    - FastMCP Tool objects (created via Tool.from_function)
    - Regular Python functions (will be wrapped as FastMCP Tools)
    """

    def __init__(
        self,
        config: AgentConfig,
        # NOTE: default is None (not a mutable []) to avoid the shared
        # mutable-default-argument pitfall; None is normalized below.
        tools: list[FastMCPTool | Callable] | None = None,
        context: Context | None = None,
    ) -> None:
        """
        Initialize the agent with an optional set of tools.

        Args:
            config: Agent configuration (also controls human-input injection).
            tools: FastMCP Tool objects or plain callables; callables are
                wrapped via FastMCPTool.from_function. Unknown types are
                skipped with a warning.
            context: Optional application context passed to the base agent.
        """
        super().__init__(config=config, context=context)

        # Name -> executable FastMCP tool used by call_tool().
        self._execution_tools: dict[str, FastMCPTool] = {}
        # MCP Tool schemas advertised to the LLM via list_tools().
        self._tool_schemas: list[Tool] = []

        # Build a working list of tools and auto-inject human-input tool if missing
        working_tools: list[FastMCPTool | Callable] = list(tools) if tools else []
        # Only auto-inject if enabled via AgentConfig
        if self.config.human_input:
            existing_names = {
                t.name if isinstance(t, FastMCPTool) else getattr(t, "__name__", "")
                for t in working_tools
            }
            if HUMAN_INPUT_TOOL_NAME not in existing_names:
                try:
                    working_tools.append(get_elicitation_fastmcp_tool())
                except Exception as e:
                    # Best-effort: agent still works without the human-input tool.
                    logger.warning(f"Failed to initialize human-input tool: {e}")

        for tool in working_tools:
            if isinstance(tool, FastMCPTool):
                fast_tool = tool
            elif callable(tool):
                fast_tool = FastMCPTool.from_function(tool)
            else:
                logger.warning(f"Skipping unknown tool type: {type(tool)}")
                continue

            self._execution_tools[fast_tool.name] = fast_tool
            # Create MCP Tool schema for the LLM interface
            self._tool_schemas.append(
                Tool(
                    name=fast_tool.name,
                    description=fast_tool.description,
                    inputSchema=fast_tool.parameters,
                )
            )

    async def generate_impl(
        self,
        messages: List[PromptMessageExtended],
        request_params: RequestParams | None = None,
        tools: List[Tool] | None = None,
    ) -> PromptMessageExtended:
        """
        Generate a response using the LLM, and handle tool calls if necessary.
        Messages are already normalized to List[PromptMessageExtended].

        Loops LLM turns while the model requests tools, feeding tool results
        back as user messages, until a non-TOOL_USE stop reason or the
        DEFAULT_MAX_ITERATIONS cap is reached.
        """
        if tools is None:
            tools = (await self.list_tools()).tools

        iterations = 0

        while True:
            result = await super().generate_impl(
                messages, request_params=request_params, tools=tools
            )

            if LlmStopReason.TOOL_USE == result.stop_reason:
                # Replace the conversation tail with the tool results message.
                messages = [await self.run_tools(result)]
            else:
                break

            iterations += 1
            if iterations > DEFAULT_MAX_ITERATIONS:
                logger.warning("Max iterations reached, stopping tool loop")
                break
        return result

    # we take care of tool results, so skip displaying them
    def show_user_message(self, message: PromptMessageExtended) -> None:
        if message.tool_results:
            return
        super().show_user_message(message)

    async def run_tools(self, request: PromptMessageExtended) -> PromptMessageExtended:
        """Runs the tools in the request, and returns a new User message with the results"""
        if not request.tool_calls:
            logger.warning("No tool calls found in request", data=request)
            return PromptMessageExtended(role="user", tool_results={})

        tool_results: dict[str, CallToolResult] = {}
        # TODO -- use gather() for parallel results, update display
        available_tools = [t.name for t in (await self.list_tools()).tools]
        for correlation_id, tool_request in request.tool_calls.items():
            tool_name = tool_request.params.name
            tool_args = tool_request.params.arguments or {}
            self.display.show_tool_call(
                name=self.name,
                tool_args=tool_args,
                bottom_items=available_tools,
                tool_name=tool_name,
                max_item_length=12,
            )

            # Delegate to call_tool for execution (overridable by subclasses)
            result = await self.call_tool(tool_name, tool_args)
            tool_results[correlation_id] = result
            self.display.show_tool_result(name=self.name, result=result)

        return PromptMessageExtended(role="user", tool_results=tool_results)

    async def list_tools(self) -> ListToolsResult:
        """Return available tools for this agent. Overridable by subclasses."""
        return ListToolsResult(tools=list(self._tool_schemas))

    async def call_tool(self, name: str, arguments: Dict[str, Any] | None = None) -> CallToolResult:
        """Execute a tool by name using local FastMCP tools. Overridable by subclasses."""
        fast_tool = self._execution_tools.get(name)
        if not fast_tool:
            logger.warning(f"Unknown tool: {name}")
            return CallToolResult(
                content=[text_content(f"Unknown tool: {name}")],
                isError=True,
            )

        try:
            result = await fast_tool.run(arguments or {}, convert_result=False)
            return CallToolResult(
                content=[text_content(str(result))],
                isError=False,
            )
        except Exception as e:
            logger.error(f"Tool {name} failed: {e}")
            return CallToolResult(
                content=[text_content(f"Error: {str(e)}")],
                isError=True,
            )
|
|
@@ -7,18 +7,20 @@ other agents, chaining their outputs together.
|
|
|
7
7
|
|
|
8
8
|
from typing import Any, List, Optional, Tuple, Type
|
|
9
9
|
|
|
10
|
+
from mcp import Tool
|
|
10
11
|
from mcp.types import TextContent
|
|
11
12
|
|
|
12
|
-
from
|
|
13
|
-
from
|
|
14
|
-
from
|
|
15
|
-
from
|
|
16
|
-
from
|
|
17
|
-
from
|
|
18
|
-
from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart
|
|
13
|
+
from fast_agent.agents.agent_types import AgentConfig, AgentType
|
|
14
|
+
from fast_agent.agents.llm_agent import LlmAgent
|
|
15
|
+
from fast_agent.core.logging.logger import get_logger
|
|
16
|
+
from fast_agent.core.prompt import Prompt
|
|
17
|
+
from fast_agent.interfaces import ModelT
|
|
18
|
+
from fast_agent.types import PromptMessageExtended, RequestParams
|
|
19
19
|
|
|
20
|
+
logger = get_logger(__name__)
|
|
20
21
|
|
|
21
|
-
|
|
22
|
+
|
|
23
|
+
class ChainAgent(LlmAgent):
|
|
22
24
|
"""
|
|
23
25
|
A chain agent that processes requests through a series of specialized agents in sequence.
|
|
24
26
|
Passes the output of each agent to the next agent in the chain.
|
|
@@ -33,7 +35,7 @@ class ChainAgent(BaseAgent):
|
|
|
33
35
|
def __init__(
|
|
34
36
|
self,
|
|
35
37
|
config: AgentConfig,
|
|
36
|
-
agents: List[
|
|
38
|
+
agents: List[LlmAgent],
|
|
37
39
|
cumulative: bool = False,
|
|
38
40
|
context: Optional[Any] = None,
|
|
39
41
|
**kwargs,
|
|
@@ -52,50 +54,61 @@ class ChainAgent(BaseAgent):
|
|
|
52
54
|
self.agents = agents
|
|
53
55
|
self.cumulative = cumulative
|
|
54
56
|
|
|
55
|
-
async def
|
|
57
|
+
async def generate_impl(
|
|
56
58
|
self,
|
|
57
|
-
|
|
59
|
+
messages: List[PromptMessageExtended],
|
|
58
60
|
request_params: Optional[RequestParams] = None,
|
|
59
|
-
|
|
61
|
+
tools: List[Tool] | None = None,
|
|
62
|
+
) -> PromptMessageExtended:
|
|
60
63
|
"""
|
|
61
64
|
Chain the request through multiple agents in sequence.
|
|
62
65
|
|
|
63
66
|
Args:
|
|
64
|
-
|
|
67
|
+
normalized_messages: Already normalized list of PromptMessageExtended
|
|
65
68
|
request_params: Optional request parameters
|
|
66
69
|
|
|
67
70
|
Returns:
|
|
68
71
|
The response from the final agent in the chain
|
|
69
72
|
"""
|
|
70
|
-
|
|
71
|
-
|
|
72
|
-
user_message = multipart_messages[-1] if multipart_messages else None
|
|
73
|
+
# Get the original user message (last message in the list)
|
|
74
|
+
user_message = messages[-1]
|
|
73
75
|
|
|
74
76
|
if not self.cumulative:
|
|
75
|
-
response:
|
|
77
|
+
response: PromptMessageExtended = await self.agents[0].generate(
|
|
78
|
+
messages, request_params
|
|
79
|
+
)
|
|
76
80
|
# Process the rest of the agents in the chain
|
|
77
81
|
for agent in self.agents[1:]:
|
|
78
82
|
next_message = Prompt.user(*response.content)
|
|
79
|
-
response = await agent.generate([next_message])
|
|
83
|
+
response = await agent.generate([next_message], request_params)
|
|
80
84
|
|
|
81
85
|
return response
|
|
82
86
|
|
|
83
87
|
# Track all responses in the chain
|
|
84
|
-
all_responses: List[
|
|
88
|
+
all_responses: List[PromptMessageExtended] = []
|
|
85
89
|
|
|
86
90
|
# Initialize list for storing formatted results
|
|
87
91
|
final_results: List[str] = []
|
|
88
92
|
|
|
89
93
|
# Add the original request with XML tag
|
|
90
|
-
request_text =
|
|
94
|
+
request_text = (
|
|
95
|
+
f"<fastagent:request>{user_message.all_text() or '<no response>'}</fastagent:request>"
|
|
96
|
+
)
|
|
91
97
|
final_results.append(request_text)
|
|
92
98
|
|
|
93
99
|
# Process through each agent in sequence
|
|
94
100
|
for i, agent in enumerate(self.agents):
|
|
95
101
|
# In cumulative mode, include the original message and all previous responses
|
|
96
|
-
chain_messages =
|
|
97
|
-
|
|
98
|
-
|
|
102
|
+
chain_messages = messages.copy()
|
|
103
|
+
|
|
104
|
+
# Convert previous assistant responses to user messages for the next agent
|
|
105
|
+
for prev_response in all_responses:
|
|
106
|
+
chain_messages.append(Prompt.user(prev_response.all_text()))
|
|
107
|
+
|
|
108
|
+
current_response = await agent.generate(
|
|
109
|
+
chain_messages,
|
|
110
|
+
request_params,
|
|
111
|
+
)
|
|
99
112
|
|
|
100
113
|
# Store the response
|
|
101
114
|
all_responses.append(current_response)
|
|
@@ -106,22 +119,19 @@ class ChainAgent(BaseAgent):
|
|
|
106
119
|
)
|
|
107
120
|
final_results.append(attributed_response)
|
|
108
121
|
|
|
109
|
-
if i < len(self.agents) - 1:
|
|
110
|
-
[Prompt.user(current_response.all_text())]
|
|
111
|
-
|
|
112
122
|
# For cumulative mode, return the properly formatted output with XML tags
|
|
113
123
|
response_text = "\n\n".join(final_results)
|
|
114
|
-
return
|
|
124
|
+
return PromptMessageExtended(
|
|
115
125
|
role="assistant",
|
|
116
126
|
content=[TextContent(type="text", text=response_text)],
|
|
117
127
|
)
|
|
118
128
|
|
|
119
|
-
async def
|
|
129
|
+
async def structured_impl(
|
|
120
130
|
self,
|
|
121
|
-
|
|
131
|
+
messages: List[PromptMessageExtended],
|
|
122
132
|
model: Type[ModelT],
|
|
123
133
|
request_params: Optional[RequestParams] = None,
|
|
124
|
-
) -> Tuple[ModelT | None,
|
|
134
|
+
) -> Tuple[ModelT | None, PromptMessageExtended]:
|
|
125
135
|
"""
|
|
126
136
|
Chain the request through multiple agents and parse the final response.
|
|
127
137
|
|
|
@@ -134,12 +144,12 @@ class ChainAgent(BaseAgent):
|
|
|
134
144
|
The parsed response from the final agent, or None if parsing fails
|
|
135
145
|
"""
|
|
136
146
|
# Generate response through the chain
|
|
137
|
-
response = await self.generate(
|
|
147
|
+
response = await self.generate(messages, request_params)
|
|
138
148
|
last_agent = self.agents[-1]
|
|
139
149
|
try:
|
|
140
150
|
return await last_agent.structured([response], model, request_params)
|
|
141
151
|
except Exception as e:
|
|
142
|
-
|
|
152
|
+
logger.warning(f"Failed to parse response from chain: {str(e)}")
|
|
143
153
|
return None, Prompt.assistant("Failed to parse response from chain: {str(e)}")
|
|
144
154
|
|
|
145
155
|
async def initialize(self) -> None:
|
|
@@ -164,4 +174,4 @@ class ChainAgent(BaseAgent):
|
|
|
164
174
|
try:
|
|
165
175
|
await agent.shutdown()
|
|
166
176
|
except Exception as e:
|
|
167
|
-
|
|
177
|
+
logger.warning(f"Error shutting down agent in chain: {str(e)}")
|
|
@@ -10,17 +10,16 @@ or a maximum number of refinements is attempted.
|
|
|
10
10
|
from enum import Enum
|
|
11
11
|
from typing import Any, List, Optional, Tuple, Type
|
|
12
12
|
|
|
13
|
+
from mcp import Tool
|
|
13
14
|
from pydantic import BaseModel, Field
|
|
14
15
|
|
|
15
|
-
from
|
|
16
|
-
from
|
|
17
|
-
from
|
|
18
|
-
from
|
|
19
|
-
from
|
|
20
|
-
from
|
|
21
|
-
from
|
|
22
|
-
from mcp_agent.mcp.interfaces import ModelT
|
|
23
|
-
from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart
|
|
16
|
+
from fast_agent.agents.agent_types import AgentConfig, AgentType
|
|
17
|
+
from fast_agent.agents.llm_agent import LlmAgent
|
|
18
|
+
from fast_agent.core.exceptions import AgentConfigError
|
|
19
|
+
from fast_agent.core.logging.logger import get_logger
|
|
20
|
+
from fast_agent.core.prompt import Prompt
|
|
21
|
+
from fast_agent.interfaces import AgentProtocol, ModelT
|
|
22
|
+
from fast_agent.types import PromptMessageExtended, RequestParams
|
|
24
23
|
|
|
25
24
|
logger = get_logger(__name__)
|
|
26
25
|
|
|
@@ -54,7 +53,7 @@ class EvaluationResult(BaseModel):
|
|
|
54
53
|
)
|
|
55
54
|
|
|
56
55
|
|
|
57
|
-
class EvaluatorOptimizerAgent(
|
|
56
|
+
class EvaluatorOptimizerAgent(LlmAgent):
|
|
58
57
|
"""
|
|
59
58
|
An agent that implements the evaluator-optimizer workflow pattern.
|
|
60
59
|
|
|
@@ -70,9 +69,9 @@ class EvaluatorOptimizerAgent(BaseAgent):
|
|
|
70
69
|
|
|
71
70
|
def __init__(
|
|
72
71
|
self,
|
|
73
|
-
config:
|
|
74
|
-
generator_agent:
|
|
75
|
-
evaluator_agent:
|
|
72
|
+
config: AgentConfig,
|
|
73
|
+
generator_agent: AgentProtocol,
|
|
74
|
+
evaluator_agent: AgentProtocol,
|
|
76
75
|
min_rating: QualityRating = QualityRating.GOOD,
|
|
77
76
|
max_refinements: int = 3,
|
|
78
77
|
context: Optional[Any] = None,
|
|
@@ -83,8 +82,8 @@ class EvaluatorOptimizerAgent(BaseAgent):
|
|
|
83
82
|
|
|
84
83
|
Args:
|
|
85
84
|
config: Agent configuration or name
|
|
86
|
-
generator_agent:
|
|
87
|
-
evaluator_agent:
|
|
85
|
+
generator_agent: LlmAgent that generates the initial and refined responses
|
|
86
|
+
evaluator_agent: LlmAgent that evaluates responses and provides feedback
|
|
88
87
|
min_rating: Minimum acceptable quality rating to stop refinement
|
|
89
88
|
max_refinements: Maximum number of refinement cycles to attempt
|
|
90
89
|
context: Optional context object
|
|
@@ -104,16 +103,17 @@ class EvaluatorOptimizerAgent(BaseAgent):
|
|
|
104
103
|
self.max_refinements = max_refinements
|
|
105
104
|
self.refinement_history = []
|
|
106
105
|
|
|
107
|
-
async def
|
|
106
|
+
async def generate_impl(
|
|
108
107
|
self,
|
|
109
|
-
|
|
110
|
-
request_params:
|
|
111
|
-
|
|
108
|
+
messages: List[PromptMessageExtended],
|
|
109
|
+
request_params: RequestParams | None = None,
|
|
110
|
+
tools: List[Tool] | None = None,
|
|
111
|
+
) -> PromptMessageExtended:
|
|
112
112
|
"""
|
|
113
113
|
Generate a response through evaluation-guided refinement.
|
|
114
114
|
|
|
115
115
|
Args:
|
|
116
|
-
|
|
116
|
+
normalized_messages: Already normalized list of PromptMessageExtended
|
|
117
117
|
request_params: Optional request parameters
|
|
118
118
|
|
|
119
119
|
Returns:
|
|
@@ -126,10 +126,10 @@ class EvaluatorOptimizerAgent(BaseAgent):
|
|
|
126
126
|
self.refinement_history = []
|
|
127
127
|
|
|
128
128
|
# Extract the user request
|
|
129
|
-
request =
|
|
129
|
+
request = messages[-1].all_text() if messages else ""
|
|
130
130
|
|
|
131
131
|
# Initial generation
|
|
132
|
-
response = await self.generator_agent.generate(
|
|
132
|
+
response = await self.generator_agent.generate(messages, request_params)
|
|
133
133
|
best_response = response
|
|
134
134
|
|
|
135
135
|
# Refinement loop
|
|
@@ -138,7 +138,7 @@ class EvaluatorOptimizerAgent(BaseAgent):
|
|
|
138
138
|
|
|
139
139
|
# Evaluate current response
|
|
140
140
|
eval_prompt = self._build_eval_prompt(
|
|
141
|
-
request=request, response=response.last_text(), iteration=refinement_count
|
|
141
|
+
request=request, response=response.last_text() or "", iteration=refinement_count
|
|
142
142
|
)
|
|
143
143
|
|
|
144
144
|
# Create evaluation message and get structured evaluation result
|
|
@@ -190,8 +190,6 @@ class EvaluatorOptimizerAgent(BaseAgent):
|
|
|
190
190
|
|
|
191
191
|
# Generate refined response
|
|
192
192
|
refinement_prompt = self._build_refinement_prompt(
|
|
193
|
-
request=request,
|
|
194
|
-
response=response.last_text(), ## only if there is no history?
|
|
195
193
|
feedback=evaluation_result,
|
|
196
194
|
iteration=refinement_count,
|
|
197
195
|
)
|
|
@@ -204,17 +202,17 @@ class EvaluatorOptimizerAgent(BaseAgent):
|
|
|
204
202
|
|
|
205
203
|
return best_response
|
|
206
204
|
|
|
207
|
-
async def
|
|
205
|
+
async def structured_impl(
|
|
208
206
|
self,
|
|
209
|
-
|
|
207
|
+
messages: List[PromptMessageExtended],
|
|
210
208
|
model: Type[ModelT],
|
|
211
|
-
request_params:
|
|
212
|
-
) -> Tuple[ModelT | None,
|
|
209
|
+
request_params: RequestParams | None = None,
|
|
210
|
+
) -> Tuple[ModelT | None, PromptMessageExtended]:
|
|
213
211
|
"""
|
|
214
212
|
Generate an optimized response and parse it into a structured format.
|
|
215
213
|
|
|
216
214
|
Args:
|
|
217
|
-
|
|
215
|
+
messages: List of messages to process
|
|
218
216
|
model: Pydantic model to parse the response into
|
|
219
217
|
request_params: Optional request parameters
|
|
220
218
|
|
|
@@ -222,7 +220,7 @@ class EvaluatorOptimizerAgent(BaseAgent):
|
|
|
222
220
|
The parsed response, or None if parsing fails
|
|
223
221
|
"""
|
|
224
222
|
# Generate optimized response
|
|
225
|
-
response = await self.
|
|
223
|
+
response = await self.generate_impl(messages, request_params)
|
|
226
224
|
|
|
227
225
|
# Delegate structured parsing to the generator agent
|
|
228
226
|
structured_prompt = Prompt.user(response.all_text())
|
|
@@ -233,10 +231,10 @@ class EvaluatorOptimizerAgent(BaseAgent):
|
|
|
233
231
|
await super().initialize()
|
|
234
232
|
|
|
235
233
|
# Initialize generator and evaluator agents if not already initialized
|
|
236
|
-
if not
|
|
234
|
+
if not self.generator_agent.initialized:
|
|
237
235
|
await self.generator_agent.initialize()
|
|
238
236
|
|
|
239
|
-
if not
|
|
237
|
+
if not self.evaluator_agent.initialized:
|
|
240
238
|
await self.evaluator_agent.initialize()
|
|
241
239
|
|
|
242
240
|
self.initialized = True
|
|
@@ -290,8 +288,6 @@ Evaluate the response for iteration {iteration + 1} and provide feedback on its
|
|
|
290
288
|
|
|
291
289
|
def _build_refinement_prompt(
|
|
292
290
|
self,
|
|
293
|
-
request: str,
|
|
294
|
-
response: str,
|
|
295
291
|
feedback: EvaluationResult,
|
|
296
292
|
iteration: int,
|
|
297
293
|
) -> str:
|