fast-agent-mcp 0.2.58__py3-none-any.whl → 0.3.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of fast-agent-mcp might be problematic. Click here for more details.
- fast_agent/__init__.py +127 -0
- fast_agent/agents/__init__.py +36 -0
- {mcp_agent/core → fast_agent/agents}/agent_types.py +2 -1
- fast_agent/agents/llm_agent.py +217 -0
- fast_agent/agents/llm_decorator.py +486 -0
- mcp_agent/agents/base_agent.py → fast_agent/agents/mcp_agent.py +377 -385
- fast_agent/agents/tool_agent.py +168 -0
- {mcp_agent → fast_agent}/agents/workflow/chain_agent.py +43 -33
- {mcp_agent → fast_agent}/agents/workflow/evaluator_optimizer.py +31 -35
- {mcp_agent → fast_agent}/agents/workflow/iterative_planner.py +56 -47
- {mcp_agent → fast_agent}/agents/workflow/orchestrator_models.py +4 -4
- {mcp_agent → fast_agent}/agents/workflow/parallel_agent.py +34 -41
- {mcp_agent → fast_agent}/agents/workflow/router_agent.py +54 -39
- {mcp_agent → fast_agent}/cli/__main__.py +5 -3
- {mcp_agent → fast_agent}/cli/commands/check_config.py +95 -66
- {mcp_agent → fast_agent}/cli/commands/go.py +20 -11
- {mcp_agent → fast_agent}/cli/commands/quickstart.py +4 -4
- {mcp_agent → fast_agent}/cli/commands/server_helpers.py +1 -1
- {mcp_agent → fast_agent}/cli/commands/setup.py +64 -134
- {mcp_agent → fast_agent}/cli/commands/url_parser.py +9 -8
- {mcp_agent → fast_agent}/cli/main.py +36 -16
- {mcp_agent → fast_agent}/cli/terminal.py +2 -2
- {mcp_agent → fast_agent}/config.py +10 -2
- fast_agent/constants.py +8 -0
- {mcp_agent → fast_agent}/context.py +24 -19
- {mcp_agent → fast_agent}/context_dependent.py +9 -5
- fast_agent/core/__init__.py +17 -0
- {mcp_agent → fast_agent}/core/agent_app.py +39 -36
- fast_agent/core/core_app.py +135 -0
- {mcp_agent → fast_agent}/core/direct_decorators.py +12 -26
- {mcp_agent → fast_agent}/core/direct_factory.py +95 -73
- {mcp_agent → fast_agent/core}/executor/executor.py +4 -5
- {mcp_agent → fast_agent}/core/fastagent.py +32 -32
- fast_agent/core/logging/__init__.py +5 -0
- {mcp_agent → fast_agent/core}/logging/events.py +3 -3
- {mcp_agent → fast_agent/core}/logging/json_serializer.py +1 -1
- {mcp_agent → fast_agent/core}/logging/listeners.py +85 -7
- {mcp_agent → fast_agent/core}/logging/logger.py +7 -7
- {mcp_agent → fast_agent/core}/logging/transport.py +10 -11
- fast_agent/core/prompt.py +9 -0
- {mcp_agent → fast_agent}/core/validation.py +4 -4
- fast_agent/event_progress.py +61 -0
- fast_agent/history/history_exporter.py +44 -0
- {mcp_agent → fast_agent}/human_input/__init__.py +9 -12
- {mcp_agent → fast_agent}/human_input/elicitation_handler.py +26 -8
- {mcp_agent → fast_agent}/human_input/elicitation_state.py +7 -7
- {mcp_agent → fast_agent}/human_input/simple_form.py +6 -4
- {mcp_agent → fast_agent}/human_input/types.py +1 -18
- fast_agent/interfaces.py +228 -0
- fast_agent/llm/__init__.py +9 -0
- mcp_agent/llm/augmented_llm.py → fast_agent/llm/fastagent_llm.py +127 -218
- fast_agent/llm/internal/passthrough.py +137 -0
- mcp_agent/llm/augmented_llm_playback.py → fast_agent/llm/internal/playback.py +29 -25
- mcp_agent/llm/augmented_llm_silent.py → fast_agent/llm/internal/silent.py +10 -17
- fast_agent/llm/internal/slow.py +38 -0
- {mcp_agent → fast_agent}/llm/memory.py +40 -30
- {mcp_agent → fast_agent}/llm/model_database.py +35 -2
- {mcp_agent → fast_agent}/llm/model_factory.py +103 -77
- fast_agent/llm/model_info.py +126 -0
- {mcp_agent/llm/providers → fast_agent/llm/provider/anthropic}/anthropic_utils.py +7 -7
- fast_agent/llm/provider/anthropic/llm_anthropic.py +603 -0
- {mcp_agent/llm/providers → fast_agent/llm/provider/anthropic}/multipart_converter_anthropic.py +79 -86
- {mcp_agent/llm/providers → fast_agent/llm/provider/bedrock}/bedrock_utils.py +3 -1
- mcp_agent/llm/providers/augmented_llm_bedrock.py → fast_agent/llm/provider/bedrock/llm_bedrock.py +833 -717
- {mcp_agent/llm/providers → fast_agent/llm/provider/google}/google_converter.py +66 -14
- fast_agent/llm/provider/google/llm_google_native.py +431 -0
- mcp_agent/llm/providers/augmented_llm_aliyun.py → fast_agent/llm/provider/openai/llm_aliyun.py +6 -7
- mcp_agent/llm/providers/augmented_llm_azure.py → fast_agent/llm/provider/openai/llm_azure.py +4 -4
- mcp_agent/llm/providers/augmented_llm_deepseek.py → fast_agent/llm/provider/openai/llm_deepseek.py +10 -11
- mcp_agent/llm/providers/augmented_llm_generic.py → fast_agent/llm/provider/openai/llm_generic.py +4 -4
- mcp_agent/llm/providers/augmented_llm_google_oai.py → fast_agent/llm/provider/openai/llm_google_oai.py +4 -4
- mcp_agent/llm/providers/augmented_llm_groq.py → fast_agent/llm/provider/openai/llm_groq.py +14 -16
- mcp_agent/llm/providers/augmented_llm_openai.py → fast_agent/llm/provider/openai/llm_openai.py +133 -207
- mcp_agent/llm/providers/augmented_llm_openrouter.py → fast_agent/llm/provider/openai/llm_openrouter.py +6 -6
- mcp_agent/llm/providers/augmented_llm_tensorzero_openai.py → fast_agent/llm/provider/openai/llm_tensorzero_openai.py +17 -16
- mcp_agent/llm/providers/augmented_llm_xai.py → fast_agent/llm/provider/openai/llm_xai.py +6 -6
- {mcp_agent/llm/providers → fast_agent/llm/provider/openai}/multipart_converter_openai.py +125 -63
- {mcp_agent/llm/providers → fast_agent/llm/provider/openai}/openai_multipart.py +12 -12
- {mcp_agent/llm/providers → fast_agent/llm/provider/openai}/openai_utils.py +18 -16
- {mcp_agent → fast_agent}/llm/provider_key_manager.py +2 -2
- {mcp_agent → fast_agent}/llm/provider_types.py +2 -0
- {mcp_agent → fast_agent}/llm/sampling_converter.py +15 -12
- {mcp_agent → fast_agent}/llm/usage_tracking.py +23 -5
- fast_agent/mcp/__init__.py +43 -0
- {mcp_agent → fast_agent}/mcp/elicitation_factory.py +3 -3
- {mcp_agent → fast_agent}/mcp/elicitation_handlers.py +19 -10
- {mcp_agent → fast_agent}/mcp/gen_client.py +3 -3
- fast_agent/mcp/helpers/__init__.py +36 -0
- fast_agent/mcp/helpers/content_helpers.py +183 -0
- {mcp_agent → fast_agent}/mcp/helpers/server_config_helpers.py +8 -8
- {mcp_agent → fast_agent}/mcp/hf_auth.py +25 -23
- fast_agent/mcp/interfaces.py +93 -0
- {mcp_agent → fast_agent}/mcp/logger_textio.py +4 -4
- {mcp_agent → fast_agent}/mcp/mcp_agent_client_session.py +49 -44
- {mcp_agent → fast_agent}/mcp/mcp_aggregator.py +66 -115
- {mcp_agent → fast_agent}/mcp/mcp_connection_manager.py +16 -23
- {mcp_agent/core → fast_agent/mcp}/mcp_content.py +23 -15
- {mcp_agent → fast_agent}/mcp/mime_utils.py +39 -0
- fast_agent/mcp/prompt.py +159 -0
- mcp_agent/mcp/prompt_message_multipart.py → fast_agent/mcp/prompt_message_extended.py +27 -20
- {mcp_agent → fast_agent}/mcp/prompt_render.py +21 -19
- {mcp_agent → fast_agent}/mcp/prompt_serialization.py +46 -46
- fast_agent/mcp/prompts/__main__.py +7 -0
- {mcp_agent → fast_agent}/mcp/prompts/prompt_helpers.py +31 -30
- {mcp_agent → fast_agent}/mcp/prompts/prompt_load.py +8 -8
- {mcp_agent → fast_agent}/mcp/prompts/prompt_server.py +11 -19
- {mcp_agent → fast_agent}/mcp/prompts/prompt_template.py +18 -18
- {mcp_agent → fast_agent}/mcp/resource_utils.py +1 -1
- {mcp_agent → fast_agent}/mcp/sampling.py +31 -26
- {mcp_agent/mcp_server → fast_agent/mcp/server}/__init__.py +1 -1
- {mcp_agent/mcp_server → fast_agent/mcp/server}/agent_server.py +5 -6
- fast_agent/mcp/ui_agent.py +48 -0
- fast_agent/mcp/ui_mixin.py +209 -0
- fast_agent/mcp_server_registry.py +90 -0
- {mcp_agent → fast_agent}/resources/examples/data-analysis/analysis-campaign.py +5 -4
- {mcp_agent → fast_agent}/resources/examples/data-analysis/analysis.py +1 -1
- {mcp_agent → fast_agent}/resources/examples/mcp/elicitations/forms_demo.py +3 -3
- {mcp_agent → fast_agent}/resources/examples/mcp/elicitations/game_character.py +2 -2
- {mcp_agent → fast_agent}/resources/examples/mcp/elicitations/game_character_handler.py +1 -1
- {mcp_agent → fast_agent}/resources/examples/mcp/elicitations/tool_call.py +1 -1
- {mcp_agent → fast_agent}/resources/examples/mcp/state-transfer/agent_one.py +1 -1
- {mcp_agent → fast_agent}/resources/examples/mcp/state-transfer/agent_two.py +1 -1
- {mcp_agent → fast_agent}/resources/examples/researcher/researcher-eval.py +1 -1
- {mcp_agent → fast_agent}/resources/examples/researcher/researcher-imp.py +1 -1
- {mcp_agent → fast_agent}/resources/examples/researcher/researcher.py +1 -1
- {mcp_agent → fast_agent}/resources/examples/tensorzero/agent.py +2 -2
- {mcp_agent → fast_agent}/resources/examples/tensorzero/image_demo.py +3 -3
- {mcp_agent → fast_agent}/resources/examples/tensorzero/simple_agent.py +1 -1
- {mcp_agent → fast_agent}/resources/examples/workflows/chaining.py +1 -1
- {mcp_agent → fast_agent}/resources/examples/workflows/evaluator.py +3 -3
- {mcp_agent → fast_agent}/resources/examples/workflows/human_input.py +5 -3
- {mcp_agent → fast_agent}/resources/examples/workflows/orchestrator.py +1 -1
- {mcp_agent → fast_agent}/resources/examples/workflows/parallel.py +2 -2
- {mcp_agent → fast_agent}/resources/examples/workflows/router.py +5 -2
- fast_agent/resources/setup/.gitignore +24 -0
- fast_agent/resources/setup/agent.py +18 -0
- fast_agent/resources/setup/fastagent.config.yaml +44 -0
- fast_agent/resources/setup/fastagent.secrets.yaml.example +38 -0
- fast_agent/tools/elicitation.py +369 -0
- fast_agent/types/__init__.py +32 -0
- fast_agent/types/llm_stop_reason.py +77 -0
- fast_agent/ui/__init__.py +38 -0
- fast_agent/ui/console_display.py +1005 -0
- {mcp_agent/human_input → fast_agent/ui}/elicitation_form.py +17 -12
- mcp_agent/human_input/elicitation_forms.py → fast_agent/ui/elicitation_style.py +1 -1
- {mcp_agent/core → fast_agent/ui}/enhanced_prompt.py +96 -25
- {mcp_agent/core → fast_agent/ui}/interactive_prompt.py +330 -125
- fast_agent/ui/mcp_ui_utils.py +224 -0
- {mcp_agent → fast_agent/ui}/progress_display.py +2 -2
- {mcp_agent/logging → fast_agent/ui}/rich_progress.py +4 -4
- {mcp_agent/core → fast_agent/ui}/usage_display.py +3 -8
- {fast_agent_mcp-0.2.58.dist-info → fast_agent_mcp-0.3.0.dist-info}/METADATA +7 -7
- fast_agent_mcp-0.3.0.dist-info/RECORD +202 -0
- fast_agent_mcp-0.3.0.dist-info/entry_points.txt +5 -0
- fast_agent_mcp-0.2.58.dist-info/RECORD +0 -193
- fast_agent_mcp-0.2.58.dist-info/entry_points.txt +0 -6
- mcp_agent/__init__.py +0 -114
- mcp_agent/agents/agent.py +0 -92
- mcp_agent/agents/workflow/__init__.py +0 -1
- mcp_agent/agents/workflow/orchestrator_agent.py +0 -597
- mcp_agent/app.py +0 -175
- mcp_agent/core/__init__.py +0 -26
- mcp_agent/core/prompt.py +0 -191
- mcp_agent/event_progress.py +0 -134
- mcp_agent/human_input/handler.py +0 -81
- mcp_agent/llm/__init__.py +0 -2
- mcp_agent/llm/augmented_llm_passthrough.py +0 -232
- mcp_agent/llm/augmented_llm_slow.py +0 -53
- mcp_agent/llm/providers/__init__.py +0 -8
- mcp_agent/llm/providers/augmented_llm_anthropic.py +0 -718
- mcp_agent/llm/providers/augmented_llm_google_native.py +0 -496
- mcp_agent/llm/providers/sampling_converter_anthropic.py +0 -57
- mcp_agent/llm/providers/sampling_converter_openai.py +0 -26
- mcp_agent/llm/sampling_format_converter.py +0 -37
- mcp_agent/logging/__init__.py +0 -0
- mcp_agent/mcp/__init__.py +0 -50
- mcp_agent/mcp/helpers/__init__.py +0 -25
- mcp_agent/mcp/helpers/content_helpers.py +0 -187
- mcp_agent/mcp/interfaces.py +0 -266
- mcp_agent/mcp/prompts/__init__.py +0 -0
- mcp_agent/mcp/prompts/__main__.py +0 -10
- mcp_agent/mcp_server_registry.py +0 -343
- mcp_agent/tools/tool_definition.py +0 -14
- mcp_agent/ui/console_display.py +0 -790
- mcp_agent/ui/console_display_legacy.py +0 -401
- {mcp_agent → fast_agent}/agents/workflow/orchestrator_prompts.py +0 -0
- {mcp_agent/agents → fast_agent/cli}/__init__.py +0 -0
- {mcp_agent → fast_agent}/cli/constants.py +0 -0
- {mcp_agent → fast_agent}/core/error_handling.py +0 -0
- {mcp_agent → fast_agent}/core/exceptions.py +0 -0
- {mcp_agent/cli → fast_agent/core/executor}/__init__.py +0 -0
- {mcp_agent → fast_agent/core}/executor/task_registry.py +0 -0
- {mcp_agent → fast_agent/core}/executor/workflow_signal.py +0 -0
- {mcp_agent → fast_agent}/human_input/form_fields.py +0 -0
- {mcp_agent → fast_agent}/llm/prompt_utils.py +0 -0
- {mcp_agent/core → fast_agent/llm}/request_params.py +0 -0
- {mcp_agent → fast_agent}/mcp/common.py +0 -0
- {mcp_agent/executor → fast_agent/mcp/prompts}/__init__.py +0 -0
- {mcp_agent → fast_agent}/mcp/prompts/prompt_constants.py +0 -0
- {mcp_agent → fast_agent}/py.typed +0 -0
- {mcp_agent → fast_agent}/resources/examples/data-analysis/fastagent.config.yaml +0 -0
- {mcp_agent → fast_agent}/resources/examples/data-analysis/mount-point/WA_Fn-UseC_-HR-Employee-Attrition.csv +0 -0
- {mcp_agent → fast_agent}/resources/examples/mcp/elicitations/elicitation_account_server.py +0 -0
- {mcp_agent → fast_agent}/resources/examples/mcp/elicitations/elicitation_forms_server.py +0 -0
- {mcp_agent → fast_agent}/resources/examples/mcp/elicitations/elicitation_game_server.py +0 -0
- {mcp_agent → fast_agent}/resources/examples/mcp/elicitations/fastagent.config.yaml +0 -0
- {mcp_agent → fast_agent}/resources/examples/mcp/elicitations/fastagent.secrets.yaml.example +0 -0
- {mcp_agent → fast_agent}/resources/examples/mcp/state-transfer/fastagent.config.yaml +0 -0
- {mcp_agent → fast_agent}/resources/examples/mcp/state-transfer/fastagent.secrets.yaml.example +0 -0
- {mcp_agent → fast_agent}/resources/examples/researcher/fastagent.config.yaml +0 -0
- {mcp_agent → fast_agent}/resources/examples/tensorzero/.env.sample +0 -0
- {mcp_agent → fast_agent}/resources/examples/tensorzero/Makefile +0 -0
- {mcp_agent → fast_agent}/resources/examples/tensorzero/README.md +0 -0
- {mcp_agent → fast_agent}/resources/examples/tensorzero/demo_images/clam.jpg +0 -0
- {mcp_agent → fast_agent}/resources/examples/tensorzero/demo_images/crab.png +0 -0
- {mcp_agent → fast_agent}/resources/examples/tensorzero/demo_images/shrimp.png +0 -0
- {mcp_agent → fast_agent}/resources/examples/tensorzero/docker-compose.yml +0 -0
- {mcp_agent → fast_agent}/resources/examples/tensorzero/fastagent.config.yaml +0 -0
- {mcp_agent → fast_agent}/resources/examples/tensorzero/mcp_server/Dockerfile +0 -0
- {mcp_agent → fast_agent}/resources/examples/tensorzero/mcp_server/entrypoint.sh +0 -0
- {mcp_agent → fast_agent}/resources/examples/tensorzero/mcp_server/mcp_server.py +0 -0
- {mcp_agent → fast_agent}/resources/examples/tensorzero/mcp_server/pyproject.toml +0 -0
- {mcp_agent → fast_agent}/resources/examples/tensorzero/tensorzero_config/system_schema.json +0 -0
- {mcp_agent → fast_agent}/resources/examples/tensorzero/tensorzero_config/system_template.minijinja +0 -0
- {mcp_agent → fast_agent}/resources/examples/tensorzero/tensorzero_config/tensorzero.toml +0 -0
- {mcp_agent → fast_agent}/resources/examples/workflows/fastagent.config.yaml +0 -0
- {mcp_agent → fast_agent}/resources/examples/workflows/graded_report.md +0 -0
- {mcp_agent → fast_agent}/resources/examples/workflows/short_story.md +0 -0
- {mcp_agent → fast_agent}/resources/examples/workflows/short_story.txt +0 -0
- {mcp_agent → fast_agent/ui}/console.py +0 -0
- {mcp_agent/core → fast_agent/ui}/mermaid_utils.py +0 -0
- {fast_agent_mcp-0.2.58.dist-info → fast_agent_mcp-0.3.0.dist-info}/WHEEL +0 -0
- {fast_agent_mcp-0.2.58.dist-info → fast_agent_mcp-0.3.0.dist-info}/licenses/LICENSE +0 -0
|
@@ -1,232 +0,0 @@
|
|
|
1
|
-
import json # Import at the module level
|
|
2
|
-
from typing import Any, List, Optional, Union
|
|
3
|
-
|
|
4
|
-
from mcp.types import PromptMessage
|
|
5
|
-
|
|
6
|
-
from mcp_agent.core.prompt import Prompt
|
|
7
|
-
from mcp_agent.llm.augmented_llm import (
|
|
8
|
-
AugmentedLLM,
|
|
9
|
-
MessageParamT,
|
|
10
|
-
RequestParams,
|
|
11
|
-
)
|
|
12
|
-
from mcp_agent.llm.provider_types import Provider
|
|
13
|
-
from mcp_agent.llm.usage_tracking import create_turn_usage_from_messages
|
|
14
|
-
from mcp_agent.logging.logger import get_logger
|
|
15
|
-
from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart
|
|
16
|
-
|
|
17
|
-
CALL_TOOL_INDICATOR = "***CALL_TOOL"
|
|
18
|
-
FIXED_RESPONSE_INDICATOR = "***FIXED_RESPONSE"
|
|
19
|
-
|
|
20
|
-
|
|
21
|
-
class PassthroughLLM(AugmentedLLM):
    """
    A specialized LLM implementation that simply passes through input messages without modification.

    This is useful for cases where you need an object with the AugmentedLLM interface
    but want to preserve the original message without any processing, such as in a
    parallel workflow where no fan-in aggregation is needed.

    Two magic prefixes are recognized in incoming text:
      * ``***CALL_TOOL <tool_name> [json-args]`` -- dispatch a tool call via the aggregator.
      * ``***FIXED_RESPONSE <text>``             -- remember ``<text>`` and reply with it.
    """

    def __init__(
        self, provider=Provider.FAST_AGENT, name: str = "Passthrough", **kwargs: dict[str, Any]
    ) -> None:
        super().__init__(name=name, provider=provider, **kwargs)
        self.logger = get_logger(__name__)
        # Fix: the original stored the PromptMessage *class* in the list
        # (`[PromptMessage]`); an empty message buffer was clearly intended.
        self._messages: List[PromptMessage] = []
        self._fixed_response: str | None = None

    def _track_usage(self, input_content: str, output_content: str, tool_calls: int) -> None:
        """Best-effort usage accounting for one passthrough turn; never raises.

        Extracted helper: the original duplicated this try/except block three times.
        """
        try:
            turn_usage = create_turn_usage_from_messages(
                input_content=input_content,
                output_content=output_content,
                model="passthrough",
                model_type="passthrough",
                tool_calls=tool_calls,
                delay_seconds=0.0,
            )
            self.usage_accumulator.add_turn(turn_usage)
        except Exception as e:
            self.logger.warning(f"Failed to track usage: {e}")

    async def generate_str(
        self,
        message: Union[str, MessageParamT, List[MessageParamT]],
        request_params: Optional[RequestParams] = None,
    ) -> str:
        """Return the input message as a string, or execute a ***CALL_TOOL command.

        Args:
            message: The incoming message (string, message param, or list of them).
            request_params: Ignored; present to satisfy the AugmentedLLM interface.

        Returns:
            The echoed input (or the tool result) as a string.
        """
        # Special command to call a tool -- use the shared constant instead of
        # the hard-coded "***CALL_TOOL " literal the original duplicated here.
        if isinstance(message, str) and message.startswith(CALL_TOOL_INDICATOR + " "):
            return await self._call_tool_and_return_result(message)

        self.show_user_message(message, model="fastagent-passthrough", chat_turn=0)
        await self.show_assistant_message(message, title="ASSISTANT/PASSTHROUGH")

        # Handle PromptMessage by concatenating all parts.
        if isinstance(message, PromptMessage):
            result = "\n".join(str(part) for part in message.content)
        else:
            result = str(message)

        input_content = str(message)
        self._track_usage(
            input_content,
            result,
            tool_calls=1 if input_content.startswith(CALL_TOOL_INDICATOR) else 0,
        )
        return result

    async def initialize(self) -> None:
        """No external resources to set up for a passthrough model."""
        pass

    async def _call_tool_and_return_result(self, command: str) -> str:
        """
        Call a tool based on the command and return its result as a string.

        Args:
            command: The command string, expected format: "***CALL_TOOL <server>-<tool_name> [arguments_json]"

        Returns:
            Tool result as a string; on failure, an "Error calling tool: ..." message.
        """
        try:
            tool_name, arguments = self._parse_tool_command(command)
            result = await self.aggregator.call_tool(tool_name, arguments)
            return self._format_tool_result(tool_name, result)
        except Exception as e:
            self.logger.error(f"Error calling tool: {str(e)}")
            return f"Error calling tool: {str(e)}"

    def _parse_tool_command(self, command: str) -> tuple[str, Optional[dict]]:
        """
        Parse a tool command string into tool name and arguments.

        Args:
            command: The command string in format "***CALL_TOOL <tool_name> [arguments_json]"

        Returns:
            Tuple of (tool_name, arguments_dict_or_None)

        Raises:
            ValueError: If command format or the JSON arguments are invalid.
        """
        parts = command.split(" ", 2)
        if len(parts) < 2:
            raise ValueError("Invalid format. Expected '***CALL_TOOL <tool_name> [arguments_json]'")

        tool_name = parts[1].strip()
        arguments = None

        if len(parts) > 2:
            try:
                arguments = json.loads(parts[2])
            except json.JSONDecodeError as e:
                # Chain the cause so the original parse position is not lost.
                raise ValueError(f"Invalid JSON arguments: {parts[2]}") from e

        self.logger.info(f"Calling tool {tool_name} with arguments {arguments}")
        return tool_name, arguments

    def _format_tool_result(self, tool_name: str, result) -> str:
        """
        Format tool execution result as a string.

        Args:
            tool_name: The name of the tool that was called
            result: The result returned from the tool (has .isError and .content)

        Returns:
            All text parts joined with newlines; errors get an explanatory prefix.
        """
        # Both branches of the original extracted text identically; do it once.
        texts = [
            content_item.text if hasattr(content_item, "text") else str(content_item)
            for content_item in result.content
        ]
        if result.isError:
            error_message = "\n".join(texts) if texts else "Unknown error"
            return f"Error calling tool '{tool_name}': {error_message}"
        return "\n".join(texts)

    async def _apply_prompt_provider_specific(
        self,
        multipart_messages: List["PromptMessageMultipart"],
        request_params: RequestParams | None = None,
        is_template: bool = False,
    ) -> PromptMessageMultipart:
        """Echo the conversation back as the assistant reply, or run a tool call.

        Args:
            multipart_messages: Conversation messages to apply; the last one drives behavior.
            request_params: Ignored by the passthrough implementation.
            is_template: Marks the added history entries as prompt-template content.
        """
        # Add messages to history with proper is_prompt flag.
        self.history.extend(multipart_messages, is_prompt=is_template)

        last_message = multipart_messages[-1]
        input_content = "\n".join(message.all_text() for message in multipart_messages)

        if self.is_tool_call(last_message):
            result = Prompt.assistant(await self.generate_str(last_message.first_text()))
            await self.show_assistant_message(result.first_text())
            self._track_usage(input_content, result.first_text(), tool_calls=1)
            return result

        if last_message.first_text().startswith(FIXED_RESPONSE_INDICATOR):
            self._fixed_response = (
                last_message.first_text().split(FIXED_RESPONSE_INDICATOR, 1)[1].strip()
            )

        if self._fixed_response:
            await self.show_assistant_message(self._fixed_response)
            result = Prompt.assistant(self._fixed_response)
        else:
            # TODO -- improve when we support Audio/Multimodal gen models e.g. gemini.
            # This should really just return the input as "assistant"...
            concatenated: str = "\n".join(message.all_text() for message in multipart_messages)
            await self.show_assistant_message(concatenated)
            result = Prompt.assistant(concatenated)

        # Not a tool call -- that branch returned above -- so tool_calls is 0.
        # (The original re-tested is_tool_call here; the test could never be true.)
        self._track_usage(input_content, result.first_text(), tool_calls=0)
        return result

    def is_tool_call(self, message: PromptMessageMultipart) -> bool:
        """True when the message's first text starts with the CALL_TOOL indicator."""
        return message.first_text().startswith(CALL_TOOL_INDICATOR)
@@ -1,53 +0,0 @@
|
|
|
1
|
-
import asyncio
|
|
2
|
-
from typing import Any, List, Optional, Union
|
|
3
|
-
|
|
4
|
-
from mcp_agent.llm.augmented_llm import (
|
|
5
|
-
MessageParamT,
|
|
6
|
-
RequestParams,
|
|
7
|
-
)
|
|
8
|
-
from mcp_agent.llm.augmented_llm_passthrough import PassthroughLLM
|
|
9
|
-
from mcp_agent.llm.provider_types import Provider
|
|
10
|
-
from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart
|
|
11
|
-
|
|
12
|
-
|
|
13
|
-
class SlowLLM(PassthroughLLM):
    """
    A specialized LLM implementation that sleeps for 3 seconds before responding like PassthroughLLM.

    This is useful for testing scenarios where you want to simulate slow responses
    or for debugging timing-related issues in parallel workflows.
    """

    # Artificial latency applied before every response, in seconds.
    # Parameterized (was a hard-coded 3 in two places) so subclasses can tune it.
    DELAY_SECONDS: float = 3.0

    def __init__(
        self, provider=Provider.FAST_AGENT, name: str = "Slow", **kwargs: dict[str, Any]
    ) -> None:
        super().__init__(name=name, provider=provider, **kwargs)

    async def generate_str(
        self,
        message: Union[str, MessageParamT, List[MessageParamT]],
        request_params: Optional[RequestParams] = None,
    ) -> str:
        """Sleep for DELAY_SECONDS then return the input message as a string."""
        await asyncio.sleep(self.DELAY_SECONDS)
        result = await super().generate_str(message, request_params)

        # Retroactively record the artificial delay on the turn just tracked.
        if self.usage_accumulator.turns:
            last_turn = self.usage_accumulator.turns[-1]
            # Only update if the raw usage record supports a delay field.
            if hasattr(last_turn.raw_usage, 'delay_seconds'):
                last_turn.raw_usage.delay_seconds = self.DELAY_SECONDS
                # Print updated debug info
                print(f"SlowLLM: Added {self.DELAY_SECONDS}s delay to turn usage")

        return result

    async def _apply_prompt_provider_specific(
        self,
        multipart_messages: List["PromptMessageMultipart"],
        request_params: RequestParams | None = None,
        is_template: bool = False,
    ) -> PromptMessageMultipart:
        """Sleep for DELAY_SECONDS then apply prompt like PassthroughLLM.

        Fix: the original override dropped the parent's ``is_template``
        parameter, so callers passing it got a TypeError and the template
        flag was lost; it is now accepted and forwarded.
        """
        await asyncio.sleep(self.DELAY_SECONDS)
        return await super()._apply_prompt_provider_specific(
            multipart_messages, request_params, is_template
        )
|
@@ -1,8 +0,0 @@
|
|
|
1
|
-
"""Provider sampling-format converters re-exported under a single namespace."""

from mcp_agent.llm.providers.sampling_converter_anthropic import AnthropicSamplingConverter
from mcp_agent.llm.providers.sampling_converter_openai import OpenAISamplingConverter

# Public API of this package: exactly the two converter classes.
__all__ = ["AnthropicSamplingConverter", "OpenAISamplingConverter"]
|