fast-agent-mcp 0.2.57__py3-none-any.whl → 0.3.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of fast-agent-mcp has been flagged as potentially problematic; see the registry's advisory page for more details.
- fast_agent/__init__.py +127 -0
- fast_agent/agents/__init__.py +36 -0
- {mcp_agent/core → fast_agent/agents}/agent_types.py +2 -1
- fast_agent/agents/llm_agent.py +217 -0
- fast_agent/agents/llm_decorator.py +486 -0
- mcp_agent/agents/base_agent.py → fast_agent/agents/mcp_agent.py +377 -385
- fast_agent/agents/tool_agent.py +168 -0
- {mcp_agent → fast_agent}/agents/workflow/chain_agent.py +43 -33
- {mcp_agent → fast_agent}/agents/workflow/evaluator_optimizer.py +31 -35
- {mcp_agent → fast_agent}/agents/workflow/iterative_planner.py +56 -47
- {mcp_agent → fast_agent}/agents/workflow/orchestrator_models.py +4 -4
- {mcp_agent → fast_agent}/agents/workflow/parallel_agent.py +34 -41
- {mcp_agent → fast_agent}/agents/workflow/router_agent.py +54 -39
- {mcp_agent → fast_agent}/cli/__main__.py +5 -3
- {mcp_agent → fast_agent}/cli/commands/check_config.py +95 -66
- {mcp_agent → fast_agent}/cli/commands/go.py +20 -11
- {mcp_agent → fast_agent}/cli/commands/quickstart.py +4 -4
- {mcp_agent → fast_agent}/cli/commands/server_helpers.py +1 -1
- {mcp_agent → fast_agent}/cli/commands/setup.py +64 -134
- {mcp_agent → fast_agent}/cli/commands/url_parser.py +9 -8
- {mcp_agent → fast_agent}/cli/main.py +36 -16
- {mcp_agent → fast_agent}/cli/terminal.py +2 -2
- {mcp_agent → fast_agent}/config.py +13 -2
- fast_agent/constants.py +8 -0
- {mcp_agent → fast_agent}/context.py +24 -19
- {mcp_agent → fast_agent}/context_dependent.py +9 -5
- fast_agent/core/__init__.py +17 -0
- {mcp_agent → fast_agent}/core/agent_app.py +39 -36
- fast_agent/core/core_app.py +135 -0
- {mcp_agent → fast_agent}/core/direct_decorators.py +12 -26
- {mcp_agent → fast_agent}/core/direct_factory.py +95 -73
- {mcp_agent → fast_agent/core}/executor/executor.py +4 -5
- {mcp_agent → fast_agent}/core/fastagent.py +32 -32
- fast_agent/core/logging/__init__.py +5 -0
- {mcp_agent → fast_agent/core}/logging/events.py +3 -3
- {mcp_agent → fast_agent/core}/logging/json_serializer.py +1 -1
- {mcp_agent → fast_agent/core}/logging/listeners.py +85 -7
- {mcp_agent → fast_agent/core}/logging/logger.py +7 -7
- {mcp_agent → fast_agent/core}/logging/transport.py +10 -11
- fast_agent/core/prompt.py +9 -0
- {mcp_agent → fast_agent}/core/validation.py +4 -4
- fast_agent/event_progress.py +61 -0
- fast_agent/history/history_exporter.py +44 -0
- {mcp_agent → fast_agent}/human_input/__init__.py +9 -12
- {mcp_agent → fast_agent}/human_input/elicitation_handler.py +26 -8
- {mcp_agent → fast_agent}/human_input/elicitation_state.py +7 -7
- {mcp_agent → fast_agent}/human_input/simple_form.py +6 -4
- {mcp_agent → fast_agent}/human_input/types.py +1 -18
- fast_agent/interfaces.py +228 -0
- fast_agent/llm/__init__.py +9 -0
- mcp_agent/llm/augmented_llm.py → fast_agent/llm/fastagent_llm.py +128 -218
- fast_agent/llm/internal/passthrough.py +137 -0
- mcp_agent/llm/augmented_llm_playback.py → fast_agent/llm/internal/playback.py +29 -25
- mcp_agent/llm/augmented_llm_silent.py → fast_agent/llm/internal/silent.py +10 -17
- fast_agent/llm/internal/slow.py +38 -0
- {mcp_agent → fast_agent}/llm/memory.py +40 -30
- {mcp_agent → fast_agent}/llm/model_database.py +35 -2
- {mcp_agent → fast_agent}/llm/model_factory.py +103 -77
- fast_agent/llm/model_info.py +126 -0
- {mcp_agent/llm/providers → fast_agent/llm/provider/anthropic}/anthropic_utils.py +7 -7
- fast_agent/llm/provider/anthropic/llm_anthropic.py +603 -0
- {mcp_agent/llm/providers → fast_agent/llm/provider/anthropic}/multipart_converter_anthropic.py +79 -86
- fast_agent/llm/provider/bedrock/bedrock_utils.py +218 -0
- fast_agent/llm/provider/bedrock/llm_bedrock.py +2192 -0
- {mcp_agent/llm/providers → fast_agent/llm/provider/google}/google_converter.py +66 -14
- fast_agent/llm/provider/google/llm_google_native.py +431 -0
- mcp_agent/llm/providers/augmented_llm_aliyun.py → fast_agent/llm/provider/openai/llm_aliyun.py +6 -7
- mcp_agent/llm/providers/augmented_llm_azure.py → fast_agent/llm/provider/openai/llm_azure.py +4 -4
- mcp_agent/llm/providers/augmented_llm_deepseek.py → fast_agent/llm/provider/openai/llm_deepseek.py +10 -11
- mcp_agent/llm/providers/augmented_llm_generic.py → fast_agent/llm/provider/openai/llm_generic.py +4 -4
- mcp_agent/llm/providers/augmented_llm_google_oai.py → fast_agent/llm/provider/openai/llm_google_oai.py +4 -4
- mcp_agent/llm/providers/augmented_llm_groq.py → fast_agent/llm/provider/openai/llm_groq.py +14 -16
- mcp_agent/llm/providers/augmented_llm_openai.py → fast_agent/llm/provider/openai/llm_openai.py +133 -206
- mcp_agent/llm/providers/augmented_llm_openrouter.py → fast_agent/llm/provider/openai/llm_openrouter.py +6 -6
- mcp_agent/llm/providers/augmented_llm_tensorzero_openai.py → fast_agent/llm/provider/openai/llm_tensorzero_openai.py +17 -16
- mcp_agent/llm/providers/augmented_llm_xai.py → fast_agent/llm/provider/openai/llm_xai.py +6 -6
- {mcp_agent/llm/providers → fast_agent/llm/provider/openai}/multipart_converter_openai.py +125 -63
- {mcp_agent/llm/providers → fast_agent/llm/provider/openai}/openai_multipart.py +12 -12
- {mcp_agent/llm/providers → fast_agent/llm/provider/openai}/openai_utils.py +18 -16
- {mcp_agent → fast_agent}/llm/provider_key_manager.py +2 -2
- {mcp_agent → fast_agent}/llm/provider_types.py +2 -0
- {mcp_agent → fast_agent}/llm/sampling_converter.py +15 -12
- {mcp_agent → fast_agent}/llm/usage_tracking.py +23 -5
- fast_agent/mcp/__init__.py +43 -0
- {mcp_agent → fast_agent}/mcp/elicitation_factory.py +3 -3
- {mcp_agent → fast_agent}/mcp/elicitation_handlers.py +19 -10
- {mcp_agent → fast_agent}/mcp/gen_client.py +3 -3
- fast_agent/mcp/helpers/__init__.py +36 -0
- fast_agent/mcp/helpers/content_helpers.py +183 -0
- {mcp_agent → fast_agent}/mcp/helpers/server_config_helpers.py +8 -8
- {mcp_agent → fast_agent}/mcp/hf_auth.py +25 -23
- fast_agent/mcp/interfaces.py +93 -0
- {mcp_agent → fast_agent}/mcp/logger_textio.py +4 -4
- {mcp_agent → fast_agent}/mcp/mcp_agent_client_session.py +49 -44
- {mcp_agent → fast_agent}/mcp/mcp_aggregator.py +66 -115
- {mcp_agent → fast_agent}/mcp/mcp_connection_manager.py +16 -23
- {mcp_agent/core → fast_agent/mcp}/mcp_content.py +23 -15
- {mcp_agent → fast_agent}/mcp/mime_utils.py +39 -0
- fast_agent/mcp/prompt.py +159 -0
- mcp_agent/mcp/prompt_message_multipart.py → fast_agent/mcp/prompt_message_extended.py +27 -20
- {mcp_agent → fast_agent}/mcp/prompt_render.py +21 -19
- {mcp_agent → fast_agent}/mcp/prompt_serialization.py +46 -46
- fast_agent/mcp/prompts/__main__.py +7 -0
- {mcp_agent → fast_agent}/mcp/prompts/prompt_helpers.py +31 -30
- {mcp_agent → fast_agent}/mcp/prompts/prompt_load.py +8 -8
- {mcp_agent → fast_agent}/mcp/prompts/prompt_server.py +11 -19
- {mcp_agent → fast_agent}/mcp/prompts/prompt_template.py +18 -18
- {mcp_agent → fast_agent}/mcp/resource_utils.py +1 -1
- {mcp_agent → fast_agent}/mcp/sampling.py +31 -26
- {mcp_agent/mcp_server → fast_agent/mcp/server}/__init__.py +1 -1
- {mcp_agent/mcp_server → fast_agent/mcp/server}/agent_server.py +5 -6
- fast_agent/mcp/ui_agent.py +48 -0
- fast_agent/mcp/ui_mixin.py +209 -0
- fast_agent/mcp_server_registry.py +90 -0
- {mcp_agent → fast_agent}/resources/examples/data-analysis/analysis-campaign.py +5 -4
- {mcp_agent → fast_agent}/resources/examples/data-analysis/analysis.py +1 -1
- {mcp_agent → fast_agent}/resources/examples/mcp/elicitations/elicitation_forms_server.py +25 -3
- {mcp_agent → fast_agent}/resources/examples/mcp/elicitations/forms_demo.py +3 -3
- {mcp_agent → fast_agent}/resources/examples/mcp/elicitations/game_character.py +2 -2
- {mcp_agent → fast_agent}/resources/examples/mcp/elicitations/game_character_handler.py +1 -1
- {mcp_agent → fast_agent}/resources/examples/mcp/elicitations/tool_call.py +1 -1
- {mcp_agent → fast_agent}/resources/examples/mcp/state-transfer/agent_one.py +1 -1
- {mcp_agent → fast_agent}/resources/examples/mcp/state-transfer/agent_two.py +1 -1
- {mcp_agent → fast_agent}/resources/examples/researcher/researcher-eval.py +1 -1
- {mcp_agent → fast_agent}/resources/examples/researcher/researcher-imp.py +1 -1
- {mcp_agent → fast_agent}/resources/examples/researcher/researcher.py +1 -1
- {mcp_agent → fast_agent}/resources/examples/tensorzero/agent.py +2 -2
- {mcp_agent → fast_agent}/resources/examples/tensorzero/image_demo.py +3 -3
- {mcp_agent → fast_agent}/resources/examples/tensorzero/simple_agent.py +1 -1
- {mcp_agent → fast_agent}/resources/examples/workflows/chaining.py +1 -1
- {mcp_agent → fast_agent}/resources/examples/workflows/evaluator.py +3 -3
- {mcp_agent → fast_agent}/resources/examples/workflows/human_input.py +5 -3
- {mcp_agent → fast_agent}/resources/examples/workflows/orchestrator.py +1 -1
- {mcp_agent → fast_agent}/resources/examples/workflows/parallel.py +2 -2
- {mcp_agent → fast_agent}/resources/examples/workflows/router.py +5 -2
- fast_agent/resources/setup/.gitignore +24 -0
- fast_agent/resources/setup/agent.py +18 -0
- fast_agent/resources/setup/fastagent.config.yaml +44 -0
- fast_agent/resources/setup/fastagent.secrets.yaml.example +38 -0
- fast_agent/tools/elicitation.py +369 -0
- fast_agent/types/__init__.py +32 -0
- fast_agent/types/llm_stop_reason.py +77 -0
- fast_agent/ui/__init__.py +38 -0
- fast_agent/ui/console_display.py +1005 -0
- {mcp_agent/human_input → fast_agent/ui}/elicitation_form.py +56 -39
- mcp_agent/human_input/elicitation_forms.py → fast_agent/ui/elicitation_style.py +1 -1
- {mcp_agent/core → fast_agent/ui}/enhanced_prompt.py +96 -25
- {mcp_agent/core → fast_agent/ui}/interactive_prompt.py +330 -125
- fast_agent/ui/mcp_ui_utils.py +224 -0
- {mcp_agent → fast_agent/ui}/progress_display.py +2 -2
- {mcp_agent/logging → fast_agent/ui}/rich_progress.py +4 -4
- {mcp_agent/core → fast_agent/ui}/usage_display.py +3 -8
- {fast_agent_mcp-0.2.57.dist-info → fast_agent_mcp-0.3.0.dist-info}/METADATA +7 -7
- fast_agent_mcp-0.3.0.dist-info/RECORD +202 -0
- fast_agent_mcp-0.3.0.dist-info/entry_points.txt +5 -0
- fast_agent_mcp-0.2.57.dist-info/RECORD +0 -192
- fast_agent_mcp-0.2.57.dist-info/entry_points.txt +0 -6
- mcp_agent/__init__.py +0 -114
- mcp_agent/agents/agent.py +0 -92
- mcp_agent/agents/workflow/__init__.py +0 -1
- mcp_agent/agents/workflow/orchestrator_agent.py +0 -597
- mcp_agent/app.py +0 -175
- mcp_agent/core/__init__.py +0 -26
- mcp_agent/core/prompt.py +0 -191
- mcp_agent/event_progress.py +0 -134
- mcp_agent/human_input/handler.py +0 -81
- mcp_agent/llm/__init__.py +0 -2
- mcp_agent/llm/augmented_llm_passthrough.py +0 -232
- mcp_agent/llm/augmented_llm_slow.py +0 -53
- mcp_agent/llm/providers/__init__.py +0 -8
- mcp_agent/llm/providers/augmented_llm_anthropic.py +0 -717
- mcp_agent/llm/providers/augmented_llm_bedrock.py +0 -1788
- mcp_agent/llm/providers/augmented_llm_google_native.py +0 -495
- mcp_agent/llm/providers/sampling_converter_anthropic.py +0 -57
- mcp_agent/llm/providers/sampling_converter_openai.py +0 -26
- mcp_agent/llm/sampling_format_converter.py +0 -37
- mcp_agent/logging/__init__.py +0 -0
- mcp_agent/mcp/__init__.py +0 -50
- mcp_agent/mcp/helpers/__init__.py +0 -25
- mcp_agent/mcp/helpers/content_helpers.py +0 -187
- mcp_agent/mcp/interfaces.py +0 -266
- mcp_agent/mcp/prompts/__init__.py +0 -0
- mcp_agent/mcp/prompts/__main__.py +0 -10
- mcp_agent/mcp_server_registry.py +0 -343
- mcp_agent/tools/tool_definition.py +0 -14
- mcp_agent/ui/console_display.py +0 -790
- mcp_agent/ui/console_display_legacy.py +0 -401
- {mcp_agent → fast_agent}/agents/workflow/orchestrator_prompts.py +0 -0
- {mcp_agent/agents → fast_agent/cli}/__init__.py +0 -0
- {mcp_agent → fast_agent}/cli/constants.py +0 -0
- {mcp_agent → fast_agent}/core/error_handling.py +0 -0
- {mcp_agent → fast_agent}/core/exceptions.py +0 -0
- {mcp_agent/cli → fast_agent/core/executor}/__init__.py +0 -0
- {mcp_agent → fast_agent/core}/executor/task_registry.py +0 -0
- {mcp_agent → fast_agent/core}/executor/workflow_signal.py +0 -0
- {mcp_agent → fast_agent}/human_input/form_fields.py +0 -0
- {mcp_agent → fast_agent}/llm/prompt_utils.py +0 -0
- {mcp_agent/core → fast_agent/llm}/request_params.py +0 -0
- {mcp_agent → fast_agent}/mcp/common.py +0 -0
- {mcp_agent/executor → fast_agent/mcp/prompts}/__init__.py +0 -0
- {mcp_agent → fast_agent}/mcp/prompts/prompt_constants.py +0 -0
- {mcp_agent → fast_agent}/py.typed +0 -0
- {mcp_agent → fast_agent}/resources/examples/data-analysis/fastagent.config.yaml +0 -0
- {mcp_agent → fast_agent}/resources/examples/data-analysis/mount-point/WA_Fn-UseC_-HR-Employee-Attrition.csv +0 -0
- {mcp_agent → fast_agent}/resources/examples/mcp/elicitations/elicitation_account_server.py +0 -0
- {mcp_agent → fast_agent}/resources/examples/mcp/elicitations/elicitation_game_server.py +0 -0
- {mcp_agent → fast_agent}/resources/examples/mcp/elicitations/fastagent.config.yaml +0 -0
- {mcp_agent → fast_agent}/resources/examples/mcp/elicitations/fastagent.secrets.yaml.example +0 -0
- {mcp_agent → fast_agent}/resources/examples/mcp/state-transfer/fastagent.config.yaml +0 -0
- {mcp_agent → fast_agent}/resources/examples/mcp/state-transfer/fastagent.secrets.yaml.example +0 -0
- {mcp_agent → fast_agent}/resources/examples/researcher/fastagent.config.yaml +0 -0
- {mcp_agent → fast_agent}/resources/examples/tensorzero/.env.sample +0 -0
- {mcp_agent → fast_agent}/resources/examples/tensorzero/Makefile +0 -0
- {mcp_agent → fast_agent}/resources/examples/tensorzero/README.md +0 -0
- {mcp_agent → fast_agent}/resources/examples/tensorzero/demo_images/clam.jpg +0 -0
- {mcp_agent → fast_agent}/resources/examples/tensorzero/demo_images/crab.png +0 -0
- {mcp_agent → fast_agent}/resources/examples/tensorzero/demo_images/shrimp.png +0 -0
- {mcp_agent → fast_agent}/resources/examples/tensorzero/docker-compose.yml +0 -0
- {mcp_agent → fast_agent}/resources/examples/tensorzero/fastagent.config.yaml +0 -0
- {mcp_agent → fast_agent}/resources/examples/tensorzero/mcp_server/Dockerfile +0 -0
- {mcp_agent → fast_agent}/resources/examples/tensorzero/mcp_server/entrypoint.sh +0 -0
- {mcp_agent → fast_agent}/resources/examples/tensorzero/mcp_server/mcp_server.py +0 -0
- {mcp_agent → fast_agent}/resources/examples/tensorzero/mcp_server/pyproject.toml +0 -0
- {mcp_agent → fast_agent}/resources/examples/tensorzero/tensorzero_config/system_schema.json +0 -0
- {mcp_agent → fast_agent}/resources/examples/tensorzero/tensorzero_config/system_template.minijinja +0 -0
- {mcp_agent → fast_agent}/resources/examples/tensorzero/tensorzero_config/tensorzero.toml +0 -0
- {mcp_agent → fast_agent}/resources/examples/workflows/fastagent.config.yaml +0 -0
- {mcp_agent → fast_agent}/resources/examples/workflows/graded_report.md +0 -0
- {mcp_agent → fast_agent}/resources/examples/workflows/short_story.md +0 -0
- {mcp_agent → fast_agent}/resources/examples/workflows/short_story.txt +0 -0
- {mcp_agent → fast_agent/ui}/console.py +0 -0
- {mcp_agent/core → fast_agent/ui}/mermaid_utils.py +0 -0
- {fast_agent_mcp-0.2.57.dist-info → fast_agent_mcp-0.3.0.dist-info}/WHEEL +0 -0
- {fast_agent_mcp-0.2.57.dist-info → fast_agent_mcp-0.3.0.dist-info}/licenses/LICENSE +0 -0
{mcp_agent/llm/providers → fast_agent/llm/provider/anthropic}/multipart_converter_anthropic.py
RENAMED
|
@@ -10,6 +10,7 @@ from anthropic.types import (
|
|
|
10
10
|
PlainTextSourceParam,
|
|
11
11
|
TextBlockParam,
|
|
12
12
|
ToolResultBlockParam,
|
|
13
|
+
ToolUseBlockParam,
|
|
13
14
|
URLImageSourceParam,
|
|
14
15
|
URLPDFSourceParam,
|
|
15
16
|
)
|
|
@@ -24,8 +25,8 @@ from mcp.types import (
|
|
|
24
25
|
TextResourceContents,
|
|
25
26
|
)
|
|
26
27
|
|
|
27
|
-
from
|
|
28
|
-
from
|
|
28
|
+
from fast_agent.core.logging.logger import get_logger
|
|
29
|
+
from fast_agent.mcp.helpers.content_helpers import (
|
|
29
30
|
get_image_data,
|
|
30
31
|
get_resource_uri,
|
|
31
32
|
get_text,
|
|
@@ -33,13 +34,12 @@ from mcp_agent.mcp.helpers.content_helpers import (
|
|
|
33
34
|
is_resource_content,
|
|
34
35
|
is_text_content,
|
|
35
36
|
)
|
|
36
|
-
from
|
|
37
|
+
from fast_agent.mcp.mime_utils import (
|
|
37
38
|
guess_mime_type,
|
|
38
39
|
is_image_mime_type,
|
|
39
40
|
is_text_mime_type,
|
|
40
41
|
)
|
|
41
|
-
from
|
|
42
|
-
from mcp_agent.mcp.resource_utils import extract_title_from_uri
|
|
42
|
+
from fast_agent.types import PromptMessageExtended
|
|
43
43
|
|
|
44
44
|
_logger = get_logger("multipart_converter_anthropic")
|
|
45
45
|
|
|
@@ -63,41 +63,81 @@ class AnthropicConverter:
|
|
|
63
63
|
return mime_type in SUPPORTED_IMAGE_MIME_TYPES
|
|
64
64
|
|
|
    @staticmethod
    def convert_to_anthropic(multipart_msg: PromptMessageExtended) -> MessageParam:
        """
        Convert a PromptMessageExtended message to Anthropic API format.

        Args:
            multipart_msg: The PromptMessageExtended message to convert

        Returns:
            An Anthropic API MessageParam object
        """
        role = multipart_msg.role
        all_content_blocks = []

        # If this is an assistant message that contains tool_calls, convert
        # those into Anthropic tool_use blocks so the next user message can
        # legally include corresponding tool_result blocks.
        if role == "assistant" and multipart_msg.tool_calls:
            for tool_use_id, req in multipart_msg.tool_calls.items():
                name = None
                args = None
                try:
                    # Read the request defensively via getattr so a malformed
                    # entry degrades to a placeholder tool_use instead of raising.
                    params = getattr(req, "params", None)
                    if params is not None:
                        name = getattr(params, "name", None)
                        args = getattr(params, "arguments", None)
                except Exception:
                    pass

                all_content_blocks.append(
                    ToolUseBlockParam(
                        type="tool_use",
                        id=tool_use_id,
                        name=name or "unknown_tool",
                        input=args or {},
                    )
                )

            # NOTE(review): assistant messages carrying tool_calls return here and
            # drop any regular content items — confirm this is intentional.
            return MessageParam(role=role, content=all_content_blocks)

        # Handle tool_results if present (for user messages with tool results)
        # Tool results must come FIRST in the content array per Anthropic API requirements
        if multipart_msg.tool_results:
            # Convert dict to list of tuples for create_tool_results_message
            tool_results_list = list(multipart_msg.tool_results.items())
            tool_msg = AnthropicConverter.create_tool_results_message(tool_results_list)
            # Extract the content blocks from the tool results message
            all_content_blocks.extend(tool_msg["content"])

        # Then handle regular content blocks if present
        if multipart_msg.content:
            # Convert content blocks
            anthropic_blocks = AnthropicConverter._convert_content_items(
                multipart_msg.content, document_mode=True
            )

            # Filter blocks based on role (assistant can only have text blocks)
            if role == "assistant":
                text_blocks = []
                for block in anthropic_blocks:
                    if block.get("type") == "text":
                        text_blocks.append(block)
                    else:
                        _logger.warning(
                            f"Removing non-text block from assistant message: {block.get('type')}"
                        )
                anthropic_blocks = text_blocks

            all_content_blocks.extend(anthropic_blocks)

        # Handle empty content case
        if not all_content_blocks:
            return MessageParam(role=role, content=[])

        # Create the Anthropic message
        return MessageParam(role=role, content=all_content_blocks)
101
141
|
|
|
102
142
|
@staticmethod
|
|
103
143
|
def convert_prompt_message_to_anthropic(message: PromptMessage) -> MessageParam:
|
|
@@ -110,8 +150,8 @@ class AnthropicConverter:
|
|
|
110
150
|
Returns:
|
|
111
151
|
An Anthropic API MessageParam object
|
|
112
152
|
"""
|
|
113
|
-
# Convert the PromptMessage to a
|
|
114
|
-
multipart =
|
|
153
|
+
# Convert the PromptMessage to a PromptMessageExtended containing a single content item
|
|
154
|
+
multipart = PromptMessageExtended(role=message.role, content=[message.content])
|
|
115
155
|
|
|
116
156
|
# Use the existing conversion method
|
|
117
157
|
return AnthropicConverter.convert_to_anthropic(multipart)
|
|
@@ -195,6 +235,8 @@ class AnthropicConverter:
|
|
|
195
235
|
mime_type = AnthropicConverter._determine_mime_type(resource_content)
|
|
196
236
|
|
|
197
237
|
# Extract title from URI
|
|
238
|
+
from fast_agent.mcp.resource_utils import extract_title_from_uri
|
|
239
|
+
|
|
198
240
|
title = extract_title_from_uri(uri) if uri else "resource"
|
|
199
241
|
|
|
200
242
|
# Convert based on MIME type
|
|
@@ -345,47 +387,6 @@ class AnthropicConverter:
|
|
|
345
387
|
|
|
346
388
|
return TextBlockParam(type="text", text=f"[{message}]")
|
|
347
389
|
|
|
348
|
-
@staticmethod
|
|
349
|
-
def convert_tool_result_to_anthropic(
|
|
350
|
-
tool_result: CallToolResult, tool_use_id: str
|
|
351
|
-
) -> ToolResultBlockParam:
|
|
352
|
-
"""
|
|
353
|
-
Convert an MCP CallToolResult to an Anthropic ToolResultBlockParam.
|
|
354
|
-
|
|
355
|
-
Args:
|
|
356
|
-
tool_result: The tool result from a tool call
|
|
357
|
-
tool_use_id: The ID of the associated tool use
|
|
358
|
-
|
|
359
|
-
Returns:
|
|
360
|
-
An Anthropic ToolResultBlockParam ready to be included in a user message
|
|
361
|
-
"""
|
|
362
|
-
# For tool results, always use document_mode=False to get text blocks instead of document blocks
|
|
363
|
-
anthropic_content = []
|
|
364
|
-
|
|
365
|
-
for item in tool_result.content:
|
|
366
|
-
if isinstance(item, EmbeddedResource):
|
|
367
|
-
# For embedded resources, always use text mode in tool results
|
|
368
|
-
resource_block = AnthropicConverter._convert_embedded_resource(
|
|
369
|
-
item, document_mode=False
|
|
370
|
-
)
|
|
371
|
-
anthropic_content.append(resource_block)
|
|
372
|
-
elif isinstance(item, (TextContent, ImageContent)):
|
|
373
|
-
# For text and image, use standard conversion
|
|
374
|
-
blocks = AnthropicConverter._convert_content_items([item], document_mode=False)
|
|
375
|
-
anthropic_content.extend(blocks)
|
|
376
|
-
|
|
377
|
-
# If we ended up with no valid content blocks, create a placeholder
|
|
378
|
-
if not anthropic_content:
|
|
379
|
-
anthropic_content = [TextBlockParam(type="text", text="[No content in tool result]")]
|
|
380
|
-
|
|
381
|
-
# Create the tool result block
|
|
382
|
-
return ToolResultBlockParam(
|
|
383
|
-
type="tool_result",
|
|
384
|
-
tool_use_id=tool_use_id,
|
|
385
|
-
content=anthropic_content,
|
|
386
|
-
is_error=tool_result.isError,
|
|
387
|
-
)
|
|
388
|
-
|
|
389
390
|
@staticmethod
|
|
390
391
|
def create_tool_results_message(
|
|
391
392
|
tool_results: List[tuple[str, CallToolResult]],
|
|
@@ -404,7 +405,6 @@ class AnthropicConverter:
|
|
|
404
405
|
for tool_use_id, result in tool_results:
|
|
405
406
|
# Process each tool result
|
|
406
407
|
tool_result_blocks = []
|
|
407
|
-
separate_blocks = []
|
|
408
408
|
|
|
409
409
|
# Process each content item in the result
|
|
410
410
|
for item in result.content:
|
|
@@ -413,19 +413,13 @@ class AnthropicConverter:
|
|
|
413
413
|
tool_result_blocks.extend(blocks)
|
|
414
414
|
elif isinstance(item, EmbeddedResource):
|
|
415
415
|
resource_content = item.resource
|
|
416
|
-
|
|
417
|
-
#
|
|
418
|
-
|
|
419
|
-
|
|
420
|
-
|
|
421
|
-
|
|
422
|
-
|
|
423
|
-
else:
|
|
424
|
-
# For binary resources like PDFs, add as separate block
|
|
425
|
-
block = AnthropicConverter._convert_embedded_resource(
|
|
426
|
-
item, document_mode=True
|
|
427
|
-
)
|
|
428
|
-
separate_blocks.append(block)
|
|
416
|
+
document_mode: bool = not isinstance(resource_content, TextResourceContents)
|
|
417
|
+
# With Anthropic SDK 0.66, documents can be inside tool results
|
|
418
|
+
# Text resources remain inline within the tool_result
|
|
419
|
+
block = AnthropicConverter._convert_embedded_resource(
|
|
420
|
+
item, document_mode=document_mode
|
|
421
|
+
)
|
|
422
|
+
tool_result_blocks.append(block)
|
|
429
423
|
|
|
430
424
|
# Create the tool result block if we have content
|
|
431
425
|
if tool_result_blocks:
|
|
@@ -448,7 +442,6 @@ class AnthropicConverter:
|
|
|
448
442
|
)
|
|
449
443
|
)
|
|
450
444
|
|
|
451
|
-
#
|
|
452
|
-
content_blocks.extend(separate_blocks)
|
|
445
|
+
# All content is now included within the tool_result block.
|
|
453
446
|
|
|
454
447
|
return MessageParam(role="user", content=content_blocks)
|
|
@@ -0,0 +1,218 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from typing import Collection, Dict, List, Literal, Optional, Set, TypedDict, cast
|
|
4
|
+
|
|
5
|
+
# Lightweight, runtime-only loader for AWS Bedrock models.
|
|
6
|
+
# - Fetches once per process via boto3 (region from session; env override supported)
|
|
7
|
+
# - Memory cache only; no disk persistence
|
|
8
|
+
# - Provides filtering and optional prefixing (default 'bedrock.') for model IDs
|
|
9
|
+
|
|
10
|
+
try:
|
|
11
|
+
import boto3
|
|
12
|
+
except Exception: # pragma: no cover - import error path
|
|
13
|
+
boto3 = None # type: ignore[assignment]
|
|
14
|
+
|
|
15
|
+
|
|
# Closed vocabularies mirrored from Bedrock's ListFoundationModels response fields.
Modality = Literal["TEXT", "IMAGE", "VIDEO", "SPEECH", "EMBEDDING"]
Lifecycle = Literal["ACTIVE", "LEGACY"]
InferenceType = Literal["ON_DEMAND", "PROVISIONED", "INFERENCE_PROFILE"]
19
|
+
|
|
20
|
+
|
|
class ModelSummary(TypedDict, total=False):
    """Shape of one entry from Bedrock's ``list_foundation_models`` response.

    ``total=False``: every key is optional, matching the AWS payload where
    fields may be absent for some models.
    """

    # Canonical Bedrock model identifier (e.g. "anthropic.claude-...").
    modelId: str
    # Human-readable model name.
    modelName: str
    # Vendor name (e.g. "Anthropic", "Amazon").
    providerName: str
    # Modalities the model accepts as input.
    inputModalities: List[Modality]
    # Modalities the model can produce.
    outputModalities: List[Modality]
    # Whether streaming responses are supported.
    responseStreamingSupported: bool
    # Supported customization options (fine-tuning etc.), as returned by AWS.
    customizationsSupported: List[str]
    # Supported invocation modes (ON_DEMAND, PROVISIONED, ...).
    inferenceTypesSupported: List[InferenceType]
    # Lifecycle info; the "status" key holds ACTIVE or LEGACY.
    modelLifecycle: Dict[str, Lifecycle]
31
|
+
|
|
32
|
+
|
|
# Process-lifetime cache: region -> {modelId -> ModelSummary}. Memory only; never persisted.
_MODELS_CACHE_BY_REGION: Dict[str, Dict[str, ModelSummary]] = {}
34
|
+
|
|
35
|
+
|
|
36
|
+
def _resolve_region(region: Optional[str]) -> str:
|
|
37
|
+
if region:
|
|
38
|
+
return region
|
|
39
|
+
import os
|
|
40
|
+
|
|
41
|
+
env_region = os.getenv("BEDROCK_REGION")
|
|
42
|
+
if env_region:
|
|
43
|
+
return env_region
|
|
44
|
+
if boto3 is None:
|
|
45
|
+
raise RuntimeError(
|
|
46
|
+
"boto3 is required to load Bedrock models. Install boto3 or provide a static list."
|
|
47
|
+
)
|
|
48
|
+
session = boto3.Session()
|
|
49
|
+
if not session.region_name:
|
|
50
|
+
raise RuntimeError(
|
|
51
|
+
"AWS region could not be resolved. Configure your AWS SSO/profile or set BEDROCK_REGION."
|
|
52
|
+
)
|
|
53
|
+
return session.region_name
|
|
54
|
+
|
|
55
|
+
|
|
56
|
+
def _strip_prefix(model_id: str, prefix: str) -> str:
|
|
57
|
+
return model_id[len(prefix) :] if prefix and model_id.startswith(prefix) else model_id
|
|
58
|
+
|
|
59
|
+
|
|
def _ensure_loaded(region: Optional[str] = None) -> Dict[str, ModelSummary]:
    """Return the modelId -> ModelSummary map for *region*, fetched once per process.

    Results are cached in memory per resolved region; use refresh_bedrock_models()
    to force a reload.

    Args:
        region: AWS region; resolved via _resolve_region when omitted.

    Raises:
        RuntimeError: if boto3 is unavailable or the ListFoundationModels call fails.
    """
    resolved_region = _resolve_region(region)
    cache = _MODELS_CACHE_BY_REGION.get(resolved_region)
    if cache is not None:
        return cache

    if boto3 is None:
        raise RuntimeError("boto3 is required to load Bedrock models. Install boto3.")

    try:
        client = boto3.client("bedrock", region_name=resolved_region)
        resp = client.list_foundation_models()
        summaries: List[ModelSummary] = resp.get("modelSummaries", [])  # type: ignore[assignment]
    except Exception as exc:  # keep error simple and actionable
        # Chain the cause (raise ... from exc) so the underlying AWS error's
        # traceback is preserved, not just its string representation.
        raise RuntimeError(
            f"Failed to list Bedrock foundation models in region '{resolved_region}'. "
            f"Ensure AWS credentials (SSO) and permissions (bedrock:ListFoundationModels) are configured. "
            f"Original error: {exc}"
        ) from exc

    # Key by modelId for O(1) lookup; drop entries with no modelId.
    cache = {s.get("modelId", ""): s for s in summaries if s.get("modelId")}
    _MODELS_CACHE_BY_REGION[resolved_region] = cache
    return cache
83
|
+
|
|
84
|
+
|
|
def refresh_bedrock_models(region: Optional[str] = None) -> None:
    """Invalidate the cached model list for *region* and reload it immediately."""
    target = _resolve_region(region)
    # Evict the cached entry, then eagerly repopulate it.
    _MODELS_CACHE_BY_REGION.pop(target, None)
    _ensure_loaded(target)
90
|
+
|
|
91
|
+
|
|
92
|
+
def _matches_modalities(model_modalities: List[Modality], requested: Collection[Modality]) -> bool:
|
|
93
|
+
# include if all requested are present in the model's modalities
|
|
94
|
+
return set(requested).issubset(set(model_modalities))
|
|
95
|
+
|
|
96
|
+
|
|
def all_model_summaries(
    input_modalities: Optional[Collection[Modality]] = None,
    output_modalities: Optional[Collection[Modality]] = None,
    include_legacy: bool = False,
    providers: Optional[Collection[str]] = None,
    inference_types: Optional[Collection[InferenceType]] = None,
    direct_invocation_only: bool = True,
    region: Optional[str] = None,
) -> List[ModelSummary]:
    """Return filtered Bedrock model summaries.

    Defaults: input_modalities={"TEXT"}, output_modalities={"TEXT"}, include_legacy=False,
    inference_types={"ON_DEMAND"}, direct_invocation_only=True.

    Args:
        input_modalities: modalities the model must accept (all of them).
        output_modalities: modalities the model must produce (all of them).
        include_legacy: keep models whose lifecycle status is LEGACY.
        providers: restrict results to these providerName values.
        inference_types: inference types the model must support (all of them).
        direct_invocation_only: drop profile variants such as ':0:24k' or ':mm'
            (heuristic: more than one ':' in the modelId).
        region: AWS region; resolved via _resolve_region when omitted.
    """

    cache = _ensure_loaded(region)
    results: List[ModelSummary] = []

    # Normalize filters. Each expression always yields a set, so the previous
    # Optional[Set[...]] annotation and "is not None" guard on effective_input
    # were dead; use plain truthiness, consistent with effective_output.
    effective_output: Set[Modality] = (
        set(output_modalities) if output_modalities is not None else {cast("Modality", "TEXT")}
    )
    effective_input: Set[Modality] = (
        set(input_modalities) if input_modalities is not None else {cast("Modality", "TEXT")}
    )
    provider_filter: Optional[Set[str]] = set(providers) if providers is not None else None
    effective_inference: Set[InferenceType] = (
        set(inference_types)
        if inference_types is not None
        else {cast("InferenceType", "ON_DEMAND")}
    )

    for summary in cache.values():
        lifecycle = (summary.get("modelLifecycle") or {}).get("status")
        if not include_legacy and lifecycle == "LEGACY":
            continue

        if provider_filter is not None and summary.get("providerName") not in provider_filter:
            continue

        # direct invocation only: exclude profile variants like :0:24k or :mm
        if direct_invocation_only:
            mid = summary.get("modelId") or ""
            if mid.count(":") > 1:
                continue

        # modalities
        model_inputs: List[Modality] = summary.get("inputModalities", [])  # type: ignore[assignment]
        model_outputs: List[Modality] = summary.get("outputModalities", [])  # type: ignore[assignment]

        if effective_input and not _matches_modalities(model_inputs, effective_input):
            continue
        if effective_output and not _matches_modalities(model_outputs, effective_output):
            continue

        # inference types
        model_inference: List[InferenceType] = summary.get("inferenceTypesSupported", [])  # type: ignore[assignment]
        if effective_inference and not set(effective_inference).issubset(set(model_inference)):
            continue

        results.append(summary)

    return results
159
|
+
|
|
160
|
+
|
|
def all_bedrock_models(
    input_modalities: Optional[Collection[Modality]] = None,
    output_modalities: Optional[Collection[Modality]] = None,
    include_legacy: bool = False,
    providers: Optional[Collection[str]] = None,
    prefix: str = "bedrock.",
    inference_types: Optional[Collection[InferenceType]] = None,
    direct_invocation_only: bool = True,
    region: Optional[str] = None,
) -> List[str]:
    """Return model IDs (optionally prefixed) filtered by the given criteria.

    Defaults: output_modalities={"TEXT"}, exclude LEGACY,
    inference_types={"ON_DEMAND"}, direct_invocation_only=True.

    Args:
        prefix: string prepended to every returned ID ("" to disable).
        (Remaining arguments are forwarded unchanged to all_model_summaries.)
    """

    summaries = all_model_summaries(
        input_modalities=input_modalities,
        output_modalities=output_modalities,
        include_legacy=include_legacy,
        providers=providers,
        inference_types=inference_types,
        direct_invocation_only=direct_invocation_only,
        region=region,
    )
    # Comprehension replaces the manual append loop; entries with a missing or
    # empty modelId are skipped, as before.
    ids: List[str] = [mid for s in summaries if (mid := s.get("modelId"))]
    if prefix:
        return [f"{prefix}{mid}" for mid in ids]
    return ids
194
|
+
|
|
195
|
+
|
|
def get_model_metadata(model_id: str, region: Optional[str] = None) -> Optional[ModelSummary]:
    """Look up one model's summary; *model_id* may carry the 'bedrock.' prefix or not."""
    models = _ensure_loaded(region)
    return models.get(_strip_prefix(model_id, "bedrock."))
201
|
+
|
|
202
|
+
|
|
def list_providers(region: Optional[str] = None) -> List[str]:
    """Return the sorted, de-duplicated provider names present in the region's model list."""
    models = _ensure_loaded(region)
    names = {entry.get("providerName") for entry in models.values() if entry.get("providerName")}
    return sorted(names)  # type: ignore[arg-type]
207
|
+
|
|
208
|
+
|
|
# Public API of this module; the type aliases are exported for callers' annotations.
__all__ = [
    "Modality",
    "Lifecycle",
    "ModelSummary",
    "all_bedrock_models",
    "all_model_summaries",
    "get_model_metadata",
    "list_providers",
    "refresh_bedrock_models",
]