fast-agent-mcp 0.4.7__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- fast_agent/__init__.py +183 -0
- fast_agent/acp/__init__.py +19 -0
- fast_agent/acp/acp_aware_mixin.py +304 -0
- fast_agent/acp/acp_context.py +437 -0
- fast_agent/acp/content_conversion.py +136 -0
- fast_agent/acp/filesystem_runtime.py +427 -0
- fast_agent/acp/permission_store.py +269 -0
- fast_agent/acp/server/__init__.py +5 -0
- fast_agent/acp/server/agent_acp_server.py +1472 -0
- fast_agent/acp/slash_commands.py +1050 -0
- fast_agent/acp/terminal_runtime.py +408 -0
- fast_agent/acp/tool_permission_adapter.py +125 -0
- fast_agent/acp/tool_permissions.py +474 -0
- fast_agent/acp/tool_progress.py +814 -0
- fast_agent/agents/__init__.py +85 -0
- fast_agent/agents/agent_types.py +64 -0
- fast_agent/agents/llm_agent.py +350 -0
- fast_agent/agents/llm_decorator.py +1139 -0
- fast_agent/agents/mcp_agent.py +1337 -0
- fast_agent/agents/tool_agent.py +271 -0
- fast_agent/agents/workflow/agents_as_tools_agent.py +849 -0
- fast_agent/agents/workflow/chain_agent.py +212 -0
- fast_agent/agents/workflow/evaluator_optimizer.py +380 -0
- fast_agent/agents/workflow/iterative_planner.py +652 -0
- fast_agent/agents/workflow/maker_agent.py +379 -0
- fast_agent/agents/workflow/orchestrator_models.py +218 -0
- fast_agent/agents/workflow/orchestrator_prompts.py +248 -0
- fast_agent/agents/workflow/parallel_agent.py +250 -0
- fast_agent/agents/workflow/router_agent.py +353 -0
- fast_agent/cli/__init__.py +0 -0
- fast_agent/cli/__main__.py +73 -0
- fast_agent/cli/commands/acp.py +159 -0
- fast_agent/cli/commands/auth.py +404 -0
- fast_agent/cli/commands/check_config.py +783 -0
- fast_agent/cli/commands/go.py +514 -0
- fast_agent/cli/commands/quickstart.py +557 -0
- fast_agent/cli/commands/serve.py +143 -0
- fast_agent/cli/commands/server_helpers.py +114 -0
- fast_agent/cli/commands/setup.py +174 -0
- fast_agent/cli/commands/url_parser.py +190 -0
- fast_agent/cli/constants.py +40 -0
- fast_agent/cli/main.py +115 -0
- fast_agent/cli/terminal.py +24 -0
- fast_agent/config.py +798 -0
- fast_agent/constants.py +41 -0
- fast_agent/context.py +279 -0
- fast_agent/context_dependent.py +50 -0
- fast_agent/core/__init__.py +92 -0
- fast_agent/core/agent_app.py +448 -0
- fast_agent/core/core_app.py +137 -0
- fast_agent/core/direct_decorators.py +784 -0
- fast_agent/core/direct_factory.py +620 -0
- fast_agent/core/error_handling.py +27 -0
- fast_agent/core/exceptions.py +90 -0
- fast_agent/core/executor/__init__.py +0 -0
- fast_agent/core/executor/executor.py +280 -0
- fast_agent/core/executor/task_registry.py +32 -0
- fast_agent/core/executor/workflow_signal.py +324 -0
- fast_agent/core/fastagent.py +1186 -0
- fast_agent/core/logging/__init__.py +5 -0
- fast_agent/core/logging/events.py +138 -0
- fast_agent/core/logging/json_serializer.py +164 -0
- fast_agent/core/logging/listeners.py +309 -0
- fast_agent/core/logging/logger.py +278 -0
- fast_agent/core/logging/transport.py +481 -0
- fast_agent/core/prompt.py +9 -0
- fast_agent/core/prompt_templates.py +183 -0
- fast_agent/core/validation.py +326 -0
- fast_agent/event_progress.py +62 -0
- fast_agent/history/history_exporter.py +49 -0
- fast_agent/human_input/__init__.py +47 -0
- fast_agent/human_input/elicitation_handler.py +123 -0
- fast_agent/human_input/elicitation_state.py +33 -0
- fast_agent/human_input/form_elements.py +59 -0
- fast_agent/human_input/form_fields.py +256 -0
- fast_agent/human_input/simple_form.py +113 -0
- fast_agent/human_input/types.py +40 -0
- fast_agent/interfaces.py +310 -0
- fast_agent/llm/__init__.py +9 -0
- fast_agent/llm/cancellation.py +22 -0
- fast_agent/llm/fastagent_llm.py +931 -0
- fast_agent/llm/internal/passthrough.py +161 -0
- fast_agent/llm/internal/playback.py +129 -0
- fast_agent/llm/internal/silent.py +41 -0
- fast_agent/llm/internal/slow.py +38 -0
- fast_agent/llm/memory.py +275 -0
- fast_agent/llm/model_database.py +490 -0
- fast_agent/llm/model_factory.py +388 -0
- fast_agent/llm/model_info.py +102 -0
- fast_agent/llm/prompt_utils.py +155 -0
- fast_agent/llm/provider/anthropic/anthropic_utils.py +84 -0
- fast_agent/llm/provider/anthropic/cache_planner.py +56 -0
- fast_agent/llm/provider/anthropic/llm_anthropic.py +796 -0
- fast_agent/llm/provider/anthropic/multipart_converter_anthropic.py +462 -0
- fast_agent/llm/provider/bedrock/bedrock_utils.py +218 -0
- fast_agent/llm/provider/bedrock/llm_bedrock.py +2207 -0
- fast_agent/llm/provider/bedrock/multipart_converter_bedrock.py +84 -0
- fast_agent/llm/provider/google/google_converter.py +466 -0
- fast_agent/llm/provider/google/llm_google_native.py +681 -0
- fast_agent/llm/provider/openai/llm_aliyun.py +31 -0
- fast_agent/llm/provider/openai/llm_azure.py +143 -0
- fast_agent/llm/provider/openai/llm_deepseek.py +76 -0
- fast_agent/llm/provider/openai/llm_generic.py +35 -0
- fast_agent/llm/provider/openai/llm_google_oai.py +32 -0
- fast_agent/llm/provider/openai/llm_groq.py +42 -0
- fast_agent/llm/provider/openai/llm_huggingface.py +85 -0
- fast_agent/llm/provider/openai/llm_openai.py +1195 -0
- fast_agent/llm/provider/openai/llm_openai_compatible.py +138 -0
- fast_agent/llm/provider/openai/llm_openrouter.py +45 -0
- fast_agent/llm/provider/openai/llm_tensorzero_openai.py +128 -0
- fast_agent/llm/provider/openai/llm_xai.py +38 -0
- fast_agent/llm/provider/openai/multipart_converter_openai.py +561 -0
- fast_agent/llm/provider/openai/openai_multipart.py +169 -0
- fast_agent/llm/provider/openai/openai_utils.py +67 -0
- fast_agent/llm/provider/openai/responses.py +133 -0
- fast_agent/llm/provider_key_manager.py +139 -0
- fast_agent/llm/provider_types.py +34 -0
- fast_agent/llm/request_params.py +61 -0
- fast_agent/llm/sampling_converter.py +98 -0
- fast_agent/llm/stream_types.py +9 -0
- fast_agent/llm/usage_tracking.py +445 -0
- fast_agent/mcp/__init__.py +56 -0
- fast_agent/mcp/common.py +26 -0
- fast_agent/mcp/elicitation_factory.py +84 -0
- fast_agent/mcp/elicitation_handlers.py +164 -0
- fast_agent/mcp/gen_client.py +83 -0
- fast_agent/mcp/helpers/__init__.py +36 -0
- fast_agent/mcp/helpers/content_helpers.py +352 -0
- fast_agent/mcp/helpers/server_config_helpers.py +25 -0
- fast_agent/mcp/hf_auth.py +147 -0
- fast_agent/mcp/interfaces.py +92 -0
- fast_agent/mcp/logger_textio.py +108 -0
- fast_agent/mcp/mcp_agent_client_session.py +411 -0
- fast_agent/mcp/mcp_aggregator.py +2175 -0
- fast_agent/mcp/mcp_connection_manager.py +723 -0
- fast_agent/mcp/mcp_content.py +262 -0
- fast_agent/mcp/mime_utils.py +108 -0
- fast_agent/mcp/oauth_client.py +509 -0
- fast_agent/mcp/prompt.py +159 -0
- fast_agent/mcp/prompt_message_extended.py +155 -0
- fast_agent/mcp/prompt_render.py +84 -0
- fast_agent/mcp/prompt_serialization.py +580 -0
- fast_agent/mcp/prompts/__init__.py +0 -0
- fast_agent/mcp/prompts/__main__.py +7 -0
- fast_agent/mcp/prompts/prompt_constants.py +18 -0
- fast_agent/mcp/prompts/prompt_helpers.py +238 -0
- fast_agent/mcp/prompts/prompt_load.py +186 -0
- fast_agent/mcp/prompts/prompt_server.py +552 -0
- fast_agent/mcp/prompts/prompt_template.py +438 -0
- fast_agent/mcp/resource_utils.py +215 -0
- fast_agent/mcp/sampling.py +200 -0
- fast_agent/mcp/server/__init__.py +4 -0
- fast_agent/mcp/server/agent_server.py +613 -0
- fast_agent/mcp/skybridge.py +44 -0
- fast_agent/mcp/sse_tracking.py +287 -0
- fast_agent/mcp/stdio_tracking_simple.py +59 -0
- fast_agent/mcp/streamable_http_tracking.py +309 -0
- fast_agent/mcp/tool_execution_handler.py +137 -0
- fast_agent/mcp/tool_permission_handler.py +88 -0
- fast_agent/mcp/transport_tracking.py +634 -0
- fast_agent/mcp/types.py +24 -0
- fast_agent/mcp/ui_agent.py +48 -0
- fast_agent/mcp/ui_mixin.py +209 -0
- fast_agent/mcp_server_registry.py +89 -0
- fast_agent/py.typed +0 -0
- fast_agent/resources/examples/data-analysis/analysis-campaign.py +189 -0
- fast_agent/resources/examples/data-analysis/analysis.py +68 -0
- fast_agent/resources/examples/data-analysis/fastagent.config.yaml +41 -0
- fast_agent/resources/examples/data-analysis/mount-point/WA_Fn-UseC_-HR-Employee-Attrition.csv +1471 -0
- fast_agent/resources/examples/mcp/elicitations/elicitation_account_server.py +88 -0
- fast_agent/resources/examples/mcp/elicitations/elicitation_forms_server.py +297 -0
- fast_agent/resources/examples/mcp/elicitations/elicitation_game_server.py +164 -0
- fast_agent/resources/examples/mcp/elicitations/fastagent.config.yaml +35 -0
- fast_agent/resources/examples/mcp/elicitations/fastagent.secrets.yaml.example +17 -0
- fast_agent/resources/examples/mcp/elicitations/forms_demo.py +107 -0
- fast_agent/resources/examples/mcp/elicitations/game_character.py +65 -0
- fast_agent/resources/examples/mcp/elicitations/game_character_handler.py +256 -0
- fast_agent/resources/examples/mcp/elicitations/tool_call.py +21 -0
- fast_agent/resources/examples/mcp/state-transfer/agent_one.py +18 -0
- fast_agent/resources/examples/mcp/state-transfer/agent_two.py +18 -0
- fast_agent/resources/examples/mcp/state-transfer/fastagent.config.yaml +27 -0
- fast_agent/resources/examples/mcp/state-transfer/fastagent.secrets.yaml.example +15 -0
- fast_agent/resources/examples/researcher/fastagent.config.yaml +61 -0
- fast_agent/resources/examples/researcher/researcher-eval.py +53 -0
- fast_agent/resources/examples/researcher/researcher-imp.py +189 -0
- fast_agent/resources/examples/researcher/researcher.py +36 -0
- fast_agent/resources/examples/tensorzero/.env.sample +2 -0
- fast_agent/resources/examples/tensorzero/Makefile +31 -0
- fast_agent/resources/examples/tensorzero/README.md +56 -0
- fast_agent/resources/examples/tensorzero/agent.py +35 -0
- fast_agent/resources/examples/tensorzero/demo_images/clam.jpg +0 -0
- fast_agent/resources/examples/tensorzero/demo_images/crab.png +0 -0
- fast_agent/resources/examples/tensorzero/demo_images/shrimp.png +0 -0
- fast_agent/resources/examples/tensorzero/docker-compose.yml +105 -0
- fast_agent/resources/examples/tensorzero/fastagent.config.yaml +19 -0
- fast_agent/resources/examples/tensorzero/image_demo.py +67 -0
- fast_agent/resources/examples/tensorzero/mcp_server/Dockerfile +25 -0
- fast_agent/resources/examples/tensorzero/mcp_server/entrypoint.sh +35 -0
- fast_agent/resources/examples/tensorzero/mcp_server/mcp_server.py +31 -0
- fast_agent/resources/examples/tensorzero/mcp_server/pyproject.toml +11 -0
- fast_agent/resources/examples/tensorzero/simple_agent.py +25 -0
- fast_agent/resources/examples/tensorzero/tensorzero_config/system_schema.json +29 -0
- fast_agent/resources/examples/tensorzero/tensorzero_config/system_template.minijinja +11 -0
- fast_agent/resources/examples/tensorzero/tensorzero_config/tensorzero.toml +35 -0
- fast_agent/resources/examples/workflows/agents_as_tools_extended.py +73 -0
- fast_agent/resources/examples/workflows/agents_as_tools_simple.py +50 -0
- fast_agent/resources/examples/workflows/chaining.py +37 -0
- fast_agent/resources/examples/workflows/evaluator.py +77 -0
- fast_agent/resources/examples/workflows/fastagent.config.yaml +26 -0
- fast_agent/resources/examples/workflows/graded_report.md +89 -0
- fast_agent/resources/examples/workflows/human_input.py +28 -0
- fast_agent/resources/examples/workflows/maker.py +156 -0
- fast_agent/resources/examples/workflows/orchestrator.py +70 -0
- fast_agent/resources/examples/workflows/parallel.py +56 -0
- fast_agent/resources/examples/workflows/router.py +69 -0
- fast_agent/resources/examples/workflows/short_story.md +13 -0
- fast_agent/resources/examples/workflows/short_story.txt +19 -0
- fast_agent/resources/setup/.gitignore +30 -0
- fast_agent/resources/setup/agent.py +28 -0
- fast_agent/resources/setup/fastagent.config.yaml +65 -0
- fast_agent/resources/setup/fastagent.secrets.yaml.example +38 -0
- fast_agent/resources/setup/pyproject.toml.tmpl +23 -0
- fast_agent/skills/__init__.py +9 -0
- fast_agent/skills/registry.py +235 -0
- fast_agent/tools/elicitation.py +369 -0
- fast_agent/tools/shell_runtime.py +402 -0
- fast_agent/types/__init__.py +59 -0
- fast_agent/types/conversation_summary.py +294 -0
- fast_agent/types/llm_stop_reason.py +78 -0
- fast_agent/types/message_search.py +249 -0
- fast_agent/ui/__init__.py +38 -0
- fast_agent/ui/console.py +59 -0
- fast_agent/ui/console_display.py +1080 -0
- fast_agent/ui/elicitation_form.py +946 -0
- fast_agent/ui/elicitation_style.py +59 -0
- fast_agent/ui/enhanced_prompt.py +1400 -0
- fast_agent/ui/history_display.py +734 -0
- fast_agent/ui/interactive_prompt.py +1199 -0
- fast_agent/ui/markdown_helpers.py +104 -0
- fast_agent/ui/markdown_truncator.py +1004 -0
- fast_agent/ui/mcp_display.py +857 -0
- fast_agent/ui/mcp_ui_utils.py +235 -0
- fast_agent/ui/mermaid_utils.py +169 -0
- fast_agent/ui/message_primitives.py +50 -0
- fast_agent/ui/notification_tracker.py +205 -0
- fast_agent/ui/plain_text_truncator.py +68 -0
- fast_agent/ui/progress_display.py +10 -0
- fast_agent/ui/rich_progress.py +195 -0
- fast_agent/ui/streaming.py +774 -0
- fast_agent/ui/streaming_buffer.py +449 -0
- fast_agent/ui/tool_display.py +422 -0
- fast_agent/ui/usage_display.py +204 -0
- fast_agent/utils/__init__.py +5 -0
- fast_agent/utils/reasoning_stream_parser.py +77 -0
- fast_agent/utils/time.py +22 -0
- fast_agent/workflow_telemetry.py +261 -0
- fast_agent_mcp-0.4.7.dist-info/METADATA +788 -0
- fast_agent_mcp-0.4.7.dist-info/RECORD +261 -0
- fast_agent_mcp-0.4.7.dist-info/WHEEL +4 -0
- fast_agent_mcp-0.4.7.dist-info/entry_points.txt +7 -0
- fast_agent_mcp-0.4.7.dist-info/licenses/LICENSE +201 -0
|
@@ -0,0 +1,561 @@
|
|
|
1
|
+
import json
|
|
2
|
+
from typing import Any, Union
|
|
3
|
+
|
|
4
|
+
from mcp.types import (
|
|
5
|
+
CallToolResult,
|
|
6
|
+
EmbeddedResource,
|
|
7
|
+
ImageContent,
|
|
8
|
+
PromptMessage,
|
|
9
|
+
TextContent,
|
|
10
|
+
)
|
|
11
|
+
from openai.types.chat import ChatCompletionMessageParam
|
|
12
|
+
|
|
13
|
+
from fast_agent.core.logging.logger import get_logger
|
|
14
|
+
from fast_agent.mcp.helpers.content_helpers import (
|
|
15
|
+
get_image_data,
|
|
16
|
+
get_resource_uri,
|
|
17
|
+
get_text,
|
|
18
|
+
is_image_content,
|
|
19
|
+
is_resource_content,
|
|
20
|
+
is_resource_link,
|
|
21
|
+
is_text_content,
|
|
22
|
+
)
|
|
23
|
+
from fast_agent.mcp.mime_utils import (
|
|
24
|
+
guess_mime_type,
|
|
25
|
+
is_image_mime_type,
|
|
26
|
+
is_text_mime_type,
|
|
27
|
+
)
|
|
28
|
+
from fast_agent.types import PromptMessageExtended
|
|
29
|
+
|
|
30
|
+
_logger = get_logger("multipart_converter_openai")
|
|
31
|
+
|
|
32
|
+
# Define type aliases for content blocks
|
|
33
|
+
ContentBlock = dict[str, Any]
|
|
34
|
+
OpenAIMessage = dict[str, Any]
|
|
35
|
+
|
|
36
|
+
|
|
37
|
+
class OpenAIConverter:
|
|
38
|
+
"""Converts MCP message types to OpenAI API format."""
|
|
39
|
+
|
|
40
|
+
@staticmethod
|
|
41
|
+
def _is_supported_image_type(mime_type: str) -> bool:
|
|
42
|
+
"""
|
|
43
|
+
Check if the given MIME type is supported by OpenAI's image API.
|
|
44
|
+
|
|
45
|
+
Args:
|
|
46
|
+
mime_type: The MIME type to check
|
|
47
|
+
|
|
48
|
+
Returns:
|
|
49
|
+
True if the MIME type is generally supported, False otherwise
|
|
50
|
+
"""
|
|
51
|
+
return (
|
|
52
|
+
mime_type is not None and is_image_mime_type(mime_type) and mime_type != "image/svg+xml"
|
|
53
|
+
)
|
|
54
|
+
|
|
55
|
+
    @staticmethod
    def convert_to_openai(
        multipart_msg: PromptMessageExtended, concatenate_text_blocks: bool = False
    ) -> list[dict[str, Any]]:
        """
        Convert a PromptMessageExtended message to OpenAI API format.

        A single extended message may expand into several OpenAI messages
        (e.g. one "tool" message per tool result plus an optional content
        message), hence the list return type.

        Args:
            multipart_msg: The PromptMessageExtended message to convert
            concatenate_text_blocks: If True, adjacent text blocks will be combined

        Returns:
            A list of OpenAI API message objects
        """
        # If this is an assistant message that contains tool_calls, convert to an
        # assistant message with tool_calls per OpenAI format to establish the
        # required call IDs before tool responses appear.
        if multipart_msg.role == "assistant" and multipart_msg.tool_calls:
            tool_calls_list: list[dict[str, Any]] = []
            for tool_id, req in multipart_msg.tool_calls.items():
                name = None
                arguments = {}
                # Best-effort extraction: tolerate requests without params
                # rather than failing the whole conversion.
                try:
                    params = getattr(req, "params", None)
                    if params is not None:
                        name = getattr(params, "name", None)
                        arguments = getattr(params, "arguments", {}) or {}
                except Exception:
                    pass

                tool_calls_list.append(
                    {
                        "id": tool_id,
                        "type": "function",
                        "function": {
                            "name": name or "unknown_tool",
                            # OpenAI expects arguments as a JSON-encoded string
                            "arguments": json.dumps(arguments),
                        },
                    }
                )

            # NOTE(review): any text content on this assistant message is
            # dropped here ("content": "") — confirm this is intentional.
            return [{"role": "assistant", "tool_calls": tool_calls_list, "content": ""}]

        # Handle tool_results first if present
        if multipart_msg.tool_results:
            messages = OpenAIConverter.convert_function_results_to_openai(
                multipart_msg.tool_results, concatenate_text_blocks
            )

            # If there's also content, convert and append it
            if multipart_msg.content:
                role = multipart_msg.role
                content_msg = OpenAIConverter._convert_content_to_message(
                    multipart_msg.content, role, concatenate_text_blocks
                )
                if content_msg:  # Only append if non-empty
                    messages.append(content_msg)

            return messages

        # Regular content conversion (no tool_results)
        role = multipart_msg.role
        content_msg = OpenAIConverter._convert_content_to_message(
            multipart_msg.content, role, concatenate_text_blocks
        )
        return [content_msg] if content_msg else []
|
|
121
|
+
|
|
122
|
+
@staticmethod
|
|
123
|
+
def _convert_content_to_message(
|
|
124
|
+
content: list, role: str, concatenate_text_blocks: bool = False
|
|
125
|
+
) -> dict[str, Any] | None:
|
|
126
|
+
"""
|
|
127
|
+
Convert content blocks to a single OpenAI message.
|
|
128
|
+
|
|
129
|
+
Args:
|
|
130
|
+
content: List of content blocks
|
|
131
|
+
role: The message role
|
|
132
|
+
concatenate_text_blocks: If True, adjacent text blocks will be combined
|
|
133
|
+
|
|
134
|
+
Returns:
|
|
135
|
+
An OpenAI message dict or None if content is empty
|
|
136
|
+
"""
|
|
137
|
+
# Handle empty content
|
|
138
|
+
if not content:
|
|
139
|
+
return {"role": role, "content": ""}
|
|
140
|
+
|
|
141
|
+
# single text block
|
|
142
|
+
if 1 == len(content) and is_text_content(content[0]):
|
|
143
|
+
return {"role": role, "content": get_text(content[0])}
|
|
144
|
+
|
|
145
|
+
# For user messages, convert each content block
|
|
146
|
+
content_blocks: list[ContentBlock] = []
|
|
147
|
+
|
|
148
|
+
_logger.debug(f"Converting {len(content)} content items for role '{role}'")
|
|
149
|
+
|
|
150
|
+
for item in content:
|
|
151
|
+
try:
|
|
152
|
+
if is_text_content(item):
|
|
153
|
+
text = get_text(item)
|
|
154
|
+
content_blocks.append({"type": "text", "text": text})
|
|
155
|
+
|
|
156
|
+
elif is_image_content(item):
|
|
157
|
+
image_block = OpenAIConverter._convert_image_content(item)
|
|
158
|
+
content_blocks.append(image_block)
|
|
159
|
+
_logger.debug(
|
|
160
|
+
f"Added image content block: {image_block.get('type', 'unknown')}"
|
|
161
|
+
)
|
|
162
|
+
|
|
163
|
+
elif is_resource_content(item):
|
|
164
|
+
block = OpenAIConverter._convert_embedded_resource(item)
|
|
165
|
+
if block:
|
|
166
|
+
content_blocks.append(block)
|
|
167
|
+
|
|
168
|
+
elif is_resource_link(item):
|
|
169
|
+
text = get_text(item)
|
|
170
|
+
if text:
|
|
171
|
+
content_blocks.append({"type": "text", "text": text})
|
|
172
|
+
|
|
173
|
+
else:
|
|
174
|
+
_logger.warning(f"Unsupported content type: {type(item)}")
|
|
175
|
+
# Create a text block with information about the skipped content
|
|
176
|
+
fallback_text = f"[Unsupported content type: {type(item).__name__}]"
|
|
177
|
+
content_blocks.append({"type": "text", "text": fallback_text})
|
|
178
|
+
|
|
179
|
+
except Exception as e:
|
|
180
|
+
_logger.warning(f"Error converting content item: {e}")
|
|
181
|
+
# Create a text block with information about the conversion error
|
|
182
|
+
fallback_text = f"[Content conversion error: {str(e)}]"
|
|
183
|
+
content_blocks.append({"type": "text", "text": fallback_text})
|
|
184
|
+
|
|
185
|
+
if not content_blocks:
|
|
186
|
+
return {"role": role, "content": ""}
|
|
187
|
+
|
|
188
|
+
# If concatenate_text_blocks is True, combine adjacent text blocks
|
|
189
|
+
if concatenate_text_blocks:
|
|
190
|
+
content_blocks = OpenAIConverter._concatenate_text_blocks(content_blocks)
|
|
191
|
+
|
|
192
|
+
# Return user message with content blocks
|
|
193
|
+
result = {"role": role, "content": content_blocks}
|
|
194
|
+
_logger.debug(f"Final message for role '{role}': {len(content_blocks)} content blocks")
|
|
195
|
+
return result
|
|
196
|
+
|
|
197
|
+
@staticmethod
|
|
198
|
+
def _concatenate_text_blocks(blocks: list[ContentBlock]) -> list[ContentBlock]:
|
|
199
|
+
"""
|
|
200
|
+
Combine adjacent text blocks into single blocks.
|
|
201
|
+
|
|
202
|
+
Args:
|
|
203
|
+
blocks: List of content blocks
|
|
204
|
+
|
|
205
|
+
Returns:
|
|
206
|
+
List with adjacent text blocks combined
|
|
207
|
+
"""
|
|
208
|
+
if not blocks:
|
|
209
|
+
return []
|
|
210
|
+
|
|
211
|
+
combined_blocks: list[ContentBlock] = []
|
|
212
|
+
current_text = ""
|
|
213
|
+
|
|
214
|
+
for block in blocks:
|
|
215
|
+
if block["type"] == "text":
|
|
216
|
+
# Add to current text accumulator
|
|
217
|
+
if current_text:
|
|
218
|
+
current_text += " " + block["text"]
|
|
219
|
+
else:
|
|
220
|
+
current_text = block["text"]
|
|
221
|
+
else:
|
|
222
|
+
# Non-text block found, flush accumulated text if any
|
|
223
|
+
if current_text:
|
|
224
|
+
combined_blocks.append({"type": "text", "text": current_text})
|
|
225
|
+
current_text = ""
|
|
226
|
+
# Add the non-text block
|
|
227
|
+
combined_blocks.append(block)
|
|
228
|
+
|
|
229
|
+
# Don't forget any remaining text
|
|
230
|
+
if current_text:
|
|
231
|
+
combined_blocks.append({"type": "text", "text": current_text})
|
|
232
|
+
|
|
233
|
+
return combined_blocks
|
|
234
|
+
|
|
235
|
+
@staticmethod
|
|
236
|
+
def convert_prompt_message_to_openai(
|
|
237
|
+
message: PromptMessage, concatenate_text_blocks: bool = False
|
|
238
|
+
) -> ChatCompletionMessageParam:
|
|
239
|
+
"""
|
|
240
|
+
Convert a standard PromptMessage to OpenAI API format.
|
|
241
|
+
|
|
242
|
+
Args:
|
|
243
|
+
message: The PromptMessage to convert
|
|
244
|
+
concatenate_text_blocks: If True, adjacent text blocks will be combined
|
|
245
|
+
|
|
246
|
+
Returns:
|
|
247
|
+
An OpenAI API message object
|
|
248
|
+
"""
|
|
249
|
+
# Convert the PromptMessage to a PromptMessageExtended containing a single content item
|
|
250
|
+
multipart = PromptMessageExtended(role=message.role, content=[message.content])
|
|
251
|
+
|
|
252
|
+
# Use the existing conversion method with the specified concatenation option
|
|
253
|
+
# Since convert_to_openai now returns a list, we return the first element
|
|
254
|
+
messages = OpenAIConverter.convert_to_openai(multipart, concatenate_text_blocks)
|
|
255
|
+
return messages[0] if messages else {"role": message.role, "content": ""}
|
|
256
|
+
|
|
257
|
+
@staticmethod
|
|
258
|
+
def _convert_image_content(content: ImageContent) -> ContentBlock:
|
|
259
|
+
"""Convert ImageContent to OpenAI image_url content block."""
|
|
260
|
+
# Get image data using helper
|
|
261
|
+
image_data = get_image_data(content)
|
|
262
|
+
|
|
263
|
+
# OpenAI requires image URLs or data URIs for images
|
|
264
|
+
image_url = {"url": f"data:{content.mimeType};base64,{image_data}"}
|
|
265
|
+
|
|
266
|
+
# Check if the image has annotations for detail level
|
|
267
|
+
if hasattr(content, "annotations") and content.annotations:
|
|
268
|
+
if hasattr(content.annotations, "detail"):
|
|
269
|
+
detail = content.annotations.detail
|
|
270
|
+
if detail in ("auto", "low", "high"):
|
|
271
|
+
image_url["detail"] = detail
|
|
272
|
+
|
|
273
|
+
return {"type": "image_url", "image_url": image_url}
|
|
274
|
+
|
|
275
|
+
@staticmethod
|
|
276
|
+
def _determine_mime_type(resource_content) -> str:
|
|
277
|
+
"""
|
|
278
|
+
Determine the MIME type of a resource.
|
|
279
|
+
|
|
280
|
+
Args:
|
|
281
|
+
resource_content: The resource content to check
|
|
282
|
+
|
|
283
|
+
Returns:
|
|
284
|
+
The determined MIME type as a string
|
|
285
|
+
"""
|
|
286
|
+
if hasattr(resource_content, "mimeType") and resource_content.mimeType:
|
|
287
|
+
return resource_content.mimeType
|
|
288
|
+
|
|
289
|
+
if hasattr(resource_content, "uri") and resource_content.uri:
|
|
290
|
+
mime_type = guess_mime_type(str(resource_content.uri))
|
|
291
|
+
return mime_type
|
|
292
|
+
|
|
293
|
+
if hasattr(resource_content, "blob"):
|
|
294
|
+
return "application/octet-stream"
|
|
295
|
+
|
|
296
|
+
return "text/plain"
|
|
297
|
+
|
|
298
|
+
    @staticmethod
    def _convert_embedded_resource(
        resource: EmbeddedResource,
    ) -> ContentBlock | None:
        """
        Convert EmbeddedResource to appropriate OpenAI content block.

        Dispatches on the resource's MIME type: images become image_url
        blocks, PDFs become file blocks (or an explanatory text block for
        URLs), SVG and other text types become fastagent-tagged text
        blocks, and binary/unknown types fall back to placeholder text.

        Args:
            resource: The embedded resource to convert

        Returns:
            An appropriate OpenAI content block or None if conversion failed
        """
        resource_content = resource.resource
        uri_str = get_resource_uri(resource)
        uri = getattr(resource_content, "uri", None)
        # Only http(s) URIs can be passed through to OpenAI directly.
        is_url = uri and str(uri).startswith(("http://", "https://"))
        # Local import — presumably to avoid a circular dependency at module
        # load time; confirm before hoisting to the top of the file.
        from fast_agent.mcp.resource_utils import extract_title_from_uri

        title = extract_title_from_uri(uri) if uri else "resource"
        mime_type = OpenAIConverter._determine_mime_type(resource_content)

        # Handle different resource types based on MIME type

        # Handle images
        if OpenAIConverter._is_supported_image_type(mime_type):
            # Prefer passing a URL straight through; fall back to base64 data.
            if is_url and uri_str:
                return {"type": "image_url", "image_url": {"url": uri_str}}

            # Try to get image data
            image_data = get_image_data(resource)
            if image_data:
                return {
                    "type": "image_url",
                    "image_url": {"url": f"data:{mime_type};base64,{image_data}"},
                }
            else:
                return {"type": "text", "text": f"[Image missing data: {title}]"}

        # Handle PDFs
        elif mime_type == "application/pdf":
            if is_url and uri_str:
                # OpenAI doesn't directly support PDF URLs, explain this limitation
                return {
                    "type": "text",
                    "text": f"[PDF URL: {uri_str}]\nOpenAI requires PDF files to be uploaded or provided as base64 data.",
                }
            elif hasattr(resource_content, "blob"):
                # Embed the PDF bytes as an OpenAI "file" content part.
                return {
                    "type": "file",
                    "file": {
                        "filename": title or "document.pdf",
                        "file_data": f"data:application/pdf;base64,{resource_content.blob}",
                    },
                }

        # Handle SVG (convert to text)
        elif mime_type == "image/svg+xml":
            text = get_text(resource)
            if text:
                # Wrap markup in a fastagent:file tag so the model sees the
                # filename and type alongside the raw SVG source.
                file_text = (
                    f'<fastagent:file title="{title}" mimetype="{mime_type}">\n'
                    f"{text}\n"
                    f"</fastagent:file>"
                )
                return {"type": "text", "text": file_text}

        # Handle text files
        elif is_text_mime_type(mime_type):
            text = get_text(resource)
            if text:
                file_text = (
                    f'<fastagent:file title="{title}" mimetype="{mime_type}">\n'
                    f"{text}\n"
                    f"</fastagent:file>"
                )
                return {"type": "text", "text": file_text}

            # Default fallback for text resources
            # NOTE(review): get_text already returned a falsy value above, so
            # this re-extraction appears unable to succeed — dead code?
            text = get_text(resource)
            if text:
                return {"type": "text", "text": text}

        # Default fallback for binary resources
        elif hasattr(resource_content, "blob"):
            return {
                "type": "text",
                "text": f"[Binary resource: {title} ({mime_type})]",
            }

        # Last resort fallback
        return {
            "type": "text",
            "text": f"[Unsupported resource: {title} ({mime_type})]",
        }
|
|
393
|
+
|
|
394
|
+
@staticmethod
|
|
395
|
+
def _extract_text_from_content_blocks(
|
|
396
|
+
content: Union[str, list[ContentBlock]],
|
|
397
|
+
) -> str:
|
|
398
|
+
"""
|
|
399
|
+
Extract and combine text from content blocks.
|
|
400
|
+
|
|
401
|
+
Args:
|
|
402
|
+
content: Content blocks or string
|
|
403
|
+
|
|
404
|
+
Returns:
|
|
405
|
+
Combined text as a string
|
|
406
|
+
"""
|
|
407
|
+
if isinstance(content, str):
|
|
408
|
+
return content
|
|
409
|
+
|
|
410
|
+
if not content:
|
|
411
|
+
return ""
|
|
412
|
+
|
|
413
|
+
# Extract only text blocks
|
|
414
|
+
text_parts = []
|
|
415
|
+
for block in content:
|
|
416
|
+
if block.get("type") == "text":
|
|
417
|
+
text_parts.append(block.get("text", ""))
|
|
418
|
+
|
|
419
|
+
return " ".join(text_parts) if text_parts else "[Complex content converted to text]"
|
|
420
|
+
|
|
421
|
+
    @staticmethod
    def convert_tool_result_to_openai(
        tool_result: CallToolResult,
        tool_call_id: str,
        concatenate_text_blocks: bool = False,
    ) -> Union[dict[str, Any], tuple[dict[str, Any], list[dict[str, Any]]]]:
        """
        Convert a CallToolResult to an OpenAI tool message.

        If the result contains non-text elements, those are converted to separate user messages
        since OpenAI tool messages can only contain text.

        Args:
            tool_result: The tool result from a tool call
            tool_call_id: The ID of the associated tool use
            concatenate_text_blocks: If True, adjacent text blocks will be combined

        Returns:
            Either a single OpenAI message for the tool response (if text only),
            or a tuple containing the tool message and a list of additional messages for non-text content
        """
        # Handle empty content case
        if not tool_result.content:
            return {
                "role": "tool",
                "tool_call_id": tool_call_id,
                "content": "[Tool completed successfully]",
            }

        # Separate text and non-text content; only text may live in the
        # "tool" role message itself.
        text_content: list[TextContent] = []
        non_text_content: list[Any] = []

        for item in tool_result.content:
            if isinstance(item, TextContent):
                text_content.append(item)
            else:
                non_text_content.append(item)

        # Create tool message with text content
        tool_message_content = ""
        if text_content:
            # Convert text content to OpenAI format by routing it through
            # the standard conversion path as a temporary user message.
            temp_multipart = PromptMessageExtended(role="user", content=text_content)
            converted_messages = OpenAIConverter.convert_to_openai(
                temp_multipart, concatenate_text_blocks=concatenate_text_blocks
            )

            # Extract text from content blocks (convert_to_openai now returns a list)
            if converted_messages:
                tool_message_content = OpenAIConverter._extract_text_from_content_blocks(
                    converted_messages[0].get("content", "")
                )

        # Ensure we always have non-empty content for compatibility
        if not tool_message_content or tool_message_content.strip() == "":
            tool_message_content = "[Tool completed successfully]"

        # Create the tool message with just the text
        tool_message = {
            "role": "tool",
            "tool_call_id": tool_call_id,
            "content": tool_message_content,
        }

        # If there's no non-text content, return just the tool message
        if not non_text_content:
            return tool_message

        # Process non-text content as a separate user message
        non_text_multipart = PromptMessageExtended(role="user", content=non_text_content)

        # Convert to OpenAI format (returns a list now)
        user_messages = OpenAIConverter.convert_to_openai(non_text_multipart)

        # Debug logging to understand what's happening with image conversion
        _logger.debug(
            f"Tool result conversion: non_text_content={len(non_text_content)} items, "
            f"user_messages={len(user_messages)} messages"
        )
        if not user_messages:
            _logger.warning(
                f"No user messages generated for non-text content: {[type(item).__name__ for item in non_text_content]}"
            )

        return (tool_message, user_messages)
|
|
507
|
+
|
|
508
|
+
@staticmethod
def convert_function_results_to_openai(
    results: dict[str, CallToolResult],
    concatenate_text_blocks: bool = False,
) -> list[dict[str, Any]]:
    """
    Convert function call results to OpenAI messages.

    Each entry in *results* becomes a ``role="tool"`` message. Results that
    also carry non-text content (e.g. images) additionally yield follow-up
    ``role="user"`` messages, which are appended after the whole tool batch
    so every tool message stays adjacent to its assistant ``tool_calls`` turn.

    Args:
        results: Dictionary mapping tool_call_id to CallToolResult
        concatenate_text_blocks: If True, adjacent text blocks will be combined

    Returns:
        List of OpenAI API messages for tool responses
    """
    tool_messages: list[dict[str, Any]] = []
    user_messages: list[dict[str, Any]] = []

    for tool_call_id, result in results.items():
        # Keep the try body minimal: only the conversion call can raise here.
        try:
            converted = OpenAIConverter.convert_tool_result_to_openai(
                tool_result=result,
                tool_call_id=tool_call_id,
                concatenate_text_blocks=concatenate_text_blocks,
            )
        except Exception as e:
            # Lazy %-args defer string formatting until the record is emitted.
            _logger.error("Failed to convert tool_call_id=%s: %s", tool_call_id, e)
            # Emit a placeholder response so the API never sees an assistant
            # tool_calls entry without a matching tool message.
            tool_messages.append(
                {
                    "role": "tool",
                    "tool_call_id": tool_call_id,
                    "content": f"[Conversion error: {str(e)}]",
                }
            )
            continue

        if isinstance(converted, tuple):
            # Mixed content: (tool_message, [additional user messages]).
            tool_message, additional_messages = converted
            tool_messages.append(tool_message)
            user_messages.extend(additional_messages)
        else:
            # Text-only result: a single tool message.
            tool_messages.append(converted)

    # All tool messages first (required OpenAI sequencing), then any user
    # (vision-context) messages. When user_messages is empty this degenerates
    # to the original tool-message order, preserving context such as snapshots.
    return tool_messages + user_messages
|