fast_agent_mcp-0.4.7-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- fast_agent/__init__.py +183 -0
- fast_agent/acp/__init__.py +19 -0
- fast_agent/acp/acp_aware_mixin.py +304 -0
- fast_agent/acp/acp_context.py +437 -0
- fast_agent/acp/content_conversion.py +136 -0
- fast_agent/acp/filesystem_runtime.py +427 -0
- fast_agent/acp/permission_store.py +269 -0
- fast_agent/acp/server/__init__.py +5 -0
- fast_agent/acp/server/agent_acp_server.py +1472 -0
- fast_agent/acp/slash_commands.py +1050 -0
- fast_agent/acp/terminal_runtime.py +408 -0
- fast_agent/acp/tool_permission_adapter.py +125 -0
- fast_agent/acp/tool_permissions.py +474 -0
- fast_agent/acp/tool_progress.py +814 -0
- fast_agent/agents/__init__.py +85 -0
- fast_agent/agents/agent_types.py +64 -0
- fast_agent/agents/llm_agent.py +350 -0
- fast_agent/agents/llm_decorator.py +1139 -0
- fast_agent/agents/mcp_agent.py +1337 -0
- fast_agent/agents/tool_agent.py +271 -0
- fast_agent/agents/workflow/agents_as_tools_agent.py +849 -0
- fast_agent/agents/workflow/chain_agent.py +212 -0
- fast_agent/agents/workflow/evaluator_optimizer.py +380 -0
- fast_agent/agents/workflow/iterative_planner.py +652 -0
- fast_agent/agents/workflow/maker_agent.py +379 -0
- fast_agent/agents/workflow/orchestrator_models.py +218 -0
- fast_agent/agents/workflow/orchestrator_prompts.py +248 -0
- fast_agent/agents/workflow/parallel_agent.py +250 -0
- fast_agent/agents/workflow/router_agent.py +353 -0
- fast_agent/cli/__init__.py +0 -0
- fast_agent/cli/__main__.py +73 -0
- fast_agent/cli/commands/acp.py +159 -0
- fast_agent/cli/commands/auth.py +404 -0
- fast_agent/cli/commands/check_config.py +783 -0
- fast_agent/cli/commands/go.py +514 -0
- fast_agent/cli/commands/quickstart.py +557 -0
- fast_agent/cli/commands/serve.py +143 -0
- fast_agent/cli/commands/server_helpers.py +114 -0
- fast_agent/cli/commands/setup.py +174 -0
- fast_agent/cli/commands/url_parser.py +190 -0
- fast_agent/cli/constants.py +40 -0
- fast_agent/cli/main.py +115 -0
- fast_agent/cli/terminal.py +24 -0
- fast_agent/config.py +798 -0
- fast_agent/constants.py +41 -0
- fast_agent/context.py +279 -0
- fast_agent/context_dependent.py +50 -0
- fast_agent/core/__init__.py +92 -0
- fast_agent/core/agent_app.py +448 -0
- fast_agent/core/core_app.py +137 -0
- fast_agent/core/direct_decorators.py +784 -0
- fast_agent/core/direct_factory.py +620 -0
- fast_agent/core/error_handling.py +27 -0
- fast_agent/core/exceptions.py +90 -0
- fast_agent/core/executor/__init__.py +0 -0
- fast_agent/core/executor/executor.py +280 -0
- fast_agent/core/executor/task_registry.py +32 -0
- fast_agent/core/executor/workflow_signal.py +324 -0
- fast_agent/core/fastagent.py +1186 -0
- fast_agent/core/logging/__init__.py +5 -0
- fast_agent/core/logging/events.py +138 -0
- fast_agent/core/logging/json_serializer.py +164 -0
- fast_agent/core/logging/listeners.py +309 -0
- fast_agent/core/logging/logger.py +278 -0
- fast_agent/core/logging/transport.py +481 -0
- fast_agent/core/prompt.py +9 -0
- fast_agent/core/prompt_templates.py +183 -0
- fast_agent/core/validation.py +326 -0
- fast_agent/event_progress.py +62 -0
- fast_agent/history/history_exporter.py +49 -0
- fast_agent/human_input/__init__.py +47 -0
- fast_agent/human_input/elicitation_handler.py +123 -0
- fast_agent/human_input/elicitation_state.py +33 -0
- fast_agent/human_input/form_elements.py +59 -0
- fast_agent/human_input/form_fields.py +256 -0
- fast_agent/human_input/simple_form.py +113 -0
- fast_agent/human_input/types.py +40 -0
- fast_agent/interfaces.py +310 -0
- fast_agent/llm/__init__.py +9 -0
- fast_agent/llm/cancellation.py +22 -0
- fast_agent/llm/fastagent_llm.py +931 -0
- fast_agent/llm/internal/passthrough.py +161 -0
- fast_agent/llm/internal/playback.py +129 -0
- fast_agent/llm/internal/silent.py +41 -0
- fast_agent/llm/internal/slow.py +38 -0
- fast_agent/llm/memory.py +275 -0
- fast_agent/llm/model_database.py +490 -0
- fast_agent/llm/model_factory.py +388 -0
- fast_agent/llm/model_info.py +102 -0
- fast_agent/llm/prompt_utils.py +155 -0
- fast_agent/llm/provider/anthropic/anthropic_utils.py +84 -0
- fast_agent/llm/provider/anthropic/cache_planner.py +56 -0
- fast_agent/llm/provider/anthropic/llm_anthropic.py +796 -0
- fast_agent/llm/provider/anthropic/multipart_converter_anthropic.py +462 -0
- fast_agent/llm/provider/bedrock/bedrock_utils.py +218 -0
- fast_agent/llm/provider/bedrock/llm_bedrock.py +2207 -0
- fast_agent/llm/provider/bedrock/multipart_converter_bedrock.py +84 -0
- fast_agent/llm/provider/google/google_converter.py +466 -0
- fast_agent/llm/provider/google/llm_google_native.py +681 -0
- fast_agent/llm/provider/openai/llm_aliyun.py +31 -0
- fast_agent/llm/provider/openai/llm_azure.py +143 -0
- fast_agent/llm/provider/openai/llm_deepseek.py +76 -0
- fast_agent/llm/provider/openai/llm_generic.py +35 -0
- fast_agent/llm/provider/openai/llm_google_oai.py +32 -0
- fast_agent/llm/provider/openai/llm_groq.py +42 -0
- fast_agent/llm/provider/openai/llm_huggingface.py +85 -0
- fast_agent/llm/provider/openai/llm_openai.py +1195 -0
- fast_agent/llm/provider/openai/llm_openai_compatible.py +138 -0
- fast_agent/llm/provider/openai/llm_openrouter.py +45 -0
- fast_agent/llm/provider/openai/llm_tensorzero_openai.py +128 -0
- fast_agent/llm/provider/openai/llm_xai.py +38 -0
- fast_agent/llm/provider/openai/multipart_converter_openai.py +561 -0
- fast_agent/llm/provider/openai/openai_multipart.py +169 -0
- fast_agent/llm/provider/openai/openai_utils.py +67 -0
- fast_agent/llm/provider/openai/responses.py +133 -0
- fast_agent/llm/provider_key_manager.py +139 -0
- fast_agent/llm/provider_types.py +34 -0
- fast_agent/llm/request_params.py +61 -0
- fast_agent/llm/sampling_converter.py +98 -0
- fast_agent/llm/stream_types.py +9 -0
- fast_agent/llm/usage_tracking.py +445 -0
- fast_agent/mcp/__init__.py +56 -0
- fast_agent/mcp/common.py +26 -0
- fast_agent/mcp/elicitation_factory.py +84 -0
- fast_agent/mcp/elicitation_handlers.py +164 -0
- fast_agent/mcp/gen_client.py +83 -0
- fast_agent/mcp/helpers/__init__.py +36 -0
- fast_agent/mcp/helpers/content_helpers.py +352 -0
- fast_agent/mcp/helpers/server_config_helpers.py +25 -0
- fast_agent/mcp/hf_auth.py +147 -0
- fast_agent/mcp/interfaces.py +92 -0
- fast_agent/mcp/logger_textio.py +108 -0
- fast_agent/mcp/mcp_agent_client_session.py +411 -0
- fast_agent/mcp/mcp_aggregator.py +2175 -0
- fast_agent/mcp/mcp_connection_manager.py +723 -0
- fast_agent/mcp/mcp_content.py +262 -0
- fast_agent/mcp/mime_utils.py +108 -0
- fast_agent/mcp/oauth_client.py +509 -0
- fast_agent/mcp/prompt.py +159 -0
- fast_agent/mcp/prompt_message_extended.py +155 -0
- fast_agent/mcp/prompt_render.py +84 -0
- fast_agent/mcp/prompt_serialization.py +580 -0
- fast_agent/mcp/prompts/__init__.py +0 -0
- fast_agent/mcp/prompts/__main__.py +7 -0
- fast_agent/mcp/prompts/prompt_constants.py +18 -0
- fast_agent/mcp/prompts/prompt_helpers.py +238 -0
- fast_agent/mcp/prompts/prompt_load.py +186 -0
- fast_agent/mcp/prompts/prompt_server.py +552 -0
- fast_agent/mcp/prompts/prompt_template.py +438 -0
- fast_agent/mcp/resource_utils.py +215 -0
- fast_agent/mcp/sampling.py +200 -0
- fast_agent/mcp/server/__init__.py +4 -0
- fast_agent/mcp/server/agent_server.py +613 -0
- fast_agent/mcp/skybridge.py +44 -0
- fast_agent/mcp/sse_tracking.py +287 -0
- fast_agent/mcp/stdio_tracking_simple.py +59 -0
- fast_agent/mcp/streamable_http_tracking.py +309 -0
- fast_agent/mcp/tool_execution_handler.py +137 -0
- fast_agent/mcp/tool_permission_handler.py +88 -0
- fast_agent/mcp/transport_tracking.py +634 -0
- fast_agent/mcp/types.py +24 -0
- fast_agent/mcp/ui_agent.py +48 -0
- fast_agent/mcp/ui_mixin.py +209 -0
- fast_agent/mcp_server_registry.py +89 -0
- fast_agent/py.typed +0 -0
- fast_agent/resources/examples/data-analysis/analysis-campaign.py +189 -0
- fast_agent/resources/examples/data-analysis/analysis.py +68 -0
- fast_agent/resources/examples/data-analysis/fastagent.config.yaml +41 -0
- fast_agent/resources/examples/data-analysis/mount-point/WA_Fn-UseC_-HR-Employee-Attrition.csv +1471 -0
- fast_agent/resources/examples/mcp/elicitations/elicitation_account_server.py +88 -0
- fast_agent/resources/examples/mcp/elicitations/elicitation_forms_server.py +297 -0
- fast_agent/resources/examples/mcp/elicitations/elicitation_game_server.py +164 -0
- fast_agent/resources/examples/mcp/elicitations/fastagent.config.yaml +35 -0
- fast_agent/resources/examples/mcp/elicitations/fastagent.secrets.yaml.example +17 -0
- fast_agent/resources/examples/mcp/elicitations/forms_demo.py +107 -0
- fast_agent/resources/examples/mcp/elicitations/game_character.py +65 -0
- fast_agent/resources/examples/mcp/elicitations/game_character_handler.py +256 -0
- fast_agent/resources/examples/mcp/elicitations/tool_call.py +21 -0
- fast_agent/resources/examples/mcp/state-transfer/agent_one.py +18 -0
- fast_agent/resources/examples/mcp/state-transfer/agent_two.py +18 -0
- fast_agent/resources/examples/mcp/state-transfer/fastagent.config.yaml +27 -0
- fast_agent/resources/examples/mcp/state-transfer/fastagent.secrets.yaml.example +15 -0
- fast_agent/resources/examples/researcher/fastagent.config.yaml +61 -0
- fast_agent/resources/examples/researcher/researcher-eval.py +53 -0
- fast_agent/resources/examples/researcher/researcher-imp.py +189 -0
- fast_agent/resources/examples/researcher/researcher.py +36 -0
- fast_agent/resources/examples/tensorzero/.env.sample +2 -0
- fast_agent/resources/examples/tensorzero/Makefile +31 -0
- fast_agent/resources/examples/tensorzero/README.md +56 -0
- fast_agent/resources/examples/tensorzero/agent.py +35 -0
- fast_agent/resources/examples/tensorzero/demo_images/clam.jpg +0 -0
- fast_agent/resources/examples/tensorzero/demo_images/crab.png +0 -0
- fast_agent/resources/examples/tensorzero/demo_images/shrimp.png +0 -0
- fast_agent/resources/examples/tensorzero/docker-compose.yml +105 -0
- fast_agent/resources/examples/tensorzero/fastagent.config.yaml +19 -0
- fast_agent/resources/examples/tensorzero/image_demo.py +67 -0
- fast_agent/resources/examples/tensorzero/mcp_server/Dockerfile +25 -0
- fast_agent/resources/examples/tensorzero/mcp_server/entrypoint.sh +35 -0
- fast_agent/resources/examples/tensorzero/mcp_server/mcp_server.py +31 -0
- fast_agent/resources/examples/tensorzero/mcp_server/pyproject.toml +11 -0
- fast_agent/resources/examples/tensorzero/simple_agent.py +25 -0
- fast_agent/resources/examples/tensorzero/tensorzero_config/system_schema.json +29 -0
- fast_agent/resources/examples/tensorzero/tensorzero_config/system_template.minijinja +11 -0
- fast_agent/resources/examples/tensorzero/tensorzero_config/tensorzero.toml +35 -0
- fast_agent/resources/examples/workflows/agents_as_tools_extended.py +73 -0
- fast_agent/resources/examples/workflows/agents_as_tools_simple.py +50 -0
- fast_agent/resources/examples/workflows/chaining.py +37 -0
- fast_agent/resources/examples/workflows/evaluator.py +77 -0
- fast_agent/resources/examples/workflows/fastagent.config.yaml +26 -0
- fast_agent/resources/examples/workflows/graded_report.md +89 -0
- fast_agent/resources/examples/workflows/human_input.py +28 -0
- fast_agent/resources/examples/workflows/maker.py +156 -0
- fast_agent/resources/examples/workflows/orchestrator.py +70 -0
- fast_agent/resources/examples/workflows/parallel.py +56 -0
- fast_agent/resources/examples/workflows/router.py +69 -0
- fast_agent/resources/examples/workflows/short_story.md +13 -0
- fast_agent/resources/examples/workflows/short_story.txt +19 -0
- fast_agent/resources/setup/.gitignore +30 -0
- fast_agent/resources/setup/agent.py +28 -0
- fast_agent/resources/setup/fastagent.config.yaml +65 -0
- fast_agent/resources/setup/fastagent.secrets.yaml.example +38 -0
- fast_agent/resources/setup/pyproject.toml.tmpl +23 -0
- fast_agent/skills/__init__.py +9 -0
- fast_agent/skills/registry.py +235 -0
- fast_agent/tools/elicitation.py +369 -0
- fast_agent/tools/shell_runtime.py +402 -0
- fast_agent/types/__init__.py +59 -0
- fast_agent/types/conversation_summary.py +294 -0
- fast_agent/types/llm_stop_reason.py +78 -0
- fast_agent/types/message_search.py +249 -0
- fast_agent/ui/__init__.py +38 -0
- fast_agent/ui/console.py +59 -0
- fast_agent/ui/console_display.py +1080 -0
- fast_agent/ui/elicitation_form.py +946 -0
- fast_agent/ui/elicitation_style.py +59 -0
- fast_agent/ui/enhanced_prompt.py +1400 -0
- fast_agent/ui/history_display.py +734 -0
- fast_agent/ui/interactive_prompt.py +1199 -0
- fast_agent/ui/markdown_helpers.py +104 -0
- fast_agent/ui/markdown_truncator.py +1004 -0
- fast_agent/ui/mcp_display.py +857 -0
- fast_agent/ui/mcp_ui_utils.py +235 -0
- fast_agent/ui/mermaid_utils.py +169 -0
- fast_agent/ui/message_primitives.py +50 -0
- fast_agent/ui/notification_tracker.py +205 -0
- fast_agent/ui/plain_text_truncator.py +68 -0
- fast_agent/ui/progress_display.py +10 -0
- fast_agent/ui/rich_progress.py +195 -0
- fast_agent/ui/streaming.py +774 -0
- fast_agent/ui/streaming_buffer.py +449 -0
- fast_agent/ui/tool_display.py +422 -0
- fast_agent/ui/usage_display.py +204 -0
- fast_agent/utils/__init__.py +5 -0
- fast_agent/utils/reasoning_stream_parser.py +77 -0
- fast_agent/utils/time.py +22 -0
- fast_agent/workflow_telemetry.py +261 -0
- fast_agent_mcp-0.4.7.dist-info/METADATA +788 -0
- fast_agent_mcp-0.4.7.dist-info/RECORD +261 -0
- fast_agent_mcp-0.4.7.dist-info/WHEEL +4 -0
- fast_agent_mcp-0.4.7.dist-info/entry_points.txt +7 -0
- fast_agent_mcp-0.4.7.dist-info/licenses/LICENSE +201 -0
fast_agent/llm/provider/openai/openai_multipart.py
@@ -0,0 +1,169 @@
+# openai_multipart.py
+"""
+Clean utilities for converting between PromptMessageExtended and OpenAI message formats.
+Each function handles all content types consistently and is designed for simple testing.
+"""
+
+from typing import Any, Union
+
+from mcp.types import (
+    BlobResourceContents,
+    EmbeddedResource,
+    ImageContent,
+    TextContent,
+    TextResourceContents,
+)
+from openai.types.chat import (
+    ChatCompletionMessage,
+    ChatCompletionMessageParam,
+)
+
+from fast_agent.types import PromptMessageExtended
+
+
+def openai_to_extended(
+    message: Union[
+        ChatCompletionMessage,
+        ChatCompletionMessageParam,
+        list[Union[ChatCompletionMessage, ChatCompletionMessageParam]],
+    ],
+) -> Union[PromptMessageExtended, list[PromptMessageExtended]]:
+    """
+    Convert OpenAI messages to PromptMessageExtended format.
+
+    Args:
+        message: OpenAI Message, MessageParam, or list of them
+
+    Returns:
+        Equivalent message(s) in PromptMessageExtended format
+    """
+    if isinstance(message, list):
+        return [_openai_message_to_extended(m) for m in message]
+    return _openai_message_to_extended(message)
+
+
+def _openai_message_to_extended(
+    message: Union[ChatCompletionMessage, dict[str, Any]],
+) -> PromptMessageExtended:
+    """Convert a single OpenAI message to PromptMessageExtended."""
+    # Get role and content from message
+    if isinstance(message, dict):
+        role = message.get("role", "assistant")
+        content = message.get("content", "")
+    else:
+        role = message.role
+        content = message.content
+
+    mcp_contents = []
+
+    # Handle string content (simple case)
+    if isinstance(content, str):
+        mcp_contents.append(TextContent(type="text", text=content))
+
+    # Handle list of content parts
+    elif isinstance(content, list):
+        for part in content:
+            part_type = part.get("type") if isinstance(part, dict) else getattr(part, "type", None)
+
+            # Handle text content
+            if part_type == "text":
+                text = part.get("text") if isinstance(part, dict) else getattr(part, "text", "")
+
+                # Check if this is a resource marker
+                if (
+                    text
+                    and (text.startswith("[Resource:") or text.startswith("[Binary Resource:"))
+                    and "\n" in text
+                ):
+                    header, content_text = text.split("\n", 1)
+                    if "MIME:" in header:
+                        mime_match = header.split("MIME:", 1)[1].split("]")[0].strip()
+
+                        # If not text/plain, create an embedded resource
+                        if mime_match != "text/plain":
+                            if "Resource:" in header and "Binary Resource:" not in header:
+                                uri = header.split("Resource:", 1)[1].split(",")[0].strip()
+                                mcp_contents.append(
+                                    EmbeddedResource(
+                                        type="resource",
+                                        resource=TextResourceContents(
+                                            uri=uri,
+                                            mimeType=mime_match,
+                                            text=content_text,
+                                        ),
+                                    )
+                                )
+                                continue
+
+                # Regular text content
+                mcp_contents.append(TextContent(type="text", text=text))
+
+            # Handle image content
+            elif part_type == "image_url":
+                image_url = (
+                    part.get("image_url", {})
+                    if isinstance(part, dict)
+                    else getattr(part, "image_url", None)
+                )
+                if image_url:
+                    url = (
+                        image_url.get("url")
+                        if isinstance(image_url, dict)
+                        else getattr(image_url, "url", "")
+                    )
+                    if url and url.startswith("data:image/"):
+                        # Handle base64 data URLs
+                        mime_type = url.split(";")[0].replace("data:", "")
+                        data = url.split(",")[1]
+                        mcp_contents.append(
+                            ImageContent(type="image", data=data, mimeType=mime_type)
+                        )
+
+            # Handle explicit resource types
+            elif part_type == "resource" and isinstance(part, dict) and "resource" in part:
+                resource = part["resource"]
+                if isinstance(resource, dict):
+                    # Text resource
+                    if "text" in resource and "mimeType" in resource:
+                        mime_type = resource["mimeType"]
+                        uri = resource.get("uri", "resource://unknown")
+
+                        if mime_type == "text/plain":
+                            mcp_contents.append(TextContent(type="text", text=resource["text"]))
+                        else:
+                            mcp_contents.append(
+                                EmbeddedResource(
+                                    type="resource",
+                                    resource=TextResourceContents(
+                                        text=resource["text"],
+                                        mimeType=mime_type,
+                                        uri=uri,
+                                    ),
+                                )
+                            )
+                    # Binary resource
+                    elif "blob" in resource and "mimeType" in resource:
+                        mime_type = resource["mimeType"]
+                        uri = resource.get("uri", "resource://unknown")
+
+                        if mime_type.startswith("image/") and mime_type != "image/svg+xml":
+                            mcp_contents.append(
+                                ImageContent(
+                                    type="image",
+                                    data=resource["blob"],
+                                    mimeType=mime_type,
+                                )
+                            )
+                        else:
+                            mcp_contents.append(
+                                EmbeddedResource(
+                                    type="resource",
+                                    resource=BlobResourceContents(
+                                        blob=resource["blob"],
+                                        mimeType=mime_type,
+                                        uri=uri,
+                                    ),
+                                )
+                            )
+
+    return PromptMessageExtended(role=role, content=mcp_contents)
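A minimal usage sketch of the converter above (assuming the package is installed; the message shapes mirror the branches the function handles, and the base64 payload is a placeholder, not real image data):

    from fast_agent.llm.provider.openai.openai_multipart import openai_to_extended

    # Plain string content becomes a single TextContent block
    msg = openai_to_extended({"role": "assistant", "content": "Hello"})

    # A data-URL image part becomes an ImageContent block
    multimodal = openai_to_extended(
        {
            "role": "user",
            "content": [
                {"type": "text", "text": "What is in this image?"},
                {
                    "type": "image_url",
                    # placeholder base64 payload for illustration only
                    "image_url": {"url": "data:image/png;base64,iVBORw0KGgo="},
                },
            ],
        }
    )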
fast_agent/llm/provider/openai/openai_utils.py
@@ -0,0 +1,67 @@
+"""
+Utility functions for OpenAI integration with MCP.
+
+This file provides backward compatibility with the existing API while
+delegating to the proper implementations in the providers/ directory.
+"""
+
+from typing import Any, Union
+
+from openai.types.chat import (
+    ChatCompletionMessage,
+    ChatCompletionMessageParam,
+)
+
+from fast_agent.llm.provider.openai.multipart_converter_openai import OpenAIConverter
+from fast_agent.llm.provider.openai.openai_multipart import (
+    openai_to_extended,
+)
+from fast_agent.types import PromptMessageExtended
+
+
+def openai_message_to_prompt_message_multipart(
+    message: Union[ChatCompletionMessage, dict[str, Any]],
+) -> PromptMessageExtended:
+    """
+    Convert an OpenAI ChatCompletionMessage to a PromptMessageExtended.
+
+    Args:
+        message: The OpenAI message to convert (can be an actual ChatCompletionMessage
+            or a dictionary with the same structure)
+
+    Returns:
+        A PromptMessageExtended representation
+    """
+    return openai_to_extended(message)
+
+
+def openai_message_param_to_prompt_message_multipart(
+    message_param: ChatCompletionMessageParam,
+) -> PromptMessageExtended:
+    """
+    Convert an OpenAI ChatCompletionMessageParam to a PromptMessageExtended.
+
+    Args:
+        message_param: The OpenAI message param to convert
+
+    Returns:
+        A PromptMessageExtended representation
+    """
+    return openai_to_extended(message_param)
+
+
+def prompt_message_multipart_to_openai_message_param(
+    multipart: PromptMessageExtended,
+) -> ChatCompletionMessageParam:
+    """
+    Convert a PromptMessageExtended to an OpenAI ChatCompletionMessageParam.
+
+    Args:
+        multipart: The PromptMessageExtended to convert
+
+    Returns:
+        An OpenAI ChatCompletionMessageParam representation
+    """
+    # convert_to_openai now returns a list, return the first element for backward compatibility
+    messages = OpenAIConverter.convert_to_openai(multipart)
+    return messages[0] if messages else {"role": multipart.role, "content": ""}
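For the compatibility wrappers, a round-trip sketch under the same assumption; note the caveat in the code that only the first converted message comes back:

    from fast_agent.llm.provider.openai.openai_utils import (
        openai_message_param_to_prompt_message_multipart,
        prompt_message_multipart_to_openai_message_param,
    )

    extended = openai_message_param_to_prompt_message_multipart(
        {"role": "user", "content": "ping"}
    )
    param = prompt_message_multipart_to_openai_message_param(extended)  # first message only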
fast_agent/llm/provider/openai/responses.py
@@ -0,0 +1,133 @@
+# from openai.types.beta.chat import
+
+from mcp import Tool
+from mcp.types import ContentBlock, TextContent
+from openai import AsyncOpenAI
+from openai.types.chat import (
+    ChatCompletionMessage,
+    ChatCompletionMessageParam,
+)
+from openai.types.responses import (
+    ResponseReasoningItem,
+    ResponseReasoningSummaryTextDeltaEvent,
+    ResponseTextDeltaEvent,
+)
+
+from fast_agent.constants import REASONING
+from fast_agent.core.logging.logger import get_logger
+from fast_agent.event_progress import ProgressAction
+from fast_agent.llm.fastagent_llm import FastAgentLLM
+from fast_agent.llm.provider_types import Provider
+from fast_agent.llm.request_params import RequestParams
+from fast_agent.llm.stream_types import StreamChunk
+from fast_agent.mcp.prompt_message_extended import PromptMessageExtended
+from fast_agent.types.llm_stop_reason import LlmStopReason
+
+_logger = get_logger(__name__)
+
+DEFAULT_RESPONSES_MODEL = "gpt-5-mini"
+DEFAULT_REASONING_EFFORT = "medium"
+
+
+# model selection
+# system prompt
+# usage info
+# reasoning/thinking display and summary
+# encrypted tokens
+
+
+class ResponsesLLM(FastAgentLLM[ChatCompletionMessageParam, ChatCompletionMessage]):
+    """LLM implementation for OpenAI's Responses models."""
+
+    # OpenAI-specific parameter exclusions
+
+    def __init__(self, provider=Provider.RESPONSES, *args, **kwargs):
+        super().__init__(*args, provider=provider, **kwargs)
+
+    async def _responses_client(self) -> AsyncOpenAI:
+        return AsyncOpenAI(api_key=self._api_key())
+
+    async def _apply_prompt_provider_specific(
+        self,
+        multipart_messages: list[PromptMessageExtended],
+        request_params: RequestParams | None = None,
+        tools: list[Tool] | None = None,
+        is_template: bool = False,
+    ) -> PromptMessageExtended:
+        responses_client = await self._responses_client()
+
+        async with responses_client.responses.stream(
+            model="gpt-5-mini",
+            instructions="You are a helpful assistant.",
+            input=multipart_messages[-1].all_text(),
+            reasoning={"summary": "auto", "effort": DEFAULT_REASONING_EFFORT},
+        ) as stream:
+            reasoning_chars: int = 0
+            text_chars: int = 0
+
+            async for event in stream:
+                if isinstance(event, ResponseReasoningSummaryTextDeltaEvent):
+                    reasoning_chars += len(event.delta)
+                    await self._emit_streaming_progress(
+                        model="gpt-5-mini (thinking)",
+                        new_total=reasoning_chars,
+                        type=ProgressAction.THINKING,
+                    )
+                if isinstance(event, ResponseTextDeltaEvent):
+                    # Notify stream listeners with the delta text
+                    self._notify_stream_listeners(StreamChunk(text=event.delta, is_reasoning=False))
+                    text_chars += len(event.delta)
+                    await self._emit_streaming_progress(
+                        model="gpt-5-mini",
+                        new_total=text_chars,
+                    )
+
+            final_response = await stream.get_final_response()
+            reasoning_content: list[ContentBlock] = []
+            for output_item in final_response.output:
+                if isinstance(output_item, ResponseReasoningItem):
+                    summary_text = "\n".join(part.text for part in output_item.summary if part.text)
+                    # reasoning text is not supplied by openai - leaving for future use with other providers
+                    reasoning_text = "".join(
+                        chunk.text
+                        for chunk in (output_item.content or [])
+                        if chunk.type == "reasoning_text"
+                    )
+                    if summary_text.strip():
+                        reasoning_content.append(TextContent(type="text", text=summary_text.strip()))
+                    if reasoning_text.strip():
+                        reasoning_content.append(
+                            TextContent(type="text", text=reasoning_text.strip())
+                        )
+            channels = {REASONING: reasoning_content} if reasoning_content else None
+
+            return PromptMessageExtended(
+                role="assistant",
+                channels=channels,
+                content=[TextContent(type="text", text=final_response.output_text)],
+                stop_reason=LlmStopReason.END_TURN,
+            )
+
+    async def _emit_streaming_progress(
+        self,
+        model: str,
+        new_total: int,
+        type: ProgressAction = ProgressAction.STREAMING,
+    ) -> None:
+        """Emit a streaming progress event.
+
+        Args:
+            model: The model being used.
+            new_total: The new total token count.
+        """
+        token_str = str(new_total).rjust(5)
+
+        # Emit progress event
+        data = {
+            "progress_action": type,
+            "model": model,
+            "agent_name": self.name,
+            "chat_turn": self.chat_turn(),
+            "details": token_str.strip(),  # Token count goes in details for STREAMING action
+        }
+        self.logger.info("Streaming progress", data=data)
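The class above wraps the OpenAI Responses streaming helper. A standalone sketch of that underlying pattern, using the same event types imported in the diff (assumes OPENAI_API_KEY is set and that the model name, taken from the hardcoded value above, is available to the account):

    import asyncio

    from openai import AsyncOpenAI
    from openai.types.responses import ResponseTextDeltaEvent

    async def main() -> None:
        client = AsyncOpenAI()
        async with client.responses.stream(
            model="gpt-5-mini",
            instructions="You are a helpful assistant.",
            input="Say hello.",
            reasoning={"summary": "auto", "effort": "medium"},
        ) as stream:
            async for event in stream:
                if isinstance(event, ResponseTextDeltaEvent):
                    print(event.delta, end="", flush=True)  # incremental output text
            final = await stream.get_final_response()
        print(f"\n[{len(final.output_text)} chars]")

    asyncio.run(main())

Note that _apply_prompt_provider_specific hardcodes "gpt-5-mini" and a fixed system prompt rather than using DEFAULT_RESPONSES_MODEL or the incoming request parameters, which reads as scaffolding for the TODO comments above it (model selection, system prompt, usage info).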
fast_agent/llm/provider_key_manager.py
@@ -0,0 +1,139 @@
+"""
+Provider API key management for various LLM providers.
+Centralizes API key handling logic to make provider implementations more generic.
+"""
+
+import os
+from typing import Any
+
+from pydantic import BaseModel
+
+from fast_agent.core.exceptions import ProviderKeyError
+
+PROVIDER_ENVIRONMENT_MAP: dict[str, str] = {
+    # default behaviour in _get_env_key_name is to capitalize the
+    # provider name and suffix "_API_KEY" - so no specific mapping needed unless overriding
+    "hf": "HF_TOKEN",
+    "responses": "OPENAI_API_KEY",  # Temporary workaround
+}
+PROVIDER_CONFIG_KEY_ALIASES: dict[str, tuple[str, ...]] = {
+    # HuggingFace historically used "huggingface" (full name) in config files,
+    # while the provider id is "hf". Support both spellings.
+    "hf": ("hf", "huggingface"),
+    "huggingface": ("huggingface", "hf"),
+}
+API_KEY_HINT_TEXT = "<your-api-key-here>"
+
+
+class ProviderKeyManager:
+    """
+    Manages API keys for different providers centrally.
+    This class abstracts away the provider-specific key access logic,
+    making the provider implementations more generic.
+    """
+
+    @staticmethod
+    def get_env_var(provider_name: str) -> str | None:
+        return os.getenv(ProviderKeyManager.get_env_key_name(provider_name))
+
+    @staticmethod
+    def get_env_key_name(provider_name: str) -> str:
+        return PROVIDER_ENVIRONMENT_MAP.get(provider_name, f"{provider_name.upper()}_API_KEY")
+
+    @staticmethod
+    def get_config_file_key(provider_name: str, config: Any) -> str | None:
+        api_key = None
+        if isinstance(config, BaseModel):
+            config = config.model_dump()
+        provider_name = provider_name.lower()
+        provider_keys = ProviderKeyManager._get_provider_config_keys(provider_name)
+        for key in provider_keys:
+            provider_settings = config.get(key)
+            if not provider_settings:
+                continue
+            api_key = provider_settings.get("api_key", API_KEY_HINT_TEXT)
+            if api_key == API_KEY_HINT_TEXT:
+                api_key = None
+            break
+
+        return api_key
+
+    @staticmethod
+    def _get_provider_config_keys(provider_name: str) -> list[str]:
+        """Return config key candidates for a provider (provider id + aliases)."""
+        keys = [provider_name]
+        for alias in PROVIDER_CONFIG_KEY_ALIASES.get(provider_name, ()):
+            if alias not in keys:
+                keys.append(alias)
+        return keys
+
+    @staticmethod
+    def get_api_key(provider_name: str, config: Any) -> str:
+        """
+        Gets the API key for the specified provider.
+
+        Args:
+            provider_name: Name of the provider (e.g., "anthropic", "openai")
+            config: The application configuration object
+
+        Returns:
+            The API key as a string
+
+        Raises:
+            ProviderKeyError: If the API key is not found or is invalid
+        """
+
+        from fast_agent.llm.provider_types import Provider
+
+        provider_name = provider_name.lower()
+
+        # Fast-agent provider doesn't need external API keys
+        if provider_name == "fast-agent":
+            return ""
+
+        # Google Vertex AI uses ADC/IAM and does not require an API key.
+        if provider_name == "google":
+            try:
+                cfg = config.model_dump() if isinstance(config, BaseModel) else config
+                if isinstance(cfg, dict) and bool((cfg.get("google") or {}).get("vertex_ai", {}).get("enabled")):
+                    return ""
+            except Exception:
+                pass
+
+        api_key = ProviderKeyManager.get_config_file_key(provider_name, config)
+        if not api_key:
+            api_key = ProviderKeyManager.get_env_var(provider_name)
+
+        # HuggingFace: also support tokens managed by huggingface_hub (e.g. `hf auth login`)
+        # even when HF_TOKEN isn't explicitly set in the environment or config.
+        if not api_key and provider_name in {"hf", "huggingface"}:
+            try:
+                from huggingface_hub import get_token  # type: ignore
+
+                api_key = get_token()
+            except Exception:
+                pass
+
+        if not api_key and provider_name == "generic":
+            api_key = "ollama"  # Default for generic provider
+
+        if not api_key:
+            # Get proper display name for error message
+            try:
+                provider_enum = Provider(provider_name)
+                display_name = provider_enum.display_name
+            except ValueError:
+                # Invalid provider name
+                raise ProviderKeyError(
+                    f"Invalid provider: {provider_name}",
+                    f"'{provider_name}' is not a valid provider name.",
+                )
+
+            raise ProviderKeyError(
+                f"{display_name} API key not configured",
+                f"The {display_name} API key is required but not set.\n"
+                f"Add it to your configuration file under {provider_name}.api_key "
+                f"or set the {ProviderKeyManager.get_env_key_name(provider_name)} environment variable.",
+            )
+
+        return api_key
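A sketch of the resolution order implemented above: config file first, then the environment variable derived by get_env_key_name, with a ProviderKeyError if neither is set. The key string is a placeholder, and a plain dict stands in for the application config object (get_config_file_key accepts either a dict or a Pydantic model):

    from fast_agent.llm.provider_key_manager import ProviderKeyManager

    # Default env-var derivation, plus the explicit overrides in PROVIDER_ENVIRONMENT_MAP
    assert ProviderKeyManager.get_env_key_name("openai") == "OPENAI_API_KEY"
    assert ProviderKeyManager.get_env_key_name("hf") == "HF_TOKEN"

    key = ProviderKeyManager.get_api_key("openai", {"openai": {"api_key": "sk-placeholder"}})
    assert key == "sk-placeholder"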
fast_agent/llm/provider_types.py
@@ -0,0 +1,34 @@
+"""
+Type definitions for LLM providers.
+"""
+
+from enum import Enum
+
+
+class Provider(Enum):
+    """Supported LLM providers"""
+
+    display_name: str
+
+    def __new__(cls, config_name, display_name=None):
+        obj = object.__new__(cls)
+        obj._value_ = config_name
+        obj.display_name = display_name or config_name.title()
+        return obj
+
+    ANTHROPIC = ("anthropic", "Anthropic")
+    DEEPSEEK = ("deepseek", "Deepseek")
+    FAST_AGENT = ("fast-agent", "fast-agent-internal")
+    GENERIC = ("generic", "Generic")
+    GOOGLE_OAI = ("googleoai", "GoogleOAI")  # For Google through OpenAI libraries
+    GOOGLE = ("google", "Google")  # For Google GenAI native library
+    OPENAI = ("openai", "OpenAI")
+    OPENROUTER = ("openrouter", "OpenRouter")
+    TENSORZERO = ("tensorzero", "TensorZero")  # For TensorZero Gateway
+    AZURE = ("azure", "Azure")  # Azure OpenAI Service
+    ALIYUN = ("aliyun", "Aliyun")  # Aliyun Bailian OpenAI Service
+    HUGGINGFACE = ("hf", "HuggingFace")  # For HuggingFace MCP connections
+    XAI = ("xai", "XAI")  # For xAI Grok models
+    BEDROCK = ("bedrock", "Bedrock")
+    GROQ = ("groq", "Groq")
+    RESPONSES = ("responses", "responses")
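The __new__ override keys each member by its config name and attaches the display name, so value lookup and display both work, e.g.:

    from fast_agent.llm.provider_types import Provider

    assert Provider("openai") is Provider.OPENAI      # lookup by config name
    assert Provider.OPENAI.display_name == "OpenAI"
    assert Provider.HUGGINGFACE.value == "hf"         # member name differs from config name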
fast_agent/llm/request_params.py
@@ -0,0 +1,61 @@
+"""
+Request parameters definitions for LLM interactions.
+"""
+
+from typing import Any
+
+from mcp import SamplingMessage
+from mcp.types import CreateMessageRequestParams
+from pydantic import Field
+
+from fast_agent.constants import DEFAULT_MAX_ITERATIONS
+
+
+class RequestParams(CreateMessageRequestParams):
+    """
+    Parameters to configure the FastAgentLLM 'generate' requests.
+    """
+
+    messages: list[SamplingMessage] = Field(exclude=True, default=[])
+    """
+    Ignored. 'messages' are removed from CreateMessageRequestParams
+    to avoid confusion with the 'message' parameter on the 'generate' method.
+    """
+
+    maxTokens: int = 2048
+    """The maximum number of tokens to sample, as requested by the server."""
+
+    model: str | None = None
+    """
+    The model to use for the LLM generation. This can only be set during Agent creation.
+    If specified, this overrides the 'modelPreferences' selection criteria.
+    """
+
+    use_history: bool = True
+    """
+    Agent/LLM maintains conversation history. Does not include applied Prompts.
+    """
+
+    max_iterations: int = DEFAULT_MAX_ITERATIONS
+    """
+    The maximum number of tool calls allowed in a conversation turn.
+    """
+
+    parallel_tool_calls: bool = True
+    """
+    Whether to allow simultaneous tool calls.
+    """
+    response_format: Any | None = None
+    """
+    Override response format for structured calls. Prefer sending a Pydantic model - only use in exceptional circumstances.
+    """
+
+    template_vars: dict[str, Any] = Field(default_factory=dict)
+    """
+    Optional dictionary of template variables for dynamic templates. Currently only works for the TensorZero inference backend.
+    """
+
+    mcp_metadata: dict[str, Any] | None = None
+    """
+    Metadata to pass through to MCP tool calls via the _meta field.
+    """
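A construction sketch: unspecified fields keep the defaults declared above, and the model name here is just an illustrative value:

    from fast_agent.llm.request_params import RequestParams

    params = RequestParams(model="gpt-4o", maxTokens=4096, parallel_tool_calls=False)
    assert params.use_history is True    # default retained
    assert params.max_iterations > 0     # from DEFAULT_MAX_ITERATIONS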