fast-agent-mcp 0.4.7 (fast_agent_mcp-0.4.7-py3-none-any.whl)
This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between package versions as they appear in their public registries.
- fast_agent/__init__.py +183 -0
- fast_agent/acp/__init__.py +19 -0
- fast_agent/acp/acp_aware_mixin.py +304 -0
- fast_agent/acp/acp_context.py +437 -0
- fast_agent/acp/content_conversion.py +136 -0
- fast_agent/acp/filesystem_runtime.py +427 -0
- fast_agent/acp/permission_store.py +269 -0
- fast_agent/acp/server/__init__.py +5 -0
- fast_agent/acp/server/agent_acp_server.py +1472 -0
- fast_agent/acp/slash_commands.py +1050 -0
- fast_agent/acp/terminal_runtime.py +408 -0
- fast_agent/acp/tool_permission_adapter.py +125 -0
- fast_agent/acp/tool_permissions.py +474 -0
- fast_agent/acp/tool_progress.py +814 -0
- fast_agent/agents/__init__.py +85 -0
- fast_agent/agents/agent_types.py +64 -0
- fast_agent/agents/llm_agent.py +350 -0
- fast_agent/agents/llm_decorator.py +1139 -0
- fast_agent/agents/mcp_agent.py +1337 -0
- fast_agent/agents/tool_agent.py +271 -0
- fast_agent/agents/workflow/agents_as_tools_agent.py +849 -0
- fast_agent/agents/workflow/chain_agent.py +212 -0
- fast_agent/agents/workflow/evaluator_optimizer.py +380 -0
- fast_agent/agents/workflow/iterative_planner.py +652 -0
- fast_agent/agents/workflow/maker_agent.py +379 -0
- fast_agent/agents/workflow/orchestrator_models.py +218 -0
- fast_agent/agents/workflow/orchestrator_prompts.py +248 -0
- fast_agent/agents/workflow/parallel_agent.py +250 -0
- fast_agent/agents/workflow/router_agent.py +353 -0
- fast_agent/cli/__init__.py +0 -0
- fast_agent/cli/__main__.py +73 -0
- fast_agent/cli/commands/acp.py +159 -0
- fast_agent/cli/commands/auth.py +404 -0
- fast_agent/cli/commands/check_config.py +783 -0
- fast_agent/cli/commands/go.py +514 -0
- fast_agent/cli/commands/quickstart.py +557 -0
- fast_agent/cli/commands/serve.py +143 -0
- fast_agent/cli/commands/server_helpers.py +114 -0
- fast_agent/cli/commands/setup.py +174 -0
- fast_agent/cli/commands/url_parser.py +190 -0
- fast_agent/cli/constants.py +40 -0
- fast_agent/cli/main.py +115 -0
- fast_agent/cli/terminal.py +24 -0
- fast_agent/config.py +798 -0
- fast_agent/constants.py +41 -0
- fast_agent/context.py +279 -0
- fast_agent/context_dependent.py +50 -0
- fast_agent/core/__init__.py +92 -0
- fast_agent/core/agent_app.py +448 -0
- fast_agent/core/core_app.py +137 -0
- fast_agent/core/direct_decorators.py +784 -0
- fast_agent/core/direct_factory.py +620 -0
- fast_agent/core/error_handling.py +27 -0
- fast_agent/core/exceptions.py +90 -0
- fast_agent/core/executor/__init__.py +0 -0
- fast_agent/core/executor/executor.py +280 -0
- fast_agent/core/executor/task_registry.py +32 -0
- fast_agent/core/executor/workflow_signal.py +324 -0
- fast_agent/core/fastagent.py +1186 -0
- fast_agent/core/logging/__init__.py +5 -0
- fast_agent/core/logging/events.py +138 -0
- fast_agent/core/logging/json_serializer.py +164 -0
- fast_agent/core/logging/listeners.py +309 -0
- fast_agent/core/logging/logger.py +278 -0
- fast_agent/core/logging/transport.py +481 -0
- fast_agent/core/prompt.py +9 -0
- fast_agent/core/prompt_templates.py +183 -0
- fast_agent/core/validation.py +326 -0
- fast_agent/event_progress.py +62 -0
- fast_agent/history/history_exporter.py +49 -0
- fast_agent/human_input/__init__.py +47 -0
- fast_agent/human_input/elicitation_handler.py +123 -0
- fast_agent/human_input/elicitation_state.py +33 -0
- fast_agent/human_input/form_elements.py +59 -0
- fast_agent/human_input/form_fields.py +256 -0
- fast_agent/human_input/simple_form.py +113 -0
- fast_agent/human_input/types.py +40 -0
- fast_agent/interfaces.py +310 -0
- fast_agent/llm/__init__.py +9 -0
- fast_agent/llm/cancellation.py +22 -0
- fast_agent/llm/fastagent_llm.py +931 -0
- fast_agent/llm/internal/passthrough.py +161 -0
- fast_agent/llm/internal/playback.py +129 -0
- fast_agent/llm/internal/silent.py +41 -0
- fast_agent/llm/internal/slow.py +38 -0
- fast_agent/llm/memory.py +275 -0
- fast_agent/llm/model_database.py +490 -0
- fast_agent/llm/model_factory.py +388 -0
- fast_agent/llm/model_info.py +102 -0
- fast_agent/llm/prompt_utils.py +155 -0
- fast_agent/llm/provider/anthropic/anthropic_utils.py +84 -0
- fast_agent/llm/provider/anthropic/cache_planner.py +56 -0
- fast_agent/llm/provider/anthropic/llm_anthropic.py +796 -0
- fast_agent/llm/provider/anthropic/multipart_converter_anthropic.py +462 -0
- fast_agent/llm/provider/bedrock/bedrock_utils.py +218 -0
- fast_agent/llm/provider/bedrock/llm_bedrock.py +2207 -0
- fast_agent/llm/provider/bedrock/multipart_converter_bedrock.py +84 -0
- fast_agent/llm/provider/google/google_converter.py +466 -0
- fast_agent/llm/provider/google/llm_google_native.py +681 -0
- fast_agent/llm/provider/openai/llm_aliyun.py +31 -0
- fast_agent/llm/provider/openai/llm_azure.py +143 -0
- fast_agent/llm/provider/openai/llm_deepseek.py +76 -0
- fast_agent/llm/provider/openai/llm_generic.py +35 -0
- fast_agent/llm/provider/openai/llm_google_oai.py +32 -0
- fast_agent/llm/provider/openai/llm_groq.py +42 -0
- fast_agent/llm/provider/openai/llm_huggingface.py +85 -0
- fast_agent/llm/provider/openai/llm_openai.py +1195 -0
- fast_agent/llm/provider/openai/llm_openai_compatible.py +138 -0
- fast_agent/llm/provider/openai/llm_openrouter.py +45 -0
- fast_agent/llm/provider/openai/llm_tensorzero_openai.py +128 -0
- fast_agent/llm/provider/openai/llm_xai.py +38 -0
- fast_agent/llm/provider/openai/multipart_converter_openai.py +561 -0
- fast_agent/llm/provider/openai/openai_multipart.py +169 -0
- fast_agent/llm/provider/openai/openai_utils.py +67 -0
- fast_agent/llm/provider/openai/responses.py +133 -0
- fast_agent/llm/provider_key_manager.py +139 -0
- fast_agent/llm/provider_types.py +34 -0
- fast_agent/llm/request_params.py +61 -0
- fast_agent/llm/sampling_converter.py +98 -0
- fast_agent/llm/stream_types.py +9 -0
- fast_agent/llm/usage_tracking.py +445 -0
- fast_agent/mcp/__init__.py +56 -0
- fast_agent/mcp/common.py +26 -0
- fast_agent/mcp/elicitation_factory.py +84 -0
- fast_agent/mcp/elicitation_handlers.py +164 -0
- fast_agent/mcp/gen_client.py +83 -0
- fast_agent/mcp/helpers/__init__.py +36 -0
- fast_agent/mcp/helpers/content_helpers.py +352 -0
- fast_agent/mcp/helpers/server_config_helpers.py +25 -0
- fast_agent/mcp/hf_auth.py +147 -0
- fast_agent/mcp/interfaces.py +92 -0
- fast_agent/mcp/logger_textio.py +108 -0
- fast_agent/mcp/mcp_agent_client_session.py +411 -0
- fast_agent/mcp/mcp_aggregator.py +2175 -0
- fast_agent/mcp/mcp_connection_manager.py +723 -0
- fast_agent/mcp/mcp_content.py +262 -0
- fast_agent/mcp/mime_utils.py +108 -0
- fast_agent/mcp/oauth_client.py +509 -0
- fast_agent/mcp/prompt.py +159 -0
- fast_agent/mcp/prompt_message_extended.py +155 -0
- fast_agent/mcp/prompt_render.py +84 -0
- fast_agent/mcp/prompt_serialization.py +580 -0
- fast_agent/mcp/prompts/__init__.py +0 -0
- fast_agent/mcp/prompts/__main__.py +7 -0
- fast_agent/mcp/prompts/prompt_constants.py +18 -0
- fast_agent/mcp/prompts/prompt_helpers.py +238 -0
- fast_agent/mcp/prompts/prompt_load.py +186 -0
- fast_agent/mcp/prompts/prompt_server.py +552 -0
- fast_agent/mcp/prompts/prompt_template.py +438 -0
- fast_agent/mcp/resource_utils.py +215 -0
- fast_agent/mcp/sampling.py +200 -0
- fast_agent/mcp/server/__init__.py +4 -0
- fast_agent/mcp/server/agent_server.py +613 -0
- fast_agent/mcp/skybridge.py +44 -0
- fast_agent/mcp/sse_tracking.py +287 -0
- fast_agent/mcp/stdio_tracking_simple.py +59 -0
- fast_agent/mcp/streamable_http_tracking.py +309 -0
- fast_agent/mcp/tool_execution_handler.py +137 -0
- fast_agent/mcp/tool_permission_handler.py +88 -0
- fast_agent/mcp/transport_tracking.py +634 -0
- fast_agent/mcp/types.py +24 -0
- fast_agent/mcp/ui_agent.py +48 -0
- fast_agent/mcp/ui_mixin.py +209 -0
- fast_agent/mcp_server_registry.py +89 -0
- fast_agent/py.typed +0 -0
- fast_agent/resources/examples/data-analysis/analysis-campaign.py +189 -0
- fast_agent/resources/examples/data-analysis/analysis.py +68 -0
- fast_agent/resources/examples/data-analysis/fastagent.config.yaml +41 -0
- fast_agent/resources/examples/data-analysis/mount-point/WA_Fn-UseC_-HR-Employee-Attrition.csv +1471 -0
- fast_agent/resources/examples/mcp/elicitations/elicitation_account_server.py +88 -0
- fast_agent/resources/examples/mcp/elicitations/elicitation_forms_server.py +297 -0
- fast_agent/resources/examples/mcp/elicitations/elicitation_game_server.py +164 -0
- fast_agent/resources/examples/mcp/elicitations/fastagent.config.yaml +35 -0
- fast_agent/resources/examples/mcp/elicitations/fastagent.secrets.yaml.example +17 -0
- fast_agent/resources/examples/mcp/elicitations/forms_demo.py +107 -0
- fast_agent/resources/examples/mcp/elicitations/game_character.py +65 -0
- fast_agent/resources/examples/mcp/elicitations/game_character_handler.py +256 -0
- fast_agent/resources/examples/mcp/elicitations/tool_call.py +21 -0
- fast_agent/resources/examples/mcp/state-transfer/agent_one.py +18 -0
- fast_agent/resources/examples/mcp/state-transfer/agent_two.py +18 -0
- fast_agent/resources/examples/mcp/state-transfer/fastagent.config.yaml +27 -0
- fast_agent/resources/examples/mcp/state-transfer/fastagent.secrets.yaml.example +15 -0
- fast_agent/resources/examples/researcher/fastagent.config.yaml +61 -0
- fast_agent/resources/examples/researcher/researcher-eval.py +53 -0
- fast_agent/resources/examples/researcher/researcher-imp.py +189 -0
- fast_agent/resources/examples/researcher/researcher.py +36 -0
- fast_agent/resources/examples/tensorzero/.env.sample +2 -0
- fast_agent/resources/examples/tensorzero/Makefile +31 -0
- fast_agent/resources/examples/tensorzero/README.md +56 -0
- fast_agent/resources/examples/tensorzero/agent.py +35 -0
- fast_agent/resources/examples/tensorzero/demo_images/clam.jpg +0 -0
- fast_agent/resources/examples/tensorzero/demo_images/crab.png +0 -0
- fast_agent/resources/examples/tensorzero/demo_images/shrimp.png +0 -0
- fast_agent/resources/examples/tensorzero/docker-compose.yml +105 -0
- fast_agent/resources/examples/tensorzero/fastagent.config.yaml +19 -0
- fast_agent/resources/examples/tensorzero/image_demo.py +67 -0
- fast_agent/resources/examples/tensorzero/mcp_server/Dockerfile +25 -0
- fast_agent/resources/examples/tensorzero/mcp_server/entrypoint.sh +35 -0
- fast_agent/resources/examples/tensorzero/mcp_server/mcp_server.py +31 -0
- fast_agent/resources/examples/tensorzero/mcp_server/pyproject.toml +11 -0
- fast_agent/resources/examples/tensorzero/simple_agent.py +25 -0
- fast_agent/resources/examples/tensorzero/tensorzero_config/system_schema.json +29 -0
- fast_agent/resources/examples/tensorzero/tensorzero_config/system_template.minijinja +11 -0
- fast_agent/resources/examples/tensorzero/tensorzero_config/tensorzero.toml +35 -0
- fast_agent/resources/examples/workflows/agents_as_tools_extended.py +73 -0
- fast_agent/resources/examples/workflows/agents_as_tools_simple.py +50 -0
- fast_agent/resources/examples/workflows/chaining.py +37 -0
- fast_agent/resources/examples/workflows/evaluator.py +77 -0
- fast_agent/resources/examples/workflows/fastagent.config.yaml +26 -0
- fast_agent/resources/examples/workflows/graded_report.md +89 -0
- fast_agent/resources/examples/workflows/human_input.py +28 -0
- fast_agent/resources/examples/workflows/maker.py +156 -0
- fast_agent/resources/examples/workflows/orchestrator.py +70 -0
- fast_agent/resources/examples/workflows/parallel.py +56 -0
- fast_agent/resources/examples/workflows/router.py +69 -0
- fast_agent/resources/examples/workflows/short_story.md +13 -0
- fast_agent/resources/examples/workflows/short_story.txt +19 -0
- fast_agent/resources/setup/.gitignore +30 -0
- fast_agent/resources/setup/agent.py +28 -0
- fast_agent/resources/setup/fastagent.config.yaml +65 -0
- fast_agent/resources/setup/fastagent.secrets.yaml.example +38 -0
- fast_agent/resources/setup/pyproject.toml.tmpl +23 -0
- fast_agent/skills/__init__.py +9 -0
- fast_agent/skills/registry.py +235 -0
- fast_agent/tools/elicitation.py +369 -0
- fast_agent/tools/shell_runtime.py +402 -0
- fast_agent/types/__init__.py +59 -0
- fast_agent/types/conversation_summary.py +294 -0
- fast_agent/types/llm_stop_reason.py +78 -0
- fast_agent/types/message_search.py +249 -0
- fast_agent/ui/__init__.py +38 -0
- fast_agent/ui/console.py +59 -0
- fast_agent/ui/console_display.py +1080 -0
- fast_agent/ui/elicitation_form.py +946 -0
- fast_agent/ui/elicitation_style.py +59 -0
- fast_agent/ui/enhanced_prompt.py +1400 -0
- fast_agent/ui/history_display.py +734 -0
- fast_agent/ui/interactive_prompt.py +1199 -0
- fast_agent/ui/markdown_helpers.py +104 -0
- fast_agent/ui/markdown_truncator.py +1004 -0
- fast_agent/ui/mcp_display.py +857 -0
- fast_agent/ui/mcp_ui_utils.py +235 -0
- fast_agent/ui/mermaid_utils.py +169 -0
- fast_agent/ui/message_primitives.py +50 -0
- fast_agent/ui/notification_tracker.py +205 -0
- fast_agent/ui/plain_text_truncator.py +68 -0
- fast_agent/ui/progress_display.py +10 -0
- fast_agent/ui/rich_progress.py +195 -0
- fast_agent/ui/streaming.py +774 -0
- fast_agent/ui/streaming_buffer.py +449 -0
- fast_agent/ui/tool_display.py +422 -0
- fast_agent/ui/usage_display.py +204 -0
- fast_agent/utils/__init__.py +5 -0
- fast_agent/utils/reasoning_stream_parser.py +77 -0
- fast_agent/utils/time.py +22 -0
- fast_agent/workflow_telemetry.py +261 -0
- fast_agent_mcp-0.4.7.dist-info/METADATA +788 -0
- fast_agent_mcp-0.4.7.dist-info/RECORD +261 -0
- fast_agent_mcp-0.4.7.dist-info/WHEEL +4 -0
- fast_agent_mcp-0.4.7.dist-info/entry_points.txt +7 -0
- fast_agent_mcp-0.4.7.dist-info/licenses/LICENSE +201 -0

fast_agent/llm/model_factory.py
@@ -0,0 +1,388 @@
from enum import Enum
from typing import Type, Union

from pydantic import BaseModel

from fast_agent.core.exceptions import ModelConfigError
from fast_agent.interfaces import AgentProtocol, FastAgentLLMProtocol, LLMFactoryProtocol
from fast_agent.llm.internal.passthrough import PassthroughLLM
from fast_agent.llm.internal.playback import PlaybackLLM
from fast_agent.llm.internal.silent import SilentLLM
from fast_agent.llm.internal.slow import SlowLLM
from fast_agent.llm.provider_types import Provider
from fast_agent.types import RequestParams

# Type alias for LLM classes
LLMClass = Union[Type[PassthroughLLM], Type[PlaybackLLM], Type[SilentLLM], Type[SlowLLM], type]


class ReasoningEffort(Enum):
    """Optional reasoning effort levels"""

    MINIMAL = "minimal"
    LOW = "low"
    MEDIUM = "medium"
    HIGH = "high"


class ModelConfig(BaseModel):
    """Configuration for a specific model"""

    provider: Provider
    model_name: str
    reasoning_effort: ReasoningEffort | None = None


class ModelFactory:
    """Factory for creating LLM instances based on model specifications"""

    # Mapping of effort strings to enum values
    # TODO -- move this to the model database
    EFFORT_MAP = {
        "minimal": ReasoningEffort.MINIMAL,  # Alias for low effort
        "low": ReasoningEffort.LOW,
        "medium": ReasoningEffort.MEDIUM,
        "high": ReasoningEffort.HIGH,
    }

    """
    TODO -- add audio supporting got-4o-audio-preview
    TODO -- bring model parameter configuration here
    Mapping of model names to their default providers
    """
    DEFAULT_PROVIDERS = {
        "passthrough": Provider.FAST_AGENT,
        "silent": Provider.FAST_AGENT,
        "playback": Provider.FAST_AGENT,
        "slow": Provider.FAST_AGENT,
        "gpt-4o": Provider.OPENAI,
        "gpt-4o-mini": Provider.OPENAI,
        "gpt-4.1": Provider.OPENAI,
        "gpt-4.1-mini": Provider.OPENAI,
        "gpt-4.1-nano": Provider.OPENAI,
        "gpt-5": Provider.OPENAI,
        "gpt-5.1": Provider.OPENAI,
        "gpt-5.1-mini": Provider.OPENAI,
        "gpt-5.1-nano": Provider.OPENAI,
        "gpt-5-mini": Provider.OPENAI,
        "gpt-5-nano": Provider.OPENAI,
        "o1-mini": Provider.OPENAI,
        "o1": Provider.OPENAI,
        "o1-preview": Provider.OPENAI,
        "o3": Provider.OPENAI,
        "o3-mini": Provider.OPENAI,
        "o4-mini": Provider.OPENAI,
        "claude-3-haiku-20240307": Provider.ANTHROPIC,
        "claude-3-5-haiku-20241022": Provider.ANTHROPIC,
        "claude-3-5-haiku-latest": Provider.ANTHROPIC,
        "claude-3-5-sonnet-20240620": Provider.ANTHROPIC,
        "claude-3-5-sonnet-20241022": Provider.ANTHROPIC,
        "claude-3-5-sonnet-latest": Provider.ANTHROPIC,
        "claude-3-7-sonnet-20250219": Provider.ANTHROPIC,
        "claude-3-7-sonnet-latest": Provider.ANTHROPIC,
        "claude-3-opus-20240229": Provider.ANTHROPIC,
        "claude-3-opus-latest": Provider.ANTHROPIC,
        "claude-opus-4-0": Provider.ANTHROPIC,
        "claude-opus-4-1": Provider.ANTHROPIC,
        "claude-opus-4-5": Provider.ANTHROPIC,
        "claude-opus-4-20250514": Provider.ANTHROPIC,
        "claude-sonnet-4-20250514": Provider.ANTHROPIC,
        "claude-sonnet-4-0": Provider.ANTHROPIC,
        "claude-sonnet-4-5-20250929": Provider.ANTHROPIC,
        "claude-sonnet-4-5": Provider.ANTHROPIC,
        "claude-haiku-4-5": Provider.ANTHROPIC,
        "deepseek-chat": Provider.DEEPSEEK,
        "gemini-2.0-flash": Provider.GOOGLE,
        "gemini-2.5-flash-preview-05-20": Provider.GOOGLE,
        "gemini-2.5-flash-preview-09-2025": Provider.GOOGLE,
        "gemini-2.5-pro-preview-05-06": Provider.GOOGLE,
        "gemini-2.5-pro": Provider.GOOGLE,
        "gemini-3-pro-preview": Provider.GOOGLE,
        "grok-4": Provider.XAI,
        "grok-4-0709": Provider.XAI,
        "grok-3": Provider.XAI,
        "grok-3-mini": Provider.XAI,
        "grok-3-fast": Provider.XAI,
        "grok-3-mini-fast": Provider.XAI,
        "qwen-turbo": Provider.ALIYUN,
        "qwen-plus": Provider.ALIYUN,
        "qwen-max": Provider.ALIYUN,
        "qwen-long": Provider.ALIYUN,
    }

    MODEL_ALIASES = {
        "gpt51": "openai.gpt-5.1",
        "sonnet": "claude-sonnet-4-5",
        "sonnet4": "claude-sonnet-4-0",
        "sonnet45": "claude-sonnet-4-5",
        "sonnet35": "claude-3-5-sonnet-latest",
        "sonnet37": "claude-3-7-sonnet-latest",
        "claude": "claude-sonnet-4-5",
        "haiku": "claude-haiku-4-5",
        "haiku3": "claude-3-haiku-20240307",
        "haiku35": "claude-3-5-haiku-latest",
        "haiku45": "claude-haiku-4-5",
        "opus": "claude-opus-4-5",
        "opus4": "claude-opus-4-1",
        "opus45": "claude-opus-4-5",
        "opus3": "claude-3-opus-latest",
        "deepseekv3": "deepseek-chat",
        "deepseek3": "deepseek-chat",
        "deepseek": "deepseek-chat",
        "gemini2": "gemini-2.0-flash",
        "gemini25": "gemini-2.5-flash-preview-09-2025",
        "gemini25pro": "gemini-2.5-pro",
        "gemini3": "gemini-3-pro-preview",
        "grok-4-fast": "xai.grok-4-fast-non-reasoning",
        "grok-4-fast-reasoning": "xai.grok-4-fast-reasoning",
        "kimigroq": "groq.moonshotai/kimi-k2-instruct-0905",
        "minimax": "hf.MiniMaxAI/MiniMax-M2",
        "kimi": "hf.moonshotai/Kimi-K2-Instruct-0905:groq",
        "gpt-oss": "hf.openai/gpt-oss-120b",
        "gpt-oss-20b": "hf.openai/gpt-oss-20b",
        "glm": "hf.zai-org/GLM-4.6:cerebras",
        "qwen3": "hf.Qwen/Qwen3-Next-80B-A3B-Instruct:together",
        "deepseek31": "hf.deepseek-ai/DeepSeek-V3.1",
        "kimithink": "hf.moonshotai/Kimi-K2-Thinking:together",
        "deepseek32": "deepseek-ai/DeepSeek-V3.2-Exp:novita",
    }

    @staticmethod
    def _bedrock_pattern_matches(model_name: str) -> bool:
        """Return True if model_name matches Bedrock's expected pattern, else False.

        Uses provider's helper if available; otherwise, returns False.
        """
        try:
            from fast_agent.llm.provider.bedrock.llm_bedrock import BedrockLLM  # type: ignore

            return BedrockLLM.matches_model_pattern(model_name)
        except Exception:
            return False

    # Mapping of providers to their LLM classes
    PROVIDER_CLASSES: dict[Provider, LLMClass] = {}

    # Mapping of special model names to their specific LLM classes
    # This overrides the provider-based class selection
    MODEL_SPECIFIC_CLASSES: dict[str, LLMClass] = {
        "playback": PlaybackLLM,
        "silent": SilentLLM,
        "slow": SlowLLM,
    }

    @classmethod
    def parse_model_string(
        cls, model_string: str, aliases: dict[str, str] | None = None
    ) -> ModelConfig:
        """Parse a model string into a ModelConfig object

        Args:
            model_string: The model specification string (e.g. "gpt-4.1", "kimi:groq")
            aliases: Optional custom aliases map. Defaults to MODEL_ALIASES.
        """
        if aliases is None:
            aliases = cls.MODEL_ALIASES

        suffix: str | None = None
        if ":" in model_string:
            base, suffix = model_string.rsplit(":", 1)
            if base:
                model_string = base

        model_string = aliases.get(model_string, model_string)

        # If user provided a suffix (e.g., kimi:groq), strip any existing suffix
        # from the resolved alias (e.g., hf.model:cerebras -> hf.model)
        if suffix and ":" in model_string:
            model_string = model_string.rsplit(":", 1)[0]
        provider_override: Provider | None = None
        if "/" in model_string:
            prefix, rest = model_string.split("/", 1)
            if prefix and rest and any(p.value == prefix for p in Provider):
                provider_override = Provider(prefix)
                model_string = rest

        parts = model_string.split(".")

        model_name_str = model_string  # Default full string as model name initially
        provider: Provider | None = provider_override
        reasoning_effort = None
        parts_for_provider_model = []

        # Check for reasoning effort first (last part)
        if len(parts) > 1 and parts[-1].lower() in cls.EFFORT_MAP:
            reasoning_effort = cls.EFFORT_MAP[parts[-1].lower()]
            # Remove effort from parts list for provider/model name determination
            parts_for_provider_model = parts[:-1]
        else:
            parts_for_provider_model = parts[:]

        # Try to match longest possible provider string
        identified_provider_parts = 0  # How many parts belong to the provider string

        if provider is None and len(parts_for_provider_model) >= 2:
            potential_provider_str = f"{parts_for_provider_model[0]}.{parts_for_provider_model[1]}"
            if any(p.value == potential_provider_str for p in Provider):
                provider = Provider(potential_provider_str)
                identified_provider_parts = 2

        if provider is None and len(parts_for_provider_model) >= 1:
            potential_provider_str = parts_for_provider_model[0]
            if any(p.value == potential_provider_str for p in Provider):
                provider = Provider(potential_provider_str)
                identified_provider_parts = 1

        # Construct model_name from remaining parts
        if identified_provider_parts > 0:
            model_name_str = ".".join(parts_for_provider_model[identified_provider_parts:])
        else:
            # If no provider prefix was matched, the whole string (after effort removal) is the model name
            model_name_str = ".".join(parts_for_provider_model)

        # If provider still None, try to get from DEFAULT_PROVIDERS using the model_name_str
        if provider is None:
            provider = cls.DEFAULT_PROVIDERS.get(model_name_str)

        # If still None, try pattern matching for Bedrock models
        if provider is None and cls._bedrock_pattern_matches(model_name_str):
            provider = Provider.BEDROCK

        if provider is None:
            raise ModelConfigError(
                f"Unknown model or provider for: {model_string}. Model name parsed as '{model_name_str}'"
            )

        if provider == Provider.TENSORZERO and not model_name_str:
            raise ModelConfigError(
                f"TensorZero provider requires a function name after the provider "
                f"(e.g., tensorzero.my-function), got: {model_string}"
            )

        if suffix:
            model_name_str = f"{model_name_str}:{suffix}"

        return ModelConfig(
            provider=provider, model_name=model_name_str, reasoning_effort=reasoning_effort
        )

    @classmethod
    def create_factory(
        cls, model_string: str, aliases: dict[str, str] | None = None
    ) -> LLMFactoryProtocol:
        """
        Creates a factory function that follows the attach_llm protocol.

        Args:
            model_string: The model specification string (e.g. "gpt-4.1")
            aliases: Optional custom aliases map. Defaults to MODEL_ALIASES.

        Returns:
            A callable that takes an agent parameter and returns an LLM instance
        """
        config = cls.parse_model_string(model_string, aliases=aliases)

        # Ensure provider is valid before trying to access PROVIDER_CLASSES with it
        # Lazily ensure provider class map is populated and supports this provider
        if config.model_name not in cls.MODEL_SPECIFIC_CLASSES:
            llm_class = cls._load_provider_class(config.provider)
            # Stash for next time
            cls.PROVIDER_CLASSES[config.provider] = llm_class

        if config.model_name in cls.MODEL_SPECIFIC_CLASSES:
            llm_class = cls.MODEL_SPECIFIC_CLASSES[config.model_name]
        else:
            llm_class = cls.PROVIDER_CLASSES[config.provider]

        def factory(
            agent: AgentProtocol, request_params: RequestParams | None = None, **kwargs
        ) -> FastAgentLLMProtocol:
            base_params = RequestParams()
            base_params.model = config.model_name
            if config.reasoning_effort:
                kwargs["reasoning_effort"] = config.reasoning_effort.value
            llm_args = {
                "model": config.model_name,
                "request_params": request_params,
                "name": getattr(agent, "name", "fast-agent"),
                "instructions": getattr(agent, "instruction", None),
                **kwargs,
            }
            llm: FastAgentLLMProtocol = llm_class(**llm_args)
            return llm

        return factory

    @classmethod
    def _load_provider_class(cls, provider: Provider) -> type:
        """Import provider-specific LLM classes lazily to avoid heavy deps at import time."""
        try:
            if provider == Provider.FAST_AGENT:
                return PassthroughLLM
            if provider == Provider.ANTHROPIC:
                from fast_agent.llm.provider.anthropic.llm_anthropic import AnthropicLLM

                return AnthropicLLM
            if provider == Provider.OPENAI:
                from fast_agent.llm.provider.openai.llm_openai import OpenAILLM

                return OpenAILLM
            if provider == Provider.DEEPSEEK:
                from fast_agent.llm.provider.openai.llm_deepseek import DeepSeekLLM

                return DeepSeekLLM
            if provider == Provider.GENERIC:
                from fast_agent.llm.provider.openai.llm_generic import GenericLLM

                return GenericLLM
            if provider == Provider.GOOGLE_OAI:
                from fast_agent.llm.provider.openai.llm_google_oai import GoogleOaiLLM

                return GoogleOaiLLM
            if provider == Provider.GOOGLE:
                from fast_agent.llm.provider.google.llm_google_native import GoogleNativeLLM

                return GoogleNativeLLM

            if provider == Provider.HUGGINGFACE:
                from fast_agent.llm.provider.openai.llm_huggingface import HuggingFaceLLM

                return HuggingFaceLLM
            if provider == Provider.XAI:
                from fast_agent.llm.provider.openai.llm_xai import XAILLM

                return XAILLM
            if provider == Provider.OPENROUTER:
                from fast_agent.llm.provider.openai.llm_openrouter import OpenRouterLLM

                return OpenRouterLLM
            if provider == Provider.TENSORZERO:
                from fast_agent.llm.provider.openai.llm_tensorzero_openai import TensorZeroOpenAILLM

                return TensorZeroOpenAILLM
            if provider == Provider.AZURE:
                from fast_agent.llm.provider.openai.llm_azure import AzureOpenAILLM

                return AzureOpenAILLM
            if provider == Provider.ALIYUN:
                from fast_agent.llm.provider.openai.llm_aliyun import AliyunLLM

                return AliyunLLM
            if provider == Provider.BEDROCK:
                from fast_agent.llm.provider.bedrock.llm_bedrock import BedrockLLM

                return BedrockLLM
            if provider == Provider.GROQ:
                from fast_agent.llm.provider.openai.llm_groq import GroqLLM

                return GroqLLM
            if provider == Provider.RESPONSES:
                from fast_agent.llm.provider.openai.responses import ResponsesLLM

                return ResponsesLLM

        except Exception as e:
            raise ModelConfigError(
                f"Provider '{provider.value}' is unavailable or missing dependencies: {e}"
            )
        raise ModelConfigError(f"Unsupported provider: {provider}")
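
For reviewers, a minimal usage sketch of the factory above (not part of the package). It exercises only parse_model_string and create_factory; the SimpleNamespace agent is a hypothetical stand-in (the factory reads name and instruction via getattr), and the built-in "passthrough" model is used so no provider SDK is required.

from types import SimpleNamespace

from fast_agent.llm.model_factory import ModelFactory

# Alias and reasoning-effort handling are done by parse_model_string above.
cfg_sonnet = ModelFactory.parse_model_string("sonnet")          # claude-sonnet-4-5, Provider.ANTHROPIC
cfg_o3 = ModelFactory.parse_model_string("openai.o3.high")      # o3, Provider.OPENAI, ReasoningEffort.HIGH

# create_factory returns an LLMFactoryProtocol callable; "passthrough" resolves
# to the built-in PassthroughLLM, so attaching it needs no external dependencies.
factory = ModelFactory.create_factory("passthrough")
agent = SimpleNamespace(name="demo", instruction="You are a demo agent.")  # hypothetical stand-in
llm = factory(agent)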
fast_agent/llm/model_info.py
@@ -0,0 +1,102 @@
"""
Typed model information helpers.

Provides a small, pythonic interface to query model/provider and
capabilities (Text/Document/Vision), backed by the model database.
"""

from __future__ import annotations

from dataclasses import dataclass
from typing import TYPE_CHECKING

from fast_agent.llm.model_database import ModelDatabase
from fast_agent.llm.model_factory import ModelFactory
from fast_agent.llm.provider_types import Provider

if TYPE_CHECKING:
    # Import behind TYPE_CHECKING to avoid import cycles at runtime
    from fast_agent.interfaces import FastAgentLLMProtocol


@dataclass(frozen=True)
class ModelInfo:
    """Resolved model information with convenient capability accessors."""

    name: str
    provider: Provider
    context_window: int | None
    max_output_tokens: int | None
    tokenizes: list[str]
    json_mode: str | None
    reasoning: str | None

    @property
    def supports_text(self) -> bool:
        if "text/plain" in (self.tokenizes or []):
            return True
        return ModelDatabase.supports_mime(self.name, "text/plain")

    @property
    def supports_document(self) -> bool:
        # Document support currently keyed off PDF support
        if "application/pdf" in (self.tokenizes or []):
            return True
        return ModelDatabase.supports_mime(self.name, "pdf")

    @property
    def supports_vision(self) -> bool:
        # Any common image format indicates vision support
        tokenizes = self.tokenizes or []
        if any(mt in tokenizes for mt in ("image/jpeg", "image/png", "image/webp")):
            return True

        return any(
            ModelDatabase.supports_mime(self.name, mt)
            for mt in ("image/jpeg", "image/png", "image/webp")
        )

    @property
    def tdv_flags(self) -> tuple[bool, bool, bool]:
        """Convenience tuple: (text, document, vision)."""
        return (self.supports_text, self.supports_document, self.supports_vision)

    @classmethod
    def from_llm(cls, llm: "FastAgentLLMProtocol") -> "ModelInfo" | None:
        name = llm.model_name
        provider = llm.provider
        if not name:
            return None
        return cls.from_name(name, provider)

    @classmethod
    def from_name(cls, name: str, provider: Provider | None = None) -> "ModelInfo" | None:
        canonical_name = ModelFactory.MODEL_ALIASES.get(name, name)
        params = ModelDatabase.get_model_params(canonical_name)
        if not params:
            # Unknown model: return a conservative default that supports text only.
            # This matches the desired behavior for TDV display fallbacks.
            if provider is None:
                provider = Provider.GENERIC
            return ModelInfo(
                name=canonical_name,
                provider=provider,
                context_window=None,
                max_output_tokens=None,
                tokenizes=["text/plain"],
                json_mode=None,
                reasoning=None,
            )

        if provider is None:
            provider = ModelFactory.DEFAULT_PROVIDERS.get(canonical_name, Provider.GENERIC)

        return ModelInfo(
            name=canonical_name,
            provider=provider,
            context_window=params.context_window,
            max_output_tokens=params.max_output_tokens,
            tokenizes=params.tokenizes,
            json_mode=params.json_mode,
            reasoning=params.reasoning,
        )
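
A short usage sketch for the helper above (not part of the package). It assumes "sonnet" resolves through ModelFactory.MODEL_ALIASES to an entry in the model database; "some-internal-model" is a hypothetical name assumed to be absent from it.

from fast_agent.llm.model_info import ModelInfo

# Known model: capabilities are read from ModelDatabase.get_model_params.
info = ModelInfo.from_name("sonnet")  # canonical name claude-sonnet-4-5
text, document, vision = info.tdv_flags

# Unknown model: falls back to a conservative, text-only entry.
fallback = ModelInfo.from_name("some-internal-model")  # hypothetical, assumed unknown
# fallback.tokenizes == ["text/plain"] and fallback.provider == Provider.GENERIC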
fast_agent/llm/prompt_utils.py
@@ -0,0 +1,155 @@
"""
XML formatting utilities for consistent prompt engineering across components.
"""

from typing import TypedDict


def format_xml_tag(
    tag_name: str,
    content: str | None = None,
    attributes: dict[str, str] | None = None,
) -> str:
    """
    Format an XML tag with optional content and attributes.
    Uses self-closing tag when content is None or empty.

    Args:
        tag_name: Name of the XML tag
        content: Content to include inside the tag (None for self-closing)
        attributes: Dictionary of attribute name-value pairs

    Returns:
        Formatted XML tag as string
    """
    # Format attributes if provided
    attrs_str = ""
    if attributes:
        attrs_str = " " + " ".join(f'{k}="{v}"' for k, v in attributes.items())

    # Use self-closing tag if no content
    if content is None or content == "":
        return f"<{tag_name}{attrs_str} />"

    # Full tag with content
    return f"<{tag_name}{attrs_str}>{content}</{tag_name}>"


def format_fastagent_tag(
    tag_type: str,
    content: str | None = None,
    attributes: dict[str, str] | None = None,
) -> str:
    """
    Format a fastagent-namespaced XML tag with consistent formatting.

    Args:
        tag_type: Type of fastagent tag (without namespace prefix)
        content: Content to include inside the tag
        attributes: Dictionary of attribute name-value pairs

    Returns:
        Formatted fastagent XML tag as string
    """
    return format_xml_tag(f"fastagent:{tag_type}", content, attributes)


def format_server_info(
    server_name: str,
    description: str | None = None,
    tools: list[dict[str, str]] | None = None,
) -> str:
    """
    Format server information consistently across router and orchestrator modules.

    Args:
        server_name: Name of the server
        description: Optional server description
        tools: Optional list of tool dictionaries with 'name' and 'description' keys

    Returns:
        Formatted server XML as string
    """
    # Use self-closing tag if no description or tools
    if not description and not tools:
        return format_fastagent_tag("server", None, {"name": server_name})

    # Start building components
    components = []

    # Add description if present
    if description:
        desc_tag = format_fastagent_tag("description", description)
        components.append(desc_tag)

    # Add tools section if tools exist
    if tools and len(tools) > 0:
        tool_tags = []
        for tool in tools:
            tool_name = tool.get("name", "")
            tool_desc = tool.get("description", "")
            tool_tag = format_fastagent_tag("tool", tool_desc, {"name": tool_name})
            tool_tags.append(tool_tag)

        tools_content = "\n".join(tool_tags)
        tools_tag = format_fastagent_tag("tools", f"\n{tools_content}\n")
        components.append(tools_tag)

    # Combine all components
    server_content = "\n".join(components)
    return format_fastagent_tag("server", f"\n{server_content}\n", {"name": server_name})


class ServerInfo(TypedDict, total=False):
    name: str
    description: str
    tools: list[dict[str, str]]


def format_agent_info(
    agent_name: str,
    description: str | None = None,
    servers: list[ServerInfo] | None = None,
) -> str:
    """
    Format agent information consistently across router and orchestrator modules.

    Args:
        agent_name: Name of the agent
        description: Optional agent description/instruction
        servers: Optional list of server dictionaries with 'name', 'description', and 'tools' keys

    Returns:
        Formatted agent XML as string
    """
    # Start building components
    components = []

    # Add description if present
    if description:
        desc_tag = format_fastagent_tag("description", description)
        components.append(desc_tag)

    # If no description or servers, use self-closing tag
    if not description and not servers:
        return format_fastagent_tag("agent", None, {"name": agent_name})

    # If has servers, format them
    if servers and len(servers) > 0:
        server_tags = []
        for server in servers:
            server_name = server.get("name", "")
            server_desc = server.get("description")
            server_tools = server.get("tools")
            server_tag = format_server_info(server_name, server_desc, server_tools)
            server_tags.append(server_tag)

        # Only add servers section if we have servers
        if server_tags:
            servers_content = "\n".join(server_tags)
            servers_tag = format_fastagent_tag("servers", f"\n{servers_content}\n")
            components.append(servers_tag)

    # Combine all components
    agent_content = "\n".join(components)
    return format_fastagent_tag("agent", f"\n{agent_content}\n", {"name": agent_name})
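
A small illustration of the XML these helpers produce (not part of the package; the agent, server, and tool names are hypothetical).

from fast_agent.llm.prompt_utils import format_agent_info

xml = format_agent_info(
    "researcher",
    description="Searches the web and summarises findings.",
    servers=[
        {
            "name": "fetch",
            "description": "HTTP fetch server",
            "tools": [{"name": "get", "description": "Fetch a URL"}],
        }
    ],
)
print(xml)
# <fastagent:agent name="researcher">
# <fastagent:description>Searches the web and summarises findings.</fastagent:description>
# <fastagent:servers>
# <fastagent:server name="fetch">
# <fastagent:description>HTTP fetch server</fastagent:description>
# <fastagent:tools>
# <fastagent:tool name="get">Fetch a URL</fastagent:tool>
# </fastagent:tools>
# </fastagent:server>
# </fastagent:servers>
# </fastagent:agent>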