fast-agent-mcp 0.4.7__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- fast_agent/__init__.py +183 -0
- fast_agent/acp/__init__.py +19 -0
- fast_agent/acp/acp_aware_mixin.py +304 -0
- fast_agent/acp/acp_context.py +437 -0
- fast_agent/acp/content_conversion.py +136 -0
- fast_agent/acp/filesystem_runtime.py +427 -0
- fast_agent/acp/permission_store.py +269 -0
- fast_agent/acp/server/__init__.py +5 -0
- fast_agent/acp/server/agent_acp_server.py +1472 -0
- fast_agent/acp/slash_commands.py +1050 -0
- fast_agent/acp/terminal_runtime.py +408 -0
- fast_agent/acp/tool_permission_adapter.py +125 -0
- fast_agent/acp/tool_permissions.py +474 -0
- fast_agent/acp/tool_progress.py +814 -0
- fast_agent/agents/__init__.py +85 -0
- fast_agent/agents/agent_types.py +64 -0
- fast_agent/agents/llm_agent.py +350 -0
- fast_agent/agents/llm_decorator.py +1139 -0
- fast_agent/agents/mcp_agent.py +1337 -0
- fast_agent/agents/tool_agent.py +271 -0
- fast_agent/agents/workflow/agents_as_tools_agent.py +849 -0
- fast_agent/agents/workflow/chain_agent.py +212 -0
- fast_agent/agents/workflow/evaluator_optimizer.py +380 -0
- fast_agent/agents/workflow/iterative_planner.py +652 -0
- fast_agent/agents/workflow/maker_agent.py +379 -0
- fast_agent/agents/workflow/orchestrator_models.py +218 -0
- fast_agent/agents/workflow/orchestrator_prompts.py +248 -0
- fast_agent/agents/workflow/parallel_agent.py +250 -0
- fast_agent/agents/workflow/router_agent.py +353 -0
- fast_agent/cli/__init__.py +0 -0
- fast_agent/cli/__main__.py +73 -0
- fast_agent/cli/commands/acp.py +159 -0
- fast_agent/cli/commands/auth.py +404 -0
- fast_agent/cli/commands/check_config.py +783 -0
- fast_agent/cli/commands/go.py +514 -0
- fast_agent/cli/commands/quickstart.py +557 -0
- fast_agent/cli/commands/serve.py +143 -0
- fast_agent/cli/commands/server_helpers.py +114 -0
- fast_agent/cli/commands/setup.py +174 -0
- fast_agent/cli/commands/url_parser.py +190 -0
- fast_agent/cli/constants.py +40 -0
- fast_agent/cli/main.py +115 -0
- fast_agent/cli/terminal.py +24 -0
- fast_agent/config.py +798 -0
- fast_agent/constants.py +41 -0
- fast_agent/context.py +279 -0
- fast_agent/context_dependent.py +50 -0
- fast_agent/core/__init__.py +92 -0
- fast_agent/core/agent_app.py +448 -0
- fast_agent/core/core_app.py +137 -0
- fast_agent/core/direct_decorators.py +784 -0
- fast_agent/core/direct_factory.py +620 -0
- fast_agent/core/error_handling.py +27 -0
- fast_agent/core/exceptions.py +90 -0
- fast_agent/core/executor/__init__.py +0 -0
- fast_agent/core/executor/executor.py +280 -0
- fast_agent/core/executor/task_registry.py +32 -0
- fast_agent/core/executor/workflow_signal.py +324 -0
- fast_agent/core/fastagent.py +1186 -0
- fast_agent/core/logging/__init__.py +5 -0
- fast_agent/core/logging/events.py +138 -0
- fast_agent/core/logging/json_serializer.py +164 -0
- fast_agent/core/logging/listeners.py +309 -0
- fast_agent/core/logging/logger.py +278 -0
- fast_agent/core/logging/transport.py +481 -0
- fast_agent/core/prompt.py +9 -0
- fast_agent/core/prompt_templates.py +183 -0
- fast_agent/core/validation.py +326 -0
- fast_agent/event_progress.py +62 -0
- fast_agent/history/history_exporter.py +49 -0
- fast_agent/human_input/__init__.py +47 -0
- fast_agent/human_input/elicitation_handler.py +123 -0
- fast_agent/human_input/elicitation_state.py +33 -0
- fast_agent/human_input/form_elements.py +59 -0
- fast_agent/human_input/form_fields.py +256 -0
- fast_agent/human_input/simple_form.py +113 -0
- fast_agent/human_input/types.py +40 -0
- fast_agent/interfaces.py +310 -0
- fast_agent/llm/__init__.py +9 -0
- fast_agent/llm/cancellation.py +22 -0
- fast_agent/llm/fastagent_llm.py +931 -0
- fast_agent/llm/internal/passthrough.py +161 -0
- fast_agent/llm/internal/playback.py +129 -0
- fast_agent/llm/internal/silent.py +41 -0
- fast_agent/llm/internal/slow.py +38 -0
- fast_agent/llm/memory.py +275 -0
- fast_agent/llm/model_database.py +490 -0
- fast_agent/llm/model_factory.py +388 -0
- fast_agent/llm/model_info.py +102 -0
- fast_agent/llm/prompt_utils.py +155 -0
- fast_agent/llm/provider/anthropic/anthropic_utils.py +84 -0
- fast_agent/llm/provider/anthropic/cache_planner.py +56 -0
- fast_agent/llm/provider/anthropic/llm_anthropic.py +796 -0
- fast_agent/llm/provider/anthropic/multipart_converter_anthropic.py +462 -0
- fast_agent/llm/provider/bedrock/bedrock_utils.py +218 -0
- fast_agent/llm/provider/bedrock/llm_bedrock.py +2207 -0
- fast_agent/llm/provider/bedrock/multipart_converter_bedrock.py +84 -0
- fast_agent/llm/provider/google/google_converter.py +466 -0
- fast_agent/llm/provider/google/llm_google_native.py +681 -0
- fast_agent/llm/provider/openai/llm_aliyun.py +31 -0
- fast_agent/llm/provider/openai/llm_azure.py +143 -0
- fast_agent/llm/provider/openai/llm_deepseek.py +76 -0
- fast_agent/llm/provider/openai/llm_generic.py +35 -0
- fast_agent/llm/provider/openai/llm_google_oai.py +32 -0
- fast_agent/llm/provider/openai/llm_groq.py +42 -0
- fast_agent/llm/provider/openai/llm_huggingface.py +85 -0
- fast_agent/llm/provider/openai/llm_openai.py +1195 -0
- fast_agent/llm/provider/openai/llm_openai_compatible.py +138 -0
- fast_agent/llm/provider/openai/llm_openrouter.py +45 -0
- fast_agent/llm/provider/openai/llm_tensorzero_openai.py +128 -0
- fast_agent/llm/provider/openai/llm_xai.py +38 -0
- fast_agent/llm/provider/openai/multipart_converter_openai.py +561 -0
- fast_agent/llm/provider/openai/openai_multipart.py +169 -0
- fast_agent/llm/provider/openai/openai_utils.py +67 -0
- fast_agent/llm/provider/openai/responses.py +133 -0
- fast_agent/llm/provider_key_manager.py +139 -0
- fast_agent/llm/provider_types.py +34 -0
- fast_agent/llm/request_params.py +61 -0
- fast_agent/llm/sampling_converter.py +98 -0
- fast_agent/llm/stream_types.py +9 -0
- fast_agent/llm/usage_tracking.py +445 -0
- fast_agent/mcp/__init__.py +56 -0
- fast_agent/mcp/common.py +26 -0
- fast_agent/mcp/elicitation_factory.py +84 -0
- fast_agent/mcp/elicitation_handlers.py +164 -0
- fast_agent/mcp/gen_client.py +83 -0
- fast_agent/mcp/helpers/__init__.py +36 -0
- fast_agent/mcp/helpers/content_helpers.py +352 -0
- fast_agent/mcp/helpers/server_config_helpers.py +25 -0
- fast_agent/mcp/hf_auth.py +147 -0
- fast_agent/mcp/interfaces.py +92 -0
- fast_agent/mcp/logger_textio.py +108 -0
- fast_agent/mcp/mcp_agent_client_session.py +411 -0
- fast_agent/mcp/mcp_aggregator.py +2175 -0
- fast_agent/mcp/mcp_connection_manager.py +723 -0
- fast_agent/mcp/mcp_content.py +262 -0
- fast_agent/mcp/mime_utils.py +108 -0
- fast_agent/mcp/oauth_client.py +509 -0
- fast_agent/mcp/prompt.py +159 -0
- fast_agent/mcp/prompt_message_extended.py +155 -0
- fast_agent/mcp/prompt_render.py +84 -0
- fast_agent/mcp/prompt_serialization.py +580 -0
- fast_agent/mcp/prompts/__init__.py +0 -0
- fast_agent/mcp/prompts/__main__.py +7 -0
- fast_agent/mcp/prompts/prompt_constants.py +18 -0
- fast_agent/mcp/prompts/prompt_helpers.py +238 -0
- fast_agent/mcp/prompts/prompt_load.py +186 -0
- fast_agent/mcp/prompts/prompt_server.py +552 -0
- fast_agent/mcp/prompts/prompt_template.py +438 -0
- fast_agent/mcp/resource_utils.py +215 -0
- fast_agent/mcp/sampling.py +200 -0
- fast_agent/mcp/server/__init__.py +4 -0
- fast_agent/mcp/server/agent_server.py +613 -0
- fast_agent/mcp/skybridge.py +44 -0
- fast_agent/mcp/sse_tracking.py +287 -0
- fast_agent/mcp/stdio_tracking_simple.py +59 -0
- fast_agent/mcp/streamable_http_tracking.py +309 -0
- fast_agent/mcp/tool_execution_handler.py +137 -0
- fast_agent/mcp/tool_permission_handler.py +88 -0
- fast_agent/mcp/transport_tracking.py +634 -0
- fast_agent/mcp/types.py +24 -0
- fast_agent/mcp/ui_agent.py +48 -0
- fast_agent/mcp/ui_mixin.py +209 -0
- fast_agent/mcp_server_registry.py +89 -0
- fast_agent/py.typed +0 -0
- fast_agent/resources/examples/data-analysis/analysis-campaign.py +189 -0
- fast_agent/resources/examples/data-analysis/analysis.py +68 -0
- fast_agent/resources/examples/data-analysis/fastagent.config.yaml +41 -0
- fast_agent/resources/examples/data-analysis/mount-point/WA_Fn-UseC_-HR-Employee-Attrition.csv +1471 -0
- fast_agent/resources/examples/mcp/elicitations/elicitation_account_server.py +88 -0
- fast_agent/resources/examples/mcp/elicitations/elicitation_forms_server.py +297 -0
- fast_agent/resources/examples/mcp/elicitations/elicitation_game_server.py +164 -0
- fast_agent/resources/examples/mcp/elicitations/fastagent.config.yaml +35 -0
- fast_agent/resources/examples/mcp/elicitations/fastagent.secrets.yaml.example +17 -0
- fast_agent/resources/examples/mcp/elicitations/forms_demo.py +107 -0
- fast_agent/resources/examples/mcp/elicitations/game_character.py +65 -0
- fast_agent/resources/examples/mcp/elicitations/game_character_handler.py +256 -0
- fast_agent/resources/examples/mcp/elicitations/tool_call.py +21 -0
- fast_agent/resources/examples/mcp/state-transfer/agent_one.py +18 -0
- fast_agent/resources/examples/mcp/state-transfer/agent_two.py +18 -0
- fast_agent/resources/examples/mcp/state-transfer/fastagent.config.yaml +27 -0
- fast_agent/resources/examples/mcp/state-transfer/fastagent.secrets.yaml.example +15 -0
- fast_agent/resources/examples/researcher/fastagent.config.yaml +61 -0
- fast_agent/resources/examples/researcher/researcher-eval.py +53 -0
- fast_agent/resources/examples/researcher/researcher-imp.py +189 -0
- fast_agent/resources/examples/researcher/researcher.py +36 -0
- fast_agent/resources/examples/tensorzero/.env.sample +2 -0
- fast_agent/resources/examples/tensorzero/Makefile +31 -0
- fast_agent/resources/examples/tensorzero/README.md +56 -0
- fast_agent/resources/examples/tensorzero/agent.py +35 -0
- fast_agent/resources/examples/tensorzero/demo_images/clam.jpg +0 -0
- fast_agent/resources/examples/tensorzero/demo_images/crab.png +0 -0
- fast_agent/resources/examples/tensorzero/demo_images/shrimp.png +0 -0
- fast_agent/resources/examples/tensorzero/docker-compose.yml +105 -0
- fast_agent/resources/examples/tensorzero/fastagent.config.yaml +19 -0
- fast_agent/resources/examples/tensorzero/image_demo.py +67 -0
- fast_agent/resources/examples/tensorzero/mcp_server/Dockerfile +25 -0
- fast_agent/resources/examples/tensorzero/mcp_server/entrypoint.sh +35 -0
- fast_agent/resources/examples/tensorzero/mcp_server/mcp_server.py +31 -0
- fast_agent/resources/examples/tensorzero/mcp_server/pyproject.toml +11 -0
- fast_agent/resources/examples/tensorzero/simple_agent.py +25 -0
- fast_agent/resources/examples/tensorzero/tensorzero_config/system_schema.json +29 -0
- fast_agent/resources/examples/tensorzero/tensorzero_config/system_template.minijinja +11 -0
- fast_agent/resources/examples/tensorzero/tensorzero_config/tensorzero.toml +35 -0
- fast_agent/resources/examples/workflows/agents_as_tools_extended.py +73 -0
- fast_agent/resources/examples/workflows/agents_as_tools_simple.py +50 -0
- fast_agent/resources/examples/workflows/chaining.py +37 -0
- fast_agent/resources/examples/workflows/evaluator.py +77 -0
- fast_agent/resources/examples/workflows/fastagent.config.yaml +26 -0
- fast_agent/resources/examples/workflows/graded_report.md +89 -0
- fast_agent/resources/examples/workflows/human_input.py +28 -0
- fast_agent/resources/examples/workflows/maker.py +156 -0
- fast_agent/resources/examples/workflows/orchestrator.py +70 -0
- fast_agent/resources/examples/workflows/parallel.py +56 -0
- fast_agent/resources/examples/workflows/router.py +69 -0
- fast_agent/resources/examples/workflows/short_story.md +13 -0
- fast_agent/resources/examples/workflows/short_story.txt +19 -0
- fast_agent/resources/setup/.gitignore +30 -0
- fast_agent/resources/setup/agent.py +28 -0
- fast_agent/resources/setup/fastagent.config.yaml +65 -0
- fast_agent/resources/setup/fastagent.secrets.yaml.example +38 -0
- fast_agent/resources/setup/pyproject.toml.tmpl +23 -0
- fast_agent/skills/__init__.py +9 -0
- fast_agent/skills/registry.py +235 -0
- fast_agent/tools/elicitation.py +369 -0
- fast_agent/tools/shell_runtime.py +402 -0
- fast_agent/types/__init__.py +59 -0
- fast_agent/types/conversation_summary.py +294 -0
- fast_agent/types/llm_stop_reason.py +78 -0
- fast_agent/types/message_search.py +249 -0
- fast_agent/ui/__init__.py +38 -0
- fast_agent/ui/console.py +59 -0
- fast_agent/ui/console_display.py +1080 -0
- fast_agent/ui/elicitation_form.py +946 -0
- fast_agent/ui/elicitation_style.py +59 -0
- fast_agent/ui/enhanced_prompt.py +1400 -0
- fast_agent/ui/history_display.py +734 -0
- fast_agent/ui/interactive_prompt.py +1199 -0
- fast_agent/ui/markdown_helpers.py +104 -0
- fast_agent/ui/markdown_truncator.py +1004 -0
- fast_agent/ui/mcp_display.py +857 -0
- fast_agent/ui/mcp_ui_utils.py +235 -0
- fast_agent/ui/mermaid_utils.py +169 -0
- fast_agent/ui/message_primitives.py +50 -0
- fast_agent/ui/notification_tracker.py +205 -0
- fast_agent/ui/plain_text_truncator.py +68 -0
- fast_agent/ui/progress_display.py +10 -0
- fast_agent/ui/rich_progress.py +195 -0
- fast_agent/ui/streaming.py +774 -0
- fast_agent/ui/streaming_buffer.py +449 -0
- fast_agent/ui/tool_display.py +422 -0
- fast_agent/ui/usage_display.py +204 -0
- fast_agent/utils/__init__.py +5 -0
- fast_agent/utils/reasoning_stream_parser.py +77 -0
- fast_agent/utils/time.py +22 -0
- fast_agent/workflow_telemetry.py +261 -0
- fast_agent_mcp-0.4.7.dist-info/METADATA +788 -0
- fast_agent_mcp-0.4.7.dist-info/RECORD +261 -0
- fast_agent_mcp-0.4.7.dist-info/WHEEL +4 -0
- fast_agent_mcp-0.4.7.dist-info/entry_points.txt +7 -0
- fast_agent_mcp-0.4.7.dist-info/licenses/LICENSE +201 -0
|
@@ -0,0 +1,31 @@
|
|
|
1
|
+
from fast_agent.llm.provider.openai.llm_groq import GroqLLM
|
|
2
|
+
from fast_agent.llm.provider.openai.llm_openai import OpenAILLM
|
|
3
|
+
from fast_agent.llm.provider_types import Provider
|
|
4
|
+
from fast_agent.types import RequestParams
|
|
5
|
+
|
|
6
|
+
# Aliyun DashScope OpenAI-compatible endpoint, used when no base_url is configured.
ALIYUN_BASE_URL = "https://dashscope.aliyuncs.com/compatible-mode/v1"
# Fallback model when the caller does not specify one.
DEFAULT_QWEN_MODEL = "qwen-turbo"
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
class AliyunLLM(GroqLLM):
    """OpenAI-compatible LLM for Aliyun DashScope (Qwen models).

    Reuses the Groq implementation, registering under the ALIYUN provider.
    """

    def __init__(self, *args, **kwargs) -> None:
        # Deliberately bypass GroqLLM.__init__ so the provider is set to
        # ALIYUN while the rest of the Groq behaviour is inherited unchanged.
        OpenAILLM.__init__(self, *args, provider=Provider.ALIYUN, **kwargs)

    def _initialize_default_params(self, kwargs: dict) -> RequestParams:
        """Initialize Aliyun-specific default parameters."""
        # Start from the parent defaults (includes the ModelDatabase lookup),
        # then layer the Aliyun-specific overrides on top.
        params = super()._initialize_default_params(kwargs)
        params.model = kwargs.get("model", DEFAULT_QWEN_MODEL)
        params.parallel_tool_calls = True
        return params

    def _base_url(self) -> str:
        """Return the configured Aliyun base URL, or the public default."""
        configured = None
        cfg = self.context.config
        if cfg and cfg.aliyun:
            configured = cfg.aliyun.base_url
        return configured or ALIYUN_BASE_URL
|
|
@@ -0,0 +1,143 @@
|
|
|
1
|
+
from openai import AsyncAzureOpenAI, AsyncOpenAI, AuthenticationError
|
|
2
|
+
|
|
3
|
+
from fast_agent.core.exceptions import ProviderKeyError
|
|
4
|
+
from fast_agent.llm.provider.openai.llm_openai import OpenAILLM
|
|
5
|
+
from fast_agent.llm.provider_types import Provider
|
|
6
|
+
|
|
7
|
+
try:
|
|
8
|
+
from azure.identity import DefaultAzureCredential
|
|
9
|
+
except ImportError:
|
|
10
|
+
DefaultAzureCredential = None
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
def _extract_resource_name(url: str) -> str | None:
|
|
14
|
+
from urllib.parse import urlparse
|
|
15
|
+
|
|
16
|
+
host = urlparse(url).hostname or ""
|
|
17
|
+
suffix = ".openai.azure.com"
|
|
18
|
+
return host.replace(suffix, "") if host.endswith(suffix) else None
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
# Azure OpenAI API version used when the azure config does not specify one.
DEFAULT_AZURE_API_VERSION = "2024-10-21"
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
class AzureOpenAILLM(OpenAILLM):
    """
    Azure OpenAI implementation extending OpenAILLM.
    Handles both API Key and DefaultAzureCredential authentication.
    """

    def __init__(self, provider: Provider = Provider.AZURE, *args, **kwargs):
        # Set provider to AZURE, pass through to base
        super().__init__(provider=provider, *args, **kwargs)

        # Context/config extraction. The getattr chains keep this safe even
        # when the base class did not attach a context or config.
        context = getattr(self, "context", None)
        config = getattr(context, "config", None) if context else None
        azure_cfg = getattr(config, "azure", None) if config else None

        if azure_cfg is None:
            raise ProviderKeyError(
                "Missing Azure configuration",
                "Azure provider requires configuration section 'azure' in your config file.",
            )

        # Auth mode switch: True -> Azure AD via DefaultAzureCredential,
        # False -> plain API key.
        self.use_default_cred = getattr(azure_cfg, "use_default_azure_credential", False)
        # The deployment name doubles as the "model": an explicit model in the
        # default request params wins over the configured 'azure_deployment'.
        default_request_params = getattr(self, "default_request_params", None)
        self.deployment_name = getattr(default_request_params, "model", None) or getattr(
            azure_cfg, "azure_deployment", None
        )
        self.api_version = getattr(azure_cfg, "api_version", None) or DEFAULT_AZURE_API_VERSION

        if self.use_default_cred:
            # Azure AD path: an explicit endpoint is mandatory, and the
            # optional 'azure-identity' dependency must be importable.
            self.base_url = getattr(azure_cfg, "base_url", None)
            if not self.base_url:
                raise ProviderKeyError(
                    "Missing Azure endpoint",
                    "When using 'use_default_azure_credential', 'base_url' is required in azure config.",
                )
            if DefaultAzureCredential is None:
                raise ProviderKeyError(
                    "azure-identity not installed",
                    "You must install 'azure-identity' to use DefaultAzureCredential authentication.",
                )
            self.credential = DefaultAzureCredential()

            # Token-provider callback handed to AsyncAzureOpenAI below; it
            # requests a bearer token from the credential on each invocation.
            def get_azure_token():
                token = self.credential.get_token("https://cognitiveservices.azure.com/.default")
                return token.token

            self.get_azure_token = get_azure_token
        else:
            # API-key path: the endpoint may come directly from 'base_url' or
            # be derived from 'resource_name'.
            self.api_key = self._api_key()
            self.resource_name = getattr(azure_cfg, "resource_name", None)
            self.base_url = getattr(azure_cfg, "base_url", None) or (
                f"https://{self.resource_name}.openai.azure.com/" if self.resource_name else None
            )
            if not self.api_key:
                raise ProviderKeyError(
                    "Missing Azure OpenAI credentials",
                    "Field 'api_key' is required in azure config.",
                )
            if not (self.resource_name or self.base_url):
                raise ProviderKeyError(
                    "Missing Azure endpoint",
                    "Provide either 'resource_name' or 'base_url' under azure config.",
                )
            if not self.deployment_name:
                raise ProviderKeyError(
                    "Missing deployment name",
                    "Set 'azure_deployment' in config or pass model=<deployment>.",
                )
            # If resource_name was missing, try to extract it from base_url
            if not self.resource_name and self.base_url:
                self.resource_name = _extract_resource_name(self.base_url)

    def _api_key(self):
        """Override to return 'AzureCredential' when using DefaultAzureCredential"""
        # In AD mode no real key exists; this placeholder satisfies callers
        # that expect a key, while actual auth happens via the token provider.
        if self.use_default_cred:
            return "AzureCredential"
        return super()._api_key()

    def _openai_client(self) -> AsyncOpenAI:
        """
        Returns an AzureOpenAI client, handling both API Key and DefaultAzureCredential.
        """
        try:
            if self.use_default_cred:
                # base_url was validated in __init__, but guard again in case
                # it was mutated before client creation.
                if self.base_url is None:
                    raise ProviderKeyError(
                        "Missing Azure endpoint",
                        "azure_endpoint (base_url) is None at client creation time.",
                    )
                return AsyncAzureOpenAI(
                    azure_ad_token_provider=self.get_azure_token,
                    azure_endpoint=self.base_url,
                    api_version=self.api_version,
                    azure_deployment=self.deployment_name,
                )
            else:
                if self.base_url is None:
                    raise ProviderKeyError(
                        "Missing Azure endpoint",
                        "azure_endpoint (base_url) is None at client creation time.",
                    )
                return AsyncAzureOpenAI(
                    api_key=self.api_key,
                    azure_endpoint=self.base_url,
                    api_version=self.api_version,
                    azure_deployment=self.deployment_name,
                )
        except AuthenticationError as e:
            # Surface auth failures as provider-level errors with a hint
            # matching the active auth mode.
            if self.use_default_cred:
                raise ProviderKeyError(
                    "Invalid Azure AD credentials",
                    "The configured Azure AD credentials were rejected.\n"
                    "Please check your Azure identity setup.",
                ) from e
            else:
                raise ProviderKeyError(
                    "Invalid Azure OpenAI API key",
                    "The configured Azure OpenAI API key was rejected.\n"
                    "Please check that your API key is valid and not expired.",
                ) from e
|
|
@@ -0,0 +1,76 @@
|
|
|
1
|
+
from copy import copy
|
|
2
|
+
from typing import Type, cast
|
|
3
|
+
|
|
4
|
+
from openai.types.chat import (
|
|
5
|
+
ChatCompletionAssistantMessageParam,
|
|
6
|
+
ChatCompletionMessage,
|
|
7
|
+
)
|
|
8
|
+
|
|
9
|
+
from fast_agent.interfaces import ModelT
|
|
10
|
+
from fast_agent.llm.provider.openai.llm_openai_compatible import OpenAICompatibleLLM
|
|
11
|
+
from fast_agent.llm.provider_types import Provider
|
|
12
|
+
from fast_agent.types import RequestParams
|
|
13
|
+
|
|
14
|
+
# DeepSeek's OpenAI-compatible API endpoint.
DEEPSEEK_BASE_URL = "https://api.deepseek.com"
# Fallback model; DeepSeek currently exposes only two model types.
# NOTE(review): the public API model id is spelled "deepseek-chat" — confirm
# this unhyphenated default resolves correctly against the ModelDatabase/API.
DEFAULT_DEEPSEEK_MODEL = "deepseekchat"  # current Deepseek only has two type models
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
class DeepSeekLLM(OpenAICompatibleLLM):
    """OpenAI-compatible LLM for the DeepSeek API."""

    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, provider=Provider.DEEPSEEK, **kwargs)

    def _initialize_default_params(self, kwargs: dict) -> RequestParams:
        """Initialize Deepseek-specific default parameters."""
        # Parent defaults first (includes the ModelDatabase lookup), then the
        # DeepSeek-specific model override.
        params = super()._initialize_default_params(kwargs)
        params.model = kwargs.get("model", DEFAULT_DEEPSEEK_MODEL)
        return params

    def _base_url(self) -> str:
        """Return the configured DeepSeek base URL, or the public default."""
        cfg = self.context.config
        configured = cfg.deepseek.base_url if cfg and cfg.deepseek else None
        return configured or DEEPSEEK_BASE_URL

    def _build_structured_prompt_instruction(self, model: Type[ModelT]) -> str | None:
        """Describe the JSON object shape required for *model* as a prompt instruction."""
        schema = model.model_json_schema()
        props = schema.get("properties", {})
        required = set(schema.get("required", []))

        # Render a pseudo-JSON template: one annotated line per field.
        rendered = ["{"]
        for name, info in props.items():
            ftype = info.get("type", "string")
            entry = f' "{name}": "{ftype}"'
            desc = info.get("description", "")
            if desc:
                entry += f" // {desc}"
            if name in required:
                entry += " // REQUIRED"
            rendered.append(entry)
        rendered.append("}")
        format_description = "\n".join(rendered)

        return f"""YOU MUST RESPOND WITH A JSON OBJECT IN EXACTLY THIS FORMAT:
{format_description}

IMPORTANT RULES:
- Respond ONLY with the JSON object, no other text
- Do NOT include "properties" or "schema" wrappers
- Do NOT use code fences or markdown
- The response must be valid JSON that matches the format above
- All required fields must be included"""

    @classmethod
    def convert_message_to_message_param(
        cls, message: ChatCompletionMessage, **kwargs
    ) -> ChatCompletionAssistantMessageParam:
        """Convert a response object to an input parameter object to allow LLM calls to be chained."""
        if not hasattr(message, "reasoning_content"):
            return cast("ChatCompletionAssistantMessageParam", message)
        # Strip the provider-specific reasoning field from a shallow copy
        # before the message is replayed as input.
        cleaned = copy(message)
        del cleaned.reasoning_content
        return cast("ChatCompletionAssistantMessageParam", cleaned)
|
|
@@ -0,0 +1,35 @@
|
|
|
1
|
+
import os
|
|
2
|
+
|
|
3
|
+
from fast_agent.llm.provider.openai.llm_openai import OpenAILLM
|
|
4
|
+
from fast_agent.llm.provider_types import Provider
|
|
5
|
+
from fast_agent.types import RequestParams
|
|
6
|
+
|
|
7
|
+
# Default local Ollama endpoint (OpenAI-compatible API).
DEFAULT_OLLAMA_BASE_URL = "http://localhost:11434/v1"
# Fallback model when the caller does not specify one.
DEFAULT_OLLAMA_MODEL = "llama3.2:latest"
# Placeholder key; local Ollama servers typically do not validate it.
DEFAULT_OLLAMA_API_KEY = "ollama"
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
class GenericLLM(OpenAILLM):
    """Generic OpenAI-compatible LLM, defaulting to a local Ollama endpoint."""

    def __init__(self, *args, **kwargs) -> None:
        # Properly pass args and kwargs to parent, pinning the GENERIC provider.
        super().__init__(*args, provider=Provider.GENERIC, **kwargs)

    def _initialize_default_params(self, kwargs: dict) -> RequestParams:
        """Initialize Generic parameters."""
        chosen_model = kwargs.get("model", DEFAULT_OLLAMA_MODEL)

        return RequestParams(
            model=chosen_model,
            systemPrompt=self.instruction,
            parallel_tool_calls=True,
            max_iterations=10,
            use_history=True,
        )

    def _base_url(self) -> str:
        """Resolve the base URL: config > GENERIC_BASE_URL env var > Ollama default.

        A 'generic' config section whose base_url is unset/empty no longer
        shadows the env/default fallback (previously this could return None,
        whereas every sibling provider falls back to its default URL).
        """
        base_url = os.getenv("GENERIC_BASE_URL", DEFAULT_OLLAMA_BASE_URL)
        config = self.context.config
        if config and config.generic and config.generic.base_url:
            base_url = config.generic.base_url
        return base_url
|
|
@@ -0,0 +1,32 @@
|
|
|
1
|
+
from fast_agent.llm.provider.openai.llm_openai import OpenAILLM
|
|
2
|
+
from fast_agent.llm.provider_types import Provider
|
|
3
|
+
from fast_agent.types import RequestParams
|
|
4
|
+
|
|
5
|
+
# Google's OpenAI-compatibility endpoint for the Gemini API.
GOOGLE_BASE_URL = "https://generativelanguage.googleapis.com/v1beta/openai"
# Fallback model when the caller does not specify one.
DEFAULT_GOOGLE_MODEL = "gemini-2.0-flash"
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
class GoogleOaiLLM(OpenAILLM):
    """Gemini access through Google's OpenAI-compatibility endpoint."""

    # Credentials/settings are read from the 'google' config section.
    config_section = "google"

    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, provider=Provider.GOOGLE_OAI, **kwargs)

    def _initialize_default_params(self, kwargs: dict) -> RequestParams:
        """Initialize Google OpenAI Compatibility default parameters."""
        return RequestParams(
            model=kwargs.get("model", DEFAULT_GOOGLE_MODEL),
            systemPrompt=self.instruction,
            parallel_tool_calls=False,
            max_iterations=20,
            use_history=True,
        )

    def _base_url(self) -> str:
        """Return the configured Google base URL, or the public default."""
        cfg = self.context.config
        configured = cfg.google.base_url if cfg and cfg.google else None
        return configured or GOOGLE_BASE_URL
|
|
@@ -0,0 +1,42 @@
|
|
|
1
|
+
from fast_agent.llm.model_database import ModelDatabase
|
|
2
|
+
from fast_agent.llm.provider.openai.llm_openai_compatible import OpenAICompatibleLLM
|
|
3
|
+
from fast_agent.llm.provider_types import Provider
|
|
4
|
+
from fast_agent.types import RequestParams
|
|
5
|
+
|
|
6
|
+
# Groq's OpenAI-compatible API endpoint.
GROQ_BASE_URL = "https://api.groq.com/openai/v1"
# Fallback model when the caller does not specify one.
DEFAULT_GROQ_MODEL = "moonshotai/kimi-k2-instruct"

### There are some big refactorings to be had quite easily here now:
### - combining the structured output type handling
### - deduplicating between this and the deepseek llm
|
13
|
+
|
|
14
|
+
class GroqLLM(OpenAICompatibleLLM):
    """OpenAI-compatible LLM for the Groq API."""

    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, provider=Provider.GROQ, **kwargs)

    def _initialize_default_params(self, kwargs: dict) -> RequestParams:
        """Initialize Groq default parameters."""
        # Parent defaults first (includes the ModelDatabase lookup), then the
        # Groq-specific overrides.
        params = super()._initialize_default_params(kwargs)
        params.model = kwargs.get("model", DEFAULT_GROQ_MODEL)
        params.parallel_tool_calls = False
        return params

    def _supports_structured_prompt(self) -> bool:
        """Check the ModelDatabase JSON mode for the active model."""
        if self.default_request_params:
            active_model = self.default_request_params.model
        else:
            active_model = DEFAULT_GROQ_MODEL
        json_mode: str | None = ModelDatabase.get_json_mode(active_model)
        return json_mode == "object"

    def _base_url(self) -> str:
        """Return the configured Groq base URL, or the public default."""
        configured = None
        cfg = self.context.config
        if cfg and cfg.groq:
            configured = cfg.groq.base_url
        return configured or GROQ_BASE_URL
|
|
@@ -0,0 +1,85 @@
|
|
|
1
|
+
import os
|
|
2
|
+
|
|
3
|
+
from fast_agent.llm.provider.openai.llm_openai_compatible import OpenAICompatibleLLM
|
|
4
|
+
from fast_agent.llm.provider_types import Provider
|
|
5
|
+
from fast_agent.types import RequestParams
|
|
6
|
+
|
|
7
|
+
# HuggingFace Inference Providers router (OpenAI-compatible API).
HUGGINGFACE_BASE_URL = "https://router.huggingface.co/v1"
# Fallback model when the caller does not specify one.
DEFAULT_HUGGINGFACE_MODEL = "moonshotai/Kimi-K2-Instruct-0905"
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
class HuggingFaceLLM(OpenAICompatibleLLM):
    """HuggingFace router LLM (OpenAI-compatible).

    Model strings may carry a ``:provider`` suffix (e.g. ``org/model:groq``)
    selecting a specific inference provider; without one, a configured or
    env-supplied default provider is used, otherwise "auto-routing" applies.
    """

    def __init__(self, *args, **kwargs) -> None:
        # Resolved provider suffix, populated by _initialize_default_params.
        # Initialised before super().__init__ since default-param setup
        # (presumably invoked by the base constructor) writes it — TODO confirm.
        self._hf_provider_suffix: str | None = None
        super().__init__(*args, provider=Provider.HUGGINGFACE, **kwargs)

    def _initialize_default_params(self, kwargs: dict) -> RequestParams:
        """Initialize HuggingFace-specific default parameters"""
        # Copy so the caller's kwargs dict is not mutated.
        kwargs = kwargs.copy()
        requested_model = kwargs.get("model") or DEFAULT_HUGGINGFACE_MODEL
        # Split "org/model:provider" into base model and optional suffix.
        base_model, explicit_provider = self._split_provider_suffix(requested_model)
        base_model = base_model or requested_model
        kwargs["model"] = base_model

        # Determine which provider suffix to use
        provider_suffix = explicit_provider or self._resolve_default_provider()
        self._hf_provider_suffix = provider_suffix

        # Get base defaults from parent (includes ModelDatabase lookup)
        base_params = super()._initialize_default_params(kwargs)

        # Override with HuggingFace-specific settings
        base_params.model = base_model
        base_params.parallel_tool_calls = True

        return base_params

    def _base_url(self) -> str:
        """Return the configured HF router base URL, or the public default."""
        base_url = None
        if self.context.config and self.context.config.hf:
            base_url = self.context.config.hf.base_url

        return base_url if base_url else HUGGINGFACE_BASE_URL

    def _prepare_api_request(
        self, messages, tools: list | None, request_params: RequestParams
    ) -> dict[str, str]:
        # Rewrites the outgoing "model" argument to re-attach the provider
        # suffix so the router dispatches to the chosen inference provider.
        # NOTE(review): return annotation mirrors the parent's but the payload
        # likely holds non-str values too — confirm against the base class.
        arguments = super()._prepare_api_request(messages, tools, request_params)
        model_name = arguments.get("model")
        base_model, explicit_provider = self._split_provider_suffix(model_name)
        base_model = base_model or model_name
        if not base_model:
            # No model name present — nothing to rewrite.
            return arguments

        # An explicit suffix on the model string beats the resolved default.
        provider_suffix = explicit_provider or self._hf_provider_suffix
        if provider_suffix:
            arguments["model"] = f"{base_model}:{provider_suffix}"
        else:
            arguments["model"] = base_model
        return arguments

    def _resolve_default_provider(self) -> str | None:
        """Default provider: 'hf.default_provider' config, else HF_DEFAULT_PROVIDER env var."""
        config_provider = None
        if self.context and self.context.config and self.context.config.hf:
            config_provider = self.context.config.hf.default_provider
        env_provider = os.getenv("HF_DEFAULT_PROVIDER")
        return config_provider or env_provider

    @staticmethod
    def _split_provider_suffix(model: str | None) -> tuple[str | None, str | None]:
        """Split 'model:provider' on the last ':'; returns (model, None) when no suffix."""
        if not model or ":" not in model:
            return model, None
        base, suffix = model.rsplit(":", 1)
        if not base:
            # A leading ':' leaves no base model; treat the whole string as the model.
            return model, None
        return base, suffix or None

    def get_hf_display_info(self) -> dict[str, str]:
        """Return display information for HuggingFace model and provider.

        Returns:
            dict with 'model' and 'provider' keys
        """
        model = self.default_request_params.model if self.default_request_params else None
        provider = self._hf_provider_suffix or "auto-routing"
        return {"model": model or DEFAULT_HUGGINGFACE_MODEL, "provider": provider}
|