@xsai-ext/providers 0.4.0-beta.8

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/LICENSE.md ADDED
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2024 Moeru AI
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
package/README.md ADDED
@@ -0,0 +1 @@
1
+ https://xsai.js.org/docs/packages-ext/providers
@@ -0,0 +1,336 @@
1
+ import * as _xsai_ext_shared_providers from '@xsai-ext/shared-providers';
2
+ import { A as AnthropicModels, b as AzureModels, T as TogetheraiModels, C as CloudflareWorkersAiModels } from './openrouter-DCc0QhRt.js';
3
+ export { c as createOpenRouter } from './openrouter-DCc0QhRt.js';
4
+
5
+ /**
6
+ * Create a Moonshot AI (China) Provider
7
+ * @see {@link https://platform.moonshot.cn/docs/api/chat}
8
+ */
9
+ declare const createMoonshotaiCn: (apiKey: string, baseURL?: string) => _xsai_ext_shared_providers.ChatProvider<"kimi-k2-0905-preview" | "kimi-k2-0711-preview" | "kimi-k2-turbo-preview"> & _xsai_ext_shared_providers.ModelProvider;
10
+ /**
11
+ * Create a LucidQuery AI Provider
12
+ * @see {@link https://lucidquery.com/api/docs}
13
+ */
14
+ declare const createLucidquery: (apiKey: string, baseURL?: string) => _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"lucidquery-nexus-coder" | "lucidnova-rf1-100b">;
15
+ /**
16
+ * Create a Moonshot AI Provider
17
+ * @see {@link https://platform.moonshot.ai/docs/api/chat}
18
+ */
19
+ declare const createMoonshotai: (apiKey: string, baseURL?: string) => _xsai_ext_shared_providers.ChatProvider<"kimi-k2-0905-preview" | "kimi-k2-0711-preview" | "kimi-k2-turbo-preview"> & _xsai_ext_shared_providers.ModelProvider;
20
+ /**
21
+ * Create a Z.AI Coding Plan Provider
22
+ * @see {@link https://docs.z.ai/devpack/overview}
23
+ */
24
+ declare const createZaiCodingPlan: (apiKey: string, baseURL?: string) => _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"glm-4.5-flash" | "glm-4.5" | "glm-4.5-air" | "glm-4.5v" | "glm-4.6">;
25
+ /**
26
+ * Create an Alibaba Provider
27
+ * @see {@link https://www.alibabacloud.com/help/en/model-studio/models}
28
+ */
29
+ declare const createAlibaba: (apiKey: string, baseURL?: string) => _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"qwen3-livetranslate-flash-realtime" | "qwen3-asr-flash" | "qwen-omni-turbo" | "qwen-vl-max" | "qwen3-next-80b-a3b-instruct" | "qwen-turbo" | "qwen3-vl-235b-a22b" | "qwen3-coder-flash" | "qwen3-vl-30b-a3b" | "qwen3-14b" | "qvq-max" | "qwen-plus-character-ja" | "qwen2-5-14b-instruct" | "qwq-plus" | "qwen3-coder-30b-a3b-instruct" | "qwen-vl-ocr" | "qwen2-5-72b-instruct" | "qwen3-omni-flash" | "qwen-flash" | "qwen3-8b" | "qwen3-omni-flash-realtime" | "qwen2-5-vl-72b-instruct" | "qwen3-vl-plus" | "qwen-plus" | "qwen2-5-32b-instruct" | "qwen2-5-omni-7b" | "qwen-max" | "qwen2-5-7b-instruct" | "qwen2-5-vl-7b-instruct" | "qwen3-235b-a22b" | "qwen-omni-turbo-realtime" | "qwen-mt-turbo" | "qwen3-coder-480b-a35b-instruct" | "qwen-mt-plus" | "qwen3-max" | "qwen3-coder-plus" | "qwen3-next-80b-a3b-thinking" | "qwen3-32b" | "qwen-vl-plus">;
30
+ /**
31
+ * Create an xAI Provider
32
+ * @see {@link https://docs.x.ai/docs/models}
33
+ */
34
+ declare const createXai: (apiKey: string, baseURL?: string) => _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"grok-4-fast-non-reasoning" | "grok-3-fast" | "grok-4" | "grok-2-vision" | "grok-code-fast-1" | "grok-2" | "grok-3-mini-fast-latest" | "grok-2-vision-1212" | "grok-3" | "grok-4-fast" | "grok-2-latest" | "grok-2-1212" | "grok-3-fast-latest" | "grok-3-latest" | "grok-2-vision-latest" | "grok-vision-beta" | "grok-3-mini" | "grok-beta" | "grok-3-mini-latest" | "grok-3-mini-fast">;
35
+ /**
36
+ * Create a Vultr Provider
37
+ * @see {@link https://api.vultrinference.com/}
38
+ */
39
+ declare const createVultr: (apiKey: string, baseURL?: string) => _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"deepseek-r1-distill-qwen-32b" | "qwen2.5-coder-32b-instruct" | "kimi-k2-instruct" | "deepseek-r1-distill-llama-70b" | "gpt-oss-120b">;
40
+ /**
41
+ * Create a Nvidia Provider
42
+ * @see {@link https://docs.api.nvidia.com/nim/}
43
+ */
44
+ declare const createNvidia: (apiKey: string, baseURL?: string) => _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"moonshotai/kimi-k2-instruct-0905" | "moonshotai/kimi-k2-instruct" | "nvidia/cosmos-nemotron-34b" | "nvidia/llama-embed-nemotron-8b" | "nvidia/parakeet-tdt-0.6b-v2" | "nvidia/nemoretriever-ocr-v1" | "nvidia/llama-3.1-nemotron-ultra-253b-v1" | "minimaxai/minimax-m2" | "google/gemma-3-27b-it" | "microsoft/phi-4-mini-instruct" | "openai/whisper-large-v3" | "openai/gpt-oss-120b" | "qwen/qwen3-235b-a22b" | "qwen/qwen3-coder-480b-a35b-instruct" | "deepseek-ai/deepseek-v3.1-terminus" | "deepseek-ai/deepseek-v3.1" | "black-forest-labs/flux.1-dev">;
45
+ /**
46
+ * Create an Upstage Provider
47
+ * @see {@link https://developers.upstage.ai/docs/apis/chat}
48
+ */
49
+ declare const createUpstage: (apiKey: string, baseURL?: string) => _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"solar-mini" | "solar-pro2">;
50
+ /**
51
+ * Create a Groq Provider
52
+ * @see {@link https://console.groq.com/docs/models}
53
+ */
54
+ declare const createGroq: (apiKey: string, baseURL?: string) => _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"deepseek-r1-distill-llama-70b" | "moonshotai/kimi-k2-instruct-0905" | "moonshotai/kimi-k2-instruct" | "openai/gpt-oss-120b" | "llama-3.1-8b-instant" | "mistral-saba-24b" | "llama3-8b-8192" | "qwen-qwq-32b" | "llama3-70b-8192" | "llama-guard-3-8b" | "gemma2-9b-it" | "llama-3.3-70b-versatile" | "openai/gpt-oss-20b" | "qwen/qwen3-32b" | "meta-llama/llama-4-scout-17b-16e-instruct" | "meta-llama/llama-4-maverick-17b-128e-instruct" | "meta-llama/llama-guard-4-12b"> & _xsai_ext_shared_providers.EmbedProvider<string>;
55
+ /**
56
+ * Create a GitHub Copilot Provider
57
+ * @see {@link https://docs.github.com/en/copilot}
58
+ */
59
+ declare const createGithubCopilot: (apiKey: string, baseURL?: string) => _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"grok-code-fast-1" | "gemini-2.0-flash-001" | "claude-opus-4" | "claude-haiku-4.5" | "claude-3.5-sonnet" | "o3-mini" | "gpt-5-codex" | "gpt-4o" | "gpt-4.1" | "o4-mini" | "claude-opus-41" | "gpt-5-mini" | "claude-3.7-sonnet" | "gemini-2.5-pro" | "o3" | "claude-sonnet-4" | "gpt-5" | "claude-3.7-sonnet-thought" | "claude-sonnet-4.5">;
60
+ /**
61
+ * Create a Mistral Provider
62
+ * @see {@link https://docs.mistral.ai/getting-started/models/}
63
+ */
64
+ declare const createMistral: (apiKey: string, baseURL?: string) => _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.EmbedProvider<string> & _xsai_ext_shared_providers.ChatProvider<"devstral-medium-2507" | "open-mixtral-8x22b" | "ministral-8b-latest" | "pixtral-large-latest" | "ministral-3b-latest" | "pixtral-12b" | "mistral-medium-2505" | "devstral-small-2505" | "mistral-medium-2508" | "mistral-small-latest" | "magistral-small" | "devstral-small-2507" | "codestral-latest" | "open-mixtral-8x7b" | "mistral-nemo" | "open-mistral-7b" | "mistral-large-latest" | "mistral-medium-latest" | "magistral-medium-latest">;
65
+ /**
66
+ * Create a Nebius AI Studio Provider
67
+ * @see {@link https://docs.studio.nebius.com/quickstart}
68
+ */
69
+ declare const createNebius: (apiKey: string, baseURL?: string) => _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"moonshotai/kimi-k2-instruct" | "openai/gpt-oss-120b" | "qwen/qwen3-coder-480b-a35b-instruct" | "openai/gpt-oss-20b" | "NousResearch/hermes-4-70b" | "NousResearch/hermes-4-405b" | "nvidia/llama-3_1-nemotron-ultra-253b-v1" | "qwen/qwen3-235b-a22b-instruct-2507" | "qwen/qwen3-235b-a22b-thinking-2507" | "meta-llama/llama-3_1-405b-instruct" | "meta-llama/llama-3.3-70b-instruct-fast" | "meta-llama/llama-3.3-70b-instruct-base" | "zai-org/glm-4.5" | "zai-org/glm-4.5-air" | "deepseek-ai/deepseek-v3">;
70
+ /**
71
+ * Create a DeepSeek Provider
72
+ * @see {@link https://platform.deepseek.com/api-docs/pricing}
73
+ */
74
+ declare const createDeepSeek: (apiKey: string, baseURL?: string) => _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"deepseek-chat" | "deepseek-reasoner">;
75
+ /**
76
+ * Create an Alibaba (China) Provider
77
+ * @see {@link https://www.alibabacloud.com/help/en/model-studio/models}
78
+ */
79
+ declare const createAlibabaCn: (apiKey: string, baseURL?: string) => _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"qwen3-asr-flash" | "qwen-omni-turbo" | "qwen-vl-max" | "qwen3-next-80b-a3b-instruct" | "qwen-turbo" | "qwen3-vl-235b-a22b" | "qwen3-coder-flash" | "qwen3-vl-30b-a3b" | "qwen3-14b" | "qvq-max" | "qwen2-5-14b-instruct" | "qwq-plus" | "qwen3-coder-30b-a3b-instruct" | "qwen-vl-ocr" | "qwen2-5-72b-instruct" | "qwen3-omni-flash" | "qwen-flash" | "qwen3-8b" | "qwen3-omni-flash-realtime" | "qwen2-5-vl-72b-instruct" | "qwen3-vl-plus" | "qwen-plus" | "qwen2-5-32b-instruct" | "qwen2-5-omni-7b" | "qwen-max" | "qwen2-5-7b-instruct" | "qwen2-5-vl-7b-instruct" | "qwen3-235b-a22b" | "qwen-omni-turbo-realtime" | "qwen-mt-turbo" | "qwen3-coder-480b-a35b-instruct" | "qwen-mt-plus" | "qwen3-max" | "qwen3-coder-plus" | "qwen3-next-80b-a3b-thinking" | "qwen3-32b" | "qwen-vl-plus" | "deepseek-r1-distill-qwen-32b" | "deepseek-r1-distill-llama-70b" | "deepseek-r1-distill-qwen-7b" | "deepseek-r1-0528" | "deepseek-v3" | "deepseek-v3-2-exp" | "deepseek-r1" | "qwen-plus-character" | "qwen2-5-coder-32b-instruct" | "qwen-math-plus" | "qwen-doc-turbo" | "qwen-deep-research" | "qwen-long" | "qwen2-5-math-72b-instruct" | "moonshot-kimi-k2-instruct" | "tongyi-intent-detect-v3" | "deepseek-v3-1" | "qwen2-5-coder-7b-instruct" | "deepseek-r1-distill-qwen-14b" | "qwen-math-turbo" | "deepseek-r1-distill-llama-8b" | "qwq-32b" | "qwen2-5-math-7b-instruct" | "deepseek-r1-distill-qwen-1-5b">;
80
+ /**
81
+ * Create a Venice AI Provider
82
+ * @see {@link https://docs.venice.ai}
83
+ */
84
+ declare const createVenice: (apiKey: string, baseURL?: string) => _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"dolphin-2.9.2-qwen2-72b" | "mistral-31-24b" | "venice-uncensored" | "qwen-2.5-vl" | "qwen3-235b" | "qwen-2.5-qwq-32b" | "deepseek-coder-v2-lite" | "qwen3-4b" | "llama-3.3-70b" | "qwen-2.5-coder-32b" | "deepseek-r1-671b" | "llama-3.2-3b" | "llama-3.1-405b">;
85
+ /**
86
+ * Create a Chutes Provider
87
+ * @see {@link https://llm.chutes.ai/v1/models}
88
+ */
89
+ declare const createChutes: (apiKey: string, baseURL?: string) => _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"openai/gpt-oss-120b" | "moonshotai/Kimi-K2-Instruct-75k" | "moonshotai/Kimi-K2-Instruct-0905" | "moonshotai/Kimi-VL-A3B-Thinking" | "MiniMaxAI/MiniMax-M2" | "meituan-longcat/LongCat-Flash-Chat-FP8" | "tngtech/DeepSeek-R1T-Chimera" | "tngtech/DeepSeek-TNG-R1T2-Chimera" | "chutesai/Mistral-Small-3.2-24B-Instruct-2506" | "Qwen/Qwen3-30B-A3B" | "Qwen/Qwen3-235B-A22B-Instruct-2507" | "Qwen/Qwen3-Coder-30B-A3B-Instruct" | "Qwen/Qwen3-Coder-480B-A35B-Instruct-FP8" | "Qwen/Qwen3-30B-A3B-Instruct-2507" | "Qwen/Qwen3-235B-A22B-Thinking-2507" | "Qwen/Qwen3-Next-80B-A3B-Instruct" | "Qwen/Qwen3-Next-80B-A3B-Thinking" | "zai-org/GLM-4.5" | "zai-org/GLM-4.6-FP8" | "zai-org/GLM-4.6-turbo" | "zai-org/GLM-4.6" | "zai-org/GLM-4.5-FP8" | "zai-org/GLM-4.5-Air" | "deepseek-ai/DeepSeek-R1-0528-Qwen3-8B" | "deepseek-ai/DeepSeek-R1-0528" | "deepseek-ai/DeepSeek-V3.2-Exp" | "deepseek-ai/DeepSeek-V3.1-Terminus" | "deepseek-ai/DeepSeek-V3.1:THINKING" | "deepseek-ai/DeepSeek-R1-Distill-Llama-70B" | "deepseek-ai/DeepSeek-V3.1" | "deepseek-ai/DeepSeek-V3-0324">;
90
+ /**
91
+ * Create a Cortecs Provider
92
+ * @see {@link https://api.cortecs.ai/v1/models}
93
+ */
94
+ declare const createCortecs: (apiKey: string, baseURL?: string) => _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"qwen3-coder-480b-a35b-instruct" | "qwen3-32b" | "kimi-k2-instruct" | "gpt-oss-120b" | "gpt-4.1" | "gemini-2.5-pro" | "claude-sonnet-4" | "nova-pro-v1" | "claude-4-5-sonnet" | "deepseek-v3-0324" | "llama-3.1-405b-instruct">;
95
+ /**
96
+ * Create a GitHub Models Provider
97
+ * @see {@link https://docs.github.com/en/github-models}
98
+ */
99
+ declare const createGithubModels: (apiKey: string, baseURL?: string) => _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"microsoft/phi-4-mini-instruct" | "core42/jais-30b-chat" | "xai/grok-3" | "xai/grok-3-mini" | "cohere/cohere-command-r-08-2024" | "cohere/cohere-command-a" | "cohere/cohere-command-r-plus-08-2024" | "cohere/cohere-command-r" | "cohere/cohere-command-r-plus" | "deepseek/deepseek-r1-0528" | "deepseek/deepseek-r1" | "deepseek/deepseek-v3-0324" | "mistral-ai/mistral-medium-2505" | "mistral-ai/ministral-3b" | "mistral-ai/mistral-nemo" | "mistral-ai/mistral-large-2411" | "mistral-ai/codestral-2501" | "mistral-ai/mistral-small-2503" | "microsoft/phi-3-medium-128k-instruct" | "microsoft/phi-3-mini-4k-instruct" | "microsoft/phi-3-small-128k-instruct" | "microsoft/phi-3.5-vision-instruct" | "microsoft/phi-4" | "microsoft/phi-4-mini-reasoning" | "microsoft/phi-3-small-8k-instruct" | "microsoft/phi-3.5-mini-instruct" | "microsoft/phi-4-multimodal-instruct" | "microsoft/phi-3-mini-128k-instruct" | "microsoft/phi-3.5-moe-instruct" | "microsoft/phi-3-medium-4k-instruct" | "microsoft/phi-4-reasoning" | "microsoft/mai-ds-r1" | "openai/gpt-4.1-nano" | "openai/gpt-4.1-mini" | "openai/o1-preview" | "openai/o3-mini" | "openai/gpt-4o" | "openai/gpt-4.1" | "openai/o4-mini" | "openai/o1" | "openai/o1-mini" | "openai/o3" | "openai/gpt-4o-mini" | "meta/llama-3.2-11b-vision-instruct" | "meta/meta-llama-3.1-405b-instruct" | "meta/llama-4-maverick-17b-128e-instruct-fp8" | "meta/meta-llama-3-70b-instruct" | "meta/meta-llama-3.1-70b-instruct" | "meta/llama-3.3-70b-instruct" | "meta/llama-3.2-90b-vision-instruct" | "meta/meta-llama-3-8b-instruct" | "meta/llama-4-scout-17b-16e-instruct" | "meta/meta-llama-3.1-8b-instruct" | "ai21-labs/ai21-jamba-1.5-large" | "ai21-labs/ai21-jamba-1.5-mini">;
100
+ /**
101
+ * Create a Baseten Provider
102
+ * @see {@link https://docs.baseten.co/development/model-apis/overview}
103
+ */
104
+ declare const createBaseten: (apiKey: string, baseURL?: string) => _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"moonshotai/Kimi-K2-Instruct-0905" | "zai-org/GLM-4.6" | "Qwen3/Qwen3-Coder-480B-A35B-Instruct">;
105
+ /**
106
+ * Create a Hugging Face Provider
107
+ * @see {@link https://huggingface.co/docs/inference-providers}
108
+ */
109
+ declare const createHuggingface: (apiKey: string, baseURL?: string) => _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"moonshotai/Kimi-K2-Instruct-0905" | "MiniMaxAI/MiniMax-M2" | "Qwen/Qwen3-235B-A22B-Thinking-2507" | "Qwen/Qwen3-Next-80B-A3B-Instruct" | "Qwen/Qwen3-Next-80B-A3B-Thinking" | "zai-org/GLM-4.5" | "zai-org/GLM-4.6" | "zai-org/GLM-4.5-Air" | "deepseek-ai/DeepSeek-R1-0528" | "moonshotai/Kimi-K2-Instruct" | "Qwen/Qwen3-Embedding-8B" | "Qwen/Qwen3-Embedding-4B" | "Qwen/Qwen3-Coder-480B-A35B-Instruct" | "deepseek-ai/Deepseek-V3-0324">;
110
+ /**
111
+ * Create an OpenCode Zen Provider
112
+ * @see {@link https://opencode.ai/docs/zen}
113
+ */
114
+ declare const createOpencode: (apiKey: string, baseURL?: string) => _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"glm-4.6" | "gpt-5-codex" | "claude-sonnet-4" | "gpt-5" | "qwen3-coder" | "claude-opus-4-1" | "kimi-k2" | "claude-haiku-4-5" | "minimax-m2" | "claude-sonnet-4-5" | "an-gbt" | "big-pickle" | "claude-3-5-haiku" | "grok-code">;
115
+ /**
116
+ * Create a FastRouter Provider
117
+ * @see {@link https://fastrouter.ai/models}
118
+ */
119
+ declare const createFastrouter: (apiKey: string, baseURL?: string) => _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"openai/gpt-oss-120b" | "openai/gpt-oss-20b" | "openai/gpt-4.1" | "moonshotai/kimi-k2" | "x-ai/grok-4" | "google/gemini-2.5-flash" | "google/gemini-2.5-pro" | "openai/gpt-5-nano" | "openai/gpt-5-mini" | "openai/gpt-5" | "qwen/qwen3-coder" | "anthropic/claude-opus-4.1" | "anthropic/claude-sonnet-4" | "deepseek-ai/deepseek-r1-distill-llama-70b">;
120
+ /**
121
+ * Create a Google Provider
122
+ * @see {@link https://ai.google.dev/gemini-api/docs/pricing}
123
+ */
124
+ declare const createGoogleGenerativeAI: (apiKey: string, baseURL?: string) => _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.EmbedProvider<string> & _xsai_ext_shared_providers.ChatProvider<"gemini-2.5-pro" | "gemini-embedding-001" | "gemini-2.5-flash-image" | "gemini-2.5-flash-preview-05-20" | "gemini-flash-lite-latest" | "gemini-2.5-flash" | "gemini-flash-latest" | "gemini-2.5-pro-preview-05-06" | "gemini-2.5-flash-preview-tts" | "gemini-2.0-flash-lite" | "gemini-live-2.5-flash-preview-native-audio" | "gemini-2.0-flash" | "gemini-2.5-flash-lite" | "gemini-2.5-pro-preview-06-05" | "gemini-live-2.5-flash" | "gemini-2.5-flash-lite-preview-06-17" | "gemini-2.5-flash-image-preview" | "gemini-2.5-flash-preview-09-2025" | "gemini-2.5-flash-preview-04-17" | "gemini-2.5-pro-preview-tts" | "gemini-1.5-flash" | "gemini-1.5-flash-8b" | "gemini-2.5-flash-lite-preview-09-2025" | "gemini-1.5-pro">;
125
+ /**
126
+ * Create an Inception Provider
127
+ * @see {@link https://platform.inceptionlabs.ai/docs}
128
+ */
129
+ declare const createInception: (apiKey: string, baseURL?: string) => _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"mercury-coder" | "mercury">;
130
+ /**
131
+ * Create a Weights & Biases Provider
132
+ * @see {@link https://weave-docs.wandb.ai/guides/integrations/inference/}
133
+ */
134
+ declare const createWandb: (apiKey: string, baseURL?: string) => _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"Qwen/Qwen3-235B-A22B-Instruct-2507" | "Qwen/Qwen3-235B-A22B-Thinking-2507" | "deepseek-ai/DeepSeek-R1-0528" | "deepseek-ai/DeepSeek-V3-0324" | "moonshotai/Kimi-K2-Instruct" | "Qwen/Qwen3-Coder-480B-A35B-Instruct" | "microsoft/Phi-4-mini-instruct" | "meta-llama/Llama-3.1-8B-Instruct" | "meta-llama/Llama-3.3-70B-Instruct" | "meta-llama/Llama-4-Scout-17B-16E-Instruct">;
135
+ /**
136
+ * Create an OpenAI Provider
137
+ * @see {@link https://platform.openai.com/docs/models}
138
+ */
139
+ declare const createOpenAI: (apiKey: string, baseURL?: string) => _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.EmbedProvider<string> & _xsai_ext_shared_providers.ChatProvider<"o3-mini" | "gpt-5-codex" | "gpt-4o" | "gpt-4.1" | "o4-mini" | "gpt-5-mini" | "o3" | "gpt-5" | "gpt-4.1-nano" | "text-embedding-3-small" | "gpt-4" | "o1-pro" | "gpt-4o-2024-05-13" | "gpt-4o-2024-08-06" | "gpt-4.1-mini" | "o3-deep-research" | "gpt-3.5-turbo" | "text-embedding-3-large" | "gpt-4-turbo" | "o1-preview" | "codex-mini-latest" | "gpt-5-nano" | "o1" | "o1-mini" | "text-embedding-ada-002" | "o3-pro" | "gpt-4o-2024-11-20" | "o4-mini-deep-research" | "gpt-5-chat-latest" | "gpt-4o-mini" | "gpt-5-pro"> & _xsai_ext_shared_providers.ImageProvider<string> & _xsai_ext_shared_providers.SpeechProvider<string> & _xsai_ext_shared_providers.TranscriptionProvider<string>;
140
+ /**
141
+ * Create a Zhipu AI Coding Plan Provider
142
+ * @see {@link https://docs.bigmodel.cn/cn/coding-plan/overview}
143
+ */
144
+ declare const createZhipuaiCodingPlan: (apiKey: string, baseURL?: string) => _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"glm-4.5-flash" | "glm-4.5" | "glm-4.5-air" | "glm-4.5v" | "glm-4.6">;
145
+ /**
146
+ * Create a Perplexity Provider
147
+ * @see {@link https://docs.perplexity.ai}
148
+ */
149
+ declare const createPerplexity: (apiKey: string, baseURL?: string) => _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"sonar-reasoning" | "sonar" | "sonar-pro" | "sonar-reasoning-pro">;
150
+ /**
151
+ * Create a ZenMux Provider
152
+ * @see {@link https://docs.zenmux.ai}
153
+ */
154
+ declare const createZenmux: (apiKey: string, baseURL?: string) => _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"x-ai/grok-4" | "google/gemini-2.5-pro" | "openai/gpt-5" | "anthropic/claude-opus-4.1" | "moonshotai/kimi-k2-0905" | "x-ai/grok-4-fast-non-reasoning" | "x-ai/grok-code-fast-1" | "x-ai/grok-4-fast" | "deepseek/deepseek-chat" | "openai/gpt-5-codex" | "inclusionai/ring-1t" | "inclusionai/lint-1t" | "z-ai/glm-4.5-air" | "z-ai/glm-4.6" | "qwen/qwen3-coder-plus" | "kuaishou/kat-coder-pro-v1" | "anthropic/claude-haiku-4.5" | "anthropic/claude-sonnet-4.5">;
155
+ /**
156
+ * Create an iFlow Provider
157
+ * @see {@link https://platform.iflow.cn/en/docs}
158
+ */
159
+ declare const createIflowcn: (apiKey: string, baseURL?: string) => _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"glm-4.6" | "qwen3-vl-plus" | "qwen3-max" | "qwen3-coder-plus" | "qwen3-32b" | "deepseek-v3" | "deepseek-r1" | "qwen3-235b" | "qwen3-coder" | "kimi-k2" | "deepseek-v3.1" | "kimi-k2-0905" | "qwen3-235b-a22b-thinking-2507" | "tstars2.0" | "qwen3-235b-a22b-instruct" | "deepseek-v3.2" | "qwen3-max-preview">;
160
+ /**
161
+ * Create a Synthetic Provider
162
+ * @see {@link https://synthetic.new/pricing}
163
+ */
164
+ declare const createSynthetic: (apiKey: string, baseURL?: string) => _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"hf:Qwen/Qwen3-235B-A22B-Instruct-2507" | "hf:Qwen/Qwen2.5-Coder-32B-Instruct" | "hf:Qwen/Qwen3-Coder-480B-A35B-Instruct" | "hf:Qwen/Qwen3-235B-A22B-Thinking-2507" | "hf:MiniMaxAI/MiniMax-M2" | "hf:meta-llama/Llama-3.1-70B-Instruct" | "hf:meta-llama/Llama-3.1-8B-Instruct" | "hf:meta-llama/Llama-3.3-70B-Instruct" | "hf:meta-llama/Llama-4-Scout-17B-16E-Instruct" | "hf:meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8" | "hf:meta-llama/Llama-3.1-405B-Instruct" | "hf:moonshotai/Kimi-K2-Instruct" | "hf:moonshotai/Kimi-K2-Instruct-0905" | "hf:zai-org/GLM-4.5" | "hf:zai-org/GLM-4.6" | "hf:deepseek-ai/DeepSeek-R1" | "hf:deepseek-ai/DeepSeek-R1-0528" | "hf:deepseek-ai/DeepSeek-V3.1-Terminus" | "hf:deepseek-ai/DeepSeek-V3" | "hf:deepseek-ai/DeepSeek-V3.1" | "hf:deepseek-ai/DeepSeek-V3-0324" | "hf:openai/gpt-oss-120b">;
165
+ /**
166
+ * Create a Deep Infra Provider
167
+ * @see {@link https://deepinfra.com/models}
168
+ */
169
+ declare const createDeepinfra: (apiKey: string, baseURL?: string) => _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.EmbedProvider<string> & _xsai_ext_shared_providers.ChatProvider<"zai-org/GLM-4.5" | "moonshotai/Kimi-K2-Instruct" | "Qwen/Qwen3-Coder-480B-A35B-Instruct" | "Qwen/Qwen3-Coder-480B-A35B-Instruct-Turbo">;
170
+ /**
171
+ * Create a Zhipu AI Provider
172
+ * @see {@link https://docs.z.ai/guides/overview/pricing}
173
+ */
174
+ declare const createZhipuai: (apiKey: string, baseURL?: string) => _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"glm-4.5-flash" | "glm-4.5" | "glm-4.5-air" | "glm-4.5v" | "glm-4.6">;
175
+ /**
176
+ * Create a submodel Provider
177
+ * @see {@link https://submodel.gitbook.io}
178
+ */
179
+ declare const createSubmodel: (apiKey: string, baseURL?: string) => _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"openai/gpt-oss-120b" | "Qwen/Qwen3-235B-A22B-Instruct-2507" | "Qwen/Qwen3-Coder-480B-A35B-Instruct-FP8" | "Qwen/Qwen3-235B-A22B-Thinking-2507" | "zai-org/GLM-4.5-FP8" | "zai-org/GLM-4.5-Air" | "deepseek-ai/DeepSeek-R1-0528" | "deepseek-ai/DeepSeek-V3.1" | "deepseek-ai/DeepSeek-V3-0324">;
180
+ /**
181
+ * Create a Z.AI Provider
182
+ * @see {@link https://docs.z.ai/guides/overview/pricing}
183
+ */
184
+ declare const createZai: (apiKey: string, baseURL?: string) => _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"glm-4.5-flash" | "glm-4.5" | "glm-4.5-air" | "glm-4.5v" | "glm-4.6">;
185
+ /**
186
+ * Create an Inference Provider
187
+ * @see {@link https://inference.net/models}
188
+ */
189
+ declare const createInference: (apiKey: string, baseURL?: string) => _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"meta/llama-3.2-11b-vision-instruct" | "mistral/mistral-nemo-12b-instruct" | "google/gemma-3" | "osmosis/osmosis-structure-0.6b" | "qwen/qwen3-embedding-4b" | "qwen/qwen-2.5-7b-vision-instruct" | "meta/llama-3.1-8b-instruct" | "meta/llama-3.2-3b-instruct" | "meta/llama-3.2-1b-instruct">;
190
+ /**
191
+ * Create a Requesty Provider
192
+ * @see {@link https://requesty.ai/solution/llm-routing/models}
193
+ */
194
+ declare const createRequesty: (apiKey: string, baseURL?: string) => _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"openai/gpt-4.1-mini" | "openai/gpt-4.1" | "openai/o4-mini" | "openai/gpt-4o-mini" | "google/gemini-2.5-flash" | "google/gemini-2.5-pro" | "openai/gpt-5-nano" | "openai/gpt-5-mini" | "openai/gpt-5" | "anthropic/claude-opus-4" | "anthropic/claude-3-7-sonnet" | "anthropic/claude-4-sonnet-20250522" | "anthropic/claude-opus-4-1-20250805">;
195
+ /**
196
+ * Create a Morph Provider
197
+ * @see {@link https://docs.morphllm.com/api-reference/introduction}
198
+ */
199
+ declare const createMorph: (apiKey: string, baseURL?: string) => _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"morph-v3-large" | "auto" | "morph-v3-fast">;
200
+ /**
201
+ * Create a LMStudio Provider
202
+ * @see {@link https://lmstudio.ai/models}
203
+ */
204
+ declare const createLmstudio: (apiKey: string, baseURL?: string) => _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"openai/gpt-oss-20b" | "qwen/qwen3-30b-a3b-2507" | "qwen/qwen3-coder-30b">;
205
+ /**
206
+ * Create a Fireworks AI Provider
207
+ * @see {@link https://fireworks.ai/docs/}
208
+ */
209
+ declare const createFireworks: (apiKey: string, baseURL?: string) => _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"accounts/fireworks/models/deepseek-r1-0528" | "accounts/fireworks/models/deepseek-v3p1" | "accounts/fireworks/models/minimax-m2" | "accounts/fireworks/models/deepseek-v3-0324" | "accounts/fireworks/models/kimi-k2-instruct" | "accounts/fireworks/models/qwen3-235b-a22b" | "accounts/fireworks/models/gpt-oss-20b" | "accounts/fireworks/models/gpt-oss-120b" | "accounts/fireworks/models/glm-4p5-air" | "accounts/fireworks/models/qwen3-coder-480b-a35b-instruct" | "accounts/fireworks/models/glm-4p5">;
210
+ /**
211
+ * Create a ModelScope Provider
212
+ * @see {@link https://modelscope.cn/docs/model-service/API-Inference/intro}
213
+ */
214
+ declare const createModelscope: (apiKey: string, baseURL?: string) => _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"Qwen/Qwen3-235B-A22B-Instruct-2507" | "Qwen/Qwen3-Coder-30B-A3B-Instruct" | "Qwen/Qwen3-30B-A3B-Instruct-2507" | "Qwen/Qwen3-235B-A22B-Thinking-2507" | "ZhipuAI/GLM-4.5" | "ZhipuAI/GLM-4.6" | "Qwen/Qwen3-30B-A3B-Thinking-2507">;
215
+ /**
216
+ * Create a Llama Provider
217
+ * @see {@link https://llama.developer.meta.com/docs/models}
218
+ */
219
+ declare const createLlama: (apiKey: string, baseURL?: string) => _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"llama-3.3-8b-instruct" | "llama-4-maverick-17b-128e-instruct-fp8" | "llama-3.3-70b-instruct" | "llama-4-scout-17b-16e-instruct-fp8" | "groq-llama-4-maverick-17b-128e-instruct" | "cerebras-llama-4-scout-17b-16e-instruct" | "cerebras-llama-4-maverick-17b-128e-instruct">;
220
+ /**
221
+ * Create a Scaleway Provider
222
+ * @see {@link https://www.scaleway.com/en/docs/generative-apis/}
223
+ */
224
+ declare const createScaleway: (apiKey: string, baseURL?: string) => _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"qwen3-coder-30b-a3b-instruct" | "deepseek-r1-distill-llama-70b" | "gpt-oss-120b" | "llama-3.3-70b-instruct" | "qwen3-235b-a22b-instruct-2507" | "pixtral-12b-2409" | "llama-3.1-8b-instruct" | "mistral-nemo-instruct-2407" | "mistral-small-3.2-24b-instruct-2506" | "whisper-large-v3" | "voxtral-small-24b-2507" | "bge-multilingual-gemma2" | "gemma-3-27b-it">;
225
+ /**
226
+ * Create a Cerebras Provider
227
+ * @see {@link https://inference-docs.cerebras.ai/models/overview}
228
+ */
229
+ declare const createCerebras: (apiKey: string, baseURL?: string) => _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.EmbedProvider<string> & _xsai_ext_shared_providers.ChatProvider<"gpt-oss-120b" | "qwen-3-235b-a22b-instruct-2507" | "zai-glm-4.6" | "qwen-3-coder-480b">;
230
+ /**
231
+ * Create a Minimax Provider
232
+ * @see {@link https://platform.minimax.io/docs/api-reference/text-openai-api}
233
+ */
234
+ declare const createMinimax: (apiKey: string, baseURL?: string) => _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<string>;
235
+ /**
236
+ * Create a Minimaxi Provider
237
+ * @see {@link https://platform.minimaxi.com/docs/api-reference/text-openai-api}
238
+ */
239
+ declare const createMinimaxi: (apiKey: string, baseURL?: string) => _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<string>;
240
+ /**
241
+ * Create a Novita AI Provider
242
+ * @see {@link https://novita.ai/docs/guides/llm-api#api-integration}
243
+ */
244
+ declare const createNovita: (apiKey: string, baseURL?: string) => _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<string>;
245
+ /**
246
+ * Create a SiliconFlow Provider
247
+ * @see {@link https://docs.siliconflow.com/en/userguide/quickstart#4-3-call-via-openai-interface}
248
+ */
249
+ declare const createSiliconFlow: (apiKey: string, baseURL?: string) => _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.EmbedProvider<string> & _xsai_ext_shared_providers.SpeechProvider<string> & _xsai_ext_shared_providers.TranscriptionProvider<string> & _xsai_ext_shared_providers.ChatProvider<string>;
250
+ /**
251
+ * Create a StepFun Provider
252
+ * @see {@link https://www.stepfun.com}
253
+ */
254
+ declare const createStepfun: (apiKey: string, baseURL?: string) => _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.EmbedProvider<string> & _xsai_ext_shared_providers.SpeechProvider<string> & _xsai_ext_shared_providers.TranscriptionProvider<string> & _xsai_ext_shared_providers.ChatProvider<string>;
255
/**
 * Create a Tencent Hunyuan Provider
 * @param apiKey - Tencent Hunyuan API key used for authorization.
 * @param baseURL - optional override for the provider's default endpoint URL.
 * @see {@link https://cloud.tencent.com/document/product/1729}
 */
declare const createTencentHunyuan: (apiKey: string, baseURL?: string) => _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.EmbedProvider<string> & _xsai_ext_shared_providers.ChatProvider<string>;
260
+
261
/**
 * Create an Anthropic Provider
 *
 * Chat over Anthropic's OpenAI SDK compatibility layer; the model
 * parameter is constrained to known Anthropic model ids.
 * @param apiKey - Anthropic API key used for authorization.
 * @param baseURL - optional override for the provider's default endpoint URL.
 * @see {@link https://docs.claude.com/en/api/openai-sdk}
 */
declare const createAnthropic: (apiKey: string, baseURL?: string) => _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<AnthropicModels>;
266
+
267
/**
 * Options accepted by {@link createAzure}.
 */
interface CreateAzureOptions {
    /**
     * The static API key or AD access token fetcher for authorization.
     *
     * If passed in as a function, it is treated as an accessTokenFetcher.
     *
     * @see {@link https://learn.microsoft.com/en-us/azure/api-management/api-management-authenticate-authorize-azure-openai}
     */
    apiKey: (() => Promise<string> | string) | string;
    /**
     * The Azure API version to use (`api-version` param).
     *
     * Notice: Different deployments over different time may have different API versions, please
     * follow the exact prompt from either [Azure AI Foundry](https://ai.azure.com/) or Azure OpenAI service
     * to get the correct API version from the Azure OpenAI Service endpoint.
     *
     * On the Azure AI Foundry portal, you can go to https://ai.azure.com/build/overview > Choose the project >
     * Overview > Endpoints and keys > Included capabilities > Azure OpenAI Service to get the correct endpoint.
     *
     * When omitted, no `api-version` query parameter is sent.
     *
     * @see {@link https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#rest-api-versioning}
     */
    apiVersion?: string;
    /**
     * Azure resource name (the `{resourceName}` part of the
     * `https://{resourceName}.services.ai.azure.com` endpoint).
     *
     * On the Azure AI Foundry portal, you can go to https://ai.azure.com/build/overview > Choose the project >
     * Overview > Endpoints and keys > Included capabilities > Azure OpenAI Service to get the correct endpoint.
     *
     * @see {@link https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#uri-parameters}
     */
    resourceName: string;
}
299
/**
 * Create an Azure Provider
 * @param options - see {@link CreateAzureOptions} (`apiKey`, optional `apiVersion`, `resourceName`).
 * @returns a promise resolving to a merged model / embed / speech / transcription / chat provider.
 * @see {@link https://ai.azure.com/explore/models}
 * @see {@link https://learn.microsoft.com/en-us/azure/ai-foundry/model-inference/concepts/endpoints?tabs=rest#routing}
 * @remarks
 * For Azure AI services, you can have multiple deployments of the same model with different names.
 *
 * Please pass your deployment name as the `model` parameter. By default, Azure will use the model name
 * as the deployment name when deploying a model.
 */
declare const createAzure: (options: CreateAzureOptions) => Promise<_xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.EmbedProvider<string> & _xsai_ext_shared_providers.SpeechProvider<string> & _xsai_ext_shared_providers.TranscriptionProvider<string> & _xsai_ext_shared_providers.ChatProvider<AzureModels>>;
310
+
311
/**
 * Create a Featherless AI Provider
 * @param apiKey - Featherless AI API key used for authorization.
 * @param baseURL - optional override for the provider's default endpoint URL.
 * @see {@link https://featherless.ai/models}
 */
declare const createFeatherless: (apiKey: string, baseURL?: string) => _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<string>;
316
+
317
/** @deprecated use `createFeatherless` instead (this misspelled name is kept only for backward compatibility). */
declare const createFatherless: (apiKey: string, baseURL?: string) => _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<string>;
/** @deprecated use `createAlibabaCn` instead; same provider under its current name. */
declare const createQwen: (apiKey: string, baseURL?: string) => _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"qwen3-asr-flash" | "qwen-omni-turbo" | "qwen-vl-max" | "qwen3-next-80b-a3b-instruct" | "qwen-turbo" | "qwen3-vl-235b-a22b" | "qwen3-coder-flash" | "qwen3-vl-30b-a3b" | "qwen3-14b" | "qvq-max" | "qwen2-5-14b-instruct" | "qwq-plus" | "qwen3-coder-30b-a3b-instruct" | "qwen-vl-ocr" | "qwen2-5-72b-instruct" | "qwen3-omni-flash" | "qwen-flash" | "qwen3-8b" | "qwen3-omni-flash-realtime" | "qwen2-5-vl-72b-instruct" | "qwen3-vl-plus" | "qwen-plus" | "qwen2-5-32b-instruct" | "qwen2-5-omni-7b" | "qwen-max" | "qwen2-5-7b-instruct" | "qwen2-5-vl-7b-instruct" | "qwen3-235b-a22b" | "qwen-omni-turbo-realtime" | "qwen-mt-turbo" | "qwen3-coder-480b-a35b-instruct" | "qwen-mt-plus" | "qwen3-max" | "qwen3-coder-plus" | "qwen3-next-80b-a3b-thinking" | "qwen3-32b" | "qwen-vl-plus" | "deepseek-r1-distill-qwen-32b" | "deepseek-r1-distill-llama-70b" | "deepseek-r1-distill-qwen-7b" | "deepseek-r1-0528" | "deepseek-v3" | "deepseek-v3-2-exp" | "deepseek-r1" | "qwen-plus-character" | "qwen2-5-coder-32b-instruct" | "qwen-math-plus" | "qwen-doc-turbo" | "qwen-deep-research" | "qwen-long" | "qwen2-5-math-72b-instruct" | "moonshot-kimi-k2-instruct" | "tongyi-intent-detect-v3" | "deepseek-v3-1" | "qwen2-5-coder-7b-instruct" | "deepseek-r1-distill-qwen-14b" | "qwen-math-turbo" | "deepseek-r1-distill-llama-8b" | "qwq-32b" | "qwen2-5-math-7b-instruct" | "deepseek-r1-distill-qwen-1-5b">;
/** @deprecated use `createXai` instead; same provider under its current name. */
declare const createXAI: (apiKey: string, baseURL?: string) => _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"grok-4-fast-non-reasoning" | "grok-3-fast" | "grok-4" | "grok-2-vision" | "grok-code-fast-1" | "grok-2" | "grok-3-mini-fast-latest" | "grok-2-vision-1212" | "grok-3" | "grok-4-fast" | "grok-2-latest" | "grok-2-1212" | "grok-3-fast-latest" | "grok-3-latest" | "grok-2-vision-latest" | "grok-vision-beta" | "grok-3-mini" | "grok-beta" | "grok-3-mini-latest" | "grok-3-mini-fast">;
323
+
324
/**
 * Create a Together AI Provider
 *
 * Exposes model listing, embeddings and chat; chat models are
 * constrained to known Together AI model ids.
 * @param apiKey - Together AI API key used for authorization.
 * @param baseURL - optional override for the provider's default endpoint URL.
 * @see {@link https://docs.together.ai/docs/serverless-models}
 */
declare const createTogetherAI: (apiKey: string, baseURL?: string) => _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.EmbedProvider<string> & _xsai_ext_shared_providers.ChatProvider<TogetheraiModels>;
329
+
330
/**
 * Create a Workers AI Provider
 *
 * Note: unlike most factories here, the second parameter is the Cloudflare
 * account id (used to build the endpoint URL), not a base URL override.
 * @param apiKey - Cloudflare API token used for authorization.
 * @param accountId - Cloudflare account id the requests are routed to.
 * @see {@link https://developers.cloudflare.com/workers-ai}
 */
declare const createWorkersAI: (apiKey: string, accountId: string) => _xsai_ext_shared_providers.EmbedProvider<string> & _xsai_ext_shared_providers.ChatProvider<CloudflareWorkersAiModels>;
335
+
336
+ export { createAlibaba, createAlibabaCn, createAnthropic, createAzure, createBaseten, createCerebras, createChutes, createCortecs, createDeepSeek, createDeepinfra, createFastrouter, createFatherless, createFeatherless, createFireworks, createGithubCopilot, createGithubModels, createGoogleGenerativeAI, createGroq, createHuggingface, createIflowcn, createInception, createInference, createLlama, createLmstudio, createLucidquery, createMinimax, createMinimaxi, createMistral, createModelscope, createMoonshotai, createMoonshotaiCn, createMorph, createNebius, createNovita, createNvidia, createOpenAI, createOpencode, createPerplexity, createQwen, createRequesty, createScaleway, createSiliconFlow, createStepfun, createSubmodel, createSynthetic, createTencentHunyuan, createTogetherAI, createUpstage, createVenice, createVultr, createWandb, createWorkersAI, createXAI, createXai, createZai, createZaiCodingPlan, createZenmux, createZhipuai, createZhipuaiCodingPlan };
package/dist/create.js ADDED
@@ -0,0 +1,43 @@
1
+ import { _ as createFeatherless, o as createAlibabaCn, f as createXai } from './together-ai-DXREpfi4.js';
2
+ export { e as createAlibaba, Z as createAnthropic, t as createBaseten, S as createCerebras, q as createChutes, r as createCortecs, n as createDeepSeek, G as createDeepinfra, w as createFastrouter, O as createFireworks, k as createGithubCopilot, s as createGithubModels, x as createGoogleGenerativeAI, j as createGroq, u as createHuggingface, E as createIflowcn, y as createInception, K as createInference, Q as createLlama, N as createLmstudio, a as createLucidquery, T as createMinimax, U as createMinimaxi, l as createMistral, P as createModelscope, b as createMoonshotai, c as createMoonshotaiCn, M as createMorph, m as createNebius, V as createNovita, h as createNvidia, A as createOpenAI, $ as createOpenRouter, v as createOpencode, C as createPerplexity, L as createRequesty, R as createScaleway, W as createSiliconFlow, X as createStepfun, I as createSubmodel, F as createSynthetic, Y as createTencentHunyuan, a0 as createTogetherAI, i as createUpstage, p as createVenice, g as createVultr, z as createWandb, J as createZai, d as createZaiCodingPlan, D as createZenmux, H as createZhipuai, B as createZhipuaiCodingPlan } from './together-ai-DXREpfi4.js';
3
+ import { merge, createChatProvider, createEmbedProvider, createSpeechProvider, createTranscriptionProvider, createModelProvider } from '@xsai-ext/shared-providers';
4
+ import '@xsai/shared';
5
+
6
/**
 * Create an Azure Provider.
 *
 * Builds a provider bundle routed to
 * `https://{resourceName}.services.ai.azure.com/models/`. Each request goes
 * through a custom fetch that appends the `api-version` query parameter
 * (when configured) and attaches an `Authorization: Bearer …` header,
 * resolving `options.apiKey` per request when it is an access-token fetcher.
 *
 * @param {object} options - `{ apiKey, apiVersion?, resourceName }` (see `CreateAzureOptions`).
 * @returns {Promise<object>} merged chat / embed / speech / transcription / model provider.
 */
const createAzure = async (options) => {
  // A static key is additionally sent via the Azure-specific `api-key` header;
  // token-fetcher keys rely on the Authorization header only.
  const headers = typeof options.apiKey === "string" ? { "api-key": options.apiKey } : void 0;
  const baseURL = `https://${options.resourceName}.services.ai.azure.com/models/`;
  const fetch = async (input, init) => {
    // Fix: callers may invoke fetch without an init object; the previous
    // code dereferenced `init.headers` unconditionally and threw.
    init ??= {};
    let url = input;
    if (options.apiVersion != null) {
      // Fix: `input` may be a string or a Request, not only a URL instance;
      // normalize before touching `searchParams` (previously a TypeError).
      // NOTE(review): for a Request input only its URL is kept here — method/
      // body are expected to arrive via `init`, as the providers pass them.
      url = input instanceof URL ? input : new URL(typeof input === "string" ? input : input.url);
      url.searchParams.set("api-version", options.apiVersion);
    }
    // Resolve lazily so AD access-token fetchers produce a fresh token per request.
    const token = `Bearer ${typeof options.apiKey === "function" ? await options.apiKey() : options.apiKey}`;
    init.headers ??= {};
    // `HeadersInit` may be an entries array, a Headers object, or a plain record.
    if (Array.isArray(init.headers))
      init.headers.push(["Authorization", token]);
    else if (init.headers instanceof Headers)
      init.headers.append("Authorization", token);
    else
      init.headers.Authorization = token;
    return globalThis.fetch(url, init);
  };
  return merge(
    createChatProvider({ baseURL, fetch, headers }),
    createEmbedProvider({ baseURL, fetch, headers }),
    createSpeechProvider({ baseURL, fetch, headers }),
    createTranscriptionProvider({ baseURL, fetch, headers }),
    createModelProvider({ baseURL, fetch, headers })
  );
};
30
+
31
// Deprecated aliases kept so existing callers keep working; each one is the
// exact same function object as its canonical counterpart.
/** @deprecated use `createFeatherless` instead (this name was a misspelling). */
const createFatherless = createFeatherless;
/** @deprecated use `createAlibabaCn` instead. */
const createQwen = createAlibabaCn;
/** @deprecated use `createXai` instead. */
const createXAI = createXai;
34
+
35
/**
 * Create a Workers AI Provider bound to one Cloudflare account.
 *
 * @param {string} apiKey - Cloudflare API token used for authorization.
 * @param {string} accountId - Cloudflare account id; becomes part of the endpoint URL.
 * @returns merged chat + embed provider targeting the account's `ai/v1` endpoint.
 */
const createWorkersAI = (apiKey, accountId) => {
  // Cloudflare exposes an OpenAI-compatible API under each account.
  const endpoint = `https://api.cloudflare.com/client/v4/accounts/${accountId}/ai/v1/`;
  const chat = createChatProvider({ apiKey, baseURL: endpoint });
  const embed = createEmbedProvider({ apiKey, baseURL: endpoint });
  return merge(chat, embed);
};
42
+
43
+ export { createAlibabaCn, createAzure, createFatherless, createFeatherless, createQwen, createWorkersAI, createXAI, createXai };