@xsai-ext/providers 0.4.0-beta.10 → 0.4.0-beta.12
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/anthropic-B6NN_uBS.d.ts +117 -0
- package/dist/create.d.ts +192 -151
- package/dist/create.js +4 -3
- package/dist/index.d.ts +313 -237
- package/dist/index.js +52 -41
- package/dist/{together-ai-Dohv6WsZ.js → together-ai-BNlmR9ei.js} +101 -62
- package/package.json +3 -3
- package/dist/openrouter-BJX-qVAE.d.ts +0 -104
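The headline change in this release is a large batch of new and expanded provider constants in package/dist/index.d.ts (Fireworks AI, ModelScope, Moonshot AI, Morph, Nebius, Nvidia, Ollama Cloud, OVHcloud, Poe, and others), each documented with its default baseURL and API-key environment variable. For orientation only, a minimal consumption sketch, assuming these constants keep the `chat(model)` helper that xsAI's `ChatProvider` exposed in earlier releases and that the documented environment variable (here `FIREWORKS_API_KEY`) is set:

  // Hypothetical usage sketch, not taken from this diff.
  import { generateText } from 'xsai'
  import { fireworks } from '@xsai-ext/providers'

  const { text } = await generateText({
    // model name taken from the new fireworks union type in index.d.ts
    ...fireworks.chat('accounts/fireworks/models/kimi-k2-instruct'),
    messages: [{ role: 'user', content: 'Hello!' }],
  })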
package/dist/index.d.ts
CHANGED
@@ -1,86 +1,94 @@
  import * as _xsai_ext_shared_providers from '@xsai-ext/shared-providers';
- import { A as AnthropicModels, O as OpenrouterModels,
+ import { A as AnthropicModels, a as AnthropicOptions, O as OpenrouterModels, b as OpenRouterOptions, T as TogetheraiModels } from './anthropic-B6NN_uBS.js';

  /**
- *
- * @see {@link https://
+ * Alibaba Provider
+ * @see {@link https://www.alibabacloud.com/help/en/model-studio/models}
  * @remarks
- * - baseURL - `https://
- * - apiKey - `
+ * - baseURL - `https://dashscope-intl.aliyuncs.com/compatible-mode/v1`
+ * - apiKey - `DASHSCOPE_API_KEY`
  */
- declare const
+ declare const alibaba: _xsai_ext_shared_providers.ChatProvider<"qwen3-livetranslate-flash-realtime" | "qwen3-asr-flash" | "qwen-omni-turbo" | "qwen-vl-max" | "qwen3-next-80b-a3b-instruct" | "qwen-turbo" | "qwen3-vl-235b-a22b" | "qwen3-coder-flash" | "qwen3-vl-30b-a3b" | "qwen3-14b" | "qvq-max" | "qwen-plus-character-ja" | "qwen2-5-14b-instruct" | "qwq-plus" | "qwen3-coder-30b-a3b-instruct" | "qwen-vl-ocr" | "qwen2-5-72b-instruct" | "qwen3-omni-flash" | "qwen-flash" | "qwen3-8b" | "qwen3-omni-flash-realtime" | "qwen2-5-vl-72b-instruct" | "qwen3-vl-plus" | "qwen-plus" | "qwen2-5-32b-instruct" | "qwen2-5-omni-7b" | "qwen-max" | "qwen2-5-7b-instruct" | "qwen2-5-vl-7b-instruct" | "qwen3-235b-a22b" | "qwen-omni-turbo-realtime" | "qwen-mt-turbo" | "qwen3-coder-480b-a35b-instruct" | "qwen-mt-plus" | "qwen3-max" | "qwen3-coder-plus" | "qwen3-next-80b-a3b-thinking" | "qwen3-32b" | "qwen-vl-plus"> & _xsai_ext_shared_providers.ModelProvider;
  /**
- *
- * @see {@link https://
+ * Alibaba (China) Provider
+ * @see {@link https://www.alibabacloud.com/help/en/model-studio/models}
  * @remarks
- * - baseURL - `https://
- * - apiKey - `
+ * - baseURL - `https://dashscope.aliyuncs.com/compatible-mode/v1`
+ * - apiKey - `DASHSCOPE_API_KEY`
  */
- declare const
+ declare const alibabaCn: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"qwen3-asr-flash" | "qwen-omni-turbo" | "qwen-vl-max" | "qwen3-next-80b-a3b-instruct" | "qwen-turbo" | "qwen3-vl-235b-a22b" | "qwen3-coder-flash" | "qwen3-vl-30b-a3b" | "qwen3-14b" | "qvq-max" | "qwen2-5-14b-instruct" | "qwq-plus" | "qwen3-coder-30b-a3b-instruct" | "qwen-vl-ocr" | "qwen2-5-72b-instruct" | "qwen3-omni-flash" | "qwen-flash" | "qwen3-8b" | "qwen3-omni-flash-realtime" | "qwen2-5-vl-72b-instruct" | "qwen3-vl-plus" | "qwen-plus" | "qwen2-5-32b-instruct" | "qwen2-5-omni-7b" | "qwen-max" | "qwen2-5-7b-instruct" | "qwen2-5-vl-7b-instruct" | "qwen3-235b-a22b" | "qwen-omni-turbo-realtime" | "qwen-mt-turbo" | "qwen3-coder-480b-a35b-instruct" | "qwen-mt-plus" | "qwen3-max" | "qwen3-coder-plus" | "qwen3-next-80b-a3b-thinking" | "qwen3-32b" | "qwen-vl-plus" | "deepseek-r1-distill-qwen-7b" | "deepseek-r1-0528" | "deepseek-v3" | "deepseek-v3-2-exp" | "deepseek-r1" | "deepseek-r1-distill-qwen-32b" | "qwen-plus-character" | "qwen2-5-coder-32b-instruct" | "qwen-math-plus" | "qwen-doc-turbo" | "qwen-deep-research" | "qwen-long" | "qwen2-5-math-72b-instruct" | "moonshot-kimi-k2-instruct" | "tongyi-intent-detect-v3" | "deepseek-v3-1" | "deepseek-r1-distill-llama-70b" | "qwen2-5-coder-7b-instruct" | "deepseek-r1-distill-qwen-14b" | "qwen-math-turbo" | "deepseek-r1-distill-llama-8b" | "qwq-32b" | "qwen2-5-math-7b-instruct" | "deepseek-r1-distill-qwen-1-5b">;
  /**
- *
- * @see {@link https://
+ * Bailing Provider
+ * @see {@link https://alipaytbox.yuque.com/sxs0ba/ling/intro}
  * @remarks
- * - baseURL - `https://api.
- * - apiKey - `
+ * - baseURL - `https://api.tbox.cn/api/llm/v1/chat/completions`
+ * - apiKey - `BAILING_API_TOKEN`
  */
- declare const
+ declare const bailing: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"Ling-1T" | "Ring-1T">;
  /**
- *
- * @see {@link https://docs.
+ * Baseten Provider
+ * @see {@link https://docs.baseten.co/development/model-apis/overview}
  * @remarks
- * - baseURL - `https://
- * - apiKey - `
+ * - baseURL - `https://inference.baseten.co/v1`
+ * - apiKey - `BASETEN_API_KEY`
  */
- declare const
+ declare const baseten: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"moonshotai/Kimi-K2-Instruct-0905" | "moonshotai/Kimi-K2-Thinking" | "Qwen/Qwen3-Coder-480B-A35B-Instruct" | "zai-org/GLM-4.6" | "deepseek-ai/DeepSeek-V3.2">;
  /**
- *
- * @see {@link https://
+ * Cerebras Provider
+ * @see {@link https://inference-docs.cerebras.ai/models/overview}
  * @remarks
- * - baseURL - `https://
- * - apiKey - `
+ * - baseURL - `https://api.cerebras.ai/v1/`
+ * - apiKey - `CEREBRAS_API_KEY`
  */
- declare const
+ declare const cerebras: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"qwen-3-235b-a22b-instruct-2507" | "zai-glm-4.6" | "gpt-oss-120b"> & _xsai_ext_shared_providers.EmbedProvider<string>;
  /**
- *
- * @see {@link https://
+ * Chutes Provider
+ * @see {@link https://llm.chutes.ai/v1/models}
  * @remarks
- * - baseURL - `https://
- * - apiKey - `
+ * - baseURL - `https://llm.chutes.ai/v1`
+ * - apiKey - `CHUTES_API_KEY`
  */
- declare const
+ declare const chutes: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"moonshotai/Kimi-K2-Instruct-0905" | "moonshotai/Kimi-K2-Thinking" | "zai-org/GLM-4.6" | "deepseek-ai/DeepSeek-V3.2" | "NousResearch/Hermes-4.3-36B" | "NousResearch/Hermes-4-70B" | "NousResearch/Hermes-4-14B" | "NousResearch/Hermes-4-405B-FP8" | "NousResearch/DeepHermes-3-Mistral-24B-Preview" | "rednote-hilab/dots.ocr" | "MiniMaxAI/MiniMax-M2" | "ArliAI/QwQ-32B-ArliAI-RpR-v1" | "tngtech/DeepSeek-R1T-Chimera" | "tngtech/DeepSeek-TNG-R1T2-Chimera" | "tngtech/TNG-R1T-Chimera-TEE" | "OpenGVLab/InternVL3-78B" | "openai/gpt-oss-20b" | "openai/gpt-oss-120b" | "chutesai/Mistral-Small-3.1-24B-Instruct-2503" | "chutesai/Mistral-Small-3.2-24B-Instruct-2506" | "Alibaba-NLP/Tongyi-DeepResearch-30B-A3B" | "mistralai/Devstral-2-123B-Instruct-2512" | "unsloth/Mistral-Nemo-Instruct-2407" | "unsloth/gemma-3-4b-it" | "unsloth/Mistral-Small-24B-Instruct-2501" | "unsloth/gemma-3-12b-it" | "unsloth/gemma-3-27b-it" | "Qwen/Qwen3-30B-A3B" | "Qwen/Qwen3-14B" | "Qwen/Qwen2.5-VL-32B-Instruct" | "Qwen/Qwen3-235B-A22B-Instruct-2507" | "Qwen/Qwen2.5-Coder-32B-Instruct" | "Qwen/Qwen2.5-72B-Instruct" | "Qwen/Qwen3-Coder-30B-A3B-Instruct" | "Qwen/Qwen3-235B-A22B" | "Qwen/Qwen2.5-VL-72B-Instruct" | "Qwen/Qwen3-32B" | "Qwen/Qwen3-Coder-480B-A35B-Instruct-FP8" | "Qwen/Qwen3-VL-235B-A22B-Instruct" | "Qwen/Qwen3-VL-235B-A22B-Thinking" | "Qwen/Qwen3-30B-A3B-Instruct-2507" | "Qwen/Qwen3-235B-A22B-Thinking-2507" | "Qwen/Qwen3-Next-80B-A3B-Instruct" | "zai-org/GLM-4.6-TEE" | "zai-org/GLM-4.6V" | "zai-org/GLM-4.5" | "zai-org/GLM-4.5-Air" | "deepseek-ai/DeepSeek-R1" | "deepseek-ai/DeepSeek-R1-0528-Qwen3-8B" | "deepseek-ai/DeepSeek-R1-0528" | "deepseek-ai/DeepSeek-V3.1-Terminus" | "deepseek-ai/DeepSeek-V3.2-Speciale-TEE" | "deepseek-ai/DeepSeek-V3" | "deepseek-ai/DeepSeek-R1-Distill-Llama-70B" | "deepseek-ai/DeepSeek-V3.1" | "deepseek-ai/DeepSeek-V3-0324">;
  /**
- *
- * @see {@link https://api.
+ * Cortecs Provider
+ * @see {@link https://api.cortecs.ai/v1/models}
  * @remarks
- * - baseURL - `https://api.
- * - apiKey - `
+ * - baseURL - `https://api.cortecs.ai/v1`
+ * - apiKey - `CORTECS_API_KEY`
  */
- declare const
+ declare const cortecs: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"qwen3-coder-480b-a35b-instruct" | "qwen3-next-80b-a3b-thinking" | "qwen3-32b" | "gpt-oss-120b" | "nova-pro-v1" | "devstral-2512" | "intellect-3" | "claude-4-5-sonnet" | "deepseek-v3-0324" | "kimi-k2-thinking" | "kimi-k2-instruct" | "gpt-4.1" | "gemini-2.5-pro" | "devstral-small-2512" | "claude-sonnet-4" | "llama-3.1-405b-instruct">;
  /**
- *
- * @see {@link https://
+ * Deep Infra Provider
+ * @see {@link https://deepinfra.com/models}
  * @remarks
- * - baseURL - `https://
- * - apiKey - `
+ * - baseURL - `https://api.deepinfra.com/v1/openai/`
+ * - apiKey - `DEEPINFRA_API_KEY`
  */
- declare const
+ declare const deepinfra: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.EmbedProvider<string> & _xsai_ext_shared_providers.ChatProvider<"Qwen/Qwen3-Coder-480B-A35B-Instruct" | "openai/gpt-oss-20b" | "openai/gpt-oss-120b" | "zai-org/GLM-4.5" | "moonshotai/Kimi-K2-Instruct" | "Qwen/Qwen3-Coder-480B-A35B-Instruct-Turbo">;
  /**
- *
- * @see {@link https://
+ * DeepSeek Provider
+ * @see {@link https://platform.deepseek.com/api-docs/pricing}
  * @remarks
- * - baseURL - `https://api.
- * - apiKey - `
+ * - baseURL - `https://api.deepseek.com`
+ * - apiKey - `DEEPSEEK_API_KEY`
  */
- declare const
+ declare const deepseek: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"deepseek-chat" | "deepseek-reasoner">;
  /**
- *
- * @see {@link https://
+ * FastRouter Provider
+ * @see {@link https://fastrouter.ai/models}
  * @remarks
- * - baseURL - `https://
- * - apiKey - `
+ * - baseURL - `https://go.fastrouter.ai/api/v1`
+ * - apiKey - `FASTROUTER_API_KEY`
  */
- declare const
+ declare const fastrouter: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"openai/gpt-oss-20b" | "openai/gpt-oss-120b" | "moonshotai/kimi-k2" | "x-ai/grok-4" | "google/gemini-2.5-flash" | "google/gemini-2.5-pro" | "openai/gpt-5-nano" | "openai/gpt-4.1" | "openai/gpt-5-mini" | "openai/gpt-5" | "qwen/qwen3-coder" | "anthropic/claude-opus-4.1" | "anthropic/claude-sonnet-4" | "deepseek-ai/deepseek-r1-distill-llama-70b">;
+ /**
+ * Fireworks AI Provider
+ * @see {@link https://fireworks.ai/docs/}
+ * @remarks
+ * - baseURL - `https://api.fireworks.ai/inference/v1/`
+ * - apiKey - `FIREWORKS_API_KEY`
+ */
+ declare const fireworks: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"accounts/fireworks/models/deepseek-r1-0528" | "accounts/fireworks/models/deepseek-v3p1" | "accounts/fireworks/models/minimax-m2" | "accounts/fireworks/models/deepseek-v3-0324" | "accounts/fireworks/models/kimi-k2-thinking" | "accounts/fireworks/models/kimi-k2-instruct" | "accounts/fireworks/models/qwen3-235b-a22b" | "accounts/fireworks/models/gpt-oss-20b" | "accounts/fireworks/models/gpt-oss-120b" | "accounts/fireworks/models/glm-4p5-air" | "accounts/fireworks/models/qwen3-coder-480b-a35b-instruct" | "accounts/fireworks/models/glm-4p5">;
  /**
  * GitHub Copilot Provider
  * @see {@link https://docs.github.com/en/copilot}
@@ -88,105 +96,113 @@ declare const groq: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_
  * - baseURL - `https://api.githubcopilot.com`
  * - apiKey - `GITHUB_TOKEN`
  */
- declare const githubCopilot: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"
+ declare const githubCopilot: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"gpt-4.1" | "gemini-2.5-pro" | "claude-sonnet-4" | "gemini-2.0-flash-001" | "claude-opus-4" | "gemini-3-flash-preview" | "grok-code-fast-1" | "gpt-5.1-codex" | "claude-haiku-4.5" | "gemini-3-pro-preview" | "oswe-vscode-prime" | "claude-3.5-sonnet" | "gpt-5.1-codex-mini" | "o3-mini" | "gpt-5.1" | "gpt-5-codex" | "gpt-4o" | "o4-mini" | "claude-opus-41" | "gpt-5-mini" | "claude-3.7-sonnet" | "gpt-5.1-codex-max" | "o3" | "gpt-5" | "claude-3.7-sonnet-thought" | "claude-opus-4.5" | "gpt-5.2" | "claude-sonnet-4.5">;
  /**
- *
- * @see {@link https://docs.
+ * GitHub Models Provider
+ * @see {@link https://docs.github.com/en/github-models}
  * @remarks
- * - baseURL - `https://
- * - apiKey - `
+ * - baseURL - `https://models.github.ai/inference`
+ * - apiKey - `GITHUB_TOKEN`
  */
- declare const
+ declare const githubModels: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"openai/gpt-4.1" | "core42/jais-30b-chat" | "xai/grok-3" | "xai/grok-3-mini" | "cohere/cohere-command-r-08-2024" | "cohere/cohere-command-a" | "cohere/cohere-command-r-plus-08-2024" | "cohere/cohere-command-r" | "cohere/cohere-command-r-plus" | "deepseek/deepseek-r1-0528" | "deepseek/deepseek-r1" | "deepseek/deepseek-v3-0324" | "mistral-ai/mistral-medium-2505" | "mistral-ai/ministral-3b" | "mistral-ai/mistral-nemo" | "mistral-ai/mistral-large-2411" | "mistral-ai/codestral-2501" | "mistral-ai/mistral-small-2503" | "microsoft/phi-3-medium-128k-instruct" | "microsoft/phi-3-mini-4k-instruct" | "microsoft/phi-3-small-128k-instruct" | "microsoft/phi-3.5-vision-instruct" | "microsoft/phi-4" | "microsoft/phi-4-mini-reasoning" | "microsoft/phi-3-small-8k-instruct" | "microsoft/phi-3.5-mini-instruct" | "microsoft/phi-4-multimodal-instruct" | "microsoft/phi-3-mini-128k-instruct" | "microsoft/phi-3.5-moe-instruct" | "microsoft/phi-4-mini-instruct" | "microsoft/phi-3-medium-4k-instruct" | "microsoft/phi-4-reasoning" | "microsoft/mai-ds-r1" | "openai/gpt-4.1-nano" | "openai/gpt-4.1-mini" | "openai/o1-preview" | "openai/o3-mini" | "openai/gpt-4o" | "openai/o4-mini" | "openai/o1" | "openai/o1-mini" | "openai/o3" | "openai/gpt-4o-mini" | "meta/llama-3.2-11b-vision-instruct" | "meta/meta-llama-3.1-405b-instruct" | "meta/llama-4-maverick-17b-128e-instruct-fp8" | "meta/meta-llama-3-70b-instruct" | "meta/meta-llama-3.1-70b-instruct" | "meta/llama-3.3-70b-instruct" | "meta/llama-3.2-90b-vision-instruct" | "meta/meta-llama-3-8b-instruct" | "meta/llama-4-scout-17b-16e-instruct" | "meta/meta-llama-3.1-8b-instruct" | "ai21-labs/ai21-jamba-1.5-large" | "ai21-labs/ai21-jamba-1.5-mini">;
  /**
- *
- * @see {@link https://
+ * Google Provider
+ * @see {@link https://ai.google.dev/gemini-api/docs/pricing}
  * @remarks
- * - baseURL - `https://
- * - apiKey - `
+ * - baseURL - `https://generativelanguage.googleapis.com/v1beta/openai/`
+ * - apiKey - `GOOGLE_GENERATIVE_AI_API_KEY or GEMINI_API_KEY`
  */
- declare const
+ declare const google: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.EmbedProvider<string> & _xsai_ext_shared_providers.ChatProvider<"gemini-2.5-pro" | "gemini-3-flash-preview" | "gemini-3-pro-preview" | "gemini-embedding-001" | "gemini-2.5-flash-image" | "gemini-2.5-flash-preview-05-20" | "gemini-flash-lite-latest" | "gemini-2.5-flash" | "gemini-flash-latest" | "gemini-2.5-pro-preview-05-06" | "gemini-2.5-flash-preview-tts" | "gemini-2.0-flash-lite" | "gemini-live-2.5-flash-preview-native-audio" | "gemini-2.0-flash" | "gemini-2.5-flash-lite" | "gemini-2.5-pro-preview-06-05" | "gemini-live-2.5-flash" | "gemini-2.5-flash-lite-preview-06-17" | "gemini-2.5-flash-image-preview" | "gemini-2.5-flash-preview-09-2025" | "gemini-2.5-flash-preview-04-17" | "gemini-2.5-pro-preview-tts" | "gemini-1.5-flash" | "gemini-1.5-flash-8b" | "gemini-2.5-flash-lite-preview-09-2025" | "gemini-1.5-pro">;
  /**
- *
- * @see {@link https://
+ * Groq Provider
+ * @see {@link https://console.groq.com/docs/models}
  * @remarks
- * - baseURL - `https://api.
- * - apiKey - `
+ * - baseURL - `https://api.groq.com/openai/v1/`
+ * - apiKey - `GROQ_API_KEY`
  */
- declare const
+ declare const groq: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.EmbedProvider<string> & _xsai_ext_shared_providers.ChatProvider<"deepseek-r1-distill-llama-70b" | "openai/gpt-oss-20b" | "openai/gpt-oss-120b" | "llama-3.1-8b-instant" | "mistral-saba-24b" | "llama3-8b-8192" | "qwen-qwq-32b" | "llama3-70b-8192" | "llama-guard-3-8b" | "gemma2-9b-it" | "llama-3.3-70b-versatile" | "moonshotai/kimi-k2-instruct-0905" | "moonshotai/kimi-k2-instruct" | "qwen/qwen3-32b" | "meta-llama/llama-4-scout-17b-16e-instruct" | "meta-llama/llama-4-maverick-17b-128e-instruct" | "meta-llama/llama-guard-4-12b">;
  /**
- *
- * @see {@link https://
+ * Helicone Provider
+ * @see {@link https://helicone.ai/models}
  * @remarks
- * - baseURL - `https://
- * - apiKey - `
+ * - baseURL - `https://ai-gateway.helicone.ai/v1`
+ * - apiKey - `HELICONE_API_KEY`
  */
- declare const
+ declare const helicone: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"qwen3-next-80b-a3b-instruct" | "qwen3-coder-30b-a3b-instruct" | "qwen3-32b" | "deepseek-v3" | "deepseek-r1-distill-llama-70b" | "gpt-oss-120b" | "kimi-k2-thinking" | "gpt-4.1" | "gemini-2.5-pro" | "claude-sonnet-4" | "deepseek-reasoner" | "claude-opus-4" | "grok-code-fast-1" | "gpt-5.1-codex" | "gemini-3-pro-preview" | "gpt-5.1-codex-mini" | "o3-mini" | "gpt-5.1" | "gpt-5-codex" | "gpt-4o" | "o4-mini" | "gpt-5-mini" | "claude-3.7-sonnet" | "o3" | "gpt-5" | "gemini-2.5-flash" | "gemini-2.5-flash-lite" | "llama-3.1-8b-instant" | "gemma2-9b-it" | "llama-3.3-70b-versatile" | "gpt-4.1-nano" | "grok-4-fast-non-reasoning" | "qwen3-coder" | "grok-4-fast-reasoning" | "claude-opus-4-1" | "grok-4" | "llama-4-maverick" | "llama-prompt-guard-2-86m" | "grok-4-1-fast-reasoning" | "claude-4.5-haiku" | "llama-3.1-8b-instruct-turbo" | "gpt-4.1-mini-2025-04-14" | "llama-guard-4" | "llama-3.1-8b-instruct" | "gpt-4.1-mini" | "deepseek-v3.1-terminus" | "llama-prompt-guard-2-22m" | "claude-3.5-sonnet-v2" | "sonar-deep-research" | "claude-sonnet-4-5-20250929" | "grok-3" | "mistral-small" | "kimi-k2-0711" | "chatgpt-4o-latest" | "kimi-k2-0905" | "sonar-reasoning" | "llama-3.3-70b-instruct" | "claude-4.5-sonnet" | "codex-mini-latest" | "gpt-5-nano" | "deepseek-tng-r1t2-chimera" | "claude-4.5-opus" | "sonar" | "glm-4.6" | "qwen3-235b-a22b-thinking" | "hermes-2-pro-llama-3-8b" | "o1" | "grok-3-mini" | "sonar-pro" | "o1-mini" | "claude-3-haiku-20240307" | "o3-pro" | "qwen2.5-coder-7b-fast" | "gemma-3-12b-it" | "mistral-nemo" | "gpt-oss-20b" | "claude-3.5-haiku" | "gpt-5-chat-latest" | "gpt-4o-mini" | "sonar-reasoning-pro" | "qwen3-vl-235b-a22b-instruct" | "qwen3-30b-a3b" | "deepseek-v3.2" | "grok-4-1-fast-non-reasoning" | "gpt-5-pro" | "mistral-large-2411" | "claude-opus-4-1-20250805" | "ernie-4.5-21b-a3b-thinking" | "gpt-5.1-chat-latest" | "claude-haiku-4-5-20251001" | "llama-4-scout">;
  /**
- *
- * @see {@link https://docs
+ * Hugging Face Provider
+ * @see {@link https://huggingface.co/docs/inference-providers}
  * @remarks
- * - baseURL - `https://
- * - apiKey - `
+ * - baseURL - `https://router.huggingface.co/v1`
+ * - apiKey - `HF_TOKEN`
  */
- declare const
+ declare const huggingface: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"moonshotai/Kimi-K2-Instruct-0905" | "Qwen/Qwen3-Coder-480B-A35B-Instruct" | "zai-org/GLM-4.6" | "MiniMaxAI/MiniMax-M2" | "Qwen/Qwen3-235B-A22B-Thinking-2507" | "Qwen/Qwen3-Next-80B-A3B-Instruct" | "zai-org/GLM-4.5" | "zai-org/GLM-4.5-Air" | "deepseek-ai/DeepSeek-R1-0528" | "moonshotai/Kimi-K2-Instruct" | "Qwen/Qwen3-Embedding-8B" | "Qwen/Qwen3-Embedding-4B" | "Qwen/Qwen3-Next-80B-A3B-Thinking" | "deepseek-ai/Deepseek-V3-0324">;
  /**
- *
- * @see {@link https://
+ * iFlow Provider
+ * @see {@link https://platform.iflow.cn/en/docs}
  * @remarks
- * - baseURL - `https://
- * - apiKey - `
+ * - baseURL - `https://apis.iflow.cn/v1`
+ * - apiKey - `IFLOW_API_KEY`
  */
- declare const
+ declare const iflowcn: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"qwen3-vl-plus" | "qwen3-max" | "qwen3-coder-plus" | "qwen3-32b" | "deepseek-v3" | "deepseek-r1" | "kimi-k2-thinking" | "qwen3-coder" | "kimi-k2-0905" | "glm-4.6" | "deepseek-v3.2" | "kimi-k2" | "deepseek-v3.1" | "minimax-m2" | "qwen3-235b" | "deepseek-v3.2-chat" | "qwen3-235b-a22b-thinking-2507" | "tstars2.0" | "qwen3-235b-a22b-instruct" | "qwen3-max-preview">;
  /**
- *
- * @see {@link https://
+ * Inception Provider
+ * @see {@link https://platform.inceptionlabs.ai/docs}
  * @remarks
- * - baseURL - `https://api.
- * - apiKey - `
+ * - baseURL - `https://api.inceptionlabs.ai/v1/`
+ * - apiKey - `INCEPTION_API_KEY`
  */
- declare const
+ declare const inception: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"mercury-coder" | "mercury">;
  /**
- *
- * @see {@link https://
+ * Inference Provider
+ * @see {@link https://inference.net/models}
  * @remarks
- * - baseURL - `https://
- * - apiKey - `
+ * - baseURL - `https://inference.net/v1`
+ * - apiKey - `INFERENCE_API_KEY`
  */
- declare const
+ declare const inference: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"meta/llama-3.2-11b-vision-instruct" | "mistral/mistral-nemo-12b-instruct" | "google/gemma-3" | "osmosis/osmosis-structure-0.6b" | "qwen/qwen3-embedding-4b" | "qwen/qwen-2.5-7b-vision-instruct" | "meta/llama-3.1-8b-instruct" | "meta/llama-3.2-3b-instruct" | "meta/llama-3.2-1b-instruct">;
  /**
- *
- * @see {@link https://
+ * IO.NET Provider
+ * @see {@link https://io.net/docs/guides/intelligence/io-intelligence}
  * @remarks
- * - baseURL - `https://
- * - apiKey - `
+ * - baseURL - `https://api.intelligence.io.solutions/api/v1`
+ * - apiKey - `IOINTELLIGENCE_API_KEY`
  */
- declare const
+ declare const ioNet: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"moonshotai/Kimi-K2-Instruct-0905" | "moonshotai/Kimi-K2-Thinking" | "zai-org/GLM-4.6" | "openai/gpt-oss-20b" | "openai/gpt-oss-120b" | "Qwen/Qwen2.5-VL-32B-Instruct" | "Qwen/Qwen3-235B-A22B-Thinking-2507" | "Qwen/Qwen3-Next-80B-A3B-Instruct" | "deepseek-ai/DeepSeek-R1-0528" | "mistralai/Devstral-Small-2505" | "mistralai/Mistral-Nemo-Instruct-2407" | "mistralai/Magistral-Small-2506" | "mistralai/Mistral-Large-Instruct-2411" | "meta-llama/Llama-3.3-70B-Instruct" | "meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8" | "meta-llama/Llama-3.2-90B-Vision-Instruct" | "Intel/Qwen3-Coder-480B-A35B-Instruct-int4-mixed-ar">;
  /**
- *
- * @see {@link https://
+ * Kimi For Coding Provider
+ * @see {@link https://www.kimi.com/coding/docs/en/third-party-agents.html}
  * @remarks
- * - baseURL - `https://
- * - apiKey - `
+ * - baseURL - `https://api.kimi.com/coding/v1`
+ * - apiKey - `KIMI_API_KEY`
  */
- declare const
+ declare const kimiForCoding: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"kimi-k2-thinking">;
  /**
- *
- * @see {@link https://
+ * Llama Provider
+ * @see {@link https://llama.developer.meta.com/docs/models}
  * @remarks
- * - baseURL - `https://
- * - apiKey - `
+ * - baseURL - `https://api.llama.com/compat/v1/`
+ * - apiKey - `LLAMA_API_KEY`
  */
- declare const
+ declare const llama: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"llama-3.3-70b-instruct" | "llama-3.3-8b-instruct" | "llama-4-maverick-17b-128e-instruct-fp8" | "llama-4-scout-17b-16e-instruct-fp8" | "groq-llama-4-maverick-17b-128e-instruct" | "cerebras-llama-4-scout-17b-16e-instruct" | "cerebras-llama-4-maverick-17b-128e-instruct">;
  /**
- *
- * @see {@link https://
+ * LMStudio Provider
+ * @see {@link https://lmstudio.ai/models}
  * @remarks
- * - baseURL - `
- * - apiKey - `
+ * - baseURL - `http://127.0.0.1:1234/v1`
+ * - apiKey - `LMSTUDIO_API_KEY`
+ */
+ declare const lmstudio: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"openai/gpt-oss-20b" | "qwen/qwen3-30b-a3b-2507" | "qwen/qwen3-coder-30b">;
+ /**
+ * LucidQuery AI Provider
+ * @see {@link https://lucidquery.com/api/docs}
+ * @remarks
+ * - baseURL - `https://lucidquery.com/api/v1`
+ * - apiKey - `LUCIDQUERY_API_KEY`
  */
- declare const
+ declare const lucidquery: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"lucidquery-nexus-coder" | "lucidnova-rf1-100b">;
  /**
- *
+ * MiniMax Provider
  * @see {@link https://platform.minimax.io/docs/guides/quickstart}
  * @remarks
  * - baseURL - `https://api.minimax.io/v1/`
@@ -194,29 +210,77 @@ declare const fastrouter: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_s
  */
  declare const minimax: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"MiniMax-M2">;
  /**
- *
- * @see {@link https://
+ * MiniMax (China) Provider
+ * @see {@link https://platform.minimaxi.com/docs/guides/quickstart}
  * @remarks
- * - baseURL - `https://
- * - apiKey - `
+ * - baseURL - `https://api.minimaxi.com/v1/`
+ * - apiKey - `MINIMAX_API_KEY`
  */
- declare const
+ declare const minimaxCn: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"MiniMax-M2">;
  /**
- *
- * @see {@link https://
+ * Mistral Provider
+ * @see {@link https://docs.mistral.ai/getting-started/models/}
  * @remarks
- * - baseURL - `https://api.
- * - apiKey - `
+ * - baseURL - `https://api.mistral.ai/v1/`
+ * - apiKey - `MISTRAL_API_KEY`
  */
- declare const
+ declare const mistral: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.EmbedProvider<string> & _xsai_ext_shared_providers.ChatProvider<"devstral-2512" | "mistral-nemo" | "mistral-large-2411" | "devstral-medium-2507" | "mistral-large-2512" | "open-mixtral-8x22b" | "ministral-8b-latest" | "pixtral-large-latest" | "mistral-small-2506" | "ministral-3b-latest" | "pixtral-12b" | "mistral-medium-2505" | "labs-devstral-small-2512" | "devstral-medium-latest" | "devstral-small-2505" | "mistral-medium-2508" | "mistral-embed" | "mistral-small-latest" | "magistral-small" | "devstral-small-2507" | "codestral-latest" | "open-mixtral-8x7b" | "open-mistral-7b" | "mistral-large-latest" | "mistral-medium-latest" | "magistral-medium-latest">;
  /**
- *
- * @see {@link https://
+ * ModelScope Provider
+ * @see {@link https://modelscope.cn/docs/model-service/API-Inference/intro}
  * @remarks
- * - baseURL - `https://api
- * - apiKey - `
+ * - baseURL - `https://api-inference.modelscope.cn/v1`
+ * - apiKey - `MODELSCOPE_API_KEY`
+ */
+ declare const modelscope: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"Qwen/Qwen3-235B-A22B-Instruct-2507" | "Qwen/Qwen3-Coder-30B-A3B-Instruct" | "Qwen/Qwen3-30B-A3B-Instruct-2507" | "Qwen/Qwen3-235B-A22B-Thinking-2507" | "ZhipuAI/GLM-4.5" | "ZhipuAI/GLM-4.6" | "Qwen/Qwen3-30B-A3B-Thinking-2507">;
+ /**
+ * Moonshot AI Provider
+ * @see {@link https://platform.moonshot.ai/docs/api/chat}
+ * @remarks
+ * - baseURL - `https://api.moonshot.ai/v1`
+ * - apiKey - `MOONSHOT_API_KEY`
+ */
+ declare const moonshotai: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"kimi-k2-thinking" | "kimi-k2-thinking-turbo" | "kimi-k2-turbo-preview" | "kimi-k2-0711-preview" | "kimi-k2-0905-preview">;
+ /**
+ * Moonshot AI (China) Provider
+ * @see {@link https://platform.moonshot.cn/docs/api/chat}
+ * @remarks
+ * - baseURL - `https://api.moonshot.cn/v1`
+ * - apiKey - `MOONSHOT_API_KEY`
  */
- declare const
+ declare const moonshotaiCn: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"kimi-k2-thinking" | "kimi-k2-thinking-turbo" | "kimi-k2-turbo-preview" | "kimi-k2-0711-preview" | "kimi-k2-0905-preview">;
+ /**
+ * Morph Provider
+ * @see {@link https://docs.morphllm.com/api-reference/introduction}
+ * @remarks
+ * - baseURL - `https://api.morphllm.com/v1`
+ * - apiKey - `MORPH_API_KEY`
+ */
+ declare const morph: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"morph-v3-large" | "auto" | "morph-v3-fast">;
+ /**
+ * Nebius Token Factory Provider
+ * @see {@link https://docs.tokenfactory.nebius.com/}
+ * @remarks
+ * - baseURL - `https://api.tokenfactory.nebius.com/v1`
+ * - apiKey - `NEBIUS_API_KEY`
+ */
+ declare const nebius: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"openai/gpt-oss-20b" | "openai/gpt-oss-120b" | "moonshotai/kimi-k2-instruct" | "NousResearch/hermes-4-70b" | "NousResearch/hermes-4-405b" | "nvidia/llama-3_1-nemotron-ultra-253b-v1" | "qwen/qwen3-235b-a22b-instruct-2507" | "qwen/qwen3-235b-a22b-thinking-2507" | "qwen/qwen3-coder-480b-a35b-instruct" | "meta-llama/llama-3_1-405b-instruct" | "meta-llama/llama-3.3-70b-instruct-fast" | "meta-llama/llama-3.3-70b-instruct-base" | "zai-org/glm-4.5" | "zai-org/glm-4.5-air" | "deepseek-ai/deepseek-v3">;
+ /**
+ * Nvidia Provider
+ * @see {@link https://docs.api.nvidia.com/nim/}
+ * @remarks
+ * - baseURL - `https://integrate.api.nvidia.com/v1`
+ * - apiKey - `NVIDIA_API_KEY`
+ */
+ declare const nvidia: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"openai/gpt-oss-120b" | "microsoft/phi-4-mini-instruct" | "moonshotai/kimi-k2-instruct-0905" | "moonshotai/kimi-k2-instruct" | "qwen/qwen3-coder-480b-a35b-instruct" | "moonshotai/kimi-k2-thinking" | "nvidia/nvidia-nemotron-nano-9b-v2" | "nvidia/cosmos-nemotron-34b" | "nvidia/llama-embed-nemotron-8b" | "nvidia/parakeet-tdt-0.6b-v2" | "nvidia/nemoretriever-ocr-v1" | "nvidia/llama-3.1-nemotron-ultra-253b-v1" | "minimaxai/minimax-m2" | "google/gemma-3-27b-it" | "openai/whisper-large-v3" | "qwen/qwen3-next-80b-a3b-instruct" | "qwen/qwen3-235b-a22b" | "qwen/qwen3-next-80b-a3b-thinking" | "mistralai/devstral-2-123b-instruct-2512" | "mistralai/mistral-large-3-675b-instruct-2512" | "mistralai/ministral-14b-instruct-2512" | "deepseek-ai/deepseek-v3.1-terminus" | "deepseek-ai/deepseek-v3.1" | "black-forest-labs/flux.1-dev">;
+ /**
+ * Ollama Cloud Provider
+ * @see {@link https://docs.ollama.com/cloud}
+ * @remarks
+ * - baseURL - `https://ollama.com/v1`
+ * - apiKey - `OLLAMA_API_KEY`
+ */
+ declare const ollamaCloud: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"kimi-k2-thinking:cloud" | "qwen3-vl-235b-cloud" | "qwen3-coder:480b-cloud" | "gpt-oss:120b-cloud" | "deepseek-v3.1:671b-cloud" | "glm-4.6:cloud" | "cogito-2.1:671b-cloud" | "gpt-oss:20b-cloud" | "qwen3-vl-235b-instruct-cloud" | "kimi-k2:1t-cloud" | "minimax-m2:cloud" | "gemini-3-pro-preview:latest">;
  /**
  * OpenAI Provider
  * @see {@link https://platform.openai.com/docs/models}
@@ -224,15 +288,23 @@ declare const wandb: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_
  * - baseURL - `https://api.openai.com/v1/`
  * - apiKey - `OPENAI_API_KEY`
  */
- declare const openai: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.EmbedProvider<string> & _xsai_ext_shared_providers.ChatProvider<"gpt-5.1-codex" | "gpt-5.1-codex-mini" | "o3-mini" | "gpt-5.1" | "gpt-5-codex" | "gpt-4o" | "
+ declare const openai: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.EmbedProvider<string> & _xsai_ext_shared_providers.ChatProvider<"gpt-4.1" | "gpt-5.1-codex" | "gpt-5.1-codex-mini" | "o3-mini" | "gpt-5.1" | "gpt-5-codex" | "gpt-4o" | "o4-mini" | "gpt-5-mini" | "gpt-5.1-codex-max" | "o3" | "gpt-5" | "gpt-5.2" | "gpt-4.1-nano" | "gpt-4.1-mini" | "codex-mini-latest" | "gpt-5-nano" | "o1" | "o1-mini" | "o3-pro" | "gpt-5-chat-latest" | "gpt-4o-mini" | "gpt-5-pro" | "gpt-5.1-chat-latest" | "text-embedding-3-small" | "gpt-4" | "o1-pro" | "gpt-4o-2024-05-13" | "gpt-4o-2024-08-06" | "o3-deep-research" | "gpt-3.5-turbo" | "gpt-5.2-pro" | "text-embedding-3-large" | "gpt-4-turbo" | "o1-preview" | "gpt-5.2-chat-latest" | "text-embedding-ada-002" | "gpt-4o-2024-11-20" | "o4-mini-deep-research"> & _xsai_ext_shared_providers.ImageProvider<string> & _xsai_ext_shared_providers.SpeechProvider<string> & _xsai_ext_shared_providers.TranscriptionProvider<string>;
  /**
- *
- * @see {@link https://
+ * OpenCode Zen Provider
+ * @see {@link https://opencode.ai/docs/zen}
  * @remarks
- * - baseURL - `https://
- * - apiKey - `
+ * - baseURL - `https://opencode.ai/zen/v1`
+ * - apiKey - `OPENCODE_API_KEY`
  */
- declare const
+ declare const opencode: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"kimi-k2-thinking" | "claude-sonnet-4" | "gpt-5.1-codex" | "gpt-5.1" | "gpt-5-codex" | "gpt-5.1-codex-max" | "gpt-5" | "gpt-5.2" | "qwen3-coder" | "claude-opus-4-1" | "gpt-5-nano" | "glm-4.6" | "kimi-k2" | "claude-haiku-4-5" | "claude-opus-4-5" | "gemini-3-pro" | "claude-sonnet-4-5" | "alpha-gd4" | "big-pickle" | "claude-3-5-haiku" | "grok-code" | "gemini-3-flash" | "alpha-doubao-seed-code" | "minimax-m2.1">;
+ /**
+ * OVHcloud AI Endpoints Provider
+ * @see {@link https://www.ovhcloud.com/en/public-cloud/ai-endpoints/catalog//}
+ * @remarks
+ * - baseURL - `https://oai.endpoints.kepler.ai.cloud.ovh.net/v1`
+ * - apiKey - `OVHCLOUD_API_KEY`
+ */
+ declare const ovhcloud: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"qwen3-coder-30b-a3b-instruct" | "qwen3-32b" | "deepseek-r1-distill-llama-70b" | "gpt-oss-120b" | "llama-3.1-8b-instruct" | "gpt-oss-20b" | "mixtral-8x7b-instruct-v0.1" | "mistral-7b-instruct-v0.3" | "qwen2.5-vl-72b-instruct" | "mistral-nemo-instruct-2407" | "mistral-small-3.2-24b-instruct-2506" | "qwen2.5-coder-32b-instruct" | "llava-next-mistral-7b" | "meta-llama-3_1-70b-instruct" | "meta-llama-3_3-70b-instruct">;
  /**
  * Perplexity Provider
  * @see {@link https://docs.perplexity.ai}
@@ -242,53 +314,45 @@ declare const zhipuaiCodingPlan: _xsai_ext_shared_providers.ModelProvider & _xsa
  */
  declare const perplexity: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"sonar-reasoning" | "sonar" | "sonar-pro" | "sonar-reasoning-pro">;
  /**
- *
- * @see {@link https://
- * @remarks
- * - baseURL - `https://zenmux.ai/api/v1`
- * - apiKey - `ZENMUX_API_KEY`
- */
- declare const zenmux: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"x-ai/grok-4" | "google/gemini-2.5-pro" | "openai/gpt-5" | "anthropic/claude-opus-4.1" | "moonshotai/kimi-k2-thinking-turbo" | "moonshotai/kimi-k2-0905" | "moonshotai/kimi-k2-thinking" | "x-ai/grok-4-fast-non-reasoning" | "x-ai/grok-code-fast-1" | "x-ai/grok-4-fast" | "deepseek/deepseek-chat" | "minimax/minimax-m2" | "openai/gpt-5-codex" | "inclusionai/ring-1t" | "inclusionai/lint-1t" | "z-ai/glm-4.5-air" | "z-ai/glm-4.6" | "qwen/qwen3-coder-plus" | "kuaishou/kat-coder-pro-v1" | "anthropic/claude-haiku-4.5" | "anthropic/claude-sonnet-4.5">;
- /**
- * OVHcloud AI Endpoints Provider
- * @see {@link https://www.ovhcloud.com/en/public-cloud/ai-endpoints/catalog//}
+ * Poe Provider
+ * @see {@link https://creator.poe.com/docs/external-applications/openai-compatible-api}
  * @remarks
- * - baseURL - `https://
- * - apiKey - `
+ * - baseURL - `https://api.poe.com/v1`
+ * - apiKey - `POE_API_KEY`
  */
- declare const
+ declare const poe: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"google/gemini-2.5-flash" | "google/gemini-2.5-pro" | "openai/gpt-5-nano" | "openai/gpt-4.1" | "openai/gpt-5-mini" | "openai/gpt-5" | "anthropic/claude-opus-4.1" | "anthropic/claude-sonnet-4" | "xai/grok-3" | "xai/grok-3-mini" | "openai/gpt-4.1-nano" | "openai/gpt-4.1-mini" | "openai/o3-mini" | "openai/gpt-4o" | "openai/o4-mini" | "openai/o1" | "openai/o3" | "openai/gpt-4o-mini" | "xai/grok-4-fast-non-reasoning" | "xai/grok-4-fast-reasoning" | "xai/grok-4.1-fast-reasoning" | "xai/grok-4" | "xai/grok-code-fast-1" | "xai/grok-2" | "xai/grok-4.1-fast-non-reasoning" | "ideogramai/ideogram" | "ideogramai/ideogram-v2a" | "ideogramai/ideogram-v2a-turbo" | "ideogramai/ideogram-v2" | "runwayml/runway" | "runwayml/runway-gen-4-turbo" | "poetools/claude-code" | "elevenlabs/elevenlabs-v3" | "elevenlabs/elevenlabs-music" | "elevenlabs/elevenlabs-v2.5-turbo" | "google/gemini-deep-research" | "google/nano-banana" | "google/imagen-4" | "google/imagen-3" | "google/imagen-4-ultra" | "google/gemini-2.0-flash-lite" | "google/gemini-3-pro" | "google/veo-3.1" | "google/imagen-3-fast" | "google/lyria" | "google/gemini-2.0-flash" | "google/gemini-2.5-flash-lite" | "google/veo-3" | "google/veo-3-fast" | "google/imagen-4-fast" | "google/veo-2" | "google/nano-banana-pro" | "google/veo-3.1-fast" | "openai/gpt-5.2-instant" | "openai/sora-2" | "openai/o1-pro" | "openai/gpt-5.1-codex" | "openai/gpt-3.5-turbo-raw" | "openai/gpt-4-classic" | "openai/gpt-5-chat" | "openai/o3-deep-research" | "openai/gpt-4o-search" | "openai/gpt-image-1-mini" | "openai/gpt-3.5-turbo" | "openai/gpt-5.2-pro" | "openai/o3-mini-high" | "openai/chatgpt-4o-latest" | "openai/gpt-4-turbo" | "openai/gpt-5.1-codex-mini" | "openai/gpt-5.1-instant" | "openai/gpt-5.1" | "openai/gpt-5-codex" | "openai/gpt-4o-aug" | "openai/o3-pro" | "openai/gpt-image-1" | "openai/gpt-5.1-codex-max" | "openai/gpt-3.5-turbo-instruct" | "openai/o4-mini-deep-research" | "openai/gpt-4-classic-0314" | "openai/dall-e-3" | "openai/sora-2-pro" | "openai/gpt-5-pro" | "openai/gpt-5.2" | "openai/gpt-4o-mini-search" | "stabilityai/stablediffusionxl" | "topazlabs-co/topazlabs" | "lumalabs/ray2" | "lumalabs/dream-machine" | "anthropic/claude-opus-3" | "anthropic/claude-opus-4" | "anthropic/claude-sonnet-3.7-reasoning" | "anthropic/claude-opus-4-search" | "anthropic/claude-sonnet-3.7" | "anthropic/claude-haiku-3.5-search" | "anthropic/claude-haiku-4.5" | "anthropic/claude-sonnet-4-reasoning" | "anthropic/claude-haiku-3" | "anthropic/claude-sonnet-3.7-search" | "anthropic/claude-opus-4-reasoning" | "anthropic/claude-sonnet-3.5" | "anthropic/claude-opus-4.5" | "anthropic/claude-haiku-3.5" | "anthropic/claude-sonnet-3.5-june" | "anthropic/claude-sonnet-4.5" | "anthropic/claude-sonnet-4-search" | "trytako/tako" | "novita/kimi-k2-thinking" | "novita/glm-4.6">;
  /**
- *
- * @see {@link https://
+ * Requesty Provider
+ * @see {@link https://requesty.ai/solution/llm-routing/models}
  * @remarks
- * - baseURL - `https://
- * - apiKey - `
+ * - baseURL - `https://router.requesty.ai/v1`
+ * - apiKey - `REQUESTY_API_KEY`
  */
- declare const
+ declare const requesty: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"google/gemini-2.5-flash" | "google/gemini-2.5-pro" | "openai/gpt-5-nano" | "openai/gpt-4.1" | "openai/gpt-5-mini" | "openai/gpt-5" | "anthropic/claude-sonnet-4" | "openai/gpt-4.1-mini" | "openai/o4-mini" | "openai/gpt-4o-mini" | "xai/grok-4" | "anthropic/claude-opus-4" | "xai/grok-4-fast" | "google/gemini-3-flash-preview" | "google/gemini-3-pro-preview" | "anthropic/claude-opus-4-1" | "anthropic/claude-haiku-4-5" | "anthropic/claude-opus-4-5" | "anthropic/claude-sonnet-4-5" | "anthropic/claude-3-7-sonnet">;
  /**
- *
- * @see {@link https://
+ * Scaleway Provider
+ * @see {@link https://www.scaleway.com/en/docs/generative-apis/}
  * @remarks
- * - baseURL - `https://api.
- * - apiKey - `
+ * - baseURL - `https://api.scaleway.ai/v1`
+ * - apiKey - `SCALEWAY_API_KEY`
  */
- declare const
+ declare const scaleway: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"qwen3-coder-30b-a3b-instruct" | "deepseek-r1-distill-llama-70b" | "gpt-oss-120b" | "llama-3.1-8b-instruct" | "llama-3.3-70b-instruct" | "mistral-nemo-instruct-2407" | "mistral-small-3.2-24b-instruct-2506" | "qwen3-235b-a22b-instruct-2507" | "pixtral-12b-2409" | "whisper-large-v3" | "voxtral-small-24b-2507" | "bge-multilingual-gemma2" | "gemma-3-27b-it">;
  /**
- *
- * @see {@link https://
+ * SiliconFlow Provider
+ * @see {@link https://cloud.siliconflow.com/models}
  * @remarks
- * - baseURL - `https://api.
- * - apiKey - `
+ * - baseURL - `https://api.siliconflow.com/v1`
+ * - apiKey - `SILICONFLOW_API_KEY`
  */
- declare const
+ declare const siliconflow: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.EmbedProvider<string> & _xsai_ext_shared_providers.SpeechProvider<string> & _xsai_ext_shared_providers.TranscriptionProvider<string> & _xsai_ext_shared_providers.ChatProvider<"qwen-qwq-32b" | "deepseek-ai-deepseek-r1-distill-qwen-7b" | "z-ai-glm-4.5-air" | "qwen-qwen2.5-72b-instruct-128k" | "deepseek-ai-deepseek-vl2" | "moonshotai-kimi-dev-72b" | "qwen-qwen2.5-coder-32b-instruct" | "qwen-qwen3-omni-30b-a3b-captioner" | "qwen-qwen3-vl-235b-a22b-thinking" | "thudm-glm-z1-9b-0414" | "qwen-qwen3-vl-30b-a3b-thinking" | "deepseek-ai-deepseek-v3.2-exp" | "qwen-qwen2.5-vl-32b-instruct" | "qwen-qwen3-235b-a22b-thinking-2507" | "qwen-qwen3-vl-32b-instruct" | "inclusionai-ling-flash-2.0" | "moonshotai-kimi-k2-instruct" | "inclusionai-ling-mini-2.0" | "qwen-qwen3-coder-480b-a35b-instruct" | "qwen-qwen3-omni-30b-a3b-instruct" | "moonshotai-kimi-k2-instruct-0905" | "qwen-qwen3-30b-a3b-thinking-2507" | "qwen-qwen3-14b" | "deepseek-ai-deepseek-r1" | "deepseek-ai-deepseek-v3.1" | "z-ai-glm-4.5" | "qwen-qwen3-30b-a3b-instruct-2507" | "zai-org-glm-4.5v" | "inclusionai-ring-flash-2.0" | "thudm-glm-z1-32b-0414" | "qwen-qwen2.5-vl-72b-instruct" | "qwen-qwen3-vl-32b-thinking" | "tencent-hunyuan-mt-7b" | "qwen-qwen3-30b-a3b" | "openai-gpt-oss-120b" | "minimaxai-minimax-m1-80k" | "deepseek-ai-deepseek-v3.1-terminus" | "zai-org-glm-4.5-air" | "thudm-glm-4-9b-0414" | "qwen-qwen3-coder-30b-a3b-instruct" | "stepfun-ai-step3" | "thudm-glm-4.1v-9b-thinking" | "qwen-qwen3-next-80b-a3b-thinking" | "qwen-qwen3-vl-235b-a22b-instruct" | "zai-org-glm-4.5" | "deepseek-ai-deepseek-r1-distill-qwen-14b" | "deepseek-ai-deepseek-v3" | "openai-gpt-oss-20b" | "qwen-qwen2.5-7b-instruct" | "qwen-qwen2.5-32b-instruct" | "minimaxai-minimax-m2" | "bytedance-seed-seed-oss-36b-instruct" | "qwen-qwen2.5-vl-7b-instruct" | "qwen-qwen3-vl-8b-thinking" | "qwen-qwen3-vl-8b-instruct" | "nex-agi-deepseek-v3.1-nex-n1" | "qwen-qwen3-8b" | "qwen-qwen2.5-72b-instruct" | "qwen-qwen3-235b-a22b" | "meta-llama-meta-llama-3.1-8b-instruct" | "qwen-qwen3-235b-a22b-instruct-2507" | "baidu-ernie-4.5-300b-a47b" | "qwen-qwen3-omni-30b-a3b-thinking" | "zai-org-glm-4.6" | "qwen-qwen3-32b" | "tencent-hunyuan-a13b-instruct" | "thudm-glm-4-32b-0414" | "deepseek-ai-deepseek-r1-distill-qwen-32b" | "qwen-qwen3-next-80b-a3b-instruct" | "qwen-qwen3-vl-30b-a3b-instruct" | "moonshotai-kimi-k2-thinking" | "qwen-qwen2.5-14b-instruct">;
  /**
- *
- * @see {@link https://
+ * SiliconFlow (China) Provider
+ * @see {@link https://cloud.siliconflow.com/models}
  * @remarks
- * - baseURL - `https://
- * - apiKey - `
+ * - baseURL - `https://api.siliconflow.cn/v1`
+ * - apiKey - `SILICONFLOW_API_KEY`
  */
- declare const
+ declare const siliconflowCn: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"qwen-qwq-32b" | "deepseek-ai-deepseek-r1-distill-qwen-7b" | "z-ai-glm-4.5-air" | "qwen-qwen2.5-72b-instruct-128k" | "deepseek-ai-deepseek-vl2" | "moonshotai-kimi-dev-72b" | "qwen-qwen2.5-coder-32b-instruct" | "qwen-qwen3-omni-30b-a3b-captioner" | "qwen-qwen3-vl-235b-a22b-thinking" | "thudm-glm-z1-9b-0414" | "qwen-qwen3-vl-30b-a3b-thinking" | "deepseek-ai-deepseek-v3.2-exp" | "qwen-qwen2.5-vl-32b-instruct" | "qwen-qwen3-235b-a22b-thinking-2507" | "qwen-qwen3-vl-32b-instruct" | "inclusionai-ling-flash-2.0" | "moonshotai-kimi-k2-instruct" | "inclusionai-ling-mini-2.0" | "qwen-qwen3-coder-480b-a35b-instruct" | "qwen-qwen3-omni-30b-a3b-instruct" | "moonshotai-kimi-k2-instruct-0905" | "qwen-qwen3-30b-a3b-thinking-2507" | "qwen-qwen3-14b" | "deepseek-ai-deepseek-r1" | "deepseek-ai-deepseek-v3.1" | "z-ai-glm-4.5" | "qwen-qwen3-30b-a3b-instruct-2507" | "zai-org-glm-4.5v" | "inclusionai-ring-flash-2.0" | "thudm-glm-z1-32b-0414" | "qwen-qwen2.5-vl-72b-instruct" | "qwen-qwen3-vl-32b-thinking" | "tencent-hunyuan-mt-7b" | "qwen-qwen3-30b-a3b" | "openai-gpt-oss-120b" | "minimaxai-minimax-m1-80k" | "deepseek-ai-deepseek-v3.1-terminus" | "zai-org-glm-4.5-air" | "thudm-glm-4-9b-0414" | "qwen-qwen3-coder-30b-a3b-instruct" | "stepfun-ai-step3" | "thudm-glm-4.1v-9b-thinking" | "qwen-qwen3-next-80b-a3b-thinking" | "qwen-qwen3-vl-235b-a22b-instruct" | "zai-org-glm-4.5" | "deepseek-ai-deepseek-r1-distill-qwen-14b" | "deepseek-ai-deepseek-v3" | "openai-gpt-oss-20b" | "qwen-qwen2.5-7b-instruct" | "qwen-qwen2.5-32b-instruct" | "minimaxai-minimax-m2" | "bytedance-seed-seed-oss-36b-instruct" | "qwen-qwen2.5-vl-7b-instruct" | "qwen-qwen3-vl-8b-thinking" | "qwen-qwen3-vl-8b-instruct" | "nex-agi-deepseek-v3.1-nex-n1" | "qwen-qwen3-8b" | "qwen-qwen2.5-72b-instruct" | "qwen-qwen3-235b-a22b" | "meta-llama-meta-llama-3.1-8b-instruct" | "qwen-qwen3-235b-a22b-instruct-2507" | "baidu-ernie-4.5-300b-a47b" | "qwen-qwen3-omni-30b-a3b-thinking" | "zai-org-glm-4.6" | "qwen-qwen3-32b" | "tencent-hunyuan-a13b-instruct" | "thudm-glm-4-32b-0414" | "deepseek-ai-deepseek-r1-distill-qwen-32b" | "qwen-qwen3-next-80b-a3b-instruct" | "qwen-qwen3-vl-30b-a3b-instruct" | "moonshotai-kimi-k2-thinking" | "qwen-qwen2.5-14b-instruct">;
  /**
  * submodel Provider
  * @see {@link https://submodel.gitbook.io}
@@ -298,101 +362,101 @@ declare const zhipuai: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shar
|
|
|
298
362
|
*/
|
|
299
363
|
declare const submodel: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"openai/gpt-oss-120b" | "Qwen/Qwen3-235B-A22B-Instruct-2507" | "Qwen/Qwen3-Coder-480B-A35B-Instruct-FP8" | "Qwen/Qwen3-235B-A22B-Thinking-2507" | "zai-org/GLM-4.5-Air" | "deepseek-ai/DeepSeek-R1-0528" | "deepseek-ai/DeepSeek-V3.1" | "deepseek-ai/DeepSeek-V3-0324" | "zai-org/GLM-4.5-FP8">;
|
|
300
364
|
/**
|
|
301
|
-
*
|
|
302
|
-
* @see {@link https://
|
|
365
|
+
* Synthetic Provider
|
|
366
|
+
* @see {@link https://synthetic.new/pricing}
|
|
303
367
|
* @remarks
|
|
304
|
-
* - baseURL - `https://api.
|
|
305
|
-
* - apiKey - `
|
|
368
|
+
* - baseURL - `https://api.synthetic.new/v1`
|
|
369
|
+
* - apiKey - `SYNTHETIC_API_KEY`
|
|
306
370
|
*/
|
|
307
|
-
declare const
|
|
371
|
+
declare const synthetic: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"hf:Qwen/Qwen3-235B-A22B-Instruct-2507" | "hf:Qwen/Qwen2.5-Coder-32B-Instruct" | "hf:Qwen/Qwen3-Coder-480B-A35B-Instruct" | "hf:Qwen/Qwen3-235B-A22B-Thinking-2507" | "hf:MiniMaxAI/MiniMax-M2" | "hf:meta-llama/Llama-3.1-70B-Instruct" | "hf:meta-llama/Llama-3.1-8B-Instruct" | "hf:meta-llama/Llama-3.3-70B-Instruct" | "hf:meta-llama/Llama-4-Scout-17B-16E-Instruct" | "hf:meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8" | "hf:meta-llama/Llama-3.1-405B-Instruct" | "hf:moonshotai/Kimi-K2-Instruct" | "hf:moonshotai/Kimi-K2-Instruct-0905" | "hf:moonshotai/Kimi-K2-Thinking" | "hf:zai-org/GLM-4.5" | "hf:zai-org/GLM-4.6" | "hf:deepseek-ai/DeepSeek-R1" | "hf:deepseek-ai/DeepSeek-R1-0528" | "hf:deepseek-ai/DeepSeek-V3.1-Terminus" | "hf:deepseek-ai/DeepSeek-V3.2" | "hf:deepseek-ai/DeepSeek-V3" | "hf:deepseek-ai/DeepSeek-V3.1" | "hf:deepseek-ai/DeepSeek-V3-0324" | "hf:openai/gpt-oss-120b">;
|
|
308
372
|
/**
|
|
309
|
-
*
|
|
310
|
-
* @see {@link https://
|
|
373
|
+
* Upstage Provider
|
|
374
|
+
* @see {@link https://developers.upstage.ai/docs/apis/chat}
|
|
311
375
|
* @remarks
|
|
312
|
-
* - baseURL - `https://
|
|
313
|
-
* - apiKey - `
|
|
376
|
+
* - baseURL - `https://api.upstage.ai`
|
|
377
|
+
* - apiKey - `UPSTAGE_API_KEY`
|
|
314
378
|
*/
|
|
315
|
-
declare const
|
|
379
|
+
declare const upstage: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"solar-mini" | "solar-pro2">;
|
|
316
380
|
/**
|
|
317
|
-
*
|
|
318
|
-
* @see {@link https://
|
|
381
|
+
* Venice AI Provider
|
|
382
|
+
* @see {@link https://docs.venice.ai}
|
|
319
383
|
* @remarks
|
|
320
|
-
* - baseURL - `https://
|
|
321
|
-
* - apiKey - `
|
|
384
|
+
* - baseURL - `https://api.venice.ai/api/v1`
|
|
385
|
+
* - apiKey - `VENICE_API_KEY`
|
|
322
386
|
*/
|
|
323
|
-
declare const
|
|
387
|
+
+declare const venice: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"qwen3-coder-480b-a35b-instruct" | "kimi-k2-thinking" | "gemini-3-flash-preview" | "gemini-3-pro-preview" | "deepseek-v3.2" | "qwen3-235b" | "qwen3-235b-a22b-thinking-2507" | "qwen3-235b-a22b-instruct-2507" | "openai-gpt-oss-120b" | "zai-org-glm-4.6" | "grok-41-fast" | "claude-opus-45" | "mistral-31-24b" | "venice-uncensored" | "openai-gpt-52" | "qwen3-4b" | "llama-3.3-70b" | "devstral-2-2512" | "llama-3.2-3b" | "google-gemma-3-27b-it" | "hermes-3-llama-3.1-405b" | "zai-org-glm-4.6v" | "qwen3-next-80b">;
 /**
- *
- * @see {@link https://
+ * Vultr Provider
+ * @see {@link https://api.vultrinference.com/}
  * @remarks
- * - baseURL - `https://api.
- * - apiKey - `
+ * - baseURL - `https://api.vultrinference.com/v1`
+ * - apiKey - `VULTR_API_KEY`
  */
-declare const
+declare const vultr: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"deepseek-r1-distill-qwen-32b" | "deepseek-r1-distill-llama-70b" | "gpt-oss-120b" | "kimi-k2-instruct" | "qwen2.5-coder-32b-instruct">;
 /**
- *
- * @see {@link https://
+ * Weights & Biases Provider
+ * @see {@link https://weave-docs.wandb.ai/guides/integrations/inference/}
  * @remarks
- * - baseURL - `
- * - apiKey - `
+ * - baseURL - `https://api.inference.wandb.ai/v1`
+ * - apiKey - `WANDB_API_KEY`
  */
-declare const
+declare const wandb: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"Qwen/Qwen3-Coder-480B-A35B-Instruct" | "Qwen/Qwen3-235B-A22B-Instruct-2507" | "Qwen/Qwen3-235B-A22B-Thinking-2507" | "deepseek-ai/DeepSeek-R1-0528" | "deepseek-ai/DeepSeek-V3-0324" | "moonshotai/Kimi-K2-Instruct" | "meta-llama/Llama-3.3-70B-Instruct" | "microsoft/Phi-4-mini-instruct" | "meta-llama/Llama-3.1-8B-Instruct" | "meta-llama/Llama-4-Scout-17B-16E-Instruct">;
 /**
- *
- * @see {@link https://
+ * xAI Provider
+ * @see {@link https://docs.x.ai/docs/models}
  * @remarks
- * - baseURL - `https://api.
- * - apiKey - `
+ * - baseURL - `https://api.x.ai/v1/`
+ * - apiKey - `XAI_API_KEY`
  */
-declare const
+declare const xai: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"grok-code-fast-1" | "grok-4-fast-non-reasoning" | "grok-4" | "grok-3" | "grok-3-mini" | "grok-4-1-fast-non-reasoning" | "grok-3-fast" | "grok-2-vision" | "grok-2" | "grok-3-mini-fast-latest" | "grok-2-vision-1212" | "grok-4-fast" | "grok-2-latest" | "grok-4-1-fast" | "grok-2-1212" | "grok-3-fast-latest" | "grok-3-latest" | "grok-2-vision-latest" | "grok-vision-beta" | "grok-beta" | "grok-3-mini-latest" | "grok-3-mini-fast">;
 /**
- *
- * @see {@link https://
+ * Xiaomi Provider
+ * @see {@link https://platform.xiaomimimo.com/#/docs}
  * @remarks
- * - baseURL - `https://api
- * - apiKey - `
+ * - baseURL - `https://api.xiaomimimo.com/v1`
+ * - apiKey - `XIAOMI_API_KEY`
  */
-declare const
+declare const xiaomi: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"mimo-v2-flash">;
 /**
- *
- * @see {@link https://
+ * Z.AI Provider
+ * @see {@link https://docs.z.ai/guides/overview/pricing}
  * @remarks
- * - baseURL - `https://api.
- * - apiKey - `
+ * - baseURL - `https://api.z.ai/api/paas/v4`
+ * - apiKey - `ZHIPU_API_KEY`
  */
-declare const
+declare const zai: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"glm-4.6" | "glm-4.5-flash" | "glm-4.5" | "glm-4.5-air" | "glm-4.5v" | "glm-4.6v">;
 /**
- *
- * @see {@link https://
+ * Z.AI Coding Plan Provider
+ * @see {@link https://docs.z.ai/devpack/overview}
  * @remarks
- * - baseURL - `https://api.
- * - apiKey - `
+ * - baseURL - `https://api.z.ai/api/coding/paas/v4`
+ * - apiKey - `ZHIPU_API_KEY`
  */
-declare const
+declare const zaiCodingPlan: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"glm-4.6" | "glm-4.5-flash" | "glm-4.5" | "glm-4.5-air" | "glm-4.5v" | "glm-4.6v">;
 /**
- *
- * @see {@link https://
+ * ZenMux Provider
+ * @see {@link https://docs.zenmux.ai}
  * @remarks
- * - baseURL - `https://
- * - apiKey - `
+ * - baseURL - `https://zenmux.ai/api/v1`
+ * - apiKey - `ZENMUX_API_KEY`
  */
-declare const
+declare const zenmux: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"x-ai/grok-4" | "google/gemini-2.5-pro" | "openai/gpt-5" | "anthropic/claude-opus-4.1" | "moonshotai/kimi-k2-thinking" | "openai/gpt-5-codex" | "anthropic/claude-haiku-4.5" | "anthropic/claude-sonnet-4.5" | "moonshotai/kimi-k2-thinking-turbo" | "moonshotai/kimi-k2-0905" | "xiaomi/mimo-v2-flash" | "x-ai/grok-4-fast-non-reasoning" | "x-ai/grok-code-fast-1" | "x-ai/grok-4-fast" | "deepseek/deepseek-chat" | "minimax/minimax-m2" | "inclusionai/ring-1t" | "inclusionai/lint-1t" | "z-ai/glm-4.5-air" | "z-ai/glm-4.6" | "qwen/qwen3-coder-plus" | "kuaishou/kat-coder-pro-v1">;
 /**
- *
- * @see {@link https://
+ * Zhipu AI Provider
+ * @see {@link https://docs.z.ai/guides/overview/pricing}
  * @remarks
- * - baseURL - `https://
- * - apiKey - `
+ * - baseURL - `https://open.bigmodel.cn/api/paas/v4`
+ * - apiKey - `ZHIPU_API_KEY`
  */
-declare const
+declare const zhipuai: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"glm-4.6" | "glm-4.5-flash" | "glm-4.5" | "glm-4.5-air" | "glm-4.5v" | "glm-4.6v" | "glm-4.6v-flash">;
 /**
- *
- * @see {@link https://
+ * Zhipu AI Coding Plan Provider
+ * @see {@link https://docs.bigmodel.cn/cn/coding-plan/overview}
  * @remarks
- * - baseURL - `https://
- * - apiKey - `
+ * - baseURL - `https://open.bigmodel.cn/api/coding/paas/v4`
+ * - apiKey - `ZHIPU_API_KEY`
  */
-declare const
+declare const zhipuaiCodingPlan: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"glm-4.6" | "glm-4.5-flash" | "glm-4.5" | "glm-4.5-air" | "glm-4.5v" | "glm-4.6v" | "glm-4.6v-flash">;
 /**
  * Novita AI Provider
  * @see {@link https://novita.ai/docs/guides/llm-api#api-integration}
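The hunk above only pins down each provider's model union, default baseURL, and API-key environment variable. A minimal usage sketch for one of them (xai), assuming the usual xsAI pattern in which the exported provider reads its key from the environment and `.chat(model)` returns the options object you spread into `generateText` from the `xsai` package — that call pattern is an assumption, not something this diff documents:

```ts
// Sketch only: assumes `xai` reads XAI_API_KEY from the environment and that
// .chat(model) yields { apiKey, baseURL, model } options for generateText.
import { generateText } from 'xsai'
import { xai } from '@xsai-ext/providers'

const { text } = await generateText({
  ...xai.chat('grok-4-fast'), // one of the model ids from the union above
  messages: [{ role: 'user', content: 'Hello from xsAI' }],
})

console.log(text)
```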
@@ -401,14 +465,6 @@ declare const minimaxi: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_sha
  * - apiKey - `NOVITA_API_KEY`
  */
 declare const novita: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<string>;
-/**
- * SiliconFlow Provider
- * @see {@link https://docs.siliconflow.com/en/userguide/quickstart#4-3-call-via-openai-interface}
- * @remarks
- * - baseURL - `https://api.siliconflow.cn/v1/`
- * - apiKey - `SILICON_FLOW_API_KEY`
- */
-declare const siliconFlow: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.EmbedProvider<string> & _xsai_ext_shared_providers.SpeechProvider<string> & _xsai_ext_shared_providers.TranscriptionProvider<string> & _xsai_ext_shared_providers.ChatProvider<string>;
 /**
  * StepFun Provider
  * @see {@link https://www.stepfun.com}
@@ -425,6 +481,22 @@ declare const stepfun: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shar
  * - apiKey - `TENCENT_HUNYUAN_API_KEY`
  */
 declare const tencentHunyuan: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.EmbedProvider<string> & _xsai_ext_shared_providers.ChatProvider<string>;
+/**
+ * Ollama Provider
+ * @see {@link https://docs.ollama.com}
+ * @remarks
+ * - baseURL - `http://localhost:11434/v1/`
+ * - apiKey - `OLLAMA_API_KEY`
+ */
+declare const ollama: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.EmbedProvider<string> & _xsai_ext_shared_providers.ChatProvider<string>;
+/**
+ * LiteLLM Provider
+ * @see {@link https://docs.litellm.ai}
+ * @remarks
+ * - baseURL - `http://localhost:4000/v1/`
+ * - apiKey - `LITELLM_API_KEY`
+ */
+declare const litellm: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.EmbedProvider<string> & _xsai_ext_shared_providers.SpeechProvider<string> & _xsai_ext_shared_providers.TranscriptionProvider<string> & _xsai_ext_shared_providers.ChatProvider<string>;
 
 /**
  * Anthropic Provider
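The new ollama and litellm entries are the first providers in this file whose default baseURL is a localhost endpoint, and both are typed as ChatProvider<string>, so any model id is accepted. A sketch of pointing a request at a local Ollama server, under the same assumed `.chat(model)` spread pattern as above:

```ts
// Sketch only: assumes `ollama` is preconfigured for http://localhost:11434/v1/
// as the declaration documents; the model id is whatever you have pulled locally.
import { generateText } from 'xsai'
import { ollama } from '@xsai-ext/providers'

const { text } = await generateText({
  ...ollama.chat('llama3.2'),
  messages: [{ role: 'user', content: 'Reply with one word: ready?' }],
})
```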
@@ -433,7 +505,7 @@ declare const tencentHunyuan: _xsai_ext_shared_providers.ModelProvider & _xsai_e
  * - baseURL - `https://api.anthropic.com/v1/`
  * - apiKey - `ANTHROPIC_API_KEY`
  */
-declare const anthropic: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.
+declare const anthropic: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProviderWithExtraOptions<AnthropicModels, AnthropicOptions>;
 /**
  * Featherless AI Provider
  * @see {@link https://featherless.ai/models}
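The only change in this hunk is anthropic moving from a plain ChatProvider to ChatProviderWithExtraOptions<AnthropicModels, AnthropicOptions>. A type-level sketch of what that presumably enables — an optional, Anthropic-specific second argument to `.chat()` — with the caveat that the members of AnthropicModels and AnthropicOptions live in anthropic-B6NN_uBS.d.ts and are not spelled out here:

```ts
// Sketch only: assumes ChatProviderWithExtraOptions adds an optional second
// parameter to .chat(); the model id and options value below are placeholders.
import { anthropic } from '@xsai-ext/providers'

type Model = Parameters<typeof anthropic.chat>[0]
type Extra = Parameters<typeof anthropic.chat>[1]

const model = 'claude-sonnet-4-5' as Model // placeholder id, not taken from this diff
const extra = {} as Extra                  // Anthropic-specific options would go here

const options = anthropic.chat(model, extra)
```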
@@ -458,5 +530,9 @@ declare const openrouter: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_s
  * - apiKey - `TOGETHER_API_KEY`
  */
 declare const togetherai: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.EmbedProvider<string> & _xsai_ext_shared_providers.ChatProvider<TogetheraiModels>;
+/** @deprecated use `siliconflow` instead. */
+declare const siliconFlow: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.EmbedProvider<string> & _xsai_ext_shared_providers.SpeechProvider<string> & _xsai_ext_shared_providers.TranscriptionProvider<string> & _xsai_ext_shared_providers.ChatProvider<"qwen-qwq-32b" | "deepseek-ai-deepseek-r1-distill-qwen-7b" | "z-ai-glm-4.5-air" | "qwen-qwen2.5-72b-instruct-128k" | "deepseek-ai-deepseek-vl2" | "moonshotai-kimi-dev-72b" | "qwen-qwen2.5-coder-32b-instruct" | "qwen-qwen3-omni-30b-a3b-captioner" | "qwen-qwen3-vl-235b-a22b-thinking" | "thudm-glm-z1-9b-0414" | "qwen-qwen3-vl-30b-a3b-thinking" | "deepseek-ai-deepseek-v3.2-exp" | "qwen-qwen2.5-vl-32b-instruct" | "qwen-qwen3-235b-a22b-thinking-2507" | "qwen-qwen3-vl-32b-instruct" | "inclusionai-ling-flash-2.0" | "moonshotai-kimi-k2-instruct" | "inclusionai-ling-mini-2.0" | "qwen-qwen3-coder-480b-a35b-instruct" | "qwen-qwen3-omni-30b-a3b-instruct" | "moonshotai-kimi-k2-instruct-0905" | "qwen-qwen3-30b-a3b-thinking-2507" | "qwen-qwen3-14b" | "deepseek-ai-deepseek-r1" | "deepseek-ai-deepseek-v3.1" | "z-ai-glm-4.5" | "qwen-qwen3-30b-a3b-instruct-2507" | "zai-org-glm-4.5v" | "inclusionai-ring-flash-2.0" | "thudm-glm-z1-32b-0414" | "qwen-qwen2.5-vl-72b-instruct" | "qwen-qwen3-vl-32b-thinking" | "tencent-hunyuan-mt-7b" | "qwen-qwen3-30b-a3b" | "openai-gpt-oss-120b" | "minimaxai-minimax-m1-80k" | "deepseek-ai-deepseek-v3.1-terminus" | "zai-org-glm-4.5-air" | "thudm-glm-4-9b-0414" | "qwen-qwen3-coder-30b-a3b-instruct" | "stepfun-ai-step3" | "thudm-glm-4.1v-9b-thinking" | "qwen-qwen3-next-80b-a3b-thinking" | "qwen-qwen3-vl-235b-a22b-instruct" | "zai-org-glm-4.5" | "deepseek-ai-deepseek-r1-distill-qwen-14b" | "deepseek-ai-deepseek-v3" | "openai-gpt-oss-20b" | "qwen-qwen2.5-7b-instruct" | "qwen-qwen2.5-32b-instruct" | "minimaxai-minimax-m2" | "bytedance-seed-seed-oss-36b-instruct" | "qwen-qwen2.5-vl-7b-instruct" | "qwen-qwen3-vl-8b-thinking" | "qwen-qwen3-vl-8b-instruct" | "nex-agi-deepseek-v3.1-nex-n1" | "qwen-qwen3-8b" | "qwen-qwen2.5-72b-instruct" | "qwen-qwen3-235b-a22b" | "meta-llama-meta-llama-3.1-8b-instruct" | "qwen-qwen3-235b-a22b-instruct-2507" | "baidu-ernie-4.5-300b-a47b" | "qwen-qwen3-omni-30b-a3b-thinking" | "zai-org-glm-4.6" | "qwen-qwen3-32b" | "tencent-hunyuan-a13b-instruct" | "thudm-glm-4-32b-0414" | "deepseek-ai-deepseek-r1-distill-qwen-32b" | "qwen-qwen3-next-80b-a3b-instruct" | "qwen-qwen3-vl-30b-a3b-instruct" | "moonshotai-kimi-k2-thinking" | "qwen-qwen2.5-14b-instruct">;
+/** @deprecated use `minimaxCn` instead. */
+declare const minimaxi: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"MiniMax-M2">;
 
-export { alibaba, alibabaCn, anthropic, baseten, cerebras, chutes, cortecs, deepinfra, deepseek, fastrouter, featherless, fireworks, githubCopilot, githubModels, google, groq, huggingface, iflowcn, inception, inference, llama, lmstudio, lucidquery, minimax, minimaxi, mistral, modelscope, moonshotai, moonshotaiCn, morph, nebius, novita, nvidia, openai, opencode, openrouter, ovhcloud, perplexity, poe, requesty, scaleway, siliconFlow, stepfun, submodel, synthetic, tencentHunyuan, togetherai, upstage, venice, vultr, wandb, xai, zai, zaiCodingPlan, zenmux, zhipuai, zhipuaiCodingPlan };
+export { alibaba, alibabaCn, anthropic, bailing, baseten, cerebras, chutes, cortecs, deepinfra, deepseek, fastrouter, featherless, fireworks, githubCopilot, githubModels, google, groq, helicone, huggingface, iflowcn, inception, inference, ioNet, kimiForCoding, litellm, llama, lmstudio, lucidquery, minimax, minimaxCn, minimaxi, mistral, modelscope, moonshotai, moonshotaiCn, morph, nebius, novita, nvidia, ollama, ollamaCloud, openai, opencode, openrouter, ovhcloud, perplexity, poe, requesty, scaleway, siliconFlow, siliconflow, siliconflowCn, stepfun, submodel, synthetic, tencentHunyuan, togetherai, upstage, venice, vultr, wandb, xai, xiaomi, zai, zaiCodingPlan, zenmux, zhipuai, zhipuaiCodingPlan };
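The final hunk keeps the old siliconFlow and minimaxi exports but marks them @deprecated in favour of siliconflow and minimaxCn, both of which appear in the new export list. A migration sketch — only the export names and deprecation notices come from this diff; the `.chat()` usage pattern and the siliconflow model id are assumptions:

```ts
// Before (still compiles, but now flagged by the @deprecated JSDoc):
// import { siliconFlow, minimaxi } from '@xsai-ext/providers'

// After — the replacements named in the deprecation notices:
import { siliconflow, minimaxCn } from '@xsai-ext/providers'

const sf = siliconflow.chat('deepseek-ai-deepseek-v3') // id from the deprecated union; verify against the new one
const mm = minimaxCn.chat('MiniMax-M2')                // the model the deprecated `minimaxi` exposed
```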