@xsai-ext/providers 0.4.0-beta.8

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,446 @@
1
+ import * as _xsai_ext_shared_providers from '@xsai-ext/shared-providers';
2
+ import { A as AnthropicModels, O as OpenrouterModels, a as OpenRouterOptions, T as TogetheraiModels } from './openrouter-DCc0QhRt.js';
3
+
4
+ /**
5
+ * Moonshot AI (China) Provider
6
+ * @see {@link https://platform.moonshot.cn/docs/api/chat}
7
+ * @remarks
8
+ * - baseURL - `https://api.moonshot.cn/v1`
9
+ * - apiKey - `MOONSHOT_API_KEY`
10
+ */
11
+ declare const moonshotaiCn: _xsai_ext_shared_providers.ChatProvider<"kimi-k2-0905-preview" | "kimi-k2-0711-preview" | "kimi-k2-turbo-preview"> & _xsai_ext_shared_providers.ModelProvider;
12
+ /**
13
+ * LucidQuery AI Provider
14
+ * @see {@link https://lucidquery.com/api/docs}
15
+ * @remarks
16
+ * - baseURL - `https://lucidquery.com/api/v1`
17
+ * - apiKey - `LUCIDQUERY_API_KEY`
18
+ */
19
+ declare const lucidquery: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"lucidquery-nexus-coder" | "lucidnova-rf1-100b">;
20
+ /**
21
+ * Moonshot AI Provider
22
+ * @see {@link https://platform.moonshot.ai/docs/api/chat}
23
+ * @remarks
24
+ * - baseURL - `https://api.moonshot.ai/v1`
25
+ * - apiKey - `MOONSHOT_API_KEY`
26
+ */
27
+ declare const moonshotai: _xsai_ext_shared_providers.ChatProvider<"kimi-k2-0905-preview" | "kimi-k2-0711-preview" | "kimi-k2-turbo-preview"> & _xsai_ext_shared_providers.ModelProvider;
28
+ /**
29
+ * Z.AI Coding Plan Provider
30
+ * @see {@link https://docs.z.ai/devpack/overview}
31
+ * @remarks
32
+ * - baseURL - `https://api.z.ai/api/coding/paas/v4`
33
+ * - apiKey - `ZHIPU_API_KEY`
34
+ */
35
+ declare const zaiCodingPlan: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"glm-4.5-flash" | "glm-4.5" | "glm-4.5-air" | "glm-4.5v" | "glm-4.6">;
36
+ /**
37
+ * Alibaba Provider
38
+ * @see {@link https://www.alibabacloud.com/help/en/model-studio/models}
39
+ * @remarks
40
+ * - baseURL - `https://dashscope-intl.aliyuncs.com/compatible-mode/v1`
41
+ * - apiKey - `DASHSCOPE_API_KEY`
42
+ */
43
+ declare const alibaba: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"qwen3-livetranslate-flash-realtime" | "qwen3-asr-flash" | "qwen-omni-turbo" | "qwen-vl-max" | "qwen3-next-80b-a3b-instruct" | "qwen-turbo" | "qwen3-vl-235b-a22b" | "qwen3-coder-flash" | "qwen3-vl-30b-a3b" | "qwen3-14b" | "qvq-max" | "qwen-plus-character-ja" | "qwen2-5-14b-instruct" | "qwq-plus" | "qwen3-coder-30b-a3b-instruct" | "qwen-vl-ocr" | "qwen2-5-72b-instruct" | "qwen3-omni-flash" | "qwen-flash" | "qwen3-8b" | "qwen3-omni-flash-realtime" | "qwen2-5-vl-72b-instruct" | "qwen3-vl-plus" | "qwen-plus" | "qwen2-5-32b-instruct" | "qwen2-5-omni-7b" | "qwen-max" | "qwen2-5-7b-instruct" | "qwen2-5-vl-7b-instruct" | "qwen3-235b-a22b" | "qwen-omni-turbo-realtime" | "qwen-mt-turbo" | "qwen3-coder-480b-a35b-instruct" | "qwen-mt-plus" | "qwen3-max" | "qwen3-coder-plus" | "qwen3-next-80b-a3b-thinking" | "qwen3-32b" | "qwen-vl-plus">;
44
/**
 * xAI Provider
 * @see {@link https://docs.x.ai/docs/models}
 * @remarks
 * - baseURL - `https://api.x.ai/v1/`
 * - apiKey - `XAI_API_KEY`
 */
declare const xai: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"grok-4-fast-non-reasoning" | "grok-3-fast" | "grok-4" | "grok-2-vision" | "grok-code-fast-1" | "grok-2" | "grok-3-mini-fast-latest" | "grok-2-vision-1212" | "grok-3" | "grok-4-fast" | "grok-2-latest" | "grok-2-1212" | "grok-3-fast-latest" | "grok-3-latest" | "grok-2-vision-latest" | "grok-vision-beta" | "grok-3-mini" | "grok-beta" | "grok-3-mini-latest" | "grok-3-mini-fast">;
/**
 * Vultr Provider
 * @see {@link https://api.vultrinference.com/}
 * @remarks
 * - baseURL - `https://api.vultrinference.com/v1`
 * - apiKey - `VULTR_API_KEY`
 */
declare const vultr: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"deepseek-r1-distill-qwen-32b" | "qwen2.5-coder-32b-instruct" | "kimi-k2-instruct" | "deepseek-r1-distill-llama-70b" | "gpt-oss-120b">;
/**
 * Nvidia Provider
 * @see {@link https://docs.api.nvidia.com/nim/}
 * @remarks
 * - baseURL - `https://integrate.api.nvidia.com/v1`
 * - apiKey - `NVIDIA_API_KEY`
 */
declare const nvidia: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"moonshotai/kimi-k2-instruct-0905" | "moonshotai/kimi-k2-instruct" | "nvidia/cosmos-nemotron-34b" | "nvidia/llama-embed-nemotron-8b" | "nvidia/parakeet-tdt-0.6b-v2" | "nvidia/nemoretriever-ocr-v1" | "nvidia/llama-3.1-nemotron-ultra-253b-v1" | "minimaxai/minimax-m2" | "google/gemma-3-27b-it" | "microsoft/phi-4-mini-instruct" | "openai/whisper-large-v3" | "openai/gpt-oss-120b" | "qwen/qwen3-235b-a22b" | "qwen/qwen3-coder-480b-a35b-instruct" | "deepseek-ai/deepseek-v3.1-terminus" | "deepseek-ai/deepseek-v3.1" | "black-forest-labs/flux.1-dev">;
/**
 * Upstage Provider
 * @see {@link https://developers.upstage.ai/docs/apis/chat}
 * @remarks
 * - baseURL - `https://api.upstage.ai`
 * - apiKey - `UPSTAGE_API_KEY`
 */
declare const upstage: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"solar-mini" | "solar-pro2">;
/**
 * Groq Provider
 * @see {@link https://console.groq.com/docs/models}
 * @remarks
 * - baseURL - `https://api.groq.com/openai/v1/`
 * - apiKey - `GROQ_API_KEY`
 */
declare const groq: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"deepseek-r1-distill-llama-70b" | "moonshotai/kimi-k2-instruct-0905" | "moonshotai/kimi-k2-instruct" | "openai/gpt-oss-120b" | "llama-3.1-8b-instant" | "mistral-saba-24b" | "llama3-8b-8192" | "qwen-qwq-32b" | "llama3-70b-8192" | "llama-guard-3-8b" | "gemma2-9b-it" | "llama-3.3-70b-versatile" | "openai/gpt-oss-20b" | "qwen/qwen3-32b" | "meta-llama/llama-4-scout-17b-16e-instruct" | "meta-llama/llama-4-maverick-17b-128e-instruct" | "meta-llama/llama-guard-4-12b"> & _xsai_ext_shared_providers.EmbedProvider<string>;
84
/**
 * GitHub Copilot Provider
 * @see {@link https://docs.github.com/en/copilot}
 * @remarks
 * - baseURL - `https://api.githubcopilot.com`
 * - apiKey - `GITHUB_TOKEN`
 */
// NOTE(review): "claude-opus-41" (no dot) sits alongside "claude-opus-4" — looks like a
// typo for "claude-opus-4.1"; verify against the Copilot model list before changing.
declare const githubCopilot: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"grok-code-fast-1" | "gemini-2.0-flash-001" | "claude-opus-4" | "claude-haiku-4.5" | "claude-3.5-sonnet" | "o3-mini" | "gpt-5-codex" | "gpt-4o" | "gpt-4.1" | "o4-mini" | "claude-opus-41" | "gpt-5-mini" | "claude-3.7-sonnet" | "gemini-2.5-pro" | "o3" | "claude-sonnet-4" | "gpt-5" | "claude-3.7-sonnet-thought" | "claude-sonnet-4.5">;
/**
 * Mistral Provider
 * @see {@link https://docs.mistral.ai/getting-started/models/}
 * @remarks
 * - baseURL - `https://api.mistral.ai/v1/`
 * - apiKey - `MISTRAL_API_KEY`
 */
declare const mistral: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.EmbedProvider<string> & _xsai_ext_shared_providers.ChatProvider<"devstral-medium-2507" | "open-mixtral-8x22b" | "ministral-8b-latest" | "pixtral-large-latest" | "ministral-3b-latest" | "pixtral-12b" | "mistral-medium-2505" | "devstral-small-2505" | "mistral-medium-2508" | "mistral-small-latest" | "magistral-small" | "devstral-small-2507" | "codestral-latest" | "open-mixtral-8x7b" | "mistral-nemo" | "open-mistral-7b" | "mistral-large-latest" | "mistral-medium-latest" | "magistral-medium-latest">;
/**
 * Nebius AI Studio Provider
 * @see {@link https://docs.studio.nebius.com/quickstart}
 * @remarks
 * - baseURL - `https://api.studio.nebius.com/v1/`
 * - apiKey - `NEBIUS_API_KEY`
 */
declare const nebius: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"moonshotai/kimi-k2-instruct" | "openai/gpt-oss-120b" | "qwen/qwen3-coder-480b-a35b-instruct" | "openai/gpt-oss-20b" | "NousResearch/hermes-4-70b" | "NousResearch/hermes-4-405b" | "nvidia/llama-3_1-nemotron-ultra-253b-v1" | "qwen/qwen3-235b-a22b-instruct-2507" | "qwen/qwen3-235b-a22b-thinking-2507" | "meta-llama/llama-3_1-405b-instruct" | "meta-llama/llama-3.3-70b-instruct-fast" | "meta-llama/llama-3.3-70b-instruct-base" | "zai-org/glm-4.5" | "zai-org/glm-4.5-air" | "deepseek-ai/deepseek-v3">;
/**
 * DeepSeek Provider
 * @see {@link https://platform.deepseek.com/api-docs/pricing}
 * @remarks
 * - baseURL - `https://api.deepseek.com`
 * - apiKey - `DEEPSEEK_API_KEY`
 */
declare const deepseek: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"deepseek-chat" | "deepseek-reasoner">;
/**
 * Alibaba (China) Provider
 * @see {@link https://www.alibabacloud.com/help/en/model-studio/models}
 * @remarks
 * - baseURL - `https://dashscope.aliyuncs.com/compatible-mode/v1`
 * - apiKey - `DASHSCOPE_API_KEY`
 */
declare const alibabaCn: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"qwen3-asr-flash" | "qwen-omni-turbo" | "qwen-vl-max" | "qwen3-next-80b-a3b-instruct" | "qwen-turbo" | "qwen3-vl-235b-a22b" | "qwen3-coder-flash" | "qwen3-vl-30b-a3b" | "qwen3-14b" | "qvq-max" | "qwen2-5-14b-instruct" | "qwq-plus" | "qwen3-coder-30b-a3b-instruct" | "qwen-vl-ocr" | "qwen2-5-72b-instruct" | "qwen3-omni-flash" | "qwen-flash" | "qwen3-8b" | "qwen3-omni-flash-realtime" | "qwen2-5-vl-72b-instruct" | "qwen3-vl-plus" | "qwen-plus" | "qwen2-5-32b-instruct" | "qwen2-5-omni-7b" | "qwen-max" | "qwen2-5-7b-instruct" | "qwen2-5-vl-7b-instruct" | "qwen3-235b-a22b" | "qwen-omni-turbo-realtime" | "qwen-mt-turbo" | "qwen3-coder-480b-a35b-instruct" | "qwen-mt-plus" | "qwen3-max" | "qwen3-coder-plus" | "qwen3-next-80b-a3b-thinking" | "qwen3-32b" | "qwen-vl-plus" | "deepseek-r1-distill-qwen-32b" | "deepseek-r1-distill-llama-70b" | "deepseek-r1-distill-qwen-7b" | "deepseek-r1-0528" | "deepseek-v3" | "deepseek-v3-2-exp" | "deepseek-r1" | "qwen-plus-character" | "qwen2-5-coder-32b-instruct" | "qwen-math-plus" | "qwen-doc-turbo" | "qwen-deep-research" | "qwen-long" | "qwen2-5-math-72b-instruct" | "moonshot-kimi-k2-instruct" | "tongyi-intent-detect-v3" | "deepseek-v3-1" | "qwen2-5-coder-7b-instruct" | "deepseek-r1-distill-qwen-14b" | "qwen-math-turbo" | "deepseek-r1-distill-llama-8b" | "qwq-32b" | "qwen2-5-math-7b-instruct" | "deepseek-r1-distill-qwen-1-5b">;
124
/**
 * Venice AI Provider
 * @see {@link https://docs.venice.ai}
 * @remarks
 * - baseURL - `https://api.venice.ai/api/v1`
 * - apiKey - `VENICE_API_KEY`
 */
declare const venice: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"dolphin-2.9.2-qwen2-72b" | "mistral-31-24b" | "venice-uncensored" | "qwen-2.5-vl" | "qwen3-235b" | "qwen-2.5-qwq-32b" | "deepseek-coder-v2-lite" | "qwen3-4b" | "llama-3.3-70b" | "qwen-2.5-coder-32b" | "deepseek-r1-671b" | "llama-3.2-3b" | "llama-3.1-405b">;
/**
 * Chutes Provider
 * @see {@link https://llm.chutes.ai/v1/models}
 * @remarks
 * - baseURL - `https://llm.chutes.ai/v1`
 * - apiKey - `CHUTES_API_KEY`
 */
declare const chutes: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"openai/gpt-oss-120b" | "moonshotai/Kimi-K2-Instruct-75k" | "moonshotai/Kimi-K2-Instruct-0905" | "moonshotai/Kimi-VL-A3B-Thinking" | "MiniMaxAI/MiniMax-M2" | "meituan-longcat/LongCat-Flash-Chat-FP8" | "tngtech/DeepSeek-R1T-Chimera" | "tngtech/DeepSeek-TNG-R1T2-Chimera" | "chutesai/Mistral-Small-3.2-24B-Instruct-2506" | "Qwen/Qwen3-30B-A3B" | "Qwen/Qwen3-235B-A22B-Instruct-2507" | "Qwen/Qwen3-Coder-30B-A3B-Instruct" | "Qwen/Qwen3-Coder-480B-A35B-Instruct-FP8" | "Qwen/Qwen3-30B-A3B-Instruct-2507" | "Qwen/Qwen3-235B-A22B-Thinking-2507" | "Qwen/Qwen3-Next-80B-A3B-Instruct" | "Qwen/Qwen3-Next-80B-A3B-Thinking" | "zai-org/GLM-4.5" | "zai-org/GLM-4.6-FP8" | "zai-org/GLM-4.6-turbo" | "zai-org/GLM-4.6" | "zai-org/GLM-4.5-FP8" | "zai-org/GLM-4.5-Air" | "deepseek-ai/DeepSeek-R1-0528-Qwen3-8B" | "deepseek-ai/DeepSeek-R1-0528" | "deepseek-ai/DeepSeek-V3.2-Exp" | "deepseek-ai/DeepSeek-V3.1-Terminus" | "deepseek-ai/DeepSeek-V3.1:THINKING" | "deepseek-ai/DeepSeek-R1-Distill-Llama-70B" | "deepseek-ai/DeepSeek-V3.1" | "deepseek-ai/DeepSeek-V3-0324">;
/**
 * Cortecs Provider
 * @see {@link https://api.cortecs.ai/v1/models}
 * @remarks
 * - baseURL - `https://api.cortecs.ai/v1`
 * - apiKey - `CORTECS_API_KEY`
 */
declare const cortecs: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"qwen3-coder-480b-a35b-instruct" | "qwen3-32b" | "kimi-k2-instruct" | "gpt-oss-120b" | "gpt-4.1" | "gemini-2.5-pro" | "claude-sonnet-4" | "nova-pro-v1" | "claude-4-5-sonnet" | "deepseek-v3-0324" | "llama-3.1-405b-instruct">;
/**
 * GitHub Models Provider
 * @see {@link https://docs.github.com/en/github-models}
 * @remarks
 * - baseURL - `https://models.github.ai/inference`
 * - apiKey - `GITHUB_TOKEN`
 */
declare const githubModels: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"microsoft/phi-4-mini-instruct" | "core42/jais-30b-chat" | "xai/grok-3" | "xai/grok-3-mini" | "cohere/cohere-command-r-08-2024" | "cohere/cohere-command-a" | "cohere/cohere-command-r-plus-08-2024" | "cohere/cohere-command-r" | "cohere/cohere-command-r-plus" | "deepseek/deepseek-r1-0528" | "deepseek/deepseek-r1" | "deepseek/deepseek-v3-0324" | "mistral-ai/mistral-medium-2505" | "mistral-ai/ministral-3b" | "mistral-ai/mistral-nemo" | "mistral-ai/mistral-large-2411" | "mistral-ai/codestral-2501" | "mistral-ai/mistral-small-2503" | "microsoft/phi-3-medium-128k-instruct" | "microsoft/phi-3-mini-4k-instruct" | "microsoft/phi-3-small-128k-instruct" | "microsoft/phi-3.5-vision-instruct" | "microsoft/phi-4" | "microsoft/phi-4-mini-reasoning" | "microsoft/phi-3-small-8k-instruct" | "microsoft/phi-3.5-mini-instruct" | "microsoft/phi-4-multimodal-instruct" | "microsoft/phi-3-mini-128k-instruct" | "microsoft/phi-3.5-moe-instruct" | "microsoft/phi-3-medium-4k-instruct" | "microsoft/phi-4-reasoning" | "microsoft/mai-ds-r1" | "openai/gpt-4.1-nano" | "openai/gpt-4.1-mini" | "openai/o1-preview" | "openai/o3-mini" | "openai/gpt-4o" | "openai/gpt-4.1" | "openai/o4-mini" | "openai/o1" | "openai/o1-mini" | "openai/o3" | "openai/gpt-4o-mini" | "meta/llama-3.2-11b-vision-instruct" | "meta/meta-llama-3.1-405b-instruct" | "meta/llama-4-maverick-17b-128e-instruct-fp8" | "meta/meta-llama-3-70b-instruct" | "meta/meta-llama-3.1-70b-instruct" | "meta/llama-3.3-70b-instruct" | "meta/llama-3.2-90b-vision-instruct" | "meta/meta-llama-3-8b-instruct" | "meta/llama-4-scout-17b-16e-instruct" | "meta/meta-llama-3.1-8b-instruct" | "ai21-labs/ai21-jamba-1.5-large" | "ai21-labs/ai21-jamba-1.5-mini">;
/**
 * Baseten Provider
 * @see {@link https://docs.baseten.co/development/model-apis/overview}
 * @remarks
 * - baseURL - `https://inference.baseten.co/v1`
 * - apiKey - `BASETEN_API_KEY`
 */
declare const baseten: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"moonshotai/Kimi-K2-Instruct-0905" | "zai-org/GLM-4.6" | "Qwen3/Qwen3-Coder-480B-A35B-Instruct">;
164
/**
 * Hugging Face Provider
 * @see {@link https://huggingface.co/docs/inference-providers}
 * @remarks
 * - baseURL - `https://router.huggingface.co/v1`
 * - apiKey - `HF_TOKEN`
 */
declare const huggingface: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"moonshotai/Kimi-K2-Instruct-0905" | "MiniMaxAI/MiniMax-M2" | "Qwen/Qwen3-235B-A22B-Thinking-2507" | "Qwen/Qwen3-Next-80B-A3B-Instruct" | "Qwen/Qwen3-Next-80B-A3B-Thinking" | "zai-org/GLM-4.5" | "zai-org/GLM-4.6" | "zai-org/GLM-4.5-Air" | "deepseek-ai/DeepSeek-R1-0528" | "moonshotai/Kimi-K2-Instruct" | "Qwen/Qwen3-Embedding-8B" | "Qwen/Qwen3-Embedding-4B" | "Qwen/Qwen3-Coder-480B-A35B-Instruct" | "deepseek-ai/Deepseek-V3-0324">;
/**
 * OpenCode Zen Provider
 * @see {@link https://opencode.ai/docs/zen}
 * @remarks
 * - baseURL - `https://opencode.ai/zen/v1`
 * - apiKey - `OPENCODE_API_KEY`
 */
declare const opencode: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"glm-4.6" | "gpt-5-codex" | "claude-sonnet-4" | "gpt-5" | "qwen3-coder" | "claude-opus-4-1" | "kimi-k2" | "claude-haiku-4-5" | "minimax-m2" | "claude-sonnet-4-5" | "an-gbt" | "big-pickle" | "claude-3-5-haiku" | "grok-code">;
/**
 * FastRouter Provider
 * @see {@link https://fastrouter.ai/models}
 * @remarks
 * - baseURL - `https://go.fastrouter.ai/api/v1`
 * - apiKey - `FASTROUTER_API_KEY`
 */
declare const fastrouter: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"openai/gpt-oss-120b" | "openai/gpt-oss-20b" | "openai/gpt-4.1" | "moonshotai/kimi-k2" | "x-ai/grok-4" | "google/gemini-2.5-flash" | "google/gemini-2.5-pro" | "openai/gpt-5-nano" | "openai/gpt-5-mini" | "openai/gpt-5" | "qwen/qwen3-coder" | "anthropic/claude-opus-4.1" | "anthropic/claude-sonnet-4" | "deepseek-ai/deepseek-r1-distill-llama-70b">;
/**
 * Google Provider
 * @see {@link https://ai.google.dev/gemini-api/docs/pricing}
 * @remarks
 * - baseURL - `https://generativelanguage.googleapis.com/v1beta/openai/`
 * - apiKey - `GOOGLE_GENERATIVE_AI_API_KEY or GEMINI_API_KEY`
 */
// NOTE(review): embedding ("gemini-embedding-001") and TTS/live model ids appear inside the
// ChatProvider union rather than only on the EmbedProvider side — confirm generator intent.
declare const google: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.EmbedProvider<string> & _xsai_ext_shared_providers.ChatProvider<"gemini-2.5-pro" | "gemini-embedding-001" | "gemini-2.5-flash-image" | "gemini-2.5-flash-preview-05-20" | "gemini-flash-lite-latest" | "gemini-2.5-flash" | "gemini-flash-latest" | "gemini-2.5-pro-preview-05-06" | "gemini-2.5-flash-preview-tts" | "gemini-2.0-flash-lite" | "gemini-live-2.5-flash-preview-native-audio" | "gemini-2.0-flash" | "gemini-2.5-flash-lite" | "gemini-2.5-pro-preview-06-05" | "gemini-live-2.5-flash" | "gemini-2.5-flash-lite-preview-06-17" | "gemini-2.5-flash-image-preview" | "gemini-2.5-flash-preview-09-2025" | "gemini-2.5-flash-preview-04-17" | "gemini-2.5-pro-preview-tts" | "gemini-1.5-flash" | "gemini-1.5-flash-8b" | "gemini-2.5-flash-lite-preview-09-2025" | "gemini-1.5-pro">;
/**
 * Inception Provider
 * @see {@link https://platform.inceptionlabs.ai/docs}
 * @remarks
 * - baseURL - `https://api.inceptionlabs.ai/v1/`
 * - apiKey - `INCEPTION_API_KEY`
 */
declare const inception: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"mercury-coder" | "mercury">;
204
/**
 * Weights & Biases Provider
 * @see {@link https://weave-docs.wandb.ai/guides/integrations/inference/}
 * @remarks
 * - baseURL - `https://api.inference.wandb.ai/v1`
 * - apiKey - `WANDB_API_KEY`
 */
declare const wandb: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"Qwen/Qwen3-235B-A22B-Instruct-2507" | "Qwen/Qwen3-235B-A22B-Thinking-2507" | "deepseek-ai/DeepSeek-R1-0528" | "deepseek-ai/DeepSeek-V3-0324" | "moonshotai/Kimi-K2-Instruct" | "Qwen/Qwen3-Coder-480B-A35B-Instruct" | "microsoft/Phi-4-mini-instruct" | "meta-llama/Llama-3.1-8B-Instruct" | "meta-llama/Llama-3.3-70B-Instruct" | "meta-llama/Llama-4-Scout-17B-16E-Instruct">;
/**
 * OpenAI Provider
 * @see {@link https://platform.openai.com/docs/models}
 * @remarks
 * - baseURL - `https://api.openai.com/v1/`
 * - apiKey - `OPENAI_API_KEY`
 */
declare const openai: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.EmbedProvider<string> & _xsai_ext_shared_providers.ChatProvider<"o3-mini" | "gpt-5-codex" | "gpt-4o" | "gpt-4.1" | "o4-mini" | "gpt-5-mini" | "o3" | "gpt-5" | "gpt-4.1-nano" | "text-embedding-3-small" | "gpt-4" | "o1-pro" | "gpt-4o-2024-05-13" | "gpt-4o-2024-08-06" | "gpt-4.1-mini" | "o3-deep-research" | "gpt-3.5-turbo" | "text-embedding-3-large" | "gpt-4-turbo" | "o1-preview" | "codex-mini-latest" | "gpt-5-nano" | "o1" | "o1-mini" | "text-embedding-ada-002" | "o3-pro" | "gpt-4o-2024-11-20" | "o4-mini-deep-research" | "gpt-5-chat-latest" | "gpt-4o-mini" | "gpt-5-pro"> & _xsai_ext_shared_providers.ImageProvider<string> & _xsai_ext_shared_providers.SpeechProvider<string> & _xsai_ext_shared_providers.TranscriptionProvider<string>;
/**
 * Zhipu AI Coding Plan Provider
 * @see {@link https://docs.bigmodel.cn/cn/coding-plan/overview}
 * @remarks
 * - baseURL - `https://open.bigmodel.cn/api/coding/paas/v4`
 * - apiKey - `ZHIPU_API_KEY`
 */
declare const zhipuaiCodingPlan: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"glm-4.5-flash" | "glm-4.5" | "glm-4.5-air" | "glm-4.5v" | "glm-4.6">;
/**
 * Perplexity Provider
 * @see {@link https://docs.perplexity.ai}
 * @remarks
 * - baseURL - `https://api.perplexity.ai/`
 * - apiKey - `PERPLEXITY_API_KEY`
 */
declare const perplexity: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"sonar-reasoning" | "sonar" | "sonar-pro" | "sonar-reasoning-pro">;
/**
 * ZenMux Provider
 * @see {@link https://docs.zenmux.ai}
 * @remarks
 * - baseURL - `https://zenmux.ai/api/v1`
 * - apiKey - `ZENMUX_API_KEY`
 */
// NOTE(review): "inclusionai/lint-1t" looks like a typo for "inclusionai/ling-1t"
// (it sits next to "inclusionai/ring-1t") — verify against the ZenMux model catalog.
declare const zenmux: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"x-ai/grok-4" | "google/gemini-2.5-pro" | "openai/gpt-5" | "anthropic/claude-opus-4.1" | "moonshotai/kimi-k2-0905" | "x-ai/grok-4-fast-non-reasoning" | "x-ai/grok-code-fast-1" | "x-ai/grok-4-fast" | "deepseek/deepseek-chat" | "openai/gpt-5-codex" | "inclusionai/ring-1t" | "inclusionai/lint-1t" | "z-ai/glm-4.5-air" | "z-ai/glm-4.6" | "qwen/qwen3-coder-plus" | "kuaishou/kat-coder-pro-v1" | "anthropic/claude-haiku-4.5" | "anthropic/claude-sonnet-4.5">;
244
/**
 * iFlow Provider
 * @see {@link https://platform.iflow.cn/en/docs}
 * @remarks
 * - baseURL - `https://apis.iflow.cn/v1`
 * - apiKey - `IFLOW_API_KEY`
 */
declare const iflowcn: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"glm-4.6" | "qwen3-vl-plus" | "qwen3-max" | "qwen3-coder-plus" | "qwen3-32b" | "deepseek-v3" | "deepseek-r1" | "qwen3-235b" | "qwen3-coder" | "kimi-k2" | "deepseek-v3.1" | "kimi-k2-0905" | "qwen3-235b-a22b-thinking-2507" | "tstars2.0" | "qwen3-235b-a22b-instruct" | "deepseek-v3.2" | "qwen3-max-preview">;
/**
 * Synthetic Provider
 * @see {@link https://synthetic.new/pricing}
 * @remarks
 * - baseURL - `https://api.synthetic.new/v1`
 * - apiKey - `SYNTHETIC_API_KEY`
 */
declare const synthetic: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"hf:Qwen/Qwen3-235B-A22B-Instruct-2507" | "hf:Qwen/Qwen2.5-Coder-32B-Instruct" | "hf:Qwen/Qwen3-Coder-480B-A35B-Instruct" | "hf:Qwen/Qwen3-235B-A22B-Thinking-2507" | "hf:MiniMaxAI/MiniMax-M2" | "hf:meta-llama/Llama-3.1-70B-Instruct" | "hf:meta-llama/Llama-3.1-8B-Instruct" | "hf:meta-llama/Llama-3.3-70B-Instruct" | "hf:meta-llama/Llama-4-Scout-17B-16E-Instruct" | "hf:meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8" | "hf:meta-llama/Llama-3.1-405B-Instruct" | "hf:moonshotai/Kimi-K2-Instruct" | "hf:moonshotai/Kimi-K2-Instruct-0905" | "hf:zai-org/GLM-4.5" | "hf:zai-org/GLM-4.6" | "hf:deepseek-ai/DeepSeek-R1" | "hf:deepseek-ai/DeepSeek-R1-0528" | "hf:deepseek-ai/DeepSeek-V3.1-Terminus" | "hf:deepseek-ai/DeepSeek-V3" | "hf:deepseek-ai/DeepSeek-V3.1" | "hf:deepseek-ai/DeepSeek-V3-0324" | "hf:openai/gpt-oss-120b">;
/**
 * Deep Infra Provider
 * @see {@link https://deepinfra.com/models}
 * @remarks
 * - baseURL - `https://api.deepinfra.com/v1/openai/`
 * - apiKey - `DEEPINFRA_API_KEY`
 */
declare const deepinfra: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.EmbedProvider<string> & _xsai_ext_shared_providers.ChatProvider<"zai-org/GLM-4.5" | "moonshotai/Kimi-K2-Instruct" | "Qwen/Qwen3-Coder-480B-A35B-Instruct" | "Qwen/Qwen3-Coder-480B-A35B-Instruct-Turbo">;
/**
 * Zhipu AI Provider
 * @see {@link https://docs.z.ai/guides/overview/pricing}
 * @remarks
 * - baseURL - `https://open.bigmodel.cn/api/paas/v4`
 * - apiKey - `ZHIPU_API_KEY`
 */
declare const zhipuai: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"glm-4.5-flash" | "glm-4.5" | "glm-4.5-air" | "glm-4.5v" | "glm-4.6">;
/**
 * submodel Provider
 * @see {@link https://submodel.gitbook.io}
 * @remarks
 * - baseURL - `https://llm.submodel.ai/v1`
 * - apiKey - `SUBMODEL_INSTAGEN_ACCESS_KEY`
 */
declare const submodel: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"openai/gpt-oss-120b" | "Qwen/Qwen3-235B-A22B-Instruct-2507" | "Qwen/Qwen3-Coder-480B-A35B-Instruct-FP8" | "Qwen/Qwen3-235B-A22B-Thinking-2507" | "zai-org/GLM-4.5-FP8" | "zai-org/GLM-4.5-Air" | "deepseek-ai/DeepSeek-R1-0528" | "deepseek-ai/DeepSeek-V3.1" | "deepseek-ai/DeepSeek-V3-0324">;
284
/**
 * Z.AI Provider
 * @see {@link https://docs.z.ai/guides/overview/pricing}
 * @remarks
 * - baseURL - `https://api.z.ai/api/paas/v4`
 * - apiKey - `ZHIPU_API_KEY`
 */
declare const zai: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"glm-4.5-flash" | "glm-4.5" | "glm-4.5-air" | "glm-4.5v" | "glm-4.6">;
/**
 * Inference Provider
 * @see {@link https://inference.net/models}
 * @remarks
 * - baseURL - `https://inference.net/v1`
 * - apiKey - `INFERENCE_API_KEY`
 */
declare const inference: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"meta/llama-3.2-11b-vision-instruct" | "mistral/mistral-nemo-12b-instruct" | "google/gemma-3" | "osmosis/osmosis-structure-0.6b" | "qwen/qwen3-embedding-4b" | "qwen/qwen-2.5-7b-vision-instruct" | "meta/llama-3.1-8b-instruct" | "meta/llama-3.2-3b-instruct" | "meta/llama-3.2-1b-instruct">;
/**
 * Requesty Provider
 * @see {@link https://requesty.ai/solution/llm-routing/models}
 * @remarks
 * - baseURL - `https://router.requesty.ai/v1`
 * - apiKey - `REQUESTY_API_KEY`
 */
declare const requesty: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"openai/gpt-4.1-mini" | "openai/gpt-4.1" | "openai/o4-mini" | "openai/gpt-4o-mini" | "google/gemini-2.5-flash" | "google/gemini-2.5-pro" | "openai/gpt-5-nano" | "openai/gpt-5-mini" | "openai/gpt-5" | "anthropic/claude-opus-4" | "anthropic/claude-3-7-sonnet" | "anthropic/claude-4-sonnet-20250522" | "anthropic/claude-opus-4-1-20250805">;
/**
 * Morph Provider
 * @see {@link https://docs.morphllm.com/api-reference/introduction}
 * @remarks
 * - baseURL - `https://api.morphllm.com/v1`
 * - apiKey - `MORPH_API_KEY`
 */
declare const morph: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"morph-v3-large" | "auto" | "morph-v3-fast">;
/**
 * LMStudio Provider
 * @see {@link https://lmstudio.ai/models}
 * @remarks
 * - baseURL - `http://127.0.0.1:1234/v1`
 * - apiKey - `LMSTUDIO_API_KEY`
 */
declare const lmstudio: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"openai/gpt-oss-20b" | "qwen/qwen3-30b-a3b-2507" | "qwen/qwen3-coder-30b">;
324
/**
 * Fireworks AI Provider
 * @see {@link https://fireworks.ai/docs/}
 * @remarks
 * - baseURL - `https://api.fireworks.ai/inference/v1/`
 * - apiKey - `FIREWORKS_API_KEY`
 */
declare const fireworks: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"accounts/fireworks/models/deepseek-r1-0528" | "accounts/fireworks/models/deepseek-v3p1" | "accounts/fireworks/models/minimax-m2" | "accounts/fireworks/models/deepseek-v3-0324" | "accounts/fireworks/models/kimi-k2-instruct" | "accounts/fireworks/models/qwen3-235b-a22b" | "accounts/fireworks/models/gpt-oss-20b" | "accounts/fireworks/models/gpt-oss-120b" | "accounts/fireworks/models/glm-4p5-air" | "accounts/fireworks/models/qwen3-coder-480b-a35b-instruct" | "accounts/fireworks/models/glm-4p5">;
/**
 * ModelScope Provider
 * @see {@link https://modelscope.cn/docs/model-service/API-Inference/intro}
 * @remarks
 * - baseURL - `https://api-inference.modelscope.cn/v1`
 * - apiKey - `MODELSCOPE_API_KEY`
 */
declare const modelscope: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"Qwen/Qwen3-235B-A22B-Instruct-2507" | "Qwen/Qwen3-Coder-30B-A3B-Instruct" | "Qwen/Qwen3-30B-A3B-Instruct-2507" | "Qwen/Qwen3-235B-A22B-Thinking-2507" | "ZhipuAI/GLM-4.5" | "ZhipuAI/GLM-4.6" | "Qwen/Qwen3-30B-A3B-Thinking-2507">;
/**
 * Llama Provider
 * @see {@link https://llama.developer.meta.com/docs/models}
 * @remarks
 * - baseURL - `https://api.llama.com/compat/v1/`
 * - apiKey - `LLAMA_API_KEY`
 */
declare const llama: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"llama-3.3-8b-instruct" | "llama-4-maverick-17b-128e-instruct-fp8" | "llama-3.3-70b-instruct" | "llama-4-scout-17b-16e-instruct-fp8" | "groq-llama-4-maverick-17b-128e-instruct" | "cerebras-llama-4-scout-17b-16e-instruct" | "cerebras-llama-4-maverick-17b-128e-instruct">;
/**
 * Scaleway Provider
 * @see {@link https://www.scaleway.com/en/docs/generative-apis/}
 * @remarks
 * - baseURL - `https://api.scaleway.ai/v1`
 * - apiKey - `SCALEWAY_API_KEY`
 */
declare const scaleway: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<"qwen3-coder-30b-a3b-instruct" | "deepseek-r1-distill-llama-70b" | "gpt-oss-120b" | "llama-3.3-70b-instruct" | "qwen3-235b-a22b-instruct-2507" | "pixtral-12b-2409" | "llama-3.1-8b-instruct" | "mistral-nemo-instruct-2407" | "mistral-small-3.2-24b-instruct-2506" | "whisper-large-v3" | "voxtral-small-24b-2507" | "bge-multilingual-gemma2" | "gemma-3-27b-it">;
/**
 * Cerebras Provider
 * @see {@link https://inference-docs.cerebras.ai/models/overview}
 * @remarks
 * - baseURL - `https://api.cerebras.ai/v1/`
 * - apiKey - `CEREBRAS_API_KEY`
 */
declare const cerebras: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.EmbedProvider<string> & _xsai_ext_shared_providers.ChatProvider<"gpt-oss-120b" | "qwen-3-235b-a22b-instruct-2507" | "zai-glm-4.6" | "qwen-3-coder-480b">;
364
/**
 * Minimax Provider
 * @see {@link https://platform.minimax.io/docs/api-reference/text-openai-api}
 * @remarks
 * - baseURL - `https://api.minimax.io/v1/`
 * - apiKey - `MINIMAX_API_KEY`
 */
declare const minimax: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<string>;
/**
 * Minimaxi Provider
 * @see {@link https://platform.minimaxi.com/docs/api-reference/text-openai-api}
 * @remarks
 * - baseURL - `https://api.minimaxi.com/v1/`
 * - apiKey - `MINIMAX_API_KEY`
 */
declare const minimaxi: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<string>;
/**
 * Novita AI Provider
 * @see {@link https://novita.ai/docs/guides/llm-api#api-integration}
 * @remarks
 * - baseURL - `https://api.novita.ai/v3/openai/`
 * - apiKey - `NOVITA_API_KEY`
 */
declare const novita: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<string>;
/**
 * SiliconFlow Provider
 * @see {@link https://docs.siliconflow.com/en/userguide/quickstart#4-3-call-via-openai-interface}
 * @remarks
 * - baseURL - `https://api.siliconflow.cn/v1/`
 * - apiKey - `SILICON_FLOW_API_KEY`
 */
declare const siliconFlow: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.EmbedProvider<string> & _xsai_ext_shared_providers.SpeechProvider<string> & _xsai_ext_shared_providers.TranscriptionProvider<string> & _xsai_ext_shared_providers.ChatProvider<string>;
/**
 * StepFun Provider
 * @see {@link https://www.stepfun.com}
 * @remarks
 * - baseURL - `https://api.stepfun.com/v1/`
 * - apiKey - `STEPFUN_API_KEY`
 */
declare const stepfun: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.EmbedProvider<string> & _xsai_ext_shared_providers.SpeechProvider<string> & _xsai_ext_shared_providers.TranscriptionProvider<string> & _xsai_ext_shared_providers.ChatProvider<string>;
/**
 * Tencent Hunyuan Provider
 * @see {@link https://cloud.tencent.com/document/product/1729}
 * @remarks
 * - baseURL - `https://api.hunyuan.cloud.tencent.com/v1/`
 * - apiKey - `TENCENT_HUNYUAN_API_KEY`
 */
declare const tencentHunyuan: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.EmbedProvider<string> & _xsai_ext_shared_providers.ChatProvider<string>;
412
+
413
+ /**
414
+ * Anthropic Provider
415
+ * @see {@link https://docs.claude.com/en/api/openai-sdk}
416
+ * @remarks
417
+ * - baseURL - `https://api.anthropic.com/v1/`
418
+ * - apiKey - `ANTHROPIC_API_KEY`
419
+ */
420
+ declare const anthropic: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<AnthropicModels>;
421
+ /**
422
+ * Featherless AI Provider
423
+ * @see {@link https://featherless.ai/models}
424
+ * @remarks
425
+ * - baseURL - `https://api.featherless.ai/v1/`
426
+ * - apiKey - `FEATHERLESS_API_KEY`
427
+ */
428
+ declare const featherless: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProvider<string>;
429
+ /**
430
+ * OpenRouter Provider
431
+ * @see {@link https://openrouter.ai/models}
432
+ * @remarks
433
+ * - baseURL - `https://openrouter.ai/api/v1/`
434
+ * - apiKey - `OPENROUTER_API_KEY`
435
+ */
436
+ declare const openrouter: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.ChatProviderWithExtraOptions<OpenrouterModels, OpenRouterOptions>;
437
+ /**
438
+ * Together AI Provider
439
+ * @see {@link https://docs.together.ai/docs/serverless-models}
440
+ * @remarks
441
+ * - baseURL - `https://api.together.xyz/v1/`
442
+ * - apiKey - `TOGETHER_API_KEY`
443
+ */
444
+ declare const togetherai: _xsai_ext_shared_providers.ModelProvider & _xsai_ext_shared_providers.EmbedProvider<string> & _xsai_ext_shared_providers.ChatProvider<TogetheraiModels>;
445
+
446
+ export { alibaba, alibabaCn, anthropic, baseten, cerebras, chutes, cortecs, deepinfra, deepseek, fastrouter, featherless, fireworks, githubCopilot, githubModels, google, groq, huggingface, iflowcn, inception, inference, llama, lmstudio, lucidquery, minimax, minimaxi, mistral, modelscope, moonshotai, moonshotaiCn, morph, nebius, novita, nvidia, openai, opencode, openrouter, perplexity, requesty, scaleway, siliconFlow, stepfun, submodel, synthetic, tencentHunyuan, togetherai, upstage, venice, vultr, wandb, xai, zai, zaiCodingPlan, zenmux, zhipuai, zhipuaiCodingPlan };
package/dist/index.js ADDED
@@ -0,0 +1,63 @@
1
+ import process from 'node:process';
2
+ import { c as createMoonshotaiCn, a as createLucidquery, b as createMoonshotai, d as createZaiCodingPlan, e as createAlibaba, f as createXai, g as createVultr, h as createNvidia, i as createUpstage, j as createGroq, k as createGithubCopilot, l as createMistral, m as createNebius, n as createDeepSeek, o as createAlibabaCn, p as createVenice, q as createChutes, r as createCortecs, s as createGithubModels, t as createBaseten, u as createHuggingface, v as createOpencode, w as createFastrouter, x as createGoogleGenerativeAI, y as createInception, z as createWandb, A as createOpenAI, B as createZhipuaiCodingPlan, C as createPerplexity, D as createZenmux, E as createIflowcn, F as createSynthetic, G as createDeepinfra, H as createZhipuai, I as createSubmodel, J as createZai, K as createInference, L as createRequesty, M as createMorph, N as createLmstudio, O as createFireworks, P as createModelscope, Q as createLlama, R as createScaleway, S as createCerebras, T as createMinimax, U as createMinimaxi, V as createNovita, W as createSiliconFlow, X as createStepfun, Y as createTencentHunyuan, Z as createAnthropic, _ as createFeatherless, $ as createOpenRouter, a0 as createTogetherAI } from './together-ai-DXREpfi4.js';
3
+ import '@xsai-ext/shared-providers';
4
+ import '@xsai/shared';
5
+
6
+ const moonshotaiCn = createMoonshotaiCn(process.env.MOONSHOT_API_KEY ?? "");
7
+ const lucidquery = createLucidquery(process.env.LUCIDQUERY_API_KEY ?? "");
8
+ const moonshotai = createMoonshotai(process.env.MOONSHOT_API_KEY ?? "");
9
+ const zaiCodingPlan = createZaiCodingPlan(process.env.ZHIPU_API_KEY ?? "");
10
+ const alibaba = createAlibaba(process.env.DASHSCOPE_API_KEY ?? "");
11
+ const xai = createXai(process.env.XAI_API_KEY ?? "");
12
+ const vultr = createVultr(process.env.VULTR_API_KEY ?? "");
13
+ const nvidia = createNvidia(process.env.NVIDIA_API_KEY ?? "");
14
+ const upstage = createUpstage(process.env.UPSTAGE_API_KEY ?? "");
15
+ const groq = createGroq(process.env.GROQ_API_KEY ?? "");
16
+ const githubCopilot = createGithubCopilot(process.env.GITHUB_TOKEN ?? "");
17
+ const mistral = createMistral(process.env.MISTRAL_API_KEY ?? "");
18
+ const nebius = createNebius(process.env.NEBIUS_API_KEY ?? "");
19
+ const deepseek = createDeepSeek(process.env.DEEPSEEK_API_KEY ?? "");
20
+ const alibabaCn = createAlibabaCn(process.env.DASHSCOPE_API_KEY ?? "");
21
+ const venice = createVenice(process.env.VENICE_API_KEY ?? "");
22
+ const chutes = createChutes(process.env.CHUTES_API_KEY ?? "");
23
+ const cortecs = createCortecs(process.env.CORTECS_API_KEY ?? "");
24
+ const githubModels = createGithubModels(process.env.GITHUB_TOKEN ?? "");
25
+ const baseten = createBaseten(process.env.BASETEN_API_KEY ?? "");
26
+ const huggingface = createHuggingface(process.env.HF_TOKEN ?? "");
27
+ const opencode = createOpencode(process.env.OPENCODE_API_KEY ?? "");
28
+ const fastrouter = createFastrouter(process.env.FASTROUTER_API_KEY ?? "");
29
+ const google = createGoogleGenerativeAI(process.env.GOOGLE_GENERATIVE_AI_API_KEY ?? process.env.GEMINI_API_KEY ?? "");
30
+ const inception = createInception(process.env.INCEPTION_API_KEY ?? "");
31
+ const wandb = createWandb(process.env.WANDB_API_KEY ?? "");
32
+ const openai = createOpenAI(process.env.OPENAI_API_KEY ?? "");
33
+ const zhipuaiCodingPlan = createZhipuaiCodingPlan(process.env.ZHIPU_API_KEY ?? "");
34
+ const perplexity = createPerplexity(process.env.PERPLEXITY_API_KEY ?? "");
35
+ const zenmux = createZenmux(process.env.ZENMUX_API_KEY ?? "");
36
+ const iflowcn = createIflowcn(process.env.IFLOW_API_KEY ?? "");
37
+ const synthetic = createSynthetic(process.env.SYNTHETIC_API_KEY ?? "");
38
+ const deepinfra = createDeepinfra(process.env.DEEPINFRA_API_KEY ?? "");
39
+ const zhipuai = createZhipuai(process.env.ZHIPU_API_KEY ?? "");
40
+ const submodel = createSubmodel(process.env.SUBMODEL_INSTAGEN_ACCESS_KEY ?? "");
41
+ const zai = createZai(process.env.ZHIPU_API_KEY ?? "");
42
+ const inference = createInference(process.env.INFERENCE_API_KEY ?? "");
43
+ const requesty = createRequesty(process.env.REQUESTY_API_KEY ?? "");
44
+ const morph = createMorph(process.env.MORPH_API_KEY ?? "");
45
+ const lmstudio = createLmstudio(process.env.LMSTUDIO_API_KEY ?? "");
46
+ const fireworks = createFireworks(process.env.FIREWORKS_API_KEY ?? "");
47
+ const modelscope = createModelscope(process.env.MODELSCOPE_API_KEY ?? "");
48
+ const llama = createLlama(process.env.LLAMA_API_KEY ?? "");
49
+ const scaleway = createScaleway(process.env.SCALEWAY_API_KEY ?? "");
50
+ const cerebras = createCerebras(process.env.CEREBRAS_API_KEY ?? "");
51
+ const minimax = createMinimax(process.env.MINIMAX_API_KEY ?? "");
52
+ const minimaxi = createMinimaxi(process.env.MINIMAX_API_KEY ?? "");
53
+ const novita = createNovita(process.env.NOVITA_API_KEY ?? "");
54
+ const siliconFlow = createSiliconFlow(process.env.SILICON_FLOW_API_KEY ?? "");
55
+ const stepfun = createStepfun(process.env.STEPFUN_API_KEY ?? "");
56
+ const tencentHunyuan = createTencentHunyuan(process.env.TENCENT_HUNYUAN_API_KEY ?? "");
57
+
58
+ const anthropic = createAnthropic(process.env.ANTHROPIC_API_KEY ?? "");
59
+ const featherless = createFeatherless(process.env.FEATHERLESS_API_KEY ?? "");
60
+ const openrouter = createOpenRouter(process.env.OPENROUTER_API_KEY ?? "");
61
+ const togetherai = createTogetherAI(process.env.TOGETHER_API_KEY ?? "");
62
+
63
+ export { alibaba, alibabaCn, anthropic, baseten, cerebras, chutes, cortecs, deepinfra, deepseek, fastrouter, featherless, fireworks, githubCopilot, githubModels, google, groq, huggingface, iflowcn, inception, inference, llama, lmstudio, lucidquery, minimax, minimaxi, mistral, modelscope, moonshotai, moonshotaiCn, morph, nebius, novita, nvidia, openai, opencode, openrouter, perplexity, requesty, scaleway, siliconFlow, stepfun, submodel, synthetic, tencentHunyuan, togetherai, upstage, venice, vultr, wandb, xai, zai, zaiCodingPlan, zenmux, zhipuai, zhipuaiCodingPlan };