@clinebot/llms 0.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +198 -0
- package/dist/config-browser.d.ts +3 -0
- package/dist/config.d.ts +3 -0
- package/dist/index.browser.d.ts +4 -0
- package/dist/index.browser.js +1 -0
- package/dist/index.d.ts +5 -0
- package/dist/index.js +7 -0
- package/dist/models/generated-access.d.ts +4 -0
- package/dist/models/generated-provider-loaders.d.ts +13 -0
- package/dist/models/generated.d.ts +14 -0
- package/dist/models/index.d.ts +43 -0
- package/dist/models/models-dev-catalog.d.ts +32 -0
- package/dist/models/providers/aihubmix.d.ts +5 -0
- package/dist/models/providers/anthropic.d.ts +53 -0
- package/dist/models/providers/asksage.d.ts +5 -0
- package/dist/models/providers/baseten.d.ts +5 -0
- package/dist/models/providers/bedrock.d.ts +7 -0
- package/dist/models/providers/cerebras.d.ts +7 -0
- package/dist/models/providers/claude-code.d.ts +4 -0
- package/dist/models/providers/cline.d.ts +34 -0
- package/dist/models/providers/deepseek.d.ts +8 -0
- package/dist/models/providers/dify.d.ts +5 -0
- package/dist/models/providers/doubao.d.ts +7 -0
- package/dist/models/providers/fireworks.d.ts +8 -0
- package/dist/models/providers/gemini.d.ts +9 -0
- package/dist/models/providers/groq.d.ts +8 -0
- package/dist/models/providers/hicap.d.ts +5 -0
- package/dist/models/providers/huawei-cloud-maas.d.ts +5 -0
- package/dist/models/providers/huggingface.d.ts +6 -0
- package/dist/models/providers/index.d.ts +45 -0
- package/dist/models/providers/litellm.d.ts +5 -0
- package/dist/models/providers/lmstudio.d.ts +5 -0
- package/dist/models/providers/minimax.d.ts +7 -0
- package/dist/models/providers/mistral.d.ts +5 -0
- package/dist/models/providers/moonshot.d.ts +7 -0
- package/dist/models/providers/nebius.d.ts +7 -0
- package/dist/models/providers/nous-research.d.ts +7 -0
- package/dist/models/providers/oca.d.ts +9 -0
- package/dist/models/providers/ollama.d.ts +5 -0
- package/dist/models/providers/openai-codex.d.ts +10 -0
- package/dist/models/providers/openai.d.ts +9 -0
- package/dist/models/providers/opencode.d.ts +10 -0
- package/dist/models/providers/openrouter.d.ts +7 -0
- package/dist/models/providers/qwen-code.d.ts +7 -0
- package/dist/models/providers/qwen.d.ts +7 -0
- package/dist/models/providers/requesty.d.ts +6 -0
- package/dist/models/providers/sambanova.d.ts +7 -0
- package/dist/models/providers/sapaicore.d.ts +7 -0
- package/dist/models/providers/together.d.ts +8 -0
- package/dist/models/providers/vercel-ai-gateway.d.ts +5 -0
- package/dist/models/providers/vertex.d.ts +7 -0
- package/dist/models/providers/xai.d.ts +8 -0
- package/dist/models/providers/zai.d.ts +7 -0
- package/dist/models/query.d.ts +181 -0
- package/dist/models/registry.d.ts +123 -0
- package/dist/models/schemas/index.d.ts +7 -0
- package/dist/models/schemas/model.d.ts +340 -0
- package/dist/models/schemas/query.d.ts +191 -0
- package/dist/providers/handlers/ai-sdk-community.d.ts +46 -0
- package/dist/providers/handlers/ai-sdk-provider-base.d.ts +32 -0
- package/dist/providers/handlers/anthropic-base.d.ts +26 -0
- package/dist/providers/handlers/asksage.d.ts +12 -0
- package/dist/providers/handlers/auth.d.ts +5 -0
- package/dist/providers/handlers/base.d.ts +55 -0
- package/dist/providers/handlers/bedrock-base.d.ts +23 -0
- package/dist/providers/handlers/bedrock-client.d.ts +4 -0
- package/dist/providers/handlers/community-sdk.d.ts +97 -0
- package/dist/providers/handlers/fetch-base.d.ts +18 -0
- package/dist/providers/handlers/gemini-base.d.ts +25 -0
- package/dist/providers/handlers/index.d.ts +19 -0
- package/dist/providers/handlers/openai-base.d.ts +54 -0
- package/dist/providers/handlers/openai-responses.d.ts +64 -0
- package/dist/providers/handlers/providers.d.ts +43 -0
- package/dist/providers/handlers/r1-base.d.ts +62 -0
- package/dist/providers/handlers/registry.d.ts +106 -0
- package/dist/providers/handlers/vertex.d.ts +32 -0
- package/dist/providers/index.d.ts +100 -0
- package/dist/providers/public.browser.d.ts +2 -0
- package/dist/providers/public.d.ts +3 -0
- package/dist/providers/shared/openai-compatible.d.ts +10 -0
- package/dist/providers/transform/ai-sdk-community-format.d.ts +9 -0
- package/dist/providers/transform/anthropic-format.d.ts +24 -0
- package/dist/providers/transform/content-format.d.ts +3 -0
- package/dist/providers/transform/gemini-format.d.ts +19 -0
- package/dist/providers/transform/index.d.ts +10 -0
- package/dist/providers/transform/openai-format.d.ts +36 -0
- package/dist/providers/transform/r1-format.d.ts +26 -0
- package/dist/providers/types/config.d.ts +261 -0
- package/dist/providers/types/handler.d.ts +71 -0
- package/dist/providers/types/index.d.ts +11 -0
- package/dist/providers/types/messages.d.ts +139 -0
- package/dist/providers/types/model-info.d.ts +32 -0
- package/dist/providers/types/provider-ids.d.ts +63 -0
- package/dist/providers/types/settings.d.ts +308 -0
- package/dist/providers/types/stream.d.ts +106 -0
- package/dist/providers/utils/index.d.ts +7 -0
- package/dist/providers/utils/retry.d.ts +38 -0
- package/dist/providers/utils/stream-processor.d.ts +110 -0
- package/dist/providers/utils/tool-processor.d.ts +34 -0
- package/dist/sdk.d.ts +18 -0
- package/dist/types.d.ts +60 -0
- package/package.json +66 -0
- package/src/catalog.ts +20 -0
- package/src/config-browser.ts +11 -0
- package/src/config.ts +49 -0
- package/src/index.browser.ts +9 -0
- package/src/index.ts +10 -0
- package/src/live-providers.test.ts +137 -0
- package/src/models/generated-access.ts +41 -0
- package/src/models/generated-provider-loaders.ts +166 -0
- package/src/models/generated.ts +11997 -0
- package/src/models/index.ts +271 -0
- package/src/models/models-dev-catalog.test.ts +161 -0
- package/src/models/models-dev-catalog.ts +161 -0
- package/src/models/providers/aihubmix.ts +19 -0
- package/src/models/providers/anthropic.ts +60 -0
- package/src/models/providers/asksage.ts +19 -0
- package/src/models/providers/baseten.ts +21 -0
- package/src/models/providers/bedrock.ts +30 -0
- package/src/models/providers/cerebras.ts +24 -0
- package/src/models/providers/claude-code.ts +51 -0
- package/src/models/providers/cline.ts +25 -0
- package/src/models/providers/deepseek.ts +33 -0
- package/src/models/providers/dify.ts +17 -0
- package/src/models/providers/doubao.ts +33 -0
- package/src/models/providers/fireworks.ts +34 -0
- package/src/models/providers/gemini.ts +43 -0
- package/src/models/providers/groq.ts +33 -0
- package/src/models/providers/hicap.ts +18 -0
- package/src/models/providers/huawei-cloud-maas.ts +18 -0
- package/src/models/providers/huggingface.ts +22 -0
- package/src/models/providers/index.ts +162 -0
- package/src/models/providers/litellm.ts +19 -0
- package/src/models/providers/lmstudio.ts +22 -0
- package/src/models/providers/minimax.ts +34 -0
- package/src/models/providers/mistral.ts +19 -0
- package/src/models/providers/moonshot.ts +34 -0
- package/src/models/providers/nebius.ts +24 -0
- package/src/models/providers/nous-research.ts +21 -0
- package/src/models/providers/oca.ts +30 -0
- package/src/models/providers/ollama.ts +18 -0
- package/src/models/providers/openai-codex.ts +30 -0
- package/src/models/providers/openai.ts +43 -0
- package/src/models/providers/opencode.ts +28 -0
- package/src/models/providers/openrouter.ts +24 -0
- package/src/models/providers/qwen-code.ts +33 -0
- package/src/models/providers/qwen.ts +34 -0
- package/src/models/providers/requesty.ts +23 -0
- package/src/models/providers/sambanova.ts +23 -0
- package/src/models/providers/sapaicore.ts +34 -0
- package/src/models/providers/together.ts +35 -0
- package/src/models/providers/vercel-ai-gateway.ts +23 -0
- package/src/models/providers/vertex.ts +36 -0
- package/src/models/providers/xai.ts +34 -0
- package/src/models/providers/zai.ts +25 -0
- package/src/models/query.ts +407 -0
- package/src/models/registry.ts +511 -0
- package/src/models/schemas/index.ts +62 -0
- package/src/models/schemas/model.ts +308 -0
- package/src/models/schemas/query.ts +336 -0
- package/src/providers/browser.ts +4 -0
- package/src/providers/handlers/ai-sdk-community.ts +226 -0
- package/src/providers/handlers/ai-sdk-provider-base.ts +193 -0
- package/src/providers/handlers/anthropic-base.ts +372 -0
- package/src/providers/handlers/asksage.test.ts +103 -0
- package/src/providers/handlers/asksage.ts +138 -0
- package/src/providers/handlers/auth.test.ts +19 -0
- package/src/providers/handlers/auth.ts +121 -0
- package/src/providers/handlers/base.test.ts +46 -0
- package/src/providers/handlers/base.ts +160 -0
- package/src/providers/handlers/bedrock-base.ts +390 -0
- package/src/providers/handlers/bedrock-client.ts +100 -0
- package/src/providers/handlers/codex.test.ts +123 -0
- package/src/providers/handlers/community-sdk.test.ts +288 -0
- package/src/providers/handlers/community-sdk.ts +392 -0
- package/src/providers/handlers/fetch-base.ts +68 -0
- package/src/providers/handlers/gemini-base.ts +302 -0
- package/src/providers/handlers/index.ts +67 -0
- package/src/providers/handlers/openai-base.ts +277 -0
- package/src/providers/handlers/openai-responses.ts +598 -0
- package/src/providers/handlers/providers.test.ts +120 -0
- package/src/providers/handlers/providers.ts +563 -0
- package/src/providers/handlers/r1-base.ts +280 -0
- package/src/providers/handlers/registry.ts +185 -0
- package/src/providers/handlers/vertex.test.ts +124 -0
- package/src/providers/handlers/vertex.ts +292 -0
- package/src/providers/index.ts +534 -0
- package/src/providers/public.browser.ts +20 -0
- package/src/providers/public.ts +51 -0
- package/src/providers/shared/openai-compatible.ts +63 -0
- package/src/providers/transform/ai-sdk-community-format.test.ts +73 -0
- package/src/providers/transform/ai-sdk-community-format.ts +115 -0
- package/src/providers/transform/anthropic-format.ts +218 -0
- package/src/providers/transform/content-format.ts +34 -0
- package/src/providers/transform/format-conversion.test.ts +310 -0
- package/src/providers/transform/gemini-format.ts +167 -0
- package/src/providers/transform/index.ts +22 -0
- package/src/providers/transform/openai-format.ts +247 -0
- package/src/providers/transform/r1-format.ts +287 -0
- package/src/providers/types/config.ts +388 -0
- package/src/providers/types/handler.ts +87 -0
- package/src/providers/types/index.ts +120 -0
- package/src/providers/types/messages.ts +158 -0
- package/src/providers/types/model-info.test.ts +57 -0
- package/src/providers/types/model-info.ts +65 -0
- package/src/providers/types/provider-ids.test.ts +12 -0
- package/src/providers/types/provider-ids.ts +89 -0
- package/src/providers/types/settings.test.ts +49 -0
- package/src/providers/types/settings.ts +533 -0
- package/src/providers/types/stream.ts +117 -0
- package/src/providers/utils/index.ts +27 -0
- package/src/providers/utils/retry.test.ts +140 -0
- package/src/providers/utils/retry.ts +188 -0
- package/src/providers/utils/stream-processor.test.ts +232 -0
- package/src/providers/utils/stream-processor.ts +472 -0
- package/src/providers/utils/tool-processor.test.ts +34 -0
- package/src/providers/utils/tool-processor.ts +111 -0
- package/src/sdk.ts +264 -0
- package/src/types.ts +79 -0
|
@@ -0,0 +1,30 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* AWS Bedrock Models
|
|
3
|
+
*/
|
|
4
|
+
|
|
5
|
+
import { getGeneratedModelsForProvider } from "../generated-access";
|
|
6
|
+
import type { ModelCollection, ModelInfo } from "../schemas/index";
|
|
7
|
+
|
|
8
|
+
export const BEDROCK_MODELS: Record<string, ModelInfo> =
|
|
9
|
+
getGeneratedModelsForProvider("bedrock");
|
|
10
|
+
|
|
11
|
+
export const BEDROCK_DEFAULT_MODEL =
|
|
12
|
+
Object.keys(BEDROCK_MODELS)[0] ?? "anthropic.claude-sonnet-4-5-20250929-v1:0";
|
|
13
|
+
|
|
14
|
+
export const BEDROCK_PROVIDER: ModelCollection = {
|
|
15
|
+
provider: {
|
|
16
|
+
id: "bedrock",
|
|
17
|
+
name: "AWS Bedrock",
|
|
18
|
+
description: "Amazon Bedrock managed foundation models",
|
|
19
|
+
protocol: "anthropic",
|
|
20
|
+
defaultModelId: BEDROCK_DEFAULT_MODEL,
|
|
21
|
+
capabilities: ["reasoning", "prompt-cache"],
|
|
22
|
+
env: [
|
|
23
|
+
"AWS_REGION",
|
|
24
|
+
"AWS_ACCESS_KEY_ID",
|
|
25
|
+
"AWS_SECRET_ACCESS_KEY",
|
|
26
|
+
"AWS_SESSION_TOKEN",
|
|
27
|
+
],
|
|
28
|
+
},
|
|
29
|
+
models: BEDROCK_MODELS,
|
|
30
|
+
};
|
|
@@ -0,0 +1,24 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Cerebras Provider
|
|
3
|
+
*/
|
|
4
|
+
|
|
5
|
+
import { getGeneratedModelsForProvider } from "../generated-access";
|
|
6
|
+
import type { ModelCollection, ModelInfo } from "../schemas/index";
|
|
7
|
+
|
|
8
|
+
export const CEREBRAS_MODELS: Record<string, ModelInfo> =
|
|
9
|
+
getGeneratedModelsForProvider("cerebras");
|
|
10
|
+
export const CEREBRAS_DEFAULT_MODEL =
|
|
11
|
+
Object.keys(CEREBRAS_MODELS)[0] ?? "llama3.1-70b";
|
|
12
|
+
|
|
13
|
+
export const CEREBRAS_PROVIDER: ModelCollection = {
|
|
14
|
+
provider: {
|
|
15
|
+
id: "cerebras",
|
|
16
|
+
name: "Cerebras",
|
|
17
|
+
description: "Fast inference on Cerebras wafer-scale chips",
|
|
18
|
+
protocol: "openai-chat",
|
|
19
|
+
baseUrl: "https://api.cerebras.ai/v1",
|
|
20
|
+
defaultModelId: CEREBRAS_DEFAULT_MODEL,
|
|
21
|
+
env: ["CEREBRAS_API_KEY"],
|
|
22
|
+
},
|
|
23
|
+
models: CEREBRAS_MODELS,
|
|
24
|
+
};
|
|
@@ -0,0 +1,51 @@
|
|
|
1
|
+
import { getGeneratedModelsForProvider } from "../generated-access";
|
|
2
|
+
import type { ModelCollection, ModelInfo } from "../schemas/index";
|
|
3
|
+
|
|
4
|
+
const ANTHROPIC_MODELS = getGeneratedModelsForProvider("anthropic");
|
|
5
|
+
|
|
6
|
+
function pickAnthropicModel(match: (id: string) => boolean): ModelInfo {
|
|
7
|
+
const entry = Object.entries(ANTHROPIC_MODELS).find(([id]) => match(id));
|
|
8
|
+
if (entry) {
|
|
9
|
+
return entry[1];
|
|
10
|
+
}
|
|
11
|
+
return {
|
|
12
|
+
id: "sonnet",
|
|
13
|
+
name: "Claude Sonnet",
|
|
14
|
+
capabilities: ["streaming", "reasoning"],
|
|
15
|
+
};
|
|
16
|
+
}
|
|
17
|
+
|
|
18
|
+
function toClaudeCodeModel(id: "opus" | "sonnet" | "haiku"): ModelInfo {
|
|
19
|
+
const source =
|
|
20
|
+
id === "opus"
|
|
21
|
+
? pickAnthropicModel((modelId) => modelId.includes("opus"))
|
|
22
|
+
: id === "haiku"
|
|
23
|
+
? pickAnthropicModel((modelId) => modelId.includes("haiku"))
|
|
24
|
+
: pickAnthropicModel((modelId) => modelId.includes("sonnet"));
|
|
25
|
+
return {
|
|
26
|
+
...source,
|
|
27
|
+
id,
|
|
28
|
+
name: `Claude ${id.charAt(0).toUpperCase()}${id.slice(1)}`,
|
|
29
|
+
};
|
|
30
|
+
}
|
|
31
|
+
|
|
32
|
+
export const CLAUDE_CODE_MODELS: Record<string, ModelInfo> = {
|
|
33
|
+
opus: toClaudeCodeModel("opus"),
|
|
34
|
+
sonnet: toClaudeCodeModel("sonnet"),
|
|
35
|
+
haiku: toClaudeCodeModel("haiku"),
|
|
36
|
+
};
|
|
37
|
+
|
|
38
|
+
export const CLAUDE_CODE_DEFAULT_MODEL = "sonnet";
|
|
39
|
+
|
|
40
|
+
export const CLAUDE_CODE_PROVIDER: ModelCollection = {
|
|
41
|
+
provider: {
|
|
42
|
+
id: "claude-code",
|
|
43
|
+
name: "Claude Code",
|
|
44
|
+
description: "Use Claude Code SDK with Claude Pro/Max subscription",
|
|
45
|
+
protocol: "openai-chat",
|
|
46
|
+
baseUrl: "",
|
|
47
|
+
defaultModelId: CLAUDE_CODE_DEFAULT_MODEL,
|
|
48
|
+
capabilities: ["reasoning"],
|
|
49
|
+
},
|
|
50
|
+
models: CLAUDE_CODE_MODELS,
|
|
51
|
+
};
|
|
@@ -0,0 +1,25 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Cline Provider
|
|
3
|
+
*/
|
|
4
|
+
|
|
5
|
+
import { getGeneratedModelsForProvider } from "../generated-access";
|
|
6
|
+
import type { ModelCollection, ModelInfo } from "../schemas/index";
|
|
7
|
+
|
|
8
|
+
export const CLINE_DEFAULT_MODEL = "anthropic/claude-sonnet-4.6";
|
|
9
|
+
export const CLINE_MODELS: Record<string, ModelInfo> =
|
|
10
|
+
getGeneratedModelsForProvider("vercel-ai-gateway");
|
|
11
|
+
export const CLINE_DEFAULT_MODELINFO = CLINE_MODELS[CLINE_DEFAULT_MODEL];
|
|
12
|
+
|
|
13
|
+
export const CLINE_PROVIDER: ModelCollection = {
|
|
14
|
+
provider: {
|
|
15
|
+
id: "cline",
|
|
16
|
+
name: "Cline",
|
|
17
|
+
description: "Cline API endpoint",
|
|
18
|
+
protocol: "openai-chat",
|
|
19
|
+
baseUrl: "https://api.cline.bot/api/v1",
|
|
20
|
+
defaultModelId: CLINE_DEFAULT_MODEL,
|
|
21
|
+
capabilities: ["reasoning", "prompt-cache", "tools", "oauth"],
|
|
22
|
+
env: ["CLINE_API_KEY"],
|
|
23
|
+
},
|
|
24
|
+
models: CLINE_MODELS,
|
|
25
|
+
};
|
|
@@ -0,0 +1,33 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* DeepSeek Models
|
|
3
|
+
*/
|
|
4
|
+
|
|
5
|
+
import { getGeneratedModelsForProvider } from "../generated-access";
|
|
6
|
+
import type { ModelCollection, ModelInfo } from "../schemas/index";
|
|
7
|
+
|
|
8
|
+
export const DEEPSEEK_MODELS: Record<string, ModelInfo> =
|
|
9
|
+
getGeneratedModelsForProvider("deepseek");
|
|
10
|
+
|
|
11
|
+
export const DEEPSEEK_DEFAULT_MODEL = Object.keys(DEEPSEEK_MODELS)[0];
|
|
12
|
+
|
|
13
|
+
export const DEEPSEEK_PROVIDER: ModelCollection = {
|
|
14
|
+
provider: {
|
|
15
|
+
id: "deepseek",
|
|
16
|
+
name: "DeepSeek",
|
|
17
|
+
description: "Advanced AI models with reasoning capabilities",
|
|
18
|
+
protocol: "openai-chat",
|
|
19
|
+
baseUrl: "https://api.deepseek.com/v1",
|
|
20
|
+
defaultModelId: DEEPSEEK_DEFAULT_MODEL,
|
|
21
|
+
capabilities: ["reasoning", "prompt-cache"],
|
|
22
|
+
env: ["DEEPSEEK_API_KEY"],
|
|
23
|
+
},
|
|
24
|
+
models: DEEPSEEK_MODELS,
|
|
25
|
+
};
|
|
26
|
+
|
|
27
|
+
export function getDeepSeekReasoningModels(): Record<string, ModelInfo> {
|
|
28
|
+
return Object.fromEntries(
|
|
29
|
+
Object.entries(DEEPSEEK_MODELS).filter(([, info]) =>
|
|
30
|
+
info.capabilities?.includes("reasoning"),
|
|
31
|
+
),
|
|
32
|
+
);
|
|
33
|
+
}
|
|
@@ -0,0 +1,17 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Dify Provider
|
|
3
|
+
*/
|
|
4
|
+
|
|
5
|
+
import type { ModelCollection } from "../schemas/index";
|
|
6
|
+
|
|
7
|
+
export const DIFY_PROVIDER: ModelCollection = {
|
|
8
|
+
provider: {
|
|
9
|
+
id: "dify",
|
|
10
|
+
name: "Dify",
|
|
11
|
+
description: "Dify workflow/application provider via AI SDK",
|
|
12
|
+
protocol: "openai-chat",
|
|
13
|
+
defaultModelId: "default",
|
|
14
|
+
env: ["DIFY_API_KEY"],
|
|
15
|
+
},
|
|
16
|
+
models: {},
|
|
17
|
+
};
|
|
@@ -0,0 +1,33 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Doubao Provider
|
|
3
|
+
*/
|
|
4
|
+
|
|
5
|
+
import { getGeneratedModelsForProvider } from "../generated-access";
|
|
6
|
+
import type { ModelCollection, ModelInfo } from "../schemas/index";
|
|
7
|
+
|
|
8
|
+
const DEFAULT_DOUBAO_MODEL_ID = "doubao-1-5-pro-256k-250115";
|
|
9
|
+
|
|
10
|
+
export const DOUBAO_MODELS: Record<string, ModelInfo> = {
|
|
11
|
+
[DEFAULT_DOUBAO_MODEL_ID]: {
|
|
12
|
+
id: DEFAULT_DOUBAO_MODEL_ID,
|
|
13
|
+
name: "Doubao 1.5 Pro 256k",
|
|
14
|
+
capabilities: ["streaming", "tools"],
|
|
15
|
+
},
|
|
16
|
+
...getGeneratedModelsForProvider("doubao"),
|
|
17
|
+
};
|
|
18
|
+
|
|
19
|
+
export const DOUBAO_DEFAULT_MODEL =
|
|
20
|
+
Object.keys(DOUBAO_MODELS)[0] ?? DEFAULT_DOUBAO_MODEL_ID;
|
|
21
|
+
|
|
22
|
+
export const DOUBAO_PROVIDER: ModelCollection = {
|
|
23
|
+
provider: {
|
|
24
|
+
id: "doubao",
|
|
25
|
+
name: "Doubao",
|
|
26
|
+
description: "Volcengine Ark platform models",
|
|
27
|
+
protocol: "openai-chat",
|
|
28
|
+
baseUrl: "https://ark.cn-beijing.volces.com/api/v3",
|
|
29
|
+
defaultModelId: DOUBAO_DEFAULT_MODEL,
|
|
30
|
+
env: ["DOUBAO_API_KEY"],
|
|
31
|
+
},
|
|
32
|
+
models: DOUBAO_MODELS,
|
|
33
|
+
};
|
|
@@ -0,0 +1,34 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Fireworks AI Models
|
|
3
|
+
*/
|
|
4
|
+
|
|
5
|
+
import { getGeneratedModelsForProvider } from "../generated-access";
|
|
6
|
+
import type { ModelCollection, ModelInfo } from "../schemas/index";
|
|
7
|
+
|
|
8
|
+
export const FIREWORKS_MODELS: Record<string, ModelInfo> =
|
|
9
|
+
getGeneratedModelsForProvider("fireworks");
|
|
10
|
+
|
|
11
|
+
export const FIREWORKS_DEFAULT_MODEL =
|
|
12
|
+
Object.keys(FIREWORKS_MODELS)[0] ??
|
|
13
|
+
"accounts/fireworks/models/llama-v3p1-8b-instruct";
|
|
14
|
+
|
|
15
|
+
export const FIREWORKS_PROVIDER: ModelCollection = {
|
|
16
|
+
provider: {
|
|
17
|
+
id: "fireworks",
|
|
18
|
+
name: "Fireworks AI",
|
|
19
|
+
description: "High-performance inference platform",
|
|
20
|
+
protocol: "openai-chat",
|
|
21
|
+
baseUrl: "https://api.fireworks.ai/inference/v1",
|
|
22
|
+
defaultModelId: FIREWORKS_DEFAULT_MODEL,
|
|
23
|
+
env: ["FIREWORKS_API_KEY"],
|
|
24
|
+
},
|
|
25
|
+
models: FIREWORKS_MODELS,
|
|
26
|
+
};
|
|
27
|
+
|
|
28
|
+
export function getFireworksFunctionModels(): Record<string, ModelInfo> {
|
|
29
|
+
return Object.fromEntries(
|
|
30
|
+
Object.entries(FIREWORKS_MODELS).filter(([, info]) =>
|
|
31
|
+
info.capabilities?.includes("tools"),
|
|
32
|
+
),
|
|
33
|
+
);
|
|
34
|
+
}
|
|
@@ -0,0 +1,43 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Google Gemini Models
|
|
3
|
+
*/
|
|
4
|
+
|
|
5
|
+
import { getGeneratedModelsForProvider } from "../generated-access";
|
|
6
|
+
import type { ModelCollection, ModelInfo } from "../schemas/index";
|
|
7
|
+
|
|
8
|
+
export const GEMINI_MODELS: Record<string, ModelInfo> =
|
|
9
|
+
getGeneratedModelsForProvider("gemini");
|
|
10
|
+
|
|
11
|
+
export const GEMINI_DEFAULT_MODEL =
|
|
12
|
+
Object.keys(GEMINI_MODELS)[0] ?? "gemini-3-pro";
|
|
13
|
+
|
|
14
|
+
export const GEMINI_PROVIDER: ModelCollection = {
|
|
15
|
+
provider: {
|
|
16
|
+
id: "gemini",
|
|
17
|
+
name: "Google Gemini",
|
|
18
|
+
description: "Google's multimodal AI models",
|
|
19
|
+
protocol: "gemini",
|
|
20
|
+
baseUrl: "https://generativelanguage.googleapis.com",
|
|
21
|
+
defaultModelId: GEMINI_DEFAULT_MODEL,
|
|
22
|
+
capabilities: ["reasoning", "prompt-cache"],
|
|
23
|
+
env: ["GOOGLE_GENERATIVE_AI_API_KEY", "GEMINI_API_KEY"],
|
|
24
|
+
},
|
|
25
|
+
models: GEMINI_MODELS,
|
|
26
|
+
};
|
|
27
|
+
|
|
28
|
+
export function getActiveGeminiModels(): Record<string, ModelInfo> {
|
|
29
|
+
return Object.fromEntries(
|
|
30
|
+
Object.entries(GEMINI_MODELS).filter(
|
|
31
|
+
([, info]) =>
|
|
32
|
+
!info.status || info.status === "active" || info.status === "preview",
|
|
33
|
+
),
|
|
34
|
+
);
|
|
35
|
+
}
|
|
36
|
+
|
|
37
|
+
export function getGeminiThinkingModels(): Record<string, ModelInfo> {
|
|
38
|
+
return Object.fromEntries(
|
|
39
|
+
Object.entries(GEMINI_MODELS).filter(([, info]) =>
|
|
40
|
+
info.capabilities?.includes("reasoning"),
|
|
41
|
+
),
|
|
42
|
+
);
|
|
43
|
+
}
|
|
@@ -0,0 +1,33 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Groq Models
|
|
3
|
+
*/
|
|
4
|
+
|
|
5
|
+
import { getGeneratedModelsForProvider } from "../generated-access";
|
|
6
|
+
import type { ModelCollection, ModelInfo } from "../schemas/index";
|
|
7
|
+
|
|
8
|
+
export const GROQ_MODELS: Record<string, ModelInfo> =
|
|
9
|
+
getGeneratedModelsForProvider("groq");
|
|
10
|
+
|
|
11
|
+
export const GROQ_DEFAULT_MODEL =
|
|
12
|
+
Object.keys(GROQ_MODELS)[0] ?? "llama-3.3-70b-versatile";
|
|
13
|
+
|
|
14
|
+
export const GROQ_PROVIDER: ModelCollection = {
|
|
15
|
+
provider: {
|
|
16
|
+
id: "groq",
|
|
17
|
+
name: "Groq",
|
|
18
|
+
description: "Ultra-fast LPU inference",
|
|
19
|
+
protocol: "openai-chat",
|
|
20
|
+
baseUrl: "https://api.groq.com/openai/v1",
|
|
21
|
+
defaultModelId: GROQ_DEFAULT_MODEL,
|
|
22
|
+
env: ["GROQ_API_KEY"],
|
|
23
|
+
},
|
|
24
|
+
models: GROQ_MODELS,
|
|
25
|
+
};
|
|
26
|
+
|
|
27
|
+
export function getGroqVisionModels(): Record<string, ModelInfo> {
|
|
28
|
+
return Object.fromEntries(
|
|
29
|
+
Object.entries(GROQ_MODELS).filter(([, info]) =>
|
|
30
|
+
info.capabilities?.includes("images"),
|
|
31
|
+
),
|
|
32
|
+
);
|
|
33
|
+
}
|
|
@@ -0,0 +1,18 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* HiCap Provider
|
|
3
|
+
*/
|
|
4
|
+
|
|
5
|
+
import type { ModelCollection } from "../schemas/index";
|
|
6
|
+
|
|
7
|
+
export const HICAP_PROVIDER: ModelCollection = {
|
|
8
|
+
provider: {
|
|
9
|
+
id: "hicap",
|
|
10
|
+
name: "HiCap",
|
|
11
|
+
description: "HiCap AI platform",
|
|
12
|
+
protocol: "openai-chat",
|
|
13
|
+
baseUrl: "https://api.hicap.ai/v1",
|
|
14
|
+
defaultModelId: "hicap-pro",
|
|
15
|
+
env: ["HICAP_API_KEY"],
|
|
16
|
+
},
|
|
17
|
+
models: {},
|
|
18
|
+
};
|
|
@@ -0,0 +1,18 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Huawei Cloud MaaS Provider
|
|
3
|
+
*/
|
|
4
|
+
|
|
5
|
+
import type { ModelCollection } from "../schemas/index";
|
|
6
|
+
|
|
7
|
+
export const HUAWEI_CLOUD_MAAS_PROVIDER: ModelCollection = {
|
|
8
|
+
provider: {
|
|
9
|
+
id: "huawei-cloud-maas",
|
|
10
|
+
name: "Huawei Cloud MaaS",
|
|
11
|
+
description: "Huawei's model-as-a-service platform",
|
|
12
|
+
protocol: "openai-chat",
|
|
13
|
+
baseUrl: "https://infer-modelarts.cn-southwest-2.myhuaweicloud.com/v1",
|
|
14
|
+
defaultModelId: "DeepSeek-R1",
|
|
15
|
+
env: ["HUAWEI_CLOUD_MAAS_API_KEY"],
|
|
16
|
+
},
|
|
17
|
+
models: {},
|
|
18
|
+
};
|
|
@@ -0,0 +1,22 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Hugging Face Provider
|
|
3
|
+
*/
|
|
4
|
+
|
|
5
|
+
import { getGeneratedModelsForProvider } from "../generated-access";
|
|
6
|
+
import type { ModelCollection, ModelInfo } from "../schemas/index";
|
|
7
|
+
|
|
8
|
+
export const HUGGINGFACE_MODELS: Record<string, ModelInfo> =
|
|
9
|
+
getGeneratedModelsForProvider("huggingface");
|
|
10
|
+
|
|
11
|
+
export const HUGGINGFACE_PROVIDER: ModelCollection = {
|
|
12
|
+
provider: {
|
|
13
|
+
id: "huggingface",
|
|
14
|
+
name: "Hugging Face",
|
|
15
|
+
description: "Hugging Face inference API",
|
|
16
|
+
protocol: "openai-chat",
|
|
17
|
+
baseUrl: "https://api-inference.huggingface.co/v1",
|
|
18
|
+
defaultModelId: Object.keys(HUGGINGFACE_MODELS)[0],
|
|
19
|
+
env: ["HF_TOKEN"],
|
|
20
|
+
},
|
|
21
|
+
models: HUGGINGFACE_MODELS,
|
|
22
|
+
};
|
|
/**
 * Provider Exports
 *
 * Re-exports all provider model definitions and collections.
 */

export { AIHUBMIX_PROVIDER } from "./aihubmix";
// === Anthropic ===
export {
  ANTHROPIC_DEFAULT_MODEL,
  ANTHROPIC_MODELS,
  ANTHROPIC_PROVIDER,
  getActiveAnthropicModels,
  getAnthropicReasoningModels,
} from "./anthropic";
export { ASKSAGE_PROVIDER } from "./asksage";
export { BASETEN_PROVIDER } from "./baseten";
// === AWS Bedrock ===
export {
  BEDROCK_DEFAULT_MODEL,
  BEDROCK_MODELS,
  BEDROCK_PROVIDER,
} from "./bedrock";
// === Cerebras ===
export {
  CEREBRAS_DEFAULT_MODEL,
  CEREBRAS_MODELS,
  CEREBRAS_PROVIDER,
} from "./cerebras";
// === Claude Code ===
export {
  CLAUDE_CODE_DEFAULT_MODEL,
  CLAUDE_CODE_MODELS,
  CLAUDE_CODE_PROVIDER,
} from "./claude-code";
export { CLINE_DEFAULT_MODEL, CLINE_MODELS, CLINE_PROVIDER } from "./cline";
// === DeepSeek ===
export {
  DEEPSEEK_DEFAULT_MODEL,
  DEEPSEEK_MODELS,
  DEEPSEEK_PROVIDER,
  getDeepSeekReasoningModels,
} from "./deepseek";
export { DIFY_PROVIDER } from "./dify";
// === Doubao (Volcengine Ark) ===
export {
  DOUBAO_DEFAULT_MODEL,
  DOUBAO_MODELS,
  DOUBAO_PROVIDER,
} from "./doubao";
// === Fireworks AI ===
export {
  FIREWORKS_DEFAULT_MODEL,
  FIREWORKS_MODELS,
  FIREWORKS_PROVIDER,
  getFireworksFunctionModels,
} from "./fireworks";
// === Google Gemini ===
export {
  GEMINI_DEFAULT_MODEL,
  GEMINI_MODELS,
  GEMINI_PROVIDER,
  getActiveGeminiModels,
  getGeminiThinkingModels,
} from "./gemini";
// === Groq ===
export {
  GROQ_DEFAULT_MODEL,
  GROQ_MODELS,
  GROQ_PROVIDER,
  getGroqVisionModels,
} from "./groq";
export { HICAP_PROVIDER } from "./hicap";
export { HUAWEI_CLOUD_MAAS_PROVIDER } from "./huawei-cloud-maas";
export { HUGGINGFACE_MODELS, HUGGINGFACE_PROVIDER } from "./huggingface";
export { LITELLM_PROVIDER } from "./litellm";
export { LMSTUDIO_PROVIDER } from "./lmstudio";
// === MiniMax ===
export {
  MINIMAX_DEFAULT_MODEL,
  MINIMAX_MODELS,
  MINIMAX_PROVIDER,
} from "./minimax";
export { MISTRAL_PROVIDER } from "./mistral";
// === Moonshot ===
export {
  MOONSHOT_DEFAULT_MODEL,
  MOONSHOT_MODELS,
  MOONSHOT_PROVIDER,
} from "./moonshot";
// === Nebius ===
export {
  NEBIUS_DEFAULT_MODEL,
  NEBIUS_MODELS,
  NEBIUS_PROVIDER,
} from "./nebius";
// === Nous Research ===
export {
  NOUS_RESEARCH_DEFAULT_MODEL,
  NOUS_RESEARCH_MODELS,
  NOUS_RESEARCH_PROVIDER,
} from "./nous-research";
// === OCA ===
export {
  DEFAULT_EXTERNAL_OCA_BASE_URL,
  DEFAULT_INTERNAL_OCA_BASE_URL,
  OCA_DEFAULT_MODEL,
  OCA_MODELS,
  OCA_PROVIDER,
} from "./oca";
export { OLLAMA_PROVIDER } from "./ollama";
// === OpenAI ===
export {
  getActiveOpenAIModels,
  getOpenAIReasoningModels,
  OPENAI_DEFAULT_MODEL,
  OPENAI_MODELS,
  OPENAI_PROVIDER,
} from "./openai";
export {
  OPENAI_CODEX_DEFAULT_MODEL,
  OPENAI_CODEX_PROVIDER,
} from "./openai-codex";
// === OpenCode ===
export {
  OPENCODE_DEFAULT_MODEL,
  OPENCODE_MODELS,
  OPENCODE_PROVIDER,
} from "./opencode";
// === OpenRouter ===
export {
  OPENROUTER_DEFAULT_MODEL,
  OPENROUTER_MODELS,
  OPENROUTER_PROVIDER,
} from "./openrouter";
export { QWEN_DEFAULT_MODEL, QWEN_MODELS, QWEN_PROVIDER } from "./qwen";
// === Qwen Code ===
export {
  QWEN_CODE_DEFAULT_MODEL,
  QWEN_CODE_MODELS,
  QWEN_CODE_PROVIDER,
} from "./qwen-code";
export { REQUESTY_PROVIDER } from "./requesty";
// === SambaNova ===
export {
  SAMBANOVA_DEFAULT_MODEL,
  SAMBANOVA_MODELS,
  SAMBANOVA_PROVIDER,
} from "./sambanova";
// === SAP AI Core ===
export {
  SAP_AI_CORE_DEFAULT_MODEL,
  SAP_AI_CORE_MODELS,
  SAP_AI_CORE_PROVIDER,
} from "./sapaicore";
// === Together AI ===
export {
  getTogetherLlamaModels,
  TOGETHER_DEFAULT_MODEL,
  TOGETHER_MODELS,
  TOGETHER_PROVIDER,
} from "./together";
export { VERCEL_AI_GATEWAY_PROVIDER } from "./vercel-ai-gateway";
// === Google Vertex AI ===
export {
  VERTEX_DEFAULT_MODEL,
  VERTEX_MODELS,
  VERTEX_PROVIDER,
} from "./vertex";
// === xAI (Grok) ===
export {
  getActiveXAIModels,
  XAI_DEFAULT_MODEL,
  XAI_MODELS,
  XAI_PROVIDER,
} from "./xai";
export { ZAI_DEFAULT_MODEL, ZAI_MODELS, ZAI_PROVIDER } from "./zai";
@@ -0,0 +1,19 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* LiteLLM Provider
|
|
3
|
+
*/
|
|
4
|
+
|
|
5
|
+
import type { ModelCollection } from "../schemas/index";
|
|
6
|
+
|
|
7
|
+
export const LITELLM_PROVIDER: ModelCollection = {
|
|
8
|
+
provider: {
|
|
9
|
+
id: "litellm",
|
|
10
|
+
name: "LiteLLM",
|
|
11
|
+
description: "Self-hosted LLM proxy",
|
|
12
|
+
protocol: "openai-chat",
|
|
13
|
+
baseUrl: "http://localhost:4000/v1",
|
|
14
|
+
defaultModelId: "gpt-4o",
|
|
15
|
+
capabilities: ["prompt-cache"],
|
|
16
|
+
env: ["LITELLM_API_KEY"],
|
|
17
|
+
},
|
|
18
|
+
models: {},
|
|
19
|
+
};
|
|
@@ -0,0 +1,22 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* LM Studio Provider
|
|
3
|
+
*/
|
|
4
|
+
|
|
5
|
+
import { getGeneratedModelsForProvider } from "../generated-access";
|
|
6
|
+
import type { ModelCollection, ModelInfo } from "../schemas/index";
|
|
7
|
+
|
|
8
|
+
const LMSTUDIO_MODELS: Record<string, ModelInfo> =
|
|
9
|
+
getGeneratedModelsForProvider("lmstudio");
|
|
10
|
+
|
|
11
|
+
export const LMSTUDIO_PROVIDER: ModelCollection = {
|
|
12
|
+
provider: {
|
|
13
|
+
id: "lmstudio",
|
|
14
|
+
name: "LM Studio",
|
|
15
|
+
description: "Local model inference with LM Studio",
|
|
16
|
+
protocol: "openai-chat",
|
|
17
|
+
baseUrl: "http://localhost:1234/v1",
|
|
18
|
+
defaultModelId: Object.keys(LMSTUDIO_MODELS)[0],
|
|
19
|
+
env: ["LMSTUDIO_API_KEY"],
|
|
20
|
+
},
|
|
21
|
+
models: LMSTUDIO_MODELS || {},
|
|
22
|
+
};
|
|
@@ -0,0 +1,34 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* MiniMax Provider
|
|
3
|
+
*/
|
|
4
|
+
|
|
5
|
+
import { getGeneratedModelsForProvider } from "../generated-access";
|
|
6
|
+
import type { ModelCollection, ModelInfo } from "../schemas/index";
|
|
7
|
+
|
|
8
|
+
const DEFAULT_MINIMAX_MODEL_ID = "MiniMax-M2.5";
|
|
9
|
+
|
|
10
|
+
export const MINIMAX_MODELS: Record<string, ModelInfo> = {
|
|
11
|
+
[DEFAULT_MINIMAX_MODEL_ID]: {
|
|
12
|
+
id: DEFAULT_MINIMAX_MODEL_ID,
|
|
13
|
+
name: "MiniMax M2.5",
|
|
14
|
+
capabilities: ["streaming", "tools", "reasoning", "prompt-cache"],
|
|
15
|
+
},
|
|
16
|
+
...getGeneratedModelsForProvider("minimax"),
|
|
17
|
+
};
|
|
18
|
+
|
|
19
|
+
export const MINIMAX_DEFAULT_MODEL =
|
|
20
|
+
Object.keys(MINIMAX_MODELS)[0] ?? DEFAULT_MINIMAX_MODEL_ID;
|
|
21
|
+
|
|
22
|
+
export const MINIMAX_PROVIDER: ModelCollection = {
|
|
23
|
+
provider: {
|
|
24
|
+
id: "minimax",
|
|
25
|
+
name: "MiniMax",
|
|
26
|
+
description: "MiniMax models via Anthropic-compatible API",
|
|
27
|
+
protocol: "anthropic",
|
|
28
|
+
baseUrl: "https://api.minimax.io/anthropic",
|
|
29
|
+
defaultModelId: MINIMAX_DEFAULT_MODEL,
|
|
30
|
+
capabilities: ["reasoning", "prompt-cache"],
|
|
31
|
+
env: ["MINIMAX_API_KEY"],
|
|
32
|
+
},
|
|
33
|
+
models: MINIMAX_MODELS,
|
|
34
|
+
};
|
|
@@ -0,0 +1,19 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Mistral Provider
|
|
3
|
+
*/
|
|
4
|
+
|
|
5
|
+
import type { ModelCollection } from "../schemas/index";
|
|
6
|
+
|
|
7
|
+
/**
 * Provider descriptor for Mistral AI's hosted API.
 *
 * `models` is intentionally empty here — presumably the model list comes
 * from elsewhere (e.g. a generated catalog or runtime discovery); confirm
 * against callers.
 */
export const MISTRAL_PROVIDER: ModelCollection = {
  provider: {
    id: "mistral",
    name: "Mistral",
    description: "Mistral AI models via AI SDK provider",
    // Mistral's API is OpenAI-chat compatible at this endpoint.
    protocol: "openai-chat",
    baseUrl: "https://api.mistral.ai/v1",
    // "latest" alias tracks Mistral's current medium-tier model.
    defaultModelId: "mistral-medium-latest",
    capabilities: ["reasoning"],
    // Environment variable consulted for the API key.
    env: ["MISTRAL_API_KEY"],
  },
  models: {},
};
|
|
@@ -0,0 +1,34 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Moonshot Provider
|
|
3
|
+
*/
|
|
4
|
+
|
|
5
|
+
import { getGeneratedModelsForProvider } from "../generated-access";
|
|
6
|
+
import type { ModelCollection, ModelInfo } from "../schemas/index";
|
|
7
|
+
|
|
8
|
+
const DEFAULT_MOONSHOT_MODEL_ID = "kimi-k2-0905-preview";
|
|
9
|
+
|
|
10
|
+
export const MOONSHOT_MODELS: Record<string, ModelInfo> = {
|
|
11
|
+
[DEFAULT_MOONSHOT_MODEL_ID]: {
|
|
12
|
+
id: DEFAULT_MOONSHOT_MODEL_ID,
|
|
13
|
+
name: "Kimi K2 Preview",
|
|
14
|
+
capabilities: ["streaming", "tools", "reasoning"],
|
|
15
|
+
},
|
|
16
|
+
...getGeneratedModelsForProvider("moonshot"),
|
|
17
|
+
};
|
|
18
|
+
|
|
19
|
+
export const MOONSHOT_DEFAULT_MODEL =
|
|
20
|
+
Object.keys(MOONSHOT_MODELS)[0] ?? DEFAULT_MOONSHOT_MODEL_ID;
|
|
21
|
+
|
|
22
|
+
export const MOONSHOT_PROVIDER: ModelCollection = {
|
|
23
|
+
provider: {
|
|
24
|
+
id: "moonshot",
|
|
25
|
+
name: "Moonshot",
|
|
26
|
+
description: "Moonshot AI Studio models",
|
|
27
|
+
protocol: "openai-chat",
|
|
28
|
+
baseUrl: "https://api.moonshot.ai/v1",
|
|
29
|
+
defaultModelId: MOONSHOT_DEFAULT_MODEL,
|
|
30
|
+
capabilities: ["reasoning", "prompt-cache"],
|
|
31
|
+
env: ["MOONSHOT_API_KEY"],
|
|
32
|
+
},
|
|
33
|
+
models: MOONSHOT_MODELS,
|
|
34
|
+
};
|