@clinebot/llms 0.0.20 → 0.0.22
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/config-browser.d.ts +1 -0
- package/dist/config-browser.d.ts.map +1 -0
- package/dist/config.d.ts +1 -0
- package/dist/config.d.ts.map +1 -0
- package/dist/index.browser.d.ts +1 -0
- package/dist/index.browser.d.ts.map +1 -0
- package/dist/index.browser.js +5 -5
- package/dist/index.d.ts +1 -0
- package/dist/index.d.ts.map +1 -0
- package/dist/index.js +12 -12
- package/dist/models/generated-access.d.ts +1 -0
- package/dist/models/generated-access.d.ts.map +1 -0
- package/dist/models/generated-provider-loaders.d.ts +1 -0
- package/dist/models/generated-provider-loaders.d.ts.map +1 -0
- package/dist/models/generated.d.ts +1 -0
- package/dist/models/generated.d.ts.map +1 -0
- package/dist/models/index.d.ts +1 -0
- package/dist/models/index.d.ts.map +1 -0
- package/dist/models/models-dev-catalog.d.ts +1 -0
- package/dist/models/models-dev-catalog.d.ts.map +1 -0
- package/dist/models/providers/aihubmix.d.ts +1 -0
- package/dist/models/providers/aihubmix.d.ts.map +1 -0
- package/dist/models/providers/anthropic.d.ts +1 -0
- package/dist/models/providers/anthropic.d.ts.map +1 -0
- package/dist/models/providers/asksage.d.ts +1 -0
- package/dist/models/providers/asksage.d.ts.map +1 -0
- package/dist/models/providers/baseten.d.ts +1 -0
- package/dist/models/providers/baseten.d.ts.map +1 -0
- package/dist/models/providers/bedrock.d.ts +1 -0
- package/dist/models/providers/bedrock.d.ts.map +1 -0
- package/dist/models/providers/cerebras.d.ts +1 -0
- package/dist/models/providers/cerebras.d.ts.map +1 -0
- package/dist/models/providers/claude-code.d.ts +1 -0
- package/dist/models/providers/claude-code.d.ts.map +1 -0
- package/dist/models/providers/cline.d.ts +1 -0
- package/dist/models/providers/cline.d.ts.map +1 -0
- package/dist/models/providers/deepseek.d.ts +1 -0
- package/dist/models/providers/deepseek.d.ts.map +1 -0
- package/dist/models/providers/dify.d.ts +1 -0
- package/dist/models/providers/dify.d.ts.map +1 -0
- package/dist/models/providers/doubao.d.ts +1 -0
- package/dist/models/providers/doubao.d.ts.map +1 -0
- package/dist/models/providers/fireworks.d.ts +1 -0
- package/dist/models/providers/fireworks.d.ts.map +1 -0
- package/dist/models/providers/gemini.d.ts +1 -0
- package/dist/models/providers/gemini.d.ts.map +1 -0
- package/dist/models/providers/groq.d.ts +1 -0
- package/dist/models/providers/groq.d.ts.map +1 -0
- package/dist/models/providers/hicap.d.ts +1 -0
- package/dist/models/providers/hicap.d.ts.map +1 -0
- package/dist/models/providers/huawei-cloud-maas.d.ts +1 -0
- package/dist/models/providers/huawei-cloud-maas.d.ts.map +1 -0
- package/dist/models/providers/huggingface.d.ts +1 -0
- package/dist/models/providers/huggingface.d.ts.map +1 -0
- package/dist/models/providers/index.d.ts +1 -0
- package/dist/models/providers/index.d.ts.map +1 -0
- package/dist/models/providers/litellm.d.ts +1 -0
- package/dist/models/providers/litellm.d.ts.map +1 -0
- package/dist/models/providers/lmstudio.d.ts +1 -0
- package/dist/models/providers/lmstudio.d.ts.map +1 -0
- package/dist/models/providers/minimax.d.ts +1 -0
- package/dist/models/providers/minimax.d.ts.map +1 -0
- package/dist/models/providers/mistral.d.ts +1 -0
- package/dist/models/providers/mistral.d.ts.map +1 -0
- package/dist/models/providers/moonshot.d.ts +1 -0
- package/dist/models/providers/moonshot.d.ts.map +1 -0
- package/dist/models/providers/nebius.d.ts +1 -0
- package/dist/models/providers/nebius.d.ts.map +1 -0
- package/dist/models/providers/nous-research.d.ts +1 -0
- package/dist/models/providers/nous-research.d.ts.map +1 -0
- package/dist/models/providers/oca.d.ts +1 -0
- package/dist/models/providers/oca.d.ts.map +1 -0
- package/dist/models/providers/ollama.d.ts +1 -0
- package/dist/models/providers/ollama.d.ts.map +1 -0
- package/dist/models/providers/openai-codex.d.ts +1 -0
- package/dist/models/providers/openai-codex.d.ts.map +1 -0
- package/dist/models/providers/openai.d.ts +1 -0
- package/dist/models/providers/openai.d.ts.map +1 -0
- package/dist/models/providers/opencode.d.ts +1 -0
- package/dist/models/providers/opencode.d.ts.map +1 -0
- package/dist/models/providers/openrouter.d.ts +1 -0
- package/dist/models/providers/openrouter.d.ts.map +1 -0
- package/dist/models/providers/qwen-code.d.ts +1 -0
- package/dist/models/providers/qwen-code.d.ts.map +1 -0
- package/dist/models/providers/qwen.d.ts +1 -0
- package/dist/models/providers/qwen.d.ts.map +1 -0
- package/dist/models/providers/requesty.d.ts +1 -0
- package/dist/models/providers/requesty.d.ts.map +1 -0
- package/dist/models/providers/sambanova.d.ts +1 -0
- package/dist/models/providers/sambanova.d.ts.map +1 -0
- package/dist/models/providers/sapaicore.d.ts +1 -0
- package/dist/models/providers/sapaicore.d.ts.map +1 -0
- package/dist/models/providers/together.d.ts +1 -0
- package/dist/models/providers/together.d.ts.map +1 -0
- package/dist/models/providers/vercel-ai-gateway.d.ts +1 -0
- package/dist/models/providers/vercel-ai-gateway.d.ts.map +1 -0
- package/dist/models/providers/vertex.d.ts +1 -0
- package/dist/models/providers/vertex.d.ts.map +1 -0
- package/dist/models/providers/xai.d.ts +1 -0
- package/dist/models/providers/xai.d.ts.map +1 -0
- package/dist/models/providers/zai.d.ts +1 -0
- package/dist/models/providers/zai.d.ts.map +1 -0
- package/dist/models/query.d.ts +1 -0
- package/dist/models/query.d.ts.map +1 -0
- package/dist/models/registry.d.ts +1 -0
- package/dist/models/registry.d.ts.map +1 -0
- package/dist/models/schemas/index.d.ts +1 -0
- package/dist/models/schemas/index.d.ts.map +1 -0
- package/dist/models/schemas/model.d.ts +1 -0
- package/dist/models/schemas/model.d.ts.map +1 -0
- package/dist/models/schemas/query.d.ts +1 -0
- package/dist/models/schemas/query.d.ts.map +1 -0
- package/dist/providers/handlers/ai-sdk-community.d.ts +1 -0
- package/dist/providers/handlers/ai-sdk-community.d.ts.map +1 -0
- package/dist/providers/handlers/ai-sdk-provider-base.d.ts +1 -0
- package/dist/providers/handlers/ai-sdk-provider-base.d.ts.map +1 -0
- package/dist/providers/handlers/anthropic-base.d.ts +1 -0
- package/dist/providers/handlers/anthropic-base.d.ts.map +1 -0
- package/dist/providers/handlers/asksage.d.ts +1 -0
- package/dist/providers/handlers/asksage.d.ts.map +1 -0
- package/dist/providers/handlers/auth.d.ts +1 -0
- package/dist/providers/handlers/auth.d.ts.map +1 -0
- package/dist/providers/handlers/base.d.ts +1 -0
- package/dist/providers/handlers/base.d.ts.map +1 -0
- package/dist/providers/handlers/bedrock-base.d.ts +1 -0
- package/dist/providers/handlers/bedrock-base.d.ts.map +1 -0
- package/dist/providers/handlers/bedrock-client.d.ts +1 -0
- package/dist/providers/handlers/bedrock-client.d.ts.map +1 -0
- package/dist/providers/handlers/community-sdk.d.ts +1 -0
- package/dist/providers/handlers/community-sdk.d.ts.map +1 -0
- package/dist/providers/handlers/fetch-base.d.ts +1 -0
- package/dist/providers/handlers/fetch-base.d.ts.map +1 -0
- package/dist/providers/handlers/gemini-base.d.ts +1 -0
- package/dist/providers/handlers/gemini-base.d.ts.map +1 -0
- package/dist/providers/handlers/index.d.ts +1 -0
- package/dist/providers/handlers/index.d.ts.map +1 -0
- package/dist/providers/handlers/openai-base.d.ts +1 -0
- package/dist/providers/handlers/openai-base.d.ts.map +1 -0
- package/dist/providers/handlers/openai-responses.d.ts +1 -0
- package/dist/providers/handlers/openai-responses.d.ts.map +1 -0
- package/dist/providers/handlers/providers.d.ts +1 -0
- package/dist/providers/handlers/providers.d.ts.map +1 -0
- package/dist/providers/handlers/r1-base.d.ts +1 -0
- package/dist/providers/handlers/r1-base.d.ts.map +1 -0
- package/dist/providers/handlers/registry.d.ts +1 -0
- package/dist/providers/handlers/registry.d.ts.map +1 -0
- package/dist/providers/handlers/vertex.d.ts +1 -0
- package/dist/providers/handlers/vertex.d.ts.map +1 -0
- package/dist/providers/index.d.ts +1 -0
- package/dist/providers/index.d.ts.map +1 -0
- package/dist/providers/public.browser.d.ts +1 -0
- package/dist/providers/public.browser.d.ts.map +1 -0
- package/dist/providers/public.d.ts +1 -0
- package/dist/providers/public.d.ts.map +1 -0
- package/dist/providers/shared/openai-compatible.d.ts +1 -0
- package/dist/providers/shared/openai-compatible.d.ts.map +1 -0
- package/dist/providers/transform/ai-sdk-community-format.d.ts +1 -0
- package/dist/providers/transform/ai-sdk-community-format.d.ts.map +1 -0
- package/dist/providers/transform/anthropic-format.d.ts +1 -0
- package/dist/providers/transform/anthropic-format.d.ts.map +1 -0
- package/dist/providers/transform/content-format.d.ts +1 -0
- package/dist/providers/transform/content-format.d.ts.map +1 -0
- package/dist/providers/transform/gemini-format.d.ts +1 -0
- package/dist/providers/transform/gemini-format.d.ts.map +1 -0
- package/dist/providers/transform/index.d.ts +1 -0
- package/dist/providers/transform/index.d.ts.map +1 -0
- package/dist/providers/transform/openai-format.d.ts +1 -0
- package/dist/providers/transform/openai-format.d.ts.map +1 -0
- package/dist/providers/transform/r1-format.d.ts +1 -0
- package/dist/providers/transform/r1-format.d.ts.map +1 -0
- package/dist/providers/types/config.d.ts +1 -0
- package/dist/providers/types/config.d.ts.map +1 -0
- package/dist/providers/types/handler.d.ts +1 -0
- package/dist/providers/types/handler.d.ts.map +1 -0
- package/dist/providers/types/index.d.ts +1 -0
- package/dist/providers/types/index.d.ts.map +1 -0
- package/dist/providers/types/messages.d.ts +1 -0
- package/dist/providers/types/messages.d.ts.map +1 -0
- package/dist/providers/types/model-info.d.ts +1 -0
- package/dist/providers/types/model-info.d.ts.map +1 -0
- package/dist/providers/types/provider-ids.d.ts +1 -1
- package/dist/providers/types/provider-ids.d.ts.map +1 -0
- package/dist/providers/types/settings.d.ts +1 -0
- package/dist/providers/types/settings.d.ts.map +1 -0
- package/dist/providers/types/stream.d.ts +1 -0
- package/dist/providers/types/stream.d.ts.map +1 -0
- package/dist/providers/utils/index.d.ts +1 -0
- package/dist/providers/utils/index.d.ts.map +1 -0
- package/dist/providers/utils/retry.d.ts +1 -0
- package/dist/providers/utils/retry.d.ts.map +1 -0
- package/dist/providers/utils/stream-processor.d.ts +1 -0
- package/dist/providers/utils/stream-processor.d.ts.map +1 -0
- package/dist/providers/utils/tool-processor.d.ts +1 -0
- package/dist/providers/utils/tool-processor.d.ts.map +1 -0
- package/dist/sdk.d.ts +1 -0
- package/dist/sdk.d.ts.map +1 -0
- package/dist/types.d.ts +1 -0
- package/dist/types.d.ts.map +1 -0
- package/package.json +3 -4
- package/src/catalog.ts +0 -20
- package/src/config-browser.ts +0 -11
- package/src/config.ts +0 -49
- package/src/index.browser.ts +0 -9
- package/src/index.ts +0 -10
- package/src/live-providers.test.ts +0 -138
- package/src/models/generated-access.ts +0 -41
- package/src/models/generated-provider-loaders.ts +0 -166
- package/src/models/generated.ts +0 -11785
- package/src/models/index.ts +0 -271
- package/src/models/models-dev-catalog.test.ts +0 -161
- package/src/models/models-dev-catalog.ts +0 -168
- package/src/models/providers/aihubmix.ts +0 -19
- package/src/models/providers/anthropic.ts +0 -60
- package/src/models/providers/asksage.ts +0 -19
- package/src/models/providers/baseten.ts +0 -21
- package/src/models/providers/bedrock.ts +0 -30
- package/src/models/providers/cerebras.ts +0 -24
- package/src/models/providers/claude-code.ts +0 -51
- package/src/models/providers/cline.ts +0 -25
- package/src/models/providers/deepseek.ts +0 -33
- package/src/models/providers/dify.ts +0 -17
- package/src/models/providers/doubao.ts +0 -33
- package/src/models/providers/fireworks.ts +0 -34
- package/src/models/providers/gemini.ts +0 -43
- package/src/models/providers/groq.ts +0 -33
- package/src/models/providers/hicap.ts +0 -18
- package/src/models/providers/huawei-cloud-maas.ts +0 -18
- package/src/models/providers/huggingface.ts +0 -22
- package/src/models/providers/index.ts +0 -162
- package/src/models/providers/litellm.ts +0 -19
- package/src/models/providers/lmstudio.ts +0 -22
- package/src/models/providers/minimax.ts +0 -34
- package/src/models/providers/mistral.ts +0 -19
- package/src/models/providers/moonshot.ts +0 -34
- package/src/models/providers/nebius.ts +0 -24
- package/src/models/providers/nous-research.ts +0 -21
- package/src/models/providers/oca.ts +0 -30
- package/src/models/providers/ollama.ts +0 -18
- package/src/models/providers/openai-codex.ts +0 -46
- package/src/models/providers/openai.ts +0 -43
- package/src/models/providers/opencode.ts +0 -28
- package/src/models/providers/openrouter.ts +0 -24
- package/src/models/providers/qwen-code.ts +0 -33
- package/src/models/providers/qwen.ts +0 -34
- package/src/models/providers/requesty.ts +0 -23
- package/src/models/providers/sambanova.ts +0 -23
- package/src/models/providers/sapaicore.ts +0 -34
- package/src/models/providers/together.ts +0 -35
- package/src/models/providers/vercel-ai-gateway.ts +0 -23
- package/src/models/providers/vertex.ts +0 -36
- package/src/models/providers/xai.ts +0 -34
- package/src/models/providers/zai.ts +0 -25
- package/src/models/query.ts +0 -407
- package/src/models/registry.ts +0 -511
- package/src/models/schemas/index.ts +0 -62
- package/src/models/schemas/model.ts +0 -308
- package/src/models/schemas/query.ts +0 -336
- package/src/providers/browser.ts +0 -4
- package/src/providers/handlers/ai-sdk-community.ts +0 -229
- package/src/providers/handlers/ai-sdk-provider-base.ts +0 -203
- package/src/providers/handlers/anthropic-base.test.ts +0 -30
- package/src/providers/handlers/anthropic-base.ts +0 -387
- package/src/providers/handlers/asksage.test.ts +0 -103
- package/src/providers/handlers/asksage.ts +0 -138
- package/src/providers/handlers/auth.test.ts +0 -19
- package/src/providers/handlers/auth.ts +0 -121
- package/src/providers/handlers/base.test.ts +0 -230
- package/src/providers/handlers/base.ts +0 -310
- package/src/providers/handlers/bedrock-base.ts +0 -390
- package/src/providers/handlers/bedrock-client.ts +0 -100
- package/src/providers/handlers/codex.test.ts +0 -160
- package/src/providers/handlers/community-sdk.test.ts +0 -321
- package/src/providers/handlers/community-sdk.ts +0 -391
- package/src/providers/handlers/fetch-base.ts +0 -68
- package/src/providers/handlers/gemini-base.test.ts +0 -261
- package/src/providers/handlers/gemini-base.ts +0 -307
- package/src/providers/handlers/index.ts +0 -67
- package/src/providers/handlers/openai-base.ts +0 -341
- package/src/providers/handlers/openai-responses.test.ts +0 -259
- package/src/providers/handlers/openai-responses.ts +0 -634
- package/src/providers/handlers/providers.test.ts +0 -120
- package/src/providers/handlers/providers.ts +0 -563
- package/src/providers/handlers/r1-base.ts +0 -283
- package/src/providers/handlers/registry.ts +0 -185
- package/src/providers/handlers/vertex.test.ts +0 -124
- package/src/providers/handlers/vertex.ts +0 -302
- package/src/providers/index.ts +0 -534
- package/src/providers/public.browser.ts +0 -20
- package/src/providers/public.ts +0 -51
- package/src/providers/shared/openai-compatible.ts +0 -63
- package/src/providers/transform/ai-sdk-community-format.test.ts +0 -73
- package/src/providers/transform/ai-sdk-community-format.ts +0 -115
- package/src/providers/transform/anthropic-format.ts +0 -230
- package/src/providers/transform/content-format.ts +0 -34
- package/src/providers/transform/format-conversion.test.ts +0 -413
- package/src/providers/transform/gemini-format.ts +0 -262
- package/src/providers/transform/index.ts +0 -22
- package/src/providers/transform/openai-format.ts +0 -290
- package/src/providers/transform/r1-format.ts +0 -287
- package/src/providers/types/config.ts +0 -396
- package/src/providers/types/handler.ts +0 -92
- package/src/providers/types/index.ts +0 -120
- package/src/providers/types/messages.ts +0 -162
- package/src/providers/types/model-info.test.ts +0 -57
- package/src/providers/types/model-info.ts +0 -65
- package/src/providers/types/provider-ids.test.ts +0 -12
- package/src/providers/types/provider-ids.ts +0 -89
- package/src/providers/types/settings.test.ts +0 -49
- package/src/providers/types/settings.ts +0 -533
- package/src/providers/types/stream.ts +0 -117
- package/src/providers/utils/index.ts +0 -27
- package/src/providers/utils/retry.test.ts +0 -140
- package/src/providers/utils/retry.ts +0 -188
- package/src/providers/utils/stream-processor.test.ts +0 -232
- package/src/providers/utils/stream-processor.ts +0 -472
- package/src/providers/utils/tool-processor.test.ts +0 -235
- package/src/providers/utils/tool-processor.ts +0 -146
- package/src/sdk.ts +0 -264
- package/src/types.ts +0 -79
|
@@ -1,21 +0,0 @@
|
|
|
1
|
-
/**
|
|
2
|
-
* Baseten Provider
|
|
3
|
-
*/
|
|
4
|
-
|
|
5
|
-
import { getGeneratedModelsForProvider } from "../generated-access";
|
|
6
|
-
import type { ModelCollection } from "../schemas/index";
|
|
7
|
-
|
|
8
|
-
const BASETEN_MODELS = getGeneratedModelsForProvider("baseten");
|
|
9
|
-
|
|
10
|
-
export const BASETEN_PROVIDER: ModelCollection = {
|
|
11
|
-
provider: {
|
|
12
|
-
id: "baseten",
|
|
13
|
-
name: "Baseten",
|
|
14
|
-
description: "ML inference platform",
|
|
15
|
-
protocol: "openai-chat",
|
|
16
|
-
baseUrl: "https://model-api.baseten.co/v1",
|
|
17
|
-
defaultModelId: Object.keys(BASETEN_MODELS)[0],
|
|
18
|
-
env: ["BASETEN_API_KEY"],
|
|
19
|
-
},
|
|
20
|
-
models: BASETEN_MODELS,
|
|
21
|
-
};
|
|
@@ -1,30 +0,0 @@
|
|
|
1
|
-
/**
|
|
2
|
-
* AWS Bedrock Models
|
|
3
|
-
*/
|
|
4
|
-
|
|
5
|
-
import { getGeneratedModelsForProvider } from "../generated-access";
|
|
6
|
-
import type { ModelCollection, ModelInfo } from "../schemas/index";
|
|
7
|
-
|
|
8
|
-
export const BEDROCK_MODELS: Record<string, ModelInfo> =
|
|
9
|
-
getGeneratedModelsForProvider("bedrock");
|
|
10
|
-
|
|
11
|
-
export const BEDROCK_DEFAULT_MODEL =
|
|
12
|
-
Object.keys(BEDROCK_MODELS)[0] ?? "anthropic.claude-sonnet-4-5-20250929-v1:0";
|
|
13
|
-
|
|
14
|
-
export const BEDROCK_PROVIDER: ModelCollection = {
|
|
15
|
-
provider: {
|
|
16
|
-
id: "bedrock",
|
|
17
|
-
name: "AWS Bedrock",
|
|
18
|
-
description: "Amazon Bedrock managed foundation models",
|
|
19
|
-
protocol: "anthropic",
|
|
20
|
-
defaultModelId: BEDROCK_DEFAULT_MODEL,
|
|
21
|
-
capabilities: ["reasoning", "prompt-cache"],
|
|
22
|
-
env: [
|
|
23
|
-
"AWS_REGION",
|
|
24
|
-
"AWS_ACCESS_KEY_ID",
|
|
25
|
-
"AWS_SECRET_ACCESS_KEY",
|
|
26
|
-
"AWS_SESSION_TOKEN",
|
|
27
|
-
],
|
|
28
|
-
},
|
|
29
|
-
models: BEDROCK_MODELS,
|
|
30
|
-
};
|
|
@@ -1,24 +0,0 @@
|
|
|
1
|
-
/**
|
|
2
|
-
* Cerebras Provider
|
|
3
|
-
*/
|
|
4
|
-
|
|
5
|
-
import { getGeneratedModelsForProvider } from "../generated-access";
|
|
6
|
-
import type { ModelCollection, ModelInfo } from "../schemas/index";
|
|
7
|
-
|
|
8
|
-
export const CEREBRAS_MODELS: Record<string, ModelInfo> =
|
|
9
|
-
getGeneratedModelsForProvider("cerebras");
|
|
10
|
-
export const CEREBRAS_DEFAULT_MODEL =
|
|
11
|
-
Object.keys(CEREBRAS_MODELS)[0] ?? "llama3.1-70b";
|
|
12
|
-
|
|
13
|
-
export const CEREBRAS_PROVIDER: ModelCollection = {
|
|
14
|
-
provider: {
|
|
15
|
-
id: "cerebras",
|
|
16
|
-
name: "Cerebras",
|
|
17
|
-
description: "Fast inference on Cerebras wafer-scale chips",
|
|
18
|
-
protocol: "openai-chat",
|
|
19
|
-
baseUrl: "https://api.cerebras.ai/v1",
|
|
20
|
-
defaultModelId: CEREBRAS_DEFAULT_MODEL,
|
|
21
|
-
env: ["CEREBRAS_API_KEY"],
|
|
22
|
-
},
|
|
23
|
-
models: CEREBRAS_MODELS,
|
|
24
|
-
};
|
|
@@ -1,51 +0,0 @@
|
|
|
1
|
-
import { getGeneratedModelsForProvider } from "../generated-access";
|
|
2
|
-
import type { ModelCollection, ModelInfo } from "../schemas/index";
|
|
3
|
-
|
|
4
|
-
const ANTHROPIC_MODELS = getGeneratedModelsForProvider("anthropic");
|
|
5
|
-
|
|
6
|
-
function pickAnthropicModel(match: (id: string) => boolean): ModelInfo {
|
|
7
|
-
const entry = Object.entries(ANTHROPIC_MODELS).find(([id]) => match(id));
|
|
8
|
-
if (entry) {
|
|
9
|
-
return entry[1];
|
|
10
|
-
}
|
|
11
|
-
return {
|
|
12
|
-
id: "sonnet",
|
|
13
|
-
name: "Claude Sonnet",
|
|
14
|
-
capabilities: ["streaming", "reasoning"],
|
|
15
|
-
};
|
|
16
|
-
}
|
|
17
|
-
|
|
18
|
-
function toClaudeCodeModel(id: "opus" | "sonnet" | "haiku"): ModelInfo {
|
|
19
|
-
const source =
|
|
20
|
-
id === "opus"
|
|
21
|
-
? pickAnthropicModel((modelId) => modelId.includes("opus"))
|
|
22
|
-
: id === "haiku"
|
|
23
|
-
? pickAnthropicModel((modelId) => modelId.includes("haiku"))
|
|
24
|
-
: pickAnthropicModel((modelId) => modelId.includes("sonnet"));
|
|
25
|
-
return {
|
|
26
|
-
...source,
|
|
27
|
-
id,
|
|
28
|
-
name: `Claude ${id.charAt(0).toUpperCase()}${id.slice(1)}`,
|
|
29
|
-
};
|
|
30
|
-
}
|
|
31
|
-
|
|
32
|
-
export const CLAUDE_CODE_MODELS: Record<string, ModelInfo> = {
|
|
33
|
-
opus: toClaudeCodeModel("opus"),
|
|
34
|
-
sonnet: toClaudeCodeModel("sonnet"),
|
|
35
|
-
haiku: toClaudeCodeModel("haiku"),
|
|
36
|
-
};
|
|
37
|
-
|
|
38
|
-
export const CLAUDE_CODE_DEFAULT_MODEL = "sonnet";
|
|
39
|
-
|
|
40
|
-
export const CLAUDE_CODE_PROVIDER: ModelCollection = {
|
|
41
|
-
provider: {
|
|
42
|
-
id: "claude-code",
|
|
43
|
-
name: "Claude Code",
|
|
44
|
-
description: "Use Claude Code SDK with Claude Pro/Max subscription",
|
|
45
|
-
protocol: "openai-chat",
|
|
46
|
-
baseUrl: "",
|
|
47
|
-
defaultModelId: CLAUDE_CODE_DEFAULT_MODEL,
|
|
48
|
-
capabilities: ["reasoning"],
|
|
49
|
-
},
|
|
50
|
-
models: CLAUDE_CODE_MODELS,
|
|
51
|
-
};
|
|
@@ -1,25 +0,0 @@
|
|
|
1
|
-
/**
|
|
2
|
-
* Cline Provider
|
|
3
|
-
*/
|
|
4
|
-
|
|
5
|
-
import { getGeneratedModelsForProvider } from "../generated-access";
|
|
6
|
-
import type { ModelCollection, ModelInfo } from "../schemas/index";
|
|
7
|
-
|
|
8
|
-
export const CLINE_DEFAULT_MODEL = "anthropic/claude-sonnet-4.6";
|
|
9
|
-
export const CLINE_MODELS: Record<string, ModelInfo> =
|
|
10
|
-
getGeneratedModelsForProvider("vercel-ai-gateway");
|
|
11
|
-
export const CLINE_DEFAULT_MODELINFO = CLINE_MODELS[CLINE_DEFAULT_MODEL];
|
|
12
|
-
|
|
13
|
-
export const CLINE_PROVIDER: ModelCollection = {
|
|
14
|
-
provider: {
|
|
15
|
-
id: "cline",
|
|
16
|
-
name: "Cline",
|
|
17
|
-
description: "Cline API endpoint",
|
|
18
|
-
protocol: "openai-chat",
|
|
19
|
-
baseUrl: "https://api.cline.bot/api/v1",
|
|
20
|
-
defaultModelId: CLINE_DEFAULT_MODEL,
|
|
21
|
-
capabilities: ["reasoning", "prompt-cache", "tools", "oauth"],
|
|
22
|
-
env: ["CLINE_API_KEY"],
|
|
23
|
-
},
|
|
24
|
-
models: CLINE_MODELS,
|
|
25
|
-
};
|
|
@@ -1,33 +0,0 @@
|
|
|
1
|
-
/**
|
|
2
|
-
* DeepSeek Models
|
|
3
|
-
*/
|
|
4
|
-
|
|
5
|
-
import { getGeneratedModelsForProvider } from "../generated-access";
|
|
6
|
-
import type { ModelCollection, ModelInfo } from "../schemas/index";
|
|
7
|
-
|
|
8
|
-
export const DEEPSEEK_MODELS: Record<string, ModelInfo> =
|
|
9
|
-
getGeneratedModelsForProvider("deepseek");
|
|
10
|
-
|
|
11
|
-
export const DEEPSEEK_DEFAULT_MODEL = Object.keys(DEEPSEEK_MODELS)[0];
|
|
12
|
-
|
|
13
|
-
export const DEEPSEEK_PROVIDER: ModelCollection = {
|
|
14
|
-
provider: {
|
|
15
|
-
id: "deepseek",
|
|
16
|
-
name: "DeepSeek",
|
|
17
|
-
description: "Advanced AI models with reasoning capabilities",
|
|
18
|
-
protocol: "openai-chat",
|
|
19
|
-
baseUrl: "https://api.deepseek.com/v1",
|
|
20
|
-
defaultModelId: DEEPSEEK_DEFAULT_MODEL,
|
|
21
|
-
capabilities: ["reasoning", "prompt-cache"],
|
|
22
|
-
env: ["DEEPSEEK_API_KEY"],
|
|
23
|
-
},
|
|
24
|
-
models: DEEPSEEK_MODELS,
|
|
25
|
-
};
|
|
26
|
-
|
|
27
|
-
export function getDeepSeekReasoningModels(): Record<string, ModelInfo> {
|
|
28
|
-
return Object.fromEntries(
|
|
29
|
-
Object.entries(DEEPSEEK_MODELS).filter(([, info]) =>
|
|
30
|
-
info.capabilities?.includes("reasoning"),
|
|
31
|
-
),
|
|
32
|
-
);
|
|
33
|
-
}
|
|
@@ -1,17 +0,0 @@
|
|
|
1
|
-
/**
|
|
2
|
-
* Dify Provider
|
|
3
|
-
*/
|
|
4
|
-
|
|
5
|
-
import type { ModelCollection } from "../schemas/index";
|
|
6
|
-
|
|
7
|
-
export const DIFY_PROVIDER: ModelCollection = {
|
|
8
|
-
provider: {
|
|
9
|
-
id: "dify",
|
|
10
|
-
name: "Dify",
|
|
11
|
-
description: "Dify workflow/application provider via AI SDK",
|
|
12
|
-
protocol: "openai-chat",
|
|
13
|
-
defaultModelId: "default",
|
|
14
|
-
env: ["DIFY_API_KEY"],
|
|
15
|
-
},
|
|
16
|
-
models: {},
|
|
17
|
-
};
|
|
@@ -1,33 +0,0 @@
|
|
|
1
|
-
/**
|
|
2
|
-
* Doubao Provider
|
|
3
|
-
*/
|
|
4
|
-
|
|
5
|
-
import { getGeneratedModelsForProvider } from "../generated-access";
|
|
6
|
-
import type { ModelCollection, ModelInfo } from "../schemas/index";
|
|
7
|
-
|
|
8
|
-
const DEFAULT_DOUBAO_MODEL_ID = "doubao-1-5-pro-256k-250115";
|
|
9
|
-
|
|
10
|
-
export const DOUBAO_MODELS: Record<string, ModelInfo> = {
|
|
11
|
-
[DEFAULT_DOUBAO_MODEL_ID]: {
|
|
12
|
-
id: DEFAULT_DOUBAO_MODEL_ID,
|
|
13
|
-
name: "Doubao 1.5 Pro 256k",
|
|
14
|
-
capabilities: ["streaming", "tools"],
|
|
15
|
-
},
|
|
16
|
-
...getGeneratedModelsForProvider("doubao"),
|
|
17
|
-
};
|
|
18
|
-
|
|
19
|
-
export const DOUBAO_DEFAULT_MODEL =
|
|
20
|
-
Object.keys(DOUBAO_MODELS)[0] ?? DEFAULT_DOUBAO_MODEL_ID;
|
|
21
|
-
|
|
22
|
-
export const DOUBAO_PROVIDER: ModelCollection = {
|
|
23
|
-
provider: {
|
|
24
|
-
id: "doubao",
|
|
25
|
-
name: "Doubao",
|
|
26
|
-
description: "Volcengine Ark platform models",
|
|
27
|
-
protocol: "openai-chat",
|
|
28
|
-
baseUrl: "https://ark.cn-beijing.volces.com/api/v3",
|
|
29
|
-
defaultModelId: DOUBAO_DEFAULT_MODEL,
|
|
30
|
-
env: ["DOUBAO_API_KEY"],
|
|
31
|
-
},
|
|
32
|
-
models: DOUBAO_MODELS,
|
|
33
|
-
};
|
|
@@ -1,34 +0,0 @@
|
|
|
1
|
-
/**
|
|
2
|
-
* Fireworks AI Models
|
|
3
|
-
*/
|
|
4
|
-
|
|
5
|
-
import { getGeneratedModelsForProvider } from "../generated-access";
|
|
6
|
-
import type { ModelCollection, ModelInfo } from "../schemas/index";
|
|
7
|
-
|
|
8
|
-
export const FIREWORKS_MODELS: Record<string, ModelInfo> =
|
|
9
|
-
getGeneratedModelsForProvider("fireworks");
|
|
10
|
-
|
|
11
|
-
export const FIREWORKS_DEFAULT_MODEL =
|
|
12
|
-
Object.keys(FIREWORKS_MODELS)[0] ??
|
|
13
|
-
"accounts/fireworks/models/llama-v3p1-8b-instruct";
|
|
14
|
-
|
|
15
|
-
export const FIREWORKS_PROVIDER: ModelCollection = {
|
|
16
|
-
provider: {
|
|
17
|
-
id: "fireworks",
|
|
18
|
-
name: "Fireworks AI",
|
|
19
|
-
description: "High-performance inference platform",
|
|
20
|
-
protocol: "openai-chat",
|
|
21
|
-
baseUrl: "https://api.fireworks.ai/inference/v1",
|
|
22
|
-
defaultModelId: FIREWORKS_DEFAULT_MODEL,
|
|
23
|
-
env: ["FIREWORKS_API_KEY"],
|
|
24
|
-
},
|
|
25
|
-
models: FIREWORKS_MODELS,
|
|
26
|
-
};
|
|
27
|
-
|
|
28
|
-
export function getFireworksFunctionModels(): Record<string, ModelInfo> {
|
|
29
|
-
return Object.fromEntries(
|
|
30
|
-
Object.entries(FIREWORKS_MODELS).filter(([, info]) =>
|
|
31
|
-
info.capabilities?.includes("tools"),
|
|
32
|
-
),
|
|
33
|
-
);
|
|
34
|
-
}
|
|
@@ -1,43 +0,0 @@
|
|
|
1
|
-
/**
|
|
2
|
-
* Google Gemini Models
|
|
3
|
-
*/
|
|
4
|
-
|
|
5
|
-
import { getGeneratedModelsForProvider } from "../generated-access";
|
|
6
|
-
import type { ModelCollection, ModelInfo } from "../schemas/index";
|
|
7
|
-
|
|
8
|
-
export const GEMINI_MODELS: Record<string, ModelInfo> =
|
|
9
|
-
getGeneratedModelsForProvider("gemini");
|
|
10
|
-
|
|
11
|
-
export const GEMINI_DEFAULT_MODEL =
|
|
12
|
-
Object.keys(GEMINI_MODELS)[0] ?? "gemini-3-pro";
|
|
13
|
-
|
|
14
|
-
export const GEMINI_PROVIDER: ModelCollection = {
|
|
15
|
-
provider: {
|
|
16
|
-
id: "gemini",
|
|
17
|
-
name: "Google Gemini",
|
|
18
|
-
description: "Google Gemini API",
|
|
19
|
-
protocol: "gemini",
|
|
20
|
-
baseUrl: "https://generativelanguage.googleapis.com",
|
|
21
|
-
defaultModelId: GEMINI_DEFAULT_MODEL,
|
|
22
|
-
capabilities: ["reasoning", "prompt-cache"],
|
|
23
|
-
env: ["GOOGLE_GENERATIVE_AI_API_KEY", "GEMINI_API_KEY"],
|
|
24
|
-
},
|
|
25
|
-
models: GEMINI_MODELS,
|
|
26
|
-
};
|
|
27
|
-
|
|
28
|
-
export function getActiveGeminiModels(): Record<string, ModelInfo> {
|
|
29
|
-
return Object.fromEntries(
|
|
30
|
-
Object.entries(GEMINI_MODELS).filter(
|
|
31
|
-
([, info]) =>
|
|
32
|
-
!info.status || info.status === "active" || info.status === "preview",
|
|
33
|
-
),
|
|
34
|
-
);
|
|
35
|
-
}
|
|
36
|
-
|
|
37
|
-
export function getGeminiThinkingModels(): Record<string, ModelInfo> {
|
|
38
|
-
return Object.fromEntries(
|
|
39
|
-
Object.entries(GEMINI_MODELS).filter(([, info]) =>
|
|
40
|
-
info.capabilities?.includes("reasoning"),
|
|
41
|
-
),
|
|
42
|
-
);
|
|
43
|
-
}
|
|
@@ -1,33 +0,0 @@
|
|
|
1
|
-
/**
|
|
2
|
-
* Groq Models
|
|
3
|
-
*/
|
|
4
|
-
|
|
5
|
-
import { getGeneratedModelsForProvider } from "../generated-access";
|
|
6
|
-
import type { ModelCollection, ModelInfo } from "../schemas/index";
|
|
7
|
-
|
|
8
|
-
export const GROQ_MODELS: Record<string, ModelInfo> =
|
|
9
|
-
getGeneratedModelsForProvider("groq");
|
|
10
|
-
|
|
11
|
-
export const GROQ_DEFAULT_MODEL =
|
|
12
|
-
Object.keys(GROQ_MODELS)[0] ?? "llama-3.3-70b-versatile";
|
|
13
|
-
|
|
14
|
-
export const GROQ_PROVIDER: ModelCollection = {
|
|
15
|
-
provider: {
|
|
16
|
-
id: "groq",
|
|
17
|
-
name: "Groq",
|
|
18
|
-
description: "Ultra-fast LPU inference",
|
|
19
|
-
protocol: "openai-chat",
|
|
20
|
-
baseUrl: "https://api.groq.com/openai/v1",
|
|
21
|
-
defaultModelId: GROQ_DEFAULT_MODEL,
|
|
22
|
-
env: ["GROQ_API_KEY"],
|
|
23
|
-
},
|
|
24
|
-
models: GROQ_MODELS,
|
|
25
|
-
};
|
|
26
|
-
|
|
27
|
-
export function getGroqVisionModels(): Record<string, ModelInfo> {
|
|
28
|
-
return Object.fromEntries(
|
|
29
|
-
Object.entries(GROQ_MODELS).filter(([, info]) =>
|
|
30
|
-
info.capabilities?.includes("images"),
|
|
31
|
-
),
|
|
32
|
-
);
|
|
33
|
-
}
|
|
@@ -1,18 +0,0 @@
|
|
|
1
|
-
/**
|
|
2
|
-
* HiCap Provider
|
|
3
|
-
*/
|
|
4
|
-
|
|
5
|
-
import type { ModelCollection } from "../schemas/index";
|
|
6
|
-
|
|
7
|
-
export const HICAP_PROVIDER: ModelCollection = {
|
|
8
|
-
provider: {
|
|
9
|
-
id: "hicap",
|
|
10
|
-
name: "HiCap",
|
|
11
|
-
description: "HiCap AI platform",
|
|
12
|
-
protocol: "openai-chat",
|
|
13
|
-
baseUrl: "https://api.hicap.ai/v1",
|
|
14
|
-
defaultModelId: "hicap-pro",
|
|
15
|
-
env: ["HICAP_API_KEY"],
|
|
16
|
-
},
|
|
17
|
-
models: {},
|
|
18
|
-
};
|
|
@@ -1,18 +0,0 @@
|
|
|
1
|
-
/**
|
|
2
|
-
* Huawei Cloud MaaS Provider
|
|
3
|
-
*/
|
|
4
|
-
|
|
5
|
-
import type { ModelCollection } from "../schemas/index";
|
|
6
|
-
|
|
7
|
-
export const HUAWEI_CLOUD_MAAS_PROVIDER: ModelCollection = {
|
|
8
|
-
provider: {
|
|
9
|
-
id: "huawei-cloud-maas",
|
|
10
|
-
name: "Huawei Cloud MaaS",
|
|
11
|
-
description: "Huawei's model-as-a-service platform",
|
|
12
|
-
protocol: "openai-chat",
|
|
13
|
-
baseUrl: "https://infer-modelarts.cn-southwest-2.myhuaweicloud.com/v1",
|
|
14
|
-
defaultModelId: "DeepSeek-R1",
|
|
15
|
-
env: ["HUAWEI_CLOUD_MAAS_API_KEY"],
|
|
16
|
-
},
|
|
17
|
-
models: {},
|
|
18
|
-
};
|
|
@@ -1,22 +0,0 @@
|
|
|
1
|
-
/**
|
|
2
|
-
* Hugging Face Provider
|
|
3
|
-
*/
|
|
4
|
-
|
|
5
|
-
import { getGeneratedModelsForProvider } from "../generated-access";
|
|
6
|
-
import type { ModelCollection, ModelInfo } from "../schemas/index";
|
|
7
|
-
|
|
8
|
-
export const HUGGINGFACE_MODELS: Record<string, ModelInfo> =
|
|
9
|
-
getGeneratedModelsForProvider("huggingface");
|
|
10
|
-
|
|
11
|
-
export const HUGGINGFACE_PROVIDER: ModelCollection = {
|
|
12
|
-
provider: {
|
|
13
|
-
id: "huggingface",
|
|
14
|
-
name: "Hugging Face",
|
|
15
|
-
description: "Hugging Face inference API",
|
|
16
|
-
protocol: "openai-chat",
|
|
17
|
-
baseUrl: "https://api-inference.huggingface.co/v1",
|
|
18
|
-
defaultModelId: Object.keys(HUGGINGFACE_MODELS)[0],
|
|
19
|
-
env: ["HF_TOKEN"],
|
|
20
|
-
},
|
|
21
|
-
models: HUGGINGFACE_MODELS,
|
|
22
|
-
};
|
|
@@ -1,162 +0,0 @@
|
|
|
1
|
-
/**
 * Provider Exports
 *
 * Re-exports all provider model definitions and collections.
 *
 * Entries are grouped per provider module and ordered alphabetically by
 * module path; section banners mark the larger groups.
 */

export { AIHUBMIX_PROVIDER } from "./aihubmix";
// === Anthropic ===
export {
  ANTHROPIC_DEFAULT_MODEL,
  ANTHROPIC_MODELS,
  ANTHROPIC_PROVIDER,
  getActiveAnthropicModels,
  getAnthropicReasoningModels,
} from "./anthropic";
export { ASKSAGE_PROVIDER } from "./asksage";
export { BASETEN_PROVIDER } from "./baseten";
// === AWS Bedrock ===
export {
  BEDROCK_DEFAULT_MODEL,
  BEDROCK_MODELS,
  BEDROCK_PROVIDER,
} from "./bedrock";
// === Cerebras ===
export {
  CEREBRAS_DEFAULT_MODEL,
  CEREBRAS_MODELS,
  CEREBRAS_PROVIDER,
} from "./cerebras";
// === Claude Code ===
export {
  CLAUDE_CODE_DEFAULT_MODEL,
  CLAUDE_CODE_MODELS,
  CLAUDE_CODE_PROVIDER,
} from "./claude-code";
export { CLINE_DEFAULT_MODEL, CLINE_MODELS, CLINE_PROVIDER } from "./cline";
// === DeepSeek ===
export {
  DEEPSEEK_DEFAULT_MODEL,
  DEEPSEEK_MODELS,
  DEEPSEEK_PROVIDER,
  getDeepSeekReasoningModels,
} from "./deepseek";
export { DIFY_PROVIDER } from "./dify";
// === Doubao ===
export {
  DOUBAO_DEFAULT_MODEL,
  DOUBAO_MODELS,
  DOUBAO_PROVIDER,
} from "./doubao";
// === Fireworks AI ===
export {
  FIREWORKS_DEFAULT_MODEL,
  FIREWORKS_MODELS,
  FIREWORKS_PROVIDER,
  getFireworksFunctionModels,
} from "./fireworks";
// === Google Gemini ===
export {
  GEMINI_DEFAULT_MODEL,
  GEMINI_MODELS,
  GEMINI_PROVIDER,
  getActiveGeminiModels,
  getGeminiThinkingModels,
} from "./gemini";
// === Groq ===
export {
  GROQ_DEFAULT_MODEL,
  GROQ_MODELS,
  GROQ_PROVIDER,
  getGroqVisionModels,
} from "./groq";
export { HICAP_PROVIDER } from "./hicap";
export { HUAWEI_CLOUD_MAAS_PROVIDER } from "./huawei-cloud-maas";
export { HUGGINGFACE_MODELS, HUGGINGFACE_PROVIDER } from "./huggingface";
export { LITELLM_PROVIDER } from "./litellm";
export { LMSTUDIO_PROVIDER } from "./lmstudio";
// === MiniMax ===
export {
  MINIMAX_DEFAULT_MODEL,
  MINIMAX_MODELS,
  MINIMAX_PROVIDER,
} from "./minimax";
export { MISTRAL_PROVIDER } from "./mistral";
// === Moonshot ===
export {
  MOONSHOT_DEFAULT_MODEL,
  MOONSHOT_MODELS,
  MOONSHOT_PROVIDER,
} from "./moonshot";
// === Nebius ===
export {
  NEBIUS_DEFAULT_MODEL,
  NEBIUS_MODELS,
  NEBIUS_PROVIDER,
} from "./nebius";
// === Nous Research ===
export {
  NOUS_RESEARCH_DEFAULT_MODEL,
  NOUS_RESEARCH_MODELS,
  NOUS_RESEARCH_PROVIDER,
} from "./nous-research";
// === OCA === (also re-exports its internal/external base-URL constants)
export {
  DEFAULT_EXTERNAL_OCA_BASE_URL,
  DEFAULT_INTERNAL_OCA_BASE_URL,
  OCA_DEFAULT_MODEL,
  OCA_MODELS,
  OCA_PROVIDER,
} from "./oca";
export { OLLAMA_PROVIDER } from "./ollama";
// === OpenAI ===
export {
  getActiveOpenAIModels,
  getOpenAIReasoningModels,
  OPENAI_DEFAULT_MODEL,
  OPENAI_MODELS,
  OPENAI_PROVIDER,
} from "./openai";
export {
  OPENAI_CODEX_DEFAULT_MODEL,
  OPENAI_CODEX_PROVIDER,
} from "./openai-codex";
export {
  OPENCODE_DEFAULT_MODEL,
  OPENCODE_MODELS,
  OPENCODE_PROVIDER,
} from "./opencode";
// === OpenRouter ===
export {
  OPENROUTER_DEFAULT_MODEL,
  OPENROUTER_MODELS,
  OPENROUTER_PROVIDER,
} from "./openrouter";
export { QWEN_DEFAULT_MODEL, QWEN_MODELS, QWEN_PROVIDER } from "./qwen";
// === Qwen Code ===
export {
  QWEN_CODE_DEFAULT_MODEL,
  QWEN_CODE_MODELS,
  QWEN_CODE_PROVIDER,
} from "./qwen-code";
export { REQUESTY_PROVIDER } from "./requesty";
// === SambaNova ===
export {
  SAMBANOVA_DEFAULT_MODEL,
  SAMBANOVA_MODELS,
  SAMBANOVA_PROVIDER,
} from "./sambanova";
// === SAP AI Core ===
export {
  SAP_AI_CORE_DEFAULT_MODEL,
  SAP_AI_CORE_MODELS,
  SAP_AI_CORE_PROVIDER,
} from "./sapaicore";
// === Together AI ===
export {
  getTogetherLlamaModels,
  TOGETHER_DEFAULT_MODEL,
  TOGETHER_MODELS,
  TOGETHER_PROVIDER,
} from "./together";
export { VERCEL_AI_GATEWAY_PROVIDER } from "./vercel-ai-gateway";
// === Google Vertex ===
export {
  VERTEX_DEFAULT_MODEL,
  VERTEX_MODELS,
  VERTEX_PROVIDER,
} from "./vertex";
// === xAI (Grok) ===
export {
  getActiveXAIModels,
  XAI_DEFAULT_MODEL,
  XAI_MODELS,
  XAI_PROVIDER,
} from "./xai";
export { ZAI_DEFAULT_MODEL, ZAI_MODELS, ZAI_PROVIDER } from "./zai";
|
|
@@ -1,19 +0,0 @@
|
|
|
1
|
-
/**
|
|
2
|
-
* LiteLLM Provider
|
|
3
|
-
*/
|
|
4
|
-
|
|
5
|
-
import type { ModelCollection } from "../schemas/index";
|
|
6
|
-
|
|
7
|
-
export const LITELLM_PROVIDER: ModelCollection = {
|
|
8
|
-
provider: {
|
|
9
|
-
id: "litellm",
|
|
10
|
-
name: "LiteLLM",
|
|
11
|
-
description: "Self-hosted LLM proxy",
|
|
12
|
-
protocol: "openai-chat",
|
|
13
|
-
baseUrl: "http://localhost:4000/v1",
|
|
14
|
-
defaultModelId: "gpt-4o",
|
|
15
|
-
capabilities: ["prompt-cache"],
|
|
16
|
-
env: ["LITELLM_API_KEY"],
|
|
17
|
-
},
|
|
18
|
-
models: {},
|
|
19
|
-
};
|
|
@@ -1,22 +0,0 @@
|
|
|
1
|
-
/**
|
|
2
|
-
* LM Studio Provider
|
|
3
|
-
*/
|
|
4
|
-
|
|
5
|
-
import { getGeneratedModelsForProvider } from "../generated-access";
|
|
6
|
-
import type { ModelCollection, ModelInfo } from "../schemas/index";
|
|
7
|
-
|
|
8
|
-
const LMSTUDIO_MODELS: Record<string, ModelInfo> =
|
|
9
|
-
getGeneratedModelsForProvider("lmstudio");
|
|
10
|
-
|
|
11
|
-
export const LMSTUDIO_PROVIDER: ModelCollection = {
|
|
12
|
-
provider: {
|
|
13
|
-
id: "lmstudio",
|
|
14
|
-
name: "LM Studio",
|
|
15
|
-
description: "Local model inference with LM Studio",
|
|
16
|
-
protocol: "openai-chat",
|
|
17
|
-
baseUrl: "http://localhost:1234/v1",
|
|
18
|
-
defaultModelId: Object.keys(LMSTUDIO_MODELS)[0],
|
|
19
|
-
env: ["LMSTUDIO_API_KEY"],
|
|
20
|
-
},
|
|
21
|
-
models: LMSTUDIO_MODELS || {},
|
|
22
|
-
};
|
|
@@ -1,34 +0,0 @@
|
|
|
1
|
-
/**
|
|
2
|
-
* MiniMax Provider
|
|
3
|
-
*/
|
|
4
|
-
|
|
5
|
-
import { getGeneratedModelsForProvider } from "../generated-access";
|
|
6
|
-
import type { ModelCollection, ModelInfo } from "../schemas/index";
|
|
7
|
-
|
|
8
|
-
const DEFAULT_MINIMAX_MODEL_ID = "MiniMax-M2.5";
|
|
9
|
-
|
|
10
|
-
export const MINIMAX_MODELS: Record<string, ModelInfo> = {
|
|
11
|
-
[DEFAULT_MINIMAX_MODEL_ID]: {
|
|
12
|
-
id: DEFAULT_MINIMAX_MODEL_ID,
|
|
13
|
-
name: "MiniMax M2.5",
|
|
14
|
-
capabilities: ["streaming", "tools", "reasoning", "prompt-cache"],
|
|
15
|
-
},
|
|
16
|
-
...getGeneratedModelsForProvider("minimax"),
|
|
17
|
-
};
|
|
18
|
-
|
|
19
|
-
export const MINIMAX_DEFAULT_MODEL =
|
|
20
|
-
Object.keys(MINIMAX_MODELS)[0] ?? DEFAULT_MINIMAX_MODEL_ID;
|
|
21
|
-
|
|
22
|
-
export const MINIMAX_PROVIDER: ModelCollection = {
|
|
23
|
-
provider: {
|
|
24
|
-
id: "minimax",
|
|
25
|
-
name: "MiniMax",
|
|
26
|
-
description: "MiniMax models via Anthropic-compatible API",
|
|
27
|
-
protocol: "anthropic",
|
|
28
|
-
baseUrl: "https://api.minimax.io/anthropic",
|
|
29
|
-
defaultModelId: MINIMAX_DEFAULT_MODEL,
|
|
30
|
-
capabilities: ["reasoning", "prompt-cache"],
|
|
31
|
-
env: ["MINIMAX_API_KEY"],
|
|
32
|
-
},
|
|
33
|
-
models: MINIMAX_MODELS,
|
|
34
|
-
};
|
|
@@ -1,19 +0,0 @@
|
|
|
1
|
-
/**
|
|
2
|
-
* Mistral Provider
|
|
3
|
-
*/
|
|
4
|
-
|
|
5
|
-
import type { ModelCollection } from "../schemas/index";
|
|
6
|
-
|
|
7
|
-
export const MISTRAL_PROVIDER: ModelCollection = {
|
|
8
|
-
provider: {
|
|
9
|
-
id: "mistral",
|
|
10
|
-
name: "Mistral",
|
|
11
|
-
description: "Mistral AI models via AI SDK provider",
|
|
12
|
-
protocol: "openai-chat",
|
|
13
|
-
baseUrl: "https://api.mistral.ai/v1",
|
|
14
|
-
defaultModelId: "mistral-medium-latest",
|
|
15
|
-
capabilities: ["reasoning"],
|
|
16
|
-
env: ["MISTRAL_API_KEY"],
|
|
17
|
-
},
|
|
18
|
-
models: {},
|
|
19
|
-
};
|