@lobehub/chat 1.118.4 → 1.118.5
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.github/workflows/test.yml +36 -1
- package/CHANGELOG.md +34 -0
- package/changelog/v1.json +9 -0
- package/package.json +2 -1
- package/packages/const/package.json +4 -1
- package/packages/const/src/image.ts +1 -1
- package/packages/database/src/repositories/aiInfra/index.ts +7 -2
- package/packages/model-bank/package.json +75 -0
- package/{src/config → packages/model-bank/src}/aiModels/bfl.ts +1 -1
- package/{src/config → packages/model-bank/src}/aiModels/fal.ts +1 -1
- package/{src/config → packages/model-bank/src}/aiModels/google.ts +1 -1
- package/{src/config → packages/model-bank/src}/aiModels/index.ts +2 -2
- package/{src/config → packages/model-bank/src}/aiModels/openai.ts +1 -1
- package/packages/model-bank/src/exports.test.ts +37 -0
- package/packages/model-bank/src/index.ts +2 -0
- package/{src/libs → packages/model-bank/src}/standard-parameters/index.ts +1 -1
- package/packages/model-bank/vitest.config.mts +11 -0
- package/packages/model-runtime/package.json +1 -0
- package/packages/model-runtime/src/ai360/index.ts +1 -1
- package/packages/model-runtime/src/aihubmix/index.ts +1 -1
- package/packages/model-runtime/src/anthropic/index.ts +6 -6
- package/packages/model-runtime/src/baichuan/index.ts +1 -1
- package/packages/model-runtime/src/bfl/createImage.ts +1 -2
- package/packages/model-runtime/src/cloudflare/index.ts +1 -1
- package/packages/model-runtime/src/cohere/index.ts +1 -1
- package/packages/model-runtime/src/deepseek/index.ts +1 -1
- package/packages/model-runtime/src/fal/index.ts +1 -2
- package/packages/model-runtime/src/fireworksai/index.ts +1 -1
- package/packages/model-runtime/src/groq/index.ts +1 -1
- package/packages/model-runtime/src/higress/index.ts +1 -1
- package/packages/model-runtime/src/huggingface/index.ts +1 -1
- package/packages/model-runtime/src/hunyuan/index.ts +1 -1
- package/packages/model-runtime/src/infiniai/index.ts +1 -1
- package/packages/model-runtime/src/internlm/index.ts +1 -1
- package/packages/model-runtime/src/jina/index.ts +1 -1
- package/packages/model-runtime/src/lmstudio/index.ts +1 -1
- package/packages/model-runtime/src/minimax/index.ts +1 -1
- package/packages/model-runtime/src/mistral/index.ts +1 -1
- package/packages/model-runtime/src/novita/__snapshots__/index.test.ts.snap +309 -21
- package/packages/model-runtime/src/novita/index.ts +31 -1
- package/packages/model-runtime/src/ollama/index.ts +1 -1
- package/packages/model-runtime/src/openai/__snapshots__/index.test.ts.snap +28 -0
- package/packages/model-runtime/src/openai/index.test.ts +0 -3
- package/packages/model-runtime/src/openrouter/__snapshots__/index.test.ts.snap +46 -0
- package/packages/model-runtime/src/openrouter/index.test.ts +21 -45
- package/packages/model-runtime/src/openrouter/index.ts +22 -25
- package/packages/model-runtime/src/openrouter/type.ts +12 -24
- package/packages/model-runtime/src/ppio/index.ts +1 -1
- package/packages/model-runtime/src/search1api/index.ts +1 -1
- package/packages/model-runtime/src/sensenova/index.ts +1 -1
- package/packages/model-runtime/src/stepfun/index.ts +1 -1
- package/packages/model-runtime/src/tencentcloud/index.ts +1 -1
- package/packages/model-runtime/src/togetherai/index.ts +1 -1
- package/packages/model-runtime/src/types/image.ts +1 -1
- package/packages/model-runtime/src/utils/modelParse.test.ts +5 -5
- package/packages/model-runtime/src/utils/modelParse.ts +47 -22
- package/packages/model-runtime/src/utils/openaiCompatibleFactory/createImage.ts +1 -2
- package/packages/model-runtime/src/utils/openaiCompatibleFactory/index.ts +1 -1
- package/packages/model-runtime/src/vllm/index.ts +1 -1
- package/packages/model-runtime/src/xinference/index.ts +1 -1
- package/packages/types/src/aiModel.ts +1 -2
- package/packages/types/src/llm.ts +1 -1
- package/packages/utils/src/getFallbackModelProperty.test.ts +1 -1
- package/packages/utils/src/getFallbackModelProperty.ts +1 -1
- package/packages/utils/src/parseModels.test.ts +1 -2
- package/packages/utils/src/parseModels.ts +1 -1
- package/src/app/[variants]/(main)/image/features/GenerationFeed/BatchItem.tsx +1 -1
- package/src/app/[variants]/(main)/profile/features/ClerkProfile.tsx +1 -1
- package/src/app/[variants]/(main)/settings/_layout/Desktop/index.tsx +1 -5
- package/src/locales/default/models.ts +1 -1
- package/src/server/globalConfig/genServerAiProviderConfig.test.ts +3 -3
- package/src/server/globalConfig/genServerAiProviderConfig.ts +1 -1
- package/src/server/routers/async/image.ts +1 -1
- package/src/server/services/discover/index.test.ts +1 -1
- package/src/server/services/discover/index.ts +16 -8
- package/src/store/aiInfra/slices/aiProvider/action.ts +1 -1
- package/src/store/image/slices/generationConfig/action.test.ts +2 -6
- package/src/store/image/slices/generationConfig/action.ts +3 -3
- package/src/store/image/slices/generationConfig/hooks.test.ts +2 -2
- package/src/store/image/slices/generationConfig/hooks.ts +1 -1
- package/src/store/image/slices/generationConfig/initialState.ts +2 -3
- package/src/store/image/slices/generationConfig/selectors.test.ts +1 -2
- package/src/store/image/slices/generationConfig/selectors.ts +1 -1
- /package/{src/config → packages/model-bank/src}/aiModels/ai21.ts +0 -0
- /package/{src/config → packages/model-bank/src}/aiModels/ai302.ts +0 -0
- /package/{src/config → packages/model-bank/src}/aiModels/ai360.ts +0 -0
- /package/{src/config → packages/model-bank/src}/aiModels/aihubmix.ts +0 -0
- /package/{src/config → packages/model-bank/src}/aiModels/akashchat.ts +0 -0
- /package/{src/config → packages/model-bank/src}/aiModels/anthropic.ts +0 -0
- /package/{src/config → packages/model-bank/src}/aiModels/azure.ts +0 -0
- /package/{src/config → packages/model-bank/src}/aiModels/azureai.ts +0 -0
- /package/{src/config → packages/model-bank/src}/aiModels/baichuan.ts +0 -0
- /package/{src/config → packages/model-bank/src}/aiModels/bedrock.ts +0 -0
- /package/{src/config → packages/model-bank/src}/aiModels/cloudflare.ts +0 -0
- /package/{src/config → packages/model-bank/src}/aiModels/cohere.ts +0 -0
- /package/{src/config → packages/model-bank/src}/aiModels/deepseek.ts +0 -0
- /package/{src/config → packages/model-bank/src}/aiModels/fireworksai.ts +0 -0
- /package/{src/config → packages/model-bank/src}/aiModels/giteeai.ts +0 -0
- /package/{src/config → packages/model-bank/src}/aiModels/github.ts +0 -0
- /package/{src/config → packages/model-bank/src}/aiModels/groq.ts +0 -0
- /package/{src/config → packages/model-bank/src}/aiModels/higress.ts +0 -0
- /package/{src/config → packages/model-bank/src}/aiModels/huggingface.ts +0 -0
- /package/{src/config → packages/model-bank/src}/aiModels/hunyuan.ts +0 -0
- /package/{src/config → packages/model-bank/src}/aiModels/infiniai.ts +0 -0
- /package/{src/config → packages/model-bank/src}/aiModels/internlm.ts +0 -0
- /package/{src/config → packages/model-bank/src}/aiModels/jina.ts +0 -0
- /package/{src/config → packages/model-bank/src}/aiModels/lmstudio.ts +0 -0
- /package/{src/config → packages/model-bank/src}/aiModels/lobehub.ts +0 -0
- /package/{src/config → packages/model-bank/src}/aiModels/minimax.ts +0 -0
- /package/{src/config → packages/model-bank/src}/aiModels/mistral.ts +0 -0
- /package/{src/config → packages/model-bank/src}/aiModels/modelscope.ts +0 -0
- /package/{src/config → packages/model-bank/src}/aiModels/moonshot.ts +0 -0
- /package/{src/config → packages/model-bank/src}/aiModels/novita.ts +0 -0
- /package/{src/config → packages/model-bank/src}/aiModels/nvidia.ts +0 -0
- /package/{src/config → packages/model-bank/src}/aiModels/ollama.ts +0 -0
- /package/{src/config → packages/model-bank/src}/aiModels/openrouter.ts +0 -0
- /package/{src/config → packages/model-bank/src}/aiModels/perplexity.ts +0 -0
- /package/{src/config → packages/model-bank/src}/aiModels/ppio.ts +0 -0
- /package/{src/config → packages/model-bank/src}/aiModels/qiniu.ts +0 -0
- /package/{src/config → packages/model-bank/src}/aiModels/qwen.ts +0 -0
- /package/{src/config → packages/model-bank/src}/aiModels/sambanova.ts +0 -0
- /package/{src/config → packages/model-bank/src}/aiModels/search1api.ts +0 -0
- /package/{src/config → packages/model-bank/src}/aiModels/sensenova.ts +0 -0
- /package/{src/config → packages/model-bank/src}/aiModels/siliconcloud.ts +0 -0
- /package/{src/config → packages/model-bank/src}/aiModels/spark.ts +0 -0
- /package/{src/config → packages/model-bank/src}/aiModels/stepfun.ts +0 -0
- /package/{src/config → packages/model-bank/src}/aiModels/taichu.ts +0 -0
- /package/{src/config → packages/model-bank/src}/aiModels/tencentcloud.ts +0 -0
- /package/{src/config → packages/model-bank/src}/aiModels/togetherai.ts +0 -0
- /package/{src/config → packages/model-bank/src}/aiModels/upstage.ts +0 -0
- /package/{src/config → packages/model-bank/src}/aiModels/v0.ts +0 -0
- /package/{src/config → packages/model-bank/src}/aiModels/vertexai.ts +0 -0
- /package/{src/config → packages/model-bank/src}/aiModels/vllm.ts +0 -0
- /package/{src/config → packages/model-bank/src}/aiModels/volcengine.ts +0 -0
- /package/{src/config → packages/model-bank/src}/aiModels/wenxin.ts +0 -0
- /package/{src/config → packages/model-bank/src}/aiModels/xai.ts +0 -0
- /package/{src/config → packages/model-bank/src}/aiModels/xinference.ts +0 -0
- /package/{src/config → packages/model-bank/src}/aiModels/zeroone.ts +0 -0
- /package/{src/config → packages/model-bank/src}/aiModels/zhipu.ts +0 -0
- /package/{src/libs → packages/model-bank/src}/standard-parameters/index.test.ts +0 -0
package/.github/workflows/test.yml

@@ -7,7 +7,7 @@ permissions:
 
 jobs:
   # Package tests - using each package's own test script
-  test-packages:
+  test-intenral-packages:
     runs-on: ubuntu-latest
     strategy:
       matrix:
@@ -41,6 +41,41 @@ jobs:
           files: ./packages/${{ matrix.package }}/coverage/lcov.info
           flags: packages/${{ matrix.package }}
 
+  test-packages:
+    runs-on: ubuntu-latest
+    strategy:
+      matrix:
+        package: [model-bank]
+
+    name: Test package ${{ matrix.package }}
+
+    steps:
+      - uses: actions/checkout@v5
+
+      - name: Setup Node.js
+        uses: actions/setup-node@v4
+        with:
+          node-version: 22
+
+      - name: Install bun
+        uses: oven-sh/setup-bun@v1
+        with:
+          bun-version: ${{ secrets.BUN_VERSION }}
+
+      - name: Install deps
+        run: bun i
+
+      - name: Test ${{ matrix.package }} package with coverage
+        run: bun run --filter ${{ matrix.package }} test:coverage
+
+      - name: Upload ${{ matrix.package }} coverage to Codecov
+        uses: codecov/codecov-action@v4
+        with:
+          token: ${{ secrets.CODECOV_TOKEN }}
+          files: ./packages/${{ matrix.package }}/coverage/lcov.info
+          flags: packages/${{ matrix.package }}
+
+
   # App tests
   test-website:
     name: Test Website
package/CHANGELOG.md CHANGED

@@ -2,6 +2,40 @@
 
 # Changelog
 
+### [Version 1.118.5](https://github.com/lobehub/lobe-chat/compare/v1.118.4...v1.118.5)
+
+<sup>Released on **2025-08-29**</sup>
+
+#### ♻ Code Refactoring
+
+- **misc**: Refactor the `model-bank` package from `src/config/aiModels`.
+
+#### 💄 Styles
+
+- **misc**: Fix clerk scrollBox style, ModelFetcher support getting prices.
+
+<br/>
+
+<details>
+<summary><kbd>Improvements and Fixes</kbd></summary>
+
+#### Code refactoring
+
+- **misc**: Refactor the `model-bank` package from `src/config/aiModels`, closes [#8983](https://github.com/lobehub/lobe-chat/issues/8983) ([c65eb09](https://github.com/lobehub/lobe-chat/commit/c65eb09))
+
+#### Styles
+
+- **misc**: Fix clerk scrollBox style, closes [#8989](https://github.com/lobehub/lobe-chat/issues/8989) ([b25b5a0](https://github.com/lobehub/lobe-chat/commit/b25b5a0))
+- **misc**: ModelFetcher support getting prices, closes [#8985](https://github.com/lobehub/lobe-chat/issues/8985) ([58b73ec](https://github.com/lobehub/lobe-chat/commit/58b73ec))
+
+</details>
+
+<div align="right">
+
+[](#readme-top)
+
+</div>
+
 ### [Version 1.118.4](https://github.com/lobehub/lobe-chat/compare/v1.118.3...v1.118.4)
 
 <sup>Released on **2025-08-29**</sup>
package/changelog/v1.json CHANGED
package/package.json CHANGED

@@ -1,6 +1,6 @@
 {
   "name": "@lobehub/chat",
-  "version": "1.118.4",
+  "version": "1.118.5",
   "description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
   "keywords": [
     "framework",
@@ -210,6 +210,7 @@
     "mammoth": "^1.10.0",
     "markdown-to-txt": "^2.0.1",
     "mdast-util-to-markdown": "^2.1.2",
+    "model-bank": "workspace:*",
     "modern-screenshot": "^4.6.5",
     "nanoid": "^5.1.5",
     "next": "~15.3.5",
package/packages/database/src/repositories/aiInfra/index.ts

@@ -201,7 +201,11 @@ export class AiInfraRepos {
     providerId: string,
   ): Promise<AiProviderModelListItem[] | undefined> => {
     try {
-      const
+      const modules = await import('model-bank');
+
+      // TODO: when model-bank is a separate module, we will try import from model-bank/[prividerId] again
+      // @ts-expect-error providerId is string
+      const providerModels = modules[providerId];
 
       // use the serverModelLists as the defined server model list
       const presetList = this.providerConfigs[providerId]?.serverModelLists || providerModels;
@@ -210,7 +214,8 @@ export class AiInfraRepos {
         enabled: m.enabled || false,
         source: AiModelSourceEnum.Builtin,
       }));
-    } catch {
+    } catch (error) {
+      console.error(error);
       // maybe provider id not exist
     }
   };
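For orientation, here is a minimal, self-contained TypeScript sketch of the lookup pattern the hunk above introduces: dynamically import the `model-bank` workspace package and index its namespace by provider id. The helper name `getBuiltinModels` and the local `BuiltinModel` type are illustrative only, not the repository's actual API.

```ts
// Sketch only: mirrors the dynamic provider lookup shown in AiInfraRepos above.
type BuiltinModel = { enabled?: boolean; id: string; [key: string]: unknown };

const getBuiltinModels = async (providerId: string) => {
  try {
    // model-bank re-exports each provider's default model list under its provider id.
    const modules = await import('model-bank');
    const providerModels = (modules as unknown as Record<string, BuiltinModel[] | undefined>)[
      providerId
    ];
    if (!providerModels) return undefined;

    return providerModels.map((m) => ({
      ...m,
      enabled: m.enabled || false,
      source: 'builtin', // the repository uses AiModelSourceEnum.Builtin here
    }));
  } catch (error) {
    // the provider id may not exist as an export; treat that as "no builtin list"
    console.error(error);
    return undefined;
  }
};
```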
package/packages/model-bank/package.json

@@ -0,0 +1,75 @@
+{
+  "name": "model-bank",
+  "version": "1.0.0",
+  "private": true,
+  "exports": {
+    ".": "./src/index.ts",
+    "./ai21": "./src/aiModels/ai21.ts",
+    "./ai302": "./src/aiModels/ai302.ts",
+    "./ai360": "./src/aiModels/ai360.ts",
+    "./aihubmix": "./src/aiModels/aihubmix.ts",
+    "./akashchat": "./src/aiModels/akashchat.ts",
+    "./anthropic": "./src/aiModels/anthropic.ts",
+    "./azureai": "./src/aiModels/azureai.ts",
+    "./azure": "./src/aiModels/azure.ts",
+    "./baichuan": "./src/aiModels/baichuan.ts",
+    "./bedrock": "./src/aiModels/bedrock.ts",
+    "./bfl": "./src/aiModels/bfl.ts",
+    "./cloudflare": "./src/aiModels/cloudflare.ts",
+    "./cohere": "./src/aiModels/cohere.ts",
+    "./deepseek": "./src/aiModels/deepseek.ts",
+    "./fal": "./src/aiModels/fal.ts",
+    "./fireworksai": "./src/aiModels/fireworksai.ts",
+    "./giteeai": "./src/aiModels/giteeai.ts",
+    "./github": "./src/aiModels/github.ts",
+    "./google": "./src/aiModels/google.ts",
+    "./groq": "./src/aiModels/groq.ts",
+    "./lobehub": "./src/aiModels/lobehub.ts",
+    "./higress": "./src/aiModels/higress.ts",
+    "./huggingface": "./src/aiModels/huggingface.ts",
+    "./hunyuan": "./src/aiModels/hunyuan.ts",
+    "./infiniai": "./src/aiModels/infiniai.ts",
+    "./internlm": "./src/aiModels/internlm.ts",
+    "./jina": "./src/aiModels/jina.ts",
+    "./lmstudio": "./src/aiModels/lmstudio.ts",
+    "./minimax": "./src/aiModels/minimax.ts",
+    "./mistral": "./src/aiModels/mistral.ts",
+    "./modelscope": "./src/aiModels/modelscope.ts",
+    "./moonshot": "./src/aiModels/moonshot.ts",
+    "./novita": "./src/aiModels/novita.ts",
+    "./nvidia": "./src/aiModels/nvidia.ts",
+    "./ollama": "./src/aiModels/ollama.ts",
+    "./openai": "./src/aiModels/openai.ts",
+    "./openrouter": "./src/aiModels/openrouter.ts",
+    "./perplexity": "./src/aiModels/perplexity.ts",
+    "./ppio": "./src/aiModels/ppio.ts",
+    "./qiniu": "./src/aiModels/qiniu.ts",
+    "./qwen": "./src/aiModels/qwen.ts",
+    "./sambanova": "./src/aiModels/sambanova.ts",
+    "./search1api": "./src/aiModels/search1api.ts",
+    "./sensenova": "./src/aiModels/sensenova.ts",
+    "./siliconcloud": "./src/aiModels/siliconcloud.ts",
+    "./spark": "./src/aiModels/spark.ts",
+    "./stepfun": "./src/aiModels/stepfun.ts",
+    "./taichu": "./src/aiModels/taichu.ts",
+    "./tencentcloud": "./src/aiModels/tencentcloud.ts",
+    "./togetherai": "./src/aiModels/togetherai.ts",
+    "./upstage": "./src/aiModels/upstage.ts",
+    "./v0": "./src/aiModels/v0.ts",
+    "./vertexai": "./src/aiModels/vertexai.ts",
+    "./vllm": "./src/aiModels/vllm.ts",
+    "./volcengine": "./src/aiModels/volcengine.ts",
+    "./wenxin": "./src/aiModels/wenxin.ts",
+    "./xai": "./src/aiModels/xai.ts",
+    "./xinference": "./src/aiModels/xinference.ts",
+    "./zeroone": "./src/aiModels/zeroone.ts",
+    "./zhipu": "./src/aiModels/zhipu.ts"
+  },
+  "scripts": {
+    "test": "vitest",
+    "test:coverage": "vitest --coverage"
+  },
+  "dependencies": {
+    "zod": "^3.25.76"
+  }
+}
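Taken together, the exports map above lets consumers import either the aggregated index or a single provider's definitions. A hedged usage sketch, relying only on exports visible elsewhere in this diff (`LOBE_DEFAULT_MODEL_LIST`, the `anthropic` named export, and the per-provider default exports); the `displayName`/`maxOutput` fields follow the card shape used in the anthropic runtime hunk below and are assumptions beyond that:

```ts
// Root import: aggregated exports from ./src/index.ts
import { LOBE_DEFAULT_MODEL_LIST, anthropic } from 'model-bank';
// Subpath import: one provider's model definitions, resolved via the exports map above
import openaiModels from 'model-bank/openai';

// Each provider export is a list of model cards; inspect the first entry.
const first = anthropic[0];
console.log(first?.id, first?.displayName, first?.maxOutput);

// The aggregated list spans every provider in the package.
console.log(LOBE_DEFAULT_MODEL_LIST.length, openaiModels.length);
```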
package/packages/model-bank/src/aiModels/bfl.ts

@@ -1,5 +1,5 @@
 import { PRESET_ASPECT_RATIOS } from '@/const/image';
-import { ModelParamsSchema } from '
+import { ModelParamsSchema } from '../standard-parameters';
 import { AIImageModelCard } from '@/types/aiModel';
 
 // https://docs.bfl.ai/api-reference/tasks/edit-or-create-an-image-with-flux-kontext-pro
package/packages/model-bank/src/aiModels/google.ts

@@ -1,5 +1,5 @@
 import { CHAT_MODEL_IMAGE_GENERATION_PARAMS } from '@/const/image';
-import { ModelParamsSchema } from '
+import { ModelParamsSchema } from '../standard-parameters';
 import { AIChatModelCard, AIImageModelCard } from '@/types/aiModel';
 
 const googleChatModels: AIChatModelCard[] = [
package/packages/model-bank/src/aiModels/index.ts

@@ -155,7 +155,7 @@ export { default as bfl } from './bfl';
 export { default as cloudflare } from './cloudflare';
 export { default as cohere } from './cohere';
 export { default as deepseek } from './deepseek';
-export { default as fal } from './fal';
+export { default as fal, fluxSchnellParamsSchema } from './fal';
 export { default as fireworksai } from './fireworksai';
 export { default as giteeai } from './giteeai';
 export { default as github } from './github';
@@ -176,7 +176,7 @@ export { default as moonshot } from './moonshot';
 export { default as novita } from './novita';
 export { default as nvidia } from './nvidia';
 export { default as ollama } from './ollama';
-export { default as openai } from './openai';
+export { gptImage1ParamsSchema, default as openai, openaiChatModels } from './openai';
 export { default as openrouter } from './openrouter';
 export { default as perplexity } from './perplexity';
 export { default as ppio } from './ppio';
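These widened re-exports surface the image-parameter schemas at the package root as well as through the provider subpaths. A small sketch of the import paths this enables; the exact shape of the schema objects is not shown in this diff, so the inspection below only assumes they are plain objects:

```ts
// Equivalent ways to reach the same named exports after this change:
import { fluxSchnellParamsSchema, gptImage1ParamsSchema, openaiChatModels } from 'model-bank';
import { gptImage1ParamsSchema as viaSubpath } from 'model-bank/openai';

// Inspect which standard image-generation parameters each schema declares.
console.log(Object.keys(gptImage1ParamsSchema));
console.log(Object.keys(fluxSchnellParamsSchema));
console.log(openaiChatModels.length, typeof viaSubpath);
```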
package/packages/model-bank/src/exports.test.ts

@@ -0,0 +1,37 @@
+import { readFileSync, readdirSync } from 'node:fs';
+import path from 'node:path';
+import { describe, expect, it } from 'vitest';
+
+// This test ensures that the exports map in packages/model-bank/package.json covers every file under src/aiModels
+describe('model-bank package.json exports should cover all aiModels files', () => {
+  const packageRoot = path.resolve(__dirname, '..');
+  const aiModelsDir = path.resolve(packageRoot, 'src/aiModels');
+  const packageJsonPath = path.resolve(packageRoot, 'package.json');
+
+  const packageJson = JSON.parse(readFileSync(packageJsonPath, 'utf-8')) as {
+    exports?: Record<string, string>;
+  };
+
+  const allModelFiles = readdirSync(aiModelsDir)
+    .filter((f) => f.endsWith('.ts'))
+    .map((f) => f.replace(/\.ts$/, ''))
+    // exclude non-provider files such as index, type declarations, etc.
+    .filter((name) => !['index'].includes(name));
+
+  it('every aiModels file should be exported in package.json.exports', () => {
+    const exportsMap = packageJson.exports ?? {};
+
+    const missing = allModelFiles.filter((name) => {
+      const key = `./${name}`;
+      const expectedPath = `./src/aiModels/${name}.ts`;
+      return !(key in exportsMap) || exportsMap[key] !== expectedPath;
+    });
+
+    if (missing.length > 0) {
+      // eslint-disable-next-line no-console
+      console.error('Missing exports for aiModels files:', missing);
+    }
+
+    expect(missing).toEqual([]);
+  });
+});
package/packages/model-runtime/src/ai360/index.ts

@@ -38,7 +38,7 @@ export const LobeAi360AI = createOpenAICompatibleRuntime({
     chatCompletion: () => process.env.DEBUG_AI360_CHAT_COMPLETION === '1',
   },
   models: async ({ client }) => {
-    const { LOBE_DEFAULT_MODEL_LIST } = await import('
+    const { LOBE_DEFAULT_MODEL_LIST } = await import('model-bank');
 
     const reasoningKeywords = ['360gpt2-o1', '360zhinao2-o1'];
 
package/packages/model-runtime/src/anthropic/index.ts

@@ -12,10 +12,10 @@ import { buildAnthropicMessages, buildAnthropicTools } from '../utils/anthropicH
 import { AgentRuntimeError } from '../utils/createError';
 import { debugStream } from '../utils/debugStream';
 import { desensitizeUrl } from '../utils/desensitizeUrl';
+import { MODEL_LIST_CONFIGS, processModelList } from '../utils/modelParse';
 import { StreamingResponse } from '../utils/response';
 import { AnthropicStream } from '../utils/streams';
 import { handleAnthropicError } from './handleAnthropicError';
-import { processModelList, MODEL_LIST_CONFIGS } from '../utils/modelParse';
 
 export interface AnthropicModelCard {
   created_at: string;
@@ -120,7 +120,7 @@ export class LobeAnthropicAI implements LobeRuntimeAI {
       enabledSearch,
     } = payload;
 
-    const {
+    const { anthropic: anthropicModels } = await import('model-bank');
     const modelConfig = anthropicModels.find((m) => m.id === model);
     const defaultMaxOutput = modelConfig?.maxOutput;
 
@@ -231,10 +231,10 @@ export class LobeAnthropicAI implements LobeRuntimeAI {
     const modelList: AnthropicModelCard[] = json['data'];
 
     const standardModelList = modelList.map((model) => ({
-
-
-
-
+      created: model.created_at,
+      displayName: model.display_name,
+      id: model.id,
+    }));
     return processModelList(standardModelList, MODEL_LIST_CONFIGS.anthropic, 'anthropic');
   }
 
package/packages/model-runtime/src/baichuan/index.ts

@@ -43,7 +43,7 @@ export const LobeBaichuanAI = createOpenAICompatibleRuntime({
     chatCompletion: () => process.env.DEBUG_BAICHUAN_CHAT_COMPLETION === '1',
   },
   models: async ({ client }) => {
-    const { LOBE_DEFAULT_MODEL_LIST } = await import('
+    const { LOBE_DEFAULT_MODEL_LIST } = await import('model-bank');
 
     const modelsPage = (await client.models.list()) as any;
     const modelList: BaichuanModelCard[] = modelsPage.data;
package/packages/model-runtime/src/bfl/createImage.ts

@@ -1,7 +1,6 @@
 import { imageUrlToBase64 } from '@lobechat/utils';
 import createDebug from 'debug';
-
-import { RuntimeImageGenParamsValue } from '@/libs/standard-parameters/index';
+import { RuntimeImageGenParamsValue } from 'model-bank';
 
 import { AgentRuntimeErrorType } from '../error';
 import { CreateImagePayload, CreateImageResponse } from '../types/image';
package/packages/model-runtime/src/cloudflare/index.ts

@@ -112,7 +112,7 @@ export class LobeCloudflareAI implements LobeRuntimeAI {
   }
 
   async models(): Promise<ChatModelCard[]> {
-    const { LOBE_DEFAULT_MODEL_LIST } = await import('
+    const { LOBE_DEFAULT_MODEL_LIST } = await import('model-bank');
 
     const url = `${DEFAULT_BASE_URL_PREFIX}/client/v4/accounts/${this.accountID}/ai/models/search`;
     const response = await fetch(url, {
package/packages/model-runtime/src/cohere/index.ts

@@ -37,7 +37,7 @@ export const LobeCohereAI = createOpenAICompatibleRuntime({
     chatCompletion: () => process.env.DEBUG_COHERE_CHAT_COMPLETION === '1',
   },
   models: async ({ client }) => {
-    const { LOBE_DEFAULT_MODEL_LIST } = await import('
+    const { LOBE_DEFAULT_MODEL_LIST } = await import('model-bank');
 
     client.baseURL = 'https://api.cohere.com/v1';
 
package/packages/model-runtime/src/deepseek/index.ts

@@ -13,7 +13,7 @@ export const LobeDeepSeekAI = createOpenAICompatibleRuntime({
     chatCompletion: () => process.env.DEBUG_DEEPSEEK_CHAT_COMPLETION === '1',
   },
   models: async ({ client }) => {
-    const { LOBE_DEFAULT_MODEL_LIST } = await import('
+    const { LOBE_DEFAULT_MODEL_LIST } = await import('model-bank');
 
     const modelsPage = (await client.models.list()) as any;
     const modelList: DeepSeekModelCard[] = modelsPage.data;
package/packages/model-runtime/src/fal/index.ts

@@ -1,10 +1,9 @@
 import { fal } from '@fal-ai/client';
 import debug from 'debug';
 import { pick } from 'lodash-es';
+import { RuntimeImageGenParamsValue } from 'model-bank';
 import { ClientOptions } from 'openai';
 
-import { RuntimeImageGenParamsValue } from '@/libs/standard-parameters/index';
-
 import { LobeRuntimeAI } from '../BaseAI';
 import { AgentRuntimeErrorType } from '../error';
 import { CreateImagePayload, CreateImageResponse } from '../types/image';
package/packages/model-runtime/src/fireworksai/index.ts

@@ -16,7 +16,7 @@ export const LobeFireworksAI = createOpenAICompatibleRuntime({
     chatCompletion: () => process.env.DEBUG_FIREWORKSAI_CHAT_COMPLETION === '1',
   },
   models: async ({ client }) => {
-    const { LOBE_DEFAULT_MODEL_LIST } = await import('
+    const { LOBE_DEFAULT_MODEL_LIST } = await import('model-bank');
 
     const reasoningKeywords = ['deepseek-r1', 'qwq'];
 
package/packages/model-runtime/src/groq/index.ts

@@ -31,7 +31,7 @@ export const LobeGroq = createOpenAICompatibleRuntime({
     chatCompletion: () => process.env.DEBUG_GROQ_CHAT_COMPLETION === '1',
   },
   models: async ({ client }) => {
-    const { LOBE_DEFAULT_MODEL_LIST } = await import('
+    const { LOBE_DEFAULT_MODEL_LIST } = await import('model-bank');
 
     const functionCallKeywords = [
       'tool',
package/packages/model-runtime/src/higress/index.ts

@@ -27,7 +27,7 @@ export const LobeHigressAI = createOpenAICompatibleRuntime({
     chatCompletion: () => process.env.DEBUG_HIGRESS_CHAT_COMPLETION === '1',
   },
   models: async ({ client }) => {
-    const { LOBE_DEFAULT_MODEL_LIST } = await import('
+    const { LOBE_DEFAULT_MODEL_LIST } = await import('model-bank');
 
     const modelsPage = (await client.models.list()) as any;
     const modelList: HigressModelCard[] = modelsPage.data;
package/packages/model-runtime/src/huggingface/index.ts

@@ -55,7 +55,7 @@ export const LobeHuggingFaceAI = createOpenAICompatibleRuntime({
     chatCompletion: () => process.env.DEBUG_HUGGINGFACE_CHAT_COMPLETION === '1',
   },
   models: async () => {
-    const { LOBE_DEFAULT_MODEL_LIST } = await import('
+    const { LOBE_DEFAULT_MODEL_LIST } = await import('model-bank');
 
     const visionKeywords = ['image-text-to-text', 'multimodal', 'vision'];
 
package/packages/model-runtime/src/hunyuan/index.ts

@@ -41,7 +41,7 @@ export const LobeHunyuanAI = createOpenAICompatibleRuntime({
     chatCompletion: () => process.env.DEBUG_HUNYUAN_CHAT_COMPLETION === '1',
   },
   models: async ({ client }) => {
-    const { LOBE_DEFAULT_MODEL_LIST } = await import('
+    const { LOBE_DEFAULT_MODEL_LIST } = await import('model-bank');
 
     const functionCallKeywords = ['hunyuan-functioncall', 'hunyuan-turbo', 'hunyuan-pro'];
 
package/packages/model-runtime/src/infiniai/index.ts

@@ -42,7 +42,7 @@ export const LobeInfiniAI = createOpenAICompatibleRuntime({
     chatCompletion: () => process.env.DEBUG_INFINIAI_CHAT_COMPLETION === '1',
   },
   models: async ({ client }) => {
-    const { LOBE_DEFAULT_MODEL_LIST } = await import('
+    const { LOBE_DEFAULT_MODEL_LIST } = await import('model-bank');
 
     const reasoningKeywords = ['deepseek-r1', 'qwq', 'qwen3'];
     const visionKeywords = ['qwen2.5-vl'];
package/packages/model-runtime/src/internlm/index.ts

@@ -21,7 +21,7 @@ export const LobeInternLMAI = createOpenAICompatibleRuntime({
     chatCompletion: () => process.env.DEBUG_INTERNLM_CHAT_COMPLETION === '1',
   },
   models: async ({ client }) => {
-    const { LOBE_DEFAULT_MODEL_LIST } = await import('
+    const { LOBE_DEFAULT_MODEL_LIST } = await import('model-bank');
 
     const functionCallKeywords = ['internlm'];
 
package/packages/model-runtime/src/jina/index.ts

@@ -13,7 +13,7 @@ export const LobeJinaAI = createOpenAICompatibleRuntime({
     chatCompletion: () => process.env.DEBUG_JINA_CHAT_COMPLETION === '1',
   },
   models: async ({ client }) => {
-    const { LOBE_DEFAULT_MODEL_LIST } = await import('
+    const { LOBE_DEFAULT_MODEL_LIST } = await import('model-bank');
 
     const reasoningKeywords = ['deepsearch'];
 
package/packages/model-runtime/src/lmstudio/index.ts

@@ -14,7 +14,7 @@ export const LobeLMStudioAI = createOpenAICompatibleRuntime({
     chatCompletion: () => process.env.DEBUG_LMSTUDIO_CHAT_COMPLETION === '1',
   },
   models: async ({ client }) => {
-    const { LOBE_DEFAULT_MODEL_LIST } = await import('
+    const { LOBE_DEFAULT_MODEL_LIST } = await import('model-bank');
 
     const modelsPage = (await client.models.list()) as any;
     const modelList: LMStudioModelCard[] = modelsPage.data;
package/packages/model-runtime/src/mistral/index.ts

@@ -34,7 +34,7 @@ export const LobeMistralAI = createOpenAICompatibleRuntime({
     chatCompletion: () => process.env.DEBUG_MISTRAL_CHAT_COMPLETION === '1',
   },
   models: async ({ client }) => {
-    const { LOBE_DEFAULT_MODEL_LIST } = await import('
+    const { LOBE_DEFAULT_MODEL_LIST } = await import('model-bank');
 
     const modelsPage = (await client.models.list()) as any;
     const modelList: MistralModelCard[] = modelsPage.data;