@lobehub/chat 1.91.1 โ†’ 1.91.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (29)
  1. package/.eslintrc.js +2 -0
  2. package/CHANGELOG.md +58 -0
  3. package/changelog/v1.json +21 -0
  4. package/package.json +2 -2
  5. package/src/app/(backend)/middleware/auth/utils.ts +2 -1
  6. package/src/app/[variants]/(main)/profile/features/ClerkProfile.tsx +1 -4
  7. package/src/config/aiModels/modelscope.ts +4 -1
  8. package/src/config/aiModels/novita.ts +2 -0
  9. package/src/config/aiModels/openrouter.ts +2 -0
  10. package/src/config/aiModels/siliconcloud.ts +1 -0
  11. package/src/config/modelProviders/anthropic.ts +30 -11
  12. package/src/config/modelProviders/openai.ts +14 -0
  13. package/src/layout/AuthProvider/Clerk/useAppearance.ts +1 -4
  14. package/src/libs/model-runtime/google/index.ts +30 -40
  15. package/src/libs/model-runtime/novita/__snapshots__/index.test.ts.snap +19 -1
  16. package/src/libs/model-runtime/novita/index.ts +14 -15
  17. package/src/libs/model-runtime/nvidia/index.ts +2 -21
  18. package/src/libs/model-runtime/openai/__snapshots__/index.test.ts.snap +39 -11
  19. package/src/libs/model-runtime/openai/index.ts +3 -38
  20. package/src/libs/model-runtime/openrouter/__snapshots__/index.test.ts.snap +3 -0
  21. package/src/libs/model-runtime/openrouter/index.ts +45 -54
  22. package/src/libs/model-runtime/qwen/index.ts +2 -45
  23. package/src/libs/model-runtime/siliconcloud/index.ts +2 -51
  24. package/src/libs/model-runtime/utils/modelParse.test.ts +761 -0
  25. package/src/libs/model-runtime/utils/modelParse.ts +186 -0
  26. package/src/libs/model-runtime/volcengine/index.ts +11 -0
  27. package/src/libs/model-runtime/zeroone/index.ts +2 -23
  28. package/src/libs/model-runtime/zhipu/index.ts +7 -34
  29. package/src/app/[variants]/(main)/settings/provider/(detail)/ollama/OllamaModelDownloader/index.tsx +0 -0
package/.eslintrc.js CHANGED
@@ -19,6 +19,8 @@ config.rules['unicorn/no-array-for-each'] = 0;
19
19
  config.rules['unicorn/prefer-number-properties'] = 0;
20
20
  config.rules['unicorn/prefer-query-selector'] = 0;
21
21
  config.rules['unicorn/no-array-callback-reference'] = 0;
22
+ // FIXME: Linting error in src/app/[variants]/(main)/chat/features/Migration/DBReader.ts, the fundamental solution should be upgrading typescript-eslint
23
+ config.rules['@typescript-eslint/no-useless-constructor'] = 0;
22
24
 
23
25
  config.overrides = [
24
26
  {
package/CHANGELOG.md CHANGED
@@ -2,6 +2,64 @@
2
2
 
3
3
  # Changelog
4
4
 
5
+ ### [Version 1.91.3](https://github.com/lobehub/lobe-chat/compare/v1.91.2...v1.91.3)
6
+
7
+ <sup>Released on **2025-06-05**</sup>
8
+
9
+ #### ๐Ÿ› Bug Fixes
10
+
11
+ - **misc**: Correct deepseek R1 fc support display.
12
+
13
+ #### ๐Ÿ’„ Styles
14
+
15
+ - **misc**: Add openAI websearch and claude 4 to modelproviders.
16
+
17
+ <br/>
18
+
19
+ <details>
20
+ <summary><kbd>Improvements and Fixes</kbd></summary>
21
+
22
+ #### What's fixed
23
+
24
+ - **misc**: Correct deepseek R1 fc support display, closes [#8069](https://github.com/lobehub/lobe-chat/issues/8069) ([ed5bb5f](https://github.com/lobehub/lobe-chat/commit/ed5bb5f))
25
+
26
+ #### Styles
27
+
28
+ - **misc**: Add openAI websearch and claude 4 to modelproviders, closes [#7988](https://github.com/lobehub/lobe-chat/issues/7988) ([95994f4](https://github.com/lobehub/lobe-chat/commit/95994f4))
29
+
30
+ </details>
31
+
32
+ <div align="right">
33
+
34
+ [![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
35
+
36
+ </div>
37
+
38
+ ### [Version 1.91.2](https://github.com/lobehub/lobe-chat/compare/v1.91.1...v1.91.2)
39
+
40
+ <sup>Released on **2025-06-05**</sup>
41
+
42
+ #### ๐Ÿ’„ Styles
43
+
44
+ - **misc**: Add Volcengine & OpenAI-like Provider (e.g. oneapi) model fetch support.
45
+
46
+ <br/>
47
+
48
+ <details>
49
+ <summary><kbd>Improvements and Fixes</kbd></summary>
50
+
51
+ #### Styles
52
+
53
+ - **misc**: Add Volcengine & OpenAI-like Provider (e.g. oneapi) model fetch support, closes [#8064](https://github.com/lobehub/lobe-chat/issues/8064) ([d3dafe1](https://github.com/lobehub/lobe-chat/commit/d3dafe1))
54
+
55
+ </details>
56
+
57
+ <div align="right">
58
+
59
+ [![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
60
+
61
+ </div>
62
+
5
63
  ### [Version 1.91.1](https://github.com/lobehub/lobe-chat/compare/v1.91.0...v1.91.1)
6
64
 
7
65
  <sup>Released on **2025-06-04**</sup>
package/changelog/v1.json CHANGED
@@ -1,4 +1,25 @@
1
1
  [
2
+ {
3
+ "children": {
4
+ "fixes": [
5
+ "Correct deepseek R1 fc support display."
6
+ ],
7
+ "improvements": [
8
+ "Add openAI websearch and claude 4 to modelproviders."
9
+ ]
10
+ },
11
+ "date": "2025-06-05",
12
+ "version": "1.91.3"
13
+ },
14
+ {
15
+ "children": {
16
+ "improvements": [
17
+ "Add Volcengine & OpenAI-like Provider (e.g. oneapi) model fetch support."
18
+ ]
19
+ },
20
+ "date": "2025-06-05",
21
+ "version": "1.91.2"
22
+ },
2
23
  {
3
24
  "children": {
4
25
  "improvements": [
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@lobehub/chat",
3
- "version": "1.91.1",
3
+ "version": "1.91.3",
4
4
  "description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
5
5
  "keywords": [
6
6
  "framework",
@@ -130,7 +130,7 @@
130
130
  "@azure/core-auth": "^1.9.0",
131
131
  "@cfworker/json-schema": "^4.1.1",
132
132
  "@clerk/localizations": "^3.16.3",
133
- "@clerk/nextjs": "^6.20.2",
133
+ "@clerk/nextjs": "^6.21.0",
134
134
  "@clerk/themes": "^2.2.48",
135
135
  "@codesandbox/sandpack-react": "^2.20.0",
136
136
  "@cyntler/react-doc-viewer": "^1.17.0",
@@ -28,7 +28,8 @@ export const checkAuthMethod = ({
28
28
  // clerk auth handler
29
29
  if (enableClerk) {
30
30
  // if there is no userId, means the use is not login, just throw error
31
- if (!clerkAuth?.userId) throw AgentRuntimeError.createError(ChatErrorType.InvalidClerkUser);
31
+ if (!(clerkAuth as any)?.userId)
32
+ throw AgentRuntimeError.createError(ChatErrorType.InvalidClerkUser);
32
33
  // if the user is login, just return
33
34
  else return;
34
35
  }
@@ -50,10 +50,7 @@ export const useStyles = createStyles(
50
50
  scrollBox: css`
51
51
  background: transparent;
52
52
  `,
53
- }) as Partial<{
54
- // eslint-disable-next-line unused-imports/no-unused-vars
55
- [k in keyof ElementsConfig]: any;
56
- }>,
53
+ }) as Partial<Record<keyof ElementsConfig, any>>,
57
54
  );
58
55
 
59
56
  const Client = memo<{ mobile?: boolean }>(({ mobile }) => {
@@ -4,9 +4,11 @@ const modelscopeChatModels: AIChatModelCard[] = [
4
4
  {
5
5
  abilities: {
6
6
  functionCall: true,
7
+ reasoning: true,
7
8
  },
8
9
  contextWindowTokens: 131_072,
9
- description: 'DeepSeek R1 ้€š่ฟ‡ๅˆฉ็”จๅขžๅŠ ็š„่ฎก็ฎ—่ต„ๆบๅ’ŒๅœจๅŽ่ฎญ็ปƒ่ฟ‡็จ‹ไธญๅผ•ๅ…ฅ็ฎ—ๆณ•ไผ˜ๅŒ–ๆœบๅˆถ๏ผŒๆ˜พ่‘—ๆ้ซ˜ไบ†ๅ…ถๆŽจ็†ๅ’ŒๆŽจๆ–ญ่ƒฝๅŠ›็š„ๆทฑๅบฆใ€‚่ฏฅๆจกๅž‹ๅœจๅ„็งๅŸบๅ‡†่ฏ„ไผฐไธญ่กจ็Žฐๅ‡บ่‰ฒ๏ผŒๅŒ…ๆ‹ฌๆ•ฐๅญฆใ€็ผ–็จ‹ๅ’Œไธ€่ˆฌ้€ป่พ‘ๆ–น้ขใ€‚ๅ…ถๆ•ดไฝ“ๆ€ง่ƒฝ็ŽฐๅทฒๆŽฅ่ฟ‘้ข†ๅ…ˆๆจกๅž‹๏ผŒๅฆ‚ O3 ๅ’Œ Gemini 2.5 Proใ€‚',
10
+ description:
11
+ 'DeepSeek R1 ้€š่ฟ‡ๅˆฉ็”จๅขžๅŠ ็š„่ฎก็ฎ—่ต„ๆบๅ’ŒๅœจๅŽ่ฎญ็ปƒ่ฟ‡็จ‹ไธญๅผ•ๅ…ฅ็ฎ—ๆณ•ไผ˜ๅŒ–ๆœบๅˆถ๏ผŒๆ˜พ่‘—ๆ้ซ˜ไบ†ๅ…ถๆŽจ็†ๅ’ŒๆŽจๆ–ญ่ƒฝๅŠ›็š„ๆทฑๅบฆใ€‚่ฏฅๆจกๅž‹ๅœจๅ„็งๅŸบๅ‡†่ฏ„ไผฐไธญ่กจ็Žฐๅ‡บ่‰ฒ๏ผŒๅŒ…ๆ‹ฌๆ•ฐๅญฆใ€็ผ–็จ‹ๅ’Œไธ€่ˆฌ้€ป่พ‘ๆ–น้ขใ€‚ๅ…ถๆ•ดไฝ“ๆ€ง่ƒฝ็ŽฐๅทฒๆŽฅ่ฟ‘้ข†ๅ…ˆๆจกๅž‹๏ผŒๅฆ‚ O3 ๅ’Œ Gemini 2.5 Proใ€‚',
10
12
  displayName: 'DeepSeek-R1-0528',
11
13
  enabled: true,
12
14
  id: 'deepseek-ai/DeepSeek-R1-0528',
@@ -26,6 +28,7 @@ const modelscopeChatModels: AIChatModelCard[] = [
26
28
  {
27
29
  abilities: {
28
30
  functionCall: true,
31
+ reasoning: true,
29
32
  },
30
33
  contextWindowTokens: 131_072,
31
34
  description: 'DeepSeek-R1ๆ˜ฏDeepSeekๆœ€ๆ–ฐ็š„ๆŽจ็†ๆจกๅž‹๏ผŒไธ“ๆณจไบŽๅคๆ‚ๆŽจ็†ไปปๅŠกใ€‚',
@@ -227,6 +227,7 @@ const novitaChatModels: AIChatModelCard[] = [
227
227
  },
228
228
  {
229
229
  abilities: {
230
+ functionCall: true,
230
231
  reasoning: true,
231
232
  },
232
233
  contextWindowTokens: 128_000,
@@ -241,6 +242,7 @@ const novitaChatModels: AIChatModelCard[] = [
241
242
  },
242
243
  {
243
244
  abilities: {
245
+ functionCall: true,
244
246
  reasoning: true,
245
247
  },
246
248
  contextWindowTokens: 128_000,
@@ -501,6 +501,7 @@ const openrouterChatModels: AIChatModelCard[] = [
501
501
  },
502
502
  {
503
503
  abilities: {
504
+ functionCall: true,
504
505
  reasoning: true,
505
506
  },
506
507
  contextWindowTokens: 163_840,
@@ -517,6 +518,7 @@ const openrouterChatModels: AIChatModelCard[] = [
517
518
  },
518
519
  {
519
520
  abilities: {
521
+ functionCall: true,
520
522
  reasoning: true,
521
523
  },
522
524
  contextWindowTokens: 163_840,
@@ -215,6 +215,7 @@ const siliconcloudChatModels: AIChatModelCard[] = [
215
215
  },
216
216
  {
217
217
  abilities: {
218
+ functionCall: true,
218
219
  reasoning: true,
219
220
  },
220
221
  contextWindowTokens: 131_072,
@@ -6,25 +6,44 @@ const Anthropic: ModelProviderCard = {
6
6
  {
7
7
  contextWindowTokens: 200_000,
8
8
  description:
9
- 'Claude 3.7 sonnet ๆ˜ฏ Anthropic ๆœ€ๅฟซ็š„ไธ‹ไธ€ไปฃๆจกๅž‹ใ€‚ไธŽ Claude 3 Haiku ็›ธๆฏ”๏ผŒClaude 3.7 Sonnet ๅœจๅ„้กนๆŠ€่ƒฝไธŠ้ƒฝๆœ‰ๆ‰€ๆๅ‡๏ผŒๅนถๅœจ่ฎธๅคšๆ™บๅŠ›ๅŸบๅ‡†ๆต‹่ฏ•ไธญ่ถ…่ถŠไบ†ไธŠไธ€ไปฃๆœ€ๅคง็š„ๆจกๅž‹ Claude 3 Opusใ€‚',
10
- displayName: 'Claude 3.7 Sonnet',
9
+ 'Claude 4 Opus ๆ˜ฏ Anthropic ๆœ€ๅผบๅคง็š„ไธ‹ไธ€ไปฃๆจกๅž‹๏ผŒๅ…ทๆœ‰ๅ“่ถŠ็š„ๆŽจ็†่ƒฝๅŠ›ๅ’Œๅˆ›้€ ๅŠ›๏ผŒ้€‚็”จไบŽๆœ€ๅคๆ‚็š„ไปปๅŠกๅ’Œ้ซ˜็บงๅˆ†ๆžใ€‚',
10
+ displayName: 'Claude 4 Opus',
11
11
  enabled: true,
12
12
  functionCall: true,
13
- id: 'claude-3-7-sonnet-20250219',
14
- maxOutput: 8192,
13
+ id: 'claude-opus-4-20250514',
14
+ maxOutput: 32_000,
15
15
  pricing: {
16
- cachedInput: 0.1,
17
- input: 1,
18
- output: 5,
19
- writeCacheInput: 1.25,
16
+ cachedInput: 7.5,
17
+ input: 30,
18
+ output: 150,
19
+ writeCacheInput: 37.5,
20
20
  },
21
- releasedAt: '2025-02-24',
21
+ releasedAt: '2025-05-14',
22
+ vision: true,
23
+ },
24
+ {
25
+ contextWindowTokens: 200_000,
26
+ description:
27
+ 'Claude 4 Sonnet ๆไพ›ไบ†ไผ˜ๅผ‚็š„ๆ€ง่ƒฝๅ’Œ้€Ÿๅบฆๅนณ่กก๏ผŒๆ˜ฏๆ–ฐไธ€ไปฃๆจกๅž‹ไธญ็š„็†ๆƒณ้€‰ๆ‹ฉ๏ผŒ้€‚็”จไบŽๅนฟๆณ›็š„ไผไธšๅ’Œๅˆ›ๆ„ไปปๅŠกใ€‚',
28
+ displayName: 'Claude 4 Sonnet',
29
+ enabled: true,
30
+ functionCall: true,
31
+ id: 'claude-sonnet-4-20250514',
32
+ maxOutput: 64_000,
33
+ pricing: {
34
+ cachedInput: 1.5,
35
+ input: 6,
36
+ output: 30,
37
+ writeCacheInput: 7.5,
38
+ },
39
+ releasedAt: '2025-05-14',
40
+ vision: true,
22
41
  },
23
42
  {
24
43
  contextWindowTokens: 200_000,
25
44
  description:
26
- 'Claude 3.7 sonnet Extended thinking ๆ˜ฏ Anthropic ๆœ€ๅฟซ็š„ไธ‹ไธ€ไปฃๆจกๅž‹ใ€‚ไธŽ Claude 3 Haiku ็›ธๆฏ”๏ผŒClaude 3.7 Sonnet ๅœจๅ„้กนๆŠ€่ƒฝไธŠ้ƒฝๆœ‰ๆ‰€ๆๅ‡๏ผŒๅนถๅœจ่ฎธๅคšๆ™บๅŠ›ๅŸบๅ‡†ๆต‹่ฏ•ไธญ่ถ…่ถŠไบ†ไธŠไธ€ไปฃๆœ€ๅคง็š„ๆจกๅž‹ Claude 3 Opusใ€‚',
27
- displayName: 'Claude 3.7 Sonnet Extended thinking',
45
+ 'Claude 3.7 sonnet ๆ˜ฏ Anthropic ๆœ€ๅฟซ็š„ไธ‹ไธ€ไปฃๆจกๅž‹ใ€‚ไธŽ Claude 3 Haiku ็›ธๆฏ”๏ผŒClaude 3.7 Sonnet ๅœจๅ„้กนๆŠ€่ƒฝไธŠ้ƒฝๆœ‰ๆ‰€ๆๅ‡๏ผŒๅนถๅœจ่ฎธๅคšๆ™บๅŠ›ๅŸบๅ‡†ๆต‹่ฏ•ไธญ่ถ…่ถŠไบ†ไธŠไธ€ไปฃๆœ€ๅคง็š„ๆจกๅž‹ Claude 3 Opusใ€‚',
46
+ displayName: 'Claude 3.7 Sonnet',
28
47
  enabled: true,
29
48
  functionCall: true,
30
49
  id: 'claude-3-7-sonnet-20250219',
@@ -78,6 +78,20 @@ const OpenAI: ModelProviderCard = {
78
78
  },
79
79
  vision: true,
80
80
  },
81
+ {
82
+ contextWindowTokens: 128_000,
83
+ description: 'GPT-4o mini search preview ๆ˜ฏไธ€ไธชไธ“้—จไธบๆœ็ดขๅŠŸ่ƒฝไผ˜ๅŒ–็š„้ข„่งˆ็‰ˆๆœฌ๏ผŒๅ…ทๆœ‰ๅขžๅผบ็š„็ฝ‘็ปœๆœ็ดข่ƒฝๅŠ›ๅ’Œๅฎžๆ—ถไฟกๆฏๆฃ€็ดขๅŠŸ่ƒฝใ€‚',
84
+ displayName: 'GPT-4o mini Search Preview',
85
+ functionCall: true,
86
+ id: 'gpt-4o-mini-search-preview',
87
+ maxOutput: 16_384,
88
+ pricing: {
89
+ input: 0.15,
90
+ output: 0.6,
91
+ },
92
+ releasedAt: '2024-12-01',
93
+ vision: true,
94
+ },
81
95
  {
82
96
  contextWindowTokens: 128_000,
83
97
  description:
@@ -89,10 +89,7 @@ export const useStyles = createStyles(
89
89
  socialButtonsBlockButton__google: css`
90
90
  order: -1;
91
91
  `,
92
- }) as Partial<{
93
- // eslint-disable-next-line unused-imports/no-unused-vars
94
- [k in keyof ElementsConfig]: any;
95
- }>,
92
+ }) as Partial<Record<keyof ElementsConfig, any>>,
96
93
  );
97
94
 
98
95
  export const useAppearance = () => {
@@ -10,7 +10,6 @@ import {
10
10
  SchemaType,
11
11
  } from '@google/generative-ai';
12
12
 
13
- import type { ChatModelCard } from '@/types/llm';
14
13
  import { imageUrlToBase64 } from '@/utils/imageToBase64';
15
14
  import { safeParseJSON } from '@/utils/safeParseJSON';
16
15
 
@@ -206,47 +205,38 @@ export class LobeGoogleAI implements LobeRuntimeAI {
206
205
  }
207
206
 
208
207
  async models() {
209
- const { LOBE_DEFAULT_MODEL_LIST } = await import('@/config/aiModels');
210
-
211
- const url = `${this.baseURL}/v1beta/models?key=${this.apiKey}`;
212
- const response = await fetch(url, {
213
- method: 'GET',
214
- });
215
- const json = await response.json();
216
-
217
- const modelList: GoogleModelCard[] = json['models'];
218
-
219
- return modelList
220
- .map((model) => {
221
- const modelName = model.name.replace(/^models\//, '');
222
-
223
- const knownModel = LOBE_DEFAULT_MODEL_LIST.find(
224
- (m) => modelName.toLowerCase() === m.id.toLowerCase(),
225
- );
226
-
208
+ try {
209
+ const url = `${this.baseURL}/v1beta/models?key=${this.apiKey}`;
210
+ const response = await fetch(url, {
211
+ method: 'GET',
212
+ });
213
+
214
+ if (!response.ok) {
215
+ throw new Error(`HTTP error! status: ${response.status}`);
216
+ }
217
+
218
+ const json = await response.json();
219
+
220
+ const modelList: GoogleModelCard[] = json.models;
221
+
222
+ const processedModels = modelList.map((model) => {
223
+ const id = model.name.replace(/^models\//, '');
224
+
227
225
  return {
228
- contextWindowTokens: model.inputTokenLimit + model.outputTokenLimit,
229
- displayName: model.displayName,
230
- enabled: knownModel?.enabled || false,
231
- functionCall:
232
- (modelName.toLowerCase().includes('gemini') &&
233
- !modelName.toLowerCase().includes('thinking')) ||
234
- knownModel?.abilities?.functionCall ||
235
- false,
236
- id: modelName,
237
- reasoning:
238
- modelName.toLowerCase().includes('thinking') ||
239
- knownModel?.abilities?.reasoning ||
240
- false,
241
- vision:
242
- modelName.toLowerCase().includes('vision') ||
243
- (modelName.toLowerCase().includes('gemini') &&
244
- !modelName.toLowerCase().includes('gemini-1.0')) ||
245
- knownModel?.abilities?.vision ||
246
- false,
226
+ contextWindowTokens: (model.inputTokenLimit || 0) + (model.outputTokenLimit || 0),
227
+ displayName: model.displayName || id,
228
+ id,
229
+ maxOutput: model.outputTokenLimit || undefined,
247
230
  };
248
- })
249
- .filter(Boolean) as ChatModelCard[];
231
+ });
232
+
233
+ const { MODEL_LIST_CONFIGS, processModelList } = await import('../utils/modelParse');
234
+
235
+ return processModelList(processedModels, MODEL_LIST_CONFIGS.google);
236
+ } catch (error) {
237
+ console.error('Failed to fetch Google models:', error);
238
+ throw error;
239
+ }
250
240
  }
251
241
 
252
242
  private buildPayload(payload: ChatStreamPayload) {
@@ -9,6 +9,7 @@ exports[`NovitaAI > models > should get models 1`] = `
9
9
  "enabled": false,
10
10
  "functionCall": false,
11
11
  "id": "meta-llama/llama-3-8b-instruct",
12
+ "maxOutput": undefined,
12
13
  "reasoning": false,
13
14
  "vision": false,
14
15
  },
@@ -19,6 +20,7 @@ exports[`NovitaAI > models > should get models 1`] = `
19
20
  "enabled": false,
20
21
  "functionCall": false,
21
22
  "id": "meta-llama/llama-3-70b-instruct",
23
+ "maxOutput": undefined,
22
24
  "reasoning": false,
23
25
  "vision": false,
24
26
  },
@@ -29,6 +31,7 @@ exports[`NovitaAI > models > should get models 1`] = `
29
31
  "enabled": false,
30
32
  "functionCall": false,
31
33
  "id": "meta-llama/llama-3.1-8b-instruct",
34
+ "maxOutput": undefined,
32
35
  "reasoning": false,
33
36
  "vision": false,
34
37
  },
@@ -39,6 +42,7 @@ exports[`NovitaAI > models > should get models 1`] = `
39
42
  "enabled": true,
40
43
  "functionCall": false,
41
44
  "id": "meta-llama/llama-3.1-70b-instruct",
45
+ "maxOutput": undefined,
42
46
  "reasoning": false,
43
47
  "vision": false,
44
48
  },
@@ -49,6 +53,7 @@ exports[`NovitaAI > models > should get models 1`] = `
49
53
  "enabled": false,
50
54
  "functionCall": false,
51
55
  "id": "meta-llama/llama-3.1-405b-instruct",
56
+ "maxOutput": undefined,
52
57
  "reasoning": false,
53
58
  "vision": false,
54
59
  },
@@ -60,6 +65,7 @@ Designed for a wide variety of tasks, it empowers developers and researchers to
60
65
  "enabled": false,
61
66
  "functionCall": false,
62
67
  "id": "google/gemma-2-9b-it",
68
+ "maxOutput": undefined,
63
69
  "reasoning": false,
64
70
  "vision": false,
65
71
  },
@@ -70,6 +76,7 @@ Designed for a wide variety of tasks, it empowers developers and researchers to
70
76
  "enabled": false,
71
77
  "functionCall": false,
72
78
  "id": "jondurbin/airoboros-l2-70b",
79
+ "maxOutput": undefined,
73
80
  "reasoning": false,
74
81
  "vision": false,
75
82
  },
@@ -80,6 +87,7 @@ Designed for a wide variety of tasks, it empowers developers and researchers to
80
87
  "enabled": false,
81
88
  "functionCall": true,
82
89
  "id": "nousresearch/hermes-2-pro-llama-3-8b",
90
+ "maxOutput": undefined,
83
91
  "reasoning": false,
84
92
  "vision": false,
85
93
  },
@@ -90,6 +98,7 @@ Designed for a wide variety of tasks, it empowers developers and researchers to
90
98
  "enabled": false,
91
99
  "functionCall": false,
92
100
  "id": "mistralai/mistral-7b-instruct",
101
+ "maxOutput": undefined,
93
102
  "reasoning": false,
94
103
  "vision": false,
95
104
  },
@@ -100,6 +109,7 @@ Designed for a wide variety of tasks, it empowers developers and researchers to
100
109
  "enabled": false,
101
110
  "functionCall": false,
102
111
  "id": "cognitivecomputations/dolphin-mixtral-8x22b",
112
+ "maxOutput": undefined,
103
113
  "reasoning": false,
104
114
  "vision": false,
105
115
  },
@@ -110,7 +120,8 @@ Designed for a wide variety of tasks, it empowers developers and researchers to
110
120
  "enabled": false,
111
121
  "functionCall": false,
112
122
  "id": "sao10k/l3-70b-euryale-v2.1",
113
- "reasoning": false,
123
+ "maxOutput": undefined,
124
+ "reasoning": true,
114
125
  "vision": false,
115
126
  },
116
127
  {
@@ -120,6 +131,7 @@ Designed for a wide variety of tasks, it empowers developers and researchers to
120
131
  "enabled": false,
121
132
  "functionCall": false,
122
133
  "id": "sophosympatheia/midnight-rose-70b",
134
+ "maxOutput": undefined,
123
135
  "reasoning": false,
124
136
  "vision": false,
125
137
  },
@@ -130,6 +142,7 @@ Designed for a wide variety of tasks, it empowers developers and researchers to
130
142
  "enabled": false,
131
143
  "functionCall": false,
132
144
  "id": "gryphe/mythomax-l2-13b",
145
+ "maxOutput": undefined,
133
146
  "reasoning": false,
134
147
  "vision": false,
135
148
  },
@@ -140,6 +153,7 @@ Designed for a wide variety of tasks, it empowers developers and researchers to
140
153
  "enabled": false,
141
154
  "functionCall": false,
142
155
  "id": "nousresearch/nous-hermes-llama2-13b",
156
+ "maxOutput": undefined,
143
157
  "reasoning": false,
144
158
  "vision": false,
145
159
  },
@@ -150,6 +164,7 @@ Designed for a wide variety of tasks, it empowers developers and researchers to
150
164
  "enabled": false,
151
165
  "functionCall": false,
152
166
  "id": "Nous-Hermes-2-Mixtral-8x7B-DPO",
167
+ "maxOutput": undefined,
153
168
  "reasoning": false,
154
169
  "vision": false,
155
170
  },
@@ -160,6 +175,7 @@ Designed for a wide variety of tasks, it empowers developers and researchers to
160
175
  "enabled": false,
161
176
  "functionCall": false,
162
177
  "id": "lzlv_70b",
178
+ "maxOutput": undefined,
163
179
  "reasoning": false,
164
180
  "vision": false,
165
181
  },
@@ -170,6 +186,7 @@ Designed for a wide variety of tasks, it empowers developers and researchers to
170
186
  "enabled": false,
171
187
  "functionCall": false,
172
188
  "id": "teknium/openhermes-2.5-mistral-7b",
189
+ "maxOutput": undefined,
173
190
  "reasoning": false,
174
191
  "vision": false,
175
192
  },
@@ -180,6 +197,7 @@ Designed for a wide variety of tasks, it empowers developers and researchers to
180
197
  "enabled": false,
181
198
  "functionCall": false,
182
199
  "id": "microsoft/wizardlm-2-8x22b",
200
+ "maxOutput": undefined,
183
201
  "reasoning": false,
184
202
  "vision": false,
185
203
  },
@@ -1,6 +1,7 @@
1
1
  import type { ChatModelCard } from '@/types/llm';
2
2
 
3
3
  import { ModelProvider } from '../types';
4
+ import { processMultiProviderModelList } from '../utils/modelParse';
4
5
  import { createOpenAICompatibleRuntime } from '../utils/openaiCompatibleFactory';
5
6
  import { NovitaModelCard } from './type';
6
7
 
@@ -15,38 +16,36 @@ export const LobeNovitaAI = createOpenAICompatibleRuntime({
15
16
  chatCompletion: () => process.env.DEBUG_NOVITA_CHAT_COMPLETION === '1',
16
17
  },
17
18
  models: async ({ client }) => {
18
- const { LOBE_DEFAULT_MODEL_LIST } = await import('@/config/aiModels');
19
-
20
19
  const reasoningKeywords = ['deepseek-r1'];
21
20
 
22
21
  const modelsPage = (await client.models.list()) as any;
23
22
  const modelList: NovitaModelCard[] = modelsPage.data;
24
23
 
25
- return modelList
26
- .map((model) => {
27
- const knownModel = LOBE_DEFAULT_MODEL_LIST.find(
28
- (m) => model.id.toLowerCase() === m.id.toLowerCase(),
29
- );
24
+ // ่งฃๆžๆจกๅž‹่ƒฝๅŠ›
25
+ const baseModels = await processMultiProviderModelList(modelList);
26
+
27
+ // ๅˆๅนถ Novita ่Žทๅ–็š„ๆจกๅž‹ไฟกๆฏ
28
+ return baseModels
29
+ .map((baseModel) => {
30
+ const model = modelList.find((m) => m.id === baseModel.id);
31
+
32
+ if (!model) return baseModel;
30
33
 
31
34
  return {
35
+ ...baseModel,
32
36
  contextWindowTokens: model.context_size,
33
37
  description: model.description,
34
38
  displayName: model.title,
35
- enabled: knownModel?.enabled || false,
36
39
  functionCall:
40
+ baseModel.functionCall ||
37
41
  model.description.toLowerCase().includes('function calling') ||
38
- knownModel?.abilities?.functionCall ||
39
42
  false,
40
- id: model.id,
41
43
  reasoning:
44
+ baseModel.reasoning ||
42
45
  model.description.toLowerCase().includes('reasoning task') ||
43
46
  reasoningKeywords.some((keyword) => model.id.toLowerCase().includes(keyword)) ||
44
- knownModel?.abilities?.reasoning ||
45
- false,
46
- vision:
47
- model.description.toLowerCase().includes('vision') ||
48
- knownModel?.abilities?.vision ||
49
47
  false,
48
+ vision: baseModel.vision || model.description.toLowerCase().includes('vision') || false,
50
49
  };
51
50
  })
52
51
  .filter(Boolean) as ChatModelCard[];
@@ -1,6 +1,5 @@
1
- import type { ChatModelCard } from '@/types/llm';
2
-
3
1
  import { ModelProvider } from '../types';
2
+ import { processMultiProviderModelList } from '../utils/modelParse';
4
3
  import { createOpenAICompatibleRuntime } from '../utils/openaiCompatibleFactory';
5
4
 
6
5
  export interface NvidiaModelCard {
@@ -13,28 +12,10 @@ export const LobeNvidiaAI = createOpenAICompatibleRuntime({
13
12
  chatCompletion: () => process.env.DEBUG_NVIDIA_CHAT_COMPLETION === '1',
14
13
  },
15
14
  models: async ({ client }) => {
16
- const { LOBE_DEFAULT_MODEL_LIST } = await import('@/config/aiModels');
17
-
18
15
  const modelsPage = (await client.models.list()) as any;
19
16
  const modelList: NvidiaModelCard[] = modelsPage.data;
20
17
 
21
- return modelList
22
- .map((model) => {
23
- const knownModel = LOBE_DEFAULT_MODEL_LIST.find(
24
- (m) => model.id.toLowerCase() === m.id.toLowerCase(),
25
- );
26
-
27
- return {
28
- contextWindowTokens: knownModel?.contextWindowTokens ?? undefined,
29
- displayName: knownModel?.displayName ?? undefined,
30
- enabled: knownModel?.enabled || false,
31
- functionCall: knownModel?.abilities?.functionCall || false,
32
- id: model.id,
33
- reasoning: knownModel?.abilities?.reasoning || false,
34
- vision: knownModel?.abilities?.vision || false,
35
- };
36
- })
37
- .filter(Boolean) as ChatModelCard[];
18
+ return processMultiProviderModelList(modelList);
38
19
  },
39
20
  provider: ModelProvider.Nvidia,
40
21
  });