@lobehub/chat 1.111.9 → 1.111.11

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (52)
  1. package/CHANGELOG.md +50 -0
  2. package/changelog/v1.json +18 -0
  3. package/docs/development/state-management/state-management-intro.mdx +2 -2
  4. package/docs/development/state-management/state-management-intro.zh-CN.mdx +2 -2
  5. package/package.json +1 -1
  6. package/packages/model-runtime/src/RouterRuntime/createRuntime.ts +9 -1
  7. package/packages/model-runtime/src/ai302/index.ts +1 -1
  8. package/packages/model-runtime/src/aihubmix/index.ts +28 -71
  9. package/packages/model-runtime/src/anthropic/index.ts +6 -26
  10. package/packages/model-runtime/src/giteeai/index.ts +2 -37
  11. package/packages/model-runtime/src/github/index.ts +33 -44
  12. package/packages/model-runtime/src/modelscope/index.ts +2 -38
  13. package/packages/model-runtime/src/moonshot/index.ts +2 -36
  14. package/packages/model-runtime/src/novita/__snapshots__/index.test.ts.snap +40 -22
  15. package/packages/model-runtime/src/novita/index.ts +1 -32
  16. package/packages/model-runtime/src/nvidia/index.ts +1 -1
  17. package/packages/model-runtime/src/openai/__snapshots__/index.test.ts.snap +63 -7
  18. package/packages/model-runtime/src/openai/index.ts +1 -1
  19. package/packages/model-runtime/src/openrouter/__snapshots__/index.test.ts.snap +6 -21
  20. package/packages/model-runtime/src/openrouter/index.ts +29 -37
  21. package/packages/model-runtime/src/qiniu/index.ts +3 -27
  22. package/packages/model-runtime/src/qwen/index.ts +1 -1
  23. package/packages/model-runtime/src/siliconcloud/index.ts +1 -1
  24. package/packages/model-runtime/src/utils/modelParse.test.ts +6 -6
  25. package/packages/model-runtime/src/utils/modelParse.ts +238 -40
  26. package/packages/model-runtime/src/utils/openaiCompatibleFactory/index.test.ts +18 -0
  27. package/packages/model-runtime/src/utils/streams/openai/openai.ts +12 -0
  28. package/packages/model-runtime/src/v0/index.ts +2 -2
  29. package/packages/model-runtime/src/volcengine/index.ts +1 -1
  30. package/packages/model-runtime/src/xai/index.ts +2 -24
  31. package/packages/model-runtime/src/zhipu/index.ts +1 -1
  32. package/src/components/Thinking/index.tsx +1 -1
  33. package/src/config/aiModels/aihubmix.ts +1 -7
  34. package/src/config/aiModels/anthropic.ts +24 -4
  35. package/src/config/aiModels/fal.ts +20 -3
  36. package/src/config/aiModels/google.ts +60 -6
  37. package/src/config/aiModels/groq.ts +4 -21
  38. package/src/config/aiModels/hunyuan.ts +1 -1
  39. package/src/config/aiModels/mistral.ts +22 -5
  40. package/src/config/aiModels/moonshot.ts +20 -0
  41. package/src/config/aiModels/openai.ts +0 -43
  42. package/src/config/aiModels/qwen.ts +113 -3
  43. package/src/config/aiModels/sensenova.ts +6 -6
  44. package/src/config/aiModels/siliconcloud.ts +80 -0
  45. package/src/config/aiModels/stepfun.ts +38 -4
  46. package/src/config/aiModels/zhipu.ts +33 -8
  47. package/src/config/modelProviders/aihubmix.ts +1 -1
  48. package/src/config/modelProviders/mistral.ts +1 -0
  49. package/src/config/modelProviders/openai.ts +1 -1
  50. package/src/config/modelProviders/qwen.ts +1 -1
  51. package/src/config/modelProviders/v0.ts +1 -0
  52. package/src/config/modelProviders/volcengine.ts +1 -0
package/CHANGELOG.md CHANGED
@@ -2,6 +2,56 @@
 
 # Changelog
 
+### [Version 1.111.11](https://github.com/lobehub/lobe-chat/compare/v1.111.10...v1.111.11)
+
+<sup>Released on **2025-08-13**</sup>
+
+#### 💄 Styles
+
+- **misc**: Update Mistral AI models & Optimize many model providers fetching.
+
+<br/>
+
+<details>
+<summary><kbd>Improvements and Fixes</kbd></summary>
+
+#### Styles
+
+- **misc**: Update Mistral AI models & Optimize many model providers fetching, closes [#8644](https://github.com/lobehub/lobe-chat/issues/8644) ([1d466e5](https://github.com/lobehub/lobe-chat/commit/1d466e5))
+
+</details>
+
+<div align="right">
+
+[![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
+
+</div>
+
+### [Version 1.111.10](https://github.com/lobehub/lobe-chat/compare/v1.111.9...v1.111.10)
+
+<sup>Released on **2025-08-12**</sup>
+
+#### 💄 Styles
+
+- **misc**: Adjust near bottom size on thinking scroll.
+
+<br/>
+
+<details>
+<summary><kbd>Improvements and Fixes</kbd></summary>
+
+#### Styles
+
+- **misc**: Adjust near bottom size on thinking scroll, closes [#8772](https://github.com/lobehub/lobe-chat/issues/8772) ([1fae490](https://github.com/lobehub/lobe-chat/commit/1fae490))
+
+</details>
+
+<div align="right">
+
+[![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
+
+</div>
+
 ### [Version 1.111.9](https://github.com/lobehub/lobe-chat/compare/v1.111.8...v1.111.9)
 
 <sup>Released on **2025-08-12**</sup>
package/changelog/v1.json CHANGED
@@ -1,4 +1,22 @@
 [
+  {
+    "children": {
+      "improvements": [
+        "Update Mistral AI models & Optimize many model providers fetching."
+      ]
+    },
+    "date": "2025-08-13",
+    "version": "1.111.11"
+  },
+  {
+    "children": {
+      "improvements": [
+        "Adjust near bottom size on thinking scroll."
+      ]
+    },
+    "date": "2025-08-12",
+    "version": "1.111.10"
+  },
   {
     "children": {
       "improvements": [
package/docs/development/state-management/state-management-intro.mdx CHANGED
@@ -1,5 +1,3 @@
-{/* eslint-disable no-irregular-whitespace */}
-
 # Best Practices for State Management
 
 LobeChat differs from traditional CRUD web applications in that it involves a large amount of rich interactive capabilities. Therefore, it is crucial to design a data flow architecture that is easy to develop and maintain. This document will introduce the best practices for data flow management in LobeChat.
@@ -111,6 +109,8 @@ Based on the provided directory structure of LobeChat SessionStore, we can updat
 
 In the LobeChat application, session management is a complex functional module, so we use the Slice pattern to organize the data flow. Below is the directory structure of LobeChat SessionStore, where each directory and file has its specific purpose:
 
+{/* eslint-disable no-irregular-whitespace */}
+
 ```bash
 src/store/session
 ├── helpers.ts # Helper functions
package/docs/development/state-management/state-management-intro.zh-CN.mdx CHANGED
@@ -1,5 +1,3 @@
-{/* eslint-disable no-irregular-whitespace */}
-
 # 状态管理最佳实践
 
 LobeChat 不同于传统 CRUD 的网页,存在大量的富交互能力,如何设计一个易于开发与易于维护的数据流架构非常重要。本篇文档将介绍 LobeChat 中的数据流管理最佳实践。
@@ -111,6 +109,8 @@ LobeChat SessionStore
 
 在 LobeChat 应用中,由于会话管理是一个复杂的功能模块,因此我们采用了 [slice 模式](https://github.com/pmndrs/zustand/blob/main/docs/guides/slices-pattern.md) 来组织数据流。下面是 LobeChat SessionStore 的目录结构,其中每个目录和文件都有其特定的用途:
 
+{/* eslint-disable no-irregular-whitespace */}
+
 ```fish
 src/store/session
 ├── index.ts # SessionStore 的聚合导出文件
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@lobehub/chat",
-  "version": "1.111.9",
+  "version": "1.111.11",
   "description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
   "keywords": [
     "framework",
package/packages/model-runtime/src/RouterRuntime/createRuntime.ts CHANGED
@@ -114,6 +114,7 @@ export const createRouterRuntime = ({
   id,
   routers,
   apiKey: DEFAULT_API_LEY,
+  models,
   ...params
 }: CreateRouterRuntimeOptions) => {
   return class UniformRuntime implements LobeRuntimeAI {
@@ -201,7 +202,14 @@ export const createRouterRuntime = ({
    }
 
    async models() {
-      return this._runtimes[0].runtime.models?.();
+      if (models && typeof models === 'function') {
+        // 如果是函数式配置,使用最后一个 runtime 的 client 调用函数
+        const lastRuntime = this._runtimes.at(-1)?.runtime;
+        if (lastRuntime && 'client' in lastRuntime) {
+          return await models({ client: (lastRuntime as any).client });
+        }
+      }
+      return this._runtimes.at(-1)?.runtime.models?.();
    }
 
    async embeddings(payload: EmbeddingsPayload, options?: EmbeddingsOptions) {
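
Note: the `models()` override above changes which runtime answers model-list requests. Previously the first router entry was asked; now a function-style `models` option, when present, is invoked with the client of the last router entry, and the plain fallback also moved from `[0]` to `.at(-1)`. A minimal sketch of relying on this, assuming the options shape shown in the diff (the provider id and URLs are illustrative, not real configuration):

// Hypothetical createRouterRuntime call exercising the new function-style `models`.
// The `client` argument is the client of the LAST entry in `routers`, because
// UniformRuntime.models() now resolves this._runtimes.at(-1).
const ExampleRuntime = createRouterRuntime({
  id: 'example-provider', // illustrative, not a real provider id
  models: async ({ client }) => {
    const page = (await client.models.list()) as any;
    return processMultiProviderModelList(page.data ?? [], 'example-provider');
  },
  routers: [
    { apiType: 'anthropic', options: { baseURL: 'https://api.example.com' } },
    // last entry: its client is what `models` receives above
    { apiType: 'openai', options: { baseURL: 'https://api.example.com/v1' } },
  ],
});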
package/packages/model-runtime/src/ai302/index.ts CHANGED
@@ -40,7 +40,7 @@ export const Lobe302AI = createOpenAICompatibleRuntime({
     const modelsPage = (await client.models.list()) as any;
     const modelList: Ai302ModelCard[] = modelsPage.data;
 
-    return processMultiProviderModelList(modelList);
+    return processMultiProviderModelList(modelList, 'ai302');
   },
   provider: ModelProvider.Ai302,
 });
package/packages/model-runtime/src/aihubmix/index.ts CHANGED
@@ -1,10 +1,12 @@
 import urlJoin from 'url-join';
 
-import AiHubMixModels from '@/config/aiModels/aihubmix';
-import type { ChatModelCard } from '@/types/llm';
+import { LOBE_DEFAULT_MODEL_LIST } from '@/config/aiModels';
+import { responsesAPIModels } from '@/const/models';
 
 import { createRouterRuntime } from '../RouterRuntime';
 import { ModelProvider } from '../types';
+import { ChatStreamPayload } from '../types/chat';
+import { detectModelProvider, processMultiProviderModelList } from '../utils/modelParse';
 
 export interface AiHubMixModelCard {
   created: number;
@@ -15,6 +17,17 @@ export interface AiHubMixModelCard {
 
 const baseURL = 'https://aihubmix.com';
 
+const handlePayload = (payload: ChatStreamPayload) => {
+  if (
+    responsesAPIModels.has(payload.model) ||
+    payload.model.includes('gpt-') ||
+    /^o\d/.test(payload.model)
+  ) {
+    return { ...payload, apiMode: 'responses' } as any;
+  }
+  return payload as any;
+};
+
 export const LobeAiHubMixAI = createRouterRuntime({
   debug: {
     chatCompletion: () => process.env.DEBUG_AIHUBMIX_CHAT_COMPLETION === '1',
@@ -24,68 +37,11 @@ export const LobeAiHubMixAI = createRouterRuntime({
   },
   id: ModelProvider.AiHubMix,
   models: async ({ client }) => {
-    const functionCallKeywords = [
-      'gpt-4',
-      'gpt-3.5',
-      'claude',
-      'gemini',
-      'qwen',
-      'deepseek',
-      'llama',
-    ];
-
-    const visionKeywords = [
-      'gpt-4o',
-      'gpt-4-vision',
-      'claude-3',
-      'claude-4',
-      'gemini-pro-vision',
-      'qwen-vl',
-      'llava',
-    ];
-
-    const reasoningKeywords = [
-      'o1',
-      'deepseek-r1',
-      'qwq',
-      'claude-opus-4',
-      'claude-sonnet-4',
-      'claude-3-5-sonnet',
-      'claude-3-5-haiku',
-    ];
-
     try {
       const modelsPage = (await client.models.list()) as any;
       const modelList: AiHubMixModelCard[] = modelsPage.data || [];
 
-      return modelList
-        .map((model) => {
-          const knownModel = AiHubMixModels.find(
-            (m) => model.id.toLowerCase() === m.id.toLowerCase(),
-          );
-
-          const modelId = model.id.toLowerCase();
-
-          return {
-            contextWindowTokens: knownModel?.contextWindowTokens ?? undefined,
-            displayName: knownModel?.displayName ?? model.id,
-            enabled: knownModel?.enabled || false,
-            functionCall:
-              functionCallKeywords.some((keyword) => modelId.includes(keyword)) ||
-              knownModel?.abilities?.functionCall ||
-              false,
-            id: model.id,
-            reasoning:
-              reasoningKeywords.some((keyword) => modelId.includes(keyword)) ||
-              knownModel?.abilities?.reasoning ||
-              false,
-            vision:
-              visionKeywords.some((keyword) => modelId.includes(keyword)) ||
-              knownModel?.abilities?.vision ||
-              false,
-          };
-        })
-        .filter(Boolean) as ChatModelCard[];
+      return await processMultiProviderModelList(modelList, 'aihubmix');
     } catch (error) {
       console.warn(
        'Failed to fetch AiHubMix models. Please ensure your AiHubMix API key is valid:',
@@ -97,25 +53,26 @@ export const LobeAiHubMixAI = createRouterRuntime({
   routers: [
     {
       apiType: 'anthropic',
-      models: async () => {
-        const { LOBE_DEFAULT_MODEL_LIST } = await import('@/config/aiModels');
-        return LOBE_DEFAULT_MODEL_LIST.map((m) => m.id).filter(
-          (id) => id.startsWith('claude') || id.startsWith('kimi-k2'),
-        );
-      },
+      models: LOBE_DEFAULT_MODEL_LIST.map((m) => m.id).filter(
+        (id) => detectModelProvider(id) === 'anthropic',
+      ),
       options: { baseURL },
     },
     {
       apiType: 'google',
-      models: async () => {
-        const { LOBE_DEFAULT_MODEL_LIST } = await import('@/config/aiModels');
-        return LOBE_DEFAULT_MODEL_LIST.map((m) => m.id).filter((id) => id.startsWith('gemini'));
-      },
+      models: LOBE_DEFAULT_MODEL_LIST.map((m) => m.id).filter(
+        (id) => detectModelProvider(id) === 'google',
+      ),
      options: { baseURL: urlJoin(baseURL, '/gemini') },
     },
     {
       apiType: 'openai',
-      options: { baseURL: urlJoin(baseURL, '/v1') },
+      options: {
+        baseURL: urlJoin(baseURL, '/v1'),
+        chatCompletion: {
+          handlePayload,
+        },
+      },
     },
   ],
 });
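
Note: the net effect of this rewrite is that router membership is now derived from `detectModelProvider` instead of hand-maintained `startsWith` lists, and OpenAI-family models are switched onto the Responses API by `handlePayload`. A quick sketch of that matching rule with illustrative model ids (membership in `responsesAPIModels` is the third trigger, not reproduced here):

// Mirrors the handlePayload condition above (sketch, not the exported code).
const needsResponsesAPI = (model: string) => model.includes('gpt-') || /^o\d/.test(model);

needsResponsesAPI('gpt-4o-mini'); // true: contains 'gpt-'
needsResponsesAPI('o3-mini'); // true: matches /^o\d/
needsResponsesAPI('claude-sonnet-4'); // false: handled by the anthropic router instead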
package/packages/model-runtime/src/anthropic/index.ts CHANGED
@@ -1,7 +1,5 @@
 import Anthropic, { ClientOptions } from '@anthropic-ai/sdk';
 
-import type { ChatModelCard } from '@/types/llm';
-
 import { LobeRuntimeAI } from '../BaseAI';
 import { AgentRuntimeErrorType } from '../error';
 import {
@@ -17,8 +15,10 @@ import { desensitizeUrl } from '../utils/desensitizeUrl';
 import { StreamingResponse } from '../utils/response';
 import { AnthropicStream } from '../utils/streams';
 import { handleAnthropicError } from './handleAnthropicError';
+import { processModelList, MODEL_LIST_CONFIGS } from '../utils/modelParse';
 
 export interface AnthropicModelCard {
+  created_at: string;
   display_name: string;
   id: string;
 }
@@ -218,8 +218,6 @@ export class LobeAnthropicAI implements LobeRuntimeAI {
   }
 
   async models() {
-    const { LOBE_DEFAULT_MODEL_LIST } = await import('@/config/aiModels');
-
     const url = `${this.baseURL}/v1/models`;
     const response = await fetch(url, {
       headers: {
@@ -232,30 +230,12 @@ export class LobeAnthropicAI implements LobeRuntimeAI {
 
     const modelList: AnthropicModelCard[] = json['data'];
 
-    return modelList
-      .map((model) => {
-        const knownModel = LOBE_DEFAULT_MODEL_LIST.find(
-          (m) => model.id.toLowerCase() === m.id.toLowerCase(),
-        );
-
-        return {
-          contextWindowTokens: knownModel?.contextWindowTokens ?? undefined,
+    const standardModelList = modelList.map((model) => ({
+      created: model.created_at,
      displayName: model.display_name,
-          enabled: knownModel?.enabled || false,
-          functionCall:
-            model.id.toLowerCase().includes('claude-3') ||
-            knownModel?.abilities?.functionCall ||
-            false,
      id: model.id,
-          reasoning: knownModel?.abilities?.reasoning || false,
-          vision:
-            (model.id.toLowerCase().includes('claude-3') &&
-              !model.id.toLowerCase().includes('claude-3-5-haiku')) ||
-            knownModel?.abilities?.vision ||
-            false,
-        };
-      })
-      .filter(Boolean) as ChatModelCard[];
+    }));
+    return processModelList(standardModelList, MODEL_LIST_CONFIGS.anthropic, 'anthropic');
   }
 
   private handleError(error: any): ChatCompletionErrorPayload {
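
Note: after this refactor the Anthropic runtime only normalizes the raw card (`created_at` to `created`, `display_name` to `displayName`) and defers ability detection to the shared parser. A sketch of the resulting call with an illustrative card; `MODEL_LIST_CONFIGS.anthropic` lives in `modelParse.ts` (not shown in this diff) and is assumed to carry the keyword rules that replaced the inline `claude-3` checks:

// Illustrative input, shaped like the AnthropicModelCard mapping above.
const standardModelList = [
  {
    created: '2025-02-19T00:00:00Z', // from created_at; ISO format assumed
    displayName: 'Claude 3.7 Sonnet',
    id: 'claude-3-7-sonnet-20250219',
  },
];
processModelList(standardModelList, MODEL_LIST_CONFIGS.anthropic, 'anthropic');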
package/packages/model-runtime/src/giteeai/index.ts CHANGED
@@ -1,6 +1,5 @@
-import type { ChatModelCard } from '@/types/llm';
-
 import { ModelProvider } from '../types';
+import { processMultiProviderModelList } from '../utils/modelParse';
 import { createOpenAICompatibleRuntime } from '../utils/openaiCompatibleFactory';
 
 export interface GiteeAIModelCard {
@@ -13,44 +12,10 @@ export const LobeGiteeAI = createOpenAICompatibleRuntime({
     chatCompletion: () => process.env.DEBUG_GITEE_AI_CHAT_COMPLETION === '1',
   },
   models: async ({ client }) => {
-    const { LOBE_DEFAULT_MODEL_LIST } = await import('@/config/aiModels');
-
-    const functionCallKeywords = ['qwen2.5', 'glm-4'];
-
-    const visionKeywords = ['internvl', 'qwen2-vl'];
-
-    const reasoningKeywords = ['deepseek-r1', 'qwq'];
-
     const modelsPage = (await client.models.list()) as any;
     const modelList: GiteeAIModelCard[] = modelsPage.data;
 
-    return modelList
-      .map((model) => {
-        const knownModel = LOBE_DEFAULT_MODEL_LIST.find(
-          (m) => model.id.toLowerCase() === m.id.toLowerCase(),
-        );
-
-        return {
-          contextWindowTokens: knownModel?.contextWindowTokens ?? undefined,
-          displayName: knownModel?.displayName ?? undefined,
-          enabled: knownModel?.enabled || false,
-          functionCall:
-            (functionCallKeywords.some((keyword) => model.id.toLowerCase().includes(keyword)) &&
-              !model.id.toLowerCase().includes('qwen2.5-coder')) ||
-            knownModel?.abilities?.functionCall ||
-            false,
-          id: model.id,
-          reasoning:
-            reasoningKeywords.some((keyword) => model.id.toLowerCase().includes(keyword)) ||
-            knownModel?.abilities?.reasoning ||
-            false,
-          vision:
-            visionKeywords.some((keyword) => model.id.toLowerCase().includes(keyword)) ||
-            knownModel?.abilities?.vision ||
-            false,
-        };
-      })
-      .filter(Boolean) as ChatModelCard[];
+    return await processMultiProviderModelList(modelList, 'giteeai');
   },
   provider: ModelProvider.GiteeAI,
 });
package/packages/model-runtime/src/github/index.ts CHANGED
@@ -1,17 +1,26 @@
-import type { ChatModelCard } from '@/types/llm';
-
 import { AgentRuntimeErrorType } from '../error';
 import { ModelProvider } from '../types';
+import { processMultiProviderModelList } from '../utils/modelParse';
 import { createOpenAICompatibleRuntime } from '../utils/openaiCompatibleFactory';
 import { pruneReasoningPayload } from '../utils/openaiHelpers';
 
 export interface GithubModelCard {
-  description: string;
-  friendly_name: string;
+  capabilities: string[];
+  html_url: string;
   id: string;
+  limits: {
+    max_input_tokens: number;
+    max_output_tokens: number;
+  };
   name: string;
+  publisher: string;
+  rate_limit_tier: string;
+  registry: string;
+  summary: string;
+  supported_input_modalities: string[];
+  supported_output_modalities: string[];
   tags: string[];
-  task: string;
+  version: string;
 }
 
 /* eslint-enable typescript-sort-keys/interface */
@@ -40,47 +49,27 @@ export const LobeGithubAI = createOpenAICompatibleRuntime({
     bizError: AgentRuntimeErrorType.ProviderBizError,
     invalidAPIKey: AgentRuntimeErrorType.InvalidGithubToken,
   },
-  models: async ({ client }) => {
-    const { LOBE_DEFAULT_MODEL_LIST } = await import('@/config/aiModels');
-
-    const functionCallKeywords = ['function', 'tool'];
-
-    const visionKeywords = ['vision'];
-
-    const reasoningKeywords = ['deepseek-r1', 'o1', 'o3', 'grok-3-mini'];
-
-    const modelsPage = (await client.models.list()) as any;
-    const modelList: GithubModelCard[] = modelsPage.body;
+  models: async () => {
+    const response = await fetch('https://models.github.ai/catalog/models');
+    const modelList: GithubModelCard[] = await response.json();
 
-    return modelList
-      .map((model) => {
-        const knownModel = LOBE_DEFAULT_MODEL_LIST.find(
-          (m) => model.name.toLowerCase() === m.id.toLowerCase(),
-        );
+    const formattedModels = modelList.map((model) => ({
+      contextWindowTokens: model.limits?.max_input_tokens + model.limits?.max_output_tokens,
+      description: model.summary,
+      displayName: model.name,
+      functionCall: model.capabilities?.includes('tool-calling') ?? undefined,
+      id: model.id,
+      maxOutput: model.limits?.max_output_tokens ?? undefined,
+      reasoning: model.tags?.includes('reasoning') ?? undefined,
+      releasedAt:
+        model.version && /^\d{4}-\d{2}-\d{2}$/.test(model.version) ? model.version : undefined,
+      vision:
+        (model.tags?.includes('multimodal') ||
+          model.supported_input_modalities?.includes('image')) ??
+        undefined,
+    }));
 
-        return {
-          contextWindowTokens: knownModel?.contextWindowTokens ?? undefined,
-          description: model.description,
-          displayName: model.friendly_name,
-          enabled: knownModel?.enabled || false,
-          functionCall:
-            functionCallKeywords.some((keyword) =>
-              model.description.toLowerCase().includes(keyword),
-            ) ||
-            knownModel?.abilities?.functionCall ||
-            false,
-          id: model.name,
-          reasoning:
-            reasoningKeywords.some((keyword) => model.name.toLowerCase().includes(keyword)) ||
-            knownModel?.abilities?.reasoning ||
-            false,
-          vision:
-            visionKeywords.some((keyword) => model.description.toLowerCase().includes(keyword)) ||
-            knownModel?.abilities?.vision ||
-            false,
-        };
-      })
-      .filter(Boolean) as ChatModelCard[];
+    return await processMultiProviderModelList(formattedModels, 'github');
   },
   provider: ModelProvider.Github,
 });
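
Note: one edge case worth flagging in the new catalog fetch: `contextWindowTokens` is computed as `max_input_tokens + max_output_tokens`, so for a catalog entry without `limits` the optional chaining leaves both operands `undefined` and the sum is `NaN` rather than `undefined`. A small sketch on a hypothetical entry:

// Hypothetical GithubModelCard-shaped entry (field values are illustrative).
const card = {
  capabilities: ['streaming', 'tool-calling'],
  id: 'openai/gpt-4o-mini',
  limits: { max_input_tokens: 128_000, max_output_tokens: 16_384 },
  name: 'GPT-4o mini',
  summary: 'Example summary',
  supported_input_modalities: ['text', 'image'],
  tags: ['multimodal'],
  version: '2024-07-18',
};

card.limits.max_input_tokens + card.limits.max_output_tokens; // 144384
// Without `limits`: undefined + undefined yields NaN, which consumers of
// contextWindowTokens may want to guard against.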
package/packages/model-runtime/src/modelscope/index.ts CHANGED
@@ -1,6 +1,5 @@
-import type { ChatModelCard } from '@/types/llm';
-
 import { ModelProvider } from '../types';
+import { processMultiProviderModelList } from '../utils/modelParse';
 import { createOpenAICompatibleRuntime } from '../utils/openaiCompatibleFactory';
 
 export interface ModelScopeModelCard {
@@ -16,46 +15,11 @@ export const LobeModelScopeAI = createOpenAICompatibleRuntime({
     chatCompletion: () => process.env.DEBUG_MODELSCOPE_CHAT_COMPLETION === '1',
   },
   models: async ({ client }) => {
-    const { LOBE_DEFAULT_MODEL_LIST } = await import('@/config/aiModels');
-
-    const functionCallKeywords = ['qwen', 'deepseek', 'llama'];
-
-    const visionKeywords = ['qwen-vl', 'qwen2-vl', 'llava'];
-
-    const reasoningKeywords = ['qwq', 'deepseek-r1'];
-
     try {
       const modelsPage = (await client.models.list()) as any;
       const modelList: ModelScopeModelCard[] = modelsPage.data || [];
 
-      return modelList
-        .map((model) => {
-          const knownModel = LOBE_DEFAULT_MODEL_LIST.find(
-            (m) => model.id.toLowerCase() === m.id.toLowerCase(),
-          );
-
-          const modelId = model.id.toLowerCase();
-
-          return {
-            contextWindowTokens: knownModel?.contextWindowTokens ?? undefined,
-            displayName: knownModel?.displayName ?? model.id,
-            enabled: knownModel?.enabled || false,
-            functionCall:
-              functionCallKeywords.some((keyword) => modelId.includes(keyword)) ||
-              knownModel?.abilities?.functionCall ||
-              false,
-            id: model.id,
-            reasoning:
-              reasoningKeywords.some((keyword) => modelId.includes(keyword)) ||
-              knownModel?.abilities?.reasoning ||
-              false,
-            vision:
-              visionKeywords.some((keyword) => modelId.includes(keyword)) ||
-              knownModel?.abilities?.vision ||
-              false,
-          };
-        })
-        .filter(Boolean) as ChatModelCard[];
+      return await processMultiProviderModelList(modelList, 'modelscope');
     } catch (error) {
       console.warn(
         'Failed to fetch ModelScope models. Please ensure your ModelScope API key is valid and your Alibaba Cloud account is properly bound:',
package/packages/model-runtime/src/moonshot/index.ts CHANGED
@@ -1,6 +1,5 @@
-import type { ChatModelCard } from '@/types/llm';
-
 import { ChatStreamPayload, ModelProvider } from '../types';
+import { MODEL_LIST_CONFIGS, processModelList } from '../utils/modelParse';
 import { createOpenAICompatibleRuntime } from '../utils/openaiCompatibleFactory';
 
 export interface MoonshotModelCard {
@@ -45,43 +44,10 @@ export const LobeMoonshotAI = createOpenAICompatibleRuntime({
     chatCompletion: () => process.env.DEBUG_MOONSHOT_CHAT_COMPLETION === '1',
   },
   models: async ({ client }) => {
-    const { LOBE_DEFAULT_MODEL_LIST } = await import('@/config/aiModels');
-
-    const functionCallKeywords = ['moonshot-v1', 'kimi-latest'];
-
-    const visionKeywords = ['kimi-latest', 'kimi-thinking', 'vision'];
-
-    const reasoningKeywords = ['thinking'];
-
     const modelsPage = (await client.models.list()) as any;
     const modelList: MoonshotModelCard[] = modelsPage.data;
 
-    return modelList
-      .map((model) => {
-        const knownModel = LOBE_DEFAULT_MODEL_LIST.find(
-          (m) => model.id.toLowerCase() === m.id.toLowerCase(),
-        );
-
-        return {
-          contextWindowTokens: knownModel?.contextWindowTokens ?? undefined,
-          displayName: knownModel?.displayName ?? undefined,
-          enabled: knownModel?.enabled || false,
-          functionCall:
-            functionCallKeywords.some((keyword) => model.id.toLowerCase().includes(keyword)) ||
-            knownModel?.abilities?.functionCall ||
-            false,
-          id: model.id,
-          reasoning:
-            reasoningKeywords.some((keyword) => model.id.toLowerCase().includes(keyword)) ||
-            knownModel?.abilities?.reasoning ||
-            false,
-          vision:
-            visionKeywords.some((keyword) => model.id.toLowerCase().includes(keyword)) ||
-            knownModel?.abilities?.vision ||
-            false,
-        };
-      })
-      .filter(Boolean) as ChatModelCard[];
+    return processModelList(modelList, MODEL_LIST_CONFIGS.moonshot, 'moonshot');
   },
   provider: ModelProvider.Moonshot,
 });
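
Note: taken together, these provider changes converge on the shared helpers in `utils/modelParse` (whose own +238/-40 diff is not reproduced in this section). A rough sketch of the contract the call sites above imply; the signatures and config fields here are assumptions inferred from usage, not the actual source:

import type { ChatModelCard } from '@/types/llm';

// Assumed per-family keyword config, mirroring the inline lists removed above.
interface ModelListConfig {
  functionCallKeywords?: string[];
  reasoningKeywords?: string[];
  visionKeywords?: string[];
}

// Single-family parsing: Anthropic and Moonshot pass MODEL_LIST_CONFIGS.<family>.
// Call sites both `return` and `await` it, so it may be sync or async.
declare function processModelList(
  modelList: { created?: number | string; displayName?: string; id: string }[],
  config: ModelListConfig,
  provider?: string,
): Promise<ChatModelCard[]> | ChatModelCard[];

// Multi-family parsing for aggregators (ai302, aihubmix, giteeai, github,
// modelscope); the string argument names the hosting provider.
declare function processMultiProviderModelList(
  modelList: { id: string }[],
  provider?: string,
): Promise<ChatModelCard[]>;

// Maps a model id to its upstream family, e.g. 'claude-*' to 'anthropic';
// used above to build the aihubmix router membership lists.
declare function detectModelProvider(modelId: string): string;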