@lobehub/chat 1.53.11 → 1.53.12

This diff compares the contents of two publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.
Files changed (41)
  1. package/CHANGELOG.md +25 -0
  2. package/changelog/v1.json +9 -0
  3. package/package.json +1 -1
  4. package/src/config/aiModels/spark.ts +9 -0
  5. package/src/libs/agent-runtime/ai360/index.ts +37 -21
  6. package/src/libs/agent-runtime/anthropic/index.ts +17 -5
  7. package/src/libs/agent-runtime/baichuan/index.ts +11 -2
  8. package/src/libs/agent-runtime/cloudflare/index.ts +22 -7
  9. package/src/libs/agent-runtime/deepseek/index.ts +29 -13
  10. package/src/libs/agent-runtime/fireworksai/index.ts +30 -18
  11. package/src/libs/agent-runtime/giteeai/index.ts +46 -30
  12. package/src/libs/agent-runtime/github/index.test.ts +0 -49
  13. package/src/libs/agent-runtime/github/index.ts +18 -6
  14. package/src/libs/agent-runtime/google/index.ts +17 -7
  15. package/src/libs/agent-runtime/groq/index.ts +43 -27
  16. package/src/libs/agent-runtime/higress/index.ts +45 -25
  17. package/src/libs/agent-runtime/huggingface/index.ts +20 -9
  18. package/src/libs/agent-runtime/hunyuan/index.ts +34 -18
  19. package/src/libs/agent-runtime/internlm/index.ts +27 -12
  20. package/src/libs/agent-runtime/lmstudio/index.ts +34 -0
  21. package/src/libs/agent-runtime/mistral/index.ts +24 -14
  22. package/src/libs/agent-runtime/moonshot/index.ts +28 -13
  23. package/src/libs/agent-runtime/novita/index.ts +35 -18
  24. package/src/libs/agent-runtime/ollama/index.test.ts +20 -1
  25. package/src/libs/agent-runtime/ollama/index.ts +33 -5
  26. package/src/libs/agent-runtime/openai/__snapshots__/index.test.ts.snap +108 -0
  27. package/src/libs/agent-runtime/openai/index.ts +43 -27
  28. package/src/libs/agent-runtime/openrouter/__snapshots__/index.test.ts.snap +39 -11
  29. package/src/libs/agent-runtime/openrouter/index.ts +51 -33
  30. package/src/libs/agent-runtime/qwen/index.ts +45 -29
  31. package/src/libs/agent-runtime/sensenova/index.ts +24 -6
  32. package/src/libs/agent-runtime/siliconcloud/index.ts +50 -34
  33. package/src/libs/agent-runtime/stepfun/index.ts +42 -26
  34. package/src/libs/agent-runtime/tencentcloud/index.ts +44 -0
  35. package/src/libs/agent-runtime/togetherai/index.ts +19 -6
  36. package/src/libs/agent-runtime/xai/index.ts +28 -13
  37. package/src/libs/agent-runtime/zeroone/index.ts +29 -13
  38. package/src/libs/agent-runtime/zhipu/index.test.ts +0 -9
  39. package/src/libs/agent-runtime/zhipu/index.ts +18 -6
  40. package/src/libs/agent-runtime/zhipu/authToken.test.ts +0 -18
  41. package/src/libs/agent-runtime/zhipu/authToken.ts +0 -22
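
Most of the agent-runtime changes below follow a single pattern: the static `models.transformModel` option is replaced by an async `models` resolver that lazily imports `LOBE_DEFAULT_MODEL_LIST` (removing the static import of `@/config/aiModels`), fetches the provider's model list through the OpenAI-compatible client, and falls back to the registry entry's `abilities` flags wherever the provider-specific keyword heuristics miss. A minimal sketch of that shared shape, distilled from the hunks below (`ExampleModelCard` is illustrative, and the per-provider keyword checks are elided):

import type { ChatModelCard } from '@/types/llm';

// Illustrative card shape; each runtime declares its own (GroqModelCard, HunyuanModelCard, ...).
interface ExampleModelCard {
  id: string;
}

const models = async ({ client }: { client: any }) => {
  // Lazy import keeps '@/config/aiModels' out of the static module graph.
  const { LOBE_DEFAULT_MODEL_LIST } = await import('@/config/aiModels');

  const modelsPage = await client.models.list() as any;
  const modelList: ExampleModelCard[] = modelsPage.data;

  return modelList
    .map((model) => {
      // Case-insensitive lookup against the built-in model registry.
      const knownModel = LOBE_DEFAULT_MODEL_LIST.find(
        (m) => model.id.toLowerCase() === m.id.toLowerCase(),
      );

      return {
        contextWindowTokens: knownModel?.contextWindowTokens ?? undefined,
        displayName: knownModel?.displayName ?? undefined,
        enabled: knownModel?.enabled || false,
        // The real runtimes OR their keyword heuristics in front of these registry fallbacks.
        functionCall: knownModel?.abilities?.functionCall || false,
        id: model.id,
        reasoning: knownModel?.abilities?.reasoning || false,
        vision: knownModel?.abilities?.vision || false,
      };
    })
    .filter(Boolean) as ChatModelCard[];
};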

package/src/libs/agent-runtime/google/index.ts

@@ -8,7 +8,6 @@ import {
   SchemaType,
 } from '@google/generative-ai';
 
-import { LOBE_DEFAULT_MODEL_LIST } from '@/config/aiModels';
 import type { ChatModelCard } from '@/types/llm';
 import { imageUrlToBase64 } from '@/utils/imageToBase64';
 import { safeParseJSON } from '@/utils/safeParseJSON';
@@ -137,6 +136,8 @@ export class LobeGoogleAI implements LobeRuntimeAI {
   }
 
   async models() {
+    const { LOBE_DEFAULT_MODEL_LIST } = await import('@/config/aiModels');
+
     const url = `${this.baseURL}/v1beta/models?key=${this.apiKey}`;
     const response = await fetch(url, {
       method: 'GET',
@@ -149,17 +150,26 @@ export class LobeGoogleAI implements LobeRuntimeAI {
       .map((model) => {
         const modelName = model.name.replace(/^models\//, '');
 
+        const knownModel = LOBE_DEFAULT_MODEL_LIST.find((m) => modelName.toLowerCase() === m.id.toLowerCase());
+
         return {
           contextWindowTokens: model.inputTokenLimit + model.outputTokenLimit,
           displayName: model.displayName,
-          enabled: LOBE_DEFAULT_MODEL_LIST.find((m) => modelName === m.id)?.enabled || false,
-          functionCall: modelName.toLowerCase().includes('gemini'),
+          enabled: knownModel?.enabled || false,
+          functionCall:
+            modelName.toLowerCase().includes('gemini') && !modelName.toLowerCase().includes('thinking')
+            || knownModel?.abilities?.functionCall
+            || false,
           id: modelName,
-          reasoning: modelName.toLowerCase().includes('thinking'),
+          reasoning:
+            modelName.toLowerCase().includes('thinking')
+            || knownModel?.abilities?.reasoning
+            || false,
           vision:
-            modelName.toLowerCase().includes('vision') ||
-            (modelName.toLowerCase().includes('gemini') &&
-              !modelName.toLowerCase().includes('gemini-1.0')),
+            modelName.toLowerCase().includes('vision')
+            || (modelName.toLowerCase().includes('gemini') && !modelName.toLowerCase().includes('gemini-1.0'))
+            || knownModel?.abilities?.vision
+            || false,
         };
       })
       .filter(Boolean) as ChatModelCard[];

package/src/libs/agent-runtime/groq/index.ts

@@ -2,7 +2,7 @@ import { AgentRuntimeErrorType } from '../error';
 import { ModelProvider } from '../types';
 import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
 
-import { LOBE_DEFAULT_MODEL_LIST } from '@/config/aiModels';
+import type { ChatModelCard } from '@/types/llm';
 
 export interface GroqModelCard {
   context_window: number;
@@ -31,33 +31,49 @@ export const LobeGroq = LobeOpenAICompatibleFactory({
   debug: {
     chatCompletion: () => process.env.DEBUG_GROQ_CHAT_COMPLETION === '1',
   },
-  models: {
-    transformModel: (m) => {
-      const functionCallKeywords = [
-        'tool',
-        'llama-3.3',
-        'llama-3.1',
-        'llama3-',
-        'mixtral-8x7b-32768',
-        'gemma2-9b-it',
-      ];
-
-      const reasoningKeywords = [
-        'deepseek-r1',
-      ];
-
-      const model = m as unknown as GroqModelCard;
+  models: async ({ client }) => {
+    const { LOBE_DEFAULT_MODEL_LIST } = await import('@/config/aiModels');
 
-      return {
-        contextWindowTokens: model.context_window,
-        displayName: LOBE_DEFAULT_MODEL_LIST.find((m) => model.id === m.id)?.displayName ?? undefined,
-        enabled: LOBE_DEFAULT_MODEL_LIST.find((m) => model.id === m.id)?.enabled || false,
-        functionCall: functionCallKeywords.some(keyword => model.id.toLowerCase().includes(keyword)),
-        id: model.id,
-        reasoning: reasoningKeywords.some(keyword => model.id.toLowerCase().includes(keyword)),
-        vision: model.id.toLowerCase().includes('vision'),
-      };
-    },
+    const functionCallKeywords = [
+      'tool',
+      'llama-3.3',
+      'llama-3.1',
+      'llama3-',
+      'mixtral-8x7b-32768',
+      'gemma2-9b-it',
+    ];
+
+    const reasoningKeywords = [
+      'deepseek-r1',
+    ];
+
+    const modelsPage = await client.models.list() as any;
+    const modelList: GroqModelCard[] = modelsPage.data;
+
+    return modelList
+      .map((model) => {
+        const knownModel = LOBE_DEFAULT_MODEL_LIST.find((m) => model.id.toLowerCase() === m.id.toLowerCase());
+
+        return {
+          contextWindowTokens: model.context_window,
+          displayName: knownModel?.displayName ?? undefined,
+          enabled: knownModel?.enabled || false,
+          functionCall:
+            functionCallKeywords.some(keyword => model.id.toLowerCase().includes(keyword))
+            || knownModel?.abilities?.functionCall
+            || false,
+          id: model.id,
+          reasoning:
+            reasoningKeywords.some(keyword => model.id.toLowerCase().includes(keyword))
+            || knownModel?.abilities?.reasoning
+            || false,
+          vision:
+            model.id.toLowerCase().includes('vision')
+            || knownModel?.abilities?.vision
+            || false,
+        };
+      })
+      .filter(Boolean) as ChatModelCard[];
   },
   provider: ModelProvider.Groq,
 });

package/src/libs/agent-runtime/higress/index.ts

@@ -1,11 +1,19 @@
 import { uniqueId } from 'lodash-es';
 
-import { LOBE_DEFAULT_MODEL_LIST } from '@/config/modelProviders';
-
 import { ModelProvider } from '../types';
 import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
 
-// import { OpenRouterModelCard } from './type';
+import type { ChatModelCard } from '@/types/llm';
+
+export interface HigressModelCard {
+  context_length: number;
+  description: string;
+  id: string;
+  name: string;
+  top_provider: {
+    max_completion_tokens: number;
+  }
+}
 
 export const LobeHigressAI = LobeOpenAICompatibleFactory({
   constructorOptions: {
@@ -18,29 +26,41 @@ export const LobeHigressAI = LobeOpenAICompatibleFactory({
   debug: {
     chatCompletion: () => process.env.DEBUG_HIGRESS_CHAT_COMPLETION === '1',
   },
-  models: {
-    transformModel: (m) => {
-      const model = m as any;
+  models: async ({ client }) => {
+    const { LOBE_DEFAULT_MODEL_LIST } = await import('@/config/aiModels');
 
-      return {
-        contextWindowTokens: model.context_length,
-        description: model.description,
-        displayName: model.name,
-        enabled: LOBE_DEFAULT_MODEL_LIST.find((m) => model.id === m.id)?.enabled || false,
-        functionCall:
-          model.description.includes('function calling') || model.description.includes('tools'),
-        id: model.id,
-        maxTokens:
-          typeof model.top_provider.max_completion_tokens === 'number'
-            ? model.top_provider.max_completion_tokens
-            : undefined,
-        reasoning: model.description.includes('reasoning'),
-        vision:
-          model.description.includes('vision') ||
-          model.description.includes('multimodal') ||
-          model.id.includes('vision'),
-      };
-    },
+    const modelsPage = await client.models.list() as any;
+    const modelList: HigressModelCard[] = modelsPage.data;
+
+    return modelList
+      .map((model) => {
+        const knownModel = LOBE_DEFAULT_MODEL_LIST.find((m) => model.id.toLowerCase() === m.id.toLowerCase());
+
+        return {
+          contextWindowTokens: model.context_length,
+          description: model.description,
+          displayName: model.name,
+          enabled: knownModel?.enabled || false,
+          functionCall:
+            model.description.includes('function calling')
+            || model.description.includes('tools')
+            || knownModel?.abilities?.functionCall
+            || false,
+          id: model.id,
+          maxTokens: model.top_provider.max_completion_tokens,
+          reasoning:
+            model.description.includes('reasoning')
+            || knownModel?.abilities?.reasoning
+            || false,
+          vision:
+            model.description.includes('vision')
+            || model.description.includes('multimodal')
+            || model.id.includes('vision')
+            || knownModel?.abilities?.vision
+            || false,
+        };
+      })
+      .filter(Boolean) as ChatModelCard[];
   },
   provider: ModelProvider.Higress,
 });

package/src/libs/agent-runtime/huggingface/index.ts

@@ -6,7 +6,6 @@ import { ModelProvider } from '../types';
 import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
 import { convertIterableToStream } from '../utils/streams';
 
-import { LOBE_DEFAULT_MODEL_LIST } from '@/config/aiModels';
 import type { ChatModelCard } from '@/types/llm';
 
 export interface HuggingFaceModelCard {
@@ -56,6 +55,8 @@ export const LobeHuggingFaceAI = LobeOpenAICompatibleFactory({
     chatCompletion: () => process.env.DEBUG_HUGGINGFACE_CHAT_COMPLETION === '1',
   },
   models: async () => {
+    const { LOBE_DEFAULT_MODEL_LIST } = await import('@/config/aiModels');
+
     const visionKeywords = [
       'image-text-to-text',
       'multimodal',
@@ -79,16 +80,26 @@
 
     return modelList
       .map((model) => {
+        const knownModel = LOBE_DEFAULT_MODEL_LIST.find((m) => model.id.toLowerCase() === m.id.toLowerCase());
+
         return {
-          contextWindowTokens: LOBE_DEFAULT_MODEL_LIST.find((m) => model.id === m.id)?.contextWindowTokens ?? undefined,
-          displayName: LOBE_DEFAULT_MODEL_LIST.find((m) => model.id === m.id)?.displayName ?? undefined,
-          enabled: LOBE_DEFAULT_MODEL_LIST.find((m) => model.id === m.id)?.enabled || false,
-          functionCall: model.tags.some(tag => tag.toLowerCase().includes('function-calling')),
+          contextWindowTokens: knownModel?.contextWindowTokens ?? undefined,
+          displayName: knownModel?.displayName ?? undefined,
+          enabled: knownModel?.enabled || false,
+          functionCall:
+            model.tags.some(tag => tag.toLowerCase().includes('function-calling'))
+            || knownModel?.abilities?.functionCall
+            || false,
           id: model.id,
-          reasoning: model.tags.some(tag => tag.toLowerCase().includes('reasoning')) || reasoningKeywords.some(keyword => model.id.toLowerCase().includes(keyword)),
-          vision: model.tags.some(tag =>
-            visionKeywords.some(keyword => tag.toLowerCase().includes(keyword))
-          ),
+          reasoning:
+            model.tags.some(tag => tag.toLowerCase().includes('reasoning'))
+            || reasoningKeywords.some(keyword => model.id.toLowerCase().includes(keyword))
+            || knownModel?.abilities?.reasoning
+            || false,
+          vision:
+            model.tags.some(tag => visionKeywords.some(keyword => tag.toLowerCase().includes(keyword)))
+            || knownModel?.abilities?.vision
+            || false,
         };
       })
      .filter(Boolean) as ChatModelCard[];

package/src/libs/agent-runtime/hunyuan/index.ts

@@ -1,7 +1,7 @@
 import { ModelProvider } from '../types';
 import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
 
-import { LOBE_DEFAULT_MODEL_LIST } from '@/config/aiModels';
+import type { ChatModelCard } from '@/types/llm';
 
 export interface HunyuanModelCard {
   id: string;
@@ -12,25 +12,41 @@ export const LobeHunyuanAI = LobeOpenAICompatibleFactory({
   debug: {
     chatCompletion: () => process.env.DEBUG_HUNYUAN_CHAT_COMPLETION === '1',
   },
-  models: {
-    transformModel: (m) => {
-      const functionCallKeywords = [
-        'hunyuan-functioncall',
-        'hunyuan-turbo',
-        'hunyuan-pro',
-      ];
+  models: async ({ client }) => {
+    const { LOBE_DEFAULT_MODEL_LIST } = await import('@/config/aiModels');
 
-      const model = m as unknown as HunyuanModelCard;
+    const functionCallKeywords = [
+      'hunyuan-functioncall',
+      'hunyuan-turbo',
+      'hunyuan-pro',
+    ];
 
-      return {
-        contextWindowTokens: LOBE_DEFAULT_MODEL_LIST.find((m) => model.id === m.id)?.contextWindowTokens ?? undefined,
-        displayName: LOBE_DEFAULT_MODEL_LIST.find((m) => model.id === m.id)?.displayName ?? undefined,
-        enabled: LOBE_DEFAULT_MODEL_LIST.find((m) => model.id === m.id)?.enabled || false,
-        functionCall: functionCallKeywords.some(keyword => model.id.toLowerCase().includes(keyword)) && !model.id.toLowerCase().includes('vision'),
-        id: model.id,
-        vision: model.id.toLowerCase().includes('vision'),
-      };
-    },
+    const modelsPage = await client.models.list() as any;
+    const modelList: HunyuanModelCard[] = modelsPage.data;
+
+    return modelList
+      .map((model) => {
+        const knownModel = LOBE_DEFAULT_MODEL_LIST.find((m) => model.id.toLowerCase() === m.id.toLowerCase());
+
+        return {
+          contextWindowTokens: knownModel?.contextWindowTokens ?? undefined,
+          displayName: knownModel?.displayName ?? undefined,
+          enabled: knownModel?.enabled || false,
+          functionCall:
+            functionCallKeywords.some(keyword => model.id.toLowerCase().includes(keyword)) && !model.id.toLowerCase().includes('vision')
+            || knownModel?.abilities?.functionCall
+            || false,
+          id: model.id,
+          reasoning:
+            knownModel?.abilities?.reasoning
+            || false,
+          vision:
+            model.id.toLowerCase().includes('vision')
+            || knownModel?.abilities?.vision
+            || false,
+        };
+      })
+      .filter(Boolean) as ChatModelCard[];
   },
   provider: ModelProvider.Hunyuan,
 });

package/src/libs/agent-runtime/internlm/index.ts

@@ -1,7 +1,7 @@
 import { ModelProvider } from '../types';
 import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
 
-import { LOBE_DEFAULT_MODEL_LIST } from '@/config/aiModels';
+import type { ChatModelCard } from '@/types/llm';
 
 export interface InternLMModelCard {
   id: string;
@@ -20,18 +20,33 @@ export const LobeInternLMAI = LobeOpenAICompatibleFactory({
   debug: {
     chatCompletion: () => process.env.DEBUG_INTERNLM_CHAT_COMPLETION === '1',
   },
-  models: {
-    transformModel: (m) => {
-      const model = m as unknown as InternLMModelCard;
+  models: async ({ client }) => {
+    const { LOBE_DEFAULT_MODEL_LIST } = await import('@/config/aiModels');
 
-      return {
-        contextWindowTokens: LOBE_DEFAULT_MODEL_LIST.find((m) => model.id === m.id)?.contextWindowTokens ?? undefined,
-        displayName: LOBE_DEFAULT_MODEL_LIST.find((m) => model.id === m.id)?.displayName ?? undefined,
-        enabled: LOBE_DEFAULT_MODEL_LIST.find((m) => model.id === m.id)?.enabled || false,
-        functionCall: true,
-        id: model.id,
-      };
-    },
+    const modelsPage = await client.models.list() as any;
+    const modelList: InternLMModelCard[] = modelsPage.data;
+
+    return modelList
+      .map((model) => {
+        const knownModel = LOBE_DEFAULT_MODEL_LIST.find((m) => model.id.toLowerCase() === m.id.toLowerCase());
+
+        return {
+          contextWindowTokens: knownModel?.contextWindowTokens ?? undefined,
+          displayName: knownModel?.displayName ?? undefined,
+          enabled: knownModel?.enabled || false,
+          functionCall:
+            knownModel?.abilities?.functionCall
+            || false,
+          id: model.id,
+          reasoning:
+            knownModel?.abilities?.reasoning
+            || false,
+          vision:
+            knownModel?.abilities?.vision
+            || false,
+        };
+      })
+      .filter(Boolean) as ChatModelCard[];
   },
   provider: ModelProvider.InternLM,
 });

package/src/libs/agent-runtime/lmstudio/index.ts

@@ -1,11 +1,45 @@
 import { ModelProvider } from '../types';
 import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
 
+import type { ChatModelCard } from '@/types/llm';
+
+export interface LMStudioModelCard {
+  id: string;
+}
+
 export const LobeLMStudioAI = LobeOpenAICompatibleFactory({
   apiKey: 'placeholder-to-avoid-error',
   baseURL: 'http://127.0.0.1:1234/v1',
   debug: {
     chatCompletion: () => process.env.DEBUG_LMSTUDIO_CHAT_COMPLETION === '1',
   },
+  models: async ({ client }) => {
+    const { LOBE_DEFAULT_MODEL_LIST } = await import('@/config/aiModels');
+
+    const modelsPage = await client.models.list() as any;
+    const modelList: LMStudioModelCard[] = modelsPage.data;
+
+    return modelList
+      .map((model) => {
+        const knownModel = LOBE_DEFAULT_MODEL_LIST.find((m) => model.id.toLowerCase() === m.id.toLowerCase());
+
+        return {
+          contextWindowTokens: knownModel?.contextWindowTokens ?? undefined,
+          displayName: knownModel?.displayName ?? undefined,
+          enabled: knownModel?.enabled || false,
+          functionCall:
+            knownModel?.abilities?.functionCall
+            || false,
+          id: model.id,
+          reasoning:
+            knownModel?.abilities?.reasoning
+            || false,
+          vision:
+            knownModel?.abilities?.vision
+            || false,
+        };
+      })
+      .filter(Boolean) as ChatModelCard[];
+  },
   provider: ModelProvider.LMStudio,
 });

package/src/libs/agent-runtime/mistral/index.ts

@@ -1,7 +1,7 @@
 import { ModelProvider } from '../types';
 import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
 
-import { LOBE_DEFAULT_MODEL_LIST } from '@/config/aiModels';
+import type { ChatModelCard } from '@/types/llm';
 
 export interface MistralModelCard {
   capabilities: {
@@ -30,20 +30,30 @@ export const LobeMistralAI = LobeOpenAICompatibleFactory({
   debug: {
     chatCompletion: () => process.env.DEBUG_MISTRAL_CHAT_COMPLETION === '1',
   },
-  models: {
-    transformModel: (m) => {
-      const model = m as unknown as MistralModelCard;
+  models: async ({ client }) => {
+    const { LOBE_DEFAULT_MODEL_LIST } = await import('@/config/aiModels');
 
-      return {
-        contextWindowTokens: model.max_context_length,
-        description: model.description,
-        displayName: LOBE_DEFAULT_MODEL_LIST.find((m) => model.id === m.id)?.displayName ?? undefined,
-        enabled: LOBE_DEFAULT_MODEL_LIST.find((m) => model.id === m.id)?.enabled || false,
-        functionCall: model.capabilities.function_calling,
-        id: model.id,
-        vision: model.capabilities.vision,
-      };
-    },
+    const modelsPage = await client.models.list() as any;
+    const modelList: MistralModelCard[] = modelsPage.data;
+
+    return modelList
+      .map((model) => {
+        const knownModel = LOBE_DEFAULT_MODEL_LIST.find((m) => model.id.toLowerCase() === m.id.toLowerCase());
+
+        return {
+          contextWindowTokens: model.max_context_length,
+          description: model.description,
+          displayName: knownModel?.displayName ?? undefined,
+          enabled: knownModel?.enabled || false,
+          functionCall: model.capabilities.function_calling,
+          id: model.id,
+          reasoning:
+            knownModel?.abilities?.reasoning
+            || false,
+          vision: model.capabilities.vision,
+        };
+      })
+      .filter(Boolean) as ChatModelCard[];
   },
   provider: ModelProvider.Mistral,
 });

package/src/libs/agent-runtime/moonshot/index.ts

@@ -3,7 +3,7 @@ import OpenAI from 'openai';
 import { ChatStreamPayload, ModelProvider } from '../types';
 import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
 
-import { LOBE_DEFAULT_MODEL_LIST } from '@/config/aiModels';
+import type { ChatModelCard } from '@/types/llm';
 
 export interface MoonshotModelCard {
   id: string;
@@ -24,19 +24,34 @@ export const LobeMoonshotAI = LobeOpenAICompatibleFactory({
   debug: {
     chatCompletion: () => process.env.DEBUG_MOONSHOT_CHAT_COMPLETION === '1',
   },
-  models: {
-    transformModel: (m) => {
-      const model = m as unknown as MoonshotModelCard;
+  models: async ({ client }) => {
+    const { LOBE_DEFAULT_MODEL_LIST } = await import('@/config/aiModels');
 
-      return {
-        contextWindowTokens: LOBE_DEFAULT_MODEL_LIST.find((m) => model.id === m.id)?.contextWindowTokens ?? undefined,
-        displayName: LOBE_DEFAULT_MODEL_LIST.find((m) => model.id === m.id)?.displayName ?? undefined,
-        enabled: LOBE_DEFAULT_MODEL_LIST.find((m) => model.id === m.id)?.enabled || false,
-        functionCall: true,
-        id: model.id,
-        vision: model.id.toLowerCase().includes('vision'),
-      };
-    },
+    const modelsPage = await client.models.list() as any;
+    const modelList: MoonshotModelCard[] = modelsPage.data;
+
+    return modelList
+      .map((model) => {
+        const knownModel = LOBE_DEFAULT_MODEL_LIST.find((m) => model.id.toLowerCase() === m.id.toLowerCase());
+
+        return {
+          contextWindowTokens: knownModel?.contextWindowTokens ?? undefined,
+          displayName: knownModel?.displayName ?? undefined,
+          enabled: knownModel?.enabled || false,
+          functionCall:
+            knownModel?.abilities?.functionCall
+            || false,
+          id: model.id,
+          reasoning:
+            knownModel?.abilities?.reasoning
+            || false,
+          vision:
+            model.id.toLowerCase().includes('vision')
+            || knownModel?.abilities?.vision
+            || false,
+        };
+      })
+      .filter(Boolean) as ChatModelCard[];
   },
   provider: ModelProvider.Moonshot,
 });

package/src/libs/agent-runtime/novita/index.ts

@@ -2,7 +2,7 @@ import { ModelProvider } from '../types';
 import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
 import { NovitaModelCard } from './type';
 
-import { LOBE_DEFAULT_MODEL_LIST } from '@/config/aiModels';
+import type { ChatModelCard } from '@/types/llm';
 
 export const LobeNovitaAI = LobeOpenAICompatibleFactory({
   baseURL: 'https://api.novita.ai/v3/openai',
@@ -14,25 +14,42 @@ export const LobeNovitaAI = LobeOpenAICompatibleFactory({
   debug: {
     chatCompletion: () => process.env.DEBUG_NOVITA_CHAT_COMPLETION === '1',
   },
-  models: {
-    transformModel: (m) => {
-      const reasoningKeywords = [
-        'deepseek-r1',
-      ];
+  models: async ({ client }) => {
+    const { LOBE_DEFAULT_MODEL_LIST } = await import('@/config/aiModels');
 
-      const model = m as unknown as NovitaModelCard;
+    const reasoningKeywords = [
+      'deepseek-r1',
+    ];
 
-      return {
-        contextWindowTokens: model.context_size,
-        description: model.description,
-        displayName: model.title,
-        enabled: LOBE_DEFAULT_MODEL_LIST.find((m) => model.id === m.id)?.enabled || false,
-        functionCall: model.description.toLowerCase().includes('function calling'),
-        id: model.id,
-        reasoning: model.description.toLowerCase().includes('reasoning task') || reasoningKeywords.some(keyword => model.id.toLowerCase().includes(keyword)),
-        vision: model.description.toLowerCase().includes('vision'),
-      };
-    },
+    const modelsPage = await client.models.list() as any;
+    const modelList: NovitaModelCard[] = modelsPage.data;
+
+    return modelList
+      .map((model) => {
+        const knownModel = LOBE_DEFAULT_MODEL_LIST.find((m) => model.id.toLowerCase() === m.id.toLowerCase());
+
+        return {
+          contextWindowTokens: model.context_size,
+          description: model.description,
+          displayName: model.title,
+          enabled: knownModel?.enabled || false,
+          functionCall:
+            model.description.toLowerCase().includes('function calling')
+            || knownModel?.abilities?.functionCall
+            || false,
+          id: model.id,
+          reasoning:
+            model.description.toLowerCase().includes('reasoning task')
+            || reasoningKeywords.some(keyword => model.id.toLowerCase().includes(keyword))
+            || knownModel?.abilities?.reasoning
+            || false,
+          vision:
+            model.description.toLowerCase().includes('vision')
+            || knownModel?.abilities?.vision
+            || false,
+        };
+      })
+      .filter(Boolean) as ChatModelCard[];
   },
   provider: ModelProvider.Novita,
 });

package/src/libs/agent-runtime/ollama/index.test.ts

@@ -145,7 +145,26 @@ describe('LobeOllamaAI', () => {
       const models = await ollamaAI.models();
 
       expect(listMock).toHaveBeenCalled();
-      expect(models).toEqual([{ id: 'model-1' }, { id: 'model-2' }]);
+      expect(models).toEqual([
+        {
+          contextWindowTokens: undefined,
+          displayName: undefined,
+          enabled: false,
+          functionCall: false,
+          id: 'model-1',
+          reasoning: false,
+          vision: false
+        },
+        {
+          contextWindowTokens: undefined,
+          displayName: undefined,
+          enabled: false,
+          functionCall: false,
+          id: 'model-2',
+          reasoning: false,
+          vision: false
+        }
+      ]);
     });
   });
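
The rewritten Ollama test expectation follows from the same pattern: `LobeOllamaAI.models()` now returns fully shaped model cards rather than bare `{ id }` objects, so models absent from `LOBE_DEFAULT_MODEL_LIST` come back with `enabled: false` and every capability flag defaulting to `false`.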