@lobehub/chat 1.53.10 → 1.53.12

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (62)
  1. package/CHANGELOG.md +50 -0
  2. package/changelog/v1.json +18 -0
  3. package/locales/ar/modelProvider.json +2 -2
  4. package/locales/bg-BG/modelProvider.json +2 -2
  5. package/locales/de-DE/modelProvider.json +2 -2
  6. package/locales/en-US/modelProvider.json +2 -2
  7. package/locales/es-ES/modelProvider.json +2 -2
  8. package/locales/fa-IR/modelProvider.json +2 -2
  9. package/locales/fr-FR/modelProvider.json +2 -2
  10. package/locales/it-IT/modelProvider.json +2 -2
  11. package/locales/ja-JP/modelProvider.json +2 -2
  12. package/locales/ko-KR/modelProvider.json +2 -2
  13. package/locales/nl-NL/modelProvider.json +2 -2
  14. package/locales/pl-PL/modelProvider.json +2 -2
  15. package/locales/pt-BR/modelProvider.json +2 -2
  16. package/locales/ru-RU/modelProvider.json +2 -2
  17. package/locales/tr-TR/modelProvider.json +2 -2
  18. package/locales/vi-VN/modelProvider.json +2 -2
  19. package/locales/zh-CN/modelProvider.json +3 -3
  20. package/locales/zh-TW/modelProvider.json +2 -2
  21. package/package.json +1 -1
  22. package/src/app/[variants]/(main)/settings/provider/features/CreateNewProvider/index.tsx +8 -8
  23. package/src/config/aiModels/spark.ts +9 -0
  24. package/src/libs/agent-runtime/ai360/index.ts +37 -21
  25. package/src/libs/agent-runtime/anthropic/index.ts +17 -5
  26. package/src/libs/agent-runtime/baichuan/index.ts +11 -2
  27. package/src/libs/agent-runtime/cloudflare/index.ts +22 -7
  28. package/src/libs/agent-runtime/deepseek/index.ts +29 -13
  29. package/src/libs/agent-runtime/fireworksai/index.ts +30 -18
  30. package/src/libs/agent-runtime/giteeai/index.ts +46 -30
  31. package/src/libs/agent-runtime/github/index.test.ts +0 -49
  32. package/src/libs/agent-runtime/github/index.ts +18 -6
  33. package/src/libs/agent-runtime/google/index.ts +17 -7
  34. package/src/libs/agent-runtime/groq/index.ts +43 -27
  35. package/src/libs/agent-runtime/higress/index.ts +45 -25
  36. package/src/libs/agent-runtime/huggingface/index.ts +20 -9
  37. package/src/libs/agent-runtime/hunyuan/index.ts +34 -18
  38. package/src/libs/agent-runtime/internlm/index.ts +27 -12
  39. package/src/libs/agent-runtime/lmstudio/index.ts +34 -0
  40. package/src/libs/agent-runtime/mistral/index.ts +24 -14
  41. package/src/libs/agent-runtime/moonshot/index.ts +28 -13
  42. package/src/libs/agent-runtime/novita/index.ts +35 -18
  43. package/src/libs/agent-runtime/ollama/index.test.ts +20 -1
  44. package/src/libs/agent-runtime/ollama/index.ts +33 -5
  45. package/src/libs/agent-runtime/openai/__snapshots__/index.test.ts.snap +108 -0
  46. package/src/libs/agent-runtime/openai/index.ts +43 -27
  47. package/src/libs/agent-runtime/openrouter/__snapshots__/index.test.ts.snap +39 -11
  48. package/src/libs/agent-runtime/openrouter/index.ts +51 -33
  49. package/src/libs/agent-runtime/qwen/index.ts +45 -29
  50. package/src/libs/agent-runtime/sensenova/index.ts +24 -6
  51. package/src/libs/agent-runtime/siliconcloud/index.ts +50 -34
  52. package/src/libs/agent-runtime/stepfun/index.ts +42 -26
  53. package/src/libs/agent-runtime/tencentcloud/index.ts +44 -0
  54. package/src/libs/agent-runtime/togetherai/index.ts +19 -6
  55. package/src/libs/agent-runtime/xai/index.ts +28 -13
  56. package/src/libs/agent-runtime/zeroone/index.ts +29 -13
  57. package/src/libs/agent-runtime/zhipu/index.test.ts +0 -9
  58. package/src/libs/agent-runtime/zhipu/index.ts +18 -6
  59. package/src/locales/default/modelProvider.ts +1 -2
  60. package/src/server/manifest.ts +2 -2
  61. package/src/libs/agent-runtime/zhipu/authToken.test.ts +0 -18
  62. package/src/libs/agent-runtime/zhipu/authToken.ts +0 -22
package/src/libs/agent-runtime/openai/index.ts

@@ -1,7 +1,7 @@
 import { ChatStreamPayload, ModelProvider, OpenAIChatMessage } from '../types';
 import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
 
-import { LOBE_DEFAULT_MODEL_LIST } from '@/config/aiModels';
+import type { ChatModelCard } from '@/types/llm';
 
 export interface OpenAIModelCard {
   id: string;
@@ -55,36 +55,52 @@ export const LobeOpenAI = LobeOpenAICompatibleFactory({
   debug: {
     chatCompletion: () => process.env.DEBUG_OPENAI_CHAT_COMPLETION === '1',
   },
-  models: {
-    transformModel: (m) => {
-      const functionCallKeywords = [
-        'gpt-4',
-        'gpt-3.5',
-        'o3-mini',
-      ];
+  models: async ({ client }) => {
+    const { LOBE_DEFAULT_MODEL_LIST } = await import('@/config/aiModels');
 
-      const visionKeywords = [
-        'gpt-4o',
-        'vision',
-      ];
+    const functionCallKeywords = [
+      'gpt-4',
+      'gpt-3.5',
+      'o3-mini',
+    ];
 
-      const reasoningKeywords = [
-        'o1',
-        'o3',
-      ];
+    const visionKeywords = [
+      'gpt-4o',
+      'vision',
+    ];
 
-      const model = m as unknown as OpenAIModelCard;
+    const reasoningKeywords = [
+      'o1',
+      'o3',
+    ];
 
-      return {
-        contextWindowTokens: LOBE_DEFAULT_MODEL_LIST.find((m) => model.id === m.id)?.contextWindowTokens ?? undefined,
-        displayName: LOBE_DEFAULT_MODEL_LIST.find((m) => model.id === m.id)?.displayName ?? undefined,
-        enabled: LOBE_DEFAULT_MODEL_LIST.find((m) => model.id === m.id)?.enabled || false,
-        functionCall: functionCallKeywords.some(keyword => model.id.toLowerCase().includes(keyword)) && !model.id.toLowerCase().includes('audio'),
-        id: model.id,
-        reasoning: reasoningKeywords.some(keyword => model.id.toLowerCase().includes(keyword)),
-        vision: visionKeywords.some(keyword => model.id.toLowerCase().includes(keyword)) && !model.id.toLowerCase().includes('audio'),
-      };
-    },
+    const modelsPage = await client.models.list() as any;
+    const modelList: OpenAIModelCard[] = modelsPage.data;
+
+    return modelList
+      .map((model) => {
+        const knownModel = LOBE_DEFAULT_MODEL_LIST.find((m) => model.id.toLowerCase() === m.id.toLowerCase());
+
+        return {
+          contextWindowTokens: knownModel?.contextWindowTokens ?? undefined,
+          displayName: knownModel?.displayName ?? undefined,
+          enabled: knownModel?.enabled || false,
+          functionCall:
+            functionCallKeywords.some(keyword => model.id.toLowerCase().includes(keyword)) && !model.id.toLowerCase().includes('audio')
+            || knownModel?.abilities?.functionCall
+            || false,
+          id: model.id,
+          reasoning:
+            reasoningKeywords.some(keyword => model.id.toLowerCase().includes(keyword))
+            || knownModel?.abilities?.reasoning
+            || false,
+          vision:
+            visionKeywords.some(keyword => model.id.toLowerCase().includes(keyword)) && !model.id.toLowerCase().includes('audio')
+            || knownModel?.abilities?.vision
+            || false,
+        };
+      })
+      .filter(Boolean) as ChatModelCard[];
   },
   provider: ModelProvider.OpenAI,
 });
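
Every provider file below repeats this same refactor: the synchronous `models.transformModel` hook is replaced by an async `models` resolver that fetches the provider's live model list itself, lazily imports the curated defaults, and merges the two. A condensed sketch of the shared shape, assuming an OpenAI-compatible `client`; the standalone `resolveModels` wrapper is hypothetical, but the imports and field names are taken from the hunks in this diff:

    import type { ChatModelCard } from '@/types/llm';

    // Hypothetical standalone helper; in the diff this body sits inline in each
    // provider's `models` option, with provider-specific keyword heuristics
    // layered in front of the curated-ability fallbacks.
    const resolveModels = async (client: {
      models: { list: () => Promise<any> };
    }): Promise<ChatModelCard[]> => {
      // Lazy import so the large curated model config is only loaded on demand.
      const { LOBE_DEFAULT_MODEL_LIST } = await import('@/config/aiModels');

      // Live ids reported by the provider's /models endpoint.
      const modelList: { id: string }[] = (await client.models.list()).data;

      return modelList.map((model) => {
        // Case-insensitive lookup into the curated list (new in this release;
        // the old transform compared ids case-sensitively).
        const knownModel = LOBE_DEFAULT_MODEL_LIST.find(
          (m) => model.id.toLowerCase() === m.id.toLowerCase(),
        );

        return {
          contextWindowTokens: knownModel?.contextWindowTokens,
          displayName: knownModel?.displayName,
          enabled: knownModel?.enabled || false,
          functionCall: knownModel?.abilities?.functionCall || false,
          id: model.id,
          reasoning: knownModel?.abilities?.reasoning || false,
          vision: knownModel?.abilities?.vision || false,
        };
      });
    };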
package/src/libs/agent-runtime/openrouter/__snapshots__/index.test.ts.snap

@@ -2,6 +2,34 @@
 
 exports[`LobeOpenRouterAI > models > should get models 1`] = `
 [
+  {
+    "contextWindowTokens": 131072,
+    "description": "Reflection Llama-3.1 70B is trained with a new technique called Reflection-Tuning that teaches a LLM to detect mistakes in its reasoning and correct course.
+
+The model was trained on synthetic data.
+
+_These are free, rate-limited endpoints for [Reflection 70B](/models/mattshumer/reflection-70b). Outputs may be cached. Read about rate limits [here](/docs/limits)._",
+    "displayName": "Reflection 70B (free)",
+    "enabled": false,
+    "functionCall": false,
+    "id": "mattshumer/reflection-70b:free",
+    "maxTokens": 4096,
+    "reasoning": false,
+    "vision": false,
+  },
+  {
+    "contextWindowTokens": 131072,
+    "description": "Reflection Llama-3.1 70B is trained with a new technique called Reflection-Tuning that teaches a LLM to detect mistakes in its reasoning and correct course.
+
+The model was trained on synthetic data.",
+    "displayName": "Reflection 70B",
+    "enabled": false,
+    "functionCall": false,
+    "id": "mattshumer/reflection-70b",
+    "maxTokens": undefined,
+    "reasoning": false,
+    "vision": false,
+  },
   {
     "contextWindowTokens": 128000,
     "description": "Command-R is a 35B parameter model that performs conversational language tasks at a higher quality, more reliably, and with a longer context than previous models. It can be used for complex workflows like code generation, retrieval augmented generation (RAG), tool use, and agents.
@@ -510,11 +538,11 @@ GPT-4o mini achieves an 82% score on MMLU and presently ranks higher than GPT-4
 Check out the [launch announcement](https://openai.com/index/gpt-4o-mini-advancing-cost-efficient-intelligence/) to learn more.",
     "displayName": "OpenAI: GPT-4o-mini",
     "enabled": true,
-    "functionCall": false,
+    "functionCall": true,
     "id": "openai/gpt-4o-mini",
     "maxTokens": 16384,
     "reasoning": false,
-    "vision": false,
+    "vision": true,
   },
   {
     "contextWindowTokens": 32768,
@@ -560,7 +588,7 @@ Gemma models are well-suited for a variety of text generation tasks, including q
 
 See the [launch announcement](https://blog.google/technology/developers/google-gemma-2/) for more details. Usage of Gemma is subject to Google's [Gemma Terms of Use](https://ai.google.dev/gemma/terms).",
     "displayName": "Google: Gemma 2 27B",
-    "enabled": true,
+    "enabled": false,
     "functionCall": false,
     "id": "google/gemma-2-27b-it",
     "maxTokens": undefined,
@@ -940,7 +968,7 @@ Usage of Gemini is subject to Google's [Gemini Terms of Use](https://ai.google.d
 #multimodal",
     "displayName": "Google: Gemini Flash 1.5",
     "enabled": true,
-    "functionCall": false,
+    "functionCall": true,
     "id": "google/gemini-flash-1.5",
     "maxTokens": 32768,
     "reasoning": false,
@@ -968,7 +996,7 @@ Compared with DeepSeek 67B, DeepSeek-V2 achieves stronger performance, and meanw
 DeepSeek-V2 achieves remarkable performance on both standard benchmarks and open-ended generation evaluations.",
    "displayName": "DeepSeek-V2 Chat",
     "enabled": true,
-    "functionCall": false,
+    "functionCall": true,
     "id": "deepseek/deepseek-chat",
     "maxTokens": 4096,
     "reasoning": false,
@@ -1065,11 +1093,11 @@ For benchmarking against other models, it was briefly called ["im-also-a-good-gp
 For benchmarking against other models, it was briefly called ["im-also-a-good-gpt2-chatbot"](https://twitter.com/LiamFedus/status/1790064963966370209)",
     "displayName": "OpenAI: GPT-4o",
     "enabled": true,
-    "functionCall": false,
+    "functionCall": true,
     "id": "openai/gpt-4o",
     "maxTokens": 4096,
     "reasoning": false,
-    "vision": false,
+    "vision": true,
   },
   {
     "contextWindowTokens": 128000,
@@ -1336,7 +1364,7 @@ Usage of Gemini is subject to Google's [Gemini Terms of Use](https://ai.google.d
 #multimodal",
     "displayName": "Google: Gemini Pro 1.5",
     "enabled": true,
-    "functionCall": false,
+    "functionCall": true,
     "id": "google/gemini-pro-1.5",
     "maxTokens": 32768,
     "reasoning": false,
@@ -1438,7 +1466,7 @@ See the launch announcement and benchmark results [here](https://www.anthropic.c
 #multimodal",
     "displayName": "Anthropic: Claude 3 Haiku",
     "enabled": true,
-    "functionCall": false,
+    "functionCall": true,
     "id": "anthropic/claude-3-haiku",
     "maxTokens": 4096,
     "reasoning": false,
@@ -1503,7 +1531,7 @@ See the launch announcement and benchmark results [here](https://www.anthropic.c
 #multimodal",
     "displayName": "Anthropic: Claude 3 Opus",
     "enabled": true,
-    "functionCall": false,
+    "functionCall": true,
     "id": "anthropic/claude-3-opus",
     "maxTokens": 4096,
     "reasoning": false,
@@ -2101,7 +2129,7 @@ Currently based on [jondurbin/airoboros-l2-70b](https://huggingface.co/jondurbin
   "description": "A 7.3B parameter model that outperforms Llama 2 13B on all benchmarks, with optimizations for speed and context length.",
   "displayName": "Mistral: Mistral 7B Instruct v0.1",
   "enabled": false,
-    "functionCall": false,
+    "functionCall": true,
   "id": "mistralai/mistral-7b-instruct-v0.1",
   "maxTokens": undefined,
   "reasoning": false,
package/src/libs/agent-runtime/openrouter/index.ts

@@ -1,9 +1,9 @@
-import { LOBE_DEFAULT_MODEL_LIST } from '@/config/modelProviders';
-
 import { ModelProvider } from '../types';
 import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
 import { OpenRouterModelCard } from './type';
 
+import type { ChatModelCard } from '@/types/llm';
+
 export const LobeOpenRouterAI = LobeOpenAICompatibleFactory({
   baseURL: 'https://openrouter.ai/api/v1',
   chatCompletion: {
@@ -24,40 +24,58 @@ export const LobeOpenRouterAI = LobeOpenAICompatibleFactory({
   debug: {
     chatCompletion: () => process.env.DEBUG_OPENROUTER_CHAT_COMPLETION === '1',
   },
-  models: {
-    transformModel: (m) => {
-      const visionKeywords = ['qwen/qvq', 'vision'];
+  models: async ({ client }) => {
+    const { LOBE_DEFAULT_MODEL_LIST } = await import('@/config/aiModels');
 
-      const reasoningKeywords = [
-        'deepseek/deepseek-r1',
-        'openai/o1',
-        'openai/o3',
-        'qwen/qvq',
-        'qwen/qwq',
-        'thinking',
-      ];
+    const visionKeywords = [
+      'qwen/qvq',
+      'vision'
+    ];
 
-      const model = m as unknown as OpenRouterModelCard;
+    const reasoningKeywords = [
+      'deepseek/deepseek-r1',
+      'openai/o1',
+      'openai/o3',
+      'qwen/qvq',
+      'qwen/qwq',
+      'thinking',
+    ];
 
-      return {
-        contextWindowTokens: model.context_length,
-        description: model.description,
-        displayName: model.name,
-        enabled: LOBE_DEFAULT_MODEL_LIST.find((m) => model.id === m.id)?.enabled || false,
-        functionCall:
-          model.description.includes('function calling') || model.description.includes('tools'),
-        id: model.id,
-        maxTokens:
-          typeof model.top_provider.max_completion_tokens === 'number'
-            ? model.top_provider.max_completion_tokens
-            : undefined,
-        reasoning: reasoningKeywords.some((keyword) => model.id.toLowerCase().includes(keyword)),
-        vision:
-          model.description.includes('vision') ||
-          model.description.includes('multimodal') ||
-          visionKeywords.some((keyword) => model.id.toLowerCase().includes(keyword)),
-      };
-    },
+    const modelsPage = await client.models.list() as any;
+    const modelList: OpenRouterModelCard[] = modelsPage.data;
+
+    return modelList
+      .map((model) => {
+        const knownModel = LOBE_DEFAULT_MODEL_LIST.find((m) => model.id.toLowerCase() === m.id.toLowerCase());
+
+        return {
+          contextWindowTokens: model.context_length,
+          description: model.description,
+          displayName: model.name,
+          enabled: knownModel?.enabled || false,
+          functionCall:
+            model.description.includes('function calling')
+            || model.description.includes('tools')
+            || knownModel?.abilities?.functionCall
+            || false,
+          id: model.id,
+          maxTokens:
+            typeof model.top_provider.max_completion_tokens === 'number'
+              ? model.top_provider.max_completion_tokens
+              : undefined,
+          reasoning:
+            reasoningKeywords.some(keyword => model.id.toLowerCase().includes(keyword))
+            || knownModel?.abilities?.reasoning
+            || false,
+          vision:
+            model.description.includes('vision')
+            || model.description.includes('multimodal')
+            || visionKeywords.some(keyword => model.id.toLowerCase().includes(keyword))
+            || knownModel?.abilities?.vision
+            || false,
+        };
+      })
+      .filter(Boolean) as ChatModelCard[];
   },
   provider: ModelProvider.OpenRouter,
 });
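
For orientation, these are the `OpenRouterModelCard` fields the transform consumes; the shape below is inferred from usage in this hunk, not copied from `./type`, which may declare more:

    // Inferred sketch of the card shape (assumption, not the real declaration):
    interface OpenRouterModelCardSketch {
      context_length: number;
      description: string;
      id: string;
      name: string;
      top_provider: { max_completion_tokens: number | null };
    }

Note that `maxTokens` only passes `top_provider.max_completion_tokens` through when it is actually a number, which is why several snapshot entries above read `"maxTokens": undefined`.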
package/src/libs/agent-runtime/qwen/index.ts

@@ -3,7 +3,7 @@ import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
 
 import { QwenAIStream } from '../utils/streams';
 
-import { LOBE_DEFAULT_MODEL_LIST } from '@/config/aiModels';
+import type { ChatModelCard } from '@/types/llm';
 
 export interface QwenModelCard {
   id: string;
@@ -70,38 +70,54 @@ export const LobeQwenAI = LobeOpenAICompatibleFactory({
   debug: {
     chatCompletion: () => process.env.DEBUG_QWEN_CHAT_COMPLETION === '1',
   },
-  models: {
-    transformModel: (m) => {
-      const functionCallKeywords = [
-        'qwen-max',
-        'qwen-plus',
-        'qwen-turbo',
-        'qwen2.5',
-      ];
+  models: async ({ client }) => {
+    const { LOBE_DEFAULT_MODEL_LIST } = await import('@/config/aiModels');
 
-      const visionKeywords = [
-        'qvq',
-        'vl',
-      ];
+    const functionCallKeywords = [
+      'qwen-max',
+      'qwen-plus',
+      'qwen-turbo',
+      'qwen2.5',
+    ];
 
-      const reasoningKeywords = [
-        'qvq',
-        'qwq',
-        'deepseek-r1'
-      ];
+    const visionKeywords = [
+      'qvq',
+      'vl',
+    ];
 
-      const model = m as unknown as QwenModelCard;
+    const reasoningKeywords = [
+      'qvq',
+      'qwq',
+      'deepseek-r1'
+    ];
 
-      return {
-        contextWindowTokens: LOBE_DEFAULT_MODEL_LIST.find((m) => model.id === m.id)?.contextWindowTokens ?? undefined,
-        displayName: LOBE_DEFAULT_MODEL_LIST.find((m) => model.id === m.id)?.displayName ?? undefined,
-        enabled: LOBE_DEFAULT_MODEL_LIST.find((m) => model.id === m.id)?.enabled || false,
-        functionCall: functionCallKeywords.some(keyword => model.id.toLowerCase().includes(keyword)),
-        id: model.id,
-        reasoning: reasoningKeywords.some(keyword => model.id.toLowerCase().includes(keyword)),
-        vision: visionKeywords.some(keyword => model.id.toLowerCase().includes(keyword)),
-      };
-    },
+    const modelsPage = await client.models.list() as any;
+    const modelList: QwenModelCard[] = modelsPage.data;
+
+    return modelList
+      .map((model) => {
+        const knownModel = LOBE_DEFAULT_MODEL_LIST.find((m) => model.id.toLowerCase() === m.id.toLowerCase());
+
+        return {
+          contextWindowTokens: knownModel?.contextWindowTokens ?? undefined,
+          displayName: knownModel?.displayName ?? undefined,
+          enabled: knownModel?.enabled || false,
+          functionCall:
+            functionCallKeywords.some(keyword => model.id.toLowerCase().includes(keyword))
+            || knownModel?.abilities?.functionCall
+            || false,
+          id: model.id,
+          reasoning:
+            reasoningKeywords.some(keyword => model.id.toLowerCase().includes(keyword))
+            || knownModel?.abilities?.reasoning
+            || false,
+          vision:
+            visionKeywords.some(keyword => model.id.toLowerCase().includes(keyword))
+            || knownModel?.abilities?.vision
+            || false,
+        };
+      })
+      .filter(Boolean) as ChatModelCard[];
   },
   provider: ModelProvider.Qwen,
 });
package/src/libs/agent-runtime/sensenova/index.ts

@@ -1,7 +1,6 @@
 import { ModelProvider } from '../types';
 import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
 
-import { LOBE_DEFAULT_MODEL_LIST } from '@/config/aiModels';
 import type { ChatModelCard } from '@/types/llm';
 
 export interface SenseNovaModelCard {
@@ -33,10 +32,17 @@
     chatCompletion: () => process.env.DEBUG_SENSENOVA_CHAT_COMPLETION === '1',
   },
   models: async ({ client }) => {
+    const { LOBE_DEFAULT_MODEL_LIST } = await import('@/config/aiModels');
+
     const functionCallKeywords = [
+      'deepseek-v3',
       'sensechat-5',
     ];
 
+    const reasoningKeywords = [
+      'deepseek-r1'
+    ];
+
     client.baseURL = 'https://api.sensenova.cn/v1/llm';
 
     const modelsPage = await client.models.list() as any;
@@ -44,13 +50,25 @@
     return modelList
       .map((model) => {
+        const knownModel = LOBE_DEFAULT_MODEL_LIST.find((m) => model.id.toLowerCase() === m.id.toLowerCase());
+
         return {
-          contextWindowTokens: LOBE_DEFAULT_MODEL_LIST.find((m) => model.id === m.id)?.contextWindowTokens ?? undefined,
-          displayName: LOBE_DEFAULT_MODEL_LIST.find((m) => model.id === m.id)?.displayName ?? undefined,
-          enabled: LOBE_DEFAULT_MODEL_LIST.find((m) => model.id === m.id)?.enabled || false,
-          functionCall: functionCallKeywords.some(keyword => model.id.toLowerCase().includes(keyword)),
+          contextWindowTokens: knownModel?.contextWindowTokens ?? undefined,
+          displayName: knownModel?.displayName ?? undefined,
+          enabled: knownModel?.enabled || false,
+          functionCall:
+            functionCallKeywords.some(keyword => model.id.toLowerCase().includes(keyword))
+            || knownModel?.abilities?.functionCall
+            || false,
           id: model.id,
-          vision: model.id.toLowerCase().includes('vision'),
+          reasoning:
+            reasoningKeywords.some(keyword => model.id.toLowerCase().includes(keyword))
+            || knownModel?.abilities?.reasoning
+            || false,
+          vision:
+            model.id.toLowerCase().includes('vision')
+            || knownModel?.abilities?.vision
+            || false,
         };
       })
       .filter(Boolean) as ChatModelCard[];
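
SenseNova is the one provider in this set that also repoints the shared client before listing. The OpenAI SDK resolves `models.list()` against `client.baseURL`, so after the override the request should target SenseNova's LLM-specific endpoint (the exact path join is an assumption about the SDK's behavior, worth verifying):

    client.baseURL = 'https://api.sensenova.cn/v1/llm';
    const modelsPage = await client.models.list() as any; // → GET https://api.sensenova.cn/v1/llm/models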
package/src/libs/agent-runtime/siliconcloud/index.ts

@@ -2,7 +2,7 @@ import { AgentRuntimeErrorType } from '../error';
 import { ChatCompletionErrorPayload, ModelProvider } from '../types';
 import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
 
-import { LOBE_DEFAULT_MODEL_LIST } from '@/config/aiModels';
+import type { ChatModelCard } from '@/types/llm';
 
 export interface SiliconCloudModelCard {
   id: string;
@@ -52,43 +52,59 @@ export const LobeSiliconCloudAI = LobeOpenAICompatibleFactory({
     bizError: AgentRuntimeErrorType.ProviderBizError,
     invalidAPIKey: AgentRuntimeErrorType.InvalidProviderAPIKey,
   },
-  models: {
-    transformModel: (m) => {
-      const functionCallKeywords = [
-        'qwen/qwen2.5',
-        'thudm/glm-4',
-        'deepseek-ai/deepseek',
-        'internlm/internlm2_5',
-        'meta-llama/meta-llama-3.1',
-        'meta-llama/meta-llama-3.3',
-      ];
+  models: async ({ client }) => {
+    const { LOBE_DEFAULT_MODEL_LIST } = await import('@/config/aiModels');
 
-      const visionKeywords = [
-        'opengvlab/internvl',
-        'qwen/qvq',
-        'qwen/qwen2-vl',
-        'teleai/telemm',
-        'deepseek-ai/deepseek-vl',
-      ];
+    const functionCallKeywords = [
+      'qwen/qwen2.5',
+      'thudm/glm-4',
+      'deepseek-ai/deepseek',
+      'internlm/internlm2_5',
+      'meta-llama/meta-llama-3.1',
+      'meta-llama/meta-llama-3.3',
+    ];
 
-      const reasoningKeywords = [
-        'deepseek-ai/deepseek-r1',
-        'qwen/qvq',
-        'qwen/qwq',
-      ];
+    const visionKeywords = [
+      'opengvlab/internvl',
+      'qwen/qvq',
+      'qwen/qwen2-vl',
+      'teleai/telemm',
+      'deepseek-ai/deepseek-vl',
+    ];
 
-      const model = m as unknown as SiliconCloudModelCard;
+    const reasoningKeywords = [
+      'deepseek-ai/deepseek-r1',
+      'qwen/qvq',
+      'qwen/qwq',
+    ];
 
-      return {
-        contextWindowTokens: LOBE_DEFAULT_MODEL_LIST.find((m) => model.id === m.id)?.contextWindowTokens ?? undefined,
-        displayName: LOBE_DEFAULT_MODEL_LIST.find((m) => model.id === m.id)?.displayName ?? undefined,
-        enabled: LOBE_DEFAULT_MODEL_LIST.find((m) => model.id === m.id)?.enabled || false,
-        functionCall: functionCallKeywords.some(keyword => model.id.toLowerCase().includes(keyword)) && !model.id.toLowerCase().includes('deepseek-r1'),
-        id: model.id,
-        reasoning: reasoningKeywords.some(keyword => model.id.toLowerCase().includes(keyword)),
-        vision: visionKeywords.some(keyword => model.id.toLowerCase().includes(keyword)),
-      };
-    },
+    const modelsPage = await client.models.list() as any;
+    const modelList: SiliconCloudModelCard[] = modelsPage.data;
+
+    return modelList
+      .map((model) => {
+        const knownModel = LOBE_DEFAULT_MODEL_LIST.find((m) => model.id.toLowerCase() === m.id.toLowerCase());
+
+        return {
+          contextWindowTokens: knownModel?.contextWindowTokens ?? undefined,
+          displayName: knownModel?.displayName ?? undefined,
+          enabled: knownModel?.enabled || false,
+          functionCall:
+            functionCallKeywords.some(keyword => model.id.toLowerCase().includes(keyword)) && !model.id.toLowerCase().includes('deepseek-r1')
+            || knownModel?.abilities?.functionCall
+            || false,
+          id: model.id,
+          reasoning:
+            reasoningKeywords.some(keyword => model.id.toLowerCase().includes(keyword))
+            || knownModel?.abilities?.reasoning
+            || false,
+          vision:
+            visionKeywords.some(keyword => model.id.toLowerCase().includes(keyword))
+            || knownModel?.abilities?.vision
+            || false,
+        };
+      })
+      .filter(Boolean) as ChatModelCard[];
   },
   provider: ModelProvider.SiliconCloud,
 });
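
One subtlety in the SiliconCloud `functionCall` expression: `&&` binds tighter than `||`, so the `deepseek-r1` carve-out only suppresses the keyword heuristic, while a curated `abilities.functionCall` entry can still turn the flag back on. A self-contained check with hypothetical values:

    // Hypothetical values for a deepseek-r1 id whose curated entry allows tools.
    const matchesKeyword = true; // hits the 'deepseek-ai/deepseek' keyword
    const isDeepSeekR1 = true;   // id contains 'deepseek-r1'
    const curated = true;        // knownModel?.abilities?.functionCall

    const functionCall = matchesKeyword && !isDeepSeekR1 || curated || false; // → true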
package/src/libs/agent-runtime/stepfun/index.ts

@@ -1,7 +1,7 @@
 import { ModelProvider } from '../types';
 import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
 
-import { LOBE_DEFAULT_MODEL_LIST } from '@/config/aiModels';
+import type { ChatModelCard } from '@/types/llm';
 
 export interface StepfunModelCard {
   id: string;
@@ -20,32 +20,48 @@ export const LobeStepfunAI = LobeOpenAICompatibleFactory({
   debug: {
     chatCompletion: () => process.env.DEBUG_STEPFUN_CHAT_COMPLETION === '1',
   },
-  models: {
-    transformModel: (m) => {
-      // ref: https://platform.stepfun.com/docs/llm/modeloverview
-      const functionCallKeywords = [
-        'step-1-',
-        'step-1o-',
-        'step-1v-',
-        'step-2-',
-      ];
-
-      const visionKeywords = [
-        'step-1o-',
-        'step-1v-',
-      ];
-
-      const model = m as unknown as StepfunModelCard;
+  models: async ({ client }) => {
+    const { LOBE_DEFAULT_MODEL_LIST } = await import('@/config/aiModels');
 
-      return {
-        contextWindowTokens: LOBE_DEFAULT_MODEL_LIST.find((m) => model.id === m.id)?.contextWindowTokens ?? undefined,
-        displayName: LOBE_DEFAULT_MODEL_LIST.find((m) => model.id === m.id)?.displayName ?? undefined,
-        enabled: LOBE_DEFAULT_MODEL_LIST.find((m) => model.id === m.id)?.enabled || false,
-        functionCall: functionCallKeywords.some(keyword => model.id.toLowerCase().includes(keyword)),
-        id: model.id,
-        vision: visionKeywords.some(keyword => model.id.toLowerCase().includes(keyword)),
-      };
-    },
+    // ref: https://platform.stepfun.com/docs/llm/modeloverview
+    const functionCallKeywords = [
+      'step-1-',
+      'step-1o-',
+      'step-1v-',
+      'step-2-',
+    ];
+
+    const visionKeywords = [
+      'step-1o-',
+      'step-1v-',
+    ];
+
+    const modelsPage = await client.models.list() as any;
+    const modelList: StepfunModelCard[] = modelsPage.data;
+
+    return modelList
+      .map((model) => {
+        const knownModel = LOBE_DEFAULT_MODEL_LIST.find((m) => model.id.toLowerCase() === m.id.toLowerCase());
+
+        return {
+          contextWindowTokens: knownModel?.contextWindowTokens ?? undefined,
+          displayName: knownModel?.displayName ?? undefined,
+          enabled: knownModel?.enabled || false,
+          functionCall:
+            functionCallKeywords.some(keyword => model.id.toLowerCase().includes(keyword))
+            || knownModel?.abilities?.functionCall
+            || false,
+          id: model.id,
+          reasoning:
+            knownModel?.abilities?.reasoning
+            || false,
+          vision:
+            visionKeywords.some(keyword => model.id.toLowerCase().includes(keyword))
+            || knownModel?.abilities?.vision
+            || false,
+        };
+      })
+      .filter(Boolean) as ChatModelCard[];
   },
   provider: ModelProvider.Stepfun,
 });