@lobehub/chat 1.91.1 → 1.91.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (29)
  1. package/.eslintrc.js +2 -0
  2. package/CHANGELOG.md +58 -0
  3. package/changelog/v1.json +21 -0
  4. package/package.json +2 -2
  5. package/src/app/(backend)/middleware/auth/utils.ts +2 -1
  6. package/src/app/[variants]/(main)/profile/features/ClerkProfile.tsx +1 -4
  7. package/src/config/aiModels/modelscope.ts +4 -1
  8. package/src/config/aiModels/novita.ts +2 -0
  9. package/src/config/aiModels/openrouter.ts +2 -0
  10. package/src/config/aiModels/siliconcloud.ts +1 -0
  11. package/src/config/modelProviders/anthropic.ts +30 -11
  12. package/src/config/modelProviders/openai.ts +14 -0
  13. package/src/layout/AuthProvider/Clerk/useAppearance.ts +1 -4
  14. package/src/libs/model-runtime/google/index.ts +30 -40
  15. package/src/libs/model-runtime/novita/__snapshots__/index.test.ts.snap +19 -1
  16. package/src/libs/model-runtime/novita/index.ts +14 -15
  17. package/src/libs/model-runtime/nvidia/index.ts +2 -21
  18. package/src/libs/model-runtime/openai/__snapshots__/index.test.ts.snap +39 -11
  19. package/src/libs/model-runtime/openai/index.ts +3 -38
  20. package/src/libs/model-runtime/openrouter/__snapshots__/index.test.ts.snap +3 -0
  21. package/src/libs/model-runtime/openrouter/index.ts +45 -54
  22. package/src/libs/model-runtime/qwen/index.ts +2 -45
  23. package/src/libs/model-runtime/siliconcloud/index.ts +2 -51
  24. package/src/libs/model-runtime/utils/modelParse.test.ts +761 -0
  25. package/src/libs/model-runtime/utils/modelParse.ts +186 -0
  26. package/src/libs/model-runtime/volcengine/index.ts +11 -0
  27. package/src/libs/model-runtime/zeroone/index.ts +2 -23
  28. package/src/libs/model-runtime/zhipu/index.ts +7 -34
  29. package/src/app/[variants]/(main)/settings/provider/(detail)/ollama/OllamaModelDownloader/index.tsx +0 -0
@@ -0,0 +1,186 @@
1
+ import type { ChatModelCard } from '@/types/llm';
2
+
3
// Keyword configuration describing how a provider's model ids are mapped to
// capability tags (function calling / reasoning / vision).
export interface ModelProcessorConfig {
  excludeKeywords?: readonly string[]; // models matching these keywords are never tagged
  functionCallKeywords?: readonly string[];
  reasoningKeywords?: readonly string[];
  visionKeywords?: readonly string[];
}
9
+
10
// Per-provider keyword configuration for model capability tagging.
// NOTE(review): keywords are matched with substring `includes` against the
// lowercased model id — keep entries lowercase.
export const MODEL_LIST_CONFIGS = {
  anthropic: {
    functionCallKeywords: ['claude'],
    reasoningKeywords: ['-3-7', '3.7', '-4'],
    visionKeywords: ['claude'],
  },
  deepseek: {
    functionCallKeywords: ['v3', 'r1'],
    reasoningKeywords: ['r1'],
  },
  google: {
    functionCallKeywords: ['gemini'],
    reasoningKeywords: ['thinking', '-2.5-'],
    visionKeywords: ['gemini', 'learnlm'],
  },
  llama: {
    functionCallKeywords: ['llama-3.2', 'llama-3.3', 'llama-4'],
    reasoningKeywords: [],
    visionKeywords: [],
  },
  openai: {
    excludeKeywords: ['audio'],
    functionCallKeywords: ['4o', '4.1', 'o3', 'o4'],
    reasoningKeywords: ['o1', 'o3', 'o4'],
    visionKeywords: ['4o', '4.1', 'o4'],
  },
  qwen: {
    functionCallKeywords: [
      'qwen-max',
      'qwen-plus',
      'qwen-turbo',
      'qwen-long',
      'qwen1.5',
      'qwen2',
      'qwen2.5',
      'qwen3',
    ],
    reasoningKeywords: ['qvq', 'qwq', 'qwen3'],
    visionKeywords: ['qvq', 'vl'],
  },
  volcengine: {
    functionCallKeywords: ['doubao-1.5'],
    reasoningKeywords: ['thinking', '-r1'],
    visionKeywords: ['vision', '-m'],
  },
  zeroone: {
    functionCallKeywords: ['fc'],
    visionKeywords: ['vision'],
  },
  zhipu: {
    functionCallKeywords: ['glm-4', 'glm-z1'],
    reasoningKeywords: ['glm-zero', 'glm-z1'],
    visionKeywords: ['glm-4v'],
  },
} as const;
66
+
67
// Keywords used to detect which provider family a model id belongs to.
// NOTE(review): entry order is the detection priority (Object.entries
// iteration order) — do not reorder without re-checking overlapping ids.
export const PROVIDER_DETECTION_CONFIG = {
  anthropic: ['claude'],
  deepseek: ['deepseek'],
  google: ['gemini'],
  llama: ['llama'],
  openai: ['o1', 'o3', 'o4', 'gpt-'],
  qwen: ['qwen', 'qwq', 'qvq'],
  volcengine: ['doubao'],
  zeroone: ['yi-'],
  zhipu: ['glm'],
} as const;
79
+
80
+ /**
81
+ * 检测单个模型的提供商类型
82
+ * @param modelId 模型ID
83
+ * @returns 检测到的提供商配置键名,默认为 'openai'
84
+ */
85
+ export const detectModelProvider = (modelId: string): keyof typeof MODEL_LIST_CONFIGS => {
86
+ const lowerModelId = modelId.toLowerCase();
87
+
88
+ for (const [provider, keywords] of Object.entries(PROVIDER_DETECTION_CONFIG)) {
89
+ const hasKeyword = keywords.some((keyword) => lowerModelId.includes(keyword));
90
+
91
+ if (hasKeyword && provider in MODEL_LIST_CONFIGS) {
92
+ return provider as keyof typeof MODEL_LIST_CONFIGS;
93
+ }
94
+ }
95
+
96
+ return 'openai';
97
+ };
98
+
99
+ /**
100
+ * 处理模型卡片的通用逻辑
101
+ */
102
+ const processModelCard = (
103
+ model: { [key: string]: any; id: string },
104
+ config: ModelProcessorConfig,
105
+ knownModel?: any,
106
+ ): ChatModelCard => {
107
+ const {
108
+ functionCallKeywords = [],
109
+ visionKeywords = [],
110
+ reasoningKeywords = [],
111
+ excludeKeywords = [],
112
+ } = config;
113
+
114
+ const isExcludedModel = excludeKeywords.some((keyword) =>
115
+ model.id.toLowerCase().includes(keyword),
116
+ );
117
+
118
+ return {
119
+ contextWindowTokens: model.contextWindowTokens ?? knownModel?.contextWindowTokens ?? undefined,
120
+ displayName: model.displayName ?? knownModel?.displayName ?? model.id,
121
+ enabled: knownModel?.enabled || false,
122
+ functionCall:
123
+ (functionCallKeywords.some((keyword) => model.id.toLowerCase().includes(keyword)) &&
124
+ !isExcludedModel) ||
125
+ knownModel?.abilities?.functionCall ||
126
+ false,
127
+ id: model.id,
128
+ maxOutput: model.maxOutput ?? knownModel?.maxOutput ?? undefined,
129
+ reasoning:
130
+ reasoningKeywords.some((keyword) => model.id.toLowerCase().includes(keyword)) ||
131
+ knownModel?.abilities?.reasoning ||
132
+ false,
133
+ vision:
134
+ (visionKeywords.some((keyword) => model.id.toLowerCase().includes(keyword)) &&
135
+ !isExcludedModel) ||
136
+ knownModel?.abilities?.vision ||
137
+ false,
138
+ };
139
+ };
140
+
141
+ /**
142
+ * 处理单一提供商的模型列表
143
+ * @param modelList 模型列表
144
+ * @param config 提供商配置
145
+ * @returns 处理后的模型卡片列表
146
+ */
147
+ export const processModelList = async (
148
+ modelList: Array<{ id: string }>,
149
+ config: ModelProcessorConfig,
150
+ ): Promise<ChatModelCard[]> => {
151
+ const { LOBE_DEFAULT_MODEL_LIST } = await import('@/config/aiModels');
152
+
153
+ return modelList
154
+ .map((model) => {
155
+ const knownModel = LOBE_DEFAULT_MODEL_LIST.find(
156
+ (m) => model.id.toLowerCase() === m.id.toLowerCase(),
157
+ );
158
+
159
+ return processModelCard(model, config, knownModel);
160
+ })
161
+ .filter(Boolean);
162
+ };
163
+
164
+ /**
165
+ * 处理混合提供商的模型列表
166
+ * @param modelList 模型列表
167
+ * @returns 处理后的模型卡片列表
168
+ */
169
+ export const processMultiProviderModelList = async (
170
+ modelList: Array<{ id: string }>,
171
+ ): Promise<ChatModelCard[]> => {
172
+ const { LOBE_DEFAULT_MODEL_LIST } = await import('@/config/aiModels');
173
+
174
+ return modelList
175
+ .map((model) => {
176
+ const detectedProvider = detectModelProvider(model.id);
177
+ const config = MODEL_LIST_CONFIGS[detectedProvider];
178
+
179
+ const knownModel = LOBE_DEFAULT_MODEL_LIST.find(
180
+ (m) => model.id.toLowerCase() === m.id.toLowerCase(),
181
+ );
182
+
183
+ return processModelCard(model, config, knownModel);
184
+ })
185
+ .filter(Boolean);
186
+ };
@@ -1,5 +1,10 @@
1
1
  import { ModelProvider } from '../types';
2
2
  import { createOpenAICompatibleRuntime } from '../utils/openaiCompatibleFactory';
3
+ import { MODEL_LIST_CONFIGS, processModelList } from '../utils/modelParse';
4
+
5
+ export interface VolcengineModelCard {
6
+ id: string;
7
+ }
3
8
 
4
9
  export const LobeVolcengineAI = createOpenAICompatibleRuntime({
5
10
  baseURL: 'https://ark.cn-beijing.volces.com/api/v3',
@@ -24,5 +29,11 @@ export const LobeVolcengineAI = createOpenAICompatibleRuntime({
24
29
  debug: {
25
30
  chatCompletion: () => process.env.DEBUG_VOLCENGINE_CHAT_COMPLETION === '1',
26
31
  },
32
+ models: async ({ client }) => {
33
+ const modelsPage = (await client.models.list()) as any;
34
+ const modelList: VolcengineModelCard[] = modelsPage.data;
35
+
36
+ return processModelList(modelList, MODEL_LIST_CONFIGS.volcengine);
37
+ },
27
38
  provider: ModelProvider.Volcengine,
28
39
  });
@@ -1,6 +1,5 @@
1
- import type { ChatModelCard } from '@/types/llm';
2
-
3
1
  import { ModelProvider } from '../types';
2
+ import { MODEL_LIST_CONFIGS, processModelList } from '../utils/modelParse';
4
3
  import { createOpenAICompatibleRuntime } from '../utils/openaiCompatibleFactory';
5
4
 
6
5
  export interface ZeroOneModelCard {
@@ -13,30 +12,10 @@ export const LobeZeroOneAI = createOpenAICompatibleRuntime({
13
12
  chatCompletion: () => process.env.DEBUG_ZEROONE_CHAT_COMPLETION === '1',
14
13
  },
15
14
  models: async ({ client }) => {
16
- const { LOBE_DEFAULT_MODEL_LIST } = await import('@/config/aiModels');
17
-
18
15
  const modelsPage = (await client.models.list()) as any;
19
16
  const modelList: ZeroOneModelCard[] = modelsPage.data;
20
17
 
21
- return modelList
22
- .map((model) => {
23
- const knownModel = LOBE_DEFAULT_MODEL_LIST.find(
24
- (m) => model.id.toLowerCase() === m.id.toLowerCase(),
25
- );
26
-
27
- return {
28
- contextWindowTokens: knownModel?.contextWindowTokens ?? undefined,
29
- displayName: knownModel?.displayName ?? undefined,
30
- enabled: knownModel?.enabled || false,
31
- functionCall:
32
- model.id.toLowerCase().includes('fc') || knownModel?.abilities?.functionCall || false,
33
- id: model.id,
34
- reasoning: knownModel?.abilities?.reasoning || false,
35
- vision:
36
- model.id.toLowerCase().includes('vision') || knownModel?.abilities?.vision || false,
37
- };
38
- })
39
- .filter(Boolean) as ChatModelCard[];
18
+ return processModelList(modelList, MODEL_LIST_CONFIGS.zeroone);
40
19
  },
41
20
  provider: ModelProvider.ZeroOne,
42
21
  });
@@ -1,6 +1,5 @@
1
- import type { ChatModelCard } from '@/types/llm';
2
-
3
1
  import { ModelProvider } from '../types';
2
+ import { MODEL_LIST_CONFIGS, processModelList } from '../utils/modelParse';
4
3
  import { createOpenAICompatibleRuntime } from '../utils/openaiCompatibleFactory';
5
4
 
6
5
  export interface ZhipuModelCard {
@@ -60,10 +59,6 @@ export const LobeZhipuAI = createOpenAICompatibleRuntime({
60
59
  chatCompletion: () => process.env.DEBUG_ZHIPU_CHAT_COMPLETION === '1',
61
60
  },
62
61
  models: async ({ client }) => {
63
- const { LOBE_DEFAULT_MODEL_LIST } = await import('@/config/aiModels');
64
-
65
- const reasoningKeywords = ['glm-zero', 'glm-z1'];
66
-
67
62
  // ref: https://open.bigmodel.cn/console/modelcenter/square
68
63
  const url = 'https://open.bigmodel.cn/api/fine-tuning/model_center/list?pageSize=100&pageNum=1';
69
64
  const response = await fetch(url, {
@@ -78,34 +73,12 @@ export const LobeZhipuAI = createOpenAICompatibleRuntime({
78
73
 
79
74
  const modelList: ZhipuModelCard[] = json.rows;
80
75
 
81
- return modelList
82
- .map((model) => {
83
- const knownModel = LOBE_DEFAULT_MODEL_LIST.find(
84
- (m) => model.modelCode.toLowerCase() === m.id.toLowerCase(),
85
- );
86
-
87
- return {
88
- contextWindowTokens: knownModel?.contextWindowTokens ?? undefined,
89
- description: model.description,
90
- displayName: model.modelName,
91
- enabled: knownModel?.enabled || false,
92
- functionCall:
93
- (model.modelCode.toLowerCase().includes('glm-4') &&
94
- !model.modelCode.toLowerCase().includes('glm-4v')) ||
95
- knownModel?.abilities?.functionCall ||
96
- false,
97
- id: model.modelCode,
98
- reasoning:
99
- reasoningKeywords.some((keyword) => model.modelCode.toLowerCase().includes(keyword)) ||
100
- knownModel?.abilities?.reasoning ||
101
- false,
102
- vision:
103
- model.modelCode.toLowerCase().includes('glm-4v') ||
104
- knownModel?.abilities?.vision ||
105
- false,
106
- };
107
- })
108
- .filter(Boolean) as ChatModelCard[];
76
+ const standardModelList = modelList.map((model) => ({
77
+ description: model.description,
78
+ displayName: model.modelName,
79
+ id: model.modelCode,
80
+ }));
81
+ return processModelList(standardModelList, MODEL_LIST_CONFIGS.zhipu);
109
82
  },
110
83
  provider: ModelProvider.ZhiPu,
111
84
  });