@lobehub/chat 1.53.10 → 1.53.12

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (62) hide show
  1. package/CHANGELOG.md +50 -0
  2. package/changelog/v1.json +18 -0
  3. package/locales/ar/modelProvider.json +2 -2
  4. package/locales/bg-BG/modelProvider.json +2 -2
  5. package/locales/de-DE/modelProvider.json +2 -2
  6. package/locales/en-US/modelProvider.json +2 -2
  7. package/locales/es-ES/modelProvider.json +2 -2
  8. package/locales/fa-IR/modelProvider.json +2 -2
  9. package/locales/fr-FR/modelProvider.json +2 -2
  10. package/locales/it-IT/modelProvider.json +2 -2
  11. package/locales/ja-JP/modelProvider.json +2 -2
  12. package/locales/ko-KR/modelProvider.json +2 -2
  13. package/locales/nl-NL/modelProvider.json +2 -2
  14. package/locales/pl-PL/modelProvider.json +2 -2
  15. package/locales/pt-BR/modelProvider.json +2 -2
  16. package/locales/ru-RU/modelProvider.json +2 -2
  17. package/locales/tr-TR/modelProvider.json +2 -2
  18. package/locales/vi-VN/modelProvider.json +2 -2
  19. package/locales/zh-CN/modelProvider.json +3 -3
  20. package/locales/zh-TW/modelProvider.json +2 -2
  21. package/package.json +1 -1
  22. package/src/app/[variants]/(main)/settings/provider/features/CreateNewProvider/index.tsx +8 -8
  23. package/src/config/aiModels/spark.ts +9 -0
  24. package/src/libs/agent-runtime/ai360/index.ts +37 -21
  25. package/src/libs/agent-runtime/anthropic/index.ts +17 -5
  26. package/src/libs/agent-runtime/baichuan/index.ts +11 -2
  27. package/src/libs/agent-runtime/cloudflare/index.ts +22 -7
  28. package/src/libs/agent-runtime/deepseek/index.ts +29 -13
  29. package/src/libs/agent-runtime/fireworksai/index.ts +30 -18
  30. package/src/libs/agent-runtime/giteeai/index.ts +46 -30
  31. package/src/libs/agent-runtime/github/index.test.ts +0 -49
  32. package/src/libs/agent-runtime/github/index.ts +18 -6
  33. package/src/libs/agent-runtime/google/index.ts +17 -7
  34. package/src/libs/agent-runtime/groq/index.ts +43 -27
  35. package/src/libs/agent-runtime/higress/index.ts +45 -25
  36. package/src/libs/agent-runtime/huggingface/index.ts +20 -9
  37. package/src/libs/agent-runtime/hunyuan/index.ts +34 -18
  38. package/src/libs/agent-runtime/internlm/index.ts +27 -12
  39. package/src/libs/agent-runtime/lmstudio/index.ts +34 -0
  40. package/src/libs/agent-runtime/mistral/index.ts +24 -14
  41. package/src/libs/agent-runtime/moonshot/index.ts +28 -13
  42. package/src/libs/agent-runtime/novita/index.ts +35 -18
  43. package/src/libs/agent-runtime/ollama/index.test.ts +20 -1
  44. package/src/libs/agent-runtime/ollama/index.ts +33 -5
  45. package/src/libs/agent-runtime/openai/__snapshots__/index.test.ts.snap +108 -0
  46. package/src/libs/agent-runtime/openai/index.ts +43 -27
  47. package/src/libs/agent-runtime/openrouter/__snapshots__/index.test.ts.snap +39 -11
  48. package/src/libs/agent-runtime/openrouter/index.ts +51 -33
  49. package/src/libs/agent-runtime/qwen/index.ts +45 -29
  50. package/src/libs/agent-runtime/sensenova/index.ts +24 -6
  51. package/src/libs/agent-runtime/siliconcloud/index.ts +50 -34
  52. package/src/libs/agent-runtime/stepfun/index.ts +42 -26
  53. package/src/libs/agent-runtime/tencentcloud/index.ts +44 -0
  54. package/src/libs/agent-runtime/togetherai/index.ts +19 -6
  55. package/src/libs/agent-runtime/xai/index.ts +28 -13
  56. package/src/libs/agent-runtime/zeroone/index.ts +29 -13
  57. package/src/libs/agent-runtime/zhipu/index.test.ts +0 -9
  58. package/src/libs/agent-runtime/zhipu/index.ts +18 -6
  59. package/src/locales/default/modelProvider.ts +1 -2
  60. package/src/server/manifest.ts +2 -2
  61. package/src/libs/agent-runtime/zhipu/authToken.test.ts +0 -18
  62. package/src/libs/agent-runtime/zhipu/authToken.ts +0 -22
@@ -1,10 +1,54 @@
1
1
  import { ModelProvider } from '../types';
2
2
  import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
3
3
 
4
+ import type { ChatModelCard } from '@/types/llm';
5
+
6
+ export interface TencentCloudModelCard {
7
+ id: string;
8
+ }
9
+
4
10
  export const LobeTencentCloudAI = LobeOpenAICompatibleFactory({
5
11
  baseURL: 'https://api.lkeap.cloud.tencent.com/v1',
6
12
  debug: {
7
13
  chatCompletion: () => process.env.DEBUG_TENCENT_CLOUD_CHAT_COMPLETION === '1',
8
14
  },
15
+ models: async ({ client }) => {
16
+ const { LOBE_DEFAULT_MODEL_LIST } = await import('@/config/aiModels');
17
+
18
+ const functionCallKeywords = [
19
+ 'deepseek-v3',
20
+ ];
21
+
22
+ const reasoningKeywords = [
23
+ 'deepseek-r1',
24
+ ];
25
+
26
+ const modelsPage = await client.models.list() as any;
27
+ const modelList: TencentCloudModelCard[] = modelsPage.data;
28
+
29
+ return modelList
30
+ .map((model) => {
31
+ const knownModel = LOBE_DEFAULT_MODEL_LIST.find((m) => model.id.toLowerCase() === m.id.toLowerCase());
32
+
33
+ return {
34
+ contextWindowTokens: knownModel?.contextWindowTokens ?? undefined,
35
+ displayName: knownModel?.displayName ?? undefined,
36
+ enabled: knownModel?.enabled || false,
37
+ functionCall:
38
+ functionCallKeywords.some(keyword => model.id.toLowerCase().includes(keyword))
39
+ || knownModel?.abilities?.functionCall
40
+ || false,
41
+ id: model.id,
42
+ reasoning:
43
+ reasoningKeywords.some(keyword => model.id.toLowerCase().includes(keyword))
44
+ || knownModel?.abilities?.reasoning
45
+ || false,
46
+ vision:
47
+ knownModel?.abilities?.vision
48
+ || false,
49
+ };
50
+ })
51
+ .filter(Boolean) as ChatModelCard[];
52
+ },
9
53
  provider: ModelProvider.TencentCloud,
10
54
  });
@@ -2,7 +2,6 @@ import { ModelProvider } from '../types';
2
2
  import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
3
3
  import { TogetherAIModel } from './type';
4
4
 
5
- import { LOBE_DEFAULT_MODEL_LIST } from '@/config/aiModels';
6
5
  import type { ChatModelCard } from '@/types/llm';
7
6
 
8
7
  export const LobeTogetherAI = LobeOpenAICompatibleFactory({
@@ -17,6 +16,8 @@ export const LobeTogetherAI = LobeOpenAICompatibleFactory({
17
16
  chatCompletion: () => process.env.DEBUG_TOGETHERAI_CHAT_COMPLETION === '1',
18
17
  },
19
18
  models: async ({ client }) => {
19
+ const { LOBE_DEFAULT_MODEL_LIST } = await import('@/config/aiModels');
20
+
20
21
  const visionKeywords = [
21
22
  'qvq',
22
23
  'vision',
@@ -34,17 +35,29 @@ export const LobeTogetherAI = LobeOpenAICompatibleFactory({
34
35
 
35
36
  return modelList
36
37
  .map((model) => {
38
+ const knownModel = LOBE_DEFAULT_MODEL_LIST.find((m) => model.name.toLowerCase() === m.id.toLowerCase());
39
+
37
40
  return {
38
- contextWindowTokens: LOBE_DEFAULT_MODEL_LIST.find((m) => model.name === m.id)?.contextWindowTokens ?? undefined,
41
+ contextWindowTokens: knownModel?.contextWindowTokens ?? undefined,
39
42
  description: model.description,
40
43
  displayName: model.display_name,
41
- enabled: LOBE_DEFAULT_MODEL_LIST.find((m) => model.name === m.id)?.enabled || false,
42
- functionCall: model.description?.toLowerCase().includes('function calling'),
44
+ enabled: knownModel?.enabled || false,
45
+ functionCall:
46
+ model.description?.toLowerCase().includes('function calling')
47
+ || knownModel?.abilities?.functionCall
48
+ || false,
43
49
  id: model.name,
44
50
  maxOutput: model.context_length,
45
- reasoning: reasoningKeywords.some(keyword => model.name.toLowerCase().includes(keyword)),
51
+ reasoning:
52
+ reasoningKeywords.some(keyword => model.name.toLowerCase().includes(keyword))
53
+ || knownModel?.abilities?.reasoning
54
+ || false,
46
55
  tokens: model.context_length,
47
- vision: model.description?.toLowerCase().includes('vision') || visionKeywords.some(keyword => model.name?.toLowerCase().includes(keyword)),
56
+ vision:
57
+ model.description?.toLowerCase().includes('vision')
58
+ || visionKeywords.some(keyword => model.name?.toLowerCase().includes(keyword))
59
+ || knownModel?.abilities?.vision
60
+ || false,
48
61
  };
49
62
  })
50
63
  .filter(Boolean) as ChatModelCard[];
@@ -1,7 +1,7 @@
1
1
  import { ModelProvider } from '../types';
2
2
  import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
3
3
 
4
- import { LOBE_DEFAULT_MODEL_LIST } from '@/config/aiModels';
4
+ import type { ChatModelCard } from '@/types/llm';
5
5
 
6
6
  export interface XAIModelCard {
7
7
  id: string;
@@ -12,19 +12,34 @@ export const LobeXAI = LobeOpenAICompatibleFactory({
12
12
  debug: {
13
13
  chatCompletion: () => process.env.DEBUG_XAI_CHAT_COMPLETION === '1',
14
14
  },
15
- models: {
16
- transformModel: (m) => {
17
- const model = m as unknown as XAIModelCard;
15
+ models: async ({ client }) => {
16
+ const { LOBE_DEFAULT_MODEL_LIST } = await import('@/config/aiModels');
18
17
 
19
- return {
20
- contextWindowTokens: LOBE_DEFAULT_MODEL_LIST.find((m) => model.id === m.id)?.contextWindowTokens ?? undefined,
21
- displayName: LOBE_DEFAULT_MODEL_LIST.find((m) => model.id === m.id)?.displayName ?? undefined,
22
- enabled: LOBE_DEFAULT_MODEL_LIST.find((m) => model.id === m.id)?.enabled || false,
23
- functionCall: true,
24
- id: model.id,
25
- vision: model.id.toLowerCase().includes('vision'),
26
- };
27
- },
18
+ const modelsPage = await client.models.list() as any;
19
+ const modelList: XAIModelCard[] = modelsPage.data;
20
+
21
+ return modelList
22
+ .map((model) => {
23
+ const knownModel = LOBE_DEFAULT_MODEL_LIST.find((m) => model.id.toLowerCase() === m.id.toLowerCase());
24
+
25
+ return {
26
+ contextWindowTokens: knownModel?.contextWindowTokens ?? undefined,
27
+ displayName: knownModel?.displayName ?? undefined,
28
+ enabled: knownModel?.enabled || false,
29
+ functionCall:
30
+ knownModel?.abilities?.functionCall
31
+ || false,
32
+ id: model.id,
33
+ reasoning:
34
+ knownModel?.abilities?.reasoning
35
+ || false,
36
+ vision:
37
+ model.id.toLowerCase().includes('vision')
38
+ || knownModel?.abilities?.vision
39
+ || false,
40
+ };
41
+ })
42
+ .filter(Boolean) as ChatModelCard[];
28
43
  },
29
44
  provider: ModelProvider.XAI,
30
45
  });
@@ -1,7 +1,7 @@
1
1
  import { ModelProvider } from '../types';
2
2
  import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
3
3
 
4
- import { LOBE_DEFAULT_MODEL_LIST } from '@/config/aiModels';
4
+ import type { ChatModelCard } from '@/types/llm';
5
5
 
6
6
  export interface ZeroOneModelCard {
7
7
  id: string;
@@ -12,19 +12,35 @@ export const LobeZeroOneAI = LobeOpenAICompatibleFactory({
12
12
  debug: {
13
13
  chatCompletion: () => process.env.DEBUG_ZEROONE_CHAT_COMPLETION === '1',
14
14
  },
15
- models: {
16
- transformModel: (m) => {
17
- const model = m as unknown as ZeroOneModelCard;
15
+ models: async ({ client }) => {
16
+ const { LOBE_DEFAULT_MODEL_LIST } = await import('@/config/aiModels');
18
17
 
19
- return {
20
- contextWindowTokens: LOBE_DEFAULT_MODEL_LIST.find((m) => model.id === m.id)?.contextWindowTokens ?? undefined,
21
- displayName: LOBE_DEFAULT_MODEL_LIST.find((m) => model.id === m.id)?.displayName ?? undefined,
22
- enabled: LOBE_DEFAULT_MODEL_LIST.find((m) => model.id === m.id)?.enabled || false,
23
- functionCall: model.id.toLowerCase().includes('fc'),
24
- id: model.id,
25
- vision: model.id.toLowerCase().includes('vision'),
26
- };
27
- },
18
+ const modelsPage = await client.models.list() as any;
19
+ const modelList: ZeroOneModelCard[] = modelsPage.data;
20
+
21
+ return modelList
22
+ .map((model) => {
23
+ const knownModel = LOBE_DEFAULT_MODEL_LIST.find((m) => model.id.toLowerCase() === m.id.toLowerCase());
24
+
25
+ return {
26
+ contextWindowTokens: knownModel?.contextWindowTokens ?? undefined,
27
+ displayName: knownModel?.displayName ?? undefined,
28
+ enabled: knownModel?.enabled || false,
29
+ functionCall:
30
+ model.id.toLowerCase().includes('fc')
31
+ || knownModel?.abilities?.functionCall
32
+ || false,
33
+ id: model.id,
34
+ reasoning:
35
+ knownModel?.abilities?.reasoning
36
+ || false,
37
+ vision:
38
+ model.id.toLowerCase().includes('vision')
39
+ || knownModel?.abilities?.vision
40
+ || false,
41
+ };
42
+ })
43
+ .filter(Boolean) as ChatModelCard[];
28
44
  },
29
45
  provider: ModelProvider.ZeroOne,
30
46
  });
@@ -5,21 +5,12 @@ import { Mock, afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
5
5
  import { ChatStreamCallbacks, LobeOpenAI, LobeOpenAICompatibleRuntime } from '@/libs/agent-runtime';
6
6
  import * as debugStreamModule from '@/libs/agent-runtime/utils/debugStream';
7
7
 
8
- import * as authTokenModule from './authToken';
9
8
  import { LobeZhipuAI } from './index';
10
9
 
11
10
  const bizErrorType = 'ProviderBizError';
12
11
  const invalidErrorType = 'InvalidProviderAPIKey';
13
12
 
14
- // Mock相关依赖
15
- vi.mock('./authToken');
16
-
17
13
  describe('LobeZhipuAI', () => {
18
- beforeEach(() => {
19
- // Mock generateApiToken
20
- vi.spyOn(authTokenModule, 'generateApiToken').mockResolvedValue('mocked_token');
21
- });
22
-
23
14
  afterEach(() => {
24
15
  vi.restoreAllMocks();
25
16
  });
@@ -3,7 +3,6 @@ import OpenAI from 'openai';
3
3
  import { ChatStreamPayload, ModelProvider } from '../types';
4
4
  import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
5
5
 
6
- import { LOBE_DEFAULT_MODEL_LIST } from '@/config/aiModels';
7
6
  import type { ChatModelCard } from '@/types/llm';
8
7
 
9
8
  export interface ZhipuModelCard {
@@ -49,6 +48,8 @@ export const LobeZhipuAI = LobeOpenAICompatibleFactory({
49
48
  chatCompletion: () => process.env.DEBUG_ZHIPU_CHAT_COMPLETION === '1',
50
49
  },
51
50
  models: async ({ client }) => {
51
+ const { LOBE_DEFAULT_MODEL_LIST } = await import('@/config/aiModels');
52
+
52
53
  // ref: https://open.bigmodel.cn/console/modelcenter/square
53
54
  client.baseURL = 'https://open.bigmodel.cn/api/fine-tuning/model_center/list?pageSize=100&pageNum=1';
54
55
 
@@ -57,15 +58,26 @@ export const LobeZhipuAI = LobeOpenAICompatibleFactory({
57
58
 
58
59
  return modelList
59
60
  .map((model) => {
61
+ const knownModel = LOBE_DEFAULT_MODEL_LIST.find((m) => model.modelCode.toLowerCase() === m.id.toLowerCase());
62
+
60
63
  return {
61
- contextWindowTokens: LOBE_DEFAULT_MODEL_LIST.find((m) => model.modelCode === m.id)?.contextWindowTokens ?? undefined,
64
+ contextWindowTokens: knownModel?.contextWindowTokens ?? undefined,
62
65
  description: model.description,
63
66
  displayName: model.modelName,
64
- enabled: LOBE_DEFAULT_MODEL_LIST.find((m) => model.modelCode === m.id)?.enabled || false,
65
- functionCall: model.modelCode.toLowerCase().includes('glm-4') && !model.modelCode.toLowerCase().includes('glm-4v'),
67
+ enabled: knownModel?.enabled || false,
68
+ functionCall:
69
+ model.modelCode.toLowerCase().includes('glm-4') && !model.modelCode.toLowerCase().includes('glm-4v')
70
+ || knownModel?.abilities?.functionCall
71
+ || false,
66
72
  id: model.modelCode,
67
- reasoning: model.modelCode.toLowerCase().includes('glm-zero-preview'),
68
- vision: model.modelCode.toLowerCase().includes('glm-4v'),
73
+ reasoning:
74
+ model.modelCode.toLowerCase().includes('glm-zero-preview')
75
+ || knownModel?.abilities?.reasoning
76
+ || false,
77
+ vision:
78
+ model.modelCode.toLowerCase().includes('glm-4v')
79
+ || knownModel?.abilities?.vision
80
+ || false,
69
81
  };
70
82
  })
71
83
  .filter(Boolean) as ChatModelCard[];
@@ -67,7 +67,6 @@ export default {
67
67
  createNewAiProvider: {
68
68
  apiKey: {
69
69
  placeholder: '请填写你的 API Key',
70
- required: '请填写你的 API Key',
71
70
  title: 'API Key',
72
71
  },
73
72
  basicTitle: '基本信息',
@@ -95,7 +94,7 @@ export default {
95
94
  title: '服务商名称',
96
95
  },
97
96
  proxyUrl: {
98
- placeholder: '请填写你的请求地址,如果不填则会使用 SDK 对应的请求地址',
97
+ required: '请填写代理地址',
99
98
  title: '代理地址',
100
99
  },
101
100
  sdkType: {
@@ -63,10 +63,10 @@ export class Manifest {
63
63
  screenshots: screenshots.map((item) => this._getScreenshot(item)),
64
64
  short_name: name,
65
65
  splash_pages: null,
66
- start_url: '.',
66
+ start_url: '/chat',
67
67
  tab_strip: {
68
68
  new_tab_button: {
69
- url: '/',
69
+ url: '/chat',
70
70
  },
71
71
  },
72
72
  theme_color: color,
@@ -1,18 +0,0 @@
1
- // @vitest-environment node
2
- import { generateApiToken } from './authToken';
3
-
4
- describe('generateApiToken', () => {
5
- it('should throw an error if no apiKey is provided', async () => {
6
- await expect(generateApiToken()).rejects.toThrow('Invalid apiKey');
7
- });
8
-
9
- it('should throw an error if apiKey is invalid', async () => {
10
- await expect(generateApiToken('invalid')).rejects.toThrow('Invalid apiKey');
11
- });
12
-
13
- it('should return a token if a valid apiKey is provided', async () => {
14
- const apiKey = 'id.secret';
15
- const token = await generateApiToken(apiKey);
16
- expect(token).toBeDefined();
17
- });
18
- });
@@ -1,22 +0,0 @@
1
- import { SignJWT } from 'jose';
2
-
3
- export const generateApiToken = async (apiKey?: string): Promise<string> => {
4
- if (!apiKey) {
5
- throw new Error('Invalid apiKey');
6
- }
7
-
8
- const [id, secret] = apiKey.split('.');
9
- if (!id || !secret) {
10
- throw new Error('Invalid apiKey');
11
- }
12
-
13
- const expSeconds = 60 * 60 * 24 * 30;
14
- const nowSeconds = Math.floor(Date.now() / 1000);
15
- const exp = nowSeconds + expSeconds;
16
- const jwtConstructor = new SignJWT({ api_key: id })
17
- .setProtectedHeader({ alg: 'HS256', sign_type: 'SIGN', typ: 'JWT' })
18
- .setExpirationTime(exp)
19
- .setIssuedAt(nowSeconds);
20
-
21
- return jwtConstructor.sign(new TextEncoder().encode(secret));
22
- };