@lobehub/chat 1.53.11 → 1.53.12

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (41)
  1. package/CHANGELOG.md +25 -0
  2. package/changelog/v1.json +9 -0
  3. package/package.json +1 -1
  4. package/src/config/aiModels/spark.ts +9 -0
  5. package/src/libs/agent-runtime/ai360/index.ts +37 -21
  6. package/src/libs/agent-runtime/anthropic/index.ts +17 -5
  7. package/src/libs/agent-runtime/baichuan/index.ts +11 -2
  8. package/src/libs/agent-runtime/cloudflare/index.ts +22 -7
  9. package/src/libs/agent-runtime/deepseek/index.ts +29 -13
  10. package/src/libs/agent-runtime/fireworksai/index.ts +30 -18
  11. package/src/libs/agent-runtime/giteeai/index.ts +46 -30
  12. package/src/libs/agent-runtime/github/index.test.ts +0 -49
  13. package/src/libs/agent-runtime/github/index.ts +18 -6
  14. package/src/libs/agent-runtime/google/index.ts +17 -7
  15. package/src/libs/agent-runtime/groq/index.ts +43 -27
  16. package/src/libs/agent-runtime/higress/index.ts +45 -25
  17. package/src/libs/agent-runtime/huggingface/index.ts +20 -9
  18. package/src/libs/agent-runtime/hunyuan/index.ts +34 -18
  19. package/src/libs/agent-runtime/internlm/index.ts +27 -12
  20. package/src/libs/agent-runtime/lmstudio/index.ts +34 -0
  21. package/src/libs/agent-runtime/mistral/index.ts +24 -14
  22. package/src/libs/agent-runtime/moonshot/index.ts +28 -13
  23. package/src/libs/agent-runtime/novita/index.ts +35 -18
  24. package/src/libs/agent-runtime/ollama/index.test.ts +20 -1
  25. package/src/libs/agent-runtime/ollama/index.ts +33 -5
  26. package/src/libs/agent-runtime/openai/__snapshots__/index.test.ts.snap +108 -0
  27. package/src/libs/agent-runtime/openai/index.ts +43 -27
  28. package/src/libs/agent-runtime/openrouter/__snapshots__/index.test.ts.snap +39 -11
  29. package/src/libs/agent-runtime/openrouter/index.ts +51 -33
  30. package/src/libs/agent-runtime/qwen/index.ts +45 -29
  31. package/src/libs/agent-runtime/sensenova/index.ts +24 -6
  32. package/src/libs/agent-runtime/siliconcloud/index.ts +50 -34
  33. package/src/libs/agent-runtime/stepfun/index.ts +42 -26
  34. package/src/libs/agent-runtime/tencentcloud/index.ts +44 -0
  35. package/src/libs/agent-runtime/togetherai/index.ts +19 -6
  36. package/src/libs/agent-runtime/xai/index.ts +28 -13
  37. package/src/libs/agent-runtime/zeroone/index.ts +29 -13
  38. package/src/libs/agent-runtime/zhipu/index.test.ts +0 -9
  39. package/src/libs/agent-runtime/zhipu/index.ts +18 -6
  40. package/src/libs/agent-runtime/zhipu/authToken.test.ts +0 -18
  41. package/src/libs/agent-runtime/zhipu/authToken.ts +0 -22
@@ -1,7 +1,6 @@
1
1
  import { ModelProvider } from '../types';
2
2
  import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
3
3
 
4
- import { LOBE_DEFAULT_MODEL_LIST } from '@/config/aiModels';
5
4
  import type { ChatModelCard } from '@/types/llm';
6
5
 
7
6
  export interface SenseNovaModelCard {
@@ -33,10 +32,17 @@ export const LobeSenseNovaAI = LobeOpenAICompatibleFactory({
33
32
  chatCompletion: () => process.env.DEBUG_SENSENOVA_CHAT_COMPLETION === '1',
34
33
  },
35
34
  models: async ({ client }) => {
35
+ const { LOBE_DEFAULT_MODEL_LIST } = await import('@/config/aiModels');
36
+
36
37
  const functionCallKeywords = [
38
+ 'deepseek-v3',
37
39
  'sensechat-5',
38
40
  ];
39
41
 
42
+ const reasoningKeywords = [
43
+ 'deepseek-r1'
44
+ ];
45
+
40
46
  client.baseURL = 'https://api.sensenova.cn/v1/llm';
41
47
 
42
48
  const modelsPage = await client.models.list() as any;
@@ -44,13 +50,25 @@ export const LobeSenseNovaAI = LobeOpenAICompatibleFactory({
44
50
 
45
51
  return modelList
46
52
  .map((model) => {
53
+ const knownModel = LOBE_DEFAULT_MODEL_LIST.find((m) => model.id.toLowerCase() === m.id.toLowerCase());
54
+
47
55
  return {
48
- contextWindowTokens: LOBE_DEFAULT_MODEL_LIST.find((m) => model.id === m.id)?.contextWindowTokens ?? undefined,
49
- displayName: LOBE_DEFAULT_MODEL_LIST.find((m) => model.id === m.id)?.displayName ?? undefined,
50
- enabled: LOBE_DEFAULT_MODEL_LIST.find((m) => model.id === m.id)?.enabled || false,
51
- functionCall: functionCallKeywords.some(keyword => model.id.toLowerCase().includes(keyword)),
56
+ contextWindowTokens: knownModel?.contextWindowTokens ?? undefined,
57
+ displayName: knownModel?.displayName ?? undefined,
58
+ enabled: knownModel?.enabled || false,
59
+ functionCall:
60
+ functionCallKeywords.some(keyword => model.id.toLowerCase().includes(keyword))
61
+ || knownModel?.abilities?.functionCall
62
+ || false,
52
63
  id: model.id,
53
- vision: model.id.toLowerCase().includes('vision'),
64
+ reasoning:
65
+ reasoningKeywords.some(keyword => model.id.toLowerCase().includes(keyword))
66
+ || knownModel?.abilities?.reasoning
67
+ || false,
68
+ vision:
69
+ model.id.toLowerCase().includes('vision')
70
+ || knownModel?.abilities?.vision
71
+ || false,
54
72
  };
55
73
  })
56
74
  .filter(Boolean) as ChatModelCard[];
@@ -2,7 +2,7 @@ import { AgentRuntimeErrorType } from '../error';
2
2
  import { ChatCompletionErrorPayload, ModelProvider } from '../types';
3
3
  import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
4
4
 
5
- import { LOBE_DEFAULT_MODEL_LIST } from '@/config/aiModels';
5
+ import type { ChatModelCard } from '@/types/llm';
6
6
 
7
7
  export interface SiliconCloudModelCard {
8
8
  id: string;
@@ -52,43 +52,59 @@ export const LobeSiliconCloudAI = LobeOpenAICompatibleFactory({
52
52
  bizError: AgentRuntimeErrorType.ProviderBizError,
53
53
  invalidAPIKey: AgentRuntimeErrorType.InvalidProviderAPIKey,
54
54
  },
55
- models: {
56
- transformModel: (m) => {
57
- const functionCallKeywords = [
58
- 'qwen/qwen2.5',
59
- 'thudm/glm-4',
60
- 'deepseek-ai/deepseek',
61
- 'internlm/internlm2_5',
62
- 'meta-llama/meta-llama-3.1',
63
- 'meta-llama/meta-llama-3.3',
64
- ];
55
+ models: async ({ client }) => {
56
+ const { LOBE_DEFAULT_MODEL_LIST } = await import('@/config/aiModels');
65
57
 
66
- const visionKeywords = [
67
- 'opengvlab/internvl',
68
- 'qwen/qvq',
69
- 'qwen/qwen2-vl',
70
- 'teleai/telemm',
71
- 'deepseek-ai/deepseek-vl',
72
- ];
58
+ const functionCallKeywords = [
59
+ 'qwen/qwen2.5',
60
+ 'thudm/glm-4',
61
+ 'deepseek-ai/deepseek',
62
+ 'internlm/internlm2_5',
63
+ 'meta-llama/meta-llama-3.1',
64
+ 'meta-llama/meta-llama-3.3',
65
+ ];
73
66
 
74
- const reasoningKeywords = [
75
- 'deepseek-ai/deepseek-r1',
76
- 'qwen/qvq',
77
- 'qwen/qwq',
78
- ];
67
+ const visionKeywords = [
68
+ 'opengvlab/internvl',
69
+ 'qwen/qvq',
70
+ 'qwen/qwen2-vl',
71
+ 'teleai/telemm',
72
+ 'deepseek-ai/deepseek-vl',
73
+ ];
79
74
 
80
- const model = m as unknown as SiliconCloudModelCard;
75
+ const reasoningKeywords = [
76
+ 'deepseek-ai/deepseek-r1',
77
+ 'qwen/qvq',
78
+ 'qwen/qwq',
79
+ ];
81
80
 
82
- return {
83
- contextWindowTokens: LOBE_DEFAULT_MODEL_LIST.find((m) => model.id === m.id)?.contextWindowTokens ?? undefined,
84
- displayName: LOBE_DEFAULT_MODEL_LIST.find((m) => model.id === m.id)?.displayName ?? undefined,
85
- enabled: LOBE_DEFAULT_MODEL_LIST.find((m) => model.id === m.id)?.enabled || false,
86
- functionCall: functionCallKeywords.some(keyword => model.id.toLowerCase().includes(keyword)) && !model.id.toLowerCase().includes('deepseek-r1'),
87
- id: model.id,
88
- reasoning: reasoningKeywords.some(keyword => model.id.toLowerCase().includes(keyword)),
89
- vision: visionKeywords.some(keyword => model.id.toLowerCase().includes(keyword)),
90
- };
91
- },
81
+ const modelsPage = await client.models.list() as any;
82
+ const modelList: SiliconCloudModelCard[] = modelsPage.data;
83
+
84
+ return modelList
85
+ .map((model) => {
86
+ const knownModel = LOBE_DEFAULT_MODEL_LIST.find((m) => model.id.toLowerCase() === m.id.toLowerCase());
87
+
88
+ return {
89
+ contextWindowTokens: knownModel?.contextWindowTokens ?? undefined,
90
+ displayName: knownModel?.displayName ?? undefined,
91
+ enabled: knownModel?.enabled || false,
92
+ functionCall:
93
+ functionCallKeywords.some(keyword => model.id.toLowerCase().includes(keyword)) && !model.id.toLowerCase().includes('deepseek-r1')
94
+ || knownModel?.abilities?.functionCall
95
+ || false,
96
+ id: model.id,
97
+ reasoning:
98
+ reasoningKeywords.some(keyword => model.id.toLowerCase().includes(keyword))
99
+ || knownModel?.abilities?.reasoning
100
+ || false,
101
+ vision:
102
+ visionKeywords.some(keyword => model.id.toLowerCase().includes(keyword))
103
+ || knownModel?.abilities?.vision
104
+ || false,
105
+ };
106
+ })
107
+ .filter(Boolean) as ChatModelCard[];
92
108
  },
93
109
  provider: ModelProvider.SiliconCloud,
94
110
  });
@@ -1,7 +1,7 @@
1
1
  import { ModelProvider } from '../types';
2
2
  import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
3
3
 
4
- import { LOBE_DEFAULT_MODEL_LIST } from '@/config/aiModels';
4
+ import type { ChatModelCard } from '@/types/llm';
5
5
 
6
6
  export interface StepfunModelCard {
7
7
  id: string;
@@ -20,32 +20,48 @@ export const LobeStepfunAI = LobeOpenAICompatibleFactory({
20
20
  debug: {
21
21
  chatCompletion: () => process.env.DEBUG_STEPFUN_CHAT_COMPLETION === '1',
22
22
  },
23
- models: {
24
- transformModel: (m) => {
25
- // ref: https://platform.stepfun.com/docs/llm/modeloverview
26
- const functionCallKeywords = [
27
- 'step-1-',
28
- 'step-1o-',
29
- 'step-1v-',
30
- 'step-2-',
31
- ];
32
-
33
- const visionKeywords = [
34
- 'step-1o-',
35
- 'step-1v-',
36
- ];
37
-
38
- const model = m as unknown as StepfunModelCard;
23
+ models: async ({ client }) => {
24
+ const { LOBE_DEFAULT_MODEL_LIST } = await import('@/config/aiModels');
39
25
 
40
- return {
41
- contextWindowTokens: LOBE_DEFAULT_MODEL_LIST.find((m) => model.id === m.id)?.contextWindowTokens ?? undefined,
42
- displayName: LOBE_DEFAULT_MODEL_LIST.find((m) => model.id === m.id)?.displayName ?? undefined,
43
- enabled: LOBE_DEFAULT_MODEL_LIST.find((m) => model.id === m.id)?.enabled || false,
44
- functionCall: functionCallKeywords.some(keyword => model.id.toLowerCase().includes(keyword)),
45
- id: model.id,
46
- vision: visionKeywords.some(keyword => model.id.toLowerCase().includes(keyword)),
47
- };
48
- },
26
+ // ref: https://platform.stepfun.com/docs/llm/modeloverview
27
+ const functionCallKeywords = [
28
+ 'step-1-',
29
+ 'step-1o-',
30
+ 'step-1v-',
31
+ 'step-2-',
32
+ ];
33
+
34
+ const visionKeywords = [
35
+ 'step-1o-',
36
+ 'step-1v-',
37
+ ];
38
+
39
+ const modelsPage = await client.models.list() as any;
40
+ const modelList: StepfunModelCard[] = modelsPage.data;
41
+
42
+ return modelList
43
+ .map((model) => {
44
+ const knownModel = LOBE_DEFAULT_MODEL_LIST.find((m) => model.id.toLowerCase() === m.id.toLowerCase());
45
+
46
+ return {
47
+ contextWindowTokens: knownModel?.contextWindowTokens ?? undefined,
48
+ displayName: knownModel?.displayName ?? undefined,
49
+ enabled: knownModel?.enabled || false,
50
+ functionCall:
51
+ functionCallKeywords.some(keyword => model.id.toLowerCase().includes(keyword))
52
+ || knownModel?.abilities?.functionCall
53
+ || false,
54
+ id: model.id,
55
+ reasoning:
56
+ knownModel?.abilities?.reasoning
57
+ || false,
58
+ vision:
59
+ visionKeywords.some(keyword => model.id.toLowerCase().includes(keyword))
60
+ || knownModel?.abilities?.vision
61
+ || false,
62
+ };
63
+ })
64
+ .filter(Boolean) as ChatModelCard[];
49
65
  },
50
66
  provider: ModelProvider.Stepfun,
51
67
  });
@@ -1,10 +1,54 @@
1
1
  import { ModelProvider } from '../types';
2
2
  import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
3
3
 
4
+ import type { ChatModelCard } from '@/types/llm';
5
+
6
+ export interface TencentCloudModelCard {
7
+ id: string;
8
+ }
9
+
4
10
  export const LobeTencentCloudAI = LobeOpenAICompatibleFactory({
5
11
  baseURL: 'https://api.lkeap.cloud.tencent.com/v1',
6
12
  debug: {
7
13
  chatCompletion: () => process.env.DEBUG_TENCENT_CLOUD_CHAT_COMPLETION === '1',
8
14
  },
15
+ models: async ({ client }) => {
16
+ const { LOBE_DEFAULT_MODEL_LIST } = await import('@/config/aiModels');
17
+
18
+ const functionCallKeywords = [
19
+ 'deepseek-v3',
20
+ ];
21
+
22
+ const reasoningKeywords = [
23
+ 'deepseek-r1',
24
+ ];
25
+
26
+ const modelsPage = await client.models.list() as any;
27
+ const modelList: TencentCloudModelCard[] = modelsPage.data;
28
+
29
+ return modelList
30
+ .map((model) => {
31
+ const knownModel = LOBE_DEFAULT_MODEL_LIST.find((m) => model.id.toLowerCase() === m.id.toLowerCase());
32
+
33
+ return {
34
+ contextWindowTokens: knownModel?.contextWindowTokens ?? undefined,
35
+ displayName: knownModel?.displayName ?? undefined,
36
+ enabled: knownModel?.enabled || false,
37
+ functionCall:
38
+ functionCallKeywords.some(keyword => model.id.toLowerCase().includes(keyword))
39
+ || knownModel?.abilities?.functionCall
40
+ || false,
41
+ id: model.id,
42
+ reasoning:
43
+ reasoningKeywords.some(keyword => model.id.toLowerCase().includes(keyword))
44
+ || knownModel?.abilities?.reasoning
45
+ || false,
46
+ vision:
47
+ knownModel?.abilities?.vision
48
+ || false,
49
+ };
50
+ })
51
+ .filter(Boolean) as ChatModelCard[];
52
+ },
9
53
  provider: ModelProvider.TencentCloud,
10
54
  });
@@ -2,7 +2,6 @@ import { ModelProvider } from '../types';
2
2
  import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
3
3
  import { TogetherAIModel } from './type';
4
4
 
5
- import { LOBE_DEFAULT_MODEL_LIST } from '@/config/aiModels';
6
5
  import type { ChatModelCard } from '@/types/llm';
7
6
 
8
7
  export const LobeTogetherAI = LobeOpenAICompatibleFactory({
@@ -17,6 +16,8 @@ export const LobeTogetherAI = LobeOpenAICompatibleFactory({
17
16
  chatCompletion: () => process.env.DEBUG_TOGETHERAI_CHAT_COMPLETION === '1',
18
17
  },
19
18
  models: async ({ client }) => {
19
+ const { LOBE_DEFAULT_MODEL_LIST } = await import('@/config/aiModels');
20
+
20
21
  const visionKeywords = [
21
22
  'qvq',
22
23
  'vision',
@@ -34,17 +35,29 @@ export const LobeTogetherAI = LobeOpenAICompatibleFactory({
34
35
 
35
36
  return modelList
36
37
  .map((model) => {
38
+ const knownModel = LOBE_DEFAULT_MODEL_LIST.find((m) => model.name.toLowerCase() === m.id.toLowerCase());
39
+
37
40
  return {
38
- contextWindowTokens: LOBE_DEFAULT_MODEL_LIST.find((m) => model.name === m.id)?.contextWindowTokens ?? undefined,
41
+ contextWindowTokens: knownModel?.contextWindowTokens ?? undefined,
39
42
  description: model.description,
40
43
  displayName: model.display_name,
41
- enabled: LOBE_DEFAULT_MODEL_LIST.find((m) => model.name === m.id)?.enabled || false,
42
- functionCall: model.description?.toLowerCase().includes('function calling'),
44
+ enabled: knownModel?.enabled || false,
45
+ functionCall:
46
+ model.description?.toLowerCase().includes('function calling')
47
+ || knownModel?.abilities?.functionCall
48
+ || false,
43
49
  id: model.name,
44
50
  maxOutput: model.context_length,
45
- reasoning: reasoningKeywords.some(keyword => model.name.toLowerCase().includes(keyword)),
51
+ reasoning:
52
+ reasoningKeywords.some(keyword => model.name.toLowerCase().includes(keyword))
53
+ || knownModel?.abilities?.reasoning
54
+ || false,
46
55
  tokens: model.context_length,
47
- vision: model.description?.toLowerCase().includes('vision') || visionKeywords.some(keyword => model.name?.toLowerCase().includes(keyword)),
56
+ vision:
57
+ model.description?.toLowerCase().includes('vision')
58
+ || visionKeywords.some(keyword => model.name?.toLowerCase().includes(keyword))
59
+ || knownModel?.abilities?.vision
60
+ || false,
48
61
  };
49
62
  })
50
63
  .filter(Boolean) as ChatModelCard[];
@@ -1,7 +1,7 @@
1
1
  import { ModelProvider } from '../types';
2
2
  import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
3
3
 
4
- import { LOBE_DEFAULT_MODEL_LIST } from '@/config/aiModels';
4
+ import type { ChatModelCard } from '@/types/llm';
5
5
 
6
6
  export interface XAIModelCard {
7
7
  id: string;
@@ -12,19 +12,34 @@ export const LobeXAI = LobeOpenAICompatibleFactory({
12
12
  debug: {
13
13
  chatCompletion: () => process.env.DEBUG_XAI_CHAT_COMPLETION === '1',
14
14
  },
15
- models: {
16
- transformModel: (m) => {
17
- const model = m as unknown as XAIModelCard;
15
+ models: async ({ client }) => {
16
+ const { LOBE_DEFAULT_MODEL_LIST } = await import('@/config/aiModels');
18
17
 
19
- return {
20
- contextWindowTokens: LOBE_DEFAULT_MODEL_LIST.find((m) => model.id === m.id)?.contextWindowTokens ?? undefined,
21
- displayName: LOBE_DEFAULT_MODEL_LIST.find((m) => model.id === m.id)?.displayName ?? undefined,
22
- enabled: LOBE_DEFAULT_MODEL_LIST.find((m) => model.id === m.id)?.enabled || false,
23
- functionCall: true,
24
- id: model.id,
25
- vision: model.id.toLowerCase().includes('vision'),
26
- };
27
- },
18
+ const modelsPage = await client.models.list() as any;
19
+ const modelList: XAIModelCard[] = modelsPage.data;
20
+
21
+ return modelList
22
+ .map((model) => {
23
+ const knownModel = LOBE_DEFAULT_MODEL_LIST.find((m) => model.id.toLowerCase() === m.id.toLowerCase());
24
+
25
+ return {
26
+ contextWindowTokens: knownModel?.contextWindowTokens ?? undefined,
27
+ displayName: knownModel?.displayName ?? undefined,
28
+ enabled: knownModel?.enabled || false,
29
+ functionCall:
30
+ knownModel?.abilities?.functionCall
31
+ || false,
32
+ id: model.id,
33
+ reasoning:
34
+ knownModel?.abilities?.reasoning
35
+ || false,
36
+ vision:
37
+ model.id.toLowerCase().includes('vision')
38
+ || knownModel?.abilities?.vision
39
+ || false,
40
+ };
41
+ })
42
+ .filter(Boolean) as ChatModelCard[];
28
43
  },
29
44
  provider: ModelProvider.XAI,
30
45
  });
@@ -1,7 +1,7 @@
1
1
  import { ModelProvider } from '../types';
2
2
  import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
3
3
 
4
- import { LOBE_DEFAULT_MODEL_LIST } from '@/config/aiModels';
4
+ import type { ChatModelCard } from '@/types/llm';
5
5
 
6
6
  export interface ZeroOneModelCard {
7
7
  id: string;
@@ -12,19 +12,35 @@ export const LobeZeroOneAI = LobeOpenAICompatibleFactory({
12
12
  debug: {
13
13
  chatCompletion: () => process.env.DEBUG_ZEROONE_CHAT_COMPLETION === '1',
14
14
  },
15
- models: {
16
- transformModel: (m) => {
17
- const model = m as unknown as ZeroOneModelCard;
15
+ models: async ({ client }) => {
16
+ const { LOBE_DEFAULT_MODEL_LIST } = await import('@/config/aiModels');
18
17
 
19
- return {
20
- contextWindowTokens: LOBE_DEFAULT_MODEL_LIST.find((m) => model.id === m.id)?.contextWindowTokens ?? undefined,
21
- displayName: LOBE_DEFAULT_MODEL_LIST.find((m) => model.id === m.id)?.displayName ?? undefined,
22
- enabled: LOBE_DEFAULT_MODEL_LIST.find((m) => model.id === m.id)?.enabled || false,
23
- functionCall: model.id.toLowerCase().includes('fc'),
24
- id: model.id,
25
- vision: model.id.toLowerCase().includes('vision'),
26
- };
27
- },
18
+ const modelsPage = await client.models.list() as any;
19
+ const modelList: ZeroOneModelCard[] = modelsPage.data;
20
+
21
+ return modelList
22
+ .map((model) => {
23
+ const knownModel = LOBE_DEFAULT_MODEL_LIST.find((m) => model.id.toLowerCase() === m.id.toLowerCase());
24
+
25
+ return {
26
+ contextWindowTokens: knownModel?.contextWindowTokens ?? undefined,
27
+ displayName: knownModel?.displayName ?? undefined,
28
+ enabled: knownModel?.enabled || false,
29
+ functionCall:
30
+ model.id.toLowerCase().includes('fc')
31
+ || knownModel?.abilities?.functionCall
32
+ || false,
33
+ id: model.id,
34
+ reasoning:
35
+ knownModel?.abilities?.reasoning
36
+ || false,
37
+ vision:
38
+ model.id.toLowerCase().includes('vision')
39
+ || knownModel?.abilities?.vision
40
+ || false,
41
+ };
42
+ })
43
+ .filter(Boolean) as ChatModelCard[];
28
44
  },
29
45
  provider: ModelProvider.ZeroOne,
30
46
  });
@@ -5,21 +5,12 @@ import { Mock, afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
5
5
  import { ChatStreamCallbacks, LobeOpenAI, LobeOpenAICompatibleRuntime } from '@/libs/agent-runtime';
6
6
  import * as debugStreamModule from '@/libs/agent-runtime/utils/debugStream';
7
7
 
8
- import * as authTokenModule from './authToken';
9
8
  import { LobeZhipuAI } from './index';
10
9
 
11
10
  const bizErrorType = 'ProviderBizError';
12
11
  const invalidErrorType = 'InvalidProviderAPIKey';
13
12
 
14
- // Mock相关依赖
15
- vi.mock('./authToken');
16
-
17
13
  describe('LobeZhipuAI', () => {
18
- beforeEach(() => {
19
- // Mock generateApiToken
20
- vi.spyOn(authTokenModule, 'generateApiToken').mockResolvedValue('mocked_token');
21
- });
22
-
23
14
  afterEach(() => {
24
15
  vi.restoreAllMocks();
25
16
  });
@@ -3,7 +3,6 @@ import OpenAI from 'openai';
3
3
  import { ChatStreamPayload, ModelProvider } from '../types';
4
4
  import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
5
5
 
6
- import { LOBE_DEFAULT_MODEL_LIST } from '@/config/aiModels';
7
6
  import type { ChatModelCard } from '@/types/llm';
8
7
 
9
8
  export interface ZhipuModelCard {
@@ -49,6 +48,8 @@ export const LobeZhipuAI = LobeOpenAICompatibleFactory({
49
48
  chatCompletion: () => process.env.DEBUG_ZHIPU_CHAT_COMPLETION === '1',
50
49
  },
51
50
  models: async ({ client }) => {
51
+ const { LOBE_DEFAULT_MODEL_LIST } = await import('@/config/aiModels');
52
+
52
53
  // ref: https://open.bigmodel.cn/console/modelcenter/square
53
54
  client.baseURL = 'https://open.bigmodel.cn/api/fine-tuning/model_center/list?pageSize=100&pageNum=1';
54
55
 
@@ -57,15 +58,26 @@ export const LobeZhipuAI = LobeOpenAICompatibleFactory({
57
58
 
58
59
  return modelList
59
60
  .map((model) => {
61
+ const knownModel = LOBE_DEFAULT_MODEL_LIST.find((m) => model.modelCode.toLowerCase() === m.id.toLowerCase());
62
+
60
63
  return {
61
- contextWindowTokens: LOBE_DEFAULT_MODEL_LIST.find((m) => model.modelCode === m.id)?.contextWindowTokens ?? undefined,
64
+ contextWindowTokens: knownModel?.contextWindowTokens ?? undefined,
62
65
  description: model.description,
63
66
  displayName: model.modelName,
64
- enabled: LOBE_DEFAULT_MODEL_LIST.find((m) => model.modelCode === m.id)?.enabled || false,
65
- functionCall: model.modelCode.toLowerCase().includes('glm-4') && !model.modelCode.toLowerCase().includes('glm-4v'),
67
+ enabled: knownModel?.enabled || false,
68
+ functionCall:
69
+ model.modelCode.toLowerCase().includes('glm-4') && !model.modelCode.toLowerCase().includes('glm-4v')
70
+ || knownModel?.abilities?.functionCall
71
+ || false,
66
72
  id: model.modelCode,
67
- reasoning: model.modelCode.toLowerCase().includes('glm-zero-preview'),
68
- vision: model.modelCode.toLowerCase().includes('glm-4v'),
73
+ reasoning:
74
+ model.modelCode.toLowerCase().includes('glm-zero-preview')
75
+ || knownModel?.abilities?.reasoning
76
+ || false,
77
+ vision:
78
+ model.modelCode.toLowerCase().includes('glm-4v')
79
+ || knownModel?.abilities?.vision
80
+ || false,
69
81
  };
70
82
  })
71
83
  .filter(Boolean) as ChatModelCard[];
@@ -1,18 +0,0 @@
1
- // @vitest-environment node
2
- import { generateApiToken } from './authToken';
3
-
4
- describe('generateApiToken', () => {
5
- it('should throw an error if no apiKey is provided', async () => {
6
- await expect(generateApiToken()).rejects.toThrow('Invalid apiKey');
7
- });
8
-
9
- it('should throw an error if apiKey is invalid', async () => {
10
- await expect(generateApiToken('invalid')).rejects.toThrow('Invalid apiKey');
11
- });
12
-
13
- it('should return a token if a valid apiKey is provided', async () => {
14
- const apiKey = 'id.secret';
15
- const token = await generateApiToken(apiKey);
16
- expect(token).toBeDefined();
17
- });
18
- });
@@ -1,22 +0,0 @@
1
- import { SignJWT } from 'jose';
2
-
3
- export const generateApiToken = async (apiKey?: string): Promise<string> => {
4
- if (!apiKey) {
5
- throw new Error('Invalid apiKey');
6
- }
7
-
8
- const [id, secret] = apiKey.split('.');
9
- if (!id || !secret) {
10
- throw new Error('Invalid apiKey');
11
- }
12
-
13
- const expSeconds = 60 * 60 * 24 * 30;
14
- const nowSeconds = Math.floor(Date.now() / 1000);
15
- const exp = nowSeconds + expSeconds;
16
- const jwtConstructor = new SignJWT({ api_key: id })
17
- .setProtectedHeader({ alg: 'HS256', sign_type: 'SIGN', typ: 'JWT' })
18
- .setExpirationTime(exp)
19
- .setIssuedAt(nowSeconds);
20
-
21
- return jwtConstructor.sign(new TextEncoder().encode(secret));
22
- };