@lobehub/chat 1.53.11 → 1.54.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (103) hide show
  1. package/CHANGELOG.md +58 -0
  2. package/Dockerfile +2 -0
  3. package/Dockerfile.database +2 -0
  4. package/changelog/v1.json +21 -0
  5. package/locales/ar/modelProvider.json +0 -1
  6. package/locales/ar/setting.json +12 -9
  7. package/locales/bg-BG/modelProvider.json +0 -1
  8. package/locales/bg-BG/setting.json +12 -9
  9. package/locales/de-DE/modelProvider.json +0 -1
  10. package/locales/de-DE/setting.json +13 -10
  11. package/locales/en-US/modelProvider.json +0 -1
  12. package/locales/en-US/setting.json +12 -9
  13. package/locales/es-ES/modelProvider.json +0 -1
  14. package/locales/es-ES/setting.json +12 -9
  15. package/locales/fa-IR/modelProvider.json +0 -1
  16. package/locales/fa-IR/setting.json +12 -9
  17. package/locales/fr-FR/modelProvider.json +0 -1
  18. package/locales/fr-FR/setting.json +12 -9
  19. package/locales/it-IT/modelProvider.json +0 -1
  20. package/locales/it-IT/setting.json +13 -10
  21. package/locales/ja-JP/modelProvider.json +0 -1
  22. package/locales/ja-JP/setting.json +12 -9
  23. package/locales/ko-KR/modelProvider.json +0 -1
  24. package/locales/ko-KR/setting.json +12 -9
  25. package/locales/nl-NL/modelProvider.json +0 -1
  26. package/locales/nl-NL/setting.json +12 -9
  27. package/locales/pl-PL/modelProvider.json +0 -1
  28. package/locales/pl-PL/setting.json +12 -9
  29. package/locales/pt-BR/modelProvider.json +0 -1
  30. package/locales/pt-BR/setting.json +13 -10
  31. package/locales/ru-RU/modelProvider.json +0 -1
  32. package/locales/ru-RU/setting.json +12 -9
  33. package/locales/tr-TR/modelProvider.json +0 -1
  34. package/locales/tr-TR/setting.json +12 -9
  35. package/locales/vi-VN/modelProvider.json +0 -1
  36. package/locales/vi-VN/setting.json +12 -9
  37. package/locales/zh-CN/modelProvider.json +0 -1
  38. package/locales/zh-CN/setting.json +13 -10
  39. package/locales/zh-TW/modelProvider.json +0 -1
  40. package/locales/zh-TW/setting.json +12 -9
  41. package/package.json +1 -1
  42. package/src/app/[variants]/(main)/chat/(workspace)/@conversation/features/ChatInput/Desktop/index.tsx +1 -1
  43. package/src/app/[variants]/(main)/settings/llm/ProviderList/providers.tsx +2 -0
  44. package/src/components/InfoTooltip/index.tsx +25 -0
  45. package/src/components/Loading/UpdateLoading/index.tsx +19 -0
  46. package/src/config/aiModels/index.ts +3 -0
  47. package/src/config/aiModels/nvidia.ts +155 -0
  48. package/src/config/aiModels/spark.ts +9 -0
  49. package/src/config/llm.ts +6 -0
  50. package/src/config/modelProviders/index.ts +4 -0
  51. package/src/config/modelProviders/nvidia.ts +21 -0
  52. package/src/features/ChatInput/ActionBar/Params/ParamsControls.tsx +95 -0
  53. package/src/features/ChatInput/ActionBar/Params/index.tsx +47 -0
  54. package/src/features/ChatInput/ActionBar/config.ts +3 -2
  55. package/src/features/ChatInput/Mobile/index.tsx +1 -1
  56. package/src/features/ModelParamsControl/FrequencyPenalty.tsx +37 -0
  57. package/src/features/ModelParamsControl/PresencePenalty.tsx +35 -0
  58. package/src/features/ModelParamsControl/Temperature.tsx +71 -0
  59. package/src/features/ModelParamsControl/TopP.tsx +39 -0
  60. package/src/features/ModelParamsControl/index.ts +4 -0
  61. package/src/libs/agent-runtime/AgentRuntime.ts +7 -0
  62. package/src/libs/agent-runtime/ai360/index.ts +37 -21
  63. package/src/libs/agent-runtime/anthropic/index.ts +17 -5
  64. package/src/libs/agent-runtime/baichuan/index.ts +11 -2
  65. package/src/libs/agent-runtime/cloudflare/index.ts +22 -7
  66. package/src/libs/agent-runtime/deepseek/index.ts +29 -13
  67. package/src/libs/agent-runtime/fireworksai/index.ts +30 -18
  68. package/src/libs/agent-runtime/giteeai/index.ts +46 -30
  69. package/src/libs/agent-runtime/github/index.test.ts +0 -49
  70. package/src/libs/agent-runtime/github/index.ts +18 -6
  71. package/src/libs/agent-runtime/google/index.ts +17 -7
  72. package/src/libs/agent-runtime/groq/index.ts +43 -27
  73. package/src/libs/agent-runtime/higress/index.ts +45 -25
  74. package/src/libs/agent-runtime/huggingface/index.ts +20 -9
  75. package/src/libs/agent-runtime/hunyuan/index.ts +34 -18
  76. package/src/libs/agent-runtime/internlm/index.ts +27 -12
  77. package/src/libs/agent-runtime/lmstudio/index.ts +34 -0
  78. package/src/libs/agent-runtime/mistral/index.ts +24 -14
  79. package/src/libs/agent-runtime/moonshot/index.ts +28 -13
  80. package/src/libs/agent-runtime/novita/index.ts +35 -18
  81. package/src/libs/agent-runtime/nvidia/index.ts +44 -0
  82. package/src/libs/agent-runtime/ollama/index.test.ts +20 -1
  83. package/src/libs/agent-runtime/ollama/index.ts +33 -5
  84. package/src/libs/agent-runtime/openai/__snapshots__/index.test.ts.snap +108 -0
  85. package/src/libs/agent-runtime/openai/index.ts +43 -27
  86. package/src/libs/agent-runtime/openrouter/__snapshots__/index.test.ts.snap +39 -11
  87. package/src/libs/agent-runtime/openrouter/index.ts +51 -33
  88. package/src/libs/agent-runtime/qwen/index.ts +45 -29
  89. package/src/libs/agent-runtime/sensenova/index.ts +24 -6
  90. package/src/libs/agent-runtime/siliconcloud/index.ts +50 -34
  91. package/src/libs/agent-runtime/stepfun/index.ts +42 -26
  92. package/src/libs/agent-runtime/tencentcloud/index.ts +44 -0
  93. package/src/libs/agent-runtime/togetherai/index.ts +19 -6
  94. package/src/libs/agent-runtime/types/type.ts +1 -0
  95. package/src/libs/agent-runtime/xai/index.ts +28 -13
  96. package/src/libs/agent-runtime/zeroone/index.ts +29 -13
  97. package/src/libs/agent-runtime/zhipu/index.test.ts +0 -9
  98. package/src/libs/agent-runtime/zhipu/index.ts +18 -6
  99. package/src/locales/default/setting.ts +12 -9
  100. package/src/types/user/settings/keyVaults.ts +1 -0
  101. package/src/features/ChatInput/ActionBar/Temperature.tsx +0 -49
  102. package/src/libs/agent-runtime/zhipu/authToken.test.ts +0 -18
  103. package/src/libs/agent-runtime/zhipu/authToken.ts +0 -22
@@ -1,7 +1,6 @@
1
1
  import { ModelProvider } from '../types';
2
2
  import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
3
3
 
4
- import { LOBE_DEFAULT_MODEL_LIST } from '@/config/aiModels';
5
4
  import type { ChatModelCard } from '@/types/llm';
6
5
 
7
6
  export interface SenseNovaModelCard {
@@ -33,10 +32,17 @@ export const LobeSenseNovaAI = LobeOpenAICompatibleFactory({
33
32
  chatCompletion: () => process.env.DEBUG_SENSENOVA_CHAT_COMPLETION === '1',
34
33
  },
35
34
  models: async ({ client }) => {
35
+ const { LOBE_DEFAULT_MODEL_LIST } = await import('@/config/aiModels');
36
+
36
37
  const functionCallKeywords = [
38
+ 'deepseek-v3',
37
39
  'sensechat-5',
38
40
  ];
39
41
 
42
+ const reasoningKeywords = [
43
+ 'deepseek-r1'
44
+ ];
45
+
40
46
  client.baseURL = 'https://api.sensenova.cn/v1/llm';
41
47
 
42
48
  const modelsPage = await client.models.list() as any;
@@ -44,13 +50,25 @@ export const LobeSenseNovaAI = LobeOpenAICompatibleFactory({
44
50
 
45
51
  return modelList
46
52
  .map((model) => {
53
+ const knownModel = LOBE_DEFAULT_MODEL_LIST.find((m) => model.id.toLowerCase() === m.id.toLowerCase());
54
+
47
55
  return {
48
- contextWindowTokens: LOBE_DEFAULT_MODEL_LIST.find((m) => model.id === m.id)?.contextWindowTokens ?? undefined,
49
- displayName: LOBE_DEFAULT_MODEL_LIST.find((m) => model.id === m.id)?.displayName ?? undefined,
50
- enabled: LOBE_DEFAULT_MODEL_LIST.find((m) => model.id === m.id)?.enabled || false,
51
- functionCall: functionCallKeywords.some(keyword => model.id.toLowerCase().includes(keyword)),
56
+ contextWindowTokens: knownModel?.contextWindowTokens ?? undefined,
57
+ displayName: knownModel?.displayName ?? undefined,
58
+ enabled: knownModel?.enabled || false,
59
+ functionCall:
60
+ functionCallKeywords.some(keyword => model.id.toLowerCase().includes(keyword))
61
+ || knownModel?.abilities?.functionCall
62
+ || false,
52
63
  id: model.id,
53
- vision: model.id.toLowerCase().includes('vision'),
64
+ reasoning:
65
+ reasoningKeywords.some(keyword => model.id.toLowerCase().includes(keyword))
66
+ || knownModel?.abilities?.reasoning
67
+ || false,
68
+ vision:
69
+ model.id.toLowerCase().includes('vision')
70
+ || knownModel?.abilities?.vision
71
+ || false,
54
72
  };
55
73
  })
56
74
  .filter(Boolean) as ChatModelCard[];
@@ -2,7 +2,7 @@ import { AgentRuntimeErrorType } from '../error';
2
2
  import { ChatCompletionErrorPayload, ModelProvider } from '../types';
3
3
  import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
4
4
 
5
- import { LOBE_DEFAULT_MODEL_LIST } from '@/config/aiModels';
5
+ import type { ChatModelCard } from '@/types/llm';
6
6
 
7
7
  export interface SiliconCloudModelCard {
8
8
  id: string;
@@ -52,43 +52,59 @@ export const LobeSiliconCloudAI = LobeOpenAICompatibleFactory({
52
52
  bizError: AgentRuntimeErrorType.ProviderBizError,
53
53
  invalidAPIKey: AgentRuntimeErrorType.InvalidProviderAPIKey,
54
54
  },
55
- models: {
56
- transformModel: (m) => {
57
- const functionCallKeywords = [
58
- 'qwen/qwen2.5',
59
- 'thudm/glm-4',
60
- 'deepseek-ai/deepseek',
61
- 'internlm/internlm2_5',
62
- 'meta-llama/meta-llama-3.1',
63
- 'meta-llama/meta-llama-3.3',
64
- ];
55
+ models: async ({ client }) => {
56
+ const { LOBE_DEFAULT_MODEL_LIST } = await import('@/config/aiModels');
65
57
 
66
- const visionKeywords = [
67
- 'opengvlab/internvl',
68
- 'qwen/qvq',
69
- 'qwen/qwen2-vl',
70
- 'teleai/telemm',
71
- 'deepseek-ai/deepseek-vl',
72
- ];
58
+ const functionCallKeywords = [
59
+ 'qwen/qwen2.5',
60
+ 'thudm/glm-4',
61
+ 'deepseek-ai/deepseek',
62
+ 'internlm/internlm2_5',
63
+ 'meta-llama/meta-llama-3.1',
64
+ 'meta-llama/meta-llama-3.3',
65
+ ];
73
66
 
74
- const reasoningKeywords = [
75
- 'deepseek-ai/deepseek-r1',
76
- 'qwen/qvq',
77
- 'qwen/qwq',
78
- ];
67
+ const visionKeywords = [
68
+ 'opengvlab/internvl',
69
+ 'qwen/qvq',
70
+ 'qwen/qwen2-vl',
71
+ 'teleai/telemm',
72
+ 'deepseek-ai/deepseek-vl',
73
+ ];
79
74
 
80
- const model = m as unknown as SiliconCloudModelCard;
75
+ const reasoningKeywords = [
76
+ 'deepseek-ai/deepseek-r1',
77
+ 'qwen/qvq',
78
+ 'qwen/qwq',
79
+ ];
81
80
 
82
- return {
83
- contextWindowTokens: LOBE_DEFAULT_MODEL_LIST.find((m) => model.id === m.id)?.contextWindowTokens ?? undefined,
84
- displayName: LOBE_DEFAULT_MODEL_LIST.find((m) => model.id === m.id)?.displayName ?? undefined,
85
- enabled: LOBE_DEFAULT_MODEL_LIST.find((m) => model.id === m.id)?.enabled || false,
86
- functionCall: functionCallKeywords.some(keyword => model.id.toLowerCase().includes(keyword)) && !model.id.toLowerCase().includes('deepseek-r1'),
87
- id: model.id,
88
- reasoning: reasoningKeywords.some(keyword => model.id.toLowerCase().includes(keyword)),
89
- vision: visionKeywords.some(keyword => model.id.toLowerCase().includes(keyword)),
90
- };
91
- },
81
+ const modelsPage = await client.models.list() as any;
82
+ const modelList: SiliconCloudModelCard[] = modelsPage.data;
83
+
84
+ return modelList
85
+ .map((model) => {
86
+ const knownModel = LOBE_DEFAULT_MODEL_LIST.find((m) => model.id.toLowerCase() === m.id.toLowerCase());
87
+
88
+ return {
89
+ contextWindowTokens: knownModel?.contextWindowTokens ?? undefined,
90
+ displayName: knownModel?.displayName ?? undefined,
91
+ enabled: knownModel?.enabled || false,
92
+ functionCall:
93
+ functionCallKeywords.some(keyword => model.id.toLowerCase().includes(keyword)) && !model.id.toLowerCase().includes('deepseek-r1')
94
+ || knownModel?.abilities?.functionCall
95
+ || false,
96
+ id: model.id,
97
+ reasoning:
98
+ reasoningKeywords.some(keyword => model.id.toLowerCase().includes(keyword))
99
+ || knownModel?.abilities?.reasoning
100
+ || false,
101
+ vision:
102
+ visionKeywords.some(keyword => model.id.toLowerCase().includes(keyword))
103
+ || knownModel?.abilities?.vision
104
+ || false,
105
+ };
106
+ })
107
+ .filter(Boolean) as ChatModelCard[];
92
108
  },
93
109
  provider: ModelProvider.SiliconCloud,
94
110
  });
@@ -1,7 +1,7 @@
1
1
  import { ModelProvider } from '../types';
2
2
  import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
3
3
 
4
- import { LOBE_DEFAULT_MODEL_LIST } from '@/config/aiModels';
4
+ import type { ChatModelCard } from '@/types/llm';
5
5
 
6
6
  export interface StepfunModelCard {
7
7
  id: string;
@@ -20,32 +20,48 @@ export const LobeStepfunAI = LobeOpenAICompatibleFactory({
20
20
  debug: {
21
21
  chatCompletion: () => process.env.DEBUG_STEPFUN_CHAT_COMPLETION === '1',
22
22
  },
23
- models: {
24
- transformModel: (m) => {
25
- // ref: https://platform.stepfun.com/docs/llm/modeloverview
26
- const functionCallKeywords = [
27
- 'step-1-',
28
- 'step-1o-',
29
- 'step-1v-',
30
- 'step-2-',
31
- ];
32
-
33
- const visionKeywords = [
34
- 'step-1o-',
35
- 'step-1v-',
36
- ];
37
-
38
- const model = m as unknown as StepfunModelCard;
23
+ models: async ({ client }) => {
24
+ const { LOBE_DEFAULT_MODEL_LIST } = await import('@/config/aiModels');
39
25
 
40
- return {
41
- contextWindowTokens: LOBE_DEFAULT_MODEL_LIST.find((m) => model.id === m.id)?.contextWindowTokens ?? undefined,
42
- displayName: LOBE_DEFAULT_MODEL_LIST.find((m) => model.id === m.id)?.displayName ?? undefined,
43
- enabled: LOBE_DEFAULT_MODEL_LIST.find((m) => model.id === m.id)?.enabled || false,
44
- functionCall: functionCallKeywords.some(keyword => model.id.toLowerCase().includes(keyword)),
45
- id: model.id,
46
- vision: visionKeywords.some(keyword => model.id.toLowerCase().includes(keyword)),
47
- };
48
- },
26
+ // ref: https://platform.stepfun.com/docs/llm/modeloverview
27
+ const functionCallKeywords = [
28
+ 'step-1-',
29
+ 'step-1o-',
30
+ 'step-1v-',
31
+ 'step-2-',
32
+ ];
33
+
34
+ const visionKeywords = [
35
+ 'step-1o-',
36
+ 'step-1v-',
37
+ ];
38
+
39
+ const modelsPage = await client.models.list() as any;
40
+ const modelList: StepfunModelCard[] = modelsPage.data;
41
+
42
+ return modelList
43
+ .map((model) => {
44
+ const knownModel = LOBE_DEFAULT_MODEL_LIST.find((m) => model.id.toLowerCase() === m.id.toLowerCase());
45
+
46
+ return {
47
+ contextWindowTokens: knownModel?.contextWindowTokens ?? undefined,
48
+ displayName: knownModel?.displayName ?? undefined,
49
+ enabled: knownModel?.enabled || false,
50
+ functionCall:
51
+ functionCallKeywords.some(keyword => model.id.toLowerCase().includes(keyword))
52
+ || knownModel?.abilities?.functionCall
53
+ || false,
54
+ id: model.id,
55
+ reasoning:
56
+ knownModel?.abilities?.reasoning
57
+ || false,
58
+ vision:
59
+ visionKeywords.some(keyword => model.id.toLowerCase().includes(keyword))
60
+ || knownModel?.abilities?.vision
61
+ || false,
62
+ };
63
+ })
64
+ .filter(Boolean) as ChatModelCard[];
49
65
  },
50
66
  provider: ModelProvider.Stepfun,
51
67
  });
@@ -1,10 +1,54 @@
1
1
  import { ModelProvider } from '../types';
2
2
  import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
3
3
 
4
+ import type { ChatModelCard } from '@/types/llm';
5
+
6
+ export interface TencentCloudModelCard {
7
+ id: string;
8
+ }
9
+
4
10
  export const LobeTencentCloudAI = LobeOpenAICompatibleFactory({
5
11
  baseURL: 'https://api.lkeap.cloud.tencent.com/v1',
6
12
  debug: {
7
13
  chatCompletion: () => process.env.DEBUG_TENCENT_CLOUD_CHAT_COMPLETION === '1',
8
14
  },
15
+ models: async ({ client }) => {
16
+ const { LOBE_DEFAULT_MODEL_LIST } = await import('@/config/aiModels');
17
+
18
+ const functionCallKeywords = [
19
+ 'deepseek-v3',
20
+ ];
21
+
22
+ const reasoningKeywords = [
23
+ 'deepseek-r1',
24
+ ];
25
+
26
+ const modelsPage = await client.models.list() as any;
27
+ const modelList: TencentCloudModelCard[] = modelsPage.data;
28
+
29
+ return modelList
30
+ .map((model) => {
31
+ const knownModel = LOBE_DEFAULT_MODEL_LIST.find((m) => model.id.toLowerCase() === m.id.toLowerCase());
32
+
33
+ return {
34
+ contextWindowTokens: knownModel?.contextWindowTokens ?? undefined,
35
+ displayName: knownModel?.displayName ?? undefined,
36
+ enabled: knownModel?.enabled || false,
37
+ functionCall:
38
+ functionCallKeywords.some(keyword => model.id.toLowerCase().includes(keyword))
39
+ || knownModel?.abilities?.functionCall
40
+ || false,
41
+ id: model.id,
42
+ reasoning:
43
+ reasoningKeywords.some(keyword => model.id.toLowerCase().includes(keyword))
44
+ || knownModel?.abilities?.reasoning
45
+ || false,
46
+ vision:
47
+ knownModel?.abilities?.vision
48
+ || false,
49
+ };
50
+ })
51
+ .filter(Boolean) as ChatModelCard[];
52
+ },
9
53
  provider: ModelProvider.TencentCloud,
10
54
  });
@@ -2,7 +2,6 @@ import { ModelProvider } from '../types';
2
2
  import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
3
3
  import { TogetherAIModel } from './type';
4
4
 
5
- import { LOBE_DEFAULT_MODEL_LIST } from '@/config/aiModels';
6
5
  import type { ChatModelCard } from '@/types/llm';
7
6
 
8
7
  export const LobeTogetherAI = LobeOpenAICompatibleFactory({
@@ -17,6 +16,8 @@ export const LobeTogetherAI = LobeOpenAICompatibleFactory({
17
16
  chatCompletion: () => process.env.DEBUG_TOGETHERAI_CHAT_COMPLETION === '1',
18
17
  },
19
18
  models: async ({ client }) => {
19
+ const { LOBE_DEFAULT_MODEL_LIST } = await import('@/config/aiModels');
20
+
20
21
  const visionKeywords = [
21
22
  'qvq',
22
23
  'vision',
@@ -34,17 +35,29 @@ export const LobeTogetherAI = LobeOpenAICompatibleFactory({
34
35
 
35
36
  return modelList
36
37
  .map((model) => {
38
+ const knownModel = LOBE_DEFAULT_MODEL_LIST.find((m) => model.name.toLowerCase() === m.id.toLowerCase());
39
+
37
40
  return {
38
- contextWindowTokens: LOBE_DEFAULT_MODEL_LIST.find((m) => model.name === m.id)?.contextWindowTokens ?? undefined,
41
+ contextWindowTokens: knownModel?.contextWindowTokens ?? undefined,
39
42
  description: model.description,
40
43
  displayName: model.display_name,
41
- enabled: LOBE_DEFAULT_MODEL_LIST.find((m) => model.name === m.id)?.enabled || false,
42
- functionCall: model.description?.toLowerCase().includes('function calling'),
44
+ enabled: knownModel?.enabled || false,
45
+ functionCall:
46
+ model.description?.toLowerCase().includes('function calling')
47
+ || knownModel?.abilities?.functionCall
48
+ || false,
43
49
  id: model.name,
44
50
  maxOutput: model.context_length,
45
- reasoning: reasoningKeywords.some(keyword => model.name.toLowerCase().includes(keyword)),
51
+ reasoning:
52
+ reasoningKeywords.some(keyword => model.name.toLowerCase().includes(keyword))
53
+ || knownModel?.abilities?.reasoning
54
+ || false,
46
55
  tokens: model.context_length,
47
- vision: model.description?.toLowerCase().includes('vision') || visionKeywords.some(keyword => model.name?.toLowerCase().includes(keyword)),
56
+ vision:
57
+ model.description?.toLowerCase().includes('vision')
58
+ || visionKeywords.some(keyword => model.name?.toLowerCase().includes(keyword))
59
+ || knownModel?.abilities?.vision
60
+ || false,
48
61
  };
49
62
  })
50
63
  .filter(Boolean) as ChatModelCard[];
@@ -45,6 +45,7 @@ export enum ModelProvider {
45
45
  Mistral = 'mistral',
46
46
  Moonshot = 'moonshot',
47
47
  Novita = 'novita',
48
+ Nvidia = 'nvidia',
48
49
  Ollama = 'ollama',
49
50
  OpenAI = 'openai',
50
51
  OpenRouter = 'openrouter',
@@ -1,7 +1,7 @@
1
1
  import { ModelProvider } from '../types';
2
2
  import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
3
3
 
4
- import { LOBE_DEFAULT_MODEL_LIST } from '@/config/aiModels';
4
+ import type { ChatModelCard } from '@/types/llm';
5
5
 
6
6
  export interface XAIModelCard {
7
7
  id: string;
@@ -12,19 +12,34 @@ export const LobeXAI = LobeOpenAICompatibleFactory({
12
12
  debug: {
13
13
  chatCompletion: () => process.env.DEBUG_XAI_CHAT_COMPLETION === '1',
14
14
  },
15
- models: {
16
- transformModel: (m) => {
17
- const model = m as unknown as XAIModelCard;
15
+ models: async ({ client }) => {
16
+ const { LOBE_DEFAULT_MODEL_LIST } = await import('@/config/aiModels');
18
17
 
19
- return {
20
- contextWindowTokens: LOBE_DEFAULT_MODEL_LIST.find((m) => model.id === m.id)?.contextWindowTokens ?? undefined,
21
- displayName: LOBE_DEFAULT_MODEL_LIST.find((m) => model.id === m.id)?.displayName ?? undefined,
22
- enabled: LOBE_DEFAULT_MODEL_LIST.find((m) => model.id === m.id)?.enabled || false,
23
- functionCall: true,
24
- id: model.id,
25
- vision: model.id.toLowerCase().includes('vision'),
26
- };
27
- },
18
+ const modelsPage = await client.models.list() as any;
19
+ const modelList: XAIModelCard[] = modelsPage.data;
20
+
21
+ return modelList
22
+ .map((model) => {
23
+ const knownModel = LOBE_DEFAULT_MODEL_LIST.find((m) => model.id.toLowerCase() === m.id.toLowerCase());
24
+
25
+ return {
26
+ contextWindowTokens: knownModel?.contextWindowTokens ?? undefined,
27
+ displayName: knownModel?.displayName ?? undefined,
28
+ enabled: knownModel?.enabled || false,
29
+ functionCall:
30
+ knownModel?.abilities?.functionCall
31
+ || false,
32
+ id: model.id,
33
+ reasoning:
34
+ knownModel?.abilities?.reasoning
35
+ || false,
36
+ vision:
37
+ model.id.toLowerCase().includes('vision')
38
+ || knownModel?.abilities?.vision
39
+ || false,
40
+ };
41
+ })
42
+ .filter(Boolean) as ChatModelCard[];
28
43
  },
29
44
  provider: ModelProvider.XAI,
30
45
  });
@@ -1,7 +1,7 @@
1
1
  import { ModelProvider } from '../types';
2
2
  import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
3
3
 
4
- import { LOBE_DEFAULT_MODEL_LIST } from '@/config/aiModels';
4
+ import type { ChatModelCard } from '@/types/llm';
5
5
 
6
6
  export interface ZeroOneModelCard {
7
7
  id: string;
@@ -12,19 +12,35 @@ export const LobeZeroOneAI = LobeOpenAICompatibleFactory({
12
12
  debug: {
13
13
  chatCompletion: () => process.env.DEBUG_ZEROONE_CHAT_COMPLETION === '1',
14
14
  },
15
- models: {
16
- transformModel: (m) => {
17
- const model = m as unknown as ZeroOneModelCard;
15
+ models: async ({ client }) => {
16
+ const { LOBE_DEFAULT_MODEL_LIST } = await import('@/config/aiModels');
18
17
 
19
- return {
20
- contextWindowTokens: LOBE_DEFAULT_MODEL_LIST.find((m) => model.id === m.id)?.contextWindowTokens ?? undefined,
21
- displayName: LOBE_DEFAULT_MODEL_LIST.find((m) => model.id === m.id)?.displayName ?? undefined,
22
- enabled: LOBE_DEFAULT_MODEL_LIST.find((m) => model.id === m.id)?.enabled || false,
23
- functionCall: model.id.toLowerCase().includes('fc'),
24
- id: model.id,
25
- vision: model.id.toLowerCase().includes('vision'),
26
- };
27
- },
18
+ const modelsPage = await client.models.list() as any;
19
+ const modelList: ZeroOneModelCard[] = modelsPage.data;
20
+
21
+ return modelList
22
+ .map((model) => {
23
+ const knownModel = LOBE_DEFAULT_MODEL_LIST.find((m) => model.id.toLowerCase() === m.id.toLowerCase());
24
+
25
+ return {
26
+ contextWindowTokens: knownModel?.contextWindowTokens ?? undefined,
27
+ displayName: knownModel?.displayName ?? undefined,
28
+ enabled: knownModel?.enabled || false,
29
+ functionCall:
30
+ model.id.toLowerCase().includes('fc')
31
+ || knownModel?.abilities?.functionCall
32
+ || false,
33
+ id: model.id,
34
+ reasoning:
35
+ knownModel?.abilities?.reasoning
36
+ || false,
37
+ vision:
38
+ model.id.toLowerCase().includes('vision')
39
+ || knownModel?.abilities?.vision
40
+ || false,
41
+ };
42
+ })
43
+ .filter(Boolean) as ChatModelCard[];
28
44
  },
29
45
  provider: ModelProvider.ZeroOne,
30
46
  });
@@ -5,21 +5,12 @@ import { Mock, afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
5
5
  import { ChatStreamCallbacks, LobeOpenAI, LobeOpenAICompatibleRuntime } from '@/libs/agent-runtime';
6
6
  import * as debugStreamModule from '@/libs/agent-runtime/utils/debugStream';
7
7
 
8
- import * as authTokenModule from './authToken';
9
8
  import { LobeZhipuAI } from './index';
10
9
 
11
10
  const bizErrorType = 'ProviderBizError';
12
11
  const invalidErrorType = 'InvalidProviderAPIKey';
13
12
 
14
- // Mock相关依赖
15
- vi.mock('./authToken');
16
-
17
13
  describe('LobeZhipuAI', () => {
18
- beforeEach(() => {
19
- // Mock generateApiToken
20
- vi.spyOn(authTokenModule, 'generateApiToken').mockResolvedValue('mocked_token');
21
- });
22
-
23
14
  afterEach(() => {
24
15
  vi.restoreAllMocks();
25
16
  });
@@ -3,7 +3,6 @@ import OpenAI from 'openai';
3
3
  import { ChatStreamPayload, ModelProvider } from '../types';
4
4
  import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
5
5
 
6
- import { LOBE_DEFAULT_MODEL_LIST } from '@/config/aiModels';
7
6
  import type { ChatModelCard } from '@/types/llm';
8
7
 
9
8
  export interface ZhipuModelCard {
@@ -49,6 +48,8 @@ export const LobeZhipuAI = LobeOpenAICompatibleFactory({
49
48
  chatCompletion: () => process.env.DEBUG_ZHIPU_CHAT_COMPLETION === '1',
50
49
  },
51
50
  models: async ({ client }) => {
51
+ const { LOBE_DEFAULT_MODEL_LIST } = await import('@/config/aiModels');
52
+
52
53
  // ref: https://open.bigmodel.cn/console/modelcenter/square
53
54
  client.baseURL = 'https://open.bigmodel.cn/api/fine-tuning/model_center/list?pageSize=100&pageNum=1';
54
55
 
@@ -57,15 +58,26 @@ export const LobeZhipuAI = LobeOpenAICompatibleFactory({
57
58
 
58
59
  return modelList
59
60
  .map((model) => {
61
+ const knownModel = LOBE_DEFAULT_MODEL_LIST.find((m) => model.modelCode.toLowerCase() === m.id.toLowerCase());
62
+
60
63
  return {
61
- contextWindowTokens: LOBE_DEFAULT_MODEL_LIST.find((m) => model.modelCode === m.id)?.contextWindowTokens ?? undefined,
64
+ contextWindowTokens: knownModel?.contextWindowTokens ?? undefined,
62
65
  description: model.description,
63
66
  displayName: model.modelName,
64
- enabled: LOBE_DEFAULT_MODEL_LIST.find((m) => model.modelCode === m.id)?.enabled || false,
65
- functionCall: model.modelCode.toLowerCase().includes('glm-4') && !model.modelCode.toLowerCase().includes('glm-4v'),
67
+ enabled: knownModel?.enabled || false,
68
+ functionCall:
69
+ model.modelCode.toLowerCase().includes('glm-4') && !model.modelCode.toLowerCase().includes('glm-4v')
70
+ || knownModel?.abilities?.functionCall
71
+ || false,
66
72
  id: model.modelCode,
67
- reasoning: model.modelCode.toLowerCase().includes('glm-zero-preview'),
68
- vision: model.modelCode.toLowerCase().includes('glm-4v'),
73
+ reasoning:
74
+ model.modelCode.toLowerCase().includes('glm-zero-preview')
75
+ || knownModel?.abilities?.reasoning
76
+ || false,
77
+ vision:
78
+ model.modelCode.toLowerCase().includes('glm-4v')
79
+ || knownModel?.abilities?.vision
80
+ || false,
69
81
  };
70
82
  })
71
83
  .filter(Boolean) as ChatModelCard[];
@@ -206,8 +206,8 @@ export default {
206
206
  title: '开启推理强度调整',
207
207
  },
208
208
  frequencyPenalty: {
209
- desc: '值越大,越有可能降低重复字词',
210
- title: '频率惩罚度',
209
+ desc: '值越大,用词越丰富多样;值越低,用词更朴实简单',
210
+ title: '词汇丰富度',
211
211
  },
212
212
  maxTokens: {
213
213
  desc: '单次交互所用的最大 Token 数',
@@ -217,9 +217,12 @@ export default {
217
217
  desc: '{{provider}} 模型',
218
218
  title: '模型',
219
219
  },
220
+ params: {
221
+ title: '高级参数',
222
+ },
220
223
  presencePenalty: {
221
- desc: '值越大,越有可能扩展到新话题',
222
- title: '话题新鲜度',
224
+ desc: '值越大,越倾向不同的表达方式,避免概念重复;值越小,越倾向使用重复的概念或叙述,表达更具一致性',
225
+ title: '表述发散度',
223
226
  },
224
227
  reasoningEffort: {
225
228
  desc: '值越大,推理能力越强,但可能会增加响应时间和 Token 消耗',
@@ -231,14 +234,14 @@ export default {
231
234
  title: '推理强度',
232
235
  },
233
236
  temperature: {
234
- desc: '值越大,回复越随机',
235
- title: '随机性',
236
- titleWithValue: '随机性 {{value}}',
237
+ desc: '数值越大,回答越有创意和想象力;数值越小,回答越严谨',
238
+ title: '创意活跃度',
239
+ warning: '创意活跃度数值过大,输出可能会产生乱码',
237
240
  },
238
241
  title: '模型设置',
239
242
  topP: {
240
- desc: '与随机性类似,但不要和随机性一起更改',
241
- title: '核采样',
243
+ desc: '考虑多少种可能性,值越大,接受更多可能的回答;值越小,倾向选择最可能的回答。不推荐和创意活跃度一起更改',
244
+ title: '思维开放度',
242
245
  },
243
246
  },
244
247
  settingPlugin: {
@@ -50,6 +50,7 @@ export interface UserKeyVaults {
50
50
  mistral?: OpenAICompatibleKeyVault;
51
51
  moonshot?: OpenAICompatibleKeyVault;
52
52
  novita?: OpenAICompatibleKeyVault;
53
+ nvidia?: OpenAICompatibleKeyVault;
53
54
  ollama?: OpenAICompatibleKeyVault;
54
55
  openai?: OpenAICompatibleKeyVault;
55
56
  openrouter?: OpenAICompatibleKeyVault;
@@ -1,49 +0,0 @@
1
- import { ActionIcon, SliderWithInput } from '@lobehub/ui';
2
- import { Popover } from 'antd';
3
- import { Thermometer } from 'lucide-react';
4
- import { memo, useState } from 'react';
5
- import { useTranslation } from 'react-i18next';
6
-
7
- import { useAgentStore } from '@/store/agent';
8
- import { agentSelectors } from '@/store/agent/selectors';
9
-
10
- const Temperature = memo(() => {
11
- const { t } = useTranslation('setting');
12
- const [popoverOpen, setPopoverOpen] = useState(false);
13
-
14
- const [temperature, updateAgentConfig] = useAgentStore((s) => {
15
- const config = agentSelectors.currentAgentConfig(s);
16
- return [config.params?.temperature, s.updateAgentConfig];
17
- });
18
-
19
- const title = t('settingModel.temperature.titleWithValue', { value: temperature });
20
-
21
- return (
22
- <Popover
23
- arrow={false}
24
- content={
25
- <SliderWithInput
26
- controls={false}
27
- max={2}
28
- min={0}
29
- onChange={(v) => {
30
- updateAgentConfig({ params: { temperature: v } });
31
- }}
32
- size={'small'}
33
- step={0.1}
34
- style={{ width: 160 }}
35
- value={temperature}
36
- />
37
- }
38
- onOpenChange={setPopoverOpen}
39
- open={popoverOpen}
40
- placement={'top'}
41
- title={t('settingModel.temperature.title')}
42
- trigger={'click'}
43
- >
44
- <ActionIcon icon={Thermometer} placement={'bottom'} title={popoverOpen ? undefined : title} />
45
- </Popover>
46
- );
47
- });
48
-
49
- export default Temperature;