@lobehub/chat 1.20.8 → 1.21.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (100)
  1. package/CHANGELOG.md +42 -0
  2. package/Dockerfile +5 -2
  3. package/Dockerfile.database +5 -2
  4. package/locales/ar/error.json +1 -0
  5. package/locales/ar/modelProvider.json +20 -0
  6. package/locales/ar/models.json +73 -4
  7. package/locales/ar/providers.json +6 -0
  8. package/locales/bg-BG/error.json +1 -0
  9. package/locales/bg-BG/modelProvider.json +20 -0
  10. package/locales/bg-BG/models.json +73 -4
  11. package/locales/bg-BG/providers.json +6 -0
  12. package/locales/de-DE/error.json +1 -0
  13. package/locales/de-DE/modelProvider.json +20 -0
  14. package/locales/de-DE/models.json +73 -4
  15. package/locales/de-DE/providers.json +6 -0
  16. package/locales/en-US/error.json +1 -0
  17. package/locales/en-US/modelProvider.json +20 -0
  18. package/locales/en-US/models.json +73 -4
  19. package/locales/en-US/providers.json +6 -0
  20. package/locales/es-ES/error.json +1 -0
  21. package/locales/es-ES/modelProvider.json +20 -0
  22. package/locales/es-ES/models.json +73 -4
  23. package/locales/es-ES/providers.json +6 -0
  24. package/locales/fr-FR/error.json +1 -0
  25. package/locales/fr-FR/modelProvider.json +20 -0
  26. package/locales/fr-FR/models.json +73 -4
  27. package/locales/fr-FR/providers.json +6 -0
  28. package/locales/it-IT/error.json +1 -0
  29. package/locales/it-IT/modelProvider.json +20 -0
  30. package/locales/it-IT/models.json +73 -4
  31. package/locales/it-IT/providers.json +6 -0
  32. package/locales/ja-JP/error.json +1 -0
  33. package/locales/ja-JP/modelProvider.json +20 -0
  34. package/locales/ja-JP/models.json +73 -4
  35. package/locales/ja-JP/providers.json +6 -0
  36. package/locales/ko-KR/error.json +1 -0
  37. package/locales/ko-KR/modelProvider.json +20 -0
  38. package/locales/ko-KR/models.json +73 -4
  39. package/locales/ko-KR/providers.json +6 -0
  40. package/locales/nl-NL/error.json +1 -0
  41. package/locales/nl-NL/modelProvider.json +20 -0
  42. package/locales/nl-NL/models.json +73 -4
  43. package/locales/nl-NL/providers.json +6 -0
  44. package/locales/pl-PL/error.json +1 -0
  45. package/locales/pl-PL/modelProvider.json +20 -0
  46. package/locales/pl-PL/models.json +73 -4
  47. package/locales/pl-PL/providers.json +6 -0
  48. package/locales/pt-BR/error.json +1 -0
  49. package/locales/pt-BR/modelProvider.json +20 -0
  50. package/locales/pt-BR/models.json +73 -4
  51. package/locales/pt-BR/providers.json +6 -0
  52. package/locales/ru-RU/error.json +1 -0
  53. package/locales/ru-RU/modelProvider.json +20 -0
  54. package/locales/ru-RU/models.json +73 -4
  55. package/locales/ru-RU/providers.json +6 -0
  56. package/locales/tr-TR/error.json +1 -0
  57. package/locales/tr-TR/modelProvider.json +20 -0
  58. package/locales/tr-TR/models.json +73 -4
  59. package/locales/tr-TR/providers.json +6 -0
  60. package/locales/vi-VN/error.json +1 -0
  61. package/locales/vi-VN/modelProvider.json +20 -0
  62. package/locales/vi-VN/models.json +73 -4
  63. package/locales/vi-VN/providers.json +6 -0
  64. package/locales/zh-CN/error.json +1 -0
  65. package/locales/zh-CN/modelProvider.json +20 -0
  66. package/locales/zh-CN/models.json +76 -7
  67. package/locales/zh-CN/providers.json +6 -0
  68. package/locales/zh-TW/error.json +1 -0
  69. package/locales/zh-TW/modelProvider.json +20 -0
  70. package/locales/zh-TW/models.json +73 -4
  71. package/locales/zh-TW/providers.json +6 -0
  72. package/package.json +3 -2
  73. package/scripts/serverLauncher/startServer.js +10 -81
  74. package/src/app/(main)/settings/llm/ProviderList/Wenxin/index.tsx +46 -0
  75. package/src/app/(main)/settings/llm/ProviderList/providers.tsx +4 -1
  76. package/src/app/api/chat/agentRuntime.test.ts +21 -0
  77. package/src/app/api/chat/wenxin/route.test.ts +27 -0
  78. package/src/app/api/chat/wenxin/route.ts +30 -0
  79. package/src/app/api/errorResponse.ts +4 -0
  80. package/src/config/llm.ts +8 -0
  81. package/src/config/modelProviders/index.ts +4 -0
  82. package/src/config/modelProviders/wenxin.ts +159 -0
  83. package/src/const/auth.ts +4 -0
  84. package/src/const/settings/llm.ts +5 -0
  85. package/src/features/Conversation/Error/APIKeyForm/Wenxin.tsx +49 -0
  86. package/src/features/Conversation/Error/APIKeyForm/index.tsx +3 -0
  87. package/src/features/Conversation/Error/index.tsx +1 -0
  88. package/src/libs/agent-runtime/AgentRuntime.test.ts +1 -0
  89. package/src/libs/agent-runtime/error.ts +1 -0
  90. package/src/libs/agent-runtime/types/type.ts +1 -0
  91. package/src/libs/agent-runtime/utils/streams/wenxin.test.ts +149 -0
  92. package/src/libs/agent-runtime/utils/streams/wenxin.ts +46 -0
  93. package/src/libs/agent-runtime/wenxin/index.ts +106 -0
  94. package/src/libs/agent-runtime/wenxin/type.ts +84 -0
  95. package/src/locales/default/error.ts +2 -0
  96. package/src/locales/default/modelProvider.ts +20 -0
  97. package/src/server/globalConfig/index.ts +4 -1
  98. package/src/services/_auth.ts +14 -0
  99. package/src/store/user/slices/modelList/selectors/keyVaults.ts +2 -0
  100. package/src/types/user/settings/keyVaults.ts +6 -0
@@ -1,6 +1,5 @@
1
1
  const dns = require('dns').promises;
2
2
  const fs = require('fs').promises;
3
- const tls = require('tls');
4
3
  const { spawn } = require('child_process');
5
4
 
6
5
  // Set file paths
@@ -23,68 +22,6 @@ const isValidIP = (ip, version = 4) => {
23
22
  }
24
23
  };
25
24
 
26
- // Function to check TLS validity of a URL
27
- const isValidTLS = (url = '') => {
28
- if (!url) {
29
- console.log('⚠️ TLS Check: No URL provided. Skipping TLS check. Ensure correct setting ENV.');
30
- console.log('-------------------------------------');
31
- return Promise.resolve();
32
- }
33
-
34
- const { protocol, host, port } = parseUrl(url);
35
- if (protocol !== 'https') {
36
- console.log(`⚠️ TLS Check: Non-HTTPS protocol (${protocol}). Skipping TLS check for ${url}.`);
37
- console.log('-------------------------------------');
38
- return Promise.resolve();
39
- }
40
-
41
- const options = { host, port, servername: host };
42
- return new Promise((resolve, reject) => {
43
- const socket = tls.connect(options, () => {
44
- console.log(`✅ TLS Check: Valid certificate for ${host}:${port}.`);
45
- console.log('-------------------------------------');
46
-
47
- socket.end();
48
-
49
- resolve();
50
- });
51
-
52
- socket.on('error', (err) => {
53
- const errMsg = `❌ TLS Check: Error for ${host}:${port}. Details:`;
54
- switch (err.code) {
55
- case 'CERT_HAS_EXPIRED':
56
- case 'DEPTH_ZERO_SELF_SIGNED_CERT':
57
- case 'ERR_TLS_CERT_ALTNAME_INVALID':
58
- console.error(`${errMsg} Certificate is not valid. Consider setting NODE_TLS_REJECT_UNAUTHORIZED="0" or mapping /etc/ssl/certs/ca-certificates.crt.`);
59
- break;
60
- case 'UNABLE_TO_GET_ISSUER_CERT_LOCALLY':
61
- console.error(`${errMsg} Unable to verify issuer. Ensure correct mapping of /etc/ssl/certs/ca-certificates.crt.`);
62
- break;
63
- default:
64
- console.error(`${errMsg} Network issue. Check firewall or DNS.`);
65
- break;
66
- }
67
- reject(err);
68
- });
69
- });
70
- };
71
-
72
- // Function to check TLS connections for OSS and Auth Issuer
73
- const checkTLSConnections = async () => {
74
- await Promise.all([
75
- isValidTLS(process.env.S3_ENDPOINT),
76
- isValidTLS(process.env.S3_PUBLIC_DOMAIN),
77
- isValidTLS(getEnvVarsByKeyword('_ISSUER')),
78
- ]);
79
- };
80
-
81
- // Function to get environment variable by keyword
82
- const getEnvVarsByKeyword = (keyword) => {
83
- return Object.entries(process.env)
84
- .filter(([key, value]) => key.includes(keyword) && value)
85
- .map(([, value]) => value)[0] || null;
86
- };
87
-
88
25
  // Function to parse protocol, host and port from a URL
89
26
  const parseUrl = (url) => {
90
27
  const { protocol, hostname: host, port } = new URL(url);
@@ -170,26 +107,18 @@ const runServer = async () => {
170
107
 
171
108
  if (process.env.DATABASE_DRIVER) {
172
109
  try {
173
- try {
174
- await fs.access(DB_MIGRATION_SCRIPT_PATH);
175
-
176
- await runScript(DB_MIGRATION_SCRIPT_PATH);
177
- } catch (err) {
178
- if (err.code === 'ENOENT') {
179
- console.log(`⚠️ DB Migration: Not found ${DB_MIGRATION_SCRIPT_PATH}. Skipping DB migration. Ensure to migrate database manually.`);
180
- console.log('-------------------------------------');
181
- } else {
182
- console.error('❌ Error during DB migration:');
183
- console.error(err);
184
- process.exit(1);
185
- }
186
- }
110
+ await fs.access(DB_MIGRATION_SCRIPT_PATH);
187
111
 
188
- await checkTLSConnections();
112
+ await runScript(DB_MIGRATION_SCRIPT_PATH);
189
113
  } catch (err) {
190
- console.error('❌ Error during TLS connection check:');
191
- console.error(err);
192
- process.exit(1);
114
+ if (err.code === 'ENOENT') {
115
+ console.log(`⚠️ DB Migration: Not found ${DB_MIGRATION_SCRIPT_PATH}. Skipping DB migration. Ensure to migrate database manually.`);
116
+ console.log('-------------------------------------');
117
+ } else {
118
+ console.error('❌ Error during DB migration:');
119
+ console.error(err);
120
+ process.exit(1);
121
+ }
193
122
  }
194
123
  }
195
124
 
@@ -0,0 +1,46 @@
1
+ 'use client';
2
+
3
+ import { Wenxin } from '@lobehub/icons';
4
+ import { Input } from 'antd';
5
+ import { useTranslation } from 'react-i18next';
6
+
7
+ import { WenxinProviderCard } from '@/config/modelProviders';
8
+ import { GlobalLLMProviderKey } from '@/types/user/settings';
9
+
10
+ import { KeyVaultsConfigKey } from '../../const';
11
+ import { ProviderItem } from '../../type';
12
+
13
+ const providerKey: GlobalLLMProviderKey = 'wenxin';
14
+
15
+ export const useWenxinProvider = (): ProviderItem => {
16
+ const { t } = useTranslation('modelProvider');
17
+
18
+ return {
19
+ ...WenxinProviderCard,
20
+ apiKeyItems: [
21
+ {
22
+ children: (
23
+ <Input.Password
24
+ autoComplete={'new-password'}
25
+ placeholder={t(`${providerKey}.accessKey.placeholder`)}
26
+ />
27
+ ),
28
+ desc: t(`${providerKey}.accessKey.desc`),
29
+ label: t(`${providerKey}.accessKey.title`),
30
+ name: [KeyVaultsConfigKey, providerKey, 'accessKey'],
31
+ },
32
+ {
33
+ children: (
34
+ <Input.Password
35
+ autoComplete={'new-password'}
36
+ placeholder={t(`${providerKey}.secretKey.placeholder`)}
37
+ />
38
+ ),
39
+ desc: t(`${providerKey}.secretKey.desc`),
40
+ label: t(`${providerKey}.secretKey.title`),
41
+ name: [KeyVaultsConfigKey, providerKey, 'secretKey'],
42
+ },
43
+ ],
44
+ title: <Wenxin.Combine size={32} type={'color'} />,
45
+ };
46
+ };
@@ -33,6 +33,7 @@ import { useBedrockProvider } from './Bedrock';
33
33
  import { useGithubProvider } from './Github';
34
34
  import { useOllamaProvider } from './Ollama';
35
35
  import { useOpenAIProvider } from './OpenAI';
36
+ import { useWenxinProvider } from './Wenxin';
36
37
 
37
38
  export const useProviderList = (): ProviderItem[] => {
38
39
  const AzureProvider = useAzureProvider();
@@ -40,6 +41,7 @@ export const useProviderList = (): ProviderItem[] => {
40
41
  const OpenAIProvider = useOpenAIProvider();
41
42
  const BedrockProvider = useBedrockProvider();
42
43
  const GithubProvider = useGithubProvider();
44
+ const WenxinProvider = useWenxinProvider();
43
45
 
44
46
  return useMemo(
45
47
  () => [
@@ -61,6 +63,7 @@ export const useProviderList = (): ProviderItem[] => {
61
63
  Ai21ProviderCard,
62
64
  UpstageProviderCard,
63
65
  QwenProviderCard,
66
+ WenxinProvider,
64
67
  HunyuanProviderCard,
65
68
  SparkProviderCard,
66
69
  ZhiPuProviderCard,
@@ -73,6 +76,6 @@ export const useProviderList = (): ProviderItem[] => {
73
76
  TaichuProviderCard,
74
77
  SiliconCloudProviderCard,
75
78
  ],
76
- [AzureProvider, OllamaProvider, OpenAIProvider, BedrockProvider, GithubProvider],
79
+ [AzureProvider, OllamaProvider, OpenAIProvider, BedrockProvider, GithubProvider,WenxinProvider],
77
80
  );
78
81
  };
@@ -25,6 +25,7 @@ import {
25
25
  } from '@/libs/agent-runtime';
26
26
  import { AgentRuntime } from '@/libs/agent-runtime';
27
27
  import { LobeStepfunAI } from '@/libs/agent-runtime/stepfun';
28
+ import LobeWenxinAI from '@/libs/agent-runtime/wenxin';
28
29
 
29
30
  import { initAgentRuntimeWithUserPayload } from './agentRuntime';
30
31
 
@@ -54,6 +55,9 @@ vi.mock('@/config/llm', () => ({
54
55
  TOGETHERAI_API_KEY: 'test-togetherai-key',
55
56
  QWEN_API_KEY: 'test-qwen-key',
56
57
  STEPFUN_API_KEY: 'test-stepfun-key',
58
+
59
+ WENXIN_ACCESS_KEY: 'test-wenxin-access-key',
60
+ WENXIN_SECRET_KEY: 'test-wenxin-secret-key',
57
61
  })),
58
62
  }));
59
63
 
@@ -202,6 +206,16 @@ describe('initAgentRuntimeWithUserPayload method', () => {
202
206
  expect(runtime['_runtime']).toBeInstanceOf(LobeStepfunAI);
203
207
  });
204
208
 
209
+ it.skip('Wenxin AI provider: with apikey', async () => {
210
+ const jwtPayload: JWTPayload = {
211
+ wenxinAccessKey: 'user-wenxin-accessKey',
212
+ wenxinSecretKey: 'wenxin-secret-key',
213
+ };
214
+ const runtime = await initAgentRuntimeWithUserPayload(ModelProvider.Wenxin, jwtPayload);
215
+ expect(runtime).toBeInstanceOf(AgentRuntime);
216
+ expect(runtime['_runtime']).toBeInstanceOf(LobeWenxinAI);
217
+ });
218
+
205
219
  it('Unknown Provider: with apikey and endpoint, should initialize to OpenAi', async () => {
206
220
  const jwtPayload: JWTPayload = {
207
221
  apiKey: 'user-unknown-key',
@@ -339,6 +353,13 @@ describe('initAgentRuntimeWithUserPayload method', () => {
339
353
  expect(runtime['_runtime']).toBeInstanceOf(LobeTogetherAI);
340
354
  });
341
355
 
356
+ it.skip('Wenxin AI provider: without apikey', async () => {
357
+ const jwtPayload = {};
358
+ const runtime = await initAgentRuntimeWithUserPayload(ModelProvider.Wenxin, jwtPayload);
359
+ expect(runtime).toBeInstanceOf(AgentRuntime);
360
+ expect(runtime['_runtime']).toBeInstanceOf(LobeWenxinAI);
361
+ });
362
+
342
363
  it('Unknown Provider', async () => {
343
364
  const jwtPayload = {};
344
365
  const runtime = await initAgentRuntimeWithUserPayload('unknown', jwtPayload);
@@ -0,0 +1,27 @@
1
+ // @vitest-environment edge-runtime
2
+ import { describe, expect, it, vi } from 'vitest';
3
+
4
+ import { POST as UniverseRoute } from '../[provider]/route';
5
+ import { POST, runtime } from './route';
6
+
7
+ // 模拟 '../[provider]/route'
8
+ vi.mock('../[provider]/route', () => ({
9
+ POST: vi.fn().mockResolvedValue('mocked response'),
10
+ }));
11
+
12
+ describe('Configuration tests', () => {
13
+ it('should have runtime set to "edge"', () => {
14
+ expect(runtime).toBe('nodejs');
15
+ });
16
+ });
17
+
18
+ describe('Wenxin POST function tests', () => {
19
+ it('should call UniverseRoute with correct parameters', async () => {
20
+ const mockRequest = new Request('https://example.com', { method: 'POST' });
21
+ await POST(mockRequest);
22
+ expect(UniverseRoute).toHaveBeenCalledWith(mockRequest, {
23
+ createRuntime: expect.anything(),
24
+ params: { provider: 'wenxin' },
25
+ });
26
+ });
27
+ });
@@ -0,0 +1,30 @@
1
+ import { getLLMConfig } from '@/config/llm';
2
+ import { AgentRuntime } from '@/libs/agent-runtime';
3
+ import LobeWenxinAI from '@/libs/agent-runtime/wenxin';
4
+
5
+ import { POST as UniverseRoute } from '../[provider]/route';
6
+
7
+ export const runtime = 'nodejs';
8
+
9
+ export const maxDuration = 30;
10
+
11
+ export const POST = async (req: Request) =>
12
+ UniverseRoute(req, {
13
+ createRuntime: (payload) => {
14
+ const { WENXIN_ACCESS_KEY, WENXIN_SECRET_KEY } = getLLMConfig();
15
+ let accessKey: string | undefined = WENXIN_ACCESS_KEY;
16
+ let secretKey: string | undefined = WENXIN_SECRET_KEY;
17
+
18
+ // if the payload has the api key, use user
19
+ if (payload.apiKey) {
20
+ accessKey = payload?.wenxinAccessKey;
21
+ secretKey = payload?.wenxinSecretKey;
22
+ }
23
+
24
+ const params = { accessKey, secretKey };
25
+ const instance = new LobeWenxinAI(params);
26
+
27
+ return new AgentRuntime(instance);
28
+ },
29
+ params: { provider: 'wenxin' },
30
+ });
@@ -16,6 +16,10 @@ const getStatus = (errorType: ILobeAgentRuntimeErrorType | ErrorType) => {
16
16
  return 403;
17
17
  }
18
18
 
19
+ case AgentRuntimeErrorType.QuotaLimitReached: {
20
+ return 429;
21
+ }
22
+
19
23
  // define the 471~480 as provider error
20
24
  case AgentRuntimeErrorType.AgentRuntimeError: {
21
25
  return 470;
package/src/config/llm.ts CHANGED
@@ -80,6 +80,10 @@ export const getLLMConfig = () => {
80
80
  AWS_SECRET_ACCESS_KEY: z.string().optional(),
81
81
  AWS_SESSION_TOKEN: z.string().optional(),
82
82
 
83
+ ENABLED_WENXIN: z.boolean(),
84
+ WENXIN_ACCESS_KEY: z.string().optional(),
85
+ WENXIN_SECRET_KEY: z.string().optional(),
86
+
83
87
  ENABLED_OLLAMA: z.boolean(),
84
88
  OLLAMA_PROXY_URL: z.string().optional(),
85
89
  OLLAMA_MODEL_LIST: z.string().optional(),
@@ -198,6 +202,10 @@ export const getLLMConfig = () => {
198
202
  AWS_SECRET_ACCESS_KEY: process.env.AWS_SECRET_ACCESS_KEY,
199
203
  AWS_SESSION_TOKEN: process.env.AWS_SESSION_TOKEN,
200
204
 
205
+ ENABLED_WENXIN: !!process.env.WENXIN_ACCESS_KEY && !!process.env.WENXIN_SECRET_KEY,
206
+ WENXIN_ACCESS_KEY: process.env.WENXIN_ACCESS_KEY,
207
+ WENXIN_SECRET_KEY: process.env.WENXIN_SECRET_KEY,
208
+
201
209
  ENABLED_OLLAMA: process.env.ENABLED_OLLAMA !== '0',
202
210
  OLLAMA_PROXY_URL: process.env.OLLAMA_PROXY_URL || '',
203
211
  OLLAMA_MODEL_LIST: process.env.OLLAMA_MODEL_LIST,
@@ -27,6 +27,7 @@ import StepfunProvider from './stepfun';
27
27
  import TaichuProvider from './taichu';
28
28
  import TogetherAIProvider from './togetherai';
29
29
  import UpstageProvider from './upstage';
30
+ import WenxinProvider from './wenxin';
30
31
  import ZeroOneProvider from './zeroone';
31
32
  import ZhiPuProvider from './zhipu';
32
33
 
@@ -59,6 +60,7 @@ export const LOBE_DEFAULT_MODEL_LIST: ChatModelCard[] = [
59
60
  SparkProvider.chatModels,
60
61
  Ai21Provider.chatModels,
61
62
  HunyuanProvider.chatModels,
63
+ WenxinProvider.chatModels,
62
64
  ].flat();
63
65
 
64
66
  export const DEFAULT_MODEL_PROVIDER_LIST = [
@@ -80,6 +82,7 @@ export const DEFAULT_MODEL_PROVIDER_LIST = [
80
82
  Ai21Provider,
81
83
  UpstageProvider,
82
84
  QwenProvider,
85
+ WenxinProvider,
83
86
  HunyuanProvider,
84
87
  SparkProvider,
85
88
  ZhiPuProvider,
@@ -129,5 +132,6 @@ export { default as StepfunProviderCard } from './stepfun';
129
132
  export { default as TaichuProviderCard } from './taichu';
130
133
  export { default as TogetherAIProviderCard } from './togetherai';
131
134
  export { default as UpstageProviderCard } from './upstage';
135
+ export { default as WenxinProviderCard } from './wenxin';
132
136
  export { default as ZeroOneProviderCard } from './zeroone';
133
137
  export { default as ZhiPuProviderCard } from './zhipu';
@@ -0,0 +1,159 @@
1
+ import { ModelProviderCard } from '@/types/llm';
2
+
3
+ // ref https://cloud.baidu.com/doc/WENXINWORKSHOP/s/Nlks5zkzu
4
+ const BaiduWenxin: ModelProviderCard = {
5
+ chatModels: [
6
+ {
7
+ description:
8
+ '百度自研的旗舰级大规模⼤语⾔模型,覆盖海量中英文语料,具有强大的通用能力,可满足绝大部分对话问答、创作生成、插件应用场景要求;支持自动对接百度搜索插件,保障问答信息时效。',
9
+ displayName: 'ERNIE 3.5 8K',
10
+ enabled: true,
11
+ id: 'ERNIE-3.5-8K',
12
+ pricing: {
13
+ currency: 'CNY',
14
+ input: 0.8,
15
+ output: 2,
16
+ },
17
+ tokens: 8192,
18
+ },
19
+ {
20
+ description:
21
+ '百度自研的旗舰级大规模⼤语⾔模型,覆盖海量中英文语料,具有强大的通用能力,可满足绝大部分对话问答、创作生成、插件应用场景要求;支持自动对接百度搜索插件,保障问答信息时效。',
22
+ displayName: 'ERNIE 3.5 8K Preview',
23
+ id: 'ERNIE-3.5-8K-Preview',
24
+ pricing: {
25
+ currency: 'CNY',
26
+ input: 0.8,
27
+ output: 2,
28
+ },
29
+ tokens: 8192,
30
+ },
31
+ {
32
+ description:
33
+ '百度自研的旗舰级大规模⼤语⾔模型,覆盖海量中英文语料,具有强大的通用能力,可满足绝大部分对话问答、创作生成、插件应用场景要求;支持自动对接百度搜索插件,保障问答信息时效。',
34
+ displayName: 'ERNIE 3.5 128K',
35
+ enabled: true,
36
+ id: 'ERNIE-3.5-128K',
37
+ pricing: {
38
+ currency: 'CNY',
39
+ input: 0.8,
40
+ output: 2,
41
+ },
42
+ tokens: 128_000,
43
+ },
44
+ {
45
+ description:
46
+ '百度自研的旗舰级超大规模⼤语⾔模型,相较ERNIE 3.5实现了模型能力全面升级,广泛适用于各领域复杂任务场景;支持自动对接百度搜索插件,保障问答信息时效。',
47
+ displayName: 'ERNIE 4.0 8K',
48
+ enabled: true,
49
+ id: 'ERNIE-4.0-8K-Latest',
50
+ pricing: {
51
+ currency: 'CNY',
52
+ input: 30,
53
+ output: 90,
54
+ },
55
+ tokens: 8192,
56
+ },
57
+ {
58
+ description:
59
+ '百度自研的旗舰级超大规模⼤语⾔模型,相较ERNIE 3.5实现了模型能力全面升级,广泛适用于各领域复杂任务场景;支持自动对接百度搜索插件,保障问答信息时效。',
60
+ displayName: 'ERNIE 4.0 8K Preview',
61
+ id: 'ERNIE-4.0-8K-Preview',
62
+ pricing: {
63
+ currency: 'CNY',
64
+ input: 30,
65
+ output: 90,
66
+ },
67
+ tokens: 8192,
68
+ },
69
+ {
70
+ description:
71
+ '百度自研的旗舰级超大规模⼤语⾔模型,综合效果表现出色,广泛适用于各领域复杂任务场景;支持自动对接百度搜索插件,保障问答信息时效。相较于ERNIE 4.0在性能表现上更优秀',
72
+ displayName: 'ERNIE 4.0 Turbo 8K',
73
+ enabled: true,
74
+ id: 'ERNIE-4.0-Turbo-8K',
75
+ pricing: {
76
+ currency: 'CNY',
77
+ input: 20,
78
+ output: 60,
79
+ },
80
+ tokens: 8192,
81
+ },
82
+ {
83
+ description:
84
+ '百度自研的旗舰级超大规模⼤语⾔模型,综合效果表现出色,广泛适用于各领域复杂任务场景;支持自动对接百度搜索插件,保障问答信息时效。相较于ERNIE 4.0在性能表现上更优秀',
85
+ displayName: 'ERNIE 4.0 Turbo 8K Preview',
86
+ id: 'ERNIE-4.0-Turbo-8K-Preview',
87
+ pricing: {
88
+ currency: 'CNY',
89
+ input: 20,
90
+ output: 60,
91
+ },
92
+ tokens: 8192,
93
+ },
94
+ {
95
+ description:
96
+ '百度自研的轻量级大语言模型,兼顾优异的模型效果与推理性能,效果比ERNIE Lite更优,适合低算力AI加速卡推理使用。',
97
+ displayName: 'ERNIE Lite Pro 128K',
98
+ enabled: true,
99
+ id: 'ERNIE-Lite-Pro-128K',
100
+ pricing: {
101
+ currency: 'CNY',
102
+ input: 0.2,
103
+ output: 0.4,
104
+ },
105
+ tokens: 128_000,
106
+ },
107
+ {
108
+ description:
109
+ '百度2024年最新发布的自研高性能大语言模型,通用能力优异,效果比ERNIE Speed更优,适合作为基座模型进行精调,更好地处理特定场景问题,同时具备极佳的推理性能。',
110
+ displayName: 'ERNIE Speed Pro 128K',
111
+ enabled: true,
112
+ id: 'ERNIE-Speed-Pro-128K',
113
+ pricing: {
114
+ currency: 'CNY',
115
+ input: 0.3,
116
+ output: 0.6,
117
+ },
118
+ tokens: 128_000,
119
+ },
120
+ {
121
+ description:
122
+ '百度2024年最新发布的自研高性能大语言模型,通用能力优异,适合作为基座模型进行精调,更好地处理特定场景问题,同时具备极佳的推理性能。',
123
+ displayName: 'ERNIE Speed 128K',
124
+ id: 'ERNIE-Speed-128K',
125
+ pricing: {
126
+ currency: 'CNY',
127
+ input: 0,
128
+ output: 0,
129
+ },
130
+ tokens: 128_000,
131
+ },
132
+ {
133
+ description:
134
+ '百度自研的垂直场景大语言模型,适合游戏NPC、客服对话、对话角色扮演等应用场景,人设风格更为鲜明、一致,指令遵循能力更强,推理性能更优。',
135
+ displayName: 'ERNIE Character 8K',
136
+ id: 'ERNIE-Character-8K',
137
+ pricing: {
138
+ currency: 'CNY',
139
+ input: 4,
140
+ output: 8,
141
+ },
142
+ tokens: 8192,
143
+ },
144
+ ],
145
+ checkModel: 'ERNIE-Speed-128K',
146
+ description:
147
+ '企业级一站式大模型与AI原生应用开发及服务平台,提供最全面易用的生成式人工智能模型开发、应用开发全流程工具链',
148
+ disableBrowserRequest: true,
149
+ id: 'wenxin',
150
+ modelsUrl: 'https://cloud.baidu.com/doc/WENXINWORKSHOP/s/Nlks5zkzu#%E5%AF%B9%E8%AF%9Dchat',
151
+ name: 'Wenxin',
152
+ smoothing: {
153
+ speed: 2,
154
+ text: true,
155
+ },
156
+ url: 'https://cloud.baidu.com/wenxin.html',
157
+ };
158
+
159
+ export default BaiduWenxin;
package/src/const/auth.ts CHANGED
@@ -36,6 +36,10 @@ export interface JWTPayload {
36
36
  awsRegion?: string;
37
37
  awsSecretAccessKey?: string;
38
38
  awsSessionToken?: string;
39
+
40
+ wenxinAccessKey?: string;
41
+ wenxinSecretKey?: string;
42
+
39
43
  /**
40
44
  * user id
41
45
  * in client db mode it's a uuid
@@ -25,6 +25,7 @@ import {
25
25
  TaichuProviderCard,
26
26
  TogetherAIProviderCard,
27
27
  UpstageProviderCard,
28
+ WenxinProviderCard,
28
29
  ZeroOneProviderCard,
29
30
  ZhiPuProviderCard,
30
31
  filterEnabledModels,
@@ -141,6 +142,10 @@ export const DEFAULT_LLM_CONFIG: UserModelProviderConfig = {
141
142
  enabled: false,
142
143
  enabledModels: filterEnabledModels(UpstageProviderCard),
143
144
  },
145
+ wenxin: {
146
+ enabled: false,
147
+ enabledModels: filterEnabledModels(WenxinProviderCard),
148
+ },
144
149
  zeroone: {
145
150
  enabled: false,
146
151
  enabledModels: filterEnabledModels(ZeroOneProviderCard),
@@ -0,0 +1,49 @@
1
+ import { Wenxin } from '@lobehub/icons';
2
+ import { Input } from 'antd';
3
+ import { memo } from 'react';
4
+ import { useTranslation } from 'react-i18next';
5
+
6
+ import { ModelProvider } from '@/libs/agent-runtime';
7
+ import { useUserStore } from '@/store/user';
8
+ import { keyVaultsConfigSelectors } from '@/store/user/selectors';
9
+
10
+ import { FormAction } from '../style';
11
+
12
+ const WenxinForm = memo(() => {
13
+ const { t } = useTranslation('modelProvider');
14
+
15
+ const [accessKey, secretKey, setConfig] = useUserStore((s) => [
16
+ keyVaultsConfigSelectors.wenxinConfig(s).accessKey,
17
+ keyVaultsConfigSelectors.wenxinConfig(s).secretKey,
18
+ s.updateKeyVaultConfig,
19
+ ]);
20
+
21
+ return (
22
+ <FormAction
23
+ avatar={<Wenxin.Color size={56} />}
24
+ description={t('wenxin.unlock.description')}
25
+ title={t('wenxin.unlock.title')}
26
+ >
27
+ <Input.Password
28
+ autoComplete={'new-password'}
29
+ onChange={(e) => {
30
+ setConfig(ModelProvider.Wenxin, { accessKey: e.target.value });
31
+ }}
32
+ placeholder={'Access Key'}
33
+ type={'block'}
34
+ value={accessKey}
35
+ />
36
+ <Input.Password
37
+ autoComplete={'new-password'}
38
+ onChange={(e) => {
39
+ setConfig(ModelProvider.Wenxin, { secretKey: e.target.value });
40
+ }}
41
+ placeholder={'Secret Key'}
42
+ type={'block'}
43
+ value={secretKey}
44
+ />
45
+ </FormAction>
46
+ );
47
+ });
48
+
49
+ export default WenxinForm;
@@ -10,6 +10,7 @@ import { GlobalLLMProviderKey } from '@/types/user/settings';
10
10
 
11
11
  import BedrockForm from './Bedrock';
12
12
  import ProviderApiKeyForm from './ProviderApiKeyForm';
13
+ import WenxinForm from './Wenxin';
13
14
 
14
15
  interface APIKeyFormProps {
15
16
  id: string;
@@ -65,6 +66,8 @@ const APIKeyForm = memo<APIKeyFormProps>(({ id, provider }) => {
65
66
  <Center gap={16} style={{ maxWidth: 300 }}>
66
67
  {provider === ModelProvider.Bedrock ? (
67
68
  <BedrockForm />
69
+ ) : provider === ModelProvider.Wenxin ? (
70
+ <WenxinForm />
68
71
  ) : (
69
72
  <ProviderApiKeyForm
70
73
  apiKeyPlaceholder={apiKeyPlaceholder}
@@ -34,6 +34,7 @@ const getErrorAlertConfig = (
34
34
  };
35
35
 
36
36
  switch (errorType) {
37
+ case AgentRuntimeErrorType.QuotaLimitReached:
37
38
  case AgentRuntimeErrorType.LocationNotSupportError: {
38
39
  return {
39
40
  type: 'warning',
@@ -27,6 +27,7 @@ import {
27
27
  ModelProvider,
28
28
  } from '@/libs/agent-runtime';
29
29
  import { LobeStepfunAI } from '@/libs/agent-runtime/stepfun';
30
+ import LobeWenxinAI from '@/libs/agent-runtime/wenxin';
30
31
 
31
32
  import { AgentChatOptions } from './AgentRuntime';
32
33
  import { LobeBedrockAIParams } from './bedrock';
@@ -3,6 +3,7 @@
3
3
  export const AgentRuntimeErrorType = {
4
4
  AgentRuntimeError: 'AgentRuntimeError', // Agent Runtime 模块运行时错误
5
5
  LocationNotSupportError: 'LocationNotSupportError',
6
+ QuotaLimitReached: 'QuotaLimitReached',
6
7
 
7
8
  InvalidProviderAPIKey: 'InvalidProviderAPIKey',
8
9
  ProviderBizError: 'ProviderBizError',
@@ -49,6 +49,7 @@ export enum ModelProvider {
49
49
  Taichu = 'taichu',
50
50
  TogetherAI = 'togetherai',
51
51
  Upstage = 'upstage',
52
+ Wenxin = 'wenxin',
52
53
  ZeroOne = 'zeroone',
53
54
  ZhiPu = 'zhipu',
54
55
  }