@lobehub/chat 1.23.0 → 1.24.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (79)
  1. package/CHANGELOG.md +51 -0
  2. package/Dockerfile +19 -15
  3. package/Dockerfile.database +20 -16
  4. package/locales/ar/modelProvider.json +16 -0
  5. package/locales/ar/models.json +27 -0
  6. package/locales/ar/providers.json +1 -0
  7. package/locales/bg-BG/modelProvider.json +16 -0
  8. package/locales/bg-BG/models.json +27 -0
  9. package/locales/bg-BG/providers.json +1 -0
  10. package/locales/de-DE/modelProvider.json +16 -0
  11. package/locales/de-DE/models.json +27 -0
  12. package/locales/de-DE/providers.json +1 -0
  13. package/locales/en-US/modelProvider.json +16 -0
  14. package/locales/en-US/models.json +27 -0
  15. package/locales/en-US/providers.json +1 -0
  16. package/locales/es-ES/modelProvider.json +16 -0
  17. package/locales/es-ES/models.json +27 -0
  18. package/locales/es-ES/providers.json +1 -0
  19. package/locales/fr-FR/modelProvider.json +16 -0
  20. package/locales/fr-FR/models.json +27 -0
  21. package/locales/fr-FR/providers.json +1 -0
  22. package/locales/it-IT/modelProvider.json +16 -0
  23. package/locales/it-IT/models.json +27 -0
  24. package/locales/it-IT/providers.json +1 -0
  25. package/locales/ja-JP/modelProvider.json +16 -0
  26. package/locales/ja-JP/models.json +27 -0
  27. package/locales/ja-JP/providers.json +1 -0
  28. package/locales/ko-KR/modelProvider.json +16 -0
  29. package/locales/ko-KR/models.json +27 -0
  30. package/locales/ko-KR/providers.json +1 -0
  31. package/locales/nl-NL/modelProvider.json +16 -0
  32. package/locales/nl-NL/models.json +27 -0
  33. package/locales/nl-NL/providers.json +1 -0
  34. package/locales/pl-PL/modelProvider.json +16 -0
  35. package/locales/pl-PL/models.json +27 -0
  36. package/locales/pl-PL/providers.json +1 -0
  37. package/locales/pt-BR/modelProvider.json +16 -0
  38. package/locales/pt-BR/models.json +27 -0
  39. package/locales/pt-BR/providers.json +1 -0
  40. package/locales/ru-RU/modelProvider.json +16 -0
  41. package/locales/ru-RU/models.json +27 -0
  42. package/locales/ru-RU/providers.json +1 -0
  43. package/locales/tr-TR/modelProvider.json +16 -0
  44. package/locales/tr-TR/models.json +27 -0
  45. package/locales/tr-TR/providers.json +1 -0
  46. package/locales/vi-VN/modelProvider.json +16 -0
  47. package/locales/vi-VN/models.json +27 -0
  48. package/locales/vi-VN/providers.json +1 -0
  49. package/locales/zh-CN/modelProvider.json +16 -0
  50. package/locales/zh-CN/models.json +27 -0
  51. package/locales/zh-CN/providers.json +1 -0
  52. package/locales/zh-TW/modelProvider.json +16 -0
  53. package/locales/zh-TW/models.json +27 -0
  54. package/locales/zh-TW/providers.json +1 -0
  55. package/package.json +3 -3
  56. package/src/app/(main)/settings/llm/ProviderList/SenseNova/index.tsx +44 -0
  57. package/src/app/(main)/settings/llm/ProviderList/providers.tsx +4 -0
  58. package/src/config/llm.ts +38 -0
  59. package/src/config/modelProviders/index.ts +4 -0
  60. package/src/config/modelProviders/sensenova.ts +124 -0
  61. package/src/config/modelProviders/spark.ts +6 -6
  62. package/src/const/auth.ts +3 -0
  63. package/src/const/settings/llm.ts +5 -0
  64. package/src/features/Conversation/Error/APIKeyForm/SenseNova.tsx +49 -0
  65. package/src/features/Conversation/Error/APIKeyForm/index.tsx +3 -0
  66. package/src/libs/agent-runtime/AgentRuntime.ts +7 -0
  67. package/src/libs/agent-runtime/index.ts +1 -0
  68. package/src/libs/agent-runtime/sensenova/authToken.test.ts +18 -0
  69. package/src/libs/agent-runtime/sensenova/authToken.ts +27 -0
  70. package/src/libs/agent-runtime/sensenova/index.test.ts +321 -0
  71. package/src/libs/agent-runtime/sensenova/index.ts +98 -0
  72. package/src/libs/agent-runtime/types/type.ts +1 -0
  73. package/src/locales/default/modelProvider.ts +17 -0
  74. package/src/server/globalConfig/index.ts +158 -16
  75. package/src/server/modules/AgentRuntime/index.ts +10 -0
  76. package/src/services/_auth.ts +14 -0
  77. package/src/store/user/slices/modelList/selectors/keyVaults.ts +2 -0
  78. package/src/store/user/slices/modelList/selectors/modelConfig.ts +2 -0
  79. package/src/types/user/settings/keyVaults.ts +6 -0
@@ -0,0 +1,49 @@
import { SenseNova } from '@lobehub/icons';
import { Input } from 'antd';
import { memo } from 'react';
import { useTranslation } from 'react-i18next';

import { ModelProvider } from '@/libs/agent-runtime';
import { useUserStore } from '@/store/user';
import { keyVaultsConfigSelectors } from '@/store/user/selectors';

import { FormAction } from '../style';

/**
 * Unlock form for the SenseNova provider: collects the Access Key ID / Secret
 * pair and writes it into the user's key vault via `updateKeyVaultConfig`.
 */
const SenseNovaForm = memo(() => {
  const { t } = useTranslation('modelProvider');

  const [accessKeyID, accessKeySecret, setConfig] = useUserStore((s) => {
    // Read the vault config once instead of re-deriving it per field.
    const config = keyVaultsConfigSelectors.sensenovaConfig(s);
    return [config.sensenovaAccessKeyID, config.sensenovaAccessKeySecret, s.updateKeyVaultConfig];
  });

  return (
    <FormAction
      avatar={<SenseNova color={SenseNova.colorPrimary} size={56} />}
      description={t('sensenova.unlock.description')}
      title={t('sensenova.unlock.title')}
    >
      <Input.Password
        autoComplete={'new-password'}
        onChange={(e) => {
          setConfig(ModelProvider.SenseNova, { sensenovaAccessKeyID: e.target.value });
        }}
        placeholder={t('sensenova.sensenovaAccessKeyID.placeholder')}
        type={'block'}
        value={accessKeyID}
      />
      <Input.Password
        autoComplete={'new-password'}
        onChange={(e) => {
          setConfig(ModelProvider.SenseNova, { sensenovaAccessKeySecret: e.target.value });
        }}
        placeholder={t('sensenova.sensenovaAccessKeySecret.placeholder')}
        type={'block'}
        value={accessKeySecret}
      />
    </FormAction>
  );
});

export default SenseNovaForm;
@@ -10,6 +10,7 @@ import { GlobalLLMProviderKey } from '@/types/user/settings';
10
10
 
11
11
  import BedrockForm from './Bedrock';
12
12
  import ProviderApiKeyForm from './ProviderApiKeyForm';
13
+ import SenseNovaForm from './SenseNova';
13
14
  import WenxinForm from './Wenxin';
14
15
 
15
16
  interface APIKeyFormProps {
@@ -66,6 +67,8 @@ const APIKeyForm = memo<APIKeyFormProps>(({ id, provider }) => {
66
67
  <Center gap={16} style={{ maxWidth: 300 }}>
67
68
  {provider === ModelProvider.Bedrock ? (
68
69
  <BedrockForm />
70
+ ) : provider === ModelProvider.SenseNova ? (
71
+ <SenseNovaForm />
69
72
  ) : provider === ModelProvider.Wenxin ? (
70
73
  <WenxinForm />
71
74
  ) : (
@@ -25,6 +25,7 @@ import { LobeOpenAI } from './openai';
25
25
  import { LobeOpenRouterAI } from './openrouter';
26
26
  import { LobePerplexityAI } from './perplexity';
27
27
  import { LobeQwenAI } from './qwen';
28
+ import { LobeSenseNovaAI } from './sensenova';
28
29
  import { LobeSiliconCloudAI } from './siliconcloud';
29
30
  import { LobeSparkAI } from './spark';
30
31
  import { LobeStepfunAI } from './stepfun';
@@ -146,6 +147,7 @@ class AgentRuntime {
146
147
  openrouter: Partial<ClientOptions>;
147
148
  perplexity: Partial<ClientOptions>;
148
149
  qwen: Partial<ClientOptions>;
150
+ sensenova: Partial<ClientOptions>;
149
151
  siliconcloud: Partial<ClientOptions>;
150
152
  spark: Partial<ClientOptions>;
151
153
  stepfun: Partial<ClientOptions>;
@@ -314,6 +316,11 @@ class AgentRuntime {
314
316
  runtimeModel = new LobeHunyuanAI(params.hunyuan);
315
317
  break;
316
318
  }
319
+
320
+ case ModelProvider.SenseNova: {
321
+ runtimeModel = await LobeSenseNovaAI.fromAPIKey(params.sensenova);
322
+ break;
323
+ }
317
324
  }
318
325
 
319
326
  return new AgentRuntime(runtimeModel);
@@ -15,6 +15,7 @@ export { LobeOpenAI } from './openai';
15
15
  export { LobeOpenRouterAI } from './openrouter';
16
16
  export { LobePerplexityAI } from './perplexity';
17
17
  export { LobeQwenAI } from './qwen';
18
+ export { LobeSenseNovaAI } from './sensenova';
18
19
  export { LobeTogetherAI } from './togetherai';
19
20
  export * from './types';
20
21
  export { AgentRuntimeError } from './utils/createError';
@@ -0,0 +1,18 @@
1
+ // @vitest-environment node
2
+ import { generateApiToken } from './authToken';
3
+
4
+ describe('generateApiToken', () => {
5
+ it('should throw an error if no apiKey is provided', async () => {
6
+ await expect(generateApiToken()).rejects.toThrow('Invalid apiKey');
7
+ });
8
+
9
+ it('should throw an error if apiKey is invalid', async () => {
10
+ await expect(generateApiToken('invalid')).rejects.toThrow('Invalid apiKey');
11
+ });
12
+
13
+ it('should return a token if a valid apiKey is provided', async () => {
14
+ const apiKey = 'id:secret';
15
+ const token = await generateApiToken(apiKey);
16
+ expect(token).toBeDefined();
17
+ });
18
+ });
@@ -0,0 +1,27 @@
1
+ import { SignJWT } from 'jose';
2
+
3
+ // https://console.sensecore.cn/help/docs/model-as-a-service/nova/overview/Authorization
4
+ export const generateApiToken = async (apiKey?: string): Promise<string> => {
5
+ if (!apiKey) {
6
+ throw new Error('Invalid apiKey');
7
+ }
8
+
9
+ const [id, secret] = apiKey.split(':');
10
+ if (!id || !secret) {
11
+ throw new Error('Invalid apiKey');
12
+ }
13
+
14
+ const currentTime = Math.floor(Date.now() / 1000);
15
+
16
+ const payload = {
17
+ exp: currentTime + 1800,
18
+ iss: id,
19
+ nbf: currentTime - 5,
20
+ };
21
+
22
+ const jwt = await new SignJWT(payload)
23
+ .setProtectedHeader({ alg: 'HS256', typ: 'JWT' })
24
+ .sign(new TextEncoder().encode(secret));
25
+
26
+ return jwt;
27
+ };
@@ -0,0 +1,321 @@
// @vitest-environment node
import { OpenAI } from 'openai';
import { Mock, afterEach, beforeEach, describe, expect, it, vi } from 'vitest';

import { ChatStreamCallbacks, LobeOpenAI } from '@/libs/agent-runtime';
import * as debugStreamModule from '@/libs/agent-runtime/utils/debugStream';

import * as authTokenModule from './authToken';
import { LobeSenseNovaAI } from './index';

const bizErrorType = 'ProviderBizError';
const invalidErrorType = 'InvalidProviderAPIKey';

// Mock the JWT helper module so no real signing happens in these tests.
vi.mock('./authToken');

describe('LobeSenseNovaAI', () => {
  beforeEach(() => {
    // Mock generateApiToken so fromAPIKey succeeds without a real key pair.
    vi.spyOn(authTokenModule, 'generateApiToken').mockResolvedValue('mocked_token');
  });

  afterEach(() => {
    vi.restoreAllMocks();
  });

  describe('fromAPIKey', () => {
    it('should correctly initialize with an API key', async () => {
      const lobeSenseNovaAI = await LobeSenseNovaAI.fromAPIKey({ apiKey: 'test_api_key' });
      expect(lobeSenseNovaAI).toBeInstanceOf(LobeSenseNovaAI);
      expect(lobeSenseNovaAI.baseURL).toEqual('https://api.sensenova.cn/compatible-mode/v1');
    });

    it('should throw an error if API key is invalid', async () => {
      // Token generation failure is surfaced as InvalidProviderAPIKey.
      vi.spyOn(authTokenModule, 'generateApiToken').mockRejectedValue(new Error('Invalid API Key'));
      try {
        await LobeSenseNovaAI.fromAPIKey({ apiKey: 'asd' });
      } catch (e) {
        expect(e).toEqual({ errorType: invalidErrorType });
      }
    });
  });

  describe('chat', () => {
    let instance: LobeSenseNovaAI;

    beforeEach(async () => {
      instance = await LobeSenseNovaAI.fromAPIKey({
        apiKey: 'test_api_key',
      });

      // Mock chat.completions.create so no network request is made.
      vi.spyOn(instance['client'].chat.completions, 'create').mockResolvedValue(
        new ReadableStream() as any,
      );
    });

    it('should return a StreamingTextResponse on successful API call', async () => {
      const result = await instance.chat({
        messages: [{ content: 'Hello', role: 'user' }],
        model: 'SenseChat',
        temperature: 0,
      });
      expect(result).toBeInstanceOf(Response);
    });

    it('should handle callback and headers correctly', async () => {
      // Mock chat.completions.create to return a readable stream of one chunk.
      const mockCreateMethod = vi
        .spyOn(instance['client'].chat.completions, 'create')
        .mockResolvedValue(
          new ReadableStream({
            start(controller) {
              controller.enqueue({
                id: 'chatcmpl-8xDx5AETP8mESQN7UB30GxTN2H1SO',
                object: 'chat.completion.chunk',
                created: 1709125675,
                model: 'gpt-3.5-turbo-0125',
                system_fingerprint: 'fp_86156a94a0',
                choices: [
                  { index: 0, delta: { content: 'hello' }, logprobs: null, finish_reason: null },
                ],
              });
              controller.close();
            },
          }) as any,
        );

      // Prepare the callback and custom headers.
      const mockCallback: ChatStreamCallbacks = {
        onStart: vi.fn(),
        onToken: vi.fn(),
      };
      const mockHeaders = { 'Custom-Header': 'TestValue' };

      // Run the call under test.
      const result = await instance.chat(
        {
          messages: [{ content: 'Hello', role: 'user' }],
          model: 'SenseChat',
          temperature: 0,
        },
        { callback: mockCallback, headers: mockHeaders },
      );

      // Consume the body so stream callbacks get a chance to fire.
      await result.text(); // ensure the stream is fully consumed

      // Verify the custom header is passed through to the Response.
      expect(result.headers.get('Custom-Header')).toEqual('TestValue');

      // Clean up the spy.
      mockCreateMethod.mockRestore();
    });

    it('should transform messages correctly', async () => {
      const spyOn = vi.spyOn(instance['client'].chat.completions, 'create');

      await instance.chat({
        frequency_penalty: 0,
        messages: [
          { content: 'Hello', role: 'user' },
          { content: [{ type: 'text', text: 'Hello again' }], role: 'user' },
        ],
        model: 'SenseChat',
        temperature: 0,
        top_p: 1,
      });

      const calledWithParams = spyOn.mock.calls[0][0];

      // Out-of-range sampling params must be dropped, not clamped.
      expect(calledWithParams.frequency_penalty).toBeUndefined(); // frequency_penalty 0 should be undefined
      expect(calledWithParams.messages[1].content).toEqual([{ type: 'text', text: 'Hello again' }]);
      expect(calledWithParams.temperature).toBeUndefined(); // temperature 0 should be undefined
      expect(calledWithParams.top_p).toBeUndefined(); // top_p 1 should be undefined
    });

    describe('Error', () => {
      it('should return SenseNovaAIBizError with an openai error response when OpenAI.APIError is thrown', async () => {
        // Arrange
        const apiError = new OpenAI.APIError(
          400,
          {
            status: 400,
            error: {
              message: 'Bad Request',
            },
          },
          'Error message',
          {},
        );

        vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(apiError);

        // Act
        try {
          await instance.chat({
            messages: [{ content: 'Hello', role: 'user' }],
            model: 'SenseChat',
            temperature: 0,
          });
        } catch (e) {
          expect(e).toEqual({
            endpoint: 'https://api.sensenova.cn/compatible-mode/v1',
            error: {
              error: { message: 'Bad Request' },
              status: 400,
            },
            errorType: bizErrorType,
            provider: 'sensenova',
          });
        }
      });

      it('should throw AgentRuntimeError with NoOpenAIAPIKey if no apiKey is provided', async () => {
        try {
          await LobeSenseNovaAI.fromAPIKey({ apiKey: '' });
        } catch (e) {
          expect(e).toEqual({ errorType: invalidErrorType });
        }
      });

      it('should return OpenAIBizError with the cause when OpenAI.APIError is thrown with cause', async () => {
        // Arrange
        const errorInfo = {
          stack: 'abc',
          cause: {
            message: 'api is undefined',
          },
        };
        const apiError = new OpenAI.APIError(400, errorInfo, 'module error', {});

        vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(apiError);

        // Act
        try {
          await instance.chat({
            messages: [{ content: 'Hello', role: 'user' }],
            model: 'SenseChat',
            temperature: 0.2,
          });
        } catch (e) {
          expect(e).toEqual({
            endpoint: 'https://api.sensenova.cn/compatible-mode/v1',
            error: {
              cause: { message: 'api is undefined' },
              stack: 'abc',
            },
            errorType: bizErrorType,
            provider: 'sensenova',
          });
        }
      });

      it('should return OpenAIBizError with an cause response with desensitize Url', async () => {
        // Arrange
        const errorInfo = {
          stack: 'abc',
          cause: { message: 'api is undefined' },
        };
        const apiError = new OpenAI.APIError(400, errorInfo, 'module error', {});

        // Non-default baseURL must be masked in the reported endpoint.
        instance = await LobeSenseNovaAI.fromAPIKey({
          apiKey: 'test',

          baseURL: 'https://abc.com/v2',
        });

        vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(apiError);

        // Act
        try {
          await instance.chat({
            messages: [{ content: 'Hello', role: 'user' }],
            model: 'gpt-3.5-turbo',
            temperature: 0,
          });
        } catch (e) {
          expect(e).toEqual({
            endpoint: 'https://***.com/v2',
            error: {
              cause: { message: 'api is undefined' },
              stack: 'abc',
            },
            errorType: bizErrorType,
            provider: 'sensenova',
          });
        }
      });

      it('should return AgentRuntimeError for non-OpenAI errors', async () => {
        // Arrange
        const genericError = new Error('Generic Error');

        vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(genericError);

        // Act
        try {
          await instance.chat({
            messages: [{ content: 'Hello', role: 'user' }],
            model: 'SenseChat',
            temperature: 0,
          });
        } catch (e) {
          expect(e).toEqual({
            endpoint: 'https://api.sensenova.cn/compatible-mode/v1',
            errorType: 'AgentRuntimeError',
            provider: 'sensenova',
            error: {
              name: genericError.name,
              cause: genericError.cause,
              message: genericError.message,
              stack: genericError.stack,
            },
          });
        }
      });
    });

    describe('DEBUG', () => {
      it('should call debugStream and return StreamingTextResponse when DEBUG_OPENAI_CHAT_COMPLETION is 1', async () => {
        // Arrange
        const mockProdStream = new ReadableStream() as any; // mocked production stream
        const mockDebugStream = new ReadableStream({
          start(controller) {
            controller.enqueue('Debug stream content');
            controller.close();
          },
        }) as any;
        mockDebugStream.toReadableStream = () => mockDebugStream; // attach a toReadableStream method

        // Mock the return value of chat.completions.create, including a mocked tee method.
        (instance['client'].chat.completions.create as Mock).mockResolvedValue({
          tee: () => [mockProdStream, { toReadableStream: () => mockDebugStream }],
        });

        // Save the original environment variable value.
        const originalDebugValue = process.env.DEBUG_SENSENOVA_CHAT_COMPLETION;

        // Enable the debug flag for this test.
        process.env.DEBUG_SENSENOVA_CHAT_COMPLETION = '1';
        vi.spyOn(debugStreamModule, 'debugStream').mockImplementation(() => Promise.resolve());

        // Run the call under test; with the flag set it should route the
        // debug branch of the tee'd stream through debugStream.
        await instance.chat({
          messages: [{ content: 'Hello', role: 'user' }],
          model: 'SenseChat',
          temperature: 0,
        });

        // Verify debugStream was invoked.
        expect(debugStreamModule.debugStream).toHaveBeenCalled();

        // Restore the original environment variable value.
        process.env.DEBUG_SENSENOVA_CHAT_COMPLETION = originalDebugValue;
      });
    });
  });
});
@@ -0,0 +1,98 @@
1
+ import OpenAI, { ClientOptions } from 'openai';
2
+
3
+ import { LobeRuntimeAI } from '../BaseAI';
4
+ import { AgentRuntimeErrorType } from '../error';
5
+ import { ChatCompetitionOptions, ChatStreamPayload, ModelProvider } from '../types';
6
+ import { AgentRuntimeError } from '../utils/createError';
7
+ import { debugStream } from '../utils/debugStream';
8
+ import { desensitizeUrl } from '../utils/desensitizeUrl';
9
+ import { handleOpenAIError } from '../utils/handleOpenAIError';
10
+ import { convertOpenAIMessages } from '../utils/openaiHelpers';
11
+ import { StreamingResponse } from '../utils/response';
12
+ import { OpenAIStream } from '../utils/streams';
13
+ import { generateApiToken } from './authToken';
14
+
15
+ const DEFAULT_BASE_URL = 'https://api.sensenova.cn/compatible-mode/v1';
16
+
17
+ export class LobeSenseNovaAI implements LobeRuntimeAI {
18
+ private client: OpenAI;
19
+
20
+ baseURL: string;
21
+
22
+ constructor(oai: OpenAI) {
23
+ this.client = oai;
24
+ this.baseURL = this.client.baseURL;
25
+ }
26
+
27
+ static async fromAPIKey({ apiKey, baseURL = DEFAULT_BASE_URL, ...res }: ClientOptions = {}) {
28
+ const invalidSenseNovaAPIKey = AgentRuntimeError.createError(
29
+ AgentRuntimeErrorType.InvalidProviderAPIKey,
30
+ );
31
+
32
+ if (!apiKey) throw invalidSenseNovaAPIKey;
33
+
34
+ let token: string;
35
+
36
+ try {
37
+ token = await generateApiToken(apiKey);
38
+ } catch {
39
+ throw invalidSenseNovaAPIKey;
40
+ }
41
+
42
+ const header = { Authorization: `Bearer ${token}` };
43
+
44
+ const llm = new OpenAI({ apiKey, baseURL, defaultHeaders: header, ...res });
45
+
46
+ return new LobeSenseNovaAI(llm);
47
+ }
48
+
49
+ async chat(payload: ChatStreamPayload, options?: ChatCompetitionOptions) {
50
+ try {
51
+ const params = await this.buildCompletionsParams(payload);
52
+
53
+ const response = await this.client.chat.completions.create(
54
+ params as unknown as OpenAI.ChatCompletionCreateParamsStreaming,
55
+ );
56
+
57
+ const [prod, debug] = response.tee();
58
+
59
+ if (process.env.DEBUG_SENSENOVA_CHAT_COMPLETION === '1') {
60
+ debugStream(debug.toReadableStream()).catch(console.error);
61
+ }
62
+
63
+ return StreamingResponse(OpenAIStream(prod), {
64
+ headers: options?.headers,
65
+ });
66
+ } catch (error) {
67
+ const { errorResult, RuntimeError } = handleOpenAIError(error);
68
+
69
+ const errorType = RuntimeError || AgentRuntimeErrorType.ProviderBizError;
70
+ let desensitizedEndpoint = this.baseURL;
71
+
72
+ if (this.baseURL !== DEFAULT_BASE_URL) {
73
+ desensitizedEndpoint = desensitizeUrl(this.baseURL);
74
+ }
75
+ throw AgentRuntimeError.chat({
76
+ endpoint: desensitizedEndpoint,
77
+ error: errorResult,
78
+ errorType,
79
+ provider: ModelProvider.SenseNova,
80
+ });
81
+ }
82
+ }
83
+
84
+ private async buildCompletionsParams(payload: ChatStreamPayload) {
85
+ const { frequency_penalty, messages, temperature, top_p, ...params } = payload;
86
+
87
+ return {
88
+ messages: await convertOpenAIMessages(messages as any),
89
+ ...params,
90
+ frequency_penalty: (frequency_penalty !== undefined && frequency_penalty > 0 && frequency_penalty <= 2) ? frequency_penalty : undefined,
91
+ stream: true,
92
+ temperature: (temperature !== undefined && temperature > 0 && temperature <= 2) ? temperature : undefined,
93
+ top_p: (top_p !== undefined && top_p > 0 && top_p < 1) ? top_p : undefined,
94
+ };
95
+ }
96
+ }
97
+
98
+ export default LobeSenseNovaAI;
@@ -44,6 +44,7 @@ export enum ModelProvider {
44
44
  OpenRouter = 'openrouter',
45
45
  Perplexity = 'perplexity',
46
46
  Qwen = 'qwen',
47
+ SenseNova = 'sensenova',
47
48
  SiliconCloud = 'siliconcloud',
48
49
  Spark = 'spark',
49
50
  Stepfun = 'stepfun',
@@ -122,6 +122,23 @@ export default {
122
122
  title: '下载指定的 Ollama 模型',
123
123
  },
124
124
  },
125
+ sensenova: {
126
+ sensenovaAccessKeyID: {
127
+ desc: '填入 SenseNova Access Key ID',
128
+ placeholder: 'SenseNova Access Key ID',
129
+ title: 'Access Key ID',
130
+ },
131
+ sensenovaAccessKeySecret: {
132
+ desc: '填入 SenseNova Access Key Secret',
133
+ placeholder: 'SenseNova Access Key Secret',
134
+ title: 'Access Key Secret',
135
+ },
136
+ unlock: {
137
+ description:
138
+ '输入你的 Access Key ID / Access Key Secret 即可开始会话。应用不会记录你的鉴权配置',
139
+ title: '使用自定义 SenseNova 鉴权信息',
140
+ },
141
+ },
125
142
  wenxin: {
126
143
  accessKey: {
127
144
  desc: '填入百度千帆平台的 Access Key',