@lobehub/chat 1.68.5 → 1.68.7

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (38)
  1. package/CHANGELOG.md +50 -0
  2. package/README.md +2 -2
  3. package/README.zh-CN.md +2 -2
  4. package/changelog/v1.json +18 -0
  5. package/locales/ar/modelProvider.json +3 -0
  6. package/locales/bg-BG/modelProvider.json +3 -0
  7. package/locales/de-DE/modelProvider.json +3 -0
  8. package/locales/en-US/modelProvider.json +3 -0
  9. package/locales/es-ES/modelProvider.json +3 -0
  10. package/locales/fa-IR/modelProvider.json +3 -0
  11. package/locales/fr-FR/modelProvider.json +3 -0
  12. package/locales/it-IT/modelProvider.json +3 -0
  13. package/locales/ja-JP/modelProvider.json +3 -0
  14. package/locales/ko-KR/modelProvider.json +3 -0
  15. package/locales/nl-NL/modelProvider.json +3 -0
  16. package/locales/pl-PL/modelProvider.json +3 -0
  17. package/locales/pt-BR/modelProvider.json +3 -0
  18. package/locales/ru-RU/modelProvider.json +3 -0
  19. package/locales/tr-TR/modelProvider.json +3 -0
  20. package/locales/vi-VN/modelProvider.json +3 -0
  21. package/locales/zh-CN/modelProvider.json +3 -0
  22. package/locales/zh-TW/modelProvider.json +3 -0
  23. package/package.json +1 -1
  24. package/src/app/(backend)/webapi/chat/[provider]/route.test.ts +2 -2
  25. package/src/app/[variants]/(main)/settings/provider/features/CreateNewProvider/index.tsx +1 -1
  26. package/src/app/[variants]/(main)/settings/provider/features/ProviderConfig/UpdateProviderInfo/SettingModal.tsx +34 -2
  27. package/src/app/[variants]/(main)/settings/provider/features/ProviderConfig/index.tsx +14 -12
  28. package/src/features/Conversation/Extras/Usage/UsageDetail/ModelCard.tsx +6 -6
  29. package/src/features/Conversation/Extras/Usage/UsageDetail/index.tsx +6 -6
  30. package/src/libs/agent-runtime/AgentRuntime.test.ts +76 -255
  31. package/src/libs/agent-runtime/AgentRuntime.ts +13 -338
  32. package/src/libs/agent-runtime/azureOpenai/index.test.ts +9 -9
  33. package/src/libs/agent-runtime/azureOpenai/index.ts +6 -6
  34. package/src/libs/agent-runtime/runtimeMap.ts +97 -0
  35. package/src/libs/agent-runtime/vertexai/index.ts +3 -1
  36. package/src/locales/default/modelProvider.ts +3 -0
  37. package/src/server/modules/AgentRuntime/index.ts +3 -2
  38. package/src/services/chat.ts +4 -6

package/src/libs/agent-runtime/AgentRuntime.ts
@@ -3,61 +3,18 @@ import { ClientOptions } from 'openai';
 import type { TracePayload } from '@/const/trace';
 
 import { LobeRuntimeAI } from './BaseAI';
-import { LobeAi21AI } from './ai21';
-import { LobeAi360AI } from './ai360';
-import { LobeAnthropicAI } from './anthropic';
-import { LobeAzureOpenAI } from './azureOpenai';
-import { LobeAzureAI } from './azureai';
-import { LobeBaichuanAI } from './baichuan';
-import { LobeBedrockAI, LobeBedrockAIParams } from './bedrock';
-import { LobeCloudflareAI, LobeCloudflareParams } from './cloudflare';
-import { LobeDeepSeekAI } from './deepseek';
-import { LobeFireworksAI } from './fireworksai';
-import { LobeGiteeAI } from './giteeai';
-import { LobeGithubAI } from './github';
-import { LobeGoogleAI } from './google';
-import { LobeGroq } from './groq';
-import { LobeHigressAI } from './higress';
-import { LobeHuggingFaceAI } from './huggingface';
-import { LobeHunyuanAI } from './hunyuan';
-import { LobeInternLMAI } from './internlm';
-import { LobeJinaAI } from './jina';
-import { LobeLMStudioAI } from './lmstudio';
-import { LobeMinimaxAI } from './minimax';
-import { LobeMistralAI } from './mistral';
-import { LobeMoonshotAI } from './moonshot';
-import { LobeNovitaAI } from './novita';
-import { LobeNvidiaAI } from './nvidia';
-import { LobeOllamaAI } from './ollama';
+import { LobeBedrockAIParams } from './bedrock';
+import { LobeCloudflareParams } from './cloudflare';
 import { LobeOpenAI } from './openai';
-import { LobeOpenRouterAI } from './openrouter';
-import { LobePerplexityAI } from './perplexity';
-import { LobePPIOAI } from './ppio';
-import { LobeQwenAI } from './qwen';
-import { LobeSambaNovaAI } from './sambanova';
-import { LobeSenseNovaAI } from './sensenova';
-import { LobeSiliconCloudAI } from './siliconcloud';
-import { LobeSparkAI } from './spark';
-import { LobeStepfunAI } from './stepfun';
-import { LobeTaichuAI } from './taichu';
-import { LobeTencentCloudAI } from './tencentcloud';
-import { LobeTogetherAI } from './togetherai';
+import { providerRuntimeMap } from './runtimeMap';
 import {
   ChatCompetitionOptions,
   ChatStreamPayload,
   EmbeddingsOptions,
   EmbeddingsPayload,
-  ModelProvider,
   TextToImagePayload,
   TextToSpeechPayload,
 } from './types';
-import { LobeUpstageAI } from './upstage';
-import { LobeVLLMAI } from './vllm';
-import { LobeVolcengineAI } from './volcengine';
-import { LobeWenxinAI } from './wenxin';
-import { LobeXAI } from './xai';
-import { LobeZeroOneAI } from './zeroone';
-import { LobeZhipuAI } from './zhipu';
 
 export interface AgentChatOptions {
   enableTrace?: boolean;
@@ -128,307 +85,25 @@ class AgentRuntime {
    * Try to initialize the runtime with the provider and the options.
    * @example
    * ```ts
-   * const runtime = await AgentRuntime.initializeWithProviderOptions(provider, {
-   *   [provider]: {...options},
-   * })
+   * const runtime = await AgentRuntime.initializeWithProvider(provider, options)
    * ```
    * **Note**: If you try to get a AgentRuntime instance from client or server,
    * you should use the methods to get the runtime instance at first.
    * - `src/app/api/chat/agentRuntime.ts: initAgentRuntimeWithUserPayload` on server
    * - `src/services/chat.ts: initializeWithClientStore` on client
    */
-  static async initializeWithProviderOptions(
+  static async initializeWithProvider(
     provider: string,
-    params: Partial<{
-      ai21: Partial<ClientOptions>;
-      ai360: Partial<ClientOptions>;
-      anthropic: Partial<ClientOptions>;
-      azure: { apiKey?: string; apiVersion?: string; baseURL?: string };
-      azureai: { apiKey?: string; apiVersion?: string; baseURL?: string };
-      baichuan: Partial<ClientOptions>;
-      bedrock: Partial<LobeBedrockAIParams>;
-      cloudflare: Partial<LobeCloudflareParams>;
-      deepseek: Partial<ClientOptions>;
-      doubao: Partial<ClientOptions>;
-      fireworksai: Partial<ClientOptions>;
-      giteeai: Partial<ClientOptions>;
-      github: Partial<ClientOptions>;
-      google: { apiKey?: string; baseURL?: string };
-      groq: Partial<ClientOptions>;
-      higress: Partial<ClientOptions>;
-      huggingface: { apiKey?: string; baseURL?: string };
-      hunyuan: Partial<ClientOptions>;
-      internlm: Partial<ClientOptions>;
-      jina: Partial<ClientOptions>;
-      lmstudio: Partial<ClientOptions>;
-      minimax: Partial<ClientOptions>;
-      mistral: Partial<ClientOptions>;
-      moonshot: Partial<ClientOptions>;
-      novita: Partial<ClientOptions>;
-      nvidia: Partial<ClientOptions>;
-      ollama: Partial<ClientOptions>;
-      openai: Partial<ClientOptions>;
-      openrouter: Partial<ClientOptions>;
-      perplexity: Partial<ClientOptions>;
-      ppio: Partial<ClientOptions>;
-      qwen: Partial<ClientOptions>;
-      sambanova: Partial<ClientOptions>;
-      sensenova: Partial<ClientOptions>;
-      siliconcloud: Partial<ClientOptions>;
-      spark: Partial<ClientOptions>;
-      stepfun: Partial<ClientOptions>;
-      taichu: Partial<ClientOptions>;
-      tencentcloud: Partial<ClientOptions>;
-      togetherai: Partial<ClientOptions>;
-      upstage: Partial<ClientOptions>;
-      vllm: Partial<ClientOptions>;
-      volcengine: Partial<ClientOptions>;
-      wenxin: Partial<ClientOptions>;
-      xai: Partial<ClientOptions>;
-      zeroone: Partial<ClientOptions>;
-      zhipu: Partial<ClientOptions>;
-    }>,
+    params: Partial<
+      ClientOptions &
+        LobeBedrockAIParams &
+        LobeCloudflareParams & { apiKey?: string; apiVersion?: string; baseURL?: string }
+    >,
   ) {
-    let runtimeModel: LobeRuntimeAI;
+    // @ts-expect-error runtime map not include vertex so it will be undefined
+    const providerAI = providerRuntimeMap[provider] ?? LobeOpenAI;
+    const runtimeModel: LobeRuntimeAI = new providerAI(params);
 
-    switch (provider) {
-      default:
-      case ModelProvider.OpenAI: {
-        // Will use the openai as default provider
-        runtimeModel = new LobeOpenAI(params.openai ?? (params as any)[provider]);
-        break;
-      }
-
-      case ModelProvider.Azure: {
-        runtimeModel = new LobeAzureOpenAI(
-          params.azure?.baseURL,
-          params.azure?.apiKey,
-          params.azure?.apiVersion,
-        );
-        break;
-      }
-
-      case ModelProvider.AzureAI: {
-        runtimeModel = new LobeAzureAI(params.azureai);
-        break;
-      }
-
-      case ModelProvider.ZhiPu: {
-        runtimeModel = new LobeZhipuAI(params.zhipu);
-        break;
-      }
-
-      case ModelProvider.Google: {
-        runtimeModel = new LobeGoogleAI(params.google);
-        break;
-      }
-
-      case ModelProvider.Moonshot: {
-        runtimeModel = new LobeMoonshotAI(params.moonshot);
-        break;
-      }
-
-      case ModelProvider.Bedrock: {
-        runtimeModel = new LobeBedrockAI(params.bedrock);
-        break;
-      }
-
-      case ModelProvider.LMStudio: {
-        runtimeModel = new LobeLMStudioAI(params.lmstudio);
-        break;
-      }
-
-      case ModelProvider.Ollama: {
-        runtimeModel = new LobeOllamaAI(params.ollama);
-        break;
-      }
-
-      case ModelProvider.VLLM: {
-        runtimeModel = new LobeVLLMAI(params.vllm);
-        break;
-      }
-
-      case ModelProvider.Perplexity: {
-        runtimeModel = new LobePerplexityAI(params.perplexity);
-        break;
-      }
-
-      case ModelProvider.Anthropic: {
-        runtimeModel = new LobeAnthropicAI(params.anthropic);
-        break;
-      }
-
-      case ModelProvider.DeepSeek: {
-        runtimeModel = new LobeDeepSeekAI(params.deepseek);
-        break;
-      }
-
-      case ModelProvider.HuggingFace: {
-        runtimeModel = new LobeHuggingFaceAI(params.huggingface);
-        break;
-      }
-
-      case ModelProvider.Minimax: {
-        runtimeModel = new LobeMinimaxAI(params.minimax);
-        break;
-      }
-
-      case ModelProvider.Mistral: {
-        runtimeModel = new LobeMistralAI(params.mistral);
-        break;
-      }
-
-      case ModelProvider.Groq: {
-        runtimeModel = new LobeGroq(params.groq);
-        break;
-      }
-
-      case ModelProvider.Github: {
-        runtimeModel = new LobeGithubAI(params.github);
-        break;
-      }
-
-      case ModelProvider.OpenRouter: {
-        runtimeModel = new LobeOpenRouterAI(params.openrouter);
-        break;
-      }
-
-      case ModelProvider.TogetherAI: {
-        runtimeModel = new LobeTogetherAI(params.togetherai);
-        break;
-      }
-
-      case ModelProvider.FireworksAI: {
-        runtimeModel = new LobeFireworksAI(params.fireworksai);
-        break;
-      }
-
-      case ModelProvider.ZeroOne: {
-        runtimeModel = new LobeZeroOneAI(params.zeroone);
-        break;
-      }
-
-      case ModelProvider.Qwen: {
-        runtimeModel = new LobeQwenAI(params.qwen);
-        break;
-      }
-
-      case ModelProvider.Stepfun: {
-        runtimeModel = new LobeStepfunAI(params.stepfun);
-        break;
-      }
-
-      case ModelProvider.Novita: {
-        runtimeModel = new LobeNovitaAI(params.novita);
-        break;
-      }
-
-      case ModelProvider.Nvidia: {
-        runtimeModel = new LobeNvidiaAI(params.nvidia);
-        break;
-      }
-
-      case ModelProvider.Baichuan: {
-        runtimeModel = new LobeBaichuanAI(params.baichuan);
-        break;
-      }
-
-      case ModelProvider.Taichu: {
-        runtimeModel = new LobeTaichuAI(params.taichu);
-        break;
-      }
-
-      case ModelProvider.Ai360: {
-        runtimeModel = new LobeAi360AI(params.ai360);
-        break;
-      }
-
-      case ModelProvider.SiliconCloud: {
-        runtimeModel = new LobeSiliconCloudAI(params.siliconcloud);
-        break;
-      }
-
-      case ModelProvider.GiteeAI: {
-        runtimeModel = new LobeGiteeAI(params.giteeai);
-        break;
-      }
-
-      case ModelProvider.Upstage: {
-        runtimeModel = new LobeUpstageAI(params.upstage);
-        break;
-      }
-
-      case ModelProvider.Spark: {
-        runtimeModel = new LobeSparkAI(params.spark);
-        break;
-      }
-
-      case ModelProvider.Ai21: {
-        runtimeModel = new LobeAi21AI(params.ai21);
-        break;
-      }
-
-      case ModelProvider.Hunyuan: {
-        runtimeModel = new LobeHunyuanAI(params.hunyuan);
-        break;
-      }
-
-      case ModelProvider.SenseNova: {
-        runtimeModel = new LobeSenseNovaAI(params.sensenova);
-        break;
-      }
-
-      case ModelProvider.XAI: {
-        runtimeModel = new LobeXAI(params.xai);
-        break;
-      }
-
-      case ModelProvider.Jina: {
-        runtimeModel = new LobeJinaAI(params.jina);
-        break;
-      }
-
-      case ModelProvider.SambaNova: {
-        runtimeModel = new LobeSambaNovaAI(params.sambanova);
-        break;
-      }
-
-      case ModelProvider.Cloudflare: {
-        runtimeModel = new LobeCloudflareAI(params.cloudflare);
-        break;
-      }
-
-      case ModelProvider.InternLM: {
-        runtimeModel = new LobeInternLMAI(params.internlm);
-        break;
-      }
-
-      case ModelProvider.Higress: {
-        runtimeModel = new LobeHigressAI(params.higress);
-        break;
-      }
-
-      case ModelProvider.TencentCloud: {
-        runtimeModel = new LobeTencentCloudAI(params[provider]);
-        break;
-      }
-
-      case ModelProvider.Volcengine:
-      case ModelProvider.Doubao: {
-        runtimeModel = new LobeVolcengineAI(params.volcengine || params.doubao);
-        break;
-      }
-
-      case ModelProvider.Wenxin: {
-        runtimeModel = new LobeWenxinAI(params.wenxin);
-        break;
-      }
-
-      case ModelProvider.PPIO: {
-        runtimeModel = new LobePPIOAI(params.ppio ?? {});
-        break;
-      }
-    }
     return new AgentRuntime(runtimeModel);
   }
 }
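
The net effect of this refactor is a flatter public API: instead of nesting options under a per-provider key, callers now pass a single flat options object, and `initializeWithProvider` resolves the concrete runtime class from `providerRuntimeMap`, falling back to `LobeOpenAI` for unknown (e.g. custom) providers. A minimal before/after sketch of the call shape; the provider id, key value, and import path are illustrative assumptions, not taken from this diff:

```ts
import { AgentRuntime } from '@/libs/agent-runtime';

// 1.68.5 and earlier: options were nested under the provider id
// const runtime = await AgentRuntime.initializeWithProviderOptions('openrouter', {
//   openrouter: { apiKey: 'sk-or-xxx', baseURL: 'https://openrouter.ai/api/v1' },
// });

// 1.68.7: one flat options object; the runtime class is looked up in providerRuntimeMap
const runtime = await AgentRuntime.initializeWithProvider('openrouter', {
  apiKey: 'sk-or-xxx',
  baseURL: 'https://openrouter.ai/api/v1',
});
```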

package/src/libs/agent-runtime/azureOpenai/index.test.ts
@@ -16,11 +16,11 @@ describe('LobeAzureOpenAI', () => {
   let instance: LobeAzureOpenAI;
 
   beforeEach(() => {
-    instance = new LobeAzureOpenAI(
-      'https://test.openai.azure.com/',
-      'test_key',
-      '2023-03-15-preview',
-    );
+    instance = new LobeAzureOpenAI({
+      baseURL: 'https://test.openai.azure.com/',
+      apiKey: 'test_key',
+      apiVersion: '2023-03-15-preview',
+    });
 
     // 使用 vi.spyOn 来模拟 streamChatCompletions 方法
     vi.spyOn(instance['client'].chat.completions, 'create').mockResolvedValue(
@@ -42,14 +42,14 @@ describe('LobeAzureOpenAI', () => {
   });
 
   it('should create an instance of OpenAIClient with correct parameters', () => {
-    const endpoint = 'https://test.openai.azure.com/';
-    const apikey = 'test_key';
+    const baseURL = 'https://test.openai.azure.com/';
+    const apiKey = 'test_key';
     const apiVersion = '2023-03-15-preview';
 
-    const instance = new LobeAzureOpenAI(endpoint, apikey, apiVersion);
+    const instance = new LobeAzureOpenAI({ baseURL, apiKey, apiVersion });
 
     expect(instance.client).toBeInstanceOf(AzureOpenAI);
-    expect(instance.baseURL).toBe(endpoint);
+    expect(instance.baseURL).toBe(baseURL);
   });
 });
 

package/src/libs/agent-runtime/azureOpenai/index.ts
@@ -13,18 +13,18 @@ import { OpenAIStream } from '../utils/streams';
 export class LobeAzureOpenAI implements LobeRuntimeAI {
   client: AzureOpenAI;
 
-  constructor(endpoint?: string, apikey?: string, apiVersion?: string) {
-    if (!apikey || !endpoint)
+  constructor(params: { apiKey?: string; apiVersion?: string, baseURL?: string; } = {}) {
+    if (!params.apiKey || !params.baseURL)
       throw AgentRuntimeError.createError(AgentRuntimeErrorType.InvalidProviderAPIKey);
 
     this.client = new AzureOpenAI({
-      apiKey: apikey,
-      apiVersion,
+      apiKey: params.apiKey,
+      apiVersion: params.apiVersion,
       dangerouslyAllowBrowser: true,
-      endpoint,
+      endpoint: params.baseURL,
     });
 
-    this.baseURL = endpoint;
+    this.baseURL = params.baseURL;
   }
 
   baseURL: string;

package/src/libs/agent-runtime/runtimeMap.ts
@@ -0,0 +1,97 @@
+import { LobeAi21AI } from './ai21';
+import { LobeAi360AI } from './ai360';
+import LobeAnthropicAI from './anthropic';
+import { LobeAzureOpenAI } from './azureOpenai';
+import { LobeAzureAI } from './azureai';
+import { LobeBaichuanAI } from './baichuan';
+import LobeBedrockAI from './bedrock';
+import { LobeCloudflareAI } from './cloudflare';
+import { LobeDeepSeekAI } from './deepseek';
+import { LobeFireworksAI } from './fireworksai';
+import { LobeGiteeAI } from './giteeai';
+import { LobeGithubAI } from './github';
+import LobeGoogleAI from './google';
+import { LobeGroq } from './groq';
+import { LobeHigressAI } from './higress';
+import { LobeHuggingFaceAI } from './huggingface';
+import { LobeHunyuanAI } from './hunyuan';
+import { LobeInternLMAI } from './internlm';
+import { LobeJinaAI } from './jina';
+import { LobeLMStudioAI } from './lmstudio';
+import { LobeMinimaxAI } from './minimax';
+import { LobeMistralAI } from './mistral';
+import { LobeMoonshotAI } from './moonshot';
+import { LobeNovitaAI } from './novita';
+import { LobeNvidiaAI } from './nvidia';
+import LobeOllamaAI from './ollama';
+import { LobeOpenAI } from './openai';
+import { LobeOpenRouterAI } from './openrouter';
+import { LobePerplexityAI } from './perplexity';
+import { LobePPIOAI } from './ppio';
+import { LobeQwenAI } from './qwen';
+import { LobeSambaNovaAI } from './sambanova';
+import { LobeSenseNovaAI } from './sensenova';
+import { LobeSiliconCloudAI } from './siliconcloud';
+import { LobeSparkAI } from './spark';
+import { LobeStepfunAI } from './stepfun';
+import { LobeTaichuAI } from './taichu';
+import { LobeTencentCloudAI } from './tencentcloud';
+import { LobeTogetherAI } from './togetherai';
+import { ModelProvider } from './types';
+import { LobeUpstageAI } from './upstage';
+import { LobeVLLMAI } from './vllm';
+import { LobeVolcengineAI } from './volcengine';
+import { LobeWenxinAI } from './wenxin';
+import { LobeXAI } from './xai';
+import { LobeZeroOneAI } from './zeroone';
+import { LobeZhipuAI } from './zhipu';
+
+export const providerRuntimeMap = {
+  [ModelProvider.OpenAI]: LobeOpenAI,
+  [ModelProvider.Azure]: LobeAzureOpenAI,
+  [ModelProvider.AzureAI]: LobeAzureAI,
+  [ModelProvider.ZhiPu]: LobeZhipuAI,
+  [ModelProvider.Google]: LobeGoogleAI,
+  [ModelProvider.Moonshot]: LobeMoonshotAI,
+  [ModelProvider.Bedrock]: LobeBedrockAI,
+  [ModelProvider.LMStudio]: LobeLMStudioAI,
+  [ModelProvider.Ollama]: LobeOllamaAI,
+  [ModelProvider.VLLM]: LobeVLLMAI,
+  [ModelProvider.Perplexity]: LobePerplexityAI,
+  [ModelProvider.Anthropic]: LobeAnthropicAI,
+  [ModelProvider.DeepSeek]: LobeDeepSeekAI,
+  [ModelProvider.HuggingFace]: LobeHuggingFaceAI,
+  [ModelProvider.Minimax]: LobeMinimaxAI,
+  [ModelProvider.Mistral]: LobeMistralAI,
+  [ModelProvider.Groq]: LobeGroq,
+  [ModelProvider.Github]: LobeGithubAI,
+  [ModelProvider.OpenRouter]: LobeOpenRouterAI,
+  [ModelProvider.TogetherAI]: LobeTogetherAI,
+  [ModelProvider.FireworksAI]: LobeFireworksAI,
+  [ModelProvider.ZeroOne]: LobeZeroOneAI,
+  [ModelProvider.Stepfun]: LobeStepfunAI,
+  [ModelProvider.Qwen]: LobeQwenAI,
+  [ModelProvider.Novita]: LobeNovitaAI,
+  [ModelProvider.Nvidia]: LobeNvidiaAI,
+  [ModelProvider.Taichu]: LobeTaichuAI,
+  [ModelProvider.Baichuan]: LobeBaichuanAI,
+  [ModelProvider.Ai360]: LobeAi360AI,
+  [ModelProvider.SiliconCloud]: LobeSiliconCloudAI,
+  [ModelProvider.GiteeAI]: LobeGiteeAI,
+  [ModelProvider.Upstage]: LobeUpstageAI,
+  [ModelProvider.Spark]: LobeSparkAI,
+  [ModelProvider.Ai21]: LobeAi21AI,
+  [ModelProvider.Hunyuan]: LobeHunyuanAI,
+  [ModelProvider.SenseNova]: LobeSenseNovaAI,
+  [ModelProvider.XAI]: LobeXAI,
+  [ModelProvider.Jina]: LobeJinaAI,
+  [ModelProvider.SambaNova]: LobeSambaNovaAI,
+  [ModelProvider.Cloudflare]: LobeCloudflareAI,
+  [ModelProvider.InternLM]: LobeInternLMAI,
+  [ModelProvider.Higress]: LobeHigressAI,
+  [ModelProvider.TencentCloud]: LobeTencentCloudAI,
+  [ModelProvider.Volcengine]: LobeVolcengineAI,
+  [ModelProvider.PPIO]: LobePPIOAI,
+  [ModelProvider.Doubao]: LobeVolcengineAI,
+  [ModelProvider.Wenxin]: LobeWenxinAI,
+};
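
The new `providerRuntimeMap` turns provider resolution into a plain record lookup keyed by `ModelProvider` id, which is what replaces the large switch statement removed from `AgentRuntime.ts` above. A small consumption sketch; the helper itself is hypothetical and not part of the diff (`initializeWithProvider` does the equivalent inline):

```ts
import { LobeRuntimeAI } from './BaseAI';
import { LobeOpenAI } from './openai';
import { providerRuntimeMap } from './runtimeMap';

// Hypothetical helper: resolve a runtime class for a provider id,
// defaulting to LobeOpenAI when the id is not in the map (e.g. a user-defined provider).
type RuntimeCtor = new (params?: any) => LobeRuntimeAI;

const resolveRuntime = (provider: string, params?: any): LobeRuntimeAI => {
  const Runtime = (providerRuntimeMap as Record<string, RuntimeCtor>)[provider] ?? LobeOpenAI;
  return new Runtime(params);
};
```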

package/src/libs/agent-runtime/vertexai/index.ts
@@ -1,6 +1,8 @@
 import { VertexAI, VertexInit } from '@google-cloud/vertexai';
 
-import { AgentRuntimeError, AgentRuntimeErrorType, LobeGoogleAI } from '@/libs/agent-runtime';
+import { AgentRuntimeErrorType } from '../error';
+import { LobeGoogleAI } from '../google';
+import { AgentRuntimeError } from '../utils/createError';
 
 export class LobeVertexAI extends LobeGoogleAI {
   static initFromVertexAI(params?: VertexInit) {

package/src/locales/default/modelProvider.ts
@@ -325,6 +325,9 @@ export default {
     tooltip: '更新服务商基础配置',
     updateSuccess: '更新成功',
   },
+  updateCustomAiProvider: {
+    title: '更新自定义 AI 服务商配置',
+  },
   vertexai: {
     apiKey: {
       desc: '填入你的 Vertex Ai Keys',

package/src/server/modules/AgentRuntime/index.ts
@@ -130,8 +130,9 @@ export const initAgentRuntimeWithUserPayload = (
   payload: JWTPayload,
   params: any = {},
 ) => {
-  return AgentRuntime.initializeWithProviderOptions(provider, {
-    [provider]: { ...getLlmOptionsFromPayload(provider, payload), ...params },
+  return AgentRuntime.initializeWithProvider(provider, {
+    ...getLlmOptionsFromPayload(provider, payload),
+    ...params,
   });
 };
 

package/src/services/chat.ts
@@ -147,12 +147,10 @@ export function initializeWithClientStore(provider: string, payload: any) {
    * Configuration override order:
    * payload -> providerAuthPayload -> commonOptions
    */
-  return AgentRuntime.initializeWithProviderOptions(provider, {
-    [provider]: {
-      ...commonOptions,
-      ...providerAuthPayload,
-      ...payload,
-    },
+  return AgentRuntime.initializeWithProvider(provider, {
+    ...commonOptions,
+    ...providerAuthPayload,
+    ...payload,
   });
 }
 
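
Because later spreads overwrite earlier ones, the flattened object keeps the documented override order: values in `payload` win over `providerAuthPayload`, which in turn wins over `commonOptions`. A tiny illustration with made-up values:

```ts
// Illustrative only: later spreads take precedence.
const commonOptions = { apiKey: 'key-from-settings', baseURL: 'https://default.example' };
const providerAuthPayload = { apiKey: 'key-from-auth' };
const payload = { baseURL: 'https://user-override.example' };

const merged = { ...commonOptions, ...providerAuthPayload, ...payload };
// => { apiKey: 'key-from-auth', baseURL: 'https://user-override.example' }
```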