@lobehub/chat 1.68.6 → 1.68.8

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -3,61 +3,18 @@ import { ClientOptions } from 'openai';
  import type { TracePayload } from '@/const/trace';
 
  import { LobeRuntimeAI } from './BaseAI';
- import { LobeAi21AI } from './ai21';
- import { LobeAi360AI } from './ai360';
- import { LobeAnthropicAI } from './anthropic';
- import { LobeAzureOpenAI } from './azureOpenai';
- import { LobeAzureAI } from './azureai';
- import { LobeBaichuanAI } from './baichuan';
- import { LobeBedrockAI, LobeBedrockAIParams } from './bedrock';
- import { LobeCloudflareAI, LobeCloudflareParams } from './cloudflare';
- import { LobeDeepSeekAI } from './deepseek';
- import { LobeFireworksAI } from './fireworksai';
- import { LobeGiteeAI } from './giteeai';
- import { LobeGithubAI } from './github';
- import { LobeGoogleAI } from './google';
- import { LobeGroq } from './groq';
- import { LobeHigressAI } from './higress';
- import { LobeHuggingFaceAI } from './huggingface';
- import { LobeHunyuanAI } from './hunyuan';
- import { LobeInternLMAI } from './internlm';
- import { LobeJinaAI } from './jina';
- import { LobeLMStudioAI } from './lmstudio';
- import { LobeMinimaxAI } from './minimax';
- import { LobeMistralAI } from './mistral';
- import { LobeMoonshotAI } from './moonshot';
- import { LobeNovitaAI } from './novita';
- import { LobeNvidiaAI } from './nvidia';
- import { LobeOllamaAI } from './ollama';
+ import { LobeBedrockAIParams } from './bedrock';
+ import { LobeCloudflareParams } from './cloudflare';
  import { LobeOpenAI } from './openai';
- import { LobeOpenRouterAI } from './openrouter';
- import { LobePerplexityAI } from './perplexity';
- import { LobePPIOAI } from './ppio';
- import { LobeQwenAI } from './qwen';
- import { LobeSambaNovaAI } from './sambanova';
- import { LobeSenseNovaAI } from './sensenova';
- import { LobeSiliconCloudAI } from './siliconcloud';
- import { LobeSparkAI } from './spark';
- import { LobeStepfunAI } from './stepfun';
- import { LobeTaichuAI } from './taichu';
- import { LobeTencentCloudAI } from './tencentcloud';
- import { LobeTogetherAI } from './togetherai';
+ import { providerRuntimeMap } from './runtimeMap';
  import {
  ChatCompetitionOptions,
  ChatStreamPayload,
  EmbeddingsOptions,
  EmbeddingsPayload,
- ModelProvider,
  TextToImagePayload,
  TextToSpeechPayload,
  } from './types';
- import { LobeUpstageAI } from './upstage';
- import { LobeVLLMAI } from './vllm';
- import { LobeVolcengineAI } from './volcengine';
- import { LobeWenxinAI } from './wenxin';
- import { LobeXAI } from './xai';
- import { LobeZeroOneAI } from './zeroone';
- import { LobeZhipuAI } from './zhipu';
 
  export interface AgentChatOptions {
  enableTrace?: boolean;
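The long list of per-provider runtime imports is collapsed into a single `providerRuntimeMap` import. That module is not included in this diff; a minimal sketch of the shape it presumably exports is shown below (the entries and the constructor type are assumptions for illustration, not the package's actual file contents):

```ts
// Hypothetical sketch of './runtimeMap': a lookup table from provider id to runtime class.
// Only a few entries are shown; the real module presumably lists every supported provider.
import { LobeRuntimeAI } from './BaseAI';
import { LobeAnthropicAI } from './anthropic';
import { LobeOpenAI } from './openai';
import { ModelProvider } from './types';

// Any runtime can be constructed from a single options object.
type RuntimeClass = new (options?: any) => LobeRuntimeAI;

export const providerRuntimeMap: Partial<Record<ModelProvider, RuntimeClass>> = {
  [ModelProvider.Anthropic]: LobeAnthropicAI,
  [ModelProvider.OpenAI]: LobeOpenAI,
  // ...one entry per supported provider
};
```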
@@ -128,307 +85,25 @@ class AgentRuntime {
  * Try to initialize the runtime with the provider and the options.
  * @example
  * ```ts
- * const runtime = await AgentRuntime.initializeWithProviderOptions(provider, {
- * [provider]: {...options},
- * })
+ * const runtime = await AgentRuntime.initializeWithProviderOptions(provider, options)
  * ```
  * **Note**: If you try to get a AgentRuntime instance from client or server,
  * you should use the methods to get the runtime instance at first.
  * - `src/app/api/chat/agentRuntime.ts: initAgentRuntimeWithUserPayload` on server
  * - `src/services/chat.ts: initializeWithClientStore` on client
  */
- static async initializeWithProviderOptions(
+ static async initializeWithProvider(
  provider: string,
- params: Partial<{
- ai21: Partial<ClientOptions>;
- ai360: Partial<ClientOptions>;
- anthropic: Partial<ClientOptions>;
- azure: { apiKey?: string; apiVersion?: string; baseURL?: string };
- azureai: { apiKey?: string; apiVersion?: string; baseURL?: string };
- baichuan: Partial<ClientOptions>;
- bedrock: Partial<LobeBedrockAIParams>;
- cloudflare: Partial<LobeCloudflareParams>;
- deepseek: Partial<ClientOptions>;
- doubao: Partial<ClientOptions>;
- fireworksai: Partial<ClientOptions>;
- giteeai: Partial<ClientOptions>;
- github: Partial<ClientOptions>;
- google: { apiKey?: string; baseURL?: string };
- groq: Partial<ClientOptions>;
- higress: Partial<ClientOptions>;
- huggingface: { apiKey?: string; baseURL?: string };
- hunyuan: Partial<ClientOptions>;
- internlm: Partial<ClientOptions>;
- jina: Partial<ClientOptions>;
- lmstudio: Partial<ClientOptions>;
- minimax: Partial<ClientOptions>;
- mistral: Partial<ClientOptions>;
- moonshot: Partial<ClientOptions>;
- novita: Partial<ClientOptions>;
- nvidia: Partial<ClientOptions>;
- ollama: Partial<ClientOptions>;
- openai: Partial<ClientOptions>;
- openrouter: Partial<ClientOptions>;
- perplexity: Partial<ClientOptions>;
- ppio: Partial<ClientOptions>;
- qwen: Partial<ClientOptions>;
- sambanova: Partial<ClientOptions>;
- sensenova: Partial<ClientOptions>;
- siliconcloud: Partial<ClientOptions>;
- spark: Partial<ClientOptions>;
- stepfun: Partial<ClientOptions>;
- taichu: Partial<ClientOptions>;
- tencentcloud: Partial<ClientOptions>;
- togetherai: Partial<ClientOptions>;
- upstage: Partial<ClientOptions>;
- vllm: Partial<ClientOptions>;
- volcengine: Partial<ClientOptions>;
- wenxin: Partial<ClientOptions>;
- xai: Partial<ClientOptions>;
- zeroone: Partial<ClientOptions>;
- zhipu: Partial<ClientOptions>;
- }>,
+ params: Partial<
+ ClientOptions &
+ LobeBedrockAIParams &
+ LobeCloudflareParams & { apiKey?: string; apiVersion?: string; baseURL?: string }
+ >,
  ) {
- let runtimeModel: LobeRuntimeAI;
+ // @ts-expect-error runtime map not include vertex so it will be undefined
+ const providerAI = providerRuntimeMap[provider] ?? LobeOpenAI;
+ const runtimeModel: LobeRuntimeAI = new providerAI(params);
 
- switch (provider) {
- default:
- case ModelProvider.OpenAI: {
- // Will use the openai as default provider
- runtimeModel = new LobeOpenAI(params.openai ?? (params as any)[provider]);
- break;
- }
-
- case ModelProvider.Azure: {
- runtimeModel = new LobeAzureOpenAI(
- params.azure?.baseURL,
- params.azure?.apiKey,
- params.azure?.apiVersion,
- );
- break;
- }
-
- case ModelProvider.AzureAI: {
- runtimeModel = new LobeAzureAI(params.azureai);
- break;
- }
-
- case ModelProvider.ZhiPu: {
- runtimeModel = new LobeZhipuAI(params.zhipu);
- break;
- }
-
- case ModelProvider.Google: {
- runtimeModel = new LobeGoogleAI(params.google);
- break;
- }
-
- case ModelProvider.Moonshot: {
- runtimeModel = new LobeMoonshotAI(params.moonshot);
- break;
- }
-
- case ModelProvider.Bedrock: {
- runtimeModel = new LobeBedrockAI(params.bedrock);
- break;
- }
-
- case ModelProvider.LMStudio: {
- runtimeModel = new LobeLMStudioAI(params.lmstudio);
- break;
- }
-
- case ModelProvider.Ollama: {
- runtimeModel = new LobeOllamaAI(params.ollama);
- break;
- }
-
- case ModelProvider.VLLM: {
- runtimeModel = new LobeVLLMAI(params.vllm);
- break;
- }
-
- case ModelProvider.Perplexity: {
- runtimeModel = new LobePerplexityAI(params.perplexity);
- break;
- }
-
- case ModelProvider.Anthropic: {
- runtimeModel = new LobeAnthropicAI(params.anthropic);
- break;
- }
-
- case ModelProvider.DeepSeek: {
- runtimeModel = new LobeDeepSeekAI(params.deepseek);
- break;
- }
-
- case ModelProvider.HuggingFace: {
- runtimeModel = new LobeHuggingFaceAI(params.huggingface);
- break;
- }
-
- case ModelProvider.Minimax: {
- runtimeModel = new LobeMinimaxAI(params.minimax);
- break;
- }
-
- case ModelProvider.Mistral: {
- runtimeModel = new LobeMistralAI(params.mistral);
- break;
- }
-
- case ModelProvider.Groq: {
- runtimeModel = new LobeGroq(params.groq);
- break;
- }
-
- case ModelProvider.Github: {
- runtimeModel = new LobeGithubAI(params.github);
- break;
- }
-
- case ModelProvider.OpenRouter: {
- runtimeModel = new LobeOpenRouterAI(params.openrouter);
- break;
- }
-
- case ModelProvider.TogetherAI: {
- runtimeModel = new LobeTogetherAI(params.togetherai);
- break;
- }
-
- case ModelProvider.FireworksAI: {
- runtimeModel = new LobeFireworksAI(params.fireworksai);
- break;
- }
-
- case ModelProvider.ZeroOne: {
- runtimeModel = new LobeZeroOneAI(params.zeroone);
- break;
- }
-
- case ModelProvider.Qwen: {
- runtimeModel = new LobeQwenAI(params.qwen);
- break;
- }
-
- case ModelProvider.Stepfun: {
- runtimeModel = new LobeStepfunAI(params.stepfun);
- break;
- }
-
- case ModelProvider.Novita: {
- runtimeModel = new LobeNovitaAI(params.novita);
- break;
- }
-
- case ModelProvider.Nvidia: {
- runtimeModel = new LobeNvidiaAI(params.nvidia);
- break;
- }
-
- case ModelProvider.Baichuan: {
- runtimeModel = new LobeBaichuanAI(params.baichuan);
- break;
- }
-
- case ModelProvider.Taichu: {
- runtimeModel = new LobeTaichuAI(params.taichu);
- break;
- }
-
- case ModelProvider.Ai360: {
- runtimeModel = new LobeAi360AI(params.ai360);
- break;
- }
-
- case ModelProvider.SiliconCloud: {
- runtimeModel = new LobeSiliconCloudAI(params.siliconcloud);
- break;
- }
-
- case ModelProvider.GiteeAI: {
- runtimeModel = new LobeGiteeAI(params.giteeai);
- break;
- }
-
- case ModelProvider.Upstage: {
- runtimeModel = new LobeUpstageAI(params.upstage);
- break;
- }
-
- case ModelProvider.Spark: {
- runtimeModel = new LobeSparkAI(params.spark);
- break;
- }
-
- case ModelProvider.Ai21: {
- runtimeModel = new LobeAi21AI(params.ai21);
- break;
- }
-
- case ModelProvider.Hunyuan: {
- runtimeModel = new LobeHunyuanAI(params.hunyuan);
- break;
- }
-
- case ModelProvider.SenseNova: {
- runtimeModel = new LobeSenseNovaAI(params.sensenova);
- break;
- }
-
- case ModelProvider.XAI: {
- runtimeModel = new LobeXAI(params.xai);
- break;
- }
-
- case ModelProvider.Jina: {
- runtimeModel = new LobeJinaAI(params.jina);
- break;
- }
-
- case ModelProvider.SambaNova: {
- runtimeModel = new LobeSambaNovaAI(params.sambanova);
- break;
- }
-
- case ModelProvider.Cloudflare: {
- runtimeModel = new LobeCloudflareAI(params.cloudflare);
- break;
- }
-
- case ModelProvider.InternLM: {
- runtimeModel = new LobeInternLMAI(params.internlm);
- break;
- }
-
- case ModelProvider.Higress: {
- runtimeModel = new LobeHigressAI(params.higress);
- break;
- }
-
- case ModelProvider.TencentCloud: {
- runtimeModel = new LobeTencentCloudAI(params[provider]);
- break;
- }
-
- case ModelProvider.Volcengine:
- case ModelProvider.Doubao: {
- runtimeModel = new LobeVolcengineAI(params.volcengine || params.doubao);
- break;
- }
-
- case ModelProvider.Wenxin: {
- runtimeModel = new LobeWenxinAI(params.wenxin);
- break;
- }
-
- case ModelProvider.PPIO: {
- runtimeModel = new LobePPIOAI(params.ppio ?? {});
- break;
- }
- }
  return new AgentRuntime(runtimeModel);
  }
  }
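The roughly 300-line per-provider `switch` is replaced by a lookup in `providerRuntimeMap`, with `LobeOpenAI` as the fallback for providers missing from the map (the `@ts-expect-error` notes that vertex is intentionally absent). For callers, the method is renamed and the per-provider nesting of the options object is dropped; a minimal before/after sketch of the call shape, with placeholder values:

```ts
// 1.68.6 — options were nested under the provider key:
const before = await AgentRuntime.initializeWithProviderOptions('openai', {
  openai: { apiKey: 'sk-placeholder' },
});

// 1.68.8 — the method is renamed and receives the options object directly;
// unknown providers fall back to the OpenAI-compatible runtime:
const after = await AgentRuntime.initializeWithProvider('openai', {
  apiKey: 'sk-placeholder',
});
```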
@@ -16,11 +16,11 @@ describe('LobeAzureOpenAI', () => {
  let instance: LobeAzureOpenAI;
 
  beforeEach(() => {
- instance = new LobeAzureOpenAI(
- 'https://test.openai.azure.com/',
- 'test_key',
- '2023-03-15-preview',
- );
+ instance = new LobeAzureOpenAI({
+ baseURL: 'https://test.openai.azure.com/',
+ apiKey: 'test_key',
+ apiVersion: '2023-03-15-preview',
+ });
 
  // Use vi.spyOn to mock the streamChatCompletions method
  vi.spyOn(instance['client'].chat.completions, 'create').mockResolvedValue(
@@ -42,14 +42,14 @@ describe('LobeAzureOpenAI', () => {
  });
 
  it('should create an instance of OpenAIClient with correct parameters', () => {
- const endpoint = 'https://test.openai.azure.com/';
- const apikey = 'test_key';
+ const baseURL = 'https://test.openai.azure.com/';
+ const apiKey = 'test_key';
  const apiVersion = '2023-03-15-preview';
 
- const instance = new LobeAzureOpenAI(endpoint, apikey, apiVersion);
+ const instance = new LobeAzureOpenAI({ baseURL, apiKey, apiVersion });
 
  expect(instance.client).toBeInstanceOf(AzureOpenAI);
- expect(instance.baseURL).toBe(endpoint);
+ expect(instance.baseURL).toBe(baseURL);
  });
  });
 
@@ -13,18 +13,18 @@ import { OpenAIStream } from '../utils/streams';
  export class LobeAzureOpenAI implements LobeRuntimeAI {
  client: AzureOpenAI;
 
- constructor(endpoint?: string, apikey?: string, apiVersion?: string) {
- if (!apikey || !endpoint)
+ constructor(params: { apiKey?: string; apiVersion?: string, baseURL?: string; } = {}) {
+ if (!params.apiKey || !params.baseURL)
  throw AgentRuntimeError.createError(AgentRuntimeErrorType.InvalidProviderAPIKey);
 
  this.client = new AzureOpenAI({
- apiKey: apikey,
- apiVersion,
+ apiKey: params.apiKey,
+ apiVersion: params.apiVersion,
  dangerouslyAllowBrowser: true,
- endpoint,
+ endpoint: params.baseURL,
  });
 
- this.baseURL = endpoint;
+ this.baseURL = params.baseURL;
  }
 
  baseURL: string;
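The Azure OpenAI runtime now takes a single params object instead of positional arguments, matching the other runtimes and the unified `initializeWithProvider` signature. A construction sketch reusing the values from the updated tests (endpoint, key, and version are placeholders):

```ts
// 1.68.6 (positional): new LobeAzureOpenAI(baseURL, apiKey, apiVersion)
// 1.68.8 (params object):
const azure = new LobeAzureOpenAI({
  apiKey: 'test_key',                        // missing apiKey or baseURL throws InvalidProviderAPIKey
  apiVersion: '2023-03-15-preview',
  baseURL: 'https://test.openai.azure.com/', // forwarded to the Azure SDK as `endpoint`
});
```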