@lobehub/chat 0.156.2 → 0.157.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (114)
  1. package/CHANGELOG.md +25 -0
  2. package/package.json +3 -2
  3. package/src/config/modelProviders/anthropic.ts +3 -0
  4. package/src/config/modelProviders/google.ts +3 -0
  5. package/src/config/modelProviders/groq.ts +5 -1
  6. package/src/config/modelProviders/minimax.ts +10 -7
  7. package/src/config/modelProviders/mistral.ts +1 -0
  8. package/src/config/modelProviders/moonshot.ts +3 -0
  9. package/src/config/modelProviders/zhipu.ts +2 -6
  10. package/src/config/server/provider.ts +1 -1
  11. package/src/database/client/core/db.ts +32 -0
  12. package/src/database/client/core/schemas.ts +9 -0
  13. package/src/database/client/models/__tests__/message.test.ts +2 -2
  14. package/src/database/client/schemas/message.ts +8 -1
  15. package/src/features/AgentSetting/store/action.ts +15 -6
  16. package/src/features/Conversation/Actions/Tool.tsx +16 -0
  17. package/src/features/Conversation/Actions/index.ts +2 -2
  18. package/src/features/Conversation/Messages/Assistant/ToolCalls/index.tsx +78 -0
  19. package/src/features/Conversation/Messages/Assistant/ToolCalls/style.ts +25 -0
  20. package/src/features/Conversation/Messages/Assistant/index.tsx +47 -0
  21. package/src/features/Conversation/Messages/Default.tsx +4 -1
  22. package/src/features/Conversation/{Plugins → Messages/Tool}/Inspector/index.tsx +34 -35
  23. package/src/features/Conversation/Messages/Tool/index.tsx +44 -0
  24. package/src/features/Conversation/Messages/index.ts +3 -2
  25. package/src/features/Conversation/Plugins/Render/StandaloneType/Iframe.tsx +1 -1
  26. package/src/features/Conversation/components/SkeletonList.tsx +2 -2
  27. package/src/features/Conversation/index.tsx +2 -3
  28. package/src/libs/agent-runtime/BaseAI.ts +2 -9
  29. package/src/libs/agent-runtime/anthropic/index.test.ts +195 -0
  30. package/src/libs/agent-runtime/anthropic/index.ts +71 -15
  31. package/src/libs/agent-runtime/azureOpenai/index.ts +6 -5
  32. package/src/libs/agent-runtime/bedrock/index.ts +24 -18
  33. package/src/libs/agent-runtime/google/index.test.ts +154 -0
  34. package/src/libs/agent-runtime/google/index.ts +91 -10
  35. package/src/libs/agent-runtime/groq/index.test.ts +41 -72
  36. package/src/libs/agent-runtime/groq/index.ts +7 -0
  37. package/src/libs/agent-runtime/minimax/index.test.ts +2 -2
  38. package/src/libs/agent-runtime/minimax/index.ts +14 -37
  39. package/src/libs/agent-runtime/mistral/index.test.ts +0 -53
  40. package/src/libs/agent-runtime/mistral/index.ts +1 -0
  41. package/src/libs/agent-runtime/moonshot/index.test.ts +1 -71
  42. package/src/libs/agent-runtime/ollama/index.test.ts +197 -0
  43. package/src/libs/agent-runtime/ollama/index.ts +3 -3
  44. package/src/libs/agent-runtime/openai/index.test.ts +0 -53
  45. package/src/libs/agent-runtime/openrouter/index.test.ts +1 -53
  46. package/src/libs/agent-runtime/perplexity/index.test.ts +0 -71
  47. package/src/libs/agent-runtime/perplexity/index.ts +2 -3
  48. package/src/libs/agent-runtime/togetherai/__snapshots__/index.test.ts.snap +886 -0
  49. package/src/libs/agent-runtime/togetherai/fixtures/models.json +8111 -0
  50. package/src/libs/agent-runtime/togetherai/index.test.ts +16 -54
  51. package/src/libs/agent-runtime/types/chat.ts +19 -3
  52. package/src/libs/agent-runtime/utils/anthropicHelpers.test.ts +120 -1
  53. package/src/libs/agent-runtime/utils/anthropicHelpers.ts +67 -4
  54. package/src/libs/agent-runtime/utils/debugStream.test.ts +70 -0
  55. package/src/libs/agent-runtime/utils/debugStream.ts +39 -9
  56. package/src/libs/agent-runtime/utils/openaiCompatibleFactory/index.test.ts +521 -0
  57. package/src/libs/agent-runtime/utils/openaiCompatibleFactory/index.ts +76 -5
  58. package/src/libs/agent-runtime/utils/response.ts +12 -0
  59. package/src/libs/agent-runtime/utils/streams/anthropic.test.ts +197 -0
  60. package/src/libs/agent-runtime/utils/streams/anthropic.ts +91 -0
  61. package/src/libs/agent-runtime/utils/streams/bedrock/claude.ts +21 -0
  62. package/src/libs/agent-runtime/utils/streams/bedrock/common.ts +32 -0
  63. package/src/libs/agent-runtime/utils/streams/bedrock/index.ts +3 -0
  64. package/src/libs/agent-runtime/utils/streams/bedrock/llama.test.ts +196 -0
  65. package/src/libs/agent-runtime/utils/streams/bedrock/llama.ts +51 -0
  66. package/src/libs/agent-runtime/utils/streams/google-ai.test.ts +97 -0
  67. package/src/libs/agent-runtime/utils/streams/google-ai.ts +68 -0
  68. package/src/libs/agent-runtime/utils/streams/index.ts +7 -0
  69. package/src/libs/agent-runtime/utils/streams/minimax.ts +39 -0
  70. package/src/libs/agent-runtime/utils/streams/ollama.test.ts +77 -0
  71. package/src/libs/agent-runtime/utils/streams/ollama.ts +38 -0
  72. package/src/libs/agent-runtime/utils/streams/openai.test.ts +263 -0
  73. package/src/libs/agent-runtime/utils/streams/openai.ts +79 -0
  74. package/src/libs/agent-runtime/utils/streams/protocol.ts +100 -0
  75. package/src/libs/agent-runtime/zeroone/index.test.ts +1 -53
  76. package/src/libs/agent-runtime/zhipu/index.test.ts +1 -1
  77. package/src/libs/agent-runtime/zhipu/index.ts +3 -2
  78. package/src/locales/default/plugin.ts +3 -4
  79. package/src/migrations/FromV4ToV5/fixtures/from-v1-to-v5-output.json +245 -0
  80. package/src/migrations/FromV4ToV5/fixtures/function-input-v4.json +96 -0
  81. package/src/migrations/FromV4ToV5/fixtures/function-output-v5.json +120 -0
  82. package/src/migrations/FromV4ToV5/index.ts +58 -0
  83. package/src/migrations/FromV4ToV5/migrations.test.ts +49 -0
  84. package/src/migrations/FromV4ToV5/types/v4.ts +21 -0
  85. package/src/migrations/FromV4ToV5/types/v5.ts +27 -0
  86. package/src/migrations/index.ts +8 -1
  87. package/src/services/__tests__/chat.test.ts +10 -20
  88. package/src/services/chat.ts +78 -65
  89. package/src/store/chat/slices/enchance/action.ts +15 -10
  90. package/src/store/chat/slices/message/action.test.ts +36 -86
  91. package/src/store/chat/slices/message/action.ts +70 -79
  92. package/src/store/chat/slices/message/reducer.ts +18 -1
  93. package/src/store/chat/slices/message/selectors.test.ts +38 -68
  94. package/src/store/chat/slices/message/selectors.ts +1 -22
  95. package/src/store/chat/slices/plugin/action.test.ts +147 -203
  96. package/src/store/chat/slices/plugin/action.ts +96 -82
  97. package/src/store/chat/slices/share/action.test.ts +3 -3
  98. package/src/store/chat/slices/share/action.ts +1 -1
  99. package/src/store/chat/slices/topic/action.ts +7 -2
  100. package/src/store/tool/selectors/tool.ts +6 -24
  101. package/src/store/tool/slices/builtin/action.test.ts +90 -0
  102. package/src/types/llm.ts +1 -1
  103. package/src/types/message/index.ts +9 -4
  104. package/src/types/message/tools.ts +57 -0
  105. package/src/types/openai/chat.ts +6 -0
  106. package/src/utils/fetch.test.ts +245 -1
  107. package/src/utils/fetch.ts +120 -44
  108. package/src/utils/toolCall.ts +21 -0
  109. package/src/features/Conversation/Messages/Assistant.tsx +0 -26
  110. package/src/features/Conversation/Messages/Function.tsx +0 -35
  111. package/src/libs/agent-runtime/ollama/stream.ts +0 -31
  112. package/src/features/Conversation/{Plugins → Messages/Tool}/Inspector/PluginResultJSON.tsx +0 -0
  113. package/src/features/Conversation/{Plugins → Messages/Tool}/Inspector/Settings.tsx +0 -0
  114. package/src/features/Conversation/{Plugins → Messages/Tool}/Inspector/style.ts +0 -0

package/src/libs/agent-runtime/bedrock/index.ts

@@ -2,7 +2,6 @@ import {
   BedrockRuntimeClient,
   InvokeModelWithResponseStreamCommand,
 } from '@aws-sdk/client-bedrock-runtime';
-import { AWSBedrockLlama2Stream, AWSBedrockStream, StreamingTextResponse } from 'ai';
 import { experimental_buildLlama2Prompt } from 'ai/prompts';
 
 import { LobeRuntimeAI } from '../BaseAI';
@@ -11,6 +10,12 @@ import { ChatCompetitionOptions, ChatStreamPayload, ModelProvider } from '../typ
 import { buildAnthropicMessages } from '../utils/anthropicHelpers';
 import { AgentRuntimeError } from '../utils/createError';
 import { debugStream } from '../utils/debugStream';
+import { StreamingResponse } from '../utils/response';
+import {
+  AWSBedrockClaudeStream,
+  AWSBedrockLlamaStream,
+  createBedrockStream,
+} from '../utils/streams';
 
 export interface LobeBedrockAIParams {
   accessKeyId?: string;
@@ -39,7 +44,7 @@ export class LobeBedrockAI implements LobeRuntimeAI {
   }
 
   async chat(payload: ChatStreamPayload, options?: ChatCompetitionOptions) {
-    if (payload.model.startsWith('meta')) return this.invokeLlamaModel(payload);
+    if (payload.model.startsWith('meta')) return this.invokeLlamaModel(payload, options);
 
     return this.invokeClaudeModel(payload, options);
   }
@@ -47,7 +52,7 @@ export class LobeBedrockAI implements LobeRuntimeAI {
   private invokeClaudeModel = async (
     payload: ChatStreamPayload,
     options?: ChatCompetitionOptions,
-  ): Promise<StreamingTextResponse> => {
+  ): Promise<Response> => {
     const { max_tokens, messages, model, temperature, top_p } = payload;
     const system_message = messages.find((m) => m.role === 'system');
     const user_messages = messages.filter((m) => m.role !== 'system');
@@ -68,23 +73,20 @@ export class LobeBedrockAI implements LobeRuntimeAI {
 
     try {
       // Ask Claude for a streaming chat completion given the prompt
-      const bedrockResponse = await this.client.send(command, { abortSignal: options?.signal });
+      const res = await this.client.send(command, { abortSignal: options?.signal });
 
-      // Convert the response into a friendly text-stream
-      const stream = AWSBedrockStream(
-        bedrockResponse,
-        options?.callback,
-        (chunk) => chunk.delta?.text,
-      );
+      const claudeStream = createBedrockStream(res);
 
-      const [debug, output] = stream.tee();
+      const [prod, debug] = claudeStream.tee();
 
       if (process.env.DEBUG_BEDROCK_CHAT_COMPLETION === '1') {
         debugStream(debug).catch(console.error);
       }
 
       // Respond with the stream
-      return new StreamingTextResponse(output);
+      return StreamingResponse(AWSBedrockClaudeStream(prod, options?.callback), {
+        headers: options?.headers,
+      });
     } catch (e) {
       const err = e as Error & { $metadata: any };
 
@@ -101,7 +103,10 @@ export class LobeBedrockAI implements LobeRuntimeAI {
     }
   };
 
-  private invokeLlamaModel = async (payload: ChatStreamPayload): Promise<StreamingTextResponse> => {
+  private invokeLlamaModel = async (
+    payload: ChatStreamPayload,
+    options?: ChatCompetitionOptions,
+  ): Promise<Response> => {
     const { max_tokens, messages, model } = payload;
     const command = new InvokeModelWithResponseStreamCommand({
       accept: 'application/json',
@@ -115,18 +120,19 @@ export class LobeBedrockAI implements LobeRuntimeAI {
 
     try {
       // Ask Claude for a streaming chat completion given the prompt
-      const bedrockResponse = await this.client.send(command);
+      const res = await this.client.send(command);
 
-      // Convert the response into a friendly text-stream
-      const stream = AWSBedrockLlama2Stream(bedrockResponse);
+      const stream = createBedrockStream(res);
 
-      const [debug, output] = stream.tee();
+      const [prod, debug] = stream.tee();
 
       if (process.env.DEBUG_BEDROCK_CHAT_COMPLETION === '1') {
         debugStream(debug).catch(console.error);
       }
       // Respond with the stream
-      return new StreamingTextResponse(output);
+      return StreamingResponse(AWSBedrockLlamaStream(prod, options?.callback), {
+        headers: options?.headers,
+      });
     } catch (e) {
       const err = e as Error & { $metadata: any };
 
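Note: the new StreamingResponse helper comes from package/src/libs/agent-runtime/utils/response.ts (entry 58 in the file list, +12 lines) and is not reproduced in this diff. A minimal sketch of what such a wrapper might look like, assuming it merely adapts a web ReadableStream into a standard fetch Response with SSE-style headers in place of the `ai` package's StreamingTextResponse:

// Hypothetical sketch only — the real implementation lives in utils/response.ts.
export const StreamingResponse = (
  stream: ReadableStream,
  options?: { headers?: Record<string, string> },
) =>
  new Response(stream, {
    headers: {
      'Cache-Control': 'no-cache',
      'Content-Type': 'text/event-stream',
      ...options?.headers,
    },
  });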

package/src/libs/agent-runtime/google/index.test.ts

@@ -1,4 +1,6 @@
 // @vitest-environment edge-runtime
+import { FunctionDeclarationSchemaType } from '@google/generative-ai';
+import { JSONSchema7 } from 'json-schema';
 import OpenAI from 'openai';
 import { Mock, afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
 
@@ -426,5 +428,157 @@ describe('LobeGoogleAI', () => {
         expect(model).toEqual('gemini-pro-vision');
       });
     });
+
+    describe('buildGoogleTools', () => {
+      it('should return undefined when tools is undefined or empty', () => {
+        expect(instance['buildGoogleTools'](undefined)).toBeUndefined();
+        expect(instance['buildGoogleTools']([])).toBeUndefined();
+      });
+
+      it('should correctly convert ChatCompletionTool to GoogleFunctionCallTool', () => {
+        const tools: OpenAI.ChatCompletionTool[] = [
+          {
+            function: {
+              name: 'testTool',
+              description: 'A test tool',
+              parameters: {
+                type: 'object',
+                properties: {
+                  param1: { type: 'string' },
+                  param2: { type: 'number' },
+                },
+                required: ['param1'],
+              },
+            },
+            type: 'function',
+          },
+        ];
+
+        const googleTools = instance['buildGoogleTools'](tools);
+
+        expect(googleTools).toHaveLength(1);
+        expect(googleTools![0].functionDeclarations![0]).toEqual({
+          name: 'testTool',
+          description: 'A test tool',
+          parameters: {
+            type: FunctionDeclarationSchemaType.OBJECT,
+            properties: {
+              param1: { type: FunctionDeclarationSchemaType.STRING },
+              param2: { type: FunctionDeclarationSchemaType.NUMBER },
+            },
+            required: ['param1'],
+          },
+        });
+      });
+    });
+
+    describe('convertSchemaObject', () => {
+      it('should correctly convert object schema', () => {
+        const schema: JSONSchema7 = {
+          type: 'object',
+          properties: {
+            prop1: { type: 'string' },
+            prop2: { type: 'number' },
+          },
+        };
+
+        const converted = instance['convertSchemaObject'](schema);
+
+        expect(converted).toEqual({
+          type: FunctionDeclarationSchemaType.OBJECT,
+          properties: {
+            prop1: { type: FunctionDeclarationSchemaType.STRING },
+            prop2: { type: FunctionDeclarationSchemaType.NUMBER },
+          },
+        });
+      });
+
+      // Similar test cases can be added for array/string/number/boolean schemas
+      // ...
+
+      it('should correctly convert nested schema', () => {
+        const schema: JSONSchema7 = {
+          type: 'object',
+          properties: {
+            nested: {
+              type: 'array',
+              items: {
+                type: 'object',
+                properties: {
+                  prop: { type: 'string' },
+                },
+              },
+            },
+          },
+        };
+
+        const converted = instance['convertSchemaObject'](schema);
+
+        expect(converted).toEqual({
+          type: FunctionDeclarationSchemaType.OBJECT,
+          properties: {
+            nested: {
+              type: FunctionDeclarationSchemaType.ARRAY,
+              items: {
+                type: FunctionDeclarationSchemaType.OBJECT,
+                properties: {
+                  prop: { type: FunctionDeclarationSchemaType.STRING },
+                },
+              },
+            },
+          },
+        });
+      });
+    });
+
+    describe('convertOAIMessagesToGoogleMessage', () => {
+      it('should correctly convert assistant message', () => {
+        const message: OpenAIChatMessage = {
+          role: 'assistant',
+          content: 'Hello',
+        };
+
+        const converted = instance['convertOAIMessagesToGoogleMessage'](message);
+
+        expect(converted).toEqual({
+          role: 'model',
+          parts: [{ text: 'Hello' }],
+        });
+      });
+
+      it('should correctly convert user message', () => {
+        const message: OpenAIChatMessage = {
+          role: 'user',
+          content: 'Hi',
+        };
+
+        const converted = instance['convertOAIMessagesToGoogleMessage'](message);
+
+        expect(converted).toEqual({
+          role: 'user',
+          parts: [{ text: 'Hi' }],
+        });
+      });
+
+      it('should correctly convert message with content parts', () => {
+        const message: OpenAIChatMessage = {
+          role: 'user',
+          content: [
+            { type: 'text', text: 'Check this image:' },
+            { type: 'image_url', image_url: { url: 'data:image/png;base64,...' } },
+          ],
+        };
+
+        const converted = instance['convertOAIMessagesToGoogleMessage'](message);
+
+        expect(converted).toEqual({
+          role: 'user',
+          parts: [
+            { text: 'Check this image:' },
+            { inlineData: { data: '...', mimeType: 'image/png' } },
+          ],
+        });
+      });
+    });
   });
 });
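Note: the inlineData assertion above implies a helper that splits a data: URI into its MIME type and base64 payload; the provider implementation below imports parseDataUri from ../utils/uriParser for exactly this. A minimal sketch of such a split, assuming a standard `data:<mime>;base64,<data>` layout (the helper's real return shape is not shown in this diff):

// Hypothetical sketch — parseDataUri's actual return shape may differ.
const parseDataUri = (uri: string): { base64: string; mimeType: string } | null => {
  const match = uri.match(/^data:(.+?);base64,(.*)$/);
  return match ? { base64: match[2], mimeType: match[1] } : null;
};

// parseDataUri('data:image/png;base64,AAAA') → { base64: 'AAAA', mimeType: 'image/png' }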

package/src/libs/agent-runtime/google/index.ts

@@ -1,10 +1,20 @@
-import { Content, GoogleGenerativeAI, Part } from '@google/generative-ai';
-import { GoogleGenerativeAIStream, StreamingTextResponse } from 'ai';
+import {
+  Content,
+  FunctionDeclaration,
+  FunctionDeclarationSchemaProperty,
+  FunctionDeclarationSchemaType,
+  Tool as GoogleFunctionCallTool,
+  GoogleGenerativeAI,
+  Part,
+} from '@google/generative-ai';
+import { JSONSchema7 } from 'json-schema';
+import { transform } from 'lodash-es';
 
 import { LobeRuntimeAI } from '../BaseAI';
 import { AgentRuntimeErrorType, ILobeAgentRuntimeErrorType } from '../error';
 import {
   ChatCompetitionOptions,
+  ChatCompletionTool,
   ChatStreamPayload,
   OpenAIChatMessage,
   UserMessageContentPart,
@@ -12,6 +22,8 @@ import {
 import { ModelProvider } from '../types/type';
 import { AgentRuntimeError } from '../utils/createError';
 import { debugStream } from '../utils/debugStream';
+import { StreamingResponse } from '../utils/response';
+import { GoogleGenerativeAIStream, googleGenAIResultToStream } from '../utils/streams';
 import { parseDataUri } from '../utils/uriParser';
 
 enum HarmCategory {
@@ -42,7 +54,7 @@ export class LobeGoogleAI implements LobeRuntimeAI {
 
       const contents = this.buildGoogleMessages(payload.messages, model);
 
-      const geminiStream = await this.client
+      const geminiStreamResult = await this.client
         .getGenerativeModel(
           {
             generationConfig: {
@@ -74,19 +86,20 @@ export class LobeGoogleAI implements LobeRuntimeAI {
           },
           { apiVersion: 'v1beta', baseUrl: this.baseURL },
         )
-        .generateContentStream({ contents });
-
-      // Convert the response into a friendly text-stream
-      const stream = GoogleGenerativeAIStream(geminiStream, options?.callback);
+        .generateContentStream({ contents, tools: this.buildGoogleTools(payload.tools) });
 
-      const [debug, output] = stream.tee();
+      const googleStream = googleGenAIResultToStream(geminiStreamResult);
+      const [prod, useForDebug] = googleStream.tee();
 
       if (process.env.DEBUG_GOOGLE_CHAT_COMPLETION === '1') {
-        debugStream(debug).catch(console.error);
+        debugStream(useForDebug).catch();
       }
 
+      // Convert the response into a friendly text-stream
+      const stream = GoogleGenerativeAIStream(prod, options?.callback);
+
       // Respond with the stream
-      return new StreamingTextResponse(output, { headers: options?.headers });
+      return StreamingResponse(stream, { headers: options?.headers });
     } catch (e) {
       const err = e as Error;
 
@@ -226,6 +239,74 @@ export class LobeGoogleAI implements LobeRuntimeAI {
       return defaultError;
     }
   }
+
+  private buildGoogleTools(
+    tools: ChatCompletionTool[] | undefined,
+  ): GoogleFunctionCallTool[] | undefined {
+    if (!tools || tools.length === 0) return;
+
+    return [
+      {
+        functionDeclarations: tools.map((tool) => this.convertToolToGoogleTool(tool)),
+      },
+    ];
+  }
+
+  private convertToolToGoogleTool = (tool: ChatCompletionTool): FunctionDeclaration => {
+    const functionDeclaration = tool.function;
+    const parameters = functionDeclaration.parameters;
+
+    return {
+      description: functionDeclaration.description,
+      name: functionDeclaration.name,
+      parameters: {
+        description: parameters?.description,
+        properties: transform(parameters?.properties, (result, value, key: string) => {
+          result[key] = this.convertSchemaObject(value as JSONSchema7);
+        }),
+        required: parameters?.required,
+        type: FunctionDeclarationSchemaType.OBJECT,
+      },
+    };
+  };
+
+  private convertSchemaObject(schema: JSONSchema7): FunctionDeclarationSchemaProperty {
+    switch (schema.type) {
+      default:
+      case 'object': {
+        return {
+          ...schema,
+          properties: Object.fromEntries(
+            Object.entries(schema.properties || {}).map(([key, value]) => [
+              key,
+              this.convertSchemaObject(value as JSONSchema7),
+            ]),
+          ),
+          type: FunctionDeclarationSchemaType.OBJECT,
+        } as any;
+      }
+
+      case 'array': {
+        return {
+          ...schema,
+          items: this.convertSchemaObject(schema.items as JSONSchema7),
+          type: FunctionDeclarationSchemaType.ARRAY,
+        } as any;
+      }
+
+      case 'string': {
+        return { ...schema, type: FunctionDeclarationSchemaType.STRING } as any;
+      }
+
+      case 'number': {
+        return { ...schema, type: FunctionDeclarationSchemaType.NUMBER } as any;
+      }
+
+      case 'boolean': {
+        return { ...schema, type: FunctionDeclarationSchemaType.BOOLEAN } as any;
+      }
+    }
+  }
 }
 
 export default LobeGoogleAI;
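Note: googleGenAIResultToStream is added in package/src/libs/agent-runtime/utils/streams/google-ai.ts (entry 67, +68 lines) and is not reproduced here. A minimal sketch of the adapter, assuming it only lifts the SDK's async-iterable chunk stream into a web ReadableStream so the result can be tee()'d for debugging:

// Hypothetical sketch — the real adapter may also normalize chunk payloads.
import {
  EnhancedGenerateContentResponse,
  GenerateContentStreamResult,
} from '@google/generative-ai';

export const googleGenAIResultToStream = (result: GenerateContentStreamResult) =>
  new ReadableStream<EnhancedGenerateContentResponse>({
    async start(controller) {
      // Drain the SDK's AsyncGenerator into a standard web stream.
      for await (const chunk of result.stream) controller.enqueue(chunk);
      controller.close();
    },
  });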

package/src/libs/agent-runtime/groq/index.test.ts

@@ -40,25 +40,7 @@ describe('LobeGroqAI', () => {
   });
 
   describe('chat', () => {
-    it('should return a StreamingTextResponse on successful API call', async () => {
-      // Arrange
-      const mockStream = new ReadableStream();
-      const mockResponse = Promise.resolve(mockStream);
-
-      (instance['client'].chat.completions.create as Mock).mockResolvedValue(mockResponse);
-
-      // Act
-      const result = await instance.chat({
-        messages: [{ content: 'Hello', role: 'user' }],
-        model: 'mistralai/mistral-7b-instruct:free',
-        temperature: 0,
-      });
-
-      // Assert
-      expect(result).toBeInstanceOf(Response);
-    });
-
-    it('should call OpenRouter API with corresponding options', async () => {
+    it('should call chat with corresponding options', async () => {
       // Arrange
       const mockStream = new ReadableStream();
       const mockResponse = Promise.resolve(mockStream);
@@ -78,6 +60,7 @@ describe('LobeGroqAI', () => {
       expect(instance['client'].chat.completions.create).toHaveBeenCalledWith(
         {
           max_tokens: 1024,
+          stream: true,
           messages: [{ content: 'Hello', role: 'user' }],
           model: 'mistralai/mistral-7b-instruct:free',
           temperature: 0.7,
@@ -88,6 +71,45 @@ describe('LobeGroqAI', () => {
       expect(result).toBeInstanceOf(Response);
     });
 
+    describe('handlePayload option', () => {
+      it('should set stream to false when payload contains tools', async () => {
+        const mockCreateMethod = vi
+          .spyOn(instance['client'].chat.completions, 'create')
+          .mockResolvedValue({
+            id: 'chatcmpl-8xDx5AETP8mESQN7UB30GxTN2H1SO',
+            object: 'chat.completion',
+            created: 1709125675,
+            model: 'mistralai/mistral-7b-instruct:free',
+            system_fingerprint: 'fp_86156a94a0',
+            choices: [
+              {
+                index: 0,
+                message: { role: 'assistant', content: 'hello' },
+                logprobs: null,
+                finish_reason: 'stop',
+              },
+            ],
+          });
+
+        await instance.chat({
+          messages: [{ content: 'Hello', role: 'user' }],
+          model: 'mistralai/mistral-7b-instruct:free',
+          temperature: 0,
+          tools: [
+            {
+              type: 'function',
+              function: { name: 'tool1', description: '', parameters: {} },
+            },
+          ],
+        });
+
+        expect(mockCreateMethod).toHaveBeenCalledWith(
+          expect.objectContaining({ stream: false }),
+          expect.anything(),
+        );
+      });
+    });
+
     describe('Error', () => {
       it('should return OpenRouterBizError with an openai error response when OpenAI.APIError is thrown', async () => {
         // Arrange
@@ -253,59 +275,6 @@ describe('LobeGroqAI', () => {
       });
     });
 
-    describe('LobeGroqAI chat with callback and headers', () => {
-      it('should handle callback and headers correctly', async () => {
-        // Mock chat.completions.create to return a readable stream
-        const mockCreateMethod = vi
-          .spyOn(instance['client'].chat.completions, 'create')
-          .mockResolvedValue(
-            new ReadableStream({
-              start(controller) {
-                controller.enqueue({
-                  id: 'chatcmpl-8xDx5AETP8mESQN7UB30GxTN2H1SO',
-                  object: 'chat.completion.chunk',
-                  created: 1709125675,
-                  model: 'mistralai/mistral-7b-instruct:free',
-                  system_fingerprint: 'fp_86156a94a0',
-                  choices: [
-                    { index: 0, delta: { content: 'hello' }, logprobs: null, finish_reason: null },
-                  ],
-                });
-                controller.close();
-              },
-            }) as any,
-          );
-
-        // Prepare the callback and headers
-        const mockCallback: ChatStreamCallbacks = {
-          onStart: vi.fn(),
-          onToken: vi.fn(),
-        };
-        const mockHeaders = { 'Custom-Header': 'TestValue' };
-
-        // Run the test
-        const result = await instance.chat(
-          {
-            messages: [{ content: 'Hello', role: 'user' }],
-            model: 'mistralai/mistral-7b-instruct:free',
-            temperature: 0,
-          },
-          { callback: mockCallback, headers: mockHeaders },
-        );
-
-        // Verify the callback was invoked
-        await result.text(); // make sure the stream is fully consumed
-        expect(mockCallback.onStart).toHaveBeenCalled();
-        expect(mockCallback.onToken).toHaveBeenCalledWith('hello');
-
-        // Verify headers were passed through correctly
-        expect(result.headers.get('Custom-Header')).toEqual('TestValue');
-
-        // Clean up
-        mockCreateMethod.mockRestore();
-      });
-    });
-
     describe('DEBUG', () => {
       it('should call debugStream and return StreamingTextResponse when DEBUG_OPENROUTER_CHAT_COMPLETION is 1', async () => {
         // Arrange

package/src/libs/agent-runtime/groq/index.ts

@@ -10,6 +10,13 @@ export const LobeGroq = LobeOpenAICompatibleFactory({
       if (error.status === 403)
         return { error, errorType: AgentRuntimeErrorType.LocationNotSupportError };
     },
+    handlePayload: (payload) => {
+      return {
+        ...payload,
+        // disable streaming when tools are present, since Groq does not support it
+        stream: !payload.tools,
+      } as any;
+    },
   },
   debug: {
     chatCompletion: () => process.env.DEBUG_GROQ_CHAT_COMPLETION === '1',
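The effect of this handlePayload on the request body: streaming stays on for plain chat, but is turned off as soon as the payload carries tools. A standalone illustration of the flag logic (not library code):

// Illustration only — mirrors the boolean above, nothing more.
const handlePayload = (payload: { stream?: boolean; tools?: unknown[] }) => ({
  ...payload,
  stream: !payload.tools,
});

handlePayload({ tools: [{ type: 'function' }] }).stream; // false — non-streaming for tool calls
handlePayload({}).stream; // true — normal streaming chat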

package/src/libs/agent-runtime/minimax/index.test.ts

@@ -62,7 +62,7 @@ describe('LobeMinimaxAI', () => {
         temperature: 0,
       });
 
-      expect(result).toBeInstanceOf(StreamingTextResponse);
+      expect(result).toBeInstanceOf(Response);
     });
 
     it('should handle text messages correctly', async () => {
@@ -85,7 +85,7 @@ describe('LobeMinimaxAI', () => {
         temperature: 0,
       });
 
-      expect(result).toBeInstanceOf(StreamingTextResponse);
+      expect(result).toBeInstanceOf(Response);
     });
 
     it('should call debugStream in DEBUG mode', async () => {

package/src/libs/agent-runtime/minimax/index.ts

@@ -1,9 +1,6 @@
-import { StreamingTextResponse } from 'ai';
 import { isEmpty } from 'lodash-es';
 import OpenAI from 'openai';
 
-import { debugStream } from '@/libs/agent-runtime/utils/debugStream';
-
 import { LobeRuntimeAI } from '../BaseAI';
 import { AgentRuntimeErrorType } from '../error';
 import {
@@ -13,6 +10,9 @@ import {
   ModelProvider,
 } from '../types';
 import { AgentRuntimeError } from '../utils/createError';
+import { debugStream } from '../utils/debugStream';
+import { StreamingResponse } from '../utils/response';
+import { MinimaxStream } from '../utils/streams';
 
 interface MinimaxBaseResponse {
   base_resp?: {
@@ -69,18 +69,8 @@ export class LobeMinimaxAI implements LobeRuntimeAI {
     this.apiKey = apiKey;
   }
 
-  async chat(
-    payload: ChatStreamPayload,
-    options?: ChatCompetitionOptions,
-  ): Promise<StreamingTextResponse> {
+  async chat(payload: ChatStreamPayload, options?: ChatCompetitionOptions): Promise<Response> {
     try {
-      let streamController: ReadableStreamDefaultController | undefined;
-      const readableStream = new ReadableStream({
-        start(controller) {
-          streamController = controller;
-        },
-      });
-
       const response = await fetch('https://api.minimax.chat/v1/text/chatcompletion_v2', {
         body: JSON.stringify(this.buildCompletionsParams(payload)),
         headers: {
@@ -107,12 +97,10 @@ export class LobeMinimaxAI implements LobeRuntimeAI {
         debugStream(debug).catch(console.error);
       }
 
-      this.parseResponse(prod.getReader(), streamController);
-
       // wait for the first response, and throw error if minimax returns an error
      await this.parseFirstResponse(prod2.getReader());
 
-      return new StreamingTextResponse(readableStream, { headers: options?.headers });
+      return StreamingResponse(MinimaxStream(prod), { headers: options?.headers });
     } catch (error) {
      console.log('error', error);
      const err = error as Error | ChatCompletionErrorPayload;
@@ -154,30 +142,19 @@ export class LobeMinimaxAI implements LobeRuntimeAI {
       max_tokens: this.getMaxTokens(payload.model),
       stream: true,
       temperature: temperature === 0 ? undefined : temperature,
+
+      tools: params.tools?.map((tool) => ({
+        function: {
+          description: tool.function.description,
+          name: tool.function.name,
+          parameters: JSON.stringify(tool.function.parameters),
+        },
+        type: 'function',
+      })),
       top_p: top_p === 0 ? undefined : top_p,
     };
   }
 
-  private async parseResponse(
-    reader: ReadableStreamDefaultReader<Uint8Array>,
-    streamController: ReadableStreamDefaultController | undefined,
-  ) {
-    const encoder = new TextEncoder();
-    const decoder = new TextDecoder();
-    let done = false;
-
-    while (!done) {
-      const { value, done: doneReading } = await reader.read();
-      done = doneReading;
-      const chunkValue = decoder.decode(value, { stream: true });
-      const data = parseMinimaxResponse(chunkValue);
-      const text = data?.choices?.at(0)?.delta?.content || undefined;
-      streamController?.enqueue(encoder.encode(text));
-    }
-
-    streamController?.close();
-  }
-
   private async parseFirstResponse(reader: ReadableStreamDefaultReader<Uint8Array>) {
     const decoder = new TextDecoder();
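Note: MinimaxStream (added in package/src/libs/agent-runtime/utils/streams/minimax.ts, entry 69, +39 lines) replaces the deleted parseResponse loop above. A minimal sketch of an equivalent transform, modeled on that removed loop and reusing the file's existing parseMinimaxResponse helper; the real stream util likely emits protocol events rather than raw text:

// Hypothetical sketch modeled on the removed parseResponse loop.
// parseMinimaxResponse is the file's existing SSE-chunk parser (shown removed above).
declare function parseMinimaxResponse(chunk: string):
  | { choices?: { delta?: { content?: string } }[] }
  | undefined;

const MinimaxStream = (rawStream: ReadableStream<Uint8Array>): ReadableStream<Uint8Array> => {
  const decoder = new TextDecoder();
  const encoder = new TextEncoder();

  return rawStream.pipeThrough(
    new TransformStream<Uint8Array, Uint8Array>({
      transform(chunk, controller) {
        // Decode the SSE bytes, pull out the delta text, and re-emit it as bytes.
        const data = parseMinimaxResponse(decoder.decode(chunk, { stream: true }));
        const text = data?.choices?.at(0)?.delta?.content;
        if (text) controller.enqueue(encoder.encode(text));
      },
    }),
  );
};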