@lobehub/chat 0.156.2 → 0.157.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (114)
  1. package/CHANGELOG.md +25 -0
  2. package/package.json +3 -2
  3. package/src/config/modelProviders/anthropic.ts +3 -0
  4. package/src/config/modelProviders/google.ts +3 -0
  5. package/src/config/modelProviders/groq.ts +5 -1
  6. package/src/config/modelProviders/minimax.ts +10 -7
  7. package/src/config/modelProviders/mistral.ts +1 -0
  8. package/src/config/modelProviders/moonshot.ts +3 -0
  9. package/src/config/modelProviders/zhipu.ts +2 -6
  10. package/src/config/server/provider.ts +1 -1
  11. package/src/database/client/core/db.ts +32 -0
  12. package/src/database/client/core/schemas.ts +9 -0
  13. package/src/database/client/models/__tests__/message.test.ts +2 -2
  14. package/src/database/client/schemas/message.ts +8 -1
  15. package/src/features/AgentSetting/store/action.ts +15 -6
  16. package/src/features/Conversation/Actions/Tool.tsx +16 -0
  17. package/src/features/Conversation/Actions/index.ts +2 -2
  18. package/src/features/Conversation/Messages/Assistant/ToolCalls/index.tsx +78 -0
  19. package/src/features/Conversation/Messages/Assistant/ToolCalls/style.ts +25 -0
  20. package/src/features/Conversation/Messages/Assistant/index.tsx +47 -0
  21. package/src/features/Conversation/Messages/Default.tsx +4 -1
  22. package/src/features/Conversation/{Plugins → Messages/Tool}/Inspector/index.tsx +34 -35
  23. package/src/features/Conversation/Messages/Tool/index.tsx +44 -0
  24. package/src/features/Conversation/Messages/index.ts +3 -2
  25. package/src/features/Conversation/Plugins/Render/StandaloneType/Iframe.tsx +1 -1
  26. package/src/features/Conversation/components/SkeletonList.tsx +2 -2
  27. package/src/features/Conversation/index.tsx +2 -3
  28. package/src/libs/agent-runtime/BaseAI.ts +2 -9
  29. package/src/libs/agent-runtime/anthropic/index.test.ts +195 -0
  30. package/src/libs/agent-runtime/anthropic/index.ts +71 -15
  31. package/src/libs/agent-runtime/azureOpenai/index.ts +6 -5
  32. package/src/libs/agent-runtime/bedrock/index.ts +24 -18
  33. package/src/libs/agent-runtime/google/index.test.ts +154 -0
  34. package/src/libs/agent-runtime/google/index.ts +91 -10
  35. package/src/libs/agent-runtime/groq/index.test.ts +41 -72
  36. package/src/libs/agent-runtime/groq/index.ts +7 -0
  37. package/src/libs/agent-runtime/minimax/index.test.ts +2 -2
  38. package/src/libs/agent-runtime/minimax/index.ts +14 -37
  39. package/src/libs/agent-runtime/mistral/index.test.ts +0 -53
  40. package/src/libs/agent-runtime/mistral/index.ts +1 -0
  41. package/src/libs/agent-runtime/moonshot/index.test.ts +1 -71
  42. package/src/libs/agent-runtime/ollama/index.test.ts +197 -0
  43. package/src/libs/agent-runtime/ollama/index.ts +3 -3
  44. package/src/libs/agent-runtime/openai/index.test.ts +0 -53
  45. package/src/libs/agent-runtime/openrouter/index.test.ts +1 -53
  46. package/src/libs/agent-runtime/perplexity/index.test.ts +0 -71
  47. package/src/libs/agent-runtime/perplexity/index.ts +2 -3
  48. package/src/libs/agent-runtime/togetherai/__snapshots__/index.test.ts.snap +886 -0
  49. package/src/libs/agent-runtime/togetherai/fixtures/models.json +8111 -0
  50. package/src/libs/agent-runtime/togetherai/index.test.ts +16 -54
  51. package/src/libs/agent-runtime/types/chat.ts +19 -3
  52. package/src/libs/agent-runtime/utils/anthropicHelpers.test.ts +120 -1
  53. package/src/libs/agent-runtime/utils/anthropicHelpers.ts +67 -4
  54. package/src/libs/agent-runtime/utils/debugStream.test.ts +70 -0
  55. package/src/libs/agent-runtime/utils/debugStream.ts +39 -9
  56. package/src/libs/agent-runtime/utils/openaiCompatibleFactory/index.test.ts +521 -0
  57. package/src/libs/agent-runtime/utils/openaiCompatibleFactory/index.ts +76 -5
  58. package/src/libs/agent-runtime/utils/response.ts +12 -0
  59. package/src/libs/agent-runtime/utils/streams/anthropic.test.ts +197 -0
  60. package/src/libs/agent-runtime/utils/streams/anthropic.ts +91 -0
  61. package/src/libs/agent-runtime/utils/streams/bedrock/claude.ts +21 -0
  62. package/src/libs/agent-runtime/utils/streams/bedrock/common.ts +32 -0
  63. package/src/libs/agent-runtime/utils/streams/bedrock/index.ts +3 -0
  64. package/src/libs/agent-runtime/utils/streams/bedrock/llama.test.ts +196 -0
  65. package/src/libs/agent-runtime/utils/streams/bedrock/llama.ts +51 -0
  66. package/src/libs/agent-runtime/utils/streams/google-ai.test.ts +97 -0
  67. package/src/libs/agent-runtime/utils/streams/google-ai.ts +68 -0
  68. package/src/libs/agent-runtime/utils/streams/index.ts +7 -0
  69. package/src/libs/agent-runtime/utils/streams/minimax.ts +39 -0
  70. package/src/libs/agent-runtime/utils/streams/ollama.test.ts +77 -0
  71. package/src/libs/agent-runtime/utils/streams/ollama.ts +38 -0
  72. package/src/libs/agent-runtime/utils/streams/openai.test.ts +263 -0
  73. package/src/libs/agent-runtime/utils/streams/openai.ts +79 -0
  74. package/src/libs/agent-runtime/utils/streams/protocol.ts +100 -0
  75. package/src/libs/agent-runtime/zeroone/index.test.ts +1 -53
  76. package/src/libs/agent-runtime/zhipu/index.test.ts +1 -1
  77. package/src/libs/agent-runtime/zhipu/index.ts +3 -2
  78. package/src/locales/default/plugin.ts +3 -4
  79. package/src/migrations/FromV4ToV5/fixtures/from-v1-to-v5-output.json +245 -0
  80. package/src/migrations/FromV4ToV5/fixtures/function-input-v4.json +96 -0
  81. package/src/migrations/FromV4ToV5/fixtures/function-output-v5.json +120 -0
  82. package/src/migrations/FromV4ToV5/index.ts +58 -0
  83. package/src/migrations/FromV4ToV5/migrations.test.ts +49 -0
  84. package/src/migrations/FromV4ToV5/types/v4.ts +21 -0
  85. package/src/migrations/FromV4ToV5/types/v5.ts +27 -0
  86. package/src/migrations/index.ts +8 -1
  87. package/src/services/__tests__/chat.test.ts +10 -20
  88. package/src/services/chat.ts +78 -65
  89. package/src/store/chat/slices/enchance/action.ts +15 -10
  90. package/src/store/chat/slices/message/action.test.ts +36 -86
  91. package/src/store/chat/slices/message/action.ts +70 -79
  92. package/src/store/chat/slices/message/reducer.ts +18 -1
  93. package/src/store/chat/slices/message/selectors.test.ts +38 -68
  94. package/src/store/chat/slices/message/selectors.ts +1 -22
  95. package/src/store/chat/slices/plugin/action.test.ts +147 -203
  96. package/src/store/chat/slices/plugin/action.ts +96 -82
  97. package/src/store/chat/slices/share/action.test.ts +3 -3
  98. package/src/store/chat/slices/share/action.ts +1 -1
  99. package/src/store/chat/slices/topic/action.ts +7 -2
  100. package/src/store/tool/selectors/tool.ts +6 -24
  101. package/src/store/tool/slices/builtin/action.test.ts +90 -0
  102. package/src/types/llm.ts +1 -1
  103. package/src/types/message/index.ts +9 -4
  104. package/src/types/message/tools.ts +57 -0
  105. package/src/types/openai/chat.ts +6 -0
  106. package/src/utils/fetch.test.ts +245 -1
  107. package/src/utils/fetch.ts +120 -44
  108. package/src/utils/toolCall.ts +21 -0
  109. package/src/features/Conversation/Messages/Assistant.tsx +0 -26
  110. package/src/features/Conversation/Messages/Function.tsx +0 -35
  111. package/src/libs/agent-runtime/ollama/stream.ts +0 -31
  112. package/src/features/Conversation/{Plugins → Messages/Tool}/Inspector/PluginResultJSON.tsx +0 -0
  113. package/src/features/Conversation/{Plugins → Messages/Tool}/Inspector/Settings.tsx +0 -0
  114. package/src/features/Conversation/{Plugins → Messages/Tool}/Inspector/style.ts +0 -0
package/src/libs/agent-runtime/utils/streams/google-ai.ts
@@ -0,0 +1,68 @@
+ import {
+   EnhancedGenerateContentResponse,
+   GenerateContentStreamResult,
+ } from '@google/generative-ai';
+ import { readableFromAsyncIterable } from 'ai';
+
+ import { nanoid } from '@/utils/uuid';
+
+ import { ChatStreamCallbacks } from '../../types';
+ import {
+   StreamProtocolChunk,
+   StreamStack,
+   StreamToolCallChunkData,
+   chatStreamable,
+   createCallbacksTransformer,
+   createSSEProtocolTransformer,
+   generateToolCallId,
+ } from './protocol';
+
+ const transformGoogleGenerativeAIStream = (
+   chunk: EnhancedGenerateContentResponse,
+   stack: StreamStack,
+ ): StreamProtocolChunk => {
+   // maybe need another structure to add support for multiple choices
+   const functionCalls = chunk.functionCalls();
+
+   if (functionCalls) {
+     return {
+       data: functionCalls.map(
+         (value, index): StreamToolCallChunkData => ({
+           function: {
+             arguments: JSON.stringify(value.args),
+             name: value.name,
+           },
+           id: generateToolCallId(index, value.name),
+           index: index,
+           type: 'function',
+         }),
+       ),
+       id: stack.id,
+       type: 'tool_calls',
+     };
+   }
+   const text = chunk.text();
+
+   return {
+     data: text,
+     id: stack?.id,
+     type: 'text',
+   };
+ };
+
+ // only use for debug
+ export const googleGenAIResultToStream = (stream: GenerateContentStreamResult) => {
+   // make the response to the streamable format
+   return readableFromAsyncIterable(chatStreamable(stream.stream));
+ };
+
+ export const GoogleGenerativeAIStream = (
+   rawStream: ReadableStream<EnhancedGenerateContentResponse>,
+   callbacks?: ChatStreamCallbacks,
+ ) => {
+   const streamStack: StreamStack = { id: 'chat_' + nanoid() };
+
+   return rawStream
+     .pipeThrough(createSSEProtocolTransformer(transformGoogleGenerativeAIStream, streamStack))
+     .pipeThrough(createCallbacksTransformer(callbacks));
+ };
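For orientation: GoogleGenerativeAIStream normalizes Gemini chunks into the same id / event / data SSE frames the other providers emit. A minimal consumption sketch, assuming a hand-rolled mock chunk — the mock shape and values below are illustrative, not taken from the package:

import { GoogleGenerativeAIStream } from './google-ai';

// Hypothetical mock: only text() and functionCalls() are read by the transformer.
const mockChunk = {
  functionCalls: () => undefined,
  text: () => 'Hello',
} as any;

const rawStream = new ReadableStream<any>({
  start(controller) {
    controller.enqueue(mockChunk);
    controller.close();
  },
});

// Each Gemini chunk becomes three SSE lines, e.g.:
//   id: chat_<nanoid>
//   event: text
//   data: "Hello"
const protocolStream = GoogleGenerativeAIStream(rawStream, {
  onText: (text) => console.log('text chunk:', text),
});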
package/src/libs/agent-runtime/utils/streams/index.ts
@@ -0,0 +1,7 @@
+ export * from './anthropic';
+ export * from './bedrock';
+ export * from './google-ai';
+ export * from './minimax';
+ export * from './ollama';
+ export * from './openai';
+ export * from './protocol';
package/src/libs/agent-runtime/utils/streams/minimax.ts
@@ -0,0 +1,39 @@
+ import OpenAI from 'openai';
+
+ import { ChatStreamCallbacks } from '../../types';
+ import { transformOpenAIStream } from './openai';
+ import { createCallbacksTransformer, createSSEProtocolTransformer } from './protocol';
+
+ const unit8ArrayToJSONChunk = (unit8Array: Uint8Array): OpenAI.ChatCompletionChunk => {
+   const decoder = new TextDecoder();
+
+   let chunkValue = decoder.decode(unit8Array, { stream: true });
+
+   // chunkValue example:
+   // data: {"id":"028a65377137d57aaceeffddf48ae99f","choices":[{"finish_reason":"tool_calls","index":0,"delta":{"role":"assistant","tool_calls":[{"id":"call_function_7371372822","type":"function","function":{"name":"realtime-weather____fetchCurrentWeather","arguments":"{\"city\": [\"杭州\", \"北京\"]}"}}]}}],"created":155511,"model":"abab6.5s-chat","object":"chat.completion.chunk"}
+
+   // so we need to remove the `data:` prefix and then parse it as JSON
+   if (chunkValue.startsWith('data:')) {
+     chunkValue = chunkValue.slice(5).trim();
+   }
+
+   try {
+     return JSON.parse(chunkValue);
+   } catch (e) {
+     console.error('minimax chunk parse error:', e);
+
+     return { raw: chunkValue } as any;
+   }
+ };
+
+ export const MinimaxStream = (stream: ReadableStream, callbacks?: ChatStreamCallbacks) => {
+   return stream
+     .pipeThrough(
+       createSSEProtocolTransformer((buffer) => {
+         const chunk = unit8ArrayToJSONChunk(buffer);
+
+         return transformOpenAIStream(chunk);
+       }),
+     )
+     .pipeThrough(createCallbacksTransformer(callbacks));
+ };
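Unlike the OpenAI SDK path, MiniMax responses arrive here as raw Uint8Array SSE frames that still carry the `data:` prefix, which is why unit8ArrayToJSONChunk strips it before handing the parsed chunk to transformOpenAIStream. A rough usage sketch — the sample payload is abbreviated and assumes one well-formed frame per chunk, which may not hold on a real connection:

import { MinimaxStream } from './minimax';

const encoder = new TextEncoder();
const wireStream = new ReadableStream<Uint8Array>({
  start(controller) {
    // A MiniMax frame as delivered on the wire, `data:`-prefixed.
    controller.enqueue(
      encoder.encode('data: {"id":"1","choices":[{"delta":{"content":"hi"},"index":0}]}'),
    );
    controller.close();
  },
});

// Re-framed through the shared protocol as: `id: 1`, `event: text`, `data: "hi"`.
const protocolStream = MinimaxStream(wireStream);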
package/src/libs/agent-runtime/utils/streams/ollama.test.ts
@@ -0,0 +1,77 @@
+ import { ChatResponse } from 'ollama/browser';
+ import { describe, expect, it, vi } from 'vitest';
+
+ import * as uuidModule from '@/utils/uuid';
+
+ import { OllamaStream } from './ollama';
+
+ describe('OllamaStream', () => {
+   it('should transform Ollama stream to protocol stream', async () => {
+     vi.spyOn(uuidModule, 'nanoid').mockReturnValueOnce('1');
+
+     const mockOllamaStream: AsyncIterable<ChatResponse> = {
+       // @ts-ignore
+       async *[Symbol.asyncIterator]() {
+         yield { message: { content: 'Hello' }, done: false };
+         yield { message: { content: ' world!' }, done: false };
+         yield { message: { content: '' }, done: true };
+       },
+     };
+
+     const onStartMock = vi.fn();
+     const onTextMock = vi.fn();
+     const onTokenMock = vi.fn();
+     const onCompletionMock = vi.fn();
+
+     const protocolStream = OllamaStream(mockOllamaStream, {
+       onStart: onStartMock,
+       onText: onTextMock,
+       onToken: onTokenMock,
+       onCompletion: onCompletionMock,
+     });
+
+     const decoder = new TextDecoder();
+     const chunks = [];
+
+     // @ts-ignore
+     for await (const chunk of protocolStream) {
+       chunks.push(decoder.decode(chunk, { stream: true }));
+     }
+
+     expect(chunks).toEqual([
+       'id: chat_1\n',
+       'event: text\n',
+       `data: "Hello"\n\n`,
+       'id: chat_1\n',
+       'event: text\n',
+       `data: " world!"\n\n`,
+       'id: chat_1\n',
+       'event: stop\n',
+       `data: "finished"\n\n`,
+     ]);
+
+     expect(onStartMock).toHaveBeenCalledTimes(1);
+     expect(onTextMock).toHaveBeenNthCalledWith(1, '"Hello"');
+     expect(onTextMock).toHaveBeenNthCalledWith(2, '" world!"');
+     expect(onTokenMock).toHaveBeenCalledTimes(2);
+     expect(onCompletionMock).toHaveBeenCalledTimes(1);
+   });
+
+   it('should handle empty stream', async () => {
+     const mockOllamaStream = {
+       async *[Symbol.asyncIterator]() {},
+     };
+
+     const protocolStream = OllamaStream(mockOllamaStream);
+
+     const decoder = new TextDecoder();
+     const chunks = [];
+
+     // @ts-ignore
+     for await (const chunk of protocolStream) {
+       chunks.push(decoder.decode(chunk, { stream: true }));
+     }
+
+     expect(chunks).toEqual([]);
+   });
+ });
package/src/libs/agent-runtime/utils/streams/ollama.ts
@@ -0,0 +1,38 @@
+ import { readableFromAsyncIterable } from 'ai';
+ import { ChatResponse } from 'ollama/browser';
+
+ import { ChatStreamCallbacks } from '@/libs/agent-runtime';
+ import { nanoid } from '@/utils/uuid';
+
+ import {
+   StreamProtocolChunk,
+   StreamStack,
+   createCallbacksTransformer,
+   createSSEProtocolTransformer,
+ } from './protocol';
+
+ const transformOllamaStream = (chunk: ChatResponse, stack: StreamStack): StreamProtocolChunk => {
+   // maybe need another structure to add support for multiple choices
+   if (chunk.done) {
+     return { data: 'finished', id: stack.id, type: 'stop' };
+   }
+
+   return { data: chunk.message.content, id: stack.id, type: 'text' };
+ };
+
+ const chatStreamable = async function* (stream: AsyncIterable<ChatResponse>) {
+   for await (const response of stream) {
+     yield response;
+   }
+ };
+
+ export const OllamaStream = (
+   res: AsyncIterable<ChatResponse>,
+   cb?: ChatStreamCallbacks,
+ ): ReadableStream<string> => {
+   const streamStack: StreamStack = { id: 'chat_' + nanoid() };
+
+   return readableFromAsyncIterable(chatStreamable(res))
+     .pipeThrough(createSSEProtocolTransformer(transformOllamaStream, streamStack))
+     .pipeThrough(createCallbacksTransformer(cb));
+ };
package/src/libs/agent-runtime/utils/streams/openai.test.ts
@@ -0,0 +1,263 @@
+ import { describe, expect, it, vi } from 'vitest';
+
+ import { OpenAIStream } from './openai';
+
+ describe('OpenAIStream', () => {
+   it('should transform OpenAI stream to protocol stream', async () => {
+     const mockOpenAIStream = new ReadableStream({
+       start(controller) {
+         controller.enqueue({
+           choices: [
+             {
+               delta: { content: 'Hello' },
+               index: 0,
+             },
+           ],
+           id: '1',
+         });
+         controller.enqueue({
+           choices: [
+             {
+               delta: { content: ' world!' },
+               index: 1,
+             },
+           ],
+           id: '1',
+         });
+         controller.enqueue({
+           choices: [
+             {
+               delta: null,
+               finish_reason: 'stop',
+               index: 2,
+             },
+           ],
+           id: '1',
+         });
+
+         controller.close();
+       },
+     });
+
+     const onStartMock = vi.fn();
+     const onTextMock = vi.fn();
+     const onTokenMock = vi.fn();
+     const onCompletionMock = vi.fn();
+
+     const protocolStream = OpenAIStream(mockOpenAIStream, {
+       onStart: onStartMock,
+       onText: onTextMock,
+       onToken: onTokenMock,
+       onCompletion: onCompletionMock,
+     });
+
+     const decoder = new TextDecoder();
+     const chunks = [];
+
+     // @ts-ignore
+     for await (const chunk of protocolStream) {
+       chunks.push(decoder.decode(chunk, { stream: true }));
+     }
+
+     expect(chunks).toEqual([
+       'id: 1\n',
+       'event: text\n',
+       `data: "Hello"\n\n`,
+       'id: 1\n',
+       'event: text\n',
+       `data: " world!"\n\n`,
+       'id: 1\n',
+       'event: stop\n',
+       `data: "stop"\n\n`,
+     ]);
+
+     expect(onStartMock).toHaveBeenCalledTimes(1);
+     expect(onTextMock).toHaveBeenNthCalledWith(1, '"Hello"');
+     expect(onTextMock).toHaveBeenNthCalledWith(2, '" world!"');
+     expect(onTokenMock).toHaveBeenCalledTimes(2);
+     expect(onCompletionMock).toHaveBeenCalledTimes(1);
+   });
+
+   it('should handle tool calls', async () => {
+     const mockOpenAIStream = new ReadableStream({
+       start(controller) {
+         controller.enqueue({
+           choices: [
+             {
+               delta: {
+                 tool_calls: [
+                   {
+                     function: { name: 'tool1', arguments: '{}' },
+                     id: 'call_1',
+                     index: 0,
+                     type: 'function',
+                   },
+                   {
+                     function: { name: 'tool2', arguments: '{}' },
+                     id: 'call_2',
+                     index: 1,
+                   },
+                 ],
+               },
+               index: 0,
+             },
+           ],
+           id: '2',
+         });
+
+         controller.close();
+       },
+     });
+
+     const onToolCallMock = vi.fn();
+
+     const protocolStream = OpenAIStream(mockOpenAIStream, {
+       onToolCall: onToolCallMock,
+     });
+
+     const decoder = new TextDecoder();
+     const chunks = [];
+
+     // @ts-ignore
+     for await (const chunk of protocolStream) {
+       chunks.push(decoder.decode(chunk, { stream: true }));
+     }
+
+     expect(chunks).toEqual([
+       'id: 2\n',
+       'event: tool_calls\n',
+       `data: [{"function":{"name":"tool1","arguments":"{}"},"id":"call_1","index":0,"type":"function"},{"function":{"name":"tool2","arguments":"{}"},"id":"call_2","index":1,"type":"function"}]\n\n`,
+     ]);
+
+     expect(onToolCallMock).toHaveBeenCalledTimes(1);
+   });
+
+   it('should handle empty stream', async () => {
+     const mockStream = new ReadableStream({
+       start(controller) {
+         controller.close();
+       },
+     });
+
+     const protocolStream = OpenAIStream(mockStream);
+
+     const decoder = new TextDecoder();
+     const chunks = [];
+
+     // @ts-ignore
+     for await (const chunk of protocolStream) {
+       chunks.push(decoder.decode(chunk, { stream: true }));
+     }
+
+     expect(chunks).toEqual([]);
+   });
+
+   it('should handle delta content null', async () => {
+     const mockOpenAIStream = new ReadableStream({
+       start(controller) {
+         controller.enqueue({
+           choices: [
+             {
+               delta: { content: null },
+               index: 0,
+             },
+           ],
+           id: '3',
+         });
+
+         controller.close();
+       },
+     });
+
+     const protocolStream = OpenAIStream(mockOpenAIStream);
+
+     const decoder = new TextDecoder();
+     const chunks = [];
+
+     // @ts-ignore
+     for await (const chunk of protocolStream) {
+       chunks.push(decoder.decode(chunk, { stream: true }));
+     }
+
+     expect(chunks).toEqual(['id: 3\n', 'event: data\n', `data: {"content":null}\n\n`]);
+   });
+
+   it('should handle other delta data', async () => {
+     const mockOpenAIStream = new ReadableStream({
+       start(controller) {
+         controller.enqueue({
+           choices: [
+             {
+               delta: { custom_field: 'custom_value' },
+               index: 0,
+             },
+           ],
+           id: '4',
+         });
+
+         controller.close();
+       },
+     });
+
+     const protocolStream = OpenAIStream(mockOpenAIStream);
+
+     const decoder = new TextDecoder();
+     const chunks = [];
+
+     // @ts-ignore
+     for await (const chunk of protocolStream) {
+       chunks.push(decoder.decode(chunk, { stream: true }));
+     }
+
+     expect(chunks).toEqual([
+       'id: 4\n',
+       'event: data\n',
+       `data: {"delta":{"custom_field":"custom_value"},"id":"4","index":0}\n\n`,
+     ]);
+   });
+
+   it('should handle tool calls without index and type', async () => {
+     const mockOpenAIStream = new ReadableStream({
+       start(controller) {
+         controller.enqueue({
+           choices: [
+             {
+               delta: {
+                 tool_calls: [
+                   {
+                     function: { name: 'tool1', arguments: '{}' },
+                     id: 'call_1',
+                   },
+                   {
+                     function: { name: 'tool2', arguments: '{}' },
+                     id: 'call_2',
+                   },
+                 ],
+               },
+               index: 0,
+             },
+           ],
+           id: '5',
+         });
+
+         controller.close();
+       },
+     });
+
+     const protocolStream = OpenAIStream(mockOpenAIStream);
+
+     const decoder = new TextDecoder();
+     const chunks = [];
+
+     // @ts-ignore
+     for await (const chunk of protocolStream) {
+       chunks.push(decoder.decode(chunk, { stream: true }));
+     }
+
+     expect(chunks).toEqual([
+       'id: 5\n',
+       'event: tool_calls\n',
+       `data: [{"function":{"name":"tool1","arguments":"{}"},"id":"call_1","index":0,"type":"function"},{"function":{"name":"tool2","arguments":"{}"},"id":"call_2","index":1,"type":"function"}]\n\n`,
+     ]);
+   });
+ });
package/src/libs/agent-runtime/utils/streams/openai.ts
@@ -0,0 +1,79 @@
+ import { readableFromAsyncIterable } from 'ai';
+ import OpenAI from 'openai';
+ import type { Stream } from 'openai/streaming';
+
+ import { ChatStreamCallbacks } from '../../types';
+ import {
+   StreamProtocolChunk,
+   StreamProtocolToolCallChunk,
+   StreamToolCallChunkData,
+   createCallbacksTransformer,
+   createSSEProtocolTransformer,
+   generateToolCallId,
+ } from './protocol';
+
+ export const transformOpenAIStream = (chunk: OpenAI.ChatCompletionChunk): StreamProtocolChunk => {
+   // maybe need another structure to add support for multiple choices
+   const item = chunk.choices[0];
+
+   if (typeof item.delta?.content === 'string') {
+     return { data: item.delta.content, id: chunk.id, type: 'text' };
+   }
+
+   if (item.delta?.tool_calls) {
+     return {
+       data: item.delta.tool_calls.map(
+         (value, index): StreamToolCallChunkData => ({
+           function: value.function,
+           id: value.id || generateToolCallId(index, value.function?.name),
+
+           // mistral's tool calls don't include the index and type fields; the data looks like:
+           // [{"id":"xbhnmTtY7","function":{"name":"lobe-image-designer____text2image____builtin","arguments":"{\"prompts\": [\"A photo of a small, fluffy dog with a playful expression and wagging tail.\", \"A watercolor painting of a small, energetic dog with a glossy coat and bright eyes.\", \"A vector illustration of a small, adorable dog with a short snout and perky ears.\", \"A drawing of a small, scruffy dog with a mischievous grin and a wagging tail.\"], \"quality\": \"standard\", \"seeds\": [123456, 654321, 111222, 333444], \"size\": \"1024x1024\", \"style\": \"vivid\"}"}}]
+
+           // minimax's tool calls don't include the index field; the data looks like:
+           // [{"id":"call_function_4752059746","type":"function","function":{"name":"lobe-image-designer____text2image____builtin","arguments":"{\"prompts\": [\"一个流浪的地球,背景是浩瀚"}}]
+
+           // so we need to add these default values
+           index: typeof value.index !== 'undefined' ? value.index : index,
+           type: value.type || 'function',
+         }),
+       ),
+       id: chunk.id,
+       type: 'tool_calls',
+     } as StreamProtocolToolCallChunk;
+   }
+
+   // a finish reason was provided
+   if (item.finish_reason) {
+     return { data: item.finish_reason, id: chunk.id, type: 'stop' };
+   }
+
+   if (item.delta.content === null) {
+     return { data: item.delta, id: chunk.id, type: 'data' };
+   }
+
+   // in all other cases, return the delta and index
+   return {
+     data: { delta: item.delta, id: chunk.id, index: item.index },
+     id: chunk.id,
+     type: 'data',
+   };
+ };
+
+ const chatStreamable = async function* (stream: AsyncIterable<OpenAI.ChatCompletionChunk>) {
+   for await (const response of stream) {
+     yield response;
+   }
+ };
+
+ export const OpenAIStream = (
+   stream: Stream<OpenAI.ChatCompletionChunk> | ReadableStream,
+   callbacks?: ChatStreamCallbacks,
+ ) => {
+   const readableStream =
+     stream instanceof ReadableStream ? stream : readableFromAsyncIterable(chatStreamable(stream));
+
+   return readableStream
+     .pipeThrough(createSSEProtocolTransformer(transformOpenAIStream))
+     .pipeThrough(createCallbacksTransformer(callbacks));
+ };
package/src/libs/agent-runtime/utils/streams/protocol.ts
@@ -0,0 +1,100 @@
+ import { ChatStreamCallbacks } from '@/libs/agent-runtime';
+
+ export interface StreamStack {
+   id: string;
+ }
+
+ export interface StreamProtocolChunk {
+   data: any;
+   id?: string;
+   type: 'text' | 'tool_calls' | 'data' | 'stop';
+ }
+
+ export interface StreamToolCallChunkData {
+   function?: {
+     arguments?: string;
+     name?: string | null;
+   };
+   id: string;
+   index: number;
+   type: 'function' | string;
+ }
+
+ export interface StreamProtocolToolCallChunk {
+   data: StreamToolCallChunkData[];
+   id: string;
+   index: number;
+   type: 'tool_calls';
+ }
+
+ export const generateToolCallId = (index: number, functionName?: string) =>
+   `${functionName || 'unknown_tool_call'}_${index}`;
+
+ export const chatStreamable = async function* <T>(stream: AsyncIterable<T>) {
+   for await (const response of stream) {
+     yield response;
+   }
+ };
+
+ export const createSSEProtocolTransformer = (
+   transformer: (chunk: any, stack: StreamStack) => StreamProtocolChunk,
+   streamStack?: StreamStack,
+ ) =>
+   new TransformStream({
+     transform: (chunk, controller) => {
+       const { type, id, data } = transformer(chunk, streamStack || { id: '' });
+
+       controller.enqueue(`id: ${id}\n`);
+       controller.enqueue(`event: ${type}\n`);
+       controller.enqueue(`data: ${JSON.stringify(data)}\n\n`);
+     },
+   });
+
+ export function createCallbacksTransformer(cb: ChatStreamCallbacks | undefined) {
+   const textEncoder = new TextEncoder();
+   let aggregatedResponse = '';
+   let currentType = '';
+   const callbacks = cb || {};
+
+   return new TransformStream({
+     async flush(): Promise<void> {
+       if (callbacks.onCompletion) {
+         await callbacks.onCompletion(aggregatedResponse);
+       }
+
+       if (callbacks.onFinal) {
+         await callbacks.onFinal(aggregatedResponse);
+       }
+     },
+
+     async start(): Promise<void> {
+       if (callbacks.onStart) await callbacks.onStart();
+     },
+
+     async transform(chunk: string, controller): Promise<void> {
+       controller.enqueue(textEncoder.encode(chunk));
+
+       // track the type of the chunk
+       if (chunk.startsWith('event:')) {
+         currentType = chunk.split('event:')[1].trim();
+       }
+       // if the message is a data chunk, handle the callback
+       else if (chunk.startsWith('data:')) {
+         const content = chunk.split('data:')[1].trim();
+
+         switch (currentType) {
+           case 'text': {
+             await callbacks.onText?.(content);
+             await callbacks.onToken?.(JSON.parse(content));
+             break;
+           }
+
+           case 'tool_calls': {
+             // TODO: make on ToolCall callback
+             await callbacks.onToolCall?.();
+           }
+         }
+       }
+     },
+   });
+ }
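Taken together, protocol.ts pins down the wire contract every provider stream above is normalized to: three string chunks per event, which createCallbacksTransformer then re-parses to drive the callbacks. A small sketch of that contract under the definitions above — the transformer and ids are illustrative:

import { createSSEProtocolTransformer, generateToolCallId } from './protocol';

// Fallback ids for providers (e.g. Google) whose tool calls arrive without one:
generateToolCallId(0, 'fetchCurrentWeather'); // => 'fetchCurrentWeather_0'
generateToolCallId(1); // => 'unknown_tool_call_1'

// Each input chunk becomes exactly three string chunks downstream:
//   `id: <id>\n`, `event: <type>\n`, `data: <JSON of data>\n\n`
const sse = createSSEProtocolTransformer(
  (chunk, stack) => ({ data: chunk, id: stack.id, type: 'text' }),
  { id: 'chat_demo' },
);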