@agentica/core 0.32.3-dev.1 → 0.32.3-dev.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/lib/index.mjs +47 -33
- package/lib/index.mjs.map +1 -1
- package/lib/utils/ChatGptCompletionStreamingUtil.js +41 -29
- package/lib/utils/ChatGptCompletionStreamingUtil.js.map +1 -1
- package/lib/utils/ChatGptCompletionStreamingUtil.spec.d.ts +1 -0
- package/lib/utils/ChatGptCompletionStreamingUtil.spec.js +855 -0
- package/lib/utils/ChatGptCompletionStreamingUtil.spec.js.map +1 -0
- package/lib/utils/MPSC.js +8 -6
- package/lib/utils/MPSC.js.map +1 -1
- package/lib/utils/StreamUtil.d.ts +1 -1
- package/lib/utils/StreamUtil.js +2 -2
- package/lib/utils/StreamUtil.js.map +1 -1
- package/package.json +1 -1
- package/src/utils/ChatGptCompletionStreamingUtil.spec.ts +908 -0
- package/src/utils/ChatGptCompletionStreamingUtil.ts +45 -36
- package/src/utils/MPSC.ts +8 -6
- package/src/utils/StreamUtil.ts +2 -2
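
The largest addition is the new test suite for reduceStreamingWithDispatch, reproduced below. For orientation, here is a minimal usage sketch inferred from the spec's own assertions; the handler's event shape (get(), done(), join(), and stream) is an assumption read off the test code rather than a documented API:

import type { ChatCompletionChunk } from "openai/resources";
import { reduceStreamingWithDispatch } from "./ChatGptCompletionStreamingUtil";

// Any chunk stream will do; an OpenAI streaming response is the typical source.
declare const source: ReadableStream<ChatCompletionChunk>;

// One event is dispatched per choice index; the reduced ChatCompletion is returned.
const completion = await reduceStreamingWithDispatch(source, (event) => {
  void (async () => {
    // event.stream yields each content delta for this choice as it arrives.
    for await (const piece of event.stream) process.stdout.write(piece);
    // event.get() returns the text accumulated so far, and event.done()
    // reports whether this choice's finish_reason has been seen.
    console.log(event.get(), event.done());
  })();
});
console.log(completion.object); // "chat.completion"

The tests below assert exactly these behaviors: accumulation via get(), completion via done(), incremental delivery via stream, and awaiting the full text via join().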
package/src/utils/ChatGptCompletionStreamingUtil.spec.ts
@@ -0,0 +1,908 @@
+import { describe, it, expect, vi, beforeEach } from "vitest";
+import type { ChatCompletionChunk } from "openai/resources";
+import { reduceStreamingWithDispatch } from "./ChatGptCompletionStreamingUtil";
+import { StreamUtil } from "./StreamUtil";
+
+describe("reduceStreamingWithDispatch", () => {
+  beforeEach(() => {
+    vi.clearAllMocks();
+  });
+
+  describe("basic functionality", () => {
+    it("should process single chunk successfully", async () => {
+      const mockChunk: ChatCompletionChunk = {
+        id: "test-id",
+        object: "chat.completion.chunk",
+        created: 1234567890,
+        model: "gpt-3.5-turbo",
+        choices: [
+          {
+            index: 0,
+            delta: { content: "Hello" },
+            finish_reason: null,
+          },
+        ],
+      };
+
+      const stream = new ReadableStream<ChatCompletionChunk>({
+        start(controller) {
+          controller.enqueue(mockChunk);
+          controller.close();
+        },
+      });
+
+      const eventProcessor = vi.fn();
+      const result = await reduceStreamingWithDispatch(stream, eventProcessor);
+
+      expect(result).toBeDefined();
+      expect(result.object).toBe("chat.completion");
+      expect(eventProcessor).toHaveBeenCalledTimes(1);
+    });
+
+    it("should handle multiple chunks with content accumulation", async () => {
+      const chunks: ChatCompletionChunk[] = [
+        {
+          id: "test-id",
+          object: "chat.completion.chunk",
+          created: 1234567890,
+          model: "gpt-3.5-turbo",
+          choices: [
+            {
+              index: 0,
+              delta: { content: "Hello" },
+              finish_reason: null,
+            },
+          ],
+        },
+        {
+          id: "test-id",
+          object: "chat.completion.chunk",
+          created: 1234567890,
+          model: "gpt-3.5-turbo",
+          choices: [
+            {
+              index: 0,
+              delta: { content: " World" },
+              finish_reason: null,
+            },
+          ],
+        },
+        {
+          id: "test-id",
+          object: "chat.completion.chunk",
+          created: 1234567890,
+          model: "gpt-3.5-turbo",
+          choices: [
+            {
+              index: 0,
+              delta: { content: "!" },
+              finish_reason: "stop",
+            },
+          ],
+        },
+      ];
+
+      const stream = new ReadableStream<ChatCompletionChunk>({
+        start(controller) {
+          chunks.forEach(chunk => controller.enqueue(chunk));
+          controller.close();
+        },
+      });
+
+      const eventProcessor = vi.fn();
+      const result = await reduceStreamingWithDispatch(stream, eventProcessor);
+
+      expect(result).toBeDefined();
+      expect(result.object).toBe("chat.completion");
+      expect(eventProcessor).toHaveBeenCalledTimes(1);
+
+      const eventCall = eventProcessor.mock.calls[0]?.[0];
+      expect(eventCall.get()).toBe("Hello World!");
+    });
+
+    it("should handle empty content chunks", async () => {
+      const chunks: ChatCompletionChunk[] = [
+        {
+          id: "test-id",
+          object: "chat.completion.chunk",
+          created: 1234567890,
+          model: "gpt-3.5-turbo",
+          choices: [
+            {
+              index: 0,
+              delta: { content: "" },
+              finish_reason: null,
+            },
+          ],
+        },
+        {
+          id: "test-id",
+          object: "chat.completion.chunk",
+          created: 1234567890,
+          model: "gpt-3.5-turbo",
+          choices: [
+            {
+              index: 0,
+              delta: { content: "Hello" },
+              finish_reason: null,
+            },
+          ],
+        },
+      ];
+
+      const stream = new ReadableStream<ChatCompletionChunk>({
+        start(controller) {
+          chunks.forEach(chunk => controller.enqueue(chunk));
+          controller.close();
+        },
+      });
+
+      const eventProcessor = vi.fn();
+      const result = await reduceStreamingWithDispatch(stream, eventProcessor);
+
+      expect(result).toBeDefined();
+      expect(eventProcessor).toHaveBeenCalledTimes(1);
+
+      const eventCall = eventProcessor.mock.calls[0]?.[0];
+      expect(eventCall.get()).toBe("Hello");
+    });
+  });
+
+  describe("multiple choices handling", () => {
+    it("should handle multiple choices with different indices", async () => {
+      const chunks: ChatCompletionChunk[] = [
+        {
+          id: "test-id",
+          object: "chat.completion.chunk",
+          created: 1234567890,
+          model: "gpt-3.5-turbo",
+          choices: [
+            {
+              index: 0,
+              delta: { content: "Choice 1" },
+              finish_reason: null,
+            },
+            {
+              index: 1,
+              delta: { content: "Choice 2" },
+              finish_reason: null,
+            },
+          ],
+        },
+        {
+          id: "test-id",
+          object: "chat.completion.chunk",
+          created: 1234567890,
+          model: "gpt-3.5-turbo",
+          choices: [
+            {
+              index: 0,
+              delta: { content: " continued" },
+              finish_reason: "stop",
+            },
+            {
+              index: 1,
+              delta: { content: " continued" },
+              finish_reason: "stop",
+            },
+          ],
+        },
+      ];
+
+      const stream = new ReadableStream<ChatCompletionChunk>({
+        start(controller) {
+          chunks.forEach(chunk => controller.enqueue(chunk));
+          controller.close();
+        },
+      });
+
+      const eventProcessor = vi.fn();
+      const result = await reduceStreamingWithDispatch(stream, eventProcessor);
+
+      expect(result).toBeDefined();
+      expect(eventProcessor).toHaveBeenCalledTimes(2);
+
+      const firstCall = eventProcessor.mock.calls[0]?.[0];
+      const secondCall = eventProcessor.mock.calls[1]?.[0];
+      expect(firstCall.get()).toBe("Choice 1 continued");
+      expect(secondCall.get()).toBe("Choice 2 continued");
+    });
+  });
+
+  describe("finish reason handling", () => {
+    it("should close context when finish_reason is provided", async () => {
+      const chunks: ChatCompletionChunk[] = [
+        {
+          id: "test-id",
+          object: "chat.completion.chunk",
+          created: 1234567890,
+          model: "gpt-3.5-turbo",
+          choices: [
+            {
+              index: 0,
+              delta: { content: "Hello" },
+              finish_reason: null,
+            },
+          ],
+        },
+        {
+          id: "test-id",
+          object: "chat.completion.chunk",
+          created: 1234567890,
+          model: "gpt-3.5-turbo",
+          choices: [
+            {
+              index: 0,
+              delta: { content: " World" },
+              finish_reason: "stop",
+            },
+          ],
+        },
+      ];
+
+      const stream = new ReadableStream<ChatCompletionChunk>({
+        start(controller) {
+          chunks.forEach(chunk => controller.enqueue(chunk));
+          controller.close();
+        },
+      });
+
+      const eventProcessor = vi.fn();
+      const result = await reduceStreamingWithDispatch(stream, eventProcessor);
+
+      expect(result).toBeDefined();
+      expect(eventProcessor).toHaveBeenCalledTimes(1);
+
+      const eventCall = eventProcessor.mock.calls[0]?.[0];
+      expect(eventCall.get()).toBe("Hello World");
+      expect(eventCall.done()).toBe(true);
+    });
+  });
+
+  describe("stream processing", () => {
+    it("should provide working stream in event processor", async () => {
+      const chunks: ChatCompletionChunk[] = [
+        {
+          id: "test-id",
+          object: "chat.completion.chunk",
+          created: 1234567890,
+          model: "gpt-3.5-turbo",
+          choices: [
+            {
+              index: 0,
+              delta: { content: "Hello" },
+              finish_reason: null,
+            },
+          ],
+        },
+        {
+          id: "test-id",
+          object: "chat.completion.chunk",
+          created: 1234567890,
+          model: "gpt-3.5-turbo",
+          choices: [
+            {
+              index: 0,
+              delta: { content: " World" },
+              finish_reason: "stop",
+            },
+          ],
+        },
+      ];
+
+      const stream = new ReadableStream<ChatCompletionChunk>({
+        start(controller) {
+          chunks.forEach(chunk => controller.enqueue(chunk));
+          controller.close();
+        },
+      });
+      const streamedContent: string[] = [];
+      await new Promise(async (resolve) => {
+        const eventProcessor = vi.fn(({ stream: contentStream }) => {
+          (async () => {
+            for await (const content of contentStream) {
+              streamedContent.push(content);
+            }
+            resolve(true);
+          })();
+        });
+
+        await reduceStreamingWithDispatch(stream, eventProcessor);
+      });
+      expect(streamedContent).toEqual(["Hello", " World"]);
+    });
+
+    it("should provide working join function", async () => {
+      const chunks: ChatCompletionChunk[] = [
+        {
+          id: "test-id",
+          object: "chat.completion.chunk",
+          created: 1234567890,
+          model: "gpt-3.5-turbo",
+          choices: [
+            {
+              index: 0,
+              delta: { content: "Hello" },
+              finish_reason: null,
+            },
+          ],
+        },
+        {
+          id: "test-id",
+          object: "chat.completion.chunk",
+          created: 1234567890,
+          model: "gpt-3.5-turbo",
+          choices: [
+            {
+              index: 0,
+              delta: { content: " World" },
+              finish_reason: "stop",
+            },
+          ],
+        },
+      ];
+
+      const stream = new ReadableStream<ChatCompletionChunk>({
+        start(controller) {
+          chunks.forEach(chunk => controller.enqueue(chunk));
+          controller.close();
+        },
+      });
+
+      let joinedContent = "";
+      const eventProcessor = vi.fn(async ({ join }) => {
+        joinedContent = await join();
+      });
+
+      const result = await reduceStreamingWithDispatch(stream, eventProcessor);
+
+      expect(result).toBeDefined();
+      expect(joinedContent).toBe("Hello World");
+    });
+  });
+
+  describe("error handling", () => {
+    it("should throw error for empty stream", async () => {
+      const stream = new ReadableStream<ChatCompletionChunk>({
+        start(controller) {
+          controller.close();
+        },
+      });
+
+      const eventProcessor = vi.fn();
+
+      await expect(reduceStreamingWithDispatch(stream, eventProcessor)).rejects.toThrow(
+        "StreamUtil.reduce did not produce a ChatCompletion"
+      );
+    });
+
+    it("should handle stream with only finish_reason chunks", async () => {
+      const chunks: ChatCompletionChunk[] = [
+        {
+          id: "test-id",
+          object: "chat.completion.chunk",
+          created: 1234567890,
+          model: "gpt-3.5-turbo",
+          choices: [
+            {
+              index: 0,
+              delta: { content: null },
+              finish_reason: "stop",
+            },
+          ],
+        },
+      ];
+
+      const stream = new ReadableStream<ChatCompletionChunk>({
+        start(controller) {
+          chunks.forEach(chunk => controller.enqueue(chunk));
+          controller.close();
+        },
+      });
+
+      const eventProcessor = vi.fn();
+      const result = await reduceStreamingWithDispatch(stream, eventProcessor);
+
+      expect(result).toBeDefined();
+      expect(eventProcessor).not.toHaveBeenCalled();
+    });
+  });
+
+  describe("complex scenarios", () => {
+    it("should handle mixed content and finish_reason chunks", async () => {
+      const chunks: ChatCompletionChunk[] = [
+        {
+          id: "test-id",
+          object: "chat.completion.chunk",
+          created: 1234567890,
+          model: "gpt-3.5-turbo",
+          choices: [
+            {
+              index: 0,
+              delta: { content: "Hello" },
+              finish_reason: null,
+            },
+          ],
+        },
+        {
+          id: "test-id",
+          object: "chat.completion.chunk",
+          created: 1234567890,
+          model: "gpt-3.5-turbo",
+          choices: [
+            {
+              index: 0,
+              delta: { content: null },
+              finish_reason: null,
+            },
+          ],
+        },
+        {
+          id: "test-id",
+          object: "chat.completion.chunk",
+          created: 1234567890,
+          model: "gpt-3.5-turbo",
+          choices: [
+            {
+              index: 0,
+              delta: { content: " World" },
+              finish_reason: null,
+            },
+          ],
+        },
+        {
+          id: "test-id",
+          object: "chat.completion.chunk",
+          created: 1234567890,
+          model: "gpt-3.5-turbo",
+          choices: [
+            {
+              index: 0,
+              delta: { content: null },
+              finish_reason: "stop",
+            },
+          ],
+        },
+      ];
+
+      const stream = new ReadableStream<ChatCompletionChunk>({
+        start(controller) {
+          chunks.forEach(chunk => controller.enqueue(chunk));
+          controller.close();
+        },
+      });
+
+      const eventProcessor = vi.fn();
+      const result = await reduceStreamingWithDispatch(stream, eventProcessor);
+
+      expect(result).toBeDefined();
+      expect(eventProcessor).toHaveBeenCalledTimes(1);
+
+      const eventCall = eventProcessor.mock.calls[0]?.[0];
+      expect(eventCall.get()).toBe("Hello World");
+    });
+  });
+
+  describe("edge cases and exceptions", () => {
+    it("should handle null delta content", async () => {
+      const chunks: ChatCompletionChunk[] = [
+        {
+          id: "test-id",
+          object: "chat.completion.chunk",
+          created: 1234567890,
+          model: "gpt-3.5-turbo",
+          choices: [
+            {
+              index: 0,
+              delta: { content: null },
+              finish_reason: null,
+            },
+          ],
+        },
+        {
+          id: "test-id",
+          object: "chat.completion.chunk",
+          created: 1234567890,
+          model: "gpt-3.5-turbo",
+          choices: [
+            {
+              index: 0,
+              delta: { content: "Hello" },
+              finish_reason: "stop",
+            },
+          ],
+        },
+      ];
+
+      const stream = new ReadableStream<ChatCompletionChunk>({
+        start(controller) {
+          chunks.forEach(chunk => controller.enqueue(chunk));
+          controller.close();
+        },
+      });
+
+      const eventProcessor = vi.fn();
+      const result = await reduceStreamingWithDispatch(stream, eventProcessor);
+
+      expect(result).toBeDefined();
+      expect(eventProcessor).toHaveBeenCalledTimes(1);
+
+      const eventCall = eventProcessor.mock.calls[0]?.[0];
+      expect(eventCall.get()).toBe("Hello");
+    });
+
+    it("should handle missing delta object", async () => {
+      const chunks: ChatCompletionChunk[] = [
+        {
+          id: "test-id",
+          object: "chat.completion.chunk",
+          created: 1234567890,
+          model: "gpt-3.5-turbo",
+          choices: [
+            {
+              index: 0,
+              delta: {},
+              finish_reason: null,
+            },
+          ],
+        },
+        {
+          id: "test-id",
+          object: "chat.completion.chunk",
+          created: 1234567890,
+          model: "gpt-3.5-turbo",
+          choices: [
+            {
+              index: 0,
+              delta: { content: "Hello" },
+              finish_reason: "stop",
+            },
+          ],
+        },
+      ];
+
+      const stream = new ReadableStream<ChatCompletionChunk>({
+        start(controller) {
+          chunks.forEach(chunk => controller.enqueue(chunk));
+          controller.close();
+        },
+      });
+
+      const eventProcessor = vi.fn();
+      const result = await reduceStreamingWithDispatch(stream, eventProcessor);
+
+      expect(result).toBeDefined();
+      expect(eventProcessor).toHaveBeenCalledTimes(1);
+
+      const eventCall = eventProcessor.mock.calls[0]?.[0];
+      expect(eventCall.get()).toBe("Hello");
+    });
+
+    it("should handle chunks with no choices", async () => {
+      const chunks: ChatCompletionChunk[] = [
+        {
+          id: "test-id",
+          object: "chat.completion.chunk",
+          created: 1234567890,
+          model: "gpt-3.5-turbo",
+          choices: [],
+        },
+        {
+          id: "test-id",
+          object: "chat.completion.chunk",
+          created: 1234567890,
+          model: "gpt-3.5-turbo",
+          choices: [
+            {
+              index: 0,
+              delta: { content: "Hello" },
+              finish_reason: "stop",
+            },
+          ],
+        },
+      ];
+
+      const stream = new ReadableStream<ChatCompletionChunk>({
+        start(controller) {
+          chunks.forEach(chunk => controller.enqueue(chunk));
+          controller.close();
+        },
+      });
+
+      const eventProcessor = vi.fn();
+      const result = await reduceStreamingWithDispatch(stream, eventProcessor);
+
+      expect(result).toBeDefined();
+      expect(eventProcessor).toHaveBeenCalledTimes(1);
+
+      const eventCall = eventProcessor.mock.calls[0]?.[0];
+      expect(eventCall.get()).toBe("Hello");
+    });
+
+    it("should handle very large content chunks", async () => {
+      const largeContent = "x".repeat(10000);
+      const chunks: ChatCompletionChunk[] = [
+        {
+          id: "test-id",
+          object: "chat.completion.chunk",
+          created: 1234567890,
+          model: "gpt-3.5-turbo",
+          choices: [
+            {
+              index: 0,
+              delta: { content: largeContent },
+              finish_reason: "stop",
+            },
+          ],
+        },
+      ];
+
+      const stream = new ReadableStream<ChatCompletionChunk>({
+        start(controller) {
+          chunks.forEach(chunk => controller.enqueue(chunk));
+          controller.close();
+        },
+      });
+
+      const eventProcessor = vi.fn();
+      const result = await reduceStreamingWithDispatch(stream, eventProcessor);
+
+      expect(result).toBeDefined();
+      // Now single chunk with content should trigger eventProcessor
+      expect(eventProcessor).toHaveBeenCalledOnce();
+
+      const eventCall = eventProcessor.mock.calls[0]?.[0];
+      expect(eventCall.get()).toBe(largeContent);
+    });
+
+    it("should handle rapid consecutive chunks", async () => {
+      const chunks: ChatCompletionChunk[] = Array.from({ length: 100 }, (_, i) => ({
+        id: "test-id",
+        object: "chat.completion.chunk" as const,
+        created: 1234567890,
+        model: "gpt-3.5-turbo",
+        choices: [
+          {
+            index: 0,
+            delta: { content: i.toString() },
+            finish_reason: i === 99 ? "stop" as const : null,
+          },
+        ],
+      }));
+
+      const stream = new ReadableStream<ChatCompletionChunk>({
+        start(controller) {
+          chunks.forEach(chunk => controller.enqueue(chunk));
+          controller.close();
+        },
+      });
+
+      const eventProcessor = vi.fn();
+      const result = await reduceStreamingWithDispatch(stream, eventProcessor);
+
+      expect(result).toBeDefined();
+      expect(eventProcessor).toHaveBeenCalledTimes(1);
+
+      const eventCall = eventProcessor.mock.calls[0]?.[0];
+      const expectedContent = Array.from({ length: 100 }, (_, i) => i.toString()).join("");
+      expect(eventCall.get()).toBe(expectedContent);
+    });
+
+    it("should handle out-of-order choice indices", async () => {
+      const chunks: ChatCompletionChunk[] = [
+        {
+          id: "test-id",
+          object: "chat.completion.chunk",
+          created: 1234567890,
+          model: "gpt-3.5-turbo",
+          choices: [
+            {
+              index: 2,
+              delta: { content: "Third" },
+              finish_reason: null,
+            },
+            {
+              index: 0,
+              delta: { content: "First" },
+              finish_reason: null,
+            },
+            {
+              index: 1,
+              delta: { content: "Second" },
+              finish_reason: null,
+            },
+          ],
+        },
+        {
+          id: "test-id",
+          object: "chat.completion.chunk",
+          created: 1234567890,
+          model: "gpt-3.5-turbo",
+          choices: [
+            {
+              index: 0,
+              delta: { content: " content" },
+              finish_reason: "stop",
+            },
+            {
+              index: 1,
+              delta: { content: " content" },
+              finish_reason: "stop",
+            },
+            {
+              index: 2,
+              delta: { content: " content" },
+              finish_reason: "stop",
+            },
+          ],
+        },
+      ];
+
+      const stream = new ReadableStream<ChatCompletionChunk>({
+        start(controller) {
+          chunks.forEach(chunk => controller.enqueue(chunk));
+          controller.close();
+        },
+      });
+
+      const eventProcessor = vi.fn();
+      const result = await reduceStreamingWithDispatch(stream, eventProcessor);
+
+      expect(result).toBeDefined();
+      expect(eventProcessor).toHaveBeenCalledTimes(3);
+
+      const calls = eventProcessor.mock.calls.map(call => call[0]);
+      expect(calls[0].get()).toBe("Third content");
+      expect(calls[1].get()).toBe("First content");
+      expect(calls[2].get()).toBe("Second content");
+    });
+
+    it("should handle mixed finish reasons", async () => {
+      const chunks: ChatCompletionChunk[] = [
+        {
+          id: "test-id",
+          object: "chat.completion.chunk",
+          created: 1234567890,
+          model: "gpt-3.5-turbo",
+          choices: [
+            {
+              index: 0,
+              delta: { content: "Hello" },
+              finish_reason: null,
+            },
+            {
+              index: 1,
+              delta: { content: "World" },
+              finish_reason: null,
+            },
+          ],
+        },
+        {
+          id: "test-id",
+          object: "chat.completion.chunk",
+          created: 1234567890,
+          model: "gpt-3.5-turbo",
+          choices: [
+            {
+              index: 0,
+              delta: { content: " there" },
+              finish_reason: "stop",
+            },
+            {
+              index: 1,
+              delta: { content: "!" },
+              finish_reason: "length",
+            },
+          ],
+        },
+      ];
+
+      const stream = StreamUtil.from(...chunks);
+
+      const eventProcessor = vi.fn();
+      const result = await reduceStreamingWithDispatch(stream, eventProcessor);
+
+      expect(result).toBeDefined();
+      expect(eventProcessor).toHaveBeenCalledTimes(2);
+
+      const firstCall = eventProcessor.mock.calls[0]?.[0];
+      const secondCall = eventProcessor.mock.calls[1]?.[0];
+      expect(firstCall.get()).toBe("Hello there");
+      expect(secondCall.get()).toBe("World!");
+      await firstCall.join();
+      await secondCall.join();
+      expect(firstCall.done()).toBe(true);
+      expect(secondCall.done()).toBe(true);
+    });
+
+    it("should handle Unicode and special characters", async () => {
+      const specialContent = "Hello 🌍! 안녕하세요 مرحبا 🚀 ñáéíóú";
+      const chunks: ChatCompletionChunk[] = [
+        {
+          id: "test-id",
+          object: "chat.completion.chunk",
+          created: 1234567890,
+          model: "gpt-3.5-turbo",
+          choices: [
+            {
+              index: 0,
+              delta: { content: specialContent },
+              finish_reason: "stop",
+            },
+          ],
+        },
+      ];
+
+      const stream = StreamUtil.from(...chunks);
+
+      const eventProcessor = vi.fn();
+      const result = await reduceStreamingWithDispatch(stream, eventProcessor);
+
+      expect(result).toBeDefined();
+      // Now single chunk with content should trigger eventProcessor
+      expect(eventProcessor).toHaveBeenCalledOnce();
+
+      const eventCall = eventProcessor.mock.calls[0]?.[0];
+      expect(eventCall.get()).toBe(specialContent);
+    });
+
+    it("should handle stream reader errors gracefully", async () => {
+      const chunks: ChatCompletionChunk[] = [
+        {
+          id: "test-id",
+          object: "chat.completion.chunk",
+          created: 1234567890,
+          model: "gpt-3.5-turbo",
+          choices: [
+            {
+              index: 0,
+              delta: { content: "Hello" },
+              finish_reason: null,
+            },
+          ],
+        },
+      ];
+
+      const stream = new ReadableStream<ChatCompletionChunk>({
+        start(controller) {
+          controller.enqueue(chunks[0]);
+          // Simulate an error in the stream
+          controller.error(new Error("Stream error"));
+        },
+      });
+
+      const eventProcessor = vi.fn();
+
+      await expect(reduceStreamingWithDispatch(stream, eventProcessor))
+        .rejects.toThrow("Stream error");
+    });
+
+    it("should handle completely malformed chunks gracefully", async () => {
+      const malformedChunk = {
+        // Missing required fields
+        object: "chat.completion.chunk",
+        choices: [
+          {
+            // Missing index
+            delta: { content: "Hello" },
+            finish_reason: null,
+          },
+        ],
+      } as any;
+
+      const stream = new ReadableStream<ChatCompletionChunk>({
+        start(controller) {
+          controller.enqueue(malformedChunk);
+          controller.close();
+        },
+      });
+
+      const eventProcessor = vi.fn();
+
+      // Should not throw, but should handle gracefully
+      const result = await reduceStreamingWithDispatch(stream, eventProcessor);
+      expect(result).toBeDefined();
+    });
+  });
+});
+
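
Two of the tests above ("mixed finish reasons" and "Unicode and special characters") build their input with StreamUtil.from(...chunks) instead of a hand-written ReadableStream. The call sites only rely on getting back a closed ReadableStream of the given items; a hypothetical sketch of such a helper follows (the actual implementation is in package/src/utils/StreamUtil.ts, which this release also touches):

// Illustrative only; see package/src/utils/StreamUtil.ts for the real code.
function from<T>(...items: T[]): ReadableStream<T> {
  return new ReadableStream<T>({
    start(controller) {
      // Enqueue every item up front, then close, mirroring the hand-rolled
      // streams used elsewhere in the spec.
      for (const item of items) controller.enqueue(item);
      controller.close();
    },
  });
}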