@agentica/core 0.32.3-dev.1 → 0.32.3-dev.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/lib/index.mjs +47 -33
- package/lib/index.mjs.map +1 -1
- package/lib/utils/ChatGptCompletionStreamingUtil.js +41 -29
- package/lib/utils/ChatGptCompletionStreamingUtil.js.map +1 -1
- package/lib/utils/ChatGptCompletionStreamingUtil.spec.d.ts +1 -0
- package/lib/utils/ChatGptCompletionStreamingUtil.spec.js +855 -0
- package/lib/utils/ChatGptCompletionStreamingUtil.spec.js.map +1 -0
- package/lib/utils/MPSC.js +8 -6
- package/lib/utils/MPSC.js.map +1 -1
- package/lib/utils/StreamUtil.d.ts +1 -1
- package/lib/utils/StreamUtil.js +2 -2
- package/lib/utils/StreamUtil.js.map +1 -1
- package/package.json +1 -1
- package/src/utils/ChatGptCompletionStreamingUtil.spec.ts +908 -0
- package/src/utils/ChatGptCompletionStreamingUtil.ts +45 -36
- package/src/utils/MPSC.ts +8 -6
- package/src/utils/StreamUtil.ts +2 -2
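
The substantive change is the new vitest suite covering `reduceStreamingWithDispatch`, reproduced in full below. Read as a rough sketch only — the shape is inferred from the assertions in the spec, not from published typings — the API under test appears to work like this:

// Sketch inferred from the spec below; exact types are not part of this diff.
// The import path mirrors the spec's own relative import.
import { reduceStreamingWithDispatch } from "./ChatGptCompletionStreamingUtil";

// chunkStream: a ReadableStream of "chat.completion.chunk" objects.
const completion = await reduceStreamingWithDispatch(chunkStream, async (event) => {
    // One event is dispatched per choice index carrying content.
    for await (const piece of event.stream) {
        // incremental text deltas for this choice
    }
    const full = await event.join(); // resolves with the choice's complete text
    event.get();                     // text accumulated so far
    event.done();                    // true once the choice saw a finish_reason
});
// `completion` is the reduced, non-streaming result ("chat.completion").

The full added spec follows.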
@@ -0,0 +1,855 @@
"use strict";
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
    function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
    return new (P || (P = Promise))(function (resolve, reject) {
        function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
        function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
        function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
        step((generator = generator.apply(thisArg, _arguments || [])).next());
    });
};
var __asyncValues = (this && this.__asyncValues) || function (o) {
    if (!Symbol.asyncIterator) throw new TypeError("Symbol.asyncIterator is not defined.");
    var m = o[Symbol.asyncIterator], i;
    return m ? m.call(o) : (o = typeof __values === "function" ? __values(o) : o[Symbol.iterator](), i = {}, verb("next"), verb("throw"), verb("return"), i[Symbol.asyncIterator] = function () { return this; }, i);
    function verb(n) { i[n] = o[n] && function (v) { return new Promise(function (resolve, reject) { v = o[n](v), settle(resolve, reject, v.done, v.value); }); }; }
    function settle(resolve, reject, d, v) { Promise.resolve(v).then(function(v) { resolve({ value: v, done: d }); }, reject); }
};
Object.defineProperty(exports, "__esModule", { value: true });
const vitest_1 = require("vitest");
const ChatGptCompletionStreamingUtil_1 = require("./ChatGptCompletionStreamingUtil");
const StreamUtil_1 = require("./StreamUtil");
(0, vitest_1.describe)("reduceStreamingWithDispatch", () => {
    (0, vitest_1.beforeEach)(() => {
        vitest_1.vi.clearAllMocks();
    });
    (0, vitest_1.describe)("basic functionality", () => {
        (0, vitest_1.it)("should process single chunk successfully", () => __awaiter(void 0, void 0, void 0, function* () {
            const mockChunk = {
                id: "test-id",
                object: "chat.completion.chunk",
                created: 1234567890,
                model: "gpt-3.5-turbo",
                choices: [
                    {
                        index: 0,
                        delta: { content: "Hello" },
                        finish_reason: null,
                    },
                ],
            };
            const stream = new ReadableStream({
                start(controller) {
                    controller.enqueue(mockChunk);
                    controller.close();
                },
            });
            const eventProcessor = vitest_1.vi.fn();
            const result = yield (0, ChatGptCompletionStreamingUtil_1.reduceStreamingWithDispatch)(stream, eventProcessor);
            (0, vitest_1.expect)(result).toBeDefined();
            (0, vitest_1.expect)(result.object).toBe("chat.completion");
            (0, vitest_1.expect)(eventProcessor).toHaveBeenCalledTimes(1);
        }));
        (0, vitest_1.it)("should handle multiple chunks with content accumulation", () => __awaiter(void 0, void 0, void 0, function* () {
            var _a;
            const chunks = [
                {
                    id: "test-id",
                    object: "chat.completion.chunk",
                    created: 1234567890,
                    model: "gpt-3.5-turbo",
                    choices: [
                        {
                            index: 0,
                            delta: { content: "Hello" },
                            finish_reason: null,
                        },
                    ],
                },
                {
                    id: "test-id",
                    object: "chat.completion.chunk",
                    created: 1234567890,
                    model: "gpt-3.5-turbo",
                    choices: [
                        {
                            index: 0,
                            delta: { content: " World" },
                            finish_reason: null,
                        },
                    ],
                },
                {
                    id: "test-id",
                    object: "chat.completion.chunk",
                    created: 1234567890,
                    model: "gpt-3.5-turbo",
                    choices: [
                        {
                            index: 0,
                            delta: { content: "!" },
                            finish_reason: "stop",
                        },
                    ],
                },
            ];
            const stream = new ReadableStream({
                start(controller) {
                    chunks.forEach(chunk => controller.enqueue(chunk));
                    controller.close();
                },
            });
            const eventProcessor = vitest_1.vi.fn();
            const result = yield (0, ChatGptCompletionStreamingUtil_1.reduceStreamingWithDispatch)(stream, eventProcessor);
            (0, vitest_1.expect)(result).toBeDefined();
            (0, vitest_1.expect)(result.object).toBe("chat.completion");
            (0, vitest_1.expect)(eventProcessor).toHaveBeenCalledTimes(1);
            const eventCall = (_a = eventProcessor.mock.calls[0]) === null || _a === void 0 ? void 0 : _a[0];
            (0, vitest_1.expect)(eventCall.get()).toBe("Hello World!");
        }));
        (0, vitest_1.it)("should handle empty content chunks", () => __awaiter(void 0, void 0, void 0, function* () {
            var _a;
            const chunks = [
                {
                    id: "test-id",
                    object: "chat.completion.chunk",
                    created: 1234567890,
                    model: "gpt-3.5-turbo",
                    choices: [
                        {
                            index: 0,
                            delta: { content: "" },
                            finish_reason: null,
                        },
                    ],
                },
                {
                    id: "test-id",
                    object: "chat.completion.chunk",
                    created: 1234567890,
                    model: "gpt-3.5-turbo",
                    choices: [
                        {
                            index: 0,
                            delta: { content: "Hello" },
                            finish_reason: null,
                        },
                    ],
                },
            ];
            const stream = new ReadableStream({
                start(controller) {
                    chunks.forEach(chunk => controller.enqueue(chunk));
                    controller.close();
                },
            });
            const eventProcessor = vitest_1.vi.fn();
            const result = yield (0, ChatGptCompletionStreamingUtil_1.reduceStreamingWithDispatch)(stream, eventProcessor);
            (0, vitest_1.expect)(result).toBeDefined();
            (0, vitest_1.expect)(eventProcessor).toHaveBeenCalledTimes(1);
            const eventCall = (_a = eventProcessor.mock.calls[0]) === null || _a === void 0 ? void 0 : _a[0];
            (0, vitest_1.expect)(eventCall.get()).toBe("Hello");
        }));
    });
    (0, vitest_1.describe)("multiple choices handling", () => {
        (0, vitest_1.it)("should handle multiple choices with different indices", () => __awaiter(void 0, void 0, void 0, function* () {
            var _a, _b;
            const chunks = [
                {
                    id: "test-id",
                    object: "chat.completion.chunk",
                    created: 1234567890,
                    model: "gpt-3.5-turbo",
                    choices: [
                        {
                            index: 0,
                            delta: { content: "Choice 1" },
                            finish_reason: null,
                        },
                        {
                            index: 1,
                            delta: { content: "Choice 2" },
                            finish_reason: null,
                        },
                    ],
                },
                {
                    id: "test-id",
                    object: "chat.completion.chunk",
                    created: 1234567890,
                    model: "gpt-3.5-turbo",
                    choices: [
                        {
                            index: 0,
                            delta: { content: " continued" },
                            finish_reason: "stop",
                        },
                        {
                            index: 1,
                            delta: { content: " continued" },
                            finish_reason: "stop",
                        },
                    ],
                },
            ];
            const stream = new ReadableStream({
                start(controller) {
                    chunks.forEach(chunk => controller.enqueue(chunk));
                    controller.close();
                },
            });
            const eventProcessor = vitest_1.vi.fn();
            const result = yield (0, ChatGptCompletionStreamingUtil_1.reduceStreamingWithDispatch)(stream, eventProcessor);
            (0, vitest_1.expect)(result).toBeDefined();
            (0, vitest_1.expect)(eventProcessor).toHaveBeenCalledTimes(2);
            const firstCall = (_a = eventProcessor.mock.calls[0]) === null || _a === void 0 ? void 0 : _a[0];
            const secondCall = (_b = eventProcessor.mock.calls[1]) === null || _b === void 0 ? void 0 : _b[0];
            (0, vitest_1.expect)(firstCall.get()).toBe("Choice 1 continued");
            (0, vitest_1.expect)(secondCall.get()).toBe("Choice 2 continued");
        }));
    });
    (0, vitest_1.describe)("finish reason handling", () => {
        (0, vitest_1.it)("should close context when finish_reason is provided", () => __awaiter(void 0, void 0, void 0, function* () {
            var _a;
            const chunks = [
                {
                    id: "test-id",
                    object: "chat.completion.chunk",
                    created: 1234567890,
                    model: "gpt-3.5-turbo",
                    choices: [
                        {
                            index: 0,
                            delta: { content: "Hello" },
                            finish_reason: null,
                        },
                    ],
                },
                {
                    id: "test-id",
                    object: "chat.completion.chunk",
                    created: 1234567890,
                    model: "gpt-3.5-turbo",
                    choices: [
                        {
                            index: 0,
                            delta: { content: " World" },
                            finish_reason: "stop",
                        },
                    ],
                },
            ];
            const stream = new ReadableStream({
                start(controller) {
                    chunks.forEach(chunk => controller.enqueue(chunk));
                    controller.close();
                },
            });
            const eventProcessor = vitest_1.vi.fn();
            const result = yield (0, ChatGptCompletionStreamingUtil_1.reduceStreamingWithDispatch)(stream, eventProcessor);
            (0, vitest_1.expect)(result).toBeDefined();
            (0, vitest_1.expect)(eventProcessor).toHaveBeenCalledTimes(1);
            const eventCall = (_a = eventProcessor.mock.calls[0]) === null || _a === void 0 ? void 0 : _a[0];
            (0, vitest_1.expect)(eventCall.get()).toBe("Hello World");
            (0, vitest_1.expect)(eventCall.done()).toBe(true);
        }));
    });
    (0, vitest_1.describe)("stream processing", () => {
        (0, vitest_1.it)("should provide working stream in event processor", () => __awaiter(void 0, void 0, void 0, function* () {
            const chunks = [
                {
                    id: "test-id",
                    object: "chat.completion.chunk",
                    created: 1234567890,
                    model: "gpt-3.5-turbo",
                    choices: [
                        {
                            index: 0,
                            delta: { content: "Hello" },
                            finish_reason: null,
                        },
                    ],
                },
                {
                    id: "test-id",
                    object: "chat.completion.chunk",
                    created: 1234567890,
                    model: "gpt-3.5-turbo",
                    choices: [
                        {
                            index: 0,
                            delta: { content: " World" },
                            finish_reason: "stop",
                        },
                    ],
                },
            ];
            const stream = new ReadableStream({
                start(controller) {
                    chunks.forEach(chunk => controller.enqueue(chunk));
                    controller.close();
                },
            });
            const streamedContent = [];
            yield new Promise((resolve) => __awaiter(void 0, void 0, void 0, function* () {
                const eventProcessor = vitest_1.vi.fn(({ stream: contentStream }) => {
                    (() => __awaiter(void 0, void 0, void 0, function* () {
                        var _a, e_1, _b, _c;
                        try {
                            for (var _d = true, contentStream_1 = __asyncValues(contentStream), contentStream_1_1; contentStream_1_1 = yield contentStream_1.next(), _a = contentStream_1_1.done, !_a; _d = true) {
                                _c = contentStream_1_1.value;
                                _d = false;
                                const content = _c;
                                streamedContent.push(content);
                            }
                        }
                        catch (e_1_1) { e_1 = { error: e_1_1 }; }
                        finally {
                            try {
                                if (!_d && !_a && (_b = contentStream_1.return)) yield _b.call(contentStream_1);
                            }
                            finally { if (e_1) throw e_1.error; }
                        }
                        resolve(true);
                    }))();
                });
                yield (0, ChatGptCompletionStreamingUtil_1.reduceStreamingWithDispatch)(stream, eventProcessor);
            }));
            (0, vitest_1.expect)(streamedContent).toEqual(["Hello", " World"]);
        }));
        (0, vitest_1.it)("should provide working join function", () => __awaiter(void 0, void 0, void 0, function* () {
            const chunks = [
                {
                    id: "test-id",
                    object: "chat.completion.chunk",
                    created: 1234567890,
                    model: "gpt-3.5-turbo",
                    choices: [
                        {
                            index: 0,
                            delta: { content: "Hello" },
                            finish_reason: null,
                        },
                    ],
                },
                {
                    id: "test-id",
                    object: "chat.completion.chunk",
                    created: 1234567890,
                    model: "gpt-3.5-turbo",
                    choices: [
                        {
                            index: 0,
                            delta: { content: " World" },
                            finish_reason: "stop",
                        },
                    ],
                },
            ];
            const stream = new ReadableStream({
                start(controller) {
                    chunks.forEach(chunk => controller.enqueue(chunk));
                    controller.close();
                },
            });
            let joinedContent = "";
            const eventProcessor = vitest_1.vi.fn((_a) => __awaiter(void 0, [_a], void 0, function* ({ join }) {
                joinedContent = yield join();
            }));
            const result = yield (0, ChatGptCompletionStreamingUtil_1.reduceStreamingWithDispatch)(stream, eventProcessor);
            (0, vitest_1.expect)(result).toBeDefined();
            (0, vitest_1.expect)(joinedContent).toBe("Hello World");
        }));
    });
    (0, vitest_1.describe)("error handling", () => {
        (0, vitest_1.it)("should throw error for empty stream", () => __awaiter(void 0, void 0, void 0, function* () {
            const stream = new ReadableStream({
                start(controller) {
                    controller.close();
                },
            });
            const eventProcessor = vitest_1.vi.fn();
            yield (0, vitest_1.expect)((0, ChatGptCompletionStreamingUtil_1.reduceStreamingWithDispatch)(stream, eventProcessor)).rejects.toThrow("StreamUtil.reduce did not produce a ChatCompletion");
        }));
        (0, vitest_1.it)("should handle stream with only finish_reason chunks", () => __awaiter(void 0, void 0, void 0, function* () {
            const chunks = [
                {
                    id: "test-id",
                    object: "chat.completion.chunk",
                    created: 1234567890,
                    model: "gpt-3.5-turbo",
                    choices: [
                        {
                            index: 0,
                            delta: { content: null },
                            finish_reason: "stop",
                        },
                    ],
                },
            ];
            const stream = new ReadableStream({
                start(controller) {
                    chunks.forEach(chunk => controller.enqueue(chunk));
                    controller.close();
                },
            });
            const eventProcessor = vitest_1.vi.fn();
            const result = yield (0, ChatGptCompletionStreamingUtil_1.reduceStreamingWithDispatch)(stream, eventProcessor);
            (0, vitest_1.expect)(result).toBeDefined();
            (0, vitest_1.expect)(eventProcessor).not.toHaveBeenCalled();
        }));
    });
    (0, vitest_1.describe)("complex scenarios", () => {
        (0, vitest_1.it)("should handle mixed content and finish_reason chunks", () => __awaiter(void 0, void 0, void 0, function* () {
            var _a;
            const chunks = [
                {
                    id: "test-id",
                    object: "chat.completion.chunk",
                    created: 1234567890,
                    model: "gpt-3.5-turbo",
                    choices: [
                        {
                            index: 0,
                            delta: { content: "Hello" },
                            finish_reason: null,
                        },
                    ],
                },
                {
                    id: "test-id",
                    object: "chat.completion.chunk",
                    created: 1234567890,
                    model: "gpt-3.5-turbo",
                    choices: [
                        {
                            index: 0,
                            delta: { content: null },
                            finish_reason: null,
                        },
                    ],
                },
                {
                    id: "test-id",
                    object: "chat.completion.chunk",
                    created: 1234567890,
                    model: "gpt-3.5-turbo",
                    choices: [
                        {
                            index: 0,
                            delta: { content: " World" },
                            finish_reason: null,
                        },
                    ],
                },
                {
                    id: "test-id",
                    object: "chat.completion.chunk",
                    created: 1234567890,
                    model: "gpt-3.5-turbo",
                    choices: [
                        {
                            index: 0,
                            delta: { content: null },
                            finish_reason: "stop",
                        },
                    ],
                },
            ];
            const stream = new ReadableStream({
                start(controller) {
                    chunks.forEach(chunk => controller.enqueue(chunk));
                    controller.close();
                },
            });
            const eventProcessor = vitest_1.vi.fn();
            const result = yield (0, ChatGptCompletionStreamingUtil_1.reduceStreamingWithDispatch)(stream, eventProcessor);
            (0, vitest_1.expect)(result).toBeDefined();
            (0, vitest_1.expect)(eventProcessor).toHaveBeenCalledTimes(1);
            const eventCall = (_a = eventProcessor.mock.calls[0]) === null || _a === void 0 ? void 0 : _a[0];
            (0, vitest_1.expect)(eventCall.get()).toBe("Hello World");
        }));
    });
    (0, vitest_1.describe)("edge cases and exceptions", () => {
        (0, vitest_1.it)("should handle null delta content", () => __awaiter(void 0, void 0, void 0, function* () {
            var _a;
            const chunks = [
                {
                    id: "test-id",
                    object: "chat.completion.chunk",
                    created: 1234567890,
                    model: "gpt-3.5-turbo",
                    choices: [
                        {
                            index: 0,
                            delta: { content: null },
                            finish_reason: null,
                        },
                    ],
                },
                {
                    id: "test-id",
                    object: "chat.completion.chunk",
                    created: 1234567890,
                    model: "gpt-3.5-turbo",
                    choices: [
                        {
                            index: 0,
                            delta: { content: "Hello" },
                            finish_reason: "stop",
                        },
                    ],
                },
            ];
            const stream = new ReadableStream({
                start(controller) {
                    chunks.forEach(chunk => controller.enqueue(chunk));
                    controller.close();
                },
            });
            const eventProcessor = vitest_1.vi.fn();
            const result = yield (0, ChatGptCompletionStreamingUtil_1.reduceStreamingWithDispatch)(stream, eventProcessor);
            (0, vitest_1.expect)(result).toBeDefined();
            (0, vitest_1.expect)(eventProcessor).toHaveBeenCalledTimes(1);
            const eventCall = (_a = eventProcessor.mock.calls[0]) === null || _a === void 0 ? void 0 : _a[0];
            (0, vitest_1.expect)(eventCall.get()).toBe("Hello");
        }));
        (0, vitest_1.it)("should handle missing delta object", () => __awaiter(void 0, void 0, void 0, function* () {
            var _a;
            const chunks = [
                {
                    id: "test-id",
                    object: "chat.completion.chunk",
                    created: 1234567890,
                    model: "gpt-3.5-turbo",
                    choices: [
                        {
                            index: 0,
                            delta: {},
                            finish_reason: null,
                        },
                    ],
                },
                {
                    id: "test-id",
                    object: "chat.completion.chunk",
                    created: 1234567890,
                    model: "gpt-3.5-turbo",
                    choices: [
                        {
                            index: 0,
                            delta: { content: "Hello" },
                            finish_reason: "stop",
                        },
                    ],
                },
            ];
            const stream = new ReadableStream({
                start(controller) {
                    chunks.forEach(chunk => controller.enqueue(chunk));
                    controller.close();
                },
            });
            const eventProcessor = vitest_1.vi.fn();
            const result = yield (0, ChatGptCompletionStreamingUtil_1.reduceStreamingWithDispatch)(stream, eventProcessor);
            (0, vitest_1.expect)(result).toBeDefined();
            (0, vitest_1.expect)(eventProcessor).toHaveBeenCalledTimes(1);
            const eventCall = (_a = eventProcessor.mock.calls[0]) === null || _a === void 0 ? void 0 : _a[0];
            (0, vitest_1.expect)(eventCall.get()).toBe("Hello");
        }));
        (0, vitest_1.it)("should handle chunks with no choices", () => __awaiter(void 0, void 0, void 0, function* () {
            var _a;
            const chunks = [
                {
                    id: "test-id",
                    object: "chat.completion.chunk",
                    created: 1234567890,
                    model: "gpt-3.5-turbo",
                    choices: [],
                },
                {
                    id: "test-id",
                    object: "chat.completion.chunk",
                    created: 1234567890,
                    model: "gpt-3.5-turbo",
                    choices: [
                        {
                            index: 0,
                            delta: { content: "Hello" },
                            finish_reason: "stop",
                        },
                    ],
                },
            ];
            const stream = new ReadableStream({
                start(controller) {
                    chunks.forEach(chunk => controller.enqueue(chunk));
                    controller.close();
                },
            });
            const eventProcessor = vitest_1.vi.fn();
            const result = yield (0, ChatGptCompletionStreamingUtil_1.reduceStreamingWithDispatch)(stream, eventProcessor);
            (0, vitest_1.expect)(result).toBeDefined();
            (0, vitest_1.expect)(eventProcessor).toHaveBeenCalledTimes(1);
            const eventCall = (_a = eventProcessor.mock.calls[0]) === null || _a === void 0 ? void 0 : _a[0];
            (0, vitest_1.expect)(eventCall.get()).toBe("Hello");
        }));
        (0, vitest_1.it)("should handle very large content chunks", () => __awaiter(void 0, void 0, void 0, function* () {
            var _a;
            const largeContent = "x".repeat(10000);
            const chunks = [
                {
                    id: "test-id",
                    object: "chat.completion.chunk",
                    created: 1234567890,
                    model: "gpt-3.5-turbo",
                    choices: [
                        {
                            index: 0,
                            delta: { content: largeContent },
                            finish_reason: "stop",
                        },
                    ],
                },
            ];
            const stream = new ReadableStream({
                start(controller) {
                    chunks.forEach(chunk => controller.enqueue(chunk));
                    controller.close();
                },
            });
            const eventProcessor = vitest_1.vi.fn();
            const result = yield (0, ChatGptCompletionStreamingUtil_1.reduceStreamingWithDispatch)(stream, eventProcessor);
            (0, vitest_1.expect)(result).toBeDefined();
            // Now single chunk with content should trigger eventProcessor
            (0, vitest_1.expect)(eventProcessor).toHaveBeenCalledOnce();
            const eventCall = (_a = eventProcessor.mock.calls[0]) === null || _a === void 0 ? void 0 : _a[0];
            (0, vitest_1.expect)(eventCall.get()).toBe(largeContent);
        }));
        (0, vitest_1.it)("should handle rapid consecutive chunks", () => __awaiter(void 0, void 0, void 0, function* () {
            var _a;
            const chunks = Array.from({ length: 100 }, (_, i) => ({
                id: "test-id",
                object: "chat.completion.chunk",
                created: 1234567890,
                model: "gpt-3.5-turbo",
                choices: [
                    {
                        index: 0,
                        delta: { content: i.toString() },
                        finish_reason: i === 99 ? "stop" : null,
                    },
                ],
            }));
            const stream = new ReadableStream({
                start(controller) {
                    chunks.forEach(chunk => controller.enqueue(chunk));
                    controller.close();
                },
            });
            const eventProcessor = vitest_1.vi.fn();
            const result = yield (0, ChatGptCompletionStreamingUtil_1.reduceStreamingWithDispatch)(stream, eventProcessor);
            (0, vitest_1.expect)(result).toBeDefined();
            (0, vitest_1.expect)(eventProcessor).toHaveBeenCalledTimes(1);
            const eventCall = (_a = eventProcessor.mock.calls[0]) === null || _a === void 0 ? void 0 : _a[0];
            const expectedContent = Array.from({ length: 100 }, (_, i) => i.toString()).join("");
            (0, vitest_1.expect)(eventCall.get()).toBe(expectedContent);
        }));
        (0, vitest_1.it)("should handle out-of-order choice indices", () => __awaiter(void 0, void 0, void 0, function* () {
            const chunks = [
                {
                    id: "test-id",
                    object: "chat.completion.chunk",
                    created: 1234567890,
                    model: "gpt-3.5-turbo",
                    choices: [
                        {
                            index: 2,
                            delta: { content: "Third" },
                            finish_reason: null,
                        },
                        {
                            index: 0,
                            delta: { content: "First" },
                            finish_reason: null,
                        },
                        {
                            index: 1,
                            delta: { content: "Second" },
                            finish_reason: null,
                        },
                    ],
                },
                {
                    id: "test-id",
                    object: "chat.completion.chunk",
                    created: 1234567890,
                    model: "gpt-3.5-turbo",
                    choices: [
                        {
                            index: 0,
                            delta: { content: " content" },
                            finish_reason: "stop",
                        },
                        {
                            index: 1,
                            delta: { content: " content" },
                            finish_reason: "stop",
                        },
                        {
                            index: 2,
                            delta: { content: " content" },
                            finish_reason: "stop",
                        },
                    ],
                },
            ];
            const stream = new ReadableStream({
                start(controller) {
                    chunks.forEach(chunk => controller.enqueue(chunk));
                    controller.close();
                },
            });
            const eventProcessor = vitest_1.vi.fn();
            const result = yield (0, ChatGptCompletionStreamingUtil_1.reduceStreamingWithDispatch)(stream, eventProcessor);
            (0, vitest_1.expect)(result).toBeDefined();
            (0, vitest_1.expect)(eventProcessor).toHaveBeenCalledTimes(3);
            const calls = eventProcessor.mock.calls.map(call => call[0]);
            (0, vitest_1.expect)(calls[0].get()).toBe("Third content");
            (0, vitest_1.expect)(calls[1].get()).toBe("First content");
            (0, vitest_1.expect)(calls[2].get()).toBe("Second content");
        }));
        (0, vitest_1.it)("should handle mixed finish reasons", () => __awaiter(void 0, void 0, void 0, function* () {
            var _a, _b;
            const chunks = [
                {
                    id: "test-id",
                    object: "chat.completion.chunk",
                    created: 1234567890,
                    model: "gpt-3.5-turbo",
                    choices: [
                        {
                            index: 0,
                            delta: { content: "Hello" },
                            finish_reason: null,
                        },
                        {
                            index: 1,
                            delta: { content: "World" },
                            finish_reason: null,
                        },
                    ],
                },
                {
                    id: "test-id",
                    object: "chat.completion.chunk",
                    created: 1234567890,
                    model: "gpt-3.5-turbo",
                    choices: [
                        {
                            index: 0,
                            delta: { content: " there" },
                            finish_reason: "stop",
                        },
                        {
                            index: 1,
                            delta: { content: "!" },
                            finish_reason: "length",
                        },
                    ],
                },
            ];
            const stream = StreamUtil_1.StreamUtil.from(...chunks);
            const eventProcessor = vitest_1.vi.fn();
            const result = yield (0, ChatGptCompletionStreamingUtil_1.reduceStreamingWithDispatch)(stream, eventProcessor);
            (0, vitest_1.expect)(result).toBeDefined();
            (0, vitest_1.expect)(eventProcessor).toHaveBeenCalledTimes(2);
            const firstCall = (_a = eventProcessor.mock.calls[0]) === null || _a === void 0 ? void 0 : _a[0];
            const secondCall = (_b = eventProcessor.mock.calls[1]) === null || _b === void 0 ? void 0 : _b[0];
            (0, vitest_1.expect)(firstCall.get()).toBe("Hello there");
            (0, vitest_1.expect)(secondCall.get()).toBe("World!");
            yield firstCall.join();
            yield secondCall.join();
            (0, vitest_1.expect)(firstCall.done()).toBe(true);
            (0, vitest_1.expect)(secondCall.done()).toBe(true);
        }));
        (0, vitest_1.it)("should handle Unicode and special characters", () => __awaiter(void 0, void 0, void 0, function* () {
            var _a;
            const specialContent = "Hello 🌍! 안녕하세요 مرحبا 🚀 ñáéíóú";
            const chunks = [
                {
                    id: "test-id",
                    object: "chat.completion.chunk",
                    created: 1234567890,
                    model: "gpt-3.5-turbo",
                    choices: [
                        {
                            index: 0,
                            delta: { content: specialContent },
                            finish_reason: "stop",
                        },
                    ],
                },
            ];
            const stream = StreamUtil_1.StreamUtil.from(...chunks);
            const eventProcessor = vitest_1.vi.fn();
            const result = yield (0, ChatGptCompletionStreamingUtil_1.reduceStreamingWithDispatch)(stream, eventProcessor);
            (0, vitest_1.expect)(result).toBeDefined();
            // Now single chunk with content should trigger eventProcessor
            (0, vitest_1.expect)(eventProcessor).toHaveBeenCalledOnce();
            const eventCall = (_a = eventProcessor.mock.calls[0]) === null || _a === void 0 ? void 0 : _a[0];
            (0, vitest_1.expect)(eventCall.get()).toBe(specialContent);
        }));
        (0, vitest_1.it)("should handle stream reader errors gracefully", () => __awaiter(void 0, void 0, void 0, function* () {
            const chunks = [
                {
                    id: "test-id",
                    object: "chat.completion.chunk",
                    created: 1234567890,
                    model: "gpt-3.5-turbo",
                    choices: [
                        {
                            index: 0,
                            delta: { content: "Hello" },
                            finish_reason: null,
                        },
                    ],
                },
            ];
            const stream = new ReadableStream({
                start(controller) {
                    controller.enqueue(chunks[0]);
                    // Simulate an error in the stream
                    controller.error(new Error("Stream error"));
                },
            });
            const eventProcessor = vitest_1.vi.fn();
            yield (0, vitest_1.expect)((0, ChatGptCompletionStreamingUtil_1.reduceStreamingWithDispatch)(stream, eventProcessor))
                .rejects.toThrow("Stream error");
        }));
        (0, vitest_1.it)("should handle completely malformed chunks gracefully", () => __awaiter(void 0, void 0, void 0, function* () {
            const malformedChunk = {
                // Missing required fields
                object: "chat.completion.chunk",
                choices: [
                    {
                        // Missing index
                        delta: { content: "Hello" },
                        finish_reason: null,
                    },
                ],
            };
            const stream = new ReadableStream({
                start(controller) {
                    controller.enqueue(malformedChunk);
                    controller.close();
                },
            });
            const eventProcessor = vitest_1.vi.fn();
            // Should not throw, but should handle gracefully
            const result = yield (0, ChatGptCompletionStreamingUtil_1.reduceStreamingWithDispatch)(stream, eventProcessor);
            (0, vitest_1.expect)(result).toBeDefined();
        }));
    });
});
//# sourceMappingURL=ChatGptCompletionStreamingUtil.spec.js.map