@desplega.ai/qa-use 2.15.0 → 2.15.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/lib/api/index.d.ts +6 -5
- package/dist/lib/api/index.d.ts.map +1 -1
- package/dist/lib/api/index.js +112 -5
- package/dist/lib/api/index.js.map +1 -1
- package/dist/lib/api/sse.d.ts +22 -2
- package/dist/lib/api/sse.d.ts.map +1 -1
- package/dist/lib/api/sse.js +77 -5
- package/dist/lib/api/sse.js.map +1 -1
- package/dist/package.json +1 -1
- package/dist/src/cli/commands/test/create.d.ts +10 -0
- package/dist/src/cli/commands/test/create.d.ts.map +1 -0
- package/dist/src/cli/commands/test/create.js +82 -0
- package/dist/src/cli/commands/test/create.js.map +1 -0
- package/dist/src/cli/commands/test/index.d.ts.map +1 -1
- package/dist/src/cli/commands/test/index.js +2 -0
- package/dist/src/cli/commands/test/index.js.map +1 -1
- package/dist/src/cli/commands/test/run.d.ts.map +1 -1
- package/dist/src/cli/commands/test/run.js +4 -1
- package/dist/src/cli/commands/test/run.js.map +1 -1
- package/dist/src/cli/commands/test/schema.d.ts.map +1 -1
- package/dist/src/cli/commands/test/schema.js +51 -0
- package/dist/src/cli/commands/test/schema.js.map +1 -1
- package/dist/src/cli/commands/test/sync.d.ts +19 -0
- package/dist/src/cli/commands/test/sync.d.ts.map +1 -1
- package/dist/src/cli/commands/test/sync.js +92 -58
- package/dist/src/cli/commands/test/sync.js.map +1 -1
- package/dist/src/cli/commands/test/validate.d.ts.map +1 -1
- package/dist/src/cli/commands/test/validate.js +13 -0
- package/dist/src/cli/commands/test/validate.js.map +1 -1
- package/dist/src/cli/lib/runner.d.ts +6 -0
- package/dist/src/cli/lib/runner.d.ts.map +1 -1
- package/dist/src/cli/lib/runner.js +2 -2
- package/dist/src/cli/lib/runner.js.map +1 -1
- package/lib/api/index.ts +137 -10
- package/lib/api/sse.test.ts +530 -0
- package/lib/api/sse.ts +105 -5
- package/package.json +1 -1
package/lib/api/sse.test.ts
ADDED

@@ -0,0 +1,530 @@
+/**
+ * Unit tests for SSE parsing and streaming utilities.
+ *
+ * Covers:
+ * - parseSSE round-trip (LF + CRLF) — pins existing parsing behavior.
+ * - streamSSE returns cleanly when the AbortSignal is aborted mid-stream
+ *   (no thrown error), within a tight time budget.
+ * - runCliTest exits within ~200ms of receiving a terminal `complete` /
+ *   `error` SSE event even when the underlying stream stays open (Phase 2
+ *   regression: the cope-api hang where the SSE socket stays alive ~80s
+ *   after the test finishes).
+ */
+
+import { afterEach, beforeEach, describe, expect, it } from 'bun:test';
+import { ApiClient } from './index.js';
+import { parseSSE, streamSSE } from './sse.js';
+
+describe('parseSSE', () => {
+  it('parses a single event with LF line endings', () => {
+    const chunk = 'event: complete\ndata: {"status":"passed"}\n\n';
+    const events = parseSSE(chunk);
+
+    expect(events).toHaveLength(1);
+    expect(events[0]?.event).toBe('complete');
+    expect(events[0]?.data).toEqual({ status: 'passed' });
+  });
+
+  it('parses a single event with CRLF line endings', () => {
+    const chunk = 'event: complete\r\ndata: {"status":"passed"}\r\n\r\n';
+    const events = parseSSE(chunk);
+
+    expect(events).toHaveLength(1);
+    expect(events[0]?.event).toBe('complete');
+    expect(events[0]?.data).toEqual({ status: 'passed' });
+  });
+
+  it('parses multiple events in a single chunk (LF)', () => {
+    const chunk =
+      'event: start\ndata: {"run_id":"abc"}\n\n' +
+      'event: step_complete\ndata: {"step_index":0}\n\n' +
+      'event: complete\ndata: {"status":"passed"}\n\n';
+    const events = parseSSE(chunk);
+
+    expect(events).toHaveLength(3);
+    expect(events.map((e) => e.event)).toEqual(['start', 'step_complete', 'complete']);
+  });
+
+  it('parses multiple events in a single chunk (CRLF)', () => {
+    const chunk =
+      'event: start\r\ndata: {"run_id":"abc"}\r\n\r\n' +
+      'event: step_complete\r\ndata: {"step_index":0}\r\n\r\n' +
+      'event: complete\r\ndata: {"status":"passed"}\r\n\r\n';
+    const events = parseSSE(chunk);
+
+    expect(events).toHaveLength(3);
+    expect(events.map((e) => e.event)).toEqual(['start', 'step_complete', 'complete']);
+  });
+
+  it('parses id field when present', () => {
+    const chunk = 'id: 42\nevent: ping\ndata: "hi"\n\n';
+    const events = parseSSE(chunk);
+
+    expect(events).toHaveLength(1);
+    expect(events[0]?.id).toBe('42');
+  });
+
+  it('falls back to raw string when data is not JSON', () => {
+    const chunk = 'event: log\ndata: not-json-payload\n\n';
+    const events = parseSSE(chunk);
+
+    expect(events).toHaveLength(1);
+    expect(events[0]?.data).toBe('not-json-payload');
+  });
+
+  it('ignores comment lines (pings)', () => {
+    const chunk = ': keep-alive\n\nevent: complete\ndata: {"ok":true}\n\n';
+    const events = parseSSE(chunk);
+
+    expect(events).toHaveLength(1);
+    expect(events[0]?.event).toBe('complete');
+  });
+});
+
+describe('streamSSE abort behavior', () => {
+  /**
+   * Build a Response whose body is a ReadableStream that emits one initial
+   * event, then never closes. Lets us simulate the cope-api hang where the
+   * server keeps the SSE stream open after `complete`.
+   */
+  function makeHangingResponse(initialChunk: string): {
+    response: Response;
+    cancel: () => void;
+  } {
+    let cancelled = false;
+    let cancelHandle: (() => void) | null = null;
+
+    const stream = new ReadableStream<Uint8Array>({
+      start(controller) {
+        const encoder = new TextEncoder();
+        controller.enqueue(encoder.encode(initialChunk));
+        // Intentionally never call controller.close() — mimic an open SSE
+        // stream that the backend isn't terminating.
+        cancelHandle = () => {
+          if (cancelled) return;
+          cancelled = true;
+          try {
+            controller.close();
+          } catch {
+            // Already closed/errored — ignore.
+          }
+        };
+      },
+      cancel() {
+        cancelled = true;
+      },
+    });
+
+    const response = new Response(stream, {
+      headers: { 'content-type': 'text/event-stream' },
+    });
+
+    return {
+      response,
+      cancel: () => cancelHandle?.(),
+    };
+  }
+
+  it('returns cleanly within 50ms when the signal aborts mid-stream', async () => {
+    const { response, cancel } = makeHangingResponse('event: start\ndata: {"run_id":"abc"}\n\n');
+
+    const controller = new AbortController();
+
+    // Schedule abort shortly after iteration begins.
+    setTimeout(() => controller.abort(), 5);
+
+    const start = performance.now();
+    const events: string[] = [];
+
+    try {
+      for await (const event of streamSSE(response, { signal: controller.signal })) {
+        events.push(event.event);
+      }
+    } finally {
+      cancel();
+    }
+
+    const elapsed = performance.now() - start;
+
+    // We received the initial event before the abort fired.
+    expect(events).toContain('start');
+    // The for-await loop terminated cleanly (no throw) and quickly.
+    expect(elapsed).toBeLessThan(50);
+  });
+
+  it('returns immediately if the signal is already aborted', async () => {
+    const { response, cancel } = makeHangingResponse('event: start\ndata: {}\n\n');
+
+    const controller = new AbortController();
+    controller.abort();
+
+    const start = performance.now();
+    const events: string[] = [];
+
+    try {
+      for await (const event of streamSSE(response, { signal: controller.signal })) {
+        events.push(event.event);
+      }
+    } finally {
+      cancel();
+    }
+
+    const elapsed = performance.now() - start;
+
+    expect(events).toEqual([]);
+    expect(elapsed).toBeLessThan(50);
+  });
+});
+
+describe('runCliTest terminal-event close', () => {
+  const realFetch = globalThis.fetch;
+
+  beforeEach(() => {
+    // No-op; per-test installs its own mock.
+  });
+
+  afterEach(() => {
+    globalThis.fetch = realFetch;
+  });
+
+  /**
+   * Build a Response whose body emits the supplied chunks (one per microtask
+   * tick) and then **never closes**. Reproduces the cope-api SSE hang the
+   * fix is targeting. Returns a `forceClose` to defensively shut the
+   * controller after each test so a leaked stream can't keep the bun
+   * process alive.
+   */
+  function makeStreamingResponse(chunks: string[]): {
+    response: Response;
+    forceClose: () => void;
+  } {
+    let streamController: ReadableStreamDefaultController<Uint8Array> | null = null;
+    let closed = false;
+
+    const stream = new ReadableStream<Uint8Array>({
+      start(controller) {
+        streamController = controller;
+        const encoder = new TextEncoder();
+        // Enqueue each chunk on its own microtask so the for-await loop sees
+        // them as separate reads. Order is preserved because each Promise
+        // resolves before the next is scheduled.
+        (async () => {
+          for (const chunk of chunks) {
+            await Promise.resolve();
+            if (closed) return;
+            try {
+              controller.enqueue(encoder.encode(chunk));
+            } catch {
+              return;
+            }
+          }
+          // Intentionally do not call controller.close() — mimic the backend
+          // keeping the SSE stream open after `complete`.
+        })();
+      },
+      cancel() {
+        closed = true;
+      },
+    });
+
+    const response = new Response(stream, {
+      status: 200,
+      headers: { 'content-type': 'text/event-stream' },
+    });
+
+    return {
+      response,
+      forceClose: () => {
+        if (closed) return;
+        closed = true;
+        try {
+          streamController?.close();
+        } catch {
+          // Already closed/errored — ignore.
+        }
+      },
+    };
+  }
+
+  it('resolves within 200ms after receiving a `complete` chunk on a never-closing stream', async () => {
+    const chunks = [
+      'event: start\ndata: {"run_id":"abc"}\n\n',
+      'event: step_complete\ndata: {"step_index":0}\n\n',
+      'event: complete\ndata: {"run_id":"abc","status":"passed","duration_seconds":1.2,"steps":[]}\n\n',
+    ];
+
+    const { response, forceClose } = makeStreamingResponse(chunks);
+
+    globalThis.fetch = (async () => response) as unknown as typeof fetch;
+
+    const client = new ApiClient('http://localhost:0');
+
+    const start = performance.now();
+    try {
+      const result = await client.runCliTest({ test_id: 'abc' });
+      const elapsed = performance.now() - start;
+
+      expect(result.run_id).toBe('abc');
+      expect(result.status).toBe('passed');
+      // The whole flow (fetch + 3 chunked reads + abort + return) should be
+      // well under 200ms — the stream itself never closes, so anything close
+      // to the server-side hang would blow this budget.
+      expect(elapsed).toBeLessThan(200);
+    } finally {
+      forceClose();
+    }
+  });
+
+  it('resolves with status=error within 200ms after an `error` event on a never-closing stream', async () => {
+    const chunks = [
+      'event: start\ndata: {"run_id":"def"}\n\n',
+      'event: error\ndata: {"run_id":"def","status":"error","duration_seconds":0.5,"steps":[],"error":"boom"}\n\n',
+    ];
+
+    const { response, forceClose } = makeStreamingResponse(chunks);
+
+    globalThis.fetch = (async () => response) as unknown as typeof fetch;
+
+    const client = new ApiClient('http://localhost:0');
+
+    const start = performance.now();
+    try {
+      const result = await client.runCliTest({ test_id: 'def' });
+      const elapsed = performance.now() - start;
+
+      expect(result.status).toBe('error');
+      expect(result.run_id).toBe('def');
+      expect(elapsed).toBeLessThan(200);
+    } finally {
+      forceClose();
+    }
+  });
+});
+
+describe('runCliTest idle-timeout watchdog', () => {
+  const realFetch = globalThis.fetch;
+
+  afterEach(() => {
+    globalThis.fetch = realFetch;
+  });
+
+  /**
+   * Build a Response whose body emits chunks at fixed intervals, then never
+   * closes. Used to simulate "events keep arriving" vs "stream went silent"
+   * scenarios for the idle-timeout watchdog.
+   *
+   * @param schedule - Array of `{ delayMs, chunk }`. Each chunk is enqueued
+   *   `delayMs` after the PREVIOUS chunk (cumulative scheduling).
+   */
+  function makeScheduledResponse(schedule: { delayMs: number; chunk: string }[]): {
+    response: Response;
+    forceClose: () => void;
+  } {
+    let streamController: ReadableStreamDefaultController<Uint8Array> | null = null;
+    let closed = false;
+    const timers: ReturnType<typeof setTimeout>[] = [];
+
+    const stream = new ReadableStream<Uint8Array>({
+      start(controller) {
+        streamController = controller;
+        const encoder = new TextEncoder();
+        let cumulativeDelay = 0;
+        for (const { delayMs, chunk } of schedule) {
+          cumulativeDelay += delayMs;
+          const t = setTimeout(() => {
+            if (closed) return;
+            try {
+              controller.enqueue(encoder.encode(chunk));
+            } catch {
+              // Already closed/errored — ignore.
+            }
+          }, cumulativeDelay);
+          timers.push(t);
+        }
+        // Intentionally never call controller.close().
+      },
+      cancel() {
+        closed = true;
+        for (const t of timers) clearTimeout(t);
+      },
+    });
+
+    const response = new Response(stream, {
+      status: 200,
+      headers: { 'content-type': 'text/event-stream' },
+    });
+
+    return {
+      response,
+      forceClose: () => {
+        if (closed) return;
+        closed = true;
+        for (const t of timers) clearTimeout(t);
+        try {
+          streamController?.close();
+        } catch {
+          // Already closed/errored — ignore.
+        }
+      },
+    };
+  }
+
+  it('rejects with /timed out/ when the stream goes silent past idleTimeoutSec', async () => {
+    // Emit `start` immediately, then nothing for 1s. With idleTimeoutSec=0.5,
+    // the watchdog must fire ~500ms after the `start` chunk arrives.
+    const { response, forceClose } = makeScheduledResponse([
+      { delayMs: 0, chunk: 'event: start\ndata: {"run_id":"abc"}\n\n' },
+    ]);
+
+    globalThis.fetch = (async () => response) as unknown as typeof fetch;
+
+    const client = new ApiClient('http://localhost:0');
+
+    const start = performance.now();
+    try {
+      await expect(
+        client.runCliTest({ test_id: 'abc' }, undefined, { idleTimeoutSec: 0.5 })
+      ).rejects.toThrow(/timed out/);
+    } finally {
+      forceClose();
+    }
+    const elapsed = performance.now() - start;
+
+    // Should fire within ~500ms; allow generous headroom for CI flake.
+    expect(elapsed).toBeLessThan(1500);
+    // And it should have actually waited at least the idle window. Allow
+    // a little slack on timer scheduling (>= 400ms is a safe lower bound
+    // for a 500ms watchdog under load).
+    expect(elapsed).toBeGreaterThanOrEqual(400);
+  });
+
+  it('does NOT time out when events arrive faster than idleTimeoutSec', async () => {
+    // Emit a chunk every 100ms for ~1s, then a `complete`. With
+    // idleTimeoutSec=1 the watchdog should keep getting reset and never fire.
+    const schedule: { delayMs: number; chunk: string }[] = [
+      { delayMs: 0, chunk: 'event: start\ndata: {"run_id":"abc"}\n\n' },
+    ];
+    for (let i = 0; i < 8; i++) {
+      schedule.push({
+        delayMs: 100,
+        chunk: `event: step_log\ndata: {"step_index":${i}}\n\n`,
+      });
+    }
+    schedule.push({
+      delayMs: 100,
+      chunk:
+        'event: complete\ndata: {"run_id":"abc","status":"passed","duration_seconds":1,"steps":[]}\n\n',
+    });
+
+    const { response, forceClose } = makeScheduledResponse(schedule);
+
+    globalThis.fetch = (async () => response) as unknown as typeof fetch;
+
+    const client = new ApiClient('http://localhost:0');
+
+    try {
+      const result = await client.runCliTest({ test_id: 'abc' }, undefined, {
+        idleTimeoutSec: 1,
+      });
+      expect(result.status).toBe('passed');
+      expect(result.run_id).toBe('abc');
+    } finally {
+      forceClose();
+    }
+  });
+
+  it('SSE comment pings (no parsed events) reset the watchdog via onChunk', async () => {
+    // Emit `start`, then `: ping\n\n` comment lines every 200ms. These produce
+    // ZERO parsed events but DO arrive as byte chunks — so onChunk should
+    // reset the watchdog. With idleTimeoutSec=0.5 this would otherwise fire
+    // at 500ms; we'll forcibly close after ~1.2s and assert no timeout error.
+    const schedule: { delayMs: number; chunk: string }[] = [
+      { delayMs: 0, chunk: 'event: start\ndata: {"run_id":"abc"}\n\n' },
+    ];
+    for (let i = 0; i < 5; i++) {
+      schedule.push({ delayMs: 200, chunk: `: ping ${i}\n\n` });
+    }
+    // After ~1s of pings, send `complete` to terminate cleanly.
+    schedule.push({
+      delayMs: 100,
+      chunk:
+        'event: complete\ndata: {"run_id":"abc","status":"passed","duration_seconds":1,"steps":[]}\n\n',
+    });
+
+    const { response, forceClose } = makeScheduledResponse(schedule);
+
+    globalThis.fetch = (async () => response) as unknown as typeof fetch;
+
+    const client = new ApiClient('http://localhost:0');
+
+    try {
+      const result = await client.runCliTest({ test_id: 'abc' }, undefined, {
+        idleTimeoutSec: 0.5,
+      });
+      // We made it to `complete` without the watchdog firing — pings reset it.
+      expect(result.status).toBe('passed');
+    } finally {
+      forceClose();
+    }
+  });
+
+  it('propagates external abort via the caller-supplied signal', async () => {
+    // Emit `start`, then nothing — we'll abort the caller signal mid-stream
+    // and assert runCliTest rejects (re-throws AbortError) rather than
+    // swallowing or stalling. Pins Phase 3's "external" terminationReason
+    // distinction vs "complete" / "idle-timeout".
+    const { response, forceClose } = makeScheduledResponse([
+      { delayMs: 0, chunk: 'event: start\ndata: {"run_id":"abc"}\n\n' },
+    ]);
+
+    globalThis.fetch = (async () => response) as unknown as typeof fetch;
+
+    const client = new ApiClient('http://localhost:0');
+
+    const callerController = new AbortController();
+    // Schedule the external abort just after the stream starts.
+    setTimeout(() => callerController.abort(), 20);
+
+    const start = performance.now();
+    try {
+      // The promise must reject — NOT swallow the abort and NOT stall.
+      await expect(
+        client.runCliTest({ test_id: 'abc' }, undefined, {
+          signal: callerController.signal,
+        })
+      ).rejects.toThrow();
+    } finally {
+      forceClose();
+    }
+    const elapsed = performance.now() - start;
+
+    // Should reject within ~200ms of the external abort firing.
+    expect(elapsed).toBeLessThan(500);
+  });
+
+  it('idleTimeoutSec=0 disables the watchdog entirely', async () => {
+    // Emit `start`, wait 600ms (longer than any reasonable timeout), then
+    // `complete`. With idleTimeoutSec=0 nothing should fire.
+    const { response, forceClose } = makeScheduledResponse([
+      { delayMs: 0, chunk: 'event: start\ndata: {"run_id":"abc"}\n\n' },
+      {
+        delayMs: 600,
+        chunk:
+          'event: complete\ndata: {"run_id":"abc","status":"passed","duration_seconds":0.6,"steps":[]}\n\n',
+      },
+    ]);
+
+    globalThis.fetch = (async () => response) as unknown as typeof fetch;
+
+    const client = new ApiClient('http://localhost:0');
+
+    try {
+      const result = await client.runCliTest({ test_id: 'abc' }, undefined, {
+        idleTimeoutSec: 0,
+      });
+      expect(result.status).toBe('passed');
+    } finally {
+      forceClose();
+    }
+  });
+});
package/lib/api/sse.ts
CHANGED

@@ -73,26 +73,116 @@ export function parseSSE(chunk: string): SSEEvent[] {
   return events;
 }
 
+/**
+ * Options for SSE consumption helpers
+ */
+export interface StreamSSEOptions {
+  /**
+   * Optional abort signal. When the signal is aborted, the read loop exits
+   * cleanly (the generator returns without throwing) so callers using
+   * `for await` can rely on graceful termination.
+   */
+  signal?: AbortSignal;
+  /**
+   * Optional callback invoked once per successful `reader.read()` chunk —
+   * BEFORE parsing. This fires even for SSE comment pings (which produce
+   * zero parsed events) so callers can use it as a heartbeat / idle-timeout
+   * reset hook. Wired from `runCliTest` to reset the idle-timeout watchdog.
+   */
+  onChunk?: () => void;
+}
+
 /**
  * Stream SSE events from a Response object
  *
  * @param response - Fetch Response with SSE stream
+ * @param options - Optional configuration (e.g. abort signal)
  * @yields SSE events as they arrive
  */
-export async function* streamSSE(response: Response): AsyncGenerator<SSEEvent, void, unknown> {
+export async function* streamSSE(
+  response: Response,
+  options: StreamSSEOptions = {}
+): AsyncGenerator<SSEEvent, void, unknown> {
   if (!response.body) {
     throw new Error('Response body is null');
   }
 
+  const { signal, onChunk } = options;
+
+  // Fast-path: if the signal is already aborted, return immediately without
+  // touching the response body.
+  if (signal?.aborted) {
+    return;
+  }
+
   const reader = response.body.getReader();
   const decoder = new TextDecoder();
   let buffer = '';
 
+  // Wire the signal to a Promise that resolves on abort so we can race it
+  // against `reader.read()`. This is needed because a generic ReadableStream
+  // (e.g. one not backed by `fetch`) won't surface the AbortSignal on its own
+  // — the reader will simply keep awaiting. For `fetch`-backed streams, the
+  // race is still safe: the read will reject with AbortError (caught below)
+  // at roughly the same time as the abort fires.
+  const ABORTED = Symbol('aborted');
+  let abortListener: (() => void) | null = null;
+  const abortPromise = signal
+    ? new Promise<typeof ABORTED>((resolve) => {
+        abortListener = () => resolve(ABORTED);
+        signal.addEventListener('abort', abortListener, { once: true });
+      })
+    : null;
+
   try {
     while (true) {
-      const { done, value } = await reader.read();
+      // Check for abort before each read so we exit promptly even if the
+      // underlying socket hasn't surfaced the AbortError yet.
+      if (signal?.aborted) {
+        return;
+      }
+
+      let chunk: { done: boolean; value: Uint8Array | undefined };
+      try {
+        const readPromise = reader.read();
+        const raceResult = abortPromise
+          ? await Promise.race([readPromise, abortPromise])
+          : await readPromise;
+
+        if (raceResult === ABORTED) {
+          // Cancel the underlying reader so it doesn't keep the stream alive.
+          // `cancel()` may reject if already errored — swallow it.
+          reader.cancel().catch(() => {});
+          return;
+        }
+
+        chunk = raceResult as { done: boolean; value: Uint8Array | undefined };
+      } catch (err) {
+        // `fetch`'s reader rejects with an AbortError (DOMException) when the
+        // associated signal is aborted. Swallow it and exit cleanly so callers
+        // see the generator finish without an exception.
+        if (err instanceof Error && err.name === 'AbortError') {
+          return;
+        }
+        throw err;
+      }
+
+      const { done, value } = chunk;
       if (done) break;
 
+      // Fire onChunk on every successful read — BEFORE parsing — so callers
+      // can use it as an idle-timeout heartbeat. SSE comment pings produce
+      // zero parsed events but DO arrive as byte chunks here, which is the
+      // whole point: keep the watchdog alive even when only pings flow.
+      if (onChunk) {
+        try {
+          onChunk();
+        } catch {
+          // Defensively swallow — a misbehaving onChunk must not break the
+          // read loop. Hook is "best effort" by design.
+        }
+      }
+
       buffer += decoder.decode(value, { stream: true });
 
       // Find complete events (separated by double newlines)
@@ -120,7 +210,15 @@ export async function* streamSSE(response: Response): AsyncGenerator<SSEEvent, void, unknown> {
       }
     }
   } finally {
-    reader.releaseLock();
+    if (signal && abortListener) {
+      signal.removeEventListener('abort', abortListener);
+    }
+    try {
+      reader.releaseLock();
+    } catch {
+      // releaseLock can throw if the reader was already canceled by an abort;
+      // safe to ignore — we're already on the cleanup path.
+    }
   }
 }
 
@@ -129,13 +227,15 @@ export async function* streamSSE(response: Response): AsyncGenerator<SSEEvent, void, unknown> {
  *
  * @param response - Fetch Response with SSE stream
  * @param onEvent - Callback to handle each event
+ * @param options - Optional configuration (e.g. abort signal); forwarded to streamSSE
  * @returns Promise that resolves when stream ends
  */
 export async function consumeSSE(
   response: Response,
-  onEvent: (event: SSEEvent) => void | Promise<void>
+  onEvent: (event: SSEEvent) => void | Promise<void>,
+  options: StreamSSEOptions = {}
 ): Promise<void> {
-  for await (const event of streamSSE(response)) {
+  for await (const event of streamSSE(response, options)) {
     await onEvent(event);
   }
 }
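
Illustrative usage sketch (not part of the published diff): a minimal example of how the `StreamSSEOptions` added in this release could be consumed. The import path mirrors the test file's relative import; the `followRun` helper, the endpoint URL argument, the 30-second idle window, and the watchdog wiring are assumptions made for illustration only.

// Sketch only: consume streamSSE with the new `signal` / `onChunk` options.
// Assumes an SSE endpoint reachable via fetch; names below are hypothetical.
import { streamSSE } from './sse.js';

async function followRun(url: string): Promise<void> {
  const controller = new AbortController();
  let idleTimer: ReturnType<typeof setTimeout> | undefined;

  // Reset a simple idle watchdog on every received byte chunk, including SSE
  // comment pings that produce no parsed events (that is what onChunk is for).
  const resetIdle = () => {
    if (idleTimer) clearTimeout(idleTimer);
    idleTimer = setTimeout(() => controller.abort(), 30_000);
  };

  const response = await fetch(url, {
    headers: { accept: 'text/event-stream' },
    signal: controller.signal,
  });
  resetIdle();

  try {
    for await (const event of streamSSE(response, {
      signal: controller.signal,
      onChunk: resetIdle,
    })) {
      console.log(event.event, event.data);
      if (event.event === 'complete' || event.event === 'error') {
        // Terminal event: stop reading even if the server keeps the stream open.
        controller.abort();
      }
    }
  } finally {
    if (idleTimer) clearTimeout(idleTimer);
  }
}

The abort-on-terminal-event pattern matches what the new tests pin for runCliTest: the caller stops reading as soon as a `complete` or `error` event arrives, rather than waiting for the server to close the stream.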