queasy 0.2.0 → 0.3.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (98)
  1. package/.github/workflows/check.yml +3 -0
  2. package/.github/workflows/publish.yml +3 -0
  3. package/CLAUDE.md +5 -4
  4. package/Readme.md +9 -4
  5. package/biome.json +5 -1
  6. package/dist/client.d.ts +33 -0
  7. package/dist/client.d.ts.map +1 -0
  8. package/dist/client.js +199 -0
  9. package/dist/client.js.map +1 -0
  10. package/dist/constants.d.ts +10 -0
  11. package/dist/constants.d.ts.map +1 -0
  12. package/{src → dist}/constants.js +2 -10
  13. package/dist/constants.js.map +1 -0
  14. package/dist/errors.d.ts +7 -0
  15. package/dist/errors.d.ts.map +1 -0
  16. package/{src → dist}/errors.js +1 -13
  17. package/dist/errors.js.map +1 -0
  18. package/dist/index.d.ts +3 -0
  19. package/dist/index.d.ts.map +1 -0
  20. package/dist/index.js +3 -0
  21. package/dist/index.js.map +1 -0
  22. package/dist/manager.d.ts +19 -0
  23. package/dist/manager.d.ts.map +1 -0
  24. package/dist/manager.js +67 -0
  25. package/dist/manager.js.map +1 -0
  26. package/dist/pool.d.ts +29 -0
  27. package/dist/pool.d.ts.map +1 -0
  28. package/{src → dist}/pool.js +23 -82
  29. package/dist/pool.js.map +1 -0
  30. package/dist/queasy.lua +390 -0
  31. package/dist/queue.d.ts +22 -0
  32. package/dist/queue.d.ts.map +1 -0
  33. package/dist/queue.js +81 -0
  34. package/dist/queue.js.map +1 -0
  35. package/dist/types.d.ts +92 -0
  36. package/dist/types.d.ts.map +1 -0
  37. package/dist/types.js +2 -0
  38. package/dist/types.js.map +1 -0
  39. package/dist/utils.d.ts +4 -0
  40. package/dist/utils.d.ts.map +1 -0
  41. package/dist/utils.js +24 -0
  42. package/dist/utils.js.map +1 -0
  43. package/dist/worker.d.ts +2 -0
  44. package/dist/worker.d.ts.map +1 -0
  45. package/dist/worker.js +42 -0
  46. package/dist/worker.js.map +1 -0
  47. package/docker-compose.yml +0 -2
  48. package/fuzztest/Readme.md +185 -0
  49. package/fuzztest/fuzz.ts +356 -0
  50. package/fuzztest/handlers/cascade-a.ts +90 -0
  51. package/fuzztest/handlers/cascade-b.ts +71 -0
  52. package/fuzztest/handlers/fail-handler.ts +47 -0
  53. package/fuzztest/handlers/periodic.ts +89 -0
  54. package/fuzztest/process.ts +100 -0
  55. package/fuzztest/shared/chaos.ts +29 -0
  56. package/fuzztest/shared/stream.ts +40 -0
  57. package/package.json +8 -7
  58. package/plans/redis-options.md +279 -0
  59. package/src/client.ts +246 -0
  60. package/src/constants.ts +33 -0
  61. package/src/errors.ts +13 -0
  62. package/src/index.ts +2 -0
  63. package/src/manager.ts +78 -0
  64. package/src/pool.ts +129 -0
  65. package/src/queasy.lua +2 -3
  66. package/src/queue.ts +108 -0
  67. package/src/types.ts +16 -0
  68. package/src/{utils.js → utils.ts} +3 -20
  69. package/src/{worker.js → worker.ts} +5 -12
  70. package/test/{client.test.js → client.test.ts} +6 -7
  71. package/test/{errors.test.js → errors.test.ts} +1 -1
  72. package/test/fixtures/always-fail-handler.ts +5 -0
  73. package/test/fixtures/data-logger-handler.ts +11 -0
  74. package/test/fixtures/failure-handler.ts +6 -0
  75. package/test/fixtures/permanent-error-handler.ts +6 -0
  76. package/test/fixtures/slow-handler.ts +6 -0
  77. package/test/fixtures/success-handler.js +0 -5
  78. package/test/fixtures/success-handler.ts +6 -0
  79. package/test/fixtures/with-failure-handler.ts +5 -0
  80. package/test/{guards.test.js → guards.test.ts} +21 -34
  81. package/test/{manager.test.js → manager.test.ts} +26 -34
  82. package/test/{pool.test.js → pool.test.ts} +14 -16
  83. package/test/{queue.test.js → queue.test.ts} +21 -21
  84. package/test/{redis-functions.test.js → redis-functions.test.ts} +14 -20
  85. package/test/{utils.test.js → utils.test.ts} +1 -1
  86. package/tsconfig.json +20 -0
  87. package/jsconfig.json +0 -17
  88. package/src/client.js +0 -258
  89. package/src/index.js +0 -2
  90. package/src/manager.js +0 -94
  91. package/src/queue.js +0 -154
  92. package/test/fixtures/always-fail-handler.js +0 -8
  93. package/test/fixtures/data-logger-handler.js +0 -19
  94. package/test/fixtures/failure-handler.js +0 -9
  95. package/test/fixtures/permanent-error-handler.js +0 -10
  96. package/test/fixtures/slow-handler.js +0 -9
  97. package/test/fixtures/with-failure-handler.js +0 -8
  98. /package/test/fixtures/{no-handle-handler.js → no-handle-handler.ts} +0 -0
@@ -0,0 +1,356 @@
1
+ /**
2
+ * Fuzz test orchestrator.
3
+ *
4
+ * - Spawns NUM_PROCESSES child processes, each running fuzztest/process.ts
5
+ * - Dispatches seed periodic jobs at startup
6
+ * - Reads events from the fuzz:events Redis stream
7
+ * - Checks system invariants after each event
8
+ * - Logs violations without terminating
9
+ * - Prints a summary every 60 seconds and on SIGINT
10
+ */
11
+
12
+ import { fork } from 'node:child_process';
13
+ import type { ChildProcess } from 'node:child_process';
14
+ import { createWriteStream } from 'node:fs';
15
+ import { dirname, join } from 'node:path';
16
+ import { fileURLToPath } from 'node:url';
17
+ import { createClient } from 'redis';
18
+ import type { RedisClientType } from 'redis';
19
+ import { Client } from '../src/index.ts';
20
+ import { STREAM_KEY, readEvents } from './shared/stream.ts';
21
+
22
+ const __dirname = dirname(fileURLToPath(import.meta.url));
23
+
24
+ // ── Configuration ──────────────────────────────────────────────────────────────
25
+
26
+ const NUM_PROCESSES = 4;
27
+ const NUM_PERIODIC_JOBS = 5;
28
+ const CRASH_INTERVAL_MS = 30_000; // kill a random process this often
29
+ const CLOCK_TOLERANCE_MS = 200; // allow this much early-start slop
30
+ const STALL_THRESHOLD_MS = 30_000; // no start event → progress violation
31
+ const PRIORITY_LAG_MS = 500; // ordering slop between queues
32
+ const PROCESS_RESTART_DELAY_MS = 500;
33
+ const SUMMARY_INTERVAL_MS = 60_000;
34
+ const LOG_FILE = join(__dirname, '..', 'fuzz-output.log');
35
+
36
+ // ── Logging ────────────────────────────────────────────────────────────────────
37
+
38
+ const logStream = createWriteStream(LOG_FILE, { flags: 'a' });
39
+
40
+ function log(level: string, msg: string, data: Record<string, unknown> = {}): void {
41
+ let entry = `${process.uptime().toFixed(2)} ${level.toUpperCase().padEnd(5)}`;
42
+ const { type = '', queue = '', id = '', ...rest } = data;
43
+ entry = `${entry} ${msg.padEnd(10)} ${(type as string).padEnd(10)} ${(queue as string).padEnd(15)} ${(id as string).padEnd(15)} ${JSON.stringify(rest)}`;
44
+ process.stdout.write(`${entry}\n`);
45
+ logStream.write(`${entry}\n`);
46
+ }
47
+
48
+ // ── Invariant state ────────────────────────────────────────────────────────────
49
+
50
+ interface ActiveJob {
51
+ queue: string;
52
+ startedAt: number;
53
+ runAt: number;
54
+ pid: number;
55
+ }
56
+
57
+ interface WaitingJob {
58
+ id: string;
59
+ runAt: number;
60
+ dispatchedAt: number;
61
+ }
62
+
63
+ const activeJobs = new Map<string, ActiveJob>();
64
+ const succeededJobs = new Set<string>();
65
+
66
+ /**
67
+ * Per-queue: list of {id, runAt, dispatchedAt} for jobs seen but not yet started.
68
+ * Used to check priority ordering.
69
+ */
70
+ const waitingByQueue = new Map<string, WaitingJob[]>();
71
+
72
+ /** last start event timestamp per queue */
73
+ const lastStartPerQueue = new Map<string, number>();
74
+
75
+ let violationCount = 0;
76
+ let eventCount = 0;
77
+
78
+ function violation(invariant: string, msg: string, data: Record<string, unknown> = {}): void {
79
+ violationCount++;
80
+ log('error', `[${invariant}] ${msg}`, data);
81
+ }
82
+
83
+ // ── Invariant checks ───────────────────────────────────────────────────────────
84
+
85
+ interface IpcDequeueMsg {
86
+ queue: string;
87
+ jobId: string;
88
+ runAt: number;
89
+ }
90
+
91
+ /**
92
+ * Called when a child process dequeues a job (via IPC).
93
+ */
94
+ function onIpcJobStart(pid: number, msg: IpcDequeueMsg): void {
95
+ const { jobId: id, queue, runAt } = msg;
96
+
97
+ // Mutual exclusion: job must not already be active
98
+ if (activeJobs.has(id)) {
99
+ const existing = activeJobs.get(id);
100
+ violation('MutualExclusion', `Job ${id} dequeued while already active`, {
101
+ existing,
102
+ newDequeue: { queue, pid },
103
+ });
104
+ }
105
+
106
+ activeJobs.set(id, { queue, pid, startedAt: Date.now(), runAt });
107
+ }
108
+
109
+ /**
110
+ * Called when a child process finishes/retries/fails a job (via IPC).
111
+ */
112
+ function onIpcJobDone(jobId: string): void {
113
+ activeJobs.delete(jobId);
114
+ }
115
+
116
+ function onStart(event: Record<string, string>): void {
117
+ const { id, queue, runAt: runAtStr, startedAt: startedAtStr } = event;
118
+ const runAt = Number(runAtStr);
119
+ const startedAt = Number(startedAtStr);
120
+
121
+ // No re-processing of succeeded jobs (except periodic which re-queues itself)
122
+ if (succeededJobs.has(id) && !id.startsWith('fuzz-periodic-')) {
123
+ violation('NoReprocess', `Job ${id} started after already succeeding`, {
124
+ queue,
125
+ startedAt,
126
+ });
127
+ }
128
+
129
+ // Scheduling: not before runAt
130
+ if (runAt > 0 && startedAt < runAt - CLOCK_TOLERANCE_MS) {
131
+ violation('Scheduling', `Job ${id} started ${runAt - startedAt}ms too early`, {
132
+ queue,
133
+ runAt,
134
+ startedAt,
135
+ delta: runAt - startedAt,
136
+ });
137
+ }
138
+
139
+ // Priority ordering: no eligible lower-runAt job in same queue waiting
140
+ const waiting = waitingByQueue.get(queue) ?? [];
141
+ for (const w of waiting) {
142
+ if (w.id === id) continue;
143
+ if (w.runAt <= startedAt - CLOCK_TOLERANCE_MS) {
144
+ if (runAt > w.runAt + PRIORITY_LAG_MS) {
145
+ violation(
146
+ 'Ordering',
147
+ `Job ${id} (runAt=${runAt}) started before ${w.id} (runAt=${w.runAt}) in ${queue}`,
148
+ {
149
+ startedId: id,
150
+ startedRunAt: runAt,
151
+ waitingId: w.id,
152
+ waitingRunAt: w.runAt,
153
+ }
154
+ );
155
+ break;
156
+ }
157
+ }
158
+ }
159
+
160
+ lastStartPerQueue.set(queue, startedAt);
161
+
162
+ // Remove from waiting list
163
+ if (waitingByQueue.has(queue)) {
164
+ waitingByQueue.set(
165
+ queue,
166
+ (waitingByQueue.get(queue) ?? []).filter((w) => w.id !== id)
167
+ );
168
+ }
169
+ }
170
+
171
+ function onFinish(event: Record<string, string>): void {
172
+ succeededJobs.add(event.id);
173
+ }
174
+
175
+ /**
176
+ * Called when a child process exits. Clears all active jobs belonging to that
177
+ * PID so they don't trigger spurious MutualExclusion violations when the
178
+ * queasy sweep retries them and a new process picks them up.
179
+ */
180
+ function onProcessExit(pid: number): void {
181
+ for (const [id, entry] of activeJobs) {
182
+ if (entry.pid === pid) {
183
+ activeJobs.delete(id);
184
+ }
185
+ }
186
+ }
187
+
188
+ /**
189
+ * Called periodically to check queue progress and priority starvation.
190
+ */
191
+ function checkProgress(): void {
192
+ const now = Date.now();
193
+ for (const [queue, lastStart] of lastStartPerQueue) {
194
+ const idle = now - lastStart;
195
+ if (idle > STALL_THRESHOLD_MS) {
196
+ violation('Progress', `Queue ${queue} has not processed a job in ${idle}ms`, {
197
+ queue,
198
+ lastStartAt: lastStart,
199
+ idleMs: idle,
200
+ });
201
+ }
202
+ }
203
+
204
+ // Priority starvation: cascade-b should not start while periodic has old eligible jobs
205
+ const periodicWaiting = waitingByQueue.get('{fuzz}:periodic') ?? [];
206
+ const eligiblePeriodic = periodicWaiting.filter((w) => w.runAt <= now - PRIORITY_LAG_MS);
207
+ if (eligiblePeriodic.length > 0) {
208
+ const lastBStart = lastStartPerQueue.get('{fuzz}:cascade-b') ?? 0;
209
+ const bStartedAfterPeriodic = eligiblePeriodic.some(
210
+ (w) => lastBStart > w.dispatchedAt + PRIORITY_LAG_MS
211
+ );
212
+ if (bStartedAfterPeriodic) {
213
+ violation(
214
+ 'PriorityStarvation',
215
+ 'cascade-b processed while periodic had eligible waiting jobs',
216
+ {
217
+ eligiblePeriodicCount: eligiblePeriodic.length,
218
+ }
219
+ );
220
+ }
221
+ }
222
+ }
223
+
224
+ // ── Event dispatch ─────────────────────────────────────────────────────────────
225
+
226
+ function handleEvent(event: Record<string, string>): void {
227
+ eventCount++;
228
+ const { type } = event;
229
+ log('info', 'event', event);
230
+
231
+ if (type === 'start') {
232
+ const { id, queue, runAt } = event;
233
+ // Register in waiting list for ordering checks (before onStart removes it)
234
+ const runAtNum = Number(runAt);
235
+ const q = waitingByQueue.get(queue) ?? [];
236
+ if (!q.find((w) => w.id === id)) {
237
+ q.push({ id, runAt: runAtNum, dispatchedAt: Date.now() });
238
+ waitingByQueue.set(queue, q);
239
+ }
240
+ onStart(event);
241
+ } else if (type === 'finish') {
242
+ onFinish(event);
243
+ }
244
+ }
245
+
246
+ // ── Summary ────────────────────────────────────────────────────────────────────
247
+
248
+ function printSummary(): void {
249
+ const summary = {
250
+ events: eventCount,
251
+ violations: violationCount,
252
+ activeJobs: activeJobs.size,
253
+ succeededJobs: succeededJobs.size,
254
+ lastStartPerQueue: Object.fromEntries(lastStartPerQueue),
255
+ };
256
+ log('info', 'Summary', summary);
257
+ }
258
+
259
+ // ── Child process management ───────────────────────────────────────────────────
260
+
261
+ const processes = new Set<ChildProcess>();
262
+
263
+ function spawnProcess(): ChildProcess {
264
+ const child = fork(join(__dirname, 'process.ts'));
265
+ processes.add(child);
266
+
267
+ child.on('message', (msg: { type: string; queue: string; jobId: string; runAt: number }) => {
268
+ if (msg.type === 'dequeue') {
269
+ onIpcJobStart(child.pid!, msg);
270
+ } else if (msg.type === 'finish' || msg.type === 'retry' || msg.type === 'fail') {
271
+ onIpcJobDone(msg.jobId);
272
+ }
273
+ });
274
+
275
+ child.on('exit', (code, signal) => {
276
+ processes.delete(child);
277
+ log('info', 'Child process exited', { pid: child.pid, code, signal });
278
+ onProcessExit(child.pid!);
279
+ setTimeout(spawnProcess, PROCESS_RESTART_DELAY_MS);
280
+ });
281
+
282
+ child.on('error', (err) => {
283
+ log('info', 'Child process error', { pid: child.pid, error: err.message });
284
+ });
285
+
286
+ return child;
287
+ }
288
+
289
+ function killRandomProcess(): void {
290
+ const list = [...processes];
291
+ if (list.length === 0) return;
292
+ const target = list[Math.floor(Math.random() * list.length)];
293
+ log('info', 'Killing random child process', { pid: target.pid });
294
+ target.kill('SIGKILL');
295
+ }
296
+
297
+ // ── Redis setup ────────────────────────────────────────────────────────────────
298
+
299
+ const redis = createClient() as RedisClientType;
300
+ await redis.connect();
301
+
302
+ // Clean up state from previous runs
303
+ await redis.del(STREAM_KEY);
304
+ log('info', 'Cleared fuzz:events stream from previous run');
305
+
306
+ // Dispatch seed periodic jobs (await ready to avoid Function not found race)
307
+ const dispatchClient = await new Promise<Client>((resolve) => new Client({}, 0, resolve));
308
+ const periodicQueue = dispatchClient.queue('{fuzz}:periodic', true);
309
+
310
+ for (let i = 0; i < NUM_PERIODIC_JOBS; i++) {
311
+ const id = `fuzz-periodic-${i}`;
312
+ await periodicQueue.dispatch({ periodic: true, index: i }, { id });
313
+ log('info', `Dispatched seed job ${id}`);
314
+ }
315
+
316
+ // ── Spawn child processes ──────────────────────────────────────────────────────
317
+
318
+ for (let i = 0; i < NUM_PROCESSES; i++) spawnProcess();
319
+
320
+ // Periodically kill a random process to simulate crashes
321
+ const crashTimer = setInterval(killRandomProcess, CRASH_INTERVAL_MS);
322
+
323
+ // Periodic progress + starvation check
324
+ const progressTimer = setInterval(checkProgress, 10_000);
325
+
326
+ // Summary timer
327
+ const summaryTimer = setInterval(printSummary, SUMMARY_INTERVAL_MS);
328
+
329
+ log('info', 'Orchestrator started', {
330
+ numProcesses: NUM_PROCESSES,
331
+ numPeriodicJobs: NUM_PERIODIC_JOBS,
332
+ logFile: LOG_FILE,
333
+ });
334
+
335
+ // ── SIGINT handler ─────────────────────────────────────────────────────────────
336
+
337
+ process.on('SIGINT', () => {
338
+ clearInterval(crashTimer);
339
+ clearInterval(progressTimer);
340
+ clearInterval(summaryTimer);
341
+
342
+ for (const child of processes) {
343
+ child.kill('SIGKILL');
344
+ }
345
+
346
+ printSummary();
347
+ process.exit(violationCount > 0 ? 1 : 0);
348
+ });
349
+
350
+ // ── Event loop ─────────────────────────────────────────────────────────────────
351
+
352
+ // Read from the beginning. We cleared the stream above, so '0' reads all
353
+ // events from the fresh start without missing anything.
354
+ for await (const event of readEvents(redis, '0')) {
355
+ handleEvent(event);
356
+ }
@@ -0,0 +1,90 @@
1
+ /**
2
+ * cascade-a handler — dispatches one or two cascade-b jobs on normal completion.
3
+ * Subject to all chaos behaviors.
4
+ */
5
+
6
+ import { BroadcastChannel } from 'node:worker_threads';
7
+ import { createClient } from 'redis';
8
+ import type { RedisClientType } from 'redis';
9
+ import { Client, PermanentError } from '../../src/index.ts';
10
+ import type { Job } from '../../src/types.ts';
11
+ import { pickChaos } from '../shared/chaos.ts';
12
+ import { emitEvent } from '../shared/stream.ts';
13
+
14
+ const eventRedis = createClient() as RedisClientType;
15
+ await eventRedis.connect();
16
+
17
+ // Dispatch-only queasy client (await ready to avoid Function not found race)
18
+ const client = await new Promise<Client>((resolve) => new Client({}, 0, resolve));
19
+ const cascadeBQueue = client.queue('{fuzz}:cascade-b', true);
20
+
21
+ const crashChannel = new BroadcastChannel('fuzz-crash');
22
+
23
+ // biome-ignore lint/suspicious/noExplicitAny: Job data is arbitrary
24
+ export async function handle(_data: any, job: Job): Promise<void> {
25
+ const startedAt = Date.now();
26
+ await emitEvent(eventRedis, {
27
+ type: 'start',
28
+ queue: '{fuzz}:cascade-a',
29
+ id: job.id,
30
+ pid: String(process.pid),
31
+ runAt: String(job.runAt),
32
+ startedAt: String(startedAt),
33
+ });
34
+
35
+ const chaos = pickChaos();
36
+ await emitEvent(eventRedis, {
37
+ type: 'chaos',
38
+ queue: '{fuzz}:cascade-a',
39
+ id: job.id,
40
+ chaos,
41
+ });
42
+
43
+ if (chaos === 'crash') {
44
+ crashChannel.postMessage({ type: 'crash' });
45
+ await new Promise(() => {});
46
+ }
47
+
48
+ if (chaos === 'stall') {
49
+ await new Promise(() => {});
50
+ }
51
+
52
+ if (chaos === 'spin') {
53
+ const end = Date.now() + 10_000;
54
+ while (Date.now() < end) {
55
+ /* busy wait */
56
+ }
57
+ }
58
+
59
+ if (chaos === 'permanent') {
60
+ throw new PermanentError('cascade-a: permanent chaos');
61
+ }
62
+
63
+ if (chaos === 'retriable') {
64
+ throw new Error('cascade-a: retriable chaos');
65
+ }
66
+
67
+ // Normal completion: dispatch 1-2 cascade-b jobs
68
+ const count = Math.random() < 0.5 ? 1 : 2;
69
+ const runAtOffset = Math.random() * 2000;
70
+ const dispatchPromises: Promise<string>[] = [];
71
+ for (let i = 0; i < count; i++) {
72
+ dispatchPromises.push(
73
+ cascadeBQueue.dispatch(
74
+ { from: job.id, index: i },
75
+ {
76
+ runAt: Date.now() + runAtOffset,
77
+ }
78
+ )
79
+ );
80
+ }
81
+ const ids = await Promise.all(dispatchPromises);
82
+
83
+ await emitEvent(eventRedis, {
84
+ type: 'finish',
85
+ queue: '{fuzz}:cascade-a',
86
+ id: job.id,
87
+ finishedAt: process.uptime().toFixed(2),
88
+ dispatched: ids.join(','),
89
+ });
90
+ }
@@ -0,0 +1,71 @@
1
+ /**
2
+ * cascade-b handler — terminal handler, dispatches no further jobs.
3
+ * Subject to all chaos behaviors.
4
+ */
5
+
6
+ import { BroadcastChannel } from 'node:worker_threads';
7
+ import { createClient } from 'redis';
8
+ import type { RedisClientType } from 'redis';
9
+ import { PermanentError } from '../../src/index.ts';
10
+ import type { Job } from '../../src/types.ts';
11
+ import { pickChaos } from '../shared/chaos.ts';
12
+ import { emitEvent } from '../shared/stream.ts';
13
+
14
+ const eventRedis = createClient() as RedisClientType;
15
+ await eventRedis.connect();
16
+
17
+ const crashChannel = new BroadcastChannel('fuzz-crash');
18
+
19
+ // biome-ignore lint/suspicious/noExplicitAny: Job data is arbitrary
20
+ export async function handle(_data: any, job: Job): Promise<void> {
21
+ const startedAt = Date.now();
22
+ await emitEvent(eventRedis, {
23
+ type: 'start',
24
+ queue: '{fuzz}:cascade-b',
25
+ id: job.id,
26
+ pid: String(process.pid),
27
+ runAt: String(job.runAt),
28
+ startedAt: String(startedAt),
29
+ });
30
+
31
+ const chaos = pickChaos();
32
+ await emitEvent(eventRedis, {
33
+ type: 'chaos',
34
+ queue: '{fuzz}:cascade-b',
35
+ id: job.id,
36
+ chaos,
37
+ });
38
+
39
+ if (chaos === 'crash') {
40
+ crashChannel.postMessage({ type: 'crash' });
41
+ // Stall so the process exits before we return
42
+ await new Promise(() => {});
43
+ }
44
+
45
+ if (chaos === 'stall') {
46
+ await new Promise(() => {});
47
+ }
48
+
49
+ if (chaos === 'spin') {
50
+ const end = Date.now() + 10_000;
51
+ while (Date.now() < end) {
52
+ /* busy wait */
53
+ }
54
+ }
55
+
56
+ if (chaos === 'permanent') {
57
+ throw new PermanentError('cascade-b: permanent chaos');
58
+ }
59
+
60
+ if (chaos === 'retriable') {
61
+ throw new Error('cascade-b: retriable chaos');
62
+ }
63
+
64
+ // Normal completion
65
+ await emitEvent(eventRedis, {
66
+ type: 'finish',
67
+ queue: '{fuzz}:cascade-b',
68
+ id: job.id,
69
+ finishedAt: String(Date.now()),
70
+ });
71
+ }
@@ -0,0 +1,47 @@
1
+ /**
2
+ * Fail handler — invoked for every job that exhausts retries/stalls on any queue.
3
+ *
4
+ * Received data: [originalJobId, originalData, errorObject]
5
+ *
6
+ * For periodic jobs (id starts with 'fuzz-periodic-'), re-dispatches the job
7
+ * so that periodic jobs keep running indefinitely even after permanent failure.
8
+ * For cascade jobs, simply records the failure.
9
+ */
10
+
11
+ import { createClient } from 'redis';
12
+ import type { RedisClientType } from 'redis';
13
+ import { Client } from '../../src/index.ts';
14
+ import type { Job } from '../../src/types.ts';
15
+ import { emitEvent } from '../shared/stream.ts';
16
+
17
+ const eventRedis = createClient() as RedisClientType;
18
+ await eventRedis.connect();
19
+
20
+ // Dispatch-only queasy client (await ready to avoid Function not found race)
21
+ const client = await new Promise<Client>((resolve) => new Client({}, 0, resolve));
22
+
23
+ // Queue references (keys already include braces)
24
+ const periodicQueue = client.queue('{fuzz}:periodic', true);
25
+
26
+ export async function handle(data: [string, unknown, { message: string }], job: Job): Promise<void> {
27
+ const [originalId, originalData, error] = data;
28
+
29
+ await emitEvent(eventRedis, {
30
+ type: 'fail',
31
+ queue: 'fail',
32
+ id: originalId,
33
+ failJobId: job.id,
34
+ error: error?.message ?? String(error),
35
+ });
36
+
37
+ // Re-dispatch periodic jobs so they continue indefinitely.
38
+ if (originalId.startsWith('fuzz-periodic-')) {
39
+ const delay = 1000 + Math.random() * 4000;
40
+ await periodicQueue.dispatch(originalData, {
41
+ id: originalId,
42
+ runAt: Date.now() + delay,
43
+ updateRunAt: true,
44
+ updateData: true,
45
+ });
46
+ }
47
+ }
@@ -0,0 +1,89 @@
1
+ /**
2
+ * periodic handler — re-queues itself with the same job ID so it fires indefinitely.
3
+ * Also dispatches cascade-a jobs on normal completion.
4
+ * Subject to all chaos behaviors.
5
+ */
6
+
7
+ import { BroadcastChannel } from 'node:worker_threads';
8
+ import { createClient } from 'redis';
9
+ import type { RedisClientType } from 'redis';
10
+ import { Client, PermanentError } from '../../src/index.ts';
11
+ import type { Job } from '../../src/types.ts';
12
+ import { pickChaos } from '../shared/chaos.ts';
13
+ import { emitEvent } from '../shared/stream.ts';
14
+
15
+ const eventRedis = createClient() as RedisClientType;
16
+ await eventRedis.connect();
17
+
18
+ // Dispatch-only queasy client (await ready to avoid Function not found race)
19
+ const client = await new Promise<Client>((resolve) => new Client({}, 0, resolve));
20
+ const periodicQueue = client.queue('{fuzz}:periodic', true);
21
+ const cascadeAQueue = client.queue('{fuzz}:cascade-a', true);
22
+
23
+ const crashChannel = new BroadcastChannel('fuzz-crash');
24
+
25
+ // biome-ignore lint/suspicious/noExplicitAny: Job data is arbitrary
26
+ export async function handle(data: any, job: Job): Promise<void> {
27
+ const startedAt = Date.now();
28
+ await emitEvent(eventRedis, {
29
+ type: 'start',
30
+ queue: '{fuzz}:periodic',
31
+ id: job.id,
32
+ pid: String(process.pid),
33
+ runAt: String(job.runAt),
34
+ startedAt: String(startedAt),
35
+ });
36
+
37
+ const chaos = pickChaos();
38
+ await emitEvent(eventRedis, {
39
+ type: 'chaos',
40
+ queue: '{fuzz}:periodic',
41
+ id: job.id,
42
+ chaos,
43
+ });
44
+
45
+ if (chaos === 'crash') {
46
+ crashChannel.postMessage({ type: 'crash' });
47
+ await new Promise(() => {});
48
+ }
49
+
50
+ if (chaos === 'stall') {
51
+ await new Promise(() => {});
52
+ }
53
+
54
+ if (chaos === 'spin') {
55
+ const end = Date.now() + 10_000;
56
+ while (Date.now() < end) {
57
+ /* busy wait */
58
+ }
59
+ }
60
+
61
+ if (chaos === 'permanent') {
62
+ throw new PermanentError('periodic: permanent chaos');
63
+ }
64
+
65
+ if (chaos === 'retriable') {
66
+ throw new Error('periodic: retriable chaos');
67
+ }
68
+
69
+ // Normal completion: dispatch a cascade-a job and re-queue self
70
+ const cascadeRunAt = 0;
71
+ const selfDelay = 500;
72
+
73
+ const [cascadeId] = await Promise.all([
74
+ cascadeAQueue.dispatch({ from: job.id }, { runAt: cascadeRunAt }),
75
+ periodicQueue.dispatch(data, {
76
+ id: job.id,
77
+ runAt: Date.now() + selfDelay,
78
+ updateRunAt: true,
79
+ }),
80
+ ]);
81
+
82
+ await emitEvent(eventRedis, {
83
+ type: 'finish',
84
+ queue: '{fuzz}:periodic',
85
+ id: job.id,
86
+ finishedAt: String(Date.now()),
87
+ dispatched: cascadeId,
88
+ });
89
+ }