@redflow/client 0.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,761 @@
import { afterAll, beforeAll, beforeEach, expect, test } from "bun:test";
import { RedisClient } from "bun";

import {
  createClient,
  defineWorkflow,
  setDefaultClient,
  startWorker,
} from "../src/index";
import { keys } from "../src/internal/keys";
import { safeJsonStringify, safeJsonTryParse } from "../src/internal/json";
import { __unstableResetDefaultRegistryForTests, getDefaultRegistry } from "../src/registry";
import { waitFor } from "./helpers/waitFor";
import { startRedisTestServer } from "./helpers/redisTestServer";

let redisServer: Awaited<ReturnType<typeof startRedisTestServer>>;
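
// Editorial sketch (not part of the package): the shape the test-server helper
// is assumed to expose, inferred only from how `redisServer` is used below
// (`redisServer.url`, `await redisServer.stop()`).
type RedisTestServerSketch = {
  url: string;
  stop(): Promise<void>;
};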

beforeAll(async () => {
  redisServer = await startRedisTestServer();
});

afterAll(async () => {
  await redisServer.stop();
});

beforeEach(() => {
  __unstableResetDefaultRegistryForTests();
});

function testPrefix(): string {
  return `redflow:test:${crypto.randomUUID()}`;
}

// ---------------------------------------------------------------------------
// Bug #1: safeJsonTryParse should return undefined on invalid JSON
// ---------------------------------------------------------------------------
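
// Editorial sketch (not the package source): the contract these tests pin down
// for safeJsonTryParse -- parse when possible, otherwise return undefined
// rather than throwing. The real helper may differ in details.
function safeJsonTryParseSketch<T>(raw: string | null | undefined): T | undefined {
  if (raw == null || raw === "") return undefined;
  try {
    return JSON.parse(raw) as T;
  } catch {
    return undefined;
  }
}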

test("safeJsonTryParse: returns undefined for null", () => {
  expect(safeJsonTryParse(null)).toBeUndefined();
});

test("safeJsonTryParse: parses valid JSON", () => {
  expect(safeJsonTryParse<{ a: number }>('{"a":1}')).toEqual({ a: 1 });
  expect(safeJsonTryParse<string>('"hello"')).toEqual("hello");
  expect(safeJsonTryParse<number>("42")).toEqual(42);
  expect(safeJsonTryParse<boolean>("true")).toEqual(true);
});

test("safeJsonTryParse: returns undefined for invalid JSON instead of throwing", () => {
  expect(safeJsonTryParse("not json")).toBeUndefined();
  expect(safeJsonTryParse("{bad}")).toBeUndefined();
  expect(safeJsonTryParse("")).toBeUndefined();
  expect(safeJsonTryParse("{")).toBeUndefined();
  expect(safeJsonTryParse("undefined")).toBeUndefined();
});

test("safeJsonTryParse: handles undefined token round-trip", () => {
  const encoded = safeJsonStringify(undefined);
  expect(safeJsonTryParse(encoded)).toBeUndefined();
});

test("safeJsonTryParse: corrupted Redis data does not crash getRun", async () => {
  const prefix = testPrefix();
  const redis = new RedisClient(redisServer.url);
  const client = createClient({ redis, prefix });

  // Create a minimal run hash with corrupted outputJson and errorJson
  const runId = `run_${crypto.randomUUID()}`;
  const runKey = keys.run(prefix, runId);
  await redis.hset(runKey, {
    workflow: "test-wf",
    queue: "default",
    status: "succeeded",
    inputJson: safeJsonStringify({ ok: true }),
    outputJson: "THIS IS NOT JSON {{{",
    errorJson: "ALSO NOT VALID JSON !!",
    attempt: "1",
    maxAttempts: "1",
    createdAt: String(Date.now()),
    cancelRequestedAt: "",
    cancelReason: "",
  });

  // getRun should not throw; the corrupted fields should come back as undefined
  const run = await client.getRun(runId);
  expect(run).not.toBeNull();
  expect(run!.id).toBe(runId);
  expect(run!.workflow).toBe("test-wf");
  expect(run!.status).toBe("succeeded");
  expect(run!.input).toEqual({ ok: true });
  // Corrupted output/error should gracefully become undefined
  expect(run!.output).toBeUndefined();
  expect(run!.error).toBeUndefined();

  redis.close();
});

test("safeJsonTryParse: corrupted step data does not crash getRunSteps", async () => {
  const prefix = testPrefix();
  const redis = new RedisClient(redisServer.url);
  const client = createClient({ redis, prefix });

  const runId = `run_${crypto.randomUUID()}`;
  const stepsKey = keys.runSteps(prefix, runId);
  await redis.hset(stepsKey, {
    "good-step": safeJsonStringify({
      status: "succeeded",
      seq: 1,
      startedAt: Date.now(),
      finishedAt: Date.now() + 100,
      outputJson: safeJsonStringify({ x: 1 }),
    }),
    "bad-step": "NOT VALID JSON AT ALL",
  });

  // getRunSteps parses each step's outer structure with safeJsonParse (not the
  // Try variant), so the corrupted "bad-step" payload may still make it throw.
  // Accept either outcome: a successful call that includes the good step, or a
  // thrown error. Bug #1 is specifically about safeJsonTryParse.
  try {
    const steps = await client.getRunSteps(runId);
    // If it doesn't throw, the well-formed step should be returned
    expect(steps.length).toBeGreaterThanOrEqual(1);
  } catch {
    // Throwing here is acceptable for the reason described above
  }

  redis.close();
});

// ---------------------------------------------------------------------------
// Bug #2: transitionRunStatus is now atomic (Lua-based)
// ---------------------------------------------------------------------------
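
// Editorial sketch (assumed shape, not the shipped script): the kind of Lua
// program that makes a status transition atomic -- one EVAL updates the run
// hash and moves the run id between per-status sorted sets, so no interleaved
// writer can observe a half-applied transition. Key and field names follow the
// assertions in the tests below; the real script may differ.
const transitionStatusLuaSketch = `
-- KEYS[1] = run hash, KEYS[2] = old status index, KEYS[3] = new status index
-- ARGV[1] = run id, ARGV[2] = new status, ARGV[3] = updatedAt (ms)
redis.call('HSET', KEYS[1], 'status', ARGV[2])
if ARGV[2] ~= 'scheduled' then
  redis.call('HDEL', KEYS[1], 'availableAt')
end
redis.call('ZREM', KEYS[2], ARGV[1])
redis.call('ZADD', KEYS[3], ARGV[3], ARGV[1])
return 1
`;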

test("transitionRunStatus: atomically moves status and indexes", async () => {
  const prefix = testPrefix();
  const redis = new RedisClient(redisServer.url);
  const client = createClient({ redis, prefix });
  setDefaultClient(client);

  const wf = defineWorkflow({ name: "atomic-transition-wf", queue: "q_atomic" }, async () => ({ ok: true }));

  // Enqueue a run
  const handle = await wf.run({});
  const runId = handle.id;

  // Verify it's in the "queued" status index
  const queuedIndex = `${prefix}:runs:status:queued`;
  let queuedMembers = await redis.zrange(queuedIndex, 0, -1);
  expect(queuedMembers.includes(runId)).toBe(true);

  // Transition to "running" atomically
  await client.transitionRunStatus(runId, "running", Date.now());

  // Should be in the "running" index, not in "queued"
  const runningIndex = `${prefix}:runs:status:running`;
  const runningMembers = await redis.zrange(runningIndex, 0, -1);
  expect(runningMembers.includes(runId)).toBe(true);

  queuedMembers = await redis.zrange(queuedIndex, 0, -1);
  expect(queuedMembers.includes(runId)).toBe(false);

  // Hash status should be "running"
  const status = await redis.hget(keys.run(prefix, runId), "status");
  expect(status).toBe("running");

  redis.close();
});

test("transitionRunStatus: clears availableAt when transitioning away from scheduled", async () => {
  const prefix = testPrefix();
  const redis = new RedisClient(redisServer.url);
  const client = createClient({ redis, prefix });
  setDefaultClient(client);

  const wf = defineWorkflow({ name: "clear-avail-wf", queue: "q_avail" }, async () => ({ ok: true }));
  const handle = await wf.run({}, { availableAt: new Date(Date.now() + 60_000) });

  // Should be scheduled
  const state1 = await client.getRun(handle.id);
  expect(state1?.status).toBe("scheduled");

  // Transition to queued
  await client.transitionRunStatus(handle.id, "queued", Date.now());

  // availableAt should be cleared
  const runKey = keys.run(prefix, handle.id);
  const availableAt = await redis.hget(runKey, "availableAt");
  // The Lua script deletes availableAt whenever the new status is not "scheduled"
  expect(!availableAt || availableAt === "").toBe(true);

  redis.close();
});

// ---------------------------------------------------------------------------
// Bug #3: No double attempt increment on crash recovery
// ---------------------------------------------------------------------------
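
// Editorial sketch (assumption about the worker's claim path, not the shipped
// code): the attempt counter should only move when a run is claimed from a
// non-running state. A run that is already "running" (i.e. re-queued by the
// reaper after a crash) keeps its stored attempt number.
async function claimAttemptSketch(client: ReturnType<typeof createClient>, runId: string) {
  const run = await client.getRun(runId);
  if (!run) return undefined;
  if (run.status === "running") {
    // Crash recovery: the previous worker already bumped `attempt`;
    // resume with the stored value instead of incrementing again.
    return run.attempt;
  }
  // Fresh or retried delivery: this is the only place the counter advances.
  await client.transitionRunStatus(runId, "running", Date.now());
  return (run.attempt ?? 0) + 1;
}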

test("crash recovery: attempt is not double-incremented", async () => {
  const prefix = testPrefix();
  const redis = new RedisClient(redisServer.url);
  const client = createClient({ redis, prefix });
  setDefaultClient(client);

  const queue = "q_no_double";
  const countKey = `${prefix}:t:noDoubleCount`;

  const wf = defineWorkflow(
    { name: "no-double-wf", queue, retries: { maxAttempts: 3 } },
    async ({ step, run }) => {
      await step.run({ name: "do" }, async () => {
        await redis.incr(countKey);
        return run.attempt;
      });
      return { attempt: run.attempt };
    },
  );

  const worker = await startWorker({
    redis,
    prefix,
    queues: [queue],
    concurrency: 2,
    runtime: { leaseMs: 500, reaperIntervalMs: 50 },
  });

  const handle = await wf.run({});
  const result = await handle.result({ timeoutMs: 5000, pollMs: 50 });
  expect(result.attempt).toBe(1);

  const state = await client.getRun(handle.id);
  expect(state?.attempt).toBe(1);

  await worker.stop();
  redis.close();
});

// Simulate the scenario: the run is already in "running" state with attempt=1,
// and a worker picks it up again from the ready queue (as the reaper would
// re-queue it after a crash).
test("crash recovery: re-processing a running run does not bump attempt", async () => {
  const prefix = testPrefix();
  const redis = new RedisClient(redisServer.url);
  const client = createClient({ redis, prefix });
  setDefaultClient(client);

  const queue = "q_recover_attempt";

  const wf = defineWorkflow(
    { name: "recover-attempt-wf", queue },
    async ({ run }) => {
      return { attempt: run.attempt };
    },
  );

  // Manually create a run in "running" state with attempt=1
  // (as if a previous worker crashed after incrementing attempt)
  const runId = `run_${crypto.randomUUID()}`;
  const runKey = keys.run(prefix, runId);
  const now = Date.now();

  await redis.hset(runKey, {
    workflow: "recover-attempt-wf",
    queue,
    status: "running",
    inputJson: safeJsonStringify({}),
    attempt: "1",
    maxAttempts: "3",
    createdAt: String(now),
    startedAt: String(now),
    cancelRequestedAt: "",
    cancelReason: "",
  });

  // Add to indexes
  await redis.zadd(`${prefix}:runs:created`, now, runId);
  await redis.zadd(`${prefix}:runs:status:running`, now, runId);
  await redis.zadd(`${prefix}:workflow-runs:recover-attempt-wf`, now, runId);

  // Put it in the ready queue (simulating the reaper re-queuing it)
  await redis.lpush(keys.queueReady(prefix, queue), runId);

  // Sync the registry before starting the worker
  await client.syncRegistry(getDefaultRegistry());

  const worker = await startWorker({
    redis,
    prefix,
    queues: [queue],
    runtime: { leaseMs: 500, reaperIntervalMs: 50 },
  });

  await waitFor(
    async () => (await client.getRun(runId))?.status === "succeeded",
    { timeoutMs: 5000, label: "run succeeded after recovery" },
  );

  const state = await client.getRun(runId);
  // Attempt should still be 1 (not 2), since we're recovering a running run
  expect(state?.attempt).toBe(1);

  await worker.stop();
  redis.close();
});

// ---------------------------------------------------------------------------
// Bug #4: Atomic retry scheduling (scheduleRetry)
// ---------------------------------------------------------------------------
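
// Editorial sketch (assumed shape, not the shipped script): scheduleRetry is
// expected to bundle every retry-related write into one atomic unit -- status,
// availableAt and errorJson on the run hash, the per-status indexes, and the
// queue's scheduled sorted set -- so a crash between writes cannot strand the
// run. Key and field names mirror the assertions in the tests below.
const scheduleRetryLuaSketch = `
-- KEYS[1] = run hash, KEYS[2] = "running" status index,
-- KEYS[3] = "scheduled" status index, KEYS[4] = queue scheduled zset
-- ARGV[1] = run id, ARGV[2] = nextAt (ms), ARGV[3] = errorJson
redis.call('HSET', KEYS[1], 'status', 'scheduled', 'availableAt', ARGV[2], 'errorJson', ARGV[3])
redis.call('ZREM', KEYS[2], ARGV[1])
redis.call('ZADD', KEYS[3], ARGV[2], ARGV[1])
redis.call('ZADD', KEYS[4], ARGV[2], ARGV[1])
return 1
`;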

test("retry: run is atomically transitioned to scheduled with queue entry", async () => {
  const prefix = testPrefix();
  const redis = new RedisClient(redisServer.url);
  const client = createClient({ redis, prefix });
  setDefaultClient(client);

  const queue = "q_atomic_retry";

  const wf = defineWorkflow(
    {
      name: "atomic-retry-wf",
      queue,
      retries: { maxAttempts: 2 },
    },
    async ({ run }) => {
      if (run.attempt === 1) throw new Error("fail once");
      return { ok: true };
    },
  );

  const worker = await startWorker({
    redis,
    prefix,
    queues: [queue],
    runtime: { leaseMs: 500 },
  });

  const handle = await wf.run({});
  const result = await handle.result({ timeoutMs: 15_000, pollMs: 50 });
  expect(result).toEqual({ ok: true });

  const state = await client.getRun(handle.id);
  expect(state?.status).toBe("succeeded");
  expect(state?.attempt).toBe(2);

  // Verify the run is not stranded in any queue
  const ready = await redis.lrange(keys.queueReady(prefix, queue), 0, -1);
  const processing = await redis.lrange(keys.queueProcessing(prefix, queue), 0, -1);
  const scheduled = await redis.zrange(keys.queueScheduled(prefix, queue), 0, -1);
  expect(ready.includes(handle.id)).toBe(false);
  expect(processing.includes(handle.id)).toBe(false);
  expect(scheduled.includes(handle.id)).toBe(false);

  await worker.stop();
  redis.close();
});

test("scheduleRetry: direct call sets all fields atomically", async () => {
  const prefix = testPrefix();
  const redis = new RedisClient(redisServer.url);
  const client = createClient({ redis, prefix });

  const queue = "q_sched_retry";
  const runId = `run_${crypto.randomUUID()}`;
  const runKey = keys.run(prefix, runId);
  const now = Date.now();

  // Set up a run in "running" state
  await redis.hset(runKey, {
    workflow: "test-wf",
    queue,
    status: "running",
    inputJson: "{}",
    attempt: "1",
    maxAttempts: "3",
    createdAt: String(now),
    cancelRequestedAt: "",
    cancelReason: "",
  });
  await redis.zadd(`${prefix}:runs:status:running`, now, runId);

  const nextAt = now + 5000;
  const errorJson = safeJsonStringify({ name: "Error", message: "test error", kind: "error" });

  await client.scheduleRetry(runId, {
    queue,
    nextAt,
    errorJson,
    updatedAt: now,
  });

  // Status should be "scheduled"
  const status = await redis.hget(runKey, "status");
  expect(status).toBe("scheduled");

  // availableAt should be set
  const availableAt = await redis.hget(runKey, "availableAt");
  expect(availableAt).toBe(String(nextAt));

  // errorJson should be set
  const storedError = await redis.hget(runKey, "errorJson");
  expect(storedError).toBe(errorJson);

  // Should be in scheduled sorted set
  const scheduledMembers = await redis.zrange(keys.queueScheduled(prefix, queue), 0, -1);
  expect(scheduledMembers.includes(runId)).toBe(true);

  // Should be in "scheduled" status index, not "running"
  const scheduledIndex = await redis.zrange(`${prefix}:runs:status:scheduled`, 0, -1);
  expect(scheduledIndex.includes(runId)).toBe(true);
  const runningIndex = await redis.zrange(`${prefix}:runs:status:running`, 0, -1);
  expect(runningIndex.includes(runId)).toBe(false);

  redis.close();
});

// ---------------------------------------------------------------------------
// Bug #5: Promoter batch zpopmin
// ---------------------------------------------------------------------------
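
// Editorial sketch (assumed shape, not the shipped promoter): promoting all due
// runs in a single cycle by popping the scheduled zset in a batch. ZPOPMIN
// returns the earliest-due entry; anything popped that is not yet due is put
// back and the loop stops. Key roles mirror keys.queueScheduled / queueReady.
const promoteDueRunsLuaSketch = `
-- KEYS[1] = queue scheduled zset, KEYS[2] = queue ready list
-- ARGV[1] = now (ms), ARGV[2] = max batch size
local moved = 0
while moved < tonumber(ARGV[2]) do
  local popped = redis.call('ZPOPMIN', KEYS[1])
  if #popped == 0 then break end
  local id, dueAt = popped[1], tonumber(popped[2])
  if dueAt > tonumber(ARGV[1]) then
    redis.call('ZADD', KEYS[1], dueAt, id) -- not due yet: put it back
    break
  end
  redis.call('LPUSH', KEYS[2], id)
  moved = moved + 1
end
return moved
`;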

test(
  "promoter: multiple scheduled runs are promoted in a single cycle",
  async () => {
    const prefix = testPrefix();
    const redis = new RedisClient(redisServer.url);
    const client = createClient({ redis, prefix });
    setDefaultClient(client);

    const queue = "q_batch_promote";
    const countKey = `${prefix}:t:batchPromoteCount`;

    const wf = defineWorkflow({ name: "batch-promote-wf", queue }, async ({ step }) => {
      await step.run({ name: "do" }, async () => {
        await redis.incr(countKey);
        return true;
      });
      return { ok: true };
    });

    // Schedule 5 runs ~200ms out so they all become due at (nearly) the same time
    const handles = [];
    for (let i = 0; i < 5; i++) {
      handles.push(await wf.run({}, { availableAt: new Date(Date.now() + 200) }));
    }

    const worker = await startWorker({
      redis,
      prefix,
      queues: [queue],
      concurrency: 4,
      runtime: { leaseMs: 500 },
    });

    // All 5 should complete
    await waitFor(
      async () => {
        const count = Number((await redis.get(countKey)) ?? "0");
        return count >= 5;
      },
      { timeoutMs: 20_000, label: "all 5 runs completed" },
    );

    const count = Number((await redis.get(countKey)) ?? "0");
    expect(count).toBe(5);

    // All should be succeeded
    for (const h of handles) {
      const state = await client.getRun(h.id);
      expect(state?.status).toBe("succeeded");
    }

    await worker.stop();
    redis.close();
  },
  { timeout: 30_000 },
);

// ---------------------------------------------------------------------------
// Bug #6: finalizeCanceledRunImmediately cleans processing list
// ---------------------------------------------------------------------------
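
// Editorial sketch (assumption, not the shipped code): when a run is canceled
// while an entry for it still sits in the queue's processing list, the finalize
// path is expected to also remove that entry so the run cannot be "recovered"
// later. LREM with count 0 removes every occurrence of the id; the "canceled"
// status value is an assumption.
const finalizeCanceledLuaSketch = `
-- KEYS[1] = queue processing list, KEYS[2] = run hash
-- ARGV[1] = run id
redis.call('LREM', KEYS[1], 0, ARGV[1])
redis.call('HSET', KEYS[2], 'status', 'canceled')
return 1
`;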

test("cancelRun: cleans processing list for canceled runs", async () => {
  const prefix = testPrefix();
  const redis = new RedisClient(redisServer.url);
  const client = createClient({ redis, prefix });
  setDefaultClient(client);

  const queue = "q_cancel_processing";

  const wf = defineWorkflow({ name: "cancel-proc-wf", queue }, async () => {
    // Long-running so we can cancel while queued/scheduled
    await new Promise((r) => setTimeout(r, 60_000));
    return { ok: true };
  });

  // Schedule a run for the near future
  const handle = await wf.run({}, { availableAt: new Date(Date.now() + 100) });

  // Manually place it into the processing list to simulate a race condition
  const processingKey = keys.queueProcessing(prefix, queue);
  await redis.lpush(processingKey, handle.id);

  // Cancel the run
  const canceled = await client.cancelRun(handle.id);
  expect(canceled).toBe(true);

  // The processing list should no longer contain the run
  const processing = await redis.lrange(processingKey, 0, -1);
  expect(processing.includes(handle.id)).toBe(false);

  redis.close();
});

// ---------------------------------------------------------------------------
// Bug #10: Duplicate step name validation
// ---------------------------------------------------------------------------
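
// Editorial sketch (not the shipped implementation): the kind of per-run guard
// these tests expect inside step.run() -- a second use of the same step name
// within one run fails fast with a clear error. The error text is assumed to
// contain "Duplicate step name", which is what the test below asserts.
function makeStepNameGuardSketch() {
  const seen = new Set<string>();
  return (name: string) => {
    if (seen.has(name)) {
      throw new Error(`Duplicate step name "${name}" in this run`);
    }
    seen.add(name);
  };
}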

test("duplicate step name: throws a clear error", async () => {
  const prefix = testPrefix();
  const redis = new RedisClient(redisServer.url);
  const client = createClient({ redis, prefix });
  setDefaultClient(client);

  const queue = "q_dup_step";

  const wf = defineWorkflow({ name: "dup-step-wf", queue }, async ({ step }) => {
    await step.run({ name: "same-name" }, async () => 1);
    // This should throw because the step name has already been used
    await step.run({ name: "same-name" }, async () => 2);
    return { ok: true };
  });

  const worker = await startWorker({
    redis,
    prefix,
    queues: [queue],
    runtime: { leaseMs: 500 },
  });

  const handle = await wf.run({});

  await waitFor(
    async () => {
      const state = await client.getRun(handle.id);
      return state?.status === "failed";
    },
    { timeoutMs: 5000, label: "run fails due to duplicate step name" },
  );

  const state = await client.getRun(handle.id);
  expect(state?.status).toBe("failed");
  expect((state?.error as any)?.message).toContain("Duplicate step name");

  await worker.stop();
  redis.close();
});

test("unique step names: work normally", async () => {
  const prefix = testPrefix();
  const redis = new RedisClient(redisServer.url);
  const client = createClient({ redis, prefix });
  setDefaultClient(client);

  const queue = "q_unique_steps";

  const wf = defineWorkflow({ name: "unique-steps-wf", queue }, async ({ step }) => {
    const a = await step.run({ name: "step-a" }, async () => 1);
    const b = await step.run({ name: "step-b" }, async () => 2);
    const c = await step.run({ name: "step-c" }, async () => 3);
    return { sum: a + b + c };
  });

  const worker = await startWorker({
    redis,
    prefix,
    queues: [queue],
    runtime: { leaseMs: 500 },
  });

  const handle = await wf.run({});
  const result = await handle.result({ timeoutMs: 5000, pollMs: 50 });
  expect(result).toEqual({ sum: 6 });

  await worker.stop();
  redis.close();
});

// ---------------------------------------------------------------------------
// Integration: retry + atomic scheduling + correct attempt count
// ---------------------------------------------------------------------------

test(
  "integration: retry workflow has correct attempt count after all retries",
  async () => {
    const prefix = testPrefix();
    const redis = new RedisClient(redisServer.url);
    const client = createClient({ redis, prefix });
    setDefaultClient(client);

    const queue = "q_full_retry";
    const attemptsKey = `${prefix}:t:fullRetryAttempts`;

    const wf = defineWorkflow(
      {
        name: "full-retry-wf",
        queue,
        retries: { maxAttempts: 3 },
      },
      async ({ run, step }) => {
        await step.run({ name: `attempt-${run.attempt}` }, async () => {
          await redis.rpush(attemptsKey, String(run.attempt));
          return true;
        });
        if (run.attempt < 3) throw new Error(`fail attempt ${run.attempt}`);
        return { ok: true, finalAttempt: run.attempt };
      },
    );

    const worker = await startWorker({
      redis,
      prefix,
      queues: [queue],
      runtime: { leaseMs: 500 },
    });

    const handle = await wf.run({});
    const result = await handle.result({ timeoutMs: 30_000, pollMs: 100 });
    expect(result).toEqual({ ok: true, finalAttempt: 3 });

    const state = await client.getRun(handle.id);
    expect(state?.status).toBe("succeeded");
    expect(state?.attempt).toBe(3);

    // Verify each attempt was recorded exactly once
    const attempts = await redis.lrange(attemptsKey, 0, -1);
    expect(attempts).toEqual(["1", "2", "3"]);

    // No stranded entries in queues
    const ready = await redis.lrange(keys.queueReady(prefix, queue), 0, -1);
    const processing = await redis.lrange(keys.queueProcessing(prefix, queue), 0, -1);
    const scheduled = await redis.zrange(keys.queueScheduled(prefix, queue), 0, -1);
    expect(ready.includes(handle.id)).toBe(false);
    expect(processing.includes(handle.id)).toBe(false);
    expect(scheduled.includes(handle.id)).toBe(false);

    await worker.stop();
    redis.close();
  },
  { timeout: 45_000 },
);

test(
  "integration: retry exhaustion fails with correct attempt count and clean queues",
  async () => {
    const prefix = testPrefix();
    const redis = new RedisClient(redisServer.url);
    const client = createClient({ redis, prefix });
    setDefaultClient(client);

    const queue = "q_exhaust";

    const wf = defineWorkflow(
      {
        name: "exhaust-wf",
        queue,
        retries: { maxAttempts: 2 },
      },
      async () => {
        throw new Error("always fails");
      },
    );

    const worker = await startWorker({
      redis,
      prefix,
      queues: [queue],
      runtime: { leaseMs: 500 },
    });

    const handle = await wf.run({});

    await waitFor(
      async () => (await client.getRun(handle.id))?.status === "failed",
      { timeoutMs: 15_000, label: "run fails after exhausting retries" },
    );

    const state = await client.getRun(handle.id);
    expect(state?.status).toBe("failed");
    expect(state?.attempt).toBe(2);

    // Clean queues
    const ready = await redis.lrange(keys.queueReady(prefix, queue), 0, -1);
    const processing = await redis.lrange(keys.queueProcessing(prefix, queue), 0, -1);
    const scheduled = await redis.zrange(keys.queueScheduled(prefix, queue), 0, -1);
    expect(ready.includes(handle.id)).toBe(false);
    expect(processing.includes(handle.id)).toBe(false);
    expect(scheduled.includes(handle.id)).toBe(false);

    await worker.stop();
    redis.close();
  },
  { timeout: 25_000 },
);

// ---------------------------------------------------------------------------
// Integration: status indexes consistency
// ---------------------------------------------------------------------------
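
// Editorial note (inferred from the assertions in these tests, not from the
// package source): runs appear to be indexed per status under
// `${prefix}:runs:status:<status>` sorted sets. A small helper used only for
// illustration:
const statusIndexKeySketch = (prefix: string, status: string) =>
  `${prefix}:runs:status:${status}`;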

test("status indexes: consistent through full lifecycle", async () => {
  const prefix = testPrefix();
  const redis = new RedisClient(redisServer.url);
  const client = createClient({ redis, prefix });
  setDefaultClient(client);

  const queue = "q_status_idx";

  const wf = defineWorkflow({ name: "status-idx-wf", queue }, async ({ step }) => {
    await step.run({ name: "do" }, async () => {
      await new Promise((r) => setTimeout(r, 200));
      return true;
    });
    return { ok: true };
  });

  const handle = await wf.run({});

  // Initially queued
  let queuedMembers = await redis.zrange(`${prefix}:runs:status:queued`, 0, -1);
  expect(queuedMembers.includes(handle.id)).toBe(true);

  const worker = await startWorker({
    redis,
    prefix,
    queues: [queue],
    runtime: { leaseMs: 500 },
  });

  // Wait for running
  await waitFor(
    async () => (await client.getRun(handle.id))?.status === "running",
    { timeoutMs: 5000, label: "becomes running" },
  );

  const runningMembers = await redis.zrange(`${prefix}:runs:status:running`, 0, -1);
  expect(runningMembers.includes(handle.id)).toBe(true);
  queuedMembers = await redis.zrange(`${prefix}:runs:status:queued`, 0, -1);
  expect(queuedMembers.includes(handle.id)).toBe(false);

  // Wait for succeeded
  await handle.result({ timeoutMs: 5000, pollMs: 50 });

  const succeededMembers = await redis.zrange(`${prefix}:runs:status:succeeded`, 0, -1);
  expect(succeededMembers.includes(handle.id)).toBe(true);
  const runningMembers2 = await redis.zrange(`${prefix}:runs:status:running`, 0, -1);
  expect(runningMembers2.includes(handle.id)).toBe(false);

  await worker.stop();
  redis.close();
});