queasy 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.claude/settings.local.json +27 -0
- package/.luarc.json +13 -0
- package/.zed/settings.json +39 -0
- package/AGENTS.md +102 -0
- package/CLAUDE.md +83 -0
- package/License.md +7 -0
- package/Readme.md +130 -0
- package/biome.json +28 -0
- package/doc/Implementation.md +70 -0
- package/docker-compose.yml +19 -0
- package/jsconfig.json +17 -0
- package/package.json +37 -0
- package/src/client.js +218 -0
- package/src/constants.js +34 -0
- package/src/errors.js +25 -0
- package/src/index.js +2 -0
- package/src/manager.js +94 -0
- package/src/pool.js +164 -0
- package/src/queasy.lua +397 -0
- package/src/queue.js +161 -0
- package/src/types.ts +92 -0
- package/src/utils.js +13 -0
- package/src/worker.js +44 -0
- package/test/client.test.js +49 -0
- package/test/errors.test.js +19 -0
- package/test/fixtures/always-fail-handler.js +8 -0
- package/test/fixtures/data-logger-handler.js +14 -0
- package/test/fixtures/failure-handler.js +9 -0
- package/test/fixtures/no-handle-handler.js +1 -0
- package/test/fixtures/permanent-error-handler.js +10 -0
- package/test/fixtures/slow-handler.js +9 -0
- package/test/fixtures/success-handler.js +9 -0
- package/test/fixtures/with-failure-handler.js +8 -0
- package/test/index.test.js +55 -0
- package/test/manager.test.js +87 -0
- package/test/pool.test.js +66 -0
- package/test/queue.test.js +438 -0
- package/test/redis-functions.test.js +683 -0
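
The hunk below shows package/test/queue.test.js, the end-to-end suite for the queue API. It drives handler modules from test/fixtures/ that are listed above but whose bodies are not part of this hunk; going by how the suite uses them (a module path handed to listen(), and a failure when there is no handle export), a success fixture is presumably shaped something like this sketch (hypothetical reconstruction, not the published file):

// Hypothetical sketch of test/fixtures/success-handler.js; the published
// fixture is 9 lines, but its body is not shown in this diff.
export async function handle(data) {
  // Invoked in a worker thread with the dispatched payload,
  // e.g. { greeting: 'hello' }; resolving marks the job finished.
  return data;
}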
package/test/queue.test.js
@@ -0,0 +1,438 @@
import assert from 'node:assert';
import { afterEach, beforeEach, describe, it, mock } from 'node:test';
import { createClient } from 'redis';
import { Client } from '../src/index.js';

const QUEUE_NAME = 'test';

describe('Queue E2E', () => {
  /** @type {import('redis').RedisClientType} */
  let redis;
  /** @type {import('../src/client.js').Client} */
  let client;

  beforeEach(async () => {
    redis = createClient();
    await redis.connect();
    const keys = await redis.keys(`${QUEUE_NAME}*`);
    if (keys.length > 0) {
      await redis.del(keys);
    }

    client = new Client(redis, 1);

    // Mock this so that no actual work is dequeued by the manager.
    if (client.manager) client.manager.addQueue = mock.fn();
  });

  afterEach(async () => {
    // Terminate worker threads to allow clean exit
    client.close();

    // Clean up all queue data
    const keys = await redis.keys(`{${QUEUE_NAME}}*`);
    if (keys.length > 0) {
      await redis.del(keys);
    }

    // Close Redis connection
    await redis.quit();
  });

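  // Editor's note: every queue key below is wrapped in braces ({test}, {queue1}, ...).
  // In Redis Cluster a {...} hash tag pins all keys sharing it to one slot, which is
  // what lets the waiting zset ({test}) and its per-job hashes
  // ({test}:waiting_job:<id>, {test}:active_job:<id>) be manipulated together,
  // presumably by the Lua functions in src/queasy.lua.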
  describe('dispatch()', () => {
    it('should dispatch a job and store it in waiting queue', async () => {
      const q = client.queue(QUEUE_NAME);
      const jobId = await q.dispatch({ task: 'test-job' });

      assert.ok(jobId);
      assert.equal(typeof jobId, 'string');

      // Job should be in waiting queue
      const score = await redis.zScore(`{${QUEUE_NAME}}`, jobId);
      assert.ok(score !== null);

      // Job data should exist
      const jobData = await redis.hGetAll(`{${QUEUE_NAME}}:waiting_job:${jobId}`);
      assert.equal(jobData.id, jobId);
      assert.equal(jobData.data, JSON.stringify({ task: 'test-job' }));
    });

    it('should accept custom job ID', async () => {
      const q = client.queue(QUEUE_NAME);
      const customId = 'my-custom-id';
      const jobId = await q.dispatch({ task: 'test' }, { id: customId });

      assert.equal(jobId, customId);

      const score = await redis.zScore(`{${QUEUE_NAME}}`, customId);
      assert.ok(score !== null);
    });

    it('should respect runAt option', async () => {
      const q = client.queue(QUEUE_NAME);
      const futureTime = Date.now() + 10000;
      const jobId = await q.dispatch({ task: 'future' }, { runAt: futureTime });

      const score = await redis.zScore(`{${QUEUE_NAME}}`, jobId);
      assert.equal(score, futureTime);
    });

    it('should update existing job when updateData is true', async () => {
      const q = client.queue(QUEUE_NAME);
      const jobId = 'update-test';

      await q.dispatch({ value: 1 }, { id: jobId });
      await q.dispatch({ value: 2 }, { id: jobId, updateData: true });

      const jobData = await redis.hGetAll(`{${QUEUE_NAME}}:waiting_job:${jobId}`);
      assert.equal(jobData.data, JSON.stringify({ value: 2 }));
    });

    it('should not update existing job when updateData is false', async () => {
      const q = client.queue(QUEUE_NAME);
      const jobId = 'no-update-test';

      await q.dispatch({ value: 1 }, { id: jobId });
      await q.dispatch({ value: 2 }, { id: jobId, updateData: false });

      const jobData = await redis.hGetAll(`{${QUEUE_NAME}}:waiting_job:${jobId}`);
      assert.equal(jobData.data, JSON.stringify({ value: 1 }));
    });
  });

  describe('cancel()', () => {
    it('should cancel a waiting job', async () => {
      const q = client.queue(QUEUE_NAME);
      const jobId = await q.dispatch({ task: 'test' });

      const cancelled = await q.cancel(jobId);
      assert.equal(cancelled, true);

      // Job should be removed
      const score = await redis.zScore(`{${QUEUE_NAME}}`, jobId);
      assert.equal(score, null);

      const exists = await redis.exists(`{${QUEUE_NAME}}:waiting_job:${jobId}`);
      assert.equal(exists, 0);
    });

    it('should return false for nonexistent job', async () => {
      const q = client.queue(QUEUE_NAME);
      const cancelled = await q.cancel('nonexistent-job-id');
      assert.equal(cancelled, false);
    });
  });

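  // Editor's note: the suites below drive processing by hand. listen() registers a
  // handler module for the queue, and dequeue(n) appears to claim up to n due jobs
  // for the worker threads, resolving to a { promise } that settles when those
  // workers finish; hence the `await (await q.dequeue(1)).promise` double await.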
  describe('listen() and job processing', () => {
    it('should process a job successfully', async () => {
      const q = client.queue(QUEUE_NAME);
      const jobId = await q.dispatch({ greeting: 'hello' });

      const handlerPath = new URL('./fixtures/success-handler.js', import.meta.url).pathname;
      await q.listen(handlerPath);
      await (await q.dequeue(1)).promise;

      // Job should be removed from all queues
      const waitingScore = await redis.zScore(`{${QUEUE_NAME}}`, jobId);
      assert.equal(waitingScore, null);

      const activeJobExists = await redis.exists(`{${QUEUE_NAME}}:active_job:${jobId}`);
      assert.equal(activeJobExists, 0);
    });

    it('should process multiple jobs', async () => {
      const q = client.queue(QUEUE_NAME);
      await Promise.all([
        q.dispatch({ id: 1 }),
        q.dispatch({ id: 2 }),
        q.dispatch({ id: 3 }),
      ]);

      const handlerPath = new URL('./fixtures/success-handler.js', import.meta.url).pathname;
      await q.listen(handlerPath);
      await (await q.dequeue(5)).promise;

      // All jobs should be cleaned up
      const waitingJobs = await redis.zRange(`{${QUEUE_NAME}}`, 0, -1);
      assert.equal(waitingJobs.length, 0);

      const activeJobKeys = await redis.keys(`{${QUEUE_NAME}}:active_job:*`);
      assert.equal(activeJobKeys.length, 0);
    });

    it('should not process jobs scheduled for the future', async () => {
      const q = client.queue(QUEUE_NAME);
      const futureTime = Date.now() + 10000;
      const jobId = await q.dispatch({ task: 'future' }, { runAt: futureTime });

      const handlerPath = new URL('./fixtures/success-handler.js', import.meta.url).pathname;
      await q.listen(handlerPath);
      await (await q.dequeue(1)).promise;

      // Job should still be waiting
      const score = await redis.zScore(`{${QUEUE_NAME}}`, jobId);
      assert.ok(score !== null);
      assert.equal(score, futureTime);
    });

    it('should handle cancelling a job before it processes', async () => {
      const q = client.queue(QUEUE_NAME);
      const jobId1 = await q.dispatch({ id: 1 });
      const jobId2 = await q.dispatch({ id: 2 });

      // Cancel job1 before listening
      const cancelled = await q.cancel(jobId1);
      assert.ok(cancelled);

      const handlerPath = new URL('./fixtures/success-handler.js', import.meta.url).pathname;
      await q.listen(handlerPath);
      await (await q.dequeue(1)).promise;

      // job1 should not exist
      const job1Score = await redis.zScore(`{${QUEUE_NAME}}`, jobId1);
      assert.equal(job1Score, null);

      const job1Exists = await redis.exists(`{${QUEUE_NAME}}:waiting_job:${jobId1}`);
      assert.equal(job1Exists, 0);

      // job2 should be fully processed
      const job2Score = await redis.zScore(`{${QUEUE_NAME}}`, jobId2);
      assert.equal(job2Score, null);

      const job2Active = await redis.exists(`{${QUEUE_NAME}}:active_job:${jobId2}`);
      assert.equal(job2Active, 0);
    });
  });

  describe('multiple queues', () => {
    it('should handle multiple independent queues', async () => {
      const queue1 = client.queue('queue1');
      const queue2 = client.queue('queue2');

      const jobId1 = await queue1.dispatch({ queue: 1 });
      const jobId2 = await queue2.dispatch({ queue: 2 });

      const handlerPath = new URL('./fixtures/success-handler.js', import.meta.url).pathname;
      await queue1.listen(handlerPath);
      await queue2.listen(handlerPath);
      // First wait for these jobs to be dequeued and sent to workers
      const dequeued = await Promise.all([queue1.dequeue(1), queue2.dequeue(1)]);

      // Now wait for the workers to finish processing
      await Promise.all(dequeued.map(({ promise }) => promise));

      // Both jobs should be cleaned up
      const score1 = await redis.zScore('{queue1}', jobId1);
      assert.equal(score1, null, 'Queue 1 job should be processed');

      const score2 = await redis.zScore('{queue2}', jobId2);
      assert.equal(score2, null, 'Queue 2 job should be processed');

      // Cleanup
      const keys2 = await redis.keys('{queue2}*');
      if (keys2.length > 0) await redis.del(keys2);

      const keys1 = await redis.keys('{queue1}*');
      if (keys1.length > 0) await redis.del(keys1);
    });
  });

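  // Editor's note: per the assertions below, a failure queue is itself a queue named
  // `<key>-fail`, and each fail job's data is the triple [jobId, data, error]
  // describing the job that permanently failed.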
  describe('failHandler option', () => {
    it('should set up fail queue via failHandler listen option', async () => {
      const staleKeys = await redis.keys('{fail-opt}*');
      if (staleKeys.length > 0) await redis.del(staleKeys);

      const q = client.queue('fail-opt');
      await q.dispatch({ task: 'will-fail' });

      const mainHandler = new URL('./fixtures/with-failure-handler.js', import.meta.url)
        .pathname;
      const failHandler = new URL('./fixtures/success-handler.js', import.meta.url).pathname;

      // listen with failHandler option — this covers queue.js lines 60-63
      await q.listen(mainHandler, { maxRetries: 0, failHandler });

      // Dequeue from the main queue
      await (await q.dequeue(1)).promise;

      // Fail job should exist in the fail queue
      const failJobIds = await redis.zRange('{fail-opt}-fail', 0, -1);
      assert.ok(failJobIds.length > 0, 'Fail job should be created in fail queue');

      // Cleanup
      const keys = await redis.keys('{fail-opt}*');
      if (keys.length > 0) await redis.del(keys);
    });
  });

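  // Editor's note: on a retryable failure the job returns to the waiting zset with a
  // score at least minBackoff ms in the future. The exact curve is not asserted
  // here; the test name suggests exponential growth capped at maxBackoff.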
  describe('retry and backoff', () => {
    it('should retry a failed job with exponential backoff', async () => {
      const q = client.queue(QUEUE_NAME);
      const jobId = await q.dispatch({ task: 'will-fail' });

      const handlerPath = new URL('./fixtures/always-fail-handler.js', import.meta.url)
        .pathname;
      await q.listen(handlerPath, { maxRetries: 2, minBackoff: 1000, maxBackoff: 10000 });

      const before = Date.now();
      await (await q.dequeue(1)).promise;

      // Job should be back in the waiting set with a backoff score
      const score = await redis.zScore(`{${QUEUE_NAME}}`, jobId);
      assert.ok(score !== null, 'Job should be back in waiting set');
      assert.ok(score >= before + 1000, 'Score should include backoff');
    });
  });

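  // Editor's note: stall_count on the waiting_job hash appears to count how many
  // times a claimed job went unfinished. Past maxStalls, the job is routed to the
  // fail queue when failKey is set, and simply finished otherwise.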
  describe('maxStalls handling', () => {
    it('should fail a stalled job when maxStalls exceeded and failKey is set', async () => {
      const staleKeys = await redis.keys('{stall-test}*');
      if (staleKeys.length > 0) await redis.del(staleKeys);

      const q = client.queue('stall-test');
      const jobId = await q.dispatch({ task: 'stalled' });

      // Manually set stall_count to exceed maxStalls (default 3)
      await redis.hSet(`{stall-test}:waiting_job:${jobId}`, 'stall_count', '5');

      const handlerPath = new URL('./fixtures/success-handler.js', import.meta.url).pathname;
      await q.listen(handlerPath, { maxStalls: 3 });
      q.failKey = '{stall-test}-fail';

      await (await q.dequeue(1)).promise;

      // Job should be removed from waiting set
      const score = await redis.zScore('{stall-test}', jobId);
      assert.equal(score, null, 'Job should be removed from waiting');

      // Fail job should exist in fail queue
      const failJobIds = await redis.zRange('{stall-test}-fail', 0, -1);
      assert.ok(failJobIds.length > 0, 'Fail job should be created');

      // Verify fail job data contains stall message
      const failJobId = failJobIds[0];
      const failJobData = await redis.hGet(
        `{stall-test}-fail:waiting_job:${failJobId}`,
        'data'
      );
      const parsed = JSON.parse(failJobData || 'null');
      assert.deepEqual(parsed[2], { message: 'Max stalls exceeded' });

      // Cleanup
      const keys = await redis.keys('{stall-test}*');
      if (keys.length > 0) await redis.del(keys);
    });

    it('should finish a stalled job when maxStalls exceeded and no failKey', async () => {
      const staleKeys = await redis.keys('{stall-nofail}*');
      if (staleKeys.length > 0) await redis.del(staleKeys);

      const q = client.queue('stall-nofail');
      const jobId = await q.dispatch({ task: 'stalled' });

      await redis.hSet(`{stall-nofail}:waiting_job:${jobId}`, 'stall_count', '5');

      const handlerPath = new URL('./fixtures/success-handler.js', import.meta.url).pathname;
      await q.listen(handlerPath, { maxStalls: 3 });
      // No failKey set — job should just be finished

      await (await q.dequeue(1)).promise;

      // Job should be fully removed
      const score = await redis.zScore('{stall-nofail}', jobId);
      assert.equal(score, null, 'Job should be removed from waiting');

      const activeExists = await redis.exists(`{stall-nofail}:active_job:${jobId}`);
      assert.equal(activeExists, 0, 'Active job should be cleaned up');

      // Cleanup
      const keys = await redis.keys('{stall-nofail}*');
      if (keys.length > 0) await redis.del(keys);
    });
  });

  describe('invalid handler', () => {
    it('should fail when handler has no handle export', async () => {
      const staleKeys = await redis.keys('{bad-handler}*');
      if (staleKeys.length > 0) await redis.del(staleKeys);

      const q = client.queue('bad-handler');
      await q.dispatch({ task: 'test' });

      const handlerPath = new URL('./fixtures/no-handle-handler.js', import.meta.url)
        .pathname;
      await q.listen(handlerPath, { maxRetries: 0 });
      q.failKey = '{bad-handler}-fail';

      await (await q.dequeue(1)).promise;

      // Fail job should exist
      const failJobIds = await redis.zRange('{bad-handler}-fail', 0, -1);
      assert.ok(failJobIds.length > 0, 'Fail job should be created');

      // Verify error message
      const failJobId = failJobIds[0];
      const failJobData = await redis.hGet(
        `{bad-handler}-fail:waiting_job:${failJobId}`,
        'data'
      );
      const parsed = JSON.parse(failJobData || 'null');
      assert.ok(
        parsed[2].message.includes('Unable to load handler'),
        'Error should mention unable to load handler'
      );

      // Cleanup
      const keys = await redis.keys('{bad-handler}*');
      if (keys.length > 0) await redis.del(keys);
    });
  });

  describe('failure handlers', () => {
    it('should dispatch fail job on permanent failure', async () => {
      // Clean stale keys from previous runs
      const staleKeys = await redis.keys('{fail-test}*');
      if (staleKeys.length > 0) await redis.del(staleKeys);

      const q = client.queue('fail-test');
      const jobId = await q.dispatch({ task: 'will-fail' });

      const handlerPath = new URL('./fixtures/with-failure-handler.js', import.meta.url)
        .pathname;

      // Listen without failHandler so no fail queue listener races us
      await q.listen(handlerPath, { maxRetries: 0 });
      // Set failKey manually so the fail job is created but not consumed
      q.failKey = `${q.key}-fail`;
      await (await q.dequeue(1)).promise;

      // Original job should be cleaned up
      const activeExists = await redis.exists(`{fail-test}:active_job:${jobId}`);
      assert.equal(activeExists, 0);

      // Fail job should exist in fail queue
      const failJobIds = await redis.zRange('{fail-test}-fail', 0, -1);
      assert.ok(failJobIds.length > 0, 'Fail job should be created');

      // Verify fail job data structure
      const failJobId = failJobIds[0];
      const failJobData = await redis.hGet(
        `{fail-test}-fail:waiting_job:${failJobId}`,
        'data'
      );
      const parsedFailData = JSON.parse(failJobData || 'null');
      assert.ok(Array.isArray(parsedFailData), 'Fail job data should be an array');
      assert.equal(parsedFailData.length, 3, 'Fail job should have [jobId, data, error]');

      // Verify the structure contains the original data
      assert.deepEqual(parsedFailData[1], { task: 'will-fail' });

      // Verify error is present
      assert.ok(parsedFailData[2].message, 'Error should have a message');

      // Cleanup
      const keys = await redis.keys('{fail-test}*');
      if (keys.length > 0) await redis.del(keys);
    });
  });
});
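
Taken together, the suite documents the public surface exercised by this release: Client over a node-redis connection, per-queue dispatch/cancel, and listen plus dequeue for processing. A minimal usage sketch assembled from the calls above (assuming the package entry point re-exports Client, as the tests' ../src/index.js import suggests; the handler path is hypothetical, and this is inferred from the tests rather than from package documentation):

import { createClient } from 'redis';
import { Client } from 'queasy';

const redis = createClient();
await redis.connect();

// The tests construct `new Client(redis, 1)`; the second argument presumably
// sets the worker concurrency.
const client = new Client(redis, 1);
const queue = client.queue('emails');

// Enqueue immediately, or in the future via runAt; custom ids are optional.
const jobId = await queue.dispatch({ to: 'a@example.com' }, { runAt: Date.now() + 5000 });

// Point the queue at a module exporting `handle`, with a retry policy.
const handlerPath = new URL('./handlers/email.js', import.meta.url).pathname;
await queue.listen(handlerPath, { maxRetries: 2, minBackoff: 1000, maxBackoff: 10000 });

// Claim due jobs and wait for the worker threads to finish them.
await (await queue.dequeue(1)).promise;

// cancel() returns false once the job no longer exists.
await queue.cancel(jobId);

client.close();
await redis.quit();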