@nicnocquee/dataqueue 1.22.0 → 1.25.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (34)
  1. package/README.md +44 -0
  2. package/dist/index.cjs +2822 -583
  3. package/dist/index.cjs.map +1 -1
  4. package/dist/index.d.cts +589 -12
  5. package/dist/index.d.ts +589 -12
  6. package/dist/index.js +2818 -584
  7. package/dist/index.js.map +1 -1
  8. package/migrations/1751131910825_add_timeout_seconds_to_job_queue.sql +2 -2
  9. package/migrations/1751186053000_add_job_events_table.sql +12 -8
  10. package/migrations/1751984773000_add_tags_to_job_queue.sql +1 -1
  11. package/migrations/1765809419000_add_force_kill_on_timeout_to_job_queue.sql +6 -0
  12. package/migrations/1771100000000_add_idempotency_key_to_job_queue.sql +7 -0
  13. package/migrations/1781200000000_add_wait_support.sql +12 -0
  14. package/migrations/1781200000001_create_waitpoints_table.sql +18 -0
  15. package/migrations/1781200000002_add_performance_indexes.sql +34 -0
  16. package/migrations/1781200000003_add_progress_to_job_queue.sql +7 -0
  17. package/package.json +20 -6
  18. package/src/backend.ts +163 -0
  19. package/src/backends/postgres.ts +1111 -0
  20. package/src/backends/redis-scripts.ts +533 -0
  21. package/src/backends/redis.test.ts +543 -0
  22. package/src/backends/redis.ts +834 -0
  23. package/src/db-util.ts +4 -2
  24. package/src/handler-validation.test.ts +414 -0
  25. package/src/handler-validation.ts +168 -0
  26. package/src/index.test.ts +230 -1
  27. package/src/index.ts +128 -32
  28. package/src/processor.test.ts +612 -16
  29. package/src/processor.ts +759 -47
  30. package/src/queue.test.ts +736 -3
  31. package/src/queue.ts +346 -660
  32. package/src/test-util.ts +32 -0
  33. package/src/types.ts +451 -16
  34. package/src/wait.test.ts +698 -0
@@ -0,0 +1,533 @@
1
+ /**
2
+ * Lua scripts for atomic Redis operations.
3
+ *
4
+ * Key naming convention (all prefixed with the configurable keyPrefix, default "dq:"):
5
+ * dq:id_seq – INCR counter for auto-increment IDs
6
+ * dq:job:{id} – Hash with all job fields
7
+ * dq:queue – Sorted Set of ready-to-process job IDs (score = priority composite)
8
+ * dq:delayed – Sorted Set of future-scheduled job IDs (score = run_at ms)
9
+ * dq:retry – Sorted Set of retry-waiting job IDs (score = next_attempt_at ms)
10
+ * dq:status:{status} – Set of job IDs per status
11
+ * dq:type:{jobType} – Set of job IDs per type
12
+ * dq:tag:{tag} – Set of job IDs per tag
13
+ * dq:job:{id}:tags – Set of tags for a specific job
14
+ * dq:events:{id} – List of JSON event objects
15
+ * dq:idempotency:{key} – String mapping idempotency key → job ID
16
+ * dq:all – Sorted Set of all jobs (score = createdAt ms, for ordering)
17
+ * dq:event_id_seq – INCR counter for event IDs
18
+ */
19
+
20
+ // ─── Score helpers ──────────────────────────────────────────────────────
21
+ // For the ready queue we need: higher priority first, then earlier createdAt.
22
+ // Score = priority * 1e15 + (1e15 - createdAtMs)
23
+ // ZPOPMAX gives highest score → highest priority, earliest created.
24
+ const SCORE_RANGE = '1000000000000000'; // 1e15
25
+
26
+ /**
27
+ * ADD JOB
28
+ * KEYS: [prefix]
29
+ * ARGV: [jobType, payloadJson, maxAttempts, priority, runAtMs, timeoutMs,
30
+ * forceKillOnTimeout, tagsJson, idempotencyKey, nowMs]
31
+ * Returns: job ID (number)
32
+ */
33
+ export const ADD_JOB_SCRIPT = `
34
+ local prefix = KEYS[1]
35
+ local jobType = ARGV[1]
36
+ local payloadJson = ARGV[2]
37
+ local maxAttempts = tonumber(ARGV[3])
38
+ local priority = tonumber(ARGV[4])
39
+ local runAtMs = ARGV[5] -- "0" means now
40
+ local timeoutMs = ARGV[6] -- "null" string if not set
41
+ local forceKillOnTimeout = ARGV[7]
42
+ local tagsJson = ARGV[8] -- "null" or JSON array string
43
+ local idempotencyKey = ARGV[9] -- "null" string if not set
44
+ local nowMs = tonumber(ARGV[10])
45
+
46
+ -- Idempotency check
47
+ if idempotencyKey ~= "null" then
48
+ local existing = redis.call('GET', prefix .. 'idempotency:' .. idempotencyKey)
49
+ if existing then
50
+ return existing
51
+ end
52
+ end
53
+
54
+ -- Generate ID
55
+ local id = redis.call('INCR', prefix .. 'id_seq')
56
+ local jobKey = prefix .. 'job:' .. id
57
+ local runAt = runAtMs ~= "0" and tonumber(runAtMs) or nowMs
58
+
59
+ -- Store the job hash
60
+ redis.call('HMSET', jobKey,
61
+ 'id', id,
62
+ 'jobType', jobType,
63
+ 'payload', payloadJson,
64
+ 'status', 'pending',
65
+ 'maxAttempts', maxAttempts,
66
+ 'attempts', 0,
67
+ 'priority', priority,
68
+ 'runAt', runAt,
69
+ 'timeoutMs', timeoutMs,
70
+ 'forceKillOnTimeout', forceKillOnTimeout,
71
+ 'createdAt', nowMs,
72
+ 'updatedAt', nowMs,
73
+ 'lockedAt', 'null',
74
+ 'lockedBy', 'null',
75
+ 'nextAttemptAt', 'null',
76
+ 'pendingReason', 'null',
77
+ 'errorHistory', '[]',
78
+ 'failureReason', 'null',
79
+ 'completedAt', 'null',
80
+ 'startedAt', 'null',
81
+ 'lastRetriedAt', 'null',
82
+ 'lastFailedAt', 'null',
83
+ 'lastCancelledAt', 'null',
84
+ 'tags', tagsJson,
85
+ 'idempotencyKey', idempotencyKey
86
+ )
87
+
88
+ -- Status index
89
+ redis.call('SADD', prefix .. 'status:pending', id)
90
+
91
+ -- Type index
92
+ redis.call('SADD', prefix .. 'type:' .. jobType, id)
93
+
94
+ -- Tag indexes
95
+ if tagsJson ~= "null" then
96
+ local tags = cjson.decode(tagsJson)
97
+ for _, tag in ipairs(tags) do
98
+ redis.call('SADD', prefix .. 'tag:' .. tag, id)
99
+ end
100
+ -- Store tags for exact-match queries
101
+ for _, tag in ipairs(tags) do
102
+ redis.call('SADD', prefix .. 'job:' .. id .. ':tags', tag)
103
+ end
104
+ end
105
+
106
+ -- Idempotency mapping
107
+ if idempotencyKey ~= "null" then
108
+ redis.call('SET', prefix .. 'idempotency:' .. idempotencyKey, id)
109
+ end
110
+
111
+ -- All-jobs sorted set (for ordering by createdAt)
112
+ redis.call('ZADD', prefix .. 'all', nowMs, id)
113
+
114
+ -- Queue or delayed
115
+ if runAt <= nowMs then
116
+ -- Ready now: add to queue with priority score
117
+ local score = priority * ${SCORE_RANGE} + (${SCORE_RANGE} - nowMs)
118
+ redis.call('ZADD', prefix .. 'queue', score, id)
119
+ else
120
+ -- Future: add to delayed set
121
+ redis.call('ZADD', prefix .. 'delayed', runAt, id)
122
+ end
123
+
124
+ return id
125
+ `;
126
+
127
+ /**
128
+ * GET NEXT BATCH
129
+ * Atomically: move ready delayed/retry jobs into queue, then pop N jobs.
130
+ * KEYS: [prefix]
131
+ * ARGV: [workerId, batchSize, nowMs, jobTypeFilter]
132
+ * jobTypeFilter: "null" or a JSON array like ["email","sms"] or a string like "email"
133
+ * Returns: array of job field arrays (flat: [field1, val1, field2, val2, ...] per job)
134
+ */
135
+ export const GET_NEXT_BATCH_SCRIPT = `
136
+ local prefix = KEYS[1]
137
+ local workerId = ARGV[1]
138
+ local batchSize = tonumber(ARGV[2])
139
+ local nowMs = tonumber(ARGV[3])
140
+ local jobTypeFilter = ARGV[4] -- "null" or JSON array or single string
141
+
142
+ -- 1. Move ready delayed jobs into queue
143
+ local delayed = redis.call('ZRANGEBYSCORE', prefix .. 'delayed', '-inf', nowMs, 'LIMIT', 0, 200)
144
+ for _, jobId in ipairs(delayed) do
145
+ local jk = prefix .. 'job:' .. jobId
146
+ local status = redis.call('HGET', jk, 'status')
147
+ local attempts = tonumber(redis.call('HGET', jk, 'attempts'))
148
+ local maxAttempts = tonumber(redis.call('HGET', jk, 'maxAttempts'))
149
+ if status == 'pending' and attempts < maxAttempts then
150
+ local pri = tonumber(redis.call('HGET', jk, 'priority') or '0')
151
+ local ca = tonumber(redis.call('HGET', jk, 'createdAt'))
152
+ local score = pri * ${SCORE_RANGE} + (${SCORE_RANGE} - ca)
153
+ redis.call('ZADD', prefix .. 'queue', score, jobId)
154
+ end
155
+ redis.call('ZREM', prefix .. 'delayed', jobId)
156
+ end
157
+
158
+ -- 2. Move ready retry jobs into queue
159
+ local retries = redis.call('ZRANGEBYSCORE', prefix .. 'retry', '-inf', nowMs, 'LIMIT', 0, 200)
160
+ for _, jobId in ipairs(retries) do
161
+ local jk = prefix .. 'job:' .. jobId
162
+ local status = redis.call('HGET', jk, 'status')
163
+ local attempts = tonumber(redis.call('HGET', jk, 'attempts'))
164
+ local maxAttempts = tonumber(redis.call('HGET', jk, 'maxAttempts'))
165
+ if status == 'failed' and attempts < maxAttempts then
166
+ local pri = tonumber(redis.call('HGET', jk, 'priority') or '0')
167
+ local ca = tonumber(redis.call('HGET', jk, 'createdAt'))
168
+ local score = pri * ${SCORE_RANGE} + (${SCORE_RANGE} - ca)
169
+ redis.call('ZADD', prefix .. 'queue', score, jobId)
170
+ redis.call('SREM', prefix .. 'status:failed', jobId)
171
+ redis.call('SADD', prefix .. 'status:pending', jobId)
172
+ redis.call('HMSET', jk, 'status', 'pending')
173
+ end
174
+ redis.call('ZREM', prefix .. 'retry', jobId)
175
+ end
176
+
177
+ -- 3. Parse job type filter
178
+ local filterTypes = nil
179
+ if jobTypeFilter ~= "null" then
180
+ -- Could be a JSON array or a plain string
181
+ local ok, decoded = pcall(cjson.decode, jobTypeFilter)
182
+ if ok and type(decoded) == 'table' then
183
+ filterTypes = {}
184
+ for _, t in ipairs(decoded) do filterTypes[t] = true end
185
+ else
186
+ filterTypes = { [jobTypeFilter] = true }
187
+ end
188
+ end
189
+
190
+ -- 4. Pop candidates from queue (highest score first)
191
+ -- We pop more than batchSize because some may be filtered out
192
+ local popCount = batchSize * 3
193
+ local candidates = redis.call('ZPOPMAX', prefix .. 'queue', popCount)
194
+ -- candidates: [member1, score1, member2, score2, ...]
195
+
196
+ local results = {}
197
+ local jobsClaimed = 0
198
+ local putBack = {} -- {score, id} pairs to put back
199
+
200
+ for i = 1, #candidates, 2 do
201
+ local jobId = candidates[i]
202
+ local score = candidates[i + 1]
203
+ local jk = prefix .. 'job:' .. jobId
204
+
205
+ if jobsClaimed >= batchSize then
206
+ -- We have enough; put the rest back
207
+ table.insert(putBack, score)
208
+ table.insert(putBack, jobId)
209
+ else
210
+ -- Check job type filter
211
+ local jt = redis.call('HGET', jk, 'jobType')
212
+ if filterTypes and not filterTypes[jt] then
213
+ -- Doesn't match filter: put back
214
+ table.insert(putBack, score)
215
+ table.insert(putBack, jobId)
216
+ else
217
+ -- Check run_at
218
+ local runAt = tonumber(redis.call('HGET', jk, 'runAt'))
219
+ if runAt > nowMs then
220
+ -- Not ready yet: move to delayed
221
+ redis.call('ZADD', prefix .. 'delayed', runAt, jobId)
222
+ else
223
+ -- Claim this job
224
+ local attempts = tonumber(redis.call('HGET', jk, 'attempts'))
225
+ local startedAt = redis.call('HGET', jk, 'startedAt')
226
+ local lastRetriedAt = redis.call('HGET', jk, 'lastRetriedAt')
227
+ if startedAt == 'null' then startedAt = nowMs end
228
+ if attempts > 0 then lastRetriedAt = nowMs end
229
+
230
+ redis.call('HMSET', jk,
231
+ 'status', 'processing',
232
+ 'lockedAt', nowMs,
233
+ 'lockedBy', workerId,
234
+ 'attempts', attempts + 1,
235
+ 'updatedAt', nowMs,
236
+ 'pendingReason', 'null',
237
+ 'startedAt', startedAt,
238
+ 'lastRetriedAt', lastRetriedAt
239
+ )
240
+
241
+ -- Update status sets
242
+ redis.call('SREM', prefix .. 'status:pending', jobId)
243
+ redis.call('SADD', prefix .. 'status:processing', jobId)
244
+
245
+ -- Return job data as flat array
246
+ local data = redis.call('HGETALL', jk)
247
+ for _, v in ipairs(data) do
248
+ table.insert(results, v)
249
+ end
250
+ -- Separator
251
+ table.insert(results, '__JOB_SEP__')
252
+ jobsClaimed = jobsClaimed + 1
253
+ end
254
+ end
255
+ end
256
+ end
257
+
258
+ -- Put back jobs we didn't claim
259
+ if #putBack > 0 then
260
+ redis.call('ZADD', prefix .. 'queue', unpack(putBack))
261
+ end
262
+
263
+ return results
264
+ `;
265
+
266
+ /**
267
+ * COMPLETE JOB
268
+ * KEYS: [prefix]
269
+ * ARGV: [jobId, nowMs]
270
+ */
271
+ export const COMPLETE_JOB_SCRIPT = `
272
+ local prefix = KEYS[1]
273
+ local jobId = ARGV[1]
274
+ local nowMs = ARGV[2]
275
+ local jk = prefix .. 'job:' .. jobId
276
+
277
+ redis.call('HMSET', jk,
278
+ 'status', 'completed',
279
+ 'updatedAt', nowMs,
280
+ 'completedAt', nowMs
281
+ )
282
+ redis.call('SREM', prefix .. 'status:processing', jobId)
283
+ redis.call('SADD', prefix .. 'status:completed', jobId)
284
+
285
+ return 1
286
+ `;
287
+
288
+ /**
289
+ * FAIL JOB
290
+ * KEYS: [prefix]
291
+ * ARGV: [jobId, errorJson, failureReason, nowMs]
292
+ * errorJson: JSON array like [{"message":"...", "timestamp":"..."}]
293
+ */
294
+ export const FAIL_JOB_SCRIPT = `
295
+ local prefix = KEYS[1]
296
+ local jobId = ARGV[1]
297
+ local errorJson = ARGV[2]
298
+ local failureReason = ARGV[3]
299
+ local nowMs = tonumber(ARGV[4])
300
+ local jk = prefix .. 'job:' .. jobId
301
+
302
+ local attempts = tonumber(redis.call('HGET', jk, 'attempts'))
303
+ local maxAttempts = tonumber(redis.call('HGET', jk, 'maxAttempts'))
304
+
305
+ -- Compute next_attempt_at: 2^attempts minutes from now
306
+ local nextAttemptAt = 'null'
307
+ if attempts < maxAttempts then
308
+ local delayMs = math.pow(2, attempts) * 60000
309
+ nextAttemptAt = nowMs + delayMs
310
+ end
311
+
312
+ -- Append to error_history
313
+ local history = redis.call('HGET', jk, 'errorHistory') or '[]'
314
+ local ok, arr = pcall(cjson.decode, history)
315
+ if not ok then arr = {} end
316
+ local newErrors = cjson.decode(errorJson)
317
+ for _, e in ipairs(newErrors) do
318
+ table.insert(arr, e)
319
+ end
320
+
321
+ redis.call('HMSET', jk,
322
+ 'status', 'failed',
323
+ 'updatedAt', nowMs,
324
+ 'nextAttemptAt', tostring(nextAttemptAt),
325
+ 'errorHistory', cjson.encode(arr),
326
+ 'failureReason', failureReason,
327
+ 'lastFailedAt', nowMs
328
+ )
329
+ redis.call('SREM', prefix .. 'status:processing', jobId)
330
+ redis.call('SADD', prefix .. 'status:failed', jobId)
331
+
332
+ -- Schedule retry if applicable
333
+ if nextAttemptAt ~= 'null' then
334
+ redis.call('ZADD', prefix .. 'retry', nextAttemptAt, jobId)
335
+ end
336
+
337
+ return 1
338
+ `;
339
+
340
+ /**
341
+ * RETRY JOB
342
+ * KEYS: [prefix]
343
+ * ARGV: [jobId, nowMs]
344
+ */
345
+ export const RETRY_JOB_SCRIPT = `
346
+ local prefix = KEYS[1]
347
+ local jobId = ARGV[1]
348
+ local nowMs = tonumber(ARGV[2])
349
+ local jk = prefix .. 'job:' .. jobId
350
+
351
+ local oldStatus = redis.call('HGET', jk, 'status')
352
+
353
+ redis.call('HMSET', jk,
354
+ 'status', 'pending',
355
+ 'updatedAt', nowMs,
356
+ 'lockedAt', 'null',
357
+ 'lockedBy', 'null',
358
+ 'nextAttemptAt', nowMs,
359
+ 'lastRetriedAt', nowMs
360
+ )
361
+
362
+ -- Remove from old status, add to pending
363
+ if oldStatus then
364
+ redis.call('SREM', prefix .. 'status:' .. oldStatus, jobId)
365
+ end
366
+ redis.call('SADD', prefix .. 'status:pending', jobId)
367
+
368
+ -- Remove from retry sorted set if present
369
+ redis.call('ZREM', prefix .. 'retry', jobId)
370
+
371
+ -- Add to queue (ready now)
372
+ local priority = tonumber(redis.call('HGET', jk, 'priority') or '0')
373
+ local createdAt = tonumber(redis.call('HGET', jk, 'createdAt'))
374
+ local score = priority * ${SCORE_RANGE} + (${SCORE_RANGE} - createdAt)
375
+ redis.call('ZADD', prefix .. 'queue', score, jobId)
376
+
377
+ return 1
378
+ `;
379
+
380
+ /**
381
+ * CANCEL JOB (only if pending)
382
+ * KEYS: [prefix]
383
+ * ARGV: [jobId, nowMs]
384
+ */
385
+ export const CANCEL_JOB_SCRIPT = `
386
+ local prefix = KEYS[1]
387
+ local jobId = ARGV[1]
388
+ local nowMs = ARGV[2]
389
+ local jk = prefix .. 'job:' .. jobId
390
+
391
+ local status = redis.call('HGET', jk, 'status')
392
+ if status ~= 'pending' then return 0 end
393
+
394
+ redis.call('HMSET', jk,
395
+ 'status', 'cancelled',
396
+ 'updatedAt', nowMs,
397
+ 'lastCancelledAt', nowMs
398
+ )
399
+ redis.call('SREM', prefix .. 'status:pending', jobId)
400
+ redis.call('SADD', prefix .. 'status:cancelled', jobId)
401
+ -- Remove from queue / delayed
402
+ redis.call('ZREM', prefix .. 'queue', jobId)
403
+ redis.call('ZREM', prefix .. 'delayed', jobId)
404
+
405
+ return 1
406
+ `;
407
+
408
+ /**
409
+ * PROLONG JOB
410
+ * KEYS: [prefix]
411
+ * ARGV: [jobId, nowMs]
412
+ */
413
+ export const PROLONG_JOB_SCRIPT = `
414
+ local prefix = KEYS[1]
415
+ local jobId = ARGV[1]
416
+ local nowMs = ARGV[2]
417
+ local jk = prefix .. 'job:' .. jobId
418
+
419
+ local status = redis.call('HGET', jk, 'status')
420
+ if status ~= 'processing' then return 0 end
421
+
422
+ redis.call('HMSET', jk,
423
+ 'lockedAt', nowMs,
424
+ 'updatedAt', nowMs
425
+ )
426
+
427
+ return 1
428
+ `;
429
+
430
+ /**
431
+ * RECLAIM STUCK JOBS
432
+ * KEYS: [prefix]
433
+ * ARGV: [maxAgeMs, nowMs]
434
+ * Returns: count of reclaimed jobs
435
+ */
436
+ export const RECLAIM_STUCK_JOBS_SCRIPT = `
437
+ local prefix = KEYS[1]
438
+ local maxAgeMs = tonumber(ARGV[1])
439
+ local nowMs = tonumber(ARGV[2])
440
+
441
+ local processing = redis.call('SMEMBERS', prefix .. 'status:processing')
442
+ local count = 0
443
+
444
+ for _, jobId in ipairs(processing) do
445
+ local jk = prefix .. 'job:' .. jobId
446
+ local lockedAt = redis.call('HGET', jk, 'lockedAt')
447
+ if lockedAt and lockedAt ~= 'null' then
448
+ local lockedAtNum = tonumber(lockedAt)
449
+ if lockedAtNum then
450
+ -- Use the greater of maxAgeMs and the job's own timeoutMs
451
+ local jobMaxAge = maxAgeMs
452
+ local timeoutMs = redis.call('HGET', jk, 'timeoutMs')
453
+ if timeoutMs and timeoutMs ~= 'null' then
454
+ local tMs = tonumber(timeoutMs)
455
+ if tMs and tMs > jobMaxAge then
456
+ jobMaxAge = tMs
457
+ end
458
+ end
459
+ local cutoff = nowMs - jobMaxAge
460
+ if lockedAtNum < cutoff then
461
+ redis.call('HMSET', jk,
462
+ 'status', 'pending',
463
+ 'lockedAt', 'null',
464
+ 'lockedBy', 'null',
465
+ 'updatedAt', nowMs
466
+ )
467
+ redis.call('SREM', prefix .. 'status:processing', jobId)
468
+ redis.call('SADD', prefix .. 'status:pending', jobId)
469
+
470
+ -- Re-add to queue
471
+ local priority = tonumber(redis.call('HGET', jk, 'priority') or '0')
472
+ local createdAt = tonumber(redis.call('HGET', jk, 'createdAt'))
473
+ local score = priority * ${SCORE_RANGE} + (${SCORE_RANGE} - createdAt)
474
+ redis.call('ZADD', prefix .. 'queue', score, jobId)
475
+
476
+ count = count + 1
477
+ end
478
+ end
479
+ end
480
+ end
481
+
482
+ return count
483
+ `;
484
+
485
+ /**
486
+ * CLEANUP OLD JOBS
487
+ * KEYS: [prefix]
488
+ * ARGV: [cutoffMs]
489
+ * Returns: count of deleted jobs
490
+ */
491
+ export const CLEANUP_OLD_JOBS_SCRIPT = `
492
+ local prefix = KEYS[1]
493
+ local cutoffMs = tonumber(ARGV[1])
494
+
495
+ local completed = redis.call('SMEMBERS', prefix .. 'status:completed')
496
+ local count = 0
497
+
498
+ for _, jobId in ipairs(completed) do
499
+ local jk = prefix .. 'job:' .. jobId
500
+ local updatedAt = tonumber(redis.call('HGET', jk, 'updatedAt'))
501
+ if updatedAt and updatedAt < cutoffMs then
502
+ -- Remove all indexes
503
+ local jobType = redis.call('HGET', jk, 'jobType')
504
+ local tagsJson = redis.call('HGET', jk, 'tags')
505
+ local idempotencyKey = redis.call('HGET', jk, 'idempotencyKey')
506
+
507
+ redis.call('DEL', jk)
508
+ redis.call('SREM', prefix .. 'status:completed', jobId)
509
+ redis.call('ZREM', prefix .. 'all', jobId)
510
+ if jobType then
511
+ redis.call('SREM', prefix .. 'type:' .. jobType, jobId)
512
+ end
513
+ if tagsJson and tagsJson ~= 'null' then
514
+ local ok, tags = pcall(cjson.decode, tagsJson)
515
+ if ok and type(tags) == 'table' then
516
+ for _, tag in ipairs(tags) do
517
+ redis.call('SREM', prefix .. 'tag:' .. tag, jobId)
518
+ end
519
+ end
520
+ redis.call('DEL', prefix .. 'job:' .. jobId .. ':tags')
521
+ end
522
+ if idempotencyKey and idempotencyKey ~= 'null' then
523
+ redis.call('DEL', prefix .. 'idempotency:' .. idempotencyKey)
524
+ end
525
+ -- Delete events
526
+ redis.call('DEL', prefix .. 'events:' .. jobId)
527
+
528
+ count = count + 1
529
+ end
530
+ end
531
+
532
+ return count
533
+ `;