@nicnocquee/dataqueue 1.25.0 → 1.26.0-beta.20260223202259

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (59)
  1. package/ai/build-docs-content.ts +96 -0
  2. package/ai/build-llms-full.ts +42 -0
  3. package/ai/docs-content.json +284 -0
  4. package/ai/rules/advanced.md +150 -0
  5. package/ai/rules/basic.md +159 -0
  6. package/ai/rules/react-dashboard.md +83 -0
  7. package/ai/skills/dataqueue-advanced/SKILL.md +370 -0
  8. package/ai/skills/dataqueue-core/SKILL.md +234 -0
  9. package/ai/skills/dataqueue-react/SKILL.md +189 -0
  10. package/dist/cli.cjs +1149 -14
  11. package/dist/cli.cjs.map +1 -1
  12. package/dist/cli.d.cts +66 -1
  13. package/dist/cli.d.ts +66 -1
  14. package/dist/cli.js +1146 -13
  15. package/dist/cli.js.map +1 -1
  16. package/dist/index.cjs +3236 -1237
  17. package/dist/index.cjs.map +1 -1
  18. package/dist/index.d.cts +697 -23
  19. package/dist/index.d.ts +697 -23
  20. package/dist/index.js +3235 -1238
  21. package/dist/index.js.map +1 -1
  22. package/dist/mcp-server.cjs +186 -0
  23. package/dist/mcp-server.cjs.map +1 -0
  24. package/dist/mcp-server.d.cts +32 -0
  25. package/dist/mcp-server.d.ts +32 -0
  26. package/dist/mcp-server.js +175 -0
  27. package/dist/mcp-server.js.map +1 -0
  28. package/migrations/1781200000004_create_cron_schedules_table.sql +33 -0
  29. package/migrations/1781200000005_add_retry_config_to_job_queue.sql +17 -0
  30. package/package.json +24 -21
  31. package/src/backend.ts +170 -5
  32. package/src/backends/postgres.ts +992 -63
  33. package/src/backends/redis-scripts.ts +358 -26
  34. package/src/backends/redis.test.ts +1532 -0
  35. package/src/backends/redis.ts +993 -35
  36. package/src/cli.test.ts +82 -6
  37. package/src/cli.ts +73 -10
  38. package/src/cron.test.ts +126 -0
  39. package/src/cron.ts +40 -0
  40. package/src/db-util.ts +1 -1
  41. package/src/index.test.ts +1034 -11
  42. package/src/index.ts +267 -39
  43. package/src/init-command.test.ts +449 -0
  44. package/src/init-command.ts +709 -0
  45. package/src/install-mcp-command.test.ts +216 -0
  46. package/src/install-mcp-command.ts +185 -0
  47. package/src/install-rules-command.test.ts +218 -0
  48. package/src/install-rules-command.ts +233 -0
  49. package/src/install-skills-command.test.ts +176 -0
  50. package/src/install-skills-command.ts +124 -0
  51. package/src/mcp-server.test.ts +162 -0
  52. package/src/mcp-server.ts +231 -0
  53. package/src/processor.ts +104 -113
  54. package/src/queue.test.ts +465 -0
  55. package/src/queue.ts +34 -252
  56. package/src/supervisor.test.ts +340 -0
  57. package/src/supervisor.ts +177 -0
  58. package/src/types.ts +476 -12
  59. package/LICENSE +0 -21
@@ -15,6 +15,10 @@
15
15
  * dq:idempotency:{key} – String mapping idempotency key → job ID
16
16
  * dq:all – Sorted Set of all jobs (score = createdAt ms, for ordering)
17
17
  * dq:event_id_seq – INCR counter for event IDs
18
+ * dq:waiting – Sorted Set of time-based waiting job IDs (score = waitUntil ms)
19
+ * dq:waitpoint:{id} – Hash with waitpoint fields (id, jobId, status, output, timeoutAt, etc.)
20
+ * dq:waitpoint_timeout – Sorted Set of waitpoint IDs with timeouts (score = timeoutAt ms)
21
+ * dq:waitpoint_id_seq – INCR counter for waitpoint sequence (used if needed)
18
22
  */
19
23
 
20
24
  // ─── Score helpers ──────────────────────────────────────────────────────
@@ -27,7 +31,8 @@ const SCORE_RANGE = '1000000000000000'; // 1e15
27
31
  * ADD JOB
28
32
  * KEYS: [prefix]
29
33
  * ARGV: [jobType, payloadJson, maxAttempts, priority, runAtMs, timeoutMs,
30
- * forceKillOnTimeout, tagsJson, idempotencyKey, nowMs]
34
+ * forceKillOnTimeout, tagsJson, idempotencyKey, nowMs,
35
+ * retryDelay, retryBackoff, retryDelayMax]
31
36
  * Returns: job ID (number)
32
37
  */
33
38
  export const ADD_JOB_SCRIPT = `
@@ -42,6 +47,9 @@ local forceKillOnTimeout = ARGV[7]
42
47
  local tagsJson = ARGV[8] -- "null" or JSON array string
43
48
  local idempotencyKey = ARGV[9] -- "null" string if not set
44
49
  local nowMs = tonumber(ARGV[10])
50
+ local retryDelay = ARGV[11] -- "null" or seconds string
51
+ local retryBackoff = ARGV[12] -- "null" or "true"/"false"
52
+ local retryDelayMax = ARGV[13] -- "null" or seconds string
45
53
 
46
54
  -- Idempotency check
47
55
  if idempotencyKey ~= "null" then
@@ -82,7 +90,13 @@ redis.call('HMSET', jobKey,
82
90
  'lastFailedAt', 'null',
83
91
  'lastCancelledAt', 'null',
84
92
  'tags', tagsJson,
85
- 'idempotencyKey', idempotencyKey
93
+ 'idempotencyKey', idempotencyKey,
94
+ 'waitUntil', 'null',
95
+ 'waitTokenId', 'null',
96
+ 'stepData', 'null',
97
+ 'retryDelay', retryDelay,
98
+ 'retryBackoff', retryBackoff,
99
+ 'retryDelayMax', retryDelayMax
86
100
  )
87
101
 
88
102
  -- Status index
@@ -124,6 +138,129 @@ end
124
138
  return id
125
139
  `;
126
140
 
141
+ /**
142
+ * ADD JOBS (batch)
143
+ * KEYS: [prefix]
144
+ * ARGV: [jobsJson, nowMs]
145
+ * jobsJson is a JSON array of objects, each with:
146
+ * jobType, payload (already JSON string), maxAttempts, priority,
147
+ * runAtMs, timeoutMs, forceKillOnTimeout, tags (JSON or "null"),
148
+ * idempotencyKey
149
+ * Returns: array of job IDs (one per input job, in order)
150
+ */
151
+ export const ADD_JOBS_SCRIPT = `
152
+ local prefix = KEYS[1]
153
+ local jobsJson = ARGV[1]
154
+ local nowMs = tonumber(ARGV[2])
155
+
156
+ local jobs = cjson.decode(jobsJson)
157
+ local results = {}
158
+
159
+ for i, job in ipairs(jobs) do
160
+ local jobType = job.jobType
161
+ local payloadJson = job.payload
162
+ local maxAttempts = tonumber(job.maxAttempts)
163
+ local priority = tonumber(job.priority)
164
+ local runAtMs = tostring(job.runAtMs)
165
+ local timeoutMs = tostring(job.timeoutMs)
166
+ local forceKillOnTimeout = tostring(job.forceKillOnTimeout)
167
+ local tagsJson = tostring(job.tags)
168
+ local idempotencyKey = tostring(job.idempotencyKey)
169
+ local retryDelay = tostring(job.retryDelay)
170
+ local retryBackoff = tostring(job.retryBackoff)
171
+ local retryDelayMax = tostring(job.retryDelayMax)
172
+
173
+ -- Idempotency check
174
+ local skip = false
175
+ if idempotencyKey ~= "null" then
176
+ local existing = redis.call('GET', prefix .. 'idempotency:' .. idempotencyKey)
177
+ if existing then
178
+ results[i] = tonumber(existing)
179
+ skip = true
180
+ end
181
+ end
182
+
183
+ if not skip then
184
+ -- Generate ID
185
+ local id = redis.call('INCR', prefix .. 'id_seq')
186
+ local jobKey = prefix .. 'job:' .. id
187
+ local runAt = runAtMs ~= "0" and tonumber(runAtMs) or nowMs
188
+
189
+ -- Store the job hash
190
+ redis.call('HMSET', jobKey,
191
+ 'id', id,
192
+ 'jobType', jobType,
193
+ 'payload', payloadJson,
194
+ 'status', 'pending',
195
+ 'maxAttempts', maxAttempts,
196
+ 'attempts', 0,
197
+ 'priority', priority,
198
+ 'runAt', runAt,
199
+ 'timeoutMs', timeoutMs,
200
+ 'forceKillOnTimeout', forceKillOnTimeout,
201
+ 'createdAt', nowMs,
202
+ 'updatedAt', nowMs,
203
+ 'lockedAt', 'null',
204
+ 'lockedBy', 'null',
205
+ 'nextAttemptAt', 'null',
206
+ 'pendingReason', 'null',
207
+ 'errorHistory', '[]',
208
+ 'failureReason', 'null',
209
+ 'completedAt', 'null',
210
+ 'startedAt', 'null',
211
+ 'lastRetriedAt', 'null',
212
+ 'lastFailedAt', 'null',
213
+ 'lastCancelledAt', 'null',
214
+ 'tags', tagsJson,
215
+ 'idempotencyKey', idempotencyKey,
216
+ 'waitUntil', 'null',
217
+ 'waitTokenId', 'null',
218
+ 'stepData', 'null',
219
+ 'retryDelay', retryDelay,
220
+ 'retryBackoff', retryBackoff,
221
+ 'retryDelayMax', retryDelayMax
222
+ )
223
+
224
+ -- Status index
225
+ redis.call('SADD', prefix .. 'status:pending', id)
226
+
227
+ -- Type index
228
+ redis.call('SADD', prefix .. 'type:' .. jobType, id)
229
+
230
+ -- Tag indexes
231
+ if tagsJson ~= "null" then
232
+ local tags = cjson.decode(tagsJson)
233
+ for _, tag in ipairs(tags) do
234
+ redis.call('SADD', prefix .. 'tag:' .. tag, id)
235
+ end
236
+ for _, tag in ipairs(tags) do
237
+ redis.call('SADD', prefix .. 'job:' .. id .. ':tags', tag)
238
+ end
239
+ end
240
+
241
+ -- Idempotency mapping
242
+ if idempotencyKey ~= "null" then
243
+ redis.call('SET', prefix .. 'idempotency:' .. idempotencyKey, id)
244
+ end
245
+
246
+ -- All-jobs sorted set
247
+ redis.call('ZADD', prefix .. 'all', nowMs, id)
248
+
249
+ -- Queue or delayed
250
+ if runAt <= nowMs then
251
+ local score = priority * ${SCORE_RANGE} + (${SCORE_RANGE} - nowMs)
252
+ redis.call('ZADD', prefix .. 'queue', score, id)
253
+ else
254
+ redis.call('ZADD', prefix .. 'delayed', runAt, id)
255
+ end
256
+
257
+ results[i] = id
258
+ end
259
+ end
260
+
261
+ return results
262
+ `;
263
+
127
264
  /**
128
265
  * GET NEXT BATCH
129
266
  * Atomically: move ready delayed/retry jobs into queue, then pop N jobs.
@@ -174,7 +311,25 @@ for _, jobId in ipairs(retries) do
174
311
  redis.call('ZREM', prefix .. 'retry', jobId)
175
312
  end
176
313
 
177
- -- 3. Parse job type filter
314
+ -- 3. Move ready waiting jobs (time-based, no token) into queue
315
+ local waitingJobs = redis.call('ZRANGEBYSCORE', prefix .. 'waiting', '-inf', nowMs, 'LIMIT', 0, 200)
316
+ for _, jobId in ipairs(waitingJobs) do
317
+ local jk = prefix .. 'job:' .. jobId
318
+ local status = redis.call('HGET', jk, 'status')
319
+ local waitTokenId = redis.call('HGET', jk, 'waitTokenId')
320
+ if status == 'waiting' and (waitTokenId == false or waitTokenId == 'null') then
321
+ local pri = tonumber(redis.call('HGET', jk, 'priority') or '0')
322
+ local ca = tonumber(redis.call('HGET', jk, 'createdAt'))
323
+ local score = pri * ${SCORE_RANGE} + (${SCORE_RANGE} - ca)
324
+ redis.call('ZADD', prefix .. 'queue', score, jobId)
325
+ redis.call('SREM', prefix .. 'status:waiting', jobId)
326
+ redis.call('SADD', prefix .. 'status:pending', jobId)
327
+ redis.call('HMSET', jk, 'status', 'pending', 'waitUntil', 'null')
328
+ end
329
+ redis.call('ZREM', prefix .. 'waiting', jobId)
330
+ end
331
+
332
+ -- 4. Parse job type filter
178
333
  local filterTypes = nil
179
334
  if jobTypeFilter ~= "null" then
180
335
  -- Could be a JSON array or a plain string
@@ -187,7 +342,7 @@ if jobTypeFilter ~= "null" then
187
342
  end
188
343
  end
189
344
 
190
- -- 4. Pop candidates from queue (highest score first)
345
+ -- 5. Pop candidates from queue (highest score first)
191
346
  -- We pop more than batchSize because some may be filtered out
192
347
  local popCount = batchSize * 3
193
348
  local candidates = redis.call('ZPOPMAX', prefix .. 'queue', popCount)
@@ -277,7 +432,10 @@ local jk = prefix .. 'job:' .. jobId
277
432
  redis.call('HMSET', jk,
278
433
  'status', 'completed',
279
434
  'updatedAt', nowMs,
280
- 'completedAt', nowMs
435
+ 'completedAt', nowMs,
436
+ 'stepData', 'null',
437
+ 'waitUntil', 'null',
438
+ 'waitTokenId', 'null'
281
439
  )
282
440
  redis.call('SREM', prefix .. 'status:processing', jobId)
283
441
  redis.call('SADD', prefix .. 'status:completed', jobId)
@@ -302,11 +460,38 @@ local jk = prefix .. 'job:' .. jobId
302
460
  local attempts = tonumber(redis.call('HGET', jk, 'attempts'))
303
461
  local maxAttempts = tonumber(redis.call('HGET', jk, 'maxAttempts'))
304
462
 
305
- -- Compute next_attempt_at: 2^attempts minutes from now
463
+ -- Read per-job retry config (may be "null")
464
+ local rdRaw = redis.call('HGET', jk, 'retryDelay')
465
+ local rbRaw = redis.call('HGET', jk, 'retryBackoff')
466
+ local rmRaw = redis.call('HGET', jk, 'retryDelayMax')
467
+
306
468
  local nextAttemptAt = 'null'
307
469
  if attempts < maxAttempts then
308
- local delayMs = math.pow(2, attempts) * 60000
309
- nextAttemptAt = nowMs + delayMs
470
+ local allNull = (rdRaw == 'null' or rdRaw == false)
471
+ and (rbRaw == 'null' or rbRaw == false)
472
+ and (rmRaw == 'null' or rmRaw == false)
473
+ if allNull then
474
+ -- Legacy formula: 2^attempts minutes
475
+ local delayMs = math.pow(2, attempts) * 60000
476
+ nextAttemptAt = nowMs + delayMs
477
+ else
478
+ local retryDelaySec = 60
479
+ if rdRaw and rdRaw ~= 'null' then retryDelaySec = tonumber(rdRaw) end
480
+ local useBackoff = true
481
+ if rbRaw and rbRaw ~= 'null' then useBackoff = (rbRaw == 'true') end
482
+ local maxDelaySec = nil
483
+ if rmRaw and rmRaw ~= 'null' then maxDelaySec = tonumber(rmRaw) end
484
+
485
+ local delaySec
486
+ if useBackoff then
487
+ delaySec = retryDelaySec * math.pow(2, attempts)
488
+ if maxDelaySec then delaySec = math.min(delaySec, maxDelaySec) end
489
+ delaySec = delaySec * (0.5 + 0.5 * math.random())
490
+ else
491
+ delaySec = retryDelaySec
492
+ end
493
+ nextAttemptAt = nowMs + math.floor(delaySec * 1000)
494
+ end
310
495
  end
311
496
 
312
497
  -- Append to error_history
@@ -338,7 +523,7 @@ return 1
338
523
  `;
339
524
 
340
525
  /**
341
- * RETRY JOB
526
+ * RETRY JOB (only if failed or processing)
342
527
  * KEYS: [prefix]
343
528
  * ARGV: [jobId, nowMs]
344
529
  */
@@ -349,6 +534,7 @@ local nowMs = tonumber(ARGV[2])
349
534
  local jk = prefix .. 'job:' .. jobId
350
535
 
351
536
  local oldStatus = redis.call('HGET', jk, 'status')
537
+ if oldStatus ~= 'failed' and oldStatus ~= 'processing' then return 0 end
352
538
 
353
539
  redis.call('HMSET', jk,
354
540
  'status', 'pending',
@@ -360,9 +546,7 @@ redis.call('HMSET', jk,
360
546
  )
361
547
 
362
548
  -- Remove from old status, add to pending
363
- if oldStatus then
364
- redis.call('SREM', prefix .. 'status:' .. oldStatus, jobId)
365
- end
549
+ redis.call('SREM', prefix .. 'status:' .. oldStatus, jobId)
366
550
  redis.call('SADD', prefix .. 'status:pending', jobId)
367
551
 
368
552
  -- Remove from retry sorted set if present
@@ -378,7 +562,7 @@ return 1
378
562
  `;
379
563
 
380
564
  /**
381
- * CANCEL JOB (only if pending)
565
+ * CANCEL JOB (only if pending or waiting)
382
566
  * KEYS: [prefix]
383
567
  * ARGV: [jobId, nowMs]
384
568
  */
@@ -389,18 +573,21 @@ local nowMs = ARGV[2]
389
573
  local jk = prefix .. 'job:' .. jobId
390
574
 
391
575
  local status = redis.call('HGET', jk, 'status')
392
- if status ~= 'pending' then return 0 end
576
+ if status ~= 'pending' and status ~= 'waiting' then return 0 end
393
577
 
394
578
  redis.call('HMSET', jk,
395
579
  'status', 'cancelled',
396
580
  'updatedAt', nowMs,
397
- 'lastCancelledAt', nowMs
581
+ 'lastCancelledAt', nowMs,
582
+ 'waitUntil', 'null',
583
+ 'waitTokenId', 'null'
398
584
  )
399
- redis.call('SREM', prefix .. 'status:pending', jobId)
585
+ redis.call('SREM', prefix .. 'status:' .. status, jobId)
400
586
  redis.call('SADD', prefix .. 'status:cancelled', jobId)
401
- -- Remove from queue / delayed
587
+ -- Remove from queue / delayed / waiting
402
588
  redis.call('ZREM', prefix .. 'queue', jobId)
403
589
  redis.call('ZREM', prefix .. 'delayed', jobId)
590
+ redis.call('ZREM', prefix .. 'waiting', jobId)
404
591
 
405
592
  return 1
406
593
  `;
@@ -483,23 +670,27 @@ return count
483
670
  `;
484
671
 
485
672
  /**
486
- * CLEANUP OLD JOBS
673
+ * CLEANUP OLD JOBS (batched)
674
+ *
675
+ * Processes a batch of candidate job IDs from the completed set, deleting
676
+ * those whose updatedAt is older than the cutoff. This script is called
677
+ * repeatedly from TypeScript with batches obtained via SSCAN to avoid
678
+ * loading the entire completed set into memory at once.
679
+ *
487
680
  * KEYS: [prefix]
488
- * ARGV: [cutoffMs]
489
- * Returns: count of deleted jobs
681
+ * ARGV: [cutoffMs, id1, id2, ...]
682
+ * Returns: count of deleted jobs in this batch
490
683
  */
491
- export const CLEANUP_OLD_JOBS_SCRIPT = `
684
+ export const CLEANUP_OLD_JOBS_BATCH_SCRIPT = `
492
685
  local prefix = KEYS[1]
493
686
  local cutoffMs = tonumber(ARGV[1])
494
-
495
- local completed = redis.call('SMEMBERS', prefix .. 'status:completed')
496
687
  local count = 0
497
688
 
498
- for _, jobId in ipairs(completed) do
689
+ for i = 2, #ARGV do
690
+ local jobId = ARGV[i]
499
691
  local jk = prefix .. 'job:' .. jobId
500
692
  local updatedAt = tonumber(redis.call('HGET', jk, 'updatedAt'))
501
693
  if updatedAt and updatedAt < cutoffMs then
502
- -- Remove all indexes
503
694
  local jobType = redis.call('HGET', jk, 'jobType')
504
695
  local tagsJson = redis.call('HGET', jk, 'tags')
505
696
  local idempotencyKey = redis.call('HGET', jk, 'idempotencyKey')
@@ -522,7 +713,6 @@ for _, jobId in ipairs(completed) do
522
713
  if idempotencyKey and idempotencyKey ~= 'null' then
523
714
  redis.call('DEL', prefix .. 'idempotency:' .. idempotencyKey)
524
715
  end
525
- -- Delete events
526
716
  redis.call('DEL', prefix .. 'events:' .. jobId)
527
717
 
528
718
  count = count + 1
@@ -531,3 +721,145 @@ end
531
721
 
532
722
  return count
533
723
  `;
724
+
725
/**
 * WAIT JOB — Transition a job from 'processing' to 'waiting'.
 *
 * Clears the worker lock fields (lockedAt/lockedBy), records the wait
 * metadata (waitUntil, waitTokenId, stepData) on the job hash, moves the
 * job between the status index sets, and — for time-based waits — adds the
 * job to the 'waiting' sorted set (score = waitUntil ms) so the fetch
 * script can wake it once the deadline passes.
 *
 * KEYS: [prefix]
 * ARGV: [jobId, waitUntilMs, waitTokenId, stepDataJson, nowMs]
 *   waitUntilMs: timestamp ms or "null"
 *   waitTokenId: string or "null"
 * Returns: 1 if successful, 0 if job was not in 'processing' state
 */
export const WAIT_JOB_SCRIPT = `
local prefix = KEYS[1]
local jobId = ARGV[1]
local waitUntilMs = ARGV[2]
local waitTokenId = ARGV[3]
local stepDataJson = ARGV[4]
local nowMs = ARGV[5]
local jk = prefix .. 'job:' .. jobId

local status = redis.call('HGET', jk, 'status')
if status ~= 'processing' then return 0 end

redis.call('HMSET', jk,
'status', 'waiting',
'waitUntil', waitUntilMs,
'waitTokenId', waitTokenId,
'stepData', stepDataJson,
'lockedAt', 'null',
'lockedBy', 'null',
'updatedAt', nowMs
)
redis.call('SREM', prefix .. 'status:processing', jobId)
redis.call('SADD', prefix .. 'status:waiting', jobId)

-- Add to waiting sorted set if time-based wait
if waitUntilMs ~= 'null' then
redis.call('ZADD', prefix .. 'waiting', tonumber(waitUntilMs), jobId)
end

return 1
`;
764
+
765
/**
 * COMPLETE WAITPOINT — Mark a waitpoint as completed and resume associated job.
 *
 * Only acts on waitpoints whose status is 'waiting' (returns 0 otherwise).
 * Writes the output and completedAt onto the waitpoint hash; then, if the
 * linked job is still in 'waiting' status, clears its wait fields, moves it
 * back to 'pending' in the status index sets, removes it from the 'waiting'
 * sorted set, and re-scores it into the ready queue using the same
 * priority/createdAt scoring applied at enqueue time. Note that the job's
 * stepData is deliberately left in place so the resumed run can read it.
 *
 * KEYS: [prefix]
 * ARGV: [tokenId, outputJson, nowMs]
 *   outputJson: JSON string or "null"
 * Returns: 1 if successful, 0 if waitpoint not found or already completed
 */
export const COMPLETE_WAITPOINT_SCRIPT = `
local prefix = KEYS[1]
local tokenId = ARGV[1]
local outputJson = ARGV[2]
local nowMs = ARGV[3]
local wpk = prefix .. 'waitpoint:' .. tokenId

local wpStatus = redis.call('HGET', wpk, 'status')
if not wpStatus or wpStatus ~= 'waiting' then return 0 end

redis.call('HMSET', wpk,
'status', 'completed',
'output', outputJson,
'completedAt', nowMs
)

-- Move associated job back to pending
local jobId = redis.call('HGET', wpk, 'jobId')
if jobId and jobId ~= 'null' then
local jk = prefix .. 'job:' .. jobId
local jobStatus = redis.call('HGET', jk, 'status')
if jobStatus == 'waiting' then
redis.call('HMSET', jk,
'status', 'pending',
'waitTokenId', 'null',
'waitUntil', 'null',
'updatedAt', nowMs
)
redis.call('SREM', prefix .. 'status:waiting', jobId)
redis.call('SADD', prefix .. 'status:pending', jobId)
redis.call('ZREM', prefix .. 'waiting', jobId)

-- Re-add to queue
local priority = tonumber(redis.call('HGET', jk, 'priority') or '0')
local createdAt = tonumber(redis.call('HGET', jk, 'createdAt'))
local score = priority * ${SCORE_RANGE} + (${SCORE_RANGE} - createdAt)
redis.call('ZADD', prefix .. 'queue', score, jobId)
end
end

return 1
`;
814
+
815
/**
 * EXPIRE TIMED OUT WAITPOINTS — Expire waitpoints past their timeout and resume jobs.
 *
 * Scans the 'waitpoint_timeout' sorted set for token IDs whose timeoutAt
 * score is <= nowMs. Each still-'waiting' waitpoint is marked 'timed_out';
 * if its linked job is also still 'waiting', the job's wait fields are
 * cleared, it is moved back to 'pending', and it is re-scored into the
 * ready queue (same priority/createdAt scoring as enqueue). Every scanned
 * ID is removed from 'waitpoint_timeout' regardless of whether it was
 * still waiting, so already-completed waitpoints are garbage-collected
 * from the set without counting toward the result.
 *
 * NOTE(review): unlike COMPLETE WAITPOINT, no timestamp is written to the
 * waitpoint hash when it transitions to 'timed_out' — confirm whether any
 * reader expects a completedAt/timedOutAt field here.
 *
 * KEYS: [prefix]
 * ARGV: [nowMs]
 * Returns: count of expired waitpoints
 */
export const EXPIRE_TIMED_OUT_WAITPOINTS_SCRIPT = `
local prefix = KEYS[1]
local nowMs = tonumber(ARGV[1])

local expiredIds = redis.call('ZRANGEBYSCORE', prefix .. 'waitpoint_timeout', '-inf', nowMs)
local count = 0

for _, tokenId in ipairs(expiredIds) do
local wpk = prefix .. 'waitpoint:' .. tokenId
local wpStatus = redis.call('HGET', wpk, 'status')
if wpStatus == 'waiting' then
redis.call('HMSET', wpk,
'status', 'timed_out'
)

-- Move associated job back to pending
local jobId = redis.call('HGET', wpk, 'jobId')
if jobId and jobId ~= 'null' then
local jk = prefix .. 'job:' .. jobId
local jobStatus = redis.call('HGET', jk, 'status')
if jobStatus == 'waiting' then
redis.call('HMSET', jk,
'status', 'pending',
'waitTokenId', 'null',
'waitUntil', 'null',
'updatedAt', nowMs
)
redis.call('SREM', prefix .. 'status:waiting', jobId)
redis.call('SADD', prefix .. 'status:pending', jobId)
redis.call('ZREM', prefix .. 'waiting', jobId)

local priority = tonumber(redis.call('HGET', jk, 'priority') or '0')
local createdAt = tonumber(redis.call('HGET', jk, 'createdAt'))
local score = priority * ${SCORE_RANGE} + (${SCORE_RANGE} - createdAt)
redis.call('ZADD', prefix .. 'queue', score, jobId)
end
end

count = count + 1
end
redis.call('ZREM', prefix .. 'waitpoint_timeout', tokenId)
end

return count
`;