@nicnocquee/dataqueue 1.31.0 → 1.32.0

This diff compares the contents of two publicly released versions of this package, as published to one of the supported registries. It is provided for informational purposes only and reflects the changes between the package versions as they appear in the public registry.
@@ -15,6 +15,10 @@
15
15
  * dq:idempotency:{key} – String mapping idempotency key → job ID
16
16
  * dq:all – Sorted Set of all jobs (score = createdAt ms, for ordering)
17
17
  * dq:event_id_seq – INCR counter for event IDs
18
+ * dq:waiting – Sorted Set of time-based waiting job IDs (score = waitUntil ms)
19
+ * dq:waitpoint:{id} – Hash with waitpoint fields (id, jobId, status, output, timeoutAt, etc.)
20
+ * dq:waitpoint_timeout – Sorted Set of waitpoint IDs with timeouts (score = timeoutAt ms)
21
+ * dq:waitpoint_id_seq – INCR counter for waitpoint sequence (used if needed)
18
22
  */
19
23
 
20
24
  // ─── Score helpers ──────────────────────────────────────────────────────
@@ -82,7 +86,10 @@ redis.call('HMSET', jobKey,
82
86
  'lastFailedAt', 'null',
83
87
  'lastCancelledAt', 'null',
84
88
  'tags', tagsJson,
85
- 'idempotencyKey', idempotencyKey
89
+ 'idempotencyKey', idempotencyKey,
90
+ 'waitUntil', 'null',
91
+ 'waitTokenId', 'null',
92
+ 'stepData', 'null'
86
93
  )
87
94
 
88
95
  -- Status index
@@ -174,7 +181,25 @@ for _, jobId in ipairs(retries) do
174
181
  redis.call('ZREM', prefix .. 'retry', jobId)
175
182
  end
176
183
 
177
- -- 3. Parse job type filter
184
+ -- 3. Move ready waiting jobs (time-based, no token) into queue
185
+ local waitingJobs = redis.call('ZRANGEBYSCORE', prefix .. 'waiting', '-inf', nowMs, 'LIMIT', 0, 200)
186
+ for _, jobId in ipairs(waitingJobs) do
187
+ local jk = prefix .. 'job:' .. jobId
188
+ local status = redis.call('HGET', jk, 'status')
189
+ local waitTokenId = redis.call('HGET', jk, 'waitTokenId')
190
+ if status == 'waiting' and (waitTokenId == false or waitTokenId == 'null') then
191
+ local pri = tonumber(redis.call('HGET', jk, 'priority') or '0')
192
+ local ca = tonumber(redis.call('HGET', jk, 'createdAt'))
193
+ local score = pri * ${SCORE_RANGE} + (${SCORE_RANGE} - ca)
194
+ redis.call('ZADD', prefix .. 'queue', score, jobId)
195
+ redis.call('SREM', prefix .. 'status:waiting', jobId)
196
+ redis.call('SADD', prefix .. 'status:pending', jobId)
197
+ redis.call('HMSET', jk, 'status', 'pending', 'waitUntil', 'null')
198
+ end
199
+ redis.call('ZREM', prefix .. 'waiting', jobId)
200
+ end
201
+
202
+ -- 4. Parse job type filter
178
203
  local filterTypes = nil
179
204
  if jobTypeFilter ~= "null" then
180
205
  -- Could be a JSON array or a plain string
@@ -187,7 +212,7 @@ if jobTypeFilter ~= "null" then
187
212
  end
188
213
  end
189
214
 
190
- -- 4. Pop candidates from queue (highest score first)
215
+ -- 5. Pop candidates from queue (highest score first)
191
216
  -- We pop more than batchSize because some may be filtered out
192
217
  local popCount = batchSize * 3
193
218
  local candidates = redis.call('ZPOPMAX', prefix .. 'queue', popCount)
@@ -277,7 +302,10 @@ local jk = prefix .. 'job:' .. jobId
277
302
  redis.call('HMSET', jk,
278
303
  'status', 'completed',
279
304
  'updatedAt', nowMs,
280
- 'completedAt', nowMs
305
+ 'completedAt', nowMs,
306
+ 'stepData', 'null',
307
+ 'waitUntil', 'null',
308
+ 'waitTokenId', 'null'
281
309
  )
282
310
  redis.call('SREM', prefix .. 'status:processing', jobId)
283
311
  redis.call('SADD', prefix .. 'status:completed', jobId)
@@ -338,7 +366,7 @@ return 1
338
366
  `;
339
367
 
340
368
  /**
341
- * RETRY JOB
369
+ * RETRY JOB (only if failed or processing)
342
370
  * KEYS: [prefix]
343
371
  * ARGV: [jobId, nowMs]
344
372
  */
@@ -349,6 +377,7 @@ local nowMs = tonumber(ARGV[2])
349
377
  local jk = prefix .. 'job:' .. jobId
350
378
 
351
379
  local oldStatus = redis.call('HGET', jk, 'status')
380
+ if oldStatus ~= 'failed' and oldStatus ~= 'processing' then return 0 end
352
381
 
353
382
  redis.call('HMSET', jk,
354
383
  'status', 'pending',
@@ -360,9 +389,7 @@ redis.call('HMSET', jk,
360
389
  )
361
390
 
362
391
  -- Remove from old status, add to pending
363
- if oldStatus then
364
- redis.call('SREM', prefix .. 'status:' .. oldStatus, jobId)
365
- end
392
+ redis.call('SREM', prefix .. 'status:' .. oldStatus, jobId)
366
393
  redis.call('SADD', prefix .. 'status:pending', jobId)
367
394
 
368
395
  -- Remove from retry sorted set if present
@@ -378,7 +405,7 @@ return 1
378
405
  `;
379
406
 
380
407
  /**
381
- * CANCEL JOB (only if pending)
408
+ * CANCEL JOB (only if pending or waiting)
382
409
  * KEYS: [prefix]
383
410
  * ARGV: [jobId, nowMs]
384
411
  */
@@ -389,18 +416,21 @@ local nowMs = ARGV[2]
389
416
  local jk = prefix .. 'job:' .. jobId
390
417
 
391
418
  local status = redis.call('HGET', jk, 'status')
392
- if status ~= 'pending' then return 0 end
419
+ if status ~= 'pending' and status ~= 'waiting' then return 0 end
393
420
 
394
421
  redis.call('HMSET', jk,
395
422
  'status', 'cancelled',
396
423
  'updatedAt', nowMs,
397
- 'lastCancelledAt', nowMs
424
+ 'lastCancelledAt', nowMs,
425
+ 'waitUntil', 'null',
426
+ 'waitTokenId', 'null'
398
427
  )
399
- redis.call('SREM', prefix .. 'status:pending', jobId)
428
+ redis.call('SREM', prefix .. 'status:' .. status, jobId)
400
429
  redis.call('SADD', prefix .. 'status:cancelled', jobId)
401
- -- Remove from queue / delayed
430
+ -- Remove from queue / delayed / waiting
402
431
  redis.call('ZREM', prefix .. 'queue', jobId)
403
432
  redis.call('ZREM', prefix .. 'delayed', jobId)
433
+ redis.call('ZREM', prefix .. 'waiting', jobId)
404
434
 
405
435
  return 1
406
436
  `;
@@ -483,23 +513,27 @@ return count
483
513
  `;
484
514
 
485
515
  /**
486
- * CLEANUP OLD JOBS
516
+ * CLEANUP OLD JOBS (batched)
517
+ *
518
+ * Processes a batch of candidate job IDs from the completed set, deleting
519
+ * those whose updatedAt is older than the cutoff. This script is called
520
+ * repeatedly from TypeScript with batches obtained via SSCAN to avoid
521
+ * loading the entire completed set into memory at once.
522
+ *
487
523
  * KEYS: [prefix]
488
- * ARGV: [cutoffMs]
489
- * Returns: count of deleted jobs
524
+ * ARGV: [cutoffMs, id1, id2, ...]
525
+ * Returns: count of deleted jobs in this batch
490
526
  */
491
- export const CLEANUP_OLD_JOBS_SCRIPT = `
527
+ export const CLEANUP_OLD_JOBS_BATCH_SCRIPT = `
492
528
  local prefix = KEYS[1]
493
529
  local cutoffMs = tonumber(ARGV[1])
494
-
495
- local completed = redis.call('SMEMBERS', prefix .. 'status:completed')
496
530
  local count = 0
497
531
 
498
- for _, jobId in ipairs(completed) do
532
+ for i = 2, #ARGV do
533
+ local jobId = ARGV[i]
499
534
  local jk = prefix .. 'job:' .. jobId
500
535
  local updatedAt = tonumber(redis.call('HGET', jk, 'updatedAt'))
501
536
  if updatedAt and updatedAt < cutoffMs then
502
- -- Remove all indexes
503
537
  local jobType = redis.call('HGET', jk, 'jobType')
504
538
  local tagsJson = redis.call('HGET', jk, 'tags')
505
539
  local idempotencyKey = redis.call('HGET', jk, 'idempotencyKey')
@@ -522,7 +556,6 @@ for _, jobId in ipairs(completed) do
522
556
  if idempotencyKey and idempotencyKey ~= 'null' then
523
557
  redis.call('DEL', prefix .. 'idempotency:' .. idempotencyKey)
524
558
  end
525
- -- Delete events
526
559
  redis.call('DEL', prefix .. 'events:' .. jobId)
527
560
 
528
561
  count = count + 1
@@ -531,3 +564,145 @@ end
531
564
 
532
565
  return count
533
566
  `;
567
+
568
+ /**
569
+ * WAIT JOB — Transition a job from 'processing' to 'waiting'.
570
+ * KEYS: [prefix]
571
+ * ARGV: [jobId, waitUntilMs, waitTokenId, stepDataJson, nowMs]
572
+ * waitUntilMs: timestamp ms or "null"
573
+ * waitTokenId: string or "null"
574
+ * Returns: 1 if successful, 0 if job was not in 'processing' state
575
+ */
576
+ export const WAIT_JOB_SCRIPT = `
577
+ local prefix = KEYS[1]
578
+ local jobId = ARGV[1]
579
+ local waitUntilMs = ARGV[2]
580
+ local waitTokenId = ARGV[3]
581
+ local stepDataJson = ARGV[4]
582
+ local nowMs = ARGV[5]
583
+ local jk = prefix .. 'job:' .. jobId
584
+
585
+ local status = redis.call('HGET', jk, 'status')
586
+ if status ~= 'processing' then return 0 end
587
+
588
+ redis.call('HMSET', jk,
589
+ 'status', 'waiting',
590
+ 'waitUntil', waitUntilMs,
591
+ 'waitTokenId', waitTokenId,
592
+ 'stepData', stepDataJson,
593
+ 'lockedAt', 'null',
594
+ 'lockedBy', 'null',
595
+ 'updatedAt', nowMs
596
+ )
597
+ redis.call('SREM', prefix .. 'status:processing', jobId)
598
+ redis.call('SADD', prefix .. 'status:waiting', jobId)
599
+
600
+ -- Add to waiting sorted set if time-based wait
601
+ if waitUntilMs ~= 'null' then
602
+ redis.call('ZADD', prefix .. 'waiting', tonumber(waitUntilMs), jobId)
603
+ end
604
+
605
+ return 1
606
+ `;
607
+
608
+ /**
609
+ * COMPLETE WAITPOINT — Mark a waitpoint as completed and resume associated job.
610
+ * KEYS: [prefix]
611
+ * ARGV: [tokenId, outputJson, nowMs]
612
+ * outputJson: JSON string or "null"
613
+ * Returns: 1 if successful, 0 if waitpoint not found or already completed
614
+ */
615
+ export const COMPLETE_WAITPOINT_SCRIPT = `
616
+ local prefix = KEYS[1]
617
+ local tokenId = ARGV[1]
618
+ local outputJson = ARGV[2]
619
+ local nowMs = ARGV[3]
620
+ local wpk = prefix .. 'waitpoint:' .. tokenId
621
+
622
+ local wpStatus = redis.call('HGET', wpk, 'status')
623
+ if not wpStatus or wpStatus ~= 'waiting' then return 0 end
624
+
625
+ redis.call('HMSET', wpk,
626
+ 'status', 'completed',
627
+ 'output', outputJson,
628
+ 'completedAt', nowMs
629
+ )
630
+
631
+ -- Move associated job back to pending
632
+ local jobId = redis.call('HGET', wpk, 'jobId')
633
+ if jobId and jobId ~= 'null' then
634
+ local jk = prefix .. 'job:' .. jobId
635
+ local jobStatus = redis.call('HGET', jk, 'status')
636
+ if jobStatus == 'waiting' then
637
+ redis.call('HMSET', jk,
638
+ 'status', 'pending',
639
+ 'waitTokenId', 'null',
640
+ 'waitUntil', 'null',
641
+ 'updatedAt', nowMs
642
+ )
643
+ redis.call('SREM', prefix .. 'status:waiting', jobId)
644
+ redis.call('SADD', prefix .. 'status:pending', jobId)
645
+ redis.call('ZREM', prefix .. 'waiting', jobId)
646
+
647
+ -- Re-add to queue
648
+ local priority = tonumber(redis.call('HGET', jk, 'priority') or '0')
649
+ local createdAt = tonumber(redis.call('HGET', jk, 'createdAt'))
650
+ local score = priority * ${SCORE_RANGE} + (${SCORE_RANGE} - createdAt)
651
+ redis.call('ZADD', prefix .. 'queue', score, jobId)
652
+ end
653
+ end
654
+
655
+ return 1
656
+ `;
657
+
658
+ /**
659
+ * EXPIRE TIMED OUT WAITPOINTS — Expire waitpoints past their timeout and resume jobs.
660
+ * KEYS: [prefix]
661
+ * ARGV: [nowMs]
662
+ * Returns: count of expired waitpoints
663
+ */
664
+ export const EXPIRE_TIMED_OUT_WAITPOINTS_SCRIPT = `
665
+ local prefix = KEYS[1]
666
+ local nowMs = tonumber(ARGV[1])
667
+
668
+ local expiredIds = redis.call('ZRANGEBYSCORE', prefix .. 'waitpoint_timeout', '-inf', nowMs)
669
+ local count = 0
670
+
671
+ for _, tokenId in ipairs(expiredIds) do
672
+ local wpk = prefix .. 'waitpoint:' .. tokenId
673
+ local wpStatus = redis.call('HGET', wpk, 'status')
674
+ if wpStatus == 'waiting' then
675
+ redis.call('HMSET', wpk,
676
+ 'status', 'timed_out'
677
+ )
678
+
679
+ -- Move associated job back to pending
680
+ local jobId = redis.call('HGET', wpk, 'jobId')
681
+ if jobId and jobId ~= 'null' then
682
+ local jk = prefix .. 'job:' .. jobId
683
+ local jobStatus = redis.call('HGET', jk, 'status')
684
+ if jobStatus == 'waiting' then
685
+ redis.call('HMSET', jk,
686
+ 'status', 'pending',
687
+ 'waitTokenId', 'null',
688
+ 'waitUntil', 'null',
689
+ 'updatedAt', nowMs
690
+ )
691
+ redis.call('SREM', prefix .. 'status:waiting', jobId)
692
+ redis.call('SADD', prefix .. 'status:pending', jobId)
693
+ redis.call('ZREM', prefix .. 'waiting', jobId)
694
+
695
+ local priority = tonumber(redis.call('HGET', jk, 'priority') or '0')
696
+ local createdAt = tonumber(redis.call('HGET', jk, 'createdAt'))
697
+ local score = priority * ${SCORE_RANGE} + (${SCORE_RANGE} - createdAt)
698
+ redis.call('ZADD', prefix .. 'queue', score, jobId)
699
+ end
700
+ end
701
+
702
+ count = count + 1
703
+ end
704
+ redis.call('ZREM', prefix .. 'waitpoint_timeout', tokenId)
705
+ end
706
+
707
+ return count
708
+ `;