@nicnocquee/dataqueue 1.34.0 → 1.35.0-beta.20260224110011

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -31,7 +31,8 @@ const SCORE_RANGE = '1000000000000000'; // 1e15
31
31
  * ADD JOB
32
32
  * KEYS: [prefix]
33
33
  * ARGV: [jobType, payloadJson, maxAttempts, priority, runAtMs, timeoutMs,
34
- * forceKillOnTimeout, tagsJson, idempotencyKey, nowMs]
34
+ * forceKillOnTimeout, tagsJson, idempotencyKey, nowMs,
35
+ * retryDelay, retryBackoff, retryDelayMax, groupId, groupTier]
35
36
  * Returns: job ID (number)
36
37
  */
37
38
  export const ADD_JOB_SCRIPT = `
@@ -46,6 +47,11 @@ local forceKillOnTimeout = ARGV[7]
46
47
  local tagsJson = ARGV[8] -- "null" or JSON array string
47
48
  local idempotencyKey = ARGV[9] -- "null" string if not set
48
49
  local nowMs = tonumber(ARGV[10])
50
+ local retryDelay = ARGV[11] -- "null" or seconds string
51
+ local retryBackoff = ARGV[12] -- "null" or "true"/"false"
52
+ local retryDelayMax = ARGV[13] -- "null" or seconds string
53
+ local groupId = ARGV[14] -- "null" or group ID
54
+ local groupTier = ARGV[15] -- "null" or group tier
49
55
 
50
56
  -- Idempotency check
51
57
  if idempotencyKey ~= "null" then
@@ -89,7 +95,12 @@ redis.call('HMSET', jobKey,
89
95
  'idempotencyKey', idempotencyKey,
90
96
  'waitUntil', 'null',
91
97
  'waitTokenId', 'null',
92
- 'stepData', 'null'
98
+ 'stepData', 'null',
99
+ 'retryDelay', retryDelay,
100
+ 'retryBackoff', retryBackoff,
101
+ 'retryDelayMax', retryDelayMax,
102
+ 'groupId', groupId,
103
+ 'groupTier', groupTier
93
104
  )
94
105
 
95
106
  -- Status index
@@ -131,11 +142,138 @@ end
131
142
  return id
132
143
  `;
133
144
 
145
+ /**
146
+ * ADD JOBS (batch)
147
+ * KEYS: [prefix]
148
+ * ARGV: [jobsJson, nowMs]
149
+ * jobsJson is a JSON array of objects, each with:
150
+ * jobType, payload (already JSON string), maxAttempts, priority,
151
+ * runAtMs, timeoutMs, forceKillOnTimeout, tags (JSON or "null"),
152
+ * idempotencyKey, retryDelay, retryBackoff, retryDelayMax, groupId, groupTier
153
+ * Returns: array of job IDs (one per input job, in order)
154
+ */
155
+ export const ADD_JOBS_SCRIPT = `
156
+ local prefix = KEYS[1]
157
+ local jobsJson = ARGV[1]
158
+ local nowMs = tonumber(ARGV[2])
159
+
160
+ local jobs = cjson.decode(jobsJson)
161
+ local results = {}
162
+
163
+ for i, job in ipairs(jobs) do
164
+ local jobType = job.jobType
165
+ local payloadJson = job.payload
166
+ local maxAttempts = tonumber(job.maxAttempts)
167
+ local priority = tonumber(job.priority)
168
+ local runAtMs = tostring(job.runAtMs)
169
+ local timeoutMs = tostring(job.timeoutMs)
170
+ local forceKillOnTimeout = tostring(job.forceKillOnTimeout)
171
+ local tagsJson = tostring(job.tags)
172
+ local idempotencyKey = tostring(job.idempotencyKey)
173
+ local retryDelay = tostring(job.retryDelay)
174
+ local retryBackoff = tostring(job.retryBackoff)
175
+ local retryDelayMax = tostring(job.retryDelayMax)
176
+ local groupId = tostring(job.groupId)
177
+ local groupTier = tostring(job.groupTier)
178
+
179
+ -- Idempotency check
180
+ local skip = false
181
+ if idempotencyKey ~= "null" then
182
+ local existing = redis.call('GET', prefix .. 'idempotency:' .. idempotencyKey)
183
+ if existing then
184
+ results[i] = tonumber(existing)
185
+ skip = true
186
+ end
187
+ end
188
+
189
+ if not skip then
190
+ -- Generate ID
191
+ local id = redis.call('INCR', prefix .. 'id_seq')
192
+ local jobKey = prefix .. 'job:' .. id
193
+ local runAt = runAtMs ~= "0" and tonumber(runAtMs) or nowMs
194
+
195
+ -- Store the job hash
196
+ redis.call('HMSET', jobKey,
197
+ 'id', id,
198
+ 'jobType', jobType,
199
+ 'payload', payloadJson,
200
+ 'status', 'pending',
201
+ 'maxAttempts', maxAttempts,
202
+ 'attempts', 0,
203
+ 'priority', priority,
204
+ 'runAt', runAt,
205
+ 'timeoutMs', timeoutMs,
206
+ 'forceKillOnTimeout', forceKillOnTimeout,
207
+ 'createdAt', nowMs,
208
+ 'updatedAt', nowMs,
209
+ 'lockedAt', 'null',
210
+ 'lockedBy', 'null',
211
+ 'nextAttemptAt', 'null',
212
+ 'pendingReason', 'null',
213
+ 'errorHistory', '[]',
214
+ 'failureReason', 'null',
215
+ 'completedAt', 'null',
216
+ 'startedAt', 'null',
217
+ 'lastRetriedAt', 'null',
218
+ 'lastFailedAt', 'null',
219
+ 'lastCancelledAt', 'null',
220
+ 'tags', tagsJson,
221
+ 'idempotencyKey', idempotencyKey,
222
+ 'waitUntil', 'null',
223
+ 'waitTokenId', 'null',
224
+ 'stepData', 'null',
225
+ 'retryDelay', retryDelay,
226
+ 'retryBackoff', retryBackoff,
227
+ 'retryDelayMax', retryDelayMax,
228
+ 'groupId', groupId,
229
+ 'groupTier', groupTier
230
+ )
231
+
232
+ -- Status index
233
+ redis.call('SADD', prefix .. 'status:pending', id)
234
+
235
+ -- Type index
236
+ redis.call('SADD', prefix .. 'type:' .. jobType, id)
237
+
238
+ -- Tag indexes
239
+ if tagsJson ~= "null" then
240
+ local tags = cjson.decode(tagsJson)
241
+ for _, tag in ipairs(tags) do
242
+ redis.call('SADD', prefix .. 'tag:' .. tag, id)
243
+ end
244
+ for _, tag in ipairs(tags) do
245
+ redis.call('SADD', prefix .. 'job:' .. id .. ':tags', tag)
246
+ end
247
+ end
248
+
249
+ -- Idempotency mapping
250
+ if idempotencyKey ~= "null" then
251
+ redis.call('SET', prefix .. 'idempotency:' .. idempotencyKey, id)
252
+ end
253
+
254
+ -- All-jobs sorted set
255
+ redis.call('ZADD', prefix .. 'all', nowMs, id)
256
+
257
+ -- Queue or delayed
258
+ if runAt <= nowMs then
259
+ local score = priority * ${SCORE_RANGE} + (${SCORE_RANGE} - nowMs)
260
+ redis.call('ZADD', prefix .. 'queue', score, id)
261
+ else
262
+ redis.call('ZADD', prefix .. 'delayed', runAt, id)
263
+ end
264
+
265
+ results[i] = id
266
+ end
267
+ end
268
+
269
+ return results
270
+ `;
271
+
134
272
  /**
135
273
  * GET NEXT BATCH
136
274
  * Atomically: move ready delayed/retry jobs into queue, then pop N jobs.
137
275
  * KEYS: [prefix]
138
- * ARGV: [workerId, batchSize, nowMs, jobTypeFilter]
276
+ * ARGV: [workerId, batchSize, nowMs, jobTypeFilter, groupConcurrency]
139
277
  * jobTypeFilter: "null" or a JSON array like ["email","sms"] or a string like "email"
140
278
  * Returns: array of job field arrays (flat: [field1, val1, field2, val2, ...] per job)
141
279
  */
@@ -145,6 +283,12 @@ local workerId = ARGV[1]
145
283
  local batchSize = tonumber(ARGV[2])
146
284
  local nowMs = tonumber(ARGV[3])
147
285
  local jobTypeFilter = ARGV[4] -- "null" or JSON array or single string
286
+ local groupConcurrencyRaw = ARGV[5] -- "null" or positive integer
287
+ local groupConcurrency = nil
288
+ if groupConcurrencyRaw ~= "null" then
289
+ groupConcurrency = tonumber(groupConcurrencyRaw)
290
+ end
291
+ local groupActiveKey = prefix .. 'group:active'
148
292
 
149
293
  -- 1. Move ready delayed jobs into queue
150
294
  local delayed = redis.call('ZRANGEBYSCORE', prefix .. 'delayed', '-inf', nowMs, 'LIMIT', 0, 200)
@@ -245,36 +389,53 @@ for i = 1, #candidates, 2 do
245
389
  -- Not ready yet: move to delayed
246
390
  redis.call('ZADD', prefix .. 'delayed', runAt, jobId)
247
391
  else
248
- -- Claim this job
249
- local attempts = tonumber(redis.call('HGET', jk, 'attempts'))
250
- local startedAt = redis.call('HGET', jk, 'startedAt')
251
- local lastRetriedAt = redis.call('HGET', jk, 'lastRetriedAt')
252
- if startedAt == 'null' then startedAt = nowMs end
253
- if attempts > 0 then lastRetriedAt = nowMs end
254
-
255
- redis.call('HMSET', jk,
256
- 'status', 'processing',
257
- 'lockedAt', nowMs,
258
- 'lockedBy', workerId,
259
- 'attempts', attempts + 1,
260
- 'updatedAt', nowMs,
261
- 'pendingReason', 'null',
262
- 'startedAt', startedAt,
263
- 'lastRetriedAt', lastRetriedAt
264
- )
265
-
266
- -- Update status sets
267
- redis.call('SREM', prefix .. 'status:pending', jobId)
268
- redis.call('SADD', prefix .. 'status:processing', jobId)
392
+ local groupId = redis.call('HGET', jk, 'groupId')
393
+ local hasGroup = groupId and groupId ~= 'null'
394
+ local canClaim = true
395
+ if hasGroup and groupConcurrency then
396
+ local activeCount = tonumber(redis.call('HGET', groupActiveKey, groupId) or '0')
397
+ if activeCount >= groupConcurrency then
398
+ table.insert(putBack, score)
399
+ table.insert(putBack, jobId)
400
+ canClaim = false
401
+ end
402
+ end
269
403
 
270
- -- Return job data as flat array
271
- local data = redis.call('HGETALL', jk)
272
- for _, v in ipairs(data) do
273
- table.insert(results, v)
404
+ if canClaim then
405
+ -- Claim this job
406
+ local attempts = tonumber(redis.call('HGET', jk, 'attempts'))
407
+ local startedAt = redis.call('HGET', jk, 'startedAt')
408
+ local lastRetriedAt = redis.call('HGET', jk, 'lastRetriedAt')
409
+ if startedAt == 'null' then startedAt = nowMs end
410
+ if attempts > 0 then lastRetriedAt = nowMs end
411
+
412
+ redis.call('HMSET', jk,
413
+ 'status', 'processing',
414
+ 'lockedAt', nowMs,
415
+ 'lockedBy', workerId,
416
+ 'attempts', attempts + 1,
417
+ 'updatedAt', nowMs,
418
+ 'pendingReason', 'null',
419
+ 'startedAt', startedAt,
420
+ 'lastRetriedAt', lastRetriedAt
421
+ )
422
+
423
+ -- Update status sets
424
+ redis.call('SREM', prefix .. 'status:pending', jobId)
425
+ redis.call('SADD', prefix .. 'status:processing', jobId)
426
+ if hasGroup and groupConcurrency then
427
+ redis.call('HINCRBY', groupActiveKey, groupId, 1)
428
+ end
429
+
430
+ -- Return job data as flat array
431
+ local data = redis.call('HGETALL', jk)
432
+ for _, v in ipairs(data) do
433
+ table.insert(results, v)
434
+ end
435
+ -- Separator
436
+ table.insert(results, '__JOB_SEP__')
437
+ jobsClaimed = jobsClaimed + 1
274
438
  end
275
- -- Separator
276
- table.insert(results, '__JOB_SEP__')
277
- jobsClaimed = jobsClaimed + 1
278
439
  end
279
440
  end
280
441
  end
@@ -291,24 +452,40 @@ return results
291
452
  /**
292
453
  * COMPLETE JOB
293
454
  * KEYS: [prefix]
294
- * ARGV: [jobId, nowMs]
455
+ * ARGV: [jobId, nowMs, outputJson]
295
456
  */
296
457
  export const COMPLETE_JOB_SCRIPT = `
297
458
  local prefix = KEYS[1]
298
459
  local jobId = ARGV[1]
299
460
  local nowMs = ARGV[2]
461
+ local outputJson = ARGV[3]
300
462
  local jk = prefix .. 'job:' .. jobId
463
+ local groupId = redis.call('HGET', jk, 'groupId')
301
464
 
302
- redis.call('HMSET', jk,
465
+ local fields = {
303
466
  'status', 'completed',
304
467
  'updatedAt', nowMs,
305
468
  'completedAt', nowMs,
306
469
  'stepData', 'null',
307
470
  'waitUntil', 'null',
308
471
  'waitTokenId', 'null'
309
- )
472
+ }
473
+
474
+ if outputJson ~= '__NONE__' then
475
+ fields[#fields + 1] = 'output'
476
+ fields[#fields + 1] = outputJson
477
+ end
478
+
479
+ redis.call('HMSET', jk, unpack(fields))
310
480
  redis.call('SREM', prefix .. 'status:processing', jobId)
311
481
  redis.call('SADD', prefix .. 'status:completed', jobId)
482
+ if groupId and groupId ~= 'null' then
483
+ local activeKey = prefix .. 'group:active'
484
+ local remaining = redis.call('HINCRBY', activeKey, groupId, -1)
485
+ if tonumber(remaining) <= 0 then
486
+ redis.call('HDEL', activeKey, groupId)
487
+ end
488
+ end
312
489
 
313
490
  return 1
314
491
  `;
@@ -326,15 +503,43 @@ local errorJson = ARGV[2]
326
503
  local failureReason = ARGV[3]
327
504
  local nowMs = tonumber(ARGV[4])
328
505
  local jk = prefix .. 'job:' .. jobId
506
+ local groupId = redis.call('HGET', jk, 'groupId')
329
507
 
330
508
  local attempts = tonumber(redis.call('HGET', jk, 'attempts'))
331
509
  local maxAttempts = tonumber(redis.call('HGET', jk, 'maxAttempts'))
332
510
 
333
- -- Compute next_attempt_at: 2^attempts minutes from now
511
+ -- Read per-job retry config (may be "null")
512
+ local rdRaw = redis.call('HGET', jk, 'retryDelay')
513
+ local rbRaw = redis.call('HGET', jk, 'retryBackoff')
514
+ local rmRaw = redis.call('HGET', jk, 'retryDelayMax')
515
+
334
516
  local nextAttemptAt = 'null'
335
517
  if attempts < maxAttempts then
336
- local delayMs = math.pow(2, attempts) * 60000
337
- nextAttemptAt = nowMs + delayMs
518
+ local allNull = (rdRaw == 'null' or rdRaw == false)
519
+ and (rbRaw == 'null' or rbRaw == false)
520
+ and (rmRaw == 'null' or rmRaw == false)
521
+ if allNull then
522
+ -- Legacy formula: 2^attempts minutes
523
+ local delayMs = math.pow(2, attempts) * 60000
524
+ nextAttemptAt = nowMs + delayMs
525
+ else
526
+ local retryDelaySec = 60
527
+ if rdRaw and rdRaw ~= 'null' then retryDelaySec = tonumber(rdRaw) end
528
+ local useBackoff = true
529
+ if rbRaw and rbRaw ~= 'null' then useBackoff = (rbRaw == 'true') end
530
+ local maxDelaySec = nil
531
+ if rmRaw and rmRaw ~= 'null' then maxDelaySec = tonumber(rmRaw) end
532
+
533
+ local delaySec
534
+ if useBackoff then
535
+ delaySec = retryDelaySec * math.pow(2, attempts)
536
+ if maxDelaySec then delaySec = math.min(delaySec, maxDelaySec) end
537
+ delaySec = delaySec * (0.5 + 0.5 * math.random())
538
+ else
539
+ delaySec = retryDelaySec
540
+ end
541
+ nextAttemptAt = nowMs + math.floor(delaySec * 1000)
542
+ end
338
543
  end
339
544
 
340
545
  -- Append to error_history
@@ -356,6 +561,13 @@ redis.call('HMSET', jk,
356
561
  )
357
562
  redis.call('SREM', prefix .. 'status:processing', jobId)
358
563
  redis.call('SADD', prefix .. 'status:failed', jobId)
564
+ if groupId and groupId ~= 'null' then
565
+ local activeKey = prefix .. 'group:active'
566
+ local remaining = redis.call('HINCRBY', activeKey, groupId, -1)
567
+ if tonumber(remaining) <= 0 then
568
+ redis.call('HDEL', activeKey, groupId)
569
+ end
570
+ end
359
571
 
360
572
  -- Schedule retry if applicable
361
573
  if nextAttemptAt ~= 'null' then
@@ -378,6 +590,7 @@ local jk = prefix .. 'job:' .. jobId
378
590
 
379
591
  local oldStatus = redis.call('HGET', jk, 'status')
380
592
  if oldStatus ~= 'failed' and oldStatus ~= 'processing' then return 0 end
593
+ local groupId = redis.call('HGET', jk, 'groupId')
381
594
 
382
595
  redis.call('HMSET', jk,
383
596
  'status', 'pending',
@@ -391,6 +604,13 @@ redis.call('HMSET', jk,
391
604
  -- Remove from old status, add to pending
392
605
  redis.call('SREM', prefix .. 'status:' .. oldStatus, jobId)
393
606
  redis.call('SADD', prefix .. 'status:pending', jobId)
607
+ if oldStatus == 'processing' and groupId and groupId ~= 'null' then
608
+ local activeKey = prefix .. 'group:active'
609
+ local remaining = redis.call('HINCRBY', activeKey, groupId, -1)
610
+ if tonumber(remaining) <= 0 then
611
+ redis.call('HDEL', activeKey, groupId)
612
+ end
613
+ end
394
614
 
395
615
  -- Remove from retry sorted set if present
396
616
  redis.call('ZREM', prefix .. 'retry', jobId)
@@ -496,6 +716,14 @@ for _, jobId in ipairs(processing) do
496
716
  )
497
717
  redis.call('SREM', prefix .. 'status:processing', jobId)
498
718
  redis.call('SADD', prefix .. 'status:pending', jobId)
719
+ local groupId = redis.call('HGET', jk, 'groupId')
720
+ if groupId and groupId ~= 'null' then
721
+ local activeKey = prefix .. 'group:active'
722
+ local remaining = redis.call('HINCRBY', activeKey, groupId, -1)
723
+ if tonumber(remaining) <= 0 then
724
+ redis.call('HDEL', activeKey, groupId)
725
+ end
726
+ end
499
727
 
500
728
  -- Re-add to queue
501
729
  local priority = tonumber(redis.call('HGET', jk, 'priority') or '0')
@@ -584,6 +812,7 @@ local jk = prefix .. 'job:' .. jobId
584
812
 
585
813
  local status = redis.call('HGET', jk, 'status')
586
814
  if status ~= 'processing' then return 0 end
815
+ local groupId = redis.call('HGET', jk, 'groupId')
587
816
 
588
817
  redis.call('HMSET', jk,
589
818
  'status', 'waiting',
@@ -596,6 +825,13 @@ redis.call('HMSET', jk,
596
825
  )
597
826
  redis.call('SREM', prefix .. 'status:processing', jobId)
598
827
  redis.call('SADD', prefix .. 'status:waiting', jobId)
828
+ if groupId and groupId ~= 'null' then
829
+ local activeKey = prefix .. 'group:active'
830
+ local remaining = redis.call('HINCRBY', activeKey, groupId, -1)
831
+ if tonumber(remaining) <= 0 then
832
+ redis.call('HDEL', activeKey, groupId)
833
+ end
834
+ end
599
835
 
600
836
  -- Add to waiting sorted set if time-based wait
601
837
  if waitUntilMs ~= 'null' then