groupmq-plus 1.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (40)
  1. package/LICENSE +59 -0
  2. package/README.md +722 -0
  3. package/dist/index.cjs +2567 -0
  4. package/dist/index.cjs.map +1 -0
  5. package/dist/index.d.cts +1300 -0
  6. package/dist/index.d.ts +1300 -0
  7. package/dist/index.js +2557 -0
  8. package/dist/index.js.map +1 -0
  9. package/dist/lua/change-delay.lua +62 -0
  10. package/dist/lua/check-stalled.lua +86 -0
  11. package/dist/lua/clean-status.lua +64 -0
  12. package/dist/lua/cleanup-poisoned-group.lua +46 -0
  13. package/dist/lua/cleanup.lua +46 -0
  14. package/dist/lua/complete-and-reserve-next-with-metadata.lua +221 -0
  15. package/dist/lua/complete-with-metadata.lua +190 -0
  16. package/dist/lua/complete.lua +51 -0
  17. package/dist/lua/dead-letter.lua +86 -0
  18. package/dist/lua/enqueue-batch.lua +149 -0
  19. package/dist/lua/enqueue-flow.lua +107 -0
  20. package/dist/lua/enqueue.lua +154 -0
  21. package/dist/lua/get-active-count.lua +6 -0
  22. package/dist/lua/get-active-jobs.lua +6 -0
  23. package/dist/lua/get-delayed-count.lua +5 -0
  24. package/dist/lua/get-delayed-jobs.lua +5 -0
  25. package/dist/lua/get-unique-groups-count.lua +13 -0
  26. package/dist/lua/get-unique-groups.lua +15 -0
  27. package/dist/lua/get-waiting-count.lua +11 -0
  28. package/dist/lua/get-waiting-jobs.lua +15 -0
  29. package/dist/lua/heartbeat.lua +22 -0
  30. package/dist/lua/is-empty.lua +35 -0
  31. package/dist/lua/promote-delayed-jobs.lua +40 -0
  32. package/dist/lua/promote-delayed-one.lua +44 -0
  33. package/dist/lua/promote-staged.lua +70 -0
  34. package/dist/lua/record-job-result.lua +143 -0
  35. package/dist/lua/remove.lua +55 -0
  36. package/dist/lua/reserve-atomic.lua +114 -0
  37. package/dist/lua/reserve-batch.lua +141 -0
  38. package/dist/lua/reserve.lua +161 -0
  39. package/dist/lua/retry.lua +53 -0
  40. package/package.json +92 -0
package/dist/index.cjs ADDED
@@ -0,0 +1,2567 @@
1
+ //#region rolldown:runtime
2
+ var __create = Object.create;
3
+ var __defProp = Object.defineProperty;
4
+ var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
5
+ var __getOwnPropNames = Object.getOwnPropertyNames;
6
+ var __getProtoOf = Object.getPrototypeOf;
7
+ var __hasOwnProp = Object.prototype.hasOwnProperty;
8
+ var __commonJS = (cb, mod) => function() {
9
+ return mod || (0, cb[__getOwnPropNames(cb)[0]])((mod = { exports: {} }).exports, mod), mod.exports;
10
+ };
11
+ var __copyProps = (to, from, except, desc) => {
12
+ if (from && typeof from === "object" || typeof from === "function") for (var keys = __getOwnPropNames(from), i = 0, n = keys.length, key; i < n; i++) {
13
+ key = keys[i];
14
+ if (!__hasOwnProp.call(to, key) && key !== except) __defProp(to, key, {
15
+ get: ((k) => from[k]).bind(null, key),
16
+ enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable
17
+ });
18
+ }
19
+ return to;
20
+ };
21
+ var __toESM = (mod, isNodeMode, target) => (target = mod != null ? __create(__getProtoOf(mod)) : {}, __copyProps(isNodeMode || !mod || !mod.__esModule ? __defProp(target, "default", {
22
+ value: mod,
23
+ enumerable: true
24
+ }) : target, mod));
25
+
26
+ //#endregion
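The four helpers above are standard bundler interop shims: __toESM wraps a CommonJS namespace so ESM-style default access works, which is why the bundle can call both (0, node_crypto.randomUUID)() and node_path.default.join(...). A minimal illustration, assuming a plain CJS module without an __esModule flag:

const cjs = require("node:path");
const esm = __toESM(cjs);
console.log(esm.default === cjs); // true: "default" points at the CJS namespace
console.log(typeof esm.join);     // "function": named exports copied by __copyProps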
27
+ let node_crypto = require("node:crypto");
28
+ node_crypto = __toESM(node_crypto);
29
+ let cron_parser = require("cron-parser");
30
+ cron_parser = __toESM(cron_parser);
31
+ let node_fs = require("node:fs");
32
+ node_fs = __toESM(node_fs);
33
+ let node_path = require("node:path");
34
+ node_path = __toESM(node_path);
35
+ let node_url = require("node:url");
36
+ node_url = __toESM(node_url);
37
+
38
+ //#region node_modules/.pnpm/@bull-board+api@6.13.0_@bull-board+ui@6.13.0/node_modules/@bull-board/api/dist/queueAdapters/base.js
39
+ var require_base = /* @__PURE__ */ __commonJS({ "node_modules/.pnpm/@bull-board+api@6.13.0_@bull-board+ui@6.13.0/node_modules/@bull-board/api/dist/queueAdapters/base.js": ((exports) => {
40
+ Object.defineProperty(exports, "__esModule", { value: true });
41
+ var BaseAdapter$1 = class {
42
+ constructor(type, options = {}) {
43
+ this.formatters = /* @__PURE__ */ new Map();
44
+ this._visibilityGuard = () => true;
45
+ this.readOnlyMode = options.readOnlyMode === true;
46
+ this.allowRetries = this.readOnlyMode ? false : options.allowRetries !== false;
47
+ this.allowCompletedRetries = this.allowRetries && options.allowCompletedRetries !== false;
48
+ this.prefix = options.prefix || "";
49
+ this.delimiter = options.delimiter || "";
50
+ this.description = options.description || "";
51
+ this.displayName = options.displayName || "";
52
+ this.type = type;
53
+ this.externalJobUrl = options.externalJobUrl;
54
+ }
55
+ getDescription() {
56
+ return this.description;
57
+ }
58
+ getDisplayName() {
59
+ return this.displayName;
60
+ }
61
+ setFormatter(field, formatter) {
62
+ this.formatters.set(field, formatter);
63
+ }
64
+ format(field, data, defaultValue = data) {
65
+ const fieldFormatter = this.formatters.get(field);
66
+ return typeof fieldFormatter === "function" ? fieldFormatter(data) : defaultValue;
67
+ }
68
+ setVisibilityGuard(guard) {
69
+ this._visibilityGuard = guard;
70
+ }
71
+ isVisible(request) {
72
+ return this._visibilityGuard(request);
73
+ }
74
+ };
75
+ exports.BaseAdapter = BaseAdapter$1;
76
+ }) });
77
+
78
+ //#endregion
79
+ //#region src/adapters/groupmq-bullboard-adapter.ts
80
+ var import_base = /* @__PURE__ */ __toESM(require_base(), 1);
81
+ var BullBoardGroupMQAdapter = class extends import_base.BaseAdapter {
82
+ constructor(queue, options = {}) {
83
+ const libName = queue.namespace;
84
+ super(libName, options);
85
+ this.queue = queue;
86
+ this.options = options;
87
+ }
88
+ getDescription() {
89
+ return this.options.description || "";
90
+ }
91
+ getDisplayName() {
92
+ return this.options.displayName || "";
93
+ }
94
+ getName() {
95
+ const prefix = this.options.prefix || "";
96
+ const delimiter = this.options.delimiter || "";
97
+ return `${prefix}${delimiter}${this.queue.rawNamespace}`.replace(/(^[\s:]+)|([\s:]+$)/g, "");
98
+ }
99
+ async getRedisInfo() {
100
+ return this.queue.redis.info();
101
+ }
102
+ async getJob(id) {
103
+ return await this.queue.getJob(id);
104
+ }
105
+ async getJobs(jobStatuses, start, end) {
106
+ return await this.queue.getJobsByStatus(jobStatuses, start, end);
107
+ }
108
+ async getJobCounts() {
109
+ const base = await this.queue.getJobCounts();
110
+ return {
111
+ latest: 0,
112
+ active: base.active,
113
+ waiting: base.waiting,
114
+ "waiting-children": base["waiting-children"],
115
+ prioritized: base.prioritized,
116
+ completed: base.completed,
117
+ failed: base.failed,
118
+ delayed: base.delayed,
119
+ paused: base.paused
120
+ };
121
+ }
122
+ async getJobLogs(_id) {
123
+ return [];
124
+ }
125
+ getStatuses() {
126
+ return [
127
+ "latest",
128
+ "active",
129
+ "waiting",
130
+ "waiting-children",
131
+ "prioritized",
132
+ "completed",
133
+ "failed",
134
+ "delayed",
135
+ "paused"
136
+ ];
137
+ }
138
+ getJobStatuses() {
139
+ return [
140
+ "active",
141
+ "waiting",
142
+ "waiting-children",
143
+ "prioritized",
144
+ "completed",
145
+ "failed",
146
+ "delayed",
147
+ "paused"
148
+ ];
149
+ }
150
+ assertWritable() {
151
+ if (this.options.readOnlyMode) throw new Error("This adapter is in read-only mode. Mutations are disabled.");
152
+ }
153
+ async clean(jobStatus, graceTimeMs) {
154
+ this.assertWritable();
155
+ if (jobStatus !== "completed" && jobStatus !== "failed" && jobStatus !== "delayed") return;
156
+ await this.queue.clean(graceTimeMs, Number.MAX_SAFE_INTEGER, jobStatus);
157
+ }
158
+ async addJob(_name, data, options) {
159
+ this.assertWritable();
160
+ return await this.queue.add({
161
+ groupId: options.groupId ?? Math.random().toString(36).substring(2, 15),
162
+ data,
163
+ ...options
164
+ });
165
+ }
166
+ async isPaused() {
167
+ return this.queue.isPaused();
168
+ }
169
+ async pause() {
170
+ this.assertWritable();
171
+ await this.queue.pause();
172
+ }
173
+ async resume() {
174
+ this.assertWritable();
175
+ await this.queue.resume();
176
+ }
177
+ async empty() {
178
+ this.assertWritable();
179
+ throw new Error("Not implemented");
180
+ }
181
+ async promoteAll() {
182
+ this.assertWritable();
183
+ throw new Error("Not implemented");
184
+ }
185
+ };
186
+
187
+ //#endregion
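A sketch of how this adapter would typically be registered with bull-board; the createBullBoard / ExpressAdapter wiring comes from the @bull-board packages (whose base adapter is vendored above) and is an assumption rather than part of this diff:

const { createBullBoard } = require("@bull-board/api");
const { ExpressAdapter } = require("@bull-board/express");

const serverAdapter = new ExpressAdapter();
serverAdapter.setBasePath("/admin/queues");
createBullBoard({
  queues: [new BullBoardGroupMQAdapter(queue, { readOnlyMode: true })],
  serverAdapter,
});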
188
+ //#region src/helpers.ts
189
+ /**
190
+ * Wait for a queue to become empty
191
+ * @param queue The queue to monitor
192
+ * @param timeoutMs Maximum time to wait (default: 60 seconds)
193
+ * @returns Promise that resolves when queue is empty or timeout is reached
194
+ */
195
+ async function waitForQueueToEmpty(queue, timeoutMs = 6e4) {
196
+ return queue.waitForEmpty(timeoutMs);
197
+ }
198
+ /**
199
+ * Get status of all workers
200
+ */
201
+ function getWorkersStatus(workers) {
202
+ const workersStatus = workers.map((worker, index) => {
203
+ const currentJob = worker.getCurrentJob();
204
+ return {
205
+ index,
206
+ isProcessing: worker.isProcessing(),
207
+ currentJob: currentJob ? {
208
+ jobId: currentJob.job.id,
209
+ groupId: currentJob.job.groupId,
210
+ processingTimeMs: currentJob.processingTimeMs
211
+ } : void 0
212
+ };
213
+ });
214
+ const processing = workersStatus.filter((w) => w.isProcessing).length;
215
+ const idle = workersStatus.length - processing;
216
+ return {
217
+ total: workers.length,
218
+ processing,
219
+ idle,
220
+ workers: workersStatus
221
+ };
222
+ }
223
+
224
+ //#endregion
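A usage sketch for the two helpers above, assuming queue is an instance of the Queue class defined below and workers expose the getCurrentJob()/isProcessing() interface that getWorkersStatus reads:

const status = getWorkersStatus(workers);
console.log(`${status.processing}/${status.total} busy, ${status.idle} idle`);
const emptied = await waitForQueueToEmpty(queue, 30_000); // false if the timeout was hit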
225
+ //#region src/job.ts
226
+ var Job = class Job {
227
+ constructor(args) {
228
+ this.queue = args.queue;
229
+ this.id = args.id;
230
+ this.name = args.name ?? "groupmq";
231
+ this.data = args.data;
232
+ this.groupId = args.groupId;
233
+ this.attemptsMade = args.attemptsMade;
234
+ this.opts = args.opts;
235
+ this.processedOn = args.processedOn;
236
+ this.finishedOn = args.finishedOn;
237
+ this.failedReason = args.failedReason;
238
+ this.stacktrace = args.stacktrace;
239
+ this.returnvalue = args.returnvalue;
240
+ this.timestamp = args.timestamp;
241
+ this.orderMs = args.orderMs;
242
+ this.status = args.status ?? "unknown";
243
+ }
244
+ async getState() {
245
+ return this.status ?? "unknown";
246
+ }
247
+ toJSON() {
248
+ return {
249
+ id: this.id,
250
+ name: this.name,
251
+ data: this.data,
252
+ groupId: this.groupId,
253
+ attemptsMade: this.attemptsMade,
254
+ opts: this.opts,
255
+ processedOn: this.processedOn,
256
+ finishedOn: this.finishedOn,
257
+ failedReason: this.failedReason,
258
+ stacktrace: this.stacktrace ? [this.stacktrace] : null,
259
+ returnvalue: this.returnvalue,
260
+ timestamp: this.timestamp,
261
+ orderMs: this.orderMs,
262
+ status: this.status,
263
+ progress: 0
264
+ };
265
+ }
266
+ changeDelay(newDelay) {
267
+ return this.queue.changeDelay(this.id, newDelay);
268
+ }
269
+ async promote() {
270
+ await this.queue.promote(this.id);
271
+ }
272
+ async remove() {
273
+ await this.queue.remove(this.id);
274
+ }
275
+ async retry(_state) {
276
+ await this.queue.retry(this.id);
277
+ }
278
+ async updateData(jobData) {
279
+ await this.queue.updateData(this.id, jobData);
280
+ }
281
+ async update(jobData) {
282
+ await this.updateData(jobData);
283
+ }
284
+ static fromReserved(queue, reserved, meta) {
285
+ return new Job({
286
+ queue,
287
+ id: reserved.id,
288
+ name: "groupmq",
289
+ data: reserved.data,
290
+ groupId: reserved.groupId,
291
+ attemptsMade: reserved.attempts,
292
+ opts: {
293
+ attempts: reserved.maxAttempts,
294
+ delay: meta?.delayMs
295
+ },
296
+ processedOn: meta?.processedOn,
297
+ finishedOn: meta?.finishedOn,
298
+ failedReason: meta?.failedReason,
299
+ stacktrace: meta?.stacktrace,
300
+ returnvalue: meta?.returnvalue,
301
+ timestamp: reserved.timestamp ? reserved.timestamp : Date.now(),
302
+ orderMs: reserved.orderMs,
303
+ status: coerceStatus(meta?.status)
304
+ });
305
+ }
306
+ /**
307
+ * Create a Job from raw Redis hash data with optional known status
308
+ * This avoids extra Redis lookups when status is already known
309
+ */
310
+ static fromRawHash(queue, id, raw, knownStatus) {
311
+ const groupId = raw.groupId ?? "";
312
+ const payload = raw.data ? safeJsonParse$1(raw.data) : null;
313
+ const attempts = raw.attempts ? parseInt(raw.attempts, 10) : 0;
314
+ const maxAttempts = raw.maxAttempts ? parseInt(raw.maxAttempts, 10) : queue.maxAttemptsDefault;
315
+ const timestampMs = raw.timestamp ? parseInt(raw.timestamp, 10) : 0;
316
+ const orderMs = raw.orderMs ? parseInt(raw.orderMs, 10) : void 0;
317
+ const delayUntil = raw.delayUntil ? parseInt(raw.delayUntil, 10) : 0;
318
+ const processedOn = raw.processedOn ? parseInt(raw.processedOn, 10) : void 0;
319
+ const finishedOn = raw.finishedOn ? parseInt(raw.finishedOn, 10) : void 0;
320
+ const failedReason = (raw.failedReason ?? raw.lastErrorMessage) || void 0;
321
+ const stacktrace = (raw.stacktrace ?? raw.lastErrorStack) || void 0;
322
+ const returnvalue = raw.returnvalue ? safeJsonParse$1(raw.returnvalue) : void 0;
323
+ return new Job({
324
+ queue,
325
+ id,
326
+ name: "groupmq",
327
+ data: payload,
328
+ groupId,
329
+ attemptsMade: attempts,
330
+ opts: {
331
+ attempts: maxAttempts,
332
+ delay: delayUntil && delayUntil > Date.now() ? delayUntil - Date.now() : void 0
333
+ },
334
+ processedOn,
335
+ finishedOn,
336
+ failedReason,
337
+ stacktrace,
338
+ returnvalue,
339
+ timestamp: timestampMs || Date.now(),
340
+ orderMs,
341
+ status: knownStatus ?? coerceStatus(raw.status)
342
+ });
343
+ }
344
+ static async fromStore(queue, id) {
345
+ const jobKey = `${queue.namespace}:job:${id}`;
346
+ const raw = await queue.redis.hgetall(jobKey);
347
+ if (!raw || Object.keys(raw).length === 0) throw new Error(`Job ${id} not found`);
348
+ const groupId = raw.groupId ?? "";
349
+ const payload = raw.data ? safeJsonParse$1(raw.data) : null;
350
+ const attempts = raw.attempts ? parseInt(raw.attempts, 10) : 0;
351
+ const maxAttempts = raw.maxAttempts ? parseInt(raw.maxAttempts, 10) : queue.maxAttemptsDefault;
352
+ const timestampMs = raw.timestamp ? parseInt(raw.timestamp, 10) : 0;
353
+ const orderMs = raw.orderMs ? parseInt(raw.orderMs, 10) : void 0;
354
+ const delayUntil = raw.delayUntil ? parseInt(raw.delayUntil, 10) : 0;
355
+ const processedOn = raw.processedOn ? parseInt(raw.processedOn, 10) : void 0;
356
+ const finishedOn = raw.finishedOn ? parseInt(raw.finishedOn, 10) : void 0;
357
+ const failedReason = (raw.failedReason ?? raw.lastErrorMessage) || void 0;
358
+ const stacktrace = (raw.stacktrace ?? raw.lastErrorStack) || void 0;
359
+ const returnvalue = raw.returnvalue ? safeJsonParse$1(raw.returnvalue) : void 0;
360
+ const [inProcessing, inDelayed] = await Promise.all([queue.redis.zscore(`${queue.namespace}:processing`, id), queue.redis.zscore(`${queue.namespace}:delayed`, id)]);
361
+ let status = raw.status;
362
+ if (inProcessing !== null) status = "active";
363
+ else if (inDelayed !== null) status = "delayed";
364
+ else if (groupId) {
365
+ if (await queue.redis.zscore(`${queue.namespace}:g:${groupId}`, id) !== null) status = "waiting";
366
+ }
367
+ return new Job({
368
+ queue,
369
+ id,
370
+ name: "groupmq",
371
+ data: payload,
372
+ groupId,
373
+ attemptsMade: attempts,
374
+ opts: {
375
+ attempts: maxAttempts,
376
+ delay: delayUntil && delayUntil > Date.now() ? delayUntil - Date.now() : void 0
377
+ },
378
+ processedOn,
379
+ finishedOn,
380
+ failedReason,
381
+ stacktrace,
382
+ returnvalue,
383
+ timestamp: timestampMs || Date.now(),
384
+ orderMs,
385
+ status: coerceStatus(status)
386
+ });
387
+ }
388
+ };
389
+ function safeJsonParse$1(input) {
390
+ try {
391
+ return JSON.parse(input);
392
+ } catch (_e) {
393
+ return null;
394
+ }
395
+ }
396
+ function coerceStatus(input) {
397
+ const valid = [
398
+ "latest",
399
+ "active",
400
+ "waiting",
401
+ "waiting-children",
402
+ "prioritized",
403
+ "completed",
404
+ "failed",
405
+ "delayed",
406
+ "paused"
407
+ ];
408
+ if (!input) return "unknown";
409
+ if (valid.includes(input)) return input;
410
+ return "unknown";
411
+ }
412
+
413
+ //#endregion
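For reference, the Redis layout Job.fromStore reads when deriving live status (key names taken from the code above; <ns> is the groupmq:<name> namespace), followed by a typical lookup:

//   <ns>:job:<id>      HASH  groupId, data, attempts, timestamp, returnvalue, ...
//   <ns>:processing    ZSET  membership here means status "active"
//   <ns>:delayed       ZSET  membership here means status "delayed"
//   <ns>:g:<groupId>   ZSET  membership here means status "waiting"
const job = await queue.getJob("some-job-id"); // throws if the job hash is missing
console.log(await job.getState(), job.attemptsMade);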
414
+ //#region src/logger.ts
415
+ var Logger = class {
416
+ constructor(enabled, name) {
417
+ this.enabled = enabled;
418
+ this.name = name;
419
+ }
420
+ debug(...args) {
421
+ if (this.enabled) console.debug(`[${this.name}]`, ...args);
422
+ }
423
+ info(...args) {
424
+ if (this.enabled) console.log(`[${this.name}]`, ...args);
425
+ }
426
+ warn(...args) {
427
+ if (this.enabled) console.warn(`⚠️ [${this.name}]`, ...args);
428
+ }
429
+ error(...args) {
430
+ if (this.enabled) console.error(`💥 [${this.name}]`, ...args);
431
+ }
432
+ };
433
+
434
+ //#endregion
435
+ //#region src/lua/loader.ts
436
+ const cacheByClient = /* @__PURE__ */ new WeakMap();
437
+ function scriptPath(name) {
438
+ const currentDir = node_path.default.dirname((0, node_url.fileURLToPath)(require("url").pathToFileURL(__filename).href));
439
+ const candidates = [node_path.default.join(currentDir, `${name}.lua`), node_path.default.join(currentDir, "lua", `${name}.lua`)];
440
+ for (const candidate of candidates) if (node_fs.default.existsSync(candidate)) return candidate;
441
+ return candidates[0];
442
+ }
443
+ async function loadScript(client, name) {
444
+ let map = cacheByClient.get(client);
445
+ if (!map) {
446
+ map = /* @__PURE__ */ new Map();
447
+ cacheByClient.set(client, map);
448
+ }
449
+ const cached = map.get(name);
450
+ if (cached) return cached;
451
+ const file = scriptPath(name);
452
+ const lua = node_fs.default.readFileSync(file, "utf8");
453
+ const sha = await client.script("load", lua);
454
+ map.set(name, sha);
455
+ return sha;
456
+ }
457
+ async function evalScript(client, name, argv, numKeys) {
458
+ const sha = await loadScript(client, name);
459
+ return client.evalsha(sha, numKeys, ...argv);
460
+ }
461
+
462
+ //#endregion
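The loader above assumes an ioredis-style client (script("load", ...) and evalsha(sha, numKeys, ...args)) and caches SHAs per connection in a WeakMap, so a fresh connection simply reloads its scripts. A usage sketch; note the namespace is passed as the single key:

const sha = await loadScript(redis, "get-active-count");
const active = await redis.evalsha(sha, 1, "groupmq:orders");
// equivalent shorthand used throughout this file:
const same = await evalScript(redis, "get-active-count", ["groupmq:orders"], 1);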
463
+ //#region src/queue.ts
464
+ function nsKey(ns, ...parts) {
465
+ return [ns, ...parts].join(":");
466
+ }
467
+ function safeJsonParse(input) {
468
+ try {
469
+ return JSON.parse(input);
470
+ } catch (_e) {
471
+ return null;
472
+ }
473
+ }
474
+ var Queue = class {
475
+ constructor(opts) {
476
+ this._consecutiveEmptyReserves = 0;
477
+ this.promoterRunning = false;
478
+ this.batchBuffer = [];
479
+ this.flushing = false;
480
+ this._groupCleanupTracking = /* @__PURE__ */ new Map();
481
+ this.r = opts.redis;
482
+ this.rawNs = opts.namespace;
483
+ this.name = opts.namespace;
484
+ this.ns = `groupmq:${this.rawNs}`;
485
+ const rawVt = opts.jobTimeoutMs ?? 3e4;
486
+ this.vt = Math.max(1, rawVt);
487
+ this.defaultMaxAttempts = opts.maxAttempts ?? 3;
488
+ this.scanLimit = opts.reserveScanLimit ?? 20;
489
+ this.keepCompleted = Math.max(0, opts.keepCompleted ?? 0);
490
+ this.keepFailed = Math.max(0, opts.keepFailed ?? 0);
491
+ this.schedulerLockTtlMs = opts.schedulerLockTtlMs ?? 1500;
492
+ this.orderingDelayMs = opts.orderingDelayMs ?? 0;
493
+ if (opts.autoBatch) this.batchConfig = typeof opts.autoBatch === "boolean" ? {
494
+ size: 10,
495
+ maxWaitMs: 10
496
+ } : {
497
+ size: opts.autoBatch.size ?? 10,
498
+ maxWaitMs: opts.autoBatch.maxWaitMs ?? 10
499
+ };
500
+ this.logger = typeof opts.logger === "object" ? opts.logger : new Logger(!!opts.logger, this.namespace);
501
+ this.r.on("error", (err) => {
502
+ this.logger.error("Redis error (main):", err);
503
+ });
504
+ }
505
+ get redis() {
506
+ return this.r;
507
+ }
508
+ get namespace() {
509
+ return this.ns;
510
+ }
511
+ get rawNamespace() {
512
+ return this.rawNs;
513
+ }
514
+ get jobTimeoutMs() {
515
+ return this.vt;
516
+ }
517
+ get maxAttemptsDefault() {
518
+ return this.defaultMaxAttempts;
519
+ }
520
+ async add(opts) {
521
+ const maxAttempts = opts.maxAttempts ?? this.defaultMaxAttempts;
522
+ const orderMs = opts.orderMs ?? Date.now();
523
+ const now = Date.now();
524
+ const jobId = opts.jobId ?? (0, node_crypto.randomUUID)();
525
+ if (opts.repeat) return this.addRepeatingJob({
526
+ ...opts,
527
+ orderMs,
528
+ maxAttempts
529
+ });
530
+ let delayMs;
531
+ if (opts.delay !== void 0 && opts.delay > 0) delayMs = opts.delay;
532
+ else if (opts.runAt !== void 0) {
533
+ const runAtTimestamp = opts.runAt instanceof Date ? opts.runAt.getTime() : opts.runAt;
534
+ delayMs = Math.max(0, runAtTimestamp - now);
535
+ }
536
+ const data = opts.data === void 0 ? null : opts.data;
537
+ if (this.batchConfig) return new Promise((resolve, reject) => {
538
+ this.batchBuffer.push({
539
+ groupId: opts.groupId,
540
+ data,
541
+ jobId,
542
+ maxAttempts,
543
+ delayMs,
544
+ orderMs,
545
+ resolve,
546
+ reject
547
+ });
548
+ if (this.batchBuffer.length >= this.batchConfig.size) this.flushBatch();
549
+ else if (!this.batchTimer) this.batchTimer = setTimeout(() => this.flushBatch(), this.batchConfig.maxWaitMs);
550
+ });
551
+ return this.addSingle({
552
+ ...opts,
553
+ data,
554
+ jobId,
555
+ maxAttempts,
556
+ orderMs,
557
+ delayMs
558
+ });
559
+ }
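A usage sketch for add(); option names are taken from the code above. delay takes precedence over runAt when both are set, and with autoBatch enabled the identical call is buffered and flushed through enqueue-batch:

const job = await queue.add({
  groupId: "user:42",              // jobs within a group run in order
  data: { kind: "welcome-email" },
  delay: 5_000,                    // or runAt: Date.now() + 5_000
  maxAttempts: 5,                  // defaults to the queue-level maxAttempts (3)
});
console.log(job.id, await job.getState());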
560
+ /**
561
+ * Adds a parent-child flow to the queue.
562
+ * The parent job will only be processed after all child jobs have completed successfully.
563
+ * This operation is atomic.
564
+ *
565
+ * @param flow The flow configuration containing parent and children jobs
566
+ * @returns The parent job entity
567
+ */
568
+ async addFlow(flow) {
569
+ const parentId = flow.parent.jobId ?? (0, node_crypto.randomUUID)();
570
+ const parentMaxAttempts = flow.parent.maxAttempts ?? this.defaultMaxAttempts;
571
+ const parentOrderMs = flow.parent.orderMs ?? Date.now();
572
+ const parentData = JSON.stringify(flow.parent.data === void 0 ? null : flow.parent.data);
573
+ const childrenIds = [];
574
+ const childrenArgs = [];
575
+ for (const child of flow.children) {
576
+ const childId = child.jobId ?? (0, node_crypto.randomUUID)();
577
+ const childMaxAttempts = child.maxAttempts ?? this.defaultMaxAttempts;
578
+ const childOrderMs = child.orderMs ?? Date.now();
579
+ const childDelay = child.delay ?? 0;
580
+ const childData = JSON.stringify(child.data === void 0 ? null : child.data);
581
+ childrenIds.push(childId);
582
+ childrenArgs.push(childId, child.groupId, childData, childMaxAttempts.toString(), childOrderMs.toString(), childDelay.toString());
583
+ }
584
+ const now = Date.now();
585
+ await evalScript(this.r, "enqueue-flow", [
586
+ this.ns,
587
+ parentId,
588
+ flow.parent.groupId,
589
+ parentData,
590
+ parentMaxAttempts.toString(),
591
+ parentOrderMs.toString(),
592
+ now.toString(),
593
+ ...childrenArgs
594
+ ], 1);
595
+ return new Job({
596
+ queue: this,
597
+ id: parentId,
598
+ groupId: flow.parent.groupId,
599
+ data: flow.parent.data,
600
+ status: "waiting-children",
601
+ attemptsMade: 0,
602
+ opts: { attempts: parentMaxAttempts },
603
+ timestamp: now,
604
+ orderMs: parentOrderMs
605
+ });
606
+ }
607
+ /**
608
+ * Gets the number of remaining child jobs for a parent job in a flow.
609
+ * @param parentId The ID of the parent job
610
+ * @returns The number of remaining children, or null if the job is not a parent
611
+ */
612
+ async getFlowDependencies(parentId) {
613
+ const remaining = await this.r.hget(`${this.ns}:job:${parentId}`, "flowRemaining");
614
+ return remaining !== null ? parseInt(remaining, 10) : null;
615
+ }
616
+ /**
617
+ * Gets the results of all child jobs in a flow.
618
+ * @param parentId The ID of the parent job
619
+ * @returns An object mapping child job IDs to their results
620
+ */
621
+ async getFlowResults(parentId) {
622
+ const results = await this.r.hgetall(`${this.ns}:flow:results:${parentId}`);
623
+ const parsed = {};
624
+ for (const [id, val] of Object.entries(results)) try {
625
+ parsed[id] = JSON.parse(val);
626
+ } catch (_e) {
627
+ parsed[id] = val;
628
+ }
629
+ return parsed;
630
+ }
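A flow sketch built from the three methods above; the parent job stays in "waiting-children" until every child has completed:

const parent = await queue.addFlow({
  parent: { groupId: "order:7", data: { step: "finalize" } },
  children: [
    { groupId: "order:7", data: { step: "charge" } },
    { groupId: "order:7", data: { step: "reserve-stock" } },
  ],
});
const remaining = await queue.getFlowDependencies(parent.id); // children left, or null
const results = await queue.getFlowResults(parent.id);        // { childId: returnvalue, ... }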
631
+ async addSingle(opts) {
632
+ const now = Date.now();
633
+ let delayUntil = 0;
634
+ if (opts.delayMs !== void 0 && opts.delayMs > 0) delayUntil = now + opts.delayMs;
635
+ const serializedPayload = JSON.stringify(opts.data);
636
+ const result = await evalScript(this.r, "enqueue", [
637
+ this.ns,
638
+ opts.groupId,
639
+ serializedPayload,
640
+ String(opts.maxAttempts),
641
+ String(opts.orderMs),
642
+ String(delayUntil),
643
+ String(opts.jobId),
644
+ String(this.keepCompleted),
645
+ String(now),
646
+ String(this.orderingDelayMs)
647
+ ], 1);
648
+ if (Array.isArray(result)) {
649
+ const [returnedJobId, returnedGroupId, returnedData, attempts, returnedMaxAttempts, timestamp, returnedOrderMs, returnedDelayUntil, status] = result;
650
+ return Job.fromRawHash(this, returnedJobId, {
651
+ id: returnedJobId,
652
+ groupId: returnedGroupId,
653
+ data: returnedData,
654
+ attempts,
655
+ maxAttempts: returnedMaxAttempts,
656
+ timestamp,
657
+ orderMs: returnedOrderMs,
658
+ delayUntil: returnedDelayUntil,
659
+ status
660
+ }, status);
661
+ }
662
+ return this.getJob(result);
663
+ }
664
+ async flushBatch() {
665
+ if (this.batchTimer) {
666
+ clearTimeout(this.batchTimer);
667
+ this.batchTimer = void 0;
668
+ }
669
+ if (this.batchBuffer.length === 0 || this.flushing) return;
670
+ this.flushing = true;
671
+ const batch = this.batchBuffer.splice(0);
672
+ try {
673
+ this.logger.debug(`Flushing batch of ${batch.length} jobs`);
674
+ const now = Date.now();
675
+ const jobsData = batch.map((job) => ({
676
+ jobId: job.jobId,
677
+ groupId: job.groupId,
678
+ data: JSON.stringify(job.data),
679
+ maxAttempts: job.maxAttempts,
680
+ orderMs: job.orderMs,
681
+ delayMs: job.delayMs
682
+ }));
683
+ const jobDataArrays = await evalScript(this.r, "enqueue-batch", [
684
+ this.ns,
685
+ JSON.stringify(jobsData),
686
+ String(this.keepCompleted),
687
+ String(now),
688
+ String(this.orderingDelayMs)
689
+ ], 1);
690
+ for (let i = 0; i < batch.length; i++) {
691
+ const job = batch[i];
692
+ const jobDataArray = jobDataArrays[i];
693
+ try {
694
+ if (jobDataArray && jobDataArray.length >= 9) {
695
+ const [returnedJobId, returnedGroupId, returnedData, attempts, returnedMaxAttempts, timestamp, returnedOrderMs, returnedDelayUntil, status] = jobDataArray;
696
+ const jobEntity = Job.fromRawHash(this, returnedJobId, {
697
+ id: returnedJobId,
698
+ groupId: returnedGroupId,
699
+ data: returnedData,
700
+ attempts,
701
+ maxAttempts: returnedMaxAttempts,
702
+ timestamp,
703
+ orderMs: returnedOrderMs,
704
+ delayUntil: returnedDelayUntil,
705
+ status
706
+ }, status);
707
+ job.resolve(jobEntity);
708
+ } else throw new Error("Invalid job data returned from batch enqueue");
709
+ } catch (err) {
710
+ job.reject(err instanceof Error ? err : new Error(String(err)));
711
+ }
712
+ }
713
+ } catch (err) {
714
+ for (const job of batch) job.reject(err instanceof Error ? err : new Error(String(err)));
715
+ } finally {
716
+ this.flushing = false;
717
+ if (this.batchBuffer.length > 0) setImmediate(() => this.flushBatch());
718
+ }
719
+ }
720
+ async reserve() {
721
+ const now = Date.now();
722
+ const raw = await evalScript(this.r, "reserve", [
723
+ this.ns,
724
+ String(now),
725
+ String(this.vt),
726
+ String(this.scanLimit)
727
+ ], 1);
728
+ if (!raw) return null;
729
+ const parts = raw.split("|||");
730
+ if (parts.length !== 10) return null;
731
+ let data;
732
+ try {
733
+ data = JSON.parse(parts[2]);
734
+ } catch (err) {
735
+ this.logger.warn(`Failed to parse job data: ${err.message}, raw: ${parts[2]}`);
736
+ data = null;
737
+ }
738
+ const parsedOrderMs = Number.parseInt(parts[7], 10);
739
+ return {
740
+ id: parts[0],
741
+ groupId: parts[1],
742
+ data,
743
+ attempts: Number.parseInt(parts[3], 10),
744
+ maxAttempts: Number.parseInt(parts[4], 10),
745
+ seq: Number.parseInt(parts[5], 10),
746
+ timestamp: Number.parseInt(parts[6], 10),
747
+ orderMs: Number.isNaN(parsedOrderMs) ? Number.parseInt(parts[6], 10) : parsedOrderMs,
748
+ score: Number(parts[8]),
749
+ deadlineAt: Number.parseInt(parts[9], 10)
750
+ };
751
+ }
752
+ /**
753
+ * Check how many jobs are waiting in a specific group
754
+ */
755
+ async getGroupJobCount(groupId) {
756
+ const gZ = `${this.ns}:g:${groupId}`;
757
+ return await this.r.zcard(gZ);
758
+ }
759
+ /**
760
+ * Complete a job by removing from processing and unlocking the group.
761
+ * Note: Job metadata recording is handled separately by recordCompleted().
762
+ *
763
+ * @deprecated Use completeWithMetadata() for internal operations. This method
764
+ * is kept for backward compatibility and testing only.
765
+ */
766
+ async complete(job) {
767
+ await evalScript(this.r, "complete", [
768
+ this.ns,
769
+ job.id,
770
+ job.groupId
771
+ ], 1);
772
+ }
773
+ /**
774
+ * Complete a job AND record metadata in a single atomic operation.
775
+ * This is the efficient internal method used by workers.
776
+ */
777
+ async completeWithMetadata(job, result, meta) {
778
+ await evalScript(this.r, "complete-with-metadata", [
779
+ this.ns,
780
+ job.id,
781
+ job.groupId,
782
+ "completed",
783
+ String(meta.finishedOn),
784
+ JSON.stringify(result ?? null),
785
+ String(this.keepCompleted),
786
+ String(this.keepFailed),
787
+ String(meta.processedOn),
788
+ String(meta.finishedOn),
789
+ String(meta.attempts),
790
+ String(meta.maxAttempts)
791
+ ], 1);
792
+ }
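A minimal hand-rolled processing loop over reserve() and completeWithMetadata(); handle() is a placeholder for an application handler, heartbeat() is defined further down in this file, and the package's Worker class is the supported way to do this:

const reserved = await queue.reserve();
if (reserved) {
  const processedOn = Date.now();
  await queue.heartbeat(reserved);            // extend the visibility timeout
  const result = await handle(reserved.data); // placeholder handler
  await queue.completeWithMetadata(reserved, result, {
    processedOn,
    finishedOn: Date.now(),
    attempts: reserved.attempts,
    maxAttempts: reserved.maxAttempts,
  });
}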
793
+ /**
794
+ * Atomically complete a job and try to reserve the next job from the same group
795
+ * This prevents race conditions where other workers can steal subsequent jobs from the same group
796
+ */
797
+ /**
798
+ * Atomically complete a job with metadata and reserve the next job from the same group.
799
+ */
800
+ async completeAndReserveNextWithMetadata(completedJobId, groupId, handlerResult, meta) {
801
+ const now = Date.now();
802
+ try {
803
+ const result = await evalScript(this.r, "complete-and-reserve-next-with-metadata", [
804
+ this.ns,
805
+ completedJobId,
806
+ groupId,
807
+ "completed",
808
+ String(meta.finishedOn),
809
+ JSON.stringify(handlerResult ?? null),
810
+ String(this.keepCompleted),
811
+ String(this.keepFailed),
812
+ String(meta.processedOn),
813
+ String(meta.finishedOn),
814
+ String(meta.attempts),
815
+ String(meta.maxAttempts),
816
+ String(now),
817
+ String(this.jobTimeoutMs)
818
+ ], 1);
819
+ if (!result) return null;
820
+ const parts = result.split("|||");
821
+ if (parts.length !== 10) {
822
+ this.logger.error("Queue completeAndReserveNextWithMetadata: unexpected result format:", result);
823
+ return null;
824
+ }
825
+ const [id, , data, attempts, maxAttempts, seq, enq, orderMs, score, deadline] = parts;
826
+ return {
827
+ id,
828
+ groupId,
829
+ data: JSON.parse(data),
830
+ attempts: parseInt(attempts, 10),
831
+ maxAttempts: parseInt(maxAttempts, 10),
832
+ seq: parseInt(seq, 10),
833
+ timestamp: parseInt(enq, 10),
834
+ orderMs: parseInt(orderMs, 10),
835
+ score: parseFloat(score),
836
+ deadlineAt: parseInt(deadline, 10)
837
+ };
838
+ } catch (error) {
839
+ this.logger.error("Queue completeAndReserveNextWithMetadata error:", error);
840
+ return null;
841
+ }
842
+ }
843
+ /**
844
+ * Check if a job is currently in processing state
845
+ */
846
+ async isJobProcessing(jobId) {
847
+ return await this.r.zscore(`${this.ns}:processing`, jobId) !== null;
848
+ }
849
+ async retry(jobId, backoffMs = 0) {
850
+ return evalScript(this.r, "retry", [
851
+ this.ns,
852
+ jobId,
853
+ String(backoffMs)
854
+ ], 1);
855
+ }
856
+ /**
857
+ * Dead letter a job (remove from group and optionally store in dead letter queue)
858
+ */
859
+ async deadLetter(jobId, groupId) {
860
+ return evalScript(this.r, "dead-letter", [
861
+ this.ns,
862
+ jobId,
863
+ groupId
864
+ ], 1);
865
+ }
866
+ /**
867
+ * Record a successful completion for retention and inspection
868
+ * Uses a consolidated Lua script for an atomic operation with retention management
869
+ */
870
+ async recordCompleted(job, result, meta) {
871
+ const processedOn = meta.processedOn ?? Date.now();
872
+ const finishedOn = meta.finishedOn ?? Date.now();
873
+ const attempts = meta.attempts ?? 0;
874
+ const maxAttempts = meta.maxAttempts ?? this.defaultMaxAttempts;
875
+ try {
876
+ await evalScript(this.r, "record-job-result", [
877
+ this.ns,
878
+ job.id,
879
+ "completed",
880
+ String(finishedOn),
881
+ JSON.stringify(result ?? null),
882
+ String(this.keepCompleted),
883
+ String(this.keepFailed),
884
+ String(processedOn),
885
+ String(finishedOn),
886
+ String(attempts),
887
+ String(maxAttempts)
888
+ ], 1);
889
+ } catch (error) {
890
+ this.logger.error(`Error recording completion for job ${job.id}:`, error);
891
+ throw error;
892
+ }
893
+ }
894
+ /**
895
+ * Record a failure attempt (non-final), storing last error for visibility
896
+ */
897
+ async recordAttemptFailure(job, error, meta) {
898
+ const jobKey = `${this.ns}:job:${job.id}`;
899
+ const processedOn = meta.processedOn ?? Date.now();
900
+ const finishedOn = meta.finishedOn ?? Date.now();
901
+ const message = typeof error === "string" ? error : error.message ?? "Error";
902
+ const name = typeof error === "string" ? "Error" : error.name ?? "Error";
903
+ const stack = typeof error === "string" ? "" : error.stack ?? "";
904
+ await this.r.hset(jobKey, "lastErrorMessage", message, "lastErrorName", name, "lastErrorStack", stack, "processedOn", String(processedOn), "finishedOn", String(finishedOn));
905
+ }
906
+ /**
907
+ * Record a final failure (dead-lettered) for retention and inspection
908
+ * Uses a consolidated Lua script for an atomic operation
909
+ */
910
+ async recordFinalFailure(job, error, meta) {
911
+ const processedOn = meta.processedOn ?? Date.now();
912
+ const finishedOn = meta.finishedOn ?? Date.now();
913
+ const attempts = meta.attempts ?? 0;
914
+ const maxAttempts = meta.maxAttempts ?? this.defaultMaxAttempts;
915
+ const message = typeof error === "string" ? error : error.message ?? "Error";
916
+ const name = typeof error === "string" ? "Error" : error.name ?? "Error";
917
+ const stack = typeof error === "string" ? "" : error.stack ?? "";
918
+ const errorInfo = JSON.stringify({
919
+ message,
920
+ name,
921
+ stack
922
+ });
923
+ try {
924
+ await evalScript(this.r, "record-job-result", [
925
+ this.ns,
926
+ job.id,
927
+ "failed",
928
+ String(finishedOn),
929
+ errorInfo,
930
+ String(this.keepCompleted),
931
+ String(this.keepFailed),
932
+ String(processedOn),
933
+ String(finishedOn),
934
+ String(attempts),
935
+ String(maxAttempts)
936
+ ], 1);
937
+ } catch (err) {
938
+ this.logger.error(`Error recording final failure for job ${job.id}:`, err);
939
+ throw err;
940
+ }
941
+ }
942
+ async getCompleted(limit = this.keepCompleted) {
943
+ const completedKey = `${this.ns}:completed`;
944
+ const ids = await this.r.zrevrange(completedKey, 0, Math.max(0, limit - 1));
945
+ if (ids.length === 0) return [];
946
+ const pipe = this.r.multi();
947
+ for (const id of ids) pipe.hmget(`${this.ns}:job:${id}`, "groupId", "data", "returnvalue", "processedOn", "finishedOn", "attempts", "maxAttempts");
948
+ const rows = await pipe.exec() ?? [];
949
+ return ids.map((id, idx) => {
950
+ const [groupId, dataStr, retStr, processedOn, finishedOn, attempts, maxAttempts] = rows[idx]?.[1] || [];
951
+ return {
952
+ id,
953
+ groupId: groupId || "",
954
+ data: dataStr ? safeJsonParse(dataStr) : null,
955
+ returnvalue: retStr ? safeJsonParse(retStr) : null,
956
+ processedOn: processedOn ? parseInt(processedOn, 10) : void 0,
957
+ finishedOn: finishedOn ? parseInt(finishedOn, 10) : void 0,
958
+ attempts: attempts ? parseInt(attempts, 10) : 0,
959
+ maxAttempts: maxAttempts ? parseInt(maxAttempts, 10) : this.defaultMaxAttempts
960
+ };
961
+ });
962
+ }
963
+ async getFailed(limit = this.keepFailed) {
964
+ const failedKey = `${this.ns}:failed`;
965
+ const ids = await this.r.zrevrange(failedKey, 0, Math.max(0, limit - 1));
966
+ if (ids.length === 0) return [];
967
+ const pipe = this.r.multi();
968
+ for (const id of ids) pipe.hmget(`${this.ns}:job:${id}`, "groupId", "data", "failedReason", "stacktrace", "processedOn", "finishedOn", "attempts", "maxAttempts");
969
+ const rows = await pipe.exec() ?? [];
970
+ return ids.map((id, idx) => {
971
+ const [groupId, dataStr, failedReason, stacktrace, processedOn, finishedOn, attempts, maxAttempts] = rows[idx]?.[1] || [];
972
+ return {
973
+ id,
974
+ groupId: groupId || "",
975
+ data: dataStr ? safeJsonParse(dataStr) : null,
976
+ failedReason: failedReason || "",
977
+ stacktrace: stacktrace || void 0,
978
+ processedOn: processedOn ? parseInt(processedOn, 10) : void 0,
979
+ finishedOn: finishedOn ? parseInt(finishedOn, 10) : void 0,
980
+ attempts: attempts ? parseInt(attempts, 10) : 0,
981
+ maxAttempts: maxAttempts ? parseInt(maxAttempts, 10) : this.defaultMaxAttempts
982
+ };
983
+ });
984
+ }
985
+ /**
986
+ * Convenience: return completed jobs as Job entities (non-breaking, new API)
987
+ */
988
+ async getCompletedJobs(limit = this.keepCompleted) {
989
+ const completedKey = `${this.ns}:completed`;
990
+ const ids = await this.r.zrevrange(completedKey, 0, Math.max(0, limit - 1));
991
+ if (ids.length === 0) return [];
992
+ const pipe = this.r.multi();
993
+ for (const id of ids) pipe.hgetall(`${this.ns}:job:${id}`);
994
+ const rows = await pipe.exec();
995
+ const jobs = [];
996
+ for (let i = 0; i < ids.length; i++) {
997
+ const id = ids[i];
998
+ const raw = rows?.[i]?.[1] || {};
999
+ if (!raw || Object.keys(raw).length === 0) {
1000
+ this.logger.warn(`Skipping completed job ${id} - not found (likely cleaned up)`);
1001
+ continue;
1002
+ }
1003
+ const job = Job.fromRawHash(this, id, raw, "completed");
1004
+ jobs.push(job);
1005
+ }
1006
+ return jobs;
1007
+ }
1008
+ /**
1009
+ * Convenience: return failed jobs as Job entities (non-breaking, new API)
1010
+ */
1011
+ async getFailedJobs(limit = this.keepFailed) {
1012
+ const failedKey = `${this.ns}:failed`;
1013
+ const ids = await this.r.zrevrange(failedKey, 0, Math.max(0, limit - 1));
1014
+ if (ids.length === 0) return [];
1015
+ const pipe = this.r.multi();
1016
+ for (const id of ids) pipe.hgetall(`${this.ns}:job:${id}`);
1017
+ const rows = await pipe.exec();
1018
+ const jobs = [];
1019
+ for (let i = 0; i < ids.length; i++) {
1020
+ const id = ids[i];
1021
+ const raw = rows?.[i]?.[1] || {};
1022
+ if (!raw || Object.keys(raw).length === 0) {
1023
+ this.logger.warn(`Skipping failed job ${id} - not found (likely cleaned up)`);
1024
+ continue;
1025
+ }
1026
+ const job = Job.fromRawHash(this, id, raw, "failed");
1027
+ jobs.push(job);
1028
+ }
1029
+ return jobs;
1030
+ }
1031
+ async getCompletedCount() {
1032
+ return this.r.zcard(`${this.ns}:completed`);
1033
+ }
1034
+ async getFailedCount() {
1035
+ return this.r.zcard(`${this.ns}:failed`);
1036
+ }
1037
+ async heartbeat(job, extendMs = this.vt) {
1038
+ return evalScript(this.r, "heartbeat", [
1039
+ this.ns,
1040
+ job.id,
1041
+ job.groupId,
1042
+ String(extendMs)
1043
+ ], 1);
1044
+ }
1045
+ /**
1046
+ * Clean up expired jobs and stale data.
1047
+ * Uses a distributed lock to ensure only one worker runs cleanup at a time,
1048
+ * similar to scheduler lock pattern.
1049
+ */
1050
+ async cleanup() {
1051
+ const cleanupLockKey = `${this.ns}:cleanup:lock`;
1052
+ const ttlMs = 6e4;
1053
+ try {
1054
+ if (await this.r.set(cleanupLockKey, "1", "PX", ttlMs, "NX") !== "OK") return 0;
1055
+ const now = Date.now();
1056
+ return evalScript(this.r, "cleanup", [this.ns, String(now)], 1);
1057
+ } catch (_e) {
1058
+ return 0;
1059
+ }
1060
+ }
1061
+ /**
1062
+ * Calculate adaptive blocking timeout like BullMQ
1063
+ * Returns timeout in seconds
1064
+ *
1065
+ * Inspired by BullMQ ⭐️
1066
+ */
1067
+ getBlockTimeout(maxTimeout, blockUntil) {
1068
+ const minimumBlockTimeout = .001;
1069
+ const maximumBlockTimeout = 5;
1070
+ if (blockUntil) {
1071
+ const blockDelay = blockUntil - Date.now();
1072
+ if (blockDelay <= 0) return minimumBlockTimeout;
1073
+ else if (blockDelay < minimumBlockTimeout * 1e3) return minimumBlockTimeout;
1074
+ else return Math.min(blockDelay / 1e3, maximumBlockTimeout);
1075
+ }
1076
+ return Math.max(minimumBlockTimeout, Math.min(maxTimeout, maximumBlockTimeout));
1077
+ }
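A worked example of the clamping above (all values in seconds):

queue.getBlockTimeout(5);                    // no blockUntil: max(0.001, min(5, 5)) = 5
queue.getBlockTimeout(5, Date.now() + 250);  // due in 250ms: min(0.25, 5) = 0.25
queue.getBlockTimeout(5, Date.now() - 10);   // already due: 0.001 (the minimum)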
1078
+ /**
1079
+ * Check if an error is a Redis connection error (should retry)
1080
+ * Conservative approach: only connection closed and ECONNREFUSED
1081
+ */
1082
+ isConnectionError(err) {
1083
+ if (!err) return false;
1084
+ const message = `${err.message || ""}`;
1085
+ return message === "Connection is closed." || message.includes("ECONNREFUSED");
1086
+ }
1087
+ async reserveBlocking(timeoutSec = 5, blockUntil, blockingClient) {
1088
+ const startTime = Date.now();
1089
+ if (await this.isPaused()) {
1090
+ await sleep$1(50);
1091
+ return null;
1092
+ }
1093
+ if (!(this._consecutiveEmptyReserves >= 3)) {
1094
+ const immediateJob = await this.reserve();
1095
+ if (immediateJob) {
1096
+ this.logger.debug(`Immediate reserve successful (${Date.now() - startTime}ms)`);
1097
+ this._consecutiveEmptyReserves = 0;
1098
+ return immediateJob;
1099
+ }
1100
+ }
1101
+ const adaptiveTimeout = this.getBlockTimeout(timeoutSec, blockUntil);
1102
+ if (this._consecutiveEmptyReserves % 10 === 0) this.logger.debug(`Starting blocking operation (timeout: ${adaptiveTimeout}s, consecutive empty: ${this._consecutiveEmptyReserves})`);
1103
+ const readyKey = nsKey(this.ns, "ready");
1104
+ try {
1105
+ const bzpopminStart = Date.now();
1106
+ const result = await (blockingClient ?? this.r).bzpopmin(readyKey, adaptiveTimeout);
1107
+ const bzpopminDuration = Date.now() - bzpopminStart;
1108
+ if (!result || result.length < 3) {
1109
+ this.logger.debug(`Blocking timeout/empty (took ${bzpopminDuration}ms)`);
1110
+ this._consecutiveEmptyReserves = this._consecutiveEmptyReserves + 1;
1111
+ return null;
1112
+ }
1113
+ const [, groupId, score] = result;
1114
+ if (this._consecutiveEmptyReserves % 10 === 0) this.logger.debug(`Blocking result: group=${groupId}, score=${score} (took ${bzpopminDuration}ms)`);
1115
+ const reserveStart = Date.now();
1116
+ const job = await this.reserveAtomic(groupId);
1117
+ const reserveDuration = Date.now() - reserveStart;
1118
+ if (job) {
1119
+ this.logger.debug(`Successful job reserve after blocking: ${job.id} from group ${job.groupId} (reserve took ${reserveDuration}ms)`);
1120
+ this._consecutiveEmptyReserves = 0;
1121
+ } else {
1122
+ this.logger.warn(`Blocking found group but reserve failed: group=${groupId} (reserve took ${reserveDuration}ms)`);
1123
+ try {
1124
+ const groupKey = `${this.ns}:g:${groupId}`;
1125
+ const jobCount = await this.r.zcard(groupKey);
1126
+ if (jobCount > 0) {
1127
+ await this.r.zadd(readyKey, Number(score), groupId);
1128
+ this.logger.debug(`Restored group ${groupId} to ready with score ${score} after failed atomic reserve (${jobCount} jobs)`);
1129
+ } else this.logger.warn(`Not restoring empty group ${groupId} - preventing poisoned group loop`);
1130
+ } catch (_e) {
1131
+ this.logger.warn(`Failed to check group ${groupId} job count, not restoring`);
1132
+ }
1133
+ this._consecutiveEmptyReserves = this._consecutiveEmptyReserves + 1;
1134
+ return this.reserve();
1135
+ }
1136
+ return job;
1137
+ } catch (err) {
1138
+ const errorDuration = Date.now() - startTime;
1139
+ this.logger.error(`Blocking error after ${errorDuration}ms:`, err);
1140
+ if (this.isConnectionError(err)) {
1141
+ this.logger.error(`Connection error detected - rethrowing`);
1142
+ throw err;
1143
+ }
1144
+ this.logger.warn(`Falling back to regular reserve due to error`);
1145
+ return this.reserve();
1146
+ } finally {
1147
+ const totalDuration = Date.now() - startTime;
1148
+ if (totalDuration > 1e3) this.logger.debug(`ReserveBlocking completed in ${totalDuration}ms`);
1149
+ }
1150
+ }
1151
+ /**
1152
+ * Reserve a job from a specific group atomically (eliminates race conditions)
1153
+ * @param groupId - The group to reserve from
1154
+ */
1155
+ async reserveAtomic(groupId) {
1156
+ const now = Date.now();
1157
+ const result = await evalScript(this.r, "reserve-atomic", [
1158
+ this.ns,
1159
+ String(now),
1160
+ String(this.vt),
1161
+ String(groupId)
1162
+ ], 1);
1163
+ if (!result) return null;
1164
+ const parts = result.split("|||");
1165
+ if (parts.length < 10) return null;
1166
+ const [id, groupIdRaw, data, attempts, maxAttempts, seq, timestamp, orderMs, score, deadline] = parts;
1167
+ const parsedTimestamp = parseInt(timestamp, 10);
1168
+ const parsedOrderMs = parseInt(orderMs, 10);
1169
+ return {
1170
+ id,
1171
+ groupId: groupIdRaw,
1172
+ data: JSON.parse(data),
1173
+ attempts: parseInt(attempts, 10),
1174
+ maxAttempts: parseInt(maxAttempts, 10),
1175
+ seq: parseInt(seq, 10),
1176
+ timestamp: parsedTimestamp,
1177
+ orderMs: Number.isNaN(parsedOrderMs) ? parsedTimestamp : parsedOrderMs,
1178
+ score: parseFloat(score),
1179
+ deadlineAt: parseInt(deadline, 10)
1180
+ };
1181
+ }
1182
+ /**
1183
+ * Get the list of groups currently in the ready state
1184
+ * @param start
1185
+ * @param end
1186
+ */
1187
+ async getReadyGroups(start = 0, end = -1) {
1188
+ return this.r.zrange(`${this.ns}:ready`, start, end);
1189
+ }
1190
+ /**
1191
+ * Set group metadata (priority / concurrency)
1192
+ * We store these settings in a hash: groupmq:{ns}:config:{groupId}
1193
+ */
1194
+ async setGroupConfig(groupId, config) {
1195
+ const key = `${this.ns}:config:${groupId}`;
1196
+ const args = [];
1197
+ if (config.priority !== void 0) args.push("priority", String(config.priority));
1198
+ if (config.concurrency !== void 0) args.push("concurrency", String(config.concurrency));
1199
+ if (args.length > 0) await this.r.hset(key, ...args);
1200
+ }
1201
+ async getGroupConfig(groupId) {
1202
+ const key = `${this.ns}:config:${groupId}`;
1203
+ const [p, c] = await this.r.hmget(key, "priority", "concurrency");
1204
+ return {
1205
+ priority: p ? parseInt(p, 10) : 1,
1206
+ concurrency: c ? parseInt(c, 10) : 1
1207
+ };
1208
+ }
1209
+ /**
1210
+ * Set the concurrency limit for a given group
1212
+ * @param groupId The group ID
1213
+ * @param limit Concurrency limit (must be >= 1)
1213
+ */
1214
+ async setGroupConcurrency(groupId, limit) {
1215
+ const validLimit = Math.max(1, Math.floor(limit));
1216
+ await this.r.hset(`${this.ns}:config:${groupId}`, "concurrency", String(validLimit));
1217
+ }
1218
+ /**
1219
+ * Get the concurrency limit for a given group
1220
+ */
1221
+ async getGroupConcurrency(groupId) {
1222
+ const val = await this.r.hget(`${this.ns}:config:${groupId}`, "concurrency");
1223
+ return val ? parseInt(val, 10) : 1;
1224
+ }
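A sketch of the group-config API above. These methods only read and write the <ns>:config:<groupId> hash; how priority and concurrency actually influence reservation is decided by the Lua scripts, which this excerpt does not show:

await queue.setGroupConfig("tenant:9", { priority: 10, concurrency: 4 });
await queue.setGroupConcurrency("tenant:9", 0);           // floored and clamped to 1
console.log(await queue.getGroupConcurrency("tenant:9")); // 1
console.log(await queue.getGroupConfig("tenant:9"));      // { priority: 10, concurrency: 1 }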
1225
+ /**
1226
+ * Get the enqueue timestamp of the oldest job in a group
1227
+ * Used by the PriorityStrategy aging algorithm
1228
+ * @param groupId The group ID
1229
+ * @returns The oldest job's timestamp, or undefined if the group is empty
1230
+ */
1231
+ async getGroupOldestTimestamp(groupId) {
1232
+ const gZ = `${this.ns}:g:${groupId}`;
1233
+ const result = await this.r.zrange(gZ, 0, 0);
1234
+ if (!result || result.length === 0) return;
1235
+ const jobId = result[0];
1236
+ const timestamp = await this.r.hget(`${this.ns}:job:${jobId}`, "timestamp");
1237
+ return timestamp ? parseInt(timestamp, 10) : void 0;
1238
+ }
1239
+ /**
1240
+ * Reserve up to maxBatch jobs (one per available group) atomically in Lua.
1241
+ */
1242
+ async reserveBatch(maxBatch = 16) {
1243
+ const now = Date.now();
1244
+ const results = await evalScript(this.r, "reserve-batch", [
1245
+ this.ns,
1246
+ String(now),
1247
+ String(this.vt),
1248
+ String(Math.max(1, maxBatch))
1249
+ ], 1);
1250
+ const out = [];
1251
+ for (const r of results || []) {
1252
+ if (!r) continue;
1253
+ const parts = r.split("|||");
1254
+ if (parts.length !== 10) continue;
1255
+ out.push({
1256
+ id: parts[0],
1257
+ groupId: parts[1],
1258
+ data: safeJsonParse(parts[2]),
1259
+ attempts: parseInt(parts[3], 10),
1260
+ maxAttempts: parseInt(parts[4], 10),
1261
+ seq: parseInt(parts[5], 10),
1262
+ timestamp: parseInt(parts[6], 10),
1263
+ orderMs: parseInt(parts[7], 10),
1264
+ score: parseFloat(parts[8]),
1265
+ deadlineAt: parseInt(parts[9], 10)
1266
+ });
1267
+ }
1268
+ return out;
1269
+ }
1270
+ /**
1271
+ * Get the number of jobs currently being processed (active jobs)
1272
+ */
1273
+ async getActiveCount() {
1274
+ return evalScript(this.r, "get-active-count", [this.ns], 1);
1275
+ }
1276
+ /**
1277
+ * Get the number of jobs waiting to be processed
1278
+ */
1279
+ async getWaitingCount() {
1280
+ return evalScript(this.r, "get-waiting-count", [this.ns], 1);
1281
+ }
1282
+ /**
1283
+ * Get the number of jobs delayed due to backoff
1284
+ */
1285
+ async getDelayedCount() {
1286
+ return evalScript(this.r, "get-delayed-count", [this.ns], 1);
1287
+ }
1288
+ /**
1289
+ * Get list of active job IDs
1290
+ */
1291
+ async getActiveJobs() {
1292
+ return evalScript(this.r, "get-active-jobs", [this.ns], 1);
1293
+ }
1294
+ /**
1295
+ * Get list of waiting job IDs
1296
+ */
1297
+ async getWaitingJobs() {
1298
+ return evalScript(this.r, "get-waiting-jobs", [this.ns], 1);
1299
+ }
1300
+ /**
1301
+ * Get list of delayed job IDs
1302
+ */
1303
+ async getDelayedJobs() {
1304
+ return evalScript(this.r, "get-delayed-jobs", [this.ns], 1);
1305
+ }
1306
+ /**
1307
+ * Get list of unique group IDs that have jobs
1308
+ */
1309
+ async getUniqueGroups() {
1310
+ return evalScript(this.r, "get-unique-groups", [this.ns], 1);
1311
+ }
1312
+ /**
1313
+ * Get count of unique groups that have jobs
1314
+ */
1315
+ async getUniqueGroupsCount() {
1316
+ return evalScript(this.r, "get-unique-groups-count", [this.ns], 1);
1317
+ }
1318
+ /**
1319
+ * Fetch a single job by ID with enriched fields for UI/inspection.
1320
+ * Attempts to mimic BullMQ's Job shape for fields commonly used by BullBoard.
1321
+ */
1322
+ async getJob(id) {
1323
+ return Job.fromStore(this, id);
1324
+ }
1325
+ /**
1326
+ * Fetch jobs by statuses, emulating BullMQ's Queue.getJobs API used by BullBoard.
1327
+ * Only getter functionality; ordering is best-effort.
1328
+ *
1329
+ * Optimized with pagination to reduce Redis load - especially important for BullBoard polling.
1330
+ */
1331
+ async getJobsByStatus(jobStatuses, start = 0, end = -1) {
1332
+ const requestedCount = end >= 0 ? end - start + 1 : 100;
1333
+ const fetchLimit = Math.min(requestedCount * 2, 500);
1334
+ const idToStatus = /* @__PURE__ */ new Map();
1335
+ const idSets = [];
1336
+ const pushZRange = async (key, status, reverse = false) => {
1337
+ try {
1338
+ const ids = reverse ? await this.r.zrevrange(key, 0, fetchLimit - 1) : await this.r.zrange(key, 0, fetchLimit - 1);
1339
+ for (const id of ids) idToStatus.set(id, status);
1340
+ idSets.push(...ids);
1341
+ } catch (_e) {}
1342
+ };
1343
+ const statuses = new Set(jobStatuses);
1344
+ if (statuses.has("active")) await pushZRange(`${this.ns}:processing`, "active");
1345
+ if (statuses.has("delayed")) await pushZRange(`${this.ns}:delayed`, "delayed");
1346
+ if (statuses.has("completed")) await pushZRange(`${this.ns}:completed`, "completed", true);
1347
+ if (statuses.has("failed")) await pushZRange(`${this.ns}:failed`, "failed", true);
1348
+ if (statuses.has("waiting")) try {
1349
+ const groupIds = await this.r.smembers(`${this.ns}:groups`);
1350
+ if (groupIds.length > 0) {
1351
+ const groupsToScan = groupIds.slice(0, Math.min(100, groupIds.length));
1352
+ const pipe$1 = this.r.multi();
1353
+ const jobsPerGroup = Math.max(1, Math.ceil(fetchLimit / groupsToScan.length));
1354
+ for (const gid of groupsToScan) pipe$1.zrange(`${this.ns}:g:${gid}`, 0, jobsPerGroup - 1);
1355
+ const rows$1 = await pipe$1.exec();
1356
+ for (const r of rows$1 || []) {
1357
+ const arr = r?.[1] || [];
1358
+ for (const id of arr) idToStatus.set(id, "waiting");
1359
+ idSets.push(...arr);
1360
+ }
1361
+ }
1362
+ } catch (_e) {}
1363
+ const seen = /* @__PURE__ */ new Set();
1364
+ const uniqueIds = [];
1365
+ for (const id of idSets) if (!seen.has(id)) {
1366
+ seen.add(id);
1367
+ uniqueIds.push(id);
1368
+ }
1369
+ const slice = end >= 0 ? uniqueIds.slice(start, end + 1) : uniqueIds.slice(start);
1370
+ if (slice.length === 0) return [];
1371
+ const pipe = this.r.multi();
1372
+ for (const id of slice) pipe.hgetall(`${this.ns}:job:${id}`);
1373
+ const rows = await pipe.exec();
1374
+ const jobs = [];
1375
+ for (let i = 0; i < slice.length; i++) {
1376
+ const id = slice[i];
1377
+ const raw = rows?.[i]?.[1] || {};
1378
+ if (!raw || Object.keys(raw).length === 0) {
1379
+ this.logger.warn(`Skipping job ${id} - not found (likely cleaned up by retention)`);
1380
+ continue;
1381
+ }
1382
+ const knownStatus = idToStatus.get(id);
1383
+ const job = Job.fromRawHash(this, id, raw, knownStatus);
1384
+ jobs.push(job);
1385
+ }
1386
+ return jobs;
1387
+ }
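A paging sketch for the BullBoard-style getters; statuses match getStatuses() on the adapter above, and getJobCounts() is defined just below:

const page = await queue.getJobsByStatus(["active", "waiting"], 0, 24); // first 25
const counts = await queue.getJobCounts();
console.log(counts.active, counts.waiting, counts.failed);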
1388
+ /**
1389
+ * Provide counts structured like BullBoard expects.
1390
+ */
1391
+ async getJobCounts() {
1392
+ const [active, waiting, delayed, completed, failed] = await Promise.all([
1393
+ this.getActiveCount(),
1394
+ this.getWaitingCount(),
1395
+ this.getDelayedCount(),
1396
+ this.getCompletedCount(),
1397
+ this.getFailedCount()
1398
+ ]);
1399
+ return {
1400
+ active,
1401
+ waiting,
1402
+ delayed,
1403
+ completed,
1404
+ failed,
1405
+ paused: 0,
1406
+ "waiting-children": 0,
1407
+ prioritized: 0
1408
+ };
1409
+ }
1410
+ /**
1411
+ * Check for stalled jobs and recover or fail them
1412
+ * Returns array of [jobId, groupId, action] tuples
1413
+ */
1414
+ async checkStalledJobs(now, gracePeriod, maxStalledCount) {
1415
+ try {
1416
+ return await evalScript(this.r, "check-stalled", [
1417
+ this.ns,
1418
+ String(now),
1419
+ String(gracePeriod),
1420
+ String(maxStalledCount)
1421
+ ], 1) || [];
1422
+ } catch (error) {
1423
+ this.logger.error("Error checking stalled jobs:", error);
1424
+ return [];
1425
+ }
1426
+ }
1427
+ /**
1428
+ * Start the promoter service for the staging system.
1429
+ * The promoter listens for Redis keyspace notifications and promotes staged jobs when ready.
1430
+ * This is idempotent - calling multiple times has no effect if already running.
1431
+ */
1432
+ async startPromoter() {
1433
+ if (this.promoterRunning || this.orderingDelayMs <= 0) return;
1434
+ this.promoterRunning = true;
1435
+ this.promoterLockId = (0, node_crypto.randomUUID)();
1436
+ try {
1437
+ this.promoterRedis = this.r.duplicate();
1438
+ try {
1439
+ await this.promoterRedis.config("SET", "notify-keyspace-events", "Ex");
1440
+ this.logger.debug("Enabled Redis keyspace notifications for staging promoter");
1441
+ } catch (err) {
1442
+ this.logger.warn("Failed to enable keyspace notifications. Promoter will use polling fallback.", err);
1443
+ }
1444
+ const db = this.promoterRedis.options.db ?? 0;
1445
+ const timerKey = `${this.ns}:stage:timer`;
1446
+ const expiredChannel = `__keyevent@${db}__:expired`;
1447
+ await this.promoterRedis.subscribe(expiredChannel, (err) => {
1448
+ if (err) this.logger.error("Failed to subscribe to keyspace events:", err);
1449
+ else this.logger.debug(`Subscribed to ${expiredChannel}`);
1450
+ });
1451
+ this.promoterRedis.on("message", async (channel, message) => {
1452
+ if (channel === expiredChannel && message === timerKey) await this.runPromotion();
1453
+ });
1454
+ this.promoterInterval = setInterval(async () => {
1455
+ await this.runPromotion();
1456
+ }, 100);
1457
+ await this.runPromotion();
1458
+ this.logger.debug("Staging promoter started");
1459
+ } catch (err) {
1460
+ this.logger.error("Failed to start promoter:", err);
1461
+ this.promoterRunning = false;
1462
+ await this.stopPromoter();
1463
+ }
1464
+ }
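The promoter depends on Redis key-expiry events (__keyevent@<db>__:expired). startPromoter attempts to enable them itself, as echoed below, but on managed Redis where CONFIG SET is blocked the equivalent (notify-keyspace-events Ex) must be enabled server-side; the 100ms interval above doubles as a polling fallback either way:

await redis.config("SET", "notify-keyspace-events", "Ex");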
1465
+ /**
1466
+ * Run a single promotion cycle with distributed locking
1467
+ */
1468
+ async runPromotion() {
1469
+ if (!this.promoterRunning) return;
1470
+ const lockKey = `${this.ns}:promoter:lock`;
1471
+ const lockTtl = 3e4;
1472
+ try {
1473
+ if (await this.r.set(lockKey, this.promoterLockId, "PX", lockTtl, "NX") === "OK") try {
1474
+ const promoted = await evalScript(this.r, "promote-staged", [
1475
+ this.ns,
1476
+ String(Date.now()),
1477
+ String(100)
1478
+ ], 1);
1479
+ if (promoted > 0) this.logger.debug(`Promoted ${promoted} staged jobs`);
1480
+ } finally {
1481
+ if (await this.r.get(lockKey) === this.promoterLockId) await this.r.del(lockKey);
1482
+ }
1483
+ } catch (err) {
1484
+ this.logger.error("Error during promotion:", err);
1485
+ }
1486
+ }
1487
+ /**
1488
+ * Stop the promoter service
1489
+ */
1490
+ async stopPromoter() {
1491
+ if (!this.promoterRunning) return;
1492
+ this.promoterRunning = false;
1493
+ if (this.promoterInterval) {
1494
+ clearInterval(this.promoterInterval);
1495
+ this.promoterInterval = void 0;
1496
+ }
1497
+ if (this.promoterRedis) {
1498
+ try {
1499
+ await this.promoterRedis.unsubscribe();
1500
+ await this.promoterRedis.quit();
1501
+ } catch (_err) {
1502
+ try {
1503
+ this.promoterRedis.disconnect();
1504
+ } catch (_e) {}
1505
+ }
1506
+ this.promoterRedis = void 0;
1507
+ }
1508
+ this.logger.debug("Staging promoter stopped");
1509
+ }
1510
+ /**
1511
+ * Flush any pending batched jobs, stop the promoter, and close underlying Redis connections
1512
+ */
1513
+ async close() {
1514
+ if (this.batchConfig && this.batchBuffer.length > 0) {
1515
+ this.logger.debug(`Flushing ${this.batchBuffer.length} pending batched jobs before close`);
1516
+ await this.flushBatch();
1517
+ }
1518
+ await this.stopPromoter();
1519
+ try {
1520
+ await this.r.quit();
1521
+ } catch (_e) {
1522
+ try {
1523
+ this.r.disconnect();
1524
+ } catch (_e2) {}
1525
+ }
1526
+ }
1527
+ get pausedKey() {
1528
+ return `${this.ns}:paused`;
1529
+ }
1530
+ async pause() {
1531
+ await this.r.set(this.pausedKey, "1");
1532
+ }
1533
+ async resume() {
1534
+ await this.r.del(this.pausedKey);
1535
+ }
1536
+ async isPaused() {
1537
+ return await this.r.get(this.pausedKey) !== null;
1538
+ }
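+ // Hedged usage sketch (assuming `queue` is an instance of this class):
+ //   await queue.pause();     // sets the `${ns}:paused` flag key
+ //   await queue.isPaused();  // -> true
+ //   await queue.resume();    // deletes the flag key
+ // Pausing appears to be cooperative: the reservation scripts are expected to
+ // check this flag, so jobs already in flight still run to completion.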
1539
+ /**
1540
+ * Wait for the queue to become empty (no active jobs)
1541
+ * @param timeoutMs Maximum time to wait in milliseconds (default: 60 seconds)
1542
+ * @returns true if queue became empty, false if timeout reached
1543
+ */
1544
+ async waitForEmpty(timeoutMs = 6e4) {
1545
+ const startTime = Date.now();
1546
+ while (Date.now() - startTime < timeoutMs) try {
1547
+ if (await evalScript(this.r, "is-empty", [this.ns], 1) === 1) {
1548
+ await sleep$1(0);
1549
+ return true;
1550
+ }
1551
+ await sleep$1(200);
1552
+ } catch (err) {
1553
+ if (this.isConnectionError(err)) {
1554
+ this.logger.warn("Redis connection error in waitForEmpty, retrying...");
1555
+ await sleep$1(1e3);
1556
+ continue;
1557
+ }
1558
+ throw err;
1559
+ }
1560
+ return false;
1561
+ }
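+ // Hedged shutdown sketch (variable names assumed):
+ //   const drained = await queue.waitForEmpty(30000);
+ //   if (!drained) console.warn("queue still busy after 30s, closing anyway");
+ //   await worker.close();
+ //   await queue.close();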
1562
+ /**
1563
+ * Remove problematic groups from the ready queue to prevent infinite loops
1564
+ * Handles both poisoned groups (only failed/expired jobs) and locked groups
1565
+ *
1566
+ * Throttled to a 1% sampling rate, with a 10s per-group cooldown, to reduce Redis overhead
1567
+ */
1568
+ async cleanupPoisonedGroup(groupId) {
1569
+ if (Math.random() > .01) return "skipped";
1570
+ const lastCheck = this._groupCleanupTracking.get(groupId) || 0;
1571
+ const now = Date.now();
1572
+ if (now - lastCheck < 1e4) return "throttled";
1573
+ this._groupCleanupTracking.set(groupId, now);
1574
+ if (this._groupCleanupTracking.size > 1e3) {
1575
+ const cutoff = now - 6e4;
1576
+ for (const [gid, ts] of this._groupCleanupTracking.entries()) if (ts < cutoff) this._groupCleanupTracking.delete(gid);
1577
+ }
1578
+ try {
1579
+ const result = await evalScript(this.r, "cleanup-poisoned-group", [
1580
+ this.ns,
1581
+ groupId,
1582
+ String(now)
1583
+ ], 1);
1584
+ if (result === "poisoned") this.logger.warn(`Removed poisoned group ${groupId} from ready queue`);
1585
+ else if (result === "empty") this.logger.warn(`Removed empty group ${groupId} from ready queue`);
1586
+ else if (result === "locked") {
1587
+ if (Math.random() < .1) this.logger.debug(`Detected group ${groupId} is locked by another worker (this is normal with high concurrency)`);
1588
+ }
1589
+ return result;
1590
+ } catch (error) {
1591
+ this.logger.error(`Error cleaning up group ${groupId}:`, error);
1592
+ return "error";
1593
+ }
1594
+ }
1595
+ /**
1596
+ * Distributed one-shot scheduler: promotes delayed jobs and processes repeating jobs.
1597
+ * Only proceeds if a short-lived scheduler lock can be acquired.
1598
+ */
1599
+ schedulerLockKey() {
1600
+ return `${this.ns}:sched:lock`;
1601
+ }
1602
+ async acquireSchedulerLock(ttlMs = 1500) {
1603
+ try {
1604
+ return await this.r.set(this.schedulerLockKey(), "1", "PX", ttlMs, "NX") === "OK";
1605
+ } catch (_e) {
1606
+ return false;
1607
+ }
1608
+ }
1609
+ async runSchedulerOnce(now = Date.now()) {
1610
+ if (!await this.acquireSchedulerLock(this.schedulerLockTtlMs)) return;
1611
+ await this.promoteDelayedJobsBounded(32, now);
1612
+ await this.processRepeatingJobsBounded(16, now);
1613
+ }
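+ // Hedged usage sketch: a Worker already drives this via schedulerIntervalMs,
+ // but a process without workers can tick it directly; the PX/NX lock above
+ // makes overlapping ticks across processes a no-op:
+ //   setInterval(() => queue.runSchedulerOnce().catch(() => {}), 1000);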
1614
+ /**
1615
+ * Promote up to `limit` delayed jobs that are due now. Uses a small Lua script that moves one job per call.
1616
+ */
1617
+ async promoteDelayedJobsBounded(limit = 256, now = Date.now()) {
1618
+ let moved = 0;
1619
+ for (let i = 0; i < limit; i++) try {
1620
+ const n = await evalScript(this.r, "promote-delayed-one", [this.ns, String(now)], 1);
1621
+ if (!n || n <= 0) break;
1622
+ moved += n;
1623
+ } catch (_e) {
1624
+ break;
1625
+ }
1626
+ return moved;
1627
+ }
1628
+ /**
1629
+ * Process up to `limit` repeating job ticks.
1630
+ * Intentionally small per-tick work to keep Redis CPU flat.
1631
+ */
1632
+ async processRepeatingJobsBounded(limit = 128, now = Date.now()) {
1633
+ const scheduleKey = `${this.ns}:repeat:schedule`;
1634
+ let processed = 0;
1635
+ for (let i = 0; i < limit; i++) {
1636
+ const due = await this.r.zrangebyscore(scheduleKey, 0, now, "LIMIT", 0, 1);
1637
+ if (!due || due.length === 0) break;
1638
+ const repeatKey = due[0];
1639
+ try {
1640
+ const repeatJobKey = `${this.ns}:repeat:${repeatKey}`;
1641
+ const repeatJobDataStr = await this.r.get(repeatJobKey);
1642
+ if (!repeatJobDataStr) {
1643
+ await this.r.zrem(scheduleKey, repeatKey);
1644
+ continue;
1645
+ }
1646
+ const repeatJobData = JSON.parse(repeatJobDataStr);
1647
+ if (repeatJobData.removed) {
1648
+ await this.r.zrem(scheduleKey, repeatKey);
1649
+ await this.r.del(repeatJobKey);
1650
+ continue;
1651
+ }
1652
+ await this.r.zrem(scheduleKey, repeatKey);
1653
+ let nextRunTime;
1654
+ if ("every" in repeatJobData.repeat) nextRunTime = now + repeatJobData.repeat.every;
1655
+ else nextRunTime = this.getNextCronTime(repeatJobData.repeat.pattern, now);
1656
+ repeatJobData.nextRunTime = nextRunTime;
1657
+ repeatJobData.lastRunTime = now;
1658
+ await this.r.set(repeatJobKey, JSON.stringify(repeatJobData));
1659
+ await this.r.zadd(scheduleKey, nextRunTime, repeatKey);
1660
+ await evalScript(this.r, "enqueue", [
1661
+ this.ns,
1662
+ repeatJobData.groupId,
1663
+ JSON.stringify(repeatJobData.data),
1664
+ String(repeatJobData.maxAttempts ?? this.defaultMaxAttempts),
1665
+ String(repeatJobData.orderMs ?? now),
1666
+ String(0),
1667
+ String((0, node_crypto.randomUUID)()),
1668
+ String(this.keepCompleted)
1669
+ ], 1);
1670
+ processed++;
1671
+ } catch (error) {
1672
+ this.logger.error(`Error processing repeating job ${repeatKey}:`, error);
1673
+ await this.r.zrem(scheduleKey, repeatKey);
1674
+ }
1675
+ }
1676
+ return processed;
1677
+ }
1678
+ /**
1679
+ * Promote delayed jobs that are now ready to be processed
1680
+ * This should be called periodically to move jobs from delayed set to ready queue
1681
+ */
1682
+ async promoteDelayedJobs() {
1683
+ try {
1684
+ return await evalScript(this.r, "promote-delayed-jobs", [this.ns, String(Date.now())], 1);
1685
+ } catch (error) {
1686
+ this.logger.error(`Error promoting delayed jobs:`, error);
1687
+ return 0;
1688
+ }
1689
+ }
1690
+ /**
1691
+ * Change the delay of a specific job
1692
+ */
1693
+ async changeDelay(jobId, newDelay) {
1694
+ const newDelayUntil = newDelay > 0 ? Date.now() + newDelay : 0;
1695
+ try {
1696
+ return await evalScript(this.r, "change-delay", [
1697
+ this.ns,
1698
+ jobId,
1699
+ String(newDelayUntil),
1700
+ String(Date.now())
1701
+ ], 1) === 1;
1702
+ } catch (error) {
1703
+ this.logger.error(`Error changing delay for job ${jobId}:`, error);
1704
+ return false;
1705
+ }
1706
+ }
1707
+ /**
1708
+ * Promote a delayed job to be ready immediately
1709
+ */
1710
+ async promote(jobId) {
1711
+ return this.changeDelay(jobId, 0);
1712
+ }
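+ // Hedged usage sketch:
+ //   await queue.changeDelay(jobId, 60000); // run ~60s from now
+ //   await queue.promote(jobId);            // clear the delay, run ASAP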
1713
+ /**
1714
+ * Remove a job from the queue regardless of state (waiting, delayed, processing)
1715
+ */
1716
+ async remove(jobId) {
1717
+ try {
1718
+ return await evalScript(this.r, "remove", [this.ns, jobId], 1) === 1;
1719
+ } catch (error) {
1720
+ this.logger.error(`Error removing job ${jobId}:`, error);
1721
+ return false;
1722
+ }
1723
+ }
1724
+ /**
1725
+ * Clean jobs of a given status older than graceTimeMs
1726
+ * @param graceTimeMs Remove jobs with finishedOn <= now - graceTimeMs (for completed/failed)
1727
+ * @param limit Max number of jobs to clean in one call
1728
+ * @param status Either 'completed' or 'failed'
1729
+ */
1730
+ async clean(graceTimeMs, limit, status) {
1731
+ const graceAt = Date.now() - graceTimeMs;
1732
+ try {
1733
+ return await evalScript(this.r, "clean-status", [
1734
+ this.ns,
1735
+ status,
1736
+ String(graceAt),
1737
+ String(Math.max(0, Math.min(limit, 1e5)))
1738
+ ], 1) ?? 0;
1739
+ } catch (error) {
1741
+ this.logger.error(`Error cleaning ${status} jobs:`, error);
1742
+ return 0;
1743
+ }
1744
+ }
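+ // Hedged usage sketch: drop completed jobs finished more than an hour ago,
+ // at most 1000 per call (the limit is clamped to 100000 above):
+ //   const removed = await queue.clean(60 * 60 * 1000, 1000, "completed");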
1745
+ /**
1746
+ * Update a job's data payload (BullMQ-style)
1747
+ */
1748
+ async updateData(jobId, data) {
1749
+ const jobKey = `${this.ns}:job:${jobId}`;
1750
+ if (!await this.r.exists(jobKey)) throw new Error(`Job ${jobId} not found`);
1751
+ const serialized = JSON.stringify(data === void 0 ? null : data);
1752
+ await this.r.hset(jobKey, "data", serialized);
1753
+ }
1754
+ /**
1755
+ * Add a repeating job (fixed interval via `every` or cron via `pattern`)
1756
+ */
1757
+ async addRepeatingJob(opts) {
1758
+ if (!opts.repeat) throw new Error("Repeat options are required for repeating jobs");
1759
+ const now = Date.now();
1760
+ const repeatKey = `${opts.groupId}:${JSON.stringify(opts.repeat)}:${now}:${Math.random().toString(36).slice(2)}`;
1761
+ let nextRunTime;
1762
+ if ("every" in opts.repeat) nextRunTime = now + opts.repeat.every;
1763
+ else nextRunTime = this.getNextCronTime(opts.repeat.pattern, now);
1764
+ const repeatJobData = {
1765
+ groupId: opts.groupId,
1766
+ data: opts.data === void 0 ? null : opts.data,
1767
+ maxAttempts: opts.maxAttempts ?? this.defaultMaxAttempts,
1768
+ orderMs: opts.orderMs,
1769
+ repeat: opts.repeat,
1770
+ nextRunTime,
1771
+ lastRunTime: null,
1772
+ removed: false
1773
+ };
1774
+ const repeatJobKey = `${this.ns}:repeat:${repeatKey}`;
1775
+ await this.r.set(repeatJobKey, JSON.stringify(repeatJobData));
1776
+ await this.r.zadd(`${this.ns}:repeat:schedule`, nextRunTime, repeatKey);
1777
+ const lookupKey = `${this.ns}:repeat:lookup:${opts.groupId}:${JSON.stringify(opts.repeat)}`;
1778
+ await this.r.set(lookupKey, repeatKey);
1779
+ const repeatId = `repeat:${repeatKey}`;
1780
+ const jobHashKey = `${this.ns}:job:${repeatId}`;
1781
+ try {
1782
+ await this.r.hmset(jobHashKey, "id", repeatId, "groupId", repeatJobData.groupId, "data", JSON.stringify(repeatJobData.data), "attempts", "0", "maxAttempts", String(repeatJobData.maxAttempts), "seq", "0", "timestamp", String(Date.now()), "orderMs", String(repeatJobData.orderMs ?? now), "status", "waiting");
1783
+ } catch (_e) {}
1784
+ return Job.fromStore(this, repeatId);
1785
+ }
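+ // Hedged usage sketch covering both repeat shapes handled above (the groupId
+ // and data values are illustrative):
+ //   await queue.addRepeatingJob({ groupId: "g1", data: { kind: "tick" }, repeat: { every: 5000 } });
+ //   await queue.addRepeatingJob({ groupId: "g1", data: { kind: "nightly" }, repeat: { pattern: "0 3 * * *" } });
+ //   // removal matches on the same (groupId, repeat) pair via the lookup key:
+ //   await queue.removeRepeatingJob("g1", { pattern: "0 3 * * *" });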
1786
+ /**
1787
+ * Compute next execution time using cron-parser (BullMQ-style)
1788
+ */
1789
+ getNextCronTime(pattern, fromTime) {
1790
+ try {
1791
+ return cron_parser.default.parseExpression(pattern, { currentDate: new Date(fromTime) }).next().getTime();
1792
+ } catch (_e) {
1793
+ throw new Error(`Invalid cron pattern: ${pattern}`);
1794
+ }
1795
+ }
1796
+ /**
1797
+ * Remove a repeating job
1798
+ */
1799
+ async removeRepeatingJob(groupId, repeat) {
1800
+ try {
1801
+ const lookupKey = `${this.ns}:repeat:lookup:${groupId}:${JSON.stringify(repeat)}`;
1802
+ const repeatKey = await this.r.get(lookupKey);
1803
+ if (!repeatKey) return false;
1804
+ const repeatJobKey = `${this.ns}:repeat:${repeatKey}`;
1805
+ const scheduleKey = `${this.ns}:repeat:schedule`;
1806
+ const repeatJobDataStr = await this.r.get(repeatJobKey);
1807
+ if (!repeatJobDataStr) {
1808
+ await this.r.del(lookupKey);
1809
+ return false;
1810
+ }
1811
+ const repeatJobData = JSON.parse(repeatJobDataStr);
1812
+ repeatJobData.removed = true;
1813
+ await this.r.set(repeatJobKey, JSON.stringify(repeatJobData));
1814
+ await this.r.zrem(scheduleKey, repeatKey);
1815
+ await this.r.del(lookupKey);
1816
+ try {
1817
+ const repeatId = `repeat:${repeatKey}`;
1818
+ await this.r.del(`${this.ns}:job:${repeatId}`);
1819
+ } catch (_e) {}
1820
+ return true;
1821
+ } catch (error) {
1822
+ this.logger.error(`Error removing repeating job:`, error);
1823
+ return false;
1824
+ }
1825
+ }
1826
+ };
1827
+ function sleep$1(ms) {
1828
+ return new Promise((resolve) => setTimeout(resolve, ms));
1829
+ }
1830
+
1831
+ //#endregion
1832
+ //#region src/async-fifo-queue.ts
1833
+ /**
1834
+ * This file contains code copied from BullMQ (https://github.com/taskforcesh/bullmq)
1835
+ *
1836
+ * BullMQ is a fantastic library and one of the most popular Redis-based job queue
1837
+ * libraries for Node.js. We've copied the AsyncFifoQueue implementation from BullMQ
1838
+ * as it's a well-designed component that fits our needs perfectly.
1839
+ *
1840
+ * Original copyright notice:
1841
+ * Copyright (c) Taskforce.sh and contributors
1842
+ *
1843
+ * This code is used under the MIT License. The original license can be found at:
1844
+ * https://github.com/taskforcesh/bullmq/blob/main/LICENSE
1845
+ *
1846
+ * Modifications may have been made to adapt this code for use in GroupMQ.
1847
+ */
1848
+ var Node = class {
1849
+ constructor(value) {
1850
+ this.value = void 0;
1851
+ this.next = null;
1852
+ this.value = value;
1853
+ }
1854
+ };
1855
+ var LinkedList = class {
1856
+ constructor() {
1857
+ this.length = 0;
1858
+ this.head = null;
1859
+ this.tail = null;
1860
+ }
1861
+ push(value) {
1862
+ const newNode = new Node(value);
1863
+ if (!this.length) this.head = newNode;
1864
+ else this.tail.next = newNode;
1865
+ this.tail = newNode;
1866
+ this.length += 1;
1867
+ return newNode;
1868
+ }
1869
+ shift() {
1870
+ if (!this.length) return null;
1871
+ const head = this.head;
1872
+ this.head = this.head.next;
1873
+ this.length -= 1;
1874
+ return head;
1875
+ }
1876
+ };
1877
+ /**
1878
+ * AsyncFifoQueue
1879
+ *
1880
+ * A minimal FIFO queue for asynchronous operations. Allows adding asynchronous operations
1881
+ * and consuming them in the order they resolve.
1882
+ */
1883
+ var AsyncFifoQueue = class {
1884
+ constructor(ignoreErrors = false) {
1885
+ this.ignoreErrors = ignoreErrors;
1886
+ this.queue = new LinkedList();
1887
+ this.pending = /* @__PURE__ */ new Set();
1888
+ this.newPromise();
1889
+ }
1890
+ add(promise) {
1891
+ this.pending.add(promise);
1892
+ promise.then((data) => {
1893
+ this.pending.delete(promise);
1894
+ if (this.queue.length === 0) this.resolvePromise(data);
1895
+ this.queue.push(data);
1896
+ }).catch((err) => {
1897
+ this.pending.delete(promise);
1898
+ if (this.ignoreErrors) {
1899
+ if (this.queue.length === 0) this.resolvePromise(void 0);
1900
+ this.queue.push(void 0);
1901
+ } else this.rejectPromise(err);
1902
+ });
1903
+ }
1904
+ async waitAll() {
1905
+ await Promise.all(this.pending);
1906
+ }
1907
+ numTotal() {
1908
+ return this.pending.size + this.queue.length;
1909
+ }
1910
+ numPending() {
1911
+ return this.pending.size;
1912
+ }
1913
+ numQueued() {
1914
+ return this.queue.length;
1915
+ }
1916
+ resolvePromise(data) {
1917
+ this.resolve(data);
1918
+ this.newPromise();
1919
+ }
1920
+ rejectPromise(err) {
1921
+ this.reject(err);
1922
+ this.newPromise();
1923
+ }
1924
+ newPromise() {
1925
+ this.nextPromise = new Promise((resolve, reject) => {
1926
+ this.resolve = resolve;
1927
+ this.reject = reject;
1928
+ });
1929
+ }
1930
+ async wait() {
1931
+ return this.nextPromise;
1932
+ }
1933
+ async fetch() {
1934
+ if (this.pending.size === 0 && this.queue.length === 0) return;
1935
+ while (this.queue.length === 0) try {
1936
+ await this.wait();
1937
+ } catch (err) {
1938
+ if (!this.ignoreErrors) console.error("Unexpected Error in AsyncFifoQueue", err);
1939
+ }
1940
+ return this.queue.shift()?.value;
1941
+ }
1942
+ };
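+ // Hedged usage sketch: values come out in resolution order, not add order:
+ //   const fifo = new AsyncFifoQueue(true);
+ //   fifo.add(sleep$1(50).then(() => "slow"));
+ //   fifo.add(sleep$1(10).then(() => "fast"));
+ //   await fifo.fetch(); // -> "fast"
+ //   await fifo.fetch(); // -> "slow"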
1943
+
1944
+ //#endregion
1945
+ //#region src/worker.ts
1946
+ var TypedEventEmitter = class {
1947
+ constructor() {
1948
+ this.listeners = /* @__PURE__ */ new Map();
1949
+ }
1950
+ on(event, listener) {
1951
+ if (!this.listeners.has(event)) this.listeners.set(event, []);
1952
+ this.listeners.get(event).push(listener);
1953
+ return this;
1954
+ }
1955
+ off(event, listener) {
1956
+ const eventListeners = this.listeners.get(event);
1957
+ if (eventListeners) {
1958
+ const index = eventListeners.indexOf(listener);
1959
+ if (index !== -1) eventListeners.splice(index, 1);
1960
+ }
1961
+ return this;
1962
+ }
1963
+ emit(event, ...args) {
1964
+ const eventListeners = this.listeners.get(event);
1965
+ if (eventListeners && eventListeners.length > 0) {
1966
+ for (const listener of eventListeners) try {
1967
+ listener(...args);
1968
+ } catch (error) {
1969
+ console.error(`Error in event listener for '${String(event)}':`, error);
1970
+ }
1971
+ return true;
1972
+ }
1973
+ return false;
1974
+ }
1975
+ removeAllListeners(event) {
1976
+ if (event) this.listeners.delete(event);
1977
+ else this.listeners.clear();
1978
+ return this;
1979
+ }
1980
+ };
1981
+ const defaultBackoff = (attempt) => {
1982
+ const base = Math.min(3e4, 2 ** (attempt - 1) * 500);
1983
+ const jitter = Math.floor(base * .25 * Math.random());
1984
+ return base + jitter;
1985
+ };
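+ // Worked example of the schedule above: base = min(30s, 500ms * 2^(attempt-1)),
+ // plus up to 25% random jitter:
+ //   attempt 1 -> 500..625ms
+ //   attempt 2 -> 1000..1250ms
+ //   attempt 7 -> 30000..37500ms (2^6 * 500 = 32000 hits the 30s cap)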
1986
+ var _Worker = class extends TypedEventEmitter {
1987
+ constructor(opts) {
1988
+ super();
1989
+ this.stopping = false;
1990
+ this.ready = false;
1991
+ this.closed = false;
1992
+ this.blockingClient = null;
1993
+ this.jobsInProgress = /* @__PURE__ */ new Set();
1994
+ this.lastJobPickupTime = Date.now();
1995
+ this.totalJobsProcessed = 0;
1996
+ this.blockingStats = {
1997
+ totalBlockingCalls: 0,
1998
+ consecutiveEmptyReserves: 0,
1999
+ lastActivityTime: Date.now()
2000
+ };
2001
+ this.emptyReserveBackoffMs = 0;
2002
+ if (!opts.handler || typeof opts.handler !== "function") throw new Error("Worker handler must be a function");
2003
+ this.opts = opts;
2004
+ this.q = opts.queue;
2005
+ this.name = opts.name ?? this.q.name;
2006
+ this.logger = typeof opts.logger === "object" ? opts.logger : new Logger(!!opts.logger, this.name);
2007
+ this.handler = opts.handler;
2008
+ const jobTimeoutMs = this.q.jobTimeoutMs ?? 3e4;
2009
+ this.hbMs = opts.heartbeatMs ?? Math.max(1e3, Math.floor(jobTimeoutMs / 3));
2010
+ this.onError = opts.onError;
2011
+ this.maxAttempts = opts.maxAttempts ?? this.q.maxAttemptsDefault ?? 3;
2012
+ this.backoff = opts.backoff ?? defaultBackoff;
2013
+ this.enableCleanup = opts.enableCleanup ?? true;
2014
+ this.cleanupMs = opts.cleanupIntervalMs ?? 6e4;
2015
+ this.schedulerMs = opts.schedulerIntervalMs ?? 1e3;
2016
+ this.blockingTimeoutSec = opts.blockingTimeoutSec ?? 5;
2017
+ this.concurrency = Math.max(1, opts.concurrency ?? 1);
2018
+ this.stalledInterval = opts.stalledInterval ?? (this.concurrency > 50 ? 6e4 : 3e4);
2019
+ this.maxStalledCount = opts.maxStalledCount ?? (this.concurrency > 50 ? 2 : 1);
2020
+ this.stalledGracePeriod = opts.stalledGracePeriod ?? 5e3;
2021
+ this.setupRedisEventHandlers();
2022
+ if (this.q.orderingDelayMs > 0) this.q.startPromoter().catch((err) => {
2023
+ this.logger.error("Failed to start staging promoter:", err);
2024
+ });
2025
+ this.run();
2026
+ }
2027
+ get isClosed() {
2028
+ return this.closed;
2029
+ }
2030
+ /**
2031
+ * Add jitter to prevent thundering herd problems in high-concurrency environments
2032
+ * @param baseInterval The base interval in milliseconds
2033
+ * @param jitterPercent Percentage of jitter to add (0-1, default 0.1 for 10%)
2034
+ * @returns The interval with jitter applied
2035
+ */
2036
+ addJitter(baseInterval, jitterPercent = .1) {
2037
+ const jitter = Math.random() * baseInterval * jitterPercent;
2038
+ return baseInterval + jitter;
2039
+ }
2040
+ setupRedisEventHandlers() {
2041
+ const redis = this.q.redis;
2042
+ if (redis) {
2043
+ this.redisCloseHandler = () => {
2044
+ this.ready = false;
2045
+ this.emit("ioredis:close");
2046
+ };
2047
+ this.redisErrorHandler = (error) => {
2048
+ this.emit("error", error);
2049
+ };
2050
+ this.redisReadyHandler = () => {
2051
+ if (!this.ready && !this.stopping) {
2052
+ this.ready = true;
2053
+ this.emit("ready");
2054
+ }
2055
+ };
2056
+ redis.on("close", this.redisCloseHandler);
2057
+ redis.on("error", this.redisErrorHandler);
2058
+ redis.on("ready", this.redisReadyHandler);
2059
+ }
2060
+ }
2061
+ async run() {
2062
+ if (this.runLoopPromise) return this.runLoopPromise;
2063
+ const runPromise = this._runLoop();
2064
+ this.runLoopPromise = runPromise;
2065
+ return runPromise;
2066
+ }
2067
+ async _runLoop() {
2068
+ this.logger.info(`🚀 Worker ${this.name} starting...`);
2069
+ const strategyPollInterval = this.opts.strategyPollInterval ?? 50;
2070
+ try {
2071
+ this.blockingClient = this.q.redis.duplicate({
2072
+ enableAutoPipelining: true,
2073
+ maxRetriesPerRequest: null,
2074
+ retryStrategy: (times) => {
2075
+ return Math.max(Math.min(Math.exp(times) * 1e3, 2e4), 1e3);
2076
+ }
2077
+ });
2078
+ this.blockingClient.on("error", (err) => {
2079
+ if (!this.q.isConnectionError(err)) this.logger.error("Blocking client error (non-connection):", err);
2080
+ else this.logger.warn("Blocking client connection error:", err.message);
2081
+ this.emit("error", err instanceof Error ? err : new Error(String(err)));
2082
+ });
2083
+ this.blockingClient.on("close", () => {
2084
+ if (!this.stopping && !this.closed) this.logger.warn("Blocking client disconnected, will reconnect on next operation");
2085
+ });
2086
+ this.blockingClient.on("reconnecting", () => {
2087
+ if (!this.stopping && !this.closed) this.logger.info("Blocking client reconnecting...");
2088
+ });
2089
+ this.blockingClient.on("ready", () => {
2090
+ if (!this.stopping && !this.closed) this.logger.info("Blocking client ready");
2091
+ });
2092
+ } catch (err) {
2093
+ this.logger.error("Failed to create blocking client:", err);
2094
+ this.blockingClient = null;
2095
+ }
2096
+ if (this.enableCleanup) {
2097
+ this.cleanupTimer = setInterval(async () => {
2098
+ try {
2099
+ await this.q.cleanup();
2100
+ } catch (err) {
2101
+ this.onError?.(err);
2102
+ }
2103
+ }, this.addJitter(this.cleanupMs));
2104
+ const schedulerInterval = Math.min(this.schedulerMs, this.cleanupMs);
2105
+ this.schedulerTimer = setInterval(async () => {
2106
+ try {
2107
+ await this.q.runSchedulerOnce();
2108
+ } catch (_err) {}
2109
+ }, this.addJitter(schedulerInterval));
2110
+ }
2111
+ this.startStalledChecker();
2112
+ let connectionRetries = 0;
2113
+ const maxConnectionRetries = 10;
2114
+ const asyncFifoQueue = new AsyncFifoQueue(true);
2115
+ while (!this.stopping || asyncFifoQueue.numTotal() > 0) try {
2116
+ while (!this.stopping) {
2117
+ if (asyncFifoQueue.numTotal() >= this.concurrency) break;
2118
+ this.blockingStats.totalBlockingCalls++;
2119
+ if (this.blockingStats.totalBlockingCalls >= 1e9) this.blockingStats.totalBlockingCalls = 0;
2120
+ this.logger.debug(`Fetching job (call #${this.blockingStats.totalBlockingCalls}, processing: ${this.jobsInProgress.size}/${this.concurrency}, queue: ${asyncFifoQueue.numTotal()} (queued: ${asyncFifoQueue.numQueued()}, pending: ${asyncFifoQueue.numPending()}), total: ${asyncFifoQueue.numTotal()}/${this.concurrency})...`);
2121
+ let fetchedJob;
2122
+ if (this.opts.strategy) fetchedJob = (async () => {
2123
+ const targetGroupId = await this.opts.strategy.getNextGroup(this.q);
2124
+ if (!targetGroupId) {
2125
+ await this.delay(strategyPollInterval);
2126
+ return null;
2127
+ }
2128
+ const job$2 = await this.q.reserveAtomic(targetGroupId);
2129
+ if (!job$2) return null;
2130
+ return job$2;
2131
+ })();
2132
+ else {
2133
+ const availableCapacity = this.concurrency - asyncFifoQueue.numTotal();
2134
+ if (availableCapacity > 0 && asyncFifoQueue.numTotal() === 0) {
2135
+ const batchSize = Math.min(availableCapacity, 8);
2136
+ const batchJobs = await this.q.reserveBatch(batchSize);
2137
+ if (batchJobs.length > 0) {
2138
+ this.logger.debug(`Batch reserved ${batchJobs.length} jobs`);
2139
+ for (const job$2 of batchJobs) asyncFifoQueue.add(Promise.resolve(job$2));
2140
+ connectionRetries = 0;
2141
+ this.lastJobPickupTime = Date.now();
2142
+ this.blockingStats.consecutiveEmptyReserves = 0;
2143
+ this.blockingStats.lastActivityTime = Date.now();
2144
+ this.emptyReserveBackoffMs = 0;
2145
+ continue;
2146
+ }
2147
+ }
2148
+ const allowBlocking = this.blockingStats.consecutiveEmptyReserves >= 2 && asyncFifoQueue.numTotal() === 0 && this.jobsInProgress.size === 0;
2149
+ const adaptiveTimeout = this.blockingTimeoutSec;
2150
+ fetchedJob = allowBlocking ? this.q.reserveBlocking(adaptiveTimeout, void 0, this.blockingClient ?? void 0) : this.q.reserve();
2151
+ }
2152
+ asyncFifoQueue.add(fetchedJob);
2153
+ const job$1 = await fetchedJob;
2154
+ if (job$1) {
2155
+ connectionRetries = 0;
2156
+ this.lastJobPickupTime = Date.now();
2157
+ this.blockingStats.consecutiveEmptyReserves = 0;
2158
+ this.blockingStats.lastActivityTime = Date.now();
2159
+ this.emptyReserveBackoffMs = 0;
2160
+ this.logger.debug(`Fetched job ${job$1.id} from group ${job$1.groupId}`);
2161
+ } else {
2162
+ if (this.opts.strategy) {
2163
+ if (asyncFifoQueue.numTotal() === 0 && this.jobsInProgress.size === 0) break;
2164
+ }
2165
+ this.blockingStats.consecutiveEmptyReserves++;
2166
+ if (this.blockingStats.consecutiveEmptyReserves % 50 === 0) this.logger.debug(`No job available (consecutive empty: ${this.blockingStats.consecutiveEmptyReserves})`);
2167
+ const backoffThreshold = this.concurrency >= 100 ? 5 : 3;
2168
+ if (this.blockingStats.consecutiveEmptyReserves > backoffThreshold && asyncFifoQueue.numTotal() === 0 && this.jobsInProgress.size === 0) {
2169
+ const maxBackoff = this.concurrency >= 100 ? 2e3 : 5e3;
2170
+ if (this.emptyReserveBackoffMs === 0) this.emptyReserveBackoffMs = this.concurrency >= 100 ? 100 : 50;
2171
+ else this.emptyReserveBackoffMs = Math.min(maxBackoff, Math.max(100, this.emptyReserveBackoffMs * 1.2));
2172
+ if (this.blockingStats.consecutiveEmptyReserves % 20 === 0) this.logger.debug(`Applying backoff: ${Math.round(this.emptyReserveBackoffMs)}ms (consecutive empty: ${this.blockingStats.consecutiveEmptyReserves}, jobs in progress: ${this.jobsInProgress.size})`);
2173
+ await this.delay(this.emptyReserveBackoffMs);
2174
+ }
2175
+ if (asyncFifoQueue.numTotal() === 0 && this.jobsInProgress.size === 0) break;
2176
+ break;
2177
+ }
2178
+ }
2179
+ let job;
2180
+ do
2181
+ job = await asyncFifoQueue.fetch() ?? void 0;
2182
+ while (!job && asyncFifoQueue.numQueued() > 0);
2183
+ if (job && typeof job === "object" && "id" in job) {
2184
+ this.totalJobsProcessed++;
2185
+ this.logger.debug(`Processing job ${job.id} from group ${job.groupId} immediately`);
2186
+ const processingPromise = this.processJob(job, () => {
2187
+ return asyncFifoQueue.numTotal() <= this.concurrency;
2188
+ }, this.jobsInProgress);
2189
+ asyncFifoQueue.add(processingPromise);
2190
+ }
2191
+ } catch (err) {
2192
+ if (this.stopping) return;
2193
+ if (this.q.isConnectionError(err)) {
2194
+ connectionRetries++;
2195
+ this.logger.error(`Connection error (retry ${connectionRetries}/${maxConnectionRetries}):`, err);
2196
+ if (connectionRetries >= maxConnectionRetries) {
2197
+ this.logger.error(`⚠️ Max connection retries (${maxConnectionRetries}) exceeded! Worker will continue but may be experiencing persistent Redis issues.`);
2198
+ this.emit("error", /* @__PURE__ */ new Error(`Max connection retries (${maxConnectionRetries}) exceeded - worker continuing with backoff`));
2199
+ await this.delay(2e4);
2200
+ connectionRetries = 0;
2201
+ } else {
2202
+ const delayMs = Math.max(Math.min(Math.exp(connectionRetries) * 1e3, 2e4), 1e3);
2203
+ this.logger.debug(`Waiting ${Math.round(delayMs)}ms before retry (exponential backoff)`);
2204
+ await this.delay(delayMs);
2205
+ }
2206
+ } else {
2207
+ this.logger.error(`Worker loop error (non-connection, continuing):`, err);
2208
+ this.emit("error", err instanceof Error ? err : new Error(String(err)));
2209
+ connectionRetries = 0;
2210
+ await this.delay(100);
2211
+ }
2212
+ this.onError?.(err);
2213
+ }
2214
+ this.logger.info(`Stopped`);
2215
+ }
2216
+ async delay(ms) {
2217
+ return new Promise((resolve) => setTimeout(resolve, ms));
2218
+ }
2219
+ /**
2220
+ * Process a job and return the next job if atomic completion succeeds
2221
+ * This matches BullMQ's processJob signature
2222
+ */
2223
+ async processJob(job, fetchNextCallback, jobsInProgress) {
2224
+ const existingItem = Array.from(jobsInProgress).find((item) => item.job.id === job.id);
2225
+ let inProgressItem;
2226
+ if (existingItem) {
2227
+ existingItem.ts = Date.now();
2228
+ inProgressItem = existingItem;
2229
+ } else {
2230
+ inProgressItem = {
2231
+ job,
2232
+ ts: Date.now()
2233
+ };
2234
+ jobsInProgress.add(inProgressItem);
2235
+ }
2236
+ try {
2237
+ const nextJob = await this.processSingleJob(job, fetchNextCallback);
2238
+ if (nextJob && typeof nextJob === "object" && "id" in nextJob && "groupId" in nextJob) {
2239
+ const chainedItem = {
2240
+ job: nextJob,
2241
+ ts: Date.now()
2242
+ };
2243
+ jobsInProgress.add(chainedItem);
2244
+ jobsInProgress.delete(inProgressItem);
2245
+ return nextJob;
2246
+ }
2247
+ return nextJob;
2248
+ } finally {
2249
+ if (jobsInProgress.has(inProgressItem)) jobsInProgress.delete(inProgressItem);
2250
+ }
2251
+ }
2252
+ /**
2253
+ * Complete a job and try to atomically get next job from same group
2254
+ */
2255
+ async completeJob(job, handlerResult, fetchNextCallback, processedOn, finishedOn) {
2256
+ if (fetchNextCallback?.()) {
2257
+ const nextJob = await this.q.completeAndReserveNextWithMetadata(job.id, job.groupId, handlerResult, {
2258
+ processedOn: processedOn || Date.now(),
2259
+ finishedOn: finishedOn || Date.now(),
2260
+ attempts: job.attempts,
2261
+ maxAttempts: job.maxAttempts
2262
+ });
2263
+ if (nextJob) {
2264
+ this.logger.debug(`Got next job ${nextJob.id} from same group ${nextJob.groupId} atomically`);
2265
+ return nextJob;
2266
+ }
2267
+ this.logger.debug(`Atomic chaining returned nil for job ${job.id} - job completed, but no next job chained`);
2268
+ if (Math.random() < .1) await new Promise((resolve) => setTimeout(resolve, Math.random() * 100));
2269
+ } else await this.q.completeWithMetadata(job, handlerResult, {
2270
+ processedOn: processedOn || Date.now(),
2271
+ finishedOn: finishedOn || Date.now(),
2272
+ attempts: job.attempts,
2273
+ maxAttempts: job.maxAttempts
2274
+ });
2275
+ }
2276
+ /**
2277
+ * Start the stalled job checker
2278
+ * Checks periodically for jobs that exceeded their deadline and recovers or fails them
2279
+ */
2280
+ startStalledChecker() {
2281
+ if (this.stalledInterval <= 0) return;
2282
+ this.stalledCheckTimer = setInterval(async () => {
2283
+ try {
2284
+ await this.checkStalled();
2285
+ } catch (err) {
2286
+ this.logger.error("Error in stalled job checker:", err);
2287
+ this.emit("error", err instanceof Error ? err : new Error(String(err)));
2288
+ }
2289
+ }, this.stalledInterval);
2290
+ }
2291
+ /**
2292
+ * Check for stalled jobs and recover or fail them
2293
+ * A job is stalled when its worker crashed or lost connection
2294
+ */
2295
+ async checkStalled() {
2296
+ if (this.stopping || this.closed) return;
2297
+ try {
2298
+ const now = Date.now();
2299
+ const results = await this.q.checkStalledJobs(now, this.stalledGracePeriod, this.maxStalledCount);
2300
+ if (results.length > 0) for (let i = 0; i < results.length; i += 3) {
2301
+ const jobId = results[i];
2302
+ const groupId = results[i + 1];
2303
+ const action = results[i + 2];
2304
+ if (action === "recovered") {
2305
+ this.logger.info(`Recovered stalled job ${jobId} from group ${groupId}`);
2306
+ this.emit("stalled", jobId, groupId);
2307
+ } else if (action === "failed") {
2308
+ this.logger.warn(`Failed stalled job ${jobId} from group ${groupId} (exceeded max stalled count)`);
2309
+ this.emit("stalled", jobId, groupId);
2310
+ }
2311
+ }
2312
+ } catch (err) {
2313
+ this.logger.error("Error checking stalled jobs:", err);
2314
+ }
2315
+ }
2316
+ /**
2317
+ * Get worker performance metrics
2318
+ */
2319
+ getWorkerMetrics() {
2320
+ const now = Date.now();
2321
+ return {
2322
+ name: this.name,
2323
+ totalJobsProcessed: this.totalJobsProcessed,
2324
+ lastJobPickupTime: this.lastJobPickupTime,
2325
+ timeSinceLastJob: this.lastJobPickupTime > 0 ? now - this.lastJobPickupTime : null,
2326
+ blockingStats: { ...this.blockingStats },
2327
+ isProcessing: this.jobsInProgress.size > 0,
2328
+ jobsInProgressCount: this.jobsInProgress.size,
2329
+ jobsInProgress: Array.from(this.jobsInProgress).map((item) => ({
2330
+ jobId: item.job.id,
2331
+ groupId: item.job.groupId,
2332
+ processingTimeMs: now - item.ts
2333
+ }))
2334
+ };
2335
+ }
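+ // Hedged usage sketch: suitable for a liveness/metrics endpoint; while idle,
+ // timeSinceLastJob keeps growing and jobsInProgress stays empty:
+ //   const m = worker.getWorkerMetrics();
+ //   console.log(m.name, m.totalJobsProcessed, m.jobsInProgressCount);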
2336
+ /**
2337
+ * Stop the worker gracefully
2338
+ * @param gracefulTimeoutMs Maximum time to wait for current job to finish (default: 30 seconds)
2339
+ */
2340
+ async close(gracefulTimeoutMs = 3e4) {
2341
+ this.stopping = true;
2342
+ await this.delay(100);
2343
+ if (this.cleanupTimer) clearInterval(this.cleanupTimer);
2344
+ if (this.schedulerTimer) clearInterval(this.schedulerTimer);
2345
+ if (this.stalledCheckTimer) clearInterval(this.stalledCheckTimer);
2346
+ const startTime = Date.now();
2347
+ while (this.jobsInProgress.size > 0 && Date.now() - startTime < gracefulTimeoutMs) await sleep(100);
2348
+ if (this.blockingClient) {
2349
+ try {
2350
+ if (this.jobsInProgress.size > 0 && gracefulTimeoutMs > 0) {
2351
+ this.logger.debug("Gracefully closing blocking client (quit)...");
2352
+ await this.blockingClient.quit();
2353
+ } else {
2354
+ this.logger.debug("Force closing blocking client (disconnect)...");
2355
+ this.blockingClient.disconnect();
2356
+ }
2357
+ } catch (err) {
2358
+ this.logger.debug("Error closing blocking client:", err);
2359
+ }
2360
+ this.blockingClient = null;
2361
+ }
2362
+ if (this.runLoopPromise) {
2363
+ const runLoopTimeout = this.jobsInProgress.size > 0 ? gracefulTimeoutMs : 2e3;
2364
+ const timeoutPromise = new Promise((resolve) => {
2365
+ setTimeout(resolve, runLoopTimeout);
2366
+ });
2367
+ try {
2368
+ await Promise.race([this.runLoopPromise, timeoutPromise]);
2369
+ } catch (err) {
2370
+ this.logger.warn("Error while waiting for run loop to exit:", err);
2371
+ }
2372
+ }
2373
+ if (this.jobsInProgress.size > 0) {
2374
+ this.logger.warn(`Worker stopped with ${this.jobsInProgress.size} jobs still processing after ${gracefulTimeoutMs}ms timeout.`);
2375
+ const nowWall = Date.now();
2376
+ for (const item of this.jobsInProgress) this.emit("graceful-timeout", Job.fromReserved(this.q, item.job, {
2377
+ processedOn: item.ts,
2378
+ finishedOn: nowWall,
2379
+ status: "active"
2380
+ }));
2381
+ }
2382
+ this.jobsInProgress.clear();
2383
+ this.ready = false;
2384
+ this.closed = true;
2385
+ try {
2386
+ const redis = this.q.redis;
2387
+ if (redis) {
2388
+ if (this.redisCloseHandler) redis.off?.("close", this.redisCloseHandler);
2389
+ if (this.redisErrorHandler) redis.off?.("error", this.redisErrorHandler);
2390
+ if (this.redisReadyHandler) redis.off?.("ready", this.redisReadyHandler);
2391
+ }
2392
+ } catch (_e) {}
2393
+ this.emit("closed");
2394
+ }
2395
+ /**
2396
+ * Get information about the first currently processing job (if any)
2397
+ * For concurrency > 1, returns the oldest job in progress
2398
+ */
2399
+ getCurrentJob() {
2400
+ if (this.jobsInProgress.size === 0) return null;
2401
+ const oldest = Array.from(this.jobsInProgress)[0];
2402
+ const now = Date.now();
2403
+ return {
2404
+ job: oldest.job,
2405
+ processingTimeMs: now - oldest.ts
2406
+ };
2407
+ }
2408
+ /**
2409
+ * Get information about all currently processing jobs
2410
+ */
2411
+ getCurrentJobs() {
2412
+ const now = Date.now();
2413
+ return Array.from(this.jobsInProgress).map((item) => ({
2414
+ job: item.job,
2415
+ processingTimeMs: now - item.ts
2416
+ }));
2417
+ }
2418
+ /**
2419
+ * Check if the worker is currently processing any jobs
2420
+ */
2421
+ isProcessing() {
2422
+ return this.jobsInProgress.size > 0;
2423
+ }
2424
+ async add(opts) {
2425
+ return this.q.add(opts);
2426
+ }
2427
+ async processSingleJob(job, fetchNextCallback) {
2428
+ const jobStartWallTime = Date.now();
2429
+ let hbTimer;
2430
+ let heartbeatDelayTimer;
2431
+ const startHeartbeat = () => {
2432
+ const jobTimeout = this.q.jobTimeoutMs || 3e4;
2433
+ const minInterval = Math.min(this.hbMs, Math.floor(jobTimeout / 3), 1e4);
2434
+ this.logger.debug(`Starting heartbeat for job ${job.id} (interval: ${minInterval}ms, concurrency: ${this.concurrency})`);
2435
+ hbTimer = setInterval(async () => {
2436
+ try {
2437
+ if (await this.q.heartbeat(job) === 0) {
2438
+ this.logger.warn(`Heartbeat failed for job ${job.id} - job may have been removed or completed elsewhere`);
2439
+ if (hbTimer) clearInterval(hbTimer);
2440
+ }
2441
+ } catch (e) {
2442
+ const isConnErr = this.q.isConnectionError(e);
2443
+ if (!isConnErr || !this.stopping) this.logger.error(`Heartbeat error for job ${job.id}:`, e instanceof Error ? e.message : String(e));
2444
+ this.onError?.(e, job);
2445
+ if (!isConnErr || !this.stopping) this.emit("error", e instanceof Error ? e : new Error(String(e)));
2446
+ }
2447
+ }, minInterval);
2448
+ };
2449
+ try {
2450
+ const jobTimeout = this.q.jobTimeoutMs || 3e4;
2451
+ const heartbeatThreshold = Math.min(jobTimeout * .1, 2e3);
2452
+ heartbeatDelayTimer = setTimeout(() => {
2453
+ startHeartbeat();
2454
+ }, heartbeatThreshold);
2455
+ const handlerResult = await this.handler(job);
2456
+ if (heartbeatDelayTimer) clearTimeout(heartbeatDelayTimer);
2457
+ if (hbTimer) clearInterval(hbTimer);
2458
+ const finishedAtWall = Date.now();
2459
+ const nextJob = await this.completeJob(job, handlerResult, fetchNextCallback, jobStartWallTime, finishedAtWall);
2460
+ this.blockingStats.consecutiveEmptyReserves = 0;
2461
+ this.emptyReserveBackoffMs = 0;
2462
+ this.emit("completed", Job.fromReserved(this.q, job, {
2463
+ processedOn: jobStartWallTime,
2464
+ finishedOn: finishedAtWall,
2465
+ returnvalue: handlerResult,
2466
+ status: "completed"
2467
+ }));
2468
+ return nextJob;
2469
+ } catch (err) {
2470
+ if (heartbeatDelayTimer) clearTimeout(heartbeatDelayTimer);
2471
+ if (hbTimer) clearInterval(hbTimer);
2472
+ await this.handleJobFailure(err, job, jobStartWallTime);
2473
+ }
2474
+ }
2475
+ /**
2476
+ * Handle job failure: emit events, retry or dead-letter
2477
+ */
2478
+ async handleJobFailure(err, job, jobStartWallTime) {
2479
+ this.onError?.(err, job);
2480
+ this.blockingStats.consecutiveEmptyReserves = 0;
2481
+ this.emptyReserveBackoffMs = 0;
2482
+ try {
2483
+ this.emit("error", err instanceof Error ? err : new Error(String(err)));
2484
+ } catch (_emitError) {}
2485
+ const failedAt = Date.now();
2486
+ this.emit("failed", Job.fromReserved(this.q, job, {
2487
+ processedOn: jobStartWallTime,
2488
+ finishedOn: failedAt,
2489
+ failedReason: err instanceof Error ? err.message : String(err),
2490
+ stacktrace: err instanceof Error ? err.stack : typeof err === "object" && err !== null ? err.stack : void 0,
2491
+ status: "failed"
2492
+ }));
2493
+ const nextAttempt = job.attempts + 1;
2494
+ const backoffMs = this.backoff(nextAttempt);
2495
+ if (nextAttempt >= this.maxAttempts) {
2496
+ await this.deadLetterJob(err, job, jobStartWallTime, failedAt, nextAttempt);
2497
+ return;
2498
+ }
2499
+ if (await this.q.retry(job.id, backoffMs) === -1) {
2500
+ await this.deadLetterJob(err, job, jobStartWallTime, failedAt, job.maxAttempts);
2501
+ return;
2502
+ }
2503
+ await this.recordFailureAttempt(err, job, jobStartWallTime, failedAt, nextAttempt);
2504
+ }
2505
+ /**
2506
+ * Dead-letter a job that exceeded max attempts
2507
+ */
2508
+ async deadLetterJob(err, job, processedOn, finishedOn, attempts) {
2509
+ this.logger.info(`Dead lettering job ${job.id} from group ${job.groupId} (attempts: ${attempts}/${job.maxAttempts})`);
2510
+ const errObj = err instanceof Error ? err : new Error(String(err));
2511
+ try {
2512
+ await this.q.recordFinalFailure({
2513
+ id: job.id,
2514
+ groupId: job.groupId
2515
+ }, {
2516
+ name: errObj.name,
2517
+ message: errObj.message,
2518
+ stack: errObj.stack
2519
+ }, {
2520
+ processedOn,
2521
+ finishedOn,
2522
+ attempts,
2523
+ maxAttempts: job.maxAttempts,
2524
+ data: job.data
2525
+ });
2526
+ } catch (e) {
2527
+ this.logger.warn("Failed to record final failure", e);
2528
+ }
2529
+ await this.q.deadLetter(job.id, job.groupId);
2530
+ }
2531
+ /**
2532
+ * Record a failed attempt (not final)
2533
+ */
2534
+ async recordFailureAttempt(err, job, processedOn, finishedOn, attempts) {
2535
+ const errObj = err instanceof Error ? err : new Error(String(err));
2536
+ try {
2537
+ await this.q.recordAttemptFailure({
2538
+ id: job.id,
2539
+ groupId: job.groupId
2540
+ }, {
2541
+ name: errObj.name,
2542
+ message: errObj.message,
2543
+ stack: errObj.stack
2544
+ }, {
2545
+ processedOn,
2546
+ finishedOn,
2547
+ attempts,
2548
+ maxAttempts: job.maxAttempts
2549
+ });
2550
+ } catch (e) {
2551
+ this.logger.warn("Failed to record attempt failure", e);
2552
+ }
2553
+ }
2554
+ };
2555
+ const Worker = _Worker;
2556
+ function sleep(ms) {
2557
+ return new Promise((r) => setTimeout(r, ms));
2558
+ }
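+ // Hedged end-to-end sketch (assumes a constructed Queue instance `queue`;
+ // the option names are the ones read by the Worker constructor above):
+ //   const worker = new Worker({
+ //     queue,
+ //     handler: async (job) => ({ ok: true, group: job.groupId }),
+ //     concurrency: 4,
+ //     maxAttempts: 3,
+ //   });
+ //   worker.on("completed", (job) => console.log("completed", job.id));
+ //   worker.on("failed", (job) => console.warn("failed", job.id, job.failedReason));
+ //   // later, on shutdown: await worker.close();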
2559
+
2560
+ //#endregion
2561
+ exports.BullBoardGroupMQAdapter = BullBoardGroupMQAdapter;
2562
+ exports.Job = Job;
2563
+ exports.Queue = Queue;
2564
+ exports.Worker = Worker;
2565
+ exports.getWorkersStatus = getWorkersStatus;
2566
+ exports.waitForQueueToEmpty = waitForQueueToEmpty;
2567
+ //# sourceMappingURL=index.cjs.map