groupmq-plus 1.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +59 -0
- package/README.md +722 -0
- package/dist/index.cjs +2567 -0
- package/dist/index.cjs.map +1 -0
- package/dist/index.d.cts +1300 -0
- package/dist/index.d.ts +1300 -0
- package/dist/index.js +2557 -0
- package/dist/index.js.map +1 -0
- package/dist/lua/change-delay.lua +62 -0
- package/dist/lua/check-stalled.lua +86 -0
- package/dist/lua/clean-status.lua +64 -0
- package/dist/lua/cleanup-poisoned-group.lua +46 -0
- package/dist/lua/cleanup.lua +46 -0
- package/dist/lua/complete-and-reserve-next-with-metadata.lua +221 -0
- package/dist/lua/complete-with-metadata.lua +190 -0
- package/dist/lua/complete.lua +51 -0
- package/dist/lua/dead-letter.lua +86 -0
- package/dist/lua/enqueue-batch.lua +149 -0
- package/dist/lua/enqueue-flow.lua +107 -0
- package/dist/lua/enqueue.lua +154 -0
- package/dist/lua/get-active-count.lua +6 -0
- package/dist/lua/get-active-jobs.lua +6 -0
- package/dist/lua/get-delayed-count.lua +5 -0
- package/dist/lua/get-delayed-jobs.lua +5 -0
- package/dist/lua/get-unique-groups-count.lua +13 -0
- package/dist/lua/get-unique-groups.lua +15 -0
- package/dist/lua/get-waiting-count.lua +11 -0
- package/dist/lua/get-waiting-jobs.lua +15 -0
- package/dist/lua/heartbeat.lua +22 -0
- package/dist/lua/is-empty.lua +35 -0
- package/dist/lua/promote-delayed-jobs.lua +40 -0
- package/dist/lua/promote-delayed-one.lua +44 -0
- package/dist/lua/promote-staged.lua +70 -0
- package/dist/lua/record-job-result.lua +143 -0
- package/dist/lua/remove.lua +55 -0
- package/dist/lua/reserve-atomic.lua +114 -0
- package/dist/lua/reserve-batch.lua +141 -0
- package/dist/lua/reserve.lua +161 -0
- package/dist/lua/retry.lua +53 -0
- package/package.json +92 -0
package/dist/index.js
ADDED
@@ -0,0 +1,2557 @@
import { randomUUID } from "node:crypto";
import CronParser from "cron-parser";
import fs from "node:fs";
import path from "node:path";
import { fileURLToPath } from "node:url";

//#region rolldown:runtime
var __create = Object.create;
var __defProp = Object.defineProperty;
var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
var __getOwnPropNames = Object.getOwnPropertyNames;
var __getProtoOf = Object.getPrototypeOf;
var __hasOwnProp = Object.prototype.hasOwnProperty;
var __commonJS = (cb, mod) => function() {
  return mod || (0, cb[__getOwnPropNames(cb)[0]])((mod = { exports: {} }).exports, mod), mod.exports;
};
var __copyProps = (to, from, except, desc) => {
  if (from && typeof from === "object" || typeof from === "function") for (var keys = __getOwnPropNames(from), i = 0, n = keys.length, key; i < n; i++) {
    key = keys[i];
    if (!__hasOwnProp.call(to, key) && key !== except) __defProp(to, key, {
      get: ((k) => from[k]).bind(null, key),
      enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable
    });
  }
  return to;
};
var __toESM = (mod, isNodeMode, target) => (target = mod != null ? __create(__getProtoOf(mod)) : {}, __copyProps(isNodeMode || !mod || !mod.__esModule ? __defProp(target, "default", {
  value: mod,
  enumerable: true
}) : target, mod));

//#endregion
//#region node_modules/.pnpm/@bull-board+api@6.13.0_@bull-board+ui@6.13.0/node_modules/@bull-board/api/dist/queueAdapters/base.js
var require_base = /* @__PURE__ */ __commonJS({ "node_modules/.pnpm/@bull-board+api@6.13.0_@bull-board+ui@6.13.0/node_modules/@bull-board/api/dist/queueAdapters/base.js": ((exports) => {
  Object.defineProperty(exports, "__esModule", { value: true });
  var BaseAdapter$1 = class {
    constructor(type, options = {}) {
      this.formatters = /* @__PURE__ */ new Map();
      this._visibilityGuard = () => true;
      this.readOnlyMode = options.readOnlyMode === true;
      this.allowRetries = this.readOnlyMode ? false : options.allowRetries !== false;
      this.allowCompletedRetries = this.allowRetries && options.allowCompletedRetries !== false;
      this.prefix = options.prefix || "";
      this.delimiter = options.delimiter || "";
      this.description = options.description || "";
      this.displayName = options.displayName || "";
      this.type = type;
      this.externalJobUrl = options.externalJobUrl;
    }
    getDescription() {
      return this.description;
    }
    getDisplayName() {
      return this.displayName;
    }
    setFormatter(field, formatter) {
      this.formatters.set(field, formatter);
    }
    format(field, data, defaultValue = data) {
      const fieldFormatter = this.formatters.get(field);
      return typeof fieldFormatter === "function" ? fieldFormatter(data) : defaultValue;
    }
    setVisibilityGuard(guard) {
      this._visibilityGuard = guard;
    }
    isVisible(request) {
      return this._visibilityGuard(request);
    }
  };
  exports.BaseAdapter = BaseAdapter$1;
}) });

//#endregion
//#region src/adapters/groupmq-bullboard-adapter.ts
var import_base = /* @__PURE__ */ __toESM(require_base(), 1);
var BullBoardGroupMQAdapter = class extends import_base.BaseAdapter {
  constructor(queue, options = {}) {
    const libName = queue.namespace;
    super(libName, options);
    this.queue = queue;
    this.options = options;
  }
  getDescription() {
    return this.options.description || "";
  }
  getDisplayName() {
    return this.options.displayName || "";
  }
  getName() {
    const prefix = this.options.prefix || "";
    const delimiter = this.options.delimiter || "";
    return `${prefix}${delimiter}${this.queue.rawNamespace}`.replace(/(^[\s:]+)|([\s:]+$)/g, "");
  }
  async getRedisInfo() {
    return this.queue.redis.info();
  }
  async getJob(id) {
    return await this.queue.getJob(id);
  }
  async getJobs(jobStatuses, start, end) {
    return await this.queue.getJobsByStatus(jobStatuses, start, end);
  }
  async getJobCounts() {
    const base = await this.queue.getJobCounts();
    return {
      latest: 0,
      active: base.active,
      waiting: base.waiting,
      "waiting-children": base["waiting-children"],
      prioritized: base.prioritized,
      completed: base.completed,
      failed: base.failed,
      delayed: base.delayed,
      paused: base.paused
    };
  }
  async getJobLogs(_id) {
    return [];
  }
  getStatuses() {
    return [
      "latest",
      "active",
      "waiting",
      "waiting-children",
      "prioritized",
      "completed",
      "failed",
      "delayed",
      "paused"
    ];
  }
  getJobStatuses() {
    return [
      "active",
      "waiting",
      "waiting-children",
      "prioritized",
      "completed",
      "failed",
      "delayed",
      "paused"
    ];
  }
  assertWritable() {
    if (this.options.readOnlyMode) throw new Error("This adapter is in read-only mode. Mutations are disabled.");
  }
  async clean(jobStatus, graceTimeMs) {
    this.assertWritable();
    if (jobStatus !== "completed" && jobStatus !== "failed" && jobStatus !== "delayed") return;
    await this.queue.clean(graceTimeMs, Number.MAX_SAFE_INTEGER, jobStatus);
  }
  async addJob(_name, data, options) {
    this.assertWritable();
    return await this.queue.add({
      groupId: options.groupId ?? Math.random().toString(36).substring(2, 15),
      data,
      ...options
    });
  }
  async isPaused() {
    return this.queue.isPaused();
  }
  async pause() {
    this.assertWritable();
    await this.queue.pause();
  }
  async resume() {
    this.assertWritable();
    await this.queue.resume();
  }
  async empty() {
    this.assertWritable();
    throw new Error("Not implemented");
  }
  async promoteAll() {
    this.assertWritable();
    throw new Error("Not implemented");
  }
};

//#endregion
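Usage sketch: the adapter above plugs a GroupMQ queue into bull-board. This is a minimal, illustrative example only, assuming the package exports Queue and BullBoardGroupMQAdapter, that @bull-board/express and ioredis are installed, and using the Queue constructor options (redis, namespace) shown later in this bundle.

import { createBullBoard } from "@bull-board/api";
import { ExpressAdapter } from "@bull-board/express";
import express from "express";
import Redis from "ioredis";
import { Queue, BullBoardGroupMQAdapter } from "groupmq-plus"; // assumed exports

// Queue keys are namespaced as `groupmq:<namespace>` (see the Queue constructor below).
const queue = new Queue({ redis: new Redis(), namespace: "emails" });

const serverAdapter = new ExpressAdapter();
serverAdapter.setBasePath("/admin/queues");

createBullBoard({
  // readOnlyMode: true would make assertWritable() reject all mutations from the UI.
  queues: [new BullBoardGroupMQAdapter(queue, { readOnlyMode: false })],
  serverAdapter,
});

const app = express();
app.use("/admin/queues", serverAdapter.getRouter());
app.listen(3000);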
//#region src/helpers.ts
/**
 * Wait for a queue to become empty
 * @param queue The queue to monitor
 * @param timeoutMs Maximum time to wait (default: 60 seconds)
 * @returns Promise that resolves when queue is empty or timeout is reached
 */
async function waitForQueueToEmpty(queue, timeoutMs = 6e4) {
  return queue.waitForEmpty(timeoutMs);
}
/**
 * Get status of all workers
 */
function getWorkersStatus(workers) {
  const workersStatus = workers.map((worker, index) => {
    const currentJob = worker.getCurrentJob();
    return {
      index,
      isProcessing: worker.isProcessing(),
      currentJob: currentJob ? {
        jobId: currentJob.job.id,
        groupId: currentJob.job.groupId,
        processingTimeMs: currentJob.processingTimeMs
      } : void 0
    };
  });
  const processing = workersStatus.filter((w) => w.isProcessing).length;
  const idle = workersStatus.length - processing;
  return {
    total: workers.length,
    processing,
    idle,
    workers: workersStatus
  };
}

//#endregion
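A minimal sketch of the two helpers above, assuming an existing `queue` and an array of this package's Worker instances named `workers` (the Worker class itself is not part of this excerpt):

// Summarize what the worker pool is doing right now.
const status = getWorkersStatus(workers);
console.log(`${status.processing}/${status.total} busy, ${status.idle} idle`);
for (const w of status.workers) {
  if (w.currentJob) {
    console.log(`worker #${w.index} -> job ${w.currentJob.jobId} (group ${w.currentJob.groupId}, ${w.currentJob.processingTimeMs}ms)`);
  }
}

// Block until the queue drains or two minutes pass (delegates to queue.waitForEmpty).
await waitForQueueToEmpty(queue, 120000);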
//#region src/job.ts
var Job = class Job {
  constructor(args) {
    this.queue = args.queue;
    this.id = args.id;
    this.name = args.name ?? "groupmq";
    this.data = args.data;
    this.groupId = args.groupId;
    this.attemptsMade = args.attemptsMade;
    this.opts = args.opts;
    this.processedOn = args.processedOn;
    this.finishedOn = args.finishedOn;
    this.failedReason = args.failedReason;
    this.stacktrace = args.stacktrace;
    this.returnvalue = args.returnvalue;
    this.timestamp = args.timestamp;
    this.orderMs = args.orderMs;
    this.status = args.status ?? "unknown";
  }
  async getState() {
    return this.status ?? "unknown";
  }
  toJSON() {
    return {
      id: this.id,
      name: this.name,
      data: this.data,
      groupId: this.groupId,
      attemptsMade: this.attemptsMade,
      opts: this.opts,
      processedOn: this.processedOn,
      finishedOn: this.finishedOn,
      failedReason: this.failedReason,
      stacktrace: this.stacktrace ? [this.stacktrace] : null,
      returnvalue: this.returnvalue,
      timestamp: this.timestamp,
      orderMs: this.orderMs,
      status: this.status,
      progress: 0
    };
  }
  changeDelay(newDelay) {
    return this.queue.changeDelay(this.id, newDelay);
  }
  async promote() {
    await this.queue.promote(this.id);
  }
  async remove() {
    await this.queue.remove(this.id);
  }
  async retry(_state) {
    await this.queue.retry(this.id);
  }
  async updateData(jobData) {
    await this.queue.updateData(this.id, jobData);
  }
  async update(jobData) {
    await this.updateData(jobData);
  }
  static fromReserved(queue, reserved, meta) {
    return new Job({
      queue,
      id: reserved.id,
      name: "groupmq",
      data: reserved.data,
      groupId: reserved.groupId,
      attemptsMade: reserved.attempts,
      opts: {
        attempts: reserved.maxAttempts,
        delay: meta?.delayMs
      },
      processedOn: meta?.processedOn,
      finishedOn: meta?.finishedOn,
      failedReason: meta?.failedReason,
      stacktrace: meta?.stacktrace,
      returnvalue: meta?.returnvalue,
      timestamp: reserved.timestamp ? reserved.timestamp : Date.now(),
      orderMs: reserved.orderMs,
      status: coerceStatus(meta?.status)
    });
  }
  /**
   * Create a Job from raw Redis hash data with optional known status
   * This avoids extra Redis lookups when status is already known
   */
  static fromRawHash(queue, id, raw, knownStatus) {
    const groupId = raw.groupId ?? "";
    const payload = raw.data ? safeJsonParse$1(raw.data) : null;
    const attempts = raw.attempts ? parseInt(raw.attempts, 10) : 0;
    const maxAttempts = raw.maxAttempts ? parseInt(raw.maxAttempts, 10) : queue.maxAttemptsDefault;
    const timestampMs = raw.timestamp ? parseInt(raw.timestamp, 10) : 0;
    const orderMs = raw.orderMs ? parseInt(raw.orderMs, 10) : void 0;
    const delayUntil = raw.delayUntil ? parseInt(raw.delayUntil, 10) : 0;
    const processedOn = raw.processedOn ? parseInt(raw.processedOn, 10) : void 0;
    const finishedOn = raw.finishedOn ? parseInt(raw.finishedOn, 10) : void 0;
    const failedReason = (raw.failedReason ?? raw.lastErrorMessage) || void 0;
    const stacktrace = (raw.stacktrace ?? raw.lastErrorStack) || void 0;
    const returnvalue = raw.returnvalue ? safeJsonParse$1(raw.returnvalue) : void 0;
    return new Job({
      queue,
      id,
      name: "groupmq",
      data: payload,
      groupId,
      attemptsMade: attempts,
      opts: {
        attempts: maxAttempts,
        delay: delayUntil && delayUntil > Date.now() ? delayUntil - Date.now() : void 0
      },
      processedOn,
      finishedOn,
      failedReason,
      stacktrace,
      returnvalue,
      timestamp: timestampMs || Date.now(),
      orderMs,
      status: knownStatus ?? coerceStatus(raw.status)
    });
  }
  static async fromStore(queue, id) {
    const jobKey = `${queue.namespace}:job:${id}`;
    const raw = await queue.redis.hgetall(jobKey);
    if (!raw || Object.keys(raw).length === 0) throw new Error(`Job ${id} not found`);
    const groupId = raw.groupId ?? "";
    const payload = raw.data ? safeJsonParse$1(raw.data) : null;
    const attempts = raw.attempts ? parseInt(raw.attempts, 10) : 0;
    const maxAttempts = raw.maxAttempts ? parseInt(raw.maxAttempts, 10) : queue.maxAttemptsDefault;
    const timestampMs = raw.timestamp ? parseInt(raw.timestamp, 10) : 0;
    const orderMs = raw.orderMs ? parseInt(raw.orderMs, 10) : void 0;
    const delayUntil = raw.delayUntil ? parseInt(raw.delayUntil, 10) : 0;
    const processedOn = raw.processedOn ? parseInt(raw.processedOn, 10) : void 0;
    const finishedOn = raw.finishedOn ? parseInt(raw.finishedOn, 10) : void 0;
    const failedReason = (raw.failedReason ?? raw.lastErrorMessage) || void 0;
    const stacktrace = (raw.stacktrace ?? raw.lastErrorStack) || void 0;
    const returnvalue = raw.returnvalue ? safeJsonParse$1(raw.returnvalue) : void 0;
    const [inProcessing, inDelayed] = await Promise.all([queue.redis.zscore(`${queue.namespace}:processing`, id), queue.redis.zscore(`${queue.namespace}:delayed`, id)]);
    let status = raw.status;
    if (inProcessing !== null) status = "active";
    else if (inDelayed !== null) status = "delayed";
    else if (groupId) {
      if (await queue.redis.zscore(`${queue.namespace}:g:${groupId}`, id) !== null) status = "waiting";
    }
    return new Job({
      queue,
      id,
      name: "groupmq",
      data: payload,
      groupId,
      attemptsMade: attempts,
      opts: {
        attempts: maxAttempts,
        delay: delayUntil && delayUntil > Date.now() ? delayUntil - Date.now() : void 0
      },
      processedOn,
      finishedOn,
      failedReason,
      stacktrace,
      returnvalue,
      timestamp: timestampMs || Date.now(),
      orderMs,
      status: coerceStatus(status)
    });
  }
};
function safeJsonParse$1(input) {
  try {
    return JSON.parse(input);
  } catch (_e) {
    return null;
  }
}
function coerceStatus(input) {
  const valid = [
    "latest",
    "active",
    "waiting",
    "waiting-children",
    "prioritized",
    "completed",
    "failed",
    "delayed",
    "paused"
  ];
  if (!input) return "unknown";
  if (valid.includes(input)) return input;
  return "unknown";
}

//#endregion
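A minimal sketch of the Job API above, assuming a Queue instance named `queue`; the job ID is a placeholder. Queue.getJob() (shown further down) delegates to Job.fromStore(), which derives the status from the processing, delayed, and group sorted sets.

const job = await queue.getJob("some-job-id"); // placeholder ID
console.log(await job.getState());             // "waiting" | "active" | "completed" | "failed" | ...
console.log(job.toJSON().attemptsMade, job.opts.attempts);

if ((await job.getState()) === "failed") {
  await job.retry(); // re-enqueues via queue.retry(job.id)
}
await job.updateData({ note: "patched payload" }); // delegates to queue.updateData()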
//#region src/logger.ts
var Logger = class {
  constructor(enabled, name) {
    this.enabled = enabled;
    this.name = name;
  }
  debug(...args) {
    if (this.enabled) console.debug(`[${this.name}]`, ...args);
  }
  info(...args) {
    if (this.enabled) console.log(`[${this.name}]`, ...args);
  }
  warn(...args) {
    if (this.enabled) console.warn(`⚠️ [${this.name}]`, ...args);
  }
  error(...args) {
    if (this.enabled) console.error(`💥 [${this.name}]`, ...args);
  }
};

//#endregion
//#region src/lua/loader.ts
const cacheByClient = /* @__PURE__ */ new WeakMap();
function scriptPath(name) {
  const currentDir = path.dirname(fileURLToPath(import.meta.url));
  const candidates = [path.join(currentDir, `${name}.lua`), path.join(currentDir, "lua", `${name}.lua`)];
  for (const candidate of candidates) if (fs.existsSync(candidate)) return candidate;
  return candidates[0];
}
async function loadScript(client, name) {
  let map = cacheByClient.get(client);
  if (!map) {
    map = /* @__PURE__ */ new Map();
    cacheByClient.set(client, map);
  }
  const cached = map.get(name);
  if (cached) return cached;
  const file = scriptPath(name);
  const lua = fs.readFileSync(file, "utf8");
  const sha = await client.script("load", lua);
  map.set(name, sha);
  return sha;
}
async function evalScript(client, name, argv, numKeys) {
  const sha = await loadScript(client, name);
  return client.evalsha(sha, numKeys, ...argv);
}

//#endregion
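A minimal sketch of the loader's contract, assuming an ioredis client; evalScript/loadScript are internal to the bundle and not necessarily exported. Each Lua file shipped in dist/lua is loaded once per Redis client via SCRIPT LOAD, its SHA is cached in a per-client WeakMap, and subsequent calls go through EVALSHA with the namespace as the single key.

import Redis from "ioredis";

const redis = new Redis();
// Equivalent to: EVALSHA <sha of get-active-count.lua> 1 groupmq:emails
const activeCount = await evalScript(redis, "get-active-count", ["groupmq:emails"], 1);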
//#region src/queue.ts
function nsKey(ns, ...parts) {
  return [ns, ...parts].join(":");
}
function safeJsonParse(input) {
  try {
    return JSON.parse(input);
  } catch (_e) {
    return null;
  }
}
var Queue = class {
  constructor(opts) {
    this._consecutiveEmptyReserves = 0;
    this.promoterRunning = false;
    this.batchBuffer = [];
    this.flushing = false;
    this._groupCleanupTracking = /* @__PURE__ */ new Map();
    this.r = opts.redis;
    this.rawNs = opts.namespace;
    this.name = opts.namespace;
    this.ns = `groupmq:${this.rawNs}`;
    const rawVt = opts.jobTimeoutMs ?? 3e4;
    this.vt = Math.max(1, rawVt);
    this.defaultMaxAttempts = opts.maxAttempts ?? 3;
    this.scanLimit = opts.reserveScanLimit ?? 20;
    this.keepCompleted = Math.max(0, opts.keepCompleted ?? 0);
    this.keepFailed = Math.max(0, opts.keepFailed ?? 0);
    this.schedulerLockTtlMs = opts.schedulerLockTtlMs ?? 1500;
    this.orderingDelayMs = opts.orderingDelayMs ?? 0;
    if (opts.autoBatch) this.batchConfig = typeof opts.autoBatch === "boolean" ? {
      size: 10,
      maxWaitMs: 10
    } : {
      size: opts.autoBatch.size ?? 10,
      maxWaitMs: opts.autoBatch.maxWaitMs ?? 10
    };
    this.logger = typeof opts.logger === "object" ? opts.logger : new Logger(!!opts.logger, this.namespace);
    this.r.on("error", (err) => {
      this.logger.error("Redis error (main):", err);
    });
  }
  get redis() {
    return this.r;
  }
  get namespace() {
    return this.ns;
  }
  get rawNamespace() {
    return this.rawNs;
  }
  get jobTimeoutMs() {
    return this.vt;
  }
  get maxAttemptsDefault() {
    return this.defaultMaxAttempts;
  }
  async add(opts) {
    const maxAttempts = opts.maxAttempts ?? this.defaultMaxAttempts;
    const orderMs = opts.orderMs ?? Date.now();
    const now = Date.now();
    const jobId = opts.jobId ?? randomUUID();
    if (opts.repeat) return this.addRepeatingJob({
      ...opts,
      orderMs,
      maxAttempts
    });
    let delayMs;
    if (opts.delay !== void 0 && opts.delay > 0) delayMs = opts.delay;
    else if (opts.runAt !== void 0) {
      const runAtTimestamp = opts.runAt instanceof Date ? opts.runAt.getTime() : opts.runAt;
      delayMs = Math.max(0, runAtTimestamp - now);
    }
    const data = opts.data === void 0 ? null : opts.data;
    if (this.batchConfig) return new Promise((resolve, reject) => {
      this.batchBuffer.push({
        groupId: opts.groupId,
        data,
        jobId,
        maxAttempts,
        delayMs,
        orderMs,
        resolve,
        reject
      });
      if (this.batchBuffer.length >= this.batchConfig.size) this.flushBatch();
      else if (!this.batchTimer) this.batchTimer = setTimeout(() => this.flushBatch(), this.batchConfig.maxWaitMs);
    });
    return this.addSingle({
      ...opts,
      data,
      jobId,
      maxAttempts,
      orderMs,
      delayMs
    });
  }
  /**
   * Adds a parent-child flow to the queue.
   * The parent job will only be processed after all child jobs have completed successfully.
   * This operation is atomic.
   *
   * @param flow The flow configuration containing parent and children jobs
   * @returns The parent job entity
   */
  async addFlow(flow) {
    const parentId = flow.parent.jobId ?? randomUUID();
    const parentMaxAttempts = flow.parent.maxAttempts ?? this.defaultMaxAttempts;
    const parentOrderMs = flow.parent.orderMs ?? Date.now();
    const parentData = JSON.stringify(flow.parent.data === void 0 ? null : flow.parent.data);
    const childrenIds = [];
    const childrenArgs = [];
    for (const child of flow.children) {
      const childId = child.jobId ?? randomUUID();
      const childMaxAttempts = child.maxAttempts ?? this.defaultMaxAttempts;
      const childOrderMs = child.orderMs ?? Date.now();
      const childDelay = child.delay ?? 0;
      const childData = JSON.stringify(child.data === void 0 ? null : child.data);
      childrenIds.push(childId);
      childrenArgs.push(childId, child.groupId, childData, childMaxAttempts.toString(), childOrderMs.toString(), childDelay.toString());
    }
    const now = Date.now();
    await evalScript(this.r, "enqueue-flow", [
      this.ns,
      parentId,
      flow.parent.groupId,
      parentData,
      parentMaxAttempts.toString(),
      parentOrderMs.toString(),
      now.toString(),
      ...childrenArgs
    ], 1);
    return new Job({
      queue: this,
      id: parentId,
      groupId: flow.parent.groupId,
      data: flow.parent.data,
      status: "waiting-children",
      attemptsMade: 0,
      opts: { attempts: parentMaxAttempts },
      timestamp: now,
      orderMs: parentOrderMs
    });
  }
  /**
   * Gets the number of remaining child jobs for a parent job in a flow.
   * @param parentId The ID of the parent job
   * @returns The number of remaining children, or null if the job is not a parent
   */
  async getFlowDependencies(parentId) {
    const remaining = await this.r.hget(`${this.ns}:job:${parentId}`, "flowRemaining");
    return remaining !== null ? parseInt(remaining, 10) : null;
  }
  /**
   * Gets the results of all child jobs in a flow.
   * @param parentId The ID of the parent job
   * @returns An object mapping child job IDs to their results
   */
  async getFlowResults(parentId) {
    const results = await this.r.hgetall(`${this.ns}:flow:results:${parentId}`);
    const parsed = {};
    for (const [id, val] of Object.entries(results)) try {
      parsed[id] = JSON.parse(val);
    } catch (_e) {
      parsed[id] = val;
    }
    return parsed;
  }
  async addSingle(opts) {
    const now = Date.now();
    let delayUntil = 0;
    if (opts.delayMs !== void 0 && opts.delayMs > 0) delayUntil = now + opts.delayMs;
    const serializedPayload = JSON.stringify(opts.data);
    const result = await evalScript(this.r, "enqueue", [
      this.ns,
      opts.groupId,
      serializedPayload,
      String(opts.maxAttempts),
      String(opts.orderMs),
      String(delayUntil),
      String(opts.jobId),
      String(this.keepCompleted),
      String(now),
      String(this.orderingDelayMs)
    ], 1);
    if (Array.isArray(result)) {
      const [returnedJobId, returnedGroupId, returnedData, attempts, returnedMaxAttempts, timestamp, returnedOrderMs, returnedDelayUntil, status] = result;
      return Job.fromRawHash(this, returnedJobId, {
        id: returnedJobId,
        groupId: returnedGroupId,
        data: returnedData,
        attempts,
        maxAttempts: returnedMaxAttempts,
        timestamp,
        orderMs: returnedOrderMs,
        delayUntil: returnedDelayUntil,
        status
      }, status);
    }
    return this.getJob(result);
  }
  async flushBatch() {
    if (this.batchTimer) {
      clearTimeout(this.batchTimer);
      this.batchTimer = void 0;
    }
    if (this.batchBuffer.length === 0 || this.flushing) return;
    this.flushing = true;
    const batch = this.batchBuffer.splice(0);
    try {
      this.logger.debug(`Flushing batch of ${batch.length} jobs`);
      const now = Date.now();
      const jobsData = batch.map((job) => ({
        jobId: job.jobId,
        groupId: job.groupId,
        data: JSON.stringify(job.data),
        maxAttempts: job.maxAttempts,
        orderMs: job.orderMs,
        delayMs: job.delayMs
      }));
      const jobDataArrays = await evalScript(this.r, "enqueue-batch", [
        this.ns,
        JSON.stringify(jobsData),
        String(this.keepCompleted),
        String(now),
        String(this.orderingDelayMs)
      ], 1);
      for (let i = 0; i < batch.length; i++) {
        const job = batch[i];
        const jobDataArray = jobDataArrays[i];
        try {
          if (jobDataArray && jobDataArray.length >= 9) {
            const [returnedJobId, returnedGroupId, returnedData, attempts, returnedMaxAttempts, timestamp, returnedOrderMs, returnedDelayUntil, status] = jobDataArray;
            const jobEntity = Job.fromRawHash(this, returnedJobId, {
              id: returnedJobId,
              groupId: returnedGroupId,
              data: returnedData,
              attempts,
              maxAttempts: returnedMaxAttempts,
              timestamp,
              orderMs: returnedOrderMs,
              delayUntil: returnedDelayUntil,
              status
            }, status);
            job.resolve(jobEntity);
          } else throw new Error("Invalid job data returned from batch enqueue");
        } catch (err) {
          job.reject(err instanceof Error ? err : new Error(String(err)));
        }
      }
    } catch (err) {
      for (const job of batch) job.reject(err instanceof Error ? err : new Error(String(err)));
    } finally {
      this.flushing = false;
      if (this.batchBuffer.length > 0) setImmediate(() => this.flushBatch());
    }
  }
  async reserve() {
    const now = Date.now();
    const raw = await evalScript(this.r, "reserve", [
      this.ns,
      String(now),
      String(this.vt),
      String(this.scanLimit)
    ], 1);
    if (!raw) return null;
    const parts = raw.split("|||");
    if (parts.length !== 10) return null;
    let data;
    try {
      data = JSON.parse(parts[2]);
    } catch (err) {
      this.logger.warn(`Failed to parse job data: ${err.message}, raw: ${parts[2]}`);
      data = null;
    }
    const parsedOrderMs = Number.parseInt(parts[7], 10);
    return {
      id: parts[0],
      groupId: parts[1],
      data,
      attempts: Number.parseInt(parts[3], 10),
      maxAttempts: Number.parseInt(parts[4], 10),
      seq: Number.parseInt(parts[5], 10),
      timestamp: Number.parseInt(parts[6], 10),
      orderMs: Number.isNaN(parsedOrderMs) ? Number.parseInt(parts[6], 10) : parsedOrderMs,
      score: Number(parts[8]),
      deadlineAt: Number.parseInt(parts[9], 10)
    };
  }
  /**
   * Check how many jobs are waiting in a specific group
   */
  async getGroupJobCount(groupId) {
    const gZ = `${this.ns}:g:${groupId}`;
    return await this.r.zcard(gZ);
  }
  /**
   * Complete a job by removing from processing and unlocking the group.
   * Note: Job metadata recording is handled separately by recordCompleted().
   *
   * @deprecated Use completeWithMetadata() for internal operations. This method
   * is kept for backward compatibility and testing only.
   */
  async complete(job) {
    await evalScript(this.r, "complete", [
      this.ns,
      job.id,
      job.groupId
    ], 1);
  }
  /**
   * Complete a job AND record metadata in a single atomic operation.
   * This is the efficient internal method used by workers.
   */
  async completeWithMetadata(job, result, meta) {
    await evalScript(this.r, "complete-with-metadata", [
      this.ns,
      job.id,
      job.groupId,
      "completed",
      String(meta.finishedOn),
      JSON.stringify(result ?? null),
      String(this.keepCompleted),
      String(this.keepFailed),
      String(meta.processedOn),
      String(meta.finishedOn),
      String(meta.attempts),
      String(meta.maxAttempts)
    ], 1);
  }
  /**
   * Atomically complete a job and try to reserve the next job from the same group
   * This prevents race conditions where other workers can steal subsequent jobs from the same group
   */
  /**
   * Atomically complete a job with metadata and reserve the next job from the same group.
   */
  async completeAndReserveNextWithMetadata(completedJobId, groupId, handlerResult, meta) {
    const now = Date.now();
    try {
      const result = await evalScript(this.r, "complete-and-reserve-next-with-metadata", [
        this.ns,
        completedJobId,
        groupId,
        "completed",
        String(meta.finishedOn),
        JSON.stringify(handlerResult ?? null),
        String(this.keepCompleted),
        String(this.keepFailed),
        String(meta.processedOn),
        String(meta.finishedOn),
        String(meta.attempts),
        String(meta.maxAttempts),
        String(now),
        String(this.jobTimeoutMs)
      ], 1);
      if (!result) return null;
      const parts = result.split("|||");
      if (parts.length !== 10) {
        this.logger.error("Queue completeAndReserveNextWithMetadata: unexpected result format:", result);
        return null;
      }
      const [id, , data, attempts, maxAttempts, seq, enq, orderMs, score, deadline] = parts;
      return {
        id,
        groupId,
        data: JSON.parse(data),
        attempts: parseInt(attempts, 10),
        maxAttempts: parseInt(maxAttempts, 10),
        seq: parseInt(seq, 10),
        timestamp: parseInt(enq, 10),
        orderMs: parseInt(orderMs, 10),
        score: parseFloat(score),
        deadlineAt: parseInt(deadline, 10)
      };
    } catch (error) {
      this.logger.error("Queue completeAndReserveNextWithMetadata error:", error);
      return null;
    }
  }
  /**
   * Check if a job is currently in processing state
   */
  async isJobProcessing(jobId) {
    return await this.r.zscore(`${this.ns}:processing`, jobId) !== null;
  }
  async retry(jobId, backoffMs = 0) {
    return evalScript(this.r, "retry", [
      this.ns,
      jobId,
      String(backoffMs)
    ], 1);
  }
  /**
   * Dead letter a job (remove from group and optionally store in dead letter queue)
   */
  async deadLetter(jobId, groupId) {
    return evalScript(this.r, "dead-letter", [
      this.ns,
      jobId,
      groupId
    ], 1);
  }
  /**
   * Record a successful completion for retention and inspection
   * Uses consolidated Lua script for atomic operation with retention management
   */
  async recordCompleted(job, result, meta) {
    const processedOn = meta.processedOn ?? Date.now();
    const finishedOn = meta.finishedOn ?? Date.now();
    const attempts = meta.attempts ?? 0;
    const maxAttempts = meta.maxAttempts ?? this.defaultMaxAttempts;
    try {
      await evalScript(this.r, "record-job-result", [
        this.ns,
        job.id,
        "completed",
        String(finishedOn),
        JSON.stringify(result ?? null),
        String(this.keepCompleted),
        String(this.keepFailed),
        String(processedOn),
        String(finishedOn),
        String(attempts),
        String(maxAttempts)
      ], 1);
    } catch (error) {
      this.logger.error(`Error recording completion for job ${job.id}:`, error);
      throw error;
    }
  }
  /**
   * Record a failure attempt (non-final), storing last error for visibility
   */
  async recordAttemptFailure(job, error, meta) {
    const jobKey = `${this.ns}:job:${job.id}`;
    const processedOn = meta.processedOn ?? Date.now();
    const finishedOn = meta.finishedOn ?? Date.now();
    const message = typeof error === "string" ? error : error.message ?? "Error";
    const name = typeof error === "string" ? "Error" : error.name ?? "Error";
    const stack = typeof error === "string" ? "" : error.stack ?? "";
    await this.r.hset(jobKey, "lastErrorMessage", message, "lastErrorName", name, "lastErrorStack", stack, "processedOn", String(processedOn), "finishedOn", String(finishedOn));
  }
  /**
   * Record a final failure (dead-lettered) for retention and inspection
   * Uses consolidated Lua script for atomic operation
   */
  async recordFinalFailure(job, error, meta) {
    const processedOn = meta.processedOn ?? Date.now();
    const finishedOn = meta.finishedOn ?? Date.now();
    const attempts = meta.attempts ?? 0;
    const maxAttempts = meta.maxAttempts ?? this.defaultMaxAttempts;
    const message = typeof error === "string" ? error : error.message ?? "Error";
    const name = typeof error === "string" ? "Error" : error.name ?? "Error";
    const stack = typeof error === "string" ? "" : error.stack ?? "";
    const errorInfo = JSON.stringify({
      message,
      name,
      stack
    });
    try {
      await evalScript(this.r, "record-job-result", [
        this.ns,
        job.id,
        "failed",
        String(finishedOn),
        errorInfo,
        String(this.keepCompleted),
        String(this.keepFailed),
        String(processedOn),
        String(finishedOn),
        String(attempts),
        String(maxAttempts)
      ], 1);
    } catch (err) {
      this.logger.error(`Error recording final failure for job ${job.id}:`, err);
      throw err;
    }
  }
  async getCompleted(limit = this.keepCompleted) {
    const completedKey = `${this.ns}:completed`;
    const ids = await this.r.zrevrange(completedKey, 0, Math.max(0, limit - 1));
    if (ids.length === 0) return [];
    const pipe = this.r.multi();
    for (const id of ids) pipe.hmget(`${this.ns}:job:${id}`, "groupId", "data", "returnvalue", "processedOn", "finishedOn", "attempts", "maxAttempts");
    const rows = await pipe.exec() ?? [];
    return ids.map((id, idx) => {
      const [groupId, dataStr, retStr, processedOn, finishedOn, attempts, maxAttempts] = rows[idx]?.[1] || [];
      return {
        id,
        groupId: groupId || "",
        data: dataStr ? safeJsonParse(dataStr) : null,
        returnvalue: retStr ? safeJsonParse(retStr) : null,
        processedOn: processedOn ? parseInt(processedOn, 10) : void 0,
        finishedOn: finishedOn ? parseInt(finishedOn, 10) : void 0,
        attempts: attempts ? parseInt(attempts, 10) : 0,
        maxAttempts: maxAttempts ? parseInt(maxAttempts, 10) : this.defaultMaxAttempts
      };
    });
  }
  async getFailed(limit = this.keepFailed) {
    const failedKey = `${this.ns}:failed`;
    const ids = await this.r.zrevrange(failedKey, 0, Math.max(0, limit - 1));
    if (ids.length === 0) return [];
    const pipe = this.r.multi();
    for (const id of ids) pipe.hmget(`${this.ns}:job:${id}`, "groupId", "data", "failedReason", "stacktrace", "processedOn", "finishedOn", "attempts", "maxAttempts");
    const rows = await pipe.exec() ?? [];
    return ids.map((id, idx) => {
      const [groupId, dataStr, failedReason, stacktrace, processedOn, finishedOn, attempts, maxAttempts] = rows[idx]?.[1] || [];
      return {
        id,
        groupId: groupId || "",
        data: dataStr ? safeJsonParse(dataStr) : null,
        failedReason: failedReason || "",
        stacktrace: stacktrace || void 0,
        processedOn: processedOn ? parseInt(processedOn, 10) : void 0,
        finishedOn: finishedOn ? parseInt(finishedOn, 10) : void 0,
        attempts: attempts ? parseInt(attempts, 10) : 0,
        maxAttempts: maxAttempts ? parseInt(maxAttempts, 10) : this.defaultMaxAttempts
      };
    });
  }
  /**
   * Convenience: return completed jobs as Job entities (non-breaking, new API)
   */
  async getCompletedJobs(limit = this.keepCompleted) {
    const completedKey = `${this.ns}:completed`;
    const ids = await this.r.zrevrange(completedKey, 0, Math.max(0, limit - 1));
    if (ids.length === 0) return [];
    const pipe = this.r.multi();
    for (const id of ids) pipe.hgetall(`${this.ns}:job:${id}`);
    const rows = await pipe.exec();
    const jobs = [];
    for (let i = 0; i < ids.length; i++) {
      const id = ids[i];
      const raw = rows?.[i]?.[1] || {};
      if (!raw || Object.keys(raw).length === 0) {
        this.logger.warn(`Skipping completed job ${id} - not found (likely cleaned up)`);
        continue;
      }
      const job = Job.fromRawHash(this, id, raw, "completed");
      jobs.push(job);
    }
    return jobs;
  }
  /**
   * Convenience: return failed jobs as Job entities (non-breaking, new API)
   */
  async getFailedJobs(limit = this.keepFailed) {
    const failedKey = `${this.ns}:failed`;
    const ids = await this.r.zrevrange(failedKey, 0, Math.max(0, limit - 1));
    if (ids.length === 0) return [];
    const pipe = this.r.multi();
    for (const id of ids) pipe.hgetall(`${this.ns}:job:${id}`);
    const rows = await pipe.exec();
    const jobs = [];
    for (let i = 0; i < ids.length; i++) {
      const id = ids[i];
      const raw = rows?.[i]?.[1] || {};
      if (!raw || Object.keys(raw).length === 0) {
        this.logger.warn(`Skipping failed job ${id} - not found (likely cleaned up)`);
        continue;
      }
      const job = Job.fromRawHash(this, id, raw, "failed");
      jobs.push(job);
    }
    return jobs;
  }
  async getCompletedCount() {
    return this.r.zcard(`${this.ns}:completed`);
  }
  async getFailedCount() {
    return this.r.zcard(`${this.ns}:failed`);
  }
  async heartbeat(job, extendMs = this.vt) {
    return evalScript(this.r, "heartbeat", [
      this.ns,
      job.id,
      job.groupId,
      String(extendMs)
    ], 1);
  }
  /**
   * Clean up expired jobs and stale data.
   * Uses distributed lock to ensure only one worker runs cleanup at a time,
   * similar to scheduler lock pattern.
   */
  async cleanup() {
    const cleanupLockKey = `${this.ns}:cleanup:lock`;
    const ttlMs = 6e4;
    try {
      if (await this.r.set(cleanupLockKey, "1", "PX", ttlMs, "NX") !== "OK") return 0;
      const now = Date.now();
      return evalScript(this.r, "cleanup", [this.ns, String(now)], 1);
    } catch (_e) {
      return 0;
    }
  }
  /**
   * Calculate adaptive blocking timeout like BullMQ
   * Returns timeout in seconds
   *
   * Inspiration by BullMQ ⭐️
   */
  getBlockTimeout(maxTimeout, blockUntil) {
    const minimumBlockTimeout = .001;
    const maximumBlockTimeout = 5;
    if (blockUntil) {
      const blockDelay = blockUntil - Date.now();
      if (blockDelay <= 0) return minimumBlockTimeout;
      else if (blockDelay < minimumBlockTimeout * 1e3) return minimumBlockTimeout;
      else return Math.min(blockDelay / 1e3, maximumBlockTimeout);
    }
    return Math.max(minimumBlockTimeout, Math.min(maxTimeout, maximumBlockTimeout));
  }
  /**
   * Check if an error is a Redis connection error (should retry)
   * Conservative approach: only connection closed and ECONNREFUSED
   */
  isConnectionError(err) {
    if (!err) return false;
    const message = `${err.message || ""}`;
    return message === "Connection is closed." || message.includes("ECONNREFUSED");
  }
  async reserveBlocking(timeoutSec = 5, blockUntil, blockingClient) {
    const startTime = Date.now();
    if (await this.isPaused()) {
      await sleep$1(50);
      return null;
    }
    if (!(this._consecutiveEmptyReserves >= 3)) {
      const immediateJob = await this.reserve();
      if (immediateJob) {
        this.logger.debug(`Immediate reserve successful (${Date.now() - startTime}ms)`);
        this._consecutiveEmptyReserves = 0;
        return immediateJob;
      }
    }
    const adaptiveTimeout = this.getBlockTimeout(timeoutSec, blockUntil);
    if (this._consecutiveEmptyReserves % 10 === 0) this.logger.debug(`Starting blocking operation (timeout: ${adaptiveTimeout}s, consecutive empty: ${this._consecutiveEmptyReserves})`);
    const readyKey = nsKey(this.ns, "ready");
    try {
      const bzpopminStart = Date.now();
      const result = await (blockingClient ?? this.r).bzpopmin(readyKey, adaptiveTimeout);
      const bzpopminDuration = Date.now() - bzpopminStart;
      if (!result || result.length < 3) {
        this.logger.debug(`Blocking timeout/empty (took ${bzpopminDuration}ms)`);
        this._consecutiveEmptyReserves = this._consecutiveEmptyReserves + 1;
        return null;
      }
      const [, groupId, score] = result;
      if (this._consecutiveEmptyReserves % 10 === 0) this.logger.debug(`Blocking result: group=${groupId}, score=${score} (took ${bzpopminDuration}ms)`);
      const reserveStart = Date.now();
      const job = await this.reserveAtomic(groupId);
      const reserveDuration = Date.now() - reserveStart;
      if (job) {
        this.logger.debug(`Successful job reserve after blocking: ${job.id} from group ${job.groupId} (reserve took ${reserveDuration}ms)`);
        this._consecutiveEmptyReserves = 0;
      } else {
        this.logger.warn(`Blocking found group but reserve failed: group=${groupId} (reserve took ${reserveDuration}ms)`);
        try {
          const groupKey = `${this.ns}:g:${groupId}`;
          const jobCount = await this.r.zcard(groupKey);
          if (jobCount > 0) {
            await this.r.zadd(readyKey, Number(score), groupId);
            this.logger.debug(`Restored group ${groupId} to ready with score ${score} after failed atomic reserve (${jobCount} jobs)`);
          } else this.logger.warn(`Not restoring empty group ${groupId} - preventing poisoned group loop`);
        } catch (_e) {
          this.logger.warn(`Failed to check group ${groupId} job count, not restoring`);
        }
        this._consecutiveEmptyReserves = this._consecutiveEmptyReserves + 1;
        return this.reserve();
      }
      return job;
    } catch (err) {
      const errorDuration = Date.now() - startTime;
      this.logger.error(`Blocking error after ${errorDuration}ms:`, err);
      if (this.isConnectionError(err)) {
        this.logger.error(`Connection error detected - rethrowing`);
        throw err;
      }
      this.logger.warn(`Falling back to regular reserve due to error`);
      return this.reserve();
    } finally {
      const totalDuration = Date.now() - startTime;
      if (totalDuration > 1e3) this.logger.debug(`ReserveBlocking completed in ${totalDuration}ms`);
    }
  }
  /**
   * Reserve a job from a specific group atomically (eliminates race conditions)
   * @param groupId - The group to reserve from
   */
  async reserveAtomic(groupId) {
    const now = Date.now();
    const result = await evalScript(this.r, "reserve-atomic", [
      this.ns,
      String(now),
      String(this.vt),
      String(groupId)
    ], 1);
    if (!result) return null;
    const parts = result.split("|||");
    if (parts.length < 10) return null;
    const [id, groupIdRaw, data, attempts, maxAttempts, seq, timestamp, orderMs, score, deadline] = parts;
    const parsedTimestamp = parseInt(timestamp, 10);
    const parsedOrderMs = parseInt(orderMs, 10);
    return {
      id,
      groupId: groupIdRaw,
      data: JSON.parse(data),
      attempts: parseInt(attempts, 10),
      maxAttempts: parseInt(maxAttempts, 10),
      seq: parseInt(seq, 10),
      timestamp: parsedTimestamp,
      orderMs: Number.isNaN(parsedOrderMs) ? parsedTimestamp : parsedOrderMs,
      score: parseFloat(score),
      deadlineAt: parseInt(deadline, 10)
    };
  }
  /**
   * Get the list of groups currently in the ready state
   * @param start
   * @param end
   */
  async getReadyGroups(start = 0, end = -1) {
    return this.r.zrange(`${this.ns}:ready`, start, end);
  }
  /**
   * Set a group's metadata (priority / concurrency)
   * These settings are stored in a hash: groupmq:{ns}:config:{groupId}
   */
  async setGroupConfig(groupId, config) {
    const key = `${this.ns}:config:${groupId}`;
    const args = [];
    if (config.priority !== void 0) args.push("priority", String(config.priority));
    if (config.concurrency !== void 0) args.push("concurrency", String(config.concurrency));
    if (args.length > 0) await this.r.hset(key, ...args);
  }
  async getGroupConfig(groupId) {
    const key = `${this.ns}:config:${groupId}`;
    const [p, c] = await this.r.hmget(key, "priority", "concurrency");
    return {
      priority: p ? parseInt(p, 10) : 1,
      concurrency: c ? parseInt(c, 10) : 1
    };
  }
  /**
   * Set the concurrency limit for a specific group
   * @param groupId Group ID
   * @param limit Concurrency (must be >= 1)
   */
  async setGroupConcurrency(groupId, limit) {
    const validLimit = Math.max(1, Math.floor(limit));
    await this.r.hset(`${this.ns}:config:${groupId}`, "concurrency", String(validLimit));
  }
  /**
   * Get the concurrency limit for a specific group
   */
  async getGroupConcurrency(groupId) {
    const val = await this.r.hget(`${this.ns}:config:${groupId}`, "concurrency");
    return val ? parseInt(val, 10) : 1;
  }
  /**
   * Get the enqueue timestamp of the oldest job in a group
   * Used by the aging algorithm of PriorityStrategy
   * @param groupId Group ID
   * @returns Timestamp of the oldest job, or undefined if the group is empty
   */
  async getGroupOldestTimestamp(groupId) {
    const gZ = `${this.ns}:g:${groupId}`;
    const result = await this.r.zrange(gZ, 0, 0);
    if (!result || result.length === 0) return;
    const jobId = result[0];
    const timestamp = await this.r.hget(`${this.ns}:job:${jobId}`, "timestamp");
    return timestamp ? parseInt(timestamp, 10) : void 0;
  }
  /**
   * Reserve up to maxBatch jobs (one per available group) atomically in Lua.
   */
  async reserveBatch(maxBatch = 16) {
    const now = Date.now();
    const results = await evalScript(this.r, "reserve-batch", [
      this.ns,
      String(now),
      String(this.vt),
      String(Math.max(1, maxBatch))
    ], 1);
    const out = [];
    for (const r of results || []) {
      if (!r) continue;
      const parts = r.split("|||");
      if (parts.length !== 10) continue;
      out.push({
        id: parts[0],
        groupId: parts[1],
        data: safeJsonParse(parts[2]),
        attempts: parseInt(parts[3], 10),
        maxAttempts: parseInt(parts[4], 10),
        seq: parseInt(parts[5], 10),
        timestamp: parseInt(parts[6], 10),
        orderMs: parseInt(parts[7], 10),
        score: parseFloat(parts[8]),
        deadlineAt: parseInt(parts[9], 10)
      });
    }
    return out;
  }
  /**
   * Get the number of jobs currently being processed (active jobs)
   */
  async getActiveCount() {
    return evalScript(this.r, "get-active-count", [this.ns], 1);
  }
  /**
   * Get the number of jobs waiting to be processed
   */
  async getWaitingCount() {
    return evalScript(this.r, "get-waiting-count", [this.ns], 1);
  }
  /**
   * Get the number of jobs delayed due to backoff
   */
  async getDelayedCount() {
    return evalScript(this.r, "get-delayed-count", [this.ns], 1);
  }
  /**
   * Get list of active job IDs
   */
  async getActiveJobs() {
    return evalScript(this.r, "get-active-jobs", [this.ns], 1);
  }
  /**
   * Get list of waiting job IDs
   */
  async getWaitingJobs() {
    return evalScript(this.r, "get-waiting-jobs", [this.ns], 1);
  }
  /**
   * Get list of delayed job IDs
   */
  async getDelayedJobs() {
    return evalScript(this.r, "get-delayed-jobs", [this.ns], 1);
  }
  /**
   * Get list of unique group IDs that have jobs
   */
  async getUniqueGroups() {
    return evalScript(this.r, "get-unique-groups", [this.ns], 1);
  }
  /**
   * Get count of unique groups that have jobs
   */
  async getUniqueGroupsCount() {
    return evalScript(this.r, "get-unique-groups-count", [this.ns], 1);
  }
  /**
   * Fetch a single job by ID with enriched fields for UI/inspection.
   * Attempts to mimic BullMQ's Job shape for fields commonly used by BullBoard.
   */
  async getJob(id) {
    return Job.fromStore(this, id);
  }
  /**
   * Fetch jobs by statuses, emulating BullMQ's Queue.getJobs API used by BullBoard.
   * Only getter functionality; ordering is best-effort.
   *
   * Optimized with pagination to reduce Redis load - especially important for BullBoard polling.
   */
  async getJobsByStatus(jobStatuses, start = 0, end = -1) {
    const requestedCount = end >= 0 ? end - start + 1 : 100;
    const fetchLimit = Math.min(requestedCount * 2, 500);
    const idToStatus = /* @__PURE__ */ new Map();
    const idSets = [];
    const pushZRange = async (key, status, reverse = false) => {
      try {
        const ids = reverse ? await this.r.zrevrange(key, 0, fetchLimit - 1) : await this.r.zrange(key, 0, fetchLimit - 1);
        for (const id of ids) idToStatus.set(id, status);
        idSets.push(...ids);
      } catch (_e) {}
    };
    const statuses = new Set(jobStatuses);
    if (statuses.has("active")) await pushZRange(`${this.ns}:processing`, "active");
    if (statuses.has("delayed")) await pushZRange(`${this.ns}:delayed`, "delayed");
    if (statuses.has("completed")) await pushZRange(`${this.ns}:completed`, "completed", true);
    if (statuses.has("failed")) await pushZRange(`${this.ns}:failed`, "failed", true);
    if (statuses.has("waiting")) try {
      const groupIds = await this.r.smembers(`${this.ns}:groups`);
      if (groupIds.length > 0) {
        const groupsToScan = groupIds.slice(0, Math.min(100, groupIds.length));
        const pipe$1 = this.r.multi();
        const jobsPerGroup = Math.max(1, Math.ceil(fetchLimit / groupsToScan.length));
        for (const gid of groupsToScan) pipe$1.zrange(`${this.ns}:g:${gid}`, 0, jobsPerGroup - 1);
        const rows$1 = await pipe$1.exec();
        for (const r of rows$1 || []) {
          const arr = r?.[1] || [];
          for (const id of arr) idToStatus.set(id, "waiting");
          idSets.push(...arr);
        }
      }
    } catch (_e) {}
    const seen = /* @__PURE__ */ new Set();
    const uniqueIds = [];
    for (const id of idSets) if (!seen.has(id)) {
      seen.add(id);
      uniqueIds.push(id);
    }
    const slice = end >= 0 ? uniqueIds.slice(start, end + 1) : uniqueIds.slice(start);
    if (slice.length === 0) return [];
    const pipe = this.r.multi();
    for (const id of slice) pipe.hgetall(`${this.ns}:job:${id}`);
    const rows = await pipe.exec();
    const jobs = [];
    for (let i = 0; i < slice.length; i++) {
      const id = slice[i];
      const raw = rows?.[i]?.[1] || {};
      if (!raw || Object.keys(raw).length === 0) {
        this.logger.warn(`Skipping job ${id} - not found (likely cleaned up by retention)`);
        continue;
      }
      const knownStatus = idToStatus.get(id);
      const job = Job.fromRawHash(this, id, raw, knownStatus);
      jobs.push(job);
|
+
}
|
|
1381
|
+
return jobs;
|
|
1382
|
+
}
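A usage sketch for dashboard-style listing, assuming an existing Queue instance and that the returned Job objects expose id and groupId (the Job class is defined elsewhere in this file):

// Sketch: fetch the 20 most relevant failed/completed jobs, the way a
// BullBoard-style poller would page through them.
async function listRecentOutcomes(queue) {
  const jobs = await queue.getJobsByStatus(["failed", "completed"], 0, 19);
  return jobs.map((job) => ({ id: job.id, groupId: job.groupId }));
}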
|
|
1383
|
+
/**
|
|
1384
|
+
* Provide counts structured like BullBoard expects.
|
|
1385
|
+
*/
|
|
1386
|
+
async getJobCounts() {
|
|
1387
|
+
const [active, waiting, delayed, completed, failed] = await Promise.all([
|
|
1388
|
+
this.getActiveCount(),
|
|
1389
|
+
this.getWaitingCount(),
|
|
1390
|
+
this.getDelayedCount(),
|
|
1391
|
+
this.getCompletedCount(),
|
|
1392
|
+
this.getFailedCount()
|
|
1393
|
+
]);
|
|
1394
|
+
return {
|
|
1395
|
+
active,
|
|
1396
|
+
waiting,
|
|
1397
|
+
delayed,
|
|
1398
|
+
completed,
|
|
1399
|
+
failed,
|
|
1400
|
+
paused: 0,
|
|
1401
|
+
"waiting-children": 0,
|
|
1402
|
+
prioritized: 0
|
|
1403
|
+
};
|
|
1404
|
+
}
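The returned object uses exactly the keys above, with paused, waiting-children and prioritized hard-coded to 0. A small sketch, assuming an existing Queue instance:

// Sketch: expose queue depth to a metrics collector using getJobCounts().
async function queueDepth(queue) {
  const counts = await queue.getJobCounts();
  return counts.waiting + counts.delayed + counts.active;
}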
|
|
1405
|
+
/**
|
|
1406
|
+
* Check for stalled jobs and recover or fail them
|
|
1407
|
+
* Returns array of [jobId, groupId, action] tuples
|
|
1408
|
+
*/
|
|
1409
|
+
async checkStalledJobs(now, gracePeriod, maxStalledCount) {
|
|
1410
|
+
try {
|
|
1411
|
+
return await evalScript(this.r, "check-stalled", [
|
|
1412
|
+
this.ns,
|
|
1413
|
+
String(now),
|
|
1414
|
+
String(gracePeriod),
|
|
1415
|
+
String(maxStalledCount)
|
|
1416
|
+
], 1) || [];
|
|
1417
|
+
} catch (error) {
|
|
1418
|
+
this.logger.error("Error checking stalled jobs:", error);
|
|
1419
|
+
return [];
|
|
1420
|
+
}
|
|
1421
|
+
}
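The script returns a flat array that is read in strides of three (jobId, groupId, action), exactly as the worker's checkStalled loop later in this file does. A small decoding sketch:

// Sketch: turn the flat [jobId, groupId, action, ...] result into objects.
// Actions observed in this file are "recovered" and "failed".
function decodeStalledResults(results) {
  const stalled = [];
  for (let i = 0; i < results.length; i += 3) {
    stalled.push({ jobId: results[i], groupId: results[i + 1], action: results[i + 2] });
  }
  return stalled;
}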
|
|
1422
|
+
/**
|
|
1423
|
+
* Start the promoter service for staging system.
|
|
1424
|
+
* Promoter listens to Redis keyspace notifications and promotes staged jobs when ready.
|
|
1425
|
+
* This is idempotent - calling multiple times has no effect if already running.
|
|
1426
|
+
*/
|
|
1427
|
+
async startPromoter() {
|
|
1428
|
+
if (this.promoterRunning || this.orderingDelayMs <= 0) return;
|
|
1429
|
+
this.promoterRunning = true;
|
|
1430
|
+
this.promoterLockId = randomUUID();
|
|
1431
|
+
try {
|
|
1432
|
+
this.promoterRedis = this.r.duplicate();
|
|
1433
|
+
try {
|
|
1434
|
+
await this.promoterRedis.config("SET", "notify-keyspace-events", "Ex");
|
|
1435
|
+
this.logger.debug("Enabled Redis keyspace notifications for staging promoter");
|
|
1436
|
+
} catch (err) {
|
|
1437
|
+
this.logger.warn("Failed to enable keyspace notifications. Promoter will use polling fallback.", err);
|
|
1438
|
+
}
|
|
1439
|
+
const db = this.promoterRedis.options.db ?? 0;
|
|
1440
|
+
const timerKey = `${this.ns}:stage:timer`;
|
|
1441
|
+
const expiredChannel = `__keyevent@${db}__:expired`;
|
|
1442
|
+
await this.promoterRedis.subscribe(expiredChannel, (err) => {
|
|
1443
|
+
if (err) this.logger.error("Failed to subscribe to keyspace events:", err);
|
|
1444
|
+
else this.logger.debug(`Subscribed to ${expiredChannel}`);
|
|
1445
|
+
});
|
|
1446
|
+
this.promoterRedis.on("message", async (channel, message) => {
|
|
1447
|
+
if (channel === expiredChannel && message === timerKey) await this.runPromotion();
|
|
1448
|
+
});
|
|
1449
|
+
this.promoterInterval = setInterval(async () => {
|
|
1450
|
+
await this.runPromotion();
|
|
1451
|
+
}, 100);
|
|
1452
|
+
await this.runPromotion();
|
|
1453
|
+
this.logger.debug("Staging promoter started");
|
|
1454
|
+
} catch (err) {
|
|
1455
|
+
this.logger.error("Failed to start promoter:", err);
|
|
1456
|
+
this.promoterRunning = false;
|
|
1457
|
+
await this.stopPromoter();
|
|
1458
|
+
}
|
|
1459
|
+
}
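The promoter prefers expired-key keyspace notifications and keeps the 100 ms interval above as a polling fallback when CONFIG SET is rejected (typical on managed Redis, where notify-keyspace-events has to be set in the server configuration instead). A small check sketch, assuming an ioredis client:

// Sketch: verify that expired-key notifications are enabled before relying on
// the promoter's pub/sub path. CONFIG GET returns a flat [name, value] pair.
async function keyspaceNotificationsEnabled(redis) {
  const [, flags = ""] = await redis.config("GET", "notify-keyspace-events");
  // The promoter subscribes to __keyevent@<db>__:expired, which needs the
  // "E" (keyevent) class plus "x" (expired) or the "A" catch-all alias.
  return flags.includes("E") && (flags.includes("x") || flags.includes("A"));
}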
|
|
1460
|
+
/**
|
|
1461
|
+
* Run a single promotion cycle with distributed locking
|
|
1462
|
+
*/
|
|
1463
|
+
async runPromotion() {
|
|
1464
|
+
if (!this.promoterRunning) return;
|
|
1465
|
+
const lockKey = `${this.ns}:promoter:lock`;
|
|
1466
|
+
const lockTtl = 3e4;
|
|
1467
|
+
try {
|
|
1468
|
+
if (await this.r.set(lockKey, this.promoterLockId, "PX", lockTtl, "NX") === "OK") try {
|
|
1469
|
+
const promoted = await evalScript(this.r, "promote-staged", [
|
|
1470
|
+
this.ns,
|
|
1471
|
+
String(Date.now()),
|
|
1472
|
+
String(100)
|
|
1473
|
+
], 1);
|
|
1474
|
+
if (promoted > 0) this.logger.debug(`Promoted ${promoted} staged jobs`);
|
|
1475
|
+
} finally {
|
|
1476
|
+
if (await this.r.get(lockKey) === this.promoterLockId) await this.r.del(lockKey);
|
|
1477
|
+
}
|
|
1478
|
+
} catch (err) {
|
|
1479
|
+
this.logger.error("Error during promotion:", err);
|
|
1480
|
+
}
|
|
1481
|
+
}
|
|
1482
|
+
/**
|
|
1483
|
+
* Stop the promoter service
|
|
1484
|
+
*/
|
|
1485
|
+
async stopPromoter() {
|
|
1486
|
+
if (!this.promoterRunning) return;
|
|
1487
|
+
this.promoterRunning = false;
|
|
1488
|
+
if (this.promoterInterval) {
|
|
1489
|
+
clearInterval(this.promoterInterval);
|
|
1490
|
+
this.promoterInterval = void 0;
|
|
1491
|
+
}
|
|
1492
|
+
if (this.promoterRedis) {
|
|
1493
|
+
try {
|
|
1494
|
+
await this.promoterRedis.unsubscribe();
|
|
1495
|
+
await this.promoterRedis.quit();
|
|
1496
|
+
} catch (_err) {
|
|
1497
|
+
try {
|
|
1498
|
+
this.promoterRedis.disconnect();
|
|
1499
|
+
} catch (_e) {}
|
|
1500
|
+
}
|
|
1501
|
+
this.promoterRedis = void 0;
|
|
1502
|
+
}
|
|
1503
|
+
this.logger.debug("Staging promoter stopped");
|
|
1504
|
+
}
|
|
1505
|
+
/**
|
|
1506
|
+
* Close underlying Redis connections
|
|
1507
|
+
*/
|
|
1508
|
+
async close() {
|
|
1509
|
+
if (this.batchConfig && this.batchBuffer.length > 0) {
|
|
1510
|
+
this.logger.debug(`Flushing ${this.batchBuffer.length} pending batched jobs before close`);
|
|
1511
|
+
await this.flushBatch();
|
|
1512
|
+
}
|
|
1513
|
+
await this.stopPromoter();
|
|
1514
|
+
try {
|
|
1515
|
+
await this.r.quit();
|
|
1516
|
+
} catch (_e) {
|
|
1517
|
+
try {
|
|
1518
|
+
this.r.disconnect();
|
|
1519
|
+
} catch (_e2) {}
|
|
1520
|
+
}
|
|
1521
|
+
}
|
|
1522
|
+
get pausedKey() {
|
|
1523
|
+
return `${this.ns}:paused`;
|
|
1524
|
+
}
|
|
1525
|
+
async pause() {
|
|
1526
|
+
await this.r.set(this.pausedKey, "1");
|
|
1527
|
+
}
|
|
1528
|
+
async resume() {
|
|
1529
|
+
await this.r.del(this.pausedKey);
|
|
1530
|
+
}
|
|
1531
|
+
async isPaused() {
|
|
1532
|
+
return await this.r.get(this.pausedKey) !== null;
|
|
1533
|
+
}
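pause() and resume() only toggle the <ns>:paused flag key; how reservation honors it is outside this excerpt. A usage sketch, assuming an existing Queue instance:

// Sketch: pause intake around a maintenance task, always resuming afterwards.
async function withPaused(queue, task) {
  await queue.pause();
  try {
    return await task();
  } finally {
    await queue.resume();
  }
}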
|
|
1534
|
+
/**
|
|
1535
|
+
* Wait for the queue to become empty (no active jobs)
|
|
1536
|
+
* @param timeoutMs Maximum time to wait in milliseconds (default: 60 seconds)
|
|
1537
|
+
* @returns true if queue became empty, false if timeout reached
|
|
1538
|
+
*/
|
|
1539
|
+
async waitForEmpty(timeoutMs = 6e4) {
|
|
1540
|
+
const startTime = Date.now();
|
|
1541
|
+
while (Date.now() - startTime < timeoutMs) try {
|
|
1542
|
+
if (await evalScript(this.r, "is-empty", [this.ns], 1) === 1) {
|
|
1543
|
+
await sleep$1(0);
|
|
1544
|
+
return true;
|
|
1545
|
+
}
|
|
1546
|
+
await sleep$1(200);
|
|
1547
|
+
} catch (err) {
|
|
1548
|
+
if (this.isConnectionError(err)) {
|
|
1549
|
+
this.logger.warn("Redis connection error in waitForEmpty, retrying...");
|
|
1550
|
+
await sleep$1(1e3);
|
|
1551
|
+
continue;
|
|
1552
|
+
}
|
|
1553
|
+
throw err;
|
|
1554
|
+
}
|
|
1555
|
+
return false;
|
|
1556
|
+
}
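waitForEmpty polls the is-empty script every 200 ms until the timeout, which makes it convenient in tests and shutdown paths. A sketch, assuming an existing Queue instance:

// Sketch: drain-then-close pattern. Logs a warning if work remained after 30s.
async function drainAndClose(queue) {
  const emptied = await queue.waitForEmpty(30_000);
  if (!emptied) console.warn("queue still had active jobs after 30s");
  await queue.close();
}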
|
|
1557
|
+
/**
|
|
1558
|
+
* Remove problematic groups from ready queue to prevent infinite loops
|
|
1559
|
+
* Handles both poisoned groups (only failed/expired jobs) and locked groups
|
|
1560
|
+
*
|
|
1561
|
+
* Throttled to 1% sampling rate to reduce Redis overhead
|
|
1562
|
+
*/
|
|
1563
|
+
async cleanupPoisonedGroup(groupId) {
|
|
1564
|
+
if (Math.random() > .01) return "skipped";
|
|
1565
|
+
const lastCheck = this._groupCleanupTracking.get(groupId) || 0;
|
|
1566
|
+
const now = Date.now();
|
|
1567
|
+
if (now - lastCheck < 1e4) return "throttled";
|
|
1568
|
+
this._groupCleanupTracking.set(groupId, now);
|
|
1569
|
+
if (this._groupCleanupTracking.size > 1e3) {
|
|
1570
|
+
const cutoff = now - 6e4;
|
|
1571
|
+
for (const [gid, ts] of this._groupCleanupTracking.entries()) if (ts < cutoff) this._groupCleanupTracking.delete(gid);
|
|
1572
|
+
}
|
|
1573
|
+
try {
|
|
1574
|
+
const result = await evalScript(this.r, "cleanup-poisoned-group", [
|
|
1575
|
+
this.ns,
|
|
1576
|
+
groupId,
|
|
1577
|
+
String(now)
|
|
1578
|
+
], 1);
|
|
1579
|
+
if (result === "poisoned") this.logger.warn(`Removed poisoned group ${groupId} from ready queue`);
|
|
1580
|
+
else if (result === "empty") this.logger.warn(`Removed empty group ${groupId} from ready queue`);
|
|
1581
|
+
else if (result === "locked") {
|
|
1582
|
+
if (Math.random() < .1) this.logger.debug(`Detected group ${groupId} is locked by another worker (this is normal with high concurrency)`);
|
|
1583
|
+
}
|
|
1584
|
+
return result;
|
|
1585
|
+
} catch (error) {
|
|
1586
|
+
this.logger.error(`Error cleaning up group ${groupId}:`, error);
|
|
1587
|
+
return "error";
|
|
1588
|
+
}
|
|
1589
|
+
}
|
|
1590
|
+
/**
|
|
1591
|
+
* Distributed one-shot scheduler: promotes delayed jobs and processes repeating jobs.
|
|
1592
|
+
* Only proceeds if a short-lived scheduler lock can be acquired.
|
|
1593
|
+
*/
|
|
1594
|
+
schedulerLockKey() {
|
|
1595
|
+
return `${this.ns}:sched:lock`;
|
|
1596
|
+
}
|
|
1597
|
+
async acquireSchedulerLock(ttlMs = 1500) {
|
|
1598
|
+
try {
|
|
1599
|
+
return await this.r.set(this.schedulerLockKey(), "1", "PX", ttlMs, "NX") === "OK";
|
|
1600
|
+
} catch (_e) {
|
|
1601
|
+
return false;
|
|
1602
|
+
}
|
|
1603
|
+
}
|
|
1604
|
+
async runSchedulerOnce(now = Date.now()) {
|
|
1605
|
+
if (!await this.acquireSchedulerLock(this.schedulerLockTtlMs)) return;
|
|
1606
|
+
await this.promoteDelayedJobsBounded(32, now);
|
|
1607
|
+
await this.processRepeatingJobsBounded(16, now);
|
|
1608
|
+
}
|
|
1609
|
+
/**
|
|
1610
|
+
* Promote up to `limit` delayed jobs that are due now. Uses a small Lua script to move one item per tick.
|
|
1611
|
+
*/
|
|
1612
|
+
async promoteDelayedJobsBounded(limit = 256, now = Date.now()) {
|
|
1613
|
+
let moved = 0;
|
|
1614
|
+
for (let i = 0; i < limit; i++) try {
|
|
1615
|
+
const n = await evalScript(this.r, "promote-delayed-one", [this.ns, String(now)], 1);
|
|
1616
|
+
if (!n || n <= 0) break;
|
|
1617
|
+
moved += n;
|
|
1618
|
+
} catch (_e) {
|
|
1619
|
+
break;
|
|
1620
|
+
}
|
|
1621
|
+
return moved;
|
|
1622
|
+
}
|
|
1623
|
+
/**
|
|
1624
|
+
* Process up to `limit` repeating job ticks.
|
|
1625
|
+
* Intentionally small per-tick work to keep Redis CPU flat.
|
|
1626
|
+
*/
|
|
1627
|
+
async processRepeatingJobsBounded(limit = 128, now = Date.now()) {
|
|
1628
|
+
const scheduleKey = `${this.ns}:repeat:schedule`;
|
|
1629
|
+
let processed = 0;
|
|
1630
|
+
for (let i = 0; i < limit; i++) {
|
|
1631
|
+
const due = await this.r.zrangebyscore(scheduleKey, 0, now, "LIMIT", 0, 1);
|
|
1632
|
+
if (!due || due.length === 0) break;
|
|
1633
|
+
const repeatKey = due[0];
|
|
1634
|
+
try {
|
|
1635
|
+
const repeatJobKey = `${this.ns}:repeat:${repeatKey}`;
|
|
1636
|
+
const repeatJobDataStr = await this.r.get(repeatJobKey);
|
|
1637
|
+
if (!repeatJobDataStr) {
|
|
1638
|
+
await this.r.zrem(scheduleKey, repeatKey);
|
|
1639
|
+
continue;
|
|
1640
|
+
}
|
|
1641
|
+
const repeatJobData = JSON.parse(repeatJobDataStr);
|
|
1642
|
+
if (repeatJobData.removed) {
|
|
1643
|
+
await this.r.zrem(scheduleKey, repeatKey);
|
|
1644
|
+
await this.r.del(repeatJobKey);
|
|
1645
|
+
continue;
|
|
1646
|
+
}
|
|
1647
|
+
await this.r.zrem(scheduleKey, repeatKey);
|
|
1648
|
+
let nextRunTime;
|
|
1649
|
+
if ("every" in repeatJobData.repeat) nextRunTime = now + repeatJobData.repeat.every;
|
|
1650
|
+
else nextRunTime = this.getNextCronTime(repeatJobData.repeat.pattern, now);
|
|
1651
|
+
repeatJobData.nextRunTime = nextRunTime;
|
|
1652
|
+
repeatJobData.lastRunTime = now;
|
|
1653
|
+
await this.r.set(repeatJobKey, JSON.stringify(repeatJobData));
|
|
1654
|
+
await this.r.zadd(scheduleKey, nextRunTime, repeatKey);
|
|
1655
|
+
await evalScript(this.r, "enqueue", [
|
|
1656
|
+
this.ns,
|
|
1657
|
+
repeatJobData.groupId,
|
|
1658
|
+
JSON.stringify(repeatJobData.data),
|
|
1659
|
+
String(repeatJobData.maxAttempts ?? this.defaultMaxAttempts),
|
|
1660
|
+
String(repeatJobData.orderMs ?? now),
|
|
1661
|
+
String(0),
|
|
1662
|
+
String(randomUUID()),
|
|
1663
|
+
String(this.keepCompleted)
|
|
1664
|
+
], 1);
|
|
1665
|
+
processed++;
|
|
1666
|
+
} catch (error) {
|
|
1667
|
+
this.logger.error(`Error processing repeating job ${repeatKey}:`, error);
|
|
1668
|
+
await this.r.zrem(scheduleKey, repeatKey);
|
|
1669
|
+
}
|
|
1670
|
+
}
|
|
1671
|
+
return processed;
|
|
1672
|
+
}
|
|
1673
|
+
/**
|
|
1674
|
+
* Promote delayed jobs that are now ready to be processed
|
|
1675
|
+
* This should be called periodically to move jobs from delayed set to ready queue
|
|
1676
|
+
*/
|
|
1677
|
+
async promoteDelayedJobs() {
|
|
1678
|
+
try {
|
|
1679
|
+
return await evalScript(this.r, "promote-delayed-jobs", [this.ns, String(Date.now())], 1);
|
|
1680
|
+
} catch (error) {
|
|
1681
|
+
this.logger.error(`Error promoting delayed jobs:`, error);
|
|
1682
|
+
return 0;
|
|
1683
|
+
}
|
|
1684
|
+
}
|
|
1685
|
+
/**
|
|
1686
|
+
* Change the delay of a specific job
|
|
1687
|
+
*/
|
|
1688
|
+
async changeDelay(jobId, newDelay) {
|
|
1689
|
+
const newDelayUntil = newDelay > 0 ? Date.now() + newDelay : 0;
|
|
1690
|
+
try {
|
|
1691
|
+
return await evalScript(this.r, "change-delay", [
|
|
1692
|
+
this.ns,
|
|
1693
|
+
jobId,
|
|
1694
|
+
String(newDelayUntil),
|
|
1695
|
+
String(Date.now())
|
|
1696
|
+
], 1) === 1;
|
|
1697
|
+
} catch (error) {
|
|
1698
|
+
this.logger.error(`Error changing delay for job ${jobId}:`, error);
|
|
1699
|
+
return false;
|
|
1700
|
+
}
|
|
1701
|
+
}
|
|
1702
|
+
/**
|
|
1703
|
+
* Promote a delayed job to be ready immediately
|
|
1704
|
+
*/
|
|
1705
|
+
async promote(jobId) {
|
|
1706
|
+
return this.changeDelay(jobId, 0);
|
|
1707
|
+
}
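changeDelay re-anchors a delayed job relative to the current time, and promote is simply the zero-delay case. A sketch, assuming an existing Queue instance:

// Sketch: either run a delayed job immediately or push it out by 5 minutes.
async function reschedule(queue, jobId, runNow) {
  return runNow ? queue.promote(jobId) : queue.changeDelay(jobId, 5 * 60_000);
}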
|
|
1708
|
+
/**
|
|
1709
|
+
* Remove a job from the queue regardless of state (waiting, delayed, processing)
|
|
1710
|
+
*/
|
|
1711
|
+
async remove(jobId) {
|
|
1712
|
+
try {
|
|
1713
|
+
return await evalScript(this.r, "remove", [this.ns, jobId], 1) === 1;
|
|
1714
|
+
} catch (error) {
|
|
1715
|
+
this.logger.error(`Error removing job ${jobId}:`, error);
|
|
1716
|
+
return false;
|
|
1717
|
+
}
|
|
1718
|
+
}
|
|
1719
|
+
/**
|
|
1720
|
+
* Clean jobs of a given status older than graceTimeMs
|
|
1721
|
+
* @param graceTimeMs Remove jobs with finishedOn <= now - graceTimeMs (for completed/failed)
|
|
1722
|
+
* @param limit Max number of jobs to clean in one call
|
|
1723
|
+
* @param status Either 'completed' or 'failed'
|
|
1724
|
+
*/
|
|
1725
|
+
async clean(graceTimeMs, limit, status) {
|
|
1726
|
+
const graceAt = Date.now() - graceTimeMs;
|
|
1727
|
+
try {
|
|
1728
|
+
return await evalScript(this.r, "clean-status", [
|
|
1729
|
+
this.ns,
|
|
1730
|
+
status,
|
|
1731
|
+
String(graceAt),
|
|
1732
|
+
String(Math.max(0, Math.min(limit, 1e5)))
|
|
1733
|
+
], 1) ?? 0;
|
|
1734
|
+
} catch (error) {
|
|
1736
|
+
this.logger.error(`Error cleaning ${status} jobs:`, error);
|
|
1737
|
+
return 0;
|
|
1738
|
+
}
|
|
1739
|
+
}
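clean() removes finished jobs whose finish time is older than the grace period, capped at the given limit (itself clamped to 100000 above). A retention sketch, assuming an existing Queue instance:

// Sketch: periodic retention sweep, at most 1000 removals per status per call.
async function retentionSweep(queue) {
  const completed = await queue.clean(24 * 3600_000, 1000, "completed"); // older than 1 day
  const failed = await queue.clean(7 * 24 * 3600_000, 1000, "failed"); // older than 7 days
  return { completed, failed };
}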
|
|
1740
|
+
/**
|
|
1741
|
+
* Update a job's data payload (BullMQ-style)
|
|
1742
|
+
*/
|
|
1743
|
+
async updateData(jobId, data) {
|
|
1744
|
+
const jobKey = `${this.ns}:job:${jobId}`;
|
|
1745
|
+
if (!await this.r.exists(jobKey)) throw new Error(`Job ${jobId} not found`);
|
|
1746
|
+
const serialized = JSON.stringify(data === void 0 ? null : data);
|
|
1747
|
+
await this.r.hset(jobKey, "data", serialized);
|
|
1748
|
+
}
|
|
1749
|
+
/**
|
|
1750
|
+
* Add a repeating job (cron job)
|
|
1751
|
+
*/
|
|
1752
|
+
async addRepeatingJob(opts) {
|
|
1753
|
+
if (!opts.repeat) throw new Error("Repeat options are required for repeating jobs");
|
|
1754
|
+
const now = Date.now();
|
|
1755
|
+
const repeatKey = `${opts.groupId}:${JSON.stringify(opts.repeat)}:${now}:${Math.random().toString(36).slice(2)}`;
|
|
1756
|
+
let nextRunTime;
|
|
1757
|
+
if ("every" in opts.repeat) nextRunTime = now + opts.repeat.every;
|
|
1758
|
+
else nextRunTime = this.getNextCronTime(opts.repeat.pattern, now);
|
|
1759
|
+
const repeatJobData = {
|
|
1760
|
+
groupId: opts.groupId,
|
|
1761
|
+
data: opts.data === void 0 ? null : opts.data,
|
|
1762
|
+
maxAttempts: opts.maxAttempts ?? this.defaultMaxAttempts,
|
|
1763
|
+
orderMs: opts.orderMs,
|
|
1764
|
+
repeat: opts.repeat,
|
|
1765
|
+
nextRunTime,
|
|
1766
|
+
lastRunTime: null,
|
|
1767
|
+
removed: false
|
|
1768
|
+
};
|
|
1769
|
+
const repeatJobKey = `${this.ns}:repeat:${repeatKey}`;
|
|
1770
|
+
await this.r.set(repeatJobKey, JSON.stringify(repeatJobData));
|
|
1771
|
+
await this.r.zadd(`${this.ns}:repeat:schedule`, nextRunTime, repeatKey);
|
|
1772
|
+
const lookupKey = `${this.ns}:repeat:lookup:${opts.groupId}:${JSON.stringify(opts.repeat)}`;
|
|
1773
|
+
await this.r.set(lookupKey, repeatKey);
|
|
1774
|
+
const repeatId = `repeat:${repeatKey}`;
|
|
1775
|
+
const jobHashKey = `${this.ns}:job:${repeatId}`;
|
|
1776
|
+
try {
|
|
1777
|
+
await this.r.hmset(jobHashKey, "id", repeatId, "groupId", repeatJobData.groupId, "data", JSON.stringify(repeatJobData.data), "attempts", "0", "maxAttempts", String(repeatJobData.maxAttempts), "seq", "0", "timestamp", String(Date.now()), "orderMs", String(repeatJobData.orderMs ?? now), "status", "waiting");
|
|
1778
|
+
} catch (_e) {}
|
|
1779
|
+
return Job.fromStore(this, repeatId);
|
|
1780
|
+
}
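The repeat option is either an interval ({ every: ms }) or a cron expression ({ pattern }), matching the branch above, and removal must pass the same groupId/repeat pair because the lookup key is derived from their JSON. A sketch, assuming an existing Queue instance:

// Sketch: register an interval job and a nightly cron job, then cancel the
// interval one using the same repeat options it was added with.
async function scheduleReports(queue) {
  const every15m = { every: 15 * 60_000 };
  await queue.addRepeatingJob({ groupId: "tenant-42", data: { kind: "report" }, repeat: every15m });
  await queue.addRepeatingJob({ groupId: "tenant-42", data: { kind: "digest" }, repeat: { pattern: "0 3 * * *" } });
  await queue.removeRepeatingJob("tenant-42", every15m);
}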
|
|
1781
|
+
/**
|
|
1782
|
+
* Compute next execution time using cron-parser (BullMQ-style)
|
|
1783
|
+
*/
|
|
1784
|
+
getNextCronTime(pattern, fromTime) {
|
|
1785
|
+
try {
|
|
1786
|
+
return CronParser.parseExpression(pattern, { currentDate: new Date(fromTime) }).next().getTime();
|
|
1787
|
+
} catch (_e) {
|
|
1788
|
+
throw new Error(`Invalid cron pattern: ${pattern}`);
|
|
1789
|
+
}
|
|
1790
|
+
}
|
|
1791
|
+
/**
|
|
1792
|
+
* Remove a repeating job
|
|
1793
|
+
*/
|
|
1794
|
+
async removeRepeatingJob(groupId, repeat) {
|
|
1795
|
+
try {
|
|
1796
|
+
const lookupKey = `${this.ns}:repeat:lookup:${groupId}:${JSON.stringify(repeat)}`;
|
|
1797
|
+
const repeatKey = await this.r.get(lookupKey);
|
|
1798
|
+
if (!repeatKey) return false;
|
|
1799
|
+
const repeatJobKey = `${this.ns}:repeat:${repeatKey}`;
|
|
1800
|
+
const scheduleKey = `${this.ns}:repeat:schedule`;
|
|
1801
|
+
const repeatJobDataStr = await this.r.get(repeatJobKey);
|
|
1802
|
+
if (!repeatJobDataStr) {
|
|
1803
|
+
await this.r.del(lookupKey);
|
|
1804
|
+
return false;
|
|
1805
|
+
}
|
|
1806
|
+
const repeatJobData = JSON.parse(repeatJobDataStr);
|
|
1807
|
+
repeatJobData.removed = true;
|
|
1808
|
+
await this.r.set(repeatJobKey, JSON.stringify(repeatJobData));
|
|
1809
|
+
await this.r.zrem(scheduleKey, repeatKey);
|
|
1810
|
+
await this.r.del(lookupKey);
|
|
1811
|
+
try {
|
|
1812
|
+
const repeatId = `repeat:${repeatKey}`;
|
|
1813
|
+
await this.r.del(`${this.ns}:job:${repeatId}`);
|
|
1814
|
+
} catch (_e) {}
|
|
1815
|
+
return true;
|
|
1816
|
+
} catch (error) {
|
|
1817
|
+
this.logger.error(`Error removing repeating job:`, error);
|
|
1818
|
+
return false;
|
|
1819
|
+
}
|
|
1820
|
+
}
|
|
1821
|
+
};
|
|
1822
|
+
function sleep$1(ms) {
|
|
1823
|
+
return new Promise((resolve) => setTimeout(resolve, ms));
|
|
1824
|
+
}
|
|
1825
|
+
|
|
1826
|
+
//#endregion
|
|
1827
|
+
//#region src/async-fifo-queue.ts
|
|
1828
|
+
/**
|
|
1829
|
+
* This file contains code copied from BullMQ (https://github.com/taskforcesh/bullmq)
|
|
1830
|
+
*
|
|
1831
|
+
* BullMQ is a fantastic library and one of the most popular Redis-based job queue
|
|
1832
|
+
* libraries for Node.js. We've copied the AsyncFifoQueue implementation from BullMQ
|
|
1833
|
+
* as it's a well-designed component that fits our needs perfectly.
|
|
1834
|
+
*
|
|
1835
|
+
* Original copyright notice:
|
|
1836
|
+
* Copyright (c) Taskforce.sh and contributors
|
|
1837
|
+
*
|
|
1838
|
+
* This code is used under the MIT License. The original license can be found at:
|
|
1839
|
+
* https://github.com/taskforcesh/bullmq/blob/main/LICENSE
|
|
1840
|
+
*
|
|
1841
|
+
* Modifications may have been made to adapt this code for use in GroupMQ.
|
|
1842
|
+
*/
|
|
1843
|
+
var Node = class {
|
|
1844
|
+
constructor(value) {
|
|
1845
|
+
this.value = void 0;
|
|
1846
|
+
this.next = null;
|
|
1847
|
+
this.value = value;
|
|
1848
|
+
}
|
|
1849
|
+
};
|
|
1850
|
+
var LinkedList = class {
|
|
1851
|
+
constructor() {
|
|
1852
|
+
this.length = 0;
|
|
1853
|
+
this.head = null;
|
|
1854
|
+
this.tail = null;
|
|
1855
|
+
}
|
|
1856
|
+
push(value) {
|
|
1857
|
+
const newNode = new Node(value);
|
|
1858
|
+
if (!this.length) this.head = newNode;
|
|
1859
|
+
else this.tail.next = newNode;
|
|
1860
|
+
this.tail = newNode;
|
|
1861
|
+
this.length += 1;
|
|
1862
|
+
return newNode;
|
|
1863
|
+
}
|
|
1864
|
+
shift() {
|
|
1865
|
+
if (!this.length) return null;
|
|
1866
|
+
const head = this.head;
|
|
1867
|
+
this.head = this.head.next;
|
|
1868
|
+
this.length -= 1;
|
|
1869
|
+
return head;
|
|
1870
|
+
}
|
|
1871
|
+
};
|
|
1872
|
+
/**
|
|
1873
|
+
* AsyncFifoQueue
|
|
1874
|
+
*
|
|
1875
|
+
* A minimal FIFO queue for asynchronous operations. Allows adding asynchronous operations
|
|
1876
|
+
* and consuming them in the order they are resolved.
|
|
1877
|
+
*/
|
|
1878
|
+
var AsyncFifoQueue = class {
|
|
1879
|
+
constructor(ignoreErrors = false) {
|
|
1880
|
+
this.ignoreErrors = ignoreErrors;
|
|
1881
|
+
this.queue = new LinkedList();
|
|
1882
|
+
this.pending = /* @__PURE__ */ new Set();
|
|
1883
|
+
this.newPromise();
|
|
1884
|
+
}
|
|
1885
|
+
add(promise) {
|
|
1886
|
+
this.pending.add(promise);
|
|
1887
|
+
promise.then((data) => {
|
|
1888
|
+
this.pending.delete(promise);
|
|
1889
|
+
if (this.queue.length === 0) this.resolvePromise(data);
|
|
1890
|
+
this.queue.push(data);
|
|
1891
|
+
}).catch((err) => {
|
|
1892
|
+
this.pending.delete(promise);
|
|
1893
|
+
if (this.ignoreErrors) {
|
|
1894
|
+
if (this.queue.length === 0) this.resolvePromise(void 0);
|
|
1895
|
+
this.queue.push(void 0);
|
|
1896
|
+
} else this.rejectPromise(err);
|
|
1897
|
+
});
|
|
1898
|
+
}
|
|
1899
|
+
async waitAll() {
|
|
1900
|
+
await Promise.all(this.pending);
|
|
1901
|
+
}
|
|
1902
|
+
numTotal() {
|
|
1903
|
+
return this.pending.size + this.queue.length;
|
|
1904
|
+
}
|
|
1905
|
+
numPending() {
|
|
1906
|
+
return this.pending.size;
|
|
1907
|
+
}
|
|
1908
|
+
numQueued() {
|
|
1909
|
+
return this.queue.length;
|
|
1910
|
+
}
|
|
1911
|
+
resolvePromise(data) {
|
|
1912
|
+
this.resolve(data);
|
|
1913
|
+
this.newPromise();
|
|
1914
|
+
}
|
|
1915
|
+
rejectPromise(err) {
|
|
1916
|
+
this.reject(err);
|
|
1917
|
+
this.newPromise();
|
|
1918
|
+
}
|
|
1919
|
+
newPromise() {
|
|
1920
|
+
this.nextPromise = new Promise((resolve, reject) => {
|
|
1921
|
+
this.resolve = resolve;
|
|
1922
|
+
this.reject = reject;
|
|
1923
|
+
});
|
|
1924
|
+
}
|
|
1925
|
+
async wait() {
|
|
1926
|
+
return this.nextPromise;
|
|
1927
|
+
}
|
|
1928
|
+
async fetch() {
|
|
1929
|
+
if (this.pending.size === 0 && this.queue.length === 0) return;
|
|
1930
|
+
while (this.queue.length === 0) try {
|
|
1931
|
+
await this.wait();
|
|
1932
|
+
} catch (err) {
|
|
1933
|
+
if (!this.ignoreErrors) console.error("Unexpected Error in AsyncFifoQueue", err);
|
|
1934
|
+
}
|
|
1935
|
+
return this.queue.shift()?.value;
|
|
1936
|
+
}
|
|
1937
|
+
};
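AsyncFifoQueue is internal (it is not in the export list at the bottom of this file), so the sketch below only illustrates its behavior: values come out in settlement order, not insertion order, and with ignoreErrors=true a rejection surfaces as undefined.

// Sketch: the faster promise is consumed first even though it was added last.
async function demoFifo() {
  const fifo = new AsyncFifoQueue(true);
  fifo.add(new Promise((resolve) => setTimeout(() => resolve("slow"), 50)));
  fifo.add(Promise.resolve("fast"));
  console.log(await fifo.fetch()); // "fast"
  console.log(await fifo.fetch()); // "slow"
}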
|
|
1938
|
+
|
|
1939
|
+
//#endregion
|
|
1940
|
+
//#region src/worker.ts
|
|
1941
|
+
var TypedEventEmitter = class {
|
|
1942
|
+
constructor() {
|
|
1943
|
+
this.listeners = /* @__PURE__ */ new Map();
|
|
1944
|
+
}
|
|
1945
|
+
on(event, listener) {
|
|
1946
|
+
if (!this.listeners.has(event)) this.listeners.set(event, []);
|
|
1947
|
+
this.listeners.get(event).push(listener);
|
|
1948
|
+
return this;
|
|
1949
|
+
}
|
|
1950
|
+
off(event, listener) {
|
|
1951
|
+
const eventListeners = this.listeners.get(event);
|
|
1952
|
+
if (eventListeners) {
|
|
1953
|
+
const index = eventListeners.indexOf(listener);
|
|
1954
|
+
if (index !== -1) eventListeners.splice(index, 1);
|
|
1955
|
+
}
|
|
1956
|
+
return this;
|
|
1957
|
+
}
|
|
1958
|
+
emit(event, ...args) {
|
|
1959
|
+
const eventListeners = this.listeners.get(event);
|
|
1960
|
+
if (eventListeners && eventListeners.length > 0) {
|
|
1961
|
+
for (const listener of eventListeners) try {
|
|
1962
|
+
listener(...args);
|
|
1963
|
+
} catch (error) {
|
|
1964
|
+
console.error(`Error in event listener for '${String(event)}':`, error);
|
|
1965
|
+
}
|
|
1966
|
+
return true;
|
|
1967
|
+
}
|
|
1968
|
+
return false;
|
|
1969
|
+
}
|
|
1970
|
+
removeAllListeners(event) {
|
|
1971
|
+
if (event) this.listeners.delete(event);
|
|
1972
|
+
else this.listeners.clear();
|
|
1973
|
+
return this;
|
|
1974
|
+
}
|
|
1975
|
+
};
|
|
1976
|
+
const defaultBackoff = (attempt) => {
|
|
1977
|
+
const base = Math.min(3e4, 2 ** (attempt - 1) * 500);
|
|
1978
|
+
const jitter = Math.floor(base * .25 * Math.random());
|
|
1979
|
+
return base + jitter;
|
|
1980
|
+
};
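With this default, the pre-jitter delay is 500 ms on attempt 1, 1 s on attempt 2, 8 s on attempt 5, and is capped at 30 s from attempt 7 onward (2^6 * 500 ms would be 32 s); a uniform jitter of up to 25% of the base is then added on top.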
|
|
1981
|
+
var _Worker = class extends TypedEventEmitter {
|
|
1982
|
+
constructor(opts) {
|
|
1983
|
+
super();
|
|
1984
|
+
this.stopping = false;
|
|
1985
|
+
this.ready = false;
|
|
1986
|
+
this.closed = false;
|
|
1987
|
+
this.blockingClient = null;
|
|
1988
|
+
this.jobsInProgress = /* @__PURE__ */ new Set();
|
|
1989
|
+
this.lastJobPickupTime = Date.now();
|
|
1990
|
+
this.totalJobsProcessed = 0;
|
|
1991
|
+
this.blockingStats = {
|
|
1992
|
+
totalBlockingCalls: 0,
|
|
1993
|
+
consecutiveEmptyReserves: 0,
|
|
1994
|
+
lastActivityTime: Date.now()
|
|
1995
|
+
};
|
|
1996
|
+
this.emptyReserveBackoffMs = 0;
|
|
1997
|
+
if (!opts.handler || typeof opts.handler !== "function") throw new Error("Worker handler must be a function");
|
|
1998
|
+
this.opts = opts;
|
|
1999
|
+
this.q = opts.queue;
|
|
2000
|
+
this.name = opts.name ?? this.q.name;
|
|
2001
|
+
this.logger = typeof opts.logger === "object" ? opts.logger : new Logger(!!opts.logger, this.name);
|
|
2002
|
+
this.handler = opts.handler;
|
|
2003
|
+
const jobTimeoutMs = this.q.jobTimeoutMs ?? 3e4;
|
|
2004
|
+
this.hbMs = opts.heartbeatMs ?? Math.max(1e3, Math.floor(jobTimeoutMs / 3));
|
|
2005
|
+
this.onError = opts.onError;
|
|
2006
|
+
this.maxAttempts = opts.maxAttempts ?? this.q.maxAttemptsDefault ?? 3;
|
|
2007
|
+
this.backoff = opts.backoff ?? defaultBackoff;
|
|
2008
|
+
this.enableCleanup = opts.enableCleanup ?? true;
|
|
2009
|
+
this.cleanupMs = opts.cleanupIntervalMs ?? 6e4;
|
|
2010
|
+
this.schedulerMs = opts.schedulerIntervalMs ?? 1e3;
|
|
2011
|
+
this.blockingTimeoutSec = opts.blockingTimeoutSec ?? 5;
|
|
2012
|
+
this.concurrency = Math.max(1, opts.concurrency ?? 1);
|
|
2013
|
+
this.stalledInterval = opts.stalledInterval ?? (this.concurrency > 50 ? 6e4 : 3e4);
|
|
2014
|
+
this.maxStalledCount = opts.maxStalledCount ?? (this.concurrency > 50 ? 2 : 1);
|
|
2015
|
+
this.stalledGracePeriod = opts.stalledGracePeriod ?? 5e3;
|
|
2016
|
+
this.setupRedisEventHandlers();
|
|
2017
|
+
if (this.q.orderingDelayMs > 0) this.q.startPromoter().catch((err) => {
|
|
2018
|
+
this.logger.error("Failed to start staging promoter:", err);
|
|
2019
|
+
});
|
|
2020
|
+
this.run();
|
|
2021
|
+
}
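Because the constructor calls run() at the end, constructing a Worker starts processing immediately. A minimal sketch using the option names read above; it assumes a Queue instance built elsewhere (the Queue constructor is not part of this excerpt) and that emitted Job instances expose an id field:

import { Worker } from "groupmq-plus";

// Sketch: four concurrent handlers with a custom attempt ceiling.
function startWorker(queue) {
  const worker = new Worker({
    queue,
    concurrency: 4,
    maxAttempts: 5,
    handler: async (job) => {
      // job.data and job.groupId are the fields carried by reserved jobs above
      return { ok: true, groupId: job.groupId };
    },
    onError: (err) => console.error("worker error", err),
  });
  worker.on("completed", (job) => console.log("completed", job.id));
  worker.on("failed", (job) => console.warn("failed", job.id));
  return worker;
}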
|
|
2022
|
+
get isClosed() {
|
|
2023
|
+
return this.closed;
|
|
2024
|
+
}
|
|
2025
|
+
/**
|
|
2026
|
+
* Add jitter to prevent thundering herd problems in high-concurrency environments
|
|
2027
|
+
* @param baseInterval The base interval in milliseconds
|
|
2028
|
+
* @param jitterPercent Percentage of jitter to add (0-1, default 0.1 for 10%)
|
|
2029
|
+
* @returns The interval with jitter applied
|
|
2030
|
+
*/
|
|
2031
|
+
addJitter(baseInterval, jitterPercent = .1) {
|
|
2032
|
+
const jitter = Math.random() * baseInterval * jitterPercent;
|
|
2033
|
+
return baseInterval + jitter;
|
|
2034
|
+
}
|
|
2035
|
+
setupRedisEventHandlers() {
|
|
2036
|
+
const redis = this.q.redis;
|
|
2037
|
+
if (redis) {
|
|
2038
|
+
this.redisCloseHandler = () => {
|
|
2039
|
+
this.ready = false;
|
|
2040
|
+
this.emit("ioredis:close");
|
|
2041
|
+
};
|
|
2042
|
+
this.redisErrorHandler = (error) => {
|
|
2043
|
+
this.emit("error", error);
|
|
2044
|
+
};
|
|
2045
|
+
this.redisReadyHandler = () => {
|
|
2046
|
+
if (!this.ready && !this.stopping) {
|
|
2047
|
+
this.ready = true;
|
|
2048
|
+
this.emit("ready");
|
|
2049
|
+
}
|
|
2050
|
+
};
|
|
2051
|
+
redis.on("close", this.redisCloseHandler);
|
|
2052
|
+
redis.on("error", this.redisErrorHandler);
|
|
2053
|
+
redis.on("ready", this.redisReadyHandler);
|
|
2054
|
+
}
|
|
2055
|
+
}
|
|
2056
|
+
async run() {
|
|
2057
|
+
if (this.runLoopPromise) return this.runLoopPromise;
|
|
2058
|
+
const runPromise = this._runLoop();
|
|
2059
|
+
this.runLoopPromise = runPromise;
|
|
2060
|
+
return runPromise;
|
|
2061
|
+
}
|
|
2062
|
+
async _runLoop() {
|
|
2063
|
+
this.logger.info(`🚀 Worker ${this.name} starting...`);
|
|
2064
|
+
const strategyPollInterval = this.opts.strategyPollInterval ?? 50;
|
|
2065
|
+
try {
|
|
2066
|
+
this.blockingClient = this.q.redis.duplicate({
|
|
2067
|
+
enableAutoPipelining: true,
|
|
2068
|
+
maxRetriesPerRequest: null,
|
|
2069
|
+
retryStrategy: (times) => {
|
|
2070
|
+
return Math.max(Math.min(Math.exp(times) * 1e3, 2e4), 1e3);
|
|
2071
|
+
}
|
|
2072
|
+
});
|
|
2073
|
+
this.blockingClient.on("error", (err) => {
|
|
2074
|
+
if (!this.q.isConnectionError(err)) this.logger.error("Blocking client error (non-connection):", err);
|
|
2075
|
+
else this.logger.warn("Blocking client connection error:", err.message);
|
|
2076
|
+
this.emit("error", err instanceof Error ? err : new Error(String(err)));
|
|
2077
|
+
});
|
|
2078
|
+
this.blockingClient.on("close", () => {
|
|
2079
|
+
if (!this.stopping && !this.closed) this.logger.warn("Blocking client disconnected, will reconnect on next operation");
|
|
2080
|
+
});
|
|
2081
|
+
this.blockingClient.on("reconnecting", () => {
|
|
2082
|
+
if (!this.stopping && !this.closed) this.logger.info("Blocking client reconnecting...");
|
|
2083
|
+
});
|
|
2084
|
+
this.blockingClient.on("ready", () => {
|
|
2085
|
+
if (!this.stopping && !this.closed) this.logger.info("Blocking client ready");
|
|
2086
|
+
});
|
|
2087
|
+
} catch (err) {
|
|
2088
|
+
this.logger.error("Failed to create blocking client:", err);
|
|
2089
|
+
this.blockingClient = null;
|
|
2090
|
+
}
|
|
2091
|
+
if (this.enableCleanup) {
|
|
2092
|
+
this.cleanupTimer = setInterval(async () => {
|
|
2093
|
+
try {
|
|
2094
|
+
await this.q.cleanup();
|
|
2095
|
+
} catch (err) {
|
|
2096
|
+
this.onError?.(err);
|
|
2097
|
+
}
|
|
2098
|
+
}, this.addJitter(this.cleanupMs));
|
|
2099
|
+
const schedulerInterval = Math.min(this.schedulerMs, this.cleanupMs);
|
|
2100
|
+
this.schedulerTimer = setInterval(async () => {
|
|
2101
|
+
try {
|
|
2102
|
+
await this.q.runSchedulerOnce();
|
|
2103
|
+
} catch (_err) {}
|
|
2104
|
+
}, this.addJitter(schedulerInterval));
|
|
2105
|
+
}
|
|
2106
|
+
this.startStalledChecker();
|
|
2107
|
+
let connectionRetries = 0;
|
|
2108
|
+
const maxConnectionRetries = 10;
|
|
2109
|
+
const asyncFifoQueue = new AsyncFifoQueue(true);
|
|
2110
|
+
while (!this.stopping || asyncFifoQueue.numTotal() > 0) try {
|
|
2111
|
+
while (!this.stopping) {
|
|
2112
|
+
if (asyncFifoQueue.numTotal() >= this.concurrency) break;
|
|
2113
|
+
this.blockingStats.totalBlockingCalls++;
|
|
2114
|
+
if (this.blockingStats.totalBlockingCalls >= 1e9) this.blockingStats.totalBlockingCalls = 0;
|
|
2115
|
+
this.logger.debug(`Fetching job (call #${this.blockingStats.totalBlockingCalls}, processing: ${this.jobsInProgress.size}/${this.concurrency}, queue: ${asyncFifoQueue.numTotal()} (queued: ${asyncFifoQueue.numQueued()}, pending: ${asyncFifoQueue.numPending()}), total: ${asyncFifoQueue.numTotal()}/${this.concurrency})...`);
|
|
2116
|
+
let fetchedJob;
|
|
2117
|
+
if (this.opts.strategy) fetchedJob = (async () => {
|
|
2118
|
+
const targetGroupId = await this.opts.strategy.getNextGroup(this.q);
|
|
2119
|
+
if (!targetGroupId) {
|
|
2120
|
+
await this.delay(strategyPollInterval);
|
|
2121
|
+
return null;
|
|
2122
|
+
}
|
|
2123
|
+
const job$2 = await this.q.reserveAtomic(targetGroupId);
|
|
2124
|
+
if (!job$2) return null;
|
|
2125
|
+
return job$2;
|
|
2126
|
+
})();
|
|
2127
|
+
else {
|
|
2128
|
+
const availableCapacity = this.concurrency - asyncFifoQueue.numTotal();
|
|
2129
|
+
if (availableCapacity > 0 && asyncFifoQueue.numTotal() === 0) {
|
|
2130
|
+
const batchSize = Math.min(availableCapacity, 8);
|
|
2131
|
+
const batchJobs = await this.q.reserveBatch(batchSize);
|
|
2132
|
+
if (batchJobs.length > 0) {
|
|
2133
|
+
this.logger.debug(`Batch reserved ${batchJobs.length} jobs`);
|
|
2134
|
+
for (const job$2 of batchJobs) asyncFifoQueue.add(Promise.resolve(job$2));
|
|
2135
|
+
connectionRetries = 0;
|
|
2136
|
+
this.lastJobPickupTime = Date.now();
|
|
2137
|
+
this.blockingStats.consecutiveEmptyReserves = 0;
|
|
2138
|
+
this.blockingStats.lastActivityTime = Date.now();
|
|
2139
|
+
this.emptyReserveBackoffMs = 0;
|
|
2140
|
+
continue;
|
|
2141
|
+
}
|
|
2142
|
+
}
|
|
2143
|
+
const allowBlocking = this.blockingStats.consecutiveEmptyReserves >= 2 && asyncFifoQueue.numTotal() === 0 && this.jobsInProgress.size === 0;
|
|
2144
|
+
const adaptiveTimeout = this.blockingTimeoutSec;
|
|
2145
|
+
fetchedJob = allowBlocking ? this.q.reserveBlocking(adaptiveTimeout, void 0, this.blockingClient ?? void 0) : this.q.reserve();
|
|
2146
|
+
}
|
|
2147
|
+
asyncFifoQueue.add(fetchedJob);
|
|
2148
|
+
const job$1 = await fetchedJob;
|
|
2149
|
+
if (job$1) {
|
|
2150
|
+
connectionRetries = 0;
|
|
2151
|
+
this.lastJobPickupTime = Date.now();
|
|
2152
|
+
this.blockingStats.consecutiveEmptyReserves = 0;
|
|
2153
|
+
this.blockingStats.lastActivityTime = Date.now();
|
|
2154
|
+
this.emptyReserveBackoffMs = 0;
|
|
2155
|
+
this.logger.debug(`Fetched job ${job$1.id} from group ${job$1.groupId}`);
|
|
2156
|
+
} else {
|
|
2157
|
+
if (this.opts.strategy) {
|
|
2158
|
+
if (asyncFifoQueue.numTotal() === 0 && this.jobsInProgress.size === 0) break;
|
|
2159
|
+
}
|
|
2160
|
+
this.blockingStats.consecutiveEmptyReserves++;
|
|
2161
|
+
if (this.blockingStats.consecutiveEmptyReserves % 50 === 0) this.logger.debug(`No job available (consecutive empty: ${this.blockingStats.consecutiveEmptyReserves})`);
|
|
2162
|
+
const backoffThreshold = this.concurrency >= 100 ? 5 : 3;
|
|
2163
|
+
if (this.blockingStats.consecutiveEmptyReserves > backoffThreshold && asyncFifoQueue.numTotal() === 0 && this.jobsInProgress.size === 0) {
|
|
2164
|
+
const maxBackoff = this.concurrency >= 100 ? 2e3 : 5e3;
|
|
2165
|
+
if (this.emptyReserveBackoffMs === 0) this.emptyReserveBackoffMs = this.concurrency >= 100 ? 100 : 50;
|
|
2166
|
+
else this.emptyReserveBackoffMs = Math.min(maxBackoff, Math.max(100, this.emptyReserveBackoffMs * 1.2));
|
|
2167
|
+
if (this.blockingStats.consecutiveEmptyReserves % 20 === 0) this.logger.debug(`Applying backoff: ${Math.round(this.emptyReserveBackoffMs)}ms (consecutive empty: ${this.blockingStats.consecutiveEmptyReserves}, jobs in progress: ${this.jobsInProgress.size})`);
|
|
2168
|
+
await this.delay(this.emptyReserveBackoffMs);
|
|
2169
|
+
}
|
|
2170
|
+
if (asyncFifoQueue.numTotal() === 0 && this.jobsInProgress.size === 0) break;
|
|
2171
|
+
if (asyncFifoQueue.numTotal() > 0 || this.jobsInProgress.size > 0) break;
|
|
2172
|
+
}
|
|
2173
|
+
}
|
|
2174
|
+
let job;
|
|
2175
|
+
do
|
|
2176
|
+
job = await asyncFifoQueue.fetch() ?? void 0;
|
|
2177
|
+
while (!job && asyncFifoQueue.numQueued() > 0);
|
|
2178
|
+
if (job && typeof job === "object" && "id" in job) {
|
|
2179
|
+
this.totalJobsProcessed++;
|
|
2180
|
+
this.logger.debug(`Processing job ${job.id} from group ${job.groupId} immediately`);
|
|
2181
|
+
const processingPromise = this.processJob(job, () => {
|
|
2182
|
+
return asyncFifoQueue.numTotal() <= this.concurrency;
|
|
2183
|
+
}, this.jobsInProgress);
|
|
2184
|
+
asyncFifoQueue.add(processingPromise);
|
|
2185
|
+
}
|
|
2186
|
+
} catch (err) {
|
|
2187
|
+
if (this.stopping) return;
|
|
2188
|
+
if (this.q.isConnectionError(err)) {
|
|
2189
|
+
connectionRetries++;
|
|
2190
|
+
this.logger.error(`Connection error (retry ${connectionRetries}/${maxConnectionRetries}):`, err);
|
|
2191
|
+
if (connectionRetries >= maxConnectionRetries) {
|
|
2192
|
+
this.logger.error(`⚠️ Max connection retries (${maxConnectionRetries}) exceeded! Worker will continue but may be experiencing persistent Redis issues.`);
|
|
2193
|
+
this.emit("error", /* @__PURE__ */ new Error(`Max connection retries (${maxConnectionRetries}) exceeded - worker continuing with backoff`));
|
|
2194
|
+
await this.delay(2e4);
|
|
2195
|
+
connectionRetries = 0;
|
|
2196
|
+
} else {
|
|
2197
|
+
const delayMs = Math.max(Math.min(Math.exp(connectionRetries) * 1e3, 2e4), 1e3);
|
|
2198
|
+
this.logger.debug(`Waiting ${Math.round(delayMs)}ms before retry (exponential backoff)`);
|
|
2199
|
+
await this.delay(delayMs);
|
|
2200
|
+
}
|
|
2201
|
+
} else {
|
|
2202
|
+
this.logger.error(`Worker loop error (non-connection, continuing):`, err);
|
|
2203
|
+
this.emit("error", err instanceof Error ? err : new Error(String(err)));
|
|
2204
|
+
connectionRetries = 0;
|
|
2205
|
+
await this.delay(100);
|
|
2206
|
+
}
|
|
2207
|
+
this.onError?.(err);
|
|
2208
|
+
}
|
|
2209
|
+
this.logger.info(`Stopped`);
|
|
2210
|
+
}
|
|
2211
|
+
async delay(ms) {
|
|
2212
|
+
return new Promise((resolve) => setTimeout(resolve, ms));
|
|
2213
|
+
}
|
|
2214
|
+
/**
|
|
2215
|
+
* Process a job and return the next job if atomic completion succeeds
|
|
2216
|
+
* This matches BullMQ's processJob signature
|
|
2217
|
+
*/
|
|
2218
|
+
async processJob(job, fetchNextCallback, jobsInProgress) {
|
|
2219
|
+
const existingItem = Array.from(jobsInProgress).find((item) => item.job.id === job.id);
|
|
2220
|
+
let inProgressItem;
|
|
2221
|
+
if (existingItem) {
|
|
2222
|
+
existingItem.ts = Date.now();
|
|
2223
|
+
inProgressItem = existingItem;
|
|
2224
|
+
} else {
|
|
2225
|
+
inProgressItem = {
|
|
2226
|
+
job,
|
|
2227
|
+
ts: Date.now()
|
|
2228
|
+
};
|
|
2229
|
+
jobsInProgress.add(inProgressItem);
|
|
2230
|
+
}
|
|
2231
|
+
try {
|
|
2232
|
+
const nextJob = await this.processSingleJob(job, fetchNextCallback);
|
|
2233
|
+
if (nextJob && typeof nextJob === "object" && "id" in nextJob && "groupId" in nextJob) {
|
|
2234
|
+
const chainedItem = {
|
|
2235
|
+
job: nextJob,
|
|
2236
|
+
ts: Date.now()
|
|
2237
|
+
};
|
|
2238
|
+
jobsInProgress.add(chainedItem);
|
|
2239
|
+
jobsInProgress.delete(inProgressItem);
|
|
2240
|
+
return nextJob;
|
|
2241
|
+
}
|
|
2242
|
+
return nextJob;
|
|
2243
|
+
} finally {
|
|
2244
|
+
if (jobsInProgress.has(inProgressItem)) jobsInProgress.delete(inProgressItem);
|
|
2245
|
+
}
|
|
2246
|
+
}
|
|
2247
|
+
/**
|
|
2248
|
+
* Complete a job and try to atomically get next job from same group
|
|
2249
|
+
*/
|
|
2250
|
+
async completeJob(job, handlerResult, fetchNextCallback, processedOn, finishedOn) {
|
|
2251
|
+
if (fetchNextCallback?.()) {
|
|
2252
|
+
const nextJob = await this.q.completeAndReserveNextWithMetadata(job.id, job.groupId, handlerResult, {
|
|
2253
|
+
processedOn: processedOn || Date.now(),
|
|
2254
|
+
finishedOn: finishedOn || Date.now(),
|
|
2255
|
+
attempts: job.attempts,
|
|
2256
|
+
maxAttempts: job.maxAttempts
|
|
2257
|
+
});
|
|
2258
|
+
if (nextJob) {
|
|
2259
|
+
this.logger.debug(`Got next job ${nextJob.id} from same group ${nextJob.groupId} atomically`);
|
|
2260
|
+
return nextJob;
|
|
2261
|
+
}
|
|
2262
|
+
this.logger.debug(`Atomic chaining returned nil for job ${job.id} - job completed, but no next job chained`);
|
|
2263
|
+
if (Math.random() < .1) await new Promise((resolve) => setTimeout(resolve, Math.random() * 100));
|
|
2264
|
+
} else await this.q.completeWithMetadata(job, handlerResult, {
|
|
2265
|
+
processedOn: processedOn || Date.now(),
|
|
2266
|
+
finishedOn: finishedOn || Date.now(),
|
|
2267
|
+
attempts: job.attempts,
|
|
2268
|
+
maxAttempts: job.maxAttempts
|
|
2269
|
+
});
|
|
2270
|
+
}
|
|
2271
|
+
/**
|
|
2272
|
+
* Start the stalled job checker
|
|
2273
|
+
* Checks periodically for jobs that exceeded their deadline and recovers or fails them
|
|
2274
|
+
*/
|
|
2275
|
+
startStalledChecker() {
|
|
2276
|
+
if (this.stalledInterval <= 0) return;
|
|
2277
|
+
this.stalledCheckTimer = setInterval(async () => {
|
|
2278
|
+
try {
|
|
2279
|
+
await this.checkStalled();
|
|
2280
|
+
} catch (err) {
|
|
2281
|
+
this.logger.error("Error in stalled job checker:", err);
|
|
2282
|
+
this.emit("error", err instanceof Error ? err : new Error(String(err)));
|
|
2283
|
+
}
|
|
2284
|
+
}, this.stalledInterval);
|
|
2285
|
+
}
|
|
2286
|
+
/**
|
|
2287
|
+
* Check for stalled jobs and recover or fail them
|
|
2288
|
+
* A job is stalled when its worker has crashed or lost its connection
|
|
2289
|
+
*/
|
|
2290
|
+
async checkStalled() {
|
|
2291
|
+
if (this.stopping || this.closed) return;
|
|
2292
|
+
try {
|
|
2293
|
+
const now = Date.now();
|
|
2294
|
+
const results = await this.q.checkStalledJobs(now, this.stalledGracePeriod, this.maxStalledCount);
|
|
2295
|
+
if (results.length > 0) for (let i = 0; i < results.length; i += 3) {
|
|
2296
|
+
const jobId = results[i];
|
|
2297
|
+
const groupId = results[i + 1];
|
|
2298
|
+
const action = results[i + 2];
|
|
2299
|
+
if (action === "recovered") {
|
|
2300
|
+
this.logger.info(`Recovered stalled job ${jobId} from group ${groupId}`);
|
|
2301
|
+
this.emit("stalled", jobId, groupId);
|
|
2302
|
+
} else if (action === "failed") {
|
|
2303
|
+
this.logger.warn(`Failed stalled job ${jobId} from group ${groupId} (exceeded max stalled count)`);
|
|
2304
|
+
this.emit("stalled", jobId, groupId);
|
|
2305
|
+
}
|
|
2306
|
+
}
|
|
2307
|
+
} catch (err) {
|
|
2308
|
+
this.logger.error("Error checking stalled jobs:", err);
|
|
2309
|
+
}
|
|
2310
|
+
}
|
|
2311
|
+
/**
|
|
2312
|
+
* Get worker performance metrics
|
|
2313
|
+
*/
|
|
2314
|
+
getWorkerMetrics() {
|
|
2315
|
+
const now = Date.now();
|
|
2316
|
+
return {
|
|
2317
|
+
name: this.name,
|
|
2318
|
+
totalJobsProcessed: this.totalJobsProcessed,
|
|
2319
|
+
lastJobPickupTime: this.lastJobPickupTime,
|
|
2320
|
+
timeSinceLastJob: this.lastJobPickupTime > 0 ? now - this.lastJobPickupTime : null,
|
|
2321
|
+
blockingStats: { ...this.blockingStats },
|
|
2322
|
+
isProcessing: this.jobsInProgress.size > 0,
|
|
2323
|
+
jobsInProgressCount: this.jobsInProgress.size,
|
|
2324
|
+
jobsInProgress: Array.from(this.jobsInProgress).map((item) => ({
|
|
2325
|
+
jobId: item.job.id,
|
|
2326
|
+
groupId: item.job.groupId,
|
|
2327
|
+
processingTimeMs: now - item.ts
|
|
2328
|
+
}))
|
|
2329
|
+
};
|
|
2330
|
+
}
|
|
2331
|
+
/**
|
|
2332
|
+
* Stop the worker gracefully
|
|
2333
|
+
* @param gracefulTimeoutMs Maximum time to wait for current job to finish (default: 30 seconds)
|
|
2334
|
+
*/
|
|
2335
|
+
async close(gracefulTimeoutMs = 3e4) {
|
|
2336
|
+
this.stopping = true;
|
|
2337
|
+
await this.delay(100);
|
|
2338
|
+
if (this.cleanupTimer) clearInterval(this.cleanupTimer);
|
|
2339
|
+
if (this.schedulerTimer) clearInterval(this.schedulerTimer);
|
|
2340
|
+
if (this.stalledCheckTimer) clearInterval(this.stalledCheckTimer);
|
|
2341
|
+
const startTime = Date.now();
|
|
2342
|
+
while (this.jobsInProgress.size > 0 && Date.now() - startTime < gracefulTimeoutMs) await sleep(100);
|
|
2343
|
+
if (this.blockingClient) {
|
|
2344
|
+
try {
|
|
2345
|
+
if (this.jobsInProgress.size > 0 && gracefulTimeoutMs > 0) {
|
|
2346
|
+
this.logger.debug("Gracefully closing blocking client (quit)...");
|
|
2347
|
+
await this.blockingClient.quit();
|
|
2348
|
+
} else {
|
|
2349
|
+
this.logger.debug("Force closing blocking client (disconnect)...");
|
|
2350
|
+
this.blockingClient.disconnect();
|
|
2351
|
+
}
|
|
2352
|
+
} catch (err) {
|
|
2353
|
+
this.logger.debug("Error closing blocking client:", err);
|
|
2354
|
+
}
|
|
2355
|
+
this.blockingClient = null;
|
|
2356
|
+
}
|
|
2357
|
+
if (this.runLoopPromise) {
|
|
2358
|
+
const runLoopTimeout = this.jobsInProgress.size > 0 ? gracefulTimeoutMs : 2e3;
|
|
2359
|
+
const timeoutPromise = new Promise((resolve) => {
|
|
2360
|
+
setTimeout(resolve, runLoopTimeout);
|
|
2361
|
+
});
|
|
2362
|
+
try {
|
|
2363
|
+
await Promise.race([this.runLoopPromise, timeoutPromise]);
|
|
2364
|
+
} catch (err) {
|
|
2365
|
+
this.logger.warn("Error while waiting for run loop to exit:", err);
|
|
2366
|
+
}
|
|
2367
|
+
}
|
|
2368
|
+
if (this.jobsInProgress.size > 0) {
|
|
2369
|
+
this.logger.warn(`Worker stopped with ${this.jobsInProgress.size} jobs still processing after ${gracefulTimeoutMs}ms timeout.`);
|
|
2370
|
+
const nowWall = Date.now();
|
|
2371
|
+
for (const item of this.jobsInProgress) this.emit("graceful-timeout", Job.fromReserved(this.q, item.job, {
|
|
2372
|
+
processedOn: item.ts,
|
|
2373
|
+
finishedOn: nowWall,
|
|
2374
|
+
status: "active"
|
|
2375
|
+
}));
|
|
2376
|
+
}
|
|
2377
|
+
this.jobsInProgress.clear();
|
|
2378
|
+
this.ready = false;
|
|
2379
|
+
this.closed = true;
|
|
2380
|
+
try {
|
|
2381
|
+
const redis = this.q.redis;
|
|
2382
|
+
if (redis) {
|
|
2383
|
+
if (this.redisCloseHandler) redis.off?.("close", this.redisCloseHandler);
|
|
2384
|
+
if (this.redisErrorHandler) redis.off?.("error", this.redisErrorHandler);
|
|
2385
|
+
if (this.redisReadyHandler) redis.off?.("ready", this.redisReadyHandler);
|
|
2386
|
+
}
|
|
2387
|
+
} catch (_e) {}
|
|
2388
|
+
this.emit("closed");
|
|
2389
|
+
}
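A graceful-shutdown sketch tying close() to process signals: the worker is closed first so in-flight jobs get up to the timeout to finish, then the queue's Redis connections are released (queue.close() is defined earlier in this file).

// Sketch: SIGTERM handler for a containerized worker process.
function installShutdown(worker, queue) {
  process.on("SIGTERM", async () => {
    await worker.close(30_000);
    await queue.close();
    process.exit(0);
  });
}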
|
|
2390
|
+
/**
|
|
2391
|
+
* Get information about the first currently processing job (if any)
|
|
2392
|
+
* For concurrency > 1, returns the oldest job in progress
|
|
2393
|
+
*/
|
|
2394
|
+
getCurrentJob() {
|
|
2395
|
+
if (this.jobsInProgress.size === 0) return null;
|
|
2396
|
+
const oldest = Array.from(this.jobsInProgress)[0];
|
|
2397
|
+
const now = Date.now();
|
|
2398
|
+
return {
|
|
2399
|
+
job: oldest.job,
|
|
2400
|
+
processingTimeMs: now - oldest.ts
|
|
2401
|
+
};
|
|
2402
|
+
}
|
|
2403
|
+
/**
|
|
2404
|
+
* Get information about all currently processing jobs
|
|
2405
|
+
*/
|
|
2406
|
+
getCurrentJobs() {
|
|
2407
|
+
const now = Date.now();
|
|
2408
|
+
return Array.from(this.jobsInProgress).map((item) => ({
|
|
2409
|
+
job: item.job,
|
|
2410
|
+
processingTimeMs: now - item.ts
|
|
2411
|
+
}));
|
|
2412
|
+
}
|
|
2413
|
+
/**
|
|
2414
|
+
* Check if the worker is currently processing any jobs
|
|
2415
|
+
*/
|
|
2416
|
+
isProcessing() {
|
|
2417
|
+
return this.jobsInProgress.size > 0;
|
|
2418
|
+
}
|
|
2419
|
+
async add(opts) {
|
|
2420
|
+
return this.q.add(opts);
|
|
2421
|
+
}
|
|
2422
|
+
async processSingleJob(job, fetchNextCallback) {
|
|
2423
|
+
const jobStartWallTime = Date.now();
|
|
2424
|
+
let hbTimer;
|
|
2425
|
+
let heartbeatDelayTimer;
|
|
2426
|
+
const startHeartbeat = () => {
|
|
2427
|
+
const jobTimeout = this.q.jobTimeoutMs || 3e4;
|
|
2428
|
+
const minInterval = Math.min(this.hbMs, Math.floor(jobTimeout / 3), 1e4);
|
|
2429
|
+
this.logger.debug(`Starting heartbeat for job ${job.id} (interval: ${minInterval}ms, concurrency: ${this.concurrency})`);
|
|
2430
|
+
hbTimer = setInterval(async () => {
|
|
2431
|
+
try {
|
|
2432
|
+
if (await this.q.heartbeat(job) === 0) {
|
|
2433
|
+
this.logger.warn(`Heartbeat failed for job ${job.id} - job may have been removed or completed elsewhere`);
|
|
2434
|
+
if (hbTimer) clearInterval(hbTimer);
|
|
2435
|
+
}
|
|
2436
|
+
} catch (e) {
|
|
2437
|
+
const isConnErr = this.q.isConnectionError(e);
|
|
2438
|
+
if (!isConnErr || !this.stopping) this.logger.error(`Heartbeat error for job ${job.id}:`, e instanceof Error ? e.message : String(e));
|
|
2439
|
+
this.onError?.(e, job);
|
|
2440
|
+
if (!isConnErr || !this.stopping) this.emit("error", e instanceof Error ? e : new Error(String(e)));
|
|
2441
|
+
}
|
|
2442
|
+
}, minInterval);
|
|
2443
|
+
};
|
|
2444
|
+
try {
|
|
2445
|
+
const jobTimeout = this.q.jobTimeoutMs || 3e4;
|
|
2446
|
+
const heartbeatThreshold = Math.min(jobTimeout * .1, 2e3);
|
|
2447
|
+
heartbeatDelayTimer = setTimeout(() => {
|
|
2448
|
+
startHeartbeat();
|
|
2449
|
+
}, heartbeatThreshold);
|
|
2450
|
+
const handlerResult = await this.handler(job);
|
|
2451
|
+
if (heartbeatDelayTimer) clearTimeout(heartbeatDelayTimer);
|
|
2452
|
+
if (hbTimer) clearInterval(hbTimer);
|
|
2453
|
+
const finishedAtWall = Date.now();
|
|
2454
|
+
const nextJob = await this.completeJob(job, handlerResult, fetchNextCallback, jobStartWallTime, finishedAtWall);
|
|
2455
|
+
this.blockingStats.consecutiveEmptyReserves = 0;
|
|
2456
|
+
this.emptyReserveBackoffMs = 0;
|
|
2457
|
+
this.emit("completed", Job.fromReserved(this.q, job, {
|
|
2458
|
+
processedOn: jobStartWallTime,
|
|
2459
|
+
finishedOn: finishedAtWall,
|
|
2460
|
+
returnvalue: handlerResult,
|
|
2461
|
+
status: "completed"
|
|
2462
|
+
}));
|
|
2463
|
+
return nextJob;
|
|
2464
|
+
} catch (err) {
|
|
2465
|
+
if (heartbeatDelayTimer) clearTimeout(heartbeatDelayTimer);
|
|
2466
|
+
if (hbTimer) clearInterval(hbTimer);
|
|
2467
|
+
await this.handleJobFailure(err, job, jobStartWallTime);
|
|
2468
|
+
}
|
|
2469
|
+
}
|
|
2470
|
+
/**
|
|
2471
|
+
* Handle job failure: emit events, retry or dead-letter
|
|
2472
|
+
*/
|
|
2473
|
+
async handleJobFailure(err, job, jobStartWallTime) {
|
|
2474
|
+
this.onError?.(err, job);
|
|
2475
|
+
this.blockingStats.consecutiveEmptyReserves = 0;
|
|
2476
|
+
this.emptyReserveBackoffMs = 0;
|
|
2477
|
+
try {
|
|
2478
|
+
this.emit("error", err instanceof Error ? err : new Error(String(err)));
|
|
2479
|
+
} catch (_emitError) {}
|
|
2480
|
+
const failedAt = Date.now();
|
|
2481
|
+
this.emit("failed", Job.fromReserved(this.q, job, {
|
|
2482
|
+
processedOn: jobStartWallTime,
|
|
2483
|
+
finishedOn: failedAt,
|
|
2484
|
+
failedReason: err instanceof Error ? err.message : String(err),
|
|
2485
|
+
stacktrace: err instanceof Error ? err.stack : typeof err === "object" && err !== null ? err.stack : void 0,
|
|
2486
|
+
status: "failed"
|
|
2487
|
+
}));
|
|
2488
|
+
const nextAttempt = job.attempts + 1;
|
|
2489
|
+
const backoffMs = this.backoff(nextAttempt);
|
|
2490
|
+
if (nextAttempt >= this.maxAttempts) {
|
|
2491
|
+
await this.deadLetterJob(err, job, jobStartWallTime, failedAt, nextAttempt);
|
|
2492
|
+
return;
|
|
2493
|
+
}
|
|
2494
|
+
if (await this.q.retry(job.id, backoffMs) === -1) {
|
|
2495
|
+
await this.deadLetterJob(err, job, jobStartWallTime, failedAt, job.maxAttempts);
|
|
2496
|
+
return;
|
|
2497
|
+
}
|
|
2498
|
+
await this.recordFailureAttempt(err, job, jobStartWallTime, failedAt, nextAttempt);
|
|
2499
|
+
}
|
|
2500
|
+
/**
|
|
2501
|
+
* Dead-letter a job that exceeded max attempts
|
|
2502
|
+
*/
|
|
2503
|
+
async deadLetterJob(err, job, processedOn, finishedOn, attempts) {
|
|
2504
|
+
this.logger.info(`Dead lettering job ${job.id} from group ${job.groupId} (attempts: ${attempts}/${job.maxAttempts})`);
|
|
2505
|
+
const errObj = err instanceof Error ? err : new Error(String(err));
|
|
2506
|
+
try {
|
|
2507
|
+
await this.q.recordFinalFailure({
|
|
2508
|
+
id: job.id,
|
|
2509
|
+
groupId: job.groupId
|
|
2510
|
+
}, {
|
|
2511
|
+
name: errObj.name,
|
|
2512
|
+
message: errObj.message,
|
|
2513
|
+
stack: errObj.stack
|
|
2514
|
+
}, {
|
|
2515
|
+
processedOn,
|
|
2516
|
+
finishedOn,
|
|
2517
|
+
attempts,
|
|
2518
|
+
maxAttempts: job.maxAttempts,
|
|
2519
|
+
data: job.data
|
|
2520
|
+
});
|
|
2521
|
+
} catch (e) {
|
|
2522
|
+
this.logger.warn("Failed to record final failure", e);
|
|
2523
|
+
}
|
|
2524
|
+
await this.q.deadLetter(job.id, job.groupId);
|
|
2525
|
+
}
|
|
2526
|
+
/**
|
|
2527
|
+
* Record a failed attempt (not final)
|
|
2528
|
+
*/
|
|
2529
|
+
async recordFailureAttempt(err, job, processedOn, finishedOn, attempts) {
|
|
2530
|
+
const errObj = err instanceof Error ? err : new Error(String(err));
|
|
2531
|
+
try {
|
|
2532
|
+
await this.q.recordAttemptFailure({
|
|
2533
|
+
id: job.id,
|
|
2534
|
+
groupId: job.groupId
|
|
2535
|
+
}, {
|
|
2536
|
+
name: errObj.name,
|
|
2537
|
+
message: errObj.message,
|
|
2538
|
+
stack: errObj.stack
|
|
2539
|
+
}, {
|
|
2540
|
+
processedOn,
|
|
2541
|
+
finishedOn,
|
|
2542
|
+
attempts,
|
|
2543
|
+
maxAttempts: job.maxAttempts
|
|
2544
|
+
});
|
|
2545
|
+
} catch (e) {
|
|
2546
|
+
this.logger.warn("Failed to record attempt failure", e);
|
|
2547
|
+
}
|
|
2548
|
+
}
|
|
2549
|
+
};
|
|
2550
|
+
const Worker = _Worker;
|
|
2551
|
+
function sleep(ms) {
|
|
2552
|
+
return new Promise((r) => setTimeout(r, ms));
|
|
2553
|
+
}
|
|
2554
|
+
|
|
2555
|
+
//#endregion
|
|
2556
|
+
export { BullBoardGroupMQAdapter, Job, Queue, Worker, getWorkersStatus, waitForQueueToEmpty };
|
|
2557
|
+
//# sourceMappingURL=index.js.map
|