@gravito/stream 2.0.1 → 2.0.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +104 -288
- package/README.zh-TW.md +146 -13
- package/dist/index.cjs +2666 -305
- package/dist/index.d.cts +2886 -741
- package/dist/index.d.ts +2886 -741
- package/dist/index.js +2661 -308
- package/package.json +12 -7
- package/proto/queue.proto +101 -0
package/dist/index.js
CHANGED
|
@@ -25,6 +25,310 @@ var __copyProps = (to, from, except, desc) => {
|
|
|
25
25
|
};
|
|
26
26
|
var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
|
|
27
27
|
|
|
28
|
+
// src/drivers/BullMQDriver.ts
|
|
29
|
+
var BullMQDriver_exports = {};
|
|
30
|
+
__export(BullMQDriver_exports, {
|
|
31
|
+
BullMQDriver: () => BullMQDriver
|
|
32
|
+
});
|
|
33
|
+
var BullMQDriver;
|
|
34
|
+
var init_BullMQDriver = __esm({
|
|
35
|
+
"src/drivers/BullMQDriver.ts"() {
|
|
36
|
+
"use strict";
|
|
37
|
+
BullMQDriver = class {
|
|
38
|
+
queue;
|
|
39
|
+
prefix;
|
|
40
|
+
debug;
|
|
41
|
+
queueMap = /* @__PURE__ */ new Map();
|
|
42
|
+
constructor(config) {
|
|
43
|
+
this.queue = config.queue;
|
|
44
|
+
this.prefix = config.prefix ?? "gravito:";
|
|
45
|
+
this.debug = config.debug ?? false;
|
|
46
|
+
if (!this.queue) {
|
|
47
|
+
throw new Error("[BullMQDriver] Bull Queue instance is required.");
|
|
48
|
+
}
|
|
49
|
+
}
|
|
50
|
+
/**
|
|
51
|
+
* Get or create a queue for the given queue name.
|
|
52
|
+
*/
|
|
53
|
+
getQueue(queueName) {
|
|
54
|
+
const fullName = `${this.prefix}${queueName}`;
|
|
55
|
+
if (this.queueMap.has(fullName)) {
|
|
56
|
+
return this.queueMap.get(fullName);
|
|
57
|
+
}
|
|
58
|
+
return this.queue;
|
|
59
|
+
}
|
|
60
|
+
/**
|
|
61
|
+
* Build Job Options from JobPushOptions.
|
|
62
|
+
*/
|
|
63
|
+
buildJobOptions(options) {
|
|
64
|
+
const bullOptions = {};
|
|
65
|
+
if (options?.priority) {
|
|
66
|
+
if (options.priority === "high" || options.priority === "critical") {
|
|
67
|
+
bullOptions.priority = 1;
|
|
68
|
+
} else if (options.priority === "low") {
|
|
69
|
+
bullOptions.priority = 10;
|
|
70
|
+
} else if (typeof options.priority === "number") {
|
|
71
|
+
bullOptions.priority = Math.min(Math.max(options.priority, 1), 10);
|
|
72
|
+
} else {
|
|
73
|
+
bullOptions.priority = 5;
|
|
74
|
+
}
|
|
75
|
+
}
|
|
76
|
+
return bullOptions;
|
|
77
|
+
}
|
|
78
|
+
/**
|
|
79
|
+
* Create Bull job data from SerializedJob.
|
|
80
|
+
*/
|
|
81
|
+
createBullJobData(job) {
|
|
82
|
+
return {
|
|
83
|
+
id: job.id,
|
|
84
|
+
type: job.type,
|
|
85
|
+
data: job.data,
|
|
86
|
+
className: job.className,
|
|
87
|
+
createdAt: job.createdAt,
|
|
88
|
+
delaySeconds: job.delaySeconds,
|
|
89
|
+
attempts: job.attempts ?? 0,
|
|
90
|
+
maxAttempts: job.maxAttempts ?? 3,
|
|
91
|
+
groupId: job.groupId,
|
|
92
|
+
retryAfterSeconds: job.retryAfterSeconds,
|
|
93
|
+
retryMultiplier: job.retryMultiplier,
|
|
94
|
+
error: job.error,
|
|
95
|
+
failedAt: job.failedAt,
|
|
96
|
+
priority: job.priority
|
|
97
|
+
};
|
|
98
|
+
}
|
|
99
|
+
/**
|
|
100
|
+
* Pushes a job to Bull Queue.
|
|
101
|
+
*/
|
|
102
|
+
async push(queue, job, options) {
|
|
103
|
+
try {
|
|
104
|
+
const q = this.getQueue(queue);
|
|
105
|
+
const bullJobData = this.createBullJobData(job);
|
|
106
|
+
const bullOptions = this.buildJobOptions(options);
|
|
107
|
+
if (job.delaySeconds && job.delaySeconds > 0) {
|
|
108
|
+
bullOptions.delay = job.delaySeconds * 1e3;
|
|
109
|
+
}
|
|
110
|
+
bullOptions.attempts = job.maxAttempts ?? 3;
|
|
111
|
+
if (job.retryAfterSeconds) {
|
|
112
|
+
bullOptions.backoff = {
|
|
113
|
+
type: "exponential",
|
|
114
|
+
delay: job.retryAfterSeconds * 1e3
|
|
115
|
+
};
|
|
116
|
+
}
|
|
117
|
+
if (options?.groupId) {
|
|
118
|
+
bullOptions.group = {
|
|
119
|
+
id: options.groupId
|
|
120
|
+
};
|
|
121
|
+
}
|
|
122
|
+
const namespacedJobName = `${queue}:${job.id}`;
|
|
123
|
+
await q.add(namespacedJobName, bullJobData, bullOptions);
|
|
124
|
+
if (this.debug) {
|
|
125
|
+
console.log(`[BullMQDriver] Pushed job ${job.id} to queue ${queue}`);
|
|
126
|
+
}
|
|
127
|
+
} catch (error) {
|
|
128
|
+
console.error(`[BullMQDriver] Failed to push job to queue ${queue}:`, error);
|
|
129
|
+
throw error;
|
|
130
|
+
}
|
|
131
|
+
}
|
|
132
|
+
/**
|
|
133
|
+
* Pops a job from Bull Queue.
|
|
134
|
+
* Note: Bull Queue typically uses Workers, not manual pop.
|
|
135
|
+
* This is a fallback implementation.
|
|
136
|
+
*/
|
|
137
|
+
async pop(queue) {
|
|
138
|
+
try {
|
|
139
|
+
this.getQueue(queue);
|
|
140
|
+
return null;
|
|
141
|
+
} catch (error) {
|
|
142
|
+
console.error(`[BullMQDriver] Failed to pop from queue ${queue}:`, error);
|
|
143
|
+
return null;
|
|
144
|
+
}
|
|
145
|
+
}
|
|
146
|
+
/**
|
|
147
|
+
* Returns the size of the queue.
|
|
148
|
+
*/
|
|
149
|
+
async size(queue) {
|
|
150
|
+
try {
|
|
151
|
+
const q = this.getQueue(queue);
|
|
152
|
+
const count = await q.count?.();
|
|
153
|
+
return count ?? 0;
|
|
154
|
+
} catch (error) {
|
|
155
|
+
console.error(`[BullMQDriver] Failed to get queue size for ${queue}:`, error);
|
|
156
|
+
return 0;
|
|
157
|
+
}
|
|
158
|
+
}
|
|
159
|
+
/**
|
|
160
|
+
* Clears the queue.
|
|
161
|
+
*/
|
|
162
|
+
async clear(queue) {
|
|
163
|
+
try {
|
|
164
|
+
const q = this.getQueue(queue);
|
|
165
|
+
if (typeof q.clean === "function") {
|
|
166
|
+
await q.clean(0);
|
|
167
|
+
}
|
|
168
|
+
} catch (error) {
|
|
169
|
+
console.error(`[BullMQDriver] Failed to clear queue ${queue}:`, error);
|
|
170
|
+
throw error;
|
|
171
|
+
}
|
|
172
|
+
}
|
|
173
|
+
/**
|
|
174
|
+
* Marks a job as failed (moves to failed list).
|
|
175
|
+
*/
|
|
176
|
+
async fail(queue, job) {
|
|
177
|
+
try {
|
|
178
|
+
const q = this.getQueue(queue);
|
|
179
|
+
const bullJob = await q.getJob?.(job.id);
|
|
180
|
+
if (bullJob) {
|
|
181
|
+
const error = job.error ?? "Job failed";
|
|
182
|
+
const failureReasonError = new Error(error);
|
|
183
|
+
await bullJob.moveToFailed?.(failureReasonError, true);
|
|
184
|
+
}
|
|
185
|
+
} catch (error) {
|
|
186
|
+
console.error(`[BullMQDriver] Failed to mark job as failed:`, error);
|
|
187
|
+
throw error;
|
|
188
|
+
}
|
|
189
|
+
}
|
|
190
|
+
/**
|
|
191
|
+
* Returns detailed statistics for the queue.
|
|
192
|
+
*/
|
|
193
|
+
async stats(queue) {
|
|
194
|
+
try {
|
|
195
|
+
const q = this.getQueue(queue);
|
|
196
|
+
const counts = await q.getJobCounts?.(["active", "completed", "failed", "delayed", "waiting"]);
|
|
197
|
+
const delayed = await q.getDelayedCount?.();
|
|
198
|
+
const failed = await q.getFailedCount?.();
|
|
199
|
+
const active = await q.getActiveCount?.();
|
|
200
|
+
return {
|
|
201
|
+
queue,
|
|
202
|
+
size: counts?.waiting ?? 0,
|
|
203
|
+
delayed: delayed ?? 0,
|
|
204
|
+
failed: failed ?? 0,
|
|
205
|
+
reserved: active ?? 0
|
|
206
|
+
};
|
|
207
|
+
} catch (error) {
|
|
208
|
+
console.error(`[BullMQDriver] Failed to get stats for queue ${queue}:`, error);
|
|
209
|
+
return {
|
|
210
|
+
queue,
|
|
211
|
+
size: 0,
|
|
212
|
+
delayed: 0,
|
|
213
|
+
failed: 0
|
|
214
|
+
};
|
|
215
|
+
}
|
|
216
|
+
}
|
|
217
|
+
/**
|
|
218
|
+
* Retrieves failed jobs from the Dead Letter Queue.
|
|
219
|
+
*/
|
|
220
|
+
async getFailed(queue, _start = 0, _end = -1) {
|
|
221
|
+
try {
|
|
222
|
+
this.getQueue(queue);
|
|
223
|
+
return [];
|
|
224
|
+
} catch (error) {
|
|
225
|
+
console.error(`[BullMQDriver] Failed to get failed jobs for queue ${queue}:`, error);
|
|
226
|
+
return [];
|
|
227
|
+
}
|
|
228
|
+
}
|
|
229
|
+
/**
|
|
230
|
+
* Retries failed jobs.
|
|
231
|
+
*/
|
|
232
|
+
async retryFailed(queue, _count = 1) {
|
|
233
|
+
try {
|
|
234
|
+
this.getQueue(queue);
|
|
235
|
+
return 0;
|
|
236
|
+
} catch (error) {
|
|
237
|
+
console.error(`[BullMQDriver] Failed to retry jobs for queue ${queue}:`, error);
|
|
238
|
+
return 0;
|
|
239
|
+
}
|
|
240
|
+
}
|
|
241
|
+
/**
|
|
242
|
+
* Clears the Dead Letter Queue.
|
|
243
|
+
*/
|
|
244
|
+
async clearFailed(queue) {
|
|
245
|
+
try {
|
|
246
|
+
const q = this.getQueue(queue);
|
|
247
|
+
if (typeof q.clean === "function") {
|
|
248
|
+
await q.clean(0, void 0, "failed");
|
|
249
|
+
}
|
|
250
|
+
} catch (error) {
|
|
251
|
+
console.error(`[BullMQDriver] Failed to clear failed jobs for queue ${queue}:`, error);
|
|
252
|
+
throw error;
|
|
253
|
+
}
|
|
254
|
+
}
|
|
255
|
+
/**
|
|
256
|
+
* Creates a new queue/topic.
|
|
257
|
+
*/
|
|
258
|
+
async createTopic(_topic, _options) {
|
|
259
|
+
}
|
|
260
|
+
/**
|
|
261
|
+
* Deletes a queue/topic.
|
|
262
|
+
*/
|
|
263
|
+
async deleteTopic(topic) {
|
|
264
|
+
try {
|
|
265
|
+
const q = this.getQueue(topic);
|
|
266
|
+
await q.close?.();
|
|
267
|
+
} catch (error) {
|
|
268
|
+
console.error(`[BullMQDriver] Failed to delete queue ${topic}:`, error);
|
|
269
|
+
throw error;
|
|
270
|
+
}
|
|
271
|
+
}
|
|
272
|
+
/**
|
|
273
|
+
* Pushes multiple jobs in batch.
|
|
274
|
+
*/
|
|
275
|
+
async pushMany(queue, jobs) {
|
|
276
|
+
try {
|
|
277
|
+
const q = this.getQueue(queue);
|
|
278
|
+
const bullJobs = jobs.map((job) => {
|
|
279
|
+
const bullJobData = this.createBullJobData(job);
|
|
280
|
+
const namespacedJobName = `${queue}:${job.id}`;
|
|
281
|
+
return {
|
|
282
|
+
name: namespacedJobName,
|
|
283
|
+
data: bullJobData
|
|
284
|
+
};
|
|
285
|
+
});
|
|
286
|
+
for (const bullJob of bullJobs) {
|
|
287
|
+
await q.add(bullJob.name, bullJob.data);
|
|
288
|
+
}
|
|
289
|
+
} catch (error) {
|
|
290
|
+
console.error(`[BullMQDriver] Failed to push multiple jobs to queue ${queue}:`, error);
|
|
291
|
+
throw error;
|
|
292
|
+
}
|
|
293
|
+
}
|
|
294
|
+
/**
|
|
295
|
+
* Pops multiple jobs in batch.
|
|
296
|
+
*/
|
|
297
|
+
async popMany(_queue, _count) {
|
|
298
|
+
return [];
|
|
299
|
+
}
|
|
300
|
+
/**
|
|
301
|
+
* Reports worker heartbeat.
|
|
302
|
+
*/
|
|
303
|
+
async reportHeartbeat(workerInfo, _prefix) {
|
|
304
|
+
if (this.debug) {
|
|
305
|
+
console.log(`[BullMQDriver] Worker heartbeat from ${workerInfo.id}`);
|
|
306
|
+
}
|
|
307
|
+
}
|
|
308
|
+
/**
|
|
309
|
+
* Publishes a log message.
|
|
310
|
+
*/
|
|
311
|
+
async publishLog(logPayload, _prefix) {
|
|
312
|
+
if (this.debug) {
|
|
313
|
+
console.log(`[BullMQDriver] [${logPayload.level}] ${logPayload.message}`);
|
|
314
|
+
}
|
|
315
|
+
}
|
|
316
|
+
/**
|
|
317
|
+
* Checks rate limit for a queue.
|
|
318
|
+
*/
|
|
319
|
+
async checkRateLimit(_queue, _config) {
|
|
320
|
+
return true;
|
|
321
|
+
}
|
|
322
|
+
/**
|
|
323
|
+
* Retrieves all queue names.
|
|
324
|
+
*/
|
|
325
|
+
async getQueues() {
|
|
326
|
+
return ["default"];
|
|
327
|
+
}
|
|
328
|
+
};
|
|
329
|
+
}
|
|
330
|
+
});
|
|
331
|
+
|
|
28
332
|
// src/drivers/DatabaseDriver.ts
|
|
29
333
|
var DatabaseDriver_exports = {};
|
|
30
334
|
__export(DatabaseDriver_exports, {
|
|
@@ -47,7 +351,12 @@ var init_DatabaseDriver = __esm({
|
|
|
47
351
|
}
|
|
48
352
|
}
|
|
49
353
|
/**
|
|
50
|
-
*
|
|
354
|
+
* Pushes a job to the database queue.
|
|
355
|
+
*
|
|
356
|
+
* Inserts a new row into the jobs table.
|
|
357
|
+
*
|
|
358
|
+
* @param queue - The queue name.
|
|
359
|
+
* @param job - The serialized job.
|
|
51
360
|
*/
|
|
52
361
|
async push(queue, job) {
|
|
53
362
|
const availableAt = job.delaySeconds ? new Date(Date.now() + job.delaySeconds * 1e3) : /* @__PURE__ */ new Date();
|
|
@@ -59,7 +368,13 @@ var init_DatabaseDriver = __esm({
|
|
|
59
368
|
);
|
|
60
369
|
}
|
|
61
370
|
/**
|
|
62
|
-
*
|
|
371
|
+
* Pops the next available job from the queue.
|
|
372
|
+
*
|
|
373
|
+
* Uses transactional locking (SELECT ... FOR UPDATE SKIP LOCKED if supported) to ensure
|
|
374
|
+
* atomic reservation of jobs by workers.
|
|
375
|
+
*
|
|
376
|
+
* @param queue - The queue name.
|
|
377
|
+
* @returns The job or `null`.
|
|
63
378
|
*/
|
|
64
379
|
async pop(queue) {
|
|
65
380
|
const result = await this.dbService.execute(
|
|
@@ -126,7 +441,10 @@ var init_DatabaseDriver = __esm({
|
|
|
126
441
|
return job;
|
|
127
442
|
}
|
|
128
443
|
/**
|
|
129
|
-
*
|
|
444
|
+
* Pops multiple jobs from the queue in a single transaction.
|
|
445
|
+
*
|
|
446
|
+
* @param queue - The queue name.
|
|
447
|
+
* @param count - Max jobs to pop.
|
|
130
448
|
*/
|
|
131
449
|
async popMany(queue, count) {
|
|
132
450
|
if (count <= 1) {
|
|
@@ -185,7 +503,9 @@ var init_DatabaseDriver = __esm({
|
|
|
185
503
|
}
|
|
186
504
|
}
|
|
187
505
|
/**
|
|
188
|
-
*
|
|
506
|
+
* Retrieves queue statistics by querying the table.
|
|
507
|
+
*
|
|
508
|
+
* @param queue - The queue name.
|
|
189
509
|
*/
|
|
190
510
|
async stats(queue) {
|
|
191
511
|
const failedQueue = `failed:${queue}`;
|
|
@@ -219,7 +539,9 @@ var init_DatabaseDriver = __esm({
|
|
|
219
539
|
}
|
|
220
540
|
}
|
|
221
541
|
/**
|
|
222
|
-
*
|
|
542
|
+
* Returns the count of pending jobs.
|
|
543
|
+
*
|
|
544
|
+
* @param queue - The queue name.
|
|
223
545
|
*/
|
|
224
546
|
async size(queue) {
|
|
225
547
|
const result = await this.dbService.execute(
|
|
@@ -233,14 +555,18 @@ var init_DatabaseDriver = __esm({
|
|
|
233
555
|
return result?.[0]?.count ?? 0;
|
|
234
556
|
}
|
|
235
557
|
/**
|
|
236
|
-
*
|
|
558
|
+
* Clears the queue by deleting all rows for the queue.
|
|
559
|
+
*
|
|
560
|
+
* @param queue - The queue name.
|
|
237
561
|
*/
|
|
238
562
|
async clear(queue) {
|
|
239
563
|
await this.dbService.execute(`DELETE FROM ${this.tableName} WHERE queue = $1`, [queue]);
|
|
240
564
|
}
|
|
241
565
|
/**
|
|
242
|
-
*
|
|
243
|
-
*
|
|
566
|
+
* Pops a job using a polling loop (Blocking simulation).
|
|
567
|
+
*
|
|
568
|
+
* @param queue - The queue name.
|
|
569
|
+
* @param timeout - Timeout in seconds.
|
|
244
570
|
*/
|
|
245
571
|
async popBlocking(queue, timeout) {
|
|
246
572
|
const start = Date.now();
|
|
@@ -253,12 +579,14 @@ var init_DatabaseDriver = __esm({
|
|
|
253
579
|
if (timeout > 0 && Date.now() - start >= timeoutMs) {
|
|
254
580
|
return null;
|
|
255
581
|
}
|
|
256
|
-
await new Promise((
|
|
582
|
+
await new Promise((resolve2) => setTimeout(resolve2, 1e3));
|
|
257
583
|
}
|
|
258
584
|
}
|
|
259
585
|
/**
|
|
260
|
-
*
|
|
261
|
-
*
|
|
586
|
+
* Pushes multiple jobs using a transaction.
|
|
587
|
+
*
|
|
588
|
+
* @param queue - The queue name.
|
|
589
|
+
* @param jobs - Array of jobs.
|
|
262
590
|
*/
|
|
263
591
|
async pushMany(queue, jobs) {
|
|
264
592
|
if (jobs.length === 0) {
|
|
@@ -280,7 +608,10 @@ var init_DatabaseDriver = __esm({
|
|
|
280
608
|
});
|
|
281
609
|
}
|
|
282
610
|
/**
|
|
283
|
-
*
|
|
611
|
+
* Marks a job as permanently failed by moving it to the DLQ (separate logical queue in DB).
|
|
612
|
+
*
|
|
613
|
+
* @param queue - The queue name.
|
|
614
|
+
* @param job - The failed job.
|
|
284
615
|
*/
|
|
285
616
|
async fail(queue, job) {
|
|
286
617
|
const failedQueue = `failed:${queue}`;
|
|
@@ -292,7 +623,10 @@ var init_DatabaseDriver = __esm({
|
|
|
292
623
|
);
|
|
293
624
|
}
|
|
294
625
|
/**
|
|
295
|
-
*
|
|
626
|
+
* Deletes a job row from the database (completion).
|
|
627
|
+
*
|
|
628
|
+
* @param _queue - The queue name (unused).
|
|
629
|
+
* @param job - The job to complete.
|
|
296
630
|
*/
|
|
297
631
|
async complete(_queue, job) {
|
|
298
632
|
if (!job.id) {
|
|
@@ -346,7 +680,10 @@ var init_KafkaDriver = __esm({
|
|
|
346
680
|
return this.admin;
|
|
347
681
|
}
|
|
348
682
|
/**
|
|
349
|
-
*
|
|
683
|
+
* Pushes a job to a Kafka topic.
|
|
684
|
+
*
|
|
685
|
+
* @param queue - The topic name.
|
|
686
|
+
* @param job - The job to publish.
|
|
350
687
|
*/
|
|
351
688
|
async push(queue, job) {
|
|
352
689
|
const producer = await this.ensureProducer();
|
|
@@ -371,30 +708,37 @@ var init_KafkaDriver = __esm({
|
|
|
371
708
|
});
|
|
372
709
|
}
|
|
373
710
|
/**
|
|
374
|
-
* Pop is not supported for Kafka.
|
|
711
|
+
* Pop is not supported for Kafka (Push-based).
|
|
712
|
+
*
|
|
713
|
+
* Kafka consumers typically stream messages. Use `subscribe()` instead.
|
|
375
714
|
*
|
|
376
|
-
*
|
|
715
|
+
* @throws {Error} Always throws as Kafka does not support polling individual messages in this manner.
|
|
377
716
|
*/
|
|
378
717
|
async pop(_queue) {
|
|
379
718
|
throw new Error("[KafkaDriver] Kafka uses push-based model. Use subscribe() instead of pop().");
|
|
380
719
|
}
|
|
381
720
|
/**
|
|
382
|
-
* Kafka does not
|
|
721
|
+
* Returns 0 as Kafka does not expose a simple "queue size".
|
|
383
722
|
*
|
|
384
|
-
*
|
|
723
|
+
* Monitoring lag requires external tools or Admin API checks not implemented here.
|
|
385
724
|
*/
|
|
386
725
|
async size(_queue) {
|
|
387
726
|
return 0;
|
|
388
727
|
}
|
|
389
728
|
/**
|
|
390
|
-
*
|
|
729
|
+
* Clears a queue by deleting the topic.
|
|
730
|
+
*
|
|
731
|
+
* @param queue - The topic name.
|
|
391
732
|
*/
|
|
392
733
|
async clear(queue) {
|
|
393
734
|
const admin = await this.ensureAdmin();
|
|
394
735
|
await admin.deleteTopics({ topics: [queue] });
|
|
395
736
|
}
|
|
396
737
|
/**
|
|
397
|
-
*
|
|
738
|
+
* Pushes multiple jobs to a Kafka topic.
|
|
739
|
+
*
|
|
740
|
+
* @param queue - The topic name.
|
|
741
|
+
* @param jobs - Array of jobs.
|
|
398
742
|
*/
|
|
399
743
|
async pushMany(queue, jobs) {
|
|
400
744
|
if (jobs.length === 0) {
|
|
@@ -423,7 +767,10 @@ var init_KafkaDriver = __esm({
|
|
|
423
767
|
});
|
|
424
768
|
}
|
|
425
769
|
/**
|
|
426
|
-
*
|
|
770
|
+
* Creates a new Kafka topic.
|
|
771
|
+
*
|
|
772
|
+
* @param topic - The topic name.
|
|
773
|
+
* @param options - Config for partitions/replication.
|
|
427
774
|
*/
|
|
428
775
|
async createTopic(topic, options) {
|
|
429
776
|
const admin = await this.ensureAdmin();
|
|
@@ -438,13 +785,20 @@ var init_KafkaDriver = __esm({
|
|
|
438
785
|
});
|
|
439
786
|
}
|
|
440
787
|
/**
|
|
441
|
-
*
|
|
788
|
+
* Deletes a Kafka topic.
|
|
789
|
+
*
|
|
790
|
+
* @param topic - The topic name.
|
|
442
791
|
*/
|
|
443
792
|
async deleteTopic(topic) {
|
|
444
793
|
await this.clear(topic);
|
|
445
794
|
}
|
|
446
795
|
/**
|
|
447
|
-
*
|
|
796
|
+
* Subscribes to a topic for streaming jobs.
|
|
797
|
+
*
|
|
798
|
+
* Starts a Kafka consumer group and processes messages as they arrive.
|
|
799
|
+
*
|
|
800
|
+
* @param queue - The topic name.
|
|
801
|
+
* @param callback - Function to handle the job.
|
|
448
802
|
*/
|
|
449
803
|
async subscribe(queue, callback) {
|
|
450
804
|
const consumer = this.client.consumer({ groupId: this.consumerGroupId });
|
|
@@ -530,7 +884,10 @@ var init_RabbitMQDriver = __esm({
|
|
|
530
884
|
return this.connection;
|
|
531
885
|
}
|
|
532
886
|
/**
|
|
533
|
-
*
|
|
887
|
+
* Pushes a job to a RabbitMQ queue or exchange.
|
|
888
|
+
*
|
|
889
|
+
* @param queue - The queue name.
|
|
890
|
+
* @param job - The serialized job.
|
|
534
891
|
*/
|
|
535
892
|
async push(queue, job) {
|
|
536
893
|
const channel = await this.ensureChannel();
|
|
@@ -545,7 +902,9 @@ var init_RabbitMQDriver = __esm({
|
|
|
545
902
|
}
|
|
546
903
|
}
|
|
547
904
|
/**
|
|
548
|
-
*
|
|
905
|
+
* Pops a job from the queue.
|
|
906
|
+
*
|
|
907
|
+
* @param queue - The queue name.
|
|
549
908
|
*/
|
|
550
909
|
async pop(queue) {
|
|
551
910
|
const channel = await this.ensureChannel();
|
|
@@ -559,8 +918,10 @@ var init_RabbitMQDriver = __esm({
|
|
|
559
918
|
return job;
|
|
560
919
|
}
|
|
561
920
|
/**
|
|
562
|
-
*
|
|
563
|
-
*
|
|
921
|
+
* Pops multiple jobs.
|
|
922
|
+
*
|
|
923
|
+
* @param queue - The queue name.
|
|
924
|
+
* @param count - Max jobs.
|
|
564
925
|
*/
|
|
565
926
|
async popMany(queue, count) {
|
|
566
927
|
const channel = await this.ensureChannel();
|
|
@@ -578,7 +939,9 @@ var init_RabbitMQDriver = __esm({
|
|
|
578
939
|
return results;
|
|
579
940
|
}
|
|
580
941
|
/**
|
|
581
|
-
*
|
|
942
|
+
* Acknowledges a message.
|
|
943
|
+
*
|
|
944
|
+
* @param messageId - The message object (RabbitMQ requires object reference).
|
|
582
945
|
*/
|
|
583
946
|
async acknowledge(messageId) {
|
|
584
947
|
const channel = await this.ensureChannel();
|
|
@@ -601,7 +964,7 @@ var init_RabbitMQDriver = __esm({
|
|
|
601
964
|
channel.reject(message, requeue);
|
|
602
965
|
}
|
|
603
966
|
/**
|
|
604
|
-
*
|
|
967
|
+
* Subscribes to a queue.
|
|
605
968
|
*/
|
|
606
969
|
async subscribe(queue, callback, options = {}) {
|
|
607
970
|
const channel = await this.ensureChannel();
|
|
@@ -630,7 +993,9 @@ var init_RabbitMQDriver = __esm({
|
|
|
630
993
|
);
|
|
631
994
|
}
|
|
632
995
|
/**
|
|
633
|
-
*
|
|
996
|
+
* Returns the number of messages in the queue.
|
|
997
|
+
*
|
|
998
|
+
* @param queue - The queue name.
|
|
634
999
|
*/
|
|
635
1000
|
async size(queue) {
|
|
636
1001
|
const channel = await this.ensureChannel();
|
|
@@ -638,7 +1003,9 @@ var init_RabbitMQDriver = __esm({
|
|
|
638
1003
|
return ok.messageCount;
|
|
639
1004
|
}
|
|
640
1005
|
/**
|
|
641
|
-
*
|
|
1006
|
+
* Purges the queue.
|
|
1007
|
+
*
|
|
1008
|
+
* @param queue - The queue name.
|
|
642
1009
|
*/
|
|
643
1010
|
async clear(queue) {
|
|
644
1011
|
const channel = await this.ensureChannel();
|
|
@@ -686,7 +1053,7 @@ var init_RedisDriver = __esm({
|
|
|
686
1053
|
local activeSet = KEYS[2]
|
|
687
1054
|
local pendingList = KEYS[3]
|
|
688
1055
|
local groupId = ARGV[1]
|
|
689
|
-
|
|
1056
|
+
|
|
690
1057
|
local nextJob = redis.call('LPOP', pendingList)
|
|
691
1058
|
if nextJob then
|
|
692
1059
|
return redis.call('LPUSH', waitList, nextJob)
|
|
@@ -784,7 +1151,13 @@ var init_RedisDriver = __esm({
|
|
|
784
1151
|
return `${this.prefix}${queue}`;
|
|
785
1152
|
}
|
|
786
1153
|
/**
|
|
787
|
-
*
|
|
1154
|
+
* Pushes a job to Redis.
|
|
1155
|
+
*
|
|
1156
|
+
* Handles regular jobs (LPUSH), delayed jobs (ZADD), and grouped jobs (custom Lua logic).
|
|
1157
|
+
*
|
|
1158
|
+
* @param queue - The queue name.
|
|
1159
|
+
* @param job - The serialized job.
|
|
1160
|
+
* @param options - Push options.
|
|
788
1161
|
*/
|
|
789
1162
|
async push(queue, job, options) {
|
|
790
1163
|
const key = this.getKey(queue, options?.priority);
|
|
@@ -805,6 +1178,9 @@ var init_RedisDriver = __esm({
|
|
|
805
1178
|
failedAt: job.failedAt
|
|
806
1179
|
};
|
|
807
1180
|
const payload = JSON.stringify(payloadObj);
|
|
1181
|
+
if (typeof this.client.sadd === "function") {
|
|
1182
|
+
await this.client.sadd(`${this.prefix}queues`, queue);
|
|
1183
|
+
}
|
|
808
1184
|
if (groupId && typeof this.client.pushGroupJob === "function") {
|
|
809
1185
|
const activeSetKey = `${this.prefix}active`;
|
|
810
1186
|
const pendingListKey = `${this.prefix}pending:${groupId}`;
|
|
@@ -824,7 +1200,12 @@ var init_RedisDriver = __esm({
|
|
|
824
1200
|
}
|
|
825
1201
|
}
|
|
826
1202
|
/**
|
|
827
|
-
*
|
|
1203
|
+
* Completes a job.
|
|
1204
|
+
*
|
|
1205
|
+
* Crucial for Group FIFO logic to unlock the next job in the group.
|
|
1206
|
+
*
|
|
1207
|
+
* @param queue - The queue name.
|
|
1208
|
+
* @param job - The job to complete.
|
|
828
1209
|
*/
|
|
829
1210
|
async complete(queue, job) {
|
|
830
1211
|
if (!job.groupId) {
|
|
@@ -838,8 +1219,13 @@ var init_RedisDriver = __esm({
|
|
|
838
1219
|
}
|
|
839
1220
|
}
|
|
840
1221
|
/**
|
|
841
|
-
*
|
|
842
|
-
*
|
|
1222
|
+
* Pops a job from the queue.
|
|
1223
|
+
*
|
|
1224
|
+
* Checks priorities in order (critical -> high -> default -> low).
|
|
1225
|
+
* Also checks for due delayed jobs and moves them to the active list.
|
|
1226
|
+
*
|
|
1227
|
+
* @param queue - The queue name.
|
|
1228
|
+
* @returns The job or `null`.
|
|
843
1229
|
*/
|
|
844
1230
|
async pop(queue) {
|
|
845
1231
|
const priorities = ["critical", "high", "default", "low"];
|
|
@@ -911,8 +1297,12 @@ var init_RedisDriver = __esm({
|
|
|
911
1297
|
return null;
|
|
912
1298
|
}
|
|
913
1299
|
/**
|
|
914
|
-
*
|
|
915
|
-
*
|
|
1300
|
+
* Pops a job using blocking Redis commands (BRPOP).
|
|
1301
|
+
*
|
|
1302
|
+
* Efficiently waits for a job to arrive without polling.
|
|
1303
|
+
*
|
|
1304
|
+
* @param queues - The queues to listen to.
|
|
1305
|
+
* @param timeout - Timeout in seconds.
|
|
916
1306
|
*/
|
|
917
1307
|
async popBlocking(queues, timeout) {
|
|
918
1308
|
const queueList = Array.isArray(queues) ? queues : [queues];
|
|
@@ -956,14 +1346,19 @@ var init_RedisDriver = __esm({
|
|
|
956
1346
|
};
|
|
957
1347
|
}
|
|
958
1348
|
/**
|
|
959
|
-
*
|
|
1349
|
+
* Returns the length of the queue (Redis List length).
|
|
1350
|
+
*
|
|
1351
|
+
* @param queue - The queue name.
|
|
960
1352
|
*/
|
|
961
1353
|
async size(queue) {
|
|
962
1354
|
const key = this.getKey(queue);
|
|
963
1355
|
return this.client.llen(key);
|
|
964
1356
|
}
|
|
965
1357
|
/**
|
|
966
|
-
*
|
|
1358
|
+
* Marks a job as permanently failed by moving it to a DLQ list.
|
|
1359
|
+
*
|
|
1360
|
+
* @param queue - The queue name.
|
|
1361
|
+
* @param job - The failed job.
|
|
967
1362
|
*/
|
|
968
1363
|
async fail(queue, job) {
|
|
969
1364
|
const key = `${this.getKey(queue)}:failed`;
|
|
@@ -977,7 +1372,9 @@ var init_RedisDriver = __esm({
|
|
|
977
1372
|
}
|
|
978
1373
|
}
|
|
979
1374
|
/**
|
|
980
|
-
*
|
|
1375
|
+
* Clears the queue and its associated delayed/active sets.
|
|
1376
|
+
*
|
|
1377
|
+
* @param queue - The queue name.
|
|
981
1378
|
*/
|
|
982
1379
|
async clear(queue) {
|
|
983
1380
|
const key = this.getKey(queue);
|
|
@@ -990,8 +1387,11 @@ var init_RedisDriver = __esm({
|
|
|
990
1387
|
}
|
|
991
1388
|
}
|
|
992
1389
|
/**
|
|
993
|
-
*
|
|
994
|
-
*
|
|
1390
|
+
* Retrieves full stats for the queue using Redis Pipelining.
|
|
1391
|
+
*
|
|
1392
|
+
* Aggregates counts from all priority lists and the DLQ.
|
|
1393
|
+
*
|
|
1394
|
+
* @param queue - The queue name.
|
|
995
1395
|
*/
|
|
996
1396
|
async stats(queue) {
|
|
997
1397
|
const priorities = ["critical", "high", "default", "low"];
|
|
@@ -1036,7 +1436,12 @@ var init_RedisDriver = __esm({
|
|
|
1036
1436
|
return stats;
|
|
1037
1437
|
}
|
|
1038
1438
|
/**
|
|
1039
|
-
*
|
|
1439
|
+
* Pushes multiple jobs to the queue.
|
|
1440
|
+
*
|
|
1441
|
+
* Uses pipeline for batch efficiency. Falls back to individual pushes if complex logic (groups/priority) is involved.
|
|
1442
|
+
*
|
|
1443
|
+
* @param queue - The queue name.
|
|
1444
|
+
* @param jobs - Array of jobs.
|
|
1040
1445
|
*/
|
|
1041
1446
|
async pushMany(queue, jobs) {
|
|
1042
1447
|
if (jobs.length === 0) {
|
|
@@ -1108,8 +1513,12 @@ var init_RedisDriver = __esm({
|
|
|
1108
1513
|
await this.client.lpush(key, ...payloads);
|
|
1109
1514
|
}
|
|
1110
1515
|
/**
|
|
1111
|
-
*
|
|
1112
|
-
*
|
|
1516
|
+
* Pops multiple jobs from the queue.
|
|
1517
|
+
*
|
|
1518
|
+
* Uses a Lua script for atomic retrieval across priorities.
|
|
1519
|
+
*
|
|
1520
|
+
* @param queue - The queue name.
|
|
1521
|
+
* @param count - Max jobs to pop.
|
|
1113
1522
|
*/
|
|
1114
1523
|
async popMany(queue, count) {
|
|
1115
1524
|
if (count <= 0) {
|
|
@@ -1187,7 +1596,9 @@ var init_RedisDriver = __esm({
|
|
|
1187
1596
|
return results;
|
|
1188
1597
|
}
|
|
1189
1598
|
/**
|
|
1190
|
-
*
|
|
1599
|
+
* Reports a worker heartbeat.
|
|
1600
|
+
*
|
|
1601
|
+
* Stores worker metadata in a key with an expiration (TTL).
|
|
1191
1602
|
*/
|
|
1192
1603
|
async reportHeartbeat(workerInfo, prefix) {
|
|
1193
1604
|
const key = `${prefix ?? this.prefix}worker:${workerInfo.id}`;
|
|
@@ -1196,7 +1607,9 @@ var init_RedisDriver = __esm({
|
|
|
1196
1607
|
}
|
|
1197
1608
|
}
|
|
1198
1609
|
/**
|
|
1199
|
-
*
|
|
1610
|
+
* Publishes monitoring logs.
|
|
1611
|
+
*
|
|
1612
|
+
* Uses Redis Pub/Sub for real-time logs and a capped List for history.
|
|
1200
1613
|
*/
|
|
1201
1614
|
async publishLog(logPayload, prefix) {
|
|
1202
1615
|
const payload = JSON.stringify(logPayload);
|
|
@@ -1215,8 +1628,12 @@ var init_RedisDriver = __esm({
|
|
|
1215
1628
|
}
|
|
1216
1629
|
}
|
|
1217
1630
|
/**
|
|
1218
|
-
*
|
|
1219
|
-
*
|
|
1631
|
+
* Checks the rate limit for a queue.
|
|
1632
|
+
*
|
|
1633
|
+
* Uses a simple Fixed Window counter (INCR + EXPIRE).
|
|
1634
|
+
*
|
|
1635
|
+
* @param queue - The queue name.
|
|
1636
|
+
* @param config - Rate limit rules.
|
|
1220
1637
|
*/
|
|
1221
1638
|
async checkRateLimit(queue, config) {
|
|
1222
1639
|
const key = `${this.prefix}${queue}:ratelimit`;
|
|
@@ -1234,7 +1651,11 @@ var init_RedisDriver = __esm({
|
|
|
1234
1651
|
return true;
|
|
1235
1652
|
}
|
|
1236
1653
|
/**
|
|
1237
|
-
*
|
|
1654
|
+
* Retrieves failed jobs from the DLQ.
|
|
1655
|
+
*
|
|
1656
|
+
* @param queue - The queue name.
|
|
1657
|
+
* @param start - Start index.
|
|
1658
|
+
* @param end - End index.
|
|
1238
1659
|
*/
|
|
1239
1660
|
async getFailed(queue, start = 0, end = -1) {
|
|
1240
1661
|
const key = `${this.getKey(queue)}:failed`;
|
|
@@ -1245,8 +1666,12 @@ var init_RedisDriver = __esm({
|
|
|
1245
1666
|
return payloads.map((p) => this.parsePayload(p));
|
|
1246
1667
|
}
|
|
1247
1668
|
/**
|
|
1248
|
-
*
|
|
1249
|
-
*
|
|
1669
|
+
* Retries failed jobs.
|
|
1670
|
+
*
|
|
1671
|
+
* Pops from DLQ and pushes back to the active queue (RPOPLPUSH equivalent logic).
|
|
1672
|
+
*
|
|
1673
|
+
* @param queue - The queue name.
|
|
1674
|
+
* @param count - Jobs to retry.
|
|
1250
1675
|
*/
|
|
1251
1676
|
async retryFailed(queue, count = 1) {
|
|
1252
1677
|
const failedKey = `${this.getKey(queue)}:failed`;
|
|
@@ -1263,18 +1688,31 @@ var init_RedisDriver = __esm({
|
|
|
1263
1688
|
job.attempts = 0;
|
|
1264
1689
|
delete job.error;
|
|
1265
1690
|
delete job.failedAt;
|
|
1691
|
+
delete job.priority;
|
|
1266
1692
|
await this.push(queue, job, { priority: job.priority, groupId: job.groupId });
|
|
1267
1693
|
retried++;
|
|
1268
1694
|
}
|
|
1269
1695
|
return retried;
|
|
1270
1696
|
}
|
|
1271
1697
|
/**
|
|
1272
|
-
*
|
|
1698
|
+
* Clears the Dead Letter Queue.
|
|
1699
|
+
*
|
|
1700
|
+
* @param queue - The queue name.
|
|
1273
1701
|
*/
|
|
1274
1702
|
async clearFailed(queue) {
|
|
1275
1703
|
const key = `${this.getKey(queue)}:failed`;
|
|
1276
1704
|
await this.client.del(key);
|
|
1277
1705
|
}
|
|
1706
|
+
/**
|
|
1707
|
+
* Retrieves all discovered queue names from Redis.
|
|
1708
|
+
*/
|
|
1709
|
+
async getQueues() {
|
|
1710
|
+
if (typeof this.client.smembers === "function") {
|
|
1711
|
+
const queues = await this.client.smembers(`${this.prefix}queues`);
|
|
1712
|
+
return Array.isArray(queues) ? queues.sort() : [];
|
|
1713
|
+
}
|
|
1714
|
+
return ["default"];
|
|
1715
|
+
}
|
|
1278
1716
|
};
|
|
1279
1717
|
}
|
|
1280
1718
|
});
|
|
@@ -1321,7 +1759,10 @@ var init_SQSDriver = __esm({
|
|
|
1321
1759
|
return queue;
|
|
1322
1760
|
}
|
|
1323
1761
|
/**
|
|
1324
|
-
*
|
|
1762
|
+
* Pushes a job to SQS.
|
|
1763
|
+
*
|
|
1764
|
+
* @param queue - The queue name (or URL).
|
|
1765
|
+
* @param job - The serialized job.
|
|
1325
1766
|
*/
|
|
1326
1767
|
async push(queue, job) {
|
|
1327
1768
|
const { SendMessageCommand } = await import("@aws-sdk/client-sqs");
|
|
@@ -1346,7 +1787,9 @@ var init_SQSDriver = __esm({
|
|
|
1346
1787
|
);
|
|
1347
1788
|
}
|
|
1348
1789
|
/**
|
|
1349
|
-
*
|
|
1790
|
+
* Pops a job from SQS (using long polling).
|
|
1791
|
+
*
|
|
1792
|
+
* @param queue - The queue name (or URL).
|
|
1350
1793
|
*/
|
|
1351
1794
|
async pop(queue) {
|
|
1352
1795
|
const { ReceiveMessageCommand } = await import("@aws-sdk/client-sqs");
|
|
@@ -1378,8 +1821,10 @@ var init_SQSDriver = __esm({
|
|
|
1378
1821
|
};
|
|
1379
1822
|
}
|
|
1380
1823
|
/**
|
|
1381
|
-
*
|
|
1382
|
-
*
|
|
1824
|
+
* Pops multiple jobs (up to 10).
|
|
1825
|
+
*
|
|
1826
|
+
* @param queue - The queue name.
|
|
1827
|
+
* @param count - Max jobs (capped at 10 by SQS).
|
|
1383
1828
|
*/
|
|
1384
1829
|
async popMany(queue, count) {
|
|
1385
1830
|
const { ReceiveMessageCommand } = await import("@aws-sdk/client-sqs");
|
|
@@ -1412,7 +1857,9 @@ var init_SQSDriver = __esm({
|
|
|
1412
1857
|
});
|
|
1413
1858
|
}
|
|
1414
1859
|
/**
|
|
1415
|
-
*
|
|
1860
|
+
* Returns the approximate number of messages in the queue.
|
|
1861
|
+
*
|
|
1862
|
+
* @param queue - The queue name.
|
|
1416
1863
|
*/
|
|
1417
1864
|
async size(queue) {
|
|
1418
1865
|
const { GetQueueAttributesCommand } = await import("@aws-sdk/client-sqs");
|
|
@@ -1431,10 +1878,12 @@ var init_SQSDriver = __esm({
|
|
|
1431
1878
|
}
|
|
1432
1879
|
}
|
|
1433
1880
|
/**
|
|
1434
|
-
*
|
|
1881
|
+
* Clears the queue by continuously receiving and deleting messages.
|
|
1435
1882
|
*
|
|
1436
|
-
*
|
|
1437
|
-
*
|
|
1883
|
+
* SQS does not have a "purge" command in the client data plane easily accessible here,
|
|
1884
|
+
* so we drain the queue.
|
|
1885
|
+
*
|
|
1886
|
+
* @param queue - The queue name.
|
|
1438
1887
|
*/
|
|
1439
1888
|
async clear(queue) {
|
|
1440
1889
|
const { DeleteMessageCommand } = await import("@aws-sdk/client-sqs");
|
|
@@ -1455,7 +1904,10 @@ var init_SQSDriver = __esm({
|
|
|
1455
1904
|
}
|
|
1456
1905
|
}
|
|
1457
1906
|
/**
|
|
1458
|
-
*
|
|
1907
|
+
* Pushes multiple jobs using SQS batch API.
|
|
1908
|
+
*
|
|
1909
|
+
* @param queue - The queue name.
|
|
1910
|
+
* @param jobs - Array of jobs.
|
|
1459
1911
|
*/
|
|
1460
1912
|
async pushMany(queue, jobs) {
|
|
1461
1913
|
if (jobs.length === 0) {
|
|
@@ -1492,13 +1944,16 @@ var init_SQSDriver = __esm({
|
|
|
1492
1944
|
}
|
|
1493
1945
|
}
|
|
1494
1946
|
/**
|
|
1495
|
-
*
|
|
1947
|
+
* Throws error as SQS requires ReceiptHandle, not just MessageId.
|
|
1496
1948
|
*/
|
|
1497
1949
|
async acknowledge(_messageId) {
|
|
1498
1950
|
throw new Error("[SQSDriver] Use deleteMessage() with ReceiptHandle instead of acknowledge().");
|
|
1499
1951
|
}
|
|
1500
1952
|
/**
|
|
1501
|
-
*
|
|
1953
|
+
* Deletes a message using its ReceiptHandle (ACK).
|
|
1954
|
+
*
|
|
1955
|
+
* @param queue - The queue name.
|
|
1956
|
+
* @param receiptHandle - The SQS receipt handle.
|
|
1502
1957
|
*/
|
|
1503
1958
|
async deleteMessage(queue, receiptHandle) {
|
|
1504
1959
|
const { DeleteMessageCommand } = await import("@aws-sdk/client-sqs");
|
|
@@ -1514,42 +1969,254 @@ var init_SQSDriver = __esm({
|
|
|
1514
1969
|
}
|
|
1515
1970
|
});
|
|
1516
1971
|
|
|
1517
|
-
// src/
|
|
1518
|
-
var
|
|
1519
|
-
|
|
1520
|
-
|
|
1521
|
-
});
|
|
1522
|
-
var BufferedPersistence;
|
|
1523
|
-
var init_BufferedPersistence = __esm({
|
|
1524
|
-
"src/persistence/BufferedPersistence.ts"() {
|
|
1972
|
+
// src/locks/DistributedLock.ts
|
|
1973
|
+
var DistributedLock;
|
|
1974
|
+
var init_DistributedLock = __esm({
|
|
1975
|
+
"src/locks/DistributedLock.ts"() {
|
|
1525
1976
|
"use strict";
|
|
1526
|
-
|
|
1527
|
-
|
|
1528
|
-
|
|
1529
|
-
|
|
1530
|
-
|
|
1531
|
-
|
|
1532
|
-
|
|
1533
|
-
|
|
1534
|
-
flushTimer = null;
|
|
1535
|
-
maxBufferSize;
|
|
1536
|
-
flushInterval;
|
|
1537
|
-
async archive(queue, job, status) {
|
|
1538
|
-
this.jobBuffer.push({ queue, job, status });
|
|
1539
|
-
if (this.jobBuffer.length >= this.maxBufferSize) {
|
|
1540
|
-
this.flush().catch((err) => {
|
|
1541
|
-
console.error("[BufferedPersistence] Auto-flush failed (jobs):", err.message || err);
|
|
1542
|
-
});
|
|
1543
|
-
} else {
|
|
1544
|
-
this.ensureFlushTimer();
|
|
1545
|
-
}
|
|
1546
|
-
}
|
|
1547
|
-
async find(queue, id) {
|
|
1548
|
-
return this.adapter.find(queue, id);
|
|
1977
|
+
DistributedLock = class {
|
|
1978
|
+
/**
|
|
1979
|
+
* Creates a DistributedLock instance.
|
|
1980
|
+
*
|
|
1981
|
+
* @param client - Redis client instance. Must support SET, DEL, and EVAL commands.
|
|
1982
|
+
*/
|
|
1983
|
+
constructor(client) {
|
|
1984
|
+
this.client = client;
|
|
1549
1985
|
}
|
|
1550
|
-
|
|
1986
|
+
/**
|
|
1987
|
+
* Unique identifier for this lock instance.
|
|
1988
|
+
* Used to ensure only the owner can release the lock.
|
|
1989
|
+
*/
|
|
1990
|
+
lockId = crypto.randomUUID();
|
|
1991
|
+
/**
|
|
1992
|
+
* Timer for automatic renewal.
|
|
1993
|
+
*/
|
|
1994
|
+
refreshTimer = null;
|
|
1995
|
+
/**
|
|
1996
|
+
* The key of the currently held lock.
|
|
1997
|
+
*/
|
|
1998
|
+
currentLockKey = null;
|
|
1999
|
+
/**
|
|
2000
|
+
* Attempts to acquire a distributed lock for the specified key.
|
|
2001
|
+
*
|
|
2002
|
+
* Uses Redis `SET key value EX ttl NX` for atomic acquisition.
|
|
2003
|
+
* If the lock is held by another node, it retries according to `retryCount`.
|
|
2004
|
+
* Upon success, if `refreshInterval` is set, automatic renewal starts.
|
|
2005
|
+
*
|
|
2006
|
+
* @param key - The lock key. Use a meaningful resource identifier.
|
|
2007
|
+
* @param options - Configuration options for the lock.
|
|
2008
|
+
* @returns `true` if the lock was acquired, `false` otherwise.
|
|
2009
|
+
*
|
|
2010
|
+
* @throws {Error} If the Redis client does not support the SET command.
|
|
2011
|
+
*
|
|
2012
|
+
* @example
|
|
2013
|
+
* ```typescript
|
|
2014
|
+
* const acquired = await lock.acquire('schedule:job-123', {
|
|
2015
|
+
* ttl: 30000,
|
|
2016
|
+
* retryCount: 5,
|
|
2017
|
+
* retryDelay: 200
|
|
2018
|
+
* });
|
|
2019
|
+
*
|
|
2020
|
+
* if (!acquired) {
|
|
2021
|
+
* console.log('Resource is currently locked by another node');
|
|
2022
|
+
* }
|
|
2023
|
+
* ```
|
|
2024
|
+
*/
|
|
2025
|
+
async acquire(key, options) {
|
|
2026
|
+
if (typeof this.client.set !== "function") {
|
|
2027
|
+
throw new Error("[DistributedLock] Redis client does not support SET command");
|
|
2028
|
+
}
|
|
2029
|
+
const ttlSeconds = Math.ceil(options.ttl / 1e3);
|
|
2030
|
+
let attempts = 0;
|
|
2031
|
+
while (attempts <= options.retryCount) {
|
|
2032
|
+
try {
|
|
2033
|
+
const result = await this.client.set(key, this.lockId, "EX", ttlSeconds, "NX");
|
|
2034
|
+
if (result === "OK") {
|
|
2035
|
+
this.currentLockKey = key;
|
|
2036
|
+
if (options.refreshInterval) {
|
|
2037
|
+
this.startRefresh(key, options);
|
|
2038
|
+
}
|
|
2039
|
+
return true;
|
|
2040
|
+
}
|
|
2041
|
+
} catch (error) {
|
|
2042
|
+
const err = error instanceof Error ? error : new Error(String(error));
|
|
2043
|
+
console.error(`[DistributedLock] Failed to acquire lock for ${key}:`, err.message);
|
|
2044
|
+
}
|
|
2045
|
+
attempts++;
|
|
2046
|
+
if (attempts <= options.retryCount) {
|
|
2047
|
+
await this.sleep(options.retryDelay);
|
|
2048
|
+
}
|
|
2049
|
+
}
|
|
2050
|
+
return false;
|
|
2051
|
+
}
|
|
2052
|
+
/**
|
|
2053
|
+
* Releases the lock for the specified key.
|
|
2054
|
+
*
|
|
2055
|
+
* Uses a Lua script to ensure atomicity: the lock is deleted ONLY if the value matches
|
|
2056
|
+
* this instance's `lockId`. This prevents deleting locks held by others.
|
|
2057
|
+
* Stops the auto-renewal timer upon success.
|
|
2058
|
+
*
|
|
2059
|
+
* @param key - The lock key to release.
|
|
2060
|
+
*
|
|
2061
|
+
* @throws {Error} If the Redis client does not support the EVAL command.
|
|
2062
|
+
*
|
|
2063
|
+
* @example
|
|
2064
|
+
* ```typescript
|
|
2065
|
+
* await lock.release('schedule:job-123');
|
|
2066
|
+
* ```
|
|
2067
|
+
*/
|
|
2068
|
+
async release(key) {
|
|
2069
|
+
this.stopRefresh();
|
|
2070
|
+
if (typeof this.client.eval !== "function") {
|
|
2071
|
+
throw new Error("[DistributedLock] Redis client does not support EVAL command");
|
|
2072
|
+
}
|
|
2073
|
+
try {
|
|
2074
|
+
const script = `
|
|
2075
|
+
if redis.call("get", KEYS[1]) == ARGV[1] then
|
|
2076
|
+
return redis.call("del", KEYS[1])
|
|
2077
|
+
else
|
|
2078
|
+
return 0
|
|
2079
|
+
end
|
|
2080
|
+
`;
|
|
2081
|
+
await this.client.eval(script, 1, key, this.lockId);
|
|
2082
|
+
this.currentLockKey = null;
|
|
2083
|
+
} catch (error) {
|
|
2084
|
+
const err = error instanceof Error ? error : new Error(String(error));
|
|
2085
|
+
console.error(`[DistributedLock] Failed to release lock for ${key}:`, err.message);
|
|
2086
|
+
}
|
|
2087
|
+
}
|
|
2088
|
+
/**
|
|
2089
|
+
* Starts the automatic renewal mechanism.
|
|
2090
|
+
*
|
|
2091
|
+
* Periodically extends the lock's TTL to prevent expiration during long-running tasks.
|
|
2092
|
+
* Uses a Lua script to ensure only owned locks are renewed.
|
|
2093
|
+
*
|
|
2094
|
+
* @param key - The lock key.
|
|
2095
|
+
* @param options - Lock options containing `refreshInterval`.
|
|
2096
|
+
*/
|
|
2097
|
+
startRefresh(key, options) {
|
|
2098
|
+
if (!options.refreshInterval) {
|
|
2099
|
+
return;
|
|
2100
|
+
}
|
|
2101
|
+
this.stopRefresh();
|
|
2102
|
+
const ttlSeconds = Math.ceil(options.ttl / 1e3);
|
|
2103
|
+
this.refreshTimer = setInterval(async () => {
|
|
2104
|
+
try {
|
|
2105
|
+
if (typeof this.client.eval !== "function") {
|
|
2106
|
+
console.error("[DistributedLock] Redis client does not support EVAL command for refresh");
|
|
2107
|
+
return;
|
|
2108
|
+
}
|
|
2109
|
+
const script = `
|
|
2110
|
+
if redis.call("get", KEYS[1]) == ARGV[1] then
|
|
2111
|
+
return redis.call("expire", KEYS[1], ARGV[2])
|
|
2112
|
+
else
|
|
2113
|
+
return 0
|
|
2114
|
+
end
|
|
2115
|
+
`;
|
|
2116
|
+
const result = await this.client.eval(script, 1, key, this.lockId, ttlSeconds);
|
|
2117
|
+
if (result === 0) {
|
|
2118
|
+
console.warn(
|
|
2119
|
+
`[DistributedLock] Lock ${key} no longer held by this instance, stopping refresh`
|
|
2120
|
+
);
|
|
2121
|
+
this.stopRefresh();
|
|
2122
|
+
}
|
|
2123
|
+
} catch (error) {
|
|
2124
|
+
const err = error instanceof Error ? error : new Error(String(error));
|
|
2125
|
+
console.error(`[DistributedLock] Failed to refresh lock ${key}:`, err.message);
|
|
2126
|
+
}
|
|
2127
|
+
}, options.refreshInterval);
|
|
2128
|
+
}
|
|
2129
|
+
/**
|
|
2130
|
+
* Stops the automatic renewal timer.
|
|
2131
|
+
*/
|
|
2132
|
+
stopRefresh() {
|
|
2133
|
+
if (this.refreshTimer) {
|
|
2134
|
+
clearInterval(this.refreshTimer);
|
|
2135
|
+
this.refreshTimer = null;
|
|
2136
|
+
}
|
|
2137
|
+
}
|
|
2138
|
+
/**
|
|
2139
|
+
* Helper for delay.
|
|
2140
|
+
*
|
|
2141
|
+
* @param ms - Milliseconds to sleep.
|
|
2142
|
+
*/
|
|
2143
|
+
sleep(ms) {
|
|
2144
|
+
return new Promise((resolve2) => setTimeout(resolve2, ms));
|
|
2145
|
+
}
|
|
2146
|
+
/**
|
|
2147
|
+
* Checks if the specified lock is currently held by this instance.
|
|
2148
|
+
*
|
|
2149
|
+
* @param key - The lock key.
|
|
2150
|
+
* @returns `true` if held, `false` otherwise.
|
|
2151
|
+
*
|
|
2152
|
+
* @example
|
|
2153
|
+
* ```typescript
|
|
2154
|
+
* if (lock.isHeld('schedule:job-123')) {
|
|
2155
|
+
* console.log('Lock is active');
|
|
2156
|
+
* }
|
|
2157
|
+
* ```
|
|
2158
|
+
*/
|
|
2159
|
+
isHeld(key) {
|
|
2160
|
+
return this.currentLockKey === key;
|
|
2161
|
+
}
|
|
2162
|
+
};
|
|
2163
|
+
}
|
|
2164
|
+
});
|
|
2165
|
+
|
|
2166
|
+
// src/persistence/BufferedPersistence.ts
|
|
2167
|
+
var BufferedPersistence_exports = {};
|
|
2168
|
+
__export(BufferedPersistence_exports, {
|
|
2169
|
+
BufferedPersistence: () => BufferedPersistence
|
|
2170
|
+
});
|
|
2171
|
+
var BufferedPersistence;
|
|
2172
|
+
var init_BufferedPersistence = __esm({
|
|
2173
|
+
"src/persistence/BufferedPersistence.ts"() {
|
|
2174
|
+
"use strict";
|
|
2175
|
+
BufferedPersistence = class {
|
|
2176
|
+
constructor(adapter, options = {}) {
|
|
2177
|
+
this.adapter = adapter;
|
|
2178
|
+
this.maxBufferSize = options.maxBufferSize ?? 50;
|
|
2179
|
+
this.flushInterval = options.flushInterval ?? 5e3;
|
|
2180
|
+
}
|
|
2181
|
+
jobBuffer = [];
|
|
2182
|
+
logBuffer = [];
|
|
2183
|
+
flushTimer = null;
|
|
2184
|
+
maxBufferSize;
|
|
2185
|
+
flushInterval;
|
|
2186
|
+
/**
|
|
2187
|
+
* Buffers a job archive request.
|
|
2188
|
+
*
|
|
2189
|
+
* @param queue - The queue name.
|
|
2190
|
+
* @param job - The serialized job.
|
|
2191
|
+
* @param status - The final job status.
|
|
2192
|
+
*/
|
|
2193
|
+
async archive(queue, job, status) {
|
|
2194
|
+
this.jobBuffer.push({ queue, job, status });
|
|
2195
|
+
if (this.jobBuffer.length >= this.maxBufferSize) {
|
|
2196
|
+
this.flush().catch((err) => {
|
|
2197
|
+
console.error("[BufferedPersistence] Auto-flush failed (jobs):", err.message || err);
|
|
2198
|
+
});
|
|
2199
|
+
} else {
|
|
2200
|
+
this.ensureFlushTimer();
|
|
2201
|
+
}
|
|
2202
|
+
}
|
|
2203
|
+
/**
|
|
2204
|
+
* Delegates find to the underlying adapter (no buffering for reads).
|
|
2205
|
+
*/
|
|
2206
|
+
async find(queue, id) {
|
|
2207
|
+
return this.adapter.find(queue, id);
|
|
2208
|
+
}
|
|
2209
|
+
/**
|
|
2210
|
+
* Delegates list to the underlying adapter (no buffering for reads).
|
|
2211
|
+
*/
|
|
2212
|
+
async list(queue, options) {
|
|
1551
2213
|
return this.adapter.list(queue, options);
|
|
1552
2214
|
}
|
|
2215
|
+
/**
|
|
2216
|
+
* Archives multiple jobs directly (bypassing buffer, or flushing first).
|
|
2217
|
+
*
|
|
2218
|
+
* Actually, for consistency, this might just pass through.
|
|
2219
|
+
*/
|
|
1553
2220
|
async archiveMany(jobs) {
|
|
1554
2221
|
if (this.adapter.archiveMany) {
|
|
1555
2222
|
return this.adapter.archiveMany(jobs);
|
|
@@ -1558,9 +2225,17 @@ var init_BufferedPersistence = __esm({
|
|
|
1558
2225
|
await this.adapter.archive(item.queue, item.job, item.status);
|
|
1559
2226
|
}
|
|
1560
2227
|
}
|
|
2228
|
+
/**
|
|
2229
|
+
* Delegates cleanup to the underlying adapter.
|
|
2230
|
+
*/
|
|
1561
2231
|
async cleanup(days) {
|
|
1562
2232
|
return this.adapter.cleanup(days);
|
|
1563
2233
|
}
|
|
2234
|
+
/**
|
|
2235
|
+
* Flushes all buffered data to the underlying adapter.
|
|
2236
|
+
*
|
|
2237
|
+
* Uses `archiveMany` and `archiveLogMany` if supported by the adapter for batch efficiency.
|
|
2238
|
+
*/
|
|
1564
2239
|
async flush() {
|
|
1565
2240
|
if (this.flushTimer) {
|
|
1566
2241
|
clearTimeout(this.flushTimer);
|
|
@@ -1599,9 +2274,15 @@ var init_BufferedPersistence = __esm({
|
|
|
1599
2274
|
}
|
|
1600
2275
|
await Promise.all(promises);
|
|
1601
2276
|
}
|
|
2277
|
+
/**
|
|
2278
|
+
* Delegates count to the underlying adapter.
|
|
2279
|
+
*/
|
|
1602
2280
|
async count(queue, options) {
|
|
1603
2281
|
return this.adapter.count(queue, options);
|
|
1604
2282
|
}
|
|
2283
|
+
/**
|
|
2284
|
+
* Buffers a log message.
|
|
2285
|
+
*/
|
|
1605
2286
|
async archiveLog(log) {
|
|
1606
2287
|
this.logBuffer.push(log);
|
|
1607
2288
|
if (this.logBuffer.length >= this.maxBufferSize) {
|
|
@@ -1612,6 +2293,9 @@ var init_BufferedPersistence = __esm({
|
|
|
1612
2293
|
this.ensureFlushTimer();
|
|
1613
2294
|
}
|
|
1614
2295
|
}
|
|
2296
|
+
/**
|
|
2297
|
+
* Archives multiple logs directly.
|
|
2298
|
+
*/
|
|
1615
2299
|
async archiveLogMany(logs) {
|
|
1616
2300
|
if (this.adapter.archiveLogMany) {
|
|
1617
2301
|
return this.adapter.archiveLogMany(logs);
|
|
@@ -1620,12 +2304,21 @@ var init_BufferedPersistence = __esm({
|
|
|
1620
2304
|
await this.adapter.archiveLog(log);
|
|
1621
2305
|
}
|
|
1622
2306
|
}
|
|
2307
|
+
/**
|
|
2308
|
+
* Delegates listLogs to the underlying adapter.
|
|
2309
|
+
*/
|
|
1623
2310
|
async listLogs(options) {
|
|
1624
2311
|
return this.adapter.listLogs(options);
|
|
1625
2312
|
}
|
|
2313
|
+
/**
|
|
2314
|
+
* Delegates countLogs to the underlying adapter.
|
|
2315
|
+
*/
|
|
1626
2316
|
async countLogs(options) {
|
|
1627
2317
|
return this.adapter.countLogs(options);
|
|
1628
2318
|
}
|
|
2319
|
+
/**
|
|
2320
|
+
* Ensures the auto-flush timer is running.
|
|
2321
|
+
*/
|
|
1629
2322
|
ensureFlushTimer() {
|
|
1630
2323
|
if (this.flushTimer) {
|
|
1631
2324
|
return;
|
|
@@ -3289,6 +3982,9 @@ var init_MessagePackSerializer = __esm({
|
|
|
3289
3982
|
);
|
|
3290
3983
|
}
|
|
3291
3984
|
}
|
|
3985
|
+
/**
|
|
3986
|
+
* Serialize a job using MessagePack.
|
|
3987
|
+
*/
|
|
3292
3988
|
serialize(job) {
|
|
3293
3989
|
const id = job.id || `${Date.now()}-${crypto.randomUUID()}`;
|
|
3294
3990
|
const properties = {};
|
|
@@ -3311,6 +4007,9 @@ var init_MessagePackSerializer = __esm({
|
|
|
3311
4007
|
...job.priority ? { priority: job.priority } : {}
|
|
3312
4008
|
};
|
|
3313
4009
|
}
|
|
4010
|
+
/**
|
|
4011
|
+
* Deserialize a MessagePack job.
|
|
4012
|
+
*/
|
|
3314
4013
|
deserialize(serialized) {
|
|
3315
4014
|
if (serialized.type !== "msgpack") {
|
|
3316
4015
|
throw new Error('Invalid serialization type: expected "msgpack"');
|
|
@@ -3348,12 +4047,29 @@ var Scheduler;
|
|
|
3348
4047
|
var init_Scheduler = __esm({
|
|
3349
4048
|
"src/Scheduler.ts"() {
|
|
3350
4049
|
"use strict";
|
|
4050
|
+
init_DistributedLock();
|
|
3351
4051
|
Scheduler = class {
|
|
3352
4052
|
constructor(manager, options = {}) {
|
|
3353
4053
|
this.manager = manager;
|
|
3354
4054
|
this.prefix = options.prefix ?? "queue:";
|
|
4055
|
+
this.lockTtl = options.lockTtl ?? 6e4;
|
|
4056
|
+
this.lockRefreshInterval = options.lockRefreshInterval ?? 2e4;
|
|
4057
|
+
this.lockRetryCount = options.lockRetryCount ?? 0;
|
|
4058
|
+
this.lockRetryDelay = options.lockRetryDelay ?? 100;
|
|
4059
|
+
this.tickInterval = options.tickInterval ?? 6e4;
|
|
4060
|
+
this.leaderTtl = options.leaderTtl ?? 3e4;
|
|
3355
4061
|
}
|
|
3356
4062
|
prefix;
|
|
4063
|
+
lockTtl;
|
|
4064
|
+
lockRefreshInterval;
|
|
4065
|
+
lockRetryCount;
|
|
4066
|
+
lockRetryDelay;
|
|
4067
|
+
tickInterval;
|
|
4068
|
+
leaderTtl;
|
|
4069
|
+
distributedLock;
|
|
4070
|
+
running = false;
|
|
4071
|
+
timer = null;
|
|
4072
|
+
isLeader = false;
|
|
3357
4073
|
get client() {
|
|
3358
4074
|
const driver = this.manager.getDriver(this.manager.getDefaultConnection());
|
|
3359
4075
|
if (!driver || !("client" in driver)) {
|
|
@@ -3362,7 +4078,23 @@ var init_Scheduler = __esm({
|
|
|
3362
4078
|
return driver.client;
|
|
3363
4079
|
}
|
|
3364
4080
|
/**
|
|
3365
|
-
*
|
|
4081
|
+
* Gets or creates the distributed lock instance.
|
|
4082
|
+
*
|
|
4083
|
+
* @private
|
|
4084
|
+
*/
|
|
4085
|
+
getDistributedLock() {
|
|
4086
|
+
if (!this.distributedLock) {
|
|
4087
|
+
this.distributedLock = new DistributedLock(this.client);
|
|
4088
|
+
}
|
|
4089
|
+
return this.distributedLock;
|
|
4090
|
+
}
|
|
4091
|
+
/**
|
|
4092
|
+
* Registers a new scheduled job or updates an existing one.
|
|
4093
|
+
*
|
|
4094
|
+
* Calculates the next run time based on the CRON expression and stores the configuration in Redis.
|
|
4095
|
+
*
|
|
4096
|
+
* @param config - The job configuration (excluding nextRun and enabled status which are auto-set).
|
|
4097
|
+
* @throws {Error} If Redis client does not support pipelining.
|
|
3366
4098
|
*/
|
|
3367
4099
|
async register(config) {
|
|
3368
4100
|
const nextRun = parser.parse(config.cron).next().getTime();
|
|
@@ -3384,7 +4116,11 @@ var init_Scheduler = __esm({
|
|
|
3384
4116
|
await pipe.exec();
|
|
3385
4117
|
}
|
|
3386
4118
|
/**
|
|
3387
|
-
*
|
|
4119
|
+
* Removes a scheduled job.
|
|
4120
|
+
*
|
|
4121
|
+
* Deletes the job metadata and schedule entry from Redis.
|
|
4122
|
+
*
|
|
4123
|
+
* @param id - The unique identifier of the scheduled job.
|
|
3388
4124
|
*/
|
|
3389
4125
|
async remove(id) {
|
|
3390
4126
|
const client = this.client;
|
|
@@ -3397,7 +4133,9 @@ var init_Scheduler = __esm({
|
|
|
3397
4133
|
await pipe.exec();
|
|
3398
4134
|
}
|
|
3399
4135
|
/**
|
|
3400
|
-
*
|
|
4136
|
+
* Lists all registered scheduled jobs.
|
|
4137
|
+
*
|
|
4138
|
+
* @returns An array of all scheduled job configurations.
|
|
3401
4139
|
*/
|
|
3402
4140
|
async list() {
|
|
3403
4141
|
const client = this.client;
|
|
@@ -3421,7 +4159,77 @@ var init_Scheduler = __esm({
|
|
|
3421
4159
|
return configs;
|
|
3422
4160
|
}
|
|
3423
4161
|
/**
|
|
3424
|
-
*
|
|
4162
|
+
* Starts the automatic scheduler loop.
|
|
4163
|
+
*
|
|
4164
|
+
* Periodically triggers `tick()` to process due jobs. Uses leader election
|
|
4165
|
+
* to ensure that only one node performs the scanning in a multi-node environment.
|
|
4166
|
+
*/
|
|
4167
|
+
async start() {
|
|
4168
|
+
if (this.running) {
|
|
4169
|
+
return;
|
|
4170
|
+
}
|
|
4171
|
+
this.running = true;
|
|
4172
|
+
const loop = async () => {
|
|
4173
|
+
if (!this.running) {
|
|
4174
|
+
return;
|
|
4175
|
+
}
|
|
4176
|
+
try {
|
|
4177
|
+
await this.performTickWithLeaderElection();
|
|
4178
|
+
} catch (err) {
|
|
4179
|
+
console.error("[Scheduler] Loop error:", err);
|
|
4180
|
+
}
|
|
4181
|
+
this.timer = setTimeout(loop, this.tickInterval);
|
|
4182
|
+
};
|
|
4183
|
+
loop();
|
|
4184
|
+
}
|
|
4185
|
+
/**
|
|
4186
|
+
* Stops the automatic scheduler loop.
|
|
4187
|
+
*/
|
|
4188
|
+
async stop() {
|
|
4189
|
+
this.running = false;
|
|
4190
|
+
if (this.timer) {
|
|
4191
|
+
clearTimeout(this.timer);
|
|
4192
|
+
this.timer = null;
|
|
4193
|
+
}
|
|
4194
|
+
if (this.isLeader) {
|
|
4195
|
+
await this.releaseLeader();
|
|
4196
|
+
}
|
|
4197
|
+
}
|
|
4198
|
+
/**
|
|
4199
|
+
* Acquires the leader lock and performs a tick.
|
|
4200
|
+
*
|
|
4201
|
+
* @private
|
|
4202
|
+
*/
|
|
4203
|
+
async performTickWithLeaderElection() {
|
|
4204
|
+
const lock = this.getDistributedLock();
|
|
4205
|
+
const leaderKey = `${this.prefix}scheduler:leader`;
|
|
4206
|
+
this.isLeader = await lock.acquire(leaderKey, {
|
|
4207
|
+
ttl: this.leaderTtl,
|
|
4208
|
+
refreshInterval: Math.floor(this.leaderTtl / 3),
|
|
4209
|
+
retryCount: 0,
|
|
4210
|
+
retryDelay: 0
|
|
4211
|
+
});
|
|
4212
|
+
if (this.isLeader) {
|
|
4213
|
+
await this.tick();
|
|
4214
|
+
}
|
|
4215
|
+
}
|
|
4216
|
+
/**
|
|
4217
|
+
* Releases the leader lock.
|
|
4218
|
+
*
|
|
4219
|
+
* @private
|
|
4220
|
+
*/
|
|
4221
|
+
async releaseLeader() {
|
|
4222
|
+
const lock = this.getDistributedLock();
|
|
4223
|
+
const leaderKey = `${this.prefix}scheduler:leader`;
|
|
4224
|
+
await lock.release(leaderKey);
|
|
4225
|
+
this.isLeader = false;
|
|
4226
|
+
}
|
|
4227
|
+
/**
|
|
4228
|
+
* Manually triggers a scheduled job immediately.
|
|
4229
|
+
*
|
|
4230
|
+
* Forces execution of the job regardless of its schedule, without affecting the next scheduled run time.
|
|
4231
|
+
*
|
|
4232
|
+
* @param id - The unique identifier of the scheduled job.
|
|
3425
4233
|
*/
|
|
3426
4234
|
async runNow(id) {
|
|
3427
4235
|
const client = this.client;
|
|
@@ -3434,8 +4242,16 @@ var init_Scheduler = __esm({
|
|
|
3434
4242
|
}
|
|
3435
4243
|
}
|
|
3436
4244
|
/**
|
|
3437
|
-
*
|
|
3438
|
-
*
|
|
4245
|
+
* Checks for and triggers tasks that are due for execution.
|
|
4246
|
+
*
|
|
4247
|
+
* This method should be called periodically (e.g., via a system cron or a dedicated tick loop).
|
|
4248
|
+
* It scans the schedule for tasks with `nextRun <= now`, acquires a distributed lock for each,
|
|
4249
|
+
* pushes them to their queue, and updates the `nextRun` time.
|
|
4250
|
+
*
|
|
4251
|
+
* The distributed lock ensures that in a multi-node environment, each scheduled job is executed
|
|
4252
|
+
* only once per interval, even if multiple scheduler instances are running.
|
|
4253
|
+
*
|
|
4254
|
+
* @returns The number of jobs triggered in this tick.
|
|
3439
4255
|
*/
|
|
3440
4256
|
async tick() {
|
|
3441
4257
|
const client = this.client;
|
|
@@ -3445,35 +4261,42 @@ var init_Scheduler = __esm({
|
|
|
3445
4261
|
const now = Date.now();
|
|
3446
4262
|
const dueIds = await client.zrangebyscore(`${this.prefix}schedules`, 0, now);
|
|
3447
4263
|
let fired = 0;
|
|
4264
|
+
const lock = this.getDistributedLock();
|
|
3448
4265
|
for (const id of dueIds) {
|
|
3449
4266
|
const lockKey = `${this.prefix}lock:schedule:${id}:${Math.floor(now / 1e3)}`;
|
|
3450
|
-
|
|
3451
|
-
|
|
3452
|
-
|
|
3453
|
-
|
|
3454
|
-
|
|
3455
|
-
|
|
3456
|
-
|
|
3457
|
-
|
|
3458
|
-
|
|
3459
|
-
|
|
3460
|
-
|
|
3461
|
-
|
|
3462
|
-
|
|
3463
|
-
|
|
3464
|
-
|
|
3465
|
-
|
|
3466
|
-
|
|
3467
|
-
|
|
3468
|
-
|
|
3469
|
-
|
|
3470
|
-
|
|
4267
|
+
const acquired = await lock.acquire(lockKey, {
|
|
4268
|
+
ttl: this.lockTtl,
|
|
4269
|
+
retryCount: this.lockRetryCount,
|
|
4270
|
+
retryDelay: this.lockRetryDelay,
|
|
4271
|
+
refreshInterval: this.lockRefreshInterval
|
|
4272
|
+
});
|
|
4273
|
+
if (acquired) {
|
|
4274
|
+
try {
|
|
4275
|
+
const data = await client.hgetall?.(`${this.prefix}schedule:${id}`);
|
|
4276
|
+
if (data?.id && data.enabled === "true") {
|
|
4277
|
+
try {
|
|
4278
|
+
const serializedJob = JSON.parse(data.job);
|
|
4279
|
+
const connection = data.connection || this.manager.getDefaultConnection();
|
|
4280
|
+
const driver = this.manager.getDriver(connection);
|
|
4281
|
+
await driver.push(data.queue, serializedJob);
|
|
4282
|
+
const nextRun = parser.parse(data.cron).next().getTime();
|
|
4283
|
+
if (typeof client.pipeline === "function") {
|
|
4284
|
+
const pipe = client.pipeline();
|
|
4285
|
+
pipe.hset(`${this.prefix}schedule:${id}`, {
|
|
4286
|
+
lastRun: now,
|
|
4287
|
+
nextRun
|
|
4288
|
+
});
|
|
4289
|
+
pipe.zadd(`${this.prefix}schedules`, nextRun, id);
|
|
4290
|
+
await pipe.exec();
|
|
4291
|
+
}
|
|
4292
|
+
fired++;
|
|
4293
|
+
} catch (err) {
|
|
4294
|
+
const error = err instanceof Error ? err : new Error(String(err));
|
|
4295
|
+
console.error(`[Scheduler] Failed to process schedule ${id}:`, error.message);
|
|
3471
4296
|
}
|
|
3472
|
-
fired++;
|
|
3473
|
-
} catch (err) {
|
|
3474
|
-
const error = err instanceof Error ? err : new Error(String(err));
|
|
3475
|
-
console.error(`[Scheduler] Failed to process schedule ${id}:`, error.message);
|
|
3476
4297
|
}
|
|
4298
|
+
} finally {
|
|
4299
|
+
await lock.release(lockKey);
|
|
3477
4300
|
}
|
|
3478
4301
|
}
|
|
3479
4302
|
}
|
|
@@ -3483,78 +4306,550 @@ var init_Scheduler = __esm({
|
|
|
3483
4306
|
}
|
|
3484
4307
|
});
|
|
3485
4308
|
|
|
3486
|
-
// src/
|
|
3487
|
-
|
|
3488
|
-
|
|
3489
|
-
|
|
3490
|
-
|
|
3491
|
-
var
|
|
3492
|
-
|
|
3493
|
-
|
|
3494
|
-
|
|
3495
|
-
|
|
3496
|
-
|
|
3497
|
-
|
|
3498
|
-
*/
|
|
3499
|
-
async process(job) {
|
|
3500
|
-
const maxAttempts = job.maxAttempts ?? this.options.maxAttempts ?? 3;
|
|
3501
|
-
const timeout = this.options.timeout;
|
|
3502
|
-
if (!job.attempts) {
|
|
3503
|
-
job.attempts = 1;
|
|
3504
|
-
}
|
|
3505
|
-
try {
|
|
3506
|
-
if (timeout) {
|
|
3507
|
-
await Promise.race([
|
|
3508
|
-
job.handle(),
|
|
3509
|
-
new Promise(
|
|
3510
|
-
(_, reject) => setTimeout(
|
|
3511
|
-
() => reject(new Error(`Job timeout after ${timeout} seconds`)),
|
|
3512
|
-
timeout * 1e3
|
|
3513
|
-
)
|
|
3514
|
-
)
|
|
3515
|
-
]);
|
|
3516
|
-
} else {
|
|
3517
|
-
await job.handle();
|
|
4309
|
+
// src/DashboardProvider.ts
|
|
4310
|
+
var DashboardProvider_exports = {};
|
|
4311
|
+
__export(DashboardProvider_exports, {
|
|
4312
|
+
DashboardProvider: () => DashboardProvider
|
|
4313
|
+
});
|
|
4314
|
+
var DashboardProvider;
|
|
4315
|
+
var init_DashboardProvider = __esm({
|
|
4316
|
+
"src/DashboardProvider.ts"() {
|
|
4317
|
+
"use strict";
|
|
4318
|
+
DashboardProvider = class {
|
|
4319
|
+
constructor(manager) {
|
|
4320
|
+
this.manager = manager;
|
|
3518
4321
|
}
|
|
3519
|
-
|
|
3520
|
-
|
|
3521
|
-
|
|
3522
|
-
|
|
4322
|
+
/**
|
|
4323
|
+
* Registers dashboard API routes on the provided core adapter.
|
|
4324
|
+
*
|
|
4325
|
+
* @param core - The PlanetCore instance.
|
|
4326
|
+
* @param basePath - The base path for API routes (default: '/_flux').
|
|
4327
|
+
*/
|
|
4328
|
+
registerRoutes(core, basePath = "/_flux") {
|
|
4329
|
+
const router = core.adapter;
|
|
4330
|
+
router.get(`${basePath}/stats`, async (c) => {
|
|
4331
|
+
const stats = await this.manager.getGlobalStats();
|
|
4332
|
+
return c.json(stats);
|
|
4333
|
+
});
|
|
4334
|
+
router.get(`${basePath}/queues`, async (c) => {
|
|
4335
|
+
const stats = await this.manager.getGlobalStats();
|
|
4336
|
+
const queues = Object.entries(stats.connections).flatMap(
|
|
4337
|
+
([conn, qList]) => qList.map((q) => ({
|
|
4338
|
+
connection: conn,
|
|
4339
|
+
name: q.queue,
|
|
4340
|
+
size: q.size,
|
|
4341
|
+
failed: q.failed
|
|
4342
|
+
}))
|
|
4343
|
+
);
|
|
4344
|
+
return c.json(queues);
|
|
4345
|
+
});
|
|
4346
|
+
router.get(`${basePath}/jobs`, async (c) => {
|
|
4347
|
+
const queue = c.req.query("queue") || "default";
|
|
4348
|
+
const status = c.req.query("status");
|
|
4349
|
+
const limit = parseInt(c.req.query("limit") || "50", 10);
|
|
4350
|
+
const offset = parseInt(c.req.query("offset") || "0", 10);
|
|
4351
|
+
const persistence = this.manager.getPersistence();
|
|
4352
|
+
if (!persistence) {
|
|
4353
|
+
return c.json({ error: "Persistence not configured" }, 400);
|
|
4354
|
+
}
|
|
4355
|
+
const statuses = status ? status.includes(",") ? status.split(",") : status : void 0;
|
|
4356
|
+
const [jobs, total] = await Promise.all([
|
|
4357
|
+
persistence.list(queue, { status: statuses, limit, offset }),
|
|
4358
|
+
persistence.count(queue, { status: statuses })
|
|
4359
|
+
]);
|
|
4360
|
+
return c.json({
|
|
4361
|
+
data: jobs,
|
|
4362
|
+
meta: {
|
|
4363
|
+
total,
|
|
4364
|
+
limit,
|
|
4365
|
+
offset
|
|
4366
|
+
}
|
|
4367
|
+
});
|
|
4368
|
+
});
|
|
4369
|
+
router.post(`${basePath}/jobs/retry`, async (c) => {
|
|
4370
|
+
const { queue, count } = await c.req.json();
|
|
4371
|
+
if (!queue) {
|
|
4372
|
+
return c.json({ error: "Queue name is required" }, 400);
|
|
4373
|
+
}
|
|
4374
|
+
const retried = await this.manager.retryFailed(queue, count || 1);
|
|
4375
|
+
return c.json({ success: true, retried });
|
|
4376
|
+
});
|
|
4377
|
+
router.get(`${basePath}/logs`, async (c) => {
|
|
4378
|
+
const persistence = this.manager.getPersistence();
|
|
4379
|
+
if (!persistence) {
|
|
4380
|
+
return c.json({ error: "Persistence not configured" }, 400);
|
|
4381
|
+
}
|
|
4382
|
+
const limit = parseInt(c.req.query("limit") || "100", 10);
|
|
4383
|
+
const offset = parseInt(c.req.query("offset") || "0", 10);
|
|
4384
|
+
const level = c.req.query("level");
|
|
4385
|
+
const search = c.req.query("search");
|
|
4386
|
+
const [logs, total] = await Promise.all([
|
|
4387
|
+
persistence.listLogs({ limit, offset, level, search }),
|
|
4388
|
+
persistence.countLogs({ level, search })
|
|
4389
|
+
]);
|
|
4390
|
+
return c.json({
|
|
4391
|
+
data: logs,
|
|
4392
|
+
meta: {
|
|
4393
|
+
total,
|
|
4394
|
+
limit,
|
|
4395
|
+
offset
|
|
4396
|
+
}
|
|
4397
|
+
});
|
|
4398
|
+
});
|
|
3523
4399
|
}
|
|
3524
|
-
|
|
3525
|
-
|
|
4400
|
+
};
|
|
4401
|
+
}
|
|
4402
|
+
});
|
|
4403
|
+
|
|
4404
|
+
// src/BatchConsumer.ts
|
|
4405
|
+
var BatchConsumer = class {
|
|
4406
|
+
constructor(manager, handler, options = {}) {
|
|
4407
|
+
this.manager = manager;
|
|
4408
|
+
this.handler = handler;
|
|
4409
|
+
this.options = {
|
|
4410
|
+
queue: "default",
|
|
4411
|
+
batchSize: 10,
|
|
4412
|
+
pollInterval: 1e3,
|
|
4413
|
+
autoAck: true,
|
|
4414
|
+
...options
|
|
4415
|
+
};
|
|
3526
4416
|
}
|
|
4417
|
+
running = false;
|
|
4418
|
+
options;
|
|
3527
4419
|
/**
|
|
3528
|
-
*
|
|
4420
|
+
* Starts the batch consuming loop.
|
|
4421
|
+
*
|
|
4422
|
+
* Continuously polls for batches of jobs and passes them to the handler.
|
|
3529
4423
|
*/
|
|
3530
|
-
async
|
|
3531
|
-
|
|
3532
|
-
|
|
3533
|
-
} catch (failedError) {
|
|
3534
|
-
console.error("[Worker] Error in job.failed():", failedError);
|
|
4424
|
+
async start() {
|
|
4425
|
+
if (this.running) {
|
|
4426
|
+
return;
|
|
3535
4427
|
}
|
|
3536
|
-
|
|
4428
|
+
this.running = true;
|
|
4429
|
+
const { queue, connection, batchSize, pollInterval, autoAck } = this.options;
|
|
4430
|
+
while (this.running) {
|
|
3537
4431
|
try {
|
|
3538
|
-
await this.
|
|
3539
|
-
|
|
3540
|
-
|
|
4432
|
+
const jobs = await this.manager.popMany(queue, batchSize, connection);
|
|
4433
|
+
if (jobs.length > 0) {
|
|
4434
|
+
try {
|
|
4435
|
+
await this.handler(jobs);
|
|
4436
|
+
if (autoAck) {
|
|
4437
|
+
await Promise.all(jobs.map((job) => this.manager.complete(job)));
|
|
4438
|
+
}
|
|
4439
|
+
} catch (error) {
|
|
4440
|
+
console.error(`[BatchConsumer] Batch processing failed:`, error);
|
|
4441
|
+
const err = error instanceof Error ? error : new Error(String(error));
|
|
4442
|
+
if (autoAck) {
|
|
4443
|
+
await Promise.all(jobs.map((job) => this.manager.fail(job, err)));
|
|
4444
|
+
}
|
|
4445
|
+
}
|
|
4446
|
+
} else {
|
|
4447
|
+
await new Promise((resolve2) => setTimeout(resolve2, pollInterval));
|
|
4448
|
+
}
|
|
4449
|
+
} catch (err) {
|
|
4450
|
+
console.error(`[BatchConsumer] Polling error:`, err);
|
|
4451
|
+
await new Promise((resolve2) => setTimeout(resolve2, pollInterval));
|
|
3541
4452
|
}
|
|
3542
4453
|
}
|
|
3543
4454
|
}
|
|
3544
|
-
|
|
4455
|
+
/**
|
|
4456
|
+
* Stops the consumer loop.
|
|
4457
|
+
*
|
|
4458
|
+
* Sets the running flag to false. The loop will exit after the current iteration finishes.
|
|
4459
|
+
*/
|
|
4460
|
+
stop() {
|
|
4461
|
+
this.running = false;
|
|
4462
|
+
}
|
|
4463
|
+
};
|
|
4464
|
+
|
|
4465
|
+
// src/Consumer.ts
|
|
4466
|
+
import { EventEmitter } from "events";
|
|
4467
|
+
import pLimit from "p-limit";
|
|
4468
|
+
|
|
4469
|
+
// src/workers/SandboxedWorker.ts
|
|
4470
|
+
import { resolve } from "path";
|
|
4471
|
+
import { Worker as ThreadWorker } from "worker_threads";
|
|
4472
|
+
var SandboxedWorker = class {
|
|
4473
|
+
worker = null;
|
|
4474
|
+
state = "initializing" /* INITIALIZING */;
|
|
4475
|
+
config;
|
|
4476
|
+
idleTimer = null;
|
|
4477
|
+
executionTimer = null;
|
|
4478
|
+
/**
|
|
4479
|
+
* Creates a SandboxedWorker instance.
|
|
4480
|
+
*
|
|
4481
|
+
* @param config - Configuration options for the worker.
|
|
4482
|
+
*/
|
|
4483
|
+
constructor(config = {}) {
|
|
4484
|
+
this.config = {
|
|
4485
|
+
maxExecutionTime: config.maxExecutionTime ?? 3e4,
|
|
4486
|
+
maxMemory: config.maxMemory ?? 0,
|
|
4487
|
+
isolateContexts: config.isolateContexts ?? false,
|
|
4488
|
+
idleTimeout: config.idleTimeout ?? 6e4
|
|
4489
|
+
};
|
|
4490
|
+
}
|
|
4491
|
+
/**
|
|
4492
|
+
* Initializes the Worker Thread.
|
|
4493
|
+
*
|
|
4494
|
+
* @returns The active Worker Thread instance.
|
|
4495
|
+
* @throws {Error} If worker initialization fails or times out.
|
|
4496
|
+
*/
|
|
4497
|
+
async initWorker() {
|
|
4498
|
+
if (this.worker && this.state !== "terminated" /* TERMINATED */) {
|
|
4499
|
+
return this.worker;
|
|
4500
|
+
}
|
|
4501
|
+
const fs = __require("fs");
|
|
4502
|
+
let workerPath = resolve(__dirname, "job-executor.js");
|
|
4503
|
+
if (!fs.existsSync(workerPath)) {
|
|
4504
|
+
const tsPath = resolve(__dirname, "job-executor.ts");
|
|
4505
|
+
if (fs.existsSync(tsPath)) {
|
|
4506
|
+
workerPath = tsPath;
|
|
4507
|
+
}
|
|
4508
|
+
}
|
|
4509
|
+
const execArgv = process.execArgv.slice();
|
|
4510
|
+
if (workerPath.endsWith(".ts") && !process.env.BUN_BINARY_TARGET) {
|
|
4511
|
+
if (!execArgv.includes("--loader")) {
|
|
4512
|
+
execArgv.push("--loader", "ts-node/esm");
|
|
4513
|
+
}
|
|
4514
|
+
}
|
|
4515
|
+
const resourceLimits = {};
|
|
4516
|
+
if (this.config.maxMemory > 0) {
|
|
4517
|
+
resourceLimits.maxOldGenerationSizeMb = this.config.maxMemory;
|
|
4518
|
+
resourceLimits.maxYoungGenerationSizeMb = Math.min(this.config.maxMemory / 2, 128);
|
|
4519
|
+
}
|
|
4520
|
+
this.worker = new ThreadWorker(workerPath, {
|
|
4521
|
+
resourceLimits: Object.keys(resourceLimits).length > 0 ? resourceLimits : void 0,
|
|
4522
|
+
execArgv
|
|
4523
|
+
});
|
|
4524
|
+
this.state = "initializing" /* INITIALIZING */;
|
|
4525
|
+
await new Promise((resolve2, reject) => {
|
|
4526
|
+
const timeout = setTimeout(() => {
|
|
4527
|
+
reject(new Error("Worker initialization timeout"));
|
|
4528
|
+
}, 5e3);
|
|
4529
|
+
this.worker?.once("message", (message) => {
|
|
4530
|
+
clearTimeout(timeout);
|
|
4531
|
+
if (message.type === "ready") {
|
|
4532
|
+
this.state = "ready" /* READY */;
|
|
4533
|
+
resolve2();
|
|
4534
|
+
} else {
|
|
4535
|
+
reject(new Error("Unexpected worker message during initialization"));
|
|
4536
|
+
}
|
|
4537
|
+
});
|
|
4538
|
+
this.worker?.once("error", (error) => {
|
|
4539
|
+
clearTimeout(timeout);
|
|
4540
|
+
reject(error);
|
|
4541
|
+
});
|
|
4542
|
+
});
|
|
4543
|
+
this.worker.on("error", (error) => {
|
|
4544
|
+
console.error("[SandboxedWorker] Worker error:", error);
|
|
4545
|
+
this.state = "terminated" /* TERMINATED */;
|
|
4546
|
+
});
|
|
4547
|
+
this.worker.on("exit", (code) => {
|
|
4548
|
+
if (code !== 0) {
|
|
4549
|
+
console.error(`[SandboxedWorker] Worker exited with code ${code}`);
|
|
4550
|
+
}
|
|
4551
|
+
this.state = "terminated" /* TERMINATED */;
|
|
4552
|
+
});
|
|
4553
|
+
return this.worker;
|
|
4554
|
+
}
|
|
4555
|
+
/**
|
|
4556
|
+
* Executes a job in the sandboxed environment.
|
|
4557
|
+
*
|
|
4558
|
+
* @param job - The serialized job data to execute.
|
|
4559
|
+
* @throws {Error} If execution fails, times out, or the worker crashes.
|
|
4560
|
+
*/
|
|
4561
|
+
async execute(job) {
|
|
4562
|
+
if (this.config.isolateContexts) {
|
|
4563
|
+
await this.terminate();
|
|
4564
|
+
}
|
|
4565
|
+
const worker = await this.initWorker();
|
|
4566
|
+
this.state = "busy" /* BUSY */;
|
|
4567
|
+
if (this.idleTimer) {
|
|
4568
|
+
clearTimeout(this.idleTimer);
|
|
4569
|
+
this.idleTimer = null;
|
|
4570
|
+
}
|
|
4571
|
+
try {
|
|
4572
|
+
await Promise.race([this.executeInWorker(worker, job), this.createTimeoutPromise()]);
|
|
4573
|
+
} finally {
|
|
4574
|
+
this.state = "ready" /* READY */;
|
|
4575
|
+
if (this.executionTimer) {
|
|
4576
|
+
clearTimeout(this.executionTimer);
|
|
4577
|
+
this.executionTimer = null;
|
|
4578
|
+
}
|
|
4579
|
+
if (!this.config.isolateContexts) {
|
|
4580
|
+
this.startIdleTimer();
|
|
4581
|
+
} else {
|
|
4582
|
+
await this.terminate();
|
|
4583
|
+
}
|
|
4584
|
+
}
|
|
4585
|
+
}
|
|
4586
|
+
/**
|
|
4587
|
+
* Internal method to send execution message to the worker thread.
|
|
4588
|
+
*
|
|
4589
|
+
* @param worker - The worker thread instance.
|
|
4590
|
+
* @param job - Job data.
|
|
4591
|
+
*/
|
|
4592
|
+
executeInWorker(worker, job) {
|
|
4593
|
+
return new Promise((resolve2, reject) => {
|
|
4594
|
+
const messageHandler = (message) => {
|
|
4595
|
+
if (message.type === "success") {
|
|
4596
|
+
cleanup();
|
|
4597
|
+
resolve2();
|
|
4598
|
+
} else if (message.type === "error") {
|
|
4599
|
+
cleanup();
|
|
4600
|
+
const error = new Error(message.error || "Job execution failed");
|
|
4601
|
+
if (message.stack) {
|
|
4602
|
+
error.stack = message.stack;
|
|
4603
|
+
}
|
|
4604
|
+
reject(error);
|
|
4605
|
+
}
|
|
4606
|
+
};
|
|
4607
|
+
const errorHandler = (error) => {
|
|
4608
|
+
cleanup();
|
|
4609
|
+
reject(error);
|
|
4610
|
+
};
|
|
4611
|
+
const exitHandler = (code) => {
|
|
4612
|
+
cleanup();
|
|
4613
|
+
if (code !== 0) {
|
|
4614
|
+
reject(new Error(`Worker exited unexpectedly with code ${code}`));
|
|
4615
|
+
}
|
|
4616
|
+
};
|
|
4617
|
+
const cleanup = () => {
|
|
4618
|
+
worker.off("message", messageHandler);
|
|
4619
|
+
worker.off("error", errorHandler);
|
|
4620
|
+
worker.off("exit", exitHandler);
|
|
4621
|
+
};
|
|
4622
|
+
worker.on("message", messageHandler);
|
|
4623
|
+
worker.on("error", errorHandler);
|
|
4624
|
+
worker.on("exit", exitHandler);
|
|
4625
|
+
worker.postMessage({
|
|
4626
|
+
type: "execute",
|
|
4627
|
+
job
|
|
4628
|
+
});
|
|
4629
|
+
});
|
|
4630
|
+
}
|
|
4631
|
+
/**
|
|
4632
|
+
* Creates a promise that rejects after the configured timeout.
|
|
4633
|
+
*/
|
|
4634
|
+
createTimeoutPromise() {
|
|
4635
|
+
return new Promise((_, reject) => {
|
|
4636
|
+
this.executionTimer = setTimeout(() => {
|
|
4637
|
+
this.terminate().catch(console.error);
|
|
4638
|
+
reject(new Error(`Job execution timeout after ${this.config.maxExecutionTime}ms`));
|
|
4639
|
+
}, this.config.maxExecutionTime);
|
|
4640
|
+
});
|
|
4641
|
+
}
|
|
4642
|
+
/**
|
|
4643
|
+
* Starts the idle timer to auto-terminate the worker.
|
|
4644
|
+
*/
|
|
4645
|
+
startIdleTimer() {
|
|
4646
|
+
if (this.idleTimer) {
|
|
4647
|
+
clearTimeout(this.idleTimer);
|
|
4648
|
+
}
|
|
4649
|
+
this.idleTimer = setTimeout(() => {
|
|
4650
|
+
this.terminate().catch(console.error);
|
|
4651
|
+
}, this.config.idleTimeout);
|
|
4652
|
+
}
|
|
4653
|
+
/**
|
|
4654
|
+
* Terminates the Worker Thread immediately.
|
|
4655
|
+
*
|
|
4656
|
+
* Stops any running job and releases resources.
|
|
4657
|
+
*/
|
|
4658
|
+
async terminate() {
|
|
4659
|
+
if (this.idleTimer) {
|
|
4660
|
+
clearTimeout(this.idleTimer);
|
|
4661
|
+
this.idleTimer = null;
|
|
4662
|
+
}
|
|
4663
|
+
if (this.executionTimer) {
|
|
4664
|
+
clearTimeout(this.executionTimer);
|
|
4665
|
+
this.executionTimer = null;
|
|
4666
|
+
}
|
|
4667
|
+
if (this.worker) {
|
|
4668
|
+
const worker = this.worker;
|
|
4669
|
+
this.worker = null;
|
|
4670
|
+
this.state = "terminated" /* TERMINATED */;
|
|
4671
|
+
try {
|
|
4672
|
+
await worker.terminate();
|
|
4673
|
+
} catch (error) {
|
|
4674
|
+
console.error("[SandboxedWorker] Error terminating worker:", error);
|
|
4675
|
+
}
|
|
4676
|
+
}
|
|
4677
|
+
}
|
|
4678
|
+
/**
|
|
4679
|
+
* Gets the current state of the worker.
|
|
4680
|
+
*
|
|
4681
|
+
* @returns The current `WorkerState`.
|
|
4682
|
+
*/
|
|
4683
|
+
getState() {
|
|
4684
|
+
return this.state;
|
|
4685
|
+
}
|
|
4686
|
+
/**
|
|
4687
|
+
* Checks if the worker is ready to accept a job.
|
|
4688
|
+
*
|
|
4689
|
+
* @returns `true` if ready, `false` otherwise.
|
|
4690
|
+
*/
|
|
4691
|
+
isReady() {
|
|
4692
|
+
return this.state === "ready" /* READY */;
|
|
4693
|
+
}
|
|
4694
|
+
/**
|
|
4695
|
+
* Checks if the worker is currently executing a job.
|
|
4696
|
+
*
|
|
4697
|
+
* @returns `true` if busy, `false` otherwise.
|
|
4698
|
+
*/
|
|
4699
|
+
isBusy() {
|
|
4700
|
+
return this.state === "busy" /* BUSY */;
|
|
4701
|
+
}
|
|
4702
|
+
};
|
|
4703
|
+
|
|
4704
|
+
// src/Worker.ts
|
|
4705
|
+
var Worker = class {
|
|
4706
|
+
constructor(options = {}) {
|
|
4707
|
+
this.options = options;
|
|
4708
|
+
if (options.sandboxed) {
|
|
4709
|
+
this.sandboxedWorker = new SandboxedWorker(options.sandboxConfig);
|
|
4710
|
+
}
|
|
4711
|
+
}
|
|
4712
|
+
sandboxedWorker;
|
|
4713
|
+
/**
|
|
4714
|
+
* Processes a single job instance.
|
|
4715
|
+
*
|
|
4716
|
+
* 1. Checks attempt counts.
|
|
4717
|
+
* 2. Enforces execution timeout (if configured).
|
|
4718
|
+
* 3. Runs `job.handle()` (either directly or in a sandboxed Worker Thread).
|
|
4719
|
+
* 4. Catches errors and invokes failure handlers if max attempts are reached.
|
|
4720
|
+
*
|
|
4721
|
+
* @param job - The job to process.
|
|
4722
|
+
* @throws {Error} If the job execution fails (to trigger retry logic in the consumer).
|
|
4723
|
+
*/
|
|
4724
|
+
async process(job) {
|
|
4725
|
+
const maxAttempts = job.maxAttempts ?? this.options.maxAttempts ?? 3;
|
|
4726
|
+
const timeout = this.options.timeout;
|
|
4727
|
+
if (!job.attempts) {
|
|
4728
|
+
job.attempts = 1;
|
|
4729
|
+
}
|
|
4730
|
+
try {
|
|
4731
|
+
if (this.options.sandboxed && this.sandboxedWorker) {
|
|
4732
|
+
await this.processSandboxed(job);
|
|
4733
|
+
} else {
|
|
4734
|
+
await this.processStandard(job, timeout);
|
|
4735
|
+
}
|
|
4736
|
+
} catch (error) {
|
|
4737
|
+
const err = error instanceof Error ? error : new Error(String(error));
|
|
4738
|
+
if (job.attempts >= maxAttempts) {
|
|
4739
|
+
await this.handleFailure(job, err);
|
|
4740
|
+
}
|
|
4741
|
+
throw err;
|
|
4742
|
+
}
|
|
4743
|
+
}
|
|
4744
|
+
/**
|
|
4745
|
+
* Processes a job in standard mode (directly in current process).
|
|
4746
|
+
*
|
|
4747
|
+
* @param job - The job to process.
|
|
4748
|
+
* @param timeout - Optional timeout in seconds.
|
|
4749
|
+
*/
|
|
4750
|
+
async processStandard(job, timeout) {
|
|
4751
|
+
if (timeout) {
|
|
4752
|
+
await Promise.race([
|
|
4753
|
+
job.handle(),
|
|
4754
|
+
new Promise(
|
|
4755
|
+
(_, reject) => setTimeout(
|
|
4756
|
+
() => reject(new Error(`Job timeout after ${timeout} seconds`)),
|
|
4757
|
+
timeout * 1e3
|
|
4758
|
+
)
|
|
4759
|
+
)
|
|
4760
|
+
]);
|
|
4761
|
+
} else {
|
|
4762
|
+
await job.handle();
|
|
4763
|
+
}
|
|
4764
|
+
}
|
|
4765
|
+
/**
|
|
4766
|
+
* Processes a job in sandboxed mode (in Worker Thread).
|
|
4767
|
+
*
|
|
4768
|
+
* @param job - The job to process.
|
|
4769
|
+
*/
|
|
4770
|
+
async processSandboxed(job) {
|
|
4771
|
+
if (!this.sandboxedWorker) {
|
|
4772
|
+
throw new Error("Sandboxed worker not initialized");
|
|
4773
|
+
}
|
|
4774
|
+
const serialized = this.serializeJob(job);
|
|
4775
|
+
await this.sandboxedWorker.execute(serialized);
|
|
4776
|
+
}
|
|
4777
|
+
/**
|
|
4778
|
+
* Serializes a Job instance for Worker Thread execution.
|
|
4779
|
+
*
|
|
4780
|
+
* @param job - The job to serialize.
|
|
4781
|
+
* @returns Serialized job data.
|
|
4782
|
+
*/
|
|
4783
|
+
serializeJob(job) {
|
|
4784
|
+
const data = JSON.stringify(job);
|
|
4785
|
+
return {
|
|
4786
|
+
id: job.id ?? `job-${Date.now()}-${Math.random()}`,
|
|
4787
|
+
type: "json",
|
|
4788
|
+
data,
|
|
4789
|
+
createdAt: Date.now(),
|
|
4790
|
+
attempts: job.attempts,
|
|
4791
|
+
maxAttempts: job.maxAttempts,
|
|
4792
|
+
delaySeconds: job.delaySeconds,
|
|
4793
|
+
groupId: job.groupId,
|
|
4794
|
+
priority: job.priority,
|
|
4795
|
+
retryAfterSeconds: job.retryAfterSeconds,
|
|
4796
|
+
retryMultiplier: job.retryMultiplier
|
|
4797
|
+
};
|
|
4798
|
+
}
|
|
4799
|
+
/**
|
|
4800
|
+
* Handles the permanent failure of a job.
|
|
4801
|
+
*
|
|
4802
|
+
* Invokes the job's `failed()` method and any global `onFailed` callback.
|
|
4803
|
+
*
|
|
4804
|
+
* @param job - The failed job.
|
|
4805
|
+
* @param error - The error that caused the failure.
|
|
4806
|
+
*/
|
|
4807
|
+
async handleFailure(job, error) {
|
|
4808
|
+
try {
|
|
4809
|
+
await job.failed(error);
|
|
4810
|
+
} catch (failedError) {
|
|
4811
|
+
console.error("[Worker] Error in job.failed():", failedError);
|
|
4812
|
+
}
|
|
4813
|
+
if (this.options.onFailed) {
|
|
4814
|
+
try {
|
|
4815
|
+
await this.options.onFailed(job, error);
|
|
4816
|
+
} catch (callbackError) {
|
|
4817
|
+
console.error("[Worker] Error in onFailed callback:", callbackError);
|
|
4818
|
+
}
|
|
4819
|
+
}
|
|
4820
|
+
}
|
|
4821
|
+
/**
|
|
4822
|
+
* Terminates the sandboxed worker and releases resources.
|
|
4823
|
+
*
|
|
4824
|
+
* Should be called when the worker is no longer needed.
|
|
4825
|
+
* Only applicable when running in sandboxed mode.
|
|
4826
|
+
*/
|
|
4827
|
+
async terminate() {
|
|
4828
|
+
if (this.sandboxedWorker) {
|
|
4829
|
+
await this.sandboxedWorker.terminate();
|
|
4830
|
+
}
|
|
4831
|
+
}
|
|
4832
|
+
};
|
|
3545
4833
|
|
|
3546
4834
|
// src/Consumer.ts
|
|
3547
|
-
var Consumer = class extends EventEmitter {
|
|
4835
|
+
var Consumer = class _Consumer extends EventEmitter {
|
|
3548
4836
|
constructor(queueManager, options) {
|
|
3549
4837
|
super();
|
|
3550
4838
|
this.queueManager = queueManager;
|
|
3551
4839
|
this.options = options;
|
|
3552
4840
|
}
|
|
4841
|
+
/**
|
|
4842
|
+
* Group limiter 的存活時間(毫秒)。
|
|
4843
|
+
* 超過此時間未使用的 group limiter 會被清理,避免記憶體洩漏。
|
|
4844
|
+
*/
|
|
4845
|
+
static GROUP_LIMITER_TTL = 6e4;
|
|
3553
4846
|
running = false;
|
|
3554
4847
|
stopRequested = false;
|
|
3555
4848
|
workerId = `worker-${crypto.randomUUID()}`;
|
|
3556
4849
|
heartbeatTimer = null;
|
|
4850
|
+
cleanupTimer = null;
|
|
3557
4851
|
groupLimiters = /* @__PURE__ */ new Map();
|
|
4852
|
+
groupLimiterLastUsed = /* @__PURE__ */ new Map();
|
|
3558
4853
|
stats = {
|
|
3559
4854
|
processed: 0,
|
|
3560
4855
|
failed: 0,
|
|
@@ -3565,7 +4860,7 @@ var Consumer = class extends EventEmitter {
|
|
|
3565
4860
|
return this.options.connection ?? this.queueManager.getDefaultConnection();
|
|
3566
4861
|
}
|
|
3567
4862
|
/**
|
|
3568
|
-
*
|
|
4863
|
+
* Logs a debug message if debug mode is enabled.
|
|
3569
4864
|
*/
|
|
3570
4865
|
log(message, data) {
|
|
3571
4866
|
if (this.options.debug) {
|
|
@@ -3579,7 +4874,12 @@ var Consumer = class extends EventEmitter {
|
|
|
3579
4874
|
}
|
|
3580
4875
|
}
|
|
3581
4876
|
/**
|
|
3582
|
-
*
|
|
4877
|
+
* Starts the consumer loop.
|
|
4878
|
+
*
|
|
4879
|
+
* Begins polling the queues and processing jobs. This method returns a promise that resolves
|
|
4880
|
+
* only when the consumer stops (if `keepAlive` is false) or throws if already running.
|
|
4881
|
+
*
|
|
4882
|
+
* @throws {Error} If the consumer is already running.
|
|
3583
4883
|
*/
|
|
3584
4884
|
async start() {
|
|
3585
4885
|
if (this.running) {
|
|
@@ -3611,10 +4911,11 @@ var Consumer = class extends EventEmitter {
|
|
|
3611
4911
|
`Consumer started on [${this.options.queues.join(", ")}] with concurrency ${concurrency}`
|
|
3612
4912
|
);
|
|
3613
4913
|
}
|
|
4914
|
+
this.startCleanupTimer();
|
|
3614
4915
|
while (this.running && !this.stopRequested) {
|
|
3615
4916
|
const capacity = concurrency - this.stats.active;
|
|
3616
4917
|
if (capacity <= 0) {
|
|
3617
|
-
await new Promise((
|
|
4918
|
+
await new Promise((resolve2) => setTimeout(resolve2, 50));
|
|
3618
4919
|
continue;
|
|
3619
4920
|
}
|
|
3620
4921
|
const eligibleQueues = [];
|
|
@@ -3636,7 +4937,7 @@ var Consumer = class extends EventEmitter {
|
|
|
3636
4937
|
eligibleQueues.push(queue);
|
|
3637
4938
|
}
|
|
3638
4939
|
if (eligibleQueues.length === 0) {
|
|
3639
|
-
await new Promise((
|
|
4940
|
+
await new Promise((resolve2) => setTimeout(resolve2, currentPollInterval));
|
|
3640
4941
|
continue;
|
|
3641
4942
|
}
|
|
3642
4943
|
let jobs = [];
|
|
@@ -3685,7 +4986,7 @@ var Consumer = class extends EventEmitter {
|
|
|
3685
4986
|
this.stats.active--;
|
|
3686
4987
|
});
|
|
3687
4988
|
}
|
|
3688
|
-
await new Promise((
|
|
4989
|
+
await new Promise((resolve2) => setTimeout(resolve2, 0));
|
|
3689
4990
|
continue;
|
|
3690
4991
|
}
|
|
3691
4992
|
} catch (error) {
|
|
@@ -3696,22 +4997,23 @@ var Consumer = class extends EventEmitter {
|
|
|
3696
4997
|
}
|
|
3697
4998
|
if (!this.stopRequested) {
|
|
3698
4999
|
if (!didBlock) {
|
|
3699
|
-
await new Promise((
|
|
5000
|
+
await new Promise((resolve2) => setTimeout(resolve2, currentPollInterval));
|
|
3700
5001
|
currentPollInterval = Math.min(currentPollInterval * backoffMultiplier, maxPollInterval);
|
|
3701
5002
|
}
|
|
3702
5003
|
} else {
|
|
3703
|
-
await new Promise((
|
|
5004
|
+
await new Promise((resolve2) => setTimeout(resolve2, 50));
|
|
3704
5005
|
}
|
|
3705
5006
|
}
|
|
3706
5007
|
this.running = false;
|
|
3707
5008
|
this.stopHeartbeat();
|
|
5009
|
+
this.stopCleanupTimer();
|
|
3708
5010
|
if (this.options.monitor) {
|
|
3709
5011
|
await this.publishLog("info", "Consumer stopped");
|
|
3710
5012
|
}
|
|
3711
5013
|
this.log("Stopped");
|
|
3712
5014
|
}
|
|
3713
5015
|
/**
|
|
3714
|
-
* Run a job with concurrency controls.
|
|
5016
|
+
* Run a job with concurrency controls and group locking.
|
|
3715
5017
|
*/
|
|
3716
5018
|
async runJob(job, worker) {
|
|
3717
5019
|
if (!job.groupId || this.options.groupJobsSequential === false) {
|
|
@@ -3722,6 +5024,7 @@ var Consumer = class extends EventEmitter {
|
|
|
3722
5024
|
limiter = pLimit(1);
|
|
3723
5025
|
this.groupLimiters.set(job.groupId, limiter);
|
|
3724
5026
|
}
|
|
5027
|
+
this.groupLimiterLastUsed.set(job.groupId, Date.now());
|
|
3725
5028
|
if (limiter.pendingCount > 0) {
|
|
3726
5029
|
this.log(`Job ${job.id} queued behind group ${job.groupId}`);
|
|
3727
5030
|
}
|
|
@@ -3730,16 +5033,18 @@ var Consumer = class extends EventEmitter {
|
|
|
3730
5033
|
});
|
|
3731
5034
|
if (limiter.activeCount === 0 && limiter.pendingCount === 0) {
|
|
3732
5035
|
this.groupLimiters.delete(job.groupId);
|
|
5036
|
+
this.groupLimiterLastUsed.delete(job.groupId);
|
|
3733
5037
|
}
|
|
3734
5038
|
}
|
|
3735
5039
|
/**
|
|
3736
|
-
*
|
|
5040
|
+
* Delegates the actual processing to the worker and handles stats/logging.
|
|
3737
5041
|
*/
|
|
3738
5042
|
async handleJob(job, worker) {
|
|
3739
5043
|
const currentQueue = job.queueName || "default";
|
|
3740
5044
|
const startTime = Date.now();
|
|
3741
5045
|
this.log(`Processing job ${job.id} from ${currentQueue}`);
|
|
3742
5046
|
this.emit("job:started", { job, queue: currentQueue });
|
|
5047
|
+
this.options.onEvent?.("job:started", { jobId: job.id, queue: currentQueue });
|
|
3743
5048
|
if (this.options.monitor) {
|
|
3744
5049
|
await this.publishLog("info", `Processing job: ${job.id}`, job.id);
|
|
3745
5050
|
}
|
|
@@ -3748,14 +5053,32 @@ var Consumer = class extends EventEmitter {
|
|
|
3748
5053
|
const duration = Date.now() - startTime;
|
|
3749
5054
|
this.stats.processed++;
|
|
3750
5055
|
this.emit("job:processed", { job, duration, queue: currentQueue });
|
|
5056
|
+
this.options.onEvent?.("job:processed", { jobId: job.id, duration, queue: currentQueue });
|
|
3751
5057
|
this.log(`Completed job ${job.id} in ${duration}ms`);
|
|
3752
5058
|
if (this.options.monitor) {
|
|
3753
5059
|
await this.publishLog("success", `Completed job: ${job.id}`, job.id);
|
|
3754
5060
|
}
|
|
5061
|
+
if (this.options.maxRequests && this.stats.processed >= this.options.maxRequests) {
|
|
5062
|
+
this.log(`Max requests reached: ${this.stats.processed}/${this.options.maxRequests}`);
|
|
5063
|
+
this.stopRequested = true;
|
|
5064
|
+
this.emit("max_requests_reached", {
|
|
5065
|
+
processed: this.stats.processed,
|
|
5066
|
+
maxRequests: this.options.maxRequests
|
|
5067
|
+
});
|
|
5068
|
+
if (this.options.monitor) {
|
|
5069
|
+
await this.publishLog("info", `Max requests reached: ${this.stats.processed}`, job.id);
|
|
5070
|
+
}
|
|
5071
|
+
}
|
|
3755
5072
|
} catch (err) {
|
|
3756
5073
|
const error = err;
|
|
3757
5074
|
const duration = Date.now() - startTime;
|
|
3758
5075
|
this.emit("job:failed", { job, error, duration, queue: currentQueue });
|
|
5076
|
+
this.options.onEvent?.("job:failed", {
|
|
5077
|
+
jobId: job.id,
|
|
5078
|
+
error: error.message,
|
|
5079
|
+
duration,
|
|
5080
|
+
queue: currentQueue
|
|
5081
|
+
});
|
|
3759
5082
|
this.log(`Failed job ${job.id} in ${duration}ms`, { error: error.message });
|
|
3760
5083
|
this.stats.failed++;
|
|
3761
5084
|
if (this.options.monitor) {
|
|
@@ -3781,6 +5104,7 @@ var Consumer = class extends EventEmitter {
|
|
|
3781
5104
|
}
|
|
3782
5105
|
} else {
|
|
3783
5106
|
this.emit("job:failed_permanently", { job, error });
|
|
5107
|
+
this.options.onEvent?.("job:failed_permanently", { jobId: job.id, error: error.message });
|
|
3784
5108
|
this.log(`Job ${job.id} failed permanently`);
|
|
3785
5109
|
await this.queueManager.fail(job, error).catch((dlqErr) => {
|
|
3786
5110
|
console.error("[Consumer] Error moving job to DLQ:", dlqErr);
|
|
@@ -3838,6 +5162,48 @@ var Consumer = class extends EventEmitter {
|
|
|
3838
5162
|
this.heartbeatTimer = null;
|
|
3839
5163
|
}
|
|
3840
5164
|
}
|
|
5165
|
+
/**
|
|
5166
|
+
* 清理閒置的 group limiters。
|
|
5167
|
+
*
|
|
5168
|
+
* 定期檢查並移除超過 TTL 且沒有 active/pending jobs 的 group limiters,
|
|
5169
|
+
* 避免記憶體洩漏。
|
|
5170
|
+
*/
|
|
5171
|
+
cleanupGroupLimiters() {
|
|
5172
|
+
const now = Date.now();
|
|
5173
|
+
const groupsToDelete = [];
|
|
5174
|
+
for (const [groupId, lastUsed] of this.groupLimiterLastUsed.entries()) {
|
|
5175
|
+
const limiter = this.groupLimiters.get(groupId);
|
|
5176
|
+
if (!limiter) {
|
|
5177
|
+
groupsToDelete.push(groupId);
|
|
5178
|
+
continue;
|
|
5179
|
+
}
|
|
5180
|
+
if (now - lastUsed > _Consumer.GROUP_LIMITER_TTL && limiter.activeCount === 0 && limiter.pendingCount === 0) {
|
|
5181
|
+
this.groupLimiters.delete(groupId);
|
|
5182
|
+
groupsToDelete.push(groupId);
|
|
5183
|
+
this.log(`Cleaned up inactive group limiter: ${groupId}`);
|
|
5184
|
+
}
|
|
5185
|
+
}
|
|
5186
|
+
for (const groupId of groupsToDelete) {
|
|
5187
|
+
this.groupLimiterLastUsed.delete(groupId);
|
|
5188
|
+
}
|
|
5189
|
+
}
|
|
5190
|
+
/**
|
|
5191
|
+
* 啟動 group limiter 清理計時器。
|
|
5192
|
+
*/
|
|
5193
|
+
startCleanupTimer() {
|
|
5194
|
+
this.cleanupTimer = setInterval(() => {
|
|
5195
|
+
this.cleanupGroupLimiters();
|
|
5196
|
+
}, 3e4);
|
|
5197
|
+
}
|
|
5198
|
+
/**
|
|
5199
|
+
* 停止 group limiter 清理計時器。
|
|
5200
|
+
*/
|
|
5201
|
+
stopCleanupTimer() {
|
|
5202
|
+
if (this.cleanupTimer) {
|
|
5203
|
+
clearInterval(this.cleanupTimer);
|
|
5204
|
+
this.cleanupTimer = null;
|
|
5205
|
+
}
|
|
5206
|
+
}
|
|
3841
5207
|
async publishLog(level, message, jobId) {
|
|
3842
5208
|
try {
|
|
3843
5209
|
const driver = this.queueManager.getDriver(this.connectionName);
|
|
@@ -3858,29 +5224,39 @@ var Consumer = class extends EventEmitter {
|
|
|
3858
5224
|
}
|
|
3859
5225
|
}
|
|
3860
5226
|
/**
|
|
3861
|
-
*
|
|
5227
|
+
* Gracefully stops the consumer.
|
|
5228
|
+
*
|
|
5229
|
+
* Signals the consumer to stop accepting new jobs and waits for currently running jobs
|
|
5230
|
+
* to complete.
|
|
5231
|
+
*
|
|
5232
|
+
* @returns A promise that resolves when the consumer has fully stopped.
|
|
3862
5233
|
*/
|
|
3863
5234
|
async stop() {
|
|
3864
5235
|
this.log("Stopping...");
|
|
3865
5236
|
this.stopRequested = true;
|
|
5237
|
+
this.stopCleanupTimer();
|
|
3866
5238
|
while (this.running) {
|
|
3867
|
-
await new Promise((
|
|
5239
|
+
await new Promise((resolve2) => setTimeout(resolve2, 100));
|
|
3868
5240
|
}
|
|
3869
5241
|
}
|
|
3870
5242
|
/**
|
|
3871
|
-
*
|
|
5243
|
+
* Checks if the consumer is currently active.
|
|
5244
|
+
*
|
|
5245
|
+
* @returns True if the consumer loop is running.
|
|
3872
5246
|
*/
|
|
3873
5247
|
isRunning() {
|
|
3874
5248
|
return this.running;
|
|
3875
5249
|
}
|
|
3876
5250
|
/**
|
|
3877
|
-
*
|
|
5251
|
+
* Retrieves current operational statistics.
|
|
5252
|
+
*
|
|
5253
|
+
* @returns An object containing processed, failed, retried, and active job counts.
|
|
3878
5254
|
*/
|
|
3879
5255
|
getStats() {
|
|
3880
5256
|
return { ...this.stats };
|
|
3881
5257
|
}
|
|
3882
5258
|
/**
|
|
3883
|
-
*
|
|
5259
|
+
* Resets the internal statistics counters.
|
|
3884
5260
|
*/
|
|
3885
5261
|
resetStats() {
|
|
3886
5262
|
this.stats.processed = 0;
|
|
@@ -3890,7 +5266,165 @@ var Consumer = class extends EventEmitter {
|
|
|
3890
5266
|
};
|
|
3891
5267
|
|
|
3892
5268
|
// src/index.ts
|
|
5269
|
+
init_BullMQDriver();
|
|
3893
5270
|
init_DatabaseDriver();
|
|
5271
|
+
|
|
5272
|
+
// src/drivers/GrpcDriver.ts
|
|
5273
|
+
import path from "path";
|
|
5274
|
+
import * as grpc from "@grpc/grpc-js";
|
|
5275
|
+
import * as protoLoader from "@grpc/proto-loader";
|
|
5276
|
+
var GrpcDriver = class {
|
|
5277
|
+
client;
|
|
5278
|
+
constructor(config) {
|
|
5279
|
+
const protoPath = config.protoPath || path.resolve(__dirname, "../../proto/queue.proto");
|
|
5280
|
+
const packageDefinition = protoLoader.loadSync(protoPath, {
|
|
5281
|
+
keepCase: true,
|
|
5282
|
+
longs: String,
|
|
5283
|
+
enums: String,
|
|
5284
|
+
defaults: true,
|
|
5285
|
+
oneofs: true
|
|
5286
|
+
});
|
|
5287
|
+
const packageName = config.packageName || "stream";
|
|
5288
|
+
const serviceName = config.serviceName || "QueueService";
|
|
5289
|
+
const pkg = packageDefinition[packageName];
|
|
5290
|
+
if (!pkg) {
|
|
5291
|
+
throw new Error(`Package '${packageName}' not found in proto definition at ${protoPath}`);
|
|
5292
|
+
}
|
|
5293
|
+
const Service = pkg[serviceName];
|
|
5294
|
+
if (!Service) {
|
|
5295
|
+
throw new Error(`Service '${serviceName}' not found in package '${packageName}'`);
|
|
5296
|
+
}
|
|
5297
|
+
const credentials2 = this.getCredentials(config);
|
|
5298
|
+
this.client = new Service(config.url, credentials2);
|
|
5299
|
+
}
|
|
5300
|
+
getCredentials(config) {
|
|
5301
|
+
if (config.credentials) {
|
|
5302
|
+
if (config.credentials.rootCerts) {
|
|
5303
|
+
return grpc.credentials.createSsl(
|
|
5304
|
+
config.credentials.rootCerts,
|
|
5305
|
+
config.credentials.privateKey,
|
|
5306
|
+
config.credentials.certChain
|
|
5307
|
+
);
|
|
5308
|
+
}
|
|
5309
|
+
}
|
|
5310
|
+
return grpc.credentials.createInsecure();
|
|
5311
|
+
}
|
|
5312
|
+
async push(queue, job, options) {
|
|
5313
|
+
const req = {
|
|
5314
|
+
queue,
|
|
5315
|
+
job: this.toProtoJob(job),
|
|
5316
|
+
options: {
|
|
5317
|
+
groupId: options?.groupId,
|
|
5318
|
+
priority: String(options?.priority || "")
|
|
5319
|
+
}
|
|
5320
|
+
};
|
|
5321
|
+
return new Promise((resolve2, reject) => {
|
|
5322
|
+
;
|
|
5323
|
+
this.client.Push(req, (err, response) => {
|
|
5324
|
+
if (err) {
|
|
5325
|
+
return reject(err);
|
|
5326
|
+
}
|
|
5327
|
+
if (!response.success) {
|
|
5328
|
+
return reject(new Error(response.message || "Unknown gRPC error"));
|
|
5329
|
+
}
|
|
5330
|
+
resolve2();
|
|
5331
|
+
});
|
|
5332
|
+
});
|
|
5333
|
+
}
|
|
5334
|
+
async pop(queue) {
|
|
5335
|
+
return new Promise((resolve2, reject) => {
|
|
5336
|
+
;
|
|
5337
|
+
this.client.Pull({ queue }, (err, response) => {
|
|
5338
|
+
if (err) {
|
|
5339
|
+
return reject(err);
|
|
5340
|
+
}
|
|
5341
|
+
if (!response.job || !response.job.id) {
|
|
5342
|
+
return resolve2(null);
|
|
5343
|
+
}
|
|
5344
|
+
resolve2(this.fromProtoJob(response.job));
|
|
5345
|
+
});
|
|
5346
|
+
});
|
|
5347
|
+
}
|
|
5348
|
+
async size(queue) {
|
|
5349
|
+
return new Promise((resolve2, reject) => {
|
|
5350
|
+
;
|
|
5351
|
+
this.client.Size({ queue }, (err, response) => {
|
|
5352
|
+
if (err) {
|
|
5353
|
+
return reject(err);
|
|
5354
|
+
}
|
|
5355
|
+
resolve2(response.size || 0);
|
|
5356
|
+
});
|
|
5357
|
+
});
|
|
5358
|
+
}
|
|
5359
|
+
async clear(queue) {
|
|
5360
|
+
return new Promise((resolve2, reject) => {
|
|
5361
|
+
;
|
|
5362
|
+
this.client.Clear({ queue }, (err) => {
|
|
5363
|
+
if (err) {
|
|
5364
|
+
return reject(err);
|
|
5365
|
+
}
|
|
5366
|
+
resolve2();
|
|
5367
|
+
});
|
|
5368
|
+
});
|
|
5369
|
+
}
|
|
5370
|
+
async acknowledge(messageId) {
|
|
5371
|
+
return new Promise((resolve2, reject) => {
|
|
5372
|
+
;
|
|
5373
|
+
this.client.Acknowledge({ jobId: messageId }, (err) => {
|
|
5374
|
+
if (err) {
|
|
5375
|
+
return reject(err);
|
|
5376
|
+
}
|
|
5377
|
+
resolve2();
|
|
5378
|
+
});
|
|
5379
|
+
});
|
|
5380
|
+
}
|
|
5381
|
+
async stats(queue) {
|
|
5382
|
+
return new Promise((resolve2, reject) => {
|
|
5383
|
+
;
|
|
5384
|
+
this.client.Stats({ queue }, (err, response) => {
|
|
5385
|
+
if (err) {
|
|
5386
|
+
return reject(err);
|
|
5387
|
+
}
|
|
5388
|
+
resolve2({
|
|
5389
|
+
queue: response.queue,
|
|
5390
|
+
size: response.size,
|
|
5391
|
+
delayed: response.delayed,
|
|
5392
|
+
failed: response.failed,
|
|
5393
|
+
reserved: response.reserved
|
|
5394
|
+
});
|
|
5395
|
+
});
|
|
5396
|
+
});
|
|
5397
|
+
}
|
|
5398
|
+
toProtoJob(job) {
|
|
5399
|
+
return {
|
|
5400
|
+
...job,
|
|
5401
|
+
priority: job.priority ? String(job.priority) : void 0,
|
|
5402
|
+
createdAt: String(job.createdAt),
|
|
5403
|
+
// Long as string
|
|
5404
|
+
failedAt: job.failedAt ? String(job.failedAt) : void 0
|
|
5405
|
+
};
|
|
5406
|
+
}
|
|
5407
|
+
fromProtoJob(protoJob) {
|
|
5408
|
+
return {
|
|
5409
|
+
id: protoJob.id,
|
|
5410
|
+
type: protoJob.type,
|
|
5411
|
+
data: protoJob.data,
|
|
5412
|
+
className: protoJob.className,
|
|
5413
|
+
createdAt: Number(protoJob.createdAt),
|
|
5414
|
+
delaySeconds: protoJob.delaySeconds,
|
|
5415
|
+
attempts: protoJob.attempts,
|
|
5416
|
+
maxAttempts: protoJob.maxAttempts,
|
|
5417
|
+
groupId: protoJob.groupId,
|
|
5418
|
+
priority: protoJob.priority,
|
|
5419
|
+
failedAt: protoJob.failedAt ? Number(protoJob.failedAt) : void 0,
|
|
5420
|
+
error: protoJob.error,
|
|
5421
|
+
retryAfterSeconds: protoJob.retryAfterSeconds,
|
|
5422
|
+
retryMultiplier: protoJob.retryMultiplier
|
|
5423
|
+
};
|
|
5424
|
+
}
|
|
5425
|
+
};
|
|
5426
|
+
|
|
5427
|
+
// src/index.ts
|
|
3894
5428
|
init_KafkaDriver();
|
|
3895
5429
|
|
|
3896
5430
|
// src/drivers/MemoryDriver.ts
|
|
@@ -3901,7 +5435,11 @@ var MemoryDriver = class {
|
|
|
3901
5435
|
this.maxSize = config.maxSize ?? Infinity;
|
|
3902
5436
|
}
|
|
3903
5437
|
/**
|
|
3904
|
-
*
|
|
5438
|
+
* Pushes a job to the in-memory queue.
|
|
5439
|
+
*
|
|
5440
|
+
* @param queue - The queue name.
|
|
5441
|
+
* @param job - The serialized job.
|
|
5442
|
+
* @throws {Error} If the queue has reached `maxSize`.
|
|
3905
5443
|
*/
|
|
3906
5444
|
async push(queue, job) {
|
|
3907
5445
|
if (!this.queues.has(queue)) {
|
|
@@ -3914,7 +5452,12 @@ var MemoryDriver = class {
|
|
|
3914
5452
|
q.push(job);
|
|
3915
5453
|
}
|
|
3916
5454
|
/**
|
|
3917
|
-
*
|
|
5455
|
+
* Pops the next available job from the queue.
|
|
5456
|
+
*
|
|
5457
|
+
* Respects `delaySeconds` by checking the job's `createdAt` timestamp.
|
|
5458
|
+
*
|
|
5459
|
+
* @param queue - The queue name.
|
|
5460
|
+
* @returns The job or `null`.
|
|
3918
5461
|
*/
|
|
3919
5462
|
async pop(queue) {
|
|
3920
5463
|
const queueJobs = this.queues.get(queue);
|
|
@@ -3931,19 +5474,28 @@ var MemoryDriver = class {
|
|
|
3931
5474
|
return queueJobs.splice(availableIndex, 1)[0];
|
|
3932
5475
|
}
|
|
3933
5476
|
/**
|
|
3934
|
-
*
|
|
5477
|
+
* Returns the number of jobs in the queue.
|
|
5478
|
+
*
|
|
5479
|
+
* @param queue - The queue name.
|
|
3935
5480
|
*/
|
|
3936
5481
|
async size(queue) {
|
|
3937
5482
|
return this.queues.get(queue)?.length ?? 0;
|
|
3938
5483
|
}
|
|
3939
5484
|
/**
|
|
3940
|
-
*
|
|
5485
|
+
* Clears all jobs from the queue.
|
|
5486
|
+
*
|
|
5487
|
+
* @param queue - The queue name.
|
|
3941
5488
|
*/
|
|
3942
5489
|
async clear(queue) {
|
|
3943
5490
|
this.queues.delete(queue);
|
|
3944
5491
|
}
|
|
3945
5492
|
/**
|
|
3946
|
-
*
|
|
5493
|
+
* Moves a job to the failed (DLQ) list.
|
|
5494
|
+
*
|
|
5495
|
+
* In MemoryDriver, this simply pushes to a `failed:{queue}` list.
|
|
5496
|
+
*
|
|
5497
|
+
* @param queue - The original queue name.
|
|
5498
|
+
* @param job - The failed job.
|
|
3947
5499
|
*/
|
|
3948
5500
|
async fail(queue, job) {
|
|
3949
5501
|
const failedQueue = `failed:${queue}`;
|
|
@@ -3953,7 +5505,11 @@ var MemoryDriver = class {
|
|
|
3953
5505
|
this.queues.get(failedQueue)?.push(job);
|
|
3954
5506
|
}
|
|
3955
5507
|
/**
|
|
3956
|
-
*
|
|
5508
|
+
* Retrieves statistics for the queue.
|
|
5509
|
+
*
|
|
5510
|
+
* Calculates pending, delayed, and failed counts by iterating through the list.
|
|
5511
|
+
*
|
|
5512
|
+
* @param queue - The queue name.
|
|
3957
5513
|
*/
|
|
3958
5514
|
async stats(queue) {
|
|
3959
5515
|
const jobs = this.queues.get(queue) || [];
|
|
@@ -3976,7 +5532,10 @@ var MemoryDriver = class {
|
|
|
3976
5532
|
};
|
|
3977
5533
|
}
|
|
3978
5534
|
/**
|
|
3979
|
-
*
|
|
5535
|
+
* Pushes multiple jobs to the queue.
|
|
5536
|
+
*
|
|
5537
|
+
* @param queue - The queue name.
|
|
5538
|
+
* @param jobs - Array of jobs.
|
|
3980
5539
|
*/
|
|
3981
5540
|
async pushMany(queue, jobs) {
|
|
3982
5541
|
if (!this.queues.has(queue)) {
|
|
@@ -3985,7 +5544,10 @@ var MemoryDriver = class {
|
|
|
3985
5544
|
this.queues.get(queue)?.push(...jobs);
|
|
3986
5545
|
}
|
|
3987
5546
|
/**
|
|
3988
|
-
*
|
|
5547
|
+
* Pops multiple jobs from the queue.
|
|
5548
|
+
*
|
|
5549
|
+
* @param queue - The queue name.
|
|
5550
|
+
* @param count - Max jobs to pop.
|
|
3989
5551
|
*/
|
|
3990
5552
|
async popMany(queue, count) {
|
|
3991
5553
|
const results = [];
|
|
@@ -3999,6 +5561,12 @@ var MemoryDriver = class {
|
|
|
3999
5561
|
}
|
|
4000
5562
|
return results;
|
|
4001
5563
|
}
|
|
5564
|
+
/**
|
|
5565
|
+
* Lists all active queues in memory.
|
|
5566
|
+
*/
|
|
5567
|
+
async getQueues() {
|
|
5568
|
+
return Array.from(this.queues.keys()).filter((q) => !q.startsWith("failed:")).sort();
|
|
5569
|
+
}
|
|
4002
5570
|
};
|
|
4003
5571
|
|
|
4004
5572
|
// src/index.ts
|
|
@@ -4009,78 +5577,128 @@ init_SQSDriver();
|
|
|
4009
5577
|
// src/Job.ts
|
|
4010
5578
|
var Job = class {
|
|
4011
5579
|
/**
|
|
4012
|
-
* Unique job
|
|
5580
|
+
* Unique identifier for the job instance.
|
|
5581
|
+
*
|
|
5582
|
+
* Assigned automatically when the job is pushed to the queue.
|
|
4013
5583
|
*/
|
|
4014
5584
|
id;
|
|
4015
5585
|
/**
|
|
4016
|
-
*
|
|
5586
|
+
* The name of the queue where this job will be processed.
|
|
4017
5587
|
*/
|
|
4018
5588
|
queueName;
|
|
4019
5589
|
/**
|
|
4020
|
-
*
|
|
5590
|
+
* The name of the connection used to transport this job.
|
|
4021
5591
|
*/
|
|
4022
5592
|
connectionName;
|
|
4023
5593
|
/**
|
|
4024
|
-
* Delay before
|
|
5594
|
+
* Delay in seconds before the job becomes available for processing.
|
|
4025
5595
|
*/
|
|
4026
5596
|
delaySeconds;
|
|
4027
5597
|
/**
|
|
4028
|
-
*
|
|
5598
|
+
* The current attempt number (starts at 1).
|
|
4029
5599
|
*/
|
|
4030
5600
|
attempts;
|
|
4031
5601
|
/**
|
|
4032
|
-
*
|
|
5602
|
+
* The maximum number of retry attempts allowed.
|
|
5603
|
+
*
|
|
5604
|
+
* Can be overridden by the worker configuration or per-job using `maxAttempts`.
|
|
4033
5605
|
*/
|
|
4034
5606
|
maxAttempts;
|
|
4035
5607
|
/**
|
|
4036
|
-
* Group ID for
|
|
5608
|
+
* Group ID for sequential processing.
|
|
5609
|
+
*
|
|
5610
|
+
* Jobs with the same `groupId` will be processed in strict order (FIFO)
|
|
5611
|
+
* if the consumer supports it.
|
|
4037
5612
|
*/
|
|
4038
5613
|
groupId;
|
|
4039
5614
|
/**
|
|
4040
|
-
*
|
|
5615
|
+
* Priority level of the job.
|
|
4041
5616
|
*/
|
|
4042
5617
|
priority;
|
|
4043
5618
|
/**
|
|
4044
|
-
* Initial
|
|
5619
|
+
* Initial delay in seconds before the first retry attempt.
|
|
5620
|
+
*
|
|
5621
|
+
* Used for exponential backoff calculation.
|
|
4045
5622
|
*/
|
|
4046
5623
|
retryAfterSeconds;
|
|
4047
5624
|
/**
|
|
4048
|
-
*
|
|
5625
|
+
* Multiplier applied to the retry delay for each subsequent attempt.
|
|
5626
|
+
*
|
|
5627
|
+
* Used for exponential backoff calculation.
|
|
4049
5628
|
*/
|
|
4050
5629
|
retryMultiplier;
|
|
4051
5630
|
/**
|
|
4052
|
-
*
|
|
5631
|
+
* Sets the target queue for the job.
|
|
5632
|
+
*
|
|
5633
|
+
* @param queue - The name of the target queue.
|
|
5634
|
+
* @returns The job instance for chaining.
|
|
5635
|
+
*
|
|
5636
|
+
* @example
|
|
5637
|
+
* ```typescript
|
|
5638
|
+
* job.onQueue('billing');
|
|
5639
|
+
* ```
|
|
4053
5640
|
*/
|
|
4054
5641
|
onQueue(queue) {
|
|
4055
5642
|
this.queueName = queue;
|
|
4056
5643
|
return this;
|
|
4057
5644
|
}
|
|
4058
5645
|
/**
|
|
4059
|
-
*
|
|
5646
|
+
* Sets the target connection for the job.
|
|
5647
|
+
*
|
|
5648
|
+
* @param connection - The name of the connection (e.g., 'redis').
|
|
5649
|
+
* @returns The job instance for chaining.
|
|
5650
|
+
*
|
|
5651
|
+
* @example
|
|
5652
|
+
* ```typescript
|
|
5653
|
+
* job.onConnection('sqs-primary');
|
|
5654
|
+
* ```
|
|
4060
5655
|
*/
|
|
4061
5656
|
onConnection(connection) {
|
|
4062
5657
|
this.connectionName = connection;
|
|
4063
5658
|
return this;
|
|
4064
5659
|
}
|
|
4065
5660
|
/**
|
|
4066
|
-
*
|
|
4067
|
-
*
|
|
5661
|
+
* Sets the priority of the job.
|
|
5662
|
+
*
|
|
5663
|
+
* @param priority - The priority level (e.g., 'high', 10).
|
|
5664
|
+
* @returns The job instance for chaining.
|
|
5665
|
+
*
|
|
5666
|
+
* @example
|
|
5667
|
+
* ```typescript
|
|
5668
|
+
* job.withPriority('high');
|
|
5669
|
+
* ```
|
|
4068
5670
|
*/
|
|
4069
5671
|
withPriority(priority) {
|
|
4070
5672
|
this.priority = priority;
|
|
4071
5673
|
return this;
|
|
4072
5674
|
}
|
|
4073
5675
|
/**
|
|
4074
|
-
*
|
|
5676
|
+
* Delays the job execution.
|
|
5677
|
+
*
|
|
5678
|
+
* @param delay - Delay in seconds.
|
|
5679
|
+
* @returns The job instance for chaining.
|
|
5680
|
+
*
|
|
5681
|
+
* @example
|
|
5682
|
+
* ```typescript
|
|
5683
|
+
* job.delay(60); // Run after 1 minute
|
|
5684
|
+
* ```
|
|
4075
5685
|
*/
|
|
4076
5686
|
delay(delay) {
|
|
4077
5687
|
this.delaySeconds = delay;
|
|
4078
5688
|
return this;
|
|
4079
5689
|
}
|
|
4080
5690
|
/**
|
|
4081
|
-
*
|
|
4082
|
-
*
|
|
4083
|
-
* @param
|
|
5691
|
+
* Configures the exponential backoff strategy for retries.
|
|
5692
|
+
*
|
|
5693
|
+
* @param seconds - Initial delay in seconds before the first retry.
|
|
5694
|
+
* @param multiplier - Factor by which the delay increases for each subsequent attempt (default: 2).
|
|
5695
|
+
* @returns The job instance for chaining.
|
|
5696
|
+
*
|
|
5697
|
+
* @example
|
|
5698
|
+
* ```typescript
|
|
5699
|
+
* // Wait 5s, then 10s, then 20s...
|
|
5700
|
+
* job.backoff(5, 2);
|
|
5701
|
+
* ```
|
|
4084
5702
|
*/
|
|
4085
5703
|
backoff(seconds, multiplier = 2) {
|
|
4086
5704
|
this.retryAfterSeconds = seconds;
|
|
@@ -4088,9 +5706,17 @@ var Job = class {
|
|
|
4088
5706
|
return this;
|
|
4089
5707
|
}
|
|
4090
5708
|
/**
|
|
4091
|
-
*
|
|
4092
|
-
*
|
|
4093
|
-
*
|
|
5709
|
+
* Calculates the delay for the next retry attempt based on the backoff strategy.
|
|
5710
|
+
*
|
|
5711
|
+
* Uses the formula: `initialDelay * multiplier^(attempt - 1)`, capped at 1 hour.
|
|
5712
|
+
*
|
|
5713
|
+
* @param attempt - The current attempt number (1-based).
|
|
5714
|
+
* @returns The calculated delay in milliseconds.
|
|
5715
|
+
*
|
|
5716
|
+
* @example
|
|
5717
|
+
* ```typescript
|
|
5718
|
+
* const nextDelay = job.getRetryDelay(2);
|
|
5719
|
+
* ```
|
|
4094
5720
|
*/
|
|
4095
5721
|
getRetryDelay(attempt) {
|
|
4096
5722
|
const initialDelay = (this.retryAfterSeconds ?? 1) * 1e3;
|
|
@@ -4098,28 +5724,40 @@ var Job = class {
|
|
|
4098
5724
|
return Math.min(initialDelay * multiplier ** (attempt - 1), 36e5);
|
|
4099
5725
|
}
|
|
4100
5726
|
/**
|
|
4101
|
-
*
|
|
5727
|
+
* Optional handler for when the job has permanently failed.
|
|
4102
5728
|
*
|
|
4103
|
-
* Called when the job
|
|
4104
|
-
*
|
|
5729
|
+
* Called when the job has exhausted all retry attempts.
|
|
5730
|
+
* Useful for cleaning up resources, sending alerts, or logging.
|
|
4105
5731
|
*
|
|
4106
|
-
* @param
|
|
5732
|
+
* @param _error - The error that caused the final failure.
|
|
5733
|
+
*
|
|
5734
|
+
* @example
|
|
5735
|
+
* ```typescript
|
|
5736
|
+
* async failed(error: Error) {
|
|
5737
|
+
* await notifyAdmin(`Job failed: ${error.message}`);
|
|
5738
|
+
* }
|
|
5739
|
+
* ```
|
|
4107
5740
|
*/
|
|
4108
5741
|
async failed(_error) {
|
|
4109
5742
|
}
|
|
4110
5743
|
};
|
|
4111
5744
|
|
|
5745
|
+
// src/index.ts
|
|
5746
|
+
init_DistributedLock();
|
|
5747
|
+
|
|
4112
5748
|
// src/serializers/CachedSerializer.ts
|
|
4113
5749
|
var CachedSerializer = class {
|
|
4114
5750
|
/**
|
|
4115
|
-
* @param delegate - The
|
|
5751
|
+
* @param delegate - The underlying serializer to use.
|
|
4116
5752
|
*/
|
|
4117
5753
|
constructor(delegate) {
|
|
4118
5754
|
this.delegate = delegate;
|
|
4119
5755
|
}
|
|
4120
5756
|
cache = /* @__PURE__ */ new WeakMap();
|
|
4121
5757
|
/**
|
|
4122
|
-
*
|
|
5758
|
+
* Serializes the job, returning a cached result if available.
|
|
5759
|
+
*
|
|
5760
|
+
* @param job - The job to serialize.
|
|
4123
5761
|
*/
|
|
4124
5762
|
serialize(job) {
|
|
4125
5763
|
if (this.cache.has(job)) {
|
|
@@ -4130,8 +5768,9 @@ var CachedSerializer = class {
|
|
|
4130
5768
|
return serialized;
|
|
4131
5769
|
}
|
|
4132
5770
|
/**
|
|
4133
|
-
*
|
|
4134
|
-
*
|
|
5771
|
+
* Deserializes a job.
|
|
5772
|
+
*
|
|
5773
|
+
* Caching is not applied here as deserialization always produces new instances.
|
|
4135
5774
|
*/
|
|
4136
5775
|
deserialize(serialized) {
|
|
4137
5776
|
return this.delegate.deserialize(serialized);
|
|
@@ -4141,19 +5780,21 @@ var CachedSerializer = class {
|
|
|
4141
5780
|
// src/serializers/ClassNameSerializer.ts
|
|
4142
5781
|
var ClassNameSerializer = class {
|
|
4143
5782
|
/**
|
|
4144
|
-
*
|
|
5783
|
+
* Registry of job classes, mapped by class name.
|
|
4145
5784
|
*/
|
|
4146
5785
|
jobClasses = /* @__PURE__ */ new Map();
|
|
4147
5786
|
/**
|
|
4148
|
-
*
|
|
4149
|
-
*
|
|
5787
|
+
* Registers a Job class for serialization.
|
|
5788
|
+
*
|
|
5789
|
+
* @param jobClass - The job class constructor.
|
|
4150
5790
|
*/
|
|
4151
5791
|
register(jobClass) {
|
|
4152
5792
|
this.jobClasses.set(jobClass.name, jobClass);
|
|
4153
5793
|
}
|
|
4154
5794
|
/**
|
|
4155
|
-
*
|
|
4156
|
-
*
|
|
5795
|
+
* Registers multiple Job classes at once.
|
|
5796
|
+
*
|
|
5797
|
+
* @param jobClasses - An array of job class constructors.
|
|
4157
5798
|
*/
|
|
4158
5799
|
registerMany(jobClasses) {
|
|
4159
5800
|
for (const jobClass of jobClasses) {
|
|
@@ -4161,7 +5802,11 @@ var ClassNameSerializer = class {
|
|
|
4161
5802
|
}
|
|
4162
5803
|
}
|
|
4163
5804
|
/**
|
|
4164
|
-
*
|
|
5805
|
+
* Serializes a Job instance.
|
|
5806
|
+
*
|
|
5807
|
+
* Captures the class name and all enumerable properties.
|
|
5808
|
+
*
|
|
5809
|
+
* @param job - The job to serialize.
|
|
4165
5810
|
*/
|
|
4166
5811
|
serialize(job) {
|
|
4167
5812
|
const id = job.id || `${Date.now()}-${crypto.randomUUID()}`;
|
|
@@ -4188,7 +5833,12 @@ var ClassNameSerializer = class {
|
|
|
4188
5833
|
};
|
|
4189
5834
|
}
|
|
4190
5835
|
/**
|
|
4191
|
-
*
|
|
5836
|
+
* Deserializes a Job instance.
|
|
5837
|
+
*
|
|
5838
|
+
* Instantiates the class matching `className` and assigns properties.
|
|
5839
|
+
*
|
|
5840
|
+
* @param serialized - The serialized job.
|
|
5841
|
+
* @throws {Error} If the job class is not registered.
|
|
4192
5842
|
*/
|
|
4193
5843
|
deserialize(serialized) {
|
|
4194
5844
|
if (serialized.type !== "class") {
|
|
@@ -4237,7 +5887,7 @@ var ClassNameSerializer = class {
|
|
|
4237
5887
|
// src/serializers/JsonSerializer.ts
|
|
4238
5888
|
var JsonSerializer = class {
|
|
4239
5889
|
/**
|
|
4240
|
-
*
|
|
5890
|
+
* Serializes a job to a JSON object.
|
|
4241
5891
|
*/
|
|
4242
5892
|
serialize(job) {
|
|
4243
5893
|
const id = job.id || `${Date.now()}-${crypto.randomUUID()}`;
|
|
@@ -4260,7 +5910,9 @@ var JsonSerializer = class {
|
|
|
4260
5910
|
};
|
|
4261
5911
|
}
|
|
4262
5912
|
/**
|
|
4263
|
-
*
|
|
5913
|
+
* Deserializes a JSON object into a basic Job-like object.
|
|
5914
|
+
*
|
|
5915
|
+
* Note: The result is NOT an instance of the original Job class.
|
|
4264
5916
|
*/
|
|
4265
5917
|
deserialize(serialized) {
|
|
4266
5918
|
if (serialized.type !== "json") {
|
|
@@ -4342,9 +5994,18 @@ var QueueManager = class {
|
|
|
4342
5994
|
}
|
|
4343
5995
|
}
|
|
4344
5996
|
/**
|
|
4345
|
-
*
|
|
4346
|
-
*
|
|
4347
|
-
*
|
|
5997
|
+
* Registers a new queue connection with the manager.
|
|
5998
|
+
*
|
|
5999
|
+
* Dynamically loads the required driver implementation based on the configuration.
|
|
6000
|
+
*
|
|
6001
|
+
* @param name - The name of the connection (e.g., 'primary').
|
|
6002
|
+
* @param config - The configuration object for the driver.
|
|
6003
|
+
* @throws {Error} If the driver type is missing required dependencies or unsupported.
|
|
6004
|
+
*
|
|
6005
|
+
* @example
|
|
6006
|
+
* ```typescript
|
|
6007
|
+
* manager.registerConnection('analytics', { driver: 'sqs', client: sqs });
|
|
6008
|
+
* ```
|
|
4348
6009
|
*/
|
|
4349
6010
|
registerConnection(name, config) {
|
|
4350
6011
|
const driverType = config.driver;
|
|
@@ -4435,16 +6096,41 @@ var QueueManager = class {
|
|
|
4435
6096
|
);
|
|
4436
6097
|
break;
|
|
4437
6098
|
}
|
|
6099
|
+
case "bullmq": {
|
|
6100
|
+
const { BullMQDriver: BullMQDriver2 } = (init_BullMQDriver(), __toCommonJS(BullMQDriver_exports));
|
|
6101
|
+
if (!config.queue) {
|
|
6102
|
+
throw new Error(
|
|
6103
|
+
"[QueueManager] BullMQDriver requires queue. Please provide Bull Queue instance in connection config."
|
|
6104
|
+
);
|
|
6105
|
+
}
|
|
6106
|
+
this.drivers.set(
|
|
6107
|
+
name,
|
|
6108
|
+
new BullMQDriver2({
|
|
6109
|
+
queue: config.queue,
|
|
6110
|
+
worker: config.worker,
|
|
6111
|
+
prefix: config.prefix,
|
|
6112
|
+
debug: config.debug
|
|
6113
|
+
})
|
|
6114
|
+
);
|
|
6115
|
+
break;
|
|
6116
|
+
}
|
|
4438
6117
|
default:
|
|
4439
6118
|
throw new Error(
|
|
4440
|
-
`Driver "${driverType}" is not supported. Supported drivers: memory, database, redis, kafka, sqs, rabbitmq`
|
|
6119
|
+
`Driver "${driverType}" is not supported. Supported drivers: memory, database, redis, kafka, sqs, rabbitmq, bullmq`
|
|
4441
6120
|
);
|
|
4442
6121
|
}
|
|
4443
6122
|
}
|
|
4444
6123
|
/**
|
|
4445
|
-
*
|
|
4446
|
-
*
|
|
4447
|
-
* @
|
|
6124
|
+
* Retrieves the driver instance for a specific connection.
|
|
6125
|
+
*
|
|
6126
|
+
* @param connection - The name of the connection.
|
|
6127
|
+
* @returns The configured QueueDriver instance.
|
|
6128
|
+
* @throws {Error} If the connection has not been registered.
|
|
6129
|
+
*
|
|
6130
|
+
* @example
|
|
6131
|
+
* ```typescript
|
|
6132
|
+
* const driver = manager.getDriver('redis');
|
|
6133
|
+
* ```
|
|
4448
6134
|
*/
|
|
4449
6135
|
getDriver(connection) {
|
|
4450
6136
|
const driver = this.drivers.get(connection);
|
|
@@ -4454,16 +6140,19 @@ var QueueManager = class {
|
|
|
4454
6140
|
return driver;
|
|
4455
6141
|
}
|
|
4456
6142
|
/**
|
|
4457
|
-
*
|
|
4458
|
-
*
|
|
6143
|
+
* Gets the name of the default connection.
|
|
6144
|
+
*
|
|
6145
|
+
* @returns The default connection name.
|
|
4459
6146
|
*/
|
|
4460
6147
|
getDefaultConnection() {
|
|
4461
6148
|
return this.defaultConnection;
|
|
4462
6149
|
}
|
|
4463
6150
|
/**
|
|
4464
|
-
*
|
|
4465
|
-
*
|
|
4466
|
-
* @returns
|
|
6151
|
+
* Retrieves a serializer instance by type.
|
|
6152
|
+
*
|
|
6153
|
+
* @param type - The serializer type (e.g., 'json', 'class'). If omitted, returns the default serializer.
|
|
6154
|
+
* @returns The JobSerializer instance.
|
|
6155
|
+
* @throws {Error} If the requested serializer type is not found.
|
|
4467
6156
|
*/
|
|
4468
6157
|
getSerializer(type) {
|
|
4469
6158
|
if (type) {
|
|
@@ -4476,8 +6165,17 @@ var QueueManager = class {
|
|
|
4476
6165
|
return this.defaultSerializer;
|
|
4477
6166
|
}
|
|
4478
6167
|
/**
|
|
4479
|
-
*
|
|
4480
|
-
*
|
|
6168
|
+
* Registers Job classes for the `ClassNameSerializer`.
|
|
6169
|
+
*
|
|
6170
|
+
* This is required when using 'class' serialization to allow proper hydration of job instances
|
|
6171
|
+
* upon deserialization.
|
|
6172
|
+
*
|
|
6173
|
+
* @param jobClasses - An array of Job class constructors.
|
|
6174
|
+
*
|
|
6175
|
+
* @example
|
|
6176
|
+
* ```typescript
|
|
6177
|
+
* manager.registerJobClasses([SendEmailJob, ProcessOrderJob]);
|
|
6178
|
+
* ```
|
|
4481
6179
|
*/
|
|
4482
6180
|
registerJobClasses(jobClasses) {
|
|
4483
6181
|
if (this.defaultSerializer instanceof ClassNameSerializer) {
|
|
@@ -4485,12 +6183,15 @@ var QueueManager = class {
|
|
|
4485
6183
|
}
|
|
4486
6184
|
}
|
|
4487
6185
|
/**
|
|
4488
|
-
*
|
|
6186
|
+
* Pushes a single job to the queue.
|
|
6187
|
+
*
|
|
6188
|
+
* Serializes the job, selects the appropriate driver based on job configuration,
|
|
6189
|
+
* and dispatches it. Also handles audit logging if persistence is enabled.
|
|
4489
6190
|
*
|
|
4490
|
-
* @template T - The type of the job.
|
|
4491
|
-
* @param job -
|
|
4492
|
-
* @param options -
|
|
4493
|
-
* @returns The same job instance (for
|
|
6191
|
+
* @template T - The type of the job (extends Job).
|
|
6192
|
+
* @param job - The job instance to enqueue.
|
|
6193
|
+
* @param options - Optional overrides for push behavior (priority, delay, etc.).
|
|
6194
|
+
* @returns The same job instance (for chaining).
|
|
4494
6195
|
*
|
|
4495
6196
|
* @example
|
|
4496
6197
|
* ```typescript
|
|
@@ -4521,15 +6222,19 @@ var QueueManager = class {
|
|
|
4521
6222
|
return job;
|
|
4522
6223
|
}
|
|
4523
6224
|
/**
|
|
4524
|
-
*
|
|
6225
|
+
* Pushes multiple jobs to the queue in a batch.
|
|
6226
|
+
*
|
|
6227
|
+
* Optimizes network requests by batching jobs where possible. Groups jobs by connection
|
|
6228
|
+
* and queue to maximize throughput.
|
|
4525
6229
|
*
|
|
4526
6230
|
* @template T - The type of the jobs.
|
|
4527
|
-
* @param jobs -
|
|
4528
|
-
* @param options -
|
|
6231
|
+
* @param jobs - An array of job instances to enqueue.
|
|
6232
|
+
* @param options - Configuration for batch size and concurrency.
|
|
6233
|
+
* @returns A promise that resolves when all jobs have been pushed.
|
|
4529
6234
|
*
|
|
4530
6235
|
* @example
|
|
4531
6236
|
* ```typescript
|
|
4532
|
-
* await manager.pushMany(jobs, { batchSize: 500, concurrency:
|
|
6237
|
+
* await manager.pushMany(jobs, { batchSize: 500, concurrency: 5 });
|
|
4533
6238
|
* ```
|
|
4534
6239
|
*/
|
|
4535
6240
|
async pushMany(jobs, options = {}) {
|
|
@@ -4591,15 +6296,17 @@ var QueueManager = class {
|
|
|
4591
6296
|
}
|
|
4592
6297
|
}
|
|
4593
6298
|
/**
|
|
4594
|
-
*
|
|
6299
|
+
* Pops a single job from the queue.
|
|
6300
|
+
*
|
|
6301
|
+
* Retrieves the next available job from the specified queue.
|
|
4595
6302
|
*
|
|
4596
|
-
* @param queue -
|
|
4597
|
-
* @param connection -
|
|
4598
|
-
* @returns Job instance or null if queue is empty.
|
|
6303
|
+
* @param queue - The queue name (default: 'default').
|
|
6304
|
+
* @param connection - The connection name (defaults to default connection).
|
|
6305
|
+
* @returns A Job instance if found, or `null` if the queue is empty.
|
|
4599
6306
|
*
|
|
4600
6307
|
* @example
|
|
4601
6308
|
* ```typescript
|
|
4602
|
-
* const job = await manager.pop('
|
|
6309
|
+
* const job = await manager.pop('priority-queue');
|
|
4603
6310
|
* if (job) await job.handle();
|
|
4604
6311
|
* ```
|
|
4605
6312
|
*/
|
|
@@ -4619,12 +6326,20 @@ var QueueManager = class {
|
|
|
4619
6326
|
}
|
|
4620
6327
|
}
|
|
4621
6328
|
/**
|
|
4622
|
-
*
|
|
6329
|
+
* Pops multiple jobs from the queue efficiently.
|
|
6330
|
+
*
|
|
6331
|
+
* Attempts to retrieve a batch of jobs from the driver. If the driver does not support
|
|
6332
|
+
* batching, it falls back to sequential popping.
|
|
4623
6333
|
*
|
|
4624
|
-
* @param queue -
|
|
4625
|
-
* @param count -
|
|
4626
|
-
* @param connection -
|
|
4627
|
-
* @returns
|
|
6334
|
+
* @param queue - The queue name (default: 'default').
|
|
6335
|
+
* @param count - The maximum number of jobs to retrieve (default: 10).
|
|
6336
|
+
* @param connection - The connection name.
|
|
6337
|
+
* @returns An array of Job instances.
|
|
6338
|
+
*
|
|
6339
|
+
* @example
|
|
6340
|
+
* ```typescript
|
|
6341
|
+
* const jobs = await manager.popMany('default', 50);
|
|
6342
|
+
* ```
|
|
4628
6343
|
*/
|
|
4629
6344
|
async popMany(queue = "default", count = 10, connection = this.defaultConnection) {
|
|
4630
6345
|
const driver = this.getDriver(connection);
|
|
@@ -4655,22 +6370,37 @@ var QueueManager = class {
|
|
|
4655
6370
|
return results;
|
|
4656
6371
|
}
|
|
4657
6372
|
/**
|
|
4658
|
-
*
|
|
6373
|
+
* Retrieves the current size of a queue.
|
|
6374
|
+
*
|
|
6375
|
+
* @param queue - The queue name (default: 'default').
|
|
6376
|
+
* @param connection - The connection name.
|
|
6377
|
+
* @returns The number of waiting jobs.
|
|
4659
6378
|
*
|
|
4660
|
-
* @
|
|
4661
|
-
*
|
|
4662
|
-
*
|
|
6379
|
+
* @example
|
|
6380
|
+
* ```typescript
|
|
6381
|
+
* const count = await manager.size('emails');
|
|
6382
|
+
* ```
|
|
4663
6383
|
*/
|
|
4664
6384
|
async size(queue = "default", connection = this.defaultConnection) {
|
|
4665
6385
|
const driver = this.getDriver(connection);
|
|
4666
6386
|
return driver.size(queue);
|
|
4667
6387
|
}
|
|
4668
6388
|
/**
|
|
4669
|
-
*
|
|
6389
|
+
* Pops a job from the queue with blocking (wait) behavior.
|
|
4670
6390
|
*
|
|
4671
|
-
*
|
|
4672
|
-
*
|
|
4673
|
-
*
|
|
6391
|
+
* Waits for a job to become available for the specified timeout duration.
|
|
6392
|
+
* Useful for reducing polling loop frequency.
|
|
6393
|
+
*
|
|
6394
|
+
* @param queues - A queue name or array of queue names to listen to.
|
|
6395
|
+
* @param timeout - Timeout in seconds (0 = block indefinitely).
|
|
6396
|
+
* @param connection - The connection name.
|
|
6397
|
+
* @returns A Job instance if found, or `null` if timed out.
|
|
6398
|
+
*
|
|
6399
|
+
* @example
|
|
6400
|
+
* ```typescript
|
|
6401
|
+
* // Wait up to 30 seconds for a job
|
|
6402
|
+
* const job = await manager.popBlocking('default', 30);
|
|
6403
|
+
* ```
|
|
4674
6404
|
*/
|
|
4675
6405
|
async popBlocking(queues = "default", timeout = 0, connection = this.defaultConnection) {
|
|
4676
6406
|
const driver = this.getDriver(connection);
|
|
@@ -4695,21 +6425,34 @@ var QueueManager = class {
|
|
|
4695
6425
|
}
|
|
4696
6426
|
}
|
|
4697
6427
|
/**
|
|
4698
|
-
*
|
|
6428
|
+
* Removes all jobs from a specific queue.
|
|
6429
|
+
*
|
|
6430
|
+
* @param queue - The queue name to purge.
|
|
6431
|
+
* @param connection - The connection name.
|
|
4699
6432
|
*
|
|
4700
|
-
* @
|
|
4701
|
-
*
|
|
6433
|
+
* @example
|
|
6434
|
+
* ```typescript
|
|
6435
|
+
* await manager.clear('test-queue');
|
|
6436
|
+
* ```
|
|
4702
6437
|
*/
|
|
4703
6438
|
async clear(queue = "default", connection = this.defaultConnection) {
|
|
4704
6439
|
const driver = this.getDriver(connection);
|
|
4705
6440
|
await driver.clear(queue);
|
|
4706
6441
|
}
|
|
4707
6442
|
/**
|
|
4708
|
-
*
|
|
6443
|
+
* Retrieves comprehensive statistics for a queue.
|
|
6444
|
+
*
|
|
6445
|
+
* Includes counts for pending, processing, delayed, and failed jobs.
|
|
6446
|
+
*
|
|
6447
|
+
* @param queue - The queue name.
|
|
6448
|
+
* @param connection - The connection name.
|
|
6449
|
+
* @returns A QueueStats object.
|
|
4709
6450
|
*
|
|
4710
|
-
* @
|
|
4711
|
-
*
|
|
4712
|
-
*
|
|
6451
|
+
* @example
|
|
6452
|
+
* ```typescript
|
|
6453
|
+
* const stats = await manager.stats('default');
|
|
6454
|
+
* console.log(stats.size, stats.failed);
|
|
6455
|
+
* ```
|
|
4713
6456
|
*/
|
|
4714
6457
|
async stats(queue = "default", connection = this.defaultConnection) {
|
|
4715
6458
|
const driver = this.getDriver(connection);
|
|
@@ -4722,8 +6465,16 @@ var QueueManager = class {
|
|
|
4722
6465
|
};
|
|
4723
6466
|
}
|
|
4724
6467
|
/**
|
|
4725
|
-
*
|
|
4726
|
-
*
|
|
6468
|
+
* Marks a job as successfully completed.
|
|
6469
|
+
*
|
|
6470
|
+
* Removes the job from the processing state and optionally archives it.
|
|
6471
|
+
*
|
|
6472
|
+
* @param job - The job instance that finished.
|
|
6473
|
+
*
|
|
6474
|
+
* @example
|
|
6475
|
+
* ```typescript
|
|
6476
|
+
* await manager.complete(job);
|
|
6477
|
+
* ```
|
|
4727
6478
|
*/
|
|
4728
6479
|
async complete(job) {
|
|
4729
6480
|
const connection = job.connectionName ?? this.defaultConnection;
|
|
@@ -4742,9 +6493,18 @@ var QueueManager = class {
|
|
|
4742
6493
|
}
|
|
4743
6494
|
}
|
|
4744
6495
|
/**
|
|
4745
|
-
*
|
|
4746
|
-
*
|
|
4747
|
-
*
|
|
6496
|
+
* Marks a job as failed.
|
|
6497
|
+
*
|
|
6498
|
+
* Moves the job to the failed state (Dead Letter Queue) and optionally archives it.
|
|
6499
|
+
* This is typically called after max retry attempts are exhausted.
|
|
6500
|
+
*
|
|
6501
|
+
* @param job - The job instance that failed.
|
|
6502
|
+
* @param error - The error that caused the failure.
|
|
6503
|
+
*
|
|
6504
|
+
* @example
|
|
6505
|
+
* ```typescript
|
|
6506
|
+
* await manager.fail(job, new Error('Something went wrong'));
|
|
6507
|
+
* ```
|
|
4748
6508
|
*/
|
|
4749
6509
|
async fail(job, error) {
|
|
4750
6510
|
const connection = job.connectionName ?? this.defaultConnection;
|
|
@@ -4765,13 +6525,19 @@ var QueueManager = class {
|
|
|
4765
6525
|
}
|
|
4766
6526
|
}
|
|
4767
6527
|
/**
|
|
4768
|
-
*
|
|
6528
|
+
* Retrieves the configured persistence adapter.
|
|
6529
|
+
*
|
|
6530
|
+
* @returns The PersistenceAdapter instance, or undefined if not configured.
|
|
4769
6531
|
*/
|
|
4770
6532
|
getPersistence() {
|
|
4771
6533
|
return this.persistence?.adapter;
|
|
4772
6534
|
}
|
|
4773
6535
|
/**
|
|
4774
|
-
*
|
|
6536
|
+
* Gets the Scheduler instance associated with this manager.
|
|
6537
|
+
*
|
|
6538
|
+
* The Scheduler handles delayed jobs and periodic tasks.
|
|
6539
|
+
*
|
|
6540
|
+
* @returns The Scheduler instance.
|
|
4775
6541
|
*/
|
|
4776
6542
|
getScheduler() {
|
|
4777
6543
|
if (!this.scheduler) {
|
|
@@ -4781,7 +6547,18 @@ var QueueManager = class {
|
|
|
4781
6547
|
return this.scheduler;
|
|
4782
6548
|
}
|
|
4783
6549
|
/**
|
|
4784
|
-
*
|
|
6550
|
+
* Retrieves failed jobs from the Dead Letter Queue.
|
|
6551
|
+
*
|
|
6552
|
+
* @param queue - The queue name.
|
|
6553
|
+
* @param start - The starting index (pagination).
|
|
6554
|
+
* @param end - The ending index (pagination).
|
|
6555
|
+
* @param connection - The connection name.
|
|
6556
|
+
* @returns An array of serialized jobs.
|
|
6557
|
+
*
|
|
6558
|
+
* @example
|
|
6559
|
+
* ```typescript
|
|
6560
|
+
* const failedJobs = await manager.getFailed('default', 0, 10);
|
|
6561
|
+
* ```
|
|
4785
6562
|
*/
|
|
4786
6563
|
async getFailed(queue, start = 0, end = -1, connection = this.defaultConnection) {
|
|
4787
6564
|
const driver = this.getDriver(connection);
|
|
@@ -4791,7 +6568,19 @@ var QueueManager = class {
|
|
|
4791
6568
|
return [];
|
|
4792
6569
|
}
|
|
4793
6570
|
/**
|
|
4794
|
-
*
|
|
6571
|
+
* Retries failed jobs from the Dead Letter Queue.
|
|
6572
|
+
*
|
|
6573
|
+
* Moves jobs from the failed state back to the active queue for re-processing.
|
|
6574
|
+
*
|
|
6575
|
+
* @param queue - The queue name.
|
|
6576
|
+
* @param count - The number of jobs to retry.
|
|
6577
|
+
* @param connection - The connection name.
|
|
6578
|
+
* @returns The number of jobs successfully retried.
|
|
6579
|
+
*
|
|
6580
|
+
* @example
|
|
6581
|
+
* ```typescript
|
|
6582
|
+
* await manager.retryFailed('default', 5);
|
|
6583
|
+
* ```
|
|
4795
6584
|
*/
|
|
4796
6585
|
async retryFailed(queue, count = 1, connection = this.defaultConnection) {
|
|
4797
6586
|
const driver = this.getDriver(connection);
|
|
@@ -4801,7 +6590,15 @@ var QueueManager = class {
|
|
|
4801
6590
|
return 0;
|
|
4802
6591
|
}
|
|
4803
6592
|
/**
|
|
4804
|
-
*
|
|
6593
|
+
* Clears all failed jobs from the Dead Letter Queue.
|
|
6594
|
+
*
|
|
6595
|
+
* @param queue - The queue name.
|
|
6596
|
+
* @param connection - The connection name.
|
|
6597
|
+
*
|
|
6598
|
+
* @example
|
|
6599
|
+
* ```typescript
|
|
6600
|
+
* await manager.clearFailed('default');
|
|
6601
|
+
* ```
|
|
4805
6602
|
*/
|
|
4806
6603
|
async clearFailed(queue, connection = this.defaultConnection) {
|
|
4807
6604
|
const driver = this.getDriver(connection);
|
|
@@ -4809,6 +6606,219 @@ var QueueManager = class {
|
|
|
4809
6606
|
await driver.clearFailed(queue);
|
|
4810
6607
|
}
|
|
4811
6608
|
}
|
|
6609
|
+
/**
|
|
6610
|
+
* Retrieves high-level statistics across all registered connections and queues.
|
|
6611
|
+
*
|
|
6612
|
+
* Iterates through all drivers and collects metadata to provide a comprehensive
|
|
6613
|
+
* snapshot of the entire queue system's health.
|
|
6614
|
+
*
|
|
6615
|
+
* @returns A promise resolving to a GlobalStats object.
|
|
6616
|
+
*/
|
|
6617
|
+
async getGlobalStats() {
|
|
6618
|
+
const stats = {
|
|
6619
|
+
connections: {},
|
|
6620
|
+
totalSize: 0,
|
|
6621
|
+
totalFailed: 0,
|
|
6622
|
+
timestamp: Date.now()
|
|
6623
|
+
};
|
|
6624
|
+
for (const [name, driver] of this.drivers.entries()) {
|
|
6625
|
+
const queueNames = driver.getQueues ? await driver.getQueues() : ["default"];
|
|
6626
|
+
const connectionStats = [];
|
|
6627
|
+
for (const queue of queueNames) {
|
|
6628
|
+
const qStats = await this.stats(queue, name);
|
|
6629
|
+
connectionStats.push(qStats);
|
|
6630
|
+
stats.totalSize += qStats.size;
|
|
6631
|
+
stats.totalFailed += qStats.failed ?? 0;
|
|
6632
|
+
}
|
|
6633
|
+
stats.connections[name] = connectionStats;
|
|
6634
|
+
}
|
|
6635
|
+
return stats;
|
|
6636
|
+
}
|
|
6637
|
+
};
|
|
6638
|
+
|
|
6639
|
+
// src/SystemEventJob.ts
|
|
6640
|
+
import { app } from "@gravito/core";
|
|
6641
|
+
var SystemEventJob = class extends Job {
  /** Optional callback invoked when the job fails permanently (DLQ hook). */
  onFailedCallback;
  /**
   * Creates a job that replays a system hook's listeners on a worker process.
   *
   * @param hook - Hook name to dispatch.
   * @param args - Arguments forwarded to the hook listeners.
   * @param options - Optional queue / priority / delay / retry / connection settings.
   */
  constructor(hook, args, options = {}) {
    super();
    this.hook = hook;
    this.args = args;
    this.options = options;
    const { queue, priority, delay, retryAfter, retryMultiplier, connection } = options;
    if (queue) {
      this.onQueue(queue);
    }
    if (priority) {
      this.withPriority(priority);
    }
    if (delay) {
      this.delay(delay);
    }
    if (retryAfter) {
      this.backoff(retryAfter, retryMultiplier);
    }
    if (connection) {
      this.onConnection(connection);
    }
  }
  /**
   * Set failure callback for DLQ handling.
   *
   * @param callback - Called when job fails permanently
   * @returns Self for chaining
   */
  onFailed(callback) {
    this.onFailedCallback = callback;
    return this;
  }
  /**
   * Execute the hook listeners in the worker process.
   * Resolves the application core lazily so the job stays serializable.
   */
  async handle() {
    const core = app();
    if (core?.hooks) {
      await core.hooks.doActionSync(this.hook, this.args);
    }
  }
  /**
   * Called when the job fails permanently after all retries.
   *
   * Errors thrown by the user callback are logged rather than propagated so
   * DLQ handling cannot crash the worker.
   *
   * @param error - The terminal error.
   * @param attempt - The attempt number at which the job gave up.
   */
  async failed(error, attempt = 1) {
    if (!this.onFailedCallback) {
      return;
    }
    try {
      await this.onFailedCallback(error, attempt);
    } catch (callbackError) {
      console.error("[SystemEventJob] Failed callback error:", callbackError);
    }
  }
};
|
|
6701
|
+
|
|
6702
|
+
// src/StreamEventBackend.ts
|
|
6703
|
+
var StreamEventBackend = class {
  /** Resolved backend configuration (retry strategy, CB integration, DLQ handler). */
  config;
  /**
   * @param queueManager - QueueManager used to push event jobs.
   * @param config - Optional configuration overrides; merged over defaults.
   */
  constructor(queueManager, config) {
    this.queueManager = queueManager;
    this.config = {
      retryStrategy: "bull",
      circuitBreakerIntegration: false,
      ...config
    };
  }
  /**
   * Build Job Push Options from EventOptions.
   *
   * Maps EventOptions to Bull Queue JobPushOptions (priority, groupId).
   */
  buildJobOptions(task) {
    const pushOptions = {};
    const taskOptions = task.options;
    if (taskOptions?.priority) {
      pushOptions.priority = taskOptions.priority;
    }
    if (taskOptions?.groupId) {
      pushOptions.groupId = taskOptions.groupId;
    }
    return pushOptions;
  }
  /**
   * Enqueue an event task to the stream queue.
   *
   * Rejects immediately when the hook's circuit breaker is OPEN, applies the
   * configured retry strategy, and wires DLQ routing when a handler is set.
   */
  async enqueue(task) {
    if (this.config.circuitBreakerIntegration && this.config.getCircuitBreaker) {
      const breaker = this.config.getCircuitBreaker(task.hook);
      if (breaker?.getState?.() === "OPEN") {
        throw new Error(`Circuit breaker OPEN for event: ${task.hook}`);
      }
    }
    const job = new SystemEventJob(task.hook, task.args, task.options);
    this.applyRetryStrategy(job, task);
    if (this.config.dlqHandler) {
      job.onFailed(async (error, attempt) => {
        await this.handleJobFailure(task, error, attempt);
      });
    }
    await this.queueManager.push(job, this.buildJobOptions(task));
  }
  /**
   * Apply the configured retry strategy to the job.
   * Only the "bull" and "hybrid" strategies set retry fields on the job.
   */
  applyRetryStrategy(job, task) {
    const strategy = this.config.retryStrategy ?? "bull";
    if (strategy !== "bull" && strategy !== "hybrid") {
      return;
    }
    const taskOptions = task.options;
    job.maxAttempts = taskOptions?.maxAttempts ?? 3;
    job.retryAfterSeconds = taskOptions?.retryAfter ?? 5;
    job.retryMultiplier = taskOptions?.retryMultiplier ?? 2;
  }
  /**
   * Route a permanently failed job to the DLQ handler, if configured.
   * DLQ handler errors are logged, never rethrown.
   */
  async handleJobFailure(task, error, attempt) {
    const handler = this.config.dlqHandler;
    if (!handler) {
      return;
    }
    try {
      await handler.handle(task, error, attempt);
    } catch (dlqError) {
      console.error("[StreamEventBackend] Failed to handle DLQ:", dlqError);
    }
  }
  /**
   * Record a job failure for CircuitBreaker state management.
   * Called when a job fails, regardless of retry status.
   */
  recordJobFailure(task, error) {
    if (!this.config.circuitBreakerIntegration || !this.config.getCircuitBreaker) {
      return;
    }
    const breaker = this.config.getCircuitBreaker(task.hook);
    if (breaker?.recordFailure) {
      breaker.recordFailure(error);
    }
  }
  /**
   * Record a job success for CircuitBreaker state management.
   * Called when a job completes successfully.
   */
  recordJobSuccess(task) {
    if (!this.config.circuitBreakerIntegration || !this.config.getCircuitBreaker) {
      return;
    }
    const breaker = this.config.getCircuitBreaker(task.hook);
    if (breaker?.recordSuccess) {
      breaker.recordSuccess();
    }
  }
  /** Get the effective retry strategy ("bull" by default). */
  getRetryStrategy() {
    return this.config.retryStrategy ?? "bull";
  }
  /** Check whether CircuitBreaker integration is enabled. */
  isCircuitBreakerEnabled() {
    return this.config.circuitBreakerIntegration ?? false;
  }
  /** Get the DLQ handler, if one was configured. */
  getDLQHandler() {
    return this.config.dlqHandler;
  }
};
|
|
4813
6823
|
|
|
4814
6824
|
// src/OrbitStream.ts
|
|
@@ -4818,16 +6828,37 @@ var OrbitStream = class _OrbitStream {
|
|
|
4818
6828
|
}
|
|
4819
6829
|
queueManager;
|
|
4820
6830
|
consumer;
|
|
6831
|
+
core;
|
|
4821
6832
|
/**
|
|
4822
|
-
*
|
|
6833
|
+
* Factory method for creating and configuring an OrbitStream instance.
|
|
6834
|
+
*
|
|
6835
|
+
* Provides a fluent way to instantiate the orbit during application bootstrap.
|
|
6836
|
+
*
|
|
6837
|
+
* @param options - Configuration options.
|
|
6838
|
+
* @returns A new OrbitStream instance.
|
|
6839
|
+
*
|
|
6840
|
+
* @example
|
|
6841
|
+
* ```typescript
|
|
6842
|
+
* const orbit = OrbitStream.configure({ default: 'memory' });
|
|
6843
|
+
* ```
|
|
4823
6844
|
*/
|
|
4824
6845
|
static configure(options) {
|
|
4825
6846
|
return new _OrbitStream(options);
|
|
4826
6847
|
}
|
|
4827
6848
|
/**
|
|
4828
|
-
*
|
|
6849
|
+
* Installs the Queue system into the Gravito PlanetCore.
|
|
6850
|
+
*
|
|
6851
|
+
* This lifecycle method:
|
|
6852
|
+
* 1. Initializes the `QueueManager`.
|
|
6853
|
+
* 2. Registers the `queue` service in the dependency injection container.
|
|
6854
|
+
* 3. Sets up a global middleware to inject `QueueManager` into the request context (`c.get('queue')`).
|
|
6855
|
+
* 4. Automatically detects and registers database connections if available in the context.
|
|
6856
|
+
* 5. Starts the embedded worker if configured.
|
|
6857
|
+
*
|
|
6858
|
+
* @param core - The PlanetCore instance.
|
|
4829
6859
|
*/
|
|
4830
6860
|
install(core) {
|
|
6861
|
+
this.core = core;
|
|
4831
6862
|
this.queueManager = new QueueManager(this.options);
|
|
4832
6863
|
core.container.instance("queue", this.queueManager);
|
|
4833
6864
|
core.adapter.use("*", async (c, next) => {
|
|
@@ -4855,12 +6886,35 @@ var OrbitStream = class _OrbitStream {
|
|
|
4855
6886
|
return await next();
|
|
4856
6887
|
});
|
|
4857
6888
|
core.logger.info("[OrbitStream] Installed");
|
|
6889
|
+
if (this.queueManager) {
|
|
6890
|
+
const backend = new StreamEventBackend(this.queueManager);
|
|
6891
|
+
core.hooks.setBackend(backend);
|
|
6892
|
+
core.logger.info("[OrbitStream] HookManager backend switched to StreamEventBackend");
|
|
6893
|
+
}
|
|
6894
|
+
if (this.options.dashboard) {
|
|
6895
|
+
const { DashboardProvider: DashboardProvider2 } = (init_DashboardProvider(), __toCommonJS(DashboardProvider_exports));
|
|
6896
|
+
const dashboard = new DashboardProvider2(this.queueManager);
|
|
6897
|
+
const path2 = typeof this.options.dashboard === "object" ? this.options.dashboard.path : "/_flux";
|
|
6898
|
+
dashboard.registerRoutes(core, path2);
|
|
6899
|
+
core.logger.info(`[OrbitStream] Dashboard API registered at ${path2}`);
|
|
6900
|
+
}
|
|
4858
6901
|
if (this.options.autoStartWorker && process.env.NODE_ENV === "development" && this.options.workerOptions) {
|
|
4859
6902
|
this.startWorker(this.options.workerOptions);
|
|
4860
6903
|
}
|
|
4861
6904
|
}
|
|
4862
6905
|
/**
|
|
4863
|
-
*
|
|
6906
|
+
* Starts the embedded worker process.
|
|
6907
|
+
*
|
|
6908
|
+
* Launches a `Consumer` instance to process jobs in the background.
|
|
6909
|
+
* Throws an error if `QueueManager` is not initialized or if a worker is already running.
|
|
6910
|
+
*
|
|
6911
|
+
* @param options - Consumer configuration options.
|
|
6912
|
+
* @throws {Error} If QueueManager is missing or worker is already active.
|
|
6913
|
+
*
|
|
6914
|
+
* @example
|
|
6915
|
+
* ```typescript
|
|
6916
|
+
* orbit.startWorker({ queues: ['default'] });
|
|
6917
|
+
* ```
|
|
4864
6918
|
*/
|
|
4865
6919
|
startWorker(options) {
|
|
4866
6920
|
if (!this.queueManager) {
|
|
@@ -4869,13 +6923,31 @@ var OrbitStream = class _OrbitStream {
|
|
|
4869
6923
|
if (this.consumer?.isRunning()) {
|
|
4870
6924
|
throw new Error("Worker is already running");
|
|
4871
6925
|
}
|
|
4872
|
-
|
|
6926
|
+
const consumerOptions = {
|
|
6927
|
+
...options,
|
|
6928
|
+
onEvent: (event, payload) => {
|
|
6929
|
+
const signal = this.core?.container.make("signal");
|
|
6930
|
+
if (signal && typeof signal.emit === "function") {
|
|
6931
|
+
signal.emit(`stream:${event}`, payload);
|
|
6932
|
+
}
|
|
6933
|
+
}
|
|
6934
|
+
};
|
|
6935
|
+
this.consumer = new Consumer(this.queueManager, consumerOptions);
|
|
4873
6936
|
this.consumer.start().catch((error) => {
|
|
4874
6937
|
console.error("[OrbitStream] Worker error:", error);
|
|
4875
6938
|
});
|
|
4876
6939
|
}
|
|
4877
6940
|
/**
|
|
4878
|
-
*
|
|
6941
|
+
* Stops the embedded worker process.
|
|
6942
|
+
*
|
|
6943
|
+
* Gracefully shuts down the consumer, waiting for active jobs to complete.
|
|
6944
|
+
*
|
|
6945
|
+
* @returns A promise that resolves when the worker has stopped.
|
|
6946
|
+
*
|
|
6947
|
+
* @example
|
|
6948
|
+
* ```typescript
|
|
6949
|
+
* await orbit.stopWorker();
|
|
6950
|
+
* ```
|
|
4879
6951
|
*/
|
|
4880
6952
|
async stopWorker() {
|
|
4881
6953
|
if (this.consumer) {
|
|
@@ -4883,7 +6955,14 @@ var OrbitStream = class _OrbitStream {
|
|
|
4883
6955
|
}
|
|
4884
6956
|
}
|
|
4885
6957
|
/**
|
|
4886
|
-
*
|
|
6958
|
+
* Retrieves the underlying QueueManager instance.
|
|
6959
|
+
*
|
|
6960
|
+
* @returns The active QueueManager, or undefined if not installed.
|
|
6961
|
+
*
|
|
6962
|
+
* @example
|
|
6963
|
+
* ```typescript
|
|
6964
|
+
* const manager = orbit.getQueueManager();
|
|
6965
|
+
* ```
|
|
4887
6966
|
*/
|
|
4888
6967
|
getQueueManager() {
|
|
4889
6968
|
return this.queueManager;
|
|
@@ -4907,9 +6986,15 @@ var MySQLPersistence = class {
|
|
|
4907
6986
|
this.table = table;
|
|
4908
6987
|
this.logsTable = logsTable;
|
|
4909
6988
|
}
|
|
6989
|
+
/**
|
|
6990
|
+
* Archives a single job.
|
|
6991
|
+
*/
|
|
4910
6992
|
async archive(queue, job, status) {
|
|
4911
6993
|
await this.archiveMany([{ queue, job, status }]);
|
|
4912
6994
|
}
|
|
6995
|
+
/**
|
|
6996
|
+
* Archives multiple jobs in a batch.
|
|
6997
|
+
*/
|
|
4913
6998
|
async archiveMany(jobs) {
|
|
4914
6999
|
if (jobs.length === 0) {
|
|
4915
7000
|
return;
|
|
@@ -4933,8 +7018,14 @@ var MySQLPersistence = class {
|
|
|
4933
7018
|
}
|
|
4934
7019
|
}
|
|
4935
7020
|
}
|
|
7021
|
+
/**
|
|
7022
|
+
* No-op. Use BufferedPersistence if flushing is needed.
|
|
7023
|
+
*/
|
|
4936
7024
|
async flush() {
|
|
4937
7025
|
}
|
|
7026
|
+
/**
|
|
7027
|
+
* Finds an archived job by ID.
|
|
7028
|
+
*/
|
|
4938
7029
|
async find(queue, id) {
|
|
4939
7030
|
const row = await this.db.table(this.table).where("queue", queue).where("job_id", id).first();
|
|
4940
7031
|
if (!row) {
|
|
@@ -4978,6 +7069,9 @@ var MySQLPersistence = class {
|
|
|
4978
7069
|
}
|
|
4979
7070
|
/**
|
|
4980
7071
|
* Search jobs from the archive.
|
|
7072
|
+
*
|
|
7073
|
+
* @param query - Search string (matches ID, payload, or error).
|
|
7074
|
+
* @param options - Filter options.
|
|
4981
7075
|
*/
|
|
4982
7076
|
async search(query, options = {}) {
|
|
4983
7077
|
let q = this.db.table(this.table);
|
|
@@ -4999,13 +7093,13 @@ var MySQLPersistence = class {
|
|
|
4999
7093
|
);
|
|
5000
7094
|
}
|
|
5001
7095
|
/**
|
|
5002
|
-
* Archive a system log message
|
|
7096
|
+
* Archive a system log message.
|
|
5003
7097
|
*/
|
|
5004
7098
|
async archiveLog(log) {
|
|
5005
7099
|
await this.archiveLogMany([log]);
|
|
5006
7100
|
}
|
|
5007
7101
|
/**
|
|
5008
|
-
* Archive multiple log messages
|
|
7102
|
+
* Archive multiple log messages.
|
|
5009
7103
|
*/
|
|
5010
7104
|
async archiveLogMany(logs) {
|
|
5011
7105
|
if (logs.length === 0) {
|
|
@@ -5109,7 +7203,7 @@ var MySQLPersistence = class {
|
|
|
5109
7203
|
return Number(result) || 0;
|
|
5110
7204
|
}
|
|
5111
7205
|
/**
|
|
5112
|
-
*
|
|
7206
|
+
* Helper to create necessary tables if they don't exist.
|
|
5113
7207
|
*/
|
|
5114
7208
|
async setupTable() {
|
|
5115
7209
|
await Promise.all([this.setupJobsTable(), this.setupLogsTable()]);
|
|
@@ -5170,9 +7264,17 @@ var SQLitePersistence = class {
|
|
|
5170
7264
|
this.table = table;
|
|
5171
7265
|
this.logsTable = logsTable;
|
|
5172
7266
|
}
|
|
7267
|
+
/**
|
|
7268
|
+
* Archives a single job.
|
|
7269
|
+
*/
|
|
5173
7270
|
async archive(queue, job, status) {
|
|
5174
7271
|
await this.archiveMany([{ queue, job, status }]);
|
|
5175
7272
|
}
|
|
7273
|
+
/**
|
|
7274
|
+
* Archives multiple jobs in a batch.
|
|
7275
|
+
*
|
|
7276
|
+
* Optimized for SQLite by wrapping chunks in transactions.
|
|
7277
|
+
*/
|
|
5176
7278
|
async archiveMany(jobs) {
|
|
5177
7279
|
if (jobs.length === 0) {
|
|
5178
7280
|
return;
|
|
@@ -5203,8 +7305,14 @@ var SQLitePersistence = class {
|
|
|
5203
7305
|
}
|
|
5204
7306
|
}
|
|
5205
7307
|
}
|
|
7308
|
+
/**
|
|
7309
|
+
* No-op. Use BufferedPersistence if flushing is needed.
|
|
7310
|
+
*/
|
|
5206
7311
|
async flush() {
|
|
5207
7312
|
}
|
|
7313
|
+
/**
|
|
7314
|
+
* Finds an archived job by ID.
|
|
7315
|
+
*/
|
|
5208
7316
|
async find(queue, id) {
|
|
5209
7317
|
const row = await this.db.table(this.table).where("queue", queue).where("job_id", id).first();
|
|
5210
7318
|
if (!row) {
|
|
@@ -5223,7 +7331,11 @@ var SQLitePersistence = class {
|
|
|
5223
7331
|
async list(queue, options = {}) {
|
|
5224
7332
|
let query = this.db.table(this.table).where("queue", queue);
|
|
5225
7333
|
if (options.status) {
|
|
5226
|
-
|
|
7334
|
+
if (Array.isArray(options.status)) {
|
|
7335
|
+
query = query.whereIn("status", options.status);
|
|
7336
|
+
} else {
|
|
7337
|
+
query = query.where("status", options.status);
|
|
7338
|
+
}
|
|
5227
7339
|
}
|
|
5228
7340
|
if (options.jobId) {
|
|
5229
7341
|
query = query.where("job_id", options.jobId);
|
|
@@ -5364,7 +7476,11 @@ var SQLitePersistence = class {
|
|
|
5364
7476
|
async count(queue, options = {}) {
|
|
5365
7477
|
let query = this.db.table(this.table).where("queue", queue);
|
|
5366
7478
|
if (options.status) {
|
|
5367
|
-
|
|
7479
|
+
if (Array.isArray(options.status)) {
|
|
7480
|
+
query = query.whereIn("status", options.status);
|
|
7481
|
+
} else {
|
|
7482
|
+
query = query.where("status", options.status);
|
|
7483
|
+
}
|
|
5368
7484
|
}
|
|
5369
7485
|
if (options.jobId) {
|
|
5370
7486
|
query = query.where("job_id", options.jobId);
|
|
@@ -5426,11 +7542,244 @@ var SQLitePersistence = class {
|
|
|
5426
7542
|
|
|
5427
7543
|
// src/index.ts
|
|
5428
7544
|
init_Scheduler();
|
|
7545
|
+
|
|
7546
|
+
// src/workers/WorkerPool.ts
|
|
7547
|
+
var WorkerPool = class {
  /** All workers currently owned by the pool. */
  workers = [];
  /** Resolved pool configuration. */
  config;
  /** Jobs waiting for a free worker. */
  queue = [];
  /** Timer handle for the periodic health check. */
  healthCheckTimer = null;
  /** Lifetime counters for finished jobs. */
  stats = {
    completed: 0,
    failed: 0
  };
  /**
   * Creates a WorkerPool instance, pre-warms `minWorkers` workers, and starts
   * the periodic health check.
   *
   * @param config - Configuration options for the pool.
   */
  constructor(config = {}) {
    this.config = {
      poolSize: config.poolSize ?? 4,
      minWorkers: config.minWorkers ?? 0,
      healthCheckInterval: config.healthCheckInterval ?? 3e4,
      maxExecutionTime: config.maxExecutionTime ?? 3e4,
      maxMemory: config.maxMemory ?? 0,
      isolateContexts: config.isolateContexts ?? false,
      idleTimeout: config.idleTimeout ?? 6e4
    };
    this.warmUp();
    this.startHealthCheck();
  }
  /**
   * Pre-warms the pool by creating the minimum number of workers,
   * capped at the pool size.
   */
  warmUp() {
    const initial = Math.min(this.config.minWorkers, this.config.poolSize);
    for (let i = 0; i < initial; i++) {
      this.createWorker();
    }
  }
  /**
   * Creates a new SandboxedWorker and adds it to the pool.
   *
   * @returns The newly created worker.
   */
  createWorker() {
    const { maxExecutionTime, maxMemory, isolateContexts, idleTimeout } = this.config;
    const worker = new SandboxedWorker({
      maxExecutionTime,
      maxMemory,
      isolateContexts,
      idleTimeout
    });
    this.workers.push(worker);
    return worker;
  }
  /**
   * Retrieves an available worker from the pool.
   *
   * Preference order: reuse a ready worker, grow the pool if not full,
   * otherwise return `null` (caller must queue the job).
   *
   * @returns An available worker or `null` when the pool is saturated.
   */
  getAvailableWorker() {
    const idle = this.workers.find((candidate) => candidate.isReady());
    if (idle) {
      return idle;
    }
    return this.workers.length < this.config.poolSize ? this.createWorker() : null;
  }
  /**
   * Executes a job using the worker pool.
   *
   * Runs immediately when a worker is free; otherwise the job waits in the
   * pending queue until `processQueue` picks it up.
   *
   * @param job - The serialized job data.
   * @throws {Error} If execution fails.
   */
  async execute(job) {
    const worker = this.getAvailableWorker();
    if (!worker) {
      // Pool saturated: park the job until a worker frees up.
      return new Promise((resolve, reject) => {
        this.queue.push({ job, resolve, reject });
      });
    }
    try {
      await worker.execute(job);
      this.stats.completed++;
    } catch (error) {
      this.stats.failed++;
      throw error;
    } finally {
      // Whether we succeeded or failed, a worker just freed up.
      this.processQueue();
    }
  }
  /**
   * Dispatches the next queued job if a worker is available.
   * Chains itself so the queue keeps draining as workers free up.
   */
  processQueue() {
    if (this.queue.length === 0) {
      return;
    }
    const worker = this.getAvailableWorker();
    if (!worker) {
      return;
    }
    const pending = this.queue.shift();
    if (!pending) {
      return;
    }
    worker
      .execute(pending.job)
      .then(() => {
        this.stats.completed++;
        pending.resolve();
      })
      .catch((error) => {
        this.stats.failed++;
        pending.reject(error);
      })
      .finally(() => {
        this.processQueue();
      });
  }
  /**
   * Starts the periodic health check (idempotent).
   */
  startHealthCheck() {
    if (this.healthCheckTimer) {
      return;
    }
    this.healthCheckTimer = setInterval(() => {
      this.performHealthCheck();
    }, this.config.healthCheckInterval);
  }
  /**
   * Performs a health check on the pool: drops terminated workers and
   * replenishes up to `minWorkers`.
   */
  performHealthCheck() {
    this.workers = this.workers.filter((worker) => {
      if (worker.getState() !== "terminated") {
        return true;
      }
      worker.terminate().catch(console.error);
      return false;
    });
    const deficit = this.config.minWorkers - this.workers.length;
    for (let i = 0; i < deficit; i++) {
      this.createWorker();
    }
  }
  /**
   * Gets the current statistics of the worker pool.
   *
   * @returns Snapshot of pool statistics.
   */
  getStats() {
    const counts = { ready: 0, busy: 0, terminated: 0 };
    for (const worker of this.workers) {
      const state = worker.getState();
      if (Object.hasOwn(counts, state)) {
        counts[state]++;
      }
    }
    return {
      total: this.workers.length,
      ready: counts.ready,
      busy: counts.busy,
      terminated: counts.terminated,
      pending: this.queue.length,
      completed: this.stats.completed,
      failed: this.stats.failed
    };
  }
  /**
   * Shuts down the worker pool: stops the health check, rejects every
   * pending job, and terminates all workers.
   */
  async shutdown() {
    if (this.healthCheckTimer) {
      clearInterval(this.healthCheckTimer);
      this.healthCheckTimer = null;
    }
    const abandoned = this.queue;
    this.queue = [];
    for (const pending of abandoned) {
      pending.reject(new Error("Worker pool is shutting down"));
    }
    await Promise.all(this.workers.map((worker) => worker.terminate().catch(console.error)));
    this.workers = [];
  }
  /**
   * Waits for all active and pending jobs to complete, polling every 100ms.
   *
   * @param timeout - Maximum wait time in milliseconds. 0 for infinite.
   * @throws {Error} If the timeout is reached.
   */
  async waitForCompletion(timeout = 0) {
    const startedAt = Date.now();
    return new Promise((resolve, reject) => {
      const poll = () => {
        const { busy, pending } = this.getStats();
        if (busy === 0 && pending === 0) {
          resolve();
          return;
        }
        if (timeout > 0 && Date.now() - startedAt > timeout) {
          reject(new Error("Wait for completion timeout"));
          return;
        }
        setTimeout(poll, 100);
      };
      poll();
    });
  }
};
|
|
5429
7774
|
export {
|
|
7775
|
+
BatchConsumer,
|
|
5430
7776
|
BufferedPersistence,
|
|
7777
|
+
BullMQDriver,
|
|
5431
7778
|
ClassNameSerializer,
|
|
5432
7779
|
Consumer,
|
|
5433
7780
|
DatabaseDriver,
|
|
7781
|
+
DistributedLock,
|
|
7782
|
+
GrpcDriver,
|
|
5434
7783
|
Job,
|
|
5435
7784
|
JsonSerializer,
|
|
5436
7785
|
KafkaDriver,
|
|
@@ -5442,6 +7791,10 @@ export {
|
|
|
5442
7791
|
RedisDriver,
|
|
5443
7792
|
SQLitePersistence,
|
|
5444
7793
|
SQSDriver,
|
|
7794
|
+
SandboxedWorker,
|
|
5445
7795
|
Scheduler,
|
|
5446
|
-
|
|
7796
|
+
StreamEventBackend,
|
|
7797
|
+
SystemEventJob,
|
|
7798
|
+
Worker,
|
|
7799
|
+
WorkerPool
|
|
5447
7800
|
};
|