@gravito/stream 1.0.0-beta.1 → 1.0.0-beta.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +86 -2
- package/dist/index.cjs +1357 -73
- package/dist/index.d.cts +689 -7
- package/dist/index.d.ts +689 -7
- package/dist/index.js +1359 -73
- package/package.json +9 -5
package/dist/index.js
CHANGED
|
@@ -2,6 +2,12 @@ var __defProp = Object.defineProperty;
|
|
|
2
2
|
var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
|
|
3
3
|
var __getOwnPropNames = Object.getOwnPropertyNames;
|
|
4
4
|
var __hasOwnProp = Object.prototype.hasOwnProperty;
|
|
5
|
+
var __require = /* @__PURE__ */ ((x) => typeof require !== "undefined" ? require : typeof Proxy !== "undefined" ? new Proxy(x, {
|
|
6
|
+
get: (a, b) => (typeof require !== "undefined" ? require : a)[b]
|
|
7
|
+
}) : x)(function(x) {
|
|
8
|
+
if (typeof require !== "undefined") return require.apply(this, arguments);
|
|
9
|
+
throw Error('Dynamic require of "' + x + '" is not supported');
|
|
10
|
+
});
|
|
5
11
|
var __esm = (fn, res) => function __init() {
|
|
6
12
|
return fn && (res = (0, fn[__getOwnPropNames(fn)[0]])(fn = 0)), res;
|
|
7
13
|
};
|
|
@@ -45,10 +51,11 @@ var init_DatabaseDriver = __esm({
|
|
|
45
51
|
*/
|
|
46
52
|
async push(queue, job) {
|
|
47
53
|
const availableAt = job.delaySeconds ? new Date(Date.now() + job.delaySeconds * 1e3) : /* @__PURE__ */ new Date();
|
|
54
|
+
const payload = JSON.stringify(job);
|
|
48
55
|
await this.dbService.execute(
|
|
49
56
|
`INSERT INTO ${this.tableName} (queue, payload, attempts, available_at, created_at)
|
|
50
57
|
VALUES ($1, $2, $3, $4, $5)`,
|
|
51
|
-
[queue,
|
|
58
|
+
[queue, payload, job.attempts ?? 0, availableAt.toISOString(), (/* @__PURE__ */ new Date()).toISOString()]
|
|
52
59
|
);
|
|
53
60
|
}
|
|
54
61
|
/**
|
|
@@ -91,15 +98,32 @@ var init_DatabaseDriver = __esm({
|
|
|
91
98
|
);
|
|
92
99
|
const createdAt = new Date(row.created_at).getTime();
|
|
93
100
|
const delaySeconds = row.available_at ? Math.max(0, Math.floor((new Date(row.available_at).getTime() - createdAt) / 1e3)) : void 0;
|
|
94
|
-
|
|
95
|
-
|
|
96
|
-
|
|
97
|
-
|
|
98
|
-
|
|
99
|
-
|
|
100
|
-
|
|
101
|
-
|
|
102
|
-
|
|
101
|
+
let job;
|
|
102
|
+
try {
|
|
103
|
+
const parsed = JSON.parse(row.payload);
|
|
104
|
+
if (parsed && typeof parsed === "object" && parsed.type && parsed.data) {
|
|
105
|
+
job = {
|
|
106
|
+
...parsed,
|
|
107
|
+
id: row.id,
|
|
108
|
+
// DB ID is the source of truth for deletion
|
|
109
|
+
attempts: row.attempts
|
|
110
|
+
};
|
|
111
|
+
} else {
|
|
112
|
+
throw new Error("Fallback");
|
|
113
|
+
}
|
|
114
|
+
} catch (_e) {
|
|
115
|
+
job = {
|
|
116
|
+
id: row.id,
|
|
117
|
+
type: "class",
|
|
118
|
+
data: row.payload,
|
|
119
|
+
createdAt,
|
|
120
|
+
attempts: row.attempts
|
|
121
|
+
};
|
|
122
|
+
}
|
|
123
|
+
if (delaySeconds !== void 0) {
|
|
124
|
+
job.delaySeconds = delaySeconds;
|
|
125
|
+
}
|
|
126
|
+
return job;
|
|
103
127
|
}
|
|
104
128
|
/**
|
|
105
129
|
* Get queue size.
|
|
@@ -139,6 +163,27 @@ var init_DatabaseDriver = __esm({
|
|
|
139
163
|
}
|
|
140
164
|
});
|
|
141
165
|
}
|
|
166
|
+
/**
|
|
167
|
+
* Mark a job as failed (DLQ).
|
|
168
|
+
*/
|
|
169
|
+
async fail(queue, job) {
|
|
170
|
+
const failedQueue = `failed:${queue}`;
|
|
171
|
+
const payload = JSON.stringify(job);
|
|
172
|
+
await this.dbService.execute(
|
|
173
|
+
`INSERT INTO ${this.tableName} (queue, payload, attempts, available_at, created_at)
|
|
174
|
+
VALUES ($1, $2, $3, $4, $5)`,
|
|
175
|
+
[failedQueue, payload, job.attempts, (/* @__PURE__ */ new Date()).toISOString(), (/* @__PURE__ */ new Date()).toISOString()]
|
|
176
|
+
);
|
|
177
|
+
}
|
|
178
|
+
/**
|
|
179
|
+
* Acknowledge/Complete a job.
|
|
180
|
+
*/
|
|
181
|
+
async complete(_queue, job) {
|
|
182
|
+
if (!job.id) {
|
|
183
|
+
return;
|
|
184
|
+
}
|
|
185
|
+
await this.dbService.execute(`DELETE FROM ${this.tableName} WHERE id = $1`, [job.id]);
|
|
186
|
+
}
|
|
142
187
|
};
|
|
143
188
|
}
|
|
144
189
|
});
|
|
@@ -317,6 +362,150 @@ var init_KafkaDriver = __esm({
|
|
|
317
362
|
}
|
|
318
363
|
});
|
|
319
364
|
|
|
365
|
+
// src/drivers/RabbitMQDriver.ts
|
|
366
|
+
var RabbitMQDriver_exports = {};
|
|
367
|
+
__export(RabbitMQDriver_exports, {
|
|
368
|
+
RabbitMQDriver: () => RabbitMQDriver
|
|
369
|
+
});
|
|
370
|
+
var RabbitMQDriver;
|
|
371
|
+
var init_RabbitMQDriver = __esm({
|
|
372
|
+
"src/drivers/RabbitMQDriver.ts"() {
|
|
373
|
+
"use strict";
|
|
374
|
+
RabbitMQDriver = class {
|
|
375
|
+
connection;
|
|
376
|
+
channel;
|
|
377
|
+
exchange;
|
|
378
|
+
exchangeType;
|
|
379
|
+
constructor(config) {
|
|
380
|
+
this.connection = config.client;
|
|
381
|
+
this.exchange = config.exchange;
|
|
382
|
+
this.exchangeType = config.exchangeType ?? "fanout";
|
|
383
|
+
if (!this.connection) {
|
|
384
|
+
throw new Error(
|
|
385
|
+
"[RabbitMQDriver] RabbitMQ connection is required. Please provide a connection from amqplib."
|
|
386
|
+
);
|
|
387
|
+
}
|
|
388
|
+
}
|
|
389
|
+
/**
|
|
390
|
+
* Ensure channel is created.
|
|
391
|
+
*/
|
|
392
|
+
async ensureChannel() {
|
|
393
|
+
if (this.channel) {
|
|
394
|
+
return this.channel;
|
|
395
|
+
}
|
|
396
|
+
if (typeof this.connection.createChannel === "function") {
|
|
397
|
+
this.channel = await this.connection.createChannel();
|
|
398
|
+
} else {
|
|
399
|
+
this.channel = this.connection;
|
|
400
|
+
}
|
|
401
|
+
if (this.exchange) {
|
|
402
|
+
await this.channel.assertExchange(this.exchange, this.exchangeType, { durable: true });
|
|
403
|
+
}
|
|
404
|
+
return this.channel;
|
|
405
|
+
}
|
|
406
|
+
/**
|
|
407
|
+
* Get the underlying connection.
|
|
408
|
+
*/
|
|
409
|
+
getRawConnection() {
|
|
410
|
+
return this.connection;
|
|
411
|
+
}
|
|
412
|
+
/**
|
|
413
|
+
* Push a job (sendToQueue / publish).
|
|
414
|
+
*/
|
|
415
|
+
async push(queue, job) {
|
|
416
|
+
const channel = await this.ensureChannel();
|
|
417
|
+
const payload = Buffer.from(JSON.stringify(job));
|
|
418
|
+
if (this.exchange) {
|
|
419
|
+
await channel.assertQueue(queue, { durable: true });
|
|
420
|
+
await channel.bindQueue(queue, this.exchange, "");
|
|
421
|
+
channel.publish(this.exchange, "", payload, { persistent: true });
|
|
422
|
+
} else {
|
|
423
|
+
await channel.assertQueue(queue, { durable: true });
|
|
424
|
+
channel.sendToQueue(queue, payload, { persistent: true });
|
|
425
|
+
}
|
|
426
|
+
}
|
|
427
|
+
/**
|
|
428
|
+
* Pop a job (get).
|
|
429
|
+
*/
|
|
430
|
+
async pop(queue) {
|
|
431
|
+
const channel = await this.ensureChannel();
|
|
432
|
+
await channel.assertQueue(queue, { durable: true });
|
|
433
|
+
const msg = await channel.get(queue, { noAck: false });
|
|
434
|
+
if (!msg) {
|
|
435
|
+
return null;
|
|
436
|
+
}
|
|
437
|
+
const job = JSON.parse(msg.content.toString());
|
|
438
|
+
job._raw = msg;
|
|
439
|
+
return job;
|
|
440
|
+
}
|
|
441
|
+
/**
|
|
442
|
+
* Acknowledge a message.
|
|
443
|
+
*/
|
|
444
|
+
async acknowledge(messageId) {
|
|
445
|
+
const channel = await this.ensureChannel();
|
|
446
|
+
if (typeof messageId === "object") {
|
|
447
|
+
channel.ack(messageId);
|
|
448
|
+
}
|
|
449
|
+
}
|
|
450
|
+
/**
|
|
451
|
+
* Negative acknowledge a message.
|
|
452
|
+
*/
|
|
453
|
+
async nack(message, requeue = true) {
|
|
454
|
+
const channel = await this.ensureChannel();
|
|
455
|
+
channel.nack(message, false, requeue);
|
|
456
|
+
}
|
|
457
|
+
/**
|
|
458
|
+
* Reject a message.
|
|
459
|
+
*/
|
|
460
|
+
async reject(message, requeue = true) {
|
|
461
|
+
const channel = await this.ensureChannel();
|
|
462
|
+
channel.reject(message, requeue);
|
|
463
|
+
}
|
|
464
|
+
/**
|
|
465
|
+
* Subscribe to a queue.
|
|
466
|
+
*/
|
|
467
|
+
async subscribe(queue, callback, options = {}) {
|
|
468
|
+
const channel = await this.ensureChannel();
|
|
469
|
+
await channel.assertQueue(queue, { durable: true });
|
|
470
|
+
if (this.exchange) {
|
|
471
|
+
await channel.bindQueue(queue, this.exchange, "");
|
|
472
|
+
}
|
|
473
|
+
const { autoAck = true } = options;
|
|
474
|
+
await channel.consume(
|
|
475
|
+
queue,
|
|
476
|
+
async (msg) => {
|
|
477
|
+
if (!msg) {
|
|
478
|
+
return;
|
|
479
|
+
}
|
|
480
|
+
const job = JSON.parse(msg.content.toString());
|
|
481
|
+
job._raw = msg;
|
|
482
|
+
await callback(job);
|
|
483
|
+
if (autoAck) {
|
|
484
|
+
channel.ack(msg);
|
|
485
|
+
}
|
|
486
|
+
},
|
|
487
|
+
{ noAck: false }
|
|
488
|
+
);
|
|
489
|
+
}
|
|
490
|
+
/**
|
|
491
|
+
* Get queue size.
|
|
492
|
+
*/
|
|
493
|
+
async size(queue) {
|
|
494
|
+
const channel = await this.ensureChannel();
|
|
495
|
+
const ok = await channel.checkQueue(queue);
|
|
496
|
+
return ok.messageCount;
|
|
497
|
+
}
|
|
498
|
+
/**
|
|
499
|
+
* Clear a queue.
|
|
500
|
+
*/
|
|
501
|
+
async clear(queue) {
|
|
502
|
+
const channel = await this.ensureChannel();
|
|
503
|
+
await channel.purgeQueue(queue);
|
|
504
|
+
}
|
|
505
|
+
};
|
|
506
|
+
}
|
|
507
|
+
});
|
|
508
|
+
|
|
320
509
|
// src/drivers/RedisDriver.ts
|
|
321
510
|
var RedisDriver_exports = {};
|
|
322
511
|
__export(RedisDriver_exports, {
|
|
@@ -326,9 +515,43 @@ var RedisDriver;
|
|
|
326
515
|
var init_RedisDriver = __esm({
|
|
327
516
|
"src/drivers/RedisDriver.ts"() {
|
|
328
517
|
"use strict";
|
|
329
|
-
RedisDriver = class {
|
|
518
|
+
RedisDriver = class _RedisDriver {
|
|
330
519
|
prefix;
|
|
331
520
|
client;
|
|
521
|
+
// Lua Logic:
|
|
522
|
+
// IF (IS_MEMBER(activeSet, groupId)) -> PUSH(pendingList, job)
|
|
523
|
+
// ELSE -> SADD(activeSet, groupId) & LPUSH(waitList, job)
|
|
524
|
+
static PUSH_SCRIPT = `
|
|
525
|
+
local waitList = KEYS[1]
|
|
526
|
+
local activeSet = KEYS[2]
|
|
527
|
+
local pendingList = KEYS[3]
|
|
528
|
+
local groupId = ARGV[1]
|
|
529
|
+
local payload = ARGV[2]
|
|
530
|
+
|
|
531
|
+
if redis.call('SISMEMBER', activeSet, groupId) == 1 then
|
|
532
|
+
return redis.call('RPUSH', pendingList, payload)
|
|
533
|
+
else
|
|
534
|
+
redis.call('SADD', activeSet, groupId)
|
|
535
|
+
return redis.call('LPUSH', waitList, payload)
|
|
536
|
+
end
|
|
537
|
+
`;
|
|
538
|
+
// Lua Logic:
|
|
539
|
+
// local next = LPOP(pendingList)
|
|
540
|
+
// IF (next) -> LPUSH(waitList, next)
|
|
541
|
+
// ELSE -> SREM(activeSet, groupId)
|
|
542
|
+
static COMPLETE_SCRIPT = `
|
|
543
|
+
local waitList = KEYS[1]
|
|
544
|
+
local activeSet = KEYS[2]
|
|
545
|
+
local pendingList = KEYS[3]
|
|
546
|
+
local groupId = ARGV[1]
|
|
547
|
+
|
|
548
|
+
local nextJob = redis.call('LPOP', pendingList)
|
|
549
|
+
if nextJob then
|
|
550
|
+
return redis.call('LPUSH', waitList, nextJob)
|
|
551
|
+
else
|
|
552
|
+
return redis.call('SREM', activeSet, groupId)
|
|
553
|
+
end
|
|
554
|
+
`;
|
|
332
555
|
constructor(config) {
|
|
333
556
|
this.client = config.client;
|
|
334
557
|
this.prefix = config.prefix ?? "queue:";
|
|
@@ -337,19 +560,36 @@ var init_RedisDriver = __esm({
|
|
|
337
560
|
"[RedisDriver] Redis client is required. Please install ioredis or redis package."
|
|
338
561
|
);
|
|
339
562
|
}
|
|
563
|
+
if (typeof this.client.defineCommand === "function") {
|
|
564
|
+
;
|
|
565
|
+
this.client.defineCommand("pushGroupJob", {
|
|
566
|
+
numberOfKeys: 3,
|
|
567
|
+
lua: _RedisDriver.PUSH_SCRIPT
|
|
568
|
+
});
|
|
569
|
+
this.client.defineCommand("completeGroupJob", {
|
|
570
|
+
numberOfKeys: 3,
|
|
571
|
+
lua: _RedisDriver.COMPLETE_SCRIPT
|
|
572
|
+
});
|
|
573
|
+
}
|
|
340
574
|
}
|
|
341
575
|
/**
|
|
342
576
|
* Get full Redis key for a queue.
|
|
343
577
|
*/
|
|
344
|
-
getKey(queue) {
|
|
578
|
+
getKey(queue, priority) {
|
|
579
|
+
if (priority) {
|
|
580
|
+
return `${this.prefix}${queue}:${priority}`;
|
|
581
|
+
}
|
|
345
582
|
return `${this.prefix}${queue}`;
|
|
346
583
|
}
|
|
347
584
|
/**
|
|
348
585
|
* Push a job (LPUSH).
|
|
349
586
|
*/
|
|
350
|
-
async push(queue, job) {
|
|
351
|
-
const key = this.getKey(queue);
|
|
352
|
-
const
|
|
587
|
+
async push(queue, job, options) {
|
|
588
|
+
const key = this.getKey(queue, options?.priority);
|
|
589
|
+
const groupId = options?.groupId;
|
|
590
|
+
if (groupId && options?.priority) {
|
|
591
|
+
}
|
|
592
|
+
const payloadObj = {
|
|
353
593
|
id: job.id,
|
|
354
594
|
type: job.type,
|
|
355
595
|
data: job.data,
|
|
@@ -357,8 +597,18 @@ var init_RedisDriver = __esm({
|
|
|
357
597
|
createdAt: job.createdAt,
|
|
358
598
|
delaySeconds: job.delaySeconds,
|
|
359
599
|
attempts: job.attempts,
|
|
360
|
-
maxAttempts: job.maxAttempts
|
|
361
|
-
|
|
600
|
+
maxAttempts: job.maxAttempts,
|
|
601
|
+
groupId,
|
|
602
|
+
error: job.error,
|
|
603
|
+
failedAt: job.failedAt
|
|
604
|
+
};
|
|
605
|
+
const payload = JSON.stringify(payloadObj);
|
|
606
|
+
if (groupId && typeof this.client.pushGroupJob === "function") {
|
|
607
|
+
const activeSetKey = `${this.prefix}active`;
|
|
608
|
+
const pendingListKey = `${this.prefix}pending:${groupId}`;
|
|
609
|
+
await this.client.pushGroupJob(key, activeSetKey, pendingListKey, groupId, payload);
|
|
610
|
+
return;
|
|
611
|
+
}
|
|
362
612
|
if (job.delaySeconds && job.delaySeconds > 0) {
|
|
363
613
|
const delayKey = `${key}:delayed`;
|
|
364
614
|
const score = Date.now() + job.delaySeconds * 1e3;
|
|
@@ -371,29 +621,53 @@ var init_RedisDriver = __esm({
|
|
|
371
621
|
await this.client.lpush(key, payload);
|
|
372
622
|
}
|
|
373
623
|
}
|
|
624
|
+
/**
|
|
625
|
+
* Complete a job (handle Group FIFO).
|
|
626
|
+
*/
|
|
627
|
+
async complete(queue, job) {
|
|
628
|
+
if (!job.groupId) {
|
|
629
|
+
return;
|
|
630
|
+
}
|
|
631
|
+
const key = this.getKey(queue);
|
|
632
|
+
const activeSetKey = `${this.prefix}active`;
|
|
633
|
+
const pendingListKey = `${this.prefix}pending:${job.groupId}`;
|
|
634
|
+
if (typeof this.client.completeGroupJob === "function") {
|
|
635
|
+
await this.client.completeGroupJob(key, activeSetKey, pendingListKey, job.groupId);
|
|
636
|
+
}
|
|
637
|
+
}
|
|
374
638
|
/**
|
|
375
639
|
* Pop a job (RPOP, FIFO).
|
|
640
|
+
* Supports implicit priority polling (critical -> high -> default -> low).
|
|
376
641
|
*/
|
|
377
642
|
async pop(queue) {
|
|
378
|
-
const
|
|
379
|
-
const
|
|
380
|
-
|
|
381
|
-
const
|
|
382
|
-
|
|
383
|
-
|
|
384
|
-
const
|
|
385
|
-
if (
|
|
386
|
-
const
|
|
387
|
-
|
|
388
|
-
|
|
643
|
+
const priorities = ["critical", "high", void 0, "low"];
|
|
644
|
+
for (const priority of priorities) {
|
|
645
|
+
const key = this.getKey(queue, priority);
|
|
646
|
+
const delayKey = `${key}:delayed`;
|
|
647
|
+
if (typeof this.client.zrange === "function") {
|
|
648
|
+
const now = Date.now();
|
|
649
|
+
const delayedJobs = await this.client.zrange(delayKey, 0, 0, "WITHSCORES");
|
|
650
|
+
if (delayedJobs && delayedJobs.length >= 2) {
|
|
651
|
+
const score = parseFloat(delayedJobs[1]);
|
|
652
|
+
if (score <= now) {
|
|
653
|
+
const payload2 = delayedJobs[0];
|
|
654
|
+
await this.client.zrem(delayKey, payload2);
|
|
655
|
+
return this.parsePayload(payload2);
|
|
656
|
+
}
|
|
389
657
|
}
|
|
390
658
|
}
|
|
659
|
+
if (typeof this.client.get === "function") {
|
|
660
|
+
const isPaused = await this.client.get(`${key}:paused`);
|
|
661
|
+
if (isPaused === "1") {
|
|
662
|
+
continue;
|
|
663
|
+
}
|
|
664
|
+
}
|
|
665
|
+
const payload = await this.client.rpop(key);
|
|
666
|
+
if (payload) {
|
|
667
|
+
return this.parsePayload(payload);
|
|
668
|
+
}
|
|
391
669
|
}
|
|
392
|
-
|
|
393
|
-
if (!payload) {
|
|
394
|
-
return null;
|
|
395
|
-
}
|
|
396
|
-
return this.parsePayload(payload);
|
|
670
|
+
return null;
|
|
397
671
|
}
|
|
398
672
|
/**
|
|
399
673
|
* Parse Redis payload.
|
|
@@ -408,7 +682,11 @@ var init_RedisDriver = __esm({
|
|
|
408
682
|
createdAt: parsed.createdAt,
|
|
409
683
|
delaySeconds: parsed.delaySeconds,
|
|
410
684
|
attempts: parsed.attempts,
|
|
411
|
-
maxAttempts: parsed.maxAttempts
|
|
685
|
+
maxAttempts: parsed.maxAttempts,
|
|
686
|
+
groupId: parsed.groupId,
|
|
687
|
+
error: parsed.error,
|
|
688
|
+
failedAt: parsed.failedAt,
|
|
689
|
+
priority: parsed.priority
|
|
412
690
|
};
|
|
413
691
|
}
|
|
414
692
|
/**
|
|
@@ -418,15 +696,31 @@ var init_RedisDriver = __esm({
|
|
|
418
696
|
const key = this.getKey(queue);
|
|
419
697
|
return this.client.llen(key);
|
|
420
698
|
}
|
|
699
|
+
/**
|
|
700
|
+
* Mark a job as permanently failed (DLQ).
|
|
701
|
+
*/
|
|
702
|
+
async fail(queue, job) {
|
|
703
|
+
const key = `${this.getKey(queue)}:failed`;
|
|
704
|
+
const payload = JSON.stringify({
|
|
705
|
+
...job,
|
|
706
|
+
failedAt: Date.now()
|
|
707
|
+
});
|
|
708
|
+
await this.client.lpush(key, payload);
|
|
709
|
+
if (typeof this.client.ltrim === "function") {
|
|
710
|
+
await this.client.ltrim(key, 0, 999);
|
|
711
|
+
}
|
|
712
|
+
}
|
|
421
713
|
/**
|
|
422
714
|
* Clear a queue.
|
|
423
715
|
*/
|
|
424
716
|
async clear(queue) {
|
|
425
717
|
const key = this.getKey(queue);
|
|
426
718
|
const delayKey = `${key}:delayed`;
|
|
719
|
+
const activeSetKey = `${this.prefix}active`;
|
|
427
720
|
await this.client.del(key);
|
|
428
721
|
if (typeof this.client.del === "function") {
|
|
429
722
|
await this.client.del(delayKey);
|
|
723
|
+
await this.client.del(activeSetKey);
|
|
430
724
|
}
|
|
431
725
|
}
|
|
432
726
|
/**
|
|
@@ -436,6 +730,17 @@ var init_RedisDriver = __esm({
|
|
|
436
730
|
if (jobs.length === 0) {
|
|
437
731
|
return;
|
|
438
732
|
}
|
|
733
|
+
const hasGroup = jobs.some((j) => j.groupId);
|
|
734
|
+
const hasPriority = jobs.some((j) => j.priority);
|
|
735
|
+
if (hasGroup || hasPriority) {
|
|
736
|
+
for (const job of jobs) {
|
|
737
|
+
await this.push(queue, job, {
|
|
738
|
+
groupId: job.groupId,
|
|
739
|
+
priority: job.priority
|
|
740
|
+
});
|
|
741
|
+
}
|
|
742
|
+
return;
|
|
743
|
+
}
|
|
439
744
|
const key = this.getKey(queue);
|
|
440
745
|
const payloads = jobs.map(
|
|
441
746
|
(job) => JSON.stringify({
|
|
@@ -446,7 +751,9 @@ var init_RedisDriver = __esm({
|
|
|
446
751
|
createdAt: job.createdAt,
|
|
447
752
|
delaySeconds: job.delaySeconds,
|
|
448
753
|
attempts: job.attempts,
|
|
449
|
-
maxAttempts: job.maxAttempts
|
|
754
|
+
maxAttempts: job.maxAttempts,
|
|
755
|
+
groupId: job.groupId,
|
|
756
|
+
priority: job.priority
|
|
450
757
|
})
|
|
451
758
|
);
|
|
452
759
|
await this.client.lpush(key, ...payloads);
|
|
@@ -467,6 +774,89 @@ var init_RedisDriver = __esm({
|
|
|
467
774
|
}
|
|
468
775
|
return results;
|
|
469
776
|
}
|
|
777
|
+
/**
|
|
778
|
+
* Report worker heartbeat for monitoring.
|
|
779
|
+
*/
|
|
780
|
+
async reportHeartbeat(workerInfo, prefix) {
|
|
781
|
+
const key = `${prefix ?? this.prefix}worker:${workerInfo.id}`;
|
|
782
|
+
if (typeof this.client.set === "function") {
|
|
783
|
+
await this.client.set(key, JSON.stringify(workerInfo), "EX", 10);
|
|
784
|
+
}
|
|
785
|
+
}
|
|
786
|
+
/**
|
|
787
|
+
* Publish a log message for monitoring.
|
|
788
|
+
*/
|
|
789
|
+
async publishLog(logPayload, prefix) {
|
|
790
|
+
const payload = JSON.stringify(logPayload);
|
|
791
|
+
const monitorPrefix = prefix ?? this.prefix;
|
|
792
|
+
if (typeof this.client.publish === "function") {
|
|
793
|
+
await this.client.publish(`${monitorPrefix}logs`, payload);
|
|
794
|
+
}
|
|
795
|
+
const historyKey = `${monitorPrefix}logs:history`;
|
|
796
|
+
if (typeof this.client.pipeline === "function") {
|
|
797
|
+
const pipe = this.client.pipeline();
|
|
798
|
+
pipe.lpush(historyKey, payload);
|
|
799
|
+
pipe.ltrim(historyKey, 0, 99);
|
|
800
|
+
await pipe.exec();
|
|
801
|
+
} else {
|
|
802
|
+
await this.client.lpush(historyKey, payload);
|
|
803
|
+
}
|
|
804
|
+
}
|
|
805
|
+
/**
|
|
806
|
+
* Check if a queue is rate limited.
|
|
807
|
+
* Uses a fixed window counter.
|
|
808
|
+
*/
|
|
809
|
+
async checkRateLimit(queue, config) {
|
|
810
|
+
const key = `${this.prefix}${queue}:ratelimit`;
|
|
811
|
+
const now = Date.now();
|
|
812
|
+
const windowStart = Math.floor(now / config.duration);
|
|
813
|
+
const windowKey = `${key}:${windowStart}`;
|
|
814
|
+
const client = this.client;
|
|
815
|
+
if (typeof client.incr === "function") {
|
|
816
|
+
const current = await client.incr(windowKey);
|
|
817
|
+
if (current === 1) {
|
|
818
|
+
await client.expire(windowKey, Math.ceil(config.duration / 1e3) + 1);
|
|
819
|
+
}
|
|
820
|
+
return current <= config.max;
|
|
821
|
+
}
|
|
822
|
+
return true;
|
|
823
|
+
}
|
|
824
|
+
/**
|
|
825
|
+
* Get failed jobs from DLQ.
|
|
826
|
+
*/
|
|
827
|
+
async getFailed(queue, start = 0, end = -1) {
|
|
828
|
+
const key = `${this.getKey(queue)}:failed`;
|
|
829
|
+
const payloads = await this.client.lrange(key, start, end);
|
|
830
|
+
return payloads.map((p) => this.parsePayload(p));
|
|
831
|
+
}
|
|
832
|
+
/**
|
|
833
|
+
* Retry failed jobs from DLQ.
|
|
834
|
+
* Moves jobs from failed list back to the main queue.
|
|
835
|
+
*/
|
|
836
|
+
async retryFailed(queue, count = 1) {
|
|
837
|
+
const failedKey = `${this.getKey(queue)}:failed`;
|
|
838
|
+
let retried = 0;
|
|
839
|
+
for (let i = 0; i < count; i++) {
|
|
840
|
+
const payload = await this.client.rpop(failedKey);
|
|
841
|
+
if (!payload) {
|
|
842
|
+
break;
|
|
843
|
+
}
|
|
844
|
+
const job = this.parsePayload(payload);
|
|
845
|
+
job.attempts = 0;
|
|
846
|
+
delete job.error;
|
|
847
|
+
delete job.failedAt;
|
|
848
|
+
await this.push(queue, job, { priority: job.priority, groupId: job.groupId });
|
|
849
|
+
retried++;
|
|
850
|
+
}
|
|
851
|
+
return retried;
|
|
852
|
+
}
|
|
853
|
+
/**
|
|
854
|
+
* Clear failed jobs from DLQ.
|
|
855
|
+
*/
|
|
856
|
+
async clearFailed(queue) {
|
|
857
|
+
const key = `${this.getKey(queue)}:failed`;
|
|
858
|
+
await this.client.del(key);
|
|
859
|
+
}
|
|
470
860
|
};
|
|
471
861
|
}
|
|
472
862
|
});
|
|
@@ -672,6 +1062,125 @@ var init_SQSDriver = __esm({
|
|
|
672
1062
|
}
|
|
673
1063
|
});
|
|
674
1064
|
|
|
1065
|
+
// src/Scheduler.ts
|
|
1066
|
+
var Scheduler_exports = {};
|
|
1067
|
+
__export(Scheduler_exports, {
|
|
1068
|
+
Scheduler: () => Scheduler
|
|
1069
|
+
});
|
|
1070
|
+
import parser from "cron-parser";
|
|
1071
|
+
var Scheduler;
|
|
1072
|
+
var init_Scheduler = __esm({
|
|
1073
|
+
"src/Scheduler.ts"() {
|
|
1074
|
+
"use strict";
|
|
1075
|
+
Scheduler = class {
|
|
1076
|
+
constructor(manager, options = {}) {
|
|
1077
|
+
this.manager = manager;
|
|
1078
|
+
this.prefix = options.prefix ?? "queue:";
|
|
1079
|
+
}
|
|
1080
|
+
prefix;
|
|
1081
|
+
get client() {
|
|
1082
|
+
const driver = this.manager.getDriver(this.manager.getDefaultConnection());
|
|
1083
|
+
return driver.client;
|
|
1084
|
+
}
|
|
1085
|
+
/**
|
|
1086
|
+
* Register a scheduled job.
|
|
1087
|
+
*/
|
|
1088
|
+
async register(config) {
|
|
1089
|
+
const nextRun = parser.parse(config.cron).next().getTime();
|
|
1090
|
+
const fullConfig = {
|
|
1091
|
+
...config,
|
|
1092
|
+
nextRun,
|
|
1093
|
+
enabled: true
|
|
1094
|
+
};
|
|
1095
|
+
const pipe = this.client.pipeline();
|
|
1096
|
+
pipe.hset(`${this.prefix}schedule:${config.id}`, {
|
|
1097
|
+
...fullConfig,
|
|
1098
|
+
job: JSON.stringify(fullConfig.job)
|
|
1099
|
+
});
|
|
1100
|
+
pipe.zadd(`${this.prefix}schedules`, nextRun, config.id);
|
|
1101
|
+
await pipe.exec();
|
|
1102
|
+
}
|
|
1103
|
+
/**
|
|
1104
|
+
* Remove a scheduled job.
|
|
1105
|
+
*/
|
|
1106
|
+
async remove(id) {
|
|
1107
|
+
const pipe = this.client.pipeline();
|
|
1108
|
+
pipe.del(`${this.prefix}schedule:${id}`);
|
|
1109
|
+
pipe.zrem(`${this.prefix}schedules`, id);
|
|
1110
|
+
await pipe.exec();
|
|
1111
|
+
}
|
|
1112
|
+
/**
|
|
1113
|
+
* List all scheduled jobs.
|
|
1114
|
+
*/
|
|
1115
|
+
async list() {
|
|
1116
|
+
const ids = await this.client.zrange(`${this.prefix}schedules`, 0, -1);
|
|
1117
|
+
const configs = [];
|
|
1118
|
+
for (const id of ids) {
|
|
1119
|
+
const data = await this.client.hgetall(`${this.prefix}schedule:${id}`);
|
|
1120
|
+
if (data?.id) {
|
|
1121
|
+
configs.push({
|
|
1122
|
+
...data,
|
|
1123
|
+
lastRun: data.lastRun ? parseInt(data.lastRun, 10) : void 0,
|
|
1124
|
+
nextRun: data.nextRun ? parseInt(data.nextRun, 10) : void 0,
|
|
1125
|
+
enabled: data.enabled === "true",
|
|
1126
|
+
job: JSON.parse(data.job)
|
|
1127
|
+
});
|
|
1128
|
+
}
|
|
1129
|
+
}
|
|
1130
|
+
return configs;
|
|
1131
|
+
}
|
|
1132
|
+
/**
|
|
1133
|
+
* Run a scheduled job immediately (out of schedule).
|
|
1134
|
+
*/
|
|
1135
|
+
async runNow(id) {
|
|
1136
|
+
const data = await this.client.hgetall(`${this.prefix}schedule:${id}`);
|
|
1137
|
+
if (data?.id) {
|
|
1138
|
+
const serialized = JSON.parse(data.job);
|
|
1139
|
+
const serializer = this.manager.getSerializer();
|
|
1140
|
+
const job = serializer.deserialize(serialized);
|
|
1141
|
+
await this.manager.push(job);
|
|
1142
|
+
}
|
|
1143
|
+
}
|
|
1144
|
+
/**
|
|
1145
|
+
* Process due tasks (TICK).
|
|
1146
|
+
* This should be called periodically (e.g. every minute).
|
|
1147
|
+
*/
|
|
1148
|
+
async tick() {
|
|
1149
|
+
const now = Date.now();
|
|
1150
|
+
const dueIds = await this.client.zrangebyscore(`${this.prefix}schedules`, 0, now);
|
|
1151
|
+
let fired = 0;
|
|
1152
|
+
for (const id of dueIds) {
|
|
1153
|
+
const lockKey = `${this.prefix}lock:schedule:${id}:${Math.floor(now / 1e3)}`;
|
|
1154
|
+
const lock = await this.client.set(lockKey, "1", "EX", 10, "NX");
|
|
1155
|
+
if (lock === "OK") {
|
|
1156
|
+
const data = await this.client.hgetall(`${this.prefix}schedule:${id}`);
|
|
1157
|
+
if (data?.id && data.enabled === "true") {
|
|
1158
|
+
try {
|
|
1159
|
+
const serializedJob = JSON.parse(data.job);
|
|
1160
|
+
const connection = data.connection || this.manager.getDefaultConnection();
|
|
1161
|
+
const driver = this.manager.getDriver(connection);
|
|
1162
|
+
await driver.push(data.queue, serializedJob);
|
|
1163
|
+
const nextRun = parser.parse(data.cron).next().getTime();
|
|
1164
|
+
const pipe = this.client.pipeline();
|
|
1165
|
+
pipe.hset(`${this.prefix}schedule:${id}`, {
|
|
1166
|
+
lastRun: now,
|
|
1167
|
+
nextRun
|
|
1168
|
+
});
|
|
1169
|
+
pipe.zadd(`${this.prefix}schedules`, nextRun, id);
|
|
1170
|
+
await pipe.exec();
|
|
1171
|
+
fired++;
|
|
1172
|
+
} catch (err) {
|
|
1173
|
+
console.error(`[Scheduler] Failed to process schedule ${id}:`, err);
|
|
1174
|
+
}
|
|
1175
|
+
}
|
|
1176
|
+
}
|
|
1177
|
+
}
|
|
1178
|
+
return fired;
|
|
1179
|
+
}
|
|
1180
|
+
};
|
|
1181
|
+
}
|
|
1182
|
+
});
|
|
1183
|
+
|
|
675
1184
|
// src/Worker.ts
|
|
676
1185
|
var Worker = class {
|
|
677
1186
|
constructor(options = {}) {
|
|
@@ -682,36 +1191,31 @@ var Worker = class {
|
|
|
682
1191
|
* @param job - Job instance
|
|
683
1192
|
*/
|
|
684
1193
|
async process(job) {
|
|
685
|
-
const maxAttempts = this.options.maxAttempts ?? 3;
|
|
1194
|
+
const maxAttempts = job.maxAttempts ?? this.options.maxAttempts ?? 3;
|
|
686
1195
|
const timeout = this.options.timeout;
|
|
687
|
-
|
|
688
|
-
|
|
689
|
-
|
|
690
|
-
|
|
691
|
-
|
|
692
|
-
|
|
693
|
-
|
|
694
|
-
|
|
695
|
-
|
|
696
|
-
(
|
|
697
|
-
|
|
698
|
-
timeout * 1e3
|
|
699
|
-
)
|
|
1196
|
+
if (!job.attempts) {
|
|
1197
|
+
job.attempts = 1;
|
|
1198
|
+
}
|
|
1199
|
+
try {
|
|
1200
|
+
if (timeout) {
|
|
1201
|
+
await Promise.race([
|
|
1202
|
+
job.handle(),
|
|
1203
|
+
new Promise(
|
|
1204
|
+
(_, reject) => setTimeout(
|
|
1205
|
+
() => reject(new Error(`Job timeout after ${timeout} seconds`)),
|
|
1206
|
+
timeout * 1e3
|
|
700
1207
|
)
|
|
701
|
-
|
|
702
|
-
|
|
703
|
-
|
|
704
|
-
|
|
705
|
-
|
|
706
|
-
|
|
707
|
-
|
|
708
|
-
|
|
709
|
-
|
|
710
|
-
throw lastError;
|
|
711
|
-
}
|
|
712
|
-
const delay = Math.min(1e3 * 2 ** (attempt - 1), 3e4);
|
|
713
|
-
await new Promise((resolve) => setTimeout(resolve, delay));
|
|
1208
|
+
)
|
|
1209
|
+
]);
|
|
1210
|
+
} else {
|
|
1211
|
+
await job.handle();
|
|
1212
|
+
}
|
|
1213
|
+
} catch (error) {
|
|
1214
|
+
const err = error instanceof Error ? error : new Error(String(error));
|
|
1215
|
+
if (job.attempts >= maxAttempts) {
|
|
1216
|
+
await this.handleFailure(job, err);
|
|
714
1217
|
}
|
|
1218
|
+
throw err;
|
|
715
1219
|
}
|
|
716
1220
|
}
|
|
717
1221
|
/**
|
|
@@ -741,6 +1245,11 @@ var Consumer = class {
|
|
|
741
1245
|
}
|
|
742
1246
|
running = false;
|
|
743
1247
|
stopRequested = false;
|
|
1248
|
+
workerId = `worker-${Math.random().toString(36).substring(2, 8)}`;
|
|
1249
|
+
heartbeatTimer = null;
|
|
1250
|
+
get connectionName() {
|
|
1251
|
+
return this.options.connection ?? this.queueManager.getDefaultConnection();
|
|
1252
|
+
}
|
|
744
1253
|
/**
|
|
745
1254
|
* Start the consumer loop.
|
|
746
1255
|
*/
|
|
@@ -755,18 +1264,72 @@ var Consumer = class {
|
|
|
755
1264
|
const keepAlive = this.options.keepAlive ?? true;
|
|
756
1265
|
console.log("[Consumer] Started", {
|
|
757
1266
|
queues: this.options.queues,
|
|
758
|
-
connection: this.options.connection
|
|
1267
|
+
connection: this.options.connection,
|
|
1268
|
+
workerId: this.workerId
|
|
759
1269
|
});
|
|
1270
|
+
if (this.options.monitor) {
|
|
1271
|
+
this.startHeartbeat();
|
|
1272
|
+
await this.publishLog("info", `Consumer started on [${this.options.queues.join(", ")}]`);
|
|
1273
|
+
}
|
|
760
1274
|
while (this.running && !this.stopRequested) {
|
|
761
1275
|
let processed = false;
|
|
762
1276
|
for (const queue of this.options.queues) {
|
|
1277
|
+
if (this.options.rateLimits?.[queue]) {
|
|
1278
|
+
const limit = this.options.rateLimits[queue];
|
|
1279
|
+
try {
|
|
1280
|
+
const driver = this.queueManager.getDriver(this.connectionName);
|
|
1281
|
+
if (driver.checkRateLimit) {
|
|
1282
|
+
const allowed = await driver.checkRateLimit(queue, limit);
|
|
1283
|
+
if (!allowed) {
|
|
1284
|
+
continue;
|
|
1285
|
+
}
|
|
1286
|
+
}
|
|
1287
|
+
} catch (err) {
|
|
1288
|
+
console.error(`[Consumer] Error checking rate limit for "${queue}":`, err);
|
|
1289
|
+
}
|
|
1290
|
+
}
|
|
763
1291
|
try {
|
|
764
1292
|
const job = await this.queueManager.pop(queue, this.options.connection);
|
|
765
1293
|
if (job) {
|
|
766
1294
|
processed = true;
|
|
767
|
-
|
|
768
|
-
|
|
769
|
-
}
|
|
1295
|
+
if (this.options.monitor) {
|
|
1296
|
+
await this.publishLog("info", `Processing job: ${job.id}`, job.id);
|
|
1297
|
+
}
|
|
1298
|
+
try {
|
|
1299
|
+
await worker.process(job);
|
|
1300
|
+
if (this.options.monitor) {
|
|
1301
|
+
await this.publishLog("success", `Completed job: ${job.id}`, job.id);
|
|
1302
|
+
}
|
|
1303
|
+
} catch (err) {
|
|
1304
|
+
console.error(`[Consumer] Error processing job in queue "${queue}":`, err);
|
|
1305
|
+
if (this.options.monitor) {
|
|
1306
|
+
await this.publishLog("error", `Job failed: ${job.id} - ${err.message}`, job.id);
|
|
1307
|
+
}
|
|
1308
|
+
const attempts = job.attempts ?? 1;
|
|
1309
|
+
const maxAttempts = job.maxAttempts ?? this.options.workerOptions?.maxAttempts ?? 3;
|
|
1310
|
+
if (attempts < maxAttempts) {
|
|
1311
|
+
job.attempts = attempts + 1;
|
|
1312
|
+
const delayMs = job.getRetryDelay(job.attempts);
|
|
1313
|
+
const delaySec = Math.ceil(delayMs / 1e3);
|
|
1314
|
+
job.delay(delaySec);
|
|
1315
|
+
await this.queueManager.push(job);
|
|
1316
|
+
if (this.options.monitor) {
|
|
1317
|
+
await this.publishLog(
|
|
1318
|
+
"warning",
|
|
1319
|
+
`Job retrying in ${delaySec}s (Attempt ${job.attempts}/${maxAttempts})`,
|
|
1320
|
+
job.id
|
|
1321
|
+
);
|
|
1322
|
+
}
|
|
1323
|
+
} else {
|
|
1324
|
+
await this.queueManager.fail(job, err).catch((dlqErr) => {
|
|
1325
|
+
console.error(`[Consumer] Error moving job to DLQ:`, dlqErr);
|
|
1326
|
+
});
|
|
1327
|
+
}
|
|
1328
|
+
} finally {
|
|
1329
|
+
await this.queueManager.complete(job).catch((err) => {
|
|
1330
|
+
console.error(`[Consumer] Error completing job in queue "${queue}":`, err);
|
|
1331
|
+
});
|
|
1332
|
+
}
|
|
770
1333
|
}
|
|
771
1334
|
} catch (error) {
|
|
772
1335
|
console.error(`[Consumer] Error polling queue "${queue}":`, error);
|
|
@@ -775,13 +1338,83 @@ var Consumer = class {
|
|
|
775
1338
|
if (!processed && !keepAlive) {
|
|
776
1339
|
break;
|
|
777
1340
|
}
|
|
778
|
-
if (!this.stopRequested) {
|
|
1341
|
+
if (!this.stopRequested && !processed) {
|
|
779
1342
|
await new Promise((resolve) => setTimeout(resolve, pollInterval));
|
|
1343
|
+
} else if (!this.stopRequested && processed) {
|
|
1344
|
+
await new Promise((resolve) => setTimeout(resolve, 0));
|
|
780
1345
|
}
|
|
781
1346
|
}
|
|
782
1347
|
this.running = false;
|
|
1348
|
+
this.stopHeartbeat();
|
|
1349
|
+
if (this.options.monitor) {
|
|
1350
|
+
await this.publishLog("info", "Consumer stopped");
|
|
1351
|
+
}
|
|
783
1352
|
console.log("[Consumer] Stopped");
|
|
784
1353
|
}
|
|
1354
|
+
startHeartbeat() {
|
|
1355
|
+
const interval = typeof this.options.monitor === "object" ? this.options.monitor.interval ?? 5e3 : 5e3;
|
|
1356
|
+
const monitorOptions = typeof this.options.monitor === "object" ? this.options.monitor : {};
|
|
1357
|
+
this.heartbeatTimer = setInterval(async () => {
|
|
1358
|
+
try {
|
|
1359
|
+
const driver = this.queueManager.getDriver(this.connectionName);
|
|
1360
|
+
if (driver.reportHeartbeat) {
|
|
1361
|
+
const monitorPrefix = typeof this.options.monitor === "object" ? this.options.monitor.prefix : void 0;
|
|
1362
|
+
const os = __require("os");
|
|
1363
|
+
const mem = process.memoryUsage();
|
|
1364
|
+
const metrics = {
|
|
1365
|
+
cpu: os.loadavg()[0],
|
|
1366
|
+
// 1m load avg
|
|
1367
|
+
cores: os.cpus().length,
|
|
1368
|
+
ram: {
|
|
1369
|
+
rss: Math.floor(mem.rss / 1024 / 1024),
|
|
1370
|
+
heapUsed: Math.floor(mem.heapUsed / 1024 / 1024),
|
|
1371
|
+
total: Math.floor(os.totalmem() / 1024 / 1024)
|
|
1372
|
+
}
|
|
1373
|
+
};
|
|
1374
|
+
await driver.reportHeartbeat(
|
|
1375
|
+
{
|
|
1376
|
+
id: this.workerId,
|
|
1377
|
+
status: "online",
|
|
1378
|
+
hostname: os.hostname(),
|
|
1379
|
+
pid: process.pid,
|
|
1380
|
+
uptime: Math.floor(process.uptime()),
|
|
1381
|
+
last_ping: (/* @__PURE__ */ new Date()).toISOString(),
|
|
1382
|
+
queues: this.options.queues,
|
|
1383
|
+
metrics,
|
|
1384
|
+
...monitorOptions.extraInfo || {}
|
|
1385
|
+
},
|
|
1386
|
+
monitorPrefix
|
|
1387
|
+
);
|
|
1388
|
+
}
|
|
1389
|
+
} catch (_e) {
|
|
1390
|
+
}
|
|
1391
|
+
}, interval);
|
|
1392
|
+
}
|
|
1393
|
+
stopHeartbeat() {
|
|
1394
|
+
if (this.heartbeatTimer) {
|
|
1395
|
+
clearInterval(this.heartbeatTimer);
|
|
1396
|
+
this.heartbeatTimer = null;
|
|
1397
|
+
}
|
|
1398
|
+
}
|
|
1399
|
+
async publishLog(level, message, jobId) {
|
|
1400
|
+
try {
|
|
1401
|
+
const driver = this.queueManager.getDriver(this.connectionName);
|
|
1402
|
+
if (driver.publishLog) {
|
|
1403
|
+
const monitorPrefix = typeof this.options.monitor === "object" ? this.options.monitor.prefix : void 0;
|
|
1404
|
+
await driver.publishLog(
|
|
1405
|
+
{
|
|
1406
|
+
level,
|
|
1407
|
+
message,
|
|
1408
|
+
workerId: this.workerId,
|
|
1409
|
+
jobId,
|
|
1410
|
+
timestamp: (/* @__PURE__ */ new Date()).toISOString()
|
|
1411
|
+
},
|
|
1412
|
+
monitorPrefix
|
|
1413
|
+
);
|
|
1414
|
+
}
|
|
1415
|
+
} catch (_e) {
|
|
1416
|
+
}
|
|
1417
|
+
}
|
|
785
1418
|
/**
|
|
786
1419
|
* Stop the consumer loop (graceful shutdown).
|
|
787
1420
|
*/
|
|
@@ -872,11 +1505,16 @@ var MemoryDriver = class {
|
|
|
872
1505
|
};
|
|
873
1506
|
|
|
874
1507
|
// src/index.ts
|
|
1508
|
+
init_RabbitMQDriver();
|
|
875
1509
|
init_RedisDriver();
|
|
876
1510
|
init_SQSDriver();
|
|
877
1511
|
|
|
878
1512
|
// src/Job.ts
|
|
879
1513
|
var Job = class {
|
|
1514
|
+
/**
|
|
1515
|
+
* Unique job identifier.
|
|
1516
|
+
*/
|
|
1517
|
+
id;
|
|
880
1518
|
/**
|
|
881
1519
|
* Queue name.
|
|
882
1520
|
*/
|
|
@@ -897,6 +1535,22 @@ var Job = class {
|
|
|
897
1535
|
* Maximum attempts.
|
|
898
1536
|
*/
|
|
899
1537
|
maxAttempts;
|
|
1538
|
+
/**
|
|
1539
|
+
* Group ID for FIFO.
|
|
1540
|
+
*/
|
|
1541
|
+
groupId;
|
|
1542
|
+
/**
|
|
1543
|
+
* Job priority.
|
|
1544
|
+
*/
|
|
1545
|
+
priority;
|
|
1546
|
+
/**
|
|
1547
|
+
* Initial retry delay (seconds).
|
|
1548
|
+
*/
|
|
1549
|
+
retryAfterSeconds;
|
|
1550
|
+
/**
|
|
1551
|
+
* Retry delay multiplier.
|
|
1552
|
+
*/
|
|
1553
|
+
retryMultiplier;
|
|
900
1554
|
/**
|
|
901
1555
|
* Set target queue.
|
|
902
1556
|
*/
|
|
@@ -911,6 +1565,14 @@ var Job = class {
|
|
|
911
1565
|
this.connectionName = connection;
|
|
912
1566
|
return this;
|
|
913
1567
|
}
|
|
1568
|
+
/**
|
|
1569
|
+
* Set job priority.
|
|
1570
|
+
* @param priority - 'high', 'low', or number
|
|
1571
|
+
*/
|
|
1572
|
+
withPriority(priority) {
|
|
1573
|
+
this.priority = priority;
|
|
1574
|
+
return this;
|
|
1575
|
+
}
|
|
914
1576
|
/**
|
|
915
1577
|
* Set delay (seconds).
|
|
916
1578
|
*/
|
|
@@ -918,6 +1580,26 @@ var Job = class {
|
|
|
918
1580
|
this.delaySeconds = delay;
|
|
919
1581
|
return this;
|
|
920
1582
|
}
|
|
1583
|
+
/**
|
|
1584
|
+
* Set retry backoff strategy.
|
|
1585
|
+
* @param seconds - Initial delay in seconds
|
|
1586
|
+
* @param multiplier - Multiplier for each subsequent attempt (default: 2)
|
|
1587
|
+
*/
|
|
1588
|
+
backoff(seconds, multiplier = 2) {
|
|
1589
|
+
this.retryAfterSeconds = seconds;
|
|
1590
|
+
this.retryMultiplier = multiplier;
|
|
1591
|
+
return this;
|
|
1592
|
+
}
|
|
1593
|
+
/**
|
|
1594
|
+
* Calculate retry delay for the next attempt.
|
|
1595
|
+
* @param attempt - Current attempt number (1-based)
|
|
1596
|
+
* @returns Delay in milliseconds
|
|
1597
|
+
*/
|
|
1598
|
+
getRetryDelay(attempt) {
|
|
1599
|
+
const initialDelay = (this.retryAfterSeconds ?? 1) * 1e3;
|
|
1600
|
+
const multiplier = this.retryMultiplier ?? 2;
|
|
1601
|
+
return Math.min(initialDelay * multiplier ** (attempt - 1), 36e5);
|
|
1602
|
+
}
|
|
921
1603
|
/**
|
|
922
1604
|
* Failure handler (optional).
|
|
923
1605
|
*
|
|
@@ -956,7 +1638,7 @@ var ClassNameSerializer = class {
|
|
|
956
1638
|
* Serialize a Job.
|
|
957
1639
|
*/
|
|
958
1640
|
serialize(job) {
|
|
959
|
-
const id = `${Date.now()}-${Math.random().toString(36).substring(2, 9)}`;
|
|
1641
|
+
const id = job.id || `${Date.now()}-${Math.random().toString(36).substring(2, 9)}`;
|
|
960
1642
|
const className = job.constructor.name;
|
|
961
1643
|
const properties = {};
|
|
962
1644
|
for (const key in job) {
|
|
@@ -975,7 +1657,11 @@ var ClassNameSerializer = class {
|
|
|
975
1657
|
createdAt: Date.now(),
|
|
976
1658
|
...job.delaySeconds !== void 0 ? { delaySeconds: job.delaySeconds } : {},
|
|
977
1659
|
attempts: job.attempts ?? 0,
|
|
978
|
-
...job.maxAttempts !== void 0 ? { maxAttempts: job.maxAttempts } : {}
|
|
1660
|
+
...job.maxAttempts !== void 0 ? { maxAttempts: job.maxAttempts } : {},
|
|
1661
|
+
...job.groupId ? { groupId: job.groupId } : {},
|
|
1662
|
+
...job.retryAfterSeconds !== void 0 ? { retryAfterSeconds: job.retryAfterSeconds } : {},
|
|
1663
|
+
...job.retryMultiplier !== void 0 ? { retryMultiplier: job.retryMultiplier } : {},
|
|
1664
|
+
...job.priority !== void 0 ? { priority: job.priority } : {}
|
|
979
1665
|
};
|
|
980
1666
|
}
|
|
981
1667
|
/**
|
|
@@ -999,6 +1685,7 @@ var ClassNameSerializer = class {
|
|
|
999
1685
|
if (parsed.properties) {
|
|
1000
1686
|
Object.assign(job, parsed.properties);
|
|
1001
1687
|
}
|
|
1688
|
+
job.id = serialized.id;
|
|
1002
1689
|
if (serialized.delaySeconds !== void 0) {
|
|
1003
1690
|
job.delaySeconds = serialized.delaySeconds;
|
|
1004
1691
|
}
|
|
@@ -1008,6 +1695,18 @@ var ClassNameSerializer = class {
|
|
|
1008
1695
|
if (serialized.maxAttempts !== void 0) {
|
|
1009
1696
|
job.maxAttempts = serialized.maxAttempts;
|
|
1010
1697
|
}
|
|
1698
|
+
if (serialized.groupId !== void 0) {
|
|
1699
|
+
job.groupId = serialized.groupId;
|
|
1700
|
+
}
|
|
1701
|
+
if (serialized.retryAfterSeconds !== void 0) {
|
|
1702
|
+
job.retryAfterSeconds = serialized.retryAfterSeconds;
|
|
1703
|
+
}
|
|
1704
|
+
if (serialized.retryMultiplier !== void 0) {
|
|
1705
|
+
job.retryMultiplier = serialized.retryMultiplier;
|
|
1706
|
+
}
|
|
1707
|
+
if (serialized.priority !== void 0) {
|
|
1708
|
+
job.priority = serialized.priority;
|
|
1709
|
+
}
|
|
1011
1710
|
return job;
|
|
1012
1711
|
}
|
|
1013
1712
|
};
|
|
@@ -1029,7 +1728,9 @@ var JsonSerializer = class {
|
|
|
1029
1728
|
createdAt: Date.now(),
|
|
1030
1729
|
...job.delaySeconds !== void 0 ? { delaySeconds: job.delaySeconds } : {},
|
|
1031
1730
|
attempts: job.attempts ?? 0,
|
|
1032
|
-
...job.maxAttempts !== void 0 ? { maxAttempts: job.maxAttempts } : {}
|
|
1731
|
+
...job.maxAttempts !== void 0 ? { maxAttempts: job.maxAttempts } : {},
|
|
1732
|
+
...job.groupId ? { groupId: job.groupId } : {},
|
|
1733
|
+
...job.priority ? { priority: job.priority } : {}
|
|
1033
1734
|
};
|
|
1034
1735
|
}
|
|
1035
1736
|
/**
|
|
@@ -1045,6 +1746,12 @@ var JsonSerializer = class {
|
|
|
1045
1746
|
const parsed = JSON.parse(serialized.data);
|
|
1046
1747
|
const job = /* @__PURE__ */ Object.create({});
|
|
1047
1748
|
Object.assign(job, parsed.properties);
|
|
1749
|
+
if (serialized.groupId) {
|
|
1750
|
+
job.groupId = serialized.groupId;
|
|
1751
|
+
}
|
|
1752
|
+
if (serialized.priority) {
|
|
1753
|
+
job.priority = serialized.priority;
|
|
1754
|
+
}
|
|
1048
1755
|
return job;
|
|
1049
1756
|
}
|
|
1050
1757
|
};
|
|
@@ -1055,7 +1762,11 @@ var QueueManager = class {
|
|
|
1055
1762
|
serializers = /* @__PURE__ */ new Map();
|
|
1056
1763
|
defaultConnection;
|
|
1057
1764
|
defaultSerializer;
|
|
1765
|
+
persistence;
|
|
1766
|
+
scheduler;
|
|
1767
|
+
// Using any to avoid circular dependency or import issues for now
|
|
1058
1768
|
constructor(config = {}) {
|
|
1769
|
+
this.persistence = config.persistence;
|
|
1059
1770
|
this.defaultConnection = config.default ?? "default";
|
|
1060
1771
|
const serializerType = config.defaultSerializer ?? "class";
|
|
1061
1772
|
if (serializerType === "class") {
|
|
@@ -1163,9 +1874,30 @@ var QueueManager = class {
|
|
|
1163
1874
|
);
|
|
1164
1875
|
break;
|
|
1165
1876
|
}
|
|
1877
|
+
case "rabbitmq": {
|
|
1878
|
+
const { RabbitMQDriver: RabbitMQDriver2 } = (init_RabbitMQDriver(), __toCommonJS(RabbitMQDriver_exports));
|
|
1879
|
+
const client = config.client;
|
|
1880
|
+
if (!client) {
|
|
1881
|
+
throw new Error(
|
|
1882
|
+
"[QueueManager] RabbitMQDriver requires client. Please provide RabbitMQ connection/channel in connection config."
|
|
1883
|
+
);
|
|
1884
|
+
}
|
|
1885
|
+
this.drivers.set(
|
|
1886
|
+
name,
|
|
1887
|
+
new RabbitMQDriver2({
|
|
1888
|
+
// biome-ignore lint/suspicious/noExplicitAny: Dynamic driver loading requires type assertion
|
|
1889
|
+
client,
|
|
1890
|
+
// biome-ignore lint/suspicious/noExplicitAny: Dynamic driver config type
|
|
1891
|
+
exchange: config.exchange,
|
|
1892
|
+
// biome-ignore lint/suspicious/noExplicitAny: Dynamic driver config type
|
|
1893
|
+
exchangeType: config.exchangeType
|
|
1894
|
+
})
|
|
1895
|
+
);
|
|
1896
|
+
break;
|
|
1897
|
+
}
|
|
1166
1898
|
default:
|
|
1167
1899
|
throw new Error(
|
|
1168
|
-
`Driver "${driverType}" is not supported. Supported drivers: memory, database, redis, kafka, sqs`
|
|
1900
|
+
`Driver "${driverType}" is not supported. Supported drivers: memory, database, redis, kafka, sqs, rabbitmq`
|
|
1169
1901
|
);
|
|
1170
1902
|
}
|
|
1171
1903
|
}
|
|
@@ -1181,6 +1913,13 @@ var QueueManager = class {
|
|
|
1181
1913
|
}
|
|
1182
1914
|
return driver;
|
|
1183
1915
|
}
|
|
1916
|
+
/**
|
|
1917
|
+
* Get the default connection name.
|
|
1918
|
+
* @returns Default connection name
|
|
1919
|
+
*/
|
|
1920
|
+
getDefaultConnection() {
|
|
1921
|
+
return this.defaultConnection;
|
|
1922
|
+
}
|
|
1184
1923
|
/**
|
|
1185
1924
|
* Get a serializer.
|
|
1186
1925
|
* @param type - Serializer type
|
|
@@ -1210,6 +1949,7 @@ var QueueManager = class {
|
|
|
1210
1949
|
*
|
|
1211
1950
|
* @template T - The type of the job.
|
|
1212
1951
|
* @param job - Job instance to push.
|
|
1952
|
+
* @param options - Push options.
|
|
1213
1953
|
* @returns The same job instance (for fluent chaining).
|
|
1214
1954
|
*
|
|
1215
1955
|
* @example
|
|
@@ -1217,13 +1957,22 @@ var QueueManager = class {
|
|
|
1217
1957
|
* await manager.push(new SendEmailJob('user@example.com'));
|
|
1218
1958
|
* ```
|
|
1219
1959
|
*/
|
|
1220
|
-
async push(job) {
|
|
1960
|
+
async push(job, options) {
|
|
1221
1961
|
const connection = job.connectionName ?? this.defaultConnection;
|
|
1222
1962
|
const queue = job.queueName ?? "default";
|
|
1223
1963
|
const driver = this.getDriver(connection);
|
|
1224
1964
|
const serializer = this.getSerializer();
|
|
1225
1965
|
const serialized = serializer.serialize(job);
|
|
1226
|
-
|
|
1966
|
+
const pushOptions = { ...options };
|
|
1967
|
+
if (job.priority) {
|
|
1968
|
+
pushOptions.priority = job.priority;
|
|
1969
|
+
}
|
|
1970
|
+
await driver.push(queue, serialized, pushOptions);
|
|
1971
|
+
if (this.persistence?.archiveEnqueued) {
|
|
1972
|
+
this.persistence.adapter.archive(queue, serialized, "waiting").catch((err) => {
|
|
1973
|
+
console.error("[QueueManager] Persistence archive failed (waiting):", err);
|
|
1974
|
+
});
|
|
1975
|
+
}
|
|
1227
1976
|
return job;
|
|
1228
1977
|
}
|
|
1229
1978
|
/**
|
|
@@ -1316,6 +2065,92 @@ var QueueManager = class {
|
|
|
1316
2065
|
const driver = this.getDriver(connection);
|
|
1317
2066
|
await driver.clear(queue);
|
|
1318
2067
|
}
|
|
2068
|
+
/**
|
|
2069
|
+
* Mark a job as completed.
|
|
2070
|
+
* @param job - Job instance
|
|
2071
|
+
*/
|
|
2072
|
+
async complete(job) {
|
|
2073
|
+
const connection = job.connectionName ?? this.defaultConnection;
|
|
2074
|
+
const queue = job.queueName ?? "default";
|
|
2075
|
+
const driver = this.getDriver(connection);
|
|
2076
|
+
const serializer = this.getSerializer();
|
|
2077
|
+
if (driver.complete) {
|
|
2078
|
+
const serialized = serializer.serialize(job);
|
|
2079
|
+
await driver.complete(queue, serialized);
|
|
2080
|
+
if (this.persistence?.archiveCompleted) {
|
|
2081
|
+
await this.persistence.adapter.archive(queue, serialized, "completed").catch((err) => {
|
|
2082
|
+
console.error("[QueueManager] Persistence archive failed (completed):", err);
|
|
2083
|
+
});
|
|
2084
|
+
}
|
|
2085
|
+
}
|
|
2086
|
+
}
|
|
2087
|
+
/**
|
|
2088
|
+
* Mark a job as permanently failed.
|
|
2089
|
+
* @param job - Job instance
|
|
2090
|
+
* @param error - Error object
|
|
2091
|
+
*/
|
|
2092
|
+
async fail(job, error) {
|
|
2093
|
+
const connection = job.connectionName ?? this.defaultConnection;
|
|
2094
|
+
const queue = job.queueName ?? "default";
|
|
2095
|
+
const driver = this.getDriver(connection);
|
|
2096
|
+
const serializer = this.getSerializer();
|
|
2097
|
+
if (driver.fail) {
|
|
2098
|
+
const serialized = serializer.serialize(job);
|
|
2099
|
+
serialized.error = error.message;
|
|
2100
|
+
serialized.failedAt = Date.now();
|
|
2101
|
+
await driver.fail(queue, serialized);
|
|
2102
|
+
if (this.persistence?.archiveFailed) {
|
|
2103
|
+
await this.persistence.adapter.archive(queue, serialized, "failed").catch((err) => {
|
|
2104
|
+
console.error("[QueueManager] Persistence archive failed (failed):", err);
|
|
2105
|
+
});
|
|
2106
|
+
}
|
|
2107
|
+
}
|
|
2108
|
+
}
|
|
2109
|
+
/**
|
|
2110
|
+
* Get the persistence adapter if configured.
|
|
2111
|
+
*/
|
|
2112
|
+
getPersistence() {
|
|
2113
|
+
return this.persistence?.adapter;
|
|
2114
|
+
}
|
|
2115
|
+
/**
|
|
2116
|
+
* Get the scheduler if configured.
|
|
2117
|
+
*/
|
|
2118
|
+
getScheduler() {
|
|
2119
|
+
if (!this.scheduler) {
|
|
2120
|
+
const { Scheduler: Scheduler2 } = (init_Scheduler(), __toCommonJS(Scheduler_exports));
|
|
2121
|
+
this.scheduler = new Scheduler2(this);
|
|
2122
|
+
}
|
|
2123
|
+
return this.scheduler;
|
|
2124
|
+
}
|
|
2125
|
+
/**
|
|
2126
|
+
* Get failed jobs from DLQ (if driver supports it).
|
|
2127
|
+
*/
|
|
2128
|
+
async getFailed(queue, start = 0, end = -1, connection = this.defaultConnection) {
|
|
2129
|
+
const driver = this.getDriver(connection);
|
|
2130
|
+
if (driver.getFailed) {
|
|
2131
|
+
return driver.getFailed(queue, start, end);
|
|
2132
|
+
}
|
|
2133
|
+
return [];
|
|
2134
|
+
}
|
|
2135
|
+
/**
|
|
2136
|
+
* Retry failed jobs from DLQ (if driver supports it).
|
|
2137
|
+
*/
|
|
2138
|
+
async retryFailed(queue, count = 1, connection = this.defaultConnection) {
|
|
2139
|
+
const driver = this.getDriver(connection);
|
|
2140
|
+
if (driver.retryFailed) {
|
|
2141
|
+
return driver.retryFailed(queue, count);
|
|
2142
|
+
}
|
|
2143
|
+
return 0;
|
|
2144
|
+
}
|
|
2145
|
+
/**
|
|
2146
|
+
* Clear failed jobs from DLQ (if driver supports it).
|
|
2147
|
+
*/
|
|
2148
|
+
async clearFailed(queue, connection = this.defaultConnection) {
|
|
2149
|
+
const driver = this.getDriver(connection);
|
|
2150
|
+
if (driver.clearFailed) {
|
|
2151
|
+
await driver.clearFailed(queue);
|
|
2152
|
+
}
|
|
2153
|
+
}
|
|
1319
2154
|
};
|
|
1320
2155
|
|
|
1321
2156
|
// src/OrbitStream.ts
|
|
@@ -1396,6 +2231,453 @@ var OrbitStream = class _OrbitStream {
|
|
|
1396
2231
|
return this.queueManager;
|
|
1397
2232
|
}
|
|
1398
2233
|
};
|
|
2234
|
+
|
|
2235
|
+
// src/persistence/MySQLPersistence.ts
|
|
2236
|
+
import { DB, Schema } from "@gravito/atlas";
|
|
2237
|
+
var MySQLPersistence = class {
|
|
2238
|
+
/**
|
|
2239
|
+
* @param db - An Atlas DB instance or compatible QueryBuilder.
|
|
2240
|
+
* @param table - The name of the table to store archived jobs.
|
|
2241
|
+
*/
|
|
2242
|
+
constructor(db, table = "flux_job_archive", logsTable = "flux_system_logs") {
|
|
2243
|
+
this.db = db;
|
|
2244
|
+
this.table = table;
|
|
2245
|
+
this.logsTable = logsTable;
|
|
2246
|
+
}
|
|
2247
|
+
/**
|
|
2248
|
+
* Archive a job.
|
|
2249
|
+
*/
|
|
2250
|
+
async archive(queue, job, status) {
|
|
2251
|
+
try {
|
|
2252
|
+
await this.db.table(this.table).insert({
|
|
2253
|
+
job_id: job.id,
|
|
2254
|
+
queue,
|
|
2255
|
+
status,
|
|
2256
|
+
payload: JSON.stringify(job),
|
|
2257
|
+
error: job.error || null,
|
|
2258
|
+
created_at: new Date(job.createdAt),
|
|
2259
|
+
archived_at: /* @__PURE__ */ new Date()
|
|
2260
|
+
});
|
|
2261
|
+
} catch (err) {
|
|
2262
|
+
console.error(`[MySQLPersistence] Failed to archive job ${job.id}:`, err);
|
|
2263
|
+
}
|
|
2264
|
+
}
|
|
2265
|
+
/**
|
|
2266
|
+
* Find a specific job in the archive.
|
|
2267
|
+
*/
|
|
2268
|
+
async find(queue, id) {
|
|
2269
|
+
const row = await this.db.table(this.table).where("queue", queue).where("job_id", id).first();
|
|
2270
|
+
if (!row) {
|
|
2271
|
+
return null;
|
|
2272
|
+
}
|
|
2273
|
+
try {
|
|
2274
|
+
const job = typeof row.payload === "string" ? JSON.parse(row.payload) : row.payload;
|
|
2275
|
+
return job;
|
|
2276
|
+
} catch (_e) {
|
|
2277
|
+
return null;
|
|
2278
|
+
}
|
|
2279
|
+
}
|
|
2280
|
+
/**
|
|
2281
|
+
* List jobs from the archive.
|
|
2282
|
+
*/
|
|
2283
|
+
async list(queue, options = {}) {
|
|
2284
|
+
let query = this.db.table(this.table).where("queue", queue);
|
|
2285
|
+
if (options.status) {
|
|
2286
|
+
query = query.where("status", options.status);
|
|
2287
|
+
}
|
|
2288
|
+
if (options.jobId) {
|
|
2289
|
+
query = query.where("job_id", options.jobId);
|
|
2290
|
+
}
|
|
2291
|
+
if (options.startTime) {
|
|
2292
|
+
query = query.where("archived_at", ">=", options.startTime);
|
|
2293
|
+
}
|
|
2294
|
+
if (options.endTime) {
|
|
2295
|
+
query = query.where("archived_at", "<=", options.endTime);
|
|
2296
|
+
}
|
|
2297
|
+
const rows = await query.orderBy("archived_at", "desc").limit(options.limit ?? 50).offset(options.offset ?? 0).get();
|
|
2298
|
+
return rows.map((r) => {
|
|
2299
|
+
try {
|
|
2300
|
+
const job = typeof r.payload === "string" ? JSON.parse(r.payload) : r.payload;
|
|
2301
|
+
return { ...job, _status: r.status, _archivedAt: r.archived_at };
|
|
2302
|
+
} catch (_e) {
|
|
2303
|
+
return null;
|
|
2304
|
+
}
|
|
2305
|
+
}).filter(Boolean);
|
|
2306
|
+
}
|
|
2307
|
+
/**
|
|
2308
|
+
* Search jobs from the archive.
|
|
2309
|
+
*/
|
|
2310
|
+
async search(query, options = {}) {
|
|
2311
|
+
let q = this.db.table(this.table);
|
|
2312
|
+
if (options.queue) {
|
|
2313
|
+
q = q.where("queue", options.queue);
|
|
2314
|
+
}
|
|
2315
|
+
const rows = await q.where((sub) => {
|
|
2316
|
+
sub.where("job_id", "like", `%${query}%`).orWhere("payload", "like", `%${query}%`).orWhere("error", "like", `%${query}%`);
|
|
2317
|
+
}).orderBy("archived_at", "desc").limit(options.limit ?? 50).offset(options.offset ?? 0).get();
|
|
2318
|
+
return rows.map((r) => {
|
|
2319
|
+
try {
|
|
2320
|
+
const job = typeof r.payload === "string" ? JSON.parse(r.payload) : r.payload;
|
|
2321
|
+
return { ...job, _status: r.status, _archivedAt: r.archived_at };
|
|
2322
|
+
} catch (_e) {
|
|
2323
|
+
return null;
|
|
2324
|
+
}
|
|
2325
|
+
}).filter(Boolean);
|
|
2326
|
+
}
|
|
2327
|
+
/**
|
|
2328
|
+
* Archive a system log message.
|
|
2329
|
+
*/
|
|
2330
|
+
async archiveLog(log) {
|
|
2331
|
+
try {
|
|
2332
|
+
await this.db.table(this.logsTable).insert({
|
|
2333
|
+
level: log.level,
|
|
2334
|
+
message: log.message,
|
|
2335
|
+
worker_id: log.workerId,
|
|
2336
|
+
queue: log.queue || null,
|
|
2337
|
+
timestamp: log.timestamp
|
|
2338
|
+
});
|
|
2339
|
+
} catch (err) {
|
|
2340
|
+
console.error(`[MySQLPersistence] Failed to archive log:`, err.message);
|
|
2341
|
+
}
|
|
2342
|
+
}
|
|
2343
|
+
/**
|
|
2344
|
+
* List system logs from the archive.
|
|
2345
|
+
*/
|
|
2346
|
+
async listLogs(options = {}) {
|
|
2347
|
+
let query = this.db.table(this.logsTable);
|
|
2348
|
+
if (options.level) query = query.where("level", options.level);
|
|
2349
|
+
if (options.workerId) query = query.where("worker_id", options.workerId);
|
|
2350
|
+
if (options.queue) query = query.where("queue", options.queue);
|
|
2351
|
+
if (options.search) {
|
|
2352
|
+
query = query.where("message", "like", `%${options.search}%`);
|
|
2353
|
+
}
|
|
2354
|
+
if (options.startTime) {
|
|
2355
|
+
query = query.where("timestamp", ">=", options.startTime);
|
|
2356
|
+
}
|
|
2357
|
+
if (options.endTime) {
|
|
2358
|
+
query = query.where("timestamp", "<=", options.endTime);
|
|
2359
|
+
}
|
|
2360
|
+
return await query.orderBy("timestamp", "desc").limit(options.limit ?? 50).offset(options.offset ?? 0).get();
|
|
2361
|
+
}
|
|
2362
|
+
/**
|
|
2363
|
+
* Count system logs in the archive.
|
|
2364
|
+
*/
|
|
2365
|
+
async countLogs(options = {}) {
|
|
2366
|
+
let query = this.db.table(this.logsTable);
|
|
2367
|
+
if (options.level) query = query.where("level", options.level);
|
|
2368
|
+
if (options.workerId) query = query.where("worker_id", options.workerId);
|
|
2369
|
+
if (options.queue) query = query.where("queue", options.queue);
|
|
2370
|
+
if (options.search) {
|
|
2371
|
+
query = query.where("message", "like", `%${options.search}%`);
|
|
2372
|
+
}
|
|
2373
|
+
if (options.startTime) {
|
|
2374
|
+
query = query.where("timestamp", ">=", options.startTime);
|
|
2375
|
+
}
|
|
2376
|
+
if (options.endTime) {
|
|
2377
|
+
query = query.where("timestamp", "<=", options.endTime);
|
|
2378
|
+
}
|
|
2379
|
+
const result = await query.count("id as total").first();
|
|
2380
|
+
return result?.total || 0;
|
|
2381
|
+
}
|
|
2382
|
+
/**
|
|
2383
|
+
* Remove old records from the archive.
|
|
2384
|
+
*/
|
|
2385
|
+
async cleanup(days) {
|
|
2386
|
+
const threshold = /* @__PURE__ */ new Date();
|
|
2387
|
+
threshold.setDate(threshold.getDate() - days);
|
|
2388
|
+
const [jobsDeleted, logsDeleted] = await Promise.all([
|
|
2389
|
+
this.db.table(this.table).where("archived_at", "<", threshold).delete(),
|
|
2390
|
+
this.db.table(this.logsTable).where("timestamp", "<", threshold).delete()
|
|
2391
|
+
]);
|
|
2392
|
+
return (jobsDeleted || 0) + (logsDeleted || 0);
|
|
2393
|
+
}
|
|
2394
|
+
/**
|
|
2395
|
+
* Count jobs in the archive.
|
|
2396
|
+
*/
|
|
2397
|
+
async count(queue, options = {}) {
|
|
2398
|
+
let query = this.db.table(this.table).where("queue", queue);
|
|
2399
|
+
if (options.status) {
|
|
2400
|
+
query = query.where("status", options.status);
|
|
2401
|
+
}
|
|
2402
|
+
if (options.jobId) {
|
|
2403
|
+
query = query.where("job_id", options.jobId);
|
|
2404
|
+
}
|
|
2405
|
+
if (options.startTime) {
|
|
2406
|
+
query = query.where("archived_at", ">=", options.startTime);
|
|
2407
|
+
}
|
|
2408
|
+
if (options.endTime) {
|
|
2409
|
+
query = query.where("archived_at", "<=", options.endTime);
|
|
2410
|
+
}
|
|
2411
|
+
const result = await query.count("id as total").first();
|
|
2412
|
+
return result?.total || 0;
|
|
2413
|
+
}
|
|
2414
|
+
/**
|
|
2415
|
+
* Help script to create the necessary table.
|
|
2416
|
+
*/
|
|
2417
|
+
async setupTable() {
|
|
2418
|
+
await Promise.all([this.setupJobsTable(), this.setupLogsTable()]);
|
|
2419
|
+
}
|
|
2420
|
+
async setupJobsTable() {
|
|
2421
|
+
const exists = await Schema.hasTable(this.table);
|
|
2422
|
+
if (exists) return;
|
|
2423
|
+
await Schema.create(this.table, (table) => {
|
|
2424
|
+
table.id();
|
|
2425
|
+
table.string("job_id", 64);
|
|
2426
|
+
table.string("queue", 128);
|
|
2427
|
+
table.string("status", 20);
|
|
2428
|
+
table.json("payload");
|
|
2429
|
+
table.text("error").nullable();
|
|
2430
|
+
table.timestamp("created_at").nullable();
|
|
2431
|
+
table.timestamp("archived_at").default(DB.raw("CURRENT_TIMESTAMP"));
|
|
2432
|
+
table.index(["queue", "archived_at"]);
|
|
2433
|
+
table.index(["queue", "job_id"]);
|
|
2434
|
+
table.index(["status", "archived_at"]);
|
|
2435
|
+
table.index(["archived_at"]);
|
|
2436
|
+
});
|
|
2437
|
+
console.log(`[MySQLPersistence] Created jobs archive table: ${this.table}`);
|
|
2438
|
+
}
|
|
2439
|
+
async setupLogsTable() {
|
|
2440
|
+
const exists = await Schema.hasTable(this.logsTable);
|
|
2441
|
+
if (exists) return;
|
|
2442
|
+
await Schema.create(this.logsTable, (table) => {
|
|
2443
|
+
table.id();
|
|
2444
|
+
table.string("level", 20);
|
|
2445
|
+
table.text("message");
|
|
2446
|
+
table.string("worker_id", 128);
|
|
2447
|
+
table.string("queue", 128).nullable();
|
|
2448
|
+
table.timestamp("timestamp").default(DB.raw("CURRENT_TIMESTAMP"));
|
|
2449
|
+
table.index(["worker_id"]);
|
|
2450
|
+
table.index(["queue"]);
|
|
2451
|
+
table.index(["level"]);
|
|
2452
|
+
table.index(["timestamp"]);
|
|
2453
|
+
});
|
|
2454
|
+
console.log(`[MySQLPersistence] Created logs archive table: ${this.logsTable}`);
|
|
2455
|
+
}
|
|
2456
|
+
};
|
|
2457
|
+
|
|
2458
|
+
// src/persistence/SQLitePersistence.ts
|
|
2459
|
+
import { Schema as Schema2 } from "@gravito/atlas";
|
|
2460
|
+
var SQLitePersistence = class {
|
|
2461
|
+
/**
|
|
2462
|
+
* @param db - An Atlas DB instance (SQLite driver).
|
|
2463
|
+
* @param table - The name of the table to store archived jobs.
|
|
2464
|
+
*/
|
|
2465
|
+
constructor(db, table = "flux_job_archive", logsTable = "flux_system_logs") {
|
|
2466
|
+
this.db = db;
|
|
2467
|
+
this.table = table;
|
|
2468
|
+
this.logsTable = logsTable;
|
|
2469
|
+
}
|
|
2470
|
+
/**
|
|
2471
|
+
* Archive a job.
|
|
2472
|
+
*/
|
|
2473
|
+
async archive(queue, job, status) {
|
|
2474
|
+
try {
|
|
2475
|
+
await this.db.table(this.table).insert({
|
|
2476
|
+
job_id: job.id,
|
|
2477
|
+
queue,
|
|
2478
|
+
status,
|
|
2479
|
+
payload: JSON.stringify(job),
|
|
2480
|
+
error: job.error || null,
|
|
2481
|
+
created_at: new Date(job.createdAt),
|
|
2482
|
+
archived_at: /* @__PURE__ */ new Date()
|
|
2483
|
+
});
|
|
2484
|
+
} catch (err) {
|
|
2485
|
+
console.error(`[SQLitePersistence] Failed to archive job ${job.id}:`, err.message);
|
|
2486
|
+
}
|
|
2487
|
+
}
|
|
2488
|
+
/**
|
|
2489
|
+
* Find a specific job in the archive.
|
|
2490
|
+
*/
|
|
2491
|
+
async find(queue, id) {
|
|
2492
|
+
const row = await this.db.table(this.table).where("queue", queue).where("job_id", id).first();
|
|
2493
|
+
if (!row) {
|
|
2494
|
+
return null;
|
|
2495
|
+
}
|
|
2496
|
+
try {
|
|
2497
|
+
const job = typeof row.payload === "string" ? JSON.parse(row.payload) : row.payload;
|
|
2498
|
+
return job;
|
|
2499
|
+
} catch (_e) {
|
|
2500
|
+
return null;
|
|
2501
|
+
}
|
|
2502
|
+
}
|
|
2503
|
+
/**
|
|
2504
|
+
* List jobs from the archive.
|
|
2505
|
+
*/
|
|
2506
|
+
async list(queue, options = {}) {
|
|
2507
|
+
let query = this.db.table(this.table).where("queue", queue);
|
|
2508
|
+
if (options.status) {
|
|
2509
|
+
query = query.where("status", options.status);
|
|
2510
|
+
}
|
|
2511
|
+
if (options.jobId) {
|
|
2512
|
+
query = query.where("job_id", options.jobId);
|
|
2513
|
+
}
|
|
2514
|
+
if (options.startTime) {
|
|
2515
|
+
query = query.where("archived_at", ">=", options.startTime);
|
|
2516
|
+
}
|
|
2517
|
+
if (options.endTime) {
|
|
2518
|
+
query = query.where("archived_at", "<=", options.endTime);
|
|
2519
|
+
}
|
|
2520
|
+
const rows = await query.orderBy("archived_at", "desc").limit(options.limit ?? 50).offset(options.offset ?? 0).get();
|
|
2521
|
+
return rows.map((r) => {
|
|
2522
|
+
try {
|
|
2523
|
+
const job = typeof r.payload === "string" ? JSON.parse(r.payload) : r.payload;
|
|
2524
|
+
return { ...job, _status: r.status, _archivedAt: r.archived_at };
|
|
2525
|
+
} catch (_e) {
|
|
2526
|
+
return null;
|
|
2527
|
+
}
|
|
2528
|
+
}).filter(Boolean);
|
|
2529
|
+
}
|
|
2530
|
+
/**
|
|
2531
|
+
* Search jobs from the archive.
|
|
2532
|
+
*/
|
|
2533
|
+
async search(query, options = {}) {
|
|
2534
|
+
let q = this.db.table(this.table);
|
|
2535
|
+
if (options.queue) {
|
|
2536
|
+
q = q.where("queue", options.queue);
|
|
2537
|
+
}
|
|
2538
|
+
const rows = await q.where((sub) => {
|
|
2539
|
+
sub.where("job_id", "like", `%${query}%`).orWhere("payload", "like", `%${query}%`).orWhere("error", "like", `%${query}%`);
|
|
2540
|
+
}).orderBy("archived_at", "desc").limit(options.limit ?? 50).offset(options.offset ?? 0).get();
|
|
2541
|
+
return rows.map((r) => {
|
|
2542
|
+
try {
|
|
2543
|
+
const job = typeof r.payload === "string" ? JSON.parse(r.payload) : r.payload;
|
|
2544
|
+
return { ...job, _status: r.status, _archivedAt: r.archived_at };
|
|
2545
|
+
} catch (_e) {
|
|
2546
|
+
return null;
|
|
2547
|
+
}
|
|
2548
|
+
}).filter(Boolean);
|
|
2549
|
+
}
|
|
2550
|
+
/**
|
|
2551
|
+
* Archive a system log message.
|
|
2552
|
+
*/
|
|
2553
|
+
async archiveLog(log) {
|
|
2554
|
+
try {
|
|
2555
|
+
await this.db.table(this.logsTable).insert({
|
|
2556
|
+
level: log.level,
|
|
2557
|
+
message: log.message,
|
|
2558
|
+
worker_id: log.workerId,
|
|
2559
|
+
queue: log.queue || null,
|
|
2560
|
+
timestamp: log.timestamp
|
|
2561
|
+
});
|
|
2562
|
+
} catch (err) {
|
|
2563
|
+
console.error(`[SQLitePersistence] Failed to archive log:`, err.message);
|
|
2564
|
+
}
|
|
2565
|
+
}
|
|
2566
|
+
/**
|
|
2567
|
+
* List system logs from the archive.
|
|
2568
|
+
*/
|
|
2569
|
+
async listLogs(options = {}) {
|
|
2570
|
+
let query = this.db.table(this.logsTable);
|
|
2571
|
+
if (options.level) query = query.where("level", options.level);
|
|
2572
|
+
if (options.workerId) query = query.where("worker_id", options.workerId);
|
|
2573
|
+
if (options.queue) query = query.where("queue", options.queue);
|
|
2574
|
+
if (options.search) {
|
|
2575
|
+
query = query.where("message", "like", `%${options.search}%`);
|
|
2576
|
+
}
|
|
2577
|
+
if (options.startTime) {
|
|
2578
|
+
query = query.where("timestamp", ">=", options.startTime);
|
|
2579
|
+
}
|
|
2580
|
+
if (options.endTime) {
|
|
2581
|
+
query = query.where("timestamp", "<=", options.endTime);
|
|
2582
|
+
}
|
|
2583
|
+
return await query.orderBy("timestamp", "desc").limit(options.limit ?? 50).offset(options.offset ?? 0).get();
|
|
2584
|
+
}
|
|
2585
|
+
/**
|
|
2586
|
+
* Count system logs in the archive.
|
|
2587
|
+
*/
|
|
2588
|
+
async countLogs(options = {}) {
|
|
2589
|
+
let query = this.db.table(this.logsTable);
|
|
2590
|
+
if (options.level) query = query.where("level", options.level);
|
|
2591
|
+
if (options.workerId) query = query.where("worker_id", options.workerId);
|
|
2592
|
+
if (options.queue) query = query.where("queue", options.queue);
|
|
2593
|
+
if (options.search) {
|
|
2594
|
+
query = query.where("message", "like", `%${options.search}%`);
|
|
2595
|
+
}
|
|
2596
|
+
if (options.startTime) {
|
|
2597
|
+
query = query.where("timestamp", ">=", options.startTime);
|
|
2598
|
+
}
|
|
2599
|
+
if (options.endTime) {
|
|
2600
|
+
query = query.where("timestamp", "<=", options.endTime);
|
|
2601
|
+
}
|
|
2602
|
+
const result = await query.count("id as total").first();
|
|
2603
|
+
return result?.total || 0;
|
|
2604
|
+
}
|
|
2605
|
+
/**
|
|
2606
|
+
* Remove old records from the archive.
|
|
2607
|
+
*/
|
|
2608
|
+
async cleanup(days) {
|
|
2609
|
+
const threshold = /* @__PURE__ */ new Date();
|
|
2610
|
+
threshold.setDate(threshold.getDate() - days);
|
|
2611
|
+
const [jobsDeleted, logsDeleted] = await Promise.all([
|
|
2612
|
+
this.db.table(this.table).where("archived_at", "<", threshold).delete(),
|
|
2613
|
+
this.db.table(this.logsTable).where("timestamp", "<", threshold).delete()
|
|
2614
|
+
]);
|
|
2615
|
+
return (jobsDeleted || 0) + (logsDeleted || 0);
|
|
2616
|
+
}
|
|
2617
|
+
/**
|
|
2618
|
+
* Count jobs in the archive.
|
|
2619
|
+
*/
|
|
2620
|
+
async count(queue, options = {}) {
|
|
2621
|
+
let query = this.db.table(this.table).where("queue", queue);
|
|
2622
|
+
if (options.status) {
|
|
2623
|
+
query = query.where("status", options.status);
|
|
2624
|
+
}
|
|
2625
|
+
if (options.jobId) {
|
|
2626
|
+
query = query.where("job_id", options.jobId);
|
|
2627
|
+
}
|
|
2628
|
+
if (options.startTime) {
|
|
2629
|
+
query = query.where("archived_at", ">=", options.startTime);
|
|
2630
|
+
}
|
|
2631
|
+
if (options.endTime) {
|
|
2632
|
+
query = query.where("archived_at", "<=", options.endTime);
|
|
2633
|
+
}
|
|
2634
|
+
const result = await query.count("id as total").first();
|
|
2635
|
+
return result?.total || 0;
|
|
2636
|
+
}
|
|
2637
|
+
/**
|
|
2638
|
+
* Setup table for SQLite.
|
|
2639
|
+
*/
|
|
2640
|
+
async setupTable() {
|
|
2641
|
+
await Promise.all([this.setupJobsTable(), this.setupLogsTable()]);
|
|
2642
|
+
}
|
|
2643
|
+
async setupJobsTable() {
|
|
2644
|
+
const exists = await Schema2.hasTable(this.table);
|
|
2645
|
+
if (exists) return;
|
|
2646
|
+
await Schema2.create(this.table, (table) => {
|
|
2647
|
+
table.id();
|
|
2648
|
+
table.string("job_id", 64);
|
|
2649
|
+
table.string("queue", 128);
|
|
2650
|
+
table.string("status", 20);
|
|
2651
|
+
table.text("payload");
|
|
2652
|
+
table.text("error").nullable();
|
|
2653
|
+
table.timestamp("created_at").nullable();
|
|
2654
|
+
table.timestamp("archived_at").nullable();
|
|
2655
|
+
table.index(["queue", "archived_at"]);
|
|
2656
|
+
table.index(["archived_at"]);
|
|
2657
|
+
});
|
|
2658
|
+
console.log(`[SQLitePersistence] Created jobs archive table: ${this.table}`);
|
|
2659
|
+
}
|
|
2660
|
+
async setupLogsTable() {
|
|
2661
|
+
const exists = await Schema2.hasTable(this.logsTable);
|
|
2662
|
+
if (exists) return;
|
|
2663
|
+
await Schema2.create(this.logsTable, (table) => {
|
|
2664
|
+
table.id();
|
|
2665
|
+
table.string("level", 20);
|
|
2666
|
+
table.text("message");
|
|
2667
|
+
table.string("worker_id", 128);
|
|
2668
|
+
table.string("queue", 128).nullable();
|
|
2669
|
+
table.timestamp("timestamp");
|
|
2670
|
+
table.index(["worker_id"]);
|
|
2671
|
+
table.index(["queue"]);
|
|
2672
|
+
table.index(["level"]);
|
|
2673
|
+
table.index(["timestamp"]);
|
|
2674
|
+
});
|
|
2675
|
+
console.log(`[SQLitePersistence] Created logs archive table: ${this.logsTable}`);
|
|
2676
|
+
}
|
|
2677
|
+
};
|
|
2678
|
+
|
|
2679
|
+
// src/index.ts
// Run the bundler-generated lazy initializer for the Scheduler module
// so its module-level side effects execute before the public exports below.
init_Scheduler();
|
|
1399
2681
|
export {
|
|
1400
2682
|
ClassNameSerializer,
|
|
1401
2683
|
Consumer,
|
|
@@ -1404,9 +2686,13 @@ export {
|
|
|
1404
2686
|
JsonSerializer,
|
|
1405
2687
|
KafkaDriver,
|
|
1406
2688
|
MemoryDriver,
|
|
2689
|
+
MySQLPersistence,
|
|
1407
2690
|
OrbitStream,
|
|
1408
2691
|
QueueManager,
|
|
2692
|
+
RabbitMQDriver,
|
|
1409
2693
|
RedisDriver,
|
|
2694
|
+
SQLitePersistence,
|
|
1410
2695
|
SQSDriver,
|
|
2696
|
+
Scheduler,
|
|
1411
2697
|
Worker
|
|
1412
2698
|
};
|