@gravito/stream 1.0.0-beta.1 → 1.0.0-beta.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +86 -2
- package/dist/index.cjs +1359 -73
- package/dist/index.d.cts +689 -7
- package/dist/index.d.ts +689 -7
- package/dist/index.js +1361 -73
- package/package.json +10 -6
package/dist/index.cjs
CHANGED
@@ -56,10 +56,11 @@ var init_DatabaseDriver = __esm({
        */
       async push(queue, job) {
         const availableAt = job.delaySeconds ? new Date(Date.now() + job.delaySeconds * 1e3) : /* @__PURE__ */ new Date();
+        const payload = JSON.stringify(job);
         await this.dbService.execute(
           `INSERT INTO ${this.tableName} (queue, payload, attempts, available_at, created_at)
          VALUES ($1, $2, $3, $4, $5)`,
-          [queue,
+          [queue, payload, job.attempts ?? 0, availableAt.toISOString(), (/* @__PURE__ */ new Date()).toISOString()]
         );
       }
       /**
@@ -102,15 +103,32 @@ var init_DatabaseDriver = __esm({
         );
         const createdAt = new Date(row.created_at).getTime();
         const delaySeconds = row.available_at ? Math.max(0, Math.floor((new Date(row.available_at).getTime() - createdAt) / 1e3)) : void 0;
-
-
-
-
-
-
-
-
-
+        let job;
+        try {
+          const parsed = JSON.parse(row.payload);
+          if (parsed && typeof parsed === "object" && parsed.type && parsed.data) {
+            job = {
+              ...parsed,
+              id: row.id,
+              // DB ID is the source of truth for deletion
+              attempts: row.attempts
+            };
+          } else {
+            throw new Error("Fallback");
+          }
+        } catch (_e) {
+          job = {
+            id: row.id,
+            type: "class",
+            data: row.payload,
+            createdAt,
+            attempts: row.attempts
+          };
+        }
+        if (delaySeconds !== void 0) {
+          job.delaySeconds = delaySeconds;
+        }
+        return job;
       }
       /**
        * Get queue size.
@@ -150,6 +168,27 @@ var init_DatabaseDriver = __esm({
           }
         });
       }
+      /**
+       * Mark a job as failed (DLQ).
+       */
+      async fail(queue, job) {
+        const failedQueue = `failed:${queue}`;
+        const payload = JSON.stringify(job);
+        await this.dbService.execute(
+          `INSERT INTO ${this.tableName} (queue, payload, attempts, available_at, created_at)
+         VALUES ($1, $2, $3, $4, $5)`,
+          [failedQueue, payload, job.attempts, (/* @__PURE__ */ new Date()).toISOString(), (/* @__PURE__ */ new Date()).toISOString()]
+        );
+      }
+      /**
+       * Acknowledge/Complete a job.
+       */
+      async complete(_queue, job) {
+        if (!job.id) {
+          return;
+        }
+        await this.dbService.execute(`DELETE FROM ${this.tableName} WHERE id = $1`, [job.id]);
+      }
     };
   }
 });
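Note: taken together, the new `fail`/`complete` pair gives the database driver an explicit job lifecycle: a row is deleted only on `complete`, and a permanently failed job is re-inserted under a `failed:`-prefixed queue name, which acts as a table-level dead-letter queue. A minimal lifecycle sketch, assuming a configured `DatabaseDriver` instance; the queue name is illustrative:

    // Sketch only: illustrates the DLQ naming convention used above.
    const job = await driver.pop("emails");
    if (job) {
      try {
        // ... process the job ...
        await driver.complete("emails", job); // DELETE FROM table WHERE id = $1
      } catch {
        await driver.fail("emails", job);     // re-insert with queue = "failed:emails"
      }
    }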
@@ -328,6 +367,150 @@ var init_KafkaDriver = __esm({
   }
 });
 
+// src/drivers/RabbitMQDriver.ts
+var RabbitMQDriver_exports = {};
+__export(RabbitMQDriver_exports, {
+  RabbitMQDriver: () => RabbitMQDriver
+});
+var RabbitMQDriver;
+var init_RabbitMQDriver = __esm({
+  "src/drivers/RabbitMQDriver.ts"() {
+    "use strict";
+    RabbitMQDriver = class {
+      connection;
+      channel;
+      exchange;
+      exchangeType;
+      constructor(config) {
+        this.connection = config.client;
+        this.exchange = config.exchange;
+        this.exchangeType = config.exchangeType ?? "fanout";
+        if (!this.connection) {
+          throw new Error(
+            "[RabbitMQDriver] RabbitMQ connection is required. Please provide a connection from amqplib."
+          );
+        }
+      }
+      /**
+       * Ensure channel is created.
+       */
+      async ensureChannel() {
+        if (this.channel) {
+          return this.channel;
+        }
+        if (typeof this.connection.createChannel === "function") {
+          this.channel = await this.connection.createChannel();
+        } else {
+          this.channel = this.connection;
+        }
+        if (this.exchange) {
+          await this.channel.assertExchange(this.exchange, this.exchangeType, { durable: true });
+        }
+        return this.channel;
+      }
+      /**
+       * Get the underlying connection.
+       */
+      getRawConnection() {
+        return this.connection;
+      }
+      /**
+       * Push a job (sendToQueue / publish).
+       */
+      async push(queue, job) {
+        const channel = await this.ensureChannel();
+        const payload = Buffer.from(JSON.stringify(job));
+        if (this.exchange) {
+          await channel.assertQueue(queue, { durable: true });
+          await channel.bindQueue(queue, this.exchange, "");
+          channel.publish(this.exchange, "", payload, { persistent: true });
+        } else {
+          await channel.assertQueue(queue, { durable: true });
+          channel.sendToQueue(queue, payload, { persistent: true });
+        }
+      }
+      /**
+       * Pop a job (get).
+       */
+      async pop(queue) {
+        const channel = await this.ensureChannel();
+        await channel.assertQueue(queue, { durable: true });
+        const msg = await channel.get(queue, { noAck: false });
+        if (!msg) {
+          return null;
+        }
+        const job = JSON.parse(msg.content.toString());
+        job._raw = msg;
+        return job;
+      }
+      /**
+       * Acknowledge a message.
+       */
+      async acknowledge(messageId) {
+        const channel = await this.ensureChannel();
+        if (typeof messageId === "object") {
+          channel.ack(messageId);
+        }
+      }
+      /**
+       * Negative acknowledge a message.
+       */
+      async nack(message, requeue = true) {
+        const channel = await this.ensureChannel();
+        channel.nack(message, false, requeue);
+      }
+      /**
+       * Reject a message.
+       */
+      async reject(message, requeue = true) {
+        const channel = await this.ensureChannel();
+        channel.reject(message, requeue);
+      }
+      /**
+       * Subscribe to a queue.
+       */
+      async subscribe(queue, callback, options = {}) {
+        const channel = await this.ensureChannel();
+        await channel.assertQueue(queue, { durable: true });
+        if (this.exchange) {
+          await channel.bindQueue(queue, this.exchange, "");
+        }
+        const { autoAck = true } = options;
+        await channel.consume(
+          queue,
+          async (msg) => {
+            if (!msg) {
+              return;
+            }
+            const job = JSON.parse(msg.content.toString());
+            job._raw = msg;
+            await callback(job);
+            if (autoAck) {
+              channel.ack(msg);
+            }
+          },
+          { noAck: false }
+        );
+      }
+      /**
+       * Get queue size.
+       */
+      async size(queue) {
+        const channel = await this.ensureChannel();
+        const ok = await channel.checkQueue(queue);
+        return ok.messageCount;
+      }
+      /**
+       * Clear a queue.
+       */
+      async clear(queue) {
+        const channel = await this.ensureChannel();
+        await channel.purgeQueue(queue);
+      }
+    };
+  }
+});
+
 // src/drivers/RedisDriver.ts
 var RedisDriver_exports = {};
 __export(RedisDriver_exports, {
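Note: the driver accepts either an amqplib connection (anything exposing `createChannel`) or an already-open channel as `client`; `ensureChannel` handles both. A minimal wiring sketch, assuming amqplib's promise API; the URL, queue name, and payload shape are placeholders:

    import amqp from "amqplib";
    import { RabbitMQDriver } from "@gravito/stream";

    const connection = await amqp.connect("amqp://localhost");
    const driver = new RabbitMQDriver({ client: connection }); // exchangeType defaults to "fanout"
    await driver.push("emails", { id: "1", type: "class", data: "..." });
    const job = await driver.pop("emails");      // channel.get with { noAck: false }
    if (job) await driver.acknowledge(job._raw); // ack via the raw message handle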
@@ -337,9 +520,43 @@ var RedisDriver;
 var init_RedisDriver = __esm({
   "src/drivers/RedisDriver.ts"() {
     "use strict";
-    RedisDriver = class {
+    RedisDriver = class _RedisDriver {
       prefix;
       client;
+      // Lua Logic:
+      // IF (IS_MEMBER(activeSet, groupId)) -> PUSH(pendingList, job)
+      // ELSE -> SADD(activeSet, groupId) & LPUSH(waitList, job)
+      static PUSH_SCRIPT = `
+        local waitList = KEYS[1]
+        local activeSet = KEYS[2]
+        local pendingList = KEYS[3]
+        local groupId = ARGV[1]
+        local payload = ARGV[2]
+
+        if redis.call('SISMEMBER', activeSet, groupId) == 1 then
+          return redis.call('RPUSH', pendingList, payload)
+        else
+          redis.call('SADD', activeSet, groupId)
+          return redis.call('LPUSH', waitList, payload)
+        end
+      `;
+      // Lua Logic:
+      // local next = LPOP(pendingList)
+      // IF (next) -> LPUSH(waitList, next)
+      // ELSE -> SREM(activeSet, groupId)
+      static COMPLETE_SCRIPT = `
+        local waitList = KEYS[1]
+        local activeSet = KEYS[2]
+        local pendingList = KEYS[3]
+        local groupId = ARGV[1]
+
+        local nextJob = redis.call('LPOP', pendingList)
+        if nextJob then
+          return redis.call('LPUSH', waitList, nextJob)
+        else
+          return redis.call('SREM', activeSet, groupId)
+        end
+      `;
       constructor(config) {
         this.client = config.client;
         this.prefix = config.prefix ?? "queue:";
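Note: the two scripts make group FIFO atomic. A push either enters the live wait list and claims the group in the active set, or parks in that group's pending list; a completion promotes the next parked job or releases the group. Key layout under the default `queue:` prefix (names follow the code above; the queue and group values are examples):

    queue:emails          -> wait list (LPUSH in, RPOP out)
    queue:active          -> set of group IDs that currently have a live job
    queue:pending:user-42 -> per-group FIFO of parked jobs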
@@ -348,19 +565,36 @@ var init_RedisDriver = __esm({
             "[RedisDriver] Redis client is required. Please install ioredis or redis package."
           );
         }
+        if (typeof this.client.defineCommand === "function") {
+          ;
+          this.client.defineCommand("pushGroupJob", {
+            numberOfKeys: 3,
+            lua: _RedisDriver.PUSH_SCRIPT
+          });
+          this.client.defineCommand("completeGroupJob", {
+            numberOfKeys: 3,
+            lua: _RedisDriver.COMPLETE_SCRIPT
+          });
+        }
       }
       /**
        * Get full Redis key for a queue.
        */
-      getKey(queue) {
+      getKey(queue, priority) {
+        if (priority) {
+          return `${this.prefix}${queue}:${priority}`;
+        }
         return `${this.prefix}${queue}`;
       }
       /**
        * Push a job (LPUSH).
        */
-      async push(queue, job) {
-        const key = this.getKey(queue);
-        const
+      async push(queue, job, options) {
+        const key = this.getKey(queue, options?.priority);
+        const groupId = options?.groupId;
+        if (groupId && options?.priority) {
+        }
+        const payloadObj = {
           id: job.id,
           type: job.type,
           data: job.data,
@@ -368,8 +602,18 @@ var init_RedisDriver = __esm({
           createdAt: job.createdAt,
           delaySeconds: job.delaySeconds,
           attempts: job.attempts,
-          maxAttempts: job.maxAttempts
-
+          maxAttempts: job.maxAttempts,
+          groupId,
+          error: job.error,
+          failedAt: job.failedAt
+        };
+        const payload = JSON.stringify(payloadObj);
+        if (groupId && typeof this.client.pushGroupJob === "function") {
+          const activeSetKey = `${this.prefix}active`;
+          const pendingListKey = `${this.prefix}pending:${groupId}`;
+          await this.client.pushGroupJob(key, activeSetKey, pendingListKey, groupId, payload);
+          return;
+        }
         if (job.delaySeconds && job.delaySeconds > 0) {
           const delayKey = `${key}:delayed`;
           const score = Date.now() + job.delaySeconds * 1e3;
@@ -382,29 +626,53 @@ var init_RedisDriver = __esm({
           await this.client.lpush(key, payload);
         }
       }
+      /**
+       * Complete a job (handle Group FIFO).
+       */
+      async complete(queue, job) {
+        if (!job.groupId) {
+          return;
+        }
+        const key = this.getKey(queue);
+        const activeSetKey = `${this.prefix}active`;
+        const pendingListKey = `${this.prefix}pending:${job.groupId}`;
+        if (typeof this.client.completeGroupJob === "function") {
+          await this.client.completeGroupJob(key, activeSetKey, pendingListKey, job.groupId);
+        }
+      }
       /**
        * Pop a job (RPOP, FIFO).
+       * Supports implicit priority polling (critical -> high -> default -> low).
        */
       async pop(queue) {
-        const
-        const
-
-        const
-
-
-        const
-        if (
-        const
-
-
+        const priorities = ["critical", "high", void 0, "low"];
+        for (const priority of priorities) {
+          const key = this.getKey(queue, priority);
+          const delayKey = `${key}:delayed`;
+          if (typeof this.client.zrange === "function") {
+            const now = Date.now();
+            const delayedJobs = await this.client.zrange(delayKey, 0, 0, "WITHSCORES");
+            if (delayedJobs && delayedJobs.length >= 2) {
+              const score = parseFloat(delayedJobs[1]);
+              if (score <= now) {
+                const payload2 = delayedJobs[0];
+                await this.client.zrem(delayKey, payload2);
+                return this.parsePayload(payload2);
+              }
             }
           }
+          if (typeof this.client.get === "function") {
+            const isPaused = await this.client.get(`${key}:paused`);
+            if (isPaused === "1") {
+              continue;
+            }
+          }
+          const payload = await this.client.rpop(key);
+          if (payload) {
+            return this.parsePayload(payload);
+          }
         }
-
-        if (!payload) {
-          return null;
-        }
-        return this.parsePayload(payload);
+        return null;
       }
       /**
        * Parse Redis payload.
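Note: `pop` now polls one Redis list per priority level, highest first; the `void 0` entry in the priorities array selects the unsuffixed default key from `getKey`. For a queue named `emails` (an illustrative name) the poll order is:

    queue:emails:critical
    queue:emails:high
    queue:emails            (default)
    queue:emails:low

The delayed-set promotion and the `:paused` check both happen per priority key, so a paused priority level is skipped without blocking the others.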
@@ -419,7 +687,11 @@ var init_RedisDriver = __esm({
           createdAt: parsed.createdAt,
           delaySeconds: parsed.delaySeconds,
           attempts: parsed.attempts,
-          maxAttempts: parsed.maxAttempts
+          maxAttempts: parsed.maxAttempts,
+          groupId: parsed.groupId,
+          error: parsed.error,
+          failedAt: parsed.failedAt,
+          priority: parsed.priority
         };
       }
       /**
@@ -429,15 +701,31 @@ var init_RedisDriver = __esm({
         const key = this.getKey(queue);
         return this.client.llen(key);
       }
+      /**
+       * Mark a job as permanently failed (DLQ).
+       */
+      async fail(queue, job) {
+        const key = `${this.getKey(queue)}:failed`;
+        const payload = JSON.stringify({
+          ...job,
+          failedAt: Date.now()
+        });
+        await this.client.lpush(key, payload);
+        if (typeof this.client.ltrim === "function") {
+          await this.client.ltrim(key, 0, 999);
+        }
+      }
       /**
        * Clear a queue.
        */
       async clear(queue) {
         const key = this.getKey(queue);
         const delayKey = `${key}:delayed`;
+        const activeSetKey = `${this.prefix}active`;
         await this.client.del(key);
         if (typeof this.client.del === "function") {
           await this.client.del(delayKey);
+          await this.client.del(activeSetKey);
         }
       }
       /**
@@ -447,6 +735,17 @@ var init_RedisDriver = __esm({
         if (jobs.length === 0) {
           return;
         }
+        const hasGroup = jobs.some((j) => j.groupId);
+        const hasPriority = jobs.some((j) => j.priority);
+        if (hasGroup || hasPriority) {
+          for (const job of jobs) {
+            await this.push(queue, job, {
+              groupId: job.groupId,
+              priority: job.priority
+            });
+          }
+          return;
+        }
         const key = this.getKey(queue);
         const payloads = jobs.map(
           (job) => JSON.stringify({
@@ -457,7 +756,9 @@ var init_RedisDriver = __esm({
             createdAt: job.createdAt,
             delaySeconds: job.delaySeconds,
             attempts: job.attempts,
-            maxAttempts: job.maxAttempts
+            maxAttempts: job.maxAttempts,
+            groupId: job.groupId,
+            priority: job.priority
           })
         );
         await this.client.lpush(key, ...payloads);
@@ -478,6 +779,90 @@ var init_RedisDriver = __esm({
         }
         return results;
       }
+      /**
+       * Report worker heartbeat for monitoring.
+       */
+      async reportHeartbeat(workerInfo, prefix) {
+        const key = `${prefix ?? this.prefix}worker:${workerInfo.id}`;
+        if (typeof this.client.set === "function") {
+          await this.client.set(key, JSON.stringify(workerInfo), "EX", 10);
+        }
+      }
+      /**
+       * Publish a log message for monitoring.
+       */
+      async publishLog(logPayload, prefix) {
+        const payload = JSON.stringify(logPayload);
+        const monitorPrefix = prefix ?? this.prefix;
+        if (typeof this.client.publish === "function") {
+          await this.client.publish(`${monitorPrefix}logs`, payload);
+        }
+        const historyKey = `${monitorPrefix}logs:history`;
+        if (typeof this.client.pipeline === "function") {
+          const pipe = this.client.pipeline();
+          pipe.lpush(historyKey, payload);
+          pipe.ltrim(historyKey, 0, 99);
+          await pipe.exec();
+        } else {
+          await this.client.lpush(historyKey, payload);
+        }
+      }
+      /**
+       * Check if a queue is rate limited.
+       * Uses a fixed window counter.
+       */
+      async checkRateLimit(queue, config) {
+        const key = `${this.prefix}${queue}:ratelimit`;
+        const now = Date.now();
+        const windowStart = Math.floor(now / config.duration);
+        const windowKey = `${key}:${windowStart}`;
+        const client = this.client;
+        if (typeof client.incr === "function") {
+          const current = await client.incr(windowKey);
+          if (current === 1) {
+            await client.expire(windowKey, Math.ceil(config.duration / 1e3) + 1);
+          }
+          return current <= config.max;
+        }
+        return true;
+      }
+      /**
+       * Get failed jobs from DLQ.
+       */
+      async getFailed(queue, start = 0, end = -1) {
+        const key = `${this.getKey(queue)}:failed`;
+        const payloads = await this.client.lrange(key, start, end);
+        return payloads.map((p) => this.parsePayload(p));
+      }
+      /**
+       * Retry failed jobs from DLQ.
+       * Moves jobs from failed list back to the main queue.
+       */
+      async retryFailed(queue, count = 1) {
+        const failedKey = `${this.getKey(queue)}:failed`;
+        const queueKey = this.getKey(queue);
+        let retried = 0;
+        for (let i = 0; i < count; i++) {
+          const payload = await this.client.rpop(failedKey);
+          if (!payload) {
+            break;
+          }
+          const job = this.parsePayload(payload);
+          job.attempts = 0;
+          delete job.error;
+          delete job.failedAt;
+          await this.push(queue, job, { priority: job.priority, groupId: job.groupId });
+          retried++;
+        }
+        return retried;
+      }
+      /**
+       * Clear failed jobs from DLQ.
+       */
+      async clearFailed(queue) {
+        const key = `${this.getKey(queue)}:failed`;
+        await this.client.del(key);
+      }
     };
   }
 });
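Note: `checkRateLimit` is a fixed-window counter: INCR a per-window key, set an expiry on the first increment, and allow the job while the count stays at or below `max`. A worked example, assuming `duration` is in milliseconds as the `Math.floor(now / config.duration)` division implies:

    // Hypothetical limit: at most 10 jobs per 60-second window.
    const allowed = await driver.checkRateLimit("emails", { max: 10, duration: 60000 });
    // window key: `${prefix}emails:ratelimit:${Math.floor(Date.now() / 60000)}`
    // the key expires Math.ceil(60000 / 1000) + 1 = 61 seconds after first use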
@@ -683,6 +1068,126 @@ var init_SQSDriver = __esm({
   }
 });
 
+// src/Scheduler.ts
+var Scheduler_exports = {};
+__export(Scheduler_exports, {
+  Scheduler: () => Scheduler
+});
+var import_cron_parser, Scheduler;
+var init_Scheduler = __esm({
+  "src/Scheduler.ts"() {
+    "use strict";
+    import_cron_parser = __toESM(require("cron-parser"), 1);
+    Scheduler = class {
+      constructor(manager, options = {}) {
+        this.manager = manager;
+        this.prefix = options.prefix ?? "queue:";
+      }
+      prefix;
+      get client() {
+        const driver = this.manager.getDriver(this.manager.getDefaultConnection());
+        return driver.client;
+      }
+      /**
+       * Register a scheduled job.
+       */
+      async register(config) {
+        const nextRun = import_cron_parser.default.parse(config.cron).next().getTime();
+        const fullConfig = {
+          ...config,
+          nextRun,
+          enabled: true
+        };
+        const pipe = this.client.pipeline();
+        pipe.hset(`${this.prefix}schedule:${config.id}`, {
+          ...fullConfig,
+          job: JSON.stringify(fullConfig.job)
+        });
+        pipe.zadd(`${this.prefix}schedules`, nextRun, config.id);
+        await pipe.exec();
+      }
+      /**
+       * Remove a scheduled job.
+       */
+      async remove(id) {
+        const pipe = this.client.pipeline();
+        pipe.del(`${this.prefix}schedule:${id}`);
+        pipe.zrem(`${this.prefix}schedules`, id);
+        await pipe.exec();
+      }
+      /**
+       * List all scheduled jobs.
+       */
+      async list() {
+        const ids = await this.client.zrange(`${this.prefix}schedules`, 0, -1);
+        const configs = [];
+        for (const id of ids) {
+          const data = await this.client.hgetall(`${this.prefix}schedule:${id}`);
+          if (data?.id) {
+            configs.push({
+              ...data,
+              lastRun: data.lastRun ? parseInt(data.lastRun, 10) : void 0,
+              nextRun: data.nextRun ? parseInt(data.nextRun, 10) : void 0,
+              enabled: data.enabled === "true",
+              job: JSON.parse(data.job)
+            });
+          }
+        }
+        return configs;
+      }
+      /**
+       * Run a scheduled job immediately (out of schedule).
+       */
+      async runNow(id) {
+        const data = await this.client.hgetall(`${this.prefix}schedule:${id}`);
+        if (data?.id) {
+          const serialized = JSON.parse(data.job);
+          const serializer = this.manager.getSerializer();
+          const job = serializer.deserialize(serialized);
+          await this.manager.push(job);
+        }
+      }
+      /**
+       * Process due tasks (TICK).
+       * This should be called periodically (e.g. every minute).
+       */
+      async tick() {
+        const now = Date.now();
+        const dueIds = await this.client.zrangebyscore(`${this.prefix}schedules`, 0, now);
+        let fired = 0;
+        const serializer = this.manager.getSerializer();
+        for (const id of dueIds) {
+          const lockKey = `${this.prefix}lock:schedule:${id}:${Math.floor(now / 1e3)}`;
+          const lock = await this.client.set(lockKey, "1", "EX", 10, "NX");
+          if (lock === "OK") {
+            const data = await this.client.hgetall(`${this.prefix}schedule:${id}`);
+            if (data?.id && data.enabled === "true") {
+              try {
+                const serializedJob = JSON.parse(data.job);
+                const connection = data.connection || this.manager.getDefaultConnection();
+                const driver = this.manager.getDriver(connection);
+                await driver.push(data.queue, serializedJob);
+                const nextRun = import_cron_parser.default.parse(data.cron).next().getTime();
+                const pipe = this.client.pipeline();
+                pipe.hset(`${this.prefix}schedule:${id}`, {
+                  lastRun: now,
+                  nextRun
+                });
+                pipe.zadd(`${this.prefix}schedules`, nextRun, id);
+                await pipe.exec();
+                fired++;
+              } catch (err) {
+                console.error(`[Scheduler] Failed to process schedule ${id}:`, err);
+              }
+            }
+          }
+        }
+        return fired;
+      }
+    };
+  }
+});
+
 // src/index.ts
 var index_exports = {};
 __export(index_exports, {
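Note: each schedule is a Redis hash plus a score in the `schedules` sorted set, and `tick` takes a short NX lock per due entry so concurrent tickers fire a schedule only once. A usage sketch; the config shape is inferred from the fields `tick` reads back (`cron`, `queue`, `job`, optional `connection`), and the cron expression and payload are placeholders:

    const scheduler = manager.getScheduler();
    await scheduler.register({
      id: "daily-report",
      cron: "0 9 * * *",       // parsed with cron-parser
      queue: "reports",
      job: serializedReportJob // a serialized job payload
    });
    setInterval(() => scheduler.tick(), 60000); // fire due schedules about once a minute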
@@ -693,10 +1198,14 @@ __export(index_exports, {
   JsonSerializer: () => JsonSerializer,
   KafkaDriver: () => KafkaDriver,
   MemoryDriver: () => MemoryDriver,
+  MySQLPersistence: () => MySQLPersistence,
   OrbitStream: () => OrbitStream,
   QueueManager: () => QueueManager,
+  RabbitMQDriver: () => RabbitMQDriver,
   RedisDriver: () => RedisDriver,
+  SQLitePersistence: () => SQLitePersistence,
   SQSDriver: () => SQSDriver,
+  Scheduler: () => Scheduler,
   Worker: () => Worker
 });
 module.exports = __toCommonJS(index_exports);
@@ -711,36 +1220,31 @@ var Worker = class {
    * @param job - Job instance
    */
   async process(job) {
-    const maxAttempts = this.options.maxAttempts ?? 3;
+    const maxAttempts = job.maxAttempts ?? this.options.maxAttempts ?? 3;
     const timeout = this.options.timeout;
-
-
-
-
-
-
-
-
-
-    (
-
-    timeout * 1e3
-    )
+    if (!job.attempts) {
+      job.attempts = 1;
+    }
+    try {
+      if (timeout) {
+        await Promise.race([
+          job.handle(),
+          new Promise(
+            (_, reject) => setTimeout(
+              () => reject(new Error(`Job timeout after ${timeout} seconds`)),
+              timeout * 1e3
             )
-
-
-
-
-
-
-
-
-
-    throw lastError;
-    }
-    const delay = Math.min(1e3 * 2 ** (attempt - 1), 3e4);
-    await new Promise((resolve) => setTimeout(resolve, delay));
+          )
+        ]);
+      } else {
+        await job.handle();
+      }
+    } catch (error) {
+      const err = error instanceof Error ? error : new Error(String(error));
+      if (job.attempts >= maxAttempts) {
+        await this.handleFailure(job, err);
       }
+      throw err;
     }
   }
   /**
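Note: `process` no longer loops over retries itself. It runs the handler once (racing a timeout when one is configured), calls `handleFailure` only when the current attempt is the last one allowed, and rethrows so the consumer loop owns requeueing and backoff; per-job `maxAttempts` now takes precedence over the worker option. The resulting contract, sketched with illustrative names:

    try {
      await worker.process(job); // throws on failure even when a retry is still possible
    } catch (err) {
      // the Consumer decides: re-push with a computed delay, or move the job to the DLQ
    }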
@@ -770,6 +1274,11 @@ var Consumer = class {
   }
   running = false;
   stopRequested = false;
+  workerId = `worker-${Math.random().toString(36).substring(2, 8)}`;
+  heartbeatTimer = null;
+  get connectionName() {
+    return this.options.connection ?? this.queueManager.getDefaultConnection();
+  }
   /**
    * Start the consumer loop.
    */
@@ -784,18 +1293,72 @@ var Consumer = class {
     const keepAlive = this.options.keepAlive ?? true;
     console.log("[Consumer] Started", {
       queues: this.options.queues,
-      connection: this.options.connection
+      connection: this.options.connection,
+      workerId: this.workerId
     });
+    if (this.options.monitor) {
+      this.startHeartbeat();
+      await this.publishLog("info", `Consumer started on [${this.options.queues.join(", ")}]`);
+    }
     while (this.running && !this.stopRequested) {
       let processed = false;
       for (const queue of this.options.queues) {
+        if (this.options.rateLimits?.[queue]) {
+          const limit = this.options.rateLimits[queue];
+          try {
+            const driver = this.queueManager.getDriver(this.connectionName);
+            if (driver.checkRateLimit) {
+              const allowed = await driver.checkRateLimit(queue, limit);
+              if (!allowed) {
+                continue;
+              }
+            }
+          } catch (err) {
+            console.error(`[Consumer] Error checking rate limit for "${queue}":`, err);
+          }
+        }
         try {
           const job = await this.queueManager.pop(queue, this.options.connection);
           if (job) {
             processed = true;
-
-
-          }
+            if (this.options.monitor) {
+              await this.publishLog("info", `Processing job: ${job.id}`, job.id);
+            }
+            try {
+              await worker.process(job);
+              if (this.options.monitor) {
+                await this.publishLog("success", `Completed job: ${job.id}`, job.id);
+              }
+            } catch (err) {
+              console.error(`[Consumer] Error processing job in queue "${queue}":`, err);
+              if (this.options.monitor) {
+                await this.publishLog("error", `Job failed: ${job.id} - ${err.message}`, job.id);
+              }
+              const attempts = job.attempts ?? 1;
+              const maxAttempts = job.maxAttempts ?? this.options.workerOptions?.maxAttempts ?? 3;
+              if (attempts < maxAttempts) {
+                job.attempts = attempts + 1;
+                const delayMs = job.getRetryDelay(job.attempts);
+                const delaySec = Math.ceil(delayMs / 1e3);
+                job.delay(delaySec);
+                await this.queueManager.push(job);
+                if (this.options.monitor) {
+                  await this.publishLog(
+                    "warning",
+                    `Job retrying in ${delaySec}s (Attempt ${job.attempts}/${maxAttempts})`,
+                    job.id
+                  );
+                }
+              } else {
+                await this.queueManager.fail(job, err).catch((dlqErr) => {
+                  console.error(`[Consumer] Error moving job to DLQ:`, dlqErr);
+                });
+              }
+            } finally {
+              await this.queueManager.complete(job).catch((err) => {
+                console.error(`[Consumer] Error completing job in queue "${queue}":`, err);
+              });
+            }
+          }
         } catch (error) {
           console.error(`[Consumer] Error polling queue "${queue}":`, error);
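Note: a sketch of starting a consumer with the new monitoring and rate-limit options. The constructor signature is not shown in this diff; the sketch assumes it takes the queue manager plus an options object, as the compiled fields above suggest, and the interval, prefix, and limit values are examples:

    const consumer = new Consumer(queueManager, {
      queues: ["emails", "reports"],
      monitor: { interval: 5000, prefix: "queue:" },       // heartbeat + log publishing
      rateLimits: { emails: { max: 10, duration: 60000 } } // per-queue fixed window
    });
    await consumer.start();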
@@ -804,13 +1367,83 @@ var Consumer = class {
       if (!processed && !keepAlive) {
         break;
       }
-      if (!this.stopRequested) {
+      if (!this.stopRequested && !processed) {
         await new Promise((resolve) => setTimeout(resolve, pollInterval));
+      } else if (!this.stopRequested && processed) {
+        await new Promise((resolve) => setTimeout(resolve, 0));
       }
     }
     this.running = false;
+    this.stopHeartbeat();
+    if (this.options.monitor) {
+      await this.publishLog("info", "Consumer stopped");
+    }
     console.log("[Consumer] Stopped");
   }
+  startHeartbeat() {
+    const interval = typeof this.options.monitor === "object" ? this.options.monitor.interval ?? 5e3 : 5e3;
+    const monitorOptions = typeof this.options.monitor === "object" ? this.options.monitor : {};
+    this.heartbeatTimer = setInterval(async () => {
+      try {
+        const driver = this.queueManager.getDriver(this.connectionName);
+        if (driver.reportHeartbeat) {
+          const monitorPrefix = typeof this.options.monitor === "object" ? this.options.monitor.prefix : void 0;
+          const os = require("os");
+          const mem = process.memoryUsage();
+          const metrics = {
+            cpu: os.loadavg()[0],
+            // 1m load avg
+            cores: os.cpus().length,
+            ram: {
+              rss: Math.floor(mem.rss / 1024 / 1024),
+              heapUsed: Math.floor(mem.heapUsed / 1024 / 1024),
+              total: Math.floor(os.totalmem() / 1024 / 1024)
+            }
+          };
+          await driver.reportHeartbeat(
+            {
+              id: this.workerId,
+              status: "online",
+              hostname: os.hostname(),
+              pid: process.pid,
+              uptime: Math.floor(process.uptime()),
+              last_ping: (/* @__PURE__ */ new Date()).toISOString(),
+              queues: this.options.queues,
+              metrics,
+              ...monitorOptions.extraInfo || {}
+            },
+            monitorPrefix
+          );
+        }
+      } catch (_e) {
+      }
+    }, interval);
+  }
+  stopHeartbeat() {
+    if (this.heartbeatTimer) {
+      clearInterval(this.heartbeatTimer);
+      this.heartbeatTimer = null;
+    }
+  }
+  async publishLog(level, message, jobId) {
+    try {
+      const driver = this.queueManager.getDriver(this.connectionName);
+      if (driver.publishLog) {
+        const monitorPrefix = typeof this.options.monitor === "object" ? this.options.monitor.prefix : void 0;
+        await driver.publishLog(
+          {
+            level,
+            message,
+            workerId: this.workerId,
+            jobId,
+            timestamp: (/* @__PURE__ */ new Date()).toISOString()
+          },
+          monitorPrefix
+        );
+      }
+    } catch (_e) {
+    }
+  }
   /**
    * Stop the consumer loop (graceful shutdown).
    */
@@ -901,11 +1534,16 @@ var MemoryDriver = class {
 };
 
 // src/index.ts
+init_RabbitMQDriver();
 init_RedisDriver();
 init_SQSDriver();
 
 // src/Job.ts
 var Job = class {
+  /**
+   * Unique job identifier.
+   */
+  id;
   /**
    * Queue name.
    */
@@ -926,6 +1564,22 @@ var Job = class {
    * Maximum attempts.
    */
   maxAttempts;
+  /**
+   * Group ID for FIFO.
+   */
+  groupId;
+  /**
+   * Job priority.
+   */
+  priority;
+  /**
+   * Initial retry delay (seconds).
+   */
+  retryAfterSeconds;
+  /**
+   * Retry delay multiplier.
+   */
+  retryMultiplier;
   /**
    * Set target queue.
    */
@@ -940,6 +1594,14 @@ var Job = class {
     this.connectionName = connection;
     return this;
   }
+  /**
+   * Set job priority.
+   * @param priority - 'high', 'low', or number
+   */
+  withPriority(priority) {
+    this.priority = priority;
+    return this;
+  }
   /**
    * Set delay (seconds).
    */
@@ -947,6 +1609,26 @@ var Job = class {
     this.delaySeconds = delay;
     return this;
   }
+  /**
+   * Set retry backoff strategy.
+   * @param seconds - Initial delay in seconds
+   * @param multiplier - Multiplier for each subsequent attempt (default: 2)
+   */
+  backoff(seconds, multiplier = 2) {
+    this.retryAfterSeconds = seconds;
+    this.retryMultiplier = multiplier;
+    return this;
+  }
+  /**
+   * Calculate retry delay for the next attempt.
+   * @param attempt - Current attempt number (1-based)
+   * @returns Delay in milliseconds
+   */
+  getRetryDelay(attempt) {
+    const initialDelay = (this.retryAfterSeconds ?? 1) * 1e3;
+    const multiplier = this.retryMultiplier ?? 2;
+    return Math.min(initialDelay * multiplier ** (attempt - 1), 36e5);
+  }
   /**
    * Failure handler (optional).
    *
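Note: `getRetryDelay` is exponential backoff capped at one hour (36e5 ms). With no `backoff()` call the base delay is 1 s; `job.backoff(5)` with the default multiplier of 2 yields 5 s, 10 s, 20 s, 40 s, ... for attempts 1, 2, 3, 4. A sketch (SendEmailJob is the placeholder class from the package's own doc comment):

    // min(5000 * 2 ** (attempt - 1), 3600000) ms
    new SendEmailJob("user@example.com").backoff(5);     // 5s, 10s, 20s, ...
    new SendEmailJob("user@example.com").backoff(30, 3); // 30s, 90s, 270s, ...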
@@ -985,7 +1667,7 @@ var ClassNameSerializer = class {
    * Serialize a Job.
    */
   serialize(job) {
-    const id = `${Date.now()}-${Math.random().toString(36).substring(2, 9)}`;
+    const id = job.id || `${Date.now()}-${Math.random().toString(36).substring(2, 9)}`;
     const className = job.constructor.name;
     const properties = {};
     for (const key in job) {
@@ -1004,7 +1686,11 @@ var ClassNameSerializer = class {
       createdAt: Date.now(),
       ...job.delaySeconds !== void 0 ? { delaySeconds: job.delaySeconds } : {},
       attempts: job.attempts ?? 0,
-      ...job.maxAttempts !== void 0 ? { maxAttempts: job.maxAttempts } : {}
+      ...job.maxAttempts !== void 0 ? { maxAttempts: job.maxAttempts } : {},
+      ...job.groupId ? { groupId: job.groupId } : {},
+      ...job.retryAfterSeconds !== void 0 ? { retryAfterSeconds: job.retryAfterSeconds } : {},
+      ...job.retryMultiplier !== void 0 ? { retryMultiplier: job.retryMultiplier } : {},
+      ...job.priority !== void 0 ? { priority: job.priority } : {}
     };
   }
   /**
@@ -1028,6 +1714,7 @@ var ClassNameSerializer = class {
     if (parsed.properties) {
       Object.assign(job, parsed.properties);
     }
+    job.id = serialized.id;
     if (serialized.delaySeconds !== void 0) {
       job.delaySeconds = serialized.delaySeconds;
     }
@@ -1037,6 +1724,18 @@ var ClassNameSerializer = class {
     if (serialized.maxAttempts !== void 0) {
       job.maxAttempts = serialized.maxAttempts;
     }
+    if (serialized.groupId !== void 0) {
+      job.groupId = serialized.groupId;
+    }
+    if (serialized.retryAfterSeconds !== void 0) {
+      job.retryAfterSeconds = serialized.retryAfterSeconds;
+    }
+    if (serialized.retryMultiplier !== void 0) {
+      job.retryMultiplier = serialized.retryMultiplier;
+    }
+    if (serialized.priority !== void 0) {
+      job.priority = serialized.priority;
+    }
     return job;
   }
 };
@@ -1058,7 +1757,9 @@ var JsonSerializer = class {
       createdAt: Date.now(),
       ...job.delaySeconds !== void 0 ? { delaySeconds: job.delaySeconds } : {},
       attempts: job.attempts ?? 0,
-      ...job.maxAttempts !== void 0 ? { maxAttempts: job.maxAttempts } : {}
+      ...job.maxAttempts !== void 0 ? { maxAttempts: job.maxAttempts } : {},
+      ...job.groupId ? { groupId: job.groupId } : {},
+      ...job.priority ? { priority: job.priority } : {}
     };
   }
   /**
@@ -1074,6 +1775,12 @@ var JsonSerializer = class {
     const parsed = JSON.parse(serialized.data);
     const job = /* @__PURE__ */ Object.create({});
     Object.assign(job, parsed.properties);
+    if (serialized.groupId) {
+      job.groupId = serialized.groupId;
+    }
+    if (serialized.priority) {
+      job.priority = serialized.priority;
+    }
     return job;
   }
 };
@@ -1084,7 +1791,11 @@ var QueueManager = class {
   serializers = /* @__PURE__ */ new Map();
   defaultConnection;
   defaultSerializer;
+  persistence;
+  scheduler;
+  // Using any to avoid circular dependency or import issues for now
   constructor(config = {}) {
+    this.persistence = config.persistence;
     this.defaultConnection = config.default ?? "default";
     const serializerType = config.defaultSerializer ?? "class";
     if (serializerType === "class") {
@@ -1192,9 +1903,30 @@ var QueueManager = class {
         );
         break;
       }
+      case "rabbitmq": {
+        const { RabbitMQDriver: RabbitMQDriver2 } = (init_RabbitMQDriver(), __toCommonJS(RabbitMQDriver_exports));
+        const client = config.client;
+        if (!client) {
+          throw new Error(
+            "[QueueManager] RabbitMQDriver requires client. Please provide RabbitMQ connection/channel in connection config."
+          );
+        }
+        this.drivers.set(
+          name,
+          new RabbitMQDriver2({
+            // biome-ignore lint/suspicious/noExplicitAny: Dynamic driver loading requires type assertion
+            client,
+            // biome-ignore lint/suspicious/noExplicitAny: Dynamic driver config type
+            exchange: config.exchange,
+            // biome-ignore lint/suspicious/noExplicitAny: Dynamic driver config type
+            exchangeType: config.exchangeType
+          })
+        );
+        break;
+      }
       default:
         throw new Error(
-          `Driver "${driverType}" is not supported. Supported drivers: memory, database, redis, kafka, sqs`
+          `Driver "${driverType}" is not supported. Supported drivers: memory, database, redis, kafka, sqs, rabbitmq`
         );
     }
   }
@@ -1210,6 +1942,13 @@ var QueueManager = class {
     }
     return driver;
   }
+  /**
+   * Get the default connection name.
+   * @returns Default connection name
+   */
+  getDefaultConnection() {
+    return this.defaultConnection;
+  }
   /**
    * Get a serializer.
    * @param type - Serializer type
@@ -1239,6 +1978,7 @@ var QueueManager = class {
    *
    * @template T - The type of the job.
    * @param job - Job instance to push.
+   * @param options - Push options.
    * @returns The same job instance (for fluent chaining).
    *
    * @example
@@ -1246,13 +1986,22 @@ var QueueManager = class {
    * await manager.push(new SendEmailJob('user@example.com'));
    * ```
    */
-  async push(job) {
+  async push(job, options) {
     const connection = job.connectionName ?? this.defaultConnection;
     const queue = job.queueName ?? "default";
     const driver = this.getDriver(connection);
     const serializer = this.getSerializer();
     const serialized = serializer.serialize(job);
-
+    const pushOptions = { ...options };
+    if (job.priority) {
+      pushOptions.priority = job.priority;
+    }
+    await driver.push(queue, serialized, pushOptions);
+    if (this.persistence?.archiveEnqueued) {
+      this.persistence.adapter.archive(queue, serialized, "waiting").catch((err) => {
+        console.error("[QueueManager] Persistence archive failed (waiting):", err);
+      });
+    }
     return job;
   }
   /**
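Note: `push` now merges the job's own `priority` into the driver options and, when persistence is configured, archives a "waiting" snapshot without blocking the caller. A usage sketch (SendEmailJob is the placeholder class from the doc comment above; the group ID is illustrative):

    await manager.push(new SendEmailJob("user@example.com").withPriority("high"));
    await manager.push(job, { groupId: "user-42" }); // group FIFO on the Redis driver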
@@ -1345,6 +2094,92 @@ var QueueManager = class {
     const driver = this.getDriver(connection);
     await driver.clear(queue);
   }
+  /**
+   * Mark a job as completed.
+   * @param job - Job instance
+   */
+  async complete(job) {
+    const connection = job.connectionName ?? this.defaultConnection;
+    const queue = job.queueName ?? "default";
+    const driver = this.getDriver(connection);
+    const serializer = this.getSerializer();
+    if (driver.complete) {
+      const serialized = serializer.serialize(job);
+      await driver.complete(queue, serialized);
+      if (this.persistence?.archiveCompleted) {
+        await this.persistence.adapter.archive(queue, serialized, "completed").catch((err) => {
+          console.error("[QueueManager] Persistence archive failed (completed):", err);
+        });
+      }
+    }
+  }
+  /**
+   * Mark a job as permanently failed.
+   * @param job - Job instance
+   * @param error - Error object
+   */
+  async fail(job, error) {
+    const connection = job.connectionName ?? this.defaultConnection;
+    const queue = job.queueName ?? "default";
+    const driver = this.getDriver(connection);
+    const serializer = this.getSerializer();
+    if (driver.fail) {
+      const serialized = serializer.serialize(job);
+      serialized.error = error.message;
+      serialized.failedAt = Date.now();
+      await driver.fail(queue, serialized);
+      if (this.persistence?.archiveFailed) {
+        await this.persistence.adapter.archive(queue, serialized, "failed").catch((err) => {
+          console.error("[QueueManager] Persistence archive failed (failed):", err);
+        });
+      }
+    }
+  }
+  /**
+   * Get the persistence adapter if configured.
+   */
+  getPersistence() {
+    return this.persistence?.adapter;
+  }
+  /**
+   * Get the scheduler if configured.
+   */
+  getScheduler() {
+    if (!this.scheduler) {
+      const { Scheduler: Scheduler2 } = (init_Scheduler(), __toCommonJS(Scheduler_exports));
+      this.scheduler = new Scheduler2(this);
+    }
+    return this.scheduler;
+  }
+  /**
+   * Get failed jobs from DLQ (if driver supports it).
+   */
+  async getFailed(queue, start = 0, end = -1, connection = this.defaultConnection) {
+    const driver = this.getDriver(connection);
+    if (driver.getFailed) {
+      return driver.getFailed(queue, start, end);
+    }
+    return [];
+  }
+  /**
+   * Retry failed jobs from DLQ (if driver supports it).
+   */
+  async retryFailed(queue, count = 1, connection = this.defaultConnection) {
+    const driver = this.getDriver(connection);
+    if (driver.retryFailed) {
+      return driver.retryFailed(queue, count);
+    }
+    return 0;
+  }
+  /**
+   * Clear failed jobs from DLQ (if driver supports it).
+   */
+  async clearFailed(queue, connection = this.defaultConnection) {
+    const driver = this.getDriver(connection);
+    if (driver.clearFailed) {
+      await driver.clearFailed(queue);
+    }
+  }
 };
 
 // src/OrbitStream.ts
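Note: the manager-level DLQ helpers delegate to the driver when it implements them and otherwise return a harmless default. Sketch:

    const failed = await manager.getFailed("emails");       // [] when the driver has no DLQ
    const retried = await manager.retryFailed("emails", 5); // 0 when unsupported
    await manager.clearFailed("emails");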
@@ -1425,6 +2260,453 @@ var OrbitStream = class _OrbitStream {
|
|
|
1425
2260
|
return this.queueManager;
|
|
1426
2261
|
}
|
|
1427
2262
|
};
|
|
2263
|
+
|
|
2264
|
+
// src/persistence/MySQLPersistence.ts
|
|
2265
|
+
var import_atlas = require("@gravito/atlas");
|
|
2266
|
+
var MySQLPersistence = class {
|
|
2267
|
+
/**
|
|
2268
|
+
* @param db - An Atlas DB instance or compatible QueryBuilder.
|
|
2269
|
+
* @param table - The name of the table to store archived jobs.
|
|
2270
|
+
*/
|
|
2271
|
+
constructor(db, table = "flux_job_archive", logsTable = "flux_system_logs") {
|
|
2272
|
+
this.db = db;
|
|
2273
|
+
this.table = table;
|
|
2274
|
+
this.logsTable = logsTable;
|
|
2275
|
+
}
|
|
2276
|
+
/**
|
|
2277
|
+
* Archive a job.
|
|
2278
|
+
*/
|
|
2279
|
+
async archive(queue, job, status) {
|
|
2280
|
+
try {
|
|
2281
|
+
await this.db.table(this.table).insert({
|
|
2282
|
+
job_id: job.id,
|
|
2283
|
+
queue,
|
|
2284
|
+
status,
|
|
2285
|
+
payload: JSON.stringify(job),
|
|
2286
|
+
error: job.error || null,
|
|
2287
|
+
created_at: new Date(job.createdAt),
|
|
2288
|
+
archived_at: /* @__PURE__ */ new Date()
|
|
2289
|
+
});
|
|
2290
|
+
} catch (err) {
|
|
2291
|
+
console.error(`[MySQLPersistence] Failed to archive job ${job.id}:`, err);
|
|
2292
|
+
}
|
|
2293
|
+
}
|
|
2294
|
+
/**
|
|
2295
|
+
* Find a specific job in the archive.
|
|
2296
|
+
*/
|
|
2297
|
+
async find(queue, id) {
|
|
2298
|
+
const row = await this.db.table(this.table).where("queue", queue).where("job_id", id).first();
|
|
2299
|
+
if (!row) {
|
|
2300
|
+
return null;
|
|
2301
|
+
}
|
|
2302
|
+
try {
|
|
2303
|
+
const job = typeof row.payload === "string" ? JSON.parse(row.payload) : row.payload;
|
|
2304
|
+
return job;
|
|
2305
|
+
} catch (_e) {
|
|
2306
|
+
return null;
|
|
2307
|
+
}
|
|
2308
|
+
}
|
|
2309
|
+
/**
|
|
2310
|
+
* List jobs from the archive.
|
|
2311
|
+
*/
|
|
2312
|
+
async list(queue, options = {}) {
|
|
2313
|
+
let query = this.db.table(this.table).where("queue", queue);
|
|
2314
|
+
if (options.status) {
|
|
2315
|
+
query = query.where("status", options.status);
|
|
2316
|
+
}
|
|
2317
|
+
if (options.jobId) {
|
|
2318
|
+
query = query.where("job_id", options.jobId);
|
|
2319
|
+
}
|
|
2320
|
+
if (options.startTime) {
|
|
2321
|
+
query = query.where("archived_at", ">=", options.startTime);
|
|
2322
|
+
}
|
|
2323
|
+
if (options.endTime) {
|
|
2324
|
+
query = query.where("archived_at", "<=", options.endTime);
|
|
2325
|
+
}
|
|
2326
|
+
const rows = await query.orderBy("archived_at", "desc").limit(options.limit ?? 50).offset(options.offset ?? 0).get();
|
|
2327
|
+
return rows.map((r) => {
|
|
2328
|
+
try {
|
|
2329
|
+
const job = typeof r.payload === "string" ? JSON.parse(r.payload) : r.payload;
|
|
2330
|
+
return { ...job, _status: r.status, _archivedAt: r.archived_at };
|
|
2331
|
+
} catch (_e) {
|
|
2332
|
+
return null;
|
|
2333
|
+
}
|
|
2334
|
+
}).filter(Boolean);
|
|
2335
|
+
}
|
|
2336
|
+
/**
|
|
2337
|
+
* Search jobs from the archive.
|
|
2338
|
+
*/
|
|
2339
|
+
async search(query, options = {}) {
|
|
2340
|
+
let q = this.db.table(this.table);
|
|
2341
|
+
if (options.queue) {
|
|
2342
|
+
q = q.where("queue", options.queue);
|
|
2343
|
+
}
|
|
2344
|
+
const rows = await q.where((sub) => {
|
|
2345
|
+
sub.where("job_id", "like", `%${query}%`).orWhere("payload", "like", `%${query}%`).orWhere("error", "like", `%${query}%`);
|
|
2346
|
+
}).orderBy("archived_at", "desc").limit(options.limit ?? 50).offset(options.offset ?? 0).get();
|
|
2347
|
+
return rows.map((r) => {
|
|
2348
|
+
try {
|
|
2349
|
+
const job = typeof r.payload === "string" ? JSON.parse(r.payload) : r.payload;
|
|
2350
|
+
return { ...job, _status: r.status, _archivedAt: r.archived_at };
|
|
2351
|
+
} catch (_e) {
|
|
2352
|
+
return null;
|
|
2353
|
+
}
|
|
2354
|
+
}).filter(Boolean);
|
|
2355
|
+
}
|
|
2356
|
+
/**
|
|
2357
|
+
* Archive a system log message.
|
|
2358
|
+
*/
|
|
2359
|
+
async archiveLog(log) {
|
|
2360
|
+
try {
|
|
2361
|
+
+      await this.db.table(this.logsTable).insert({
+        level: log.level,
+        message: log.message,
+        worker_id: log.workerId,
+        queue: log.queue || null,
+        timestamp: log.timestamp
+      });
+    } catch (err) {
+      console.error(`[MySQLPersistence] Failed to archive log:`, err.message);
+    }
+  }
+  /**
+   * List system logs from the archive.
+   */
+  async listLogs(options = {}) {
+    let query = this.db.table(this.logsTable);
+    if (options.level) query = query.where("level", options.level);
+    if (options.workerId) query = query.where("worker_id", options.workerId);
+    if (options.queue) query = query.where("queue", options.queue);
+    if (options.search) {
+      query = query.where("message", "like", `%${options.search}%`);
+    }
+    if (options.startTime) {
+      query = query.where("timestamp", ">=", options.startTime);
+    }
+    if (options.endTime) {
+      query = query.where("timestamp", "<=", options.endTime);
+    }
+    return await query.orderBy("timestamp", "desc").limit(options.limit ?? 50).offset(options.offset ?? 0).get();
+  }
+  /**
+   * Count system logs in the archive.
+   */
+  async countLogs(options = {}) {
+    let query = this.db.table(this.logsTable);
+    if (options.level) query = query.where("level", options.level);
+    if (options.workerId) query = query.where("worker_id", options.workerId);
+    if (options.queue) query = query.where("queue", options.queue);
+    if (options.search) {
+      query = query.where("message", "like", `%${options.search}%`);
+    }
+    if (options.startTime) {
+      query = query.where("timestamp", ">=", options.startTime);
+    }
+    if (options.endTime) {
+      query = query.where("timestamp", "<=", options.endTime);
+    }
+    const result = await query.count("id as total").first();
+    return result?.total || 0;
+  }
+  /**
+   * Remove old records from the archive.
+   */
+  async cleanup(days) {
+    const threshold = /* @__PURE__ */ new Date();
+    threshold.setDate(threshold.getDate() - days);
+    const [jobsDeleted, logsDeleted] = await Promise.all([
+      this.db.table(this.table).where("archived_at", "<", threshold).delete(),
+      this.db.table(this.logsTable).where("timestamp", "<", threshold).delete()
+    ]);
+    return (jobsDeleted || 0) + (logsDeleted || 0);
+  }
+  /**
+   * Count jobs in the archive.
+   */
+  async count(queue, options = {}) {
+    let query = this.db.table(this.table).where("queue", queue);
+    if (options.status) {
+      query = query.where("status", options.status);
+    }
+    if (options.jobId) {
+      query = query.where("job_id", options.jobId);
+    }
+    if (options.startTime) {
+      query = query.where("archived_at", ">=", options.startTime);
+    }
+    if (options.endTime) {
+      query = query.where("archived_at", "<=", options.endTime);
+    }
+    const result = await query.count("id as total").first();
+    return result?.total || 0;
+  }
+  /**
+   * Helper script to create the necessary tables.
+   */
+  async setupTable() {
+    await Promise.all([this.setupJobsTable(), this.setupLogsTable()]);
+  }
+  async setupJobsTable() {
+    const exists = await import_atlas.Schema.hasTable(this.table);
+    if (exists) return;
+    await import_atlas.Schema.create(this.table, (table) => {
+      table.id();
+      table.string("job_id", 64);
+      table.string("queue", 128);
+      table.string("status", 20);
+      table.json("payload");
+      table.text("error").nullable();
+      table.timestamp("created_at").nullable();
+      table.timestamp("archived_at").default(import_atlas.DB.raw("CURRENT_TIMESTAMP"));
+      table.index(["queue", "archived_at"]);
+      table.index(["queue", "job_id"]);
+      table.index(["status", "archived_at"]);
+      table.index(["archived_at"]);
+    });
+    console.log(`[MySQLPersistence] Created jobs archive table: ${this.table}`);
+  }
+  async setupLogsTable() {
+    const exists = await import_atlas.Schema.hasTable(this.logsTable);
+    if (exists) return;
+    await import_atlas.Schema.create(this.logsTable, (table) => {
+      table.id();
+      table.string("level", 20);
+      table.text("message");
+      table.string("worker_id", 128);
+      table.string("queue", 128).nullable();
+      table.timestamp("timestamp").default(import_atlas.DB.raw("CURRENT_TIMESTAMP"));
+      table.index(["worker_id"]);
+      table.index(["queue"]);
+      table.index(["level"]);
+      table.index(["timestamp"]);
+    });
+    console.log(`[MySQLPersistence] Created logs archive table: ${this.logsTable}`);
+  }
+};
+
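For reference, a minimal usage sketch of the new persistence API (not part of the package; it assumes `db` is an already-configured @gravito/atlas DB instance and that MySQLPersistence shares the `(db, table, logsTable)` constructor shown below for SQLitePersistence):

const { MySQLPersistence } = require("@gravito/stream");

async function main(db) {
  const persistence = new MySQLPersistence(db);
  // Creates the jobs and logs archive tables if they do not exist yet.
  await persistence.setupTable();
  // Archive a system log entry; field names match the insert() above.
  await persistence.archiveLog({
    level: "error",
    message: "Job handler threw",
    workerId: "worker-1",
    queue: "emails",
    timestamp: new Date()
  });
  // Query the archive back with the filter options the methods accept.
  const errors = await persistence.listLogs({ level: "error", limit: 10 });
  const total = await persistence.countLogs({ level: "error" });
  console.log(`${errors.length} of ${total} error logs`);
}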
+// src/persistence/SQLitePersistence.ts
+var import_atlas2 = require("@gravito/atlas");
+var SQLitePersistence = class {
+  /**
+   * @param db - An Atlas DB instance (SQLite driver).
+   * @param table - The name of the table to store archived jobs.
+   */
+  constructor(db, table = "flux_job_archive", logsTable = "flux_system_logs") {
+    this.db = db;
+    this.table = table;
+    this.logsTable = logsTable;
+  }
+  /**
+   * Archive a job.
+   */
+  async archive(queue, job, status) {
+    try {
+      await this.db.table(this.table).insert({
+        job_id: job.id,
+        queue,
+        status,
+        payload: JSON.stringify(job),
+        error: job.error || null,
+        created_at: new Date(job.createdAt),
+        archived_at: /* @__PURE__ */ new Date()
+      });
+    } catch (err) {
+      console.error(`[SQLitePersistence] Failed to archive job ${job.id}:`, err.message);
+    }
+  }
+  /**
+   * Find a specific job in the archive.
+   */
+  async find(queue, id) {
+    const row = await this.db.table(this.table).where("queue", queue).where("job_id", id).first();
+    if (!row) {
+      return null;
+    }
+    try {
+      const job = typeof row.payload === "string" ? JSON.parse(row.payload) : row.payload;
+      return job;
+    } catch (_e) {
+      return null;
+    }
+  }
+  /**
+   * List jobs from the archive.
+   */
+  async list(queue, options = {}) {
+    let query = this.db.table(this.table).where("queue", queue);
+    if (options.status) {
+      query = query.where("status", options.status);
+    }
+    if (options.jobId) {
+      query = query.where("job_id", options.jobId);
+    }
+    if (options.startTime) {
+      query = query.where("archived_at", ">=", options.startTime);
+    }
+    if (options.endTime) {
+      query = query.where("archived_at", "<=", options.endTime);
+    }
+    const rows = await query.orderBy("archived_at", "desc").limit(options.limit ?? 50).offset(options.offset ?? 0).get();
+    return rows.map((r) => {
+      try {
+        const job = typeof r.payload === "string" ? JSON.parse(r.payload) : r.payload;
+        return { ...job, _status: r.status, _archivedAt: r.archived_at };
+      } catch (_e) {
+        return null;
+      }
+    }).filter(Boolean);
+  }
+  /**
+   * Search jobs from the archive.
+   */
+  async search(query, options = {}) {
+    let q = this.db.table(this.table);
+    if (options.queue) {
+      q = q.where("queue", options.queue);
+    }
+    const rows = await q.where((sub) => {
+      sub.where("job_id", "like", `%${query}%`).orWhere("payload", "like", `%${query}%`).orWhere("error", "like", `%${query}%`);
+    }).orderBy("archived_at", "desc").limit(options.limit ?? 50).offset(options.offset ?? 0).get();
+    return rows.map((r) => {
+      try {
+        const job = typeof r.payload === "string" ? JSON.parse(r.payload) : r.payload;
+        return { ...job, _status: r.status, _archivedAt: r.archived_at };
+      } catch (_e) {
+        return null;
+      }
+    }).filter(Boolean);
+  }
+  /**
+   * Archive a system log message.
+   */
+  async archiveLog(log) {
+    try {
+      await this.db.table(this.logsTable).insert({
+        level: log.level,
+        message: log.message,
+        worker_id: log.workerId,
+        queue: log.queue || null,
+        timestamp: log.timestamp
+      });
+    } catch (err) {
+      console.error(`[SQLitePersistence] Failed to archive log:`, err.message);
+    }
+  }
+  /**
+   * List system logs from the archive.
+   */
+  async listLogs(options = {}) {
+    let query = this.db.table(this.logsTable);
+    if (options.level) query = query.where("level", options.level);
+    if (options.workerId) query = query.where("worker_id", options.workerId);
+    if (options.queue) query = query.where("queue", options.queue);
+    if (options.search) {
+      query = query.where("message", "like", `%${options.search}%`);
+    }
+    if (options.startTime) {
+      query = query.where("timestamp", ">=", options.startTime);
+    }
+    if (options.endTime) {
+      query = query.where("timestamp", "<=", options.endTime);
+    }
+    return await query.orderBy("timestamp", "desc").limit(options.limit ?? 50).offset(options.offset ?? 0).get();
+  }
+  /**
+   * Count system logs in the archive.
+   */
+  async countLogs(options = {}) {
+    let query = this.db.table(this.logsTable);
+    if (options.level) query = query.where("level", options.level);
+    if (options.workerId) query = query.where("worker_id", options.workerId);
+    if (options.queue) query = query.where("queue", options.queue);
+    if (options.search) {
+      query = query.where("message", "like", `%${options.search}%`);
+    }
+    if (options.startTime) {
+      query = query.where("timestamp", ">=", options.startTime);
+    }
+    if (options.endTime) {
+      query = query.where("timestamp", "<=", options.endTime);
+    }
+    const result = await query.count("id as total").first();
+    return result?.total || 0;
+  }
+  /**
+   * Remove old records from the archive.
+   */
+  async cleanup(days) {
+    const threshold = /* @__PURE__ */ new Date();
+    threshold.setDate(threshold.getDate() - days);
+    const [jobsDeleted, logsDeleted] = await Promise.all([
+      this.db.table(this.table).where("archived_at", "<", threshold).delete(),
+      this.db.table(this.logsTable).where("timestamp", "<", threshold).delete()
+    ]);
+    return (jobsDeleted || 0) + (logsDeleted || 0);
+  }
+  /**
+   * Count jobs in the archive.
+   */
+  async count(queue, options = {}) {
+    let query = this.db.table(this.table).where("queue", queue);
+    if (options.status) {
+      query = query.where("status", options.status);
+    }
+    if (options.jobId) {
+      query = query.where("job_id", options.jobId);
+    }
+    if (options.startTime) {
+      query = query.where("archived_at", ">=", options.startTime);
+    }
+    if (options.endTime) {
+      query = query.where("archived_at", "<=", options.endTime);
+    }
+    const result = await query.count("id as total").first();
+    return result?.total || 0;
+  }
+  /**
+   * Setup table for SQLite.
+   */
+  async setupTable() {
+    await Promise.all([this.setupJobsTable(), this.setupLogsTable()]);
+  }
+  async setupJobsTable() {
+    const exists = await import_atlas2.Schema.hasTable(this.table);
+    if (exists) return;
+    await import_atlas2.Schema.create(this.table, (table) => {
+      table.id();
+      table.string("job_id", 64);
+      table.string("queue", 128);
+      table.string("status", 20);
+      table.text("payload");
+      table.text("error").nullable();
+      table.timestamp("created_at").nullable();
+      table.timestamp("archived_at").nullable();
+      table.index(["queue", "archived_at"]);
+      table.index(["archived_at"]);
+    });
+    console.log(`[SQLitePersistence] Created jobs archive table: ${this.table}`);
+  }
+  async setupLogsTable() {
+    const exists = await import_atlas2.Schema.hasTable(this.logsTable);
+    if (exists) return;
+    await import_atlas2.Schema.create(this.logsTable, (table) => {
+      table.id();
+      table.string("level", 20);
+      table.text("message");
+      table.string("worker_id", 128);
+      table.string("queue", 128).nullable();
+      table.timestamp("timestamp");
+      table.index(["worker_id"]);
+      table.index(["queue"]);
+      table.index(["level"]);
+      table.index(["timestamp"]);
+    });
+    console.log(`[SQLitePersistence] Created logs archive table: ${this.logsTable}`);
+  }
+};
+
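Both persistence classes expose the same `cleanup(days)` retention hook. A sketch of a simple daily purge (the `setInterval` scheduling here is illustrative only, not the package's mechanism):

// Once a day, delete archived jobs and system logs older than 30 days.
// `persistence` is an instance of MySQLPersistence or SQLitePersistence.
setInterval(async () => {
  const removed = await persistence.cleanup(30);
  console.log(`[retention] purged ${removed} archived rows`);
}, 24 * 60 * 60 * 1000);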
+// src/index.ts
+init_Scheduler();
 // Annotate the CommonJS export names for ESM import in node:
 0 && (module.exports = {
   ClassNameSerializer,
@@ -1434,9 +2716,13 @@ var OrbitStream = class _OrbitStream {
   JsonSerializer,
   KafkaDriver,
   MemoryDriver,
+  MySQLPersistence,
   OrbitStream,
   QueueManager,
+  RabbitMQDriver,
   RedisDriver,
+  SQLitePersistence,
   SQSDriver,
+  Scheduler,
   Worker
 });
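Per the export list above, the new symbols are importable from the package root, e.g.:

const { MySQLPersistence, SQLitePersistence, RabbitMQDriver, Scheduler } = require("@gravito/stream");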