@gravito/stream 1.0.0-beta.1 → 1.0.0-beta.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +86 -2
- package/dist/index.cjs +1357 -73
- package/dist/index.d.cts +689 -7
- package/dist/index.d.ts +689 -7
- package/dist/index.js +1359 -73
- package/package.json +9 -5
package/dist/index.cjs
CHANGED
@@ -56,10 +56,11 @@ var init_DatabaseDriver = __esm({
        */
       async push(queue, job) {
         const availableAt = job.delaySeconds ? new Date(Date.now() + job.delaySeconds * 1e3) : /* @__PURE__ */ new Date();
+        const payload = JSON.stringify(job);
         await this.dbService.execute(
           `INSERT INTO ${this.tableName} (queue, payload, attempts, available_at, created_at)
            VALUES ($1, $2, $3, $4, $5)`,
-          [queue,
+          [queue, payload, job.attempts ?? 0, availableAt.toISOString(), (/* @__PURE__ */ new Date()).toISOString()]
         );
       }
       /**
@@ -102,15 +103,32 @@ var init_DatabaseDriver = __esm({
         );
         const createdAt = new Date(row.created_at).getTime();
         const delaySeconds = row.available_at ? Math.max(0, Math.floor((new Date(row.available_at).getTime() - createdAt) / 1e3)) : void 0;
-
-
-
-
-
-
-
-
-
+        let job;
+        try {
+          const parsed = JSON.parse(row.payload);
+          if (parsed && typeof parsed === "object" && parsed.type && parsed.data) {
+            job = {
+              ...parsed,
+              id: row.id,
+              // DB ID is the source of truth for deletion
+              attempts: row.attempts
+            };
+          } else {
+            throw new Error("Fallback");
+          }
+        } catch (_e) {
+          job = {
+            id: row.id,
+            type: "class",
+            data: row.payload,
+            createdAt,
+            attempts: row.attempts
+          };
+        }
+        if (delaySeconds !== void 0) {
+          job.delaySeconds = delaySeconds;
+        }
+        return job;
       }
       /**
        * Get queue size.
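The reworked pop now tolerates two payload shapes: rows written by the new push (a full serialized-job JSON envelope with type and data) and legacy rows whose payload is a bare class-payload string. A minimal sketch of that branch (illustrative, not part of the published diff), assuming a row with id, payload, and attempts columns as in the hunk above:

interface ArchRow { id: number; payload: string; attempts: number; }

function rowToJob(row: ArchRow, createdAt: number) {
  try {
    const parsed = JSON.parse(row.payload);
    if (parsed && typeof parsed === "object" && parsed.type && parsed.data) {
      // New-format row: the payload already is a serialized job envelope;
      // the DB id still wins so complete() can delete by primary key.
      return { ...parsed, id: row.id, attempts: row.attempts };
    }
  } catch {
    // not JSON: fall through to the legacy branch below
  }
  // Legacy row: wrap the raw payload as class-serialized data.
  return { id: row.id, type: "class", data: row.payload, createdAt, attempts: row.attempts };
}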
@@ -150,6 +168,27 @@ var init_DatabaseDriver = __esm({
         }
       });
     }
+    /**
+     * Mark a job as failed (DLQ).
+     */
+    async fail(queue, job) {
+      const failedQueue = `failed:${queue}`;
+      const payload = JSON.stringify(job);
+      await this.dbService.execute(
+        `INSERT INTO ${this.tableName} (queue, payload, attempts, available_at, created_at)
+         VALUES ($1, $2, $3, $4, $5)`,
+        [failedQueue, payload, job.attempts, (/* @__PURE__ */ new Date()).toISOString(), (/* @__PURE__ */ new Date()).toISOString()]
+      );
+    }
+    /**
+     * Acknowledge/Complete a job.
+     */
+    async complete(_queue, job) {
+      if (!job.id) {
+        return;
+      }
+      await this.dbService.execute(`DELETE FROM ${this.tableName} WHERE id = $1`, [job.id]);
+    }
   };
 }
});
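fail reuses the same jobs table, re-inserting the payload under a failed:<queue> name, while complete deletes by primary key. Inspecting the DLQ is therefore an ordinary query on the same table; a hedged sketch (the query method is an assumption for illustration - the driver itself only shows execute):

// Illustrative only: tableName and the "failed:" prefix convention come from the hunk above.
async function listFailed(
  dbService: { query(sql: string, params: unknown[]): Promise<unknown[]> },
  tableName: string,
  queue: string
) {
  return dbService.query(
    `SELECT id, payload, attempts, created_at FROM ${tableName} WHERE queue = $1 ORDER BY created_at DESC`,
    [`failed:${queue}`]
  );
}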
@@ -328,6 +367,150 @@ var init_KafkaDriver = __esm({
   }
 });

+// src/drivers/RabbitMQDriver.ts
+var RabbitMQDriver_exports = {};
+__export(RabbitMQDriver_exports, {
+  RabbitMQDriver: () => RabbitMQDriver
+});
+var RabbitMQDriver;
+var init_RabbitMQDriver = __esm({
+  "src/drivers/RabbitMQDriver.ts"() {
+    "use strict";
+    RabbitMQDriver = class {
+      connection;
+      channel;
+      exchange;
+      exchangeType;
+      constructor(config) {
+        this.connection = config.client;
+        this.exchange = config.exchange;
+        this.exchangeType = config.exchangeType ?? "fanout";
+        if (!this.connection) {
+          throw new Error(
+            "[RabbitMQDriver] RabbitMQ connection is required. Please provide a connection from amqplib."
+          );
+        }
+      }
+      /**
+       * Ensure channel is created.
+       */
+      async ensureChannel() {
+        if (this.channel) {
+          return this.channel;
+        }
+        if (typeof this.connection.createChannel === "function") {
+          this.channel = await this.connection.createChannel();
+        } else {
+          this.channel = this.connection;
+        }
+        if (this.exchange) {
+          await this.channel.assertExchange(this.exchange, this.exchangeType, { durable: true });
+        }
+        return this.channel;
+      }
+      /**
+       * Get the underlying connection.
+       */
+      getRawConnection() {
+        return this.connection;
+      }
+      /**
+       * Push a job (sendToQueue / publish).
+       */
+      async push(queue, job) {
+        const channel = await this.ensureChannel();
+        const payload = Buffer.from(JSON.stringify(job));
+        if (this.exchange) {
+          await channel.assertQueue(queue, { durable: true });
+          await channel.bindQueue(queue, this.exchange, "");
+          channel.publish(this.exchange, "", payload, { persistent: true });
+        } else {
+          await channel.assertQueue(queue, { durable: true });
+          channel.sendToQueue(queue, payload, { persistent: true });
+        }
+      }
+      /**
+       * Pop a job (get).
+       */
+      async pop(queue) {
+        const channel = await this.ensureChannel();
+        await channel.assertQueue(queue, { durable: true });
+        const msg = await channel.get(queue, { noAck: false });
+        if (!msg) {
+          return null;
+        }
+        const job = JSON.parse(msg.content.toString());
+        job._raw = msg;
+        return job;
+      }
+      /**
+       * Acknowledge a message.
+       */
+      async acknowledge(messageId) {
+        const channel = await this.ensureChannel();
+        if (typeof messageId === "object") {
+          channel.ack(messageId);
+        }
+      }
+      /**
+       * Negative acknowledge a message.
+       */
+      async nack(message, requeue = true) {
+        const channel = await this.ensureChannel();
+        channel.nack(message, false, requeue);
+      }
+      /**
+       * Reject a message.
+       */
+      async reject(message, requeue = true) {
+        const channel = await this.ensureChannel();
+        channel.reject(message, requeue);
+      }
+      /**
+       * Subscribe to a queue.
+       */
+      async subscribe(queue, callback, options = {}) {
+        const channel = await this.ensureChannel();
+        await channel.assertQueue(queue, { durable: true });
+        if (this.exchange) {
+          await channel.bindQueue(queue, this.exchange, "");
+        }
+        const { autoAck = true } = options;
+        await channel.consume(
+          queue,
+          async (msg) => {
+            if (!msg) {
+              return;
+            }
+            const job = JSON.parse(msg.content.toString());
+            job._raw = msg;
+            await callback(job);
+            if (autoAck) {
+              channel.ack(msg);
+            }
+          },
+          { noAck: false }
+        );
+      }
+      /**
+       * Get queue size.
+       */
+      async size(queue) {
+        const channel = await this.ensureChannel();
+        const ok = await channel.checkQueue(queue);
+        return ok.messageCount;
+      }
+      /**
+       * Clear a queue.
+       */
+      async clear(queue) {
+        const channel = await this.ensureChannel();
+        await channel.purgeQueue(queue);
+      }
+    };
+  }
+});
+
 // src/drivers/RedisDriver.ts
 var RedisDriver_exports = {};
 __export(RedisDriver_exports, {
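A usage sketch for the new driver (illustrative, not part of the diff): it accepts either an amqplib connection (createChannel is detected) or an already-open channel, and publishes through an exchange only when one is configured; the URL and queue name below are assumptions.

import amqp from "amqplib";
import { RabbitMQDriver } from "@gravito/stream";

async function main() {
  const connection = await amqp.connect("amqp://localhost"); // standard amqplib connect
  const driver = new RabbitMQDriver({ client: connection }); // no exchange: plain sendToQueue path

  // push() asserts a durable queue and sends a persistent JSON message.
  await driver.push("emails", { id: "1", type: "json", data: "{}", createdAt: Date.now(), attempts: 0 });

  const job = await driver.pop("emails"); // channel.get with noAck: false
  if (job) {
    await driver.acknowledge(job._raw);   // ack the raw amqplib message attached by pop()
  }
}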
@@ -337,9 +520,43 @@ var RedisDriver;
 var init_RedisDriver = __esm({
   "src/drivers/RedisDriver.ts"() {
     "use strict";
-    RedisDriver = class {
+    RedisDriver = class _RedisDriver {
       prefix;
       client;
+      // Lua Logic:
+      // IF (IS_MEMBER(activeSet, groupId)) -> PUSH(pendingList, job)
+      // ELSE -> SADD(activeSet, groupId) & LPUSH(waitList, job)
+      static PUSH_SCRIPT = `
+        local waitList = KEYS[1]
+        local activeSet = KEYS[2]
+        local pendingList = KEYS[3]
+        local groupId = ARGV[1]
+        local payload = ARGV[2]
+
+        if redis.call('SISMEMBER', activeSet, groupId) == 1 then
+          return redis.call('RPUSH', pendingList, payload)
+        else
+          redis.call('SADD', activeSet, groupId)
+          return redis.call('LPUSH', waitList, payload)
+        end
+      `;
+      // Lua Logic:
+      // local next = LPOP(pendingList)
+      // IF (next) -> LPUSH(waitList, next)
+      // ELSE -> SREM(activeSet, groupId)
+      static COMPLETE_SCRIPT = `
+        local waitList = KEYS[1]
+        local activeSet = KEYS[2]
+        local pendingList = KEYS[3]
+        local groupId = ARGV[1]
+
+        local nextJob = redis.call('LPOP', pendingList)
+        if nextJob then
+          return redis.call('LPUSH', waitList, nextJob)
+        else
+          return redis.call('SREM', activeSet, groupId)
+        end
+      `;
       constructor(config) {
         this.client = config.client;
         this.prefix = config.prefix ?? "queue:";
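The two scripts maintain one invariant: at most one job per group ever sits in the poppable wait list, while later jobs for the same group park in a per-group pending list until complete() promotes them. An in-memory model of the same logic (a sketch for illustration, not the driver's API):

const waitList: string[] = [];               // LPUSH here; pop() RPOPs from the other end
const activeSet = new Set<string>();         // groups with a job in flight
const pending = new Map<string, string[]>(); // parked jobs per group

function pushGroupJob(groupId: string, payload: string) {
  if (activeSet.has(groupId)) {
    const list = pending.get(groupId) ?? [];
    list.push(payload);                      // RPUSH pendingList
    pending.set(groupId, list);
  } else {
    activeSet.add(groupId);                  // SADD activeSet
    waitList.unshift(payload);               // LPUSH waitList
  }
}

function completeGroupJob(groupId: string) {
  const next = pending.get(groupId)?.shift(); // LPOP pendingList
  if (next !== undefined) {
    waitList.unshift(next);                  // promote the group's next job
  } else {
    activeSet.delete(groupId);               // SREM activeSet: group is idle again
  }
}

pushGroupJob("order-42", "A"); // A becomes poppable
pushGroupJob("order-42", "B"); // B parks behind A
completeGroupJob("order-42");  // B promoted; per-group FIFO order is preserved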
@@ -348,19 +565,36 @@ var init_RedisDriver = __esm({
             "[RedisDriver] Redis client is required. Please install ioredis or redis package."
           );
         }
+        if (typeof this.client.defineCommand === "function") {
+          ;
+          this.client.defineCommand("pushGroupJob", {
+            numberOfKeys: 3,
+            lua: _RedisDriver.PUSH_SCRIPT
+          });
+          this.client.defineCommand("completeGroupJob", {
+            numberOfKeys: 3,
+            lua: _RedisDriver.COMPLETE_SCRIPT
+          });
+        }
       }
       /**
       * Get full Redis key for a queue.
       */
-      getKey(queue) {
+      getKey(queue, priority) {
+        if (priority) {
+          return `${this.prefix}${queue}:${priority}`;
+        }
         return `${this.prefix}${queue}`;
       }
       /**
       * Push a job (LPUSH).
       */
-      async push(queue, job) {
-        const key = this.getKey(queue);
-        const
+      async push(queue, job, options) {
+        const key = this.getKey(queue, options?.priority);
+        const groupId = options?.groupId;
+        if (groupId && options?.priority) {
+        }
+        const payloadObj = {
           id: job.id,
           type: job.type,
           data: job.data,
@@ -368,8 +602,18 @@ var init_RedisDriver = __esm({
           createdAt: job.createdAt,
           delaySeconds: job.delaySeconds,
           attempts: job.attempts,
-          maxAttempts: job.maxAttempts
-
+          maxAttempts: job.maxAttempts,
+          groupId,
+          error: job.error,
+          failedAt: job.failedAt
+        };
+        const payload = JSON.stringify(payloadObj);
+        if (groupId && typeof this.client.pushGroupJob === "function") {
+          const activeSetKey = `${this.prefix}active`;
+          const pendingListKey = `${this.prefix}pending:${groupId}`;
+          await this.client.pushGroupJob(key, activeSetKey, pendingListKey, groupId, payload);
+          return;
+        }
         if (job.delaySeconds && job.delaySeconds > 0) {
           const delayKey = `${key}:delayed`;
           const score = Date.now() + job.delaySeconds * 1e3;
@@ -382,29 +626,53 @@ var init_RedisDriver = __esm({
           await this.client.lpush(key, payload);
         }
       }
+      /**
+       * Complete a job (handle Group FIFO).
+       */
+      async complete(queue, job) {
+        if (!job.groupId) {
+          return;
+        }
+        const key = this.getKey(queue);
+        const activeSetKey = `${this.prefix}active`;
+        const pendingListKey = `${this.prefix}pending:${job.groupId}`;
+        if (typeof this.client.completeGroupJob === "function") {
+          await this.client.completeGroupJob(key, activeSetKey, pendingListKey, job.groupId);
+        }
+      }
       /**
       * Pop a job (RPOP, FIFO).
+      * Supports implicit priority polling (critical -> high -> default -> low).
       */
       async pop(queue) {
-        const
-        const
-
-        const
-
-
-        const
-        if (
-        const
-
-
+        const priorities = ["critical", "high", void 0, "low"];
+        for (const priority of priorities) {
+          const key = this.getKey(queue, priority);
+          const delayKey = `${key}:delayed`;
+          if (typeof this.client.zrange === "function") {
+            const now = Date.now();
+            const delayedJobs = await this.client.zrange(delayKey, 0, 0, "WITHSCORES");
+            if (delayedJobs && delayedJobs.length >= 2) {
+              const score = parseFloat(delayedJobs[1]);
+              if (score <= now) {
+                const payload2 = delayedJobs[0];
+                await this.client.zrem(delayKey, payload2);
+                return this.parsePayload(payload2);
+              }
             }
           }
+          if (typeof this.client.get === "function") {
+            const isPaused = await this.client.get(`${key}:paused`);
+            if (isPaused === "1") {
+              continue;
+            }
+          }
+          const payload = await this.client.rpop(key);
+          if (payload) {
+            return this.parsePayload(payload);
+          }
         }
-
-        if (!payload) {
-          return null;
-        }
-        return this.parsePayload(payload);
+        return null;
       }
       /**
       * Parse Redis payload.
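pop() now scans four key variants per queue in a fixed order, draining due entries from each lane's :delayed ZSET and skipping lanes whose :paused flag is set. The resulting key layout for a queue named mail under the default prefix (a sketch; the queue name is illustrative):

// Poll order inside pop():
//   queue:mail:critical -> queue:mail:high -> queue:mail -> queue:mail:low
// Each lane also gets "<key>:delayed" (ZSET scored by ready-time) and "<key>:paused".
import Redis from "ioredis";

async function pauseDefaultLane(redis: Redis) {
  // pop() reads "<key>:paused" and continues past the lane when it is "1",
  // so setting this makes consumers fall through to queue:mail:low.
  await redis.set("queue:mail:paused", "1");
}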
@@ -419,7 +687,11 @@ var init_RedisDriver = __esm({
           createdAt: parsed.createdAt,
           delaySeconds: parsed.delaySeconds,
           attempts: parsed.attempts,
-          maxAttempts: parsed.maxAttempts
+          maxAttempts: parsed.maxAttempts,
+          groupId: parsed.groupId,
+          error: parsed.error,
+          failedAt: parsed.failedAt,
+          priority: parsed.priority
         };
       }
       /**
@@ -429,15 +701,31 @@ var init_RedisDriver = __esm({
         const key = this.getKey(queue);
         return this.client.llen(key);
       }
+      /**
+       * Mark a job as permanently failed (DLQ).
+       */
+      async fail(queue, job) {
+        const key = `${this.getKey(queue)}:failed`;
+        const payload = JSON.stringify({
+          ...job,
+          failedAt: Date.now()
+        });
+        await this.client.lpush(key, payload);
+        if (typeof this.client.ltrim === "function") {
+          await this.client.ltrim(key, 0, 999);
+        }
+      }
       /**
       * Clear a queue.
       */
       async clear(queue) {
         const key = this.getKey(queue);
         const delayKey = `${key}:delayed`;
+        const activeSetKey = `${this.prefix}active`;
         await this.client.del(key);
         if (typeof this.client.del === "function") {
           await this.client.del(delayKey);
+          await this.client.del(activeSetKey);
         }
       }
       /**
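The Redis DLQ is a capped list: fail() LPUSHes onto <key>:failed and LTRIMs to indexes 0-999, so only the newest 1000 failures are retained. A peek helper sketch (illustrative):

import Redis from "ioredis";

async function peekFailed(redis: Redis, queue: string, prefix = "queue:") {
  // Index 0 is the most recent failure because fail() uses LPUSH.
  const raw = await redis.lrange(`${prefix}${queue}:failed`, 0, 9);
  return raw.map((p) => JSON.parse(p)); // each entry carries error and failedAt
}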
@@ -447,6 +735,17 @@ var init_RedisDriver = __esm({
         if (jobs.length === 0) {
           return;
         }
+        const hasGroup = jobs.some((j) => j.groupId);
+        const hasPriority = jobs.some((j) => j.priority);
+        if (hasGroup || hasPriority) {
+          for (const job of jobs) {
+            await this.push(queue, job, {
+              groupId: job.groupId,
+              priority: job.priority
+            });
+          }
+          return;
+        }
         const key = this.getKey(queue);
         const payloads = jobs.map(
           (job) => JSON.stringify({
@@ -457,7 +756,9 @@ var init_RedisDriver = __esm({
             createdAt: job.createdAt,
             delaySeconds: job.delaySeconds,
             attempts: job.attempts,
-            maxAttempts: job.maxAttempts
+            maxAttempts: job.maxAttempts,
+            groupId: job.groupId,
+            priority: job.priority
           })
         );
         await this.client.lpush(key, ...payloads);
@@ -478,6 +779,89 @@ var init_RedisDriver = __esm({
         }
         return results;
       }
+      /**
+       * Report worker heartbeat for monitoring.
+       */
+      async reportHeartbeat(workerInfo, prefix) {
+        const key = `${prefix ?? this.prefix}worker:${workerInfo.id}`;
+        if (typeof this.client.set === "function") {
+          await this.client.set(key, JSON.stringify(workerInfo), "EX", 10);
+        }
+      }
+      /**
+       * Publish a log message for monitoring.
+       */
+      async publishLog(logPayload, prefix) {
+        const payload = JSON.stringify(logPayload);
+        const monitorPrefix = prefix ?? this.prefix;
+        if (typeof this.client.publish === "function") {
+          await this.client.publish(`${monitorPrefix}logs`, payload);
+        }
+        const historyKey = `${monitorPrefix}logs:history`;
+        if (typeof this.client.pipeline === "function") {
+          const pipe = this.client.pipeline();
+          pipe.lpush(historyKey, payload);
+          pipe.ltrim(historyKey, 0, 99);
+          await pipe.exec();
+        } else {
+          await this.client.lpush(historyKey, payload);
+        }
+      }
+      /**
+       * Check if a queue is rate limited.
+       * Uses a fixed window counter.
+       */
+      async checkRateLimit(queue, config) {
+        const key = `${this.prefix}${queue}:ratelimit`;
+        const now = Date.now();
+        const windowStart = Math.floor(now / config.duration);
+        const windowKey = `${key}:${windowStart}`;
+        const client = this.client;
+        if (typeof client.incr === "function") {
+          const current = await client.incr(windowKey);
+          if (current === 1) {
+            await client.expire(windowKey, Math.ceil(config.duration / 1e3) + 1);
+          }
+          return current <= config.max;
+        }
+        return true;
+      }
+      /**
+       * Get failed jobs from DLQ.
+       */
+      async getFailed(queue, start = 0, end = -1) {
+        const key = `${this.getKey(queue)}:failed`;
+        const payloads = await this.client.lrange(key, start, end);
+        return payloads.map((p) => this.parsePayload(p));
+      }
+      /**
+       * Retry failed jobs from DLQ.
+       * Moves jobs from failed list back to the main queue.
+       */
+      async retryFailed(queue, count = 1) {
+        const failedKey = `${this.getKey(queue)}:failed`;
+        let retried = 0;
+        for (let i = 0; i < count; i++) {
+          const payload = await this.client.rpop(failedKey);
+          if (!payload) {
+            break;
+          }
+          const job = this.parsePayload(payload);
+          job.attempts = 0;
+          delete job.error;
+          delete job.failedAt;
+          await this.push(queue, job, { priority: job.priority, groupId: job.groupId });
+          retried++;
+        }
+        return retried;
+      }
+      /**
+       * Clear failed jobs from DLQ.
+       */
+      async clearFailed(queue) {
+        const key = `${this.getKey(queue)}:failed`;
+        await this.client.del(key);
+      }
     };
   }
 });
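checkRateLimit is a fixed-window counter: the window id is floor(now / duration), each hit INCRs that window's key, the first hit sets an expiry just past the window, and jobs pass while the count stays at or below max. A worked example with illustrative numbers:

// config = { max: 100, duration: 60_000 }  -> at most 100 jobs per minute
const now = 1_700_000_123_456;
const windowStart = Math.floor(now / 60_000); // 28333335: identifies this minute
const windowKey = `queue:mail:ratelimit:${windowStart}`;
// INCR windowKey -> 1 on the first job of the minute, so
// EXPIRE is set to Math.ceil(60_000 / 1000) + 1 = 61 seconds;
// the 101st INCR in the same window returns 101 > 100, so checkRateLimit yields false
// until the key expires and the next window starts fresh.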
@@ -683,6 +1067,125 @@ var init_SQSDriver = __esm({
   }
 });

+// src/Scheduler.ts
+var Scheduler_exports = {};
+__export(Scheduler_exports, {
+  Scheduler: () => Scheduler
+});
+var import_cron_parser, Scheduler;
+var init_Scheduler = __esm({
+  "src/Scheduler.ts"() {
+    "use strict";
+    import_cron_parser = __toESM(require("cron-parser"), 1);
+    Scheduler = class {
+      constructor(manager, options = {}) {
+        this.manager = manager;
+        this.prefix = options.prefix ?? "queue:";
+      }
+      prefix;
+      get client() {
+        const driver = this.manager.getDriver(this.manager.getDefaultConnection());
+        return driver.client;
+      }
+      /**
+       * Register a scheduled job.
+       */
+      async register(config) {
+        const nextRun = import_cron_parser.default.parse(config.cron).next().getTime();
+        const fullConfig = {
+          ...config,
+          nextRun,
+          enabled: true
+        };
+        const pipe = this.client.pipeline();
+        pipe.hset(`${this.prefix}schedule:${config.id}`, {
+          ...fullConfig,
+          job: JSON.stringify(fullConfig.job)
+        });
+        pipe.zadd(`${this.prefix}schedules`, nextRun, config.id);
+        await pipe.exec();
+      }
+      /**
+       * Remove a scheduled job.
+       */
+      async remove(id) {
+        const pipe = this.client.pipeline();
+        pipe.del(`${this.prefix}schedule:${id}`);
+        pipe.zrem(`${this.prefix}schedules`, id);
+        await pipe.exec();
+      }
+      /**
+       * List all scheduled jobs.
+       */
+      async list() {
+        const ids = await this.client.zrange(`${this.prefix}schedules`, 0, -1);
+        const configs = [];
+        for (const id of ids) {
+          const data = await this.client.hgetall(`${this.prefix}schedule:${id}`);
+          if (data?.id) {
+            configs.push({
+              ...data,
+              lastRun: data.lastRun ? parseInt(data.lastRun, 10) : void 0,
+              nextRun: data.nextRun ? parseInt(data.nextRun, 10) : void 0,
+              enabled: data.enabled === "true",
+              job: JSON.parse(data.job)
+            });
+          }
+        }
+        return configs;
+      }
+      /**
+       * Run a scheduled job immediately (out of schedule).
+       */
+      async runNow(id) {
+        const data = await this.client.hgetall(`${this.prefix}schedule:${id}`);
+        if (data?.id) {
+          const serialized = JSON.parse(data.job);
+          const serializer = this.manager.getSerializer();
+          const job = serializer.deserialize(serialized);
+          await this.manager.push(job);
+        }
+      }
+      /**
+       * Process due tasks (TICK).
+       * This should be called periodically (e.g. every minute).
+       */
+      async tick() {
+        const now = Date.now();
+        const dueIds = await this.client.zrangebyscore(`${this.prefix}schedules`, 0, now);
+        let fired = 0;
+        for (const id of dueIds) {
+          const lockKey = `${this.prefix}lock:schedule:${id}:${Math.floor(now / 1e3)}`;
+          const lock = await this.client.set(lockKey, "1", "EX", 10, "NX");
+          if (lock === "OK") {
+            const data = await this.client.hgetall(`${this.prefix}schedule:${id}`);
+            if (data?.id && data.enabled === "true") {
+              try {
+                const serializedJob = JSON.parse(data.job);
+                const connection = data.connection || this.manager.getDefaultConnection();
+                const driver = this.manager.getDriver(connection);
+                await driver.push(data.queue, serializedJob);
+                const nextRun = import_cron_parser.default.parse(data.cron).next().getTime();
+                const pipe = this.client.pipeline();
+                pipe.hset(`${this.prefix}schedule:${id}`, {
+                  lastRun: now,
+                  nextRun
+                });
+                pipe.zadd(`${this.prefix}schedules`, nextRun, id);
+                await pipe.exec();
+                fired++;
+              } catch (err) {
+                console.error(`[Scheduler] Failed to process schedule ${id}:`, err);
+              }
+            }
+          }
+        }
+        return fired;
+      }
+    };
+  }
+});
+
 // src/index.ts
 var index_exports = {};
 __export(index_exports, {
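Each schedule is stored as a Redis hash (schedule:<id>) plus a schedules ZSET scored by nextRun; tick() fetches due ids, takes a per-second NX lock so concurrent workers fire a schedule only once, pushes the stored job, and re-arms nextRun via cron-parser. A hedged usage sketch (the manager instance, queue name, and job payload are assumptions):

const scheduler = manager.getScheduler(); // lazily built over the default connection's Redis client

await scheduler.register({
  id: "nightly-report",
  cron: "0 3 * * *",        // cron-parser computes the nextRun score
  queue: "reports",
  job: serializedReportJob,  // hypothetical: a payload produced by the manager's serializer
});

// tick() must be driven externally, e.g. once a minute per the comment above:
setInterval(() => scheduler.tick().catch(console.error), 60_000);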
@@ -693,10 +1196,14 @@ __export(index_exports, {
   JsonSerializer: () => JsonSerializer,
   KafkaDriver: () => KafkaDriver,
   MemoryDriver: () => MemoryDriver,
+  MySQLPersistence: () => MySQLPersistence,
   OrbitStream: () => OrbitStream,
   QueueManager: () => QueueManager,
+  RabbitMQDriver: () => RabbitMQDriver,
   RedisDriver: () => RedisDriver,
+  SQLitePersistence: () => SQLitePersistence,
   SQSDriver: () => SQSDriver,
+  Scheduler: () => Scheduler,
   Worker: () => Worker
 });
 module.exports = __toCommonJS(index_exports);
@@ -711,36 +1218,31 @@ var Worker = class {
    * @param job - Job instance
    */
   async process(job) {
-    const maxAttempts = this.options.maxAttempts ?? 3;
+    const maxAttempts = job.maxAttempts ?? this.options.maxAttempts ?? 3;
     const timeout = this.options.timeout;
-
-
-
-
-
-
-
-
-
-    (
-
-    timeout * 1e3
-    )
+    if (!job.attempts) {
+      job.attempts = 1;
+    }
+    try {
+      if (timeout) {
+        await Promise.race([
+          job.handle(),
+          new Promise(
+            (_, reject) => setTimeout(
+              () => reject(new Error(`Job timeout after ${timeout} seconds`)),
+              timeout * 1e3
             )
-
-
-
-
-
-
-
-
-
-        throw lastError;
-      }
-      const delay = Math.min(1e3 * 2 ** (attempt - 1), 3e4);
-      await new Promise((resolve) => setTimeout(resolve, delay));
+          )
+        ]);
+      } else {
+        await job.handle();
+      }
+    } catch (error) {
+      const err = error instanceof Error ? error : new Error(String(error));
+      if (job.attempts >= maxAttempts) {
+        await this.handleFailure(job, err);
       }
+      throw err;
     }
   }
   /**
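Worker.process no longer retries internally: it runs the handler once (under a Promise.race timeout when configured), calls handleFailure only once the attempt budget is spent, and rethrows so the Consumer can requeue. Per-job maxAttempts now takes precedence; a precedence sketch with illustrative values:

const workerOptions = { maxAttempts: 5, timeout: 30 };
const job = { maxAttempts: 2, attempts: 1 };
// job.maxAttempts ?? workerOptions.maxAttempts ?? 3
const effectiveMax = job.maxAttempts ?? workerOptions.maxAttempts ?? 3; // 2: the job's own cap wins
// The old in-place exponential sleep-and-retry loop is gone;
// requeue-with-delay now happens in the Consumer loop shown further below.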
@@ -770,6 +1272,11 @@ var Consumer = class {
   }
   running = false;
   stopRequested = false;
+  workerId = `worker-${Math.random().toString(36).substring(2, 8)}`;
+  heartbeatTimer = null;
+  get connectionName() {
+    return this.options.connection ?? this.queueManager.getDefaultConnection();
+  }
   /**
    * Start the consumer loop.
    */
@@ -784,18 +1291,72 @@ var Consumer = class {
     const keepAlive = this.options.keepAlive ?? true;
     console.log("[Consumer] Started", {
       queues: this.options.queues,
-      connection: this.options.connection
+      connection: this.options.connection,
+      workerId: this.workerId
     });
+    if (this.options.monitor) {
+      this.startHeartbeat();
+      await this.publishLog("info", `Consumer started on [${this.options.queues.join(", ")}]`);
+    }
     while (this.running && !this.stopRequested) {
       let processed = false;
       for (const queue of this.options.queues) {
+        if (this.options.rateLimits?.[queue]) {
+          const limit = this.options.rateLimits[queue];
+          try {
+            const driver = this.queueManager.getDriver(this.connectionName);
+            if (driver.checkRateLimit) {
+              const allowed = await driver.checkRateLimit(queue, limit);
+              if (!allowed) {
+                continue;
+              }
+            }
+          } catch (err) {
+            console.error(`[Consumer] Error checking rate limit for "${queue}":`, err);
+          }
+        }
         try {
           const job = await this.queueManager.pop(queue, this.options.connection);
           if (job) {
             processed = true;
-
-
-          }
+            if (this.options.monitor) {
+              await this.publishLog("info", `Processing job: ${job.id}`, job.id);
+            }
+            try {
+              await worker.process(job);
+              if (this.options.monitor) {
+                await this.publishLog("success", `Completed job: ${job.id}`, job.id);
+              }
+            } catch (err) {
+              console.error(`[Consumer] Error processing job in queue "${queue}":`, err);
+              if (this.options.monitor) {
+                await this.publishLog("error", `Job failed: ${job.id} - ${err.message}`, job.id);
+              }
+              const attempts = job.attempts ?? 1;
+              const maxAttempts = job.maxAttempts ?? this.options.workerOptions?.maxAttempts ?? 3;
+              if (attempts < maxAttempts) {
+                job.attempts = attempts + 1;
+                const delayMs = job.getRetryDelay(job.attempts);
+                const delaySec = Math.ceil(delayMs / 1e3);
+                job.delay(delaySec);
+                await this.queueManager.push(job);
+                if (this.options.monitor) {
+                  await this.publishLog(
+                    "warning",
+                    `Job retrying in ${delaySec}s (Attempt ${job.attempts}/${maxAttempts})`,
+                    job.id
+                  );
+                }
+              } else {
+                await this.queueManager.fail(job, err).catch((dlqErr) => {
+                  console.error(`[Consumer] Error moving job to DLQ:`, dlqErr);
+                });
+              }
+            } finally {
+              await this.queueManager.complete(job).catch((err) => {
+                console.error(`[Consumer] Error completing job in queue "${queue}":`, err);
+              });
+            }
+          }
         } catch (error) {
           console.error(`[Consumer] Error polling queue "${queue}":`, error);
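On a failed attempt the loop increments attempts, asks the job for its backoff (getRetryDelay, added further below), re-pushes the job with that delay, and only after the final attempt dead-letters it via queueManager.fail; complete() always runs in finally so Redis group-FIFO state is released either way. A condensed sketch of that branch (illustrative, not the exported API):

async function onJobError(job: any, err: Error, queueManager: any) {
  const attempts = job.attempts ?? 1;
  const maxAttempts = job.maxAttempts ?? 3;
  if (attempts < maxAttempts) {
    job.attempts = attempts + 1;
    const delaySec = Math.ceil(job.getRetryDelay(job.attempts) / 1000);
    job.delay(delaySec);               // requeue with backoff
    await queueManager.push(job);
  } else {
    await queueManager.fail(job, err); // out of attempts: move to the DLQ
  }
}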
@@ -804,13 +1365,83 @@ var Consumer = class {
     if (!processed && !keepAlive) {
       break;
     }
-    if (!this.stopRequested) {
+    if (!this.stopRequested && !processed) {
       await new Promise((resolve) => setTimeout(resolve, pollInterval));
+    } else if (!this.stopRequested && processed) {
+      await new Promise((resolve) => setTimeout(resolve, 0));
     }
   }
   this.running = false;
+  this.stopHeartbeat();
+  if (this.options.monitor) {
+    await this.publishLog("info", "Consumer stopped");
+  }
   console.log("[Consumer] Stopped");
 }
+startHeartbeat() {
+  const interval = typeof this.options.monitor === "object" ? this.options.monitor.interval ?? 5e3 : 5e3;
+  const monitorOptions = typeof this.options.monitor === "object" ? this.options.monitor : {};
+  this.heartbeatTimer = setInterval(async () => {
+    try {
+      const driver = this.queueManager.getDriver(this.connectionName);
+      if (driver.reportHeartbeat) {
+        const monitorPrefix = typeof this.options.monitor === "object" ? this.options.monitor.prefix : void 0;
+        const os = require("os");
+        const mem = process.memoryUsage();
+        const metrics = {
+          cpu: os.loadavg()[0],
+          // 1m load avg
+          cores: os.cpus().length,
+          ram: {
+            rss: Math.floor(mem.rss / 1024 / 1024),
+            heapUsed: Math.floor(mem.heapUsed / 1024 / 1024),
+            total: Math.floor(os.totalmem() / 1024 / 1024)
+          }
+        };
+        await driver.reportHeartbeat(
+          {
+            id: this.workerId,
+            status: "online",
+            hostname: os.hostname(),
+            pid: process.pid,
+            uptime: Math.floor(process.uptime()),
+            last_ping: (/* @__PURE__ */ new Date()).toISOString(),
+            queues: this.options.queues,
+            metrics,
+            ...monitorOptions.extraInfo || {}
+          },
+          monitorPrefix
+        );
+      }
+    } catch (_e) {
+    }
+  }, interval);
+}
+stopHeartbeat() {
+  if (this.heartbeatTimer) {
+    clearInterval(this.heartbeatTimer);
+    this.heartbeatTimer = null;
+  }
+}
+async publishLog(level, message, jobId) {
+  try {
+    const driver = this.queueManager.getDriver(this.connectionName);
+    if (driver.publishLog) {
+      const monitorPrefix = typeof this.options.monitor === "object" ? this.options.monitor.prefix : void 0;
+      await driver.publishLog(
+        {
+          level,
+          message,
+          workerId: this.workerId,
+          jobId,
+          timestamp: (/* @__PURE__ */ new Date()).toISOString()
+        },
+        monitorPrefix
+      );
+    }
+  } catch (_e) {
+  }
+}
 /**
  * Stop the consumer loop (graceful shutdown).
  */
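Heartbeats serialize worker identity and host metrics into <prefix>worker:<id> with a 10-second TTL every interval (default 5000 ms), so a dashboard sees a worker key expire roughly 10 s after the process dies. A config sketch for the monitor option (field names from the hunks above; values illustrative):

const consumerOptions = {
  queues: ["emails", "reports"],
  monitor: {
    interval: 5_000,                   // heartbeat period in ms
    prefix: "monitor:",                // overrides the driver prefix for worker/log keys
    extraInfo: { region: "eu-west-1" } // merged into the heartbeat payload
  },
};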
@@ -901,11 +1532,16 @@ var MemoryDriver = class {
 };

 // src/index.ts
+init_RabbitMQDriver();
 init_RedisDriver();
 init_SQSDriver();

 // src/Job.ts
 var Job = class {
+  /**
+   * Unique job identifier.
+   */
+  id;
   /**
    * Queue name.
    */
@@ -926,6 +1562,22 @@ var Job = class {
    * Maximum attempts.
    */
   maxAttempts;
+  /**
+   * Group ID for FIFO.
+   */
+  groupId;
+  /**
+   * Job priority.
+   */
+  priority;
+  /**
+   * Initial retry delay (seconds).
+   */
+  retryAfterSeconds;
+  /**
+   * Retry delay multiplier.
+   */
+  retryMultiplier;
   /**
    * Set target queue.
    */
@@ -940,6 +1592,14 @@ var Job = class {
     this.connectionName = connection;
     return this;
   }
+  /**
+   * Set job priority.
+   * @param priority - 'high', 'low', or number
+   */
+  withPriority(priority) {
+    this.priority = priority;
+    return this;
+  }
   /**
    * Set delay (seconds).
    */
@@ -947,6 +1607,26 @@ var Job = class {
     this.delaySeconds = delay;
     return this;
   }
+  /**
+   * Set retry backoff strategy.
+   * @param seconds - Initial delay in seconds
+   * @param multiplier - Multiplier for each subsequent attempt (default: 2)
+   */
+  backoff(seconds, multiplier = 2) {
+    this.retryAfterSeconds = seconds;
+    this.retryMultiplier = multiplier;
+    return this;
+  }
+  /**
+   * Calculate retry delay for the next attempt.
+   * @param attempt - Current attempt number (1-based)
+   * @returns Delay in milliseconds
+   */
+  getRetryDelay(attempt) {
+    const initialDelay = (this.retryAfterSeconds ?? 1) * 1e3;
+    const multiplier = this.retryMultiplier ?? 2;
+    return Math.min(initialDelay * multiplier ** (attempt - 1), 36e5);
+  }
   /**
    * Failure handler (optional).
    *
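getRetryDelay is exponential with a one-hour ceiling: an initial delay of retryAfterSeconds * 1000 (1 s if unset), multiplied by multiplier^(attempt - 1), capped at 3,600,000 ms. The schedule for job.backoff(5, 2), worked out:

const delay = (attempt: number) => Math.min(5_000 * 2 ** (attempt - 1), 3_600_000);
// attempt 1  ->     5_000 ms
// attempt 2  ->    10_000 ms
// attempt 3  ->    20_000 ms
// ...
// attempt 10 -> 2_560_000 ms
// attempt 11 -> 5_120_000 ms, capped to 3_600_000 ms (one hour)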
@@ -985,7 +1665,7 @@ var ClassNameSerializer = class {
    * Serialize a Job.
    */
   serialize(job) {
-    const id = `${Date.now()}-${Math.random().toString(36).substring(2, 9)}`;
+    const id = job.id || `${Date.now()}-${Math.random().toString(36).substring(2, 9)}`;
     const className = job.constructor.name;
     const properties = {};
     for (const key in job) {
@@ -1004,7 +1684,11 @@ var ClassNameSerializer = class {
       createdAt: Date.now(),
       ...job.delaySeconds !== void 0 ? { delaySeconds: job.delaySeconds } : {},
       attempts: job.attempts ?? 0,
-      ...job.maxAttempts !== void 0 ? { maxAttempts: job.maxAttempts } : {}
+      ...job.maxAttempts !== void 0 ? { maxAttempts: job.maxAttempts } : {},
+      ...job.groupId ? { groupId: job.groupId } : {},
+      ...job.retryAfterSeconds !== void 0 ? { retryAfterSeconds: job.retryAfterSeconds } : {},
+      ...job.retryMultiplier !== void 0 ? { retryMultiplier: job.retryMultiplier } : {},
+      ...job.priority !== void 0 ? { priority: job.priority } : {}
     };
   }
   /**
@@ -1028,6 +1712,7 @@ var ClassNameSerializer = class {
     if (parsed.properties) {
       Object.assign(job, parsed.properties);
     }
+    job.id = serialized.id;
     if (serialized.delaySeconds !== void 0) {
       job.delaySeconds = serialized.delaySeconds;
     }
@@ -1037,6 +1722,18 @@ var ClassNameSerializer = class {
     if (serialized.maxAttempts !== void 0) {
       job.maxAttempts = serialized.maxAttempts;
     }
+    if (serialized.groupId !== void 0) {
+      job.groupId = serialized.groupId;
+    }
+    if (serialized.retryAfterSeconds !== void 0) {
+      job.retryAfterSeconds = serialized.retryAfterSeconds;
+    }
+    if (serialized.retryMultiplier !== void 0) {
+      job.retryMultiplier = serialized.retryMultiplier;
+    }
+    if (serialized.priority !== void 0) {
+      job.priority = serialized.priority;
+    }
     return job;
   }
 };
@@ -1058,7 +1755,9 @@ var JsonSerializer = class {
       createdAt: Date.now(),
       ...job.delaySeconds !== void 0 ? { delaySeconds: job.delaySeconds } : {},
       attempts: job.attempts ?? 0,
-      ...job.maxAttempts !== void 0 ? { maxAttempts: job.maxAttempts } : {}
+      ...job.maxAttempts !== void 0 ? { maxAttempts: job.maxAttempts } : {},
+      ...job.groupId ? { groupId: job.groupId } : {},
+      ...job.priority ? { priority: job.priority } : {}
     };
   }
   /**
@@ -1074,6 +1773,12 @@ var JsonSerializer = class {
     const parsed = JSON.parse(serialized.data);
     const job = /* @__PURE__ */ Object.create({});
     Object.assign(job, parsed.properties);
+    if (serialized.groupId) {
+      job.groupId = serialized.groupId;
+    }
+    if (serialized.priority) {
+      job.priority = serialized.priority;
+    }
     return job;
   }
 };
@@ -1084,7 +1789,11 @@ var QueueManager = class {
   serializers = /* @__PURE__ */ new Map();
   defaultConnection;
   defaultSerializer;
+  persistence;
+  scheduler;
+  // Using any to avoid circular dependency or import issues for now
   constructor(config = {}) {
+    this.persistence = config.persistence;
     this.defaultConnection = config.default ?? "default";
     const serializerType = config.defaultSerializer ?? "class";
     if (serializerType === "class") {
@@ -1192,9 +1901,30 @@ var QueueManager = class {
         );
         break;
       }
+      case "rabbitmq": {
+        const { RabbitMQDriver: RabbitMQDriver2 } = (init_RabbitMQDriver(), __toCommonJS(RabbitMQDriver_exports));
+        const client = config.client;
+        if (!client) {
+          throw new Error(
+            "[QueueManager] RabbitMQDriver requires client. Please provide RabbitMQ connection/channel in connection config."
+          );
+        }
+        this.drivers.set(
+          name,
+          new RabbitMQDriver2({
+            // biome-ignore lint/suspicious/noExplicitAny: Dynamic driver loading requires type assertion
+            client,
+            // biome-ignore lint/suspicious/noExplicitAny: Dynamic driver config type
+            exchange: config.exchange,
+            // biome-ignore lint/suspicious/noExplicitAny: Dynamic driver config type
+            exchangeType: config.exchangeType
+          })
+        );
+        break;
+      }
       default:
         throw new Error(
-          `Driver "${driverType}" is not supported. Supported drivers: memory, database, redis, kafka, sqs`
+          `Driver "${driverType}" is not supported. Supported drivers: memory, database, redis, kafka, sqs, rabbitmq`
         );
     }
   }
@@ -1210,6 +1940,13 @@ var QueueManager = class {
     }
     return driver;
   }
+  /**
+   * Get the default connection name.
+   * @returns Default connection name
+   */
+  getDefaultConnection() {
+    return this.defaultConnection;
+  }
   /**
    * Get a serializer.
    * @param type - Serializer type
@@ -1239,6 +1976,7 @@ var QueueManager = class {
    *
    * @template T - The type of the job.
    * @param job - Job instance to push.
+   * @param options - Push options.
    * @returns The same job instance (for fluent chaining).
    *
    * @example
@@ -1246,13 +1984,22 @@ var QueueManager = class {
    * await manager.push(new SendEmailJob('user@example.com'));
    * ```
    */
-  async push(job) {
+  async push(job, options) {
     const connection = job.connectionName ?? this.defaultConnection;
     const queue = job.queueName ?? "default";
     const driver = this.getDriver(connection);
     const serializer = this.getSerializer();
     const serialized = serializer.serialize(job);
-
+    const pushOptions = { ...options };
+    if (job.priority) {
+      pushOptions.priority = job.priority;
+    }
+    await driver.push(queue, serialized, pushOptions);
+    if (this.persistence?.archiveEnqueued) {
+      this.persistence.adapter.archive(queue, serialized, "waiting").catch((err) => {
+        console.error("[QueueManager] Persistence archive failed (waiting):", err);
+      });
+    }
     return job;
   }
   /**
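push() now forwards the job's priority into driver options and, when a persistence config with archiveEnqueued is present, fire-and-forgets an archive of the serialized payload with status "waiting". A configuration sketch (hedged: the connections block is elided, db is an assumed Atlas instance, and SendEmailJob is the example class from the doc comment above):

import { QueueManager, MySQLPersistence } from "@gravito/stream";

const manager = new QueueManager({
  default: "default",
  persistence: {
    adapter: new MySQLPersistence(db), // adapter class appears later in this diff
    archiveEnqueued: true,             // archive as "waiting" on push (this hunk)
    archiveCompleted: true,            // archive as "completed" (see complete() below)
    archiveFailed: true,               // archive as "failed" (see fail() below)
  },
});

await manager.push(new SendEmailJob("user@example.com").withPriority("high"));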
@@ -1345,6 +2092,92 @@ var QueueManager = class {
     const driver = this.getDriver(connection);
     await driver.clear(queue);
   }
+  /**
+   * Mark a job as completed.
+   * @param job - Job instance
+   */
+  async complete(job) {
+    const connection = job.connectionName ?? this.defaultConnection;
+    const queue = job.queueName ?? "default";
+    const driver = this.getDriver(connection);
+    const serializer = this.getSerializer();
+    if (driver.complete) {
+      const serialized = serializer.serialize(job);
+      await driver.complete(queue, serialized);
+      if (this.persistence?.archiveCompleted) {
+        await this.persistence.adapter.archive(queue, serialized, "completed").catch((err) => {
+          console.error("[QueueManager] Persistence archive failed (completed):", err);
+        });
+      }
+    }
+  }
+  /**
+   * Mark a job as permanently failed.
+   * @param job - Job instance
+   * @param error - Error object
+   */
+  async fail(job, error) {
+    const connection = job.connectionName ?? this.defaultConnection;
+    const queue = job.queueName ?? "default";
+    const driver = this.getDriver(connection);
+    const serializer = this.getSerializer();
+    if (driver.fail) {
+      const serialized = serializer.serialize(job);
+      serialized.error = error.message;
+      serialized.failedAt = Date.now();
+      await driver.fail(queue, serialized);
+      if (this.persistence?.archiveFailed) {
+        await this.persistence.adapter.archive(queue, serialized, "failed").catch((err) => {
+          console.error("[QueueManager] Persistence archive failed (failed):", err);
+        });
+      }
+    }
+  }
+  /**
+   * Get the persistence adapter if configured.
+   */
+  getPersistence() {
+    return this.persistence?.adapter;
+  }
+  /**
+   * Get the scheduler if configured.
+   */
+  getScheduler() {
+    if (!this.scheduler) {
+      const { Scheduler: Scheduler2 } = (init_Scheduler(), __toCommonJS(Scheduler_exports));
+      this.scheduler = new Scheduler2(this);
+    }
+    return this.scheduler;
+  }
+  /**
+   * Get failed jobs from DLQ (if driver supports it).
+   */
+  async getFailed(queue, start = 0, end = -1, connection = this.defaultConnection) {
+    const driver = this.getDriver(connection);
+    if (driver.getFailed) {
+      return driver.getFailed(queue, start, end);
+    }
+    return [];
+  }
+  /**
+   * Retry failed jobs from DLQ (if driver supports it).
+   */
+  async retryFailed(queue, count = 1, connection = this.defaultConnection) {
+    const driver = this.getDriver(connection);
+    if (driver.retryFailed) {
+      return driver.retryFailed(queue, count);
+    }
+    return 0;
+  }
+  /**
+   * Clear failed jobs from DLQ (if driver supports it).
+   */
+  async clearFailed(queue, connection = this.defaultConnection) {
+    const driver = this.getDriver(connection);
+    if (driver.clearFailed) {
+      await driver.clearFailed(queue);
+    }
+  }
 };

 // src/OrbitStream.ts
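The new DLQ surface on QueueManager simply delegates to drivers that implement it (the Redis driver above does) and falls back gracefully otherwise. A usage sketch (queue name illustrative):

const failed = await manager.getFailed("emails", 0, 9); // newest ten dead-lettered payloads
const retried = await manager.retryFailed("emails", 5); // re-push up to five, attempts reset to 0
if (retried === 0) {
  await manager.clearFailed("emails");                  // or drop the DLQ outright
}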
@@ -1425,6 +2258,453 @@ var OrbitStream = class _OrbitStream {
|
|
|
1425
2258
|
return this.queueManager;
|
|
1426
2259
|
}
|
|
1427
2260
|
};
|
|
2261
|
+
|
|
2262
|
+
// src/persistence/MySQLPersistence.ts
|
|
2263
|
+
var import_atlas = require("@gravito/atlas");
|
|
2264
|
+
var MySQLPersistence = class {
|
|
2265
|
+
/**
|
|
2266
|
+
* @param db - An Atlas DB instance or compatible QueryBuilder.
|
|
2267
|
+
* @param table - The name of the table to store archived jobs.
|
|
2268
|
+
*/
|
|
2269
|
+
constructor(db, table = "flux_job_archive", logsTable = "flux_system_logs") {
|
|
2270
|
+
this.db = db;
|
|
2271
|
+
this.table = table;
|
|
2272
|
+
this.logsTable = logsTable;
|
|
2273
|
+
}
|
|
2274
|
+
/**
|
|
2275
|
+
* Archive a job.
|
|
2276
|
+
*/
|
|
2277
|
+
async archive(queue, job, status) {
|
|
2278
|
+
try {
|
|
2279
|
+
await this.db.table(this.table).insert({
|
|
2280
|
+
job_id: job.id,
|
|
2281
|
+
queue,
|
|
2282
|
+
status,
|
|
2283
|
+
payload: JSON.stringify(job),
|
|
2284
|
+
error: job.error || null,
|
|
2285
|
+
created_at: new Date(job.createdAt),
|
|
2286
|
+
archived_at: /* @__PURE__ */ new Date()
|
|
2287
|
+
});
|
|
2288
|
+
} catch (err) {
|
|
2289
|
+
console.error(`[MySQLPersistence] Failed to archive job ${job.id}:`, err);
|
|
2290
|
+
}
|
|
2291
|
+
}
|
|
2292
|
+
/**
|
|
2293
|
+
* Find a specific job in the archive.
|
|
2294
|
+
*/
|
|
2295
|
+
async find(queue, id) {
|
|
2296
|
+
const row = await this.db.table(this.table).where("queue", queue).where("job_id", id).first();
|
|
2297
|
+
if (!row) {
|
|
2298
|
+
return null;
|
|
2299
|
+
}
|
|
2300
|
+
try {
|
|
2301
|
+
const job = typeof row.payload === "string" ? JSON.parse(row.payload) : row.payload;
|
|
2302
|
+
return job;
|
|
2303
|
+
} catch (_e) {
|
|
2304
|
+
return null;
|
|
2305
|
+
}
|
|
2306
|
+
}
|
|
2307
|
+
/**
|
|
2308
|
+
* List jobs from the archive.
|
|
2309
|
+
*/
|
|
2310
|
+
async list(queue, options = {}) {
|
|
2311
|
+
let query = this.db.table(this.table).where("queue", queue);
|
|
2312
|
+
if (options.status) {
|
|
2313
|
+
query = query.where("status", options.status);
|
|
2314
|
+
}
|
|
2315
|
+
if (options.jobId) {
|
|
2316
|
+
query = query.where("job_id", options.jobId);
|
|
2317
|
+
}
|
|
2318
|
+
if (options.startTime) {
|
|
2319
|
+
query = query.where("archived_at", ">=", options.startTime);
|
|
2320
|
+
}
|
|
2321
|
+
if (options.endTime) {
|
|
2322
|
+
query = query.where("archived_at", "<=", options.endTime);
|
|
2323
|
+
}
|
|
2324
|
+
const rows = await query.orderBy("archived_at", "desc").limit(options.limit ?? 50).offset(options.offset ?? 0).get();
|
|
2325
|
+
return rows.map((r) => {
|
|
2326
|
+
try {
|
|
2327
|
+
const job = typeof r.payload === "string" ? JSON.parse(r.payload) : r.payload;
|
|
2328
|
+
return { ...job, _status: r.status, _archivedAt: r.archived_at };
|
|
2329
|
+
} catch (_e) {
|
|
2330
|
+
return null;
|
|
2331
|
+
}
|
|
2332
|
+
}).filter(Boolean);
|
|
2333
|
+
}
|
|
2334
|
+
/**
|
|
2335
|
+
* Search jobs from the archive.
|
|
2336
|
+
*/
|
|
2337
|
+
async search(query, options = {}) {
|
|
2338
|
+
let q = this.db.table(this.table);
|
|
2339
|
+
if (options.queue) {
|
|
2340
|
+
q = q.where("queue", options.queue);
|
|
2341
|
+
}
|
|
2342
|
+
const rows = await q.where((sub) => {
|
|
2343
|
+
sub.where("job_id", "like", `%${query}%`).orWhere("payload", "like", `%${query}%`).orWhere("error", "like", `%${query}%`);
|
|
2344
|
+
}).orderBy("archived_at", "desc").limit(options.limit ?? 50).offset(options.offset ?? 0).get();
|
|
2345
|
+
return rows.map((r) => {
|
|
2346
|
+
try {
|
|
2347
|
+
const job = typeof r.payload === "string" ? JSON.parse(r.payload) : r.payload;
|
|
2348
|
+
return { ...job, _status: r.status, _archivedAt: r.archived_at };
|
|
2349
|
+
} catch (_e) {
|
|
2350
|
+
return null;
|
|
2351
|
+
}
|
|
2352
|
+
}).filter(Boolean);
|
|
2353
|
+
}
|
|
2354
|
+
/**
|
|
2355
|
+
* Archive a system log message.
|
|
2356
|
+
*/
|
|
2357
|
+
async archiveLog(log) {
|
|
2358
|
+
try {
|
|
2359
|
+
await this.db.table(this.logsTable).insert({
|
|
2360
|
+
level: log.level,
|
|
2361
|
+
message: log.message,
|
|
2362
|
+
worker_id: log.workerId,
|
+        queue: log.queue || null,
+        timestamp: log.timestamp
+      });
+    } catch (err) {
+      console.error(`[MySQLPersistence] Failed to archive log:`, err.message);
+    }
+  }
+  /**
+   * List system logs from the archive.
+   */
+  async listLogs(options = {}) {
+    let query = this.db.table(this.logsTable);
+    if (options.level) query = query.where("level", options.level);
+    if (options.workerId) query = query.where("worker_id", options.workerId);
+    if (options.queue) query = query.where("queue", options.queue);
+    if (options.search) {
+      query = query.where("message", "like", `%${options.search}%`);
+    }
+    if (options.startTime) {
+      query = query.where("timestamp", ">=", options.startTime);
+    }
+    if (options.endTime) {
+      query = query.where("timestamp", "<=", options.endTime);
+    }
+    return await query.orderBy("timestamp", "desc").limit(options.limit ?? 50).offset(options.offset ?? 0).get();
+  }
+  /**
+   * Count system logs in the archive.
+   */
+  async countLogs(options = {}) {
+    let query = this.db.table(this.logsTable);
+    if (options.level) query = query.where("level", options.level);
+    if (options.workerId) query = query.where("worker_id", options.workerId);
+    if (options.queue) query = query.where("queue", options.queue);
+    if (options.search) {
+      query = query.where("message", "like", `%${options.search}%`);
+    }
+    if (options.startTime) {
+      query = query.where("timestamp", ">=", options.startTime);
+    }
+    if (options.endTime) {
+      query = query.where("timestamp", "<=", options.endTime);
+    }
+    const result = await query.count("id as total").first();
+    return result?.total || 0;
+  }
+  /**
+   * Remove old records from the archive.
+   */
+  async cleanup(days) {
+    const threshold = /* @__PURE__ */ new Date();
+    threshold.setDate(threshold.getDate() - days);
+    const [jobsDeleted, logsDeleted] = await Promise.all([
+      this.db.table(this.table).where("archived_at", "<", threshold).delete(),
+      this.db.table(this.logsTable).where("timestamp", "<", threshold).delete()
+    ]);
+    return (jobsDeleted || 0) + (logsDeleted || 0);
+  }
+  /**
+   * Count jobs in the archive.
+   */
+  async count(queue, options = {}) {
+    let query = this.db.table(this.table).where("queue", queue);
+    if (options.status) {
+      query = query.where("status", options.status);
+    }
+    if (options.jobId) {
+      query = query.where("job_id", options.jobId);
+    }
+    if (options.startTime) {
+      query = query.where("archived_at", ">=", options.startTime);
+    }
+    if (options.endTime) {
+      query = query.where("archived_at", "<=", options.endTime);
+    }
+    const result = await query.count("id as total").first();
+    return result?.total || 0;
+  }
+  /**
+   * Helper to create the necessary tables.
+   */
+  async setupTable() {
+    await Promise.all([this.setupJobsTable(), this.setupLogsTable()]);
+  }
+  async setupJobsTable() {
+    const exists = await import_atlas.Schema.hasTable(this.table);
+    if (exists) return;
+    await import_atlas.Schema.create(this.table, (table) => {
+      table.id();
+      table.string("job_id", 64);
+      table.string("queue", 128);
+      table.string("status", 20);
+      table.json("payload");
+      table.text("error").nullable();
+      table.timestamp("created_at").nullable();
+      table.timestamp("archived_at").default(import_atlas.DB.raw("CURRENT_TIMESTAMP"));
+      table.index(["queue", "archived_at"]);
+      table.index(["queue", "job_id"]);
+      table.index(["status", "archived_at"]);
+      table.index(["archived_at"]);
+    });
+    console.log(`[MySQLPersistence] Created jobs archive table: ${this.table}`);
+  }
+  async setupLogsTable() {
+    const exists = await import_atlas.Schema.hasTable(this.logsTable);
+    if (exists) return;
+    await import_atlas.Schema.create(this.logsTable, (table) => {
+      table.id();
+      table.string("level", 20);
+      table.text("message");
+      table.string("worker_id", 128);
+      table.string("queue", 128).nullable();
+      table.timestamp("timestamp").default(import_atlas.DB.raw("CURRENT_TIMESTAMP"));
+      table.index(["worker_id"]);
+      table.index(["queue"]);
+      table.index(["level"]);
+      table.index(["timestamp"]);
+    });
+    console.log(`[MySQLPersistence] Created logs archive table: ${this.logsTable}`);
+  }
+};
+
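Taken together, the methods above give MySQLPersistence a self-contained archive API: archive/find/list/search for jobs, archiveLog/listLogs/countLogs for system logs, count for totals, cleanup for retention, and setupTable to provision both tables. A minimal usage sketch, assuming MySQLPersistence accepts the same (db, table?, logsTable?) constructor that SQLitePersistence declares below, and that `db` is an already-configured Atlas DB instance; the queue name and async wrapper are illustrative:

const { MySQLPersistence } = require("@gravito/stream");

async function demo(db) {
  // Assumption: db is an Atlas DB instance backed by the MySQL driver.
  const persistence = new MySQLPersistence(db);
  await persistence.setupTable(); // creates the jobs and logs tables if missing
  const errors = await persistence.listLogs({ level: "error", limit: 20 });
  const failedTotal = await persistence.count("emails", { status: "failed" });
  const purged = await persistence.cleanup(30); // drop archive rows older than 30 days
  return { errors, failedTotal, purged };
}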
+// src/persistence/SQLitePersistence.ts
+var import_atlas2 = require("@gravito/atlas");
+var SQLitePersistence = class {
+  /**
+   * @param db - An Atlas DB instance (SQLite driver).
+   * @param table - The name of the table to store archived jobs.
+   */
+  constructor(db, table = "flux_job_archive", logsTable = "flux_system_logs") {
+    this.db = db;
+    this.table = table;
+    this.logsTable = logsTable;
+  }
+  /**
+   * Archive a job.
+   */
+  async archive(queue, job, status) {
+    try {
+      await this.db.table(this.table).insert({
+        job_id: job.id,
+        queue,
+        status,
+        payload: JSON.stringify(job),
+        error: job.error || null,
+        created_at: new Date(job.createdAt),
+        archived_at: /* @__PURE__ */ new Date()
+      });
+    } catch (err) {
+      console.error(`[SQLitePersistence] Failed to archive job ${job.id}:`, err.message);
+    }
+  }
+  /**
+   * Find a specific job in the archive.
+   */
+  async find(queue, id) {
+    const row = await this.db.table(this.table).where("queue", queue).where("job_id", id).first();
+    if (!row) {
+      return null;
+    }
+    try {
+      const job = typeof row.payload === "string" ? JSON.parse(row.payload) : row.payload;
+      return job;
+    } catch (_e) {
+      return null;
+    }
+  }
+  /**
+   * List jobs from the archive.
+   */
+  async list(queue, options = {}) {
+    let query = this.db.table(this.table).where("queue", queue);
+    if (options.status) {
+      query = query.where("status", options.status);
+    }
+    if (options.jobId) {
+      query = query.where("job_id", options.jobId);
+    }
+    if (options.startTime) {
+      query = query.where("archived_at", ">=", options.startTime);
+    }
+    if (options.endTime) {
+      query = query.where("archived_at", "<=", options.endTime);
+    }
+    const rows = await query.orderBy("archived_at", "desc").limit(options.limit ?? 50).offset(options.offset ?? 0).get();
+    return rows.map((r) => {
+      try {
+        const job = typeof r.payload === "string" ? JSON.parse(r.payload) : r.payload;
+        return { ...job, _status: r.status, _archivedAt: r.archived_at };
+      } catch (_e) {
+        return null;
+      }
+    }).filter(Boolean);
+  }
+  /**
+   * Search jobs from the archive.
+   */
+  async search(query, options = {}) {
+    let q = this.db.table(this.table);
+    if (options.queue) {
+      q = q.where("queue", options.queue);
+    }
+    const rows = await q.where((sub) => {
+      sub.where("job_id", "like", `%${query}%`).orWhere("payload", "like", `%${query}%`).orWhere("error", "like", `%${query}%`);
+    }).orderBy("archived_at", "desc").limit(options.limit ?? 50).offset(options.offset ?? 0).get();
+    return rows.map((r) => {
+      try {
+        const job = typeof r.payload === "string" ? JSON.parse(r.payload) : r.payload;
+        return { ...job, _status: r.status, _archivedAt: r.archived_at };
+      } catch (_e) {
+        return null;
+      }
+    }).filter(Boolean);
+  }
+  /**
+   * Archive a system log message.
+   */
+  async archiveLog(log) {
+    try {
+      await this.db.table(this.logsTable).insert({
+        level: log.level,
+        message: log.message,
+        worker_id: log.workerId,
+        queue: log.queue || null,
+        timestamp: log.timestamp
+      });
+    } catch (err) {
+      console.error(`[SQLitePersistence] Failed to archive log:`, err.message);
+    }
+  }
+  /**
+   * List system logs from the archive.
+   */
+  async listLogs(options = {}) {
+    let query = this.db.table(this.logsTable);
+    if (options.level) query = query.where("level", options.level);
+    if (options.workerId) query = query.where("worker_id", options.workerId);
+    if (options.queue) query = query.where("queue", options.queue);
+    if (options.search) {
+      query = query.where("message", "like", `%${options.search}%`);
+    }
+    if (options.startTime) {
+      query = query.where("timestamp", ">=", options.startTime);
+    }
+    if (options.endTime) {
+      query = query.where("timestamp", "<=", options.endTime);
+    }
+    return await query.orderBy("timestamp", "desc").limit(options.limit ?? 50).offset(options.offset ?? 0).get();
+  }
+  /**
+   * Count system logs in the archive.
+   */
+  async countLogs(options = {}) {
+    let query = this.db.table(this.logsTable);
+    if (options.level) query = query.where("level", options.level);
+    if (options.workerId) query = query.where("worker_id", options.workerId);
+    if (options.queue) query = query.where("queue", options.queue);
+    if (options.search) {
+      query = query.where("message", "like", `%${options.search}%`);
+    }
+    if (options.startTime) {
+      query = query.where("timestamp", ">=", options.startTime);
+    }
+    if (options.endTime) {
+      query = query.where("timestamp", "<=", options.endTime);
+    }
+    const result = await query.count("id as total").first();
+    return result?.total || 0;
+  }
+  /**
+   * Remove old records from the archive.
+   */
+  async cleanup(days) {
+    const threshold = /* @__PURE__ */ new Date();
+    threshold.setDate(threshold.getDate() - days);
+    const [jobsDeleted, logsDeleted] = await Promise.all([
+      this.db.table(this.table).where("archived_at", "<", threshold).delete(),
+      this.db.table(this.logsTable).where("timestamp", "<", threshold).delete()
+    ]);
+    return (jobsDeleted || 0) + (logsDeleted || 0);
+  }
+  /**
+   * Count jobs in the archive.
+   */
+  async count(queue, options = {}) {
+    let query = this.db.table(this.table).where("queue", queue);
+    if (options.status) {
+      query = query.where("status", options.status);
+    }
+    if (options.jobId) {
+      query = query.where("job_id", options.jobId);
+    }
+    if (options.startTime) {
+      query = query.where("archived_at", ">=", options.startTime);
+    }
+    if (options.endTime) {
+      query = query.where("archived_at", "<=", options.endTime);
+    }
+    const result = await query.count("id as total").first();
+    return result?.total || 0;
+  }
+  /**
+   * Set up tables for SQLite.
+   */
+  async setupTable() {
+    await Promise.all([this.setupJobsTable(), this.setupLogsTable()]);
+  }
+  async setupJobsTable() {
+    const exists = await import_atlas2.Schema.hasTable(this.table);
+    if (exists) return;
+    await import_atlas2.Schema.create(this.table, (table) => {
+      table.id();
+      table.string("job_id", 64);
+      table.string("queue", 128);
+      table.string("status", 20);
+      table.text("payload");
+      table.text("error").nullable();
+      table.timestamp("created_at").nullable();
+      table.timestamp("archived_at").nullable();
+      table.index(["queue", "archived_at"]);
+      table.index(["archived_at"]);
+    });
+    console.log(`[SQLitePersistence] Created jobs archive table: ${this.table}`);
+  }
+  async setupLogsTable() {
+    const exists = await import_atlas2.Schema.hasTable(this.logsTable);
+    if (exists) return;
+    await import_atlas2.Schema.create(this.logsTable, (table) => {
+      table.id();
+      table.string("level", 20);
+      table.text("message");
+      table.string("worker_id", 128);
+      table.string("queue", 128).nullable();
+      table.timestamp("timestamp");
+      table.index(["worker_id"]);
+      table.index(["queue"]);
+      table.index(["level"]);
+      table.index(["timestamp"]);
+    });
+    console.log(`[SQLitePersistence] Created logs archive table: ${this.logsTable}`);
+  }
+};
+
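The SQLitePersistence class above mirrors MySQLPersistence across the methods visible in this diff. The notable schema difference is that the MySQL tables default archived_at and timestamp to CURRENT_TIMESTAMP via DB.raw, while the SQLite tables leave those columns plain and rely on the insert paths (archive and archiveLog always supply values). Because both adapters expose the same surface, they can be swapped behind a small factory; a sketch, where the dialect string is a hypothetical configuration value:

const { MySQLPersistence, SQLitePersistence } = require("@gravito/stream");

// Pick an archive backend by dialect; both classes expose archive, find,
// list, search, archiveLog, listLogs, countLogs, count, cleanup, setupTable.
function makePersistence(db, dialect) {
  return dialect === "sqlite" ? new SQLitePersistence(db) : new MySQLPersistence(db);
}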
+// src/index.ts
+init_Scheduler();
 // Annotate the CommonJS export names for ESM import in node:
 0 && (module.exports = {
   ClassNameSerializer,
@@ -1434,9 +2714,13 @@ var OrbitStream = class _OrbitStream {
   JsonSerializer,
   KafkaDriver,
   MemoryDriver,
+  MySQLPersistence,
   OrbitStream,
   QueueManager,
+  RabbitMQDriver,
   RedisDriver,
+  SQLitePersistence,
   SQSDriver,
+  Scheduler,
   Worker
 });
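Beta.3 widens the public export map with MySQLPersistence, RabbitMQDriver, SQLitePersistence, and Scheduler, as the added lines in the hunk above show. A quick smoke test of the new names; the identifiers come straight from this export list, and only the import style is assumed:

import {
  MySQLPersistence,
  RabbitMQDriver,
  Scheduler,
  SQLitePersistence
} from "@gravito/stream";

// These names are new in beta.3; they were not exported by beta.1.
console.log([MySQLPersistence, RabbitMQDriver, Scheduler, SQLitePersistence].map((c) => typeof c));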