@gravito/stream 2.0.1 → 2.0.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.cjs CHANGED
@@ -30,6 +30,310 @@ var __toESM = (mod, isNodeMode, target) => (target = mod != null ? __create(__ge
30
30
  ));
31
31
  var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
32
32
 
33
// src/drivers/BullMQDriver.ts
var BullMQDriver_exports = {};
__export(BullMQDriver_exports, {
  BullMQDriver: () => BullMQDriver
});
var BullMQDriver;
var init_BullMQDriver = __esm({
  "src/drivers/BullMQDriver.ts"() {
    "use strict";
    BullMQDriver = class {
      queue;
      prefix;
      debug;
      queueMap = /* @__PURE__ */ new Map();
      /**
       * Creates the driver around an existing Bull/BullMQ Queue instance.
       *
       * @param config - `{ queue, prefix?, debug? }`; `queue` is required,
       *   `prefix` defaults to "gravito:", `debug` defaults to false.
       * @throws {Error} When no queue instance is supplied.
       */
      constructor(config) {
        this.queue = config.queue;
        this.prefix = config.prefix ?? "gravito:";
        this.debug = config.debug ?? false;
        if (!this.queue) {
          throw new Error("[BullMQDriver] Bull Queue instance is required.");
        }
      }
      /**
       * Get or create a queue for the given queue name.
       * Falls back to the single configured queue when no per-name queue
       * has been registered in `queueMap`.
       */
      getQueue(queueName) {
        const fullName = `${this.prefix}${queueName}`;
        if (this.queueMap.has(fullName)) {
          return this.queueMap.get(fullName);
        }
        return this.queue;
      }
      /**
       * Build Bull JobOptions (priority only) from JobPushOptions.
       *
       * "high"/"critical" map to 1, "low" to 10, numbers are clamped to
       * [1, 10], any other non-null value maps to 5.
       */
      buildJobOptions(options) {
        const bullOptions = {};
        // `!= null` instead of truthiness so a numeric priority of 0 is not
        // silently dropped (it clamps to 1 below).
        if (options?.priority != null) {
          if (options.priority === "high" || options.priority === "critical") {
            bullOptions.priority = 1;
          } else if (options.priority === "low") {
            bullOptions.priority = 10;
          } else if (typeof options.priority === "number") {
            bullOptions.priority = Math.min(Math.max(options.priority, 1), 10);
          } else {
            bullOptions.priority = 5;
          }
        }
        return bullOptions;
      }
      /**
       * Compose the complete Bull options for one job: priority (from push
       * options), delay, retry attempts, exponential backoff and group id.
       * Shared by push() and pushMany() so both paths behave identically.
       */
      buildFullJobOptions(job, options) {
        const bullOptions = this.buildJobOptions(options);
        if (job.delaySeconds && job.delaySeconds > 0) {
          bullOptions.delay = job.delaySeconds * 1e3;
        }
        bullOptions.attempts = job.maxAttempts ?? 3;
        if (job.retryAfterSeconds) {
          bullOptions.backoff = {
            type: "exponential",
            delay: job.retryAfterSeconds * 1e3
          };
        }
        if (options?.groupId) {
          bullOptions.group = {
            id: options.groupId
          };
        }
        return bullOptions;
      }
      /**
       * Create Bull job data from SerializedJob.
       */
      createBullJobData(job) {
        return {
          id: job.id,
          type: job.type,
          data: job.data,
          className: job.className,
          createdAt: job.createdAt,
          delaySeconds: job.delaySeconds,
          attempts: job.attempts ?? 0,
          maxAttempts: job.maxAttempts ?? 3,
          groupId: job.groupId,
          retryAfterSeconds: job.retryAfterSeconds,
          retryMultiplier: job.retryMultiplier,
          error: job.error,
          failedAt: job.failedAt,
          priority: job.priority
        };
      }
      /**
       * Pushes a job to Bull Queue.
       *
       * @param queue - The queue name.
       * @param job - The serialized job.
       * @param options - Optional push options (priority, groupId).
       */
      async push(queue, job, options) {
        try {
          const q = this.getQueue(queue);
          const bullJobData = this.createBullJobData(job);
          const bullOptions = this.buildFullJobOptions(job, options);
          const namespacedJobName = `${queue}:${job.id}`;
          await q.add(namespacedJobName, bullJobData, bullOptions);
          if (this.debug) {
            console.log(`[BullMQDriver] Pushed job ${job.id} to queue ${queue}`);
          }
        } catch (error) {
          console.error(`[BullMQDriver] Failed to push job to queue ${queue}:`, error);
          throw error;
        }
      }
      /**
       * Pops a job from Bull Queue.
       * Note: Bull Queue typically uses Workers, not manual pop.
       * This is a fallback implementation that always yields null.
       */
      async pop(queue) {
        try {
          this.getQueue(queue);
          return null;
        } catch (error) {
          console.error(`[BullMQDriver] Failed to pop from queue ${queue}:`, error);
          return null;
        }
      }
      /**
       * Returns the size of the queue (0 on error or when unsupported).
       */
      async size(queue) {
        try {
          const q = this.getQueue(queue);
          const count = await q.count?.();
          return count ?? 0;
        } catch (error) {
          console.error(`[BullMQDriver] Failed to get queue size for ${queue}:`, error);
          return 0;
        }
      }
      /**
       * Clears the queue via `clean(0)` when the queue supports it.
       */
      async clear(queue) {
        try {
          const q = this.getQueue(queue);
          if (typeof q.clean === "function") {
            await q.clean(0);
          }
        } catch (error) {
          console.error(`[BullMQDriver] Failed to clear queue ${queue}:`, error);
          throw error;
        }
      }
      /**
       * Marks a job as failed (moves it to the failed list).
       *
       * @param queue - The queue name.
       * @param job - The failed job (its `error` text becomes the reason).
       */
      async fail(queue, job) {
        try {
          const q = this.getQueue(queue);
          const bullJob = await q.getJob?.(job.id);
          if (bullJob) {
            const error = job.error ?? "Job failed";
            const failureReasonError = new Error(error);
            // NOTE(review): BullMQ's moveToFailed signature is
            // (err, token, fetchNext); `true` is passed here as the second
            // argument — confirm against the Bull/BullMQ version in use.
            await bullJob.moveToFailed?.(failureReasonError, true);
          }
        } catch (error) {
          console.error(`[BullMQDriver] Failed to mark job as failed:`, error);
          throw error;
        }
      }
      /**
       * Returns detailed statistics for the queue.
       * On error returns a zeroed stats object.
       */
      async stats(queue) {
        try {
          const q = this.getQueue(queue);
          const counts = await q.getJobCounts?.(["active", "completed", "failed", "delayed", "waiting"]);
          const delayed = await q.getDelayedCount?.();
          const failed = await q.getFailedCount?.();
          const active = await q.getActiveCount?.();
          return {
            queue,
            size: counts?.waiting ?? 0,
            delayed: delayed ?? 0,
            failed: failed ?? 0,
            reserved: active ?? 0
          };
        } catch (error) {
          console.error(`[BullMQDriver] Failed to get stats for queue ${queue}:`, error);
          return {
            queue,
            size: 0,
            delayed: 0,
            failed: 0,
            // Keep the error-path shape consistent with the success path.
            reserved: 0
          };
        }
      }
      /**
       * Retrieves failed jobs from the Dead Letter Queue.
       * Not implemented for Bull — always returns an empty list.
       */
      async getFailed(queue, _start = 0, _end = -1) {
        try {
          this.getQueue(queue);
          return [];
        } catch (error) {
          console.error(`[BullMQDriver] Failed to get failed jobs for queue ${queue}:`, error);
          return [];
        }
      }
      /**
       * Retries failed jobs.
       * Not implemented for Bull — always returns 0.
       */
      async retryFailed(queue, _count = 1) {
        try {
          this.getQueue(queue);
          return 0;
        } catch (error) {
          console.error(`[BullMQDriver] Failed to retry jobs for queue ${queue}:`, error);
          return 0;
        }
      }
      /**
       * Clears the Dead Letter Queue (failed state) when supported.
       */
      async clearFailed(queue) {
        try {
          const q = this.getQueue(queue);
          if (typeof q.clean === "function") {
            await q.clean(0, void 0, "failed");
          }
        } catch (error) {
          console.error(`[BullMQDriver] Failed to clear failed jobs for queue ${queue}:`, error);
          throw error;
        }
      }
      /**
       * Creates a new queue/topic. No-op: Bull queues are created lazily.
       */
      async createTopic(_topic, _options) {
      }
      /**
       * Deletes a queue/topic by closing the underlying queue connection.
       */
      async deleteTopic(topic) {
        try {
          const q = this.getQueue(topic);
          await q.close?.();
        } catch (error) {
          console.error(`[BullMQDriver] Failed to delete queue ${topic}:`, error);
          throw error;
        }
      }
      /**
       * Pushes multiple jobs in batch.
       *
       * Applies the same per-job options (delay, attempts, backoff) as
       * push() — previously batch pushes silently dropped them — and uses
       * the native addBulk API when available.
       *
       * @param queue - The queue name.
       * @param jobs - Array of serialized jobs.
       */
      async pushMany(queue, jobs) {
        try {
          const q = this.getQueue(queue);
          const bullJobs = jobs.map((job) => ({
            name: `${queue}:${job.id}`,
            data: this.createBullJobData(job),
            opts: this.buildFullJobOptions(job, job.groupId ? { groupId: job.groupId } : void 0)
          }));
          if (typeof q.addBulk === "function") {
            await q.addBulk(bullJobs);
          } else {
            for (const bullJob of bullJobs) {
              await q.add(bullJob.name, bullJob.data, bullJob.opts);
            }
          }
        } catch (error) {
          console.error(`[BullMQDriver] Failed to push multiple jobs to queue ${queue}:`, error);
          throw error;
        }
      }
      /**
       * Pops multiple jobs in batch. Not supported — returns an empty list.
       */
      async popMany(_queue, _count) {
        return [];
      }
      /**
       * Reports worker heartbeat (debug logging only).
       */
      async reportHeartbeat(workerInfo, _prefix) {
        if (this.debug) {
          console.log(`[BullMQDriver] Worker heartbeat from ${workerInfo.id}`);
        }
      }
      /**
       * Publishes a log message (debug logging only).
       */
      async publishLog(logPayload, _prefix) {
        if (this.debug) {
          console.log(`[BullMQDriver] [${logPayload.level}] ${logPayload.message}`);
        }
      }
      /**
       * Checks rate limit for a queue. Not enforced — always allows.
       */
      async checkRateLimit(_queue, _config) {
        return true;
      }
      /**
       * Retrieves all queue names known to this driver.
       */
      async getQueues() {
        return ["default"];
      }
    };
  }
});
336
+
33
337
  // src/drivers/DatabaseDriver.ts
34
338
  var DatabaseDriver_exports = {};
35
339
  __export(DatabaseDriver_exports, {
@@ -52,7 +356,12 @@ var init_DatabaseDriver = __esm({
52
356
  }
53
357
  }
54
358
  /**
55
- * Push a job to a queue.
359
+ * Pushes a job to the database queue.
360
+ *
361
+ * Inserts a new row into the jobs table.
362
+ *
363
+ * @param queue - The queue name.
364
+ * @param job - The serialized job.
56
365
  */
57
366
  async push(queue, job) {
58
367
  const availableAt = job.delaySeconds ? new Date(Date.now() + job.delaySeconds * 1e3) : /* @__PURE__ */ new Date();
@@ -64,7 +373,13 @@ var init_DatabaseDriver = __esm({
64
373
  );
65
374
  }
66
375
  /**
67
- * Pop a job from the queue (FIFO, with delay support).
376
+ * Pops the next available job from the queue.
377
+ *
378
+ * Uses transactional locking (SELECT ... FOR UPDATE SKIP LOCKED if supported) to ensure
379
+ * atomic reservation of jobs by workers.
380
+ *
381
+ * @param queue - The queue name.
382
+ * @returns The job or `null`.
68
383
  */
69
384
  async pop(queue) {
70
385
  const result = await this.dbService.execute(
@@ -131,7 +446,10 @@ var init_DatabaseDriver = __esm({
131
446
  return job;
132
447
  }
133
448
  /**
134
- * Pop multiple jobs from the queue.
449
+ * Pops multiple jobs from the queue in a single transaction.
450
+ *
451
+ * @param queue - The queue name.
452
+ * @param count - Max jobs to pop.
135
453
  */
136
454
  async popMany(queue, count) {
137
455
  if (count <= 1) {
@@ -190,7 +508,9 @@ var init_DatabaseDriver = __esm({
190
508
  }
191
509
  }
192
510
  /**
193
- * Get queue statistics.
511
+ * Retrieves queue statistics by querying the table.
512
+ *
513
+ * @param queue - The queue name.
194
514
  */
195
515
  async stats(queue) {
196
516
  const failedQueue = `failed:${queue}`;
@@ -224,7 +544,9 @@ var init_DatabaseDriver = __esm({
224
544
  }
225
545
  }
226
546
  /**
227
- * Get queue size.
547
+ * Returns the count of pending jobs.
548
+ *
549
+ * @param queue - The queue name.
228
550
  */
229
551
  async size(queue) {
230
552
  const result = await this.dbService.execute(
@@ -238,14 +560,18 @@ var init_DatabaseDriver = __esm({
238
560
  return result?.[0]?.count ?? 0;
239
561
  }
240
562
  /**
241
- * Clear a queue.
563
+ * Clears the queue by deleting all rows for the queue.
564
+ *
565
+ * @param queue - The queue name.
242
566
  */
243
567
  async clear(queue) {
244
568
  await this.dbService.execute(`DELETE FROM ${this.tableName} WHERE queue = $1`, [queue]);
245
569
  }
246
570
  /**
247
- * Pop a job from the queue (blocking).
248
- * Simple polling fallback for databases.
571
+ * Pops a job using a polling loop (Blocking simulation).
572
+ *
573
+ * @param queue - The queue name.
574
+ * @param timeout - Timeout in seconds.
249
575
  */
250
576
  async popBlocking(queue, timeout) {
251
577
  const start = Date.now();
@@ -258,12 +584,14 @@ var init_DatabaseDriver = __esm({
258
584
  if (timeout > 0 && Date.now() - start >= timeoutMs) {
259
585
  return null;
260
586
  }
261
- await new Promise((resolve) => setTimeout(resolve, 1e3));
587
+ await new Promise((resolve2) => setTimeout(resolve2, 1e3));
262
588
  }
263
589
  }
264
590
  /**
265
- * Push multiple jobs.
266
- * Optimizes by using a single multi-row insert if possible.
591
+ * Pushes multiple jobs using a transaction.
592
+ *
593
+ * @param queue - The queue name.
594
+ * @param jobs - Array of jobs.
267
595
  */
268
596
  async pushMany(queue, jobs) {
269
597
  if (jobs.length === 0) {
@@ -285,7 +613,10 @@ var init_DatabaseDriver = __esm({
285
613
  });
286
614
  }
287
615
  /**
288
- * Mark a job as failed (DLQ).
616
+ * Marks a job as permanently failed by moving it to the DLQ (separate logical queue in DB).
617
+ *
618
+ * @param queue - The queue name.
619
+ * @param job - The failed job.
289
620
  */
290
621
  async fail(queue, job) {
291
622
  const failedQueue = `failed:${queue}`;
@@ -297,7 +628,10 @@ var init_DatabaseDriver = __esm({
297
628
  );
298
629
  }
299
630
  /**
300
- * Acknowledge/Complete a job.
631
+ * Deletes a job row from the database (completion).
632
+ *
633
+ * @param _queue - The queue name (unused).
634
+ * @param job - The job to complete.
301
635
  */
302
636
  async complete(_queue, job) {
303
637
  if (!job.id) {
@@ -351,7 +685,10 @@ var init_KafkaDriver = __esm({
351
685
  return this.admin;
352
686
  }
353
687
  /**
354
- * Push a job to a topic.
688
+ * Pushes a job to a Kafka topic.
689
+ *
690
+ * @param queue - The topic name.
691
+ * @param job - The job to publish.
355
692
  */
356
693
  async push(queue, job) {
357
694
  const producer = await this.ensureProducer();
@@ -376,30 +713,37 @@ var init_KafkaDriver = __esm({
376
713
  });
377
714
  }
378
715
  /**
379
- * Pop is not supported for Kafka.
716
+ * Pop is not supported for Kafka (Push-based).
717
+ *
718
+ * Kafka consumers typically stream messages. Use `subscribe()` instead.
380
719
  *
381
- * Note: Kafka uses a push-based model, so you should use `subscribe()`.
720
+ * @throws {Error} Always throws as Kafka does not support polling individual messages in this manner.
382
721
  */
383
722
  async pop(_queue) {
384
723
  throw new Error("[KafkaDriver] Kafka uses push-based model. Use subscribe() instead of pop().");
385
724
  }
386
725
  /**
387
- * Kafka does not provide a direct queue size.
726
+ * Returns 0 as Kafka does not expose a simple "queue size".
388
727
  *
389
- * Returns 0; use Kafka tooling/metrics for lag/size insights.
728
+ * Monitoring lag requires external tools or Admin API checks not implemented here.
390
729
  */
391
730
  async size(_queue) {
392
731
  return 0;
393
732
  }
394
733
  /**
395
- * Clear a queue by deleting the topic.
734
+ * Clears a queue by deleting the topic.
735
+ *
736
+ * @param queue - The topic name.
396
737
  */
397
738
  async clear(queue) {
398
739
  const admin = await this.ensureAdmin();
399
740
  await admin.deleteTopics({ topics: [queue] });
400
741
  }
401
742
  /**
402
- * Push multiple jobs.
743
+ * Pushes multiple jobs to a Kafka topic.
744
+ *
745
+ * @param queue - The topic name.
746
+ * @param jobs - Array of jobs.
403
747
  */
404
748
  async pushMany(queue, jobs) {
405
749
  if (jobs.length === 0) {
@@ -428,7 +772,10 @@ var init_KafkaDriver = __esm({
428
772
  });
429
773
  }
430
774
  /**
431
- * Create a topic.
775
+ * Creates a new Kafka topic.
776
+ *
777
+ * @param topic - The topic name.
778
+ * @param options - Config for partitions/replication.
432
779
  */
433
780
  async createTopic(topic, options) {
434
781
  const admin = await this.ensureAdmin();
@@ -443,13 +790,20 @@ var init_KafkaDriver = __esm({
443
790
  });
444
791
  }
445
792
  /**
446
- * Delete a topic.
793
+ * Deletes a Kafka topic.
794
+ *
795
+ * @param topic - The topic name.
447
796
  */
448
797
  async deleteTopic(topic) {
449
798
  await this.clear(topic);
450
799
  }
451
800
  /**
452
- * Subscribe to a topic (push-based model).
801
+ * Subscribes to a topic for streaming jobs.
802
+ *
803
+ * Starts a Kafka consumer group and processes messages as they arrive.
804
+ *
805
+ * @param queue - The topic name.
806
+ * @param callback - Function to handle the job.
453
807
  */
454
808
  async subscribe(queue, callback) {
455
809
  const consumer = this.client.consumer({ groupId: this.consumerGroupId });
@@ -535,7 +889,10 @@ var init_RabbitMQDriver = __esm({
535
889
  return this.connection;
536
890
  }
537
891
  /**
538
- * Push a job (sendToQueue / publish).
892
+ * Pushes a job to a RabbitMQ queue or exchange.
893
+ *
894
+ * @param queue - The queue name.
895
+ * @param job - The serialized job.
539
896
  */
540
897
  async push(queue, job) {
541
898
  const channel = await this.ensureChannel();
@@ -550,7 +907,9 @@ var init_RabbitMQDriver = __esm({
550
907
  }
551
908
  }
552
909
  /**
553
- * Pop a job (get).
910
+ * Pops a job from the queue.
911
+ *
912
+ * @param queue - The queue name.
554
913
  */
555
914
  async pop(queue) {
556
915
  const channel = await this.ensureChannel();
@@ -564,8 +923,10 @@ var init_RabbitMQDriver = __esm({
564
923
  return job;
565
924
  }
566
925
  /**
567
- * Pop multiple jobs.
568
- * Uses channel.get() in a loop (no native batch get in AMQP).
926
+ * Pops multiple jobs.
927
+ *
928
+ * @param queue - The queue name.
929
+ * @param count - Max jobs.
569
930
  */
570
931
  async popMany(queue, count) {
571
932
  const channel = await this.ensureChannel();
@@ -583,7 +944,9 @@ var init_RabbitMQDriver = __esm({
583
944
  return results;
584
945
  }
585
946
  /**
586
- * Acknowledge a message.
947
+ * Acknowledges a message.
948
+ *
949
+ * @param messageId - The message object (RabbitMQ requires object reference).
587
950
  */
588
951
  async acknowledge(messageId) {
589
952
  const channel = await this.ensureChannel();
@@ -606,7 +969,7 @@ var init_RabbitMQDriver = __esm({
606
969
  channel.reject(message, requeue);
607
970
  }
608
971
  /**
609
- * Subscribe to a queue.
972
+ * Subscribes to a queue.
610
973
  */
611
974
  async subscribe(queue, callback, options = {}) {
612
975
  const channel = await this.ensureChannel();
@@ -635,7 +998,9 @@ var init_RabbitMQDriver = __esm({
635
998
  );
636
999
  }
637
1000
  /**
638
- * Get queue size.
1001
+ * Returns the number of messages in the queue.
1002
+ *
1003
+ * @param queue - The queue name.
639
1004
  */
640
1005
  async size(queue) {
641
1006
  const channel = await this.ensureChannel();
@@ -643,7 +1008,9 @@ var init_RabbitMQDriver = __esm({
643
1008
  return ok.messageCount;
644
1009
  }
645
1010
  /**
646
- * Clear a queue.
1011
+ * Purges the queue.
1012
+ *
1013
+ * @param queue - The queue name.
647
1014
  */
648
1015
  async clear(queue) {
649
1016
  const channel = await this.ensureChannel();
@@ -691,7 +1058,7 @@ var init_RedisDriver = __esm({
691
1058
  local activeSet = KEYS[2]
692
1059
  local pendingList = KEYS[3]
693
1060
  local groupId = ARGV[1]
694
-
1061
+
695
1062
  local nextJob = redis.call('LPOP', pendingList)
696
1063
  if nextJob then
697
1064
  return redis.call('LPUSH', waitList, nextJob)
@@ -789,7 +1156,13 @@ var init_RedisDriver = __esm({
789
1156
  return `${this.prefix}${queue}`;
790
1157
  }
791
1158
  /**
792
- * Push a job (LPUSH).
1159
+ * Pushes a job to Redis.
1160
+ *
1161
+ * Handles regular jobs (LPUSH), delayed jobs (ZADD), and grouped jobs (custom Lua logic).
1162
+ *
1163
+ * @param queue - The queue name.
1164
+ * @param job - The serialized job.
1165
+ * @param options - Push options.
793
1166
  */
794
1167
  async push(queue, job, options) {
795
1168
  const key = this.getKey(queue, options?.priority);
@@ -810,6 +1183,9 @@ var init_RedisDriver = __esm({
810
1183
  failedAt: job.failedAt
811
1184
  };
812
1185
  const payload = JSON.stringify(payloadObj);
1186
+ if (typeof this.client.sadd === "function") {
1187
+ await this.client.sadd(`${this.prefix}queues`, queue);
1188
+ }
813
1189
  if (groupId && typeof this.client.pushGroupJob === "function") {
814
1190
  const activeSetKey = `${this.prefix}active`;
815
1191
  const pendingListKey = `${this.prefix}pending:${groupId}`;
@@ -829,7 +1205,12 @@ var init_RedisDriver = __esm({
829
1205
  }
830
1206
  }
831
1207
  /**
832
- * Complete a job (handle Group FIFO).
1208
+ * Completes a job.
1209
+ *
1210
+ * Crucial for Group FIFO logic to unlock the next job in the group.
1211
+ *
1212
+ * @param queue - The queue name.
1213
+ * @param job - The job to complete.
833
1214
  */
834
1215
  async complete(queue, job) {
835
1216
  if (!job.groupId) {
@@ -843,8 +1224,13 @@ var init_RedisDriver = __esm({
843
1224
  }
844
1225
  }
845
1226
  /**
846
- * Pop a job from a queue (non-blocking).
847
- * Optimized with Lua script for atomic priority polling.
1227
+ * Pops a job from the queue.
1228
+ *
1229
+ * Checks priorities in order (critical -> high -> default -> low).
1230
+ * Also checks for due delayed jobs and moves them to the active list.
1231
+ *
1232
+ * @param queue - The queue name.
1233
+ * @returns The job or `null`.
848
1234
  */
849
1235
  async pop(queue) {
850
1236
  const priorities = ["critical", "high", "default", "low"];
@@ -916,8 +1302,12 @@ var init_RedisDriver = __esm({
916
1302
  return null;
917
1303
  }
918
1304
  /**
919
- * Pop a job from the queue (blocking).
920
- * Uses BRPOP for efficiency. Supports multiple queues and priorities.
1305
+ * Pops a job using blocking Redis commands (BRPOP).
1306
+ *
1307
+ * Efficiently waits for a job to arrive without polling.
1308
+ *
1309
+ * @param queues - The queues to listen to.
1310
+ * @param timeout - Timeout in seconds.
921
1311
  */
922
1312
  async popBlocking(queues, timeout) {
923
1313
  const queueList = Array.isArray(queues) ? queues : [queues];
@@ -961,14 +1351,19 @@ var init_RedisDriver = __esm({
961
1351
  };
962
1352
  }
963
1353
  /**
964
- * Get queue size.
1354
+ * Returns the length of the queue (Redis List length).
1355
+ *
1356
+ * @param queue - The queue name.
965
1357
  */
966
1358
  async size(queue) {
967
1359
  const key = this.getKey(queue);
968
1360
  return this.client.llen(key);
969
1361
  }
970
1362
  /**
971
- * Mark a job as permanently failed (DLQ).
1363
+ * Marks a job as permanently failed by moving it to a DLQ list.
1364
+ *
1365
+ * @param queue - The queue name.
1366
+ * @param job - The failed job.
972
1367
  */
973
1368
  async fail(queue, job) {
974
1369
  const key = `${this.getKey(queue)}:failed`;
@@ -982,7 +1377,9 @@ var init_RedisDriver = __esm({
982
1377
  }
983
1378
  }
984
1379
  /**
985
- * Clear a queue.
1380
+ * Clears the queue and its associated delayed/active sets.
1381
+ *
1382
+ * @param queue - The queue name.
986
1383
  */
987
1384
  async clear(queue) {
988
1385
  const key = this.getKey(queue);
@@ -995,8 +1392,11 @@ var init_RedisDriver = __esm({
995
1392
  }
996
1393
  }
997
1394
  /**
998
- * Get queue statistics.
999
- * Optimized with Redis Pipeline to fetch all priorities and DLQ stats in one trip.
1395
+ * Retrieves full stats for the queue using Redis Pipelining.
1396
+ *
1397
+ * Aggregates counts from all priority lists and the DLQ.
1398
+ *
1399
+ * @param queue - The queue name.
1000
1400
  */
1001
1401
  async stats(queue) {
1002
1402
  const priorities = ["critical", "high", "default", "low"];
@@ -1041,7 +1441,12 @@ var init_RedisDriver = __esm({
1041
1441
  return stats;
1042
1442
  }
1043
1443
  /**
1044
- * Push multiple jobs.
1444
+ * Pushes multiple jobs to the queue.
1445
+ *
1446
+ * Uses pipeline for batch efficiency. Falls back to individual pushes if complex logic (groups/priority) is involved.
1447
+ *
1448
+ * @param queue - The queue name.
1449
+ * @param jobs - Array of jobs.
1045
1450
  */
1046
1451
  async pushMany(queue, jobs) {
1047
1452
  if (jobs.length === 0) {
@@ -1113,8 +1518,12 @@ var init_RedisDriver = __esm({
1113
1518
  await this.client.lpush(key, ...payloads);
1114
1519
  }
1115
1520
  /**
1116
- * Pop multiple jobs.
1117
- * Atomic operation across multiple priority levels.
1521
+ * Pops multiple jobs from the queue.
1522
+ *
1523
+ * Uses a Lua script for atomic retrieval across priorities.
1524
+ *
1525
+ * @param queue - The queue name.
1526
+ * @param count - Max jobs to pop.
1118
1527
  */
1119
1528
  async popMany(queue, count) {
1120
1529
  if (count <= 0) {
@@ -1192,7 +1601,9 @@ var init_RedisDriver = __esm({
1192
1601
  return results;
1193
1602
  }
1194
1603
  /**
1195
- * Report worker heartbeat for monitoring.
1604
+ * Reports a worker heartbeat.
1605
+ *
1606
+ * Stores worker metadata in a key with an expiration (TTL).
1196
1607
  */
1197
1608
  async reportHeartbeat(workerInfo, prefix) {
1198
1609
  const key = `${prefix ?? this.prefix}worker:${workerInfo.id}`;
@@ -1201,7 +1612,9 @@ var init_RedisDriver = __esm({
1201
1612
  }
1202
1613
  }
1203
1614
  /**
1204
- * Publish a log message for monitoring.
1615
+ * Publishes monitoring logs.
1616
+ *
1617
+ * Uses Redis Pub/Sub for real-time logs and a capped List for history.
1205
1618
  */
1206
1619
  async publishLog(logPayload, prefix) {
1207
1620
  const payload = JSON.stringify(logPayload);
@@ -1220,8 +1633,12 @@ var init_RedisDriver = __esm({
1220
1633
  }
1221
1634
  }
1222
1635
  /**
1223
- * Check if a queue is rate limited.
1224
- * Uses a fixed window counter.
1636
+ * Checks the rate limit for a queue.
1637
+ *
1638
+ * Uses a simple Fixed Window counter (INCR + EXPIRE).
1639
+ *
1640
+ * @param queue - The queue name.
1641
+ * @param config - Rate limit rules.
1225
1642
  */
1226
1643
  async checkRateLimit(queue, config) {
1227
1644
  const key = `${this.prefix}${queue}:ratelimit`;
@@ -1239,7 +1656,11 @@ var init_RedisDriver = __esm({
1239
1656
  return true;
1240
1657
  }
1241
1658
  /**
1242
- * Get failed jobs from DLQ.
1659
+ * Retrieves failed jobs from the DLQ.
1660
+ *
1661
+ * @param queue - The queue name.
1662
+ * @param start - Start index.
1663
+ * @param end - End index.
1243
1664
  */
1244
1665
  async getFailed(queue, start = 0, end = -1) {
1245
1666
  const key = `${this.getKey(queue)}:failed`;
@@ -1250,8 +1671,12 @@ var init_RedisDriver = __esm({
1250
1671
  return payloads.map((p) => this.parsePayload(p));
1251
1672
  }
1252
1673
  /**
1253
- * Retry failed jobs from DLQ.
1254
- * Moves jobs from failed list back to the main queue.
1674
+ * Retries failed jobs.
1675
+ *
1676
+ * Pops from DLQ and pushes back to the active queue (RPOPLPUSH equivalent logic).
1677
+ *
1678
+ * @param queue - The queue name.
1679
+ * @param count - Jobs to retry.
1255
1680
  */
1256
1681
  async retryFailed(queue, count = 1) {
1257
1682
  const failedKey = `${this.getKey(queue)}:failed`;
@@ -1268,18 +1693,31 @@ var init_RedisDriver = __esm({
1268
1693
  job.attempts = 0;
1269
1694
  delete job.error;
1270
1695
  delete job.failedAt;
1696
+ delete job.priority;
1271
1697
  await this.push(queue, job, { priority: job.priority, groupId: job.groupId });
1272
1698
  retried++;
1273
1699
  }
1274
1700
  return retried;
1275
1701
  }
1276
1702
  /**
1277
- * Clear failed jobs from DLQ.
1703
+ * Clears the Dead Letter Queue.
1704
+ *
1705
+ * @param queue - The queue name.
1278
1706
  */
1279
1707
  async clearFailed(queue) {
1280
1708
  const key = `${this.getKey(queue)}:failed`;
1281
1709
  await this.client.del(key);
1282
1710
  }
1711
+ /**
1712
+ * Retrieves all discovered queue names from Redis.
1713
+ */
1714
+ async getQueues() {
1715
+ if (typeof this.client.smembers === "function") {
1716
+ const queues = await this.client.smembers(`${this.prefix}queues`);
1717
+ return Array.isArray(queues) ? queues.sort() : [];
1718
+ }
1719
+ return ["default"];
1720
+ }
1283
1721
  };
1284
1722
  }
1285
1723
  });
@@ -1326,7 +1764,10 @@ var init_SQSDriver = __esm({
1326
1764
  return queue;
1327
1765
  }
1328
1766
  /**
1329
- * Push a job to SQS.
1767
+ * Pushes a job to SQS.
1768
+ *
1769
+ * @param queue - The queue name (or URL).
1770
+ * @param job - The serialized job.
1330
1771
  */
1331
1772
  async push(queue, job) {
1332
1773
  const { SendMessageCommand } = await import("@aws-sdk/client-sqs");
@@ -1351,7 +1792,9 @@ var init_SQSDriver = __esm({
1351
1792
  );
1352
1793
  }
1353
1794
  /**
1354
- * Pop a job (long polling).
1795
+ * Pops a job from SQS (using long polling).
1796
+ *
1797
+ * @param queue - The queue name (or URL).
1355
1798
  */
1356
1799
  async pop(queue) {
1357
1800
  const { ReceiveMessageCommand } = await import("@aws-sdk/client-sqs");
@@ -1383,8 +1826,10 @@ var init_SQSDriver = __esm({
1383
1826
  };
1384
1827
  }
1385
1828
  /**
1386
- * Pop multiple jobs.
1387
- * Leverages SQS MaxNumberOfMessages (up to 10).
1829
+ * Pops multiple jobs (up to 10).
1830
+ *
1831
+ * @param queue - The queue name.
1832
+ * @param count - Max jobs (capped at 10 by SQS).
1388
1833
  */
1389
1834
  async popMany(queue, count) {
1390
1835
  const { ReceiveMessageCommand } = await import("@aws-sdk/client-sqs");
@@ -1417,7 +1862,9 @@ var init_SQSDriver = __esm({
1417
1862
  });
1418
1863
  }
1419
1864
  /**
1420
- * Get queue size (approximate).
1865
+ * Returns the approximate number of messages in the queue.
1866
+ *
1867
+ * @param queue - The queue name.
1421
1868
  */
1422
1869
  async size(queue) {
1423
1870
  const { GetQueueAttributesCommand } = await import("@aws-sdk/client-sqs");
@@ -1436,10 +1883,12 @@ var init_SQSDriver = __esm({
1436
1883
  }
1437
1884
  }
1438
1885
  /**
1439
- * Clear a queue by receiving and deleting messages.
1886
+ * Clears the queue by continuously receiving and deleting messages.
1440
1887
  *
1441
- * Note: SQS does not provide a direct "purge" API via this wrapper. This method will
1442
- * keep receiving and deleting messages until the queue is empty.
1888
+ * SQS does not have a "purge" command in the client data plane easily accessible here,
1889
+ * so we drain the queue.
1890
+ *
1891
+ * @param queue - The queue name.
1443
1892
  */
1444
1893
  async clear(queue) {
1445
1894
  const { DeleteMessageCommand } = await import("@aws-sdk/client-sqs");
@@ -1460,7 +1909,10 @@ var init_SQSDriver = __esm({
1460
1909
  }
1461
1910
  }
1462
1911
  /**
1463
- * Push multiple jobs.
1912
+ * Pushes multiple jobs using SQS batch API.
1913
+ *
1914
+ * @param queue - The queue name.
1915
+ * @param jobs - Array of jobs.
1464
1916
  */
1465
1917
  async pushMany(queue, jobs) {
1466
1918
  if (jobs.length === 0) {
@@ -1497,13 +1949,16 @@ var init_SQSDriver = __esm({
1497
1949
  }
1498
1950
  }
1499
1951
  /**
1500
- * Acknowledge is not supported via messageId.
1952
+ * Throws error as SQS requires ReceiptHandle, not just MessageId.
1501
1953
  */
1502
1954
  async acknowledge(_messageId) {
1503
1955
  throw new Error("[SQSDriver] Use deleteMessage() with ReceiptHandle instead of acknowledge().");
1504
1956
  }
1505
1957
  /**
1506
- * Delete a message (acknowledge processing completion).
1958
+ * Deletes a message using its ReceiptHandle (ACK).
1959
+ *
1960
+ * @param queue - The queue name.
1961
+ * @param receiptHandle - The SQS receipt handle.
1507
1962
  */
1508
1963
  async deleteMessage(queue, receiptHandle) {
1509
1964
  const { DeleteMessageCommand } = await import("@aws-sdk/client-sqs");
@@ -1519,42 +1974,254 @@ var init_SQSDriver = __esm({
1519
1974
  }
1520
1975
  });
1521
1976
 
1522
- // src/persistence/BufferedPersistence.ts
1523
- var BufferedPersistence_exports = {};
1524
- __export(BufferedPersistence_exports, {
1525
- BufferedPersistence: () => BufferedPersistence
1526
- });
1527
- var BufferedPersistence;
1528
- var init_BufferedPersistence = __esm({
1529
- "src/persistence/BufferedPersistence.ts"() {
1977
+ // src/locks/DistributedLock.ts
1978
+ var DistributedLock;
1979
+ var init_DistributedLock = __esm({
1980
+ "src/locks/DistributedLock.ts"() {
1530
1981
  "use strict";
1531
- BufferedPersistence = class {
1532
- constructor(adapter, options = {}) {
1533
- this.adapter = adapter;
1534
- this.maxBufferSize = options.maxBufferSize ?? 50;
1535
- this.flushInterval = options.flushInterval ?? 5e3;
1536
- }
1537
- jobBuffer = [];
1538
- logBuffer = [];
1539
- flushTimer = null;
1540
- maxBufferSize;
1541
- flushInterval;
1542
- async archive(queue, job, status) {
1543
- this.jobBuffer.push({ queue, job, status });
1544
- if (this.jobBuffer.length >= this.maxBufferSize) {
1545
- this.flush().catch((err) => {
1546
- console.error("[BufferedPersistence] Auto-flush failed (jobs):", err.message || err);
1547
- });
1548
- } else {
1549
- this.ensureFlushTimer();
1550
- }
1551
- }
1552
- async find(queue, id) {
1553
- return this.adapter.find(queue, id);
1982
DistributedLock = class {
  /**
   * Unique token identifying this lock holder. It is stored as the lock's
   * value so that release/refresh only ever act on locks owned by this
   * instance.
   */
  lockId = crypto.randomUUID();
  /**
   * Handle of the periodic renewal timer, or null when renewal is inactive.
   */
  refreshTimer = null;
  /**
   * Key of the lock this instance last acquired, or null when none is held.
   */
  currentLockKey = null;
  /**
   * Creates a DistributedLock backed by the given Redis client.
   *
   * @param client - Redis client instance. Must support SET, DEL, and EVAL commands.
   */
  constructor(client) {
    this.client = client;
  }
  /**
   * Tries to take the lock for `key` atomically via `SET key value EX ttl NX`,
   * retrying up to `options.retryCount` additional times with
   * `options.retryDelay` milliseconds between attempts. When acquisition
   * succeeds and `options.refreshInterval` is set, automatic TTL renewal
   * starts immediately.
   *
   * @param key - The lock key; use a meaningful resource identifier.
   * @param options - `ttl` (ms), `retryCount`, `retryDelay` (ms) and optional `refreshInterval` (ms).
   * @returns `true` if the lock was acquired, `false` otherwise.
   * @throws {Error} If the Redis client does not support the SET command.
   */
  async acquire(key, options) {
    if (typeof this.client.set !== "function") {
      throw new Error("[DistributedLock] Redis client does not support SET command");
    }
    const ttlSeconds = Math.ceil(options.ttl / 1e3);
    for (let attempt = 0; attempt <= options.retryCount; attempt++) {
      try {
        const reply = await this.client.set(key, this.lockId, "EX", ttlSeconds, "NX");
        if (reply === "OK") {
          this.currentLockKey = key;
          if (options.refreshInterval) {
            this.startRefresh(key, options);
          }
          return true;
        }
      } catch (error) {
        const err = error instanceof Error ? error : new Error(String(error));
        console.error(`[DistributedLock] Failed to acquire lock for ${key}:`, err.message);
      }
      // Back off before the next attempt, unless this was the final one.
      if (attempt < options.retryCount) {
        await this.sleep(options.retryDelay);
      }
    }
    return false;
  }
  /**
   * Releases the lock for `key` if — and only if — it is still owned by this
   * instance. A Lua compare-and-delete guards against removing a lock that
   * expired and was re-acquired by another node. Auto-renewal is stopped
   * first in every case.
   *
   * @param key - The lock key to release.
   * @throws {Error} If the Redis client does not support the EVAL command.
   */
  async release(key) {
    this.stopRefresh();
    if (typeof this.client.eval !== "function") {
      throw new Error("[DistributedLock] Redis client does not support EVAL command");
    }
    const script = `
      if redis.call("get", KEYS[1]) == ARGV[1] then
        return redis.call("del", KEYS[1])
      else
        return 0
      end
    `;
    try {
      await this.client.eval(script, 1, key, this.lockId);
      this.currentLockKey = null;
    } catch (error) {
      const err = error instanceof Error ? error : new Error(String(error));
      console.error(`[DistributedLock] Failed to release lock for ${key}:`, err.message);
    }
  }
  /**
   * Begins periodically extending the lock's TTL so long-running work does
   * not lose the lock. Renewal uses a Lua compare-and-expire so only locks
   * still owned by this instance are extended; renewal stops itself once
   * ownership has been lost.
   *
   * @param key - The lock key.
   * @param options - Lock options carrying `refreshInterval`; `ttl` is reused for each extension.
   */
  startRefresh(key, options) {
    if (!options.refreshInterval) {
      return;
    }
    this.stopRefresh();
    const ttlSeconds = Math.ceil(options.ttl / 1e3);
    const renew = async () => {
      if (typeof this.client.eval !== "function") {
        console.error("[DistributedLock] Redis client does not support EVAL command for refresh");
        return;
      }
      const script = `
        if redis.call("get", KEYS[1]) == ARGV[1] then
          return redis.call("expire", KEYS[1], ARGV[2])
        else
          return 0
        end
      `;
      const outcome = await this.client.eval(script, 1, key, this.lockId, ttlSeconds);
      if (outcome === 0) {
        console.warn(
          `[DistributedLock] Lock ${key} no longer held by this instance, stopping refresh`
        );
        this.stopRefresh();
      }
    };
    this.refreshTimer = setInterval(() => {
      renew().catch((error) => {
        const err = error instanceof Error ? error : new Error(String(error));
        console.error(`[DistributedLock] Failed to refresh lock ${key}:`, err.message);
      });
    }, options.refreshInterval);
  }
  /**
   * Cancels the auto-renewal timer, if one is running.
   */
  stopRefresh() {
    if (this.refreshTimer) {
      clearInterval(this.refreshTimer);
      this.refreshTimer = null;
    }
  }
  /**
   * Resolves after the given delay.
   *
   * @param ms - Milliseconds to sleep.
   */
  sleep(ms) {
    return new Promise((resolve2) => setTimeout(resolve2, ms));
  }
  /**
   * Reports whether this instance currently believes it holds `key`.
   *
   * NOTE(review): this is local bookkeeping only — it does not consult
   * Redis, so a lock whose TTL has expired may still report as held.
   *
   * @param key - The lock key.
   * @returns `true` if `key` matches the last successfully acquired key.
   */
  isHeld(key) {
    return this.currentLockKey === key;
  }
};
2168
+ }
2169
+ });
2170
+
2171
+ // src/persistence/BufferedPersistence.ts
2172
+ var BufferedPersistence_exports = {};
2173
+ __export(BufferedPersistence_exports, {
2174
+ BufferedPersistence: () => BufferedPersistence
2175
+ });
2176
+ var BufferedPersistence;
2177
+ var init_BufferedPersistence = __esm({
2178
+ "src/persistence/BufferedPersistence.ts"() {
2179
+ "use strict";
2180
+ BufferedPersistence = class {
2181
+ constructor(adapter, options = {}) {
2182
+ this.adapter = adapter;
2183
+ this.maxBufferSize = options.maxBufferSize ?? 50;
2184
+ this.flushInterval = options.flushInterval ?? 5e3;
2185
+ }
2186
+ jobBuffer = [];
2187
+ logBuffer = [];
2188
+ flushTimer = null;
2189
+ maxBufferSize;
2190
+ flushInterval;
2191
+ /**
2192
+ * Buffers a job archive request.
2193
+ *
2194
+ * @param queue - The queue name.
2195
+ * @param job - The serialized job.
2196
+ * @param status - The final job status.
2197
+ */
2198
+ async archive(queue, job, status) {
2199
+ this.jobBuffer.push({ queue, job, status });
2200
+ if (this.jobBuffer.length >= this.maxBufferSize) {
2201
+ this.flush().catch((err) => {
2202
+ console.error("[BufferedPersistence] Auto-flush failed (jobs):", err.message || err);
2203
+ });
2204
+ } else {
2205
+ this.ensureFlushTimer();
2206
+ }
2207
+ }
2208
+ /**
2209
+ * Delegates find to the underlying adapter (no buffering for reads).
2210
+ */
2211
+ async find(queue, id) {
2212
+ return this.adapter.find(queue, id);
2213
+ }
2214
+ /**
2215
+ * Delegates list to the underlying adapter (no buffering for reads).
2216
+ */
2217
+ async list(queue, options) {
1556
2218
  return this.adapter.list(queue, options);
1557
2219
  }
2220
+ /**
2221
+ * Archives multiple jobs directly via the underlying adapter, bypassing the buffer.
2222
+ *
2223
+ * Uses the adapter's batch `archiveMany` when available, otherwise archives each job sequentially.
2224
+ */
1558
2225
  async archiveMany(jobs) {
1559
2226
  if (this.adapter.archiveMany) {
1560
2227
  return this.adapter.archiveMany(jobs);
@@ -1563,9 +2230,17 @@ var init_BufferedPersistence = __esm({
1563
2230
  await this.adapter.archive(item.queue, item.job, item.status);
1564
2231
  }
1565
2232
  }
2233
+ /**
2234
+ * Delegates cleanup to the underlying adapter.
2235
+ */
1566
2236
  async cleanup(days) {
1567
2237
  return this.adapter.cleanup(days);
1568
2238
  }
2239
+ /**
2240
+ * Flushes all buffered data to the underlying adapter.
2241
+ *
2242
+ * Uses `archiveMany` and `archiveLogMany` if supported by the adapter for batch efficiency.
2243
+ */
1569
2244
  async flush() {
1570
2245
  if (this.flushTimer) {
1571
2246
  clearTimeout(this.flushTimer);
@@ -1604,9 +2279,15 @@ var init_BufferedPersistence = __esm({
1604
2279
  }
1605
2280
  await Promise.all(promises);
1606
2281
  }
2282
+ /**
2283
+ * Delegates count to the underlying adapter.
2284
+ */
1607
2285
  async count(queue, options) {
1608
2286
  return this.adapter.count(queue, options);
1609
2287
  }
2288
+ /**
2289
+ * Buffers a log message.
2290
+ */
1610
2291
  async archiveLog(log) {
1611
2292
  this.logBuffer.push(log);
1612
2293
  if (this.logBuffer.length >= this.maxBufferSize) {
@@ -1617,6 +2298,9 @@ var init_BufferedPersistence = __esm({
1617
2298
  this.ensureFlushTimer();
1618
2299
  }
1619
2300
  }
2301
+ /**
2302
+ * Archives multiple logs directly.
2303
+ */
1620
2304
  async archiveLogMany(logs) {
1621
2305
  if (this.adapter.archiveLogMany) {
1622
2306
  return this.adapter.archiveLogMany(logs);
@@ -1625,12 +2309,21 @@ var init_BufferedPersistence = __esm({
1625
2309
  await this.adapter.archiveLog(log);
1626
2310
  }
1627
2311
  }
2312
+ /**
2313
+ * Delegates listLogs to the underlying adapter.
2314
+ */
1628
2315
  async listLogs(options) {
1629
2316
  return this.adapter.listLogs(options);
1630
2317
  }
2318
+ /**
2319
+ * Delegates countLogs to the underlying adapter.
2320
+ */
1631
2321
  async countLogs(options) {
1632
2322
  return this.adapter.countLogs(options);
1633
2323
  }
2324
+ /**
2325
+ * Ensures the auto-flush timer is running.
2326
+ */
1634
2327
  ensureFlushTimer() {
1635
2328
  if (this.flushTimer) {
1636
2329
  return;
@@ -3294,6 +3987,9 @@ var init_MessagePackSerializer = __esm({
3294
3987
  );
3295
3988
  }
3296
3989
  }
3990
+ /**
3991
+ * Serialize a job using MessagePack.
3992
+ */
3297
3993
  serialize(job) {
3298
3994
  const id = job.id || `${Date.now()}-${crypto.randomUUID()}`;
3299
3995
  const properties = {};
@@ -3316,6 +4012,9 @@ var init_MessagePackSerializer = __esm({
3316
4012
  ...job.priority ? { priority: job.priority } : {}
3317
4013
  };
3318
4014
  }
4015
+ /**
4016
+ * Deserialize a MessagePack job.
4017
+ */
3319
4018
  deserialize(serialized) {
3320
4019
  if (serialized.type !== "msgpack") {
3321
4020
  throw new Error('Invalid serialization type: expected "msgpack"');
@@ -3353,12 +4052,29 @@ var init_Scheduler = __esm({
3353
4052
  "src/Scheduler.ts"() {
3354
4053
  "use strict";
3355
4054
  import_cron_parser = __toESM(require("cron-parser"), 1);
4055
+ init_DistributedLock();
3356
4056
  Scheduler = class {
3357
4057
  constructor(manager, options = {}) {
3358
4058
  this.manager = manager;
3359
4059
  this.prefix = options.prefix ?? "queue:";
4060
+ this.lockTtl = options.lockTtl ?? 6e4;
4061
+ this.lockRefreshInterval = options.lockRefreshInterval ?? 2e4;
4062
+ this.lockRetryCount = options.lockRetryCount ?? 0;
4063
+ this.lockRetryDelay = options.lockRetryDelay ?? 100;
4064
+ this.tickInterval = options.tickInterval ?? 6e4;
4065
+ this.leaderTtl = options.leaderTtl ?? 3e4;
3360
4066
  }
3361
4067
  prefix;
4068
+ lockTtl;
4069
+ lockRefreshInterval;
4070
+ lockRetryCount;
4071
+ lockRetryDelay;
4072
+ tickInterval;
4073
+ leaderTtl;
4074
+ distributedLock;
4075
+ running = false;
4076
+ timer = null;
4077
+ isLeader = false;
3362
4078
  get client() {
3363
4079
  const driver = this.manager.getDriver(this.manager.getDefaultConnection());
3364
4080
  if (!driver || !("client" in driver)) {
@@ -3367,7 +4083,23 @@ var init_Scheduler = __esm({
3367
4083
  return driver.client;
3368
4084
  }
3369
4085
  /**
3370
- * Register a scheduled job.
4086
+ * Gets or creates the distributed lock instance.
4087
+ *
4088
+ * @private
4089
+ */
4090
+ getDistributedLock() {
4091
+ if (!this.distributedLock) {
4092
+ this.distributedLock = new DistributedLock(this.client);
4093
+ }
4094
+ return this.distributedLock;
4095
+ }
4096
+ /**
4097
+ * Registers a new scheduled job or updates an existing one.
4098
+ *
4099
+ * Calculates the next run time based on the CRON expression and stores the configuration in Redis.
4100
+ *
4101
+ * @param config - The job configuration (excluding nextRun and enabled status which are auto-set).
4102
+ * @throws {Error} If Redis client does not support pipelining.
3371
4103
  */
3372
4104
  async register(config) {
3373
4105
  const nextRun = import_cron_parser.default.parse(config.cron).next().getTime();
@@ -3389,7 +4121,11 @@ var init_Scheduler = __esm({
3389
4121
  await pipe.exec();
3390
4122
  }
3391
4123
  /**
3392
- * Remove a scheduled job.
4124
+ * Removes a scheduled job.
4125
+ *
4126
+ * Deletes the job metadata and schedule entry from Redis.
4127
+ *
4128
+ * @param id - The unique identifier of the scheduled job.
3393
4129
  */
3394
4130
  async remove(id) {
3395
4131
  const client = this.client;
@@ -3402,7 +4138,9 @@ var init_Scheduler = __esm({
3402
4138
  await pipe.exec();
3403
4139
  }
3404
4140
  /**
3405
- * List all scheduled jobs.
4141
+ * Lists all registered scheduled jobs.
4142
+ *
4143
+ * @returns An array of all scheduled job configurations.
3406
4144
  */
3407
4145
  async list() {
3408
4146
  const client = this.client;
@@ -3426,7 +4164,77 @@ var init_Scheduler = __esm({
3426
4164
  return configs;
3427
4165
  }
3428
4166
  /**
3429
- * Run a scheduled job immediately (out of schedule).
4167
+ * Starts the automatic scheduler loop.
4168
+ *
4169
+ * Periodically triggers `tick()` to process due jobs. Uses leader election
4170
+ * to ensure that only one node performs the scanning in a multi-node environment.
4171
+ */
4172
+ async start() {
4173
+ if (this.running) {
4174
+ return;
4175
+ }
4176
+ this.running = true;
4177
+ const loop = async () => {
4178
+ if (!this.running) {
4179
+ return;
4180
+ }
4181
+ try {
4182
+ await this.performTickWithLeaderElection();
4183
+ } catch (err) {
4184
+ console.error("[Scheduler] Loop error:", err);
4185
+ }
4186
+ this.timer = setTimeout(loop, this.tickInterval);
4187
+ };
4188
+ loop();
4189
+ }
4190
+ /**
4191
+ * Stops the automatic scheduler loop.
4192
+ */
4193
+ async stop() {
4194
+ this.running = false;
4195
+ if (this.timer) {
4196
+ clearTimeout(this.timer);
4197
+ this.timer = null;
4198
+ }
4199
+ if (this.isLeader) {
4200
+ await this.releaseLeader();
4201
+ }
4202
+ }
4203
+ /**
4204
+ * Acquires the leader lock and performs a tick.
4205
+ *
4206
+ * @private
4207
+ */
4208
+ async performTickWithLeaderElection() {
4209
+ const lock = this.getDistributedLock();
4210
+ const leaderKey = `${this.prefix}scheduler:leader`;
4211
+ this.isLeader = await lock.acquire(leaderKey, {
4212
+ ttl: this.leaderTtl,
4213
+ refreshInterval: Math.floor(this.leaderTtl / 3),
4214
+ retryCount: 0,
4215
+ retryDelay: 0
4216
+ });
4217
+ if (this.isLeader) {
4218
+ await this.tick();
4219
+ }
4220
+ }
4221
+ /**
4222
+ * Releases the leader lock.
4223
+ *
4224
+ * @private
4225
+ */
4226
+ async releaseLeader() {
4227
+ const lock = this.getDistributedLock();
4228
+ const leaderKey = `${this.prefix}scheduler:leader`;
4229
+ await lock.release(leaderKey);
4230
+ this.isLeader = false;
4231
+ }
4232
+ /**
4233
+ * Manually triggers a scheduled job immediately.
4234
+ *
4235
+ * Forces execution of the job regardless of its schedule, without affecting the next scheduled run time.
4236
+ *
4237
+ * @param id - The unique identifier of the scheduled job.
3430
4238
  */
3431
4239
  async runNow(id) {
3432
4240
  const client = this.client;
@@ -3439,8 +4247,16 @@ var init_Scheduler = __esm({
3439
4247
  }
3440
4248
  }
3441
4249
  /**
3442
- * Process due tasks (TICK).
3443
- * This should be called periodically (e.g. every minute).
4250
+ * Checks for and triggers tasks that are due for execution.
4251
+ *
4252
+ * This method should be called periodically (e.g., via a system cron or a dedicated tick loop).
4253
+ * It scans the schedule for tasks with `nextRun <= now`, acquires a distributed lock for each,
4254
+ * pushes them to their queue, and updates the `nextRun` time.
4255
+ *
4256
+ * The distributed lock ensures that in a multi-node environment, each scheduled job is executed
4257
+ * only once per interval, even if multiple scheduler instances are running.
4258
+ *
4259
+ * @returns The number of jobs triggered in this tick.
3444
4260
  */
3445
4261
  async tick() {
3446
4262
  const client = this.client;
@@ -3450,35 +4266,42 @@ var init_Scheduler = __esm({
3450
4266
  const now = Date.now();
3451
4267
  const dueIds = await client.zrangebyscore(`${this.prefix}schedules`, 0, now);
3452
4268
  let fired = 0;
4269
+ const lock = this.getDistributedLock();
3453
4270
  for (const id of dueIds) {
3454
4271
  const lockKey = `${this.prefix}lock:schedule:${id}:${Math.floor(now / 1e3)}`;
3455
- if (typeof client.set !== "function") {
3456
- continue;
3457
- }
3458
- const lock = await client.set(lockKey, "1", "EX", 10, "NX");
3459
- if (lock === "OK") {
3460
- const data = await client.hgetall?.(`${this.prefix}schedule:${id}`);
3461
- if (data?.id && data.enabled === "true") {
3462
- try {
3463
- const serializedJob = JSON.parse(data.job);
3464
- const connection = data.connection || this.manager.getDefaultConnection();
3465
- const driver = this.manager.getDriver(connection);
3466
- await driver.push(data.queue, serializedJob);
3467
- const nextRun = import_cron_parser.default.parse(data.cron).next().getTime();
3468
- if (typeof client.pipeline === "function") {
3469
- const pipe = client.pipeline();
3470
- pipe.hset(`${this.prefix}schedule:${id}`, {
3471
- lastRun: now,
3472
- nextRun
3473
- });
3474
- pipe.zadd(`${this.prefix}schedules`, nextRun, id);
3475
- await pipe.exec();
4272
+ const acquired = await lock.acquire(lockKey, {
4273
+ ttl: this.lockTtl,
4274
+ retryCount: this.lockRetryCount,
4275
+ retryDelay: this.lockRetryDelay,
4276
+ refreshInterval: this.lockRefreshInterval
4277
+ });
4278
+ if (acquired) {
4279
+ try {
4280
+ const data = await client.hgetall?.(`${this.prefix}schedule:${id}`);
4281
+ if (data?.id && data.enabled === "true") {
4282
+ try {
4283
+ const serializedJob = JSON.parse(data.job);
4284
+ const connection = data.connection || this.manager.getDefaultConnection();
4285
+ const driver = this.manager.getDriver(connection);
4286
+ await driver.push(data.queue, serializedJob);
4287
+ const nextRun = import_cron_parser.default.parse(data.cron).next().getTime();
4288
+ if (typeof client.pipeline === "function") {
4289
+ const pipe = client.pipeline();
4290
+ pipe.hset(`${this.prefix}schedule:${id}`, {
4291
+ lastRun: now,
4292
+ nextRun
4293
+ });
4294
+ pipe.zadd(`${this.prefix}schedules`, nextRun, id);
4295
+ await pipe.exec();
4296
+ }
4297
+ fired++;
4298
+ } catch (err) {
4299
+ const error = err instanceof Error ? err : new Error(String(err));
4300
+ console.error(`[Scheduler] Failed to process schedule ${id}:`, error.message);
3476
4301
  }
3477
- fired++;
3478
- } catch (err) {
3479
- const error = err instanceof Error ? err : new Error(String(err));
3480
- console.error(`[Scheduler] Failed to process schedule ${id}:`, error.message);
3481
4302
  }
4303
+ } finally {
4304
+ await lock.release(lockKey);
3482
4305
  }
3483
4306
  }
3484
4307
  }
@@ -3488,13 +4311,112 @@ var init_Scheduler = __esm({
3488
4311
  }
3489
4312
  });
3490
4313
 
4314
+ // src/DashboardProvider.ts
4315
+ var DashboardProvider_exports = {};
4316
+ __export(DashboardProvider_exports, {
4317
+ DashboardProvider: () => DashboardProvider
4318
+ });
4319
+ var DashboardProvider;
4320
+ var init_DashboardProvider = __esm({
4321
+ "src/DashboardProvider.ts"() {
4322
+ "use strict";
4323
DashboardProvider = class {
  /**
   * Creates a dashboard provider bound to a queue manager.
   *
   * @param manager - Queue manager used to answer dashboard queries.
   */
  constructor(manager) {
    this.manager = manager;
  }
  /**
   * Mounts the dashboard HTTP API on the given core adapter.
   *
   * Routes registered (relative to `basePath`):
   *   GET  /stats       - global queue statistics
   *   GET  /queues      - flattened per-connection queue list
   *   GET  /jobs        - paginated archived jobs (requires persistence)
   *   POST /jobs/retry  - re-queue failed jobs
   *   GET  /logs        - paginated logs (requires persistence)
   *
   * @param core - The PlanetCore instance exposing `adapter`.
   * @param basePath - The base path for API routes (default: '/_flux').
   */
  registerRoutes(core, basePath = "/_flux") {
    const router = core.adapter;
    router.get(`${basePath}/stats`, async (c) => c.json(await this.manager.getGlobalStats()));
    router.get(`${basePath}/queues`, async (c) => {
      const stats = await this.manager.getGlobalStats();
      const flattened = [];
      for (const [connection, queueList] of Object.entries(stats.connections)) {
        for (const entry of queueList) {
          flattened.push({
            connection,
            name: entry.queue,
            size: entry.size,
            failed: entry.failed
          });
        }
      }
      return c.json(flattened);
    });
    router.get(`${basePath}/jobs`, async (c) => {
      const persistence = this.manager.getPersistence();
      if (!persistence) {
        return c.json({ error: "Persistence not configured" }, 400);
      }
      const queueName = c.req.query("queue") || "default";
      const rawStatus = c.req.query("status");
      const limit = parseInt(c.req.query("limit") || "50", 10);
      const offset = parseInt(c.req.query("offset") || "0", 10);
      // Accept either a single status value or a comma-separated list.
      let statuses;
      if (rawStatus) {
        statuses = rawStatus.includes(",") ? rawStatus.split(",") : rawStatus;
      }
      const [data, total] = await Promise.all([
        persistence.list(queueName, { status: statuses, limit, offset }),
        persistence.count(queueName, { status: statuses })
      ]);
      return c.json({ data, meta: { total, limit, offset } });
    });
    router.post(`${basePath}/jobs/retry`, async (c) => {
      const body = await c.req.json();
      if (!body.queue) {
        return c.json({ error: "Queue name is required" }, 400);
      }
      const retried = await this.manager.retryFailed(body.queue, body.count || 1);
      return c.json({ success: true, retried });
    });
    router.get(`${basePath}/logs`, async (c) => {
      const persistence = this.manager.getPersistence();
      if (!persistence) {
        return c.json({ error: "Persistence not configured" }, 400);
      }
      const limit = parseInt(c.req.query("limit") || "100", 10);
      const offset = parseInt(c.req.query("offset") || "0", 10);
      const level = c.req.query("level");
      const search = c.req.query("search");
      const [data, total] = await Promise.all([
        persistence.listLogs({ limit, offset, level, search }),
        persistence.countLogs({ level, search })
      ]);
      return c.json({ data, meta: { total, limit, offset } });
    });
  }
};
4406
+ }
4407
+ });
4408
+
3491
4409
  // src/index.ts
3492
4410
  var index_exports = {};
3493
4411
  __export(index_exports, {
4412
+ BatchConsumer: () => BatchConsumer,
3494
4413
  BufferedPersistence: () => BufferedPersistence,
4414
+ BullMQDriver: () => BullMQDriver,
3495
4415
  ClassNameSerializer: () => ClassNameSerializer,
3496
4416
  Consumer: () => Consumer,
3497
4417
  DatabaseDriver: () => DatabaseDriver,
4418
+ DistributedLock: () => DistributedLock,
4419
+ GrpcDriver: () => GrpcDriver,
3498
4420
  Job: () => Job,
3499
4421
  JsonSerializer: () => JsonSerializer,
3500
4422
  KafkaDriver: () => KafkaDriver,
@@ -3506,83 +4428,464 @@ __export(index_exports, {
3506
4428
  RedisDriver: () => RedisDriver,
3507
4429
  SQLitePersistence: () => SQLitePersistence,
3508
4430
  SQSDriver: () => SQSDriver,
4431
+ SandboxedWorker: () => SandboxedWorker,
3509
4432
  Scheduler: () => Scheduler,
3510
- Worker: () => Worker
4433
+ StreamEventBackend: () => StreamEventBackend,
4434
+ SystemEventJob: () => SystemEventJob,
4435
+ Worker: () => Worker,
4436
+ WorkerPool: () => WorkerPool
3511
4437
  });
3512
4438
  module.exports = __toCommonJS(index_exports);
3513
4439
 
3514
- // src/Consumer.ts
3515
- var import_node_events = require("events");
3516
- var import_p_limit = __toESM(require("p-limit"), 1);
3517
-
3518
- // src/Worker.ts
3519
- var Worker = class {
3520
- constructor(options = {}) {
3521
- this.options = options;
4440
+ // src/BatchConsumer.ts
4441
+ var BatchConsumer = class {
4442
+ constructor(manager, handler, options = {}) {
4443
+ this.manager = manager;
4444
+ this.handler = handler;
4445
+ this.options = {
4446
+ queue: "default",
4447
+ batchSize: 10,
4448
+ pollInterval: 1e3,
4449
+ autoAck: true,
4450
+ ...options
4451
+ };
3522
4452
  }
4453
+ running = false;
4454
+ options;
3523
4455
  /**
3524
- * Process a Job.
3525
- * @param job - Job instance
4456
+ * Starts the batch consuming loop.
4457
+ *
4458
+ * Continuously polls for batches of jobs and passes them to the handler.
3526
4459
  */
3527
- async process(job) {
3528
- const maxAttempts = job.maxAttempts ?? this.options.maxAttempts ?? 3;
3529
- const timeout = this.options.timeout;
3530
- if (!job.attempts) {
3531
- job.attempts = 1;
4460
+ async start() {
4461
+ if (this.running) {
4462
+ return;
3532
4463
  }
3533
- try {
3534
- if (timeout) {
3535
- await Promise.race([
3536
- job.handle(),
3537
- new Promise(
3538
- (_, reject) => setTimeout(
3539
- () => reject(new Error(`Job timeout after ${timeout} seconds`)),
3540
- timeout * 1e3
3541
- )
3542
- )
3543
- ]);
3544
- } else {
3545
- await job.handle();
3546
- }
3547
- } catch (error) {
3548
- const err = error instanceof Error ? error : new Error(String(error));
3549
- if (job.attempts >= maxAttempts) {
3550
- await this.handleFailure(job, err);
4464
+ this.running = true;
4465
+ const { queue, connection, batchSize, pollInterval, autoAck } = this.options;
4466
+ while (this.running) {
4467
+ try {
4468
+ const jobs = await this.manager.popMany(queue, batchSize, connection);
4469
+ if (jobs.length > 0) {
4470
+ try {
4471
+ await this.handler(jobs);
4472
+ if (autoAck) {
4473
+ await Promise.all(jobs.map((job) => this.manager.complete(job)));
4474
+ }
4475
+ } catch (error) {
4476
+ console.error(`[BatchConsumer] Batch processing failed:`, error);
4477
+ const err = error instanceof Error ? error : new Error(String(error));
4478
+ if (autoAck) {
4479
+ await Promise.all(jobs.map((job) => this.manager.fail(job, err)));
4480
+ }
4481
+ }
4482
+ } else {
4483
+ await new Promise((resolve2) => setTimeout(resolve2, pollInterval));
4484
+ }
4485
+ } catch (err) {
4486
+ console.error(`[BatchConsumer] Polling error:`, err);
4487
+ await new Promise((resolve2) => setTimeout(resolve2, pollInterval));
3551
4488
  }
3552
- throw err;
3553
4489
  }
3554
4490
  }
3555
4491
  /**
3556
- * Handle failure.
4492
+ * Stops the consumer loop.
4493
+ *
4494
+ * Sets the running flag to false. The loop will exit after the current iteration finishes.
3557
4495
  */
3558
- async handleFailure(job, error) {
3559
- try {
3560
- await job.failed(error);
3561
- } catch (failedError) {
3562
- console.error("[Worker] Error in job.failed():", failedError);
3563
- }
3564
- if (this.options.onFailed) {
3565
- try {
3566
- await this.options.onFailed(job, error);
3567
- } catch (callbackError) {
4496
+ stop() {
4497
+ this.running = false;
4498
+ }
4499
+ };
4500
+
4501
+ // src/Consumer.ts
4502
+ var import_node_events = require("events");
4503
+ var import_p_limit = __toESM(require("p-limit"), 1);
4504
+
4505
+ // src/workers/SandboxedWorker.ts
4506
+ var import_node_path = require("path");
4507
+ var import_node_worker_threads = require("worker_threads");
4508
+ var SandboxedWorker = class {
4509
+ worker = null;
4510
+ state = "initializing" /* INITIALIZING */;
4511
+ config;
4512
+ idleTimer = null;
4513
+ executionTimer = null;
4514
+ /**
4515
+ * Creates a SandboxedWorker instance.
4516
+ *
4517
+ * @param config - Configuration options for the worker.
4518
+ */
4519
+ constructor(config = {}) {
4520
+ this.config = {
4521
+ maxExecutionTime: config.maxExecutionTime ?? 3e4,
4522
+ maxMemory: config.maxMemory ?? 0,
4523
+ isolateContexts: config.isolateContexts ?? false,
4524
+ idleTimeout: config.idleTimeout ?? 6e4
4525
+ };
4526
+ }
4527
+ /**
4528
+ * Initializes the Worker Thread.
4529
+ *
4530
+ * @returns The active Worker Thread instance.
4531
+ * @throws {Error} If worker initialization fails or times out.
4532
+ */
4533
+ async initWorker() {
4534
+ if (this.worker && this.state !== "terminated" /* TERMINATED */) {
4535
+ return this.worker;
4536
+ }
4537
+ const fs = require("fs");
4538
+ let workerPath = (0, import_node_path.resolve)(__dirname, "job-executor.js");
4539
+ if (!fs.existsSync(workerPath)) {
4540
+ const tsPath = (0, import_node_path.resolve)(__dirname, "job-executor.ts");
4541
+ if (fs.existsSync(tsPath)) {
4542
+ workerPath = tsPath;
4543
+ }
4544
+ }
4545
+ const execArgv = process.execArgv.slice();
4546
+ if (workerPath.endsWith(".ts") && !process.env.BUN_BINARY_TARGET) {
4547
+ if (!execArgv.includes("--loader")) {
4548
+ execArgv.push("--loader", "ts-node/esm");
4549
+ }
4550
+ }
4551
+ const resourceLimits = {};
4552
+ if (this.config.maxMemory > 0) {
4553
+ resourceLimits.maxOldGenerationSizeMb = this.config.maxMemory;
4554
+ resourceLimits.maxYoungGenerationSizeMb = Math.min(this.config.maxMemory / 2, 128);
4555
+ }
4556
+ this.worker = new import_node_worker_threads.Worker(workerPath, {
4557
+ resourceLimits: Object.keys(resourceLimits).length > 0 ? resourceLimits : void 0,
4558
+ execArgv
4559
+ });
4560
+ this.state = "initializing" /* INITIALIZING */;
4561
+ await new Promise((resolve2, reject) => {
4562
+ const timeout = setTimeout(() => {
4563
+ reject(new Error("Worker initialization timeout"));
4564
+ }, 5e3);
4565
+ this.worker?.once("message", (message) => {
4566
+ clearTimeout(timeout);
4567
+ if (message.type === "ready") {
4568
+ this.state = "ready" /* READY */;
4569
+ resolve2();
4570
+ } else {
4571
+ reject(new Error("Unexpected worker message during initialization"));
4572
+ }
4573
+ });
4574
+ this.worker?.once("error", (error) => {
4575
+ clearTimeout(timeout);
4576
+ reject(error);
4577
+ });
4578
+ });
4579
+ this.worker.on("error", (error) => {
4580
+ console.error("[SandboxedWorker] Worker error:", error);
4581
+ this.state = "terminated" /* TERMINATED */;
4582
+ });
4583
+ this.worker.on("exit", (code) => {
4584
+ if (code !== 0) {
4585
+ console.error(`[SandboxedWorker] Worker exited with code ${code}`);
4586
+ }
4587
+ this.state = "terminated" /* TERMINATED */;
4588
+ });
4589
+ return this.worker;
4590
+ }
4591
+ /**
4592
+ * Executes a job in the sandboxed environment.
4593
+ *
4594
+ * @param job - The serialized job data to execute.
4595
+ * @throws {Error} If execution fails, times out, or the worker crashes.
4596
+ */
4597
+ async execute(job) {
4598
+ if (this.config.isolateContexts) {
4599
+ await this.terminate();
4600
+ }
4601
+ const worker = await this.initWorker();
4602
+ this.state = "busy" /* BUSY */;
4603
+ if (this.idleTimer) {
4604
+ clearTimeout(this.idleTimer);
4605
+ this.idleTimer = null;
4606
+ }
4607
+ try {
4608
+ await Promise.race([this.executeInWorker(worker, job), this.createTimeoutPromise()]);
4609
+ } finally {
4610
+ this.state = "ready" /* READY */;
4611
+ if (this.executionTimer) {
4612
+ clearTimeout(this.executionTimer);
4613
+ this.executionTimer = null;
4614
+ }
4615
+ if (!this.config.isolateContexts) {
4616
+ this.startIdleTimer();
4617
+ } else {
4618
+ await this.terminate();
4619
+ }
4620
+ }
4621
+ }
4622
+ /**
4623
+ * Internal method to send execution message to the worker thread.
4624
+ *
4625
+ * @param worker - The worker thread instance.
4626
+ * @param job - Job data.
4627
+ */
4628
+ executeInWorker(worker, job) {
4629
+ return new Promise((resolve2, reject) => {
4630
+ const messageHandler = (message) => {
4631
+ if (message.type === "success") {
4632
+ cleanup();
4633
+ resolve2();
4634
+ } else if (message.type === "error") {
4635
+ cleanup();
4636
+ const error = new Error(message.error || "Job execution failed");
4637
+ if (message.stack) {
4638
+ error.stack = message.stack;
4639
+ }
4640
+ reject(error);
4641
+ }
4642
+ };
4643
+ const errorHandler = (error) => {
4644
+ cleanup();
4645
+ reject(error);
4646
+ };
4647
+ const exitHandler = (code) => {
4648
+ cleanup();
4649
+ if (code !== 0) {
4650
+ reject(new Error(`Worker exited unexpectedly with code ${code}`));
4651
+ }
4652
+ };
4653
+ const cleanup = () => {
4654
+ worker.off("message", messageHandler);
4655
+ worker.off("error", errorHandler);
4656
+ worker.off("exit", exitHandler);
4657
+ };
4658
+ worker.on("message", messageHandler);
4659
+ worker.on("error", errorHandler);
4660
+ worker.on("exit", exitHandler);
4661
+ worker.postMessage({
4662
+ type: "execute",
4663
+ job
4664
+ });
4665
+ });
4666
+ }
4667
+ /**
4668
+ * Creates a promise that rejects after the configured timeout.
4669
+ */
4670
+ createTimeoutPromise() {
4671
+ return new Promise((_, reject) => {
4672
+ this.executionTimer = setTimeout(() => {
4673
+ this.terminate().catch(console.error);
4674
+ reject(new Error(`Job execution timeout after ${this.config.maxExecutionTime}ms`));
4675
+ }, this.config.maxExecutionTime);
4676
+ });
4677
+ }
4678
+ /**
4679
+ * Starts the idle timer to auto-terminate the worker.
4680
+ */
4681
+ startIdleTimer() {
4682
+ if (this.idleTimer) {
4683
+ clearTimeout(this.idleTimer);
4684
+ }
4685
+ this.idleTimer = setTimeout(() => {
4686
+ this.terminate().catch(console.error);
4687
+ }, this.config.idleTimeout);
4688
+ }
4689
+ /**
4690
+ * Terminates the Worker Thread immediately.
4691
+ *
4692
+ * Stops any running job and releases resources.
4693
+ */
4694
+ async terminate() {
4695
+ if (this.idleTimer) {
4696
+ clearTimeout(this.idleTimer);
4697
+ this.idleTimer = null;
4698
+ }
4699
+ if (this.executionTimer) {
4700
+ clearTimeout(this.executionTimer);
4701
+ this.executionTimer = null;
4702
+ }
4703
+ if (this.worker) {
4704
+ const worker = this.worker;
4705
+ this.worker = null;
4706
+ this.state = "terminated" /* TERMINATED */;
4707
+ try {
4708
+ await worker.terminate();
4709
+ } catch (error) {
4710
+ console.error("[SandboxedWorker] Error terminating worker:", error);
4711
+ }
4712
+ }
4713
+ }
4714
+ /**
4715
+ * Gets the current state of the worker.
4716
+ *
4717
+ * @returns The current `WorkerState`.
4718
+ */
4719
+ getState() {
4720
+ return this.state;
4721
+ }
4722
+ /**
4723
+ * Checks if the worker is ready to accept a job.
4724
+ *
4725
+ * @returns `true` if ready, `false` otherwise.
4726
+ */
4727
+ isReady() {
4728
+ return this.state === "ready" /* READY */;
4729
+ }
4730
+ /**
4731
+ * Checks if the worker is currently executing a job.
4732
+ *
4733
+ * @returns `true` if busy, `false` otherwise.
4734
+ */
4735
+ isBusy() {
4736
+ return this.state === "busy" /* BUSY */;
4737
+ }
4738
+ };
4739
+
4740
+ // src/Worker.ts
4741
+ var Worker = class {
4742
+ constructor(options = {}) {
4743
+ this.options = options;
4744
+ if (options.sandboxed) {
4745
+ this.sandboxedWorker = new SandboxedWorker(options.sandboxConfig);
4746
+ }
4747
+ }
4748
+ sandboxedWorker;
4749
+ /**
4750
+ * Processes a single job instance.
4751
+ *
4752
+ * 1. Checks attempt counts.
4753
+ * 2. Enforces execution timeout (if configured).
4754
+ * 3. Runs `job.handle()` (either directly or in a sandboxed Worker Thread).
4755
+ * 4. Catches errors and invokes failure handlers if max attempts are reached.
4756
+ *
4757
+ * @param job - The job to process.
4758
+ * @throws {Error} If the job execution fails (to trigger retry logic in the consumer).
4759
+ */
4760
+ async process(job) {
4761
+ const maxAttempts = job.maxAttempts ?? this.options.maxAttempts ?? 3;
4762
+ const timeout = this.options.timeout;
4763
+ if (!job.attempts) {
4764
+ job.attempts = 1;
4765
+ }
4766
+ try {
4767
+ if (this.options.sandboxed && this.sandboxedWorker) {
4768
+ await this.processSandboxed(job);
4769
+ } else {
4770
+ await this.processStandard(job, timeout);
4771
+ }
4772
+ } catch (error) {
4773
+ const err = error instanceof Error ? error : new Error(String(error));
4774
+ if (job.attempts >= maxAttempts) {
4775
+ await this.handleFailure(job, err);
4776
+ }
4777
+ throw err;
4778
+ }
4779
+ }
4780
+ /**
4781
+ * Processes a job in standard mode (directly in current process).
4782
+ *
4783
+ * @param job - The job to process.
4784
+ * @param timeout - Optional timeout in seconds.
4785
+ */
4786
+ async processStandard(job, timeout) {
4787
+ if (timeout) {
4788
+ await Promise.race([
4789
+ job.handle(),
4790
+ new Promise(
4791
+ (_, reject) => setTimeout(
4792
+ () => reject(new Error(`Job timeout after ${timeout} seconds`)),
4793
+ timeout * 1e3
4794
+ )
4795
+ )
4796
+ ]);
4797
+ } else {
4798
+ await job.handle();
4799
+ }
4800
+ }
4801
+ /**
4802
+ * Processes a job in sandboxed mode (in Worker Thread).
4803
+ *
4804
+ * @param job - The job to process.
4805
+ */
4806
+ async processSandboxed(job) {
4807
+ if (!this.sandboxedWorker) {
4808
+ throw new Error("Sandboxed worker not initialized");
4809
+ }
4810
+ const serialized = this.serializeJob(job);
4811
+ await this.sandboxedWorker.execute(serialized);
4812
+ }
4813
+ /**
4814
+ * Serializes a Job instance for Worker Thread execution.
4815
+ *
4816
+ * @param job - The job to serialize.
4817
+ * @returns Serialized job data.
4818
+ */
4819
+ serializeJob(job) {
4820
+ const data = JSON.stringify(job);
4821
+ return {
4822
+ id: job.id ?? `job-${Date.now()}-${Math.random()}`,
4823
+ type: "json",
4824
+ data,
4825
+ createdAt: Date.now(),
4826
+ attempts: job.attempts,
4827
+ maxAttempts: job.maxAttempts,
4828
+ delaySeconds: job.delaySeconds,
4829
+ groupId: job.groupId,
4830
+ priority: job.priority,
4831
+ retryAfterSeconds: job.retryAfterSeconds,
4832
+ retryMultiplier: job.retryMultiplier
4833
+ };
4834
+ }
4835
+ /**
4836
+ * Handles the permanent failure of a job.
4837
+ *
4838
+ * Invokes the job's `failed()` method and any global `onFailed` callback.
4839
+ *
4840
+ * @param job - The failed job.
4841
+ * @param error - The error that caused the failure.
4842
+ */
4843
+ async handleFailure(job, error) {
4844
+ try {
4845
+ await job.failed(error);
4846
+ } catch (failedError) {
4847
+ console.error("[Worker] Error in job.failed():", failedError);
4848
+ }
4849
+ if (this.options.onFailed) {
4850
+ try {
4851
+ await this.options.onFailed(job, error);
4852
+ } catch (callbackError) {
3568
4853
  console.error("[Worker] Error in onFailed callback:", callbackError);
3569
4854
  }
3570
4855
  }
3571
4856
  }
4857
+ /**
4858
+ * Terminates the sandboxed worker and releases resources.
4859
+ *
4860
+ * Should be called when the worker is no longer needed.
4861
+ * Only applicable when running in sandboxed mode.
4862
+ */
4863
+ async terminate() {
4864
+ if (this.sandboxedWorker) {
4865
+ await this.sandboxedWorker.terminate();
4866
+ }
4867
+ }
3572
4868
  };
3573
4869
 
3574
4870
  // src/Consumer.ts
3575
- var Consumer = class extends import_node_events.EventEmitter {
4871
+ var Consumer = class _Consumer extends import_node_events.EventEmitter {
3576
4872
  constructor(queueManager, options) {
3577
4873
  super();
3578
4874
  this.queueManager = queueManager;
3579
4875
  this.options = options;
3580
4876
  }
4877
+ /**
4878
+ * Group limiter 的存活時間(毫秒)。
4879
+ * 超過此時間未使用的 group limiter 會被清理,避免記憶體洩漏。
4880
+ */
4881
+ static GROUP_LIMITER_TTL = 6e4;
3581
4882
  running = false;
3582
4883
  stopRequested = false;
3583
4884
  workerId = `worker-${crypto.randomUUID()}`;
3584
4885
  heartbeatTimer = null;
4886
+ cleanupTimer = null;
3585
4887
  groupLimiters = /* @__PURE__ */ new Map();
4888
+ groupLimiterLastUsed = /* @__PURE__ */ new Map();
3586
4889
  stats = {
3587
4890
  processed: 0,
3588
4891
  failed: 0,
@@ -3593,7 +4896,7 @@ var Consumer = class extends import_node_events.EventEmitter {
3593
4896
  return this.options.connection ?? this.queueManager.getDefaultConnection();
3594
4897
  }
3595
4898
  /**
3596
- * Log debug message.
4899
+ * Logs a debug message if debug mode is enabled.
3597
4900
  */
3598
4901
  log(message, data) {
3599
4902
  if (this.options.debug) {
@@ -3607,7 +4910,12 @@ var Consumer = class extends import_node_events.EventEmitter {
3607
4910
  }
3608
4911
  }
3609
4912
  /**
3610
- * Start the consumer loop.
4913
+ * Starts the consumer loop.
4914
+ *
4915
+ * Begins polling the queues and processing jobs. This method returns a promise that resolves
4916
+ * only when the consumer stops (if `keepAlive` is false) or throws if already running.
4917
+ *
4918
+ * @throws {Error} If the consumer is already running.
3611
4919
  */
3612
4920
  async start() {
3613
4921
  if (this.running) {
@@ -3639,10 +4947,11 @@ var Consumer = class extends import_node_events.EventEmitter {
3639
4947
  `Consumer started on [${this.options.queues.join(", ")}] with concurrency ${concurrency}`
3640
4948
  );
3641
4949
  }
4950
+ this.startCleanupTimer();
3642
4951
  while (this.running && !this.stopRequested) {
3643
4952
  const capacity = concurrency - this.stats.active;
3644
4953
  if (capacity <= 0) {
3645
- await new Promise((resolve) => setTimeout(resolve, 50));
4954
+ await new Promise((resolve2) => setTimeout(resolve2, 50));
3646
4955
  continue;
3647
4956
  }
3648
4957
  const eligibleQueues = [];
@@ -3664,7 +4973,7 @@ var Consumer = class extends import_node_events.EventEmitter {
3664
4973
  eligibleQueues.push(queue);
3665
4974
  }
3666
4975
  if (eligibleQueues.length === 0) {
3667
- await new Promise((resolve) => setTimeout(resolve, currentPollInterval));
4976
+ await new Promise((resolve2) => setTimeout(resolve2, currentPollInterval));
3668
4977
  continue;
3669
4978
  }
3670
4979
  let jobs = [];
@@ -3713,7 +5022,7 @@ var Consumer = class extends import_node_events.EventEmitter {
3713
5022
  this.stats.active--;
3714
5023
  });
3715
5024
  }
3716
- await new Promise((resolve) => setTimeout(resolve, 0));
5025
+ await new Promise((resolve2) => setTimeout(resolve2, 0));
3717
5026
  continue;
3718
5027
  }
3719
5028
  } catch (error) {
@@ -3724,22 +5033,23 @@ var Consumer = class extends import_node_events.EventEmitter {
3724
5033
  }
3725
5034
  if (!this.stopRequested) {
3726
5035
  if (!didBlock) {
3727
- await new Promise((resolve) => setTimeout(resolve, currentPollInterval));
5036
+ await new Promise((resolve2) => setTimeout(resolve2, currentPollInterval));
3728
5037
  currentPollInterval = Math.min(currentPollInterval * backoffMultiplier, maxPollInterval);
3729
5038
  }
3730
5039
  } else {
3731
- await new Promise((resolve) => setTimeout(resolve, 50));
5040
+ await new Promise((resolve2) => setTimeout(resolve2, 50));
3732
5041
  }
3733
5042
  }
3734
5043
  this.running = false;
3735
5044
  this.stopHeartbeat();
5045
+ this.stopCleanupTimer();
3736
5046
  if (this.options.monitor) {
3737
5047
  await this.publishLog("info", "Consumer stopped");
3738
5048
  }
3739
5049
  this.log("Stopped");
3740
5050
  }
3741
5051
  /**
3742
- * Run a job with concurrency controls.
5052
+ * Run a job with concurrency controls and group locking.
3743
5053
  */
3744
5054
  async runJob(job, worker) {
3745
5055
  if (!job.groupId || this.options.groupJobsSequential === false) {
@@ -3750,6 +5060,7 @@ var Consumer = class extends import_node_events.EventEmitter {
3750
5060
  limiter = (0, import_p_limit.default)(1);
3751
5061
  this.groupLimiters.set(job.groupId, limiter);
3752
5062
  }
5063
+ this.groupLimiterLastUsed.set(job.groupId, Date.now());
3753
5064
  if (limiter.pendingCount > 0) {
3754
5065
  this.log(`Job ${job.id} queued behind group ${job.groupId}`);
3755
5066
  }
@@ -3758,16 +5069,18 @@ var Consumer = class extends import_node_events.EventEmitter {
3758
5069
  });
3759
5070
  if (limiter.activeCount === 0 && limiter.pendingCount === 0) {
3760
5071
  this.groupLimiters.delete(job.groupId);
5072
+ this.groupLimiterLastUsed.delete(job.groupId);
3761
5073
  }
3762
5074
  }
3763
5075
  /**
3764
- * Handle a single job.
5076
+ * Delegates the actual processing to the worker and handles stats/logging.
3765
5077
  */
3766
5078
  async handleJob(job, worker) {
3767
5079
  const currentQueue = job.queueName || "default";
3768
5080
  const startTime = Date.now();
3769
5081
  this.log(`Processing job ${job.id} from ${currentQueue}`);
3770
5082
  this.emit("job:started", { job, queue: currentQueue });
5083
+ this.options.onEvent?.("job:started", { jobId: job.id, queue: currentQueue });
3771
5084
  if (this.options.monitor) {
3772
5085
  await this.publishLog("info", `Processing job: ${job.id}`, job.id);
3773
5086
  }
@@ -3776,14 +5089,32 @@ var Consumer = class extends import_node_events.EventEmitter {
3776
5089
  const duration = Date.now() - startTime;
3777
5090
  this.stats.processed++;
3778
5091
  this.emit("job:processed", { job, duration, queue: currentQueue });
5092
+ this.options.onEvent?.("job:processed", { jobId: job.id, duration, queue: currentQueue });
3779
5093
  this.log(`Completed job ${job.id} in ${duration}ms`);
3780
5094
  if (this.options.monitor) {
3781
5095
  await this.publishLog("success", `Completed job: ${job.id}`, job.id);
3782
5096
  }
5097
+ if (this.options.maxRequests && this.stats.processed >= this.options.maxRequests) {
5098
+ this.log(`Max requests reached: ${this.stats.processed}/${this.options.maxRequests}`);
5099
+ this.stopRequested = true;
5100
+ this.emit("max_requests_reached", {
5101
+ processed: this.stats.processed,
5102
+ maxRequests: this.options.maxRequests
5103
+ });
5104
+ if (this.options.monitor) {
5105
+ await this.publishLog("info", `Max requests reached: ${this.stats.processed}`, job.id);
5106
+ }
5107
+ }
3783
5108
  } catch (err) {
3784
5109
  const error = err;
3785
5110
  const duration = Date.now() - startTime;
3786
5111
  this.emit("job:failed", { job, error, duration, queue: currentQueue });
5112
+ this.options.onEvent?.("job:failed", {
5113
+ jobId: job.id,
5114
+ error: error.message,
5115
+ duration,
5116
+ queue: currentQueue
5117
+ });
3787
5118
  this.log(`Failed job ${job.id} in ${duration}ms`, { error: error.message });
3788
5119
  this.stats.failed++;
3789
5120
  if (this.options.monitor) {
@@ -3809,6 +5140,7 @@ var Consumer = class extends import_node_events.EventEmitter {
3809
5140
  }
3810
5141
  } else {
3811
5142
  this.emit("job:failed_permanently", { job, error });
5143
+ this.options.onEvent?.("job:failed_permanently", { jobId: job.id, error: error.message });
3812
5144
  this.log(`Job ${job.id} failed permanently`);
3813
5145
  await this.queueManager.fail(job, error).catch((dlqErr) => {
3814
5146
  console.error("[Consumer] Error moving job to DLQ:", dlqErr);
@@ -3866,6 +5198,48 @@ var Consumer = class extends import_node_events.EventEmitter {
3866
5198
  this.heartbeatTimer = null;
3867
5199
  }
3868
5200
  }
5201
+ /**
5202
+ * 清理閒置的 group limiters。
5203
+ *
5204
+ * 定期檢查並移除超過 TTL 且沒有 active/pending jobs 的 group limiters,
5205
+ * 避免記憶體洩漏。
5206
+ */
5207
+ cleanupGroupLimiters() {
5208
+ const now = Date.now();
5209
+ const groupsToDelete = [];
5210
+ for (const [groupId, lastUsed] of this.groupLimiterLastUsed.entries()) {
5211
+ const limiter = this.groupLimiters.get(groupId);
5212
+ if (!limiter) {
5213
+ groupsToDelete.push(groupId);
5214
+ continue;
5215
+ }
5216
+ if (now - lastUsed > _Consumer.GROUP_LIMITER_TTL && limiter.activeCount === 0 && limiter.pendingCount === 0) {
5217
+ this.groupLimiters.delete(groupId);
5218
+ groupsToDelete.push(groupId);
5219
+ this.log(`Cleaned up inactive group limiter: ${groupId}`);
5220
+ }
5221
+ }
5222
+ for (const groupId of groupsToDelete) {
5223
+ this.groupLimiterLastUsed.delete(groupId);
5224
+ }
5225
+ }
5226
+ /**
5227
+ * 啟動 group limiter 清理計時器。
5228
+ */
5229
+ startCleanupTimer() {
5230
+ this.cleanupTimer = setInterval(() => {
5231
+ this.cleanupGroupLimiters();
5232
+ }, 3e4);
5233
+ }
5234
+ /**
5235
+ * 停止 group limiter 清理計時器。
5236
+ */
5237
+ stopCleanupTimer() {
5238
+ if (this.cleanupTimer) {
5239
+ clearInterval(this.cleanupTimer);
5240
+ this.cleanupTimer = null;
5241
+ }
5242
+ }
3869
5243
  async publishLog(level, message, jobId) {
3870
5244
  try {
3871
5245
  const driver = this.queueManager.getDriver(this.connectionName);
@@ -3886,29 +5260,39 @@ var Consumer = class extends import_node_events.EventEmitter {
3886
5260
  }
3887
5261
  }
3888
5262
  /**
3889
- * Stop the consumer loop (graceful shutdown).
5263
+ * Gracefully stops the consumer.
5264
+ *
5265
+ * Signals the consumer to stop accepting new jobs and waits for currently running jobs
5266
+ * to complete.
5267
+ *
5268
+ * @returns A promise that resolves when the consumer has fully stopped.
3890
5269
  */
3891
5270
  async stop() {
3892
5271
  this.log("Stopping...");
3893
5272
  this.stopRequested = true;
5273
+ this.stopCleanupTimer();
3894
5274
  while (this.running) {
3895
- await new Promise((resolve) => setTimeout(resolve, 100));
5275
+ await new Promise((resolve2) => setTimeout(resolve2, 100));
3896
5276
  }
3897
5277
  }
3898
5278
  /**
3899
- * Check whether the consumer is running.
5279
+ * Checks if the consumer is currently active.
5280
+ *
5281
+ * @returns True if the consumer loop is running.
3900
5282
  */
3901
5283
  isRunning() {
3902
5284
  return this.running;
3903
5285
  }
3904
5286
  /**
3905
- * Get current consumer statistics.
5287
+ * Retrieves current operational statistics.
5288
+ *
5289
+ * @returns An object containing processed, failed, retried, and active job counts.
3906
5290
  */
3907
5291
  getStats() {
3908
5292
  return { ...this.stats };
3909
5293
  }
3910
5294
  /**
3911
- * Reset statistics counters.
5295
+ * Resets the internal statistics counters.
3912
5296
  */
3913
5297
  resetStats() {
3914
5298
  this.stats.processed = 0;
@@ -3918,7 +5302,165 @@ var Consumer = class extends import_node_events.EventEmitter {
3918
5302
  };
3919
5303
 
3920
5304
  // src/index.ts
5305
+ init_BullMQDriver();
3921
5306
  init_DatabaseDriver();
5307
+
5308
+ // src/drivers/GrpcDriver.ts
5309
+ var import_node_path2 = __toESM(require("path"), 1);
5310
+ var grpc = __toESM(require("@grpc/grpc-js"), 1);
5311
+ var protoLoader = __toESM(require("@grpc/proto-loader"), 1);
5312
+ var GrpcDriver = class {
5313
+ client;
5314
+ constructor(config) {
5315
+ const protoPath = config.protoPath || import_node_path2.default.resolve(__dirname, "../../proto/queue.proto");
5316
+ const packageDefinition = protoLoader.loadSync(protoPath, {
5317
+ keepCase: true,
5318
+ longs: String,
5319
+ enums: String,
5320
+ defaults: true,
5321
+ oneofs: true
5322
+ });
5323
+ const packageName = config.packageName || "stream";
5324
+ const serviceName = config.serviceName || "QueueService";
5325
+ const pkg = packageDefinition[packageName];
5326
+ if (!pkg) {
5327
+ throw new Error(`Package '${packageName}' not found in proto definition at ${protoPath}`);
5328
+ }
5329
+ const Service = pkg[serviceName];
5330
+ if (!Service) {
5331
+ throw new Error(`Service '${serviceName}' not found in package '${packageName}'`);
5332
+ }
5333
+ const credentials2 = this.getCredentials(config);
5334
+ this.client = new Service(config.url, credentials2);
5335
+ }
5336
+ getCredentials(config) {
5337
+ if (config.credentials) {
5338
+ if (config.credentials.rootCerts) {
5339
+ return grpc.credentials.createSsl(
5340
+ config.credentials.rootCerts,
5341
+ config.credentials.privateKey,
5342
+ config.credentials.certChain
5343
+ );
5344
+ }
5345
+ }
5346
+ return grpc.credentials.createInsecure();
5347
+ }
5348
+ async push(queue, job, options) {
5349
+ const req = {
5350
+ queue,
5351
+ job: this.toProtoJob(job),
5352
+ options: {
5353
+ groupId: options?.groupId,
5354
+ priority: String(options?.priority || "")
5355
+ }
5356
+ };
5357
+ return new Promise((resolve2, reject) => {
5358
+ ;
5359
+ this.client.Push(req, (err, response) => {
5360
+ if (err) {
5361
+ return reject(err);
5362
+ }
5363
+ if (!response.success) {
5364
+ return reject(new Error(response.message || "Unknown gRPC error"));
5365
+ }
5366
+ resolve2();
5367
+ });
5368
+ });
5369
+ }
5370
+ async pop(queue) {
5371
+ return new Promise((resolve2, reject) => {
5372
+ ;
5373
+ this.client.Pull({ queue }, (err, response) => {
5374
+ if (err) {
5375
+ return reject(err);
5376
+ }
5377
+ if (!response.job || !response.job.id) {
5378
+ return resolve2(null);
5379
+ }
5380
+ resolve2(this.fromProtoJob(response.job));
5381
+ });
5382
+ });
5383
+ }
5384
+ async size(queue) {
5385
+ return new Promise((resolve2, reject) => {
5386
+ ;
5387
+ this.client.Size({ queue }, (err, response) => {
5388
+ if (err) {
5389
+ return reject(err);
5390
+ }
5391
+ resolve2(response.size || 0);
5392
+ });
5393
+ });
5394
+ }
5395
+ async clear(queue) {
5396
+ return new Promise((resolve2, reject) => {
5397
+ ;
5398
+ this.client.Clear({ queue }, (err) => {
5399
+ if (err) {
5400
+ return reject(err);
5401
+ }
5402
+ resolve2();
5403
+ });
5404
+ });
5405
+ }
5406
+ async acknowledge(messageId) {
5407
+ return new Promise((resolve2, reject) => {
5408
+ ;
5409
+ this.client.Acknowledge({ jobId: messageId }, (err) => {
5410
+ if (err) {
5411
+ return reject(err);
5412
+ }
5413
+ resolve2();
5414
+ });
5415
+ });
5416
+ }
5417
+ async stats(queue) {
5418
+ return new Promise((resolve2, reject) => {
5419
+ ;
5420
+ this.client.Stats({ queue }, (err, response) => {
5421
+ if (err) {
5422
+ return reject(err);
5423
+ }
5424
+ resolve2({
5425
+ queue: response.queue,
5426
+ size: response.size,
5427
+ delayed: response.delayed,
5428
+ failed: response.failed,
5429
+ reserved: response.reserved
5430
+ });
5431
+ });
5432
+ });
5433
+ }
5434
+ toProtoJob(job) {
5435
+ return {
5436
+ ...job,
5437
+ priority: job.priority ? String(job.priority) : void 0,
5438
+ createdAt: String(job.createdAt),
5439
+ // Long as string
5440
+ failedAt: job.failedAt ? String(job.failedAt) : void 0
5441
+ };
5442
+ }
5443
+ fromProtoJob(protoJob) {
5444
+ return {
5445
+ id: protoJob.id,
5446
+ type: protoJob.type,
5447
+ data: protoJob.data,
5448
+ className: protoJob.className,
5449
+ createdAt: Number(protoJob.createdAt),
5450
+ delaySeconds: protoJob.delaySeconds,
5451
+ attempts: protoJob.attempts,
5452
+ maxAttempts: protoJob.maxAttempts,
5453
+ groupId: protoJob.groupId,
5454
+ priority: protoJob.priority,
5455
+ failedAt: protoJob.failedAt ? Number(protoJob.failedAt) : void 0,
5456
+ error: protoJob.error,
5457
+ retryAfterSeconds: protoJob.retryAfterSeconds,
5458
+ retryMultiplier: protoJob.retryMultiplier
5459
+ };
5460
+ }
5461
+ };
5462
+
5463
+ // src/index.ts
3922
5464
  init_KafkaDriver();
3923
5465
 
3924
5466
  // src/drivers/MemoryDriver.ts
@@ -3929,7 +5471,11 @@ var MemoryDriver = class {
3929
5471
  this.maxSize = config.maxSize ?? Infinity;
3930
5472
  }
3931
5473
  /**
3932
- * Push a job to a queue.
5474
+ * Pushes a job to the in-memory queue.
5475
+ *
5476
+ * @param queue - The queue name.
5477
+ * @param job - The serialized job.
5478
+ * @throws {Error} If the queue has reached `maxSize`.
3933
5479
  */
3934
5480
  async push(queue, job) {
3935
5481
  if (!this.queues.has(queue)) {
@@ -3942,7 +5488,12 @@ var MemoryDriver = class {
3942
5488
  q.push(job);
3943
5489
  }
3944
5490
  /**
3945
- * Pop a job from a queue (FIFO).
5491
+ * Pops the next available job from the queue.
5492
+ *
5493
+ * Respects `delaySeconds` by checking the job's `createdAt` timestamp.
5494
+ *
5495
+ * @param queue - The queue name.
5496
+ * @returns The job or `null`.
3946
5497
  */
3947
5498
  async pop(queue) {
3948
5499
  const queueJobs = this.queues.get(queue);
@@ -3959,19 +5510,28 @@ var MemoryDriver = class {
3959
5510
  return queueJobs.splice(availableIndex, 1)[0];
3960
5511
  }
3961
5512
  /**
3962
- * Get queue size.
5513
+ * Returns the number of jobs in the queue.
5514
+ *
5515
+ * @param queue - The queue name.
3963
5516
  */
3964
5517
  async size(queue) {
3965
5518
  return this.queues.get(queue)?.length ?? 0;
3966
5519
  }
3967
5520
  /**
3968
- * Clear a queue.
5521
+ * Clears all jobs from the queue.
5522
+ *
5523
+ * @param queue - The queue name.
3969
5524
  */
3970
5525
  async clear(queue) {
3971
5526
  this.queues.delete(queue);
3972
5527
  }
3973
5528
  /**
3974
- * Mark a job as permanently failed.
5529
+ * Moves a job to the failed (DLQ) list.
5530
+ *
5531
+ * In MemoryDriver, this simply pushes to a `failed:{queue}` list.
5532
+ *
5533
+ * @param queue - The original queue name.
5534
+ * @param job - The failed job.
3975
5535
  */
3976
5536
  async fail(queue, job) {
3977
5537
  const failedQueue = `failed:${queue}`;
@@ -3981,7 +5541,11 @@ var MemoryDriver = class {
3981
5541
  this.queues.get(failedQueue)?.push(job);
3982
5542
  }
3983
5543
  /**
3984
- * Get queue statistics.
5544
+ * Retrieves statistics for the queue.
5545
+ *
5546
+ * Calculates pending, delayed, and failed counts by iterating through the list.
5547
+ *
5548
+ * @param queue - The queue name.
3985
5549
  */
3986
5550
  async stats(queue) {
3987
5551
  const jobs = this.queues.get(queue) || [];
@@ -4004,7 +5568,10 @@ var MemoryDriver = class {
4004
5568
  };
4005
5569
  }
4006
5570
  /**
4007
- * Push multiple jobs.
5571
+ * Pushes multiple jobs to the queue.
5572
+ *
5573
+ * @param queue - The queue name.
5574
+ * @param jobs - Array of jobs.
4008
5575
  */
4009
5576
  async pushMany(queue, jobs) {
4010
5577
  if (!this.queues.has(queue)) {
@@ -4013,7 +5580,10 @@ var MemoryDriver = class {
4013
5580
  this.queues.get(queue)?.push(...jobs);
4014
5581
  }
4015
5582
  /**
4016
- * Pop multiple jobs.
5583
+ * Pops multiple jobs from the queue.
5584
+ *
5585
+ * @param queue - The queue name.
5586
+ * @param count - Max jobs to pop.
4017
5587
  */
4018
5588
  async popMany(queue, count) {
4019
5589
  const results = [];
@@ -4027,6 +5597,12 @@ var MemoryDriver = class {
4027
5597
  }
4028
5598
  return results;
4029
5599
  }
5600
+ /**
5601
+ * Lists all active queues in memory.
5602
+ */
5603
+ async getQueues() {
5604
+ return Array.from(this.queues.keys()).filter((q) => !q.startsWith("failed:")).sort();
5605
+ }
4030
5606
  };
4031
5607
 
4032
5608
  // src/index.ts
@@ -4037,78 +5613,128 @@ init_SQSDriver();
4037
5613
  // src/Job.ts
4038
5614
  var Job = class {
4039
5615
  /**
4040
- * Unique job identifier.
5616
+ * Unique identifier for the job instance.
5617
+ *
5618
+ * Assigned automatically when the job is pushed to the queue.
4041
5619
  */
4042
5620
  id;
4043
5621
  /**
4044
- * Queue name.
5622
+ * The name of the queue where this job will be processed.
4045
5623
  */
4046
5624
  queueName;
4047
5625
  /**
4048
- * Connection name.
5626
+ * The name of the connection used to transport this job.
4049
5627
  */
4050
5628
  connectionName;
4051
5629
  /**
4052
- * Delay before execution (seconds).
5630
+ * Delay in seconds before the job becomes available for processing.
4053
5631
  */
4054
5632
  delaySeconds;
4055
5633
  /**
4056
- * Current attempt number.
5634
+ * The current attempt number (starts at 1).
4057
5635
  */
4058
5636
  attempts;
4059
5637
  /**
4060
- * Maximum attempts.
5638
+ * The maximum number of retry attempts allowed.
5639
+ *
5640
+ * Can be overridden by the worker configuration or per-job using `maxAttempts`.
4061
5641
  */
4062
5642
  maxAttempts;
4063
5643
  /**
4064
- * Group ID for FIFO.
5644
+ * Group ID for sequential processing.
5645
+ *
5646
+ * Jobs with the same `groupId` will be processed in strict order (FIFO)
5647
+ * if the consumer supports it.
4065
5648
  */
4066
5649
  groupId;
4067
5650
  /**
4068
- * Job priority.
5651
+ * Priority level of the job.
4069
5652
  */
4070
5653
  priority;
4071
5654
  /**
4072
- * Initial retry delay (seconds).
5655
+ * Initial delay in seconds before the first retry attempt.
5656
+ *
5657
+ * Used for exponential backoff calculation.
4073
5658
  */
4074
5659
  retryAfterSeconds;
4075
5660
  /**
4076
- * Retry delay multiplier.
5661
+ * Multiplier applied to the retry delay for each subsequent attempt.
5662
+ *
5663
+ * Used for exponential backoff calculation.
4077
5664
  */
4078
5665
  retryMultiplier;
4079
5666
  /**
4080
- * Set target queue.
5667
+ * Sets the target queue for the job.
5668
+ *
5669
+ * @param queue - The name of the target queue.
5670
+ * @returns The job instance for chaining.
5671
+ *
5672
+ * @example
5673
+ * ```typescript
5674
+ * job.onQueue('billing');
5675
+ * ```
4081
5676
  */
4082
5677
  onQueue(queue) {
4083
5678
  this.queueName = queue;
4084
5679
  return this;
4085
5680
  }
4086
5681
  /**
4087
- * Set target connection.
5682
+ * Sets the target connection for the job.
5683
+ *
5684
+ * @param connection - The name of the connection (e.g., 'redis').
5685
+ * @returns The job instance for chaining.
5686
+ *
5687
+ * @example
5688
+ * ```typescript
5689
+ * job.onConnection('sqs-primary');
5690
+ * ```
4088
5691
  */
4089
5692
  onConnection(connection) {
4090
5693
  this.connectionName = connection;
4091
5694
  return this;
4092
5695
  }
4093
5696
  /**
4094
- * Set job priority.
4095
- * @param priority - 'high', 'low', or number
5697
+ * Sets the priority of the job.
5698
+ *
5699
+ * @param priority - The priority level (e.g., 'high', 10).
5700
+ * @returns The job instance for chaining.
5701
+ *
5702
+ * @example
5703
+ * ```typescript
5704
+ * job.withPriority('high');
5705
+ * ```
4096
5706
  */
4097
5707
  withPriority(priority) {
4098
5708
  this.priority = priority;
4099
5709
  return this;
4100
5710
  }
4101
5711
  /**
4102
- * Set delay (seconds).
5712
+ * Delays the job execution.
5713
+ *
5714
+ * @param delay - Delay in seconds.
5715
+ * @returns The job instance for chaining.
5716
+ *
5717
+ * @example
5718
+ * ```typescript
5719
+ * job.delay(60); // Run after 1 minute
5720
+ * ```
4103
5721
  */
4104
5722
  delay(delay) {
4105
5723
  this.delaySeconds = delay;
4106
5724
  return this;
4107
5725
  }
4108
5726
  /**
4109
- * Set retry backoff strategy.
4110
- * @param seconds - Initial delay in seconds
4111
- * @param multiplier - Multiplier for each subsequent attempt (default: 2)
5727
+ * Configures the exponential backoff strategy for retries.
5728
+ *
5729
+ * @param seconds - Initial delay in seconds before the first retry.
5730
+ * @param multiplier - Factor by which the delay increases for each subsequent attempt (default: 2).
5731
+ * @returns The job instance for chaining.
5732
+ *
5733
+ * @example
5734
+ * ```typescript
5735
+ * // Wait 5s, then 10s, then 20s...
5736
+ * job.backoff(5, 2);
5737
+ * ```
4112
5738
  */
4113
5739
  backoff(seconds, multiplier = 2) {
4114
5740
  this.retryAfterSeconds = seconds;
@@ -4116,9 +5742,17 @@ var Job = class {
4116
5742
  return this;
4117
5743
  }
4118
5744
  /**
4119
- * Calculate retry delay for the next attempt.
4120
- * @param attempt - Current attempt number (1-based)
4121
- * @returns Delay in milliseconds
5745
+ * Calculates the delay for the next retry attempt based on the backoff strategy.
5746
+ *
5747
+ * Uses the formula: `initialDelay * multiplier^(attempt - 1)`, capped at 1 hour.
5748
+ *
5749
+ * @param attempt - The current attempt number (1-based).
5750
+ * @returns The calculated delay in milliseconds.
5751
+ *
5752
+ * @example
5753
+ * ```typescript
5754
+ * const nextDelay = job.getRetryDelay(2);
5755
+ * ```
4122
5756
  */
4123
5757
  getRetryDelay(attempt) {
4124
5758
  const initialDelay = (this.retryAfterSeconds ?? 1) * 1e3;
@@ -4126,28 +5760,40 @@ var Job = class {
4126
5760
  return Math.min(initialDelay * multiplier ** (attempt - 1), 36e5);
4127
5761
  }
4128
5762
  /**
4129
- * Failure handler (optional).
5763
+ * Optional handler for when the job has permanently failed.
5764
+ *
5765
+ * Called when the job has exhausted all retry attempts.
5766
+ * Useful for cleaning up resources, sending alerts, or logging.
4130
5767
  *
4131
- * Called when the job fails and reaches the maximum number of attempts.
4132
- * Subclasses can override to implement custom failure handling.
5768
+ * @param _error - The error that caused the final failure.
4133
5769
  *
4134
- * @param error - Error instance
5770
+ * @example
5771
+ * ```typescript
5772
+ * async failed(error: Error) {
5773
+ * await notifyAdmin(`Job failed: ${error.message}`);
5774
+ * }
5775
+ * ```
4135
5776
  */
4136
5777
  async failed(_error) {
4137
5778
  }
4138
5779
  };
4139
5780
 
5781
+ // src/index.ts
5782
+ init_DistributedLock();
5783
+
4140
5784
  // src/serializers/CachedSerializer.ts
4141
5785
  var CachedSerializer = class {
4142
5786
  /**
4143
- * @param delegate - The actual serializer to use for the first serialization
5787
+ * @param delegate - The underlying serializer to use.
4144
5788
  */
4145
5789
  constructor(delegate) {
4146
5790
  this.delegate = delegate;
4147
5791
  }
4148
5792
  cache = /* @__PURE__ */ new WeakMap();
4149
5793
  /**
4150
- * Serialize a job with caching.
5794
+ * Serializes the job, returning a cached result if available.
5795
+ *
5796
+ * @param job - The job to serialize.
4151
5797
  */
4152
5798
  serialize(job) {
4153
5799
  if (this.cache.has(job)) {
@@ -4158,8 +5804,9 @@ var CachedSerializer = class {
4158
5804
  return serialized;
4159
5805
  }
4160
5806
  /**
4161
- * Deserialize a job.
4162
- * No caching for deserialization as we get new objects each time from the driver.
5807
+ * Deserializes a job.
5808
+ *
5809
+ * Caching is not applied here as deserialization always produces new instances.
4163
5810
  */
4164
5811
  deserialize(serialized) {
4165
5812
  return this.delegate.deserialize(serialized);
@@ -4169,19 +5816,21 @@ var CachedSerializer = class {
4169
5816
  // src/serializers/ClassNameSerializer.ts
4170
5817
  var ClassNameSerializer = class {
4171
5818
  /**
4172
- * Job class registry (for resolving classes by name).
5819
+ * Registry of job classes, mapped by class name.
4173
5820
  */
4174
5821
  jobClasses = /* @__PURE__ */ new Map();
4175
5822
  /**
4176
- * Register a Job class.
4177
- * @param jobClass - Job class
5823
+ * Registers a Job class for serialization.
5824
+ *
5825
+ * @param jobClass - The job class constructor.
4178
5826
  */
4179
5827
  register(jobClass) {
4180
5828
  this.jobClasses.set(jobClass.name, jobClass);
4181
5829
  }
4182
5830
  /**
4183
- * Register multiple Job classes.
4184
- * @param jobClasses - Job class array
5831
+ * Registers multiple Job classes at once.
5832
+ *
5833
+ * @param jobClasses - An array of job class constructors.
4185
5834
  */
4186
5835
  registerMany(jobClasses) {
4187
5836
  for (const jobClass of jobClasses) {
@@ -4189,7 +5838,11 @@ var ClassNameSerializer = class {
4189
5838
  }
4190
5839
  }
4191
5840
  /**
4192
- * Serialize a Job.
5841
+ * Serializes a Job instance.
5842
+ *
5843
+ * Captures the class name and all enumerable properties.
5844
+ *
5845
+ * @param job - The job to serialize.
4193
5846
  */
4194
5847
  serialize(job) {
4195
5848
  const id = job.id || `${Date.now()}-${crypto.randomUUID()}`;
@@ -4216,7 +5869,12 @@ var ClassNameSerializer = class {
4216
5869
  };
4217
5870
  }
4218
5871
  /**
4219
- * Deserialize a Job.
5872
+ * Deserializes a Job instance.
5873
+ *
5874
+ * Instantiates the class matching `className` and assigns properties.
5875
+ *
5876
+ * @param serialized - The serialized job.
5877
+ * @throws {Error} If the job class is not registered.
4220
5878
  */
4221
5879
  deserialize(serialized) {
4222
5880
  if (serialized.type !== "class") {
@@ -4265,7 +5923,7 @@ var ClassNameSerializer = class {
4265
5923
  // src/serializers/JsonSerializer.ts
4266
5924
  var JsonSerializer = class {
4267
5925
  /**
4268
- * Serialize a job.
5926
+ * Serializes a job to a JSON object.
4269
5927
  */
4270
5928
  serialize(job) {
4271
5929
  const id = job.id || `${Date.now()}-${crypto.randomUUID()}`;
@@ -4288,7 +5946,9 @@ var JsonSerializer = class {
4288
5946
  };
4289
5947
  }
4290
5948
  /**
4291
- * Deserialize a job.
5949
+ * Deserializes a JSON object into a basic Job-like object.
5950
+ *
5951
+ * Note: The result is NOT an instance of the original Job class.
4292
5952
  */
4293
5953
  deserialize(serialized) {
4294
5954
  if (serialized.type !== "json") {
@@ -4370,9 +6030,18 @@ var QueueManager = class {
4370
6030
  }
4371
6031
  }
4372
6032
  /**
4373
- * Register a connection.
4374
- * @param name - Connection name
4375
- * @param config - Connection config
6033
+ * Registers a new queue connection with the manager.
6034
+ *
6035
+ * Dynamically loads the required driver implementation based on the configuration.
6036
+ *
6037
+ * @param name - The name of the connection (e.g., 'primary').
6038
+ * @param config - The configuration object for the driver.
6039
+ * @throws {Error} If the driver type is missing required dependencies or unsupported.
6040
+ *
6041
+ * @example
6042
+ * ```typescript
6043
+ * manager.registerConnection('analytics', { driver: 'sqs', client: sqs });
6044
+ * ```
4376
6045
  */
4377
6046
  registerConnection(name, config) {
4378
6047
  const driverType = config.driver;
@@ -4463,16 +6132,41 @@ var QueueManager = class {
4463
6132
  );
4464
6133
  break;
4465
6134
  }
6135
+ case "bullmq": {
6136
+ const { BullMQDriver: BullMQDriver2 } = (init_BullMQDriver(), __toCommonJS(BullMQDriver_exports));
6137
+ if (!config.queue) {
6138
+ throw new Error(
6139
+ "[QueueManager] BullMQDriver requires queue. Please provide Bull Queue instance in connection config."
6140
+ );
6141
+ }
6142
+ this.drivers.set(
6143
+ name,
6144
+ new BullMQDriver2({
6145
+ queue: config.queue,
6146
+ worker: config.worker,
6147
+ prefix: config.prefix,
6148
+ debug: config.debug
6149
+ })
6150
+ );
6151
+ break;
6152
+ }
4466
6153
  default:
4467
6154
  throw new Error(
4468
- `Driver "${driverType}" is not supported. Supported drivers: memory, database, redis, kafka, sqs, rabbitmq`
6155
+ `Driver "${driverType}" is not supported. Supported drivers: memory, database, redis, kafka, sqs, rabbitmq, bullmq`
4469
6156
  );
4470
6157
  }
4471
6158
  }
4472
6159
  /**
4473
- * Get a driver for a connection.
4474
- * @param connection - Connection name
4475
- * @returns Driver instance
6160
+ * Retrieves the driver instance for a specific connection.
6161
+ *
6162
+ * @param connection - The name of the connection.
6163
+ * @returns The configured QueueDriver instance.
6164
+ * @throws {Error} If the connection has not been registered.
6165
+ *
6166
+ * @example
6167
+ * ```typescript
6168
+ * const driver = manager.getDriver('redis');
6169
+ * ```
4476
6170
  */
4477
6171
  getDriver(connection) {
4478
6172
  const driver = this.drivers.get(connection);
@@ -4482,16 +6176,19 @@ var QueueManager = class {
4482
6176
  return driver;
4483
6177
  }
4484
6178
  /**
4485
- * Get the default connection name.
4486
- * @returns Default connection name
6179
+ * Gets the name of the default connection.
6180
+ *
6181
+ * @returns The default connection name.
4487
6182
  */
4488
6183
  getDefaultConnection() {
4489
6184
  return this.defaultConnection;
4490
6185
  }
4491
6186
  /**
4492
- * Get a serializer.
4493
- * @param type - Serializer type
4494
- * @returns Serializer instance
6187
+ * Retrieves a serializer instance by type.
6188
+ *
6189
+ * @param type - The serializer type (e.g., 'json', 'class'). If omitted, returns the default serializer.
6190
+ * @returns The JobSerializer instance.
6191
+ * @throws {Error} If the requested serializer type is not found.
4495
6192
  */
4496
6193
  getSerializer(type) {
4497
6194
  if (type) {
@@ -4504,8 +6201,17 @@ var QueueManager = class {
4504
6201
  return this.defaultSerializer;
4505
6202
  }
4506
6203
  /**
4507
- * Register Job classes (used by ClassNameSerializer).
4508
- * @param jobClasses - Job class array
6204
+ * Registers Job classes for the `ClassNameSerializer`.
6205
+ *
6206
+ * This is required when using 'class' serialization to allow proper hydration of job instances
6207
+ * upon deserialization.
6208
+ *
6209
+ * @param jobClasses - An array of Job class constructors.
6210
+ *
6211
+ * @example
6212
+ * ```typescript
6213
+ * manager.registerJobClasses([SendEmailJob, ProcessOrderJob]);
6214
+ * ```
4509
6215
  */
4510
6216
  registerJobClasses(jobClasses) {
4511
6217
  if (this.defaultSerializer instanceof ClassNameSerializer) {
@@ -4513,12 +6219,15 @@ var QueueManager = class {
4513
6219
  }
4514
6220
  }
4515
6221
  /**
4516
- * Push a Job to the queue.
6222
+ * Pushes a single job to the queue.
6223
+ *
6224
+ * Serializes the job, selects the appropriate driver based on job configuration,
6225
+ * and dispatches it. Also handles audit logging if persistence is enabled.
4517
6226
  *
4518
- * @template T - The type of the job.
4519
- * @param job - Job instance to push.
4520
- * @param options - Push options.
4521
- * @returns The same job instance (for fluent chaining).
6227
+ * @template T - The type of the job (extends Job).
6228
+ * @param job - The job instance to enqueue.
6229
+ * @param options - Optional overrides for push behavior (priority, delay, etc.).
6230
+ * @returns The same job instance (for chaining).
4522
6231
  *
4523
6232
  * @example
4524
6233
  * ```typescript
@@ -4549,15 +6258,19 @@ var QueueManager = class {
4549
6258
  return job;
4550
6259
  }
4551
6260
  /**
4552
- * Push multiple jobs to the queue.
6261
+ * Pushes multiple jobs to the queue in a batch.
6262
+ *
6263
+ * Optimizes network requests by batching jobs where possible. Groups jobs by connection
6264
+ * and queue to maximize throughput.
4553
6265
  *
4554
6266
  * @template T - The type of the jobs.
4555
- * @param jobs - Array of job instances.
4556
- * @param options - Bulk push options.
6267
+ * @param jobs - An array of job instances to enqueue.
6268
+ * @param options - Configuration for batch size and concurrency.
6269
+ * @returns A promise that resolves when all jobs have been pushed.
4557
6270
  *
4558
6271
  * @example
4559
6272
  * ```typescript
4560
- * await manager.pushMany(jobs, { batchSize: 500, concurrency: 2 });
6273
+ * await manager.pushMany(jobs, { batchSize: 500, concurrency: 5 });
4561
6274
  * ```
4562
6275
  */
4563
6276
  async pushMany(jobs, options = {}) {
@@ -4619,15 +6332,17 @@ var QueueManager = class {
4619
6332
  }
4620
6333
  }
4621
6334
  /**
4622
- * Pop a job from the queue.
6335
+ * Pops a single job from the queue.
4623
6336
  *
4624
- * @param queue - Queue name (default: 'default').
4625
- * @param connection - Connection name (optional).
4626
- * @returns Job instance or null if queue is empty.
6337
+ * Retrieves the next available job from the specified queue.
6338
+ *
6339
+ * @param queue - The queue name (default: 'default').
6340
+ * @param connection - The connection name (defaults to default connection).
6341
+ * @returns A Job instance if found, or `null` if the queue is empty.
4627
6342
  *
4628
6343
  * @example
4629
6344
  * ```typescript
4630
- * const job = await manager.pop('emails');
6345
+ * const job = await manager.pop('priority-queue');
4631
6346
  * if (job) await job.handle();
4632
6347
  * ```
4633
6348
  */
@@ -4647,12 +6362,20 @@ var QueueManager = class {
4647
6362
  }
4648
6363
  }
4649
6364
  /**
4650
- * Pop multiple jobs from the queue.
6365
+ * Pops multiple jobs from the queue efficiently.
6366
+ *
6367
+ * Attempts to retrieve a batch of jobs from the driver. If the driver does not support
6368
+ * batching, it falls back to sequential popping.
6369
+ *
6370
+ * @param queue - The queue name (default: 'default').
6371
+ * @param count - The maximum number of jobs to retrieve (default: 10).
6372
+ * @param connection - The connection name.
6373
+ * @returns An array of Job instances.
4651
6374
  *
4652
- * @param queue - Queue name (default: 'default').
4653
- * @param count - Number of jobs to pop (default: 10).
4654
- * @param connection - Connection name (optional).
4655
- * @returns Array of Job instances.
6375
+ * @example
6376
+ * ```typescript
6377
+ * const jobs = await manager.popMany('default', 50);
6378
+ * ```
4656
6379
  */
4657
6380
  async popMany(queue = "default", count = 10, connection = this.defaultConnection) {
4658
6381
  const driver = this.getDriver(connection);
@@ -4683,22 +6406,37 @@ var QueueManager = class {
4683
6406
  return results;
4684
6407
  }
4685
6408
  /**
4686
- * Get queue size.
6409
+ * Retrieves the current size of a queue.
4687
6410
  *
4688
- * @param queue - Queue name (default: 'default').
4689
- * @param connection - Connection name (optional).
4690
- * @returns Number of jobs in the queue.
6411
+ * @param queue - The queue name (default: 'default').
6412
+ * @param connection - The connection name.
6413
+ * @returns The number of waiting jobs.
6414
+ *
6415
+ * @example
6416
+ * ```typescript
6417
+ * const count = await manager.size('emails');
6418
+ * ```
4691
6419
  */
4692
6420
  async size(queue = "default", connection = this.defaultConnection) {
4693
6421
  const driver = this.getDriver(connection);
4694
6422
  return driver.size(queue);
4695
6423
  }
4696
6424
  /**
4697
- * Pop a job from the queue (blocking).
6425
+ * Pops a job from the queue with blocking (wait) behavior.
6426
+ *
6427
+ * Waits for a job to become available for the specified timeout duration.
6428
+ * Useful for reducing polling loop frequency.
4698
6429
  *
4699
- * @param queue - Queue name (default: 'default').
4700
- * @param timeout - Timeout in seconds (default: 0, wait forever).
4701
- * @param connection - Connection name (optional).
6430
+ * @param queues - A queue name or array of queue names to listen to.
6431
+ * @param timeout - Timeout in seconds (0 = block indefinitely).
6432
+ * @param connection - The connection name.
6433
+ * @returns A Job instance if found, or `null` if timed out.
6434
+ *
6435
+ * @example
6436
+ * ```typescript
6437
+ * // Wait up to 30 seconds for a job
6438
+ * const job = await manager.popBlocking('default', 30);
6439
+ * ```
4702
6440
  */
4703
6441
  async popBlocking(queues = "default", timeout = 0, connection = this.defaultConnection) {
4704
6442
  const driver = this.getDriver(connection);
@@ -4723,21 +6461,34 @@ var QueueManager = class {
4723
6461
  }
4724
6462
  }
4725
6463
  /**
4726
- * Clear all jobs from a queue.
6464
+ * Removes all jobs from a specific queue.
6465
+ *
6466
+ * @param queue - The queue name to purge.
6467
+ * @param connection - The connection name.
4727
6468
  *
4728
- * @param queue - Queue name (default: 'default').
4729
- * @param connection - Connection name (optional).
6469
+ * @example
6470
+ * ```typescript
6471
+ * await manager.clear('test-queue');
6472
+ * ```
4730
6473
  */
4731
6474
  async clear(queue = "default", connection = this.defaultConnection) {
4732
6475
  const driver = this.getDriver(connection);
4733
6476
  await driver.clear(queue);
4734
6477
  }
4735
6478
  /**
4736
- * Get queue statistics including size, delayed, and failed job counts.
6479
+ * Retrieves comprehensive statistics for a queue.
4737
6480
  *
4738
- * @param queue - Queue name (default: 'default').
4739
- * @param connection - Connection name (optional).
4740
- * @returns Queue statistics object.
6481
+ * Includes counts for pending, processing, delayed, and failed jobs.
6482
+ *
6483
+ * @param queue - The queue name.
6484
+ * @param connection - The connection name.
6485
+ * @returns A QueueStats object.
6486
+ *
6487
+ * @example
6488
+ * ```typescript
6489
+ * const stats = await manager.stats('default');
6490
+ * console.log(stats.size, stats.failed);
6491
+ * ```
4741
6492
  */
4742
6493
  async stats(queue = "default", connection = this.defaultConnection) {
4743
6494
  const driver = this.getDriver(connection);
@@ -4750,8 +6501,16 @@ var QueueManager = class {
4750
6501
  };
4751
6502
  }
4752
6503
  /**
4753
- * Mark a job as completed.
4754
- * @param job - Job instance
6504
+ * Marks a job as successfully completed.
6505
+ *
6506
+ * Removes the job from the processing state and optionally archives it.
6507
+ *
6508
+ * @param job - The job instance that finished.
6509
+ *
6510
+ * @example
6511
+ * ```typescript
6512
+ * await manager.complete(job);
6513
+ * ```
4755
6514
  */
4756
6515
  async complete(job) {
4757
6516
  const connection = job.connectionName ?? this.defaultConnection;
@@ -4770,9 +6529,18 @@ var QueueManager = class {
4770
6529
  }
4771
6530
  }
4772
6531
  /**
4773
- * Mark a job as permanently failed.
4774
- * @param job - Job instance
4775
- * @param error - Error object
6532
+ * Marks a job as failed.
6533
+ *
6534
+ * Moves the job to the failed state (Dead Letter Queue) and optionally archives it.
6535
+ * This is typically called after max retry attempts are exhausted.
6536
+ *
6537
+ * @param job - The job instance that failed.
6538
+ * @param error - The error that caused the failure.
6539
+ *
6540
+ * @example
6541
+ * ```typescript
6542
+ * await manager.fail(job, new Error('Something went wrong'));
6543
+ * ```
4776
6544
  */
4777
6545
  async fail(job, error) {
4778
6546
  const connection = job.connectionName ?? this.defaultConnection;
@@ -4793,13 +6561,19 @@ var QueueManager = class {
4793
6561
  }
4794
6562
  }
4795
6563
  /**
4796
- * Get the persistence adapter if configured.
6564
+ * Retrieves the configured persistence adapter.
6565
+ *
6566
+ * @returns The PersistenceAdapter instance, or undefined if not configured.
4797
6567
  */
4798
6568
  getPersistence() {
4799
6569
  return this.persistence?.adapter;
4800
6570
  }
4801
6571
  /**
4802
- * Get the scheduler if configured.
6572
+ * Gets the Scheduler instance associated with this manager.
6573
+ *
6574
+ * The Scheduler handles delayed jobs and periodic tasks.
6575
+ *
6576
+ * @returns The Scheduler instance.
4803
6577
  */
4804
6578
  getScheduler() {
4805
6579
  if (!this.scheduler) {
@@ -4809,7 +6583,18 @@ var QueueManager = class {
4809
6583
  return this.scheduler;
4810
6584
  }
4811
6585
  /**
4812
- * Get failed jobs from DLQ (if driver supports it).
6586
+ * Retrieves failed jobs from the Dead Letter Queue.
6587
+ *
6588
+ * @param queue - The queue name.
6589
+ * @param start - The starting index (pagination).
6590
+ * @param end - The ending index (pagination).
6591
+ * @param connection - The connection name.
6592
+ * @returns An array of serialized jobs.
6593
+ *
6594
+ * @example
6595
+ * ```typescript
6596
+ * const failedJobs = await manager.getFailed('default', 0, 10);
6597
+ * ```
4813
6598
  */
4814
6599
  async getFailed(queue, start = 0, end = -1, connection = this.defaultConnection) {
4815
6600
  const driver = this.getDriver(connection);
@@ -4819,7 +6604,19 @@ var QueueManager = class {
4819
6604
  return [];
4820
6605
  }
4821
6606
  /**
4822
- * Retry failed jobs from DLQ (if driver supports it).
6607
+ * Retries failed jobs from the Dead Letter Queue.
6608
+ *
6609
+ * Moves jobs from the failed state back to the active queue for re-processing.
6610
+ *
6611
+ * @param queue - The queue name.
6612
+ * @param count - The number of jobs to retry.
6613
+ * @param connection - The connection name.
6614
+ * @returns The number of jobs successfully retried.
6615
+ *
6616
+ * @example
6617
+ * ```typescript
6618
+ * await manager.retryFailed('default', 5);
6619
+ * ```
4823
6620
  */
4824
6621
  async retryFailed(queue, count = 1, connection = this.defaultConnection) {
4825
6622
  const driver = this.getDriver(connection);
@@ -4829,7 +6626,15 @@ var QueueManager = class {
4829
6626
  return 0;
4830
6627
  }
4831
6628
  /**
4832
- * Clear failed jobs from DLQ (if driver supports it).
6629
+ * Clears all failed jobs from the Dead Letter Queue.
6630
+ *
6631
+ * @param queue - The queue name.
6632
+ * @param connection - The connection name.
6633
+ *
6634
+ * @example
6635
+ * ```typescript
6636
+ * await manager.clearFailed('default');
6637
+ * ```
4833
6638
  */
4834
6639
  async clearFailed(queue, connection = this.defaultConnection) {
4835
6640
  const driver = this.getDriver(connection);
@@ -4837,6 +6642,219 @@ var QueueManager = class {
4837
6642
  await driver.clearFailed(queue);
4838
6643
  }
4839
6644
  }
6645
+ /**
6646
+ * Retrieves high-level statistics across all registered connections and queues.
6647
+ *
6648
+ * Iterates through all drivers and collects metadata to provide a comprehensive
6649
+ * snapshot of the entire queue system's health.
6650
+ *
6651
+ * @returns A promise resolving to a GlobalStats object.
6652
+ */
6653
+ async getGlobalStats() {
6654
+ const stats = {
6655
+ connections: {},
6656
+ totalSize: 0,
6657
+ totalFailed: 0,
6658
+ timestamp: Date.now()
6659
+ };
6660
+ for (const [name, driver] of this.drivers.entries()) {
6661
+ const queueNames = driver.getQueues ? await driver.getQueues() : ["default"];
6662
+ const connectionStats = [];
6663
+ for (const queue of queueNames) {
6664
+ const qStats = await this.stats(queue, name);
6665
+ connectionStats.push(qStats);
6666
+ stats.totalSize += qStats.size;
6667
+ stats.totalFailed += qStats.failed ?? 0;
6668
+ }
6669
+ stats.connections[name] = connectionStats;
6670
+ }
6671
+ return stats;
6672
+ }
6673
+ };
6674
+
6675
+ // src/SystemEventJob.ts
6676
+ var import_core = require("@gravito/core");
6677
+ var SystemEventJob = class extends Job {
6678
+ constructor(hook, args, options = {}) {
6679
+ super();
6680
+ this.hook = hook;
6681
+ this.args = args;
6682
+ this.options = options;
6683
+ if (options.queue) {
6684
+ this.onQueue(options.queue);
6685
+ }
6686
+ if (options.priority) {
6687
+ this.withPriority(options.priority);
6688
+ }
6689
+ if (options.delay) {
6690
+ this.delay(options.delay);
6691
+ }
6692
+ if (options.retryAfter) {
6693
+ this.backoff(options.retryAfter, options.retryMultiplier);
6694
+ }
6695
+ if (options.connection) {
6696
+ this.onConnection(options.connection);
6697
+ }
6698
+ }
6699
+ /**
6700
+ * Optional failure callback for DLQ handling.
6701
+ */
6702
+ onFailedCallback;
6703
+ /**
6704
+ * Set failure callback for DLQ handling.
6705
+ *
6706
+ * @param callback - Called when job fails permanently
6707
+ * @returns Self for chaining
6708
+ */
6709
+ onFailed(callback) {
6710
+ this.onFailedCallback = callback;
6711
+ return this;
6712
+ }
6713
+ /**
6714
+ * Execute the hook listeners in the worker process.
6715
+ */
6716
+ async handle() {
6717
+ const core = (0, import_core.app)();
6718
+ if (core?.hooks) {
6719
+ await core.hooks.doActionSync(this.hook, this.args);
6720
+ }
6721
+ }
6722
+ /**
6723
+ * Called when job fails permanently after all retries.
6724
+ *
6725
+ * This method is invoked by the worker when job exhausts all retry attempts.
6726
+ */
6727
+ async failed(error, attempt = 1) {
6728
+ if (this.onFailedCallback) {
6729
+ try {
6730
+ await this.onFailedCallback(error, attempt);
6731
+ } catch (callbackError) {
6732
+ console.error("[SystemEventJob] Failed callback error:", callbackError);
6733
+ }
6734
+ }
6735
+ }
6736
+ };
6737
+
6738
+ // src/StreamEventBackend.ts
6739
+ var StreamEventBackend = class {
6740
+ constructor(queueManager, config) {
6741
+ this.queueManager = queueManager;
6742
+ this.config = {
6743
+ retryStrategy: "bull",
6744
+ circuitBreakerIntegration: false,
6745
+ ...config
6746
+ };
6747
+ }
6748
+ config;
6749
+ /**
6750
+ * Build Job Push Options from EventOptions.
6751
+ *
6752
+ * Maps EventOptions to Bull Queue JobPushOptions with retry strategy applied.
6753
+ */
6754
+ buildJobOptions(task) {
6755
+ const options = {};
6756
+ if (task.options?.priority) {
6757
+ options.priority = task.options.priority;
6758
+ }
6759
+ const taskOptionsAny = task.options;
6760
+ if (taskOptionsAny?.groupId) {
6761
+ options.groupId = taskOptionsAny.groupId;
6762
+ }
6763
+ return options;
6764
+ }
6765
+ /**
6766
+ * Enqueue an event task to the stream queue.
6767
+ *
6768
+ * Applies retry strategy and CircuitBreaker checks based on configuration.
6769
+ * Supports DLQ routing for failed events.
6770
+ */
6771
+ async enqueue(task) {
6772
+ if (this.config.circuitBreakerIntegration && this.config.getCircuitBreaker) {
6773
+ const breaker = this.config.getCircuitBreaker(task.hook);
6774
+ if (breaker?.getState?.() === "OPEN") {
6775
+ throw new Error(`Circuit breaker OPEN for event: ${task.hook}`);
6776
+ }
6777
+ }
6778
+ const job = new SystemEventJob(task.hook, task.args, task.options);
6779
+ this.applyRetryStrategy(job, task);
6780
+ if (this.config.dlqHandler) {
6781
+ job.onFailed(async (error, attempt) => {
6782
+ await this.handleJobFailure(task, error, attempt);
6783
+ });
6784
+ }
6785
+ const options = this.buildJobOptions(task);
6786
+ await this.queueManager.push(job, options);
6787
+ }
6788
+ /**
6789
+ * Apply retry strategy to the job based on configuration.
6790
+ */
6791
+ applyRetryStrategy(job, task) {
6792
+ const strategy = this.config.retryStrategy ?? "bull";
6793
+ const taskOptionsAny = task.options;
6794
+ if (strategy === "bull" || strategy === "hybrid") {
6795
+ job.maxAttempts = taskOptionsAny?.maxAttempts ?? 3;
6796
+ job.retryAfterSeconds = taskOptionsAny?.retryAfter ?? 5;
6797
+ job.retryMultiplier = taskOptionsAny?.retryMultiplier ?? 2;
6798
+ }
6799
+ }
6800
+ /**
6801
+ * Handle job failure and route to DLQ if configured.
6802
+ *
6803
+ * Called when a job exhausts all retry attempts.
6804
+ */
6805
+ async handleJobFailure(task, error, attempt) {
6806
+ if (this.config.dlqHandler) {
6807
+ try {
6808
+ await this.config.dlqHandler.handle(task, error, attempt);
6809
+ } catch (dlqError) {
6810
+ console.error("[StreamEventBackend] Failed to handle DLQ:", dlqError);
6811
+ }
6812
+ }
6813
+ }
6814
+ /**
6815
+ * Record a job failure for CircuitBreaker state management.
6816
+ *
6817
+ * Called when a job fails, regardless of retry status.
6818
+ */
6819
+ recordJobFailure(task, error) {
6820
+ if (this.config.circuitBreakerIntegration && this.config.getCircuitBreaker) {
6821
+ const breaker = this.config.getCircuitBreaker(task.hook);
6822
+ if (breaker?.recordFailure) {
6823
+ breaker.recordFailure(error);
6824
+ }
6825
+ }
6826
+ }
6827
+ /**
6828
+ * Record a job success for CircuitBreaker state management.
6829
+ *
6830
+ * Called when a job completes successfully.
6831
+ */
6832
+ recordJobSuccess(task) {
6833
+ if (this.config.circuitBreakerIntegration && this.config.getCircuitBreaker) {
6834
+ const breaker = this.config.getCircuitBreaker(task.hook);
6835
+ if (breaker?.recordSuccess) {
6836
+ breaker.recordSuccess();
6837
+ }
6838
+ }
6839
+ }
6840
+ /**
6841
+ * Get the retry strategy configuration.
6842
+ */
6843
+ getRetryStrategy() {
6844
+ return this.config.retryStrategy ?? "bull";
6845
+ }
6846
+ /**
6847
+ * Check if CircuitBreaker integration is enabled.
6848
+ */
6849
+ isCircuitBreakerEnabled() {
6850
+ return this.config.circuitBreakerIntegration ?? false;
6851
+ }
6852
+ /**
6853
+ * Get the DLQ handler, if configured.
6854
+ */
6855
+ getDLQHandler() {
6856
+ return this.config.dlqHandler;
6857
+ }
4840
6858
  };
4841
6859
 
4842
6860
  // src/OrbitStream.ts
@@ -4846,16 +6864,37 @@ var OrbitStream = class _OrbitStream {
4846
6864
  }
4847
6865
  queueManager;
4848
6866
  consumer;
6867
+ core;
4849
6868
  /**
4850
- * Static configuration helper.
6869
+ * Factory method for creating and configuring an OrbitStream instance.
6870
+ *
6871
+ * Provides a fluent way to instantiate the orbit during application bootstrap.
6872
+ *
6873
+ * @param options - Configuration options.
6874
+ * @returns A new OrbitStream instance.
6875
+ *
6876
+ * @example
6877
+ * ```typescript
6878
+ * const orbit = OrbitStream.configure({ default: 'memory' });
6879
+ * ```
4851
6880
  */
4852
6881
  static configure(options) {
4853
6882
  return new _OrbitStream(options);
4854
6883
  }
4855
6884
  /**
4856
- * Install into PlanetCore.
6885
+ * Installs the Queue system into the Gravito PlanetCore.
6886
+ *
6887
+ * This lifecycle method:
6888
+ * 1. Initializes the `QueueManager`.
6889
+ * 2. Registers the `queue` service in the dependency injection container.
6890
+ * 3. Sets up a global middleware to inject `QueueManager` into the request context (`c.get('queue')`).
6891
+ * 4. Automatically detects and registers database connections if available in the context.
6892
+ * 5. Starts the embedded worker if configured.
6893
+ *
6894
+ * @param core - The PlanetCore instance.
4857
6895
  */
4858
6896
  install(core) {
6897
+ this.core = core;
4859
6898
  this.queueManager = new QueueManager(this.options);
4860
6899
  core.container.instance("queue", this.queueManager);
4861
6900
  core.adapter.use("*", async (c, next) => {
@@ -4883,12 +6922,35 @@ var OrbitStream = class _OrbitStream {
4883
6922
  return await next();
4884
6923
  });
4885
6924
  core.logger.info("[OrbitStream] Installed");
6925
+ if (this.queueManager) {
6926
+ const backend = new StreamEventBackend(this.queueManager);
6927
+ core.hooks.setBackend(backend);
6928
+ core.logger.info("[OrbitStream] HookManager backend switched to StreamEventBackend");
6929
+ }
6930
+ if (this.options.dashboard) {
6931
+ const { DashboardProvider: DashboardProvider2 } = (init_DashboardProvider(), __toCommonJS(DashboardProvider_exports));
6932
+ const dashboard = new DashboardProvider2(this.queueManager);
6933
+ const path2 = typeof this.options.dashboard === "object" ? this.options.dashboard.path : "/_flux";
6934
+ dashboard.registerRoutes(core, path2);
6935
+ core.logger.info(`[OrbitStream] Dashboard API registered at ${path2}`);
6936
+ }
4886
6937
  if (this.options.autoStartWorker && process.env.NODE_ENV === "development" && this.options.workerOptions) {
4887
6938
  this.startWorker(this.options.workerOptions);
4888
6939
  }
4889
6940
  }
4890
6941
  /**
4891
- * Start embedded worker.
6942
+ * Starts the embedded worker process.
6943
+ *
6944
+ * Launches a `Consumer` instance to process jobs in the background.
6945
+ * Throws an error if `QueueManager` is not initialized or if a worker is already running.
6946
+ *
6947
+ * @param options - Consumer configuration options.
6948
+ * @throws {Error} If QueueManager is missing or worker is already active.
6949
+ *
6950
+ * @example
6951
+ * ```typescript
6952
+ * orbit.startWorker({ queues: ['default'] });
6953
+ * ```
4892
6954
  */
4893
6955
  startWorker(options) {
4894
6956
  if (!this.queueManager) {
@@ -4897,13 +6959,31 @@ var OrbitStream = class _OrbitStream {
4897
6959
  if (this.consumer?.isRunning()) {
4898
6960
  throw new Error("Worker is already running");
4899
6961
  }
4900
- this.consumer = new Consumer(this.queueManager, options);
6962
+ const consumerOptions = {
6963
+ ...options,
6964
+ onEvent: (event, payload) => {
6965
+ const signal = this.core?.container.make("signal");
6966
+ if (signal && typeof signal.emit === "function") {
6967
+ signal.emit(`stream:${event}`, payload);
6968
+ }
6969
+ }
6970
+ };
6971
+ this.consumer = new Consumer(this.queueManager, consumerOptions);
4901
6972
  this.consumer.start().catch((error) => {
4902
6973
  console.error("[OrbitStream] Worker error:", error);
4903
6974
  });
4904
6975
  }
4905
6976
  /**
4906
- * Stop embedded worker.
6977
+ * Stops the embedded worker process.
6978
+ *
6979
+ * Gracefully shuts down the consumer, waiting for active jobs to complete.
6980
+ *
6981
+ * @returns A promise that resolves when the worker has stopped.
6982
+ *
6983
+ * @example
6984
+ * ```typescript
6985
+ * await orbit.stopWorker();
6986
+ * ```
4907
6987
  */
4908
6988
  async stopWorker() {
4909
6989
  if (this.consumer) {
@@ -4911,7 +6991,14 @@ var OrbitStream = class _OrbitStream {
4911
6991
  }
4912
6992
  }
4913
6993
  /**
4914
- * Get QueueManager instance.
6994
+ * Retrieves the underlying QueueManager instance.
6995
+ *
6996
+ * @returns The active QueueManager, or undefined if not installed.
6997
+ *
6998
+ * @example
6999
+ * ```typescript
7000
+ * const manager = orbit.getQueueManager();
7001
+ * ```
4915
7002
  */
4916
7003
  getQueueManager() {
4917
7004
  return this.queueManager;
@@ -4935,9 +7022,15 @@ var MySQLPersistence = class {
4935
7022
  this.table = table;
4936
7023
  this.logsTable = logsTable;
4937
7024
  }
7025
+ /**
7026
+ * Archives a single job.
7027
+ */
4938
7028
  async archive(queue, job, status) {
4939
7029
  await this.archiveMany([{ queue, job, status }]);
4940
7030
  }
7031
+ /**
7032
+ * Archives multiple jobs in a batch.
7033
+ */
4941
7034
  async archiveMany(jobs) {
4942
7035
  if (jobs.length === 0) {
4943
7036
  return;
@@ -4961,8 +7054,14 @@ var MySQLPersistence = class {
4961
7054
  }
4962
7055
  }
4963
7056
  }
7057
+ /**
7058
+ * No-op. Use BufferedPersistence if flushing is needed.
7059
+ */
4964
7060
  async flush() {
4965
7061
  }
7062
+ /**
7063
+ * Finds an archived job by ID.
7064
+ */
4966
7065
  async find(queue, id) {
4967
7066
  const row = await this.db.table(this.table).where("queue", queue).where("job_id", id).first();
4968
7067
  if (!row) {
@@ -5006,6 +7105,9 @@ var MySQLPersistence = class {
5006
7105
  }
5007
7106
  /**
5008
7107
  * Search jobs from the archive.
7108
+ *
7109
+ * @param query - Search string (matches ID, payload, or error).
7110
+ * @param options - Filter options.
5009
7111
  */
5010
7112
  async search(query, options = {}) {
5011
7113
  let q = this.db.table(this.table);
@@ -5027,13 +7129,13 @@ var MySQLPersistence = class {
5027
7129
  );
5028
7130
  }
5029
7131
  /**
5030
- * Archive a system log message (buffered).
7132
+ * Archive a system log message.
5031
7133
  */
5032
7134
  async archiveLog(log) {
5033
7135
  await this.archiveLogMany([log]);
5034
7136
  }
5035
7137
  /**
5036
- * Archive multiple log messages (direct batch write).
7138
+ * Archive multiple log messages.
5037
7139
  */
5038
7140
  async archiveLogMany(logs) {
5039
7141
  if (logs.length === 0) {
@@ -5137,7 +7239,7 @@ var MySQLPersistence = class {
5137
7239
  return Number(result) || 0;
5138
7240
  }
5139
7241
  /**
5140
- * Help script to create the necessary table.
7242
+ * Helper to create necessary tables if they don't exist.
5141
7243
  */
5142
7244
  async setupTable() {
5143
7245
  await Promise.all([this.setupJobsTable(), this.setupLogsTable()]);
@@ -5198,9 +7300,17 @@ var SQLitePersistence = class {
5198
7300
  this.table = table;
5199
7301
  this.logsTable = logsTable;
5200
7302
  }
7303
+ /**
7304
+ * Archives a single job.
7305
+ */
5201
7306
  async archive(queue, job, status) {
5202
7307
  await this.archiveMany([{ queue, job, status }]);
5203
7308
  }
7309
+ /**
7310
+ * Archives multiple jobs in a batch.
7311
+ *
7312
+ * Optimized for SQLite by wrapping chunks in transactions.
7313
+ */
5204
7314
  async archiveMany(jobs) {
5205
7315
  if (jobs.length === 0) {
5206
7316
  return;
@@ -5231,8 +7341,14 @@ var SQLitePersistence = class {
5231
7341
  }
5232
7342
  }
5233
7343
  }
7344
+ /**
7345
+ * No-op. Use BufferedPersistence if flushing is needed.
7346
+ */
5234
7347
  async flush() {
5235
7348
  }
7349
+ /**
7350
+ * Finds an archived job by ID.
7351
+ */
5236
7352
  async find(queue, id) {
5237
7353
  const row = await this.db.table(this.table).where("queue", queue).where("job_id", id).first();
5238
7354
  if (!row) {
@@ -5251,7 +7367,11 @@ var SQLitePersistence = class {
5251
7367
  async list(queue, options = {}) {
5252
7368
  let query = this.db.table(this.table).where("queue", queue);
5253
7369
  if (options.status) {
5254
- query = query.where("status", options.status);
7370
+ if (Array.isArray(options.status)) {
7371
+ query = query.whereIn("status", options.status);
7372
+ } else {
7373
+ query = query.where("status", options.status);
7374
+ }
5255
7375
  }
5256
7376
  if (options.jobId) {
5257
7377
  query = query.where("job_id", options.jobId);
@@ -5392,7 +7512,11 @@ var SQLitePersistence = class {
5392
7512
  async count(queue, options = {}) {
5393
7513
  let query = this.db.table(this.table).where("queue", queue);
5394
7514
  if (options.status) {
5395
- query = query.where("status", options.status);
7515
+ if (Array.isArray(options.status)) {
7516
+ query = query.whereIn("status", options.status);
7517
+ } else {
7518
+ query = query.where("status", options.status);
7519
+ }
5396
7520
  }
5397
7521
  if (options.jobId) {
5398
7522
  query = query.where("job_id", options.jobId);
@@ -5454,12 +7578,245 @@ var SQLitePersistence = class {
5454
7578
 
5455
7579
  // src/index.ts
5456
7580
  init_Scheduler();
7581
+
7582
+ // src/workers/WorkerPool.ts
7583
+ var WorkerPool = class {
7584
+ workers = [];
7585
+ config;
7586
+ queue = [];
7587
+ healthCheckTimer = null;
7588
+ stats = {
7589
+ completed: 0,
7590
+ failed: 0
7591
+ };
7592
+ /**
7593
+ * Creates a WorkerPool instance.
7594
+ *
7595
+ * @param config - Configuration options for the pool.
7596
+ */
7597
+ constructor(config = {}) {
7598
+ this.config = {
7599
+ poolSize: config.poolSize ?? 4,
7600
+ minWorkers: config.minWorkers ?? 0,
7601
+ healthCheckInterval: config.healthCheckInterval ?? 3e4,
7602
+ maxExecutionTime: config.maxExecutionTime ?? 3e4,
7603
+ maxMemory: config.maxMemory ?? 0,
7604
+ isolateContexts: config.isolateContexts ?? false,
7605
+ idleTimeout: config.idleTimeout ?? 6e4
7606
+ };
7607
+ this.warmUp();
7608
+ this.startHealthCheck();
7609
+ }
7610
+ /**
7611
+ * Pre-warms the pool by creating the minimum number of workers.
7612
+ */
7613
+ warmUp() {
7614
+ const targetCount = Math.min(this.config.minWorkers, this.config.poolSize);
7615
+ for (let i = 0; i < targetCount; i++) {
7616
+ this.createWorker();
7617
+ }
7618
+ }
7619
+ /**
7620
+ * Creates a new SandboxedWorker and adds it to the pool.
7621
+ *
7622
+ * @returns The newly created worker.
7623
+ */
7624
+ createWorker() {
7625
+ const worker = new SandboxedWorker({
7626
+ maxExecutionTime: this.config.maxExecutionTime,
7627
+ maxMemory: this.config.maxMemory,
7628
+ isolateContexts: this.config.isolateContexts,
7629
+ idleTimeout: this.config.idleTimeout
7630
+ });
7631
+ this.workers.push(worker);
7632
+ return worker;
7633
+ }
7634
+ /**
7635
+ * Retrieves an available worker from the pool.
7636
+ *
7637
+ * Priorities:
7638
+ * 1. Reuse an existing ready worker.
7639
+ * 2. Create a new worker if the pool is not full.
7640
+ * 3. Return `null` if the pool is saturated.
7641
+ *
7642
+ * @returns An available worker or `null`.
7643
+ */
7644
+ getAvailableWorker() {
7645
+ const readyWorker = this.workers.find((w) => w.isReady());
7646
+ if (readyWorker) {
7647
+ return readyWorker;
7648
+ }
7649
+ if (this.workers.length < this.config.poolSize) {
7650
+ return this.createWorker();
7651
+ }
7652
+ return null;
7653
+ }
7654
+ /**
7655
+ * Executes a job using the worker pool.
7656
+ *
7657
+ * If a worker is available, the job starts immediately.
7658
+ * Otherwise, it is added to the pending queue.
7659
+ *
7660
+ * @param job - The serialized job data.
7661
+ * @throws {Error} If execution fails.
7662
+ */
7663
+ async execute(job) {
7664
+ const worker = this.getAvailableWorker();
7665
+ if (worker) {
7666
+ try {
7667
+ await worker.execute(job);
7668
+ this.stats.completed++;
7669
+ } catch (error) {
7670
+ this.stats.failed++;
7671
+ throw error;
7672
+ } finally {
7673
+ this.processQueue();
7674
+ }
7675
+ } else {
7676
+ return new Promise((resolve2, reject) => {
7677
+ this.queue.push({ job, resolve: resolve2, reject });
7678
+ });
7679
+ }
7680
+ }
7681
+ /**
7682
+ * Processes the next job in the queue if a worker is available.
7683
+ */
7684
+ processQueue() {
7685
+ if (this.queue.length === 0) {
7686
+ return;
7687
+ }
7688
+ const worker = this.getAvailableWorker();
7689
+ if (!worker) {
7690
+ return;
7691
+ }
7692
+ const item = this.queue.shift();
7693
+ if (!item) {
7694
+ return;
7695
+ }
7696
+ worker.execute(item.job).then(() => {
7697
+ this.stats.completed++;
7698
+ item.resolve();
7699
+ }).catch((error) => {
7700
+ this.stats.failed++;
7701
+ item.reject(error);
7702
+ }).finally(() => {
7703
+ this.processQueue();
7704
+ });
7705
+ }
7706
+ /**
7707
+ * Starts the periodic health check.
7708
+ */
7709
+ startHealthCheck() {
7710
+ if (this.healthCheckTimer) {
7711
+ return;
7712
+ }
7713
+ this.healthCheckTimer = setInterval(() => {
7714
+ this.performHealthCheck();
7715
+ }, this.config.healthCheckInterval);
7716
+ }
7717
+ /**
7718
+ * Performs a health check on the pool.
7719
+ *
7720
+ * Removes terminated workers and ensures `minWorkers` are available.
7721
+ */
7722
+ performHealthCheck() {
7723
+ this.workers = this.workers.filter((worker) => {
7724
+ if (worker.getState() === "terminated") {
7725
+ worker.terminate().catch(console.error);
7726
+ return false;
7727
+ }
7728
+ return true;
7729
+ });
7730
+ const activeWorkers = this.workers.length;
7731
+ if (activeWorkers < this.config.minWorkers) {
7732
+ const needed = this.config.minWorkers - activeWorkers;
7733
+ for (let i = 0; i < needed; i++) {
7734
+ this.createWorker();
7735
+ }
7736
+ }
7737
+ }
7738
+ /**
7739
+ * Gets the current statistics of the worker pool.
7740
+ *
7741
+ * @returns Snapshot of pool statistics.
7742
+ */
7743
+ getStats() {
7744
+ let ready = 0;
7745
+ let busy = 0;
7746
+ let terminated = 0;
7747
+ for (const worker of this.workers) {
7748
+ const state = worker.getState();
7749
+ if (state === "ready") {
7750
+ ready++;
7751
+ } else if (state === "busy") {
7752
+ busy++;
7753
+ } else if (state === "terminated") {
7754
+ terminated++;
7755
+ }
7756
+ }
7757
+ return {
7758
+ total: this.workers.length,
7759
+ ready,
7760
+ busy,
7761
+ terminated,
7762
+ pending: this.queue.length,
7763
+ completed: this.stats.completed,
7764
+ failed: this.stats.failed
7765
+ };
7766
+ }
7767
+ /**
7768
+ * Shuts down the worker pool.
7769
+ *
7770
+ * Terminates all workers and rejects any pending jobs.
7771
+ */
7772
+ async shutdown() {
7773
+ if (this.healthCheckTimer) {
7774
+ clearInterval(this.healthCheckTimer);
7775
+ this.healthCheckTimer = null;
7776
+ }
7777
+ for (const item of this.queue) {
7778
+ item.reject(new Error("Worker pool is shutting down"));
7779
+ }
7780
+ this.queue = [];
7781
+ await Promise.all(this.workers.map((worker) => worker.terminate().catch(console.error)));
7782
+ this.workers = [];
7783
+ }
7784
+ /**
7785
+ * Waits for all active and pending jobs to complete.
7786
+ *
7787
+ * @param timeout - Maximum wait time in milliseconds. 0 for infinite.
7788
+ * @throws {Error} If the timeout is reached.
7789
+ */
7790
+ async waitForCompletion(timeout = 0) {
7791
+ const startTime = Date.now();
7792
+ return new Promise((resolve2, reject) => {
7793
+ const checkCompletion = () => {
7794
+ const stats = this.getStats();
7795
+ const isComplete = stats.busy === 0 && stats.pending === 0;
7796
+ if (isComplete) {
7797
+ resolve2();
7798
+ return;
7799
+ }
7800
+ if (timeout > 0 && Date.now() - startTime > timeout) {
7801
+ reject(new Error("Wait for completion timeout"));
7802
+ return;
7803
+ }
7804
+ setTimeout(checkCompletion, 100);
7805
+ };
7806
+ checkCompletion();
7807
+ });
7808
+ }
7809
+ };
5457
7810
  // Annotate the CommonJS export names for ESM import in node:
5458
7811
  0 && (module.exports = {
7812
+ BatchConsumer,
5459
7813
  BufferedPersistence,
7814
+ BullMQDriver,
5460
7815
  ClassNameSerializer,
5461
7816
  Consumer,
5462
7817
  DatabaseDriver,
7818
+ DistributedLock,
7819
+ GrpcDriver,
5463
7820
  Job,
5464
7821
  JsonSerializer,
5465
7822
  KafkaDriver,
@@ -5471,6 +7828,10 @@ init_Scheduler();
5471
7828
  RedisDriver,
5472
7829
  SQLitePersistence,
5473
7830
  SQSDriver,
7831
+ SandboxedWorker,
5474
7832
  Scheduler,
5475
- Worker
7833
+ StreamEventBackend,
7834
+ SystemEventJob,
7835
+ Worker,
7836
+ WorkerPool
5476
7837
  });