@gravito/stream 2.0.1 → 2.0.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.d.ts CHANGED
@@ -1,132 +1,639 @@
1
1
  import { EventEmitter } from 'node:events';
2
- import { GravitoOrbit, PlanetCore } from '@gravito/core';
2
+ import { GravitoOrbit, PlanetCore, EventBackend, EventTask } from '@gravito/core';
3
3
  import { ConnectionContract } from '@gravito/atlas';
4
4
 
5
+ /**
6
+ * Defines the contract for queueable items (jobs) with fluent configuration.
7
+ *
8
+ * This interface enables method chaining for configuring job properties such as
9
+ * target queue, connection, delay, and priority before dispatching. It ensures
10
+ * consistent API surface across different job implementations.
11
+ *
12
+ * @public
13
+ * @example
14
+ * ```typescript
15
+ * class MyJob implements Queueable {
16
+ * queueName?: string
17
+ * // ... implementation
18
+ * onQueue(queue: string): this {
19
+ * this.queueName = queue
20
+ * return this
21
+ * }
22
+ * // ...
23
+ * }
24
+ * ```
25
+ */
26
+ interface Queueable {
27
+ /**
28
+ * The specific queue name where the job should be processed.
29
+ *
30
+ * If not set, the default queue for the connection will be used.
31
+ */
32
+ queueName?: string;
33
+ /**
34
+ * The connection name (e.g., 'redis', 'sqs') to use for this job.
35
+ *
36
+ * If not set, the default connection configured in QueueManager will be used.
37
+ */
38
+ connectionName?: string;
39
+ /**
40
+ * The number of seconds to delay the job execution.
41
+ */
42
+ delaySeconds?: number;
43
+ /**
44
+ * The priority level of the job.
45
+ */
46
+ priority?: string | number;
47
+ /**
48
+ * Sets the target queue for the job.
49
+ *
50
+ * @param queue - The name of the queue to push the job to.
51
+ * @returns The instance for method chaining.
52
+ *
53
+ * @example
54
+ * ```typescript
55
+ * job.onQueue('notifications');
56
+ * ```
57
+ */
58
+ onQueue(queue: string): this;
59
+ /**
60
+ * Sets the target connection for the job.
61
+ *
62
+ * @param connection - The name of the connection to use.
63
+ * @returns The instance for method chaining.
64
+ *
65
+ * @example
66
+ * ```typescript
67
+ * job.onConnection('sqs');
68
+ * ```
69
+ */
70
+ onConnection(connection: string): this;
71
+ /**
72
+ * Sets the priority of the job.
73
+ *
74
+ * @param priority - The priority level (e.g., 'high', 'low', 10).
75
+ * @returns The instance for method chaining.
76
+ *
77
+ * @example
78
+ * ```typescript
79
+ * job.withPriority('critical');
80
+ * ```
81
+ */
82
+ withPriority(priority: string | number): this;
83
+ /**
84
+ * Sets a delay before the job is available for processing.
85
+ *
86
+ * @param delay - The delay in seconds.
87
+ * @returns The instance for method chaining.
88
+ *
89
+ * @example
90
+ * ```typescript
91
+ * job.delay(300); // 5 minutes
92
+ * ```
93
+ */
94
+ delay(delay: number): this;
95
+ }
96
+
97
+ /**
98
+ * Abstract base class for all background jobs.
99
+ *
100
+ * This class serves as the foundation for creating queueable tasks. It implements the `Queueable`
101
+ * interface for fluent configuration and provides the core structure for defining execution logic (`handle`)
102
+ * and failure handling (`failed`).
103
+ *
104
+ * Subclasses must implement the `handle` method.
105
+ *
106
+ * @public
107
+ * @example
108
+ * ```typescript
109
+ * export class SendEmailJob extends Job {
110
+ * constructor(private email: string, private subject: string) {
111
+ * super();
112
+ * }
113
+ *
114
+ * async handle(): Promise<void> {
115
+ * await emailService.send(this.email, this.subject);
116
+ * }
117
+ * }
118
+ *
119
+ * // Usage
120
+ * await queue.push(new SendEmailJob('user@example.com', 'Welcome'))
121
+ * .onQueue('emails')
122
+ * .delay(60);
123
+ * ```
124
+ */
125
+ declare abstract class Job implements Queueable {
126
+ /**
127
+ * Unique identifier for the job instance.
128
+ *
129
+ * Assigned automatically when the job is pushed to the queue.
130
+ */
131
+ id?: string;
132
+ /**
133
+ * The name of the queue where this job will be processed.
134
+ */
135
+ queueName?: string;
136
+ /**
137
+ * The name of the connection used to transport this job.
138
+ */
139
+ connectionName?: string;
140
+ /**
141
+ * Delay in seconds before the job becomes available for processing.
142
+ */
143
+ delaySeconds?: number;
144
+ /**
145
+ * The current attempt number (starts at 1).
146
+ */
147
+ attempts?: number;
148
+ /**
149
+ * The maximum number of retry attempts allowed.
150
+ *
151
+ * Can be overridden by the worker configuration or per-job using `maxAttempts`.
152
+ */
153
+ maxAttempts?: number;
154
+ /**
155
+ * Group ID for sequential processing.
156
+ *
157
+ * Jobs with the same `groupId` will be processed in strict order (FIFO)
158
+ * if the consumer supports it.
159
+ */
160
+ groupId?: string;
161
+ /**
162
+ * Priority level of the job.
163
+ */
164
+ priority?: string | number;
165
+ /**
166
+ * Initial delay in seconds before the first retry attempt.
167
+ *
168
+ * Used for exponential backoff calculation.
169
+ */
170
+ retryAfterSeconds?: number;
171
+ /**
172
+ * Multiplier applied to the retry delay for each subsequent attempt.
173
+ *
174
+ * Used for exponential backoff calculation.
175
+ */
176
+ retryMultiplier?: number;
177
+ /**
178
+ * Sets the target queue for the job.
179
+ *
180
+ * @param queue - The name of the target queue.
181
+ * @returns The job instance for chaining.
182
+ *
183
+ * @example
184
+ * ```typescript
185
+ * job.onQueue('billing');
186
+ * ```
187
+ */
188
+ onQueue(queue: string): this;
189
+ /**
190
+ * Sets the target connection for the job.
191
+ *
192
+ * @param connection - The name of the connection (e.g., 'redis').
193
+ * @returns The job instance for chaining.
194
+ *
195
+ * @example
196
+ * ```typescript
197
+ * job.onConnection('sqs-primary');
198
+ * ```
199
+ */
200
+ onConnection(connection: string): this;
201
+ /**
202
+ * Sets the priority of the job.
203
+ *
204
+ * @param priority - The priority level (e.g., 'high', 10).
205
+ * @returns The job instance for chaining.
206
+ *
207
+ * @example
208
+ * ```typescript
209
+ * job.withPriority('high');
210
+ * ```
211
+ */
212
+ withPriority(priority: string | number): this;
213
+ /**
214
+ * Delays the job execution.
215
+ *
216
+ * @param delay - Delay in seconds.
217
+ * @returns The job instance for chaining.
218
+ *
219
+ * @example
220
+ * ```typescript
221
+ * job.delay(60); // Run after 1 minute
222
+ * ```
223
+ */
224
+ delay(delay: number): this;
225
+ /**
226
+ * Configures the exponential backoff strategy for retries.
227
+ *
228
+ * @param seconds - Initial delay in seconds before the first retry.
229
+ * @param multiplier - Factor by which the delay increases for each subsequent attempt (default: 2).
230
+ * @returns The job instance for chaining.
231
+ *
232
+ * @example
233
+ * ```typescript
234
+ * // Wait 5s, then 10s, then 20s...
235
+ * job.backoff(5, 2);
236
+ * ```
237
+ */
238
+ backoff(seconds: number, multiplier?: number): this;
239
+ /**
240
+ * Calculates the delay for the next retry attempt based on the backoff strategy.
241
+ *
242
+ * Uses the formula: `initialDelay * multiplier^(attempt - 1)`, capped at 1 hour.
243
+ *
244
+ * @param attempt - The current attempt number (1-based).
245
+ * @returns The calculated delay in milliseconds.
246
+ *
247
+ * @example
248
+ * ```typescript
249
+ * const nextDelay = job.getRetryDelay(2);
250
+ * ```
251
+ */
252
+ getRetryDelay(attempt: number): number;
253
+ /**
254
+ * Contains the main business logic of the job.
255
+ *
256
+ * This method is called by the worker to process the job.
257
+ * Implementations should be idempotent if possible.
258
+ *
259
+ * @throws {Error} If the job fails and should be retried.
260
+ */
261
+ abstract handle(): Promise<void>;
262
+ /**
263
+ * Optional handler for when the job has permanently failed.
264
+ *
265
+ * Called when the job has exhausted all retry attempts.
266
+ * Useful for cleaning up resources, sending alerts, or logging.
267
+ *
268
+ * @param _error - The error that caused the final failure.
269
+ *
270
+ * @example
271
+ * ```typescript
272
+ * async failed(error: Error) {
273
+ * await notifyAdmin(`Job failed: ${error.message}`);
274
+ * }
275
+ * ```
276
+ */
277
+ failed(_error: Error): Promise<void>;
278
+ }
279
+
5
280
  /**
6
281
  * Represents a job that has been serialized for storage in a queue.
282
+ *
283
+ * This interface defines the data structure used to persist jobs in the underlying
284
+ * storage mechanism (e.g., Redis, Database, SQS). It encapsulates all metadata
285
+ * required for processing, retries, and lifecycle management.
286
+ *
7
287
  * @public
288
+ * @example
289
+ * ```typescript
290
+ * const job: SerializedJob = {
291
+ * id: 'job-123',
292
+ * type: 'json',
293
+ * data: '{"userId": 1}',
294
+ * createdAt: Date.now()
295
+ * };
296
+ * ```
8
297
  */
9
298
  interface SerializedJob {
10
- /** Unique job identifier */
299
+ /**
300
+ * Unique identifier for the job.
301
+ */
11
302
  id: string;
12
- /** Serializer type: 'json' for plain objects or 'class' for instances */
303
+ /**
304
+ * The serialization format used for the job data.
305
+ *
306
+ * - 'json': Simple JSON objects.
307
+ * - 'class': Serialized class instances (requires class registration).
308
+ * - 'msgpack': Binary MessagePack format.
309
+ */
13
310
  type: 'json' | 'class' | 'msgpack';
14
- /** Serialized data string */
311
+ /**
312
+ * The actual serialized job payload.
313
+ *
314
+ * Contains the business data needed to execute the job.
315
+ */
15
316
  data: string;
16
- /** Fully qualified class name (only used for 'class' type) */
317
+ /**
318
+ * The fully qualified class name of the job.
319
+ *
320
+ * Only required when `type` is 'class' to reconstruct the original object instance.
321
+ */
17
322
  className?: string;
18
- /** Timestamp when the job was created */
323
+ /**
324
+ * The timestamp (in milliseconds) when the job was originally created.
325
+ */
19
326
  createdAt: number;
20
- /** Optional delay in seconds before the job becomes available for processing */
327
+ /**
328
+ * Optional delay in seconds before the job becomes eligible for processing.
329
+ *
330
+ * Used for scheduling future tasks.
331
+ */
21
332
  delaySeconds?: number;
22
- /** Number of times the job has been attempted */
333
+ /**
334
+ * The number of times this job has been attempted so far.
335
+ */
23
336
  attempts?: number;
24
- /** Maximum number of retry attempts before the job is marked as failed */
337
+ /**
338
+ * The maximum number of retry attempts allowed before marking the job as failed.
339
+ */
25
340
  maxAttempts?: number;
26
- /** Group ID for FIFO (strictly sequential) processing */
341
+ /**
342
+ * Group ID for sequential processing.
343
+ *
344
+ * Jobs sharing the same `groupId` are guaranteed to be processed in order (FIFO),
345
+ * provided the consumer supports this feature.
346
+ */
27
347
  groupId?: string;
28
- /** Initial delay in seconds before first retry attempt */
348
+ /**
349
+ * The initial delay in seconds before the first retry attempt after a failure.
350
+ */
29
351
  retryAfterSeconds?: number;
30
- /** Multiplier for exponential backoff on retries */
352
+ /**
353
+ * The multiplier applied to the delay for exponential backoff strategies.
354
+ */
31
355
  retryMultiplier?: number;
32
- /** Last error message if the job failed */
356
+ /**
357
+ * The error message from the last failed attempt, if any.
358
+ */
33
359
  error?: string;
34
- /** Timestamp when the job finally failed after max attempts */
360
+ /**
361
+ * The timestamp (in milliseconds) when the job was permanently marked as failed.
362
+ */
35
363
  failedAt?: number;
36
- /** Optional priority for the job (string or numeric) */
364
+ /**
365
+ * The priority level of the job.
366
+ *
367
+ * Higher values or specific strings (e.g., 'high') indicate higher priority.
368
+ */
37
369
  priority?: string | number;
38
370
  }
39
371
  /**
40
- * Statistics for a single queue.
372
+ * Statistics snapshot for a specific queue.
373
+ *
374
+ * Provides insight into the current state of a queue, including pending,
375
+ * delayed, reserved, and failed job counts.
376
+ *
41
377
  * @public
378
+ * @example
379
+ * ```typescript
380
+ * const stats: QueueStats = {
381
+ * queue: 'default',
382
+ * size: 42,
383
+ * delayed: 5,
384
+ * failed: 1
385
+ * };
386
+ * ```
42
387
  */
43
388
  interface QueueStats {
44
- /** Queue name */
389
+ /** Name of the queue */
45
390
  queue: string;
46
- /** Number of pending jobs */
391
+ /** Number of pending jobs waiting to be processed */
47
392
  size: number;
48
- /** Number of delayed jobs (if supported) */
393
+ /** Number of jobs scheduled for future execution */
49
394
  delayed?: number;
50
- /** Number of reserved/in-flight jobs (if supported) */
395
+ /** Number of jobs currently being processed by workers */
51
396
  reserved?: number;
52
- /** Number of failed jobs in DLQ (if supported) */
397
+ /** Number of jobs in the Dead Letter Queue (DLQ) */
53
398
  failed?: number;
54
- /** Additional custom metrics */
399
+ /** Driver-specific custom metrics */
55
400
  metrics?: Record<string, number>;
56
401
  }
57
402
  /**
58
- * Advanced topic options for distributed queues (e.g., Kafka).
403
+ * Snapshot of statistics across all connections and queues.
404
+ *
405
+ * Used by monitoring dashboards to provide a high-level overview
406
+ * of the entire background processing system.
407
+ *
408
+ * @public
409
+ */
410
+ interface GlobalStats {
411
+ /** Map of connection names to their respective queue statistics */
412
+ connections: Record<string, QueueStats[]>;
413
+ /** Total number of pending jobs across all connections */
414
+ totalSize: number;
415
+ /** Total number of failed jobs across all connections */
416
+ totalFailed: number;
417
+ /** Timestamp of the snapshot */
418
+ timestamp: number;
419
+ }
420
+ /**
421
+ * Advanced topic configuration options for distributed queue systems.
422
+ *
423
+ * Used primarily by drivers like Kafka to configure topic properties.
424
+ *
59
425
  * @public
426
+ * @example
427
+ * ```typescript
428
+ * const options: TopicOptions = {
429
+ * partitions: 3,
430
+ * replicationFactor: 2
431
+ * };
432
+ * ```
60
433
  */
61
434
  interface TopicOptions {
62
435
  /** Number of partitions for the topic */
63
436
  partitions?: number;
64
- /** Number of replicas for each partition */
437
+ /** Replication factor for fault tolerance */
65
438
  replicationFactor?: number;
66
- /** Additional driver-specific configurations */
439
+ /** Additional driver-specific configuration key-values */
67
440
  config?: Record<string, string>;
68
441
  }
69
442
  /**
70
- * Database driver configuration.
443
+ * Configuration for the Database driver.
444
+ *
445
+ * Configures the queue system to use a SQL database for job storage.
446
+ *
71
447
  * @public
448
+ * @example
449
+ * ```typescript
450
+ * const config: DatabaseDriverConfig = {
451
+ * driver: 'database',
452
+ * dbService: myDbService,
453
+ * table: 'my_jobs'
454
+ * };
455
+ * ```
72
456
  */
73
457
  interface DatabaseDriverConfig$1 {
458
+ /** Driver type identifier */
74
459
  driver: 'database';
75
460
  /** Database service implementation for executing queries */
76
461
  dbService: any;
77
- /** Optional table name for job storage */
462
+ /** Optional custom table name for storing jobs */
78
463
  table?: string;
79
464
  }
80
465
  /**
81
- * Redis driver configuration.
466
+ * Configuration for the Redis driver.
467
+ *
468
+ * Configures the queue system to use Redis for high-performance job storage.
469
+ *
82
470
  * @public
471
+ * @example
472
+ * ```typescript
473
+ * const config: RedisDriverConfig = {
474
+ * driver: 'redis',
475
+ * client: redisClient,
476
+ * prefix: 'my-app:queue:'
477
+ * };
478
+ * ```
83
479
  */
84
480
  interface RedisDriverConfig$1 {
481
+ /** Driver type identifier */
85
482
  driver: 'redis';
86
483
  /** Redis client instance (ioredis or node-redis compatible) */
87
484
  client: any;
88
- /** Optional prefix for all Redis keys */
485
+ /** Optional key prefix for namespacing */
89
486
  prefix?: string;
90
487
  }
91
488
  /**
92
- * Kafka driver configuration.
489
+ * Configuration for the Kafka driver.
490
+ *
491
+ * Configures the queue system to use Apache Kafka.
492
+ *
493
+ * @example
494
+ * ```typescript
495
+ * const config: KafkaDriverConfig = {
496
+ * driver: 'kafka',
497
+ * client: kafkaClient,
498
+ * consumerGroupId: 'my-group'
499
+ * };
500
+ * ```
93
501
  */
94
502
  interface KafkaDriverConfig$1 {
503
+ /** Driver type identifier */
95
504
  driver: 'kafka';
505
+ /** Kafka client instance */
96
506
  client: any;
507
+ /** Consumer group ID for coordinating workers */
97
508
  consumerGroupId?: string;
98
509
  }
99
510
  /**
100
- * SQS driver configuration.
511
+ * Configuration for the SQS driver.
512
+ *
513
+ * Configures the queue system to use Amazon Simple Queue Service.
514
+ *
101
515
  * @public
516
+ * @example
517
+ * ```typescript
518
+ * const config: SQSDriverConfig = {
519
+ * driver: 'sqs',
520
+ * client: sqsClient,
521
+ * queueUrlPrefix: 'https://sqs.us-east-1.amazonaws.com/123/'
522
+ * };
523
+ * ```
102
524
  */
103
525
  interface SQSDriverConfig$1 {
526
+ /** Driver type identifier */
104
527
  driver: 'sqs';
105
528
  /** Amazon SQS client instance */
106
529
  client: any;
107
- /** Optional prefix for queue URLs */
530
+ /** Optional prefix for resolving queue names to URLs */
108
531
  queueUrlPrefix?: string;
109
- /** The duration (in seconds) that the received messages are hidden from subsequent retrieve requests after being retrieved by a ReceiveMessage request. */
532
+ /** The duration (in seconds) that received messages are hidden from other consumers */
110
533
  visibilityTimeout?: number;
111
- /** The duration (in seconds) for which the call waits for a message to arrive in the queue before returning. */
534
+ /** The duration (in seconds) to wait for a message (Long Polling) */
112
535
  waitTimeSeconds?: number;
113
536
  }
114
537
  /**
115
- * RabbitMQ driver configuration.
538
+ * Configuration for the RabbitMQ driver.
539
+ *
540
+ * Configures the queue system to use RabbitMQ (AMQP).
541
+ *
542
+ * @example
543
+ * ```typescript
544
+ * const config: RabbitMQDriverConfig = {
545
+ * driver: 'rabbitmq',
546
+ * client: amqpConnection,
547
+ * exchange: 'jobs',
548
+ * exchangeType: 'direct'
549
+ * };
550
+ * ```
116
551
  */
117
552
  interface RabbitMQDriverConfig$1 {
553
+ /** Driver type identifier */
118
554
  driver: 'rabbitmq';
555
+ /** AMQP client instance */
119
556
  client: any;
557
+ /** Exchange name to publish to */
120
558
  exchange?: string;
559
+ /** Type of exchange (direct, topic, fanout, headers) */
121
560
  exchangeType?: string;
122
561
  }
123
562
  /**
124
- * Configuration for a specific queue connection.
563
+ * Configuration for the gRPC driver.
564
+ *
565
+ * Configures the queue system to use a remote gRPC service.
566
+ *
567
+ * @public
568
+ * @example
569
+ * ```typescript
570
+ * const config: GrpcDriverConfig = {
571
+ * driver: 'grpc',
572
+ * url: 'localhost:50051',
573
+ * protoUser: 'myuser',
574
+ * protoPassword: 'mypassword',
575
+ * serviceName: 'QueueService',
576
+ * packageName: 'stream'
577
+ * };
578
+ * ```
579
+ */
580
+ interface GrpcDriverConfig {
581
+ /** Driver type identifier */
582
+ driver: 'grpc';
583
+ /** The gRPC server URL (host:port) */
584
+ url: string;
585
+ /** Path to the .proto file (optional, defaults to built-in) */
586
+ protoPath?: string;
587
+ /** The package name defined in the .proto file (default: 'stream') */
588
+ packageName?: string;
589
+ /** The service name defined in the .proto file (default: 'QueueService') */
590
+ serviceName?: string;
591
+ /** Optional credentials/metadata for connection */
592
+ credentials?: {
593
+ rootCerts?: Buffer;
594
+ privateKey?: Buffer;
595
+ certChain?: Buffer;
596
+ };
597
+ }
598
+ /**
599
+ * Configuration for the BullMQ driver.
600
+ *
601
+ * Configures the queue system to use BullMQ (backed by Redis).
602
+ *
603
+ * @public
604
+ * @example
605
+ * ```typescript
606
+ * import { Queue } from 'bullmq'
607
+ * import Redis from 'ioredis'
608
+ *
609
+ * const redis = new Redis()
610
+ * const queue = new Queue('gravito-events', { connection: redis })
611
+ * const config: BullMQDriverConfig = {
612
+ * driver: 'bullmq',
613
+ * queue: queue
614
+ * };
615
+ * ```
616
+ */
617
+ interface BullMQDriverConfig$1 {
618
+ /** Driver type identifier */
619
+ driver: 'bullmq';
620
+ /** BullMQ Queue instance */
621
+ queue: any;
622
+ /** Optional BullMQ Worker instance */
623
+ worker?: any;
624
+ /** Key prefix for queue namespacing */
625
+ prefix?: string;
626
+ /** Enable debug logging */
627
+ debug?: boolean;
628
+ }
629
+ /**
630
+ * Union type for all supported queue connection configurations.
631
+ *
125
632
  * @public
126
633
  */
127
634
  type QueueConnectionConfig = {
128
635
  driver: 'memory';
129
- } | DatabaseDriverConfig$1 | RedisDriverConfig$1 | KafkaDriverConfig$1 | SQSDriverConfig$1 | RabbitMQDriverConfig$1 | {
636
+ } | DatabaseDriverConfig$1 | RedisDriverConfig$1 | KafkaDriverConfig$1 | SQSDriverConfig$1 | RabbitMQDriverConfig$1 | GrpcDriverConfig | BullMQDriverConfig$1 | {
130
637
  driver: 'nats';
131
638
  [key: string]: unknown;
132
639
  } | {
@@ -134,91 +641,137 @@ type QueueConnectionConfig = {
134
641
  [key: string]: unknown;
135
642
  };
136
643
  /**
137
- * Queue manager config.
644
+ * Global configuration for the QueueManager.
645
+ *
646
+ * Defines available connections, serialization settings, and system-wide behaviors.
647
+ *
648
+ * @example
649
+ * ```typescript
650
+ * const config: QueueConfig = {
651
+ * default: 'redis',
652
+ * connections: {
653
+ * redis: { driver: 'redis', client: redis }
654
+ * },
655
+ * debug: true
656
+ * };
657
+ * ```
138
658
  */
139
659
  interface QueueConfig {
140
660
  /**
141
- * Default connection name.
661
+ * The name of the default connection to use when none is specified.
142
662
  */
143
663
  default?: string;
144
664
  /**
145
- * Connection configs.
665
+ * Map of connection names to their configurations.
146
666
  */
147
667
  connections?: Record<string, QueueConnectionConfig>;
148
668
  /**
149
- * Default serializer type.
669
+ * The default serialization format to use for jobs.
150
670
  */
151
671
  defaultSerializer?: 'json' | 'class' | 'msgpack';
152
672
  /**
153
- * Whether to enable serialization caching.
154
- * If true, re-queuing the same Job instance will use the cached serialized data.
673
+ * Whether to cache serialized job data.
674
+ *
675
+ * If true, re-queuing the same Job instance will reuse the cached serialized string,
676
+ * improving performance for frequently pushed jobs.
677
+ *
155
678
  * @default false
156
679
  */
157
680
  useSerializationCache?: boolean;
158
681
  /**
159
- * Enable verbose debug logging for QueueManager and Consumer.
682
+ * Enable verbose debug logging.
683
+ *
684
+ * Useful for troubleshooting queue operations and consumer behavior.
685
+ *
160
686
  * @default false
161
687
  */
162
688
  debug?: boolean;
163
689
  /**
164
- * Persistence configuration (SQL Archive).
690
+ * Configuration for the persistence layer (SQL Archive).
165
691
  */
166
692
  persistence?: {
167
693
  /**
168
- * Persistence adapter instance or config.
694
+ * The persistence adapter instance used to store archived jobs.
169
695
  */
170
696
  adapter: PersistenceAdapter;
171
697
  /**
172
- * Whether to automatically archive completed jobs.
698
+ * Whether to automatically archive jobs upon successful completion.
173
699
  */
174
700
  archiveCompleted?: boolean;
175
701
  /**
176
- * Whether to automatically archive failed jobs.
702
+ * Whether to automatically archive jobs upon permanent failure.
177
703
  */
178
704
  archiveFailed?: boolean;
179
705
  /**
180
- * Whether to archive jobs immediately upon enqueue (Audit Mode).
706
+ * Whether to archive jobs immediately when they are enqueued (Audit Mode).
707
+ *
181
708
  * @default false
182
709
  */
183
710
  archiveEnqueued?: boolean;
184
711
  /**
185
- * Buffer size for batched writes.
186
- * If set, wraps the adapter in BufferedPersistence.
712
+ * Buffer size for batched writes to the archive.
713
+ *
714
+ * If set, wraps the adapter in a `BufferedPersistence` decorator to improve throughput.
187
715
  */
188
716
  bufferSize?: number;
189
717
  /**
190
- * Flush interval in ms for batched writes.
191
- * If set, wraps the adapter in BufferedPersistence.
718
+ * Maximum time (in milliseconds) to wait before flushing the write buffer.
192
719
  */
193
720
  flushInterval?: number;
194
721
  };
195
722
  }
196
723
  /**
197
- * Persistence Adapter Interface
198
- * Used for long-term archiving of jobs in a SQL database.
724
+ * Interface for persistence adapters.
725
+ *
726
+ * Defines the contract for storing long-term history of jobs in a permanent storage
727
+ * (typically a SQL database).
728
+ *
729
+ * @example
730
+ * ```typescript
731
+ * class MyPersistence implements PersistenceAdapter {
732
+ * async archive(queue, job, status) { ... }
733
+ * // ...
734
+ * }
735
+ * ```
199
736
  */
200
737
  interface PersistenceAdapter {
201
738
  /**
202
- * Archive a job.
739
+ * Archive a single job.
740
+ *
741
+ * @param queue - The name of the queue.
742
+ * @param job - The serialized job data.
743
+ * @param status - The final status of the job ('completed', 'failed', etc.).
744
+ * @returns A promise that resolves when the job is archived.
203
745
  */
204
746
  archive(queue: string, job: SerializedJob, status: 'completed' | 'failed' | 'waiting' | string): Promise<void>;
205
747
  /**
206
- * Find a job in the archive.
748
+ * Find a specific job in the archive.
749
+ *
750
+ * @param queue - The name of the queue.
751
+ * @param id - The job ID.
752
+ * @returns The serialized job if found, or null.
207
753
  */
208
754
  find(queue: string, id: string): Promise<SerializedJob | null>;
209
755
  /**
210
- * List jobs from the archive.
756
+ * List jobs from the archive based on criteria.
757
+ *
758
+ * @param queue - The name of the queue.
759
+ * @param options - Filtering and pagination options.
760
+ * @returns A list of matching serialized jobs.
211
761
  */
212
762
  list(queue: string, options?: {
213
763
  limit?: number;
214
764
  offset?: number;
215
- status?: 'completed' | 'failed' | 'waiting' | string;
765
+ status?: 'completed' | 'failed' | 'waiting' | string | string[];
216
766
  jobId?: string;
217
767
  startTime?: Date;
218
768
  endTime?: Date;
219
769
  }): Promise<SerializedJob[]>;
220
770
  /**
221
- * Archive multiple jobs (batch write).
771
+ * Archive multiple jobs in a single batch operation.
772
+ *
773
+ * @param jobs - Array of job data to archive.
774
+ * @returns A promise that resolves when all jobs are archived.
222
775
  */
223
776
  archiveMany?(jobs: Array<{
224
777
  queue: string;
@@ -227,23 +780,35 @@ interface PersistenceAdapter {
227
780
  }>): Promise<void>;
228
781
  /**
229
782
  * Remove old data from the archive.
783
+ *
784
+ * @param days - Retention period in days; older records will be deleted.
785
+ * @returns The number of records deleted.
230
786
  */
231
787
  cleanup(days: number): Promise<number>;
232
788
  /**
233
- * Flush any buffered data.
789
+ * Flush any buffered data to storage.
790
+ *
791
+ * @returns A promise that resolves when the flush is complete.
234
792
  */
235
793
  flush?(): Promise<void>;
236
794
  /**
237
- * Count jobs in the archive.
795
+ * Count jobs in the archive matching specific criteria.
796
+ *
797
+ * @param queue - The name of the queue.
798
+ * @param options - Filtering options.
799
+ * @returns The total count of matching jobs.
238
800
  */
239
801
  count(queue: string, options?: {
240
- status?: 'completed' | 'failed' | 'waiting' | string;
802
+ status?: 'completed' | 'failed' | 'waiting' | string | string[];
241
803
  jobId?: string;
242
804
  startTime?: Date;
243
805
  endTime?: Date;
244
806
  }): Promise<number>;
245
807
  /**
246
808
  * Archive a system log message.
809
+ *
810
+ * @param log - The log entry to archive.
811
+ * @returns A promise that resolves when the log is archived.
247
812
  */
248
813
  archiveLog(log: {
249
814
  level: string;
@@ -253,7 +818,10 @@ interface PersistenceAdapter {
253
818
  timestamp: Date;
254
819
  }): Promise<void>;
255
820
  /**
256
- * Archive multiple log messages (batch write).
821
+ * Archive multiple log messages in a batch.
822
+ *
823
+ * @param logs - Array of log entries to archive.
824
+ * @returns A promise that resolves when logs are archived.
257
825
  */
258
826
  archiveLogMany?(logs: Array<{
259
827
  level: string;
@@ -264,6 +832,9 @@ interface PersistenceAdapter {
264
832
  }>): Promise<void>;
265
833
  /**
266
834
  * List system logs from the archive.
835
+ *
836
+ * @param options - Filtering and pagination options.
837
+ * @returns A list of matching log entries.
267
838
  */
268
839
  listLogs(options?: {
269
840
  limit?: number;
@@ -277,6 +848,9 @@ interface PersistenceAdapter {
277
848
  }): Promise<any[]>;
278
849
  /**
279
850
  * Count system logs in the archive.
851
+ *
852
+ * @param options - Filtering options.
853
+ * @returns The total count of matching logs.
280
854
  */
281
855
  countLogs(options?: {
282
856
  level?: string;
@@ -288,18 +862,31 @@ interface PersistenceAdapter {
288
862
  }): Promise<number>;
289
863
  }
290
864
  /**
291
- * Options when pushing a job.
865
+ * Options used when pushing a job to the queue.
866
+ *
867
+ * Allows customizing delivery behavior, such as delays, priority, and ordering.
868
+ *
869
+ * @example
870
+ * ```typescript
871
+ * const options: JobPushOptions = {
872
+ * priority: 'high',
873
+ * groupId: 'user-123'
874
+ * };
875
+ * ```
292
876
  */
293
877
  interface JobPushOptions {
294
878
  /**
295
- * Group ID for FIFO ordering (e.g. userId).
296
- * If set, jobs with the same groupId will be processed strictly sequentially.
879
+ * Group ID for FIFO ordering.
880
+ *
881
+ * If provided, jobs with the same `groupId` are guaranteed to be processed strictly
882
+ * sequentially. This is useful for event streams where order matters (e.g., per-user events).
297
883
  */
298
884
  groupId?: string;
299
885
  /**
300
- * Job priority.
301
- * Higher priority jobs are processed first (if supported by driver).
302
- * Example: 'high', 'low', 'critical'
886
+ * Job priority level.
887
+ *
888
+ * Higher priority jobs are processed before lower priority ones, depending on driver support.
889
+ * Common values: 'critical', 'high', 'default', 'low'.
303
890
  */
304
891
  priority?: string | number;
305
892
  }
@@ -307,122 +894,143 @@ interface JobPushOptions {
307
894
  /**
308
895
  * Queue driver interface.
309
896
  *
310
- * All queue drivers must implement this interface.
311
- * Defines basic queue operations plus optional enterprise-grade capabilities.
897
+ * Defines the contract that all storage backends (Redis, Database, SQS, etc.) must implement
898
+ * to be compatible with the QueueManager. It covers the basic operations for a job queue:
899
+ * push, pop, size, and clear.
312
900
  *
901
+ * Advanced capabilities like rate limiting, reliable delivery (acknowledgements), and
902
+ * batch operations are optional but recommended for high-performance drivers.
903
+ *
904
+ * @public
313
905
  * @example
314
906
  * ```typescript
315
- * class MyDriver implements QueueDriver {
316
- * async push(queue: string, job: SerializedJob, options?: JobPushOptions): Promise<void> {
317
- * // push a job
318
- * }
319
- *
320
- * async pop(queue: string): Promise<SerializedJob | null> {
321
- * // pop a job
322
- * }
323
- *
324
- * async complete(queue: string, job: SerializedJob): Promise<void> {
325
- * // job completed (for FIFO handling)
326
- * }
327
- *
328
- * async size(queue: string): Promise<number> {
329
- * // queue size
907
+ * class MyCustomDriver implements QueueDriver {
908
+ * async push(queue: string, job: SerializedJob) {
909
+ * // ... write to storage
330
910
  * }
331
- *
332
- * async clear(queue: string): Promise<void> {
333
- * // clear queue
911
+ * async pop(queue: string) {
912
+ * // ... read from storage
913
+ * return job;
334
914
  * }
915
+ * async size(queue: string) { return 0; }
916
+ * async clear(queue: string) {}
335
917
  * }
336
918
  * ```
337
919
  */
338
920
  interface QueueDriver {
339
921
  /**
340
- * Push a job to a queue.
341
- * @param queue - Queue name
342
- * @param job - Serialized job
343
- * @param options - Push options (e.g. groupId)
922
+ * Pushes a job onto the specified queue.
923
+ *
924
+ * @param queue - The name of the queue.
925
+ * @param job - The serialized job data.
926
+ * @param options - Optional parameters like priority or group ID.
927
+ * @returns A promise that resolves when the job is successfully stored.
344
928
  */
345
929
  push(queue: string, job: SerializedJob, options?: JobPushOptions): Promise<void>;
346
930
  /**
347
- * Pop a job from a queue (non-blocking).
348
- * @param queue - Queue name
349
- * @returns Serialized job, or `null` if the queue is empty
931
+ * Retrieves and removes the next job from the queue (FIFO).
932
+ *
933
+ * @param queue - The name of the queue.
934
+ * @returns The serialized job if available, or `null` if the queue is empty.
350
935
  */
351
936
  pop(queue: string): Promise<SerializedJob | null>;
352
937
  /**
353
- * Pop a job from a queue (blocking).
354
- * @param queues - Queue name or array of queue names
355
- * @param timeout - Timeout in seconds
356
- * @returns Serialized job, or `null` if timeout reached
938
+ * Blocking version of `pop`. Waits for a job to arrive if the queue is empty.
939
+ *
940
+ * @param queues - A single queue name or an array of queues to listen to.
941
+ * @param timeout - The maximum time to wait in seconds (0 means indefinite).
942
+ * @returns The serialized job if one arrives within the timeout, or `null`.
357
943
  */
358
944
  popBlocking?(queues: string | string[], timeout: number): Promise<SerializedJob | null>;
359
945
  /**
360
- * Mark a job as completed (used for FIFO/Group handling).
361
- * @param queue - Queue name
362
- * @param job - Serialized job
946
+ * Marks a job as completed.
947
+ *
948
+ * Used primarily by drivers that support advanced flow control (like FIFO groups)
949
+ * or explicit acknowledgement (like SQS/RabbitMQ).
950
+ *
951
+ * @param queue - The name of the queue.
952
+ * @param job - The job that was completed.
363
953
  */
364
954
  complete?(queue: string, job: SerializedJob): Promise<void>;
365
955
  /**
366
- * Get queue size.
367
- * @param queue - Queue name
368
- * @returns Number of jobs in the queue
956
+ * Returns the number of jobs currently waiting in the queue.
957
+ *
958
+ * @param queue - The name of the queue.
959
+ * @returns The count of pending jobs.
369
960
  */
370
961
  size(queue: string): Promise<number>;
371
962
  /**
372
- * Clear a queue.
373
- * @param queue - Queue name
963
+ * Removes all jobs from the specified queue.
964
+ *
965
+ * @param queue - The name of the queue to purge.
374
966
  */
375
967
  clear(queue: string): Promise<void>;
376
968
  /**
377
- * Mark a job as permanently failed (move to DLQ).
378
- * @param queue - Queue name
379
- * @param job - Serialized job with error info
969
+ * Moves a job to the Dead Letter Queue (DLQ) after repeated failures.
970
+ *
971
+ * @param queue - The original queue name.
972
+ * @param job - The job data (usually including error information).
380
973
  */
381
974
  fail?(queue: string, job: SerializedJob): Promise<void>;
382
975
  /**
383
- * Get queue statistics including pending, delayed, and failed job counts.
384
- * @param queue - Queue name
976
+ * Retrieves detailed statistics for a queue.
977
+ *
978
+ * @param queue - The name of the queue.
979
+ * @returns An object containing counts for pending, delayed, failed, etc.
385
980
  */
386
981
  stats?(queue: string): Promise<QueueStats>;
387
982
  /**
388
- * Push multiple jobs (optional, higher throughput).
389
- * @param queue - Queue name
390
- * @param jobs - Serialized job array
983
+ * Pushes multiple jobs to the queue in a single batch operation.
984
+ *
985
+ * @param queue - The name of the queue.
986
+ * @param jobs - An array of serialized jobs.
391
987
  */
392
988
  pushMany?(queue: string, jobs: SerializedJob[]): Promise<void>;
393
989
  /**
394
- * Pop multiple jobs (optional, higher throughput).
395
- * @param queue - Queue name
396
- * @param count - Max number of jobs to pop
397
- * @returns Serialized job array
990
+ * Retrieves multiple jobs from the queue in a single batch operation.
991
+ *
992
+ * @param queue - The name of the queue.
993
+ * @param count - The maximum number of jobs to retrieve.
994
+ * @returns An array of serialized jobs.
398
995
  */
399
996
  popMany?(queue: string, count: number): Promise<SerializedJob[]>;
400
997
  /**
401
- * Acknowledge a message (enterprise capability, e.g. Kafka/SQS).
402
- * @param messageId - Message ID
998
+ * Acknowledges a specific message by its ID.
999
+ *
1000
+ * Used for drivers that require explicit acknowledgement (e.g., SQS, Kafka).
1001
+ *
1002
+ * @param messageId - The ID of the message to acknowledge.
403
1003
  */
404
1004
  acknowledge?(messageId: string): Promise<void>;
405
1005
  /**
406
- * Subscribe to a queue (push-based model, e.g. Kafka/SQS).
407
- * @param queue - Queue name
408
- * @param callback - Callback to process jobs
1006
+ * Subscribes to a queue for real-time job processing (Push model).
1007
+ *
1008
+ * Alternative to polling (`pop`). The driver pushes jobs to the callback as they arrive.
1009
+ *
1010
+ * @param queue - The name of the queue.
1011
+ * @param callback - The function to call when a job is received.
409
1012
  */
410
1013
  subscribe?(queue: string, callback: (job: SerializedJob) => Promise<void>): Promise<void>;
411
1014
  /**
412
- * Create a topic (Kafka, etc.).
413
- * @param topic - Topic name
414
- * @param options - Topic options
1015
+ * Creates a new topic or queue (for drivers like Kafka/RabbitMQ).
1016
+ *
1017
+ * @param topic - The name of the topic/queue.
1018
+ * @param options - Configuration options (partitions, replication, etc.).
415
1019
  */
416
1020
  createTopic?(topic: string, options?: TopicOptions): Promise<void>;
417
1021
  /**
418
- * Delete a topic (Kafka, etc.).
419
- * @param topic - Topic name
1022
+ * Deletes a topic or queue.
1023
+ *
1024
+ * @param topic - The name of the topic/queue to delete.
420
1025
  */
421
1026
  deleteTopic?(topic: string): Promise<void>;
422
1027
  /**
423
- * Report worker heartbeat for monitoring.
424
- * @param workerInfo - Worker information
425
- * @param prefix - Optional prefix for monitoring keys
1028
+ * Sends a heartbeat signal for a worker instance.
1029
+ *
1030
+ * Used for monitoring worker health and presence.
1031
+ *
1032
+ * @param workerInfo - Metadata about the worker (ID, status, resources).
1033
+ * @param prefix - Optional key prefix for storage.
426
1034
  */
427
1035
  reportHeartbeat?(workerInfo: {
428
1036
  id: string;
@@ -436,9 +1044,10 @@ interface QueueDriver {
436
1044
  [key: string]: any;
437
1045
  }, prefix?: string): Promise<void>;
438
1046
  /**
439
- * Publish a log message for monitoring.
440
- * @param logPayload - Log payload
441
- * @param prefix - Optional prefix for monitoring channels/keys
1047
+ * Publishes a log entry to the monitoring system.
1048
+ *
1049
+ * @param logPayload - The log data (level, message, context).
1050
+ * @param prefix - Optional key prefix.
442
1051
  */
443
1052
  publishLog?(logPayload: {
444
1053
  level: string;
@@ -449,291 +1058,260 @@ interface QueueDriver {
449
1058
  [key: string]: any;
450
1059
  }, prefix?: string): Promise<void>;
451
1060
  /**
452
- * Check if a queue is rate limited.
453
- * @param queue - Queue name
454
- * @param config - Rate limit configuration
455
- * @returns true if allowed, false if limited
1061
+ * Checks if a specific queue has exceeded its rate limit.
1062
+ *
1063
+ * @param queue - The name of the queue.
1064
+ * @param config - The rate limit rules (max jobs per duration).
1065
+ * @returns `true` if the job is allowed to proceed, `false` if limited.
456
1066
  */
457
1067
  checkRateLimit?(queue: string, config: {
458
1068
  max: number;
459
1069
  duration: number;
460
1070
  }): Promise<boolean>;
461
1071
  /**
462
- * Retry failed jobs from DLQ.
463
- * @param queue - Queue name
464
- * @param count - Optional count (default: all)
465
- * @returns Number of jobs retried
1072
+ * Retries failed jobs from the Dead Letter Queue.
1073
+ *
1074
+ * Moves jobs from the DLQ back to the active queue.
1075
+ *
1076
+ * @param queue - The name of the queue.
1077
+ * @param count - The number of jobs to retry (optional).
1078
+ * @returns The number of jobs actually moved.
466
1079
  */
467
1080
  retryFailed?(queue: string, count?: number): Promise<number>;
468
1081
  /**
469
- * Get failed jobs from DLQ.
470
- * @param queue - Queue name
471
- * @param start - Start index
472
- * @param end - End index
473
- * @returns Array of failed jobs
1082
+ * Retrieves a list of failed jobs from the Dead Letter Queue.
1083
+ *
1084
+ * @param queue - The name of the queue.
1085
+ * @param start - Pagination start index.
1086
+ * @param end - Pagination end index.
1087
+ * @returns An array of failed jobs.
474
1088
  */
475
1089
  getFailed?(queue: string, start?: number, end?: number): Promise<SerializedJob[]>;
476
1090
  /**
477
- * Clear failed jobs from DLQ.
478
- * @param queue - Queue name
1091
+ * Clears the Dead Letter Queue for a specific queue.
1092
+ *
1093
+ * @param queue - The name of the queue.
479
1094
  */
480
1095
  clearFailed?(queue: string): Promise<void>;
1096
+ /**
1097
+ * Lists all queues managed by this driver.
1098
+ *
1099
+ * Useful for monitoring dashboards to discover active queues dynamically.
1100
+ *
1101
+ * @returns A list of queue names.
1102
+ */
1103
+ getQueues?(): Promise<string[]>;
481
1104
  }
482
1105
 
483
1106
  /**
484
- * Queueable interface.
1107
+ * Configuration for a recurring scheduled job.
485
1108
  *
486
- * Classes implementing this interface can be pushed to a queue for execution.
487
- * Provides a fluent API for queue/connection/delay configuration.
1109
+ * Defines the schedule (CRON), the job to execute, and metadata tracking execution times.
488
1110
  *
1111
+ * @public
1112
+ * @since 3.0.0
489
1113
  * @example
490
1114
  * ```typescript
491
- * class MyJob implements Queueable {
492
- * queueName?: string
493
- * connectionName?: string
494
- * delaySeconds?: number
495
- *
496
- * onQueue(queue: string): this {
497
- * this.queueName = queue
498
- * return this
499
- * }
500
- *
501
- * onConnection(connection: string): this {
502
- * this.connectionName = connection
503
- * return this
504
- * }
505
- *
506
- * delay(seconds: number): this {
507
- * this.delaySeconds = seconds
508
- * return this
509
- * }
510
- * }
1115
+ * const config: ScheduledJobConfig = {
1116
+ * id: 'daily-report',
1117
+ * cron: '0 0 * * *',
1118
+ * queue: 'reports',
1119
+ * job: serializedJob,
1120
+ * enabled: true
1121
+ * };
511
1122
  * ```
512
1123
  */
513
- interface Queueable {
514
- /**
515
- * Queue name where the job should be pushed.
516
- */
517
- queueName?: string;
518
- /**
519
- * Connection name the job should use.
520
- */
521
- connectionName?: string;
522
- /**
523
- * Delay before execution (seconds).
524
- */
525
- delaySeconds?: number;
526
- /**
527
- * Job priority.
528
- */
529
- priority?: string | number;
530
- /**
531
- * Set target queue.
532
- * @param queue - Queue name
533
- * @returns Self for fluent chaining
534
- */
535
- onQueue(queue: string): this;
536
- /**
537
- * Set target connection.
538
- * @param connection - Connection name
539
- * @returns Self for fluent chaining
540
- */
541
- onConnection(connection: string): this;
542
- /**
543
- * Set job priority.
544
- * @param priority - Priority level
545
- * @returns Self for fluent chaining
546
- */
547
- withPriority(priority: string | number): this;
548
- /**
549
- * Set delay (seconds).
550
- * @param delay - Delay seconds
551
- * @returns Self for fluent chaining
552
- */
553
- delay(delay: number): this;
1124
+ interface ScheduledJobConfig {
1125
+ /** Unique identifier for the scheduled task. */
1126
+ id: string;
1127
+ /** Cron expression defining the schedule (e.g., '* * * * *'). */
1128
+ cron: string;
1129
+ /** The target queue name where the job should be pushed. */
1130
+ queue: string;
1131
+ /** The serialized job data. */
1132
+ job: SerializedJob;
1133
+ /** Timestamp of the last successful execution in milliseconds. */
1134
+ lastRun?: number;
1135
+ /** Timestamp of the next scheduled execution in milliseconds. */
1136
+ nextRun?: number;
1137
+ /** Whether the scheduled job is active. */
1138
+ enabled: boolean;
554
1139
  }
555
-
556
1140
  /**
557
- * Base Job.
1141
+ * Configuration options for the Scheduler.
558
1142
  *
559
- * All tasks that should be pushed to a queue should extend this class.
560
- * Implements the `Queueable` interface, providing a fluent API for queue/connection/delay.
1143
+ * Defines behavior for scheduling tasks, including distributed lock settings.
561
1144
  *
1145
+ * @public
1146
+ * @since 3.1.0
562
1147
  * @example
563
1148
  * ```typescript
564
- * class SendWelcomeEmail extends Job {
565
- * constructor(private userId: string) {
566
- * super()
567
- * }
568
- *
569
- * async handle(): Promise<void> {
570
- * const user = await User.find(this.userId)
571
- * await mail.send(new WelcomeEmail(user))
572
- * }
573
- * }
574
- *
575
- * // Usage
576
- * await queue.push(new SendWelcomeEmail('123'))
577
- * .onQueue('emails')
578
- * .delay(60)
1149
+ * const options: SchedulerOptions = {
1150
+ * prefix: 'myapp:queue:',
1151
+ * lockTtl: 60000, // Lock held for 60 seconds
1152
+ * lockRefreshInterval: 20000 // Auto-renew every 20 seconds
1153
+ * };
579
1154
  * ```
580
- * @public
581
1155
  */
582
- declare abstract class Job implements Queueable {
583
- /**
584
- * Unique job identifier.
585
- */
586
- id?: string;
587
- /**
588
- * Queue name.
589
- */
590
- queueName?: string;
591
- /**
592
- * Connection name.
593
- */
594
- connectionName?: string;
595
- /**
596
- * Delay before execution (seconds).
597
- */
598
- delaySeconds?: number;
599
- /**
600
- * Current attempt number.
601
- */
602
- attempts?: number;
603
- /**
604
- * Maximum attempts.
605
- */
606
- maxAttempts?: number;
607
- /**
608
- * Group ID for FIFO.
609
- */
610
- groupId?: string;
611
- /**
612
- * Job priority.
613
- */
614
- priority?: string | number;
615
- /**
616
- * Initial retry delay (seconds).
617
- */
618
- retryAfterSeconds?: number;
619
- /**
620
- * Retry delay multiplier.
621
- */
622
- retryMultiplier?: number;
623
- /**
624
- * Set target queue.
625
- */
626
- onQueue(queue: string): this;
627
- /**
628
- * Set target connection.
629
- */
630
- onConnection(connection: string): this;
1156
+ interface SchedulerOptions {
631
1157
  /**
632
- * Set job priority.
633
- * @param priority - 'high', 'low', or number
1158
+ * Prefix for Redis keys.
1159
+ *
1160
+ * @default 'queue:'
634
1161
  */
635
- withPriority(priority: string | number): this;
1162
+ prefix?: string;
636
1163
  /**
637
- * Set delay (seconds).
1164
+ * Time-to-live for the distributed lock in milliseconds.
1165
+ *
1166
+ * Setting a longer TTL ensures long-running tasks are not executed repeatedly
1167
+ * due to lock expiration. Recommended to be 2-3 times the expected execution time.
1168
+ *
1169
+ * @default 60000 (60 seconds)
638
1170
  */
639
- delay(delay: number): this;
1171
+ lockTtl?: number;
640
1172
  /**
641
- * Set retry backoff strategy.
642
- * @param seconds - Initial delay in seconds
643
- * @param multiplier - Multiplier for each subsequent attempt (default: 2)
1173
+ * Interval for automatic lock renewal in milliseconds.
1174
+ *
1175
+ * If set, the lock will be automatically extended every `lockRefreshInterval`.
1176
+ * Recommended to be 1/3 of `lockTtl`.
1177
+ *
1178
+ * @default 20000 (20 seconds)
644
1179
  */
645
- backoff(seconds: number, multiplier?: number): this;
1180
+ lockRefreshInterval?: number;
646
1181
  /**
647
- * Calculate retry delay for the next attempt.
648
- * @param attempt - Current attempt number (1-based)
649
- * @returns Delay in milliseconds
1182
+ * Number of retries when acquiring a lock fails.
1183
+ *
1184
+ * @default 0
650
1185
  */
651
- getRetryDelay(attempt: number): number;
1186
+ lockRetryCount?: number;
652
1187
  /**
653
- * Job handler logic.
1188
+ * Delay between lock acquisition retries in milliseconds.
654
1189
  *
655
- * Subclasses must implement this method.
1190
+ * @default 100
656
1191
  */
657
- abstract handle(): Promise<void>;
1192
+ lockRetryDelay?: number;
658
1193
  /**
659
- * Failure handler (optional).
1194
+ * The interval in milliseconds at which the scheduler checks for due tasks.
660
1195
  *
661
- * Called when the job fails and reaches the maximum number of attempts.
662
- * Subclasses can override to implement custom failure handling.
1196
+ * @default 60000 (1 minute)
1197
+ */
1198
+ tickInterval?: number;
1199
+ /**
1200
+ * The time-to-live for the leader lock in milliseconds.
663
1201
  *
664
- * @param error - Error instance
1202
+ * Ensures that only one node acts as the scheduler leader.
1203
+ * @default 30000 (30 seconds)
665
1204
  */
666
- failed(_error: Error): Promise<void>;
1205
+ leaderTtl?: number;
667
1206
  }
668
-
669
1207
  /**
670
- * Configuration for a recurring scheduled job.
1208
+ * Manages recurring tasks and cron jobs.
1209
+ *
1210
+ * The Scheduler allows you to register jobs to run at specific intervals using CRON syntax.
1211
+ * It uses Redis (or a compatible driver) to coordinate distributed execution, ensuring that
1212
+ * a scheduled job runs only once per interval across multiple scheduler instances.
671
1213
  *
672
1214
  * @public
673
1215
  * @since 3.0.0
674
- */
675
- interface ScheduledJobConfig {
676
- /** Unique identifier for the scheduled task. */
677
- id: string;
678
- /** Cron expression defining the schedule (e.g., '* * * * *'). */
679
- cron: string;
680
- /** The target queue name where the job should be pushed. */
681
- queue: string;
682
- /** The serialized job data. */
683
- job: SerializedJob;
684
- /** Timestamp of the last successful execution in milliseconds. */
685
- lastRun?: number;
686
- /** Timestamp of the next scheduled execution in milliseconds. */
687
- nextRun?: number;
688
- /** Whether the scheduled job is active. */
689
- enabled: boolean;
690
- }
691
- /**
692
- * Scheduler manages recurring (cron) jobs in Gravito.
693
- *
694
- * It uses Redis to store schedule metadata and coordinates distributed
695
- * execution using locks to ensure jobs are triggered exactly once per interval.
696
- *
697
1216
  * @example
698
1217
  * ```typescript
699
- * const scheduler = new Scheduler(queueManager);
1218
+ * const scheduler = manager.getScheduler();
700
1219
  * await scheduler.register({
701
- * id: 'daily-cleanup',
702
- * cron: '0 0 * * *',
703
- * queue: 'default',
704
- * job: myJob.serialize()
1220
+ * id: 'cleanup',
1221
+ * cron: '0 * * * *', // Every hour
1222
+ * job: new CleanupJob()
705
1223
  * });
706
- * ```
707
1224
  *
708
- * @public
709
- * @since 3.0.0
1225
+ * // Automatically start the scheduler loop
1226
+ * await scheduler.start();
1227
+ * ```
710
1228
  */
711
1229
  declare class Scheduler {
712
1230
  private manager;
713
1231
  private prefix;
714
- constructor(manager: QueueManager, options?: {
715
- prefix?: string;
716
- });
1232
+ private lockTtl;
1233
+ private lockRefreshInterval?;
1234
+ private lockRetryCount;
1235
+ private lockRetryDelay;
1236
+ private tickInterval;
1237
+ private leaderTtl;
1238
+ private distributedLock?;
1239
+ private running;
1240
+ private timer;
1241
+ private isLeader;
1242
+ constructor(manager: QueueManager, options?: SchedulerOptions);
717
1243
  private get client();
718
1244
  /**
719
- * Register a scheduled job.
1245
+ * Gets or creates the distributed lock instance.
1246
+ *
1247
+ * @private
1248
+ */
1249
+ private getDistributedLock;
1250
+ /**
1251
+ * Registers a new scheduled job or updates an existing one.
1252
+ *
1253
+ * Calculates the next run time based on the CRON expression and stores the configuration in Redis.
1254
+ *
1255
+ * @param config - The job configuration (excluding nextRun and enabled status which are auto-set).
1256
+ * @throws {Error} If Redis client does not support pipelining.
720
1257
  */
721
1258
  register(config: Omit<ScheduledJobConfig, 'nextRun' | 'enabled'>): Promise<void>;
722
1259
  /**
723
- * Remove a scheduled job.
1260
+ * Removes a scheduled job.
1261
+ *
1262
+ * Deletes the job metadata and schedule entry from Redis.
1263
+ *
1264
+ * @param id - The unique identifier of the scheduled job.
724
1265
  */
725
1266
  remove(id: string): Promise<void>;
726
1267
  /**
727
- * List all scheduled jobs.
1268
+ * Lists all registered scheduled jobs.
1269
+ *
1270
+ * @returns An array of all scheduled job configurations.
728
1271
  */
729
1272
  list(): Promise<ScheduledJobConfig[]>;
730
1273
  /**
731
- * Run a scheduled job immediately (out of schedule).
1274
+ * Starts the automatic scheduler loop.
1275
+ *
1276
+ * Periodically triggers `tick()` to process due jobs. Uses leader election
1277
+ * to ensure that only one node performs the scanning in a multi-node environment.
1278
+ */
1279
+ start(): Promise<void>;
1280
+ /**
1281
+ * Stops the automatic scheduler loop.
1282
+ */
1283
+ stop(): Promise<void>;
1284
+ /**
1285
+ * Acquires the leader lock and performs a tick.
1286
+ *
1287
+ * @private
1288
+ */
1289
+ private performTickWithLeaderElection;
1290
+ /**
1291
+ * Releases the leader lock.
1292
+ *
1293
+ * @private
1294
+ */
1295
+ private releaseLeader;
1296
+ /**
1297
+ * Manually triggers a scheduled job immediately.
1298
+ *
1299
+ * Forces execution of the job regardless of its schedule, without affecting the next scheduled run time.
1300
+ *
1301
+ * @param id - The unique identifier of the scheduled job.
732
1302
  */
733
1303
  runNow(id: string): Promise<void>;
734
1304
  /**
735
- * Process due tasks (TICK).
736
- * This should be called periodically (e.g. every minute).
1305
+ * Checks for and triggers tasks that are due for execution.
1306
+ *
1307
+ * This method should be called periodically (e.g., via a system cron or a dedicated tick loop).
1308
+ * It scans the schedule for tasks with `nextRun <= now`, acquires a distributed lock for each,
1309
+ * pushes them to their queue, and updates the `nextRun` time.
1310
+ *
1311
+ * The distributed lock ensures that in a multi-node environment, each scheduled job is executed
1312
+ * only once per interval, even if multiple scheduler instances are running.
1313
+ *
1314
+ * @returns The number of jobs triggered in this tick.
737
1315
  */
738
1316
  tick(): Promise<number>;
739
1317
  }
@@ -741,54 +1319,53 @@ declare class Scheduler {
741
1319
  /**
742
1320
  * Job serializer interface.
743
1321
  *
744
- * Responsible for serializing and deserializing jobs.
745
- * Supports multiple strategies (JSON, class-name, etc.).
1322
+ * Defines the contract for serializing job objects into storage-friendly formats
1323
+ * (strings/buffers) and deserializing them back into executable Job instances.
746
1324
  *
1325
+ * @public
747
1326
  * @example
748
1327
  * ```typescript
749
- * class MySerializer implements JobSerializer {
750
- * serialize(job: Job): SerializedJob {
751
- * // serialization logic
752
- * }
753
- *
754
- * deserialize(serialized: SerializedJob): Job {
755
- * // deserialization logic
756
- * }
1328
+ * class JSONSerializer implements JobSerializer {
1329
+ * serialize(job) { return { ...job, data: JSON.stringify(job) }; }
1330
+ * deserialize(serialized) { return JSON.parse(serialized.data); }
757
1331
  * }
758
1332
  * ```
759
1333
  */
760
1334
  interface JobSerializer {
761
1335
  /**
762
- * Serialize a job.
763
- * @param job - Job instance
764
- * @returns Serialized job payload
1336
+ * Converts a Job instance into a serializable object.
1337
+ *
1338
+ * @param job - The job instance to serialize.
1339
+ * @returns A `SerializedJob` object containing the data payload and metadata.
765
1340
  */
766
1341
  serialize(job: Job): SerializedJob;
767
1342
  /**
768
- * Deserialize a job.
769
- * @param serialized - Serialized job payload
770
- * @returns Job instance
1343
+ * Reconstructs a Job instance from a serialized object.
1344
+ *
1345
+ * @param serialized - The serialized job data.
1346
+ * @returns A fully hydrated Job instance ready for execution.
771
1347
  */
772
1348
  deserialize(serialized: SerializedJob): Job;
773
1349
  }
774
1350
 
775
1351
  /**
776
- * Queue Manager
1352
+ * The central manager for queue operations.
777
1353
  *
778
- * Manages multiple queue connections and drivers, exposing a unified API for pushing and consuming jobs.
779
- * Supports lazy-loading drivers to keep the core lightweight.
1354
+ * This class manages multiple queue connections and drivers, exposing a unified API for pushing,
1355
+ * popping, and managing jobs. It handles connection pooling, serialization, persistence,
1356
+ * and driver lazy-loading.
780
1357
  *
1358
+ * @public
781
1359
  * @example
782
1360
  * ```typescript
783
1361
  * const manager = new QueueManager({
784
- * default: 'database',
1362
+ * default: 'redis',
785
1363
  * connections: {
786
- * database: { driver: 'database', table: 'jobs' },
787
- * redis: { driver: 'redis', url: 'redis://...' }
1364
+ * redis: { driver: 'redis', client: redisClient }
788
1365
  * }
789
- * })
1366
+ * });
790
1367
  *
791
- * await manager.push(new SendEmail('user@example.com'))
1368
+ * await manager.push(new SendEmailJob('hello@example.com'));
792
1369
  * ```
793
1370
  */
794
1371
  declare class QueueManager {
@@ -805,420 +1382,1138 @@ declare class QueueManager {
805
1382
  */
806
1383
  private log;
807
1384
  /**
808
- * Register a connection.
809
- * @param name - Connection name
810
- * @param config - Connection config
1385
+ * Registers a new queue connection with the manager.
1386
+ *
1387
+ * Dynamically loads the required driver implementation based on the configuration.
1388
+ *
1389
+ * @param name - The name of the connection (e.g., 'primary').
1390
+ * @param config - The configuration object for the driver.
1391
+ * @throws {Error} If the driver type is missing required dependencies or unsupported.
1392
+ *
1393
+ * @example
1394
+ * ```typescript
1395
+ * manager.registerConnection('analytics', { driver: 'sqs', client: sqs });
1396
+ * ```
811
1397
  */
812
1398
  registerConnection(name: string, config: QueueConnectionConfig): void;
813
1399
  /**
814
- * Get a driver for a connection.
815
- * @param connection - Connection name
816
- * @returns Driver instance
1400
+ * Retrieves the driver instance for a specific connection.
1401
+ *
1402
+ * @param connection - The name of the connection.
1403
+ * @returns The configured QueueDriver instance.
1404
+ * @throws {Error} If the connection has not been registered.
1405
+ *
1406
+ * @example
1407
+ * ```typescript
1408
+ * const driver = manager.getDriver('redis');
1409
+ * ```
817
1410
  */
818
1411
  getDriver(connection: string): QueueDriver;
819
1412
  /**
820
- * Get the default connection name.
821
- * @returns Default connection name
1413
+ * Gets the name of the default connection.
1414
+ *
1415
+ * @returns The default connection name.
822
1416
  */
823
1417
  getDefaultConnection(): string;
824
1418
  /**
825
- * Get a serializer.
826
- * @param type - Serializer type
827
- * @returns Serializer instance
1419
+ * Retrieves a serializer instance by type.
1420
+ *
1421
+ * @param type - The serializer type (e.g., 'json', 'class'). If omitted, returns the default serializer.
1422
+ * @returns The JobSerializer instance.
1423
+ * @throws {Error} If the requested serializer type is not found.
828
1424
  */
829
1425
  getSerializer(type?: string): JobSerializer;
830
1426
  /**
831
- * Register Job classes (used by ClassNameSerializer).
832
- * @param jobClasses - Job class array
1427
+ * Registers Job classes for the `ClassNameSerializer`.
1428
+ *
1429
+ * This is required when using 'class' serialization to allow proper hydration of job instances
1430
+ * upon deserialization.
1431
+ *
1432
+ * @param jobClasses - An array of Job class constructors.
1433
+ *
1434
+ * @example
1435
+ * ```typescript
1436
+ * manager.registerJobClasses([SendEmailJob, ProcessOrderJob]);
1437
+ * ```
1438
+ */
1439
+ registerJobClasses(jobClasses: Array<new (...args: unknown[]) => Job>): void;
1440
+ /**
1441
+ * Pushes a single job to the queue.
1442
+ *
1443
+ * Serializes the job, selects the appropriate driver based on job configuration,
1444
+ * and dispatches it. Also handles audit logging if persistence is enabled.
1445
+ *
1446
+ * @template T - The type of the job (extends Job).
1447
+ * @param job - The job instance to enqueue.
1448
+ * @param options - Optional overrides for push behavior (priority, delay, etc.).
1449
+ * @returns The same job instance (for chaining).
1450
+ *
1451
+ * @example
1452
+ * ```typescript
1453
+ * await manager.push(new SendEmailJob('user@example.com'));
1454
+ * ```
1455
+ */
1456
+ push<T extends Job & Queueable>(job: T, options?: JobPushOptions): Promise<T>;
1457
+ /**
1458
+ * Pushes multiple jobs to the queue in a batch.
1459
+ *
1460
+ * Optimizes network requests by batching jobs where possible. Groups jobs by connection
1461
+ * and queue to maximize throughput.
1462
+ *
1463
+ * @template T - The type of the jobs.
1464
+ * @param jobs - An array of job instances to enqueue.
1465
+ * @param options - Configuration for batch size and concurrency.
1466
+ * @returns A promise that resolves when all jobs have been pushed.
1467
+ *
1468
+ * @example
1469
+ * ```typescript
1470
+ * await manager.pushMany(jobs, { batchSize: 500, concurrency: 5 });
1471
+ * ```
1472
+ */
1473
+ pushMany<T extends Job & Queueable>(jobs: T[], options?: {
1474
+ batchSize?: number;
1475
+ concurrency?: number;
1476
+ }): Promise<void>;
1477
+ /**
1478
+ * Pops a single job from the queue.
1479
+ *
1480
+ * Retrieves the next available job from the specified queue.
1481
+ *
1482
+ * @param queue - The queue name (default: 'default').
1483
+ * @param connection - The connection name (defaults to default connection).
1484
+ * @returns A Job instance if found, or `null` if the queue is empty.
1485
+ *
1486
+ * @example
1487
+ * ```typescript
1488
+ * const job = await manager.pop('priority-queue');
1489
+ * if (job) await job.handle();
1490
+ * ```
1491
+ */
1492
+ pop(queue?: string, connection?: string): Promise<Job | null>;
1493
+ /**
1494
+ * Pops multiple jobs from the queue efficiently.
1495
+ *
1496
+ * Attempts to retrieve a batch of jobs from the driver. If the driver does not support
1497
+ * batching, it falls back to sequential popping.
1498
+ *
1499
+ * @param queue - The queue name (default: 'default').
1500
+ * @param count - The maximum number of jobs to retrieve (default: 10).
1501
+ * @param connection - The connection name.
1502
+ * @returns An array of Job instances.
1503
+ *
1504
+ * @example
1505
+ * ```typescript
1506
+ * const jobs = await manager.popMany('default', 50);
1507
+ * ```
1508
+ */
1509
+ popMany(queue?: string, count?: number, connection?: string): Promise<Job[]>;
1510
+ /**
1511
+ * Retrieves the current size of a queue.
1512
+ *
1513
+ * @param queue - The queue name (default: 'default').
1514
+ * @param connection - The connection name.
1515
+ * @returns The number of waiting jobs.
1516
+ *
1517
+ * @example
1518
+ * ```typescript
1519
+ * const count = await manager.size('emails');
1520
+ * ```
1521
+ */
1522
+ size(queue?: string, connection?: string): Promise<number>;
1523
+ /**
1524
+ * Pops a job from the queue with blocking (wait) behavior.
1525
+ *
1526
+ * Waits for a job to become available for the specified timeout duration.
1527
+ * Useful for reducing polling loop frequency.
1528
+ *
1529
+ * @param queues - A queue name or array of queue names to listen to.
1530
+ * @param timeout - Timeout in seconds (0 = block indefinitely).
1531
+ * @param connection - The connection name.
1532
+ * @returns A Job instance if found, or `null` if timed out.
1533
+ *
1534
+ * @example
1535
+ * ```typescript
1536
+ * // Wait up to 30 seconds for a job
1537
+ * const job = await manager.popBlocking('default', 30);
1538
+ * ```
1539
+ */
1540
+ popBlocking(queues?: string | string[], timeout?: number, connection?: string): Promise<Job | null>;
1541
+ /**
1542
+ * Removes all jobs from a specific queue.
1543
+ *
1544
+ * @param queue - The queue name to purge.
1545
+ * @param connection - The connection name.
1546
+ *
1547
+ * @example
1548
+ * ```typescript
1549
+ * await manager.clear('test-queue');
1550
+ * ```
1551
+ */
1552
+ clear(queue?: string, connection?: string): Promise<void>;
1553
+ /**
1554
+ * Retrieves comprehensive statistics for a queue.
1555
+ *
1556
+ * Includes counts for pending, processing, delayed, and failed jobs.
1557
+ *
1558
+ * @param queue - The queue name.
1559
+ * @param connection - The connection name.
1560
+ * @returns A QueueStats object.
1561
+ *
1562
+ * @example
1563
+ * ```typescript
1564
+ * const stats = await manager.stats('default');
1565
+ * console.log(stats.size, stats.failed);
1566
+ * ```
1567
+ */
1568
+ stats(queue?: string, connection?: string): Promise<QueueStats>;
1569
+ /**
1570
+ * Marks a job as successfully completed.
1571
+ *
1572
+ * Removes the job from the processing state and optionally archives it.
1573
+ *
1574
+ * @param job - The job instance that finished.
1575
+ *
1576
+ * @example
1577
+ * ```typescript
1578
+ * await manager.complete(job);
1579
+ * ```
1580
+ */
1581
+ complete<T extends Job & Queueable>(job: T): Promise<void>;
1582
+ /**
1583
+ * Marks a job as failed.
1584
+ *
1585
+ * Moves the job to the failed state (Dead Letter Queue) and optionally archives it.
1586
+ * This is typically called after max retry attempts are exhausted.
1587
+ *
1588
+ * @param job - The job instance that failed.
1589
+ * @param error - The error that caused the failure.
1590
+ *
1591
+ * @example
1592
+ * ```typescript
1593
+ * await manager.fail(job, new Error('Something went wrong'));
1594
+ * ```
1595
+ */
1596
+ fail<T extends Job & Queueable>(job: T, error: Error): Promise<void>;
1597
+ /**
1598
+ * Retrieves the configured persistence adapter.
1599
+ *
1600
+ * @returns The PersistenceAdapter instance, or undefined if not configured.
1601
+ */
1602
+ getPersistence(): PersistenceAdapter | undefined;
1603
+ /**
1604
+ * Gets the Scheduler instance associated with this manager.
1605
+ *
1606
+ * The Scheduler handles delayed jobs and periodic tasks.
1607
+ *
1608
+ * @returns The Scheduler instance.
1609
+ */
1610
+ getScheduler(): Scheduler;
1611
+ /**
1612
+ * Retrieves failed jobs from the Dead Letter Queue.
1613
+ *
1614
+ * @param queue - The queue name.
1615
+ * @param start - The starting index (pagination).
1616
+ * @param end - The ending index (pagination).
1617
+ * @param connection - The connection name.
1618
+ * @returns An array of serialized jobs.
1619
+ *
1620
+ * @example
1621
+ * ```typescript
1622
+ * const failedJobs = await manager.getFailed('default', 0, 10);
1623
+ * ```
1624
+ */
1625
+ getFailed(queue: string, start?: number, end?: number, connection?: string): Promise<SerializedJob[]>;
1626
+ /**
1627
+ * Retries failed jobs from the Dead Letter Queue.
1628
+ *
1629
+ * Moves jobs from the failed state back to the active queue for re-processing.
1630
+ *
1631
+ * @param queue - The queue name.
1632
+ * @param count - The number of jobs to retry.
1633
+ * @param connection - The connection name.
1634
+ * @returns The number of jobs successfully retried.
1635
+ *
1636
+ * @example
1637
+ * ```typescript
1638
+ * await manager.retryFailed('default', 5);
1639
+ * ```
1640
+ */
1641
+ retryFailed(queue: string, count?: number, connection?: string): Promise<number>;
1642
+ /**
1643
+ * Clears all failed jobs from the Dead Letter Queue.
1644
+ *
1645
+ * @param queue - The queue name.
1646
+ * @param connection - The connection name.
1647
+ *
1648
+ * @example
1649
+ * ```typescript
1650
+ * await manager.clearFailed('default');
1651
+ * ```
1652
+ */
1653
+ clearFailed(queue: string, connection?: string): Promise<void>;
1654
+ /**
1655
+ * Retrieves high-level statistics across all registered connections and queues.
1656
+ *
1657
+ * Iterates through all drivers and collects metadata to provide a comprehensive
1658
+ * snapshot of the entire queue system's health.
1659
+ *
1660
+ * @returns A promise resolving to a GlobalStats object.
1661
+ */
1662
+ getGlobalStats(): Promise<GlobalStats>;
1663
+ }
1664
+
1665
+ /**
1666
+ * Configuration options for the BatchConsumer.
1667
+ *
1668
+ * @example
1669
+ * ```typescript
1670
+ * const options: BatchConsumerOptions = {
1671
+ * batchSize: 50,
1672
+ * autoAck: false
1673
+ * };
1674
+ * ```
1675
+ */
1676
+ interface BatchConsumerOptions {
1677
+ /**
1678
+ * The name of the queue to consume from.
1679
+ * @default 'default'
1680
+ */
1681
+ queue?: string;
1682
+ /**
1683
+ * The connection name to use.
1684
+ * @default The default connection of QueueManager
1685
+ */
1686
+ connection?: string;
1687
+ /**
1688
+ * The number of jobs to try to retrieve in each batch.
1689
+ * @default 10
1690
+ */
1691
+ batchSize?: number;
1692
+ /**
1693
+ * The polling interval in milliseconds when the queue is empty.
1694
+ * @default 1000
1695
+ */
1696
+ pollInterval?: number;
1697
+ /**
1698
+ * Whether to automatically complete jobs after the handler returns successfully.
1699
+ *
1700
+ * If set to `false`, the handler function is responsible for calling `manager.complete()`
1701
+ * or `manager.fail()` for each job.
1702
+ *
1703
+ * @default true
1704
+ */
1705
+ autoAck?: boolean;
1706
+ }
1707
+ /**
1708
+ * Specialized consumer for processing jobs in bulk.
1709
+ *
1710
+ * Unlike the standard `Consumer` which processes jobs individually (even if fetched in batches),
1711
+ * the `BatchConsumer` passes an array of jobs to a single handler function. This is ideal for
1712
+ * operations that benefit from bulk processing, such as database inserts or API calls that support batching.
1713
+ *
1714
+ * @public
1715
+ * @example
1716
+ * ```typescript
1717
+ * const consumer = new BatchConsumer(manager, async (jobs) => {
1718
+ * // Process 100 jobs at once
1719
+ * await elasticsearch.bulkIndex(jobs.map(j => j.data));
1720
+ * }, { batchSize: 100 });
1721
+ *
1722
+ * consumer.start();
1723
+ * ```
1724
+ */
1725
+ declare class BatchConsumer {
1726
+ private manager;
1727
+ private handler;
1728
+ private running;
1729
+ private options;
1730
+ constructor(manager: QueueManager, handler: (jobs: Job[]) => Promise<void>, options?: BatchConsumerOptions);
1731
+ /**
1732
+ * Starts the batch consuming loop.
1733
+ *
1734
+ * Continuously polls for batches of jobs and passes them to the handler.
833
1735
  */
834
- registerJobClasses(jobClasses: Array<new (...args: unknown[]) => Job>): void;
1736
+ start(): Promise<void>;
835
1737
  /**
836
- * Push a Job to the queue.
1738
+ * Stops the consumer loop.
837
1739
  *
838
- * @template T - The type of the job.
839
- * @param job - Job instance to push.
840
- * @param options - Push options.
841
- * @returns The same job instance (for fluent chaining).
842
- *
843
- * @example
844
- * ```typescript
845
- * await manager.push(new SendEmailJob('user@example.com'));
846
- * ```
1740
+ * Sets the running flag to false. The loop will exit after the current iteration finishes.
847
1741
  */
848
- push<T extends Job & Queueable>(job: T, options?: JobPushOptions): Promise<T>;
1742
+ stop(): void;
1743
+ }
1744
+
1745
+ /**
1746
+ * Sandboxed Worker Implementation.
1747
+ *
1748
+ * Executes jobs in isolated Worker Threads to provide context isolation,
1749
+ * error containment, and resource limits.
1750
+ *
1751
+ * @public
1752
+ */
1753
+
1754
+ /**
1755
+ * Configuration options for the Sandboxed Worker.
1756
+ */
1757
+ interface SandboxedWorkerConfig {
849
1758
  /**
850
- * Push multiple jobs to the queue.
851
- *
852
- * @template T - The type of the jobs.
853
- * @param jobs - Array of job instances.
854
- * @param options - Bulk push options.
1759
+ * Maximum execution time for a job in milliseconds.
855
1760
  *
856
- * @example
857
- * ```typescript
858
- * await manager.pushMany(jobs, { batchSize: 500, concurrency: 2 });
859
- * ```
1761
+ * Jobs exceeding this duration will be forcefully terminated.
1762
+ * @default 30000 (30 seconds)
860
1763
  */
861
- pushMany<T extends Job & Queueable>(jobs: T[], options?: {
862
- batchSize?: number;
863
- concurrency?: number;
864
- }): Promise<void>;
1764
+ maxExecutionTime?: number;
865
1765
  /**
866
- * Pop a job from the queue.
1766
+ * Maximum memory limit for the worker in MB.
867
1767
  *
868
- * @param queue - Queue name (default: 'default').
869
- * @param connection - Connection name (optional).
870
- * @returns Job instance or null if queue is empty.
871
- *
872
- * @example
873
- * ```typescript
874
- * const job = await manager.pop('emails');
875
- * if (job) await job.handle();
876
- * ```
1768
+ * If the worker exceeds this limit, it will be terminated and restarted.
1769
+ * Note: Relies on `resourceLimits` which may vary by platform.
1770
+ * @default undefined (unlimited)
877
1771
  */
878
- pop(queue?: string, connection?: string): Promise<Job | null>;
1772
+ maxMemory?: number;
879
1773
  /**
880
- * Pop multiple jobs from the queue.
1774
+ * Whether to isolate contexts for each job.
881
1775
  *
882
- * @param queue - Queue name (default: 'default').
883
- * @param count - Number of jobs to pop (default: 10).
884
- * @param connection - Connection name (optional).
885
- * @returns Array of Job instances.
1776
+ * If `true`, a new Worker Thread is created for every job execution.
1777
+ * If `false`, the Worker Thread is reused across multiple jobs.
1778
+ * @default false
886
1779
  */
887
- popMany(queue?: string, count?: number, connection?: string): Promise<Job[]>;
1780
+ isolateContexts?: boolean;
888
1781
  /**
889
- * Get queue size.
1782
+ * Idle timeout for the Worker Thread in milliseconds.
890
1783
  *
891
- * @param queue - Queue name (default: 'default').
892
- * @param connection - Connection name (optional).
893
- * @returns Number of jobs in the queue.
1784
+ * The worker will be terminated if it remains idle for this duration to save resources.
1785
+ * @default 60000 (60 seconds)
894
1786
  */
895
- size(queue?: string, connection?: string): Promise<number>;
896
- /**
897
- * Pop a job from the queue (blocking).
1787
+ idleTimeout?: number;
1788
+ }
1789
+ /**
1790
+ * Sandboxed Worker.
1791
+ *
1792
+ * Manages the lifecycle of a Node.js Worker Thread for job execution.
1793
+ * Provides features like:
1794
+ * - Context Isolation: Run code in a separate thread.
1795
+ * - Timeout Enforcement: Terminate hangs or long-running jobs.
1796
+ * - Memory Limits: Prevent OOM issues affecting the main process.
1797
+ * - Error Containment: Worker crashes do not crash the main application.
1798
+ *
1799
+ * @example
1800
+ * ```typescript
1801
+ * const worker = new SandboxedWorker({
1802
+ * maxExecutionTime: 30000,
1803
+ * maxMemory: 512,
1804
+ * isolateContexts: true
1805
+ * });
1806
+ *
1807
+ * await worker.execute(serializedJob);
1808
+ * await worker.terminate();
1809
+ * ```
1810
+ */
1811
+ declare class SandboxedWorker {
1812
+ private worker;
1813
+ private state;
1814
+ private config;
1815
+ private idleTimer;
1816
+ private executionTimer;
1817
+ /**
1818
+ * Creates a SandboxedWorker instance.
898
1819
  *
899
- * @param queue - Queue name (default: 'default').
900
- * @param timeout - Timeout in seconds (default: 0, wait forever).
901
- * @param connection - Connection name (optional).
1820
+ * @param config - Configuration options for the worker.
902
1821
  */
903
- popBlocking(queues?: string | string[], timeout?: number, connection?: string): Promise<Job | null>;
1822
+ constructor(config?: SandboxedWorkerConfig);
904
1823
  /**
905
- * Clear all jobs from a queue.
1824
+ * Initializes the Worker Thread.
906
1825
  *
907
- * @param queue - Queue name (default: 'default').
908
- * @param connection - Connection name (optional).
1826
+ * @returns The active Worker Thread instance.
1827
+ * @throws {Error} If worker initialization fails or times out.
909
1828
  */
910
- clear(queue?: string, connection?: string): Promise<void>;
1829
+ private initWorker;
911
1830
  /**
912
- * Get queue statistics including size, delayed, and failed job counts.
1831
+ * Executes a job in the sandboxed environment.
913
1832
  *
914
- * @param queue - Queue name (default: 'default').
915
- * @param connection - Connection name (optional).
916
- * @returns Queue statistics object.
1833
+ * @param job - The serialized job data to execute.
1834
+ * @throws {Error} If execution fails, times out, or the worker crashes.
917
1835
  */
918
- stats(queue?: string, connection?: string): Promise<QueueStats>;
1836
+ execute(job: SerializedJob): Promise<void>;
919
1837
  /**
920
- * Mark a job as completed.
921
- * @param job - Job instance
1838
+ * Internal method to send execution message to the worker thread.
1839
+ *
1840
+ * @param worker - The worker thread instance.
1841
+ * @param job - Job data.
922
1842
  */
923
- complete<T extends Job & Queueable>(job: T): Promise<void>;
1843
+ private executeInWorker;
924
1844
  /**
925
- * Mark a job as permanently failed.
926
- * @param job - Job instance
927
- * @param error - Error object
1845
+ * Creates a promise that rejects after the configured timeout.
928
1846
  */
929
- fail<T extends Job & Queueable>(job: T, error: Error): Promise<void>;
1847
+ private createTimeoutPromise;
930
1848
  /**
931
- * Get the persistence adapter if configured.
1849
+ * Starts the idle timer to auto-terminate the worker.
932
1850
  */
933
- getPersistence(): PersistenceAdapter | undefined;
1851
+ private startIdleTimer;
934
1852
  /**
935
- * Get the scheduler if configured.
1853
+ * Terminates the Worker Thread immediately.
1854
+ *
1855
+ * Stops any running job and releases resources.
936
1856
  */
937
- getScheduler(): Scheduler;
1857
+ terminate(): Promise<void>;
938
1858
  /**
939
- * Get failed jobs from DLQ (if driver supports it).
1859
+ * Gets the current state of the worker.
1860
+ *
1861
+ * @returns The current `WorkerState`.
940
1862
  */
941
- getFailed(queue: string, start?: number, end?: number, connection?: string): Promise<SerializedJob[]>;
1863
+ getState(): string;
942
1864
  /**
943
- * Retry failed jobs from DLQ (if driver supports it).
1865
+ * Checks if the worker is ready to accept a job.
1866
+ *
1867
+ * @returns `true` if ready, `false` otherwise.
944
1868
  */
945
- retryFailed(queue: string, count?: number, connection?: string): Promise<number>;
1869
+ isReady(): boolean;
946
1870
  /**
947
- * Clear failed jobs from DLQ (if driver supports it).
1871
+ * Checks if the worker is currently executing a job.
1872
+ *
1873
+ * @returns `true` if busy, `false` otherwise.
948
1874
  */
949
- clearFailed(queue: string, connection?: string): Promise<void>;
1875
+ isBusy(): boolean;
950
1876
  }
951
1877
 
952
1878
  /**
953
- * Worker options.
1879
+ * Configuration options for the Worker.
1880
+ *
1881
+ * Controls the execution behavior of jobs, including retry limits and timeouts.
1882
+ *
1883
+ * @example
1884
+ * ```typescript
1885
+ * const options: WorkerOptions = {
1886
+ * maxAttempts: 3,
1887
+ * timeout: 30
1888
+ * };
1889
+ * ```
954
1890
  */
955
1891
  interface WorkerOptions {
956
1892
  /**
957
- * Maximum retry attempts.
1893
+ * The maximum number of attempts for a job before it is marked as failed.
1894
+ *
1895
+ * This value serves as a default fallback if the job itself does not specify `maxAttempts`.
958
1896
  */
959
1897
  maxAttempts?: number;
960
1898
  /**
961
- * Job timeout (seconds).
1899
+ * The maximum execution time for a job in seconds.
1900
+ *
1901
+ * If the job exceeds this duration, it will be timed out and marked as failed.
962
1902
  */
963
1903
  timeout?: number;
964
1904
  /**
965
- * Failure callback.
1905
+ * Callback function triggered when a job permanently fails.
1906
+ *
1907
+ * This allows for custom error reporting or cleanup logic outside of the job class.
966
1908
  */
967
1909
  onFailed?: (job: Job, error: Error) => Promise<void>;
1910
+ /**
1911
+ * Enable sandboxed execution using Worker Threads.
1912
+ *
1913
+ * When enabled, jobs are executed in isolated Worker Threads, providing:
1914
+ * - Context isolation: Each job runs in a separate execution environment
1915
+ * - Crash protection: Worker crashes don't affect the main thread
1916
+ * - Memory limits: Prevent memory leaks from affecting the main process
1917
+ * - Timeout enforcement: Jobs exceeding the timeout are forcefully terminated
1918
+ *
1919
+ * @default false
1920
+ */
1921
+ sandboxed?: boolean;
1922
+ /**
1923
+ * Sandboxed worker configuration options.
1924
+ *
1925
+ * Only used when `sandboxed` is true.
1926
+ */
1927
+ sandboxConfig?: SandboxedWorkerConfig;
968
1928
  }
969
1929
  /**
970
- * Base Worker.
1930
+ * Executes background jobs.
971
1931
  *
972
- * Responsible for executing `Job` instances.
973
- * Provides error handling, retry logic, and timeout support.
1932
+ * The Worker is responsible for running the `handle()` method of a job, managing its lifecycle,
1933
+ * enforcing timeouts, and handling retries or failures.
1934
+ *
1935
+ * Supports two execution modes:
1936
+ * - **Standard Mode** (default): Executes jobs directly in the current process
1937
+ * - **Sandboxed Mode**: Executes jobs in isolated Worker Threads for enhanced security and stability
974
1938
  *
975
1939
  * @example
976
1940
  * ```typescript
1941
+ * // Standard mode
977
1942
  * const worker = new Worker({
978
1943
  * maxAttempts: 3,
979
1944
  * timeout: 60
980
- * })
1945
+ * });
1946
+ *
1947
+ * // Sandboxed mode
1948
+ * const sandboxedWorker = new Worker({
1949
+ * maxAttempts: 3,
1950
+ * timeout: 60,
1951
+ * sandboxed: true,
1952
+ * sandboxConfig: {
1953
+ * maxExecutionTime: 30000,
1954
+ * maxMemory: 512,
1955
+ * isolateContexts: true
1956
+ * }
1957
+ * });
981
1958
  *
982
- * await worker.process(job)
1959
+ * await worker.process(job);
983
1960
  * ```
984
1961
  */
985
1962
  declare class Worker {
986
1963
  private options;
1964
+ private sandboxedWorker?;
987
1965
  constructor(options?: WorkerOptions);
988
1966
  /**
989
- * Process a Job.
990
- * @param job - Job instance
1967
+ * Processes a single job instance.
1968
+ *
1969
+ * 1. Checks attempt counts.
1970
+ * 2. Enforces execution timeout (if configured).
1971
+ * 3. Runs `job.handle()` (either directly or in a sandboxed Worker Thread).
1972
+ * 4. Catches errors and invokes failure handlers if max attempts are reached.
1973
+ *
1974
+ * @param job - The job to process.
1975
+ * @throws {Error} If the job execution fails (to trigger retry logic in the consumer).
991
1976
  */
992
1977
  process(job: Job): Promise<void>;
993
1978
  /**
994
- * Handle failure.
1979
+ * Processes a job in standard mode (directly in current process).
1980
+ *
1981
+ * @param job - The job to process.
1982
+ * @param timeout - Optional timeout in seconds.
1983
+ */
1984
+ private processStandard;
1985
+ /**
1986
+ * Processes a job in sandboxed mode (in Worker Thread).
1987
+ *
1988
+ * @param job - The job to process.
1989
+ */
1990
+ private processSandboxed;
1991
+ /**
1992
+ * Serializes a Job instance for Worker Thread execution.
1993
+ *
1994
+ * @param job - The job to serialize.
1995
+ * @returns Serialized job data.
1996
+ */
1997
+ private serializeJob;
1998
+ /**
1999
+ * Handles the permanent failure of a job.
2000
+ *
2001
+ * Invokes the job's `failed()` method and any global `onFailed` callback.
2002
+ *
2003
+ * @param job - The failed job.
2004
+ * @param error - The error that caused the failure.
995
2005
  */
996
2006
  private handleFailure;
2007
+ /**
2008
+ * Terminates the sandboxed worker and releases resources.
2009
+ *
2010
+ * Should be called when the worker is no longer needed.
2011
+ * Only applicable when running in sandboxed mode.
2012
+ */
2013
+ terminate(): Promise<void>;
997
2014
  }
998
2015
 
999
2016
  /**
1000
- * Consumer options.
2017
+ * Configuration options for the Consumer.
2018
+ *
2019
+ * Defines which queues to listen to, connection settings, concurrency levels,
2020
+ * and advanced behavior like rate limiting and batch processing.
2021
+ *
2022
+ * @example
2023
+ * ```typescript
2024
+ * const options: ConsumerOptions = {
2025
+ * queues: ['emails', 'notifications'],
2026
+ * concurrency: 5,
2027
+ * pollInterval: 2000
2028
+ * };
2029
+ * ```
1001
2030
  */
1002
2031
  interface ConsumerOptions {
1003
2032
  /**
1004
- * Queues to listen on.
2033
+ * List of queue names to consume jobs from.
2034
+ *
2035
+ * The consumer will poll these queues in the order provided or based on driver logic.
1005
2036
  */
1006
2037
  queues: string[];
1007
2038
  /**
1008
- * Connection name.
2039
+ * The connection name to use (e.g., 'redis', 'sqs').
2040
+ *
2041
+ * If not provided, uses the default connection from QueueManager.
1009
2042
  */
1010
2043
  connection?: string;
1011
2044
  /**
1012
- * Worker options.
2045
+ * Configuration options passed to the underlying Worker.
1013
2046
  */
1014
2047
  workerOptions?: WorkerOptions;
1015
2048
  /**
1016
- * Polling interval (milliseconds).
2049
+ * The interval in milliseconds to wait before polling again when the queue is empty.
1017
2050
  */
1018
2051
  pollInterval?: number;
1019
2052
  /**
1020
- * Whether to keep polling when queues are empty.
2053
+ * Whether to keep the process alive when queues are empty.
2054
+ *
2055
+ * If false, the consumer will exit the loop when no jobs are found (useful for one-off scripts).
1021
2056
  */
1022
2057
  keepAlive?: boolean;
1023
2058
  /**
1024
- * Monitoring options.
2059
+ * Monitoring configuration.
2060
+ *
2061
+ * Can be a boolean to enable default monitoring, or an object for advanced configuration.
1025
2062
  */
1026
2063
  monitor?: boolean | {
1027
2064
  /**
1028
- * Heartbeat interval (milliseconds). Default: 5000.
2065
+ * The interval in milliseconds for sending heartbeat updates.
2066
+ * @default 5000
1029
2067
  */
1030
2068
  interval?: number;
1031
2069
  /**
1032
- * Extra info to report with heartbeat.
2070
+ * Additional metadata to include in heartbeat payloads.
1033
2071
  */
1034
2072
  extraInfo?: Record<string, unknown>;
1035
2073
  /**
1036
- * Prefix for monitoring keys/channels.
2074
+ * Key prefix for monitoring events (e.g. for Redis Pub/Sub).
1037
2075
  */
1038
2076
  prefix?: string;
1039
2077
  };
1040
2078
  /**
1041
- * Rate limits per queue.
1042
- * Example: { 'emails': { max: 10, duration: 1000 } }
2079
+ * Rate limiting configuration per queue.
2080
+ *
2081
+ * Defines the maximum number of jobs to process within a given duration.
2082
+ *
2083
+ * @example
2084
+ * ```typescript
2085
+ * { 'emails': { max: 10, duration: 1000 } } // 10 emails per second
2086
+ * ```
1043
2087
  */
1044
2088
  rateLimits?: Record<string, {
1045
2089
  max: number;
1046
2090
  duration: number;
1047
2091
  }>;
1048
2092
  /**
1049
- * Max concurrent jobs to process. Default: 1.
2093
+ * The maximum number of jobs to process concurrently.
2094
+ *
2095
+ * @default 1
1050
2096
  */
1051
2097
  concurrency?: number;
1052
2098
  /**
1053
- * Whether to process jobs with the same groupId sequentially.
1054
- * If true, jobs with the same groupId will never run concurrently,
1055
- * regardless of the global concurrency setting.
2099
+ * Whether to enforce sequential processing for jobs with the same `groupId`.
2100
+ *
2101
+ * If true, jobs sharing a `groupId` will be processed one after another,
2102
+ * even if global concurrency is high.
2103
+ *
1056
2104
  * @default true
1057
2105
  */
1058
2106
  groupJobsSequential?: boolean;
1059
2107
  /**
1060
- * Minimum polling interval in ms (for adaptive polling).
2108
+ * The minimum polling interval in milliseconds for adaptive polling.
2109
+ *
1061
2110
  * @default 100
1062
2111
  */
1063
2112
  minPollInterval?: number;
1064
2113
  /**
1065
- * Maximum polling interval in ms (for adaptive polling).
2114
+ * The maximum polling interval in milliseconds for adaptive polling.
2115
+ *
1066
2116
  * @default 5000
1067
2117
  */
1068
2118
  maxPollInterval?: number;
1069
2119
  /**
1070
- * Backoff multiplier for adaptive polling.
2120
+ * The multiplier used to increase the polling interval when the queue is empty.
2121
+ *
1071
2122
  * @default 1.5
1072
2123
  */
1073
2124
  backoffMultiplier?: number;
1074
2125
  /**
1075
- * Batch size for consuming jobs.
1076
- * If > 1, tries to fetch multiple jobs at once.
2126
+ * The number of jobs to try to fetch in a single request.
2127
+ *
2128
+ * If supported by the driver, fetching multiple jobs reduces network round-trips.
2129
+ *
1077
2130
  * @default 1
1078
2131
  */
1079
2132
  batchSize?: number;
1080
2133
  /**
1081
- * Whether to use blocking pop (BLPOP/long-polling) if supported by driver.
1082
- * Only applies when batchSize is 1.
2134
+ * Whether to use blocking operations (like BLPOP in Redis) when polling.
2135
+ *
2136
+ * Significant optimization for low-latency job pickup. Only applies when `batchSize` is 1.
2137
+ *
1083
2138
  * @default true
1084
2139
  */
1085
2140
  useBlocking?: boolean;
1086
2141
  /**
1087
- * Timeout in seconds for blocking pop.
2142
+ * The timeout in seconds for blocking operations.
2143
+ *
1088
2144
  * @default 5
1089
2145
  */
1090
2146
  blockingTimeout?: number;
1091
2147
  /**
1092
- * Enable verbose debug logging.
2148
+ * Enable verbose debug logging for consumer activities.
2149
+ *
1093
2150
  * @default false
1094
2151
  */
1095
2152
  debug?: boolean;
2153
+ /**
2154
+ * 最大處理請求數量。
2155
+ *
2156
+ * 當 consumer 處理完這個數量的 job 後會自動停止(觸發 max_requests_reached 事件)。
2157
+ * 適用於需要定期重啟 worker 的場景(避免記憶體累積、載入最新程式碼等)。
2158
+ *
2159
+ * @default undefined (無限制)
2160
+ */
2161
+ maxRequests?: number;
2162
+ /**
2163
+ * Optional event callback for external monitoring systems.
2164
+ *
2165
+ * Called whenever a job lifecycle event occurs (started, processed, failed, etc.).
2166
+ */
2167
+ onEvent?: (event: string, payload: any) => void;
1096
2168
  }
1097
2169
  /**
1098
- * Consumer
2170
+ * The Consumer responsible for processing jobs from the queue.
1099
2171
  *
1100
- * Consumes and executes jobs from queues.
1101
- * Supports embedded mode (inside the main app) and standalone mode (as a worker service).
2172
+ * It polls the configured queues, retrieves jobs, and delegates execution to a `Worker`.
2173
+ * It handles concurrency, rate limiting, adaptive polling, and emits lifecycle events
2174
+ * (job:started, job:processed, job:failed, etc.).
1102
2175
  *
2176
+ * @public
1103
2177
  * @example
1104
2178
  * ```typescript
1105
- * // Embedded mode
1106
2179
  * const consumer = new Consumer(queueManager, {
1107
- * queues: ['default', 'emails'],
1108
- * pollInterval: 1000
1109
- * })
1110
- *
1111
- * consumer.start()
2180
+ * queues: ['default'],
2181
+ * concurrency: 10
2182
+ * });
1112
2183
  *
1113
- * // Standalone mode (CLI)
1114
- * // Start via CLI tooling with graceful shutdown
2184
+ * await consumer.start();
1115
2185
  * ```
1116
2186
  *
1117
2187
  * @emits job:started - When a job begins processing. Payload: { job: Job, queue: string }
1118
2188
  * @emits job:processed - When a job completes successfully. Payload: { job: Job, duration: number, queue: string }
1119
2189
  * @emits job:failed - When a job fails an attempt. Payload: { job: Job, error: Error, duration: number, queue: string }
1120
2190
  * @emits job:retried - When a job is scheduled for a retry. Payload: { job: Job, attempt: number, delay: number }
1121
- * @emits job:failed_permanently - When a job fails all attempts and is moved to DLQ. Payload: { job: Job, error: Error }
2191
+ * @emits job:failed_permanently - When a job fails all attempts. Payload: { job: Job, error: Error }
1122
2192
  */
1123
2193
  declare class Consumer extends EventEmitter {
1124
2194
  private queueManager;
1125
2195
  private options;
2196
+ /**
2197
+ * Group limiter 的存活時間(毫秒)。
2198
+ * 超過此時間未使用的 group limiter 會被清理,避免記憶體洩漏。
2199
+ */
2200
+ private static readonly GROUP_LIMITER_TTL;
1126
2201
  private running;
1127
2202
  private stopRequested;
1128
2203
  private workerId;
1129
2204
  private heartbeatTimer;
2205
+ private cleanupTimer;
1130
2206
  private groupLimiters;
2207
+ private groupLimiterLastUsed;
1131
2208
  private stats;
1132
2209
  constructor(queueManager: QueueManager, options: ConsumerOptions);
1133
2210
  private get connectionName();
1134
2211
  /**
1135
- * Log debug message.
2212
+ * Logs a debug message if debug mode is enabled.
2213
+ */
2214
+ private log;
2215
+ /**
2216
+ * Starts the consumer loop.
2217
+ *
2218
+ * Begins polling the queues and processing jobs. This method returns a promise that resolves
2219
+ * only when the consumer stops (if `keepAlive` is false) or throws if already running.
2220
+ *
2221
+ * @throws {Error} If the consumer is already running.
2222
+ */
2223
+ start(): Promise<void>;
2224
+ /**
2225
+ * Run a job with concurrency controls and group locking.
2226
+ */
2227
+ private runJob;
2228
+ /**
2229
+ * Delegates the actual processing to the worker and handles stats/logging.
2230
+ */
2231
+ private handleJob;
2232
+ private startHeartbeat;
2233
+ private stopHeartbeat;
2234
+ /**
2235
+ * 清理閒置的 group limiters。
2236
+ *
2237
+ * 定期檢查並移除超過 TTL 且沒有 active/pending jobs 的 group limiters,
2238
+ * 避免記憶體洩漏。
2239
+ */
2240
+ private cleanupGroupLimiters;
2241
+ /**
2242
+ * 啟動 group limiter 清理計時器。
2243
+ */
2244
+ private startCleanupTimer;
2245
+ /**
2246
+ * 停止 group limiter 清理計時器。
2247
+ */
2248
+ private stopCleanupTimer;
2249
+ private publishLog;
2250
+ /**
2251
+ * Gracefully stops the consumer.
2252
+ *
2253
+ * Signals the consumer to stop accepting new jobs and waits for currently running jobs
2254
+ * to complete.
2255
+ *
2256
+ * @returns A promise that resolves when the consumer has fully stopped.
2257
+ */
2258
+ stop(): Promise<void>;
2259
+ /**
2260
+ * Checks if the consumer is currently active.
2261
+ *
2262
+ * @returns True if the consumer loop is running.
2263
+ */
2264
+ isRunning(): boolean;
2265
+ /**
2266
+ * Retrieves current operational statistics.
2267
+ *
2268
+ * @returns An object containing processed, failed, retried, and active job counts.
2269
+ */
2270
+ getStats(): {
2271
+ processed: number;
2272
+ failed: number;
2273
+ retried: number;
2274
+ active: number;
2275
+ };
2276
+ /**
2277
+ * Resets the internal statistics counters.
2278
+ */
2279
+ resetStats(): void;
2280
+ }
2281
+
2282
+ /**
2283
+ * Bull Queue client interface (compatible with bullmq package).
2284
+ */
2285
+ interface BullQueueClient {
2286
+ add(name: string, data: any, options?: any): Promise<any>;
2287
+ getJob(id: string): Promise<any | null>;
2288
+ count(): Promise<number>;
2289
+ process(handler: (job: any) => Promise<void>): void;
2290
+ on(event: string, handler: (...args: any[]) => void): void;
2291
+ off(event: string, handler: (...args: any[]) => void): void;
2292
+ pause(): Promise<void>;
2293
+ resume(): Promise<void>;
2294
+ clean(grace: number, limit?: number, type?: string): Promise<number>;
2295
+ close(): Promise<void>;
2296
+ getJobCounts(types?: string[]): Promise<Record<string, number>>;
2297
+ getDelayedCount(): Promise<number>;
2298
+ getFailedCount(): Promise<number>;
2299
+ getActiveCount(): Promise<number>;
2300
+ [key: string]: any;
2301
+ }
2302
+ interface BullWorkerClient {
2303
+ [key: string]: any;
2304
+ }
2305
+ /**
2306
+ * Bull Queue driver configuration.
2307
+ */
2308
+ interface BullMQDriverConfig {
2309
+ /**
2310
+ * Bull Queue instance (from bullmq package).
2311
+ */
2312
+ queue: BullQueueClient;
2313
+ /**
2314
+ * Optional Bull Worker instance for processing jobs.
2315
+ */
2316
+ worker?: BullWorkerClient;
2317
+ /**
2318
+ * Connection options (host, port, etc. for Redis).
2319
+ */
2320
+ connection?: {
2321
+ host?: string;
2322
+ port?: number;
2323
+ password?: string;
2324
+ db?: number;
2325
+ [key: string]: any;
2326
+ };
2327
+ /**
2328
+ * Default number of concurrent workers (default: 1).
2329
+ */
2330
+ concurrency?: number;
2331
+ /**
2332
+ * Key prefix for namespacing queues (default: 'gravito:').
2333
+ */
2334
+ prefix?: string;
2335
+ /**
2336
+ * Enable debug logging.
2337
+ */
2338
+ debug?: boolean;
2339
+ }
2340
+ /**
2341
+ * Bull Queue driver implementation.
2342
+ *
2343
+ * Provides high-performance, persistent job queuing using Bull Queue (backed by Redis).
2344
+ * Supports priority, delays, retries, and group-based FIFO processing.
2345
+ *
2346
+ * @public
2347
+ * @example
2348
+ * ```typescript
2349
+ * import { Queue } from 'bullmq'
2350
+ * import Redis from 'ioredis'
2351
+ *
2352
+ * const redis = new Redis()
2353
+ * const queue = new Queue('gravito-events', { connection: redis })
2354
+ * const driver = new BullMQDriver({ queue })
2355
+ * ```
2356
+ */
2357
+ declare class BullMQDriver implements QueueDriver {
2358
+ private queue;
2359
+ private prefix;
2360
+ private debug;
2361
+ private queueMap;
2362
+ constructor(config: BullMQDriverConfig);
2363
+ /**
2364
+ * Get or create a queue for the given queue name.
2365
+ */
2366
+ private getQueue;
2367
+ /**
2368
+ * Build Job Options from JobPushOptions.
2369
+ */
2370
+ private buildJobOptions;
2371
+ /**
2372
+ * Create Bull job data from SerializedJob.
2373
+ */
2374
+ private createBullJobData;
2375
+ /**
2376
+ * Pushes a job to Bull Queue.
2377
+ */
2378
+ push(queue: string, job: SerializedJob, options?: JobPushOptions): Promise<void>;
2379
+ /**
2380
+ * Pops a job from Bull Queue.
2381
+ * Note: Bull Queue typically uses Workers, not manual pop.
2382
+ * This is a fallback implementation.
2383
+ */
2384
+ pop(queue: string): Promise<SerializedJob | null>;
2385
+ /**
2386
+ * Returns the size of the queue.
2387
+ */
2388
+ size(queue: string): Promise<number>;
2389
+ /**
2390
+ * Clears the queue.
2391
+ */
2392
+ clear(queue: string): Promise<void>;
2393
+ /**
2394
+ * Marks a job as failed (moves to failed list).
1136
2395
  */
1137
- private log;
2396
+ fail(queue: string, job: SerializedJob): Promise<void>;
1138
2397
  /**
1139
- * Start the consumer loop.
2398
+ * Returns detailed statistics for the queue.
1140
2399
  */
1141
- start(): Promise<void>;
2400
+ stats(queue: string): Promise<QueueStats>;
1142
2401
  /**
1143
- * Run a job with concurrency controls.
2402
+ * Retrieves failed jobs from the Dead Letter Queue.
1144
2403
  */
1145
- private runJob;
2404
+ getFailed(queue: string, _start?: number, _end?: number): Promise<SerializedJob[]>;
1146
2405
  /**
1147
- * Handle a single job.
2406
+ * Retries failed jobs.
1148
2407
  */
1149
- private handleJob;
1150
- private startHeartbeat;
1151
- private stopHeartbeat;
1152
- private publishLog;
2408
+ retryFailed(queue: string, _count?: number): Promise<number>;
1153
2409
  /**
1154
- * Stop the consumer loop (graceful shutdown).
2410
+ * Clears the Dead Letter Queue.
1155
2411
  */
1156
- stop(): Promise<void>;
2412
+ clearFailed(queue: string): Promise<void>;
1157
2413
  /**
1158
- * Check whether the consumer is running.
2414
+ * Creates a new queue/topic.
1159
2415
  */
1160
- isRunning(): boolean;
2416
+ createTopic(_topic: string, _options?: TopicOptions): Promise<void>;
1161
2417
  /**
1162
- * Get current consumer statistics.
2418
+ * Deletes a queue/topic.
1163
2419
  */
1164
- getStats(): {
1165
- processed: number;
1166
- failed: number;
1167
- retried: number;
1168
- active: number;
1169
- };
2420
+ deleteTopic(topic: string): Promise<void>;
1170
2421
  /**
1171
- * Reset statistics counters.
2422
+ * Pushes multiple jobs in batch.
1172
2423
  */
1173
- resetStats(): void;
2424
+ pushMany(queue: string, jobs: SerializedJob[]): Promise<void>;
2425
+ /**
2426
+ * Pops multiple jobs in batch.
2427
+ */
2428
+ popMany(_queue: string, _count: number): Promise<SerializedJob[]>;
2429
+ /**
2430
+ * Reports worker heartbeat.
2431
+ */
2432
+ reportHeartbeat(workerInfo: {
2433
+ id: string;
2434
+ status: string;
2435
+ hostname: string;
2436
+ pid: number;
2437
+ uptime: number;
2438
+ last_ping: string;
2439
+ queues: string[];
2440
+ metrics?: Record<string, any>;
2441
+ [key: string]: any;
2442
+ }, _prefix?: string): Promise<void>;
2443
+ /**
2444
+ * Publishes a log message.
2445
+ */
2446
+ publishLog(logPayload: {
2447
+ level: string;
2448
+ message: string;
2449
+ workerId: string;
2450
+ jobId?: string;
2451
+ timestamp: string;
2452
+ [key: string]: any;
2453
+ }, _prefix?: string): Promise<void>;
2454
+ /**
2455
+ * Checks rate limit for a queue.
2456
+ */
2457
+ checkRateLimit(_queue: string, _config: {
2458
+ max: number;
2459
+ duration: number;
2460
+ }): Promise<boolean>;
2461
+ /**
2462
+ * Retrieves all queue names.
2463
+ */
2464
+ getQueues(): Promise<string[]>;
1174
2465
  }
1175
2466
 
1176
2467
  /**
1177
2468
  * Generic database service interface.
1178
- * Users should implement this interface with their preferred ORM/database client.
2469
+ *
2470
+ * Adapts any SQL database client (e.g., pg, mysql2, sqlite3) for use with the DatabaseDriver.
2471
+ * Users must provide an implementation of this interface that wraps their specific DB library.
1179
2472
  */
1180
2473
  interface DatabaseService {
1181
2474
  /**
1182
2475
  * Execute a raw SQL query.
1183
- * @param sql - The SQL query string with placeholders ($1, $2, etc.)
1184
- * @param bindings - The values to bind to placeholders
2476
+ *
2477
+ * @param sql - The SQL query string with placeholders (e.g., $1, ?).
2478
+ * @param bindings - The values to bind to the placeholders.
2479
+ * @returns The query result (rows or metadata).
1185
2480
  */
1186
2481
  execute<T = unknown>(sql: string, bindings?: unknown[]): Promise<T[] | T>;
1187
2482
  /**
1188
- * Execute multiple queries within a transaction.
1189
- * @param callback - The callback to execute within the transaction
2483
+ * Execute multiple queries within a single transaction.
2484
+ *
2485
+ * @param callback - A function that receives a transaction-scoped service instance.
2486
+ * @returns The result of the callback.
1190
2487
  */
1191
2488
  transaction<T>(callback: (tx: DatabaseService) => Promise<T>): Promise<T>;
1192
2489
  }
1193
2490
  /**
1194
- * Database driver configuration.
2491
+ * Configuration options for the DatabaseDriver.
1195
2492
  */
1196
2493
  interface DatabaseDriverConfig {
1197
2494
  /**
1198
- * Table name (default: `jobs`).
2495
+ * The name of the table used to store jobs.
2496
+ * @default 'jobs'
1199
2497
  */
1200
2498
  table?: string;
1201
2499
  /**
1202
- * Database service instance that implements DatabaseService interface.
2500
+ * The database service adapter instance.
1203
2501
  */
1204
2502
  dbService?: DatabaseService;
1205
2503
  }
1206
2504
  /**
1207
- * Database Driver
2505
+ * Database-backed queue driver.
1208
2506
  *
1209
- * Uses a database as the queue backend.
1210
- * Works with any database service that implements the DatabaseService interface.
2507
+ * Persists jobs in a SQL database table. Supports delayed jobs, reservation (locking),
2508
+ * and reliable delivery. Compatible with PostgreSQL (SKIP LOCKED), MySQL, and SQLite.
1211
2509
  *
2510
+ * @public
1212
2511
  * @example
1213
2512
  * ```typescript
1214
- * // Create a database service adapter
1215
- * const dbService = {
1216
- * execute: async (sql, bindings) => yourDbClient.query(sql, bindings),
1217
- * transaction: async (callback) => yourDbClient.transaction(callback),
1218
- * }
1219
- *
1220
- * const driver = new DatabaseDriver({ dbService, table: 'jobs' })
1221
- * await driver.push('default', serializedJob)
2513
+ * const driver = new DatabaseDriver({
2514
+ * dbService: myDbAdapter,
2515
+ * table: 'queue_jobs'
2516
+ * });
1222
2517
  * ```
1223
2518
  */
1224
2519
  declare class DatabaseDriver implements QueueDriver {
@@ -1226,55 +2521,101 @@ declare class DatabaseDriver implements QueueDriver {
1226
2521
  private dbService;
1227
2522
  constructor(config: DatabaseDriverConfig);
1228
2523
  /**
1229
- * Push a job to a queue.
2524
+ * Pushes a job to the database queue.
2525
+ *
2526
+ * Inserts a new row into the jobs table.
2527
+ *
2528
+ * @param queue - The queue name.
2529
+ * @param job - The serialized job.
1230
2530
  */
1231
2531
  push(queue: string, job: SerializedJob): Promise<void>;
1232
2532
  /**
1233
- * Pop a job from the queue (FIFO, with delay support).
2533
+ * Pops the next available job from the queue.
2534
+ *
2535
+ * Uses transactional locking (SELECT ... FOR UPDATE SKIP LOCKED if supported) to ensure
2536
+ * atomic reservation of jobs by workers.
2537
+ *
2538
+ * @param queue - The queue name.
2539
+ * @returns The job or `null`.
1234
2540
  */
1235
2541
  pop(queue: string): Promise<SerializedJob | null>;
1236
2542
  /**
1237
- * Pop multiple jobs from the queue.
2543
+ * Pops multiple jobs from the queue in a single transaction.
2544
+ *
2545
+ * @param queue - The queue name.
2546
+ * @param count - Max jobs to pop.
1238
2547
  */
1239
2548
  popMany(queue: string, count: number): Promise<SerializedJob[]>;
1240
2549
  /**
1241
- * Get queue statistics.
2550
+ * Retrieves queue statistics by querying the table.
2551
+ *
2552
+ * @param queue - The queue name.
1242
2553
  */
1243
2554
  stats(queue: string): Promise<QueueStats>;
1244
2555
  /**
1245
- * Get queue size.
2556
+ * Returns the count of pending jobs.
2557
+ *
2558
+ * @param queue - The queue name.
1246
2559
  */
1247
2560
  size(queue: string): Promise<number>;
1248
2561
  /**
1249
- * Clear a queue.
2562
+ * Clears the queue by deleting all rows for the queue.
2563
+ *
2564
+ * @param queue - The queue name.
1250
2565
  */
1251
2566
  clear(queue: string): Promise<void>;
1252
2567
  /**
1253
- * Pop a job from the queue (blocking).
1254
- * Simple polling fallback for databases.
2568
+ * Pops a job using a polling loop (Blocking simulation).
2569
+ *
2570
+ * @param queue - The queue name.
2571
+ * @param timeout - Timeout in seconds.
1255
2572
  */
1256
2573
  popBlocking(queue: string, timeout: number): Promise<SerializedJob | null>;
1257
2574
  /**
1258
- * Push multiple jobs.
1259
- * Optimizes by using a single multi-row insert if possible.
2575
+ * Pushes multiple jobs using a transaction.
2576
+ *
2577
+ * @param queue - The queue name.
2578
+ * @param jobs - Array of jobs.
1260
2579
  */
1261
2580
  pushMany(queue: string, jobs: SerializedJob[]): Promise<void>;
1262
2581
  /**
1263
- * Mark a job as failed (DLQ).
2582
+ * Marks a job as permanently failed by moving it to the DLQ (separate logical queue in DB).
2583
+ *
2584
+ * @param queue - The queue name.
2585
+ * @param job - The failed job.
1264
2586
  */
1265
2587
  fail(queue: string, job: SerializedJob): Promise<void>;
1266
2588
  /**
1267
- * Acknowledge/Complete a job.
2589
+ * Deletes a job row from the database (completion).
2590
+ *
2591
+ * @param _queue - The queue name (unused).
2592
+ * @param job - The job to complete.
1268
2593
  */
1269
2594
  complete(_queue: string, job: SerializedJob): Promise<void>;
1270
2595
  }
1271
2596
 
2597
+ declare class GrpcDriver implements QueueDriver {
2598
+ private client;
2599
+ constructor(config: GrpcDriverConfig);
2600
+ private getCredentials;
2601
+ push(queue: string, job: SerializedJob, options?: JobPushOptions): Promise<void>;
2602
+ pop(queue: string): Promise<SerializedJob | null>;
2603
+ size(queue: string): Promise<number>;
2604
+ clear(queue: string): Promise<void>;
2605
+ acknowledge(messageId: string): Promise<void>;
2606
+ stats(queue: string): Promise<QueueStats>;
2607
+ private toProtoJob;
2608
+ private fromProtoJob;
2609
+ }
2610
+
1272
2611
  /**
1273
2612
  * Kafka driver configuration.
1274
2613
  */
1275
2614
  interface KafkaDriverConfig {
1276
2615
  /**
1277
2616
  * Kafka client instance (kafkajs).
2617
+ *
2618
+ * Must provide producer, admin, and consumer factories compatible with KafkaJS.
1278
2619
  */
1279
2620
  client: {
1280
2621
  producer: () => {
@@ -1324,29 +2665,22 @@ interface KafkaDriverConfig {
1324
2665
  };
1325
2666
  };
1326
2667
  /**
1327
- * Consumer group ID (for consuming messages).
2668
+ * Consumer group ID used for reading messages.
2669
+ * @default 'gravito-workers'
1328
2670
  */
1329
2671
  consumerGroupId?: string;
1330
2672
  }
1331
2673
  /**
1332
- * Kafka Driver
2674
+ * Kafka-backed queue driver.
1333
2675
  *
1334
- * Uses Apache Kafka as the queue backend.
1335
- * Supports topic management, consumer groups, and batch operations.
1336
- *
1337
- * Requires `kafkajs`.
2676
+ * Uses Apache Kafka topics as queues. Designed for high-throughput streaming
2677
+ * rather than traditional job queue semantics (pop/delete).
2678
+ * Supports push-based consumption via `subscribe()`.
1338
2679
  *
2680
+ * @public
1339
2681
  * @example
1340
2682
  * ```typescript
1341
- * import { Kafka } from 'kafkajs'
1342
- *
1343
- * const kafka = new Kafka({
1344
- * brokers: ['localhost:9092'],
1345
- * clientId: 'gravito-app'
1346
- * })
1347
- *
1348
- * const driver = new KafkaDriver({ client: kafka, consumerGroupId: 'workers' })
1349
- * await driver.push('default', serializedJob)
2683
+ * const driver = new KafkaDriver({ client: kafka, consumerGroupId: 'my-app' });
1350
2684
  * ```
1351
2685
  */
1352
2686
  declare class KafkaDriver implements QueueDriver {
@@ -1364,63 +2698,86 @@ declare class KafkaDriver implements QueueDriver {
1364
2698
  */
1365
2699
  private ensureAdmin;
1366
2700
  /**
1367
- * Push a job to a topic.
2701
+ * Pushes a job to a Kafka topic.
2702
+ *
2703
+ * @param queue - The topic name.
2704
+ * @param job - The job to publish.
1368
2705
  */
1369
2706
  push(queue: string, job: SerializedJob): Promise<void>;
1370
2707
  /**
1371
- * Pop is not supported for Kafka.
2708
+ * Pop is not supported for Kafka (Push-based).
1372
2709
  *
1373
- * Note: Kafka uses a push-based model, so you should use `subscribe()`.
2710
+ * Kafka consumers typically stream messages. Use `subscribe()` instead.
2711
+ *
2712
+ * @throws {Error} Always throws as Kafka does not support polling individual messages in this manner.
1374
2713
  */
1375
2714
  pop(_queue: string): Promise<SerializedJob | null>;
1376
2715
  /**
1377
- * Kafka does not provide a direct queue size.
2716
+ * Returns 0 as Kafka does not expose a simple "queue size".
1378
2717
  *
1379
- * Returns 0; use Kafka tooling/metrics for lag/size insights.
2718
+ * Monitoring lag requires external tools or Admin API checks not implemented here.
1380
2719
  */
1381
2720
  size(_queue: string): Promise<number>;
1382
2721
  /**
1383
- * Clear a queue by deleting the topic.
2722
+ * Clears a queue by deleting the topic.
2723
+ *
2724
+ * @param queue - The topic name.
1384
2725
  */
1385
2726
  clear(queue: string): Promise<void>;
1386
2727
  /**
1387
- * Push multiple jobs.
2728
+ * Pushes multiple jobs to a Kafka topic.
2729
+ *
2730
+ * @param queue - The topic name.
2731
+ * @param jobs - Array of jobs.
1388
2732
  */
1389
2733
  pushMany(queue: string, jobs: SerializedJob[]): Promise<void>;
1390
2734
  /**
1391
- * Create a topic.
2735
+ * Creates a new Kafka topic.
2736
+ *
2737
+ * @param topic - The topic name.
2738
+ * @param options - Config for partitions/replication.
1392
2739
  */
1393
2740
  createTopic(topic: string, options?: TopicOptions): Promise<void>;
1394
2741
  /**
1395
- * Delete a topic.
2742
+ * Deletes a Kafka topic.
2743
+ *
2744
+ * @param topic - The topic name.
1396
2745
  */
1397
2746
  deleteTopic(topic: string): Promise<void>;
1398
2747
  /**
1399
- * Subscribe to a topic (push-based model).
2748
+ * Subscribes to a topic for streaming jobs.
2749
+ *
2750
+ * Starts a Kafka consumer group and processes messages as they arrive.
2751
+ *
2752
+ * @param queue - The topic name.
2753
+ * @param callback - Function to handle the job.
1400
2754
  */
1401
2755
  subscribe(queue: string, callback: (job: SerializedJob) => Promise<void>): Promise<void>;
1402
2756
  }
1403
2757
 
2758
+ /**
2759
+ * Configuration options for the MemoryDriver.
2760
+ */
1404
2761
  interface MemoryDriverConfig {
1405
2762
  /**
1406
- * Maximum number of jobs per queue.
2763
+ * The maximum number of jobs allowed in a single queue.
2764
+ *
1407
2765
  * @default Infinity
1408
2766
  */
1409
2767
  maxSize?: number;
1410
2768
  }
1411
2769
  /**
1412
- * Memory Driver
2770
+ * In-memory queue driver.
1413
2771
  *
1414
- * In-memory driver for development and testing.
1415
- * All data is stored in memory and will be lost when the process restarts.
1416
- *
1417
- * Zero-config: works out of the box.
2772
+ * Stores jobs in a local JavaScript Map. Ideal for development, testing, and simple
2773
+ * use cases where persistence across restarts is not required.
2774
+ * It supports basic delay handling but data is volatile.
1418
2775
  *
2776
+ * @public
1419
2777
  * @example
1420
2778
  * ```typescript
1421
- * const driver = new MemoryDriver({ maxSize: 1000 })
1422
- * await driver.push('default', serializedJob)
1423
- * const job = await driver.pop('default')
2779
+ * const driver = new MemoryDriver({ maxSize: 1000 });
2780
+ * await driver.push('default', job);
1424
2781
  * ```
1425
2782
  */
1426
2783
  declare class MemoryDriver implements QueueDriver {
@@ -1428,37 +2785,69 @@ declare class MemoryDriver implements QueueDriver {
1428
2785
  private maxSize;
1429
2786
  constructor(config?: MemoryDriverConfig);
1430
2787
  /**
1431
- * Push a job to a queue.
2788
+ * Pushes a job to the in-memory queue.
2789
+ *
2790
+ * @param queue - The queue name.
2791
+ * @param job - The serialized job.
2792
+ * @throws {Error} If the queue has reached `maxSize`.
1432
2793
  */
1433
2794
  push(queue: string, job: SerializedJob): Promise<void>;
1434
2795
  /**
1435
- * Pop a job from a queue (FIFO).
2796
+ * Pops the next available job from the queue.
2797
+ *
2798
+ * Respects `delaySeconds` by checking the job's `createdAt` timestamp.
2799
+ *
2800
+ * @param queue - The queue name.
2801
+ * @returns The job or `null`.
1436
2802
  */
1437
2803
  pop(queue: string): Promise<SerializedJob | null>;
1438
2804
  /**
1439
- * Get queue size.
2805
+ * Returns the number of jobs in the queue.
2806
+ *
2807
+ * @param queue - The queue name.
1440
2808
  */
1441
2809
  size(queue: string): Promise<number>;
1442
2810
  /**
1443
- * Clear a queue.
2811
+ * Clears all jobs from the queue.
2812
+ *
2813
+ * @param queue - The queue name.
1444
2814
  */
1445
2815
  clear(queue: string): Promise<void>;
1446
2816
  /**
1447
- * Mark a job as permanently failed.
2817
+ * Moves a job to the failed (DLQ) list.
2818
+ *
2819
+ * In MemoryDriver, this simply pushes to a `failed:{queue}` list.
2820
+ *
2821
+ * @param queue - The original queue name.
2822
+ * @param job - The failed job.
1448
2823
  */
1449
2824
  fail(queue: string, job: SerializedJob): Promise<void>;
1450
2825
  /**
1451
- * Get queue statistics.
2826
+ * Retrieves statistics for the queue.
2827
+ *
2828
+ * Calculates pending, delayed, and failed counts by iterating through the list.
2829
+ *
2830
+ * @param queue - The queue name.
1452
2831
  */
1453
2832
  stats(queue: string): Promise<QueueStats>;
1454
2833
  /**
1455
- * Push multiple jobs.
2834
+ * Pushes multiple jobs to the queue.
2835
+ *
2836
+ * @param queue - The queue name.
2837
+ * @param jobs - Array of jobs.
1456
2838
  */
1457
2839
  pushMany(queue: string, jobs: SerializedJob[]): Promise<void>;
1458
2840
  /**
1459
- * Pop multiple jobs.
2841
+ * Pops multiple jobs from the queue.
2842
+ *
2843
+ * @param queue - The queue name.
2844
+ * @param count - Max jobs to pop.
1460
2845
  */
1461
2846
  popMany(queue: string, count: number): Promise<SerializedJob[]>;
2847
+ /**
2848
+ * Lists all active queues in memory.
2849
+ */
2850
+ getQueues(): Promise<string[]>;
1462
2851
  }
1463
2852
 
1464
2853
  /**
@@ -1480,21 +2869,17 @@ interface RabbitMQDriverConfig {
1480
2869
  exchangeType?: 'direct' | 'topic' | 'headers' | 'fanout' | 'match';
1481
2870
  }
1482
2871
  /**
1483
- * RabbitMQ Driver
1484
- *
1485
- * Uses RabbitMQ as the queue backend.
1486
- * Implements FIFO via RabbitMQ Queues.
2872
+ * RabbitMQ (AMQP) queue driver.
1487
2873
  *
1488
- * Requires `amqplib`.
2874
+ * Uses RabbitMQ as the backend. Supports standard AMQP queues, exchanges,
2875
+ * and reliable message acknowledgements.
1489
2876
  *
2877
+ * @public
1490
2878
  * @example
1491
2879
  * ```typescript
1492
- * import amqp from 'amqplib'
1493
- *
1494
- * const connection = await amqp.connect('amqp://localhost')
1495
- * const driver = new RabbitMQDriver({ client: connection })
1496
- *
1497
- * await driver.push('default', serializedJob)
2880
+ * import amqp from 'amqplib';
2881
+ * const conn = await amqp.connect('amqp://localhost');
2882
+ * const driver = new RabbitMQDriver({ client: conn });
1498
2883
  * ```
1499
2884
  */
1500
2885
  declare class RabbitMQDriver implements QueueDriver {
@@ -1512,20 +2897,29 @@ declare class RabbitMQDriver implements QueueDriver {
1512
2897
  */
1513
2898
  getRawConnection(): any;
1514
2899
  /**
1515
- * Push a job (sendToQueue / publish).
2900
+ * Pushes a job to a RabbitMQ queue or exchange.
2901
+ *
2902
+ * @param queue - The queue name.
2903
+ * @param job - The serialized job.
1516
2904
  */
1517
2905
  push(queue: string, job: SerializedJob): Promise<void>;
1518
2906
  /**
1519
- * Pop a job (get).
2907
+ * Pops a job from the queue.
2908
+ *
2909
+ * @param queue - The queue name.
1520
2910
  */
1521
2911
  pop(queue: string): Promise<SerializedJob | null>;
1522
2912
  /**
1523
- * Pop multiple jobs.
1524
- * Uses channel.get() in a loop (no native batch get in AMQP).
2913
+ * Pops multiple jobs.
2914
+ *
2915
+ * @param queue - The queue name.
2916
+ * @param count - Max jobs.
1525
2917
  */
1526
2918
  popMany(queue: string, count: number): Promise<SerializedJob[]>;
1527
2919
  /**
1528
- * Acknowledge a message.
2920
+ * Acknowledges a message.
2921
+ *
2922
+ * @param messageId - The message object (RabbitMQ requires object reference).
1529
2923
  */
1530
2924
  acknowledge(messageId: string): Promise<void>;
1531
2925
  /**
@@ -1537,18 +2931,22 @@ declare class RabbitMQDriver implements QueueDriver {
1537
2931
  */
1538
2932
  reject(message: any, requeue?: boolean): Promise<void>;
1539
2933
  /**
1540
- * Subscribe to a queue.
2934
+ * Subscribes to a queue.
1541
2935
  */
1542
2936
  subscribe(queue: string, callback: (job: SerializedJob) => Promise<void>, options?: {
1543
2937
  autoAck?: boolean;
1544
2938
  prefetch?: number;
1545
2939
  }): Promise<void>;
1546
2940
  /**
1547
- * Get queue size.
2941
+ * Returns the number of messages in the queue.
2942
+ *
2943
+ * @param queue - The queue name.
1548
2944
  */
1549
2945
  size(queue: string): Promise<number>;
1550
2946
  /**
1551
- * Clear a queue.
2947
+ * Purges the queue.
2948
+ *
2949
+ * @param queue - The queue name.
1552
2950
  */
1553
2951
  clear(queue: string): Promise<void>;
1554
2952
  }
@@ -1581,6 +2979,18 @@ interface RedisClient {
1581
2979
  eval(script: string, numKeys: number, ...args: (string | number)[]): Promise<any>;
1582
2980
  [key: string]: any;
1583
2981
  }
2982
+ /**
2983
+ * Extended Redis client with custom commands.
2984
+ */
2985
+ interface CustomRedisClient extends RedisClient {
2986
+ pushGroupJob(waitList: string, activeSet: string, pendingList: string, groupId: string, payload: string): Promise<number>;
2987
+ completeGroupJob(waitList: string, activeSet: string, pendingList: string, groupId: string): Promise<number>;
2988
+ popMany(queue: string, prefix: string, count: number, now: string): Promise<string[]>;
2989
+ }
2990
+ /**
2991
+ * Extended Redis client with custom group commands (Legacy name).
2992
+ */
2993
+ type GroupRedisClient = CustomRedisClient;
1584
2994
  /**
1585
2995
  * Redis driver configuration.
1586
2996
  */
@@ -1595,22 +3005,18 @@ interface RedisDriverConfig {
1595
3005
  prefix?: string;
1596
3006
  }
1597
3007
  /**
1598
- * Redis Driver
3008
+ * High-performance Redis queue driver.
1599
3009
  *
1600
- * Uses Redis as the queue backend.
1601
- * Implements FIFO via Redis Lists (LPUSH/RPOP).
1602
- *
1603
- * Requires `ioredis` or `redis`.
3010
+ * Implements FIFO queues using Redis Lists, reliable priority support, delayed jobs via Sorted Sets,
3011
+ * and rate limiting. Uses Lua scripts for atomic operations and advanced features like
3012
+ * group-based sequential processing.
1604
3013
  *
3014
+ * @public
1605
3015
  * @example
1606
3016
  * ```typescript
1607
- * import Redis from 'ioredis'
1608
- *
1609
- * const redis = new Redis('redis://localhost:6379')
1610
- * const redis = new Redis('ioredis://localhost:6379')
1611
- * const driver = new RedisDriver({ client: redis })
1612
- *
1613
- * await driver.push('default', serializedJob)
3017
+ * import Redis from 'ioredis';
3018
+ * const redis = new Redis();
3019
+ * const driver = new RedisDriver({ client: redis });
1614
3020
  * ```
1615
3021
  */
1616
3022
  declare class RedisDriver implements QueueDriver {
@@ -1625,16 +3031,32 @@ declare class RedisDriver implements QueueDriver {
1625
3031
  */
1626
3032
  private getKey;
1627
3033
  /**
1628
- * Push a job (LPUSH).
3034
+ * Pushes a job to Redis.
3035
+ *
3036
+ * Handles regular jobs (LPUSH), delayed jobs (ZADD), and grouped jobs (custom Lua logic).
3037
+ *
3038
+ * @param queue - The queue name.
3039
+ * @param job - The serialized job.
3040
+ * @param options - Push options.
1629
3041
  */
1630
3042
  push(queue: string, job: SerializedJob, options?: JobPushOptions): Promise<void>;
1631
3043
  /**
1632
- * Complete a job (handle Group FIFO).
3044
+ * Completes a job.
3045
+ *
3046
+ * Crucial for Group FIFO logic to unlock the next job in the group.
3047
+ *
3048
+ * @param queue - The queue name.
3049
+ * @param job - The job to complete.
1633
3050
  */
1634
3051
  complete(queue: string, job: SerializedJob): Promise<void>;
1635
3052
  /**
1636
- * Pop a job from a queue (non-blocking).
1637
- * Optimized with Lua script for atomic priority polling.
3053
+ * Pops a job from the queue.
3054
+ *
3055
+ * Checks priorities in order (critical -> high -> default -> low).
3056
+ * Also checks for due delayed jobs and moves them to the active list.
3057
+ *
3058
+ * @param queue - The queue name.
3059
+ * @returns The job or `null`.
1638
3060
  */
1639
3061
  pop(queue: string): Promise<SerializedJob | null>;
1640
3062
  /**
@@ -1642,8 +3064,12 @@ declare class RedisDriver implements QueueDriver {
1642
3064
  */
1643
3065
  private popManualFallback;
1644
3066
  /**
1645
- * Pop a job from the queue (blocking).
1646
- * Uses BRPOP for efficiency. Supports multiple queues and priorities.
3067
+ * Pops a job using blocking Redis commands (BRPOP).
3068
+ *
3069
+ * Efficiently waits for a job to arrive without polling.
3070
+ *
3071
+ * @param queues - The queues to listen to.
3072
+ * @param timeout - Timeout in seconds.
1647
3073
  */
1648
3074
  popBlocking(queues: string | string[], timeout: number): Promise<SerializedJob | null>;
1649
3075
  /**
@@ -1651,60 +3077,101 @@ declare class RedisDriver implements QueueDriver {
1651
3077
  */
1652
3078
  private parsePayload;
1653
3079
  /**
1654
- * Get queue size.
3080
+ * Returns the length of the queue (Redis List length).
3081
+ *
3082
+ * @param queue - The queue name.
1655
3083
  */
1656
3084
  size(queue: string): Promise<number>;
1657
3085
  /**
1658
- * Mark a job as permanently failed (DLQ).
3086
+ * Marks a job as permanently failed by moving it to a DLQ list.
3087
+ *
3088
+ * @param queue - The queue name.
3089
+ * @param job - The failed job.
1659
3090
  */
1660
3091
  fail(queue: string, job: SerializedJob): Promise<void>;
1661
3092
  /**
1662
- * Clear a queue.
3093
+ * Clears the queue and its associated delayed/active sets.
3094
+ *
3095
+ * @param queue - The queue name.
1663
3096
  */
1664
3097
  clear(queue: string): Promise<void>;
1665
3098
  /**
1666
- * Get queue statistics.
1667
- * Optimized with Redis Pipeline to fetch all priorities and DLQ stats in one trip.
3099
+ * Retrieves full stats for the queue using Redis Pipelining.
3100
+ *
3101
+ * Aggregates counts from all priority lists and the DLQ.
3102
+ *
3103
+ * @param queue - The queue name.
1668
3104
  */
1669
3105
  stats(queue: string): Promise<QueueStats>;
1670
3106
  /**
1671
- * Push multiple jobs.
3107
+ * Pushes multiple jobs to the queue.
3108
+ *
3109
+ * Uses pipeline for batch efficiency. Falls back to individual pushes if complex logic (groups/priority) is involved.
3110
+ *
3111
+ * @param queue - The queue name.
3112
+ * @param jobs - Array of jobs.
1672
3113
  */
1673
3114
  pushMany(queue: string, jobs: SerializedJob[]): Promise<void>;
1674
3115
  /**
1675
- * Pop multiple jobs.
1676
- * Atomic operation across multiple priority levels.
3116
+ * Pops multiple jobs from the queue.
3117
+ *
3118
+ * Uses a Lua script for atomic retrieval across priorities.
3119
+ *
3120
+ * @param queue - The queue name.
3121
+ * @param count - Max jobs to pop.
1677
3122
  */
1678
3123
  popMany(queue: string, count: number): Promise<SerializedJob[]>;
1679
3124
  /**
1680
- * Report worker heartbeat for monitoring.
3125
+ * Reports a worker heartbeat.
3126
+ *
3127
+ * Stores worker metadata in a key with an expiration (TTL).
1681
3128
  */
1682
3129
  reportHeartbeat(workerInfo: any, prefix?: string): Promise<void>;
1683
3130
  /**
1684
- * Publish a log message for monitoring.
3131
+ * Publishes monitoring logs.
3132
+ *
3133
+ * Uses Redis Pub/Sub for real-time logs and a capped List for history.
1685
3134
  */
1686
3135
  publishLog(logPayload: any, prefix?: string): Promise<void>;
1687
3136
  /**
1688
- * Check if a queue is rate limited.
1689
- * Uses a fixed window counter.
3137
+ * Checks the rate limit for a queue.
3138
+ *
3139
+ * Uses a simple Fixed Window counter (INCR + EXPIRE).
3140
+ *
3141
+ * @param queue - The queue name.
3142
+ * @param config - Rate limit rules.
1690
3143
  */
1691
3144
  checkRateLimit(queue: string, config: {
1692
3145
  max: number;
1693
3146
  duration: number;
1694
3147
  }): Promise<boolean>;
1695
3148
  /**
1696
- * Get failed jobs from DLQ.
3149
+ * Retrieves failed jobs from the DLQ.
3150
+ *
3151
+ * @param queue - The queue name.
3152
+ * @param start - Start index.
3153
+ * @param end - End index.
1697
3154
  */
1698
3155
  getFailed(queue: string, start?: number, end?: number): Promise<SerializedJob[]>;
1699
3156
  /**
1700
- * Retry failed jobs from DLQ.
1701
- * Moves jobs from failed list back to the main queue.
3157
+ * Retries failed jobs.
3158
+ *
3159
+ * Pops from DLQ and pushes back to the active queue (RPOPLPUSH equivalent logic).
3160
+ *
3161
+ * @param queue - The queue name.
3162
+ * @param count - Jobs to retry.
1702
3163
  */
1703
3164
  retryFailed(queue: string, count?: number): Promise<number>;
1704
3165
  /**
1705
- * Clear failed jobs from DLQ.
3166
+ * Clears the Dead Letter Queue.
3167
+ *
3168
+ * @param queue - The queue name.
1706
3169
  */
1707
3170
  clearFailed(queue: string): Promise<void>;
3171
+ /**
3172
+ * Retrieves all discovered queue names from Redis.
3173
+ */
3174
+ getQueues(): Promise<string[]>;
1708
3175
  }
1709
3176
 
1710
3177
  /**
@@ -1738,27 +3205,17 @@ interface SQSDriverConfig {
1738
3205
  waitTimeSeconds?: number;
1739
3206
  }
1740
3207
  /**
1741
- * SQS Driver
1742
- *
1743
- * Uses AWS SQS as the queue backend.
1744
- * Supports standard/FIFO queues, long polling, DLQ setups, etc.
3208
+ * Amazon SQS queue driver.
1745
3209
  *
1746
- * Requires `@aws-sdk/client-sqs`.
3210
+ * Wraps the AWS SDK for SQS. Supports standard and FIFO queues, long polling,
3211
+ * and visibility timeouts.
1747
3212
  *
3213
+ * @public
1748
3214
  * @example
1749
3215
  * ```typescript
1750
- * import { SQSClient } from '@aws-sdk/client-sqs'
1751
- *
1752
- * const sqs = new SQSClient({
1753
- * region: 'us-east-1',
1754
- * credentials: {
1755
- * accessKeyId: process.env.AWS_ACCESS_KEY_ID!,
1756
- * secretAccessKey: process.env.AWS_SECRET_ACCESS_KEY!,
1757
- * }
1758
- * })
1759
- *
1760
- * const driver = new SQSDriver({ client: sqs })
1761
- * await driver.push('default', serializedJob)
3216
+ * import { SQSClient } from '@aws-sdk/client-sqs';
3217
+ * const sqs = new SQSClient({ region: 'us-east-1' });
3218
+ * const driver = new SQSDriver({ client: sqs });
1762
3219
  * ```
1763
3220
  */
1764
3221
  declare class SQSDriver implements QueueDriver {
@@ -1773,98 +3230,371 @@ declare class SQSDriver implements QueueDriver {
1773
3230
  */
1774
3231
  private getQueueUrl;
1775
3232
  /**
1776
- * Push a job to SQS.
3233
+ * Pushes a job to SQS.
3234
+ *
3235
+ * @param queue - The queue name (or URL).
3236
+ * @param job - The serialized job.
1777
3237
  */
1778
3238
  push(queue: string, job: SerializedJob): Promise<void>;
1779
3239
  /**
1780
- * Pop a job (long polling).
3240
+ * Pops a job from SQS (using long polling).
3241
+ *
3242
+ * @param queue - The queue name (or URL).
1781
3243
  */
1782
3244
  pop(queue: string): Promise<SerializedJob | null>;
1783
3245
  /**
1784
- * Pop multiple jobs.
1785
- * Leverages SQS MaxNumberOfMessages (up to 10).
3246
+ * Pops multiple jobs (up to 10).
3247
+ *
3248
+ * @param queue - The queue name.
3249
+ * @param count - Max jobs (capped at 10 by SQS).
1786
3250
  */
1787
3251
  popMany(queue: string, count: number): Promise<SerializedJob[]>;
1788
3252
  /**
1789
- * Get queue size (approximate).
3253
+ * Returns the approximate number of messages in the queue.
3254
+ *
3255
+ * @param queue - The queue name.
1790
3256
  */
1791
3257
  size(queue: string): Promise<number>;
1792
3258
  /**
1793
- * Clear a queue by receiving and deleting messages.
3259
+ * Clears the queue by continuously receiving and deleting messages.
3260
+ *
3261
+ * SQS does not have a "purge" command in the client data plane easily accessible here,
3262
+ * so we drain the queue.
1794
3263
  *
1795
- * Note: SQS does not provide a direct "purge" API via this wrapper. This method will
1796
- * keep receiving and deleting messages until the queue is empty.
3264
+ * @param queue - The queue name.
1797
3265
  */
1798
3266
  clear(queue: string): Promise<void>;
1799
3267
  /**
1800
- * Push multiple jobs.
3268
+ * Pushes multiple jobs using SQS batch API.
3269
+ *
3270
+ * @param queue - The queue name.
3271
+ * @param jobs - Array of jobs.
1801
3272
  */
1802
3273
  pushMany(queue: string, jobs: SerializedJob[]): Promise<void>;
1803
3274
  /**
1804
- * Acknowledge is not supported via messageId.
3275
+ * Throws error as SQS requires ReceiptHandle, not just MessageId.
1805
3276
  */
1806
3277
  acknowledge(_messageId: string): Promise<void>;
1807
3278
  /**
1808
- * Delete a message (acknowledge processing completion).
3279
+ * Deletes a message using its ReceiptHandle (ACK).
3280
+ *
3281
+ * @param queue - The queue name.
3282
+ * @param receiptHandle - The SQS receipt handle.
1809
3283
  */
1810
3284
  deleteMessage(queue: string, receiptHandle: string): Promise<void>;
1811
3285
  }
1812
3286
 
1813
3287
  /**
1814
- * Options for configuring OrbitStream (Queue Orbit).
3288
+ * Configuration options for distributed locks.
3289
+ *
3290
+ * Defines the time-to-live (TTL), retry strategy, and automatic renewal behavior for a lock.
3291
+ *
3292
+ * @public
3293
+ * @since 3.1.0
3294
+ * @example
3295
+ * ```typescript
3296
+ * const options: LockOptions = {
3297
+ * ttl: 60000, // Lock held for 60 seconds
3298
+ * retryCount: 3, // Retry 3 times on failure
3299
+ * retryDelay: 100, // Wait 100ms between retries
3300
+ * refreshInterval: 20000 // Auto-renew every 20 seconds
3301
+ * };
3302
+ * ```
3303
+ */
3304
+ interface LockOptions {
3305
+ /**
3306
+ * Time-to-live for the lock in milliseconds.
3307
+ *
3308
+ * The lock will automatically expire if the holder does not release or renew it
3309
+ * before this duration elapses.
3310
+ */
3311
+ ttl: number;
3312
+ /**
3313
+ * Number of retry attempts if lock acquisition fails.
3314
+ *
3315
+ * Set to 0 to disable retries.
3316
+ */
3317
+ retryCount: number;
3318
+ /**
3319
+ * Delay between retry attempts in milliseconds.
3320
+ */
3321
+ retryDelay: number;
3322
+ /**
3323
+ * Interval for automatic lock renewal in milliseconds.
3324
+ *
3325
+ * If set, the lock will automatically extend its TTL every `refreshInterval`.
3326
+ * Recommended value is 1/3 of the `ttl`.
3327
+ *
3328
+ * @optional
3329
+ */
3330
+ refreshInterval?: number;
3331
+ }
3332
+ /**
3333
+ * Distributed lock implementation based on Redis (Redlock style).
3334
+ *
3335
+ * Provides mutual exclusion in a distributed environment, ensuring only one node
3336
+ * holds a specific lock at a time. Supports automatic renewal, retry mechanisms,
3337
+ * and safe release (only the holder can release).
3338
+ *
3339
+ * @public
3340
+ * @since 3.1.0
3341
+ * @example
3342
+ * ```typescript
3343
+ * const lock = new DistributedLock(redisClient);
3344
+ *
3345
+ * const acquired = await lock.acquire('my-resource', {
3346
+ * ttl: 60000,
3347
+ * retryCount: 3,
3348
+ * retryDelay: 100,
3349
+ * refreshInterval: 20000
3350
+ * });
3351
+ *
3352
+ * if (acquired) {
3353
+ * try {
3354
+ * // Perform exclusive operation
3355
+ * } finally {
3356
+ * await lock.release('my-resource');
3357
+ * }
3358
+ * }
3359
+ * ```
3360
+ */
3361
+ declare class DistributedLock {
3362
+ private client;
3363
+ /**
3364
+ * Unique identifier for this lock instance.
3365
+ * Used to ensure only the owner can release the lock.
3366
+ */
3367
+ private lockId;
3368
+ /**
3369
+ * Timer for automatic renewal.
3370
+ */
3371
+ private refreshTimer;
3372
+ /**
3373
+ * The key of the currently held lock.
3374
+ */
3375
+ private currentLockKey;
3376
+ /**
3377
+ * Creates a DistributedLock instance.
3378
+ *
3379
+ * @param client - Redis client instance. Must support SET, DEL, and EVAL commands.
3380
+ */
3381
+ constructor(client: GroupRedisClient);
3382
+ /**
3383
+ * Attempts to acquire a distributed lock for the specified key.
3384
+ *
3385
+ * Uses Redis `SET key value EX ttl NX` for atomic acquisition.
3386
+ * If the lock is held by another node, it retries according to `retryCount`.
3387
+ * Upon success, if `refreshInterval` is set, automatic renewal starts.
3388
+ *
3389
+ * @param key - The lock key. Use a meaningful resource identifier.
3390
+ * @param options - Configuration options for the lock.
3391
+ * @returns `true` if the lock was acquired, `false` otherwise.
3392
+ *
3393
+ * @throws {Error} If the Redis client does not support the SET command.
3394
+ *
3395
+ * @example
3396
+ * ```typescript
3397
+ * const acquired = await lock.acquire('schedule:job-123', {
3398
+ * ttl: 30000,
3399
+ * retryCount: 5,
3400
+ * retryDelay: 200
3401
+ * });
3402
+ *
3403
+ * if (!acquired) {
3404
+ * console.log('Resource is currently locked by another node');
3405
+ * }
3406
+ * ```
3407
+ */
3408
+ acquire(key: string, options: LockOptions): Promise<boolean>;
3409
+ /**
3410
+ * Releases the lock for the specified key.
3411
+ *
3412
+ * Uses a Lua script to ensure atomicity: the lock is deleted ONLY if the value matches
3413
+ * this instance's `lockId`. This prevents deleting locks held by others.
3414
+ * Stops the auto-renewal timer upon success.
3415
+ *
3416
+ * @param key - The lock key to release.
3417
+ *
3418
+ * @throws {Error} If the Redis client does not support the EVAL command.
3419
+ *
3420
+ * @example
3421
+ * ```typescript
3422
+ * await lock.release('schedule:job-123');
3423
+ * ```
3424
+ */
3425
+ release(key: string): Promise<void>;
3426
+ /**
3427
+ * Starts the automatic renewal mechanism.
3428
+ *
3429
+ * Periodically extends the lock's TTL to prevent expiration during long-running tasks.
3430
+ * Uses a Lua script to ensure only owned locks are renewed.
3431
+ *
3432
+ * @param key - The lock key.
3433
+ * @param options - Lock options containing `refreshInterval`.
3434
+ */
3435
+ private startRefresh;
3436
+ /**
3437
+ * Stops the automatic renewal timer.
3438
+ */
3439
+ private stopRefresh;
3440
+ /**
3441
+ * Helper for delay.
3442
+ *
3443
+ * @param ms - Milliseconds to sleep.
3444
+ */
3445
+ private sleep;
3446
+ /**
3447
+ * Checks if the specified lock is currently held by this instance.
3448
+ *
3449
+ * @param key - The lock key.
3450
+ * @returns `true` if held, `false` otherwise.
3451
+ *
3452
+ * @example
3453
+ * ```typescript
3454
+ * if (lock.isHeld('schedule:job-123')) {
3455
+ * console.log('Lock is active');
3456
+ * }
3457
+ * ```
3458
+ */
3459
+ isHeld(key: string): boolean;
3460
+ }
3461
+
3462
+ /**
3463
+ * Configuration options for the OrbitStream extension.
3464
+ *
3465
+ * Extends the standard `QueueConfig` with specific options for integration into
3466
+ * the Gravito lifecycle, such as auto-starting workers in development environments.
3467
+ *
1815
3468
  * @public
3469
+ * @example
3470
+ * ```typescript
3471
+ * const options: OrbitStreamOptions = {
3472
+ * default: 'redis',
3473
+ * autoStartWorker: true
3474
+ * };
3475
+ * ```
1816
3476
  */
1817
3477
  interface OrbitStreamOptions extends QueueConfig {
1818
3478
  /**
1819
- * Whether to automatically start an embedded worker in development mode.
1820
- * Useful for simple local testing without running a separate worker process.
3479
+ * Automatically start an embedded worker process.
3480
+ *
3481
+ * If set to `true`, a background worker will be spawned within the main process.
3482
+ * This is recommended for development or simple deployments but should be
3483
+ * avoided in high-scale production to keep web servers stateless.
1821
3484
  */
1822
3485
  autoStartWorker?: boolean;
1823
3486
  /**
1824
- * Configuration for the embedded worker/consumer.
3487
+ * Configuration options for the embedded worker.
3488
+ *
3489
+ * Only used if `autoStartWorker` is true. Defines concurrency, polling intervals, etc.
1825
3490
  */
1826
3491
  workerOptions?: ConsumerOptions;
3492
+ /**
3493
+ * Configuration for the Stream Monitoring Dashboard API.
3494
+ *
3495
+ * If enabled, registers API routes for monitoring queue health and job history.
3496
+ *
3497
+ * @default false
3498
+ */
3499
+ dashboard?: boolean | {
3500
+ /**
3501
+ * Base path for the dashboard API routes.
3502
+ * @default '/_flux'
3503
+ */
3504
+ path?: string;
3505
+ };
1827
3506
  }
1828
3507
  /**
1829
- * OrbitStream provides a powerful, multi-driver queue system for Gravito.
1830
- * It integrates with various backends (Redis, Database, SQS, RabbitMQ)
1831
- * and supports job serialization, delayed jobs, and FIFO processing.
3508
+ * The Queue Orbit (Plugin) for Gravito Framework.
3509
+ *
3510
+ * This class acts as the integration layer between the `@gravito/stream` package
3511
+ * and the Gravito core system (`PlanetCore`). It registers the `QueueManager`
3512
+ * service, injects it into the request context, and manages the lifecycle of
3513
+ * embedded workers.
1832
3514
  *
3515
+ * @public
1833
3516
  * @example
1834
3517
  * ```typescript
1835
- * const stream = new OrbitStream({
3518
+ * const stream = OrbitStream.configure({
1836
3519
  * default: 'redis',
1837
3520
  * connections: {
1838
- * redis: { driver: 'redis', host: 'localhost' }
3521
+ * redis: { driver: 'redis', client: redis }
1839
3522
  * }
1840
3523
  * });
1841
- * core.addOrbit(stream);
3524
+ *
3525
+ * await PlanetCore.boot({ orbits: [stream] });
1842
3526
  * ```
1843
- * @public
1844
3527
  */
1845
3528
  declare class OrbitStream implements GravitoOrbit {
1846
3529
  private options;
1847
3530
  private queueManager?;
1848
3531
  private consumer?;
3532
+ private core?;
1849
3533
  constructor(options?: OrbitStreamOptions);
1850
3534
  /**
1851
- * Static configuration helper.
3535
+ * Factory method for creating and configuring an OrbitStream instance.
3536
+ *
3537
+ * Provides a fluent way to instantiate the orbit during application bootstrap.
3538
+ *
3539
+ * @param options - Configuration options.
3540
+ * @returns A new OrbitStream instance.
3541
+ *
3542
+ * @example
3543
+ * ```typescript
3544
+ * const orbit = OrbitStream.configure({ default: 'memory' });
3545
+ * ```
1852
3546
  */
1853
3547
  static configure(options: OrbitStreamOptions): OrbitStream;
1854
3548
  /**
1855
- * Install into PlanetCore.
3549
+ * Installs the Queue system into the Gravito PlanetCore.
3550
+ *
3551
+ * This lifecycle method:
3552
+ * 1. Initializes the `QueueManager`.
3553
+ * 2. Registers the `queue` service in the dependency injection container.
3554
+ * 3. Sets up a global middleware to inject `QueueManager` into the request context (`c.get('queue')`).
3555
+ * 4. Automatically detects and registers database connections if available in the context.
3556
+ * 5. Starts the embedded worker if configured.
3557
+ *
3558
+ * @param core - The PlanetCore instance.
1856
3559
  */
1857
3560
  install(core: PlanetCore): void;
1858
3561
  /**
1859
- * Start embedded worker.
3562
+ * Starts the embedded worker process.
3563
+ *
3564
+ * Launches a `Consumer` instance to process jobs in the background.
3565
+ * Throws an error if `QueueManager` is not initialized or if a worker is already running.
3566
+ *
3567
+ * @param options - Consumer configuration options.
3568
+ * @throws {Error} If QueueManager is missing or worker is already active.
3569
+ *
3570
+ * @example
3571
+ * ```typescript
3572
+ * orbit.startWorker({ queues: ['default'] });
3573
+ * ```
1860
3574
  */
1861
3575
  startWorker(options: ConsumerOptions): void;
1862
3576
  /**
1863
- * Stop embedded worker.
3577
+ * Stops the embedded worker process.
3578
+ *
3579
+ * Gracefully shuts down the consumer, waiting for active jobs to complete.
3580
+ *
3581
+ * @returns A promise that resolves when the worker has stopped.
3582
+ *
3583
+ * @example
3584
+ * ```typescript
3585
+ * await orbit.stopWorker();
3586
+ * ```
1864
3587
  */
1865
3588
  stopWorker(): Promise<void>;
1866
3589
  /**
1867
- * Get QueueManager instance.
3590
+ * Retrieves the underlying QueueManager instance.
3591
+ *
3592
+ * @returns The active QueueManager, or undefined if not installed.
3593
+ *
3594
+ * @example
3595
+ * ```typescript
3596
+ * const manager = orbit.getQueueManager();
3597
+ * ```
1868
3598
  */
1869
3599
  getQueueManager(): QueueManager | undefined;
1870
3600
  }
@@ -1879,7 +3609,20 @@ declare module '@gravito/core' {
1879
3609
 
1880
3610
  /**
1881
3611
  * Buffered Persistence Wrapper.
1882
- * Wraps any PersistenceAdapter to add buffering and batch writing capabilities.
3612
+ *
3613
+ * Decorates any `PersistenceAdapter` to add write buffering. Instead of writing
3614
+ * to the database immediately for every event, it collects jobs and logs in memory
3615
+ * and flushes them in batches. This significantly reduces database I/O for high-throughput queues.
3616
+ *
3617
+ * @public
3618
+ * @example
3619
+ * ```typescript
3620
+ * const mysqlAdapter = new MySQLPersistence(db);
3621
+ * const bufferedAdapter = new BufferedPersistence(mysqlAdapter, {
3622
+ * maxBufferSize: 100,
3623
+ * flushInterval: 500
3624
+ * });
3625
+ * ```
1883
3626
  */
1884
3627
  declare class BufferedPersistence implements PersistenceAdapter {
1885
3628
  private adapter;
@@ -1892,8 +3635,21 @@ declare class BufferedPersistence implements PersistenceAdapter {
1892
3635
  maxBufferSize?: number;
1893
3636
  flushInterval?: number;
1894
3637
  });
3638
+ /**
3639
+ * Buffers a job archive request.
3640
+ *
3641
+ * @param queue - The queue name.
3642
+ * @param job - The serialized job.
3643
+ * @param status - The final job status.
3644
+ */
1895
3645
  archive(queue: string, job: SerializedJob, status: 'completed' | 'failed' | 'waiting' | string): Promise<void>;
3646
+ /**
3647
+ * Delegates find to the underlying adapter (no buffering for reads).
3648
+ */
1896
3649
  find(queue: string, id: string): Promise<SerializedJob | null>;
3650
+ /**
3651
+ * Delegates list to the underlying adapter (no buffering for reads).
3652
+ */
1897
3653
  list(queue: string, options?: {
1898
3654
  limit?: number;
1899
3655
  offset?: number;
@@ -1902,19 +3658,38 @@ declare class BufferedPersistence implements PersistenceAdapter {
1902
3658
  startTime?: Date;
1903
3659
  endTime?: Date;
1904
3660
  }): Promise<SerializedJob[]>;
3661
+ /**
3662
+ * Archives multiple jobs directly (bypassing buffer, or flushing first).
3663
+ *
3664
+ * Actually, for consistency, this might just pass through.
3665
+ */
1905
3666
  archiveMany(jobs: Array<{
1906
3667
  queue: string;
1907
3668
  job: SerializedJob;
1908
3669
  status: 'completed' | 'failed' | 'waiting' | string;
1909
3670
  }>): Promise<void>;
3671
+ /**
3672
+ * Delegates cleanup to the underlying adapter.
3673
+ */
1910
3674
  cleanup(days: number): Promise<number>;
3675
+ /**
3676
+ * Flushes all buffered data to the underlying adapter.
3677
+ *
3678
+ * Uses `archiveMany` and `archiveLogMany` if supported by the adapter for batch efficiency.
3679
+ */
1911
3680
  flush(): Promise<void>;
3681
+ /**
3682
+ * Delegates count to the underlying adapter.
3683
+ */
1912
3684
  count(queue: string, options?: {
1913
3685
  status?: 'completed' | 'failed' | 'waiting' | string;
1914
3686
  jobId?: string;
1915
3687
  startTime?: Date;
1916
3688
  endTime?: Date;
1917
3689
  }): Promise<number>;
3690
+ /**
3691
+ * Buffers a log message.
3692
+ */
1918
3693
  archiveLog(log: {
1919
3694
  level: string;
1920
3695
  message: string;
@@ -1922,6 +3697,9 @@ declare class BufferedPersistence implements PersistenceAdapter {
1922
3697
  queue?: string;
1923
3698
  timestamp: Date;
1924
3699
  }): Promise<void>;
3700
+ /**
3701
+ * Archives multiple logs directly.
3702
+ */
1925
3703
  archiveLogMany(logs: Array<{
1926
3704
  level: string;
1927
3705
  message: string;
@@ -1929,6 +3707,9 @@ declare class BufferedPersistence implements PersistenceAdapter {
1929
3707
  queue?: string;
1930
3708
  timestamp: Date;
1931
3709
  }>): Promise<void>;
3710
+ /**
3711
+ * Delegates listLogs to the underlying adapter.
3712
+ */
1932
3713
  listLogs(options?: {
1933
3714
  limit?: number;
1934
3715
  offset?: number;
@@ -1939,6 +3720,9 @@ declare class BufferedPersistence implements PersistenceAdapter {
1939
3720
  startTime?: Date;
1940
3721
  endTime?: Date;
1941
3722
  }): Promise<any[]>;
3723
+ /**
3724
+ * Delegates countLogs to the underlying adapter.
3725
+ */
1942
3726
  countLogs(options?: {
1943
3727
  level?: string;
1944
3728
  workerId?: string;
@@ -1947,12 +3731,23 @@ declare class BufferedPersistence implements PersistenceAdapter {
1947
3731
  startTime?: Date;
1948
3732
  endTime?: Date;
1949
3733
  }): Promise<number>;
3734
+ /**
3735
+ * Ensures the auto-flush timer is running.
3736
+ */
1950
3737
  private ensureFlushTimer;
1951
3738
  }
1952
3739
 
1953
3740
  /**
1954
3741
  * MySQL Persistence Adapter.
1955
- * Archives jobs into a MySQL table for long-term auditing.
3742
+ *
3743
+ * Implements the `PersistenceAdapter` interface for MySQL databases.
3744
+ * Stores job history and logs in relational tables for long-term retention and auditing.
3745
+ *
3746
+ * @public
3747
+ * @example
3748
+ * ```typescript
3749
+ * const persistence = new MySQLPersistence(dbConnection);
3750
+ * ```
1956
3751
  */
1957
3752
  declare class MySQLPersistence implements PersistenceAdapter {
1958
3753
  private db;
@@ -1968,13 +3763,25 @@ declare class MySQLPersistence implements PersistenceAdapter {
1968
3763
  maxBufferSize?: number;
1969
3764
  flushInterval?: number;
1970
3765
  });
3766
+ /**
3767
+ * Archives a single job.
3768
+ */
1971
3769
  archive(queue: string, job: SerializedJob, status: 'completed' | 'failed' | 'waiting' | string): Promise<void>;
3770
+ /**
3771
+ * Archives multiple jobs in a batch.
3772
+ */
1972
3773
  archiveMany(jobs: Array<{
1973
3774
  queue: string;
1974
3775
  job: SerializedJob;
1975
3776
  status: 'completed' | 'failed' | 'waiting' | string;
1976
3777
  }>): Promise<void>;
3778
+ /**
3779
+ * No-op. Use BufferedPersistence if flushing is needed.
3780
+ */
1977
3781
  flush(): Promise<void>;
3782
+ /**
3783
+ * Finds an archived job by ID.
3784
+ */
1978
3785
  find(queue: string, id: string): Promise<SerializedJob | null>;
1979
3786
  /**
1980
3787
  * List jobs from the archive.
@@ -1989,6 +3796,9 @@ declare class MySQLPersistence implements PersistenceAdapter {
1989
3796
  }): Promise<SerializedJob[]>;
1990
3797
  /**
1991
3798
  * Search jobs from the archive.
3799
+ *
3800
+ * @param query - Search string (matches ID, payload, or error).
3801
+ * @param options - Filter options.
1992
3802
  */
1993
3803
  search(query: string, options?: {
1994
3804
  limit?: number;
@@ -1996,7 +3806,7 @@ declare class MySQLPersistence implements PersistenceAdapter {
1996
3806
  queue?: string;
1997
3807
  }): Promise<SerializedJob[]>;
1998
3808
  /**
1999
- * Archive a system log message (buffered).
3809
+ * Archive a system log message.
2000
3810
  */
2001
3811
  archiveLog(log: {
2002
3812
  level: string;
@@ -2006,7 +3816,7 @@ declare class MySQLPersistence implements PersistenceAdapter {
2006
3816
  timestamp: Date;
2007
3817
  }): Promise<void>;
2008
3818
  /**
2009
- * Archive multiple log messages (direct batch write).
3819
+ * Archive multiple log messages.
2010
3820
  */
2011
3821
  archiveLogMany(logs: Array<{
2012
3822
  level: string;
@@ -2053,7 +3863,7 @@ declare class MySQLPersistence implements PersistenceAdapter {
2053
3863
  endTime?: Date;
2054
3864
  }): Promise<number>;
2055
3865
  /**
2056
- * Help script to create the necessary table.
3866
+ * Helper to create necessary tables if they don't exist.
2057
3867
  */
2058
3868
  setupTable(): Promise<void>;
2059
3869
  private setupJobsTable;
@@ -2062,7 +3872,15 @@ declare class MySQLPersistence implements PersistenceAdapter {
2062
3872
 
2063
3873
  /**
2064
3874
  * SQLite Persistence Adapter.
3875
+ *
2065
3876
  * Archives jobs into a local SQLite database for zero-config persistence.
3877
+ * Uses transactions to optimize write performance for batches.
3878
+ *
3879
+ * @public
3880
+ * @example
3881
+ * ```typescript
3882
+ * const persistence = new SQLitePersistence(db);
3883
+ * ```
2066
3884
  */
2067
3885
  declare class SQLitePersistence implements PersistenceAdapter {
2068
3886
  private db;
@@ -2078,13 +3896,27 @@ declare class SQLitePersistence implements PersistenceAdapter {
2078
3896
  maxBufferSize?: number;
2079
3897
  flushInterval?: number;
2080
3898
  });
3899
+ /**
3900
+ * Archives a single job.
3901
+ */
2081
3902
  archive(queue: string, job: SerializedJob, status: 'completed' | 'failed' | 'waiting' | string): Promise<void>;
3903
+ /**
3904
+ * Archives multiple jobs in a batch.
3905
+ *
3906
+ * Optimized for SQLite by wrapping chunks in transactions.
3907
+ */
2082
3908
  archiveMany(jobs: Array<{
2083
3909
  queue: string;
2084
3910
  job: SerializedJob;
2085
3911
  status: 'completed' | 'failed' | 'waiting' | string;
2086
3912
  }>): Promise<void>;
3913
+ /**
3914
+ * No-op. Use BufferedPersistence if flushing is needed.
3915
+ */
2087
3916
  flush(): Promise<void>;
3917
+ /**
3918
+ * Finds an archived job by ID.
3919
+ */
2088
3920
  find(queue: string, id: string): Promise<SerializedJob | null>;
2089
3921
  /**
2090
3922
  * List jobs from the archive.
@@ -2092,7 +3924,7 @@ declare class SQLitePersistence implements PersistenceAdapter {
2092
3924
  list(queue: string, options?: {
2093
3925
  limit?: number;
2094
3926
  offset?: number;
2095
- status?: 'completed' | 'failed' | 'waiting' | string;
3927
+ status?: 'completed' | 'failed' | 'waiting' | string | string[];
2096
3928
  jobId?: string;
2097
3929
  startTime?: Date;
2098
3930
  endTime?: Date;
@@ -2157,7 +3989,7 @@ declare class SQLitePersistence implements PersistenceAdapter {
2157
3989
  * Count jobs in the archive.
2158
3990
  */
2159
3991
  count(queue: string, options?: {
2160
- status?: 'completed' | 'failed' | 'waiting' | string;
3992
+ status?: 'completed' | 'failed' | 'waiting' | string | string[];
2161
3993
  jobId?: string;
2162
3994
  startTime?: Date;
2163
3995
  endTime?: Date;
@@ -2171,72 +4003,385 @@ declare class SQLitePersistence implements PersistenceAdapter {
2171
4003
  }
2172
4004
 
2173
4005
  /**
2174
- * Class name serializer (Laravel-style).
4006
+ * Retry strategy for events.
4007
+ *
4008
+ * - 'bull': Use Bull Queue's built-in retry mechanism
4009
+ * - 'core': Use EventPriorityQueue's retry logic (not implemented in Stream backend)
4010
+ * - 'hybrid': Bull retries + Core DLQ fallback (future phase)
4011
+ */
4012
+ type RetryStrategy = 'bull' | 'core' | 'hybrid';
4013
+ /**
4014
+ * Configuration for StreamEventBackend.
4015
+ */
4016
+ interface StreamEventBackendConfig {
4017
+ /**
4018
+ * Retry strategy to use.
4019
+ * @default 'bull'
4020
+ */
4021
+ retryStrategy?: RetryStrategy;
4022
+ /**
4023
+ * Whether to integrate with CircuitBreaker.
4024
+ * @default false
4025
+ */
4026
+ circuitBreakerIntegration?: boolean;
4027
+ /**
4028
+ * Optional DLQ (Dead Letter Queue) handler for failed events.
4029
+ */
4030
+ dlqHandler?: {
4031
+ handle(event: EventTask, error: Error, attempt: number): Promise<void>;
4032
+ };
4033
+ /**
4034
+ * CircuitBreaker getter (injected from core)
4035
+ */
4036
+ getCircuitBreaker?: (hook: string) => any;
4037
+ }
4038
+ /**
4039
+ * Event backend implementation using Gravito Stream (Bull Queue).
4040
+ *
4041
+ * Provides persistent, distributed event processing with:
4042
+ * - Bull Queue persistence (Redis-backed)
4043
+ * - Configurable retry strategies
4044
+ * - Optional CircuitBreaker integration
4045
+ * - DLQ support for failed events
4046
+ *
4047
+ * @example
4048
+ * ```typescript
4049
+ * const queueManager = new QueueManager({
4050
+ * default: 'bullmq',
4051
+ * connections: {
4052
+ * bullmq: {
4053
+ * driver: 'bullmq',
4054
+ * queue: new Queue('gravito-events', { connection: redis })
4055
+ * }
4056
+ * }
4057
+ * })
4058
+ *
4059
+ * const backend = new StreamEventBackend(queueManager, {
4060
+ * retryStrategy: 'bull',
4061
+ * circuitBreakerIntegration: true
4062
+ * })
4063
+ * ```
4064
+ */
4065
+ declare class StreamEventBackend implements EventBackend {
4066
+ private queueManager;
4067
+ private config;
4068
+ constructor(queueManager: QueueManager, config?: StreamEventBackendConfig);
4069
+ /**
4070
+ * Build Job Push Options from EventOptions.
4071
+ *
4072
+ * Maps EventOptions to Bull Queue JobPushOptions with retry strategy applied.
4073
+ */
4074
+ private buildJobOptions;
4075
+ /**
4076
+ * Enqueue an event task to the stream queue.
4077
+ *
4078
+ * Applies retry strategy and CircuitBreaker checks based on configuration.
4079
+ * Supports DLQ routing for failed events.
4080
+ */
4081
+ enqueue(task: EventTask): Promise<void>;
4082
+ /**
4083
+ * Apply retry strategy to the job based on configuration.
4084
+ */
4085
+ private applyRetryStrategy;
4086
+ /**
4087
+ * Handle job failure and route to DLQ if configured.
4088
+ *
4089
+ * Called when a job exhausts all retry attempts.
4090
+ */
4091
+ handleJobFailure(task: EventTask, error: Error, attempt: number): Promise<void>;
4092
+ /**
4093
+ * Record a job failure for CircuitBreaker state management.
4094
+ *
4095
+ * Called when a job fails, regardless of retry status.
4096
+ */
4097
+ recordJobFailure(task: EventTask, error: Error): void;
4098
+ /**
4099
+ * Record a job success for CircuitBreaker state management.
4100
+ *
4101
+ * Called when a job completes successfully.
4102
+ */
4103
+ recordJobSuccess(task: EventTask): void;
4104
+ /**
4105
+ * Get the retry strategy configuration.
4106
+ */
4107
+ getRetryStrategy(): RetryStrategy;
4108
+ /**
4109
+ * Check if CircuitBreaker integration is enabled.
4110
+ */
4111
+ isCircuitBreakerEnabled(): boolean;
4112
+ /**
4113
+ * Get the DLQ handler, if configured.
4114
+ */
4115
+ getDLQHandler(): StreamEventBackendConfig['dlqHandler'] | undefined;
4116
+ }
4117
+
4118
+ /**
4119
+ * SystemEventJob - Internal job for processing Gravito async hooks.
4120
+ *
4121
+ * @internal
4122
+ */
4123
+ declare class SystemEventJob extends Job {
4124
+ readonly hook: string;
4125
+ readonly args: unknown;
4126
+ readonly options: Record<string, any>;
4127
+ /**
4128
+ * Optional failure callback for DLQ handling.
4129
+ */
4130
+ private onFailedCallback?;
4131
+ constructor(hook: string, args: unknown, options?: Record<string, any>);
4132
+ /**
4133
+ * Set failure callback for DLQ handling.
4134
+ *
4135
+ * @param callback - Called when job fails permanently
4136
+ * @returns Self for chaining
4137
+ */
4138
+ onFailed(callback: (error: Error, attempt: number) => Promise<void>): this;
4139
+ /**
4140
+ * Execute the hook listeners in the worker process.
4141
+ */
4142
+ handle(): Promise<void>;
4143
+ /**
4144
+ * Called when job fails permanently after all retries.
4145
+ *
4146
+ * This method is invoked by the worker when job exhausts all retry attempts.
4147
+ */
4148
+ failed(error: Error, attempt?: number): Promise<void>;
4149
+ }
4150
+
4151
+ /**
4152
+ * Class Name Serializer (Laravel-style).
2175
4153
  *
2176
- * Stores the class name and properties, then recreates an instance at runtime.
2177
- * This is the recommended serializer because it can restore class instances correctly.
4154
+ * Serializes jobs by storing their class name along with their properties.
4155
+ * During deserialization, it looks up the registered class constructor and creates a new instance,
4156
+ * populating it with the stored properties.
2178
4157
  *
2179
- * Requirement: Job classes must be dynamically loadable (by class name).
4158
+ * This is the recommended serializer for most use cases as it preserves the behavior (methods)
4159
+ * of the job class.
2180
4160
  *
4161
+ * @public
2181
4162
  * @example
2182
4163
  * ```typescript
2183
- * const serializer = new ClassNameSerializer()
2184
- * const serialized = serializer.serialize(new SendEmail('user@example.com'))
2185
- * // serialized.data contains class name and properties
4164
+ * const serializer = new ClassNameSerializer();
4165
+ * serializer.register(SendEmailJob);
2186
4166
  *
2187
- * const job = serializer.deserialize(serialized)
2188
- * // job is an instance of SendEmail
4167
+ * const serialized = serializer.serialize(new SendEmailJob('foo@bar.com'));
4168
+ * const job = serializer.deserialize(serialized); // instanceof SendEmailJob
2189
4169
  * ```
2190
4170
  */
2191
4171
  declare class ClassNameSerializer implements JobSerializer {
2192
4172
  /**
2193
- * Job class registry (for resolving classes by name).
4173
+ * Registry of job classes, mapped by class name.
2194
4174
  */
2195
4175
  private jobClasses;
2196
4176
  /**
2197
- * Register a Job class.
2198
- * @param jobClass - Job class
4177
+ * Registers a Job class for serialization.
4178
+ *
4179
+ * @param jobClass - The job class constructor.
2199
4180
  */
2200
4181
  register(jobClass: new (...args: unknown[]) => Job): void;
2201
4182
  /**
2202
- * Register multiple Job classes.
2203
- * @param jobClasses - Job class array
4183
+ * Registers multiple Job classes at once.
4184
+ *
4185
+ * @param jobClasses - An array of job class constructors.
2204
4186
  */
2205
4187
  registerMany(jobClasses: Array<new (...args: unknown[]) => Job>): void;
2206
4188
  /**
2207
- * Serialize a Job.
4189
+ * Serializes a Job instance.
4190
+ *
4191
+ * Captures the class name and all enumerable properties.
4192
+ *
4193
+ * @param job - The job to serialize.
2208
4194
  */
2209
4195
  serialize(job: Job): SerializedJob;
2210
4196
  /**
2211
- * Deserialize a Job.
4197
+ * Deserializes a Job instance.
4198
+ *
4199
+ * Instantiates the class matching `className` and assigns properties.
4200
+ *
4201
+ * @param serialized - The serialized job.
4202
+ * @throws {Error} If the job class is not registered.
2212
4203
  */
2213
4204
  deserialize(serialized: SerializedJob): Job;
2214
4205
  }
2215
4206
 
2216
4207
  /**
2217
- * JSON Serializer
2218
- *
2219
- * Serializes jobs using JSON.
2220
- * Suitable for simple scenarios where you only need to persist plain properties.
4208
+ * JSON Serializer.
2221
4209
  *
2222
- * Limitation: cannot restore class instances, functions, or complex objects.
4210
+ * Serializes jobs to standard JSON. This is the simplest serializer but has limitations:
4211
+ * it cannot restore class instances (methods are lost) or handle complex types like Maps/Sets.
4212
+ * Deserialized jobs will be plain objects that must be manually handled or cast.
2223
4213
  *
4214
+ * @public
2224
4215
  * @example
2225
4216
  * ```typescript
2226
- * const serializer = new JsonSerializer()
2227
- * const serialized = serializer.serialize(job)
2228
- * const job = serializer.deserialize(serialized)
4217
+ * const serializer = new JsonSerializer();
2229
4218
  * ```
2230
4219
  */
2231
4220
  declare class JsonSerializer implements JobSerializer {
2232
4221
  /**
2233
- * Serialize a job.
4222
+ * Serializes a job to a JSON object.
2234
4223
  */
2235
4224
  serialize(job: Job): SerializedJob;
2236
4225
  /**
2237
- * Deserialize a job.
4226
+ * Deserializes a JSON object into a basic Job-like object.
4227
+ *
4228
+ * Note: The result is NOT an instance of the original Job class.
2238
4229
  */
2239
4230
  deserialize(serialized: SerializedJob): Job;
2240
4231
  }
2241
4232
 
2242
- export { BufferedPersistence, ClassNameSerializer, Consumer, type ConsumerOptions, DatabaseDriver, type DatabaseDriverConfig, Job, type JobSerializer, JsonSerializer, KafkaDriver, type KafkaDriverConfig, MemoryDriver, MySQLPersistence, OrbitStream, type OrbitStreamOptions, type PersistenceAdapter, type QueueConfig, type QueueConnectionConfig, type QueueDriver, QueueManager, type Queueable, RabbitMQDriver, type RabbitMQDriverConfig, RedisDriver, type RedisDriverConfig, SQLitePersistence, SQSDriver, type SQSDriverConfig, Scheduler, type SerializedJob, type TopicOptions, Worker, type WorkerOptions };
4233
+ /**
4234
+ * Worker Pool Implementation.
4235
+ *
4236
+ * Manages a pool of Sandboxed Workers to provide concurrency control,
4237
+ * worker reuse, load balancing, and health monitoring.
4238
+ *
4239
+ * @public
4240
+ */
4241
+
4242
+ /**
4243
+ * Configuration options for the Worker Pool.
4244
+ */
4245
+ interface WorkerPoolConfig extends SandboxedWorkerConfig {
4246
+ /**
4247
+ * The maximum number of workers allowed in the pool.
4248
+ *
4249
+ * @default 4
4250
+ */
4251
+ poolSize?: number;
4252
+ /**
4253
+ * The minimum number of workers to keep alive.
4254
+ *
4255
+ * The pool will pre-warm and maintain at least this many ready workers.
4256
+ * @default 0
4257
+ */
4258
+ minWorkers?: number;
4259
+ /**
4260
+ * Interval for performing health checks in milliseconds.
4261
+ *
4262
+ * Periodically scans for and removes terminated or unhealthy workers.
4263
+ * @default 30000 (30 seconds)
4264
+ */
4265
+ healthCheckInterval?: number;
4266
+ }
4267
+ /**
4268
+ * Runtime statistics for the Worker Pool.
4269
+ */
4270
+ interface WorkerPoolStats {
4271
+ /** Total number of workers (ready + busy). */
4272
+ total: number;
4273
+ /** Number of idle workers ready for new jobs. */
4274
+ ready: number;
4275
+ /** Number of workers currently executing jobs. */
4276
+ busy: number;
4277
+ /** Number of workers in terminated state awaiting cleanup. */
4278
+ terminated: number;
4279
+ /** Number of jobs waiting in the queue. */
4280
+ pending: number;
4281
+ /** Total number of successfully completed jobs. */
4282
+ completed: number;
4283
+ /** Total number of failed jobs. */
4284
+ failed: number;
4285
+ }
4286
+ /**
4287
+ * Worker Pool.
4288
+ *
4289
+ * Orchestrates multiple `SandboxedWorker` instances to execute jobs concurrently.
4290
+ *
4291
+ * Key features:
4292
+ * - **Concurrency Control**: Limits the number of simultaneous job executions (`poolSize`).
4293
+ * - **Queueing**: Queues jobs when all workers are busy.
4294
+ * - **Lifecycle Management**: Automatically creates, reuses, and terminates workers.
4295
+ * - **Health Monitoring**: Periodically cleans up dead workers and maintains `minWorkers`.
4296
+ *
4297
+ * @example
4298
+ * ```typescript
4299
+ * const pool = new WorkerPool({
4300
+ * poolSize: 8,
4301
+ * minWorkers: 2,
4302
+ * maxExecutionTime: 30000
4303
+ * });
4304
+ *
4305
+ * await pool.execute(job);
4306
+ * await pool.shutdown();
4307
+ * ```
4308
+ */
4309
+ declare class WorkerPool {
4310
+ private workers;
4311
+ private config;
4312
+ private queue;
4313
+ private healthCheckTimer;
4314
+ private stats;
4315
+ /**
4316
+ * Creates a WorkerPool instance.
4317
+ *
4318
+ * @param config - Configuration options for the pool.
4319
+ */
4320
+ constructor(config?: WorkerPoolConfig);
4321
+ /**
4322
+ * Pre-warms the pool by creating the minimum number of workers.
4323
+ */
4324
+ private warmUp;
4325
+ /**
4326
+ * Creates a new SandboxedWorker and adds it to the pool.
4327
+ *
4328
+ * @returns The newly created worker.
4329
+ */
4330
+ private createWorker;
4331
+ /**
4332
+ * Retrieves an available worker from the pool.
4333
+ *
4334
+ * Priorities:
4335
+ * 1. Reuse an existing ready worker.
4336
+ * 2. Create a new worker if the pool is not full.
4337
+ * 3. Return `null` if the pool is saturated.
4338
+ *
4339
+ * @returns An available worker or `null`.
4340
+ */
4341
+ private getAvailableWorker;
4342
+ /**
4343
+ * Executes a job using the worker pool.
4344
+ *
4345
+ * If a worker is available, the job starts immediately.
4346
+ * Otherwise, it is added to the pending queue.
4347
+ *
4348
+ * @param job - The serialized job data.
4349
+ * @throws {Error} If execution fails.
4350
+ */
4351
+ execute(job: SerializedJob): Promise<void>;
4352
+ /**
4353
+ * Processes the next job in the queue if a worker is available.
4354
+ */
4355
+ private processQueue;
4356
+ /**
4357
+ * Starts the periodic health check.
4358
+ */
4359
+ private startHealthCheck;
4360
+ /**
4361
+ * Performs a health check on the pool.
4362
+ *
4363
+ * Removes terminated workers and ensures `minWorkers` are available.
4364
+ */
4365
+ private performHealthCheck;
4366
+ /**
4367
+ * Gets the current statistics of the worker pool.
4368
+ *
4369
+ * @returns Snapshot of pool statistics.
4370
+ */
4371
+ getStats(): WorkerPoolStats;
4372
+ /**
4373
+ * Shuts down the worker pool.
4374
+ *
4375
+ * Terminates all workers and rejects any pending jobs.
4376
+ */
4377
+ shutdown(): Promise<void>;
4378
+ /**
4379
+ * Waits for all active and pending jobs to complete.
4380
+ *
4381
+ * @param timeout - Maximum wait time in milliseconds. 0 for infinite.
4382
+ * @throws {Error} If the timeout is reached.
4383
+ */
4384
+ waitForCompletion(timeout?: number): Promise<void>;
4385
+ }
4386
+
4387
+ export { BatchConsumer, type BatchConsumerOptions, BufferedPersistence, BullMQDriver, type BullMQDriverConfig, ClassNameSerializer, Consumer, type ConsumerOptions, DatabaseDriver, type DatabaseDriverConfig, DistributedLock, GrpcDriver, type GrpcDriverConfig, Job, type JobSerializer, JsonSerializer, KafkaDriver, type KafkaDriverConfig, type LockOptions, MemoryDriver, MySQLPersistence, OrbitStream, type OrbitStreamOptions, type PersistenceAdapter, type QueueConfig, type QueueConnectionConfig, type QueueDriver, QueueManager, type Queueable, RabbitMQDriver, type RabbitMQDriverConfig, RedisDriver, type RedisDriverConfig, type RetryStrategy, SQLitePersistence, SQSDriver, type SQSDriverConfig, SandboxedWorker, type SandboxedWorkerConfig, type ScheduledJobConfig, Scheduler, type SchedulerOptions, type SerializedJob, StreamEventBackend, type StreamEventBackendConfig, SystemEventJob, type TopicOptions, Worker, type WorkerOptions, WorkerPool, type WorkerPoolConfig, type WorkerPoolStats };