@gravito/stream 2.0.2 → 2.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (81)
  1. package/README.md +27 -1
  2. package/dist/BatchConsumer.d.ts +81 -0
  3. package/dist/Consumer.d.ts +215 -0
  4. package/dist/DashboardProvider.d.ts +20 -0
  5. package/dist/Job.d.ts +183 -0
  6. package/dist/OrbitStream.d.ts +151 -0
  7. package/dist/QueueManager.d.ts +319 -0
  8. package/dist/Queueable.d.ts +91 -0
  9. package/dist/Scheduler.d.ts +214 -0
  10. package/dist/StreamEventBackend.d.ts +114 -0
  11. package/dist/SystemEventJob.d.ts +33 -0
  12. package/dist/Worker.d.ts +139 -0
  13. package/dist/benchmarks/PerformanceReporter.d.ts +99 -0
  14. package/dist/consumer/ConcurrencyGate.d.ts +55 -0
  15. package/dist/consumer/ConsumerStrategy.d.ts +41 -0
  16. package/dist/consumer/GroupSequencer.d.ts +57 -0
  17. package/dist/consumer/HeartbeatManager.d.ts +65 -0
  18. package/dist/consumer/JobExecutor.d.ts +61 -0
  19. package/dist/consumer/JobSourceGenerator.d.ts +31 -0
  20. package/dist/consumer/PollingStrategy.d.ts +42 -0
  21. package/dist/consumer/ReactiveStrategy.d.ts +41 -0
  22. package/dist/consumer/StreamingConsumer.d.ts +88 -0
  23. package/dist/consumer/index.d.ts +13 -0
  24. package/dist/consumer/types.d.ts +102 -0
  25. package/dist/drivers/BinaryJobFrame.d.ts +78 -0
  26. package/dist/drivers/BullMQDriver.d.ts +186 -0
  27. package/dist/drivers/DatabaseDriver.d.ts +131 -0
  28. package/dist/drivers/GrpcDriver.d.ts +16 -0
  29. package/dist/drivers/KafkaDriver.d.ts +148 -0
  30. package/dist/drivers/MemoryDriver.d.ts +108 -0
  31. package/dist/drivers/QueueDriver.d.ts +250 -0
  32. package/dist/drivers/RabbitMQDriver.d.ts +102 -0
  33. package/dist/drivers/RedisDriver.d.ts +294 -0
  34. package/dist/drivers/SQSDriver.d.ts +111 -0
  35. package/dist/drivers/kafka/BackpressureController.d.ts +60 -0
  36. package/dist/drivers/kafka/BatchProcessor.d.ts +50 -0
  37. package/dist/drivers/kafka/ConsumerLifecycleManager.d.ts +80 -0
  38. package/dist/drivers/kafka/ErrorCategorizer.d.ts +39 -0
  39. package/dist/drivers/kafka/ErrorRecoveryManager.d.ts +100 -0
  40. package/dist/drivers/kafka/HeartbeatManager.d.ts +57 -0
  41. package/dist/drivers/kafka/KafkaDriver.d.ts +138 -0
  42. package/dist/drivers/kafka/KafkaMetrics.d.ts +88 -0
  43. package/dist/drivers/kafka/KafkaNotifier.d.ts +54 -0
  44. package/dist/drivers/kafka/MessageBuffer.d.ts +71 -0
  45. package/dist/drivers/kafka/OffsetTracker.d.ts +63 -0
  46. package/dist/drivers/kafka/PerformanceMonitor.d.ts +88 -0
  47. package/dist/drivers/kafka/RateLimiter.d.ts +52 -0
  48. package/dist/drivers/kafka/RebalanceHandler.d.ts +104 -0
  49. package/dist/drivers/kafka/RingBuffer.d.ts +63 -0
  50. package/dist/drivers/kafka/index.d.ts +22 -0
  51. package/dist/drivers/kafka/types.d.ts +553 -0
  52. package/dist/drivers/prepareJobForTransport.d.ts +10 -0
  53. package/dist/index.cjs +5644 -5508
  54. package/dist/index.cjs.map +71 -0
  55. package/dist/index.d.ts +60 -4378
  56. package/dist/index.js +5609 -5453
  57. package/dist/index.js.map +71 -0
  58. package/dist/locks/DistributedLock.d.ts +175 -0
  59. package/dist/persistence/BufferedPersistence.d.ts +130 -0
  60. package/dist/persistence/BunBufferedPersistence.d.ts +173 -0
  61. package/dist/persistence/MySQLPersistence.d.ts +134 -0
  62. package/dist/persistence/SQLitePersistence.d.ts +133 -0
  63. package/dist/serializers/BinarySerializer.d.ts +42 -0
  64. package/dist/serializers/CachedSerializer.d.ts +38 -0
  65. package/dist/serializers/CborNativeSerializer.d.ts +56 -0
  66. package/dist/serializers/ClassNameSerializer.d.ts +58 -0
  67. package/dist/serializers/JobSerializer.d.ts +33 -0
  68. package/dist/serializers/JsonSerializer.d.ts +28 -0
  69. package/dist/serializers/JsonlSerializer.d.ts +90 -0
  70. package/dist/serializers/MessagePackSerializer.d.ts +29 -0
  71. package/dist/types.d.ts +653 -0
  72. package/dist/workers/BinaryWorkerProtocol.d.ts +77 -0
  73. package/dist/workers/BunWorker.d.ts +179 -0
  74. package/dist/workers/SandboxedWorker.d.ts +132 -0
  75. package/dist/workers/WorkerFactory.d.ts +128 -0
  76. package/dist/workers/WorkerPool.d.ts +186 -0
  77. package/dist/workers/bun-job-executor.d.ts +14 -0
  78. package/dist/workers/index.d.ts +13 -0
  79. package/dist/workers/job-executor.d.ts +9 -0
  80. package/package.json +6 -4
  81. package/dist/index.d.cts +0 -4387
@@ -0,0 +1,653 @@
1
+ /**
2
+ * Represents a job that has been serialized for storage in a queue.
3
+ *
4
+ * This interface defines the data structure used to persist jobs in the underlying
5
+ * storage mechanism (e.g., Redis, Database, SQS). It encapsulates all metadata
6
+ * required for processing, retries, and lifecycle management.
7
+ *
8
+ * @public
9
+ * @example
10
+ * ```typescript
11
+ * const job: SerializedJob = {
12
+ * id: 'job-123',
13
+ * type: 'json',
14
+ * data: '{"userId": 1}',
15
+ * createdAt: Date.now()
16
+ * };
17
+ * ```
18
+ */
19
+ export interface SerializedJob {
20
+ /**
21
+ * Unique identifier for the job.
22
+ */
23
+ id: string;
24
+ /**
25
+ * The serialization format used for the job data.
26
+ *
27
+ * - 'json': Simple JSON objects.
28
+ * - 'class': Serialized class instances (requires class registration).
29
+ * - 'msgpack': Binary MessagePack format.
30
+ * - 'binary': CBOR binary format for optimal performance and size.
31
+ * - 'jsonl': JSON Lines format, optimized for streaming and batch processing.
32
+ */
33
+ type: 'json' | 'class' | 'msgpack' | 'binary' | 'jsonl';
34
+ /**
35
+ * The actual serialized job payload.
36
+ *
37
+ * Contains the business data needed to execute the job.
38
+ * For 'binary' and 'msgpack' types, the data can be a Uint8Array or a Base64 string for transport.
39
+ */
40
+ data: string | Uint8Array;
41
+ /**
42
+ * The fully qualified class name of the job.
43
+ *
44
+ * Only required when `type` is 'class' to reconstruct the original object instance.
45
+ */
46
+ className?: string;
47
+ /**
48
+ * The timestamp (in milliseconds) when the job was originally created.
49
+ */
50
+ createdAt: number;
51
+ /**
52
+ * Optional delay in seconds before the job becomes eligible for processing.
53
+ *
54
+ * Used for scheduling future tasks.
55
+ */
56
+ delaySeconds?: number;
57
+ /**
58
+ * The number of times this job has been attempted so far.
59
+ */
60
+ attempts?: number;
61
+ /**
62
+ * The maximum number of retry attempts allowed before marking the job as failed.
63
+ */
64
+ maxAttempts?: number;
65
+ /**
66
+ * Group ID for sequential processing.
67
+ *
68
+ * Jobs sharing the same `groupId` are guaranteed to be processed in order (FIFO),
69
+ * provided the consumer supports this feature.
70
+ */
71
+ groupId?: string;
72
+ /**
73
+ * The initial delay in seconds before the first retry attempt after a failure.
74
+ */
75
+ retryAfterSeconds?: number;
76
+ /**
77
+ * The multiplier applied to the delay for exponential backoff strategies.
78
+ */
79
+ retryMultiplier?: number;
80
+ /**
81
+ * The error message from the last failed attempt, if any.
82
+ */
83
+ error?: string;
84
+ /**
85
+ * The timestamp (in milliseconds) when the job was permanently marked as failed.
86
+ */
87
+ failedAt?: number;
88
+ /**
89
+ * The priority level of the job.
90
+ *
91
+ * Higher values or specific strings (e.g., 'high') indicate higher priority.
92
+ */
93
+ priority?: string | number;
94
+ }
95
+ /**
96
+ * Represents a database record for an archived job.
97
+ *
98
+ * This interface maps to the SQL table structure used for job persistence and auditing.
99
+ * It stores historical data about jobs that have been completed or failed.
100
+ *
101
+ * @example
102
+ * ```typescript
103
+ * const row: JobRow = {
104
+ * id: 1,
105
+ * job_id: 'job-123',
106
+ * queue: 'default',
107
+ * status: 'completed',
108
+ * payload: '...',
109
+ * created_at: new Date(),
110
+ * archived_at: new Date()
111
+ * };
112
+ * ```
113
+ */
114
+ export interface JobRow {
115
+ /** Primary key of the record */
116
+ id: number;
117
+ /** The original job ID */
118
+ job_id: string;
119
+ /** The queue name the job belonged to */
120
+ queue: string;
121
+ /** Final status of the job (e.g., 'completed', 'failed') */
122
+ status: string;
123
+ /** Serialized job data */
124
+ payload: string;
125
+ /** Error message if the job failed */
126
+ error?: string | null;
127
+ /** When the job was created */
128
+ created_at: Date;
129
+ /** When the job was archived */
130
+ archived_at: Date;
131
+ }
132
+ /**
133
+ * Statistics snapshot for a specific queue.
134
+ *
135
+ * Provides insight into the current state of a queue, including pending,
136
+ * delayed, reserved, and failed job counts.
137
+ *
138
+ * @public
139
+ * @example
140
+ * ```typescript
141
+ * const stats: QueueStats = {
142
+ * queue: 'default',
143
+ * size: 42,
144
+ * delayed: 5,
145
+ * failed: 1
146
+ * };
147
+ * ```
148
+ */
149
+ export interface QueueStats {
150
+ /** Name of the queue */
151
+ queue: string;
152
+ /** Number of pending jobs waiting to be processed */
153
+ size: number;
154
+ /** Number of jobs scheduled for future execution */
155
+ delayed?: number;
156
+ /** Number of jobs currently being processed by workers */
157
+ reserved?: number;
158
+ /** Number of jobs in the Dead Letter Queue (DLQ) */
159
+ failed?: number;
160
+ /** Driver-specific custom metrics */
161
+ metrics?: Record<string, number>;
162
+ }
163
+ /**
164
+ * Snapshot of statistics across all connections and queues.
165
+ *
166
+ * Used by monitoring dashboards to provide a high-level overview
167
+ * of the entire background processing system.
168
+ *
169
+ * @public
170
+ */
171
+ export interface GlobalStats {
172
+ /** Map of connection names to their respective queue statistics */
173
+ connections: Record<string, QueueStats[]>;
174
+ /** Total number of pending jobs across all connections */
175
+ totalSize: number;
176
+ /** Total number of failed jobs across all connections */
177
+ totalFailed: number;
178
+ /** Timestamp of the snapshot */
179
+ timestamp: number;
180
+ }
181
+ /**
182
+ * Advanced topic configuration options for distributed queue systems.
183
+ *
184
+ * Used primarily by drivers like Kafka to configure topic properties.
185
+ *
186
+ * @public
187
+ * @example
188
+ * ```typescript
189
+ * const options: TopicOptions = {
190
+ * partitions: 3,
191
+ * replicationFactor: 2
192
+ * };
193
+ * ```
194
+ */
195
+ export interface TopicOptions {
196
+ /** Number of partitions for the topic */
197
+ partitions?: number;
198
+ /** Replication factor for fault tolerance */
199
+ replicationFactor?: number;
200
+ /** Additional driver-specific configuration key-values */
201
+ config?: Record<string, string>;
202
+ }
203
+ /**
204
+ * Configuration for the Database driver.
205
+ *
206
+ * Configures the queue system to use a SQL database for job storage.
207
+ *
208
+ * @public
209
+ * @example
210
+ * ```typescript
211
+ * const config: DatabaseDriverConfig = {
212
+ * driver: 'database',
213
+ * dbService: myDbService,
214
+ * table: 'my_jobs'
215
+ * };
216
+ * ```
217
+ */
218
+ export interface DatabaseDriverConfig {
219
+ /** Driver type identifier */
220
+ driver: 'database';
221
+ /** Database service implementation for executing queries */
222
+ dbService: any;
223
+ /** Optional custom table name for storing jobs */
224
+ table?: string;
225
+ }
226
+ /**
227
+ * Configuration for the Redis driver.
228
+ *
229
+ * Configures the queue system to use Redis for high-performance job storage.
230
+ *
231
+ * @public
232
+ * @example
233
+ * ```typescript
234
+ * const config: RedisDriverConfig = {
235
+ * driver: 'redis',
236
+ * client: redisClient,
237
+ * prefix: 'my-app:queue:'
238
+ * };
239
+ * ```
240
+ */
241
+ export interface RedisDriverConfig {
242
+ /** Driver type identifier */
243
+ driver: 'redis';
244
+ /** Redis client instance (ioredis or node-redis compatible) */
245
+ client: any;
246
+ /** Optional key prefix for namespacing */
247
+ prefix?: string;
248
+ }
249
+ /**
250
+ * Configuration for the Kafka driver.
251
+ *
252
+ * Configures the queue system to use Apache Kafka.
253
+ *
254
+ * @example
255
+ * ```typescript
256
+ * const config: KafkaDriverConfig = {
257
+ * driver: 'kafka',
258
+ * client: kafkaClient,
259
+ * consumerGroupId: 'my-group'
260
+ * };
261
+ * ```
262
+ */
263
+ export interface KafkaDriverConfig {
264
+ /** Driver type identifier */
265
+ driver: 'kafka';
266
+ /** Kafka client instance */
267
+ client: any;
268
+ /** Consumer group ID for coordinating workers */
269
+ consumerGroupId?: string;
270
+ }
271
+ /**
272
+ * Configuration for the SQS driver.
273
+ *
274
+ * Configures the queue system to use Amazon Simple Queue Service.
275
+ *
276
+ * @public
277
+ * @example
278
+ * ```typescript
279
+ * const config: SQSDriverConfig = {
280
+ * driver: 'sqs',
281
+ * client: sqsClient,
282
+ * queueUrlPrefix: 'https://sqs.us-east-1.amazonaws.com/123/'
283
+ * };
284
+ * ```
285
+ */
286
+ export interface SQSDriverConfig {
287
+ /** Driver type identifier */
288
+ driver: 'sqs';
289
+ /** Amazon SQS client instance */
290
+ client: any;
291
+ /** Optional prefix for resolving queue names to URLs */
292
+ queueUrlPrefix?: string;
293
+ /** The duration (in seconds) that received messages are hidden from other consumers */
294
+ visibilityTimeout?: number;
295
+ /** The duration (in seconds) to wait for a message (Long Polling) */
296
+ waitTimeSeconds?: number;
297
+ }
298
+ /**
299
+ * Configuration for the RabbitMQ driver.
300
+ *
301
+ * Configures the queue system to use RabbitMQ (AMQP).
302
+ *
303
+ * @example
304
+ * ```typescript
305
+ * const config: RabbitMQDriverConfig = {
306
+ * driver: 'rabbitmq',
307
+ * client: amqpConnection,
308
+ * exchange: 'jobs',
309
+ * exchangeType: 'direct'
310
+ * };
311
+ * ```
312
+ */
313
+ export interface RabbitMQDriverConfig {
314
+ /** Driver type identifier */
315
+ driver: 'rabbitmq';
316
+ /** AMQP client instance */
317
+ client: any;
318
+ /** Exchange name to publish to */
319
+ exchange?: string;
320
+ /** Type of exchange (direct, topic, fanout, headers) */
321
+ exchangeType?: string;
322
+ }
323
+ /**
324
+ * Configuration for the gRPC driver.
325
+ *
326
+ * Configures the queue system to use a remote gRPC service.
327
+ *
328
+ * @public
329
+ * @example
330
+ * ```typescript
331
+ * const config: GrpcDriverConfig = {
332
+ * driver: 'grpc',
333
+ * url: 'localhost:50051',
334
+ * protoUser: 'myuser',
335
+ * protoPassword: 'mypassword',
336
+ * serviceName: 'QueueService',
337
+ * packageName: 'stream'
338
+ * };
339
+ * ```
340
+ */
341
+ export interface GrpcDriverConfig {
342
+ /** Driver type identifier */
343
+ driver: 'grpc';
344
+ /** The gRPC server URL (host:port) */
345
+ url: string;
346
+ /** Path to the .proto file (optional, defaults to built-in) */
347
+ protoPath?: string;
348
+ /** The package name defined in the .proto file (default: 'stream') */
349
+ packageName?: string;
350
+ /** The service name defined in the .proto file (default: 'QueueService') */
351
+ serviceName?: string;
352
+ /** Optional credentials/metadata for connection */
353
+ credentials?: {
354
+ rootCerts?: Buffer;
355
+ privateKey?: Buffer;
356
+ certChain?: Buffer;
357
+ };
358
+ }
359
+ /**
360
+ * Configuration for the Bull Queue driver.
361
+ *
362
+ * Configures the queue system to use Bull Queue (backed by Redis).
363
+ *
364
+ * @public
365
+ * @example
366
+ * ```typescript
367
+ * import { Queue } from 'bullmq'
368
+ * import Redis from 'ioredis'
369
+ *
370
+ * const redis = new Redis()
371
+ * const queue = new Queue('gravito-events', { connection: redis })
372
+ * const config: BullMQDriverConfig = {
373
+ * driver: 'bullmq',
374
+ * queue: queue
375
+ * };
376
+ * ```
377
+ */
378
+ export interface BullMQDriverConfig {
379
+ /** Driver type identifier */
380
+ driver: 'bullmq';
381
+ /** Bull Queue instance */
382
+ queue: any;
383
+ /** Optional Bull Worker instance */
384
+ worker?: any;
385
+ /** Key prefix for queue namespacing */
386
+ prefix?: string;
387
+ /** Enable debug logging */
388
+ debug?: boolean;
389
+ }
390
+ /**
391
+ * Union type for all supported queue connection configurations.
392
+ *
393
+ * @public
394
+ */
395
+ export type QueueConnectionConfig = {
396
+ driver: 'memory';
397
+ } | DatabaseDriverConfig | RedisDriverConfig | KafkaDriverConfig | SQSDriverConfig | RabbitMQDriverConfig | GrpcDriverConfig | BullMQDriverConfig | {
398
+ driver: 'nats';
399
+ [key: string]: unknown;
400
+ } | {
401
+ driver: string;
402
+ [key: string]: unknown;
403
+ };
404
+ /**
405
+ * Global configuration for the QueueManager.
406
+ *
407
+ * Defines available connections, serialization settings, and system-wide behaviors.
408
+ *
409
+ * @example
410
+ * ```typescript
411
+ * const config: QueueConfig = {
412
+ * default: 'redis',
413
+ * connections: {
414
+ * redis: { driver: 'redis', client: redis }
415
+ * },
416
+ * debug: true
417
+ * };
418
+ * ```
419
+ */
420
+ export interface QueueConfig {
421
+ /**
422
+ * The name of the default connection to use when none is specified.
423
+ */
424
+ default?: string;
425
+ /**
426
+ * Map of connection names to their configurations.
427
+ */
428
+ connections?: Record<string, QueueConnectionConfig>;
429
+ /**
430
+ * The default serialization format to use for jobs.
431
+ */
432
+ defaultSerializer?: 'json' | 'class' | 'msgpack' | 'jsonl';
433
+ /**
434
+ * Whether to cache serialized job data.
435
+ *
436
+ * If true, re-queuing the same Job instance will reuse the cached serialized string,
437
+ * improving performance for frequently pushed jobs.
438
+ *
439
+ * @default false
440
+ */
441
+ useSerializationCache?: boolean;
442
+ /**
443
+ * Enable verbose debug logging.
444
+ *
445
+ * Useful for troubleshooting queue operations and consumer behavior.
446
+ *
447
+ * @default false
448
+ */
449
+ debug?: boolean;
450
+ /**
451
+ * Configuration for the persistence layer (SQL Archive).
452
+ */
453
+ persistence?: {
454
+ /**
455
+ * The persistence adapter instance used to store archived jobs.
456
+ */
457
+ adapter: PersistenceAdapter;
458
+ /**
459
+ * Whether to automatically archive jobs upon successful completion.
460
+ */
461
+ archiveCompleted?: boolean;
462
+ /**
463
+ * Whether to automatically archive jobs upon permanent failure.
464
+ */
465
+ archiveFailed?: boolean;
466
+ /**
467
+ * Whether to archive jobs immediately when they are enqueued (Audit Mode).
468
+ *
469
+ * @default false
470
+ */
471
+ archiveEnqueued?: boolean;
472
+ /**
473
+ * Buffer size for batched writes to the archive.
474
+ *
475
+ * If set, wraps the adapter in a `BufferedPersistence` decorator to improve throughput.
476
+ */
477
+ bufferSize?: number;
478
+ /**
479
+ * Maximum time (in milliseconds) to wait before flushing the write buffer.
480
+ */
481
+ flushInterval?: number;
482
+ };
483
+ }
484
+ /**
485
+ * Interface for persistence adapters.
486
+ *
487
+ * Defines the contract for storing long-term history of jobs in a permanent storage
488
+ * (typically a SQL database).
489
+ *
490
+ * @example
491
+ * ```typescript
492
+ * class MyPersistence implements PersistenceAdapter {
493
+ * async archive(queue, job, status) { ... }
494
+ * // ...
495
+ * }
496
+ * ```
497
+ */
498
+ export interface PersistenceAdapter {
499
+ /**
500
+ * Archive a single job.
501
+ *
502
+ * @param queue - The name of the queue.
503
+ * @param job - The serialized job data.
504
+ * @param status - The final status of the job ('completed', 'failed', etc.).
505
+ * @returns A promise that resolves when the job is archived.
506
+ */
507
+ archive(queue: string, job: SerializedJob, status: 'completed' | 'failed' | 'waiting' | string): Promise<void>;
508
+ /**
509
+ * Find a specific job in the archive.
510
+ *
511
+ * @param queue - The name of the queue.
512
+ * @param id - The job ID.
513
+ * @returns The serialized job if found, or null.
514
+ */
515
+ find(queue: string, id: string): Promise<SerializedJob | null>;
516
+ /**
517
+ * List jobs from the archive based on criteria.
518
+ *
519
+ * @param queue - The name of the queue.
520
+ * @param options - Filtering and pagination options.
521
+ * @returns A list of matching serialized jobs.
522
+ */
523
+ list(queue: string, options?: {
524
+ limit?: number;
525
+ offset?: number;
526
+ status?: 'completed' | 'failed' | 'waiting' | string | string[];
527
+ jobId?: string;
528
+ startTime?: Date;
529
+ endTime?: Date;
530
+ }): Promise<SerializedJob[]>;
531
+ /**
532
+ * Archive multiple jobs in a single batch operation.
533
+ *
534
+ * @param jobs - Array of job data to archive.
535
+ * @returns A promise that resolves when all jobs are archived.
536
+ */
537
+ archiveMany?(jobs: Array<{
538
+ queue: string;
539
+ job: SerializedJob;
540
+ status: 'completed' | 'failed' | 'waiting' | string;
541
+ }>): Promise<void>;
542
+ /**
543
+ * Remove old data from the archive.
544
+ *
545
+ * @param days - Retention period in days; older records will be deleted.
546
+ * @returns The number of records deleted.
547
+ */
548
+ cleanup(days: number): Promise<number>;
549
+ /**
550
+ * Flush any buffered data to storage.
551
+ *
552
+ * @returns A promise that resolves when the flush is complete.
553
+ */
554
+ flush?(): Promise<void>;
555
+ /**
556
+ * Count jobs in the archive matching specific criteria.
557
+ *
558
+ * @param queue - The name of the queue.
559
+ * @param options - Filtering options.
560
+ * @returns The total count of matching jobs.
561
+ */
562
+ count(queue: string, options?: {
563
+ status?: 'completed' | 'failed' | 'waiting' | string | string[];
564
+ jobId?: string;
565
+ startTime?: Date;
566
+ endTime?: Date;
567
+ }): Promise<number>;
568
+ /**
569
+ * Archive a system log message.
570
+ *
571
+ * @param log - The log entry to archive.
572
+ * @returns A promise that resolves when the log is archived.
573
+ */
574
+ archiveLog(log: {
575
+ level: string;
576
+ message: string;
577
+ workerId: string;
578
+ queue?: string;
579
+ timestamp: Date;
580
+ }): Promise<void>;
581
+ /**
582
+ * Archive multiple log messages in a batch.
583
+ *
584
+ * @param logs - Array of log entries to archive.
585
+ * @returns A promise that resolves when logs are archived.
586
+ */
587
+ archiveLogMany?(logs: Array<{
588
+ level: string;
589
+ message: string;
590
+ workerId: string;
591
+ queue?: string;
592
+ timestamp: Date;
593
+ }>): Promise<void>;
594
+ /**
595
+ * List system logs from the archive.
596
+ *
597
+ * @param options - Filtering and pagination options.
598
+ * @returns A list of matching log entries.
599
+ */
600
+ listLogs(options?: {
601
+ limit?: number;
602
+ offset?: number;
603
+ level?: string;
604
+ workerId?: string;
605
+ queue?: string;
606
+ search?: string;
607
+ startTime?: Date;
608
+ endTime?: Date;
609
+ }): Promise<any[]>;
610
+ /**
611
+ * Count system logs in the archive.
612
+ *
613
+ * @param options - Filtering options.
614
+ * @returns The total count of matching logs.
615
+ */
616
+ countLogs(options?: {
617
+ level?: string;
618
+ workerId?: string;
619
+ queue?: string;
620
+ search?: string;
621
+ startTime?: Date;
622
+ endTime?: Date;
623
+ }): Promise<number>;
624
+ }
625
+ /**
626
+ * Options used when pushing a job to the queue.
627
+ *
628
+ * Allows customizing delivery behavior, such as delays, priority, and ordering.
629
+ *
630
+ * @example
631
+ * ```typescript
632
+ * const options: JobPushOptions = {
633
+ * priority: 'high',
634
+ * groupId: 'user-123'
635
+ * };
636
+ * ```
637
+ */
638
+ export interface JobPushOptions {
639
+ /**
640
+ * Group ID for FIFO ordering.
641
+ *
642
+ * If provided, jobs with the same `groupId` are guaranteed to be processed strictly
643
+ * sequentially. This is useful for event streams where order matters (e.g., per-user events).
644
+ */
645
+ groupId?: string;
646
+ /**
647
+ * Job priority level.
648
+ *
649
+ * Higher priority jobs are processed before lower priority ones, depending on driver support.
650
+ * Common values: 'critical', 'high', 'default', 'low'.
651
+ */
652
+ priority?: string | number;
653
+ }