@gravito/stream 2.1.2 → 3.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (75)
  1. package/package.json +1 -1
  2. package/dist/BatchConsumer.d.ts +0 -81
  3. package/dist/Consumer.d.ts +0 -215
  4. package/dist/DashboardProvider.d.ts +0 -29
  5. package/dist/Job.d.ts +0 -183
  6. package/dist/OrbitStream.d.ts +0 -151
  7. package/dist/QueueManager.d.ts +0 -321
  8. package/dist/Queueable.d.ts +0 -91
  9. package/dist/Scheduler.d.ts +0 -215
  10. package/dist/StreamEventBackend.d.ts +0 -120
  11. package/dist/SystemEventJob.d.ts +0 -41
  12. package/dist/Worker.d.ts +0 -139
  13. package/dist/benchmarks/PerformanceReporter.d.ts +0 -99
  14. package/dist/consumer/ConcurrencyGate.d.ts +0 -55
  15. package/dist/consumer/ConsumerStrategy.d.ts +0 -41
  16. package/dist/consumer/GroupSequencer.d.ts +0 -57
  17. package/dist/consumer/HeartbeatManager.d.ts +0 -65
  18. package/dist/consumer/JobExecutor.d.ts +0 -61
  19. package/dist/consumer/JobSourceGenerator.d.ts +0 -31
  20. package/dist/consumer/PollingStrategy.d.ts +0 -42
  21. package/dist/consumer/ReactiveStrategy.d.ts +0 -41
  22. package/dist/consumer/StreamingConsumer.d.ts +0 -90
  23. package/dist/consumer/index.d.ts +0 -13
  24. package/dist/consumer/types.d.ts +0 -102
  25. package/dist/drivers/BinaryJobFrame.d.ts +0 -78
  26. package/dist/drivers/BullMQDriver.d.ts +0 -237
  27. package/dist/drivers/DatabaseDriver.d.ts +0 -131
  28. package/dist/drivers/GrpcDriver.d.ts +0 -16
  29. package/dist/drivers/KafkaDriver.d.ts +0 -161
  30. package/dist/drivers/MemoryDriver.d.ts +0 -119
  31. package/dist/drivers/QueueDriver.d.ts +0 -250
  32. package/dist/drivers/RabbitMQDriver.d.ts +0 -140
  33. package/dist/drivers/RedisDriver.d.ts +0 -328
  34. package/dist/drivers/SQSDriver.d.ts +0 -114
  35. package/dist/drivers/kafka/BackpressureController.d.ts +0 -60
  36. package/dist/drivers/kafka/BatchProcessor.d.ts +0 -50
  37. package/dist/drivers/kafka/ConsumerLifecycleManager.d.ts +0 -80
  38. package/dist/drivers/kafka/ErrorCategorizer.d.ts +0 -39
  39. package/dist/drivers/kafka/ErrorRecoveryManager.d.ts +0 -100
  40. package/dist/drivers/kafka/HeartbeatManager.d.ts +0 -57
  41. package/dist/drivers/kafka/KafkaDriver.d.ts +0 -138
  42. package/dist/drivers/kafka/KafkaMetrics.d.ts +0 -88
  43. package/dist/drivers/kafka/KafkaNotifier.d.ts +0 -70
  44. package/dist/drivers/kafka/MessageBuffer.d.ts +0 -71
  45. package/dist/drivers/kafka/OffsetTracker.d.ts +0 -65
  46. package/dist/drivers/kafka/PerformanceMonitor.d.ts +0 -88
  47. package/dist/drivers/kafka/RateLimiter.d.ts +0 -52
  48. package/dist/drivers/kafka/RebalanceHandler.d.ts +0 -104
  49. package/dist/drivers/kafka/RingBuffer.d.ts +0 -63
  50. package/dist/drivers/kafka/index.d.ts +0 -22
  51. package/dist/drivers/kafka/types.d.ts +0 -553
  52. package/dist/drivers/prepareJobForTransport.d.ts +0 -10
  53. package/dist/index.d.ts +0 -69
  54. package/dist/locks/DistributedLock.d.ts +0 -175
  55. package/dist/persistence/BufferedPersistence.d.ts +0 -130
  56. package/dist/persistence/BunBufferedPersistence.d.ts +0 -173
  57. package/dist/persistence/MySQLPersistence.d.ts +0 -134
  58. package/dist/persistence/SQLitePersistence.d.ts +0 -133
  59. package/dist/serializers/BinarySerializer.d.ts +0 -42
  60. package/dist/serializers/CachedSerializer.d.ts +0 -42
  61. package/dist/serializers/CborNativeSerializer.d.ts +0 -56
  62. package/dist/serializers/ClassNameSerializer.d.ts +0 -58
  63. package/dist/serializers/JobSerializer.d.ts +0 -33
  64. package/dist/serializers/JsonSerializer.d.ts +0 -28
  65. package/dist/serializers/JsonlSerializer.d.ts +0 -90
  66. package/dist/serializers/MessagePackSerializer.d.ts +0 -29
  67. package/dist/types.d.ts +0 -672
  68. package/dist/workers/BinaryWorkerProtocol.d.ts +0 -77
  69. package/dist/workers/BunWorker.d.ts +0 -179
  70. package/dist/workers/SandboxedWorker.d.ts +0 -132
  71. package/dist/workers/WorkerFactory.d.ts +0 -128
  72. package/dist/workers/WorkerPool.d.ts +0 -186
  73. package/dist/workers/bun-job-executor.d.ts +0 -14
  74. package/dist/workers/index.d.ts +0 -13
  75. package/dist/workers/job-executor.d.ts +0 -9
package/dist/types.d.ts DELETED
@@ -1,672 +0,0 @@
1
- import type { DatabaseService } from './drivers/DatabaseDriver';
2
- export interface RedisLikeClient {
3
- [key: string]: unknown;
4
- }
5
- export interface KafkaLikeClient {
6
- [key: string]: unknown;
7
- }
8
- export interface SqsLikeClient {
9
- [key: string]: unknown;
10
- }
11
- export interface RabbitMqLikeClient {
12
- [key: string]: unknown;
13
- }
14
- export interface BullMqQueueLike {
15
- [key: string]: unknown;
16
- }
17
- export interface BullMqWorkerLike {
18
- [key: string]: unknown;
19
- }
20
- /**
21
- * Represents a job that has been serialized for storage in a queue.
22
- *
23
- * This interface defines the data structure used to persist jobs in the underlying
24
- * storage mechanism (e.g., Redis, Database, SQS). It encapsulates all metadata
25
- * required for processing, retries, and lifecycle management.
26
- *
27
- * @public
28
- * @example
29
- * ```typescript
30
- * const job: SerializedJob = {
31
- * id: 'job-123',
32
- * type: 'json',
33
- * data: '{"userId": 1}',
34
- * createdAt: Date.now()
35
- * };
36
- * ```
37
- */
38
- export interface SerializedJob {
39
- /**
40
- * Unique identifier for the job.
41
- */
42
- id: string;
43
- /**
44
- * The serialization format used for the job data.
45
- *
46
- * - 'json': Simple JSON objects.
47
- * - 'class': Serialized class instances (requires class registration).
48
- * - 'msgpack': Binary MessagePack format.
49
- * - 'binary': CBOR binary format for optimal performance and size.
50
- * - 'jsonl': JSON Lines format, optimised for streaming and batch processing.
51
- */
52
- type: 'json' | 'class' | 'msgpack' | 'binary' | 'jsonl';
53
- /**
54
- * The actual serialized job payload.
55
- *
56
- * Contains the business data needed to execute the job.
57
- * For 'binary' and 'msgpack' types, can be Uint8Array or Base64 string for transport.
58
- */
59
- data: string | Uint8Array;
60
- /**
61
- * The fully qualified class name of the job.
62
- *
63
- * Only required when `type` is 'class' to reconstruct the original object instance.
64
- */
65
- className?: string;
66
- /**
67
- * The timestamp (in milliseconds) when the job was originally created.
68
- */
69
- createdAt: number;
70
- /**
71
- * Optional delay in seconds before the job becomes eligible for processing.
72
- *
73
- * Used for scheduling future tasks.
74
- */
75
- delaySeconds?: number;
76
- /**
77
- * The number of times this job has been attempted so far.
78
- */
79
- attempts?: number;
80
- /**
81
- * The maximum number of retry attempts allowed before marking the job as failed.
82
- */
83
- maxAttempts?: number;
84
- /**
85
- * Group ID for sequential processing.
86
- *
87
- * Jobs sharing the same `groupId` are guaranteed to be processed in order (FIFO),
88
- * provided the consumer supports this feature.
89
- */
90
- groupId?: string;
91
- /**
92
- * The initial delay in seconds before the first retry attempt after a failure.
93
- */
94
- retryAfterSeconds?: number;
95
- /**
96
- * The multiplier applied to the delay for exponential backoff strategies.
97
- */
98
- retryMultiplier?: number;
99
- /**
100
- * The error message from the last failed attempt, if any.
101
- */
102
- error?: string;
103
- /**
104
- * The timestamp (in milliseconds) when the job was permanently marked as failed.
105
- */
106
- failedAt?: number;
107
- /**
108
- * The priority level of the job.
109
- *
110
- * Higher values or specific strings (e.g., 'high') indicate higher priority.
111
- */
112
- priority?: string | number;
113
- }
114
- /**
115
- * Represents a database record for an archived job.
116
- *
117
- * This interface maps to the SQL table structure used for job persistence and auditing.
118
- * It stores historical data about jobs that have been completed or failed.
119
- *
120
- * @example
121
- * ```typescript
122
- * const row: JobRow = {
123
- * id: 1,
124
- * job_id: 'job-123',
125
- * queue: 'default',
126
- * status: 'completed',
127
- * payload: '...',
128
- * created_at: new Date(),
129
- * archived_at: new Date()
130
- * };
131
- * ```
132
- */
133
- export interface JobRow {
134
- /** Primary key of the record */
135
- id: number;
136
- /** The original job ID */
137
- job_id: string;
138
- /** The queue name the job belonged to */
139
- queue: string;
140
- /** Final status of the job (e.g., 'completed', 'failed') */
141
- status: string;
142
- /** Serialized job data */
143
- payload: string;
144
- /** Error message if the job failed */
145
- error?: string | null;
146
- /** When the job was created */
147
- created_at: Date;
148
- /** When the job was archived */
149
- archived_at: Date;
150
- }
151
- /**
152
- * Statistics snapshot for a specific queue.
153
- *
154
- * Provides insight into the current state of a queue, including pending,
155
- * delayed, reserved, and failed job counts.
156
- *
157
- * @public
158
- * @example
159
- * ```typescript
160
- * const stats: QueueStats = {
161
- * queue: 'default',
162
- * size: 42,
163
- * delayed: 5,
164
- * failed: 1
165
- * };
166
- * ```
167
- */
168
- export interface QueueStats {
169
- /** Name of the queue */
170
- queue: string;
171
- /** Number of pending jobs waiting to be processed */
172
- size: number;
173
- /** Number of jobs scheduled for future execution */
174
- delayed?: number;
175
- /** Number of jobs currently being processed by workers */
176
- reserved?: number;
177
- /** Number of jobs in the Dead Letter Queue (DLQ) */
178
- failed?: number;
179
- /** Driver-specific custom metrics */
180
- metrics?: Record<string, number>;
181
- }
182
- /**
183
- * Snapshot of statistics across all connections and queues.
184
- *
185
- * Used by monitoring dashboards to provide a high-level overview
186
- * of the entire background processing system.
187
- *
188
- * @public
189
- */
190
- export interface GlobalStats {
191
- /** Map of connection names to their respective queue statistics */
192
- connections: Record<string, QueueStats[]>;
193
- /** Total number of pending jobs across all connections */
194
- totalSize: number;
195
- /** Total number of failed jobs across all connections */
196
- totalFailed: number;
197
- /** Timestamp of the snapshot */
198
- timestamp: number;
199
- }
200
- /**
201
- * Advanced topic configuration options for distributed queue systems.
202
- *
203
- * Used primarily by drivers like Kafka to configure topic properties.
204
- *
205
- * @public
206
- * @example
207
- * ```typescript
208
- * const options: TopicOptions = {
209
- * partitions: 3,
210
- * replicationFactor: 2
211
- * };
212
- * ```
213
- */
214
- export interface TopicOptions {
215
- /** Number of partitions for the topic */
216
- partitions?: number;
217
- /** Replication factor for fault tolerance */
218
- replicationFactor?: number;
219
- /** Additional driver-specific configuration key-values */
220
- config?: Record<string, string>;
221
- }
222
- /**
223
- * Configuration for the Database driver.
224
- *
225
- * Configures the queue system to use a SQL database for job storage.
226
- *
227
- * @public
228
- * @example
229
- * ```typescript
230
- * const config: DatabaseDriverConfig = {
231
- * driver: 'database',
232
- * dbService: myDbService,
233
- * table: 'my_jobs'
234
- * };
235
- * ```
236
- */
237
- export interface DatabaseDriverConfig {
238
- /** Driver type identifier */
239
- driver: 'database';
240
- /** Database service implementation for executing queries */
241
- dbService: DatabaseService;
242
- /** Optional custom table name for storing jobs */
243
- table?: string;
244
- }
245
- /**
246
- * Configuration for the Redis driver.
247
- *
248
- * Configures the queue system to use Redis for high-performance job storage.
249
- *
250
- * @public
251
- * @example
252
- * ```typescript
253
- * const config: RedisDriverConfig = {
254
- * driver: 'redis',
255
- * client: redisClient,
256
- * prefix: 'my-app:queue:'
257
- * };
258
- * ```
259
- */
260
- export interface RedisDriverConfig {
261
- /** Driver type identifier */
262
- driver: 'redis';
263
- /** Redis client instance (ioredis or node-redis compatible) */
264
- client: RedisLikeClient;
265
- /** Optional key prefix for namespacing */
266
- prefix?: string;
267
- }
268
- /**
269
- * Configuration for the Kafka driver.
270
- *
271
- * Configures the queue system to use Apache Kafka.
272
- *
273
- * @example
274
- * ```typescript
275
- * const config: KafkaDriverConfig = {
276
- * driver: 'kafka',
277
- * client: kafkaClient,
278
- * consumerGroupId: 'my-group'
279
- * };
280
- * ```
281
- */
282
- export interface KafkaDriverConfig {
283
- /** Driver type identifier */
284
- driver: 'kafka';
285
- /** Kafka client instance */
286
- client: KafkaLikeClient;
287
- /** Consumer group ID for coordinating workers */
288
- consumerGroupId?: string;
289
- }
290
- /**
291
- * Configuration for the SQS driver.
292
- *
293
- * Configures the queue system to use Amazon Simple Queue Service.
294
- *
295
- * @public
296
- * @example
297
- * ```typescript
298
- * const config: SQSDriverConfig = {
299
- * driver: 'sqs',
300
- * client: sqsClient,
301
- * queueUrlPrefix: 'https://sqs.us-east-1.amazonaws.com/123/'
302
- * };
303
- * ```
304
- */
305
- export interface SQSDriverConfig {
306
- /** Driver type identifier */
307
- driver: 'sqs';
308
- /** Amazon SQS client instance */
309
- client: SqsLikeClient;
310
- /** Optional prefix for resolving queue names to URLs */
311
- queueUrlPrefix?: string;
312
- /** The duration (in seconds) that received messages are hidden from other consumers */
313
- visibilityTimeout?: number;
314
- /** The duration (in seconds) to wait for a message (Long Polling) */
315
- waitTimeSeconds?: number;
316
- }
317
- /**
318
- * Configuration for the RabbitMQ driver.
319
- *
320
- * Configures the queue system to use RabbitMQ (AMQP).
321
- *
322
- * @example
323
- * ```typescript
324
- * const config: RabbitMQDriverConfig = {
325
- * driver: 'rabbitmq',
326
- * client: amqpConnection,
327
- * exchange: 'jobs',
328
- * exchangeType: 'direct'
329
- * };
330
- * ```
331
- */
332
- export interface RabbitMQDriverConfig {
333
- /** Driver type identifier */
334
- driver: 'rabbitmq';
335
- /** AMQP client instance */
336
- client: RabbitMqLikeClient;
337
- /** Exchange name to publish to */
338
- exchange?: string;
339
- /** Type of exchange (direct, topic, fanout, headers) */
340
- exchangeType?: string;
341
- }
342
- /**
343
- * Configuration for the gRPC driver.
344
- *
345
- * Configures the queue system to use a remote gRPC service.
346
- *
347
- * @public
348
- * @example
349
- * ```typescript
350
- * const config: GrpcDriverConfig = {
351
- * driver: 'grpc',
352
- * url: 'localhost:50051',
353
- * protoUser: 'myuser',
354
- * protoPassword: 'mypassword',
355
- * serviceName: 'QueueService',
356
- * packageName: 'stream'
357
- * };
358
- * ```
359
- */
360
- export interface GrpcDriverConfig {
361
- /** Driver type identifier */
362
- driver: 'grpc';
363
- /** The gRPC server URL (host:port) */
364
- url: string;
365
- /** Path to the .proto file (optional, defaults to built-in) */
366
- protoPath?: string;
367
- /** The package name defined in the .proto file (default: 'stream') */
368
- packageName?: string;
369
- /** The service name defined in the .proto file (default: 'QueueService') */
370
- serviceName?: string;
371
- /** Optional credentials/metadata for connection */
372
- credentials?: {
373
- rootCerts?: Buffer;
374
- privateKey?: Buffer;
375
- certChain?: Buffer;
376
- };
377
- }
378
- /**
379
- * Configuration for the Bull Queue driver.
380
- *
381
- * Configures the queue system to use Bull Queue (backed by Redis).
382
- *
383
- * @public
384
- * @example
385
- * ```typescript
386
- * import { Queue } from 'bullmq'
387
- * import Redis from 'ioredis'
388
- *
389
- * const redis = new Redis()
390
- * const queue = new Queue('gravito-events', { connection: redis })
391
- * const config: BullMQDriverConfig = {
392
- * driver: 'bullmq',
393
- * queue: queue
394
- * };
395
- * ```
396
- */
397
- export interface BullMQDriverConfig {
398
- /** Driver type identifier */
399
- driver: 'bullmq';
400
- /** Bull Queue instance */
401
- queue: BullMqQueueLike;
402
- /** Optional Bull Worker instance */
403
- worker?: BullMqWorkerLike;
404
- /** Key prefix for queue namespacing */
405
- prefix?: string;
406
- /** Enable debug logging */
407
- debug?: boolean;
408
- }
409
- /**
410
- * Union type for all supported queue connection configurations.
411
- *
412
- * @public
413
- */
414
- export type QueueConnectionConfig = {
415
- driver: 'memory';
416
- } | DatabaseDriverConfig | RedisDriverConfig | KafkaDriverConfig | SQSDriverConfig | RabbitMQDriverConfig | GrpcDriverConfig | BullMQDriverConfig | {
417
- driver: 'nats';
418
- [key: string]: unknown;
419
- } | {
420
- driver: string;
421
- [key: string]: unknown;
422
- };
423
- /**
424
- * Global configuration for the QueueManager.
425
- *
426
- * Defines available connections, serialization settings, and system-wide behaviors.
427
- *
428
- * @example
429
- * ```typescript
430
- * const config: QueueConfig = {
431
- * default: 'redis',
432
- * connections: {
433
- * redis: { driver: 'redis', client: redis }
434
- * },
435
- * debug: true
436
- * };
437
- * ```
438
- */
439
- export interface QueueConfig {
440
- /**
441
- * The name of the default connection to use when none is specified.
442
- */
443
- default?: string;
444
- /**
445
- * Map of connection names to their configurations.
446
- */
447
- connections?: Record<string, QueueConnectionConfig>;
448
- /**
449
- * The default serialization format to use for jobs.
450
- */
451
- defaultSerializer?: 'json' | 'class' | 'msgpack' | 'jsonl';
452
- /**
453
- * Whether to cache serialized job data.
454
- *
455
- * If true, re-queuing the same Job instance will reuse the cached serialized string,
456
- * improving performance for frequently pushed jobs.
457
- *
458
- * @default false
459
- */
460
- useSerializationCache?: boolean;
461
- /**
462
- * Enable verbose debug logging.
463
- *
464
- * Useful for troubleshooting queue operations and consumer behavior.
465
- *
466
- * @default false
467
- */
468
- debug?: boolean;
469
- /**
470
- * Configuration for the persistence layer (SQL Archive).
471
- */
472
- persistence?: {
473
- /**
474
- * The persistence adapter instance used to store archived jobs.
475
- */
476
- adapter: PersistenceAdapter;
477
- /**
478
- * Whether to automatically archive jobs upon successful completion.
479
- */
480
- archiveCompleted?: boolean;
481
- /**
482
- * Whether to automatically archive jobs upon permanent failure.
483
- */
484
- archiveFailed?: boolean;
485
- /**
486
- * Whether to archive jobs immediately when they are enqueued (Audit Mode).
487
- *
488
- * @default false
489
- */
490
- archiveEnqueued?: boolean;
491
- /**
492
- * Buffer size for batched writes to the archive.
493
- *
494
- * If set, wraps the adapter in a `BufferedPersistence` decorator to improve throughput.
495
- */
496
- bufferSize?: number;
497
- /**
498
- * Maximum time (in milliseconds) to wait before flushing the write buffer.
499
- */
500
- flushInterval?: number;
501
- };
502
- }
503
- /**
504
- * Interface for persistence adapters.
505
- *
506
- * Defines the contract for storing long-term history of jobs in a permanent storage
507
- * (typically a SQL database).
508
- *
509
- * @example
510
- * ```typescript
511
- * class MyPersistence implements PersistenceAdapter {
512
- * async archive(queue, job, status) { ... }
513
- * // ...
514
- * }
515
- * ```
516
- */
517
- export interface PersistenceAdapter {
518
- /**
519
- * Archive a single job.
520
- *
521
- * @param queue - The name of the queue.
522
- * @param job - The serialized job data.
523
- * @param status - The final status of the job ('completed', 'failed', etc.).
524
- * @returns A promise that resolves when the job is archived.
525
- */
526
- archive(queue: string, job: SerializedJob, status: 'completed' | 'failed' | 'waiting' | string): Promise<void>;
527
- /**
528
- * Find a specific job in the archive.
529
- *
530
- * @param queue - The name of the queue.
531
- * @param id - The job ID.
532
- * @returns The serialized job if found, or null.
533
- */
534
- find(queue: string, id: string): Promise<SerializedJob | null>;
535
- /**
536
- * List jobs from the archive based on criteria.
537
- *
538
- * @param queue - The name of the queue.
539
- * @param options - Filtering and pagination options.
540
- * @returns A list of matching serialized jobs.
541
- */
542
- list(queue: string, options?: {
543
- limit?: number;
544
- offset?: number;
545
- status?: 'completed' | 'failed' | 'waiting' | string | string[];
546
- jobId?: string;
547
- startTime?: Date;
548
- endTime?: Date;
549
- }): Promise<SerializedJob[]>;
550
- /**
551
- * Archive multiple jobs in a single batch operation.
552
- *
553
- * @param jobs - Array of job data to archive.
554
- * @returns A promise that resolves when all jobs are archived.
555
- */
556
- archiveMany?(jobs: Array<{
557
- queue: string;
558
- job: SerializedJob;
559
- status: 'completed' | 'failed' | 'waiting' | string;
560
- }>): Promise<void>;
561
- /**
562
- * Remove old data from the archive.
563
- *
564
- * @param days - Retention period in days; older records will be deleted.
565
- * @returns The number of records deleted.
566
- */
567
- cleanup(days: number): Promise<number>;
568
- /**
569
- * Flush any buffered data to storage.
570
- *
571
- * @returns A promise that resolves when the flush is complete.
572
- */
573
- flush?(): Promise<void>;
574
- /**
575
- * Count jobs in the archive matching specific criteria.
576
- *
577
- * @param queue - The name of the queue.
578
- * @param options - Filtering options.
579
- * @returns The total count of matching jobs.
580
- */
581
- count(queue: string, options?: {
582
- status?: 'completed' | 'failed' | 'waiting' | string | string[];
583
- jobId?: string;
584
- startTime?: Date;
585
- endTime?: Date;
586
- }): Promise<number>;
587
- /**
588
- * Archive a system log message.
589
- *
590
- * @param log - The log entry to archive.
591
- * @returns A promise that resolves when the log is archived.
592
- */
593
- archiveLog(log: {
594
- level: string;
595
- message: string;
596
- workerId: string;
597
- queue?: string;
598
- timestamp: Date;
599
- }): Promise<void>;
600
- /**
601
- * Archive multiple log messages in a batch.
602
- *
603
- * @param logs - Array of log entries to archive.
604
- * @returns A promise that resolves when logs are archived.
605
- */
606
- archiveLogMany?(logs: Array<{
607
- level: string;
608
- message: string;
609
- workerId: string;
610
- queue?: string;
611
- timestamp: Date;
612
- }>): Promise<void>;
613
- /**
614
- * List system logs from the archive.
615
- *
616
- * @param options - Filtering and pagination options.
617
- * @returns A list of matching log entries.
618
- */
619
- listLogs(options?: {
620
- limit?: number;
621
- offset?: number;
622
- level?: string;
623
- workerId?: string;
624
- queue?: string;
625
- search?: string;
626
- startTime?: Date;
627
- endTime?: Date;
628
- }): Promise<Record<string, unknown>[]>;
629
- /**
630
- * Count system logs in the archive.
631
- *
632
- * @param options - Filtering options.
633
- * @returns The total count of matching logs.
634
- */
635
- countLogs(options?: {
636
- level?: string;
637
- workerId?: string;
638
- queue?: string;
639
- search?: string;
640
- startTime?: Date;
641
- endTime?: Date;
642
- }): Promise<number>;
643
- }
644
- /**
645
- * Options used when pushing a job to the queue.
646
- *
647
- * Allows customizing delivery behavior, such as delays, priority, and ordering.
648
- *
649
- * @example
650
- * ```typescript
651
- * const options: JobPushOptions = {
652
- * priority: 'high',
653
- * groupId: 'user-123'
654
- * };
655
- * ```
656
- */
657
- export interface JobPushOptions {
658
- /**
659
- * Group ID for FIFO ordering.
660
- *
661
- * If provided, jobs with the same `groupId` are guaranteed to be processed strictly
662
- * sequentially. This is useful for event streams where order matters (e.g., per-user events).
663
- */
664
- groupId?: string;
665
- /**
666
- * Job priority level.
667
- *
668
- * Higher priority jobs are processed before lower priority ones, depending on driver support.
669
- * Common values: 'critical', 'high', 'default', 'low'.
670
- */
671
- priority?: string | number;
672
- }