groupmq-plus 1.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (40) hide show
  1. package/LICENSE +59 -0
  2. package/README.md +722 -0
  3. package/dist/index.cjs +2567 -0
  4. package/dist/index.cjs.map +1 -0
  5. package/dist/index.d.cts +1300 -0
  6. package/dist/index.d.ts +1300 -0
  7. package/dist/index.js +2557 -0
  8. package/dist/index.js.map +1 -0
  9. package/dist/lua/change-delay.lua +62 -0
  10. package/dist/lua/check-stalled.lua +86 -0
  11. package/dist/lua/clean-status.lua +64 -0
  12. package/dist/lua/cleanup-poisoned-group.lua +46 -0
  13. package/dist/lua/cleanup.lua +46 -0
  14. package/dist/lua/complete-and-reserve-next-with-metadata.lua +221 -0
  15. package/dist/lua/complete-with-metadata.lua +190 -0
  16. package/dist/lua/complete.lua +51 -0
  17. package/dist/lua/dead-letter.lua +86 -0
  18. package/dist/lua/enqueue-batch.lua +149 -0
  19. package/dist/lua/enqueue-flow.lua +107 -0
  20. package/dist/lua/enqueue.lua +154 -0
  21. package/dist/lua/get-active-count.lua +6 -0
  22. package/dist/lua/get-active-jobs.lua +6 -0
  23. package/dist/lua/get-delayed-count.lua +5 -0
  24. package/dist/lua/get-delayed-jobs.lua +5 -0
  25. package/dist/lua/get-unique-groups-count.lua +13 -0
  26. package/dist/lua/get-unique-groups.lua +15 -0
  27. package/dist/lua/get-waiting-count.lua +11 -0
  28. package/dist/lua/get-waiting-jobs.lua +15 -0
  29. package/dist/lua/heartbeat.lua +22 -0
  30. package/dist/lua/is-empty.lua +35 -0
  31. package/dist/lua/promote-delayed-jobs.lua +40 -0
  32. package/dist/lua/promote-delayed-one.lua +44 -0
  33. package/dist/lua/promote-staged.lua +70 -0
  34. package/dist/lua/record-job-result.lua +143 -0
  35. package/dist/lua/remove.lua +55 -0
  36. package/dist/lua/reserve-atomic.lua +114 -0
  37. package/dist/lua/reserve-batch.lua +141 -0
  38. package/dist/lua/reserve.lua +161 -0
  39. package/dist/lua/retry.lua +53 -0
  40. package/package.json +92 -0
@@ -0,0 +1,1300 @@
1
+ import { BaseAdapter } from "@bull-board/api/dist/queueAdapters/base.js";
2
+ import { JobCounts, JobStatus, QueueJob, Status } from "@bull-board/api/typings/app";
3
+ import * as ioredis0 from "ioredis";
4
+ import Redis from "ioredis";
5
+
6
+ //#region src/status.d.ts
7
+ declare const STATUS: {
8
+ readonly latest: 'latest';
9
+ readonly active: 'active';
10
+ readonly waiting: 'waiting';
11
+ readonly waitingChildren: 'waiting-children';
12
+ readonly prioritized: 'prioritized';
13
+ readonly completed: 'completed';
14
+ readonly failed: 'failed';
15
+ readonly delayed: 'delayed';
16
+ readonly paused: 'paused';
17
+ };
18
+ type Status$1 = (typeof STATUS)[keyof typeof STATUS];
19
+ //#endregion
20
+ //#region src/job.d.ts
21
+ declare class Job<T = any> {
22
+ readonly queue: Queue<T>;
23
+ readonly id: string;
24
+ readonly name: string;
25
+ readonly data: T;
26
+ readonly groupId: string;
27
+ readonly attemptsMade: number;
28
+ readonly opts: {
29
+ attempts: number;
30
+ delay?: number;
31
+ };
32
+ readonly processedOn?: number;
33
+ readonly finishedOn?: number;
34
+ readonly failedReason: string;
35
+ readonly stacktrace?: string;
36
+ readonly returnvalue?: any;
37
+ readonly timestamp: number;
38
+ readonly orderMs?: number;
39
+ readonly status: Status$1 | 'unknown';
40
+ constructor(args: {
41
+ queue: Queue<T>;
42
+ id: string;
43
+ name?: string;
44
+ data: T;
45
+ groupId: string;
46
+ attemptsMade: number;
47
+ opts: {
48
+ attempts: number;
49
+ delay?: number;
50
+ };
51
+ processedOn?: number;
52
+ finishedOn?: number;
53
+ failedReason?: string;
54
+ stacktrace?: string;
55
+ returnvalue?: any;
56
+ timestamp: number;
57
+ orderMs?: number;
58
+ status?: Status$1 | 'unknown';
59
+ });
60
+ getState(): Promise<Status$1 | 'stuck' | 'waiting-children' | 'prioritized' | 'unknown'>;
61
+ toJSON(): {
62
+ id: string;
63
+ name: string;
64
+ data: T;
65
+ groupId: string;
66
+ attemptsMade: number;
67
+ opts: {
68
+ attempts: number;
69
+ delay?: number;
70
+ };
71
+ processedOn: number | undefined;
72
+ finishedOn: number | undefined;
73
+ failedReason: string;
74
+ stacktrace: string[] | null;
75
+ returnvalue: any;
76
+ timestamp: number;
77
+ orderMs: number | undefined;
78
+ status: Status$1 | "unknown";
79
+ progress: number;
80
+ };
81
+ changeDelay(newDelay: number): Promise<boolean>;
82
+ promote(): Promise<void>;
83
+ remove(): Promise<void>;
84
+ retry(_state?: Extract<Status$1, 'completed' | 'failed'>): Promise<void>;
85
+ updateData(jobData: T): Promise<void>;
86
+ update(jobData: T): Promise<void>;
87
+ static fromReserved<T = any>(queue: Queue<T>, reserved: ReservedJob<T>, meta?: {
88
+ processedOn?: number;
89
+ finishedOn?: number;
90
+ failedReason?: string;
91
+ stacktrace?: string;
92
+ returnvalue?: any;
93
+ status?: Status$1 | string;
94
+ delayMs?: number;
95
+ }): Job<T>;
96
+ /**
97
+ * Create a Job from raw Redis hash data with optional known status
98
+ * This avoids extra Redis lookups when status is already known
99
+ */
100
+ static fromRawHash<T = any>(queue: Queue<T>, id: string, raw: Record<string, string>, knownStatus?: Status$1 | 'unknown'): Job<T>;
101
+ static fromStore<T = any>(queue: Queue<T>, id: string): Promise<Job<T>>;
102
+ }
103
+ //#endregion
104
+ //#region src/logger.d.ts
105
+ interface LoggerInterface {
106
+ warn(...args: any[]): void;
107
+ info(...args: any[]): void;
108
+ error(...args: any[]): void;
109
+ debug(...args: any[]): void;
110
+ }
111
+ //#endregion
112
+ //#region src/queue.d.ts
113
+ /**
114
+ * Options for configuring a GroupMQ queue
115
+ */
116
+ type QueueOptions = {
117
+ /**
118
+ * Logger configuration for queue operations and debugging.
119
+ *
120
+ * @default false (no logging)
121
+ * @example true // Enable basic logging
122
+ * @example customLogger // Use custom logger instance
123
+ *
124
+ * **When to enable:**
125
+ * - Development: For debugging queue operations
126
+ * - Production monitoring: For operational insights
127
+ * - Troubleshooting: When investigating performance issues
128
+ */
129
+ logger?: LoggerInterface | boolean;
130
+ /**
131
+ * Redis client instance for queue operations.
132
+ * Should be a connected ioredis client.
133
+ *
134
+ * @example new Redis('redis://localhost:6379')
135
+ * @example new Redis({ host: 'localhost', port: 6379, db: 0 })
136
+ */
137
+ redis: Redis;
138
+ /**
139
+ * Unique namespace for this queue. Used to separate different queues in the same Redis instance.
140
+ * Should be unique across your application to avoid conflicts.
141
+ *
142
+ * @example 'email-queue'
143
+ * @example 'user-notifications'
144
+ * @example 'data-processing'
145
+ */
146
+ namespace: string;
147
+ /**
148
+ * Maximum time in milliseconds a job can run before being considered failed.
149
+ * Jobs that exceed this timeout will be retried or moved to failed state.
150
+ *
151
+ * @default 30000 (30 seconds)
152
+ * @example 60000 // 1 minute timeout
153
+ * @example 300000 // 5 minute timeout for long-running jobs
154
+ *
155
+ * **When to adjust:**
156
+ * - Long-running jobs: Increase (5-30 minutes)
157
+ * - Short jobs: Decrease (5-15 seconds) for faster failure detection
158
+ * - External API calls: Consider API timeout + buffer
159
+ * - Database operations: Consider query timeout + buffer
160
+ */
161
+ jobTimeoutMs?: number;
162
+ /**
163
+ * Default maximum number of retry attempts for failed jobs.
164
+ * Can be overridden per job or per worker.
165
+ *
166
+ * @default 3
167
+ * @example 5 // Retry failed jobs up to 5 times
168
+ * @example 1 // Fail fast with minimal retries
169
+ *
170
+ * **When to override:**
171
+ * - Critical jobs: Increase retries
172
+ * - Non-critical jobs: Decrease retries
173
+ * - Idempotent operations: Can safely retry more
174
+ * - External API calls: Consider API reliability
175
+ */
176
+ maxAttempts?: number;
177
+ /**
178
+ * Maximum number of groups to scan when looking for available jobs.
179
+ * Higher values may find more jobs but use more Redis resources.
180
+ *
181
+ * @default 20
182
+ * @example 50 // Scan more groups for better job distribution
183
+ * @example 10 // Reduce Redis load for simple queues
184
+ *
185
+ * **When to adjust:**
186
+ * - Many groups: Increase (50-100) for better job distribution
187
+ * - Few groups: Decrease (5-10) to reduce Redis overhead
188
+ * - High job volume: Increase for better throughput
189
+ * - Resource constraints: Decrease to reduce Redis load
190
+ */
191
+ reserveScanLimit?: number;
192
+ /**
193
+ * Maximum number of completed jobs to retain for inspection.
194
+ * Jobs beyond this limit are automatically cleaned up.
195
+ *
196
+ * @default 0 (no retention)
197
+ * @example 100 // Keep last 100 completed jobs
198
+ * @example 1000 // Keep last 1000 completed jobs for analysis
199
+ *
200
+ * **When to adjust:**
201
+ * - Debugging: Increase to investigate issues
202
+ * - Memory constraints: Decrease to reduce Redis memory usage
203
+ * - Compliance: Increase for audit requirements
204
+ */
205
+ keepCompleted?: number;
206
+ /**
207
+ * Maximum number of failed jobs to retain for inspection.
208
+ * Jobs beyond this limit are automatically cleaned up.
209
+ *
210
+ * @default 0 (no retention)
211
+ * @example 1000 // Keep last 1000 failed jobs for analysis
212
+ * @example 10000 // Keep more failed jobs for trend analysis
213
+ *
214
+ * **When to adjust:**
215
+ * - Error analysis: Increase to investigate failure patterns
216
+ * - Memory constraints: Decrease to reduce Redis memory usage
217
+ * - Compliance: Increase for audit requirements
218
+ */
219
+ keepFailed?: number;
220
+ /**
221
+ * TTL for scheduler lock in milliseconds.
222
+ * Prevents multiple schedulers from running simultaneously.
223
+ *
224
+ * @default 1500
225
+ * @example 3000 // 3 seconds for slower environments
226
+ * @example 1000 // 1 second for faster environments
227
+ */
228
+ schedulerLockTtlMs?: number;
229
+ /**
230
+ * Ordering delay in milliseconds. When set, jobs with orderMs will be staged
231
+ * and promoted only after orderMs + orderingDelayMs to ensure proper ordering
232
+ * even when producers are out of sync.
233
+ *
234
+ * @default 0 (no staging, jobs processed immediately)
235
+ * @example 200 // Wait 200ms to ensure all jobs arrive in order
236
+ * @example 1000 // Wait 1 second for strict ordering
237
+ *
238
+ * **When to use:**
239
+ * - Distributed producers with clock drift
240
+ * - Strict timestamp ordering required
241
+ * - Network latency between producers
242
+ *
243
+ * **Note:** Only applies to jobs with orderMs set. Jobs without orderMs
244
+ * are never staged.
245
+ */
246
+ orderingDelayMs?: number;
247
+ /**
248
+ * Enable automatic job batching to reduce Redis load.
249
+ * Jobs are buffered in memory and sent in batches.
250
+ *
251
+ * @default undefined (disabled)
252
+ * @example true // Enable with defaults (size: 10, maxWaitMs: 10)
253
+ * @example { size: 20, maxWaitMs: 5 } // Custom configuration
254
+ *
255
+ * **Trade-offs:**
256
+ * - ✅ 10x fewer Redis calls (huge performance win)
257
+ * - ✅ Higher throughput (5-10x improvement)
258
+ * - ✅ Lower latency per add() call
259
+ * - ⚠️ Jobs buffered in memory briefly before Redis
260
+ * - ⚠️ If process crashes during batch window, those jobs are lost
261
+ *
262
+ * **When to use:**
263
+ * - High job volume (>100 jobs/s)
264
+ * - Using orderingDelayMs (already buffering)
265
+ * - Network latency is a bottleneck
266
+ * - Acceptable risk of losing jobs during crash (e.g., non-critical jobs)
267
+ *
268
+ * **When NOT to use:**
269
+ * - Critical jobs that must be persisted immediately
270
+ * - Very low volume (<10 jobs/s)
271
+ * - Zero tolerance for data loss
272
+ *
273
+ * **Configuration:**
274
+ * - size: Maximum jobs per batch (default: 10)
275
+ * - maxWaitMs: Maximum time to wait before flushing (default: 10)
276
+ *
277
+ * **Safety:**
278
+ * - Keep maxWaitMs small (10ms = very low risk)
279
+ * - Batches are flushed on queue.close()
280
+ * - Consider graceful shutdown handling
281
+ */
282
+ autoBatch?: boolean | {
283
+ size?: number;
284
+ maxWaitMs?: number;
285
+ };
286
+ };
287
+ /**
288
+ * Configuration for repeating jobs
289
+ */
290
+ type RepeatOptions = {
291
+ /**
292
+ * Repeat interval in milliseconds. Job will be created every N milliseconds.
293
+ *
294
+ * @example 60000 // Every minute
295
+ * @example 3600000 // Every hour
296
+ * @example 86400000 // Every day
297
+ *
298
+ * When to use:
299
+ * - Simple intervals: Use for regular, predictable schedules
300
+ * - High frequency: Good for sub-hour intervals
301
+ * - Performance: More efficient than cron for simple intervals
302
+ */
303
+ every: number;
304
+ } | {
305
+ /**
306
+ * Cron pattern for complex scheduling. Uses standard cron syntax with seconds.
307
+ * Format: second minute hour day month dayOfWeek
308
+ *
309
+ * When to use:
310
+ * - Complex schedules: Business hours, specific days, etc.
311
+ * - Low frequency: Good for daily, weekly, monthly schedules
312
+ * - Business logic: Align with business requirements
313
+ *
314
+ * Cron format uses standard syntax with seconds precision.
315
+ */
316
+ pattern: string;
317
+ };
318
+ /**
319
+ * Options for a single job in a flow
320
+ */
321
+ type FlowJob<T = any> = {
322
+ /**
323
+ * Unique ID for the job. If not provided, a UUID will be generated.
324
+ */
325
+ jobId?: string;
326
+ /**
327
+ * Group ID for the job.
328
+ */
329
+ groupId: string;
330
+ /**
331
+ * Data for the job.
332
+ */
333
+ data: T;
334
+ /**
335
+ * Maximum number of retry attempts.
336
+ */
337
+ maxAttempts?: number;
338
+ /**
339
+ * Delay in milliseconds before the job becomes available.
340
+ */
341
+ delay?: number;
342
+ /**
343
+ * Priority/Order timestamp.
344
+ */
345
+ orderMs?: number;
346
+ };
347
+ /**
348
+ * Options for creating a parent-child flow
349
+ */
350
+ type FlowOptions<PT = any, CT = any> = {
351
+ /**
352
+ * The parent job that will be triggered after all children complete.
353
+ */
354
+ parent: FlowJob<PT>;
355
+ /**
356
+ * List of child jobs that must complete before the parent starts.
357
+ */
358
+ children: FlowJob<CT>[];
359
+ };
360
+ /**
361
+ * Options for adding a job to the queue
362
+ *
363
+ * @template T The type of data to store in the job
364
+ */
365
+ type AddOptions<T> = {
366
+ /**
367
+ * Group ID for this job. Jobs with the same groupId are processed sequentially (FIFO).
368
+ * Only one job per group can be processed at a time.
369
+ *
370
+ * @example 'user-123' // All jobs for user 123
371
+ * @example 'email-notifications' // All email jobs
372
+ * @example 'order-processing' // All order-related jobs
373
+ *
374
+ * **Best practices:**
375
+ * - Use meaningful group IDs (user ID, resource ID, etc.)
376
+ * - Keep group IDs consistent for related jobs
377
+ * - Avoid too many unique groups (can impact performance)
378
+ */
379
+ groupId: string;
380
+ /**
381
+ * The data payload for this job. Can be any serializable data.
382
+ *
383
+ * @example { userId: 123, email: 'user@example.com' }
384
+ * @example { orderId: 'order-456', items: [...] }
385
+ * @example 'simple string data'
386
+ */
387
+ data: T;
388
+ /**
389
+ * Custom ordering timestamp in milliseconds. Jobs are processed in orderMs order within each group.
390
+ * If not provided, uses current timestamp (Date.now()).
391
+ *
392
+ * @default Date.now()
393
+ * @example Date.now() + 5000 // Process 5 seconds from now
394
+ * @example 1640995200000 // Specific timestamp
395
+ *
396
+ * **When to use:**
397
+ * - Delayed processing: Set future timestamp
398
+ * - Priority ordering: Use lower timestamps for higher priority
399
+ * - Batch processing: Group related jobs with same timestamp
400
+ */
401
+ orderMs?: number;
402
+ /**
403
+ * Maximum number of retry attempts for this specific job.
404
+ * Overrides the queue's default maxAttempts setting.
405
+ *
406
+ * @default queue.maxAttemptsDefault
407
+ * @example 5 // Retry this job up to 5 times
408
+ * @example 1 // Fail fast with no retries
409
+ *
410
+ * **When to override:**
411
+ * - Critical jobs: Increase retries
412
+ * - Non-critical jobs: Decrease retries
413
+ * - Idempotent operations: Can safely retry more
414
+ * - External API calls: Consider API reliability
415
+ */
416
+ maxAttempts?: number;
417
+ /**
418
+ * Delay in milliseconds before this job becomes available for processing.
419
+ * Alternative to using orderMs for simple delays.
420
+ *
421
+ * @example 5000 // Process after 5 seconds
422
+ * @example 300000 // Process after 5 minutes
423
+ *
424
+ * **When to use:**
425
+ * - Simple delays: Use delay instead of orderMs
426
+ * - Rate limiting: Delay jobs to spread load
427
+ * - Retry backoff: Delay retry attempts
428
+ */
429
+ delay?: number;
430
+ /**
431
+ * Specific time when this job should be processed.
432
+ * Can be a Date object or timestamp in milliseconds.
433
+ *
434
+ * @example new Date('2024-01-01T12:00:00Z')
435
+ * @example Date.now() + 3600000 // 1 hour from now
436
+ *
437
+ * **When to use:**
438
+ * - Scheduled processing: Process at specific time
439
+ * - Business hours: Schedule during working hours
440
+ * - Maintenance windows: Schedule during low-traffic periods
441
+ */
442
+ runAt?: Date | number;
443
+ /**
444
+ * Configuration for repeating jobs (cron or interval-based).
445
+ * Creates a repeating job that generates new instances automatically.
446
+ *
447
+ * @example { every: 60000 } // Every minute
448
+ *
449
+ * When to use:
450
+ * - Periodic tasks: Regular cleanup, reports, etc.
451
+ * - Monitoring: Health checks, metrics collection
452
+ * - Maintenance: Regular database cleanup, cache warming
453
+ */
454
+ repeat?: RepeatOptions;
455
+ /**
456
+ * Custom job ID for idempotence. If a job with this ID already exists,
457
+ * the new job will be ignored (idempotent behavior).
458
+ *
459
+ * @example 'user-123-email-welcome'
460
+ * @example 'order-456-payment-process'
461
+ *
462
+ * **When to use:**
463
+ * - Idempotent operations: Prevent duplicate processing
464
+ * - External system integration: Use external IDs
465
+ * - Retry scenarios: Ensure same job isn't added multiple times
466
+ * - Deduplication: Prevent duplicate jobs from being created
467
+ */
468
+ jobId?: string;
469
+ };
470
+ type ReservedJob<T = any> = {
471
+ id: string;
472
+ groupId: string;
473
+ data: T;
474
+ attempts: number;
475
+ maxAttempts: number;
476
+ seq: number;
477
+ timestamp: number;
478
+ orderMs: number;
479
+ score: number;
480
+ deadlineAt: number;
481
+ };
482
+ declare class Queue<T = any> {
483
+ private logger;
484
+ private r;
485
+ private rawNs;
486
+ private ns;
487
+ private vt;
488
+ private defaultMaxAttempts;
489
+ private scanLimit;
490
+ private keepCompleted;
491
+ private keepFailed;
492
+ private schedulerLockTtlMs;
493
+ orderingDelayMs: number;
494
+ name: string;
495
+ private _consecutiveEmptyReserves;
496
+ private promoterRedis?;
497
+ private promoterRunning;
498
+ private promoterLockId?;
499
+ private promoterInterval?;
500
+ private batchConfig?;
501
+ private batchBuffer;
502
+ private batchTimer?;
503
+ private flushing;
504
+ constructor(opts: QueueOptions);
505
+ get redis(): Redis;
506
+ get namespace(): string;
507
+ get rawNamespace(): string;
508
+ get jobTimeoutMs(): number;
509
+ get maxAttemptsDefault(): number;
510
+ add(opts: AddOptions<T>): Promise<Job<T>>;
511
+ /**
512
+ * Adds a parent-child flow to the queue.
513
+ * The parent job will only be processed after all child jobs have completed successfully.
514
+ * This operation is atomic.
515
+ *
516
+ * @param flow The flow configuration containing parent and children jobs
517
+ * @returns The parent job entity
518
+ */
519
+ addFlow<PT = any, CT = any>(flow: FlowOptions<PT, CT>): Promise<Job<PT>>;
520
+ /**
521
+ * Gets the number of remaining child jobs for a parent job in a flow.
522
+ * @param parentId The ID of the parent job
523
+ * @returns The number of remaining children, or null if the job is not a parent
524
+ */
525
+ getFlowDependencies(parentId: string): Promise<number | null>;
526
+ /**
527
+ * Gets the results of all child jobs in a flow.
528
+ * @param parentId The ID of the parent job
529
+ * @returns An object mapping child job IDs to their results
530
+ */
531
+ getFlowResults(parentId: string): Promise<Record<string, any>>;
532
+ private addSingle;
533
+ private flushBatch;
534
+ reserve(): Promise<ReservedJob<T> | null>;
535
+ /**
536
+ * Check how many jobs are waiting in a specific group
537
+ */
538
+ getGroupJobCount(groupId: string): Promise<number>;
539
+ /**
540
+ * Complete a job by removing from processing and unlocking the group.
541
+ * Note: Job metadata recording is handled separately by recordCompleted().
542
+ *
543
+ * @deprecated Use completeWithMetadata() for internal operations. This method
544
+ * is kept for backward compatibility and testing only.
545
+ */
546
+ complete(job: {
547
+ id: string;
548
+ groupId: string;
549
+ }): Promise<void>;
550
+ /**
551
+ * Complete a job AND record metadata in a single atomic operation.
552
+ * This is the efficient internal method used by workers.
553
+ */
554
+ completeWithMetadata(job: {
555
+ id: string;
556
+ groupId: string;
557
+ }, result: unknown, meta: {
558
+ processedOn: number;
559
+ finishedOn: number;
560
+ attempts: number;
561
+ maxAttempts: number;
562
+ }): Promise<void>;
563
+ /**
564
+ * Atomically complete a job and try to reserve the next job from the same group
565
+ * This prevents race conditions where other workers can steal subsequent jobs from the same group
566
+ */
567
+ /**
568
+ * Atomically complete a job with metadata and reserve the next job from the same group.
569
+ */
570
+ completeAndReserveNextWithMetadata(completedJobId: string, groupId: string, handlerResult: unknown, meta: {
571
+ processedOn: number;
572
+ finishedOn: number;
573
+ attempts: number;
574
+ maxAttempts: number;
575
+ }): Promise<ReservedJob<T> | null>;
576
+ /**
577
+ * Check if a job is currently in processing state
578
+ */
579
+ isJobProcessing(jobId: string): Promise<boolean>;
580
+ retry(jobId: string, backoffMs?: number): Promise<number>;
581
+ /**
582
+ * Dead letter a job (remove from group and optionally store in dead letter queue)
583
+ */
584
+ deadLetter(jobId: string, groupId: string): Promise<number>;
585
+ /**
586
+ * Record a successful completion for retention and inspection
587
+ * Uses consolidated Lua script for atomic operation with retention management
588
+ */
589
+ recordCompleted(job: {
590
+ id: string;
591
+ groupId: string;
592
+ }, result: unknown, meta: {
593
+ processedOn?: number;
594
+ finishedOn?: number;
595
+ attempts?: number;
596
+ maxAttempts?: number;
597
+ data?: unknown;
598
+ }): Promise<void>;
599
+ /**
600
+ * Record a failure attempt (non-final), storing last error for visibility
601
+ */
602
+ recordAttemptFailure(job: {
603
+ id: string;
604
+ groupId: string;
605
+ }, error: {
606
+ message?: string;
607
+ name?: string;
608
+ stack?: string;
609
+ } | string, meta: {
610
+ processedOn?: number;
611
+ finishedOn?: number;
612
+ attempts?: number;
613
+ maxAttempts?: number;
614
+ }): Promise<void>;
615
+ /**
616
+ * Record a final failure (dead-lettered) for retention and inspection
617
+ * Uses consolidated Lua script for atomic operation
618
+ */
619
+ recordFinalFailure(job: {
620
+ id: string;
621
+ groupId: string;
622
+ }, error: {
623
+ message?: string;
624
+ name?: string;
625
+ stack?: string;
626
+ } | string, meta: {
627
+ processedOn?: number;
628
+ finishedOn?: number;
629
+ attempts?: number;
630
+ maxAttempts?: number;
631
+ data?: unknown;
632
+ }): Promise<void>;
633
+ getCompleted(limit?: number): Promise<Array<{
634
+ id: string;
635
+ groupId: string;
636
+ data: any;
637
+ returnvalue: any;
638
+ processedOn?: number;
639
+ finishedOn?: number;
640
+ attempts: number;
641
+ maxAttempts: number;
642
+ }>>;
643
+ getFailed(limit?: number): Promise<Array<{
644
+ id: string;
645
+ groupId: string;
646
+ data: any;
647
+ failedReason: string;
648
+ stacktrace?: string;
649
+ processedOn?: number;
650
+ finishedOn?: number;
651
+ attempts: number;
652
+ maxAttempts: number;
653
+ }>>;
654
+ /**
655
+ * Convenience: return completed jobs as Job entities (non-breaking, new API)
656
+ */
657
+ getCompletedJobs(limit?: number): Promise<Array<Job<T>>>;
658
+ /**
659
+ * Convenience: return failed jobs as Job entities (non-breaking, new API)
660
+ */
661
+ getFailedJobs(limit?: number): Promise<Array<Job<T>>>;
662
+ getCompletedCount(): Promise<number>;
663
+ getFailedCount(): Promise<number>;
664
+ heartbeat(job: {
665
+ id: string;
666
+ groupId: string;
667
+ }, extendMs?: number): Promise<number>;
668
+ /**
669
+ * Clean up expired jobs and stale data.
670
+ * Uses distributed lock to ensure only one worker runs cleanup at a time,
671
+ * similar to scheduler lock pattern.
672
+ */
673
+ cleanup(): Promise<number>;
674
+ /**
675
+ * Calculate adaptive blocking timeout like BullMQ
676
+ * Returns timeout in seconds
677
+ *
678
+ * Inspiration by BullMQ ⭐️
679
+ */
680
+ private getBlockTimeout;
681
+ /**
682
+ * Check if an error is a Redis connection error (should retry)
683
+ * Conservative approach: only connection closed and ECONNREFUSED
684
+ */
685
+ isConnectionError(err: any): boolean;
686
+ reserveBlocking(timeoutSec?: number, blockUntil?: number, blockingClient?: ioredis0.default): Promise<ReservedJob<T> | null>;
687
+ /**
688
+ * Reserve a job from a specific group atomically (eliminates race conditions)
689
+ * @param groupId - The group to reserve from
690
+ */
691
+ reserveAtomic(groupId: string): Promise<ReservedJob<T> | null>;
692
+ /**
693
+ * 获取处于 Ready 状态的 Group 列表
694
+ * @param start
695
+ * @param end
696
+ */
697
+ getReadyGroups(start?: number, end?: number): Promise<string[]>;
698
+ /**
699
+ * 设置组的元数据 (优先级/并发度)
700
+ * 我们将使用 Hash 存储这些配置: groupmq:{ns}:config:{groupId}
701
+ */
702
+ setGroupConfig(groupId: string, config: {
703
+ priority?: number;
704
+ concurrency?: number;
705
+ }): Promise<void>;
706
+ getGroupConfig(groupId: string): Promise<{
707
+ priority: number;
708
+ concurrency: number;
709
+ }>;
710
+ /**
711
+ * 设置指定组的并发上限
712
+ * @param groupId 组 ID
713
+ * @param limit 并发数 (必须 >= 1)
714
+ */
715
+ setGroupConcurrency(groupId: string, limit: number): Promise<void>;
716
+ /**
717
+ * 获取指定组的并发上限
718
+ */
719
+ getGroupConcurrency(groupId: string): Promise<number>;
720
+ /**
721
+ * 获取组内最老任务的入队时间戳
722
+ * 用于 PriorityStrategy 的 aging 算法
723
+ * @param groupId 组 ID
724
+ * @returns 最老任务的时间戳,如果组为空则返回 undefined
725
+ */
726
+ getGroupOldestTimestamp(groupId: string): Promise<number | undefined>;
727
+ /**
728
+ * Reserve up to maxBatch jobs (one per available group) atomically in Lua.
729
+ */
730
+ reserveBatch(maxBatch?: number): Promise<Array<ReservedJob<T>>>;
731
+ /**
732
+ * Get the number of jobs currently being processed (active jobs)
733
+ */
734
+ getActiveCount(): Promise<number>;
735
+ /**
736
+ * Get the number of jobs waiting to be processed
737
+ */
738
+ getWaitingCount(): Promise<number>;
739
+ /**
740
+ * Get the number of jobs delayed due to backoff
741
+ */
742
+ getDelayedCount(): Promise<number>;
743
+ /**
744
+ * Get list of active job IDs
745
+ */
746
+ getActiveJobs(): Promise<string[]>;
747
+ /**
748
+ * Get list of waiting job IDs
749
+ */
750
+ getWaitingJobs(): Promise<string[]>;
751
+ /**
752
+ * Get list of delayed job IDs
753
+ */
754
+ getDelayedJobs(): Promise<string[]>;
755
+ /**
756
+ * Get list of unique group IDs that have jobs
757
+ */
758
+ getUniqueGroups(): Promise<string[]>;
759
+ /**
760
+ * Get count of unique groups that have jobs
761
+ */
762
+ getUniqueGroupsCount(): Promise<number>;
763
+ /**
764
+ * Fetch a single job by ID with enriched fields for UI/inspection.
765
+ * Attempts to mimic BullMQ's Job shape for fields commonly used by BullBoard.
766
+ */
767
+ getJob(id: string): Promise<Job<T>>;
768
+ /**
769
+ * Fetch jobs by statuses, emulating BullMQ's Queue.getJobs API used by BullBoard.
770
+ * Only getter functionality; ordering is best-effort.
771
+ *
772
+ * Optimized with pagination to reduce Redis load - especially important for BullBoard polling.
773
+ */
774
+ getJobsByStatus(jobStatuses: Array<Status$1>, start?: number, end?: number): Promise<Array<Job<T>>>;
775
+ /**
776
+ * Provide counts structured like BullBoard expects.
777
+ */
778
+ getJobCounts(): Promise<Record<'active' | 'waiting' | 'delayed' | 'completed' | 'failed' | 'paused' | 'waiting-children' | 'prioritized', number>>;
779
+ /**
780
+ * Check for stalled jobs and recover or fail them
781
+ * Returns array of [jobId, groupId, action] tuples
782
+ */
783
+ checkStalledJobs(now: number, gracePeriod: number, maxStalledCount: number): Promise<string[]>;
784
+ /**
785
+ * Start the promoter service for staging system.
786
+ * Promoter listens to Redis keyspace notifications and promotes staged jobs when ready.
787
+ * This is idempotent - calling multiple times has no effect if already running.
788
+ */
789
+ startPromoter(): Promise<void>;
790
+ /**
791
+ * Run a single promotion cycle with distributed locking
792
+ */
793
+ private runPromotion;
794
+ /**
795
+ * Stop the promoter service
796
+ */
797
+ stopPromoter(): Promise<void>;
798
+ /**
799
+ * Close underlying Redis connections
800
+ */
801
+ close(): Promise<void>;
802
+ private get pausedKey();
803
+ pause(): Promise<void>;
804
+ resume(): Promise<void>;
805
+ isPaused(): Promise<boolean>;
806
+ /**
807
+ * Wait for the queue to become empty (no active jobs)
808
+ * @param timeoutMs Maximum time to wait in milliseconds (default: 60 seconds)
809
+ * @returns true if queue became empty, false if timeout reached
810
+ */
811
+ waitForEmpty(timeoutMs?: number): Promise<boolean>;
812
+ private _groupCleanupTracking;
813
+ /**
814
+ * Remove problematic groups from ready queue to prevent infinite loops
815
+ * Handles both poisoned groups (only failed/expired jobs) and locked groups
816
+ *
817
+ * Throttled to 1% sampling rate to reduce Redis overhead
818
+ */
819
+ private cleanupPoisonedGroup;
820
+ /**
821
+ * Distributed one-shot scheduler: promotes delayed jobs and processes repeating jobs.
822
+ * Only proceeds if a short-lived scheduler lock can be acquired.
823
+ */
824
+ private schedulerLockKey;
825
+ acquireSchedulerLock(ttlMs?: number): Promise<boolean>;
826
+ runSchedulerOnce(now?: number): Promise<void>;
827
+ /**
828
+ * Promote up to `limit` delayed jobs that are due now. Uses a small Lua to move one item per tick.
829
+ */
830
+ promoteDelayedJobsBounded(limit?: number, now?: number): Promise<number>;
831
+ /**
832
+ * Process up to `limit` repeating job ticks.
833
+ * Intentionally small per-tick work to keep Redis CPU flat.
834
+ */
835
+ processRepeatingJobsBounded(limit?: number, now?: number): Promise<number>;
836
+ /**
837
+ * Promote delayed jobs that are now ready to be processed
838
+ * This should be called periodically to move jobs from delayed set to ready queue
839
+ */
840
+ promoteDelayedJobs(): Promise<number>;
841
+ /**
842
+ * Change the delay of a specific job
843
+ */
844
+ changeDelay(jobId: string, newDelay: number): Promise<boolean>;
845
+ /**
846
+ * Promote a delayed job to be ready immediately
847
+ */
848
+ promote(jobId: string): Promise<boolean>;
849
+ /**
850
+ * Remove a job from the queue regardless of state (waiting, delayed, processing)
851
+ */
852
+ remove(jobId: string): Promise<boolean>;
853
+ /**
854
+ * Clean jobs of a given status older than graceTimeMs
855
+ * @param graceTimeMs Remove jobs with finishedOn <= now - graceTimeMs (for completed/failed)
856
+ * @param limit Max number of jobs to clean in one call
857
+ * @param status Either 'completed' or 'failed'
858
+ */
859
+ clean(graceTimeMs: number, limit: number, status: 'completed' | 'failed' | 'delayed'): Promise<number>;
860
+ /**
861
+ * Update a job's data payload (BullMQ-style)
862
+ */
863
+ updateData(jobId: string, data: T): Promise<void>;
864
+ /**
865
+ * Add a repeating job (cron job)
866
+ */
867
+ private addRepeatingJob;
868
+ /**
869
+ * Compute next execution time using cron-parser (BullMQ-style)
870
+ */
871
+ private getNextCronTime;
872
+ /**
873
+ * Remove a repeating job
874
+ */
875
+ removeRepeatingJob(groupId: string, repeat: RepeatOptions): Promise<boolean>;
876
+ }
877
+ //#endregion
878
+ //#region src/adapters/groupmq-bullboard-adapter.d.ts
879
+ type GroupMQBullBoardAdapterOptions = {
880
+ readOnlyMode?: boolean;
881
+ prefix?: string;
882
+ delimiter?: string;
883
+ description?: string;
884
+ displayName?: string;
885
+ };
886
+ declare class BullBoardGroupMQAdapter<T = any> extends BaseAdapter {
887
+ private queue;
888
+ private options;
889
+ constructor(queue: Queue<T>, options?: GroupMQBullBoardAdapterOptions);
890
+ getDescription(): string;
891
+ getDisplayName(): string;
892
+ getName(): string;
893
+ getRedisInfo(): Promise<string>;
894
+ getJob(id: string): Promise<QueueJob | undefined | null>;
895
+ getJobs(jobStatuses: JobStatus[], start?: number, end?: number): Promise<QueueJob[]>;
896
+ getJobCounts(): Promise<JobCounts>;
897
+ getJobLogs(_id: string): Promise<string[]>;
898
+ getStatuses(): Status[];
899
+ getJobStatuses(): JobStatus[];
900
+ private assertWritable;
901
+ clean(jobStatus: any, graceTimeMs: number): Promise<void>;
902
+ addJob(_name: string, data: any, options: any): Promise<QueueJob>;
903
+ isPaused(): Promise<boolean>;
904
+ pause(): Promise<void>;
905
+ resume(): Promise<void>;
906
+ empty(): Promise<void>;
907
+ promoteAll(): Promise<void>;
908
+ }
909
+ //#endregion
910
+ //#region src/strategies/dispatch-strategy.d.ts
911
+ interface DispatchStrategy {
912
+ /**
913
+ * Decide the next group ID that should be processed.
 914
+ * Returning null means no suitable group needs processing under the current strategy (or the queue is empty).
915
+ */
916
+ getNextGroup(queue: Queue<any>): Promise<string | null>;
917
+ }
918
+ //#endregion
919
+ //#region src/worker.d.ts
920
+ type BackoffStrategy = (attempt: number) => number;
921
+ interface WorkerEvents<T = any> extends Record<string, (...args: any[]) => void> {
922
+ error: (error: Error) => void;
923
+ closed: () => void;
924
+ ready: () => void;
925
+ failed: (job: Job<T>) => void;
926
+ completed: (job: Job<T>) => void;
927
+ 'ioredis:close': () => void;
928
+ 'graceful-timeout': (job: Job<T>) => void;
929
+ stalled: (jobId: string, groupId: string) => void;
930
+ }
931
+ declare class TypedEventEmitter<TEvents extends Record<string, (...args: any[]) => void>> {
932
+ private listeners;
933
+ on<K extends keyof TEvents>(event: K, listener: TEvents[K]): this;
934
+ off<K extends keyof TEvents>(event: K, listener: TEvents[K]): this;
935
+ emit<K extends keyof TEvents>(event: K, ...args: Parameters<TEvents[K]>): boolean;
936
+ removeAllListeners<K extends keyof TEvents>(event?: K): this;
937
+ }
938
+ /**
939
+ * Configuration options for a GroupMQ Worker
940
+ *
941
+ * @template T The type of data stored in jobs
942
+ */
943
+ type WorkerOptions<T> = {
944
+ /** The queue instance this worker will process jobs from */
945
+ queue: Queue<T>;
946
+ /**
947
+ * Optional worker name for logging and identification
948
+ * @default queue.name
949
+ */
950
+ name?: string;
951
+ /**
952
+ * The function that processes jobs. Must be async and handle job failures gracefully.
953
+ * @param job The reserved job to process
954
+ * @returns Promise that resolves when job is complete
955
+ */
956
+ handler: (job: ReservedJob<T>) => Promise<unknown>;
957
+ /**
958
+ * Heartbeat interval in milliseconds to keep jobs alive during processing.
959
+ * Prevents jobs from timing out during long-running operations.
960
+ *
961
+ * @default Math.max(1000, queue.jobTimeoutMs / 3)
962
+ * @example 5000 // Heartbeat every 5 seconds
963
+ *
964
+ * **When to adjust:**
965
+ * - Long-running jobs: Increase to reduce Redis overhead
966
+ * - Short jobs: Decrease for faster timeout detection
967
+ * - High job volume: Increase to reduce Redis commands
968
+ */
969
+ heartbeatMs?: number;
970
+ /**
971
+ * Error handler called when job processing fails or worker encounters errors
972
+ * @param err The error that occurred
973
+ * @param job The job that failed (if applicable)
974
+ */
975
+ onError?: (err: unknown, job?: ReservedJob<T>) => void;
976
+ /**
977
+ * Maximum number of retry attempts for failed jobs at the worker level.
978
+ * This overrides the queue's default maxAttempts setting.
979
+ *
980
+ * @default queue.maxAttemptsDefault
981
+ * @example 5 // Retry failed jobs up to 5 times
982
+ *
983
+ * **When to adjust:**
984
+ * - Critical jobs: Increase for more retries
985
+ * - Non-critical jobs: Decrease to fail faster
986
+ * - External API calls: Consider network reliability
987
+ */
988
+ maxAttempts?: number;
989
+ /**
990
+ * Backoff strategy for retrying failed jobs. Determines delay between retries.
991
+ *
992
+ * @default Exponential backoff with jitter (500ms, 1s, 2s, 4s, 8s, 16s, 30s max)
993
+ * @example (attempt) => Math.min(10000, attempt * 1000) // Linear backoff
994
+ *
995
+ * **When to adjust:**
996
+ * - Rate-limited APIs: Use longer delays
997
+ * - Database timeouts: Use shorter delays
998
+ * - External services: Consider their retry policies
999
+ */
1000
+ backoff?: BackoffStrategy;
1001
+ /**
1002
+ * Whether to enable automatic cleanup of expired and completed jobs.
1003
+ * Cleanup removes old jobs to prevent Redis memory growth.
1004
+ *
1005
+ * @default true
1006
+ * @example false // Disable if you handle cleanup manually
1007
+ *
1008
+ * **When to disable:**
1009
+ * - Manual cleanup: If you have your own cleanup process
1010
+ * - Job auditing: If you need to keep all job history
1011
+ * - Development: For debugging job states
1012
+ */
1013
+ enableCleanup?: boolean;
1014
+ /**
1015
+ * Interval in milliseconds between cleanup operations.
1016
+ * Cleanup removes expired jobs and trims completed/failed job retention.
1017
+ *
1018
+ * @default 300000 (5 minutes)
1019
+ * @example 600000 // Cleanup every 10 minutes
1020
+ *
1021
+ * **When to adjust:**
1022
+ * - High job volume: Increase to reduce Redis overhead
1023
+ * - Low job volume: Decrease for more frequent cleanup
1024
+ * - Memory constraints: Decrease to prevent Redis memory growth
1025
+ * - Job retention needs: Adjust based on keepCompleted/keepFailed settings
1026
+ */
1027
+ cleanupIntervalMs?: number;
1028
+ /**
1029
+ * Interval in milliseconds between scheduler operations.
1030
+ * Scheduler promotes delayed jobs and processes cron/repeating jobs.
1031
+ *
1032
+ * @default 5000 (5 seconds)
1033
+ * @example 1000 // For fast cron jobs (every minute or less)
1034
+ * @example 10000 // For slow cron jobs (hourly or daily)
1035
+ *
1036
+ * **When to adjust:**
1037
+ * - Fast cron jobs: Decrease (1000-2000ms) for sub-minute schedules
1038
+ * - Slow cron jobs: Increase (10000-60000ms) to reduce Redis overhead
1039
+ * - No cron jobs: Increase (5000-10000ms) since only delayed jobs are affected
1040
+ */
1041
+ schedulerIntervalMs?: number;
1042
+ /**
1043
+ * Maximum time in seconds to wait for new jobs when queue is empty.
1044
+ * Shorter timeouts make workers more responsive but use more Redis resources.
1045
+ *
1046
+ * @default 1
1047
+ * @example 0.5 // Very responsive, higher Redis usage
1048
+ * @example 2 // Less responsive, lower Redis usage
1049
+ *
1050
+ * **When to adjust:**
1051
+ * - High job volume: Use 1s or less for faster job pickup
1052
+ * - Low job volume: Increase (2-3s) to reduce Redis overhead
1053
+ * - Real-time requirements: Decrease to 0.5-1s for lower latency
1054
+ * - Resource constraints: Increase to 2-5s to reduce Redis load
1055
+ *
1056
+ * **Note:** The actual timeout is adaptive and can go as low as 1ms
1057
+ * based on queue activity and delayed job schedules.
1058
+ */
1059
+ blockingTimeoutSec?: number;
1060
+ /**
1061
+ * Logger configuration for worker operations and debugging.
1062
+ *
1063
+ * @default false (no logging)
1064
+ * @example true // Enable basic logging
1065
+ * @example customLogger // Use custom logger instance
1066
+ *
1067
+ * **When to enable:**
1068
+ * - Development: For debugging job processing
1069
+ * - Production monitoring: For operational insights
1070
+ * - Troubleshooting: When investigating performance issues
1071
+ */
1072
+ logger?: LoggerInterface | true;
1073
+ /**
1074
+ * Number of jobs this worker can process concurrently.
1075
+ * Higher concurrency increases throughput but uses more memory and CPU.
1076
+ *
1077
+ * @default 1
1078
+ * @example 4 // Process 4 jobs simultaneously
1079
+ * @example 8 // For CPU-intensive jobs on multi-core systems
1080
+ *
1081
+ * **When to adjust:**
1082
+ * - CPU-bound jobs: Set to number of CPU cores
1083
+ * - I/O-bound jobs: Set to 2-4x number of CPU cores
1084
+ * - Memory constraints: Lower concurrency to reduce memory usage
1085
+ * - High job volume: Increase for better throughput
1086
+ * - Single-threaded requirements: Keep at 1
1087
+ */
1088
+ concurrency?: number;
1089
+ /**
1090
+ * Interval in milliseconds between stalled job checks.
1091
+ * Stalled jobs are those whose worker crashed or lost connection.
1092
+ *
1093
+ * @default 30000 (30 seconds)
1094
+ * @example 60000 // Check every minute for lower overhead
1095
+ * @example 10000 // Check every 10 seconds for faster recovery
1096
+ *
1097
+ * **When to adjust:**
1098
+ * - Fast recovery needed: Decrease (10-20s)
1099
+ * - Lower Redis overhead: Increase (60s+)
1100
+ * - Unreliable workers: Decrease for faster detection
1101
+ */
1102
+ stalledInterval?: number;
1103
+ /**
1104
+ * Maximum number of times a job can become stalled before being failed.
1105
+ * A job becomes stalled when its worker crashes or loses connection.
1106
+ *
1107
+ * @default 1
1108
+ * @example 2 // Allow jobs to stall twice before failing
1109
+ * @example 0 // Never fail jobs due to stalling (not recommended)
1110
+ *
1111
+ * **When to adjust:**
1112
+ * - Unreliable infrastructure: Increase to tolerate more failures
1113
+ * - Critical jobs: Increase to allow more recovery attempts
1114
+ * - Quick failure detection: Keep at 1
1115
+ */
1116
+ maxStalledCount?: number;
1117
+ /**
1118
+ * Grace period in milliseconds before a job is considered stalled.
1119
+ * Jobs are only marked as stalled if their deadline has passed by this amount.
1120
+ *
1121
+ * @default 0 (no grace period)
1122
+ * @example 5000 // 5 second grace period for clock skew
1123
+ * @example 1000 // 1 second grace for network latency
1124
+ *
1125
+ * **When to adjust:**
1126
+ * - Clock skew between servers: Add 1-5s grace
1127
+ * - Network latency: Add 1-2s grace
1128
+ * - Strict timing: Keep at 0
1129
+ */
1130
+ stalledGracePeriod?: number;
1131
+ /**
1132
+ * Custom dispatch strategy. If set, the worker ignores the default FIFO scheduling
 1133
+ * and uses strategy-driven polling instead.
1134
+ */
1135
+ strategy?: DispatchStrategy;
1136
+ /**
1137
+ * Polling interval (ms) when running in strategy mode.
 1138
+ * When a Strategy is used, blocking reads (BZPOPMIN) cannot be used, so the worker must fall back to short polling.
1139
+ * @default 50
1140
+ */
1141
+ strategyPollInterval?: number;
1142
+ };
1143
+ declare class _Worker<T = any> extends TypedEventEmitter<WorkerEvents<T>> {
1144
+ private logger;
1145
+ readonly name: string;
1146
+ private q;
1147
+ private handler;
1148
+ private hbMs;
1149
+ private onError?;
1150
+ private stopping;
1151
+ private opts;
1152
+ private ready;
1153
+ private closed;
1154
+ private maxAttempts;
1155
+ private backoff;
1156
+ private enableCleanup;
1157
+ private cleanupMs;
1158
+ private cleanupTimer?;
1159
+ private schedulerTimer?;
1160
+ private schedulerMs;
1161
+ private blockingTimeoutSec;
1162
+ private concurrency;
1163
+ private blockingClient;
1164
+ private stalledCheckTimer?;
1165
+ private stalledInterval;
1166
+ private maxStalledCount;
1167
+ private stalledGracePeriod;
1168
+ private jobsInProgress;
1169
+ private lastJobPickupTime;
1170
+ private totalJobsProcessed;
1171
+ private blockingStats;
1172
+ private emptyReserveBackoffMs;
1173
+ private redisCloseHandler?;
1174
+ private redisErrorHandler?;
1175
+ private redisReadyHandler?;
1176
+ private runLoopPromise?;
1177
+ constructor(opts: WorkerOptions<T>);
1178
+ get isClosed(): boolean;
1179
+ /**
1180
+ * Add jitter to prevent thundering herd problems in high-concurrency environments
1181
+ * @param baseInterval The base interval in milliseconds
1182
+ * @param jitterPercent Percentage of jitter to add (0-1, default 0.1 for 10%)
1183
+ * @returns The interval with jitter applied
1184
+ */
1185
+ private addJitter;
1186
+ private setupRedisEventHandlers;
1187
+ run(): Promise<void>;
1188
+ private _runLoop;
1189
+ private delay;
1190
+ /**
1191
+ * Process a job and return the next job if atomic completion succeeds
1192
+ * This matches BullMQ's processJob signature
1193
+ */
1194
+ private processJob;
1195
+ /**
1196
+ * Complete a job and try to atomically get next job from same group
1197
+ */
1198
+ private completeJob;
1199
+ /**
1200
+ * Start the stalled job checker
1201
+ * Checks periodically for jobs that exceeded their deadline and recovers or fails them
1202
+ */
1203
+ private startStalledChecker;
1204
+ /**
1205
+ * Check for stalled jobs and recover or fail them
1206
+ * A job is stalled when its worker crashed or lost connection
1207
+ */
1208
+ private checkStalled;
1209
+ /**
1210
+ * Get worker performance metrics
1211
+ */
1212
+ getWorkerMetrics(): {
1213
+ name: string;
1214
+ totalJobsProcessed: number;
1215
+ lastJobPickupTime: number;
1216
+ timeSinceLastJob: number | null;
1217
+ blockingStats: {
1218
+ totalBlockingCalls: number;
1219
+ consecutiveEmptyReserves: number;
1220
+ lastActivityTime: number;
1221
+ };
1222
+ isProcessing: boolean;
1223
+ jobsInProgressCount: number;
1224
+ jobsInProgress: {
1225
+ jobId: string;
1226
+ groupId: string;
1227
+ processingTimeMs: number;
1228
+ }[];
1229
+ };
1230
+ /**
1231
+ * Stop the worker gracefully
1232
+ * @param gracefulTimeoutMs Maximum time to wait for current job to finish (default: 30 seconds)
1233
+ */
1234
+ close(gracefulTimeoutMs?: number): Promise<void>;
1235
+ /**
1236
+ * Get information about the first currently processing job (if any)
1237
+ * For concurrency > 1, returns the oldest job in progress
1238
+ */
1239
+ getCurrentJob(): {
1240
+ job: ReservedJob<T>;
1241
+ processingTimeMs: number;
1242
+ } | null;
1243
+ /**
1244
+ * Get information about all currently processing jobs
1245
+ */
1246
+ getCurrentJobs(): Array<{
1247
+ job: ReservedJob<T>;
1248
+ processingTimeMs: number;
1249
+ }>;
1250
+ /**
1251
+ * Check if the worker is currently processing any jobs
1252
+ */
1253
+ isProcessing(): boolean;
1254
+ add(opts: AddOptions<T>): Promise<Job<T>>;
1255
+ private processSingleJob;
1256
+ /**
1257
+ * Handle job failure: emit events, retry or dead-letter
1258
+ */
1259
+ private handleJobFailure;
1260
+ /**
1261
+ * Dead-letter a job that exceeded max attempts
1262
+ */
1263
+ private deadLetterJob;
1264
+ /**
1265
+ * Record a failed attempt (not final)
1266
+ */
1267
+ private recordFailureAttempt;
1268
+ }
1269
+ type Worker<T = any> = _Worker<T>;
1270
+ type WorkerConstructor = new <T>(opts: WorkerOptions<T>) => _Worker<T>;
1271
+ declare const Worker: WorkerConstructor;
1272
+ //#endregion
1273
+ //#region src/helpers.d.ts
1274
+ /**
1275
+ * Wait for a queue to become empty
1276
+ * @param queue The queue to monitor
1277
+ * @param timeoutMs Maximum time to wait (default: 60 seconds)
1278
+ * @returns Promise that resolves when queue is empty or timeout is reached
1279
+ */
1280
+ declare function waitForQueueToEmpty(queue: Queue, timeoutMs?: number): Promise<boolean>;
1281
+ /**
1282
+ * Get status of all workers
1283
+ */
1284
+ declare function getWorkersStatus<T = any>(workers: Worker<T>[]): {
1285
+ total: number;
1286
+ processing: number;
1287
+ idle: number;
1288
+ workers: Array<{
1289
+ index: number;
1290
+ isProcessing: boolean;
1291
+ currentJob?: {
1292
+ jobId: string;
1293
+ groupId: string;
1294
+ processingTimeMs: number;
1295
+ };
1296
+ }>;
1297
+ };
1298
+ //#endregion
1299
+ export { AddOptions, BackoffStrategy, BullBoardGroupMQAdapter, FlowJob, FlowOptions, GroupMQBullBoardAdapterOptions, Job, Queue, QueueOptions, RepeatOptions, ReservedJob, Worker, WorkerEvents, WorkerOptions, getWorkersStatus, waitForQueueToEmpty };
1300
+ //# sourceMappingURL=index.d.cts.map