@trigger.dev/redis-worker 4.2.0 → 4.3.1

This diff shows the content of publicly available package versions as released to their public registries, and is provided for informational purposes only.
package/dist/index.d.ts CHANGED
@@ -1,8 +1,8 @@
1
- import { Callback, Result, RedisOptions } from '@internal/redis';
1
+ import { Callback, Result, RedisOptions, Redis } from '@internal/redis';
2
2
  import { Logger } from '@trigger.dev/core/logger';
3
3
  import { z } from 'zod';
4
- import { Tracer, Meter } from '@internal/tracing';
5
- import { RetryOptions } from '@trigger.dev/core/v3/schemas';
4
+ import { Tracer, Meter, Counter, Histogram, ObservableGauge, Span, SpanKind, Attributes } from '@internal/tracing';
5
+ import { RetryOptions as RetryOptions$1 } from '@trigger.dev/core/v3/schemas';
6
6
 
7
7
  interface MessageCatalogSchema {
8
8
  [key: string]: z.ZodFirstPartySchemaTypes | z.ZodDiscriminatedUnion<any, any>;
@@ -101,7 +101,7 @@ type WorkerCatalog = {
101
101
  [key: string]: {
102
102
  schema: z.ZodFirstPartySchemaTypes | z.ZodDiscriminatedUnion<any, any>;
103
103
  visibilityTimeoutMs: number;
104
- retry?: RetryOptions;
104
+ retry?: RetryOptions$1;
105
105
  cron?: string;
106
106
  jitterInMs?: number;
107
107
  /** Defaults to true. If false, errors will not be logged. */
@@ -229,4 +229,1455 @@ declare class Worker<TCatalog extends WorkerCatalog> {
229
229
  stop(): Promise<void>;
230
230
  }
231
231
 
232
- export { type AnyMessageCatalog, type AnyQueueItem, CronSchema, type JobHandler, type JobHandlerParams, type MessageCatalogKey, type MessageCatalogSchema, type MessageCatalogValue, type QueueItem, SimpleQueue, Worker, type WorkerCatalog, type WorkerConcurrencyOptions };
232
+ /**
233
+ * RetryStrategy interface for pluggable retry logic.
234
+ */
235
+ interface RetryStrategy {
236
+ /**
237
+ * Calculate the next retry delay in milliseconds.
238
+ * Return null to indicate the message should be sent to DLQ.
239
+ *
240
+ * @param attempt - Current attempt number (1-indexed)
241
+ * @param error - Optional error from the failed attempt
242
+ * @returns Delay in milliseconds, or null to send to DLQ
243
+ */
244
+ getNextDelay(attempt: number, error?: Error): number | null;
245
+ /**
246
+ * Maximum number of attempts before moving to DLQ.
247
+ */
248
+ maxAttempts: number;
249
+ }
250
+ /**
251
+ * Exponential backoff retry strategy.
252
+ *
253
+ * Uses the same algorithm as @trigger.dev/core's calculateNextRetryDelay.
254
+ */
255
+ declare class ExponentialBackoffRetry implements RetryStrategy {
256
+ readonly maxAttempts: number;
257
+ private options;
258
+ constructor(options?: Partial<RetryOptions$1>);
259
+ getNextDelay(attempt: number, _error?: Error): number | null;
260
+ }
261
+ /**
262
+ * Fixed delay retry strategy.
263
+ *
264
+ * Always waits the same amount of time between retries.
265
+ */
266
+ declare class FixedDelayRetry implements RetryStrategy {
267
+ readonly maxAttempts: number;
268
+ private delayMs;
269
+ constructor(options: {
270
+ maxAttempts: number;
271
+ delayMs: number;
272
+ });
273
+ getNextDelay(attempt: number, _error?: Error): number | null;
274
+ }
275
+ /**
276
+ * Linear backoff retry strategy.
277
+ *
278
+ * Delay increases linearly with each attempt.
279
+ */
280
+ declare class LinearBackoffRetry implements RetryStrategy {
281
+ readonly maxAttempts: number;
282
+ private baseDelayMs;
283
+ private maxDelayMs;
284
+ constructor(options: {
285
+ maxAttempts: number;
286
+ baseDelayMs: number;
287
+ maxDelayMs?: number;
288
+ });
289
+ getNextDelay(attempt: number, _error?: Error): number | null;
290
+ }
291
+ /**
292
+ * No retry strategy.
293
+ *
294
+ * Messages go directly to DLQ on first failure.
295
+ */
296
+ declare class NoRetry implements RetryStrategy {
297
+ readonly maxAttempts = 1;
298
+ getNextDelay(_attempt: number, _error?: Error): number | null;
299
+ }
300
+ /**
301
+ * Immediate retry strategy.
302
+ *
303
+ * Retries immediately without any delay.
304
+ */
305
+ declare class ImmediateRetry implements RetryStrategy {
306
+ readonly maxAttempts: number;
307
+ constructor(maxAttempts: number);
308
+ getNextDelay(attempt: number, _error?: Error): number | null;
309
+ }
310
+ /**
311
+ * Custom retry strategy that uses a user-provided function.
312
+ */
313
+ declare class CustomRetry implements RetryStrategy {
314
+ readonly maxAttempts: number;
315
+ private calculateDelay;
316
+ constructor(options: {
317
+ maxAttempts: number;
318
+ calculateDelay: (attempt: number, error?: Error) => number | null;
319
+ });
320
+ getNextDelay(attempt: number, error?: Error): number | null;
321
+ }
322
+ /**
323
+ * Default retry options matching @trigger.dev/core defaults.
324
+ */
325
+ declare const defaultRetryOptions: RetryOptions$1;
326
+ /**
327
+ * Create an exponential backoff retry strategy with default options.
328
+ */
329
+ declare function createDefaultRetryStrategy(): RetryStrategy;
330
+
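// Usage sketch (illustrative, not part of the diff above): constructing the retry strategies
// declared here. Class names and option fields come from this .d.ts; the concrete numbers are
// only examples.
import {
  CustomRetry,
  FixedDelayRetry,
  LinearBackoffRetry,
  createDefaultRetryStrategy,
  type RetryStrategy,
} from "@trigger.dev/redis-worker";

// Exponential backoff with the @trigger.dev/core default options.
const exponential: RetryStrategy = createDefaultRetryStrategy();

// Always wait 5 seconds between attempts, stop after 3 attempts.
const fixed: RetryStrategy = new FixedDelayRetry({ maxAttempts: 3, delayMs: 5_000 });

// 1s, 2s, 3s, ... capped at 30s, for up to 10 attempts.
const linear: RetryStrategy = new LinearBackoffRetry({
  maxAttempts: 10,
  baseDelayMs: 1_000,
  maxDelayMs: 30_000,
});

// Fully custom: returning null sends the message straight to the dead letter queue.
const custom: RetryStrategy = new CustomRetry({
  maxAttempts: 5,
  calculateDelay: (attempt, error) => (error?.name === "FatalError" ? null : attempt * 250),
});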
331
+ /**
332
+ * Interface for a global rate limiter that limits processing across all consumers.
333
+ * When configured, consumers will check this before processing each message.
334
+ */
335
+ interface GlobalRateLimiter {
336
+ /**
337
+ * Check if processing is allowed under the rate limit.
338
+ * @returns Object with allowed flag and optional resetAt timestamp (ms since epoch)
339
+ */
340
+ limit(): Promise<{
341
+ allowed: boolean;
342
+ resetAt?: number;
343
+ }>;
344
+ }
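// Usage sketch (illustrative, not part of the diff): an in-process fixed-window implementation
// of GlobalRateLimiter. A limiter shared across consumers would normally be backed by Redis.
import type { GlobalRateLimiter } from "@trigger.dev/redis-worker";

class FixedWindowRateLimiter implements GlobalRateLimiter {
  private windowStart = Date.now();
  private used = 0;

  constructor(
    private readonly maxPerWindow: number,
    private readonly windowMs: number
  ) {}

  async limit(): Promise<{ allowed: boolean; resetAt?: number }> {
    const now = Date.now();
    if (now - this.windowStart >= this.windowMs) {
      this.windowStart = now;
      this.used = 0;
    }
    if (this.used < this.maxPerWindow) {
      this.used++;
      return { allowed: true };
    }
    // resetAt is milliseconds since epoch, as documented on the interface.
    return { allowed: false, resetAt: this.windowStart + this.windowMs };
  }
}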
345
+ /**
346
+ * Descriptor for a queue in the fair queue system.
347
+ * Contains all the metadata needed to identify and route a queue.
348
+ */
349
+ interface QueueDescriptor {
350
+ /** Unique queue identifier */
351
+ id: string;
352
+ /** Tenant this queue belongs to */
353
+ tenantId: string;
354
+ /** Additional metadata for concurrency group extraction */
355
+ metadata: Record<string, unknown>;
356
+ }
357
+ /**
358
+ * A message in the queue with its metadata.
359
+ */
360
+ interface QueueMessage<TPayload = unknown> {
361
+ /** Unique message identifier */
362
+ id: string;
363
+ /** The queue this message belongs to */
364
+ queueId: string;
365
+ /** Message payload */
366
+ payload: TPayload;
367
+ /** Timestamp when message was enqueued */
368
+ timestamp: number;
369
+ /** Current attempt number (1-indexed, for retries) */
370
+ attempt: number;
371
+ /** Optional metadata */
372
+ metadata?: Record<string, unknown>;
373
+ }
374
+ /**
375
+ * Internal message format stored in Redis.
376
+ * Includes additional fields for tracking and routing.
377
+ */
378
+ interface StoredMessage<TPayload = unknown> {
379
+ /** Message ID */
380
+ id: string;
381
+ /** Queue ID */
382
+ queueId: string;
383
+ /** Tenant ID */
384
+ tenantId: string;
385
+ /** Message payload */
386
+ payload: TPayload;
387
+ /** Timestamp when enqueued */
388
+ timestamp: number;
389
+ /** Current attempt number */
390
+ attempt: number;
391
+ /** Worker queue to route to */
392
+ workerQueue?: string;
393
+ /** Additional metadata */
394
+ metadata?: Record<string, unknown>;
395
+ }
396
+ /**
397
+ * Queue with its score (oldest message timestamp) from the master queue.
398
+ */
399
+ interface QueueWithScore {
400
+ /** Queue identifier */
401
+ queueId: string;
402
+ /** Score (typically oldest message timestamp) */
403
+ score: number;
404
+ /** Tenant ID extracted from queue */
405
+ tenantId: string;
406
+ }
407
+ /**
408
+ * Configuration for a concurrency group.
409
+ * Allows defining arbitrary levels of concurrency (tenant, org, project, etc.)
410
+ */
411
+ interface ConcurrencyGroupConfig {
412
+ /** Group name (e.g., "tenant", "organization", "project") */
413
+ name: string;
414
+ /** Extract the group ID from a queue descriptor */
415
+ extractGroupId: (queue: QueueDescriptor) => string;
416
+ /** Get the concurrency limit for a specific group ID */
417
+ getLimit: (groupId: string) => Promise<number>;
418
+ /** Default limit if not specified */
419
+ defaultLimit: number;
420
+ }
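// Usage sketch (illustrative, not part of the diff): two concurrency groups, one keyed by
// tenant and one by a "projectId" metadata field (that metadata key is an assumption here).
import type { ConcurrencyGroupConfig } from "@trigger.dev/redis-worker";

const tenantConcurrency: ConcurrencyGroupConfig = {
  name: "tenant",
  extractGroupId: (queue) => queue.tenantId,
  getLimit: async (_tenantId) => 25, // e.g. look up a per-tenant plan limit here
  defaultLimit: 10,
};

const projectConcurrency: ConcurrencyGroupConfig = {
  name: "project",
  extractGroupId: (queue) => String(queue.metadata.projectId ?? "unknown"),
  getLimit: async () => 5,
  defaultLimit: 5,
};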
421
+ /**
422
+ * Current concurrency state for a group.
423
+ */
424
+ interface ConcurrencyState {
425
+ /** Group name */
426
+ groupName: string;
427
+ /** Group ID */
428
+ groupId: string;
429
+ /** Current active count */
430
+ current: number;
431
+ /** Configured limit */
432
+ limit: number;
433
+ }
434
+ /**
435
+ * Result of a concurrency check.
436
+ */
437
+ interface ConcurrencyCheckResult {
438
+ /** Whether processing is allowed */
439
+ allowed: boolean;
440
+ /** If not allowed, which group is blocking */
441
+ blockedBy?: ConcurrencyState;
442
+ }
443
+ /**
444
+ * Queues grouped by tenant for the scheduler.
445
+ */
446
+ interface TenantQueues {
447
+ /** Tenant identifier */
448
+ tenantId: string;
449
+ /** Queue IDs belonging to this tenant, in priority order */
450
+ queues: string[];
451
+ }
452
+ /**
453
+ * Context provided to the scheduler for making decisions.
454
+ */
455
+ interface SchedulerContext {
456
+ /** Get current concurrency for a group */
457
+ getCurrentConcurrency(groupName: string, groupId: string): Promise<number>;
458
+ /** Get concurrency limit for a group */
459
+ getConcurrencyLimit(groupName: string, groupId: string): Promise<number>;
460
+ /** Check if a group is at capacity */
461
+ isAtCapacity(groupName: string, groupId: string): Promise<boolean>;
462
+ /** Get queue descriptor by ID */
463
+ getQueueDescriptor(queueId: string): QueueDescriptor;
464
+ }
465
+ /**
466
+ * Pluggable scheduler interface for fair queue selection.
467
+ */
468
+ interface FairScheduler {
469
+ /**
470
+ * Select queues for processing from a master queue shard.
471
+ * Returns queues grouped by tenant, ordered by the fairness algorithm.
472
+ *
473
+ * @param masterQueueShard - The master queue shard key
474
+ * @param consumerId - The consumer making the request
475
+ * @param context - Context for concurrency checks
476
+ * @returns Queues grouped by tenant in priority order
477
+ */
478
+ selectQueues(masterQueueShard: string, consumerId: string, context: SchedulerContext): Promise<TenantQueues[]>;
479
+ /**
480
+ * Called after processing a message to update scheduler state.
481
+ * Optional - not all schedulers need to track state.
482
+ */
483
+ recordProcessed?(tenantId: string, queueId: string): Promise<void>;
484
+ /**
485
+ * Initialize the scheduler (called once on startup).
486
+ */
487
+ initialize?(): Promise<void>;
488
+ /**
489
+ * Cleanup scheduler resources.
490
+ */
491
+ close?(): Promise<void>;
492
+ }
493
+ /**
494
+ * An in-flight message being processed.
495
+ */
496
+ interface InFlightMessage<TPayload = unknown> {
497
+ /** Message ID */
498
+ messageId: string;
499
+ /** Queue ID */
500
+ queueId: string;
501
+ /** Message payload */
502
+ payload: TPayload;
503
+ /** When visibility timeout expires */
504
+ deadline: number;
505
+ /** Consumer that claimed this message */
506
+ consumerId: string;
507
+ }
508
+ /**
509
+ * Result of claiming a message.
510
+ */
511
+ interface ClaimResult<TPayload = unknown> {
512
+ /** Whether the claim was successful */
513
+ claimed: boolean;
514
+ /** The claimed message if successful */
515
+ message?: InFlightMessage<TPayload>;
516
+ }
517
+ /**
518
+ * Interface for generating Redis keys for the fair queue system.
519
+ * Implementations can customize key prefixes and structures.
520
+ */
521
+ interface FairQueueKeyProducer {
522
+ /** Get the master queue key for a shard */
523
+ masterQueueKey(shardId: number): string;
524
+ /** Get the queue key for storing messages */
525
+ queueKey(queueId: string): string;
526
+ /** Get the queue items hash key */
527
+ queueItemsKey(queueId: string): string;
528
+ /** Get the concurrency set key for a group */
529
+ concurrencyKey(groupName: string, groupId: string): string;
530
+ /** Get the in-flight sorted set key for a shard */
531
+ inflightKey(shardId: number): string;
532
+ /** Get the in-flight message data hash key */
533
+ inflightDataKey(shardId: number): string;
534
+ /** Get the worker queue key for a consumer */
535
+ workerQueueKey(consumerId: string): string;
536
+ /** Get the dead letter queue key for a tenant */
537
+ deadLetterQueueKey(tenantId: string): string;
538
+ /** Get the dead letter queue data hash key for a tenant */
539
+ deadLetterQueueDataKey(tenantId: string): string;
540
+ /** Extract tenant ID from a queue ID */
541
+ extractTenantId(queueId: string): string;
542
+ /** Extract a specific group ID from a queue ID */
543
+ extractGroupId(groupName: string, queueId: string): string;
544
+ }
545
+ /**
546
+ * Worker queue configuration options.
547
+ */
548
+ interface WorkerQueueOptions<TPayload = unknown> {
549
+ /** Whether to enable worker queues (default: false for backwards compatibility) */
550
+ enabled: boolean;
551
+ /** Blocking pop timeout in seconds (default: 10) */
552
+ blockingTimeoutSeconds?: number;
553
+ /** Function to resolve which worker queue a message should go to */
554
+ resolveWorkerQueue?: (message: StoredMessage<TPayload>) => string;
555
+ }
556
+ /**
557
+ * Retry and dead letter queue configuration.
558
+ */
559
+ interface RetryOptions {
560
+ /** Retry strategy for failed messages */
561
+ strategy: RetryStrategy;
562
+ /** Whether to enable dead letter queue (default: true) */
563
+ deadLetterQueue?: boolean;
564
+ }
565
+ /**
566
+ * Queue cooloff configuration to avoid repeatedly polling concurrency-limited queues.
567
+ */
568
+ interface CooloffOptions {
569
+ /** Whether cooloff is enabled (default: true) */
570
+ enabled?: boolean;
571
+ /** Number of consecutive empty dequeues before entering cooloff (default: 10) */
572
+ threshold?: number;
573
+ /** Duration of cooloff period in milliseconds (default: 10000) */
574
+ periodMs?: number;
575
+ }
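// Usage sketch (illustrative, not part of the diff): the workerQueue, retry and cooloff option
// shapes declared above, as they would later be passed into FairQueueOptions. The per-tenant
// routing rule in resolveWorkerQueue is an assumption.
import { createDefaultRetryStrategy } from "@trigger.dev/redis-worker";
import type { CooloffOptions, RetryOptions, WorkerQueueOptions } from "@trigger.dev/redis-worker";

const workerQueue: WorkerQueueOptions = {
  enabled: true,
  blockingTimeoutSeconds: 10,
  // Route every message for the same tenant to the same worker queue.
  resolveWorkerQueue: (message) => `tenant-${message.tenantId}`,
};

const retry: RetryOptions = {
  strategy: createDefaultRetryStrategy(),
  deadLetterQueue: true,
};

const cooloff: CooloffOptions = {
  enabled: true,
  threshold: 10, // consecutive empty dequeues before a queue enters cooloff
  periodMs: 10_000,
};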
576
+ /**
577
+ * Options for creating a FairQueue instance.
578
+ *
579
+ * @typeParam TPayloadSchema - Zod schema for message payload validation
580
+ */
581
+ interface FairQueueOptions<TPayloadSchema extends z.ZodTypeAny = z.ZodUnknown> {
582
+ /** Redis connection options */
583
+ redis: RedisOptions;
584
+ /** Key producer for Redis keys */
585
+ keys: FairQueueKeyProducer;
586
+ /** Scheduler for fair queue selection */
587
+ scheduler: FairScheduler;
588
+ /** Zod schema for message payload validation */
589
+ payloadSchema?: TPayloadSchema;
590
+ /** Whether to validate payloads on enqueue (default: false) */
591
+ validateOnEnqueue?: boolean;
592
+ /** Number of master queue shards (default: 1) */
593
+ shardCount?: number;
594
+ /** Concurrency group configurations */
595
+ concurrencyGroups?: ConcurrencyGroupConfig[];
596
+ /** Worker queue configuration */
597
+ workerQueue?: WorkerQueueOptions<z.infer<TPayloadSchema>>;
598
+ /** Retry and dead letter queue configuration */
599
+ retry?: RetryOptions;
600
+ /** Visibility timeout in milliseconds (default: 30000) */
601
+ visibilityTimeoutMs?: number;
602
+ /** Heartbeat interval in milliseconds (default: visibilityTimeoutMs / 3) */
603
+ heartbeatIntervalMs?: number;
604
+ /** Interval for reclaiming timed-out messages (default: 5000) */
605
+ reclaimIntervalMs?: number;
606
+ /** Number of consumer loops to run (default: 1) */
607
+ consumerCount?: number;
608
+ /** Interval between consumer iterations in milliseconds (default: 100) */
609
+ consumerIntervalMs?: number;
610
+ /** Whether to start consumers on initialization (default: true) */
611
+ startConsumers?: boolean;
612
+ /** Queue cooloff configuration */
613
+ cooloff?: CooloffOptions;
614
+ /** Logger instance */
615
+ logger?: Logger;
616
+ /** OpenTelemetry tracer */
617
+ tracer?: Tracer;
618
+ /** OpenTelemetry meter */
619
+ meter?: Meter;
620
+ /** Name for metrics/tracing (default: "fairqueue") */
621
+ name?: string;
622
+ /** Optional global rate limiter to limit processing across all consumers */
623
+ globalRateLimiter?: GlobalRateLimiter;
624
+ }
625
+ /**
626
+ * Context passed to the message handler.
627
+ */
628
+ interface MessageHandlerContext<TPayload = unknown> {
629
+ /** The message being processed */
630
+ message: QueueMessage<TPayload>;
631
+ /** Queue descriptor */
632
+ queue: QueueDescriptor;
633
+ /** Consumer ID processing this message */
634
+ consumerId: string;
635
+ /** Extend the visibility timeout */
636
+ heartbeat(): Promise<boolean>;
637
+ /** Mark message as successfully processed */
638
+ complete(): Promise<void>;
639
+ /** Release message back to the queue for retry */
640
+ release(): Promise<void>;
641
+ /** Mark message as failed (triggers retry or DLQ) */
642
+ fail(error?: Error): Promise<void>;
643
+ }
644
+ /**
645
+ * Handler function for processing messages.
646
+ */
647
+ type MessageHandler<TPayload = unknown> = (context: MessageHandlerContext<TPayload>) => Promise<void>;
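// Usage sketch (illustrative, not part of the diff): a MessageHandler that heartbeats while it
// works, completes on success and marks the message failed otherwise (triggering retry or DLQ).
import type { MessageHandler } from "@trigger.dev/redis-worker";

type WebhookPayload = { url: string; body: string };

const handleWebhook: MessageHandler<WebhookPayload> = async (ctx) => {
  // Keep extending the visibility timeout for long-running work.
  const keepAlive = setInterval(() => void ctx.heartbeat(), 10_000);
  try {
    await fetch(ctx.message.payload.url, { method: "POST", body: ctx.message.payload.body });
    await ctx.complete();
  } catch (error) {
    await ctx.fail(error instanceof Error ? error : new Error(String(error)));
  } finally {
    clearInterval(keepAlive);
  }
};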
648
+ /**
649
+ * A message in the dead letter queue.
650
+ */
651
+ interface DeadLetterMessage<TPayload = unknown> {
652
+ /** Message ID */
653
+ id: string;
654
+ /** Original queue ID */
655
+ queueId: string;
656
+ /** Tenant ID */
657
+ tenantId: string;
658
+ /** Message payload */
659
+ payload: TPayload;
660
+ /** Timestamp when moved to DLQ */
661
+ deadLetteredAt: number;
662
+ /** Number of attempts before DLQ */
663
+ attempts: number;
664
+ /** Last error message if available */
665
+ lastError?: string;
666
+ /** Original message timestamp */
667
+ originalTimestamp: number;
668
+ }
669
+ /**
670
+ * Cooloff state for a queue.
671
+ */
672
+ type QueueCooloffState = {
673
+ tag: "normal";
674
+ consecutiveFailures: number;
675
+ } | {
676
+ tag: "cooloff";
677
+ expiresAt: number;
678
+ };
679
+ /**
680
+ * Options for enqueueing a message.
681
+ */
682
+ interface EnqueueOptions<TPayload = unknown> {
683
+ /** Queue to add the message to */
684
+ queueId: string;
685
+ /** Tenant ID for the queue */
686
+ tenantId: string;
687
+ /** Message payload */
688
+ payload: TPayload;
689
+ /** Optional message ID (auto-generated if not provided) */
690
+ messageId?: string;
691
+ /** Optional timestamp (defaults to now) */
692
+ timestamp?: number;
693
+ /** Optional metadata for concurrency group extraction */
694
+ metadata?: Record<string, string>;
695
+ }
696
+ /**
697
+ * Options for enqueueing multiple messages.
698
+ */
699
+ interface EnqueueBatchOptions<TPayload = unknown> {
700
+ /** Queue to add messages to */
701
+ queueId: string;
702
+ /** Tenant ID for the queue */
703
+ tenantId: string;
704
+ /** Messages to enqueue */
705
+ messages: Array<{
706
+ payload: TPayload;
707
+ messageId?: string;
708
+ timestamp?: number;
709
+ }>;
710
+ /** Optional metadata for concurrency group extraction */
711
+ metadata?: Record<string, string>;
712
+ }
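// Usage sketch (illustrative, not part of the diff): enqueueing single and batched messages
// against a FairQueue instance (construction is shown in the example near the end of this
// file). Queue IDs of the form "tenant:{tenantId}:..." match DefaultFairQueueKeyProducer's
// default tenant extraction.
import type { FairQueue } from "@trigger.dev/redis-worker";
import { z } from "zod";

const payloadSchema = z.object({ url: z.string() });

async function enqueueExamples(queue: FairQueue<typeof payloadSchema>) {
  await queue.enqueue({
    queueId: "tenant:acme:webhooks",
    tenantId: "acme",
    payload: { url: "https://example.com/hooks/1" },
    metadata: { projectId: "proj_123" }, // assumed metadata key used by a concurrency group
  });

  await queue.enqueueBatch({
    queueId: "tenant:acme:webhooks",
    tenantId: "acme",
    messages: [
      { payload: { url: "https://example.com/hooks/2" } },
      { payload: { url: "https://example.com/hooks/3" } },
    ],
  });
}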
713
+ /**
714
+ * Configuration for the Deficit Round Robin scheduler.
715
+ */
716
+ interface DRRSchedulerConfig {
717
+ /** Credits allocated per tenant per round */
718
+ quantum: number;
719
+ /** Maximum accumulated deficit (prevents starvation) */
720
+ maxDeficit: number;
721
+ /** Redis options for state storage */
722
+ redis: RedisOptions;
723
+ /** Key producer */
724
+ keys: FairQueueKeyProducer;
725
+ /** Optional logger */
726
+ logger?: {
727
+ debug: (message: string, context?: Record<string, unknown>) => void;
728
+ error: (message: string, context?: Record<string, unknown>) => void;
729
+ };
730
+ }
731
+ /**
732
+ * Bias configuration for weighted shuffle scheduler.
733
+ */
734
+ interface WeightedSchedulerBiases {
735
+ /**
736
+ * How much to bias towards tenants with higher concurrency limits.
737
+ * 0 = no bias, 1 = full bias based on limit differences
738
+ */
739
+ concurrencyLimitBias: number;
740
+ /**
741
+ * How much to bias towards tenants with more available capacity.
742
+ * 0 = no bias, 1 = full bias based on available capacity
743
+ */
744
+ availableCapacityBias: number;
745
+ /**
746
+ * Controls randomization of queue ordering within tenants.
747
+ * 0 = strict age-based ordering (oldest first)
748
+ * 1 = completely random ordering
749
+ * Values between 0-1 blend between age-based and random ordering
750
+ */
751
+ queueAgeRandomization: number;
752
+ }
753
+ /**
754
+ * Configuration for the weighted shuffle scheduler.
755
+ */
756
+ interface WeightedSchedulerConfig {
757
+ /** Redis options */
758
+ redis: RedisOptions;
759
+ /** Key producer */
760
+ keys: FairQueueKeyProducer;
761
+ /** Default tenant concurrency limit */
762
+ defaultTenantConcurrencyLimit?: number;
763
+ /** Maximum queues to consider from master queue */
764
+ masterQueueLimit?: number;
765
+ /** Bias configuration */
766
+ biases?: WeightedSchedulerBiases;
767
+ /** Number of iterations to reuse a snapshot */
768
+ reuseSnapshotCount?: number;
769
+ /** Maximum number of tenants to consider */
770
+ maximumTenantCount?: number;
771
+ /** Random seed for reproducibility */
772
+ seed?: string;
773
+ /** Optional tracer */
774
+ tracer?: Tracer;
775
+ }
776
+
777
+ /**
778
+ * Default key producer for the fair queue system.
779
+ * Uses a configurable prefix and standard key structure.
780
+ *
781
+ * Key structure:
782
+ * - Master queue: {prefix}:master:{shardId}
783
+ * - Queue: {prefix}:queue:{queueId}
784
+ * - Queue items: {prefix}:queue:{queueId}:items
785
+ * - Concurrency: {prefix}:concurrency:{groupName}:{groupId}
786
+ * - In-flight: {prefix}:inflight:{shardId}
787
+ * - In-flight data: {prefix}:inflight:{shardId}:data
788
+ * - Worker queue: {prefix}:worker:{consumerId}
789
+ */
790
+ declare class DefaultFairQueueKeyProducer implements FairQueueKeyProducer {
791
+ #private;
792
+ private readonly prefix;
793
+ private readonly separator;
794
+ constructor(options?: {
795
+ prefix?: string;
796
+ separator?: string;
797
+ });
798
+ masterQueueKey(shardId: number): string;
799
+ queueKey(queueId: string): string;
800
+ queueItemsKey(queueId: string): string;
801
+ concurrencyKey(groupName: string, groupId: string): string;
802
+ inflightKey(shardId: number): string;
803
+ inflightDataKey(shardId: number): string;
804
+ workerQueueKey(consumerId: string): string;
805
+ deadLetterQueueKey(tenantId: string): string;
806
+ deadLetterQueueDataKey(tenantId: string): string;
807
+ /**
808
+ * Extract tenant ID from a queue ID.
809
+ * Default implementation assumes queue IDs are formatted as: tenant:{tenantId}:...
810
+ * Override this method for custom queue ID formats.
811
+ */
812
+ extractTenantId(queueId: string): string;
813
+ /**
814
+ * Extract a group ID from a queue ID.
815
+ * Default implementation looks for pattern: {groupName}:{groupId}:...
816
+ * Override this method for custom queue ID formats.
817
+ */
818
+ extractGroupId(groupName: string, queueId: string): string;
819
+ }
820
+ /**
821
+ * Key producer with custom extraction logic via callbacks.
822
+ * Useful when queue IDs don't follow a standard pattern.
823
+ */
824
+ declare class CallbackFairQueueKeyProducer extends DefaultFairQueueKeyProducer {
825
+ private readonly tenantExtractor;
826
+ private readonly groupExtractor;
827
+ constructor(options: {
828
+ prefix?: string;
829
+ separator?: string;
830
+ extractTenantId: (queueId: string) => string;
831
+ extractGroupId: (groupName: string, queueId: string) => string;
832
+ });
833
+ extractTenantId(queueId: string): string;
834
+ extractGroupId(groupName: string, queueId: string): string;
835
+ }
836
+
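// Usage sketch (illustrative, not part of the diff): the default key producer with a custom
// prefix, and a callback-based producer for queue IDs that don't follow "tenant:{tenantId}:...".
// The "{orgId}/{projectId}/{queueName}" format below is an assumed custom format.
import {
  CallbackFairQueueKeyProducer,
  DefaultFairQueueKeyProducer,
} from "@trigger.dev/redis-worker";

const defaultKeys = new DefaultFairQueueKeyProducer({ prefix: "myapp:fq" });

const callbackKeys = new CallbackFairQueueKeyProducer({
  prefix: "myapp:fq",
  extractTenantId: (queueId) => queueId.split("/")[0] ?? queueId,
  extractGroupId: (groupName, queueId) => {
    const [orgId, projectId] = queueId.split("/");
    return groupName === "project" ? projectId ?? "unknown" : orgId ?? queueId;
  },
});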
837
+ interface MasterQueueOptions {
838
+ redis: RedisOptions;
839
+ keys: FairQueueKeyProducer;
840
+ shardCount: number;
841
+ }
842
+ /**
843
+ * Master queue manages the top-level queue of queues.
844
+ *
845
+ * Features:
846
+ * - Sharding for horizontal scaling
847
+ * - Consistent hashing for queue-to-shard assignment
848
+ * - Queues scored by oldest message timestamp
849
+ */
850
+ declare class MasterQueue {
851
+ #private;
852
+ private options;
853
+ private redis;
854
+ private keys;
855
+ private shardCount;
856
+ constructor(options: MasterQueueOptions);
857
+ /**
858
+ * Get the shard ID for a queue.
859
+ * Uses consistent hashing based on queue ID.
860
+ */
861
+ getShardForQueue(queueId: string): number;
862
+ /**
863
+ * Add a queue to its master queue shard.
864
+ * Updates the score to the oldest message timestamp.
865
+ *
866
+ * @param queueId - The queue identifier
867
+ * @param oldestMessageTimestamp - Timestamp of the oldest message in the queue
868
+ */
869
+ addQueue(queueId: string, oldestMessageTimestamp: number): Promise<void>;
870
+ /**
871
+ * Update a queue's score in the master queue.
872
+ * This is typically called after dequeuing to update to the new oldest message.
873
+ *
874
+ * @param queueId - The queue identifier
875
+ * @param newOldestTimestamp - New timestamp of the oldest message
876
+ */
877
+ updateQueueScore(queueId: string, newOldestTimestamp: number): Promise<void>;
878
+ /**
879
+ * Remove a queue from its master queue shard.
880
+ * Called when a queue becomes empty.
881
+ *
882
+ * @param queueId - The queue identifier
883
+ */
884
+ removeQueue(queueId: string): Promise<void>;
885
+ /**
886
+ * Get queues from a shard, ordered by oldest message (lowest score first).
887
+ *
888
+ * @param shardId - The shard to query
889
+ * @param limit - Maximum number of queues to return (default: 1000)
890
+ * @param maxScore - Maximum score (timestamp) to include (default: now)
891
+ */
892
+ getQueuesFromShard(shardId: number, limit?: number, maxScore?: number): Promise<QueueWithScore[]>;
893
+ /**
894
+ * Get the number of queues in a shard.
895
+ */
896
+ getShardQueueCount(shardId: number): Promise<number>;
897
+ /**
898
+ * Get total queue count across all shards.
899
+ */
900
+ getTotalQueueCount(): Promise<number>;
901
+ /**
902
+ * Atomically add a queue to master queue only if queue has messages.
903
+ * Uses Lua script for atomicity.
904
+ *
905
+ * @param queueId - The queue identifier
906
+ * @param queueKey - The actual queue sorted set key
907
+ * @returns Whether the queue was added to the master queue
908
+ */
909
+ addQueueIfNotEmpty(queueId: string, queueKey: string): Promise<boolean>;
910
+ /**
911
+ * Atomically remove a queue from master queue only if queue is empty.
912
+ * Uses Lua script for atomicity.
913
+ *
914
+ * @param queueId - The queue identifier
915
+ * @param queueKey - The actual queue sorted set key
916
+ * @returns Whether the queue was removed from the master queue
917
+ */
918
+ removeQueueIfEmpty(queueId: string, queueKey: string): Promise<boolean>;
919
+ /**
920
+ * Close the Redis connection.
921
+ */
922
+ close(): Promise<void>;
923
+ }
924
+ declare module "@internal/redis" {
925
+ interface RedisCommander<Context> {
926
+ addQueueIfNotEmpty(masterKey: string, queueKey: string, queueId: string): Promise<number>;
927
+ removeQueueIfEmpty(masterKey: string, queueKey: string, queueId: string): Promise<number>;
928
+ }
929
+ }
930
+
931
+ interface ConcurrencyManagerOptions {
932
+ redis: RedisOptions;
933
+ keys: FairQueueKeyProducer;
934
+ groups: ConcurrencyGroupConfig[];
935
+ }
936
+ /**
937
+ * ConcurrencyManager handles multi-level concurrency tracking and limiting.
938
+ *
939
+ * Features:
940
+ * - Multiple concurrent concurrency groups (tenant, org, project, etc.)
941
+ * - Atomic reserve/release operations using Lua scripts
942
+ * - Efficient batch checking of all groups
943
+ */
944
+ declare class ConcurrencyManager {
945
+ #private;
946
+ private options;
947
+ private redis;
948
+ private keys;
949
+ private groups;
950
+ private groupsByName;
951
+ constructor(options: ConcurrencyManagerOptions);
952
+ /**
953
+ * Check if a message can be processed given all concurrency constraints.
954
+ * Checks all configured groups and returns the first one at capacity.
955
+ */
956
+ canProcess(queue: QueueDescriptor): Promise<ConcurrencyCheckResult>;
957
+ /**
958
+ * Reserve concurrency slots for a message across all groups.
959
+ * Atomic - either all groups are reserved or none.
960
+ *
961
+ * @returns true if reservation successful, false if any group is at capacity
962
+ */
963
+ reserve(queue: QueueDescriptor, messageId: string): Promise<boolean>;
964
+ /**
965
+ * Release concurrency slots for a message across all groups.
966
+ */
967
+ release(queue: QueueDescriptor, messageId: string): Promise<void>;
968
+ /**
969
+ * Get current concurrency for a specific group.
970
+ */
971
+ getCurrentConcurrency(groupName: string, groupId: string): Promise<number>;
972
+ /**
973
+ * Get concurrency limit for a specific group.
974
+ */
975
+ getConcurrencyLimit(groupName: string, groupId: string): Promise<number>;
976
+ /**
977
+ * Check if a group is at capacity.
978
+ */
979
+ isAtCapacity(groupName: string, groupId: string): Promise<boolean>;
980
+ /**
981
+ * Get full state for a group.
982
+ */
983
+ getState(groupName: string, groupId: string): Promise<ConcurrencyState>;
984
+ /**
985
+ * Get all active message IDs for a group.
986
+ */
987
+ getActiveMessages(groupName: string, groupId: string): Promise<string[]>;
988
+ /**
989
+ * Force-clear concurrency for a group (use with caution).
990
+ * Useful for cleanup after crashes.
991
+ */
992
+ clearGroup(groupName: string, groupId: string): Promise<void>;
993
+ /**
994
+ * Remove a specific message from concurrency tracking.
995
+ * Useful for cleanup.
996
+ */
997
+ removeMessage(messageId: string, queue: QueueDescriptor): Promise<void>;
998
+ /**
999
+ * Get configured group names.
1000
+ */
1001
+ getGroupNames(): string[];
1002
+ /**
1003
+ * Close the Redis connection.
1004
+ */
1005
+ close(): Promise<void>;
1006
+ }
1007
+ declare module "@internal/redis" {
1008
+ interface RedisCommander<Context> {
1009
+ reserveConcurrency(numKeys: number, keys: string[], messageId: string, ...limits: string[]): Promise<number>;
1010
+ }
1011
+ }
1012
+
1013
+ interface VisibilityManagerOptions {
1014
+ redis: RedisOptions;
1015
+ keys: FairQueueKeyProducer;
1016
+ shardCount: number;
1017
+ defaultTimeoutMs: number;
1018
+ logger?: {
1019
+ debug: (message: string, context?: Record<string, unknown>) => void;
1020
+ error: (message: string, context?: Record<string, unknown>) => void;
1021
+ };
1022
+ }
1023
+ /**
1024
+ * VisibilityManager handles message visibility timeouts for safe message processing.
1025
+ *
1026
+ * Features:
1027
+ * - Claim messages with visibility timeout
1028
+ * - Heartbeat to extend timeout
1029
+ * - Automatic reclaim of timed-out messages
1030
+ * - Per-shard in-flight tracking
1031
+ *
1032
+ * Data structures:
1033
+ * - In-flight sorted set: score = deadline timestamp, member = "{messageId}:{queueId}"
1034
+ * - In-flight data hash: field = messageId, value = JSON message data
1035
+ */
1036
+ declare class VisibilityManager {
1037
+ #private;
1038
+ private options;
1039
+ private redis;
1040
+ private keys;
1041
+ private shardCount;
1042
+ private defaultTimeoutMs;
1043
+ private logger;
1044
+ constructor(options: VisibilityManagerOptions);
1045
+ /**
1046
+ * Claim a message for processing.
1047
+ * Moves the message from its queue to the in-flight set with a visibility timeout.
1048
+ *
1049
+ * @param queueId - The queue to claim from
1050
+ * @param queueKey - The Redis key for the queue sorted set
1051
+ * @param queueItemsKey - The Redis key for the queue items hash
1052
+ * @param consumerId - ID of the consumer claiming the message
1053
+ * @param timeoutMs - Visibility timeout in milliseconds
1054
+ * @returns Claim result with the message if successful
1055
+ */
1056
+ claim<TPayload = unknown>(queueId: string, queueKey: string, queueItemsKey: string, consumerId: string, timeoutMs?: number): Promise<ClaimResult<TPayload>>;
1057
+ /**
1058
+ * Extend the visibility timeout for a message (heartbeat).
1059
+ *
1060
+ * @param messageId - The message ID
1061
+ * @param queueId - The queue ID
1062
+ * @param extendMs - Additional milliseconds to add to the deadline
1063
+ * @returns true if the heartbeat was successful
1064
+ */
1065
+ heartbeat(messageId: string, queueId: string, extendMs: number): Promise<boolean>;
1066
+ /**
1067
+ * Mark a message as successfully processed.
1068
+ * Removes the message from in-flight tracking.
1069
+ *
1070
+ * @param messageId - The message ID
1071
+ * @param queueId - The queue ID
1072
+ */
1073
+ complete(messageId: string, queueId: string): Promise<void>;
1074
+ /**
1075
+ * Release a message back to its queue.
1076
+ * Used when processing fails or consumer wants to retry later.
1077
+ *
1078
+ * @param messageId - The message ID
1079
+ * @param queueId - The queue ID
1080
+ * @param queueKey - The Redis key for the queue
1081
+ * @param queueItemsKey - The Redis key for the queue items hash
1082
+ * @param score - Optional score for the message (defaults to now)
1083
+ */
1084
+ release<TPayload = unknown>(messageId: string, queueId: string, queueKey: string, queueItemsKey: string, score?: number): Promise<void>;
1085
+ /**
1086
+ * Reclaim timed-out messages from a shard.
1087
+ * Returns messages to their original queues.
1088
+ *
1089
+ * @param shardId - The shard to check
1090
+ * @param getQueueKeys - Function to get queue keys for a queue ID
1091
+ * @returns Number of messages reclaimed
1092
+ */
1093
+ reclaimTimedOut(shardId: number, getQueueKeys: (queueId: string) => {
1094
+ queueKey: string;
1095
+ queueItemsKey: string;
1096
+ }): Promise<number>;
1097
+ /**
1098
+ * Get all in-flight messages for a shard.
1099
+ */
1100
+ getInflightMessages(shardId: number): Promise<Array<{
1101
+ messageId: string;
1102
+ queueId: string;
1103
+ deadline: number;
1104
+ }>>;
1105
+ /**
1106
+ * Get count of in-flight messages for a shard.
1107
+ */
1108
+ getInflightCount(shardId: number): Promise<number>;
1109
+ /**
1110
+ * Get total in-flight count across all shards.
1111
+ */
1112
+ getTotalInflightCount(): Promise<number>;
1113
+ /**
1114
+ * Close the Redis connection.
1115
+ */
1116
+ close(): Promise<void>;
1117
+ }
1118
+ declare module "@internal/redis" {
1119
+ interface RedisCommander<Context> {
1120
+ claimMessage(queueKey: string, queueItemsKey: string, inflightKey: string, inflightDataKey: string, queueId: string, consumerId: string, deadline: string): Promise<[string, string] | null>;
1121
+ releaseMessage(inflightKey: string, inflightDataKey: string, queueKey: string, queueItemsKey: string, member: string, messageId: string, score: string): Promise<number>;
1122
+ heartbeatMessage(inflightKey: string, member: string, newDeadline: string): Promise<number>;
1123
+ }
1124
+ }
1125
+
1126
+ interface WorkerQueueManagerOptions {
1127
+ redis: RedisOptions;
1128
+ keys: FairQueueKeyProducer;
1129
+ logger?: {
1130
+ debug: (message: string, context?: Record<string, unknown>) => void;
1131
+ error: (message: string, context?: Record<string, unknown>) => void;
1132
+ };
1133
+ }
1134
+ /**
1135
+ * WorkerQueueManager handles the intermediate worker queue layer.
1136
+ *
1137
+ * This provides:
1138
+ * - Low-latency message delivery via blocking pop (BLPOP)
1139
+ * - Routing of messages to specific workers/consumers
1140
+ * - Efficient waiting without polling
1141
+ *
1142
+ * Flow:
1143
+ * 1. Master queue consumer claims message from message queue
1144
+ * 2. Message key is pushed to worker queue
1145
+ * 3. Worker queue consumer does blocking pop to receive message
1146
+ */
1147
+ declare class WorkerQueueManager {
1148
+ #private;
1149
+ private options;
1150
+ private redis;
1151
+ private keys;
1152
+ private logger;
1153
+ constructor(options: WorkerQueueManagerOptions);
1154
+ /**
1155
+ * Push a message key to a worker queue.
1156
+ * Called after claiming a message from the message queue.
1157
+ *
1158
+ * @param workerQueueId - The worker queue identifier
1159
+ * @param messageKey - The message key to push (typically "messageId:queueId")
1160
+ */
1161
+ push(workerQueueId: string, messageKey: string): Promise<void>;
1162
+ /**
1163
+ * Push multiple message keys to a worker queue.
1164
+ *
1165
+ * @param workerQueueId - The worker queue identifier
1166
+ * @param messageKeys - The message keys to push
1167
+ */
1168
+ pushBatch(workerQueueId: string, messageKeys: string[]): Promise<void>;
1169
+ /**
1170
+ * Blocking pop from a worker queue.
1171
+ * Waits until a message is available or timeout expires.
1172
+ *
1173
+ * @param workerQueueId - The worker queue identifier
1174
+ * @param timeoutSeconds - Maximum time to wait (0 = wait forever)
1175
+ * @param signal - Optional abort signal to cancel waiting
1176
+ * @returns The message key, or null if timeout
1177
+ */
1178
+ blockingPop(workerQueueId: string, timeoutSeconds: number, signal?: AbortSignal): Promise<string | null>;
1179
+ /**
1180
+ * Non-blocking pop from a worker queue.
1181
+ *
1182
+ * @param workerQueueId - The worker queue identifier
1183
+ * @returns The message key and queue length, or null if empty
1184
+ */
1185
+ pop(workerQueueId: string): Promise<{
1186
+ messageKey: string;
1187
+ queueLength: number;
1188
+ } | null>;
1189
+ /**
1190
+ * Get the current length of a worker queue.
1191
+ */
1192
+ getLength(workerQueueId: string): Promise<number>;
1193
+ /**
1194
+ * Peek at all messages in a worker queue without removing them.
1195
+ * Useful for debugging and tests.
1196
+ */
1197
+ peek(workerQueueId: string): Promise<string[]>;
1198
+ /**
1199
+ * Remove a specific message from the worker queue.
1200
+ * Used when a message needs to be removed without processing.
1201
+ *
1202
+ * @param workerQueueId - The worker queue identifier
1203
+ * @param messageKey - The message key to remove
1204
+ * @returns Number of removed items
1205
+ */
1206
+ remove(workerQueueId: string, messageKey: string): Promise<number>;
1207
+ /**
1208
+ * Clear all messages from a worker queue.
1209
+ */
1210
+ clear(workerQueueId: string): Promise<void>;
1211
+ /**
1212
+ * Close the Redis connection.
1213
+ */
1214
+ close(): Promise<void>;
1215
+ /**
1216
+ * Register custom commands on an external Redis client.
1217
+ * Use this when initializing FairQueue with worker queues.
1218
+ */
1219
+ registerCommands(redis: Redis): void;
1220
+ }
1221
+ declare module "@internal/redis" {
1222
+ interface RedisCommander<Context> {
1223
+ popWithLength(workerQueueKey: string): Promise<[string, string] | null>;
1224
+ }
1225
+ }
1226
+
1227
+ /**
1228
+ * Base class for scheduler implementations.
1229
+ * Provides common utilities and default implementations.
1230
+ */
1231
+ declare abstract class BaseScheduler implements FairScheduler {
1232
+ /**
1233
+ * Select queues for processing from a master queue shard.
1234
+ * Must be implemented by subclasses.
1235
+ */
1236
+ abstract selectQueues(masterQueueShard: string, consumerId: string, context: SchedulerContext): Promise<TenantQueues[]>;
1237
+ /**
1238
+ * Called after processing a message to update scheduler state.
1239
+ * Default implementation does nothing.
1240
+ */
1241
+ recordProcessed(_tenantId: string, _queueId: string): Promise<void>;
1242
+ /**
1243
+ * Initialize the scheduler.
1244
+ * Default implementation does nothing.
1245
+ */
1246
+ initialize(): Promise<void>;
1247
+ /**
1248
+ * Cleanup scheduler resources.
1249
+ * Default implementation does nothing.
1250
+ */
1251
+ close(): Promise<void>;
1252
+ /**
1253
+ * Helper to group queues by tenant.
1254
+ */
1255
+ protected groupQueuesByTenant(queues: Array<{
1256
+ queueId: string;
1257
+ tenantId: string;
1258
+ }>): Map<string, string[]>;
1259
+ /**
1260
+ * Helper to convert grouped queues to TenantQueues array.
1261
+ */
1262
+ protected toTenantQueuesArray(grouped: Map<string, string[]>): TenantQueues[];
1263
+ /**
1264
+ * Helper to filter out tenants at capacity.
1265
+ */
1266
+ protected filterAtCapacity(tenants: TenantQueues[], context: SchedulerContext, groupName?: string): Promise<TenantQueues[]>;
1267
+ }
1268
+ /**
1269
+ * Simple noop scheduler that returns empty results.
1270
+ * Useful for testing or disabling scheduling.
1271
+ */
1272
+ declare class NoopScheduler extends BaseScheduler {
1273
+ selectQueues(_masterQueueShard: string, _consumerId: string, _context: SchedulerContext): Promise<TenantQueues[]>;
1274
+ }
1275
+
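// Usage sketch (illustrative, not part of the diff): a tiny custom scheduler built on the
// BaseScheduler helpers. It serves a fixed tenant-to-queues map (handy in tests) and skips
// tenants whose "tenant" concurrency group is at capacity. The production schedulers (DRR,
// weighted shuffle, round robin) are declared below.
import { BaseScheduler } from "@trigger.dev/redis-worker";
import type { SchedulerContext, TenantQueues } from "@trigger.dev/redis-worker";

class StaticScheduler extends BaseScheduler {
  constructor(private readonly queuesByTenant: Map<string, string[]>) {
    super();
  }

  async selectQueues(
    _masterQueueShard: string,
    _consumerId: string,
    context: SchedulerContext
  ): Promise<TenantQueues[]> {
    const tenants = this.toTenantQueuesArray(this.queuesByTenant);
    // Assumes a concurrency group named "tenant" is configured on the FairQueue.
    return this.filterAtCapacity(tenants, context, "tenant");
  }
}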
1276
+ /**
1277
+ * Deficit Round Robin (DRR) Scheduler.
1278
+ *
1279
+ * DRR ensures fair processing across tenants by:
1280
+ * - Allocating a "quantum" of credits to each tenant per round
1281
+ * - Accumulating unused credits as "deficit"
1282
+ * - Processing from tenants with available deficit
1283
+ * - Capping deficit to prevent starvation
1284
+ *
1285
+ * Key improvements over basic implementations:
1286
+ * - Atomic deficit operations using Lua scripts
1287
+ * - Efficient iteration through tenants
1288
+ * - Automatic deficit cleanup for inactive tenants
1289
+ */
1290
+ declare class DRRScheduler extends BaseScheduler {
1291
+ #private;
1292
+ private config;
1293
+ private redis;
1294
+ private keys;
1295
+ private quantum;
1296
+ private maxDeficit;
1297
+ private logger;
1298
+ constructor(config: DRRSchedulerConfig);
1299
+ /**
1300
+ * Select queues for processing using DRR algorithm.
1301
+ *
1302
+ * Algorithm:
1303
+ * 1. Get all queues from the master shard
1304
+ * 2. Group by tenant
1305
+ * 3. Filter out tenants at concurrency capacity
1306
+ * 4. Add quantum to each tenant's deficit (atomically)
1307
+ * 5. Select queues from tenants with deficit >= 1
1308
+ * 6. Order tenants by deficit (highest first for fairness)
1309
+ */
1310
+ selectQueues(masterQueueShard: string, consumerId: string, context: SchedulerContext): Promise<TenantQueues[]>;
1311
+ /**
1312
+ * Record that a message was processed from a tenant.
1313
+ * Decrements the tenant's deficit.
1314
+ */
1315
+ recordProcessed(tenantId: string, _queueId: string): Promise<void>;
1316
+ close(): Promise<void>;
1317
+ /**
1318
+ * Get the current deficit for a tenant.
1319
+ */
1320
+ getDeficit(tenantId: string): Promise<number>;
1321
+ /**
1322
+ * Reset deficit for a tenant.
1323
+ * Used when a tenant has no more active queues.
1324
+ */
1325
+ resetDeficit(tenantId: string): Promise<void>;
1326
+ /**
1327
+ * Get all tenant deficits.
1328
+ */
1329
+ getAllDeficits(): Promise<Map<string, number>>;
1330
+ }
1331
+ declare module "@internal/redis" {
1332
+ interface RedisCommander<Context> {
1333
+ drrAddQuantum(deficitKey: string, quantum: string, maxDeficit: string, ...tenantIds: string[]): Promise<string[]>;
1334
+ drrDecrementDeficit(deficitKey: string, tenantId: string): Promise<string>;
1335
+ }
1336
+ }
1337
+
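// Usage sketch (illustrative, not part of the diff): constructing a DRRScheduler. The Redis
// options shown assume standard ioredis-style connection settings.
import { DRRScheduler, DefaultFairQueueKeyProducer } from "@trigger.dev/redis-worker";

const drrScheduler = new DRRScheduler({
  redis: { host: "127.0.0.1", port: 6379 },
  keys: new DefaultFairQueueKeyProducer({ prefix: "myapp:fq" }),
  quantum: 10, // credits granted to each tenant per round
  maxDeficit: 100, // cap on accumulated unused credits
});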
1338
+ /**
1339
+ * Weighted Shuffle Scheduler.
1340
+ *
1341
+ * Uses weighted random selection to balance between:
1342
+ * - Concurrency limit (higher limits get more weight)
1343
+ * - Available capacity (tenants with more capacity get more weight)
1344
+ * - Queue age (older queues get priority, with configurable randomization)
1345
+ *
1346
+ * Features:
1347
+ * - Snapshot caching to reduce Redis calls
1348
+ * - Configurable biases for fine-tuning
1349
+ * - Maximum tenant count to limit iteration
1350
+ */
1351
+ declare class WeightedScheduler extends BaseScheduler {
1352
+ #private;
1353
+ private config;
1354
+ private redis;
1355
+ private keys;
1356
+ private rng;
1357
+ private biases;
1358
+ private defaultTenantLimit;
1359
+ private masterQueueLimit;
1360
+ private reuseSnapshotCount;
1361
+ private maximumTenantCount;
1362
+ private snapshotCache;
1363
+ constructor(config: WeightedSchedulerConfig);
1364
+ selectQueues(masterQueueShard: string, consumerId: string, context: SchedulerContext): Promise<TenantQueues[]>;
1365
+ close(): Promise<void>;
1366
+ }
1367
+
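// Usage sketch (illustrative, not part of the diff): a WeightedScheduler biased towards
// tenants with spare capacity, keeping queue ordering mostly age-based. Redis options are
// again assumed to be ioredis-style.
import { WeightedScheduler, DefaultFairQueueKeyProducer } from "@trigger.dev/redis-worker";

const weightedScheduler = new WeightedScheduler({
  redis: { host: "127.0.0.1", port: 6379 },
  keys: new DefaultFairQueueKeyProducer({ prefix: "myapp:fq" }),
  defaultTenantConcurrencyLimit: 10,
  masterQueueLimit: 1000,
  biases: {
    concurrencyLimitBias: 0.5,
    availableCapacityBias: 0.8,
    queueAgeRandomization: 0.1,
  },
});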
1368
+ interface RoundRobinSchedulerConfig {
1369
+ redis: RedisOptions;
1370
+ keys: FairQueueKeyProducer;
1371
+ /** Maximum queues to fetch from master queue per iteration */
1372
+ masterQueueLimit?: number;
1373
+ }
1374
+ /**
1375
+ * Round Robin Scheduler.
1376
+ *
1377
+ * Simple scheduler that processes tenants in strict rotation order.
1378
+ * Maintains a "last served" pointer in Redis to track position.
1379
+ *
1380
+ * Features:
1381
+ * - Predictable ordering (good for debugging)
1382
+ * - Fair rotation through all tenants
1383
+ * - No weighting or bias
1384
+ */
1385
+ declare class RoundRobinScheduler extends BaseScheduler {
1386
+ #private;
1387
+ private config;
1388
+ private redis;
1389
+ private keys;
1390
+ private masterQueueLimit;
1391
+ constructor(config: RoundRobinSchedulerConfig);
1392
+ selectQueues(masterQueueShard: string, consumerId: string, context: SchedulerContext): Promise<TenantQueues[]>;
1393
+ close(): Promise<void>;
1394
+ }
1395
+
1396
+ /**
1397
+ * Semantic attributes for fair queue messaging operations.
1398
+ */
1399
+ declare const FairQueueAttributes: {
1400
+ readonly QUEUE_ID: "fairqueue.queue_id";
1401
+ readonly TENANT_ID: "fairqueue.tenant_id";
1402
+ readonly MESSAGE_ID: "fairqueue.message_id";
1403
+ readonly SHARD_ID: "fairqueue.shard_id";
1404
+ readonly WORKER_QUEUE: "fairqueue.worker_queue";
1405
+ readonly CONSUMER_ID: "fairqueue.consumer_id";
1406
+ readonly ATTEMPT: "fairqueue.attempt";
1407
+ readonly CONCURRENCY_GROUP: "fairqueue.concurrency_group";
1408
+ readonly MESSAGE_COUNT: "fairqueue.message_count";
1409
+ readonly RESULT: "fairqueue.result";
1410
+ };
1411
+ /**
1412
+ * Standard messaging semantic attributes.
1413
+ */
1414
+ declare const MessagingAttributes: {
1415
+ readonly SYSTEM: "messaging.system";
1416
+ readonly OPERATION: "messaging.operation";
1417
+ readonly MESSAGE_ID: "messaging.message_id";
1418
+ readonly DESTINATION_NAME: "messaging.destination.name";
1419
+ };
1420
+ /**
1421
+ * FairQueue metrics collection.
1422
+ */
1423
+ interface FairQueueMetrics {
1424
+ messagesEnqueued: Counter;
1425
+ messagesCompleted: Counter;
1426
+ messagesFailed: Counter;
1427
+ messagesRetried: Counter;
1428
+ messagesToDLQ: Counter;
1429
+ processingTime: Histogram;
1430
+ queueTime: Histogram;
1431
+ queueLength: ObservableGauge;
1432
+ masterQueueLength: ObservableGauge;
1433
+ inflightCount: ObservableGauge;
1434
+ dlqLength: ObservableGauge;
1435
+ }
1436
+ /**
1437
+ * Options for creating FairQueue telemetry.
1438
+ */
1439
+ interface TelemetryOptions {
1440
+ tracer?: Tracer;
1441
+ meter?: Meter;
1442
+ /** Custom name for metrics prefix */
1443
+ name?: string;
1444
+ }
1445
+ /**
1446
+ * Telemetry helper for FairQueue.
1447
+ *
1448
+ * Provides:
1449
+ * - Span creation with proper attributes
1450
+ * - Metric recording
1451
+ * - Context propagation helpers
1452
+ */
1453
+ declare class FairQueueTelemetry {
1454
+ #private;
1455
+ private tracer?;
1456
+ private meter?;
1457
+ private metrics?;
1458
+ private name;
1459
+ constructor(options: TelemetryOptions);
1460
+ /**
1461
+ * Create a traced span for an operation.
1462
+ * Returns the result of the function, or throws any error after recording it.
1463
+ */
1464
+ trace<T>(name: string, fn: (span: Span) => Promise<T>, options?: {
1465
+ kind?: SpanKind;
1466
+ attributes?: Attributes;
1467
+ }): Promise<T>;
1468
+ /**
1469
+ * Synchronous version of trace.
1470
+ */
1471
+ traceSync<T>(name: string, fn: (span: Span) => T, options?: {
1472
+ kind?: SpanKind;
1473
+ attributes?: Attributes;
1474
+ }): T;
1475
+ /**
1476
+ * Record a message enqueued.
1477
+ */
1478
+ recordEnqueue(attributes?: Attributes): void;
1479
+ /**
1480
+ * Record a batch of messages enqueued.
1481
+ */
1482
+ recordEnqueueBatch(count: number, attributes?: Attributes): void;
1483
+ /**
1484
+ * Record a message completed successfully.
1485
+ */
1486
+ recordComplete(attributes?: Attributes): void;
1487
+ /**
1488
+ * Record a message processing failure.
1489
+ */
1490
+ recordFailure(attributes?: Attributes): void;
1491
+ /**
1492
+ * Record a message retry.
1493
+ */
1494
+ recordRetry(attributes?: Attributes): void;
1495
+ /**
1496
+ * Record a message sent to DLQ.
1497
+ */
1498
+ recordDLQ(attributes?: Attributes): void;
1499
+ /**
1500
+ * Record message processing time.
1501
+ *
1502
+ * @param durationMs - Processing duration in milliseconds
1503
+ */
1504
+ recordProcessingTime(durationMs: number, attributes?: Attributes): void;
1505
+ /**
1506
+ * Record time a message spent waiting in queue.
1507
+ *
1508
+ * @param durationMs - Queue wait time in milliseconds
1509
+ */
1510
+ recordQueueTime(durationMs: number, attributes?: Attributes): void;
1511
+ /**
1512
+ * Register observable gauge callbacks.
1513
+ * Call this after FairQueue is initialized to register the gauge callbacks.
1514
+ */
1515
+ registerGaugeCallbacks(callbacks: {
1516
+ getQueueLength?: (queueId: string) => Promise<number>;
1517
+ getMasterQueueLength?: (shardId: number) => Promise<number>;
1518
+ getInflightCount?: (shardId: number) => Promise<number>;
1519
+ getDLQLength?: (tenantId: string) => Promise<number>;
1520
+ shardCount?: number;
1521
+ observedQueues?: string[];
1522
+ observedTenants?: string[];
1523
+ }): void;
1524
+ /**
1525
+ * Create standard attributes for a message operation.
1526
+ */
1527
+ messageAttributes(params: {
1528
+ queueId?: string;
1529
+ tenantId?: string;
1530
+ messageId?: string;
1531
+ attempt?: number;
1532
+ workerQueue?: string;
1533
+ consumerId?: string;
1534
+ }): Attributes;
1535
+ /**
1536
+ * Check if telemetry is enabled.
1537
+ */
1538
+ get isEnabled(): boolean;
1539
+ /**
1540
+ * Check if tracing is enabled.
1541
+ */
1542
+ get hasTracer(): boolean;
1543
+ /**
1544
+ * Check if metrics are enabled.
1545
+ */
1546
+ get hasMetrics(): boolean;
1547
+ }
1548
+ /**
1549
+ * No-op telemetry instance for when telemetry is disabled.
1550
+ */
1551
+ declare const noopTelemetry: FairQueueTelemetry;
1552
+
1553
+ /**
1554
+ * FairQueue is the main orchestrator for fair queue processing.
1555
+ *
1556
+ * It coordinates:
1557
+ * - Master queue with sharding (using jump consistent hash)
1558
+ * - Fair scheduling via pluggable schedulers
1559
+ * - Multi-level concurrency limiting
1560
+ * - Visibility timeouts with heartbeats
1561
+ * - Worker queues with blocking pop
1562
+ * - Retry strategies with dead letter queue
1563
+ * - OpenTelemetry tracing and metrics
1564
+ *
1565
+ * @typeParam TPayloadSchema - Zod schema for message payload validation
1566
+ */
1567
+ declare class FairQueue<TPayloadSchema extends z.ZodTypeAny = z.ZodUnknown> {
1568
+ #private;
1569
+ private options;
1570
+ private redis;
1571
+ private keys;
1572
+ private scheduler;
1573
+ private masterQueue;
1574
+ private concurrencyManager?;
1575
+ private visibilityManager;
1576
+ private workerQueueManager?;
1577
+ private telemetry;
1578
+ private logger;
1579
+ private payloadSchema?;
1580
+ private validateOnEnqueue;
1581
+ private retryStrategy?;
1582
+ private deadLetterQueueEnabled;
1583
+ private shardCount;
1584
+ private consumerCount;
1585
+ private consumerIntervalMs;
1586
+ private visibilityTimeoutMs;
1587
+ private heartbeatIntervalMs;
1588
+ private reclaimIntervalMs;
1589
+ private workerQueueEnabled;
1590
+ private workerQueueBlockingTimeoutSeconds;
1591
+ private workerQueueResolver?;
1592
+ private cooloffEnabled;
1593
+ private cooloffThreshold;
1594
+ private cooloffPeriodMs;
1595
+ private queueCooloffStates;
1596
+ private globalRateLimiter?;
1597
+ private messageHandler?;
1598
+ private isRunning;
1599
+ private abortController;
1600
+ private masterQueueConsumerLoops;
1601
+ private workerQueueConsumerLoops;
1602
+ private reclaimLoop?;
1603
+ private queueDescriptorCache;
1604
+ constructor(options: FairQueueOptions<TPayloadSchema>);
1605
+ /**
1606
+ * Register observable gauge callbacks for telemetry.
1607
+ * Call this after FairQueue is created to enable gauge metrics.
1608
+ *
1609
+ * @param options.observedTenants - List of tenant IDs to observe for DLQ metrics
1610
+ */
1611
+ registerTelemetryGauges(options?: {
1612
+ observedTenants?: string[];
1613
+ }): void;
1614
+ /**
1615
+ * Set the message handler for processing dequeued messages.
1616
+ */
1617
+ onMessage(handler: MessageHandler<z.infer<TPayloadSchema>>): void;
1618
+ /**
1619
+ * Enqueue a single message to a queue.
1620
+ */
1621
+ enqueue(options: EnqueueOptions<z.infer<TPayloadSchema>>): Promise<string>;
1622
+ /**
1623
+ * Enqueue multiple messages to a queue.
1624
+ */
1625
+ enqueueBatch(options: EnqueueBatchOptions<z.infer<TPayloadSchema>>): Promise<string[]>;
1626
+ /**
1627
+ * Get messages from the dead letter queue for a tenant.
1628
+ */
1629
+ getDeadLetterMessages(tenantId: string, limit?: number): Promise<DeadLetterMessage<z.infer<TPayloadSchema>>[]>;
1630
+ /**
1631
+ * Redrive a message from DLQ back to its original queue.
1632
+ */
1633
+ redriveMessage(tenantId: string, messageId: string): Promise<boolean>;
1634
+ /**
1635
+ * Redrive all messages from DLQ back to their original queues.
1636
+ */
1637
+ redriveAll(tenantId: string): Promise<number>;
1638
+ /**
1639
+ * Purge all messages from a tenant's DLQ.
1640
+ */
1641
+ purgeDeadLetterQueue(tenantId: string): Promise<number>;
1642
+ /**
1643
+ * Get the number of messages in a tenant's DLQ.
1644
+ */
1645
+ getDeadLetterQueueLength(tenantId: string): Promise<number>;
1646
+ /**
1647
+ * Start the consumer loops and reclaim loop.
1648
+ */
1649
+ start(): void;
1650
+ /**
1651
+ * Stop the consumer loops gracefully.
1652
+ */
1653
+ stop(): Promise<void>;
1654
+ /**
1655
+ * Close all resources.
1656
+ */
1657
+ close(): Promise<void>;
1658
+ /**
1659
+ * Get the number of messages in a queue.
1660
+ */
1661
+ getQueueLength(queueId: string): Promise<number>;
1662
+ /**
1663
+ * Get total queue count across all shards.
1664
+ */
1665
+ getTotalQueueCount(): Promise<number>;
1666
+ /**
1667
+ * Get total in-flight message count.
1668
+ */
1669
+ getTotalInflightCount(): Promise<number>;
1670
+ /**
1671
+ * Get the shard ID for a queue.
1672
+ */
1673
+ getShardForQueue(queueId: string): number;
1674
+ }
1675
+ declare module "@internal/redis" {
1676
+ interface RedisCommander<Context> {
1677
+ enqueueMessageAtomic(queueKey: string, queueItemsKey: string, masterQueueKey: string, queueId: string, messageId: string, timestamp: string, payload: string): Promise<number>;
1678
+ enqueueBatchAtomic(queueKey: string, queueItemsKey: string, masterQueueKey: string, queueId: string, ...args: string[]): Promise<number>;
1679
+ updateMasterQueueIfEmpty(masterQueueKey: string, queueKey: string, queueId: string): Promise<number>;
1680
+ }
1681
+ }
1682
+
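// Usage sketch (illustrative, not part of the diff): wiring FairQueue together with a key
// producer, a scheduler, a retry strategy and a message handler. Redis options are assumed to
// be ioredis-style; the payload schema, tenant IDs and queue IDs are illustrative.
import {
  DRRScheduler,
  DefaultFairQueueKeyProducer,
  FairQueue,
  createDefaultRetryStrategy,
} from "@trigger.dev/redis-worker";
import { z } from "zod";

const redis = { host: "127.0.0.1", port: 6379 }; // assumed ioredis-style RedisOptions
const keys = new DefaultFairQueueKeyProducer({ prefix: "myapp:fq" });

const fairQueue = new FairQueue({
  redis,
  keys,
  scheduler: new DRRScheduler({ redis, keys, quantum: 10, maxDeficit: 100 }),
  payloadSchema: z.object({ url: z.string() }),
  validateOnEnqueue: true,
  shardCount: 2,
  concurrencyGroups: [
    {
      name: "tenant",
      extractGroupId: (queue) => queue.tenantId,
      getLimit: async () => 10,
      defaultLimit: 10,
    },
  ],
  retry: { strategy: createDefaultRetryStrategy(), deadLetterQueue: true },
  visibilityTimeoutMs: 30_000,
  consumerCount: 2,
  startConsumers: false,
});

fairQueue.onMessage(async (ctx) => {
  await fetch(ctx.message.payload.url);
  await ctx.complete();
});

fairQueue.start();

// Later: inspect and redrive dead-lettered messages, then shut down gracefully.
async function shutdown() {
  const dead = await fairQueue.getDeadLetterMessages("acme", 10);
  if (dead.length > 0) {
    await fairQueue.redriveAll("acme");
  }
  await fairQueue.stop();
  await fairQueue.close();
}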
1683
+ export { type AnyMessageCatalog, type AnyQueueItem, BaseScheduler, CallbackFairQueueKeyProducer, type ClaimResult, type ConcurrencyCheckResult, type ConcurrencyGroupConfig, ConcurrencyManager, type ConcurrencyManagerOptions, type ConcurrencyState, type CooloffOptions, CronSchema, CustomRetry, DRRScheduler, type DRRSchedulerConfig, type DeadLetterMessage, DefaultFairQueueKeyProducer, type EnqueueBatchOptions, type EnqueueOptions, ExponentialBackoffRetry, FairQueue, FairQueueAttributes, type FairQueueKeyProducer, type FairQueueMetrics, type FairQueueOptions, FairQueueTelemetry, type FairScheduler, FixedDelayRetry, type GlobalRateLimiter, ImmediateRetry, type InFlightMessage, type JobHandler, type JobHandlerParams, LinearBackoffRetry, MasterQueue, type MasterQueueOptions, type MessageCatalogKey, type MessageCatalogSchema, type MessageCatalogValue, type MessageHandler, type MessageHandlerContext, MessagingAttributes, NoRetry, NoopScheduler, type QueueCooloffState, type QueueDescriptor, type QueueItem, type QueueMessage, type QueueWithScore, type RetryOptions, type RetryStrategy, RoundRobinScheduler, type SchedulerContext, SimpleQueue, type StoredMessage, type TelemetryOptions, type TenantQueues, VisibilityManager, type VisibilityManagerOptions, WeightedScheduler, type WeightedSchedulerBiases, type WeightedSchedulerConfig, Worker, type WorkerCatalog, type WorkerConcurrencyOptions, WorkerQueueManager, type WorkerQueueManagerOptions, type WorkerQueueOptions, createDefaultRetryStrategy, defaultRetryOptions, noopTelemetry };