@trigger.dev/redis-worker 4.3.0 → 4.3.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.cjs +3834 -3
- package/dist/index.cjs.map +1 -1
- package/dist/index.d.cts +1699 -5
- package/dist/index.d.ts +1699 -5
- package/dist/index.js +3806 -2
- package/dist/index.js.map +1 -1
- package/package.json +4 -2
package/dist/index.d.ts
CHANGED
|
@@ -1,8 +1,8 @@
|
|
|
1
|
-
import { Callback, Result, RedisOptions } from '@internal/redis';
|
|
1
|
+
import { Callback, Result, RedisOptions, Redis } from '@internal/redis';
|
|
2
2
|
import { Logger } from '@trigger.dev/core/logger';
|
|
3
3
|
import { z } from 'zod';
|
|
4
|
-
import { Tracer, Meter } from '@internal/tracing';
|
|
5
|
-
import { RetryOptions } from '@trigger.dev/core/v3/schemas';
|
|
4
|
+
import { Tracer, Meter, Counter, Histogram, ObservableGauge, Span, SpanKind, Attributes, Context } from '@internal/tracing';
|
|
5
|
+
import { RetryOptions as RetryOptions$1 } from '@trigger.dev/core/v3/schemas';
|
|
6
6
|
|
|
7
7
|
interface MessageCatalogSchema {
|
|
8
8
|
[key: string]: z.ZodFirstPartySchemaTypes | z.ZodDiscriminatedUnion<any, any>;
|
|
@@ -101,7 +101,7 @@ type WorkerCatalog = {
|
|
|
101
101
|
[key: string]: {
|
|
102
102
|
schema: z.ZodFirstPartySchemaTypes | z.ZodDiscriminatedUnion<any, any>;
|
|
103
103
|
visibilityTimeoutMs: number;
|
|
104
|
-
retry?: RetryOptions;
|
|
104
|
+
retry?: RetryOptions$1;
|
|
105
105
|
cron?: string;
|
|
106
106
|
jitterInMs?: number;
|
|
107
107
|
/** Defaults to true. If false, errors will not be logged. */
|
|
@@ -229,4 +229,1698 @@ declare class Worker<TCatalog extends WorkerCatalog> {
|
|
|
229
229
|
stop(): Promise<void>;
|
|
230
230
|
}
|
|
231
231
|
|
|
232
|
-
|
|
232
|
+
/**
|
|
233
|
+
* Check if an error is an AbortError.
|
|
234
|
+
*
|
|
235
|
+
* This handles both:
|
|
236
|
+
* - Custom abort errors created with `new Error("AbortError")` (sets .message)
|
|
237
|
+
* - Native Node.js AbortError from timers/promises (sets .name)
|
|
238
|
+
*/
|
|
239
|
+
declare function isAbortError(error: unknown): boolean;
|
|
240
|
+
|
|
241
|
+
/**
|
|
242
|
+
* RetryStrategy interface for pluggable retry logic.
|
|
243
|
+
*/
|
|
244
|
+
interface RetryStrategy {
|
|
245
|
+
/**
|
|
246
|
+
* Calculate the next retry delay in milliseconds.
|
|
247
|
+
* Return null to indicate the message should be sent to DLQ.
|
|
248
|
+
*
|
|
249
|
+
* @param attempt - Current attempt number (1-indexed)
|
|
250
|
+
* @param error - Optional error from the failed attempt
|
|
251
|
+
* @returns Delay in milliseconds, or null to send to DLQ
|
|
252
|
+
*/
|
|
253
|
+
getNextDelay(attempt: number, error?: Error): number | null;
|
|
254
|
+
/**
|
|
255
|
+
* Maximum number of attempts before moving to DLQ.
|
|
256
|
+
*/
|
|
257
|
+
maxAttempts: number;
|
|
258
|
+
}
|
|
259
|
+
/**
|
|
260
|
+
* Exponential backoff retry strategy.
|
|
261
|
+
*
|
|
262
|
+
* Uses the same algorithm as @trigger.dev/core's calculateNextRetryDelay.
|
|
263
|
+
*/
|
|
264
|
+
declare class ExponentialBackoffRetry implements RetryStrategy {
|
|
265
|
+
readonly maxAttempts: number;
|
|
266
|
+
private options;
|
|
267
|
+
constructor(options?: Partial<RetryOptions$1>);
|
|
268
|
+
getNextDelay(attempt: number, _error?: Error): number | null;
|
|
269
|
+
}
|
|
270
|
+
/**
|
|
271
|
+
* Fixed delay retry strategy.
|
|
272
|
+
*
|
|
273
|
+
* Always waits the same amount of time between retries.
|
|
274
|
+
*/
|
|
275
|
+
declare class FixedDelayRetry implements RetryStrategy {
|
|
276
|
+
readonly maxAttempts: number;
|
|
277
|
+
private delayMs;
|
|
278
|
+
constructor(options: {
|
|
279
|
+
maxAttempts: number;
|
|
280
|
+
delayMs: number;
|
|
281
|
+
});
|
|
282
|
+
getNextDelay(attempt: number, _error?: Error): number | null;
|
|
283
|
+
}
|
|
284
|
+
/**
|
|
285
|
+
* Linear backoff retry strategy.
|
|
286
|
+
*
|
|
287
|
+
* Delay increases linearly with each attempt.
|
|
288
|
+
*/
|
|
289
|
+
declare class LinearBackoffRetry implements RetryStrategy {
|
|
290
|
+
readonly maxAttempts: number;
|
|
291
|
+
private baseDelayMs;
|
|
292
|
+
private maxDelayMs;
|
|
293
|
+
constructor(options: {
|
|
294
|
+
maxAttempts: number;
|
|
295
|
+
baseDelayMs: number;
|
|
296
|
+
maxDelayMs?: number;
|
|
297
|
+
});
|
|
298
|
+
getNextDelay(attempt: number, _error?: Error): number | null;
|
|
299
|
+
}
|
|
300
|
+
/**
|
|
301
|
+
* No retry strategy.
|
|
302
|
+
*
|
|
303
|
+
* Messages go directly to DLQ on first failure.
|
|
304
|
+
*/
|
|
305
|
+
declare class NoRetry implements RetryStrategy {
|
|
306
|
+
readonly maxAttempts = 1;
|
|
307
|
+
getNextDelay(_attempt: number, _error?: Error): number | null;
|
|
308
|
+
}
|
|
309
|
+
/**
|
|
310
|
+
* Immediate retry strategy.
|
|
311
|
+
*
|
|
312
|
+
* Retries immediately without any delay.
|
|
313
|
+
*/
|
|
314
|
+
declare class ImmediateRetry implements RetryStrategy {
|
|
315
|
+
readonly maxAttempts: number;
|
|
316
|
+
constructor(maxAttempts: number);
|
|
317
|
+
getNextDelay(attempt: number, _error?: Error): number | null;
|
|
318
|
+
}
|
|
319
|
+
/**
|
|
320
|
+
* Custom retry strategy that uses a user-provided function.
|
|
321
|
+
*/
|
|
322
|
+
declare class CustomRetry implements RetryStrategy {
|
|
323
|
+
readonly maxAttempts: number;
|
|
324
|
+
private calculateDelay;
|
|
325
|
+
constructor(options: {
|
|
326
|
+
maxAttempts: number;
|
|
327
|
+
calculateDelay: (attempt: number, error?: Error) => number | null;
|
|
328
|
+
});
|
|
329
|
+
getNextDelay(attempt: number, error?: Error): number | null;
|
|
330
|
+
}
|
|
331
|
+
/**
|
|
332
|
+
* Default retry options matching @trigger.dev/core defaults.
|
|
333
|
+
*/
|
|
334
|
+
declare const defaultRetryOptions: RetryOptions$1;
|
|
335
|
+
/**
|
|
336
|
+
* Create an exponential backoff retry strategy with default options.
|
|
337
|
+
*/
|
|
338
|
+
declare function createDefaultRetryStrategy(): RetryStrategy;
|
|
339
|
+
|
|
340
|
+
/**
|
|
341
|
+
* Interface for a global rate limiter that limits processing across all consumers.
|
|
342
|
+
* When configured, consumers will check this before processing each message.
|
|
343
|
+
*/
|
|
344
|
+
interface GlobalRateLimiter {
|
|
345
|
+
/**
|
|
346
|
+
* Check if processing is allowed under the rate limit.
|
|
347
|
+
* @returns Object with allowed flag and optional resetAt timestamp (ms since epoch)
|
|
348
|
+
*/
|
|
349
|
+
limit(): Promise<{
|
|
350
|
+
allowed: boolean;
|
|
351
|
+
resetAt?: number;
|
|
352
|
+
}>;
|
|
353
|
+
}
|
|
354
|
+
/**
|
|
355
|
+
* Descriptor for a queue in the fair queue system.
|
|
356
|
+
* Contains all the metadata needed to identify and route a queue.
|
|
357
|
+
*/
|
|
358
|
+
interface QueueDescriptor {
|
|
359
|
+
/** Unique queue identifier */
|
|
360
|
+
id: string;
|
|
361
|
+
/** Tenant this queue belongs to */
|
|
362
|
+
tenantId: string;
|
|
363
|
+
/** Additional metadata for concurrency group extraction */
|
|
364
|
+
metadata: Record<string, unknown>;
|
|
365
|
+
}
|
|
366
|
+
/**
|
|
367
|
+
* A message in the queue with its metadata.
|
|
368
|
+
*/
|
|
369
|
+
interface QueueMessage<TPayload = unknown> {
|
|
370
|
+
/** Unique message identifier */
|
|
371
|
+
id: string;
|
|
372
|
+
/** The queue this message belongs to */
|
|
373
|
+
queueId: string;
|
|
374
|
+
/** Message payload */
|
|
375
|
+
payload: TPayload;
|
|
376
|
+
/** Timestamp when message was enqueued */
|
|
377
|
+
timestamp: number;
|
|
378
|
+
/** Current attempt number (1-indexed, for retries) */
|
|
379
|
+
attempt: number;
|
|
380
|
+
/** Optional metadata */
|
|
381
|
+
metadata?: Record<string, unknown>;
|
|
382
|
+
}
|
|
383
|
+
/**
|
|
384
|
+
* Internal message format stored in Redis.
|
|
385
|
+
* Includes additional fields for tracking and routing.
|
|
386
|
+
*/
|
|
387
|
+
interface StoredMessage<TPayload = unknown> {
|
|
388
|
+
/** Message ID */
|
|
389
|
+
id: string;
|
|
390
|
+
/** Queue ID */
|
|
391
|
+
queueId: string;
|
|
392
|
+
/** Tenant ID */
|
|
393
|
+
tenantId: string;
|
|
394
|
+
/** Message payload */
|
|
395
|
+
payload: TPayload;
|
|
396
|
+
/** Timestamp when enqueued */
|
|
397
|
+
timestamp: number;
|
|
398
|
+
/** Current attempt number */
|
|
399
|
+
attempt: number;
|
|
400
|
+
/** Worker queue to route to */
|
|
401
|
+
workerQueue?: string;
|
|
402
|
+
/** Additional metadata */
|
|
403
|
+
metadata?: Record<string, unknown>;
|
|
404
|
+
}
|
|
405
|
+
/**
|
|
406
|
+
* Queue with its score (oldest message timestamp) from the master queue.
|
|
407
|
+
*/
|
|
408
|
+
interface QueueWithScore {
|
|
409
|
+
/** Queue identifier */
|
|
410
|
+
queueId: string;
|
|
411
|
+
/** Score (typically oldest message timestamp) */
|
|
412
|
+
score: number;
|
|
413
|
+
/** Tenant ID extracted from queue */
|
|
414
|
+
tenantId: string;
|
|
415
|
+
}
|
|
416
|
+
/**
|
|
417
|
+
* Configuration for a concurrency group.
|
|
418
|
+
* Allows defining arbitrary levels of concurrency (tenant, org, project, etc.)
|
|
419
|
+
*/
|
|
420
|
+
interface ConcurrencyGroupConfig {
|
|
421
|
+
/** Group name (e.g., "tenant", "organization", "project") */
|
|
422
|
+
name: string;
|
|
423
|
+
/** Extract the group ID from a queue descriptor */
|
|
424
|
+
extractGroupId: (queue: QueueDescriptor) => string;
|
|
425
|
+
/** Get the concurrency limit for a specific group ID */
|
|
426
|
+
getLimit: (groupId: string) => Promise<number>;
|
|
427
|
+
/** Default limit if not specified */
|
|
428
|
+
defaultLimit: number;
|
|
429
|
+
}
|
|
430
|
+
/**
|
|
431
|
+
* Current concurrency state for a group.
|
|
432
|
+
*/
|
|
433
|
+
interface ConcurrencyState {
|
|
434
|
+
/** Group name */
|
|
435
|
+
groupName: string;
|
|
436
|
+
/** Group ID */
|
|
437
|
+
groupId: string;
|
|
438
|
+
/** Current active count */
|
|
439
|
+
current: number;
|
|
440
|
+
/** Configured limit */
|
|
441
|
+
limit: number;
|
|
442
|
+
}
|
|
443
|
+
/**
|
|
444
|
+
* Result of a concurrency check.
|
|
445
|
+
*/
|
|
446
|
+
interface ConcurrencyCheckResult {
|
|
447
|
+
/** Whether processing is allowed */
|
|
448
|
+
allowed: boolean;
|
|
449
|
+
/** If not allowed, which group is blocking */
|
|
450
|
+
blockedBy?: ConcurrencyState;
|
|
451
|
+
}
|
|
452
|
+
/**
|
|
453
|
+
* Queues grouped by tenant for the scheduler.
|
|
454
|
+
*/
|
|
455
|
+
interface TenantQueues {
|
|
456
|
+
/** Tenant identifier */
|
|
457
|
+
tenantId: string;
|
|
458
|
+
/** Queue IDs belonging to this tenant, in priority order */
|
|
459
|
+
queues: string[];
|
|
460
|
+
}
|
|
461
|
+
/**
|
|
462
|
+
* Context provided to the scheduler for making decisions.
|
|
463
|
+
*/
|
|
464
|
+
interface SchedulerContext {
|
|
465
|
+
/** Get current concurrency for a group */
|
|
466
|
+
getCurrentConcurrency(groupName: string, groupId: string): Promise<number>;
|
|
467
|
+
/** Get concurrency limit for a group */
|
|
468
|
+
getConcurrencyLimit(groupName: string, groupId: string): Promise<number>;
|
|
469
|
+
/** Check if a group is at capacity */
|
|
470
|
+
isAtCapacity(groupName: string, groupId: string): Promise<boolean>;
|
|
471
|
+
/** Get queue descriptor by ID */
|
|
472
|
+
getQueueDescriptor(queueId: string): QueueDescriptor;
|
|
473
|
+
}
|
|
474
|
+
/**
|
|
475
|
+
* Pluggable scheduler interface for fair queue selection.
|
|
476
|
+
*/
|
|
477
|
+
interface FairScheduler {
|
|
478
|
+
/**
|
|
479
|
+
* Select queues for processing from a master queue shard.
|
|
480
|
+
* Returns queues grouped by tenant, ordered by the fairness algorithm.
|
|
481
|
+
*
|
|
482
|
+
* @param masterQueueShard - The master queue shard key
|
|
483
|
+
* @param consumerId - The consumer making the request
|
|
484
|
+
* @param context - Context for concurrency checks
|
|
485
|
+
* @returns Queues grouped by tenant in priority order
|
|
486
|
+
*/
|
|
487
|
+
selectQueues(masterQueueShard: string, consumerId: string, context: SchedulerContext): Promise<TenantQueues[]>;
|
|
488
|
+
/**
|
|
489
|
+
* Called after processing a message to update scheduler state.
|
|
490
|
+
* Optional - not all schedulers need to track state.
|
|
491
|
+
*/
|
|
492
|
+
recordProcessed?(tenantId: string, queueId: string): Promise<void>;
|
|
493
|
+
/**
|
|
494
|
+
* Called after processing multiple messages to update scheduler state.
|
|
495
|
+
* Batch variant for efficiency - reduces Redis calls when processing multiple messages.
|
|
496
|
+
* Optional - falls back to calling recordProcessed multiple times if not implemented.
|
|
497
|
+
*/
|
|
498
|
+
recordProcessedBatch?(tenantId: string, queueId: string, count: number): Promise<void>;
|
|
499
|
+
/**
|
|
500
|
+
* Initialize the scheduler (called once on startup).
|
|
501
|
+
*/
|
|
502
|
+
initialize?(): Promise<void>;
|
|
503
|
+
/**
|
|
504
|
+
* Cleanup scheduler resources.
|
|
505
|
+
*/
|
|
506
|
+
close?(): Promise<void>;
|
|
507
|
+
}
|
|
508
|
+
/**
|
|
509
|
+
* An in-flight message being processed.
|
|
510
|
+
*/
|
|
511
|
+
interface InFlightMessage<TPayload = unknown> {
|
|
512
|
+
/** Message ID */
|
|
513
|
+
messageId: string;
|
|
514
|
+
/** Queue ID */
|
|
515
|
+
queueId: string;
|
|
516
|
+
/** Message payload */
|
|
517
|
+
payload: TPayload;
|
|
518
|
+
/** When visibility timeout expires */
|
|
519
|
+
deadline: number;
|
|
520
|
+
/** Consumer that claimed this message */
|
|
521
|
+
consumerId: string;
|
|
522
|
+
}
|
|
523
|
+
/**
|
|
524
|
+
* Result of claiming a message.
|
|
525
|
+
*/
|
|
526
|
+
interface ClaimResult<TPayload = unknown> {
|
|
527
|
+
/** Whether the claim was successful */
|
|
528
|
+
claimed: boolean;
|
|
529
|
+
/** The claimed message if successful */
|
|
530
|
+
message?: InFlightMessage<TPayload>;
|
|
531
|
+
}
|
|
532
|
+
/**
|
|
533
|
+
* Interface for generating Redis keys for the fair queue system.
|
|
534
|
+
* Implementations can customize key prefixes and structures.
|
|
535
|
+
*/
|
|
536
|
+
interface FairQueueKeyProducer {
|
|
537
|
+
/** Get the master queue key for a shard */
|
|
538
|
+
masterQueueKey(shardId: number): string;
|
|
539
|
+
/** Get the queue key for storing messages */
|
|
540
|
+
queueKey(queueId: string): string;
|
|
541
|
+
/** Get the queue items hash key */
|
|
542
|
+
queueItemsKey(queueId: string): string;
|
|
543
|
+
/** Get the concurrency set key for a group */
|
|
544
|
+
concurrencyKey(groupName: string, groupId: string): string;
|
|
545
|
+
/** Get the in-flight sorted set key for a shard */
|
|
546
|
+
inflightKey(shardId: number): string;
|
|
547
|
+
/** Get the in-flight message data hash key */
|
|
548
|
+
inflightDataKey(shardId: number): string;
|
|
549
|
+
/** Get the worker queue key for a consumer */
|
|
550
|
+
workerQueueKey(consumerId: string): string;
|
|
551
|
+
/** Get the dead letter queue key for a tenant */
|
|
552
|
+
deadLetterQueueKey(tenantId: string): string;
|
|
553
|
+
/** Get the dead letter queue data hash key for a tenant */
|
|
554
|
+
deadLetterQueueDataKey(tenantId: string): string;
|
|
555
|
+
/** Extract tenant ID from a queue ID */
|
|
556
|
+
extractTenantId(queueId: string): string;
|
|
557
|
+
/** Extract a specific group ID from a queue ID */
|
|
558
|
+
extractGroupId(groupName: string, queueId: string): string;
|
|
559
|
+
}
|
|
560
|
+
/**
|
|
561
|
+
* Worker queue configuration options.
|
|
562
|
+
* Worker queues are always enabled - FairQueue routes messages to worker queues,
|
|
563
|
+
* and external consumers are responsible for consuming from those queues.
|
|
564
|
+
*/
|
|
565
|
+
interface WorkerQueueOptions<TPayload = unknown> {
|
|
566
|
+
/**
|
|
567
|
+
* Function to resolve which worker queue a message should go to.
|
|
568
|
+
* This is called during the claim-and-push phase to determine the target queue.
|
|
569
|
+
*/
|
|
570
|
+
resolveWorkerQueue: (message: StoredMessage<TPayload>) => string;
|
|
571
|
+
}
|
|
572
|
+
/**
|
|
573
|
+
* Retry and dead letter queue configuration.
|
|
574
|
+
*/
|
|
575
|
+
interface RetryOptions {
|
|
576
|
+
/** Retry strategy for failed messages */
|
|
577
|
+
strategy: RetryStrategy;
|
|
578
|
+
/** Whether to enable dead letter queue (default: true) */
|
|
579
|
+
deadLetterQueue?: boolean;
|
|
580
|
+
}
|
|
581
|
+
/**
|
|
582
|
+
* Queue cooloff configuration to avoid repeatedly polling concurrency-limited queues.
|
|
583
|
+
*/
|
|
584
|
+
interface CooloffOptions {
|
|
585
|
+
/** Whether cooloff is enabled (default: true) */
|
|
586
|
+
enabled?: boolean;
|
|
587
|
+
/** Number of consecutive empty dequeues before entering cooloff (default: 10) */
|
|
588
|
+
threshold?: number;
|
|
589
|
+
/** Duration of cooloff period in milliseconds (default: 10000) */
|
|
590
|
+
periodMs?: number;
|
|
591
|
+
/** Maximum number of cooloff state entries before triggering cleanup (default: 1000) */
|
|
592
|
+
maxStatesSize?: number;
|
|
593
|
+
}
|
|
594
|
+
/**
|
|
595
|
+
* Options for creating a FairQueue instance.
|
|
596
|
+
*
|
|
597
|
+
* @typeParam TPayloadSchema - Zod schema for message payload validation
|
|
598
|
+
*/
|
|
599
|
+
interface FairQueueOptions<TPayloadSchema extends z.ZodTypeAny = z.ZodUnknown> {
|
|
600
|
+
/** Redis connection options */
|
|
601
|
+
redis: RedisOptions;
|
|
602
|
+
/** Key producer for Redis keys */
|
|
603
|
+
keys: FairQueueKeyProducer;
|
|
604
|
+
/** Scheduler for fair queue selection */
|
|
605
|
+
scheduler: FairScheduler;
|
|
606
|
+
/** Zod schema for message payload validation */
|
|
607
|
+
payloadSchema?: TPayloadSchema;
|
|
608
|
+
/** Whether to validate payloads on enqueue (default: false) */
|
|
609
|
+
validateOnEnqueue?: boolean;
|
|
610
|
+
/** Number of master queue shards (default: 1) */
|
|
611
|
+
shardCount?: number;
|
|
612
|
+
/** Concurrency group configurations */
|
|
613
|
+
concurrencyGroups?: ConcurrencyGroupConfig[];
|
|
614
|
+
/**
|
|
615
|
+
* Worker queue configuration.
|
|
616
|
+
* FairQueue routes messages to worker queues; external consumers handle consumption.
|
|
617
|
+
*/
|
|
618
|
+
workerQueue: WorkerQueueOptions<z.infer<TPayloadSchema>>;
|
|
619
|
+
/** Retry and dead letter queue configuration */
|
|
620
|
+
retry?: RetryOptions;
|
|
621
|
+
/** Visibility timeout in milliseconds (default: 30000) */
|
|
622
|
+
visibilityTimeoutMs?: number;
|
|
623
|
+
/** Heartbeat interval in milliseconds (default: visibilityTimeoutMs / 3) */
|
|
624
|
+
heartbeatIntervalMs?: number;
|
|
625
|
+
/** Interval for reclaiming timed-out messages (default: 5000) */
|
|
626
|
+
reclaimIntervalMs?: number;
|
|
627
|
+
/** Number of consumer loops to run (default: 1) */
|
|
628
|
+
consumerCount?: number;
|
|
629
|
+
/** Interval between consumer iterations in milliseconds (default: 100) */
|
|
630
|
+
consumerIntervalMs?: number;
|
|
631
|
+
/** Whether to start consumers on initialization (default: true) */
|
|
632
|
+
startConsumers?: boolean;
|
|
633
|
+
/** Maximum number of messages to claim in a single batch operation (default: 10) */
|
|
634
|
+
batchClaimSize?: number;
|
|
635
|
+
/** Maximum iterations before starting a new trace span (default: 500) */
|
|
636
|
+
consumerTraceMaxIterations?: number;
|
|
637
|
+
/** Maximum seconds before starting a new trace span (default: 60) */
|
|
638
|
+
consumerTraceTimeoutSeconds?: number;
|
|
639
|
+
/** Queue cooloff configuration */
|
|
640
|
+
cooloff?: CooloffOptions;
|
|
641
|
+
/** Logger instance */
|
|
642
|
+
logger?: Logger;
|
|
643
|
+
/** OpenTelemetry tracer */
|
|
644
|
+
tracer?: Tracer;
|
|
645
|
+
/** OpenTelemetry meter */
|
|
646
|
+
meter?: Meter;
|
|
647
|
+
/** Name for metrics/tracing (default: "fairqueue") */
|
|
648
|
+
name?: string;
|
|
649
|
+
/** Optional global rate limiter to limit processing across all consumers */
|
|
650
|
+
globalRateLimiter?: GlobalRateLimiter;
|
|
651
|
+
}
|
|
652
|
+
/**
|
|
653
|
+
* Context passed to the message handler.
|
|
654
|
+
*/
|
|
655
|
+
interface MessageHandlerContext<TPayload = unknown> {
|
|
656
|
+
/** The message being processed */
|
|
657
|
+
message: QueueMessage<TPayload>;
|
|
658
|
+
/** Queue descriptor */
|
|
659
|
+
queue: QueueDescriptor;
|
|
660
|
+
/** Consumer ID processing this message */
|
|
661
|
+
consumerId: string;
|
|
662
|
+
/** Extend the visibility timeout */
|
|
663
|
+
heartbeat(): Promise<boolean>;
|
|
664
|
+
/** Mark message as successfully processed */
|
|
665
|
+
complete(): Promise<void>;
|
|
666
|
+
/** Release message back to the queue for retry */
|
|
667
|
+
release(): Promise<void>;
|
|
668
|
+
/** Mark message as failed (triggers retry or DLQ) */
|
|
669
|
+
fail(error?: Error): Promise<void>;
|
|
670
|
+
}
|
|
671
|
+
/**
|
|
672
|
+
* Handler function for processing messages.
|
|
673
|
+
*/
|
|
674
|
+
type MessageHandler<TPayload = unknown> = (context: MessageHandlerContext<TPayload>) => Promise<void>;
|
|
675
|
+
/**
|
|
676
|
+
* A message in the dead letter queue.
|
|
677
|
+
*/
|
|
678
|
+
interface DeadLetterMessage<TPayload = unknown> {
|
|
679
|
+
/** Message ID */
|
|
680
|
+
id: string;
|
|
681
|
+
/** Original queue ID */
|
|
682
|
+
queueId: string;
|
|
683
|
+
/** Tenant ID */
|
|
684
|
+
tenantId: string;
|
|
685
|
+
/** Message payload */
|
|
686
|
+
payload: TPayload;
|
|
687
|
+
/** Timestamp when moved to DLQ */
|
|
688
|
+
deadLetteredAt: number;
|
|
689
|
+
/** Number of attempts before DLQ */
|
|
690
|
+
attempts: number;
|
|
691
|
+
/** Last error message if available */
|
|
692
|
+
lastError?: string;
|
|
693
|
+
/** Original message timestamp */
|
|
694
|
+
originalTimestamp: number;
|
|
695
|
+
}
|
|
696
|
+
/**
|
|
697
|
+
* Cooloff state for a queue.
|
|
698
|
+
*/
|
|
699
|
+
type QueueCooloffState = {
|
|
700
|
+
tag: "normal";
|
|
701
|
+
consecutiveFailures: number;
|
|
702
|
+
} | {
|
|
703
|
+
tag: "cooloff";
|
|
704
|
+
expiresAt: number;
|
|
705
|
+
};
|
|
706
|
+
/**
|
|
707
|
+
* Options for enqueueing a message.
|
|
708
|
+
*/
|
|
709
|
+
interface EnqueueOptions<TPayload = unknown> {
|
|
710
|
+
/** Queue to add the message to */
|
|
711
|
+
queueId: string;
|
|
712
|
+
/** Tenant ID for the queue */
|
|
713
|
+
tenantId: string;
|
|
714
|
+
/** Message payload */
|
|
715
|
+
payload: TPayload;
|
|
716
|
+
/** Optional message ID (auto-generated if not provided) */
|
|
717
|
+
messageId?: string;
|
|
718
|
+
/** Optional timestamp (defaults to now) */
|
|
719
|
+
timestamp?: number;
|
|
720
|
+
/** Optional metadata for concurrency group extraction */
|
|
721
|
+
metadata?: Record<string, string>;
|
|
722
|
+
}
|
|
723
|
+
/**
|
|
724
|
+
* Options for enqueueing multiple messages.
|
|
725
|
+
*/
|
|
726
|
+
interface EnqueueBatchOptions<TPayload = unknown> {
|
|
727
|
+
/** Queue to add messages to */
|
|
728
|
+
queueId: string;
|
|
729
|
+
/** Tenant ID for the queue */
|
|
730
|
+
tenantId: string;
|
|
731
|
+
/** Messages to enqueue */
|
|
732
|
+
messages: Array<{
|
|
733
|
+
payload: TPayload;
|
|
734
|
+
messageId?: string;
|
|
735
|
+
timestamp?: number;
|
|
736
|
+
}>;
|
|
737
|
+
/** Optional metadata for concurrency group extraction */
|
|
738
|
+
metadata?: Record<string, string>;
|
|
739
|
+
}
|
|
740
|
+
/**
|
|
741
|
+
* Configuration for the Deficit Round Robin scheduler.
|
|
742
|
+
*/
|
|
743
|
+
interface DRRSchedulerConfig {
|
|
744
|
+
/** Credits allocated per tenant per round */
|
|
745
|
+
quantum: number;
|
|
746
|
+
/** Maximum accumulated deficit (prevents starvation) */
|
|
747
|
+
maxDeficit: number;
|
|
748
|
+
/** Maximum queues to fetch from master queue (default: 1000) */
|
|
749
|
+
masterQueueLimit?: number;
|
|
750
|
+
/** Redis options for state storage */
|
|
751
|
+
redis: RedisOptions;
|
|
752
|
+
/** Key producer */
|
|
753
|
+
keys: FairQueueKeyProducer;
|
|
754
|
+
/** Optional logger */
|
|
755
|
+
logger?: {
|
|
756
|
+
debug: (message: string, context?: Record<string, unknown>) => void;
|
|
757
|
+
error: (message: string, context?: Record<string, unknown>) => void;
|
|
758
|
+
};
|
|
759
|
+
}
|
|
760
|
+
/**
|
|
761
|
+
* Bias configuration for weighted shuffle scheduler.
|
|
762
|
+
*/
|
|
763
|
+
interface WeightedSchedulerBiases {
|
|
764
|
+
/**
|
|
765
|
+
* How much to bias towards tenants with higher concurrency limits.
|
|
766
|
+
* 0 = no bias, 1 = full bias based on limit differences
|
|
767
|
+
*/
|
|
768
|
+
concurrencyLimitBias: number;
|
|
769
|
+
/**
|
|
770
|
+
* How much to bias towards tenants with more available capacity.
|
|
771
|
+
* 0 = no bias, 1 = full bias based on available capacity
|
|
772
|
+
*/
|
|
773
|
+
availableCapacityBias: number;
|
|
774
|
+
/**
|
|
775
|
+
* Controls randomization of queue ordering within tenants.
|
|
776
|
+
* 0 = strict age-based ordering (oldest first)
|
|
777
|
+
* 1 = completely random ordering
|
|
778
|
+
* Values between 0-1 blend between age-based and random ordering
|
|
779
|
+
*/
|
|
780
|
+
queueAgeRandomization: number;
|
|
781
|
+
}
|
|
782
|
+
/**
|
|
783
|
+
* Configuration for the weighted shuffle scheduler.
|
|
784
|
+
*/
|
|
785
|
+
interface WeightedSchedulerConfig {
|
|
786
|
+
/** Redis options */
|
|
787
|
+
redis: RedisOptions;
|
|
788
|
+
/** Key producer */
|
|
789
|
+
keys: FairQueueKeyProducer;
|
|
790
|
+
/** Default tenant concurrency limit */
|
|
791
|
+
defaultTenantConcurrencyLimit?: number;
|
|
792
|
+
/** Maximum queues to consider from master queue */
|
|
793
|
+
masterQueueLimit?: number;
|
|
794
|
+
/** Bias configuration */
|
|
795
|
+
biases?: WeightedSchedulerBiases;
|
|
796
|
+
/** Number of iterations to reuse a snapshot */
|
|
797
|
+
reuseSnapshotCount?: number;
|
|
798
|
+
/** Maximum number of tenants to consider */
|
|
799
|
+
maximumTenantCount?: number;
|
|
800
|
+
/** Random seed for reproducibility */
|
|
801
|
+
seed?: string;
|
|
802
|
+
/** Optional tracer */
|
|
803
|
+
tracer?: Tracer;
|
|
804
|
+
}
|
|
805
|
+
|
|
806
|
+
/**
|
|
807
|
+
* Default key producer for the fair queue system.
|
|
808
|
+
* Uses a configurable prefix and standard key structure.
|
|
809
|
+
*
|
|
810
|
+
* Key structure:
|
|
811
|
+
* - Master queue: {prefix}:master:{shardId}
|
|
812
|
+
* - Queue: {prefix}:queue:{queueId}
|
|
813
|
+
* - Queue items: {prefix}:queue:{queueId}:items
|
|
814
|
+
* - Concurrency: {prefix}:concurrency:{groupName}:{groupId}
|
|
815
|
+
* - In-flight: {prefix}:inflight:{shardId}
|
|
816
|
+
* - In-flight data: {prefix}:inflight:{shardId}:data
|
|
817
|
+
* - Worker queue: {prefix}:worker:{consumerId}
|
|
818
|
+
*/
|
|
819
|
+
declare class DefaultFairQueueKeyProducer implements FairQueueKeyProducer {
|
|
820
|
+
#private;
|
|
821
|
+
private readonly prefix;
|
|
822
|
+
private readonly separator;
|
|
823
|
+
constructor(options?: {
|
|
824
|
+
prefix?: string;
|
|
825
|
+
separator?: string;
|
|
826
|
+
});
|
|
827
|
+
masterQueueKey(shardId: number): string;
|
|
828
|
+
queueKey(queueId: string): string;
|
|
829
|
+
queueItemsKey(queueId: string): string;
|
|
830
|
+
concurrencyKey(groupName: string, groupId: string): string;
|
|
831
|
+
inflightKey(shardId: number): string;
|
|
832
|
+
inflightDataKey(shardId: number): string;
|
|
833
|
+
workerQueueKey(consumerId: string): string;
|
|
834
|
+
deadLetterQueueKey(tenantId: string): string;
|
|
835
|
+
deadLetterQueueDataKey(tenantId: string): string;
|
|
836
|
+
/**
|
|
837
|
+
* Extract tenant ID from a queue ID.
|
|
838
|
+
* Default implementation assumes queue IDs are formatted as: tenant:{tenantId}:...
|
|
839
|
+
* Override this method for custom queue ID formats.
|
|
840
|
+
*/
|
|
841
|
+
extractTenantId(queueId: string): string;
|
|
842
|
+
/**
|
|
843
|
+
* Extract a group ID from a queue ID.
|
|
844
|
+
* Default implementation looks for pattern: {groupName}:{groupId}:...
|
|
845
|
+
* Override this method for custom queue ID formats.
|
|
846
|
+
*/
|
|
847
|
+
extractGroupId(groupName: string, queueId: string): string;
|
|
848
|
+
}
|
|
849
|
+
/**
|
|
850
|
+
* Key producer with custom extraction logic via callbacks.
|
|
851
|
+
* Useful when queue IDs don't follow a standard pattern.
|
|
852
|
+
*/
|
|
853
|
+
declare class CallbackFairQueueKeyProducer extends DefaultFairQueueKeyProducer {
|
|
854
|
+
private readonly tenantExtractor;
|
|
855
|
+
private readonly groupExtractor;
|
|
856
|
+
constructor(options: {
|
|
857
|
+
prefix?: string;
|
|
858
|
+
separator?: string;
|
|
859
|
+
extractTenantId: (queueId: string) => string;
|
|
860
|
+
extractGroupId: (groupName: string, queueId: string) => string;
|
|
861
|
+
});
|
|
862
|
+
extractTenantId(queueId: string): string;
|
|
863
|
+
extractGroupId(groupName: string, queueId: string): string;
|
|
864
|
+
}
|
|
865
|
+
|
|
866
|
+
interface MasterQueueOptions {
|
|
867
|
+
redis: RedisOptions;
|
|
868
|
+
keys: FairQueueKeyProducer;
|
|
869
|
+
shardCount: number;
|
|
870
|
+
}
|
|
871
|
+
/**
|
|
872
|
+
* Master queue manages the top-level queue of queues.
|
|
873
|
+
*
|
|
874
|
+
* Features:
|
|
875
|
+
* - Sharding for horizontal scaling
|
|
876
|
+
* - Consistent hashing for queue-to-shard assignment
|
|
877
|
+
* - Queues scored by oldest message timestamp
|
|
878
|
+
*/
|
|
879
|
+
declare class MasterQueue {
|
|
880
|
+
#private;
|
|
881
|
+
private options;
|
|
882
|
+
private redis;
|
|
883
|
+
private keys;
|
|
884
|
+
private shardCount;
|
|
885
|
+
constructor(options: MasterQueueOptions);
|
|
886
|
+
/**
|
|
887
|
+
* Get the shard ID for a queue.
|
|
888
|
+
* Uses consistent hashing based on queue ID.
|
|
889
|
+
*/
|
|
890
|
+
getShardForQueue(queueId: string): number;
|
|
891
|
+
/**
|
|
892
|
+
* Add a queue to its master queue shard.
|
|
893
|
+
* Updates the score to the oldest message timestamp.
|
|
894
|
+
*
|
|
895
|
+
* @param queueId - The queue identifier
|
|
896
|
+
* @param oldestMessageTimestamp - Timestamp of the oldest message in the queue
|
|
897
|
+
*/
|
|
898
|
+
addQueue(queueId: string, oldestMessageTimestamp: number): Promise<void>;
|
|
899
|
+
/**
|
|
900
|
+
* Update a queue's score in the master queue.
|
|
901
|
+
* This is typically called after dequeuing to update to the new oldest message.
|
|
902
|
+
*
|
|
903
|
+
* @param queueId - The queue identifier
|
|
904
|
+
* @param newOldestTimestamp - New timestamp of the oldest message
|
|
905
|
+
*/
|
|
906
|
+
updateQueueScore(queueId: string, newOldestTimestamp: number): Promise<void>;
|
|
907
|
+
/**
|
|
908
|
+
* Remove a queue from its master queue shard.
|
|
909
|
+
* Called when a queue becomes empty.
|
|
910
|
+
*
|
|
911
|
+
* @param queueId - The queue identifier
|
|
912
|
+
*/
|
|
913
|
+
removeQueue(queueId: string): Promise<void>;
|
|
914
|
+
/**
|
|
915
|
+
* Get queues from a shard, ordered by oldest message (lowest score first).
|
|
916
|
+
*
|
|
917
|
+
* @param shardId - The shard to query
|
|
918
|
+
* @param limit - Maximum number of queues to return (default: 1000)
|
|
919
|
+
* @param maxScore - Maximum score (timestamp) to include (default: now)
|
|
920
|
+
*/
|
|
921
|
+
getQueuesFromShard(shardId: number, limit?: number, maxScore?: number): Promise<QueueWithScore[]>;
|
|
922
|
+
/**
|
|
923
|
+
* Get the number of queues in a shard.
|
|
924
|
+
*/
|
|
925
|
+
getShardQueueCount(shardId: number): Promise<number>;
|
|
926
|
+
/**
|
|
927
|
+
* Get total queue count across all shards.
|
|
928
|
+
*/
|
|
929
|
+
getTotalQueueCount(): Promise<number>;
|
|
930
|
+
/**
|
|
931
|
+
* Atomically add a queue to master queue only if queue has messages.
|
|
932
|
+
* Uses Lua script for atomicity.
|
|
933
|
+
*
|
|
934
|
+
* @param queueId - The queue identifier
|
|
935
|
+
* @param queueKey - The actual queue sorted set key
|
|
936
|
+
* @returns Whether the queue was added to the master queue
|
|
937
|
+
*/
|
|
938
|
+
addQueueIfNotEmpty(queueId: string, queueKey: string): Promise<boolean>;
|
|
939
|
+
/**
|
|
940
|
+
* Atomically remove a queue from master queue only if queue is empty.
|
|
941
|
+
* Uses Lua script for atomicity.
|
|
942
|
+
*
|
|
943
|
+
* @param queueId - The queue identifier
|
|
944
|
+
* @param queueKey - The actual queue sorted set key
|
|
945
|
+
* @returns Whether the queue was removed from the master queue
|
|
946
|
+
*/
|
|
947
|
+
removeQueueIfEmpty(queueId: string, queueKey: string): Promise<boolean>;
|
|
948
|
+
/**
|
|
949
|
+
* Close the Redis connection.
|
|
950
|
+
*/
|
|
951
|
+
close(): Promise<void>;
|
|
952
|
+
}
|
|
953
|
+
declare module "@internal/redis" {
|
|
954
|
+
interface RedisCommander<Context> {
|
|
955
|
+
addQueueIfNotEmpty(masterKey: string, queueKey: string, queueId: string): Promise<number>;
|
|
956
|
+
removeQueueIfEmpty(masterKey: string, queueKey: string, queueId: string): Promise<number>;
|
|
957
|
+
}
|
|
958
|
+
}
|
|
959
|
+
|
|
960
|
+
interface ConcurrencyManagerOptions {
|
|
961
|
+
redis: RedisOptions;
|
|
962
|
+
keys: FairQueueKeyProducer;
|
|
963
|
+
groups: ConcurrencyGroupConfig[];
|
|
964
|
+
}
|
|
965
|
+
/**
|
|
966
|
+
* ConcurrencyManager handles multi-level concurrency tracking and limiting.
|
|
967
|
+
*
|
|
968
|
+
* Features:
|
|
969
|
+
* - Multiple concurrent concurrency groups (tenant, org, project, etc.)
|
|
970
|
+
* - Atomic reserve/release operations using Lua scripts
|
|
971
|
+
* - Efficient batch checking of all groups
|
|
972
|
+
*/
|
|
973
|
+
declare class ConcurrencyManager {
|
|
974
|
+
#private;
|
|
975
|
+
private options;
|
|
976
|
+
private redis;
|
|
977
|
+
private keys;
|
|
978
|
+
private groups;
|
|
979
|
+
private groupsByName;
|
|
980
|
+
constructor(options: ConcurrencyManagerOptions);
|
|
981
|
+
/**
|
|
982
|
+
* Check if a message can be processed given all concurrency constraints.
|
|
983
|
+
* Checks all configured groups and returns the first one at capacity.
|
|
984
|
+
*/
|
|
985
|
+
canProcess(queue: QueueDescriptor): Promise<ConcurrencyCheckResult>;
|
|
986
|
+
/**
|
|
987
|
+
* Reserve concurrency slots for a message across all groups.
|
|
988
|
+
* Atomic - either all groups are reserved or none.
|
|
989
|
+
*
|
|
990
|
+
* @returns true if reservation successful, false if any group is at capacity
|
|
991
|
+
*/
|
|
992
|
+
reserve(queue: QueueDescriptor, messageId: string): Promise<boolean>;
|
|
993
|
+
/**
|
|
994
|
+
* Release concurrency slots for a message across all groups.
|
|
995
|
+
*/
|
|
996
|
+
release(queue: QueueDescriptor, messageId: string): Promise<void>;
|
|
997
|
+
/**
|
|
998
|
+
* Get current concurrency for a specific group.
|
|
999
|
+
*/
|
|
1000
|
+
getCurrentConcurrency(groupName: string, groupId: string): Promise<number>;
|
|
1001
|
+
/**
|
|
1002
|
+
* Get available capacity for a queue across all concurrency groups.
|
|
1003
|
+
* Returns the minimum available capacity across all groups.
|
|
1004
|
+
*/
|
|
1005
|
+
getAvailableCapacity(queue: QueueDescriptor): Promise<number>;
|
|
1006
|
+
/**
|
|
1007
|
+
* Get concurrency limit for a specific group.
|
|
1008
|
+
*/
|
|
1009
|
+
getConcurrencyLimit(groupName: string, groupId: string): Promise<number>;
|
|
1010
|
+
/**
|
|
1011
|
+
* Check if a group is at capacity.
|
|
1012
|
+
*/
|
|
1013
|
+
isAtCapacity(groupName: string, groupId: string): Promise<boolean>;
|
|
1014
|
+
/**
|
|
1015
|
+
* Get full state for a group.
|
|
1016
|
+
*/
|
|
1017
|
+
getState(groupName: string, groupId: string): Promise<ConcurrencyState>;
|
|
1018
|
+
/**
|
|
1019
|
+
* Get all active message IDs for a group.
|
|
1020
|
+
*/
|
|
1021
|
+
getActiveMessages(groupName: string, groupId: string): Promise<string[]>;
|
|
1022
|
+
/**
|
|
1023
|
+
* Force-clear concurrency for a group (use with caution).
|
|
1024
|
+
* Useful for cleanup after crashes.
|
|
1025
|
+
*/
|
|
1026
|
+
clearGroup(groupName: string, groupId: string): Promise<void>;
|
|
1027
|
+
/**
|
|
1028
|
+
* Remove a specific message from concurrency tracking.
|
|
1029
|
+
* Useful for cleanup.
|
|
1030
|
+
*/
|
|
1031
|
+
removeMessage(messageId: string, queue: QueueDescriptor): Promise<void>;
|
|
1032
|
+
/**
|
|
1033
|
+
* Get configured group names.
|
|
1034
|
+
*/
|
|
1035
|
+
getGroupNames(): string[];
|
|
1036
|
+
/**
|
|
1037
|
+
* Close the Redis connection.
|
|
1038
|
+
*/
|
|
1039
|
+
close(): Promise<void>;
|
|
1040
|
+
}
|
|
1041
|
+
declare module "@internal/redis" {
|
|
1042
|
+
interface RedisCommander<Context> {
|
|
1043
|
+
reserveConcurrency(numKeys: number, keys: string[], messageId: string, ...limits: string[]): Promise<number>;
|
|
1044
|
+
}
|
|
1045
|
+
}
|
|
1046
|
+
|
|
1047
|
+
interface VisibilityManagerOptions {
|
|
1048
|
+
redis: RedisOptions;
|
|
1049
|
+
keys: FairQueueKeyProducer;
|
|
1050
|
+
shardCount: number;
|
|
1051
|
+
defaultTimeoutMs: number;
|
|
1052
|
+
logger?: {
|
|
1053
|
+
debug: (message: string, context?: Record<string, unknown>) => void;
|
|
1054
|
+
error: (message: string, context?: Record<string, unknown>) => void;
|
|
1055
|
+
};
|
|
1056
|
+
}
|
|
1057
|
+
/**
|
|
1058
|
+
* VisibilityManager handles message visibility timeouts for safe message processing.
|
|
1059
|
+
*
|
|
1060
|
+
* Features:
|
|
1061
|
+
* - Claim messages with visibility timeout
|
|
1062
|
+
* - Heartbeat to extend timeout
|
|
1063
|
+
* - Automatic reclaim of timed-out messages
|
|
1064
|
+
* - Per-shard in-flight tracking
|
|
1065
|
+
*
|
|
1066
|
+
* Data structures:
|
|
1067
|
+
* - In-flight sorted set: score = deadline timestamp, member = "{messageId}:{queueId}"
|
|
1068
|
+
* - In-flight data hash: field = messageId, value = JSON message data
|
|
1069
|
+
*/
|
|
1070
|
+
declare class VisibilityManager {
|
|
1071
|
+
#private;
|
|
1072
|
+
private options;
|
|
1073
|
+
private redis;
|
|
1074
|
+
private keys;
|
|
1075
|
+
private shardCount;
|
|
1076
|
+
private defaultTimeoutMs;
|
|
1077
|
+
private logger;
|
|
1078
|
+
constructor(options: VisibilityManagerOptions);
|
|
1079
|
+
/**
|
|
1080
|
+
* Claim a message for processing.
|
|
1081
|
+
* Moves the message from its queue to the in-flight set with a visibility timeout.
|
|
1082
|
+
*
|
|
1083
|
+
* @param queueId - The queue to claim from
|
|
1084
|
+
* @param queueKey - The Redis key for the queue sorted set
|
|
1085
|
+
* @param queueItemsKey - The Redis key for the queue items hash
|
|
1086
|
+
* @param consumerId - ID of the consumer claiming the message
|
|
1087
|
+
* @param timeoutMs - Visibility timeout in milliseconds
|
|
1088
|
+
* @returns Claim result with the message if successful
|
|
1089
|
+
*/
|
|
1090
|
+
claim<TPayload = unknown>(queueId: string, queueKey: string, queueItemsKey: string, consumerId: string, timeoutMs?: number): Promise<ClaimResult<TPayload>>;
|
|
1091
|
+
/**
|
|
1092
|
+
* Claim multiple messages for processing (batch claim).
|
|
1093
|
+
* Moves up to maxCount messages from the queue to the in-flight set.
|
|
1094
|
+
*
|
|
1095
|
+
* @param queueId - The queue to claim from
|
|
1096
|
+
* @param queueKey - The Redis key for the queue sorted set
|
|
1097
|
+
* @param queueItemsKey - The Redis key for the queue items hash
|
|
1098
|
+
* @param consumerId - ID of the consumer claiming the messages
|
|
1099
|
+
* @param maxCount - Maximum number of messages to claim
|
|
1100
|
+
* @param timeoutMs - Visibility timeout in milliseconds
|
|
1101
|
+
* @returns Array of claimed messages
|
|
1102
|
+
*/
|
|
1103
|
+
claimBatch<TPayload = unknown>(queueId: string, queueKey: string, queueItemsKey: string, consumerId: string, maxCount: number, timeoutMs?: number): Promise<Array<InFlightMessage<TPayload>>>;
|
|
1104
|
+
/**
|
|
1105
|
+
* Extend the visibility timeout for a message (heartbeat).
|
|
1106
|
+
*
|
|
1107
|
+
* @param messageId - The message ID
|
|
1108
|
+
* @param queueId - The queue ID
|
|
1109
|
+
* @param extendMs - Additional milliseconds to add to the deadline
|
|
1110
|
+
* @returns true if the heartbeat was successful
|
|
1111
|
+
*/
|
|
1112
|
+
heartbeat(messageId: string, queueId: string, extendMs: number): Promise<boolean>;
|
|
1113
|
+
/**
|
|
1114
|
+
* Mark a message as successfully processed.
|
|
1115
|
+
* Removes the message from in-flight tracking.
|
|
1116
|
+
*
|
|
1117
|
+
* @param messageId - The message ID
|
|
1118
|
+
* @param queueId - The queue ID
|
|
1119
|
+
*/
|
|
1120
|
+
complete(messageId: string, queueId: string): Promise<void>;
|
|
1121
|
+
/**
|
|
1122
|
+
* Release a message back to its queue.
|
|
1123
|
+
* Used when processing fails or consumer wants to retry later.
|
|
1124
|
+
*
|
|
1125
|
+
* @param messageId - The message ID
|
|
1126
|
+
* @param queueId - The queue ID
|
|
1127
|
+
* @param queueKey - The Redis key for the queue
|
|
1128
|
+
* @param queueItemsKey - The Redis key for the queue items hash
|
|
1129
|
+
* @param masterQueueKey - The Redis key for the master queue
|
|
1130
|
+
* @param score - Optional score for the message (defaults to now)
|
|
1131
|
+
*/
|
|
1132
|
+
release<TPayload = unknown>(messageId: string, queueId: string, queueKey: string, queueItemsKey: string, masterQueueKey: string, score?: number): Promise<void>;
|
|
1133
|
+
/**
|
|
1134
|
+
* Release multiple messages back to their queue in a single operation.
|
|
1135
|
+
* Used when processing fails or consumer wants to retry later.
|
|
1136
|
+
* All messages must belong to the same queue.
|
|
1137
|
+
*
|
|
1138
|
+
* @param messages - Array of messages to release (must all have same queueId)
|
|
1139
|
+
* @param queueId - The queue ID
|
|
1140
|
+
* @param queueKey - The Redis key for the queue
|
|
1141
|
+
* @param queueItemsKey - The Redis key for the queue items hash
|
|
1142
|
+
* @param masterQueueKey - The Redis key for the master queue
|
|
1143
|
+
* @param score - Optional score for the messages (defaults to now)
|
|
1144
|
+
*/
|
|
1145
|
+
releaseBatch(messages: Array<{
|
|
1146
|
+
messageId: string;
|
|
1147
|
+
}>, queueId: string, queueKey: string, queueItemsKey: string, masterQueueKey: string, score?: number): Promise<void>;
|
|
1148
|
+
/**
|
|
1149
|
+
* Reclaim timed-out messages from a shard.
|
|
1150
|
+
* Returns messages to their original queues.
|
|
1151
|
+
*
|
|
1152
|
+
* @param shardId - The shard to check
|
|
1153
|
+
* @param getQueueKeys - Function to get queue keys for a queue ID
|
|
1154
|
+
* @returns Number of messages reclaimed
|
|
1155
|
+
*/
|
|
1156
|
+
reclaimTimedOut(shardId: number, getQueueKeys: (queueId: string) => {
|
|
1157
|
+
queueKey: string;
|
|
1158
|
+
queueItemsKey: string;
|
|
1159
|
+
masterQueueKey: string;
|
|
1160
|
+
}): Promise<number>;
|
|
1161
|
+
/**
|
|
1162
|
+
* Get all in-flight messages for a shard.
|
|
1163
|
+
*/
|
|
1164
|
+
getInflightMessages(shardId: number): Promise<Array<{
|
|
1165
|
+
messageId: string;
|
|
1166
|
+
queueId: string;
|
|
1167
|
+
deadline: number;
|
|
1168
|
+
}>>;
|
|
1169
|
+
/**
|
|
1170
|
+
* Get count of in-flight messages for a shard.
|
|
1171
|
+
*/
|
|
1172
|
+
getInflightCount(shardId: number): Promise<number>;
|
|
1173
|
+
/**
|
|
1174
|
+
* Get total in-flight count across all shards.
|
|
1175
|
+
*/
|
|
1176
|
+
getTotalInflightCount(): Promise<number>;
|
|
1177
|
+
/**
|
|
1178
|
+
* Close the Redis connection.
|
|
1179
|
+
*/
|
|
1180
|
+
close(): Promise<void>;
|
|
1181
|
+
}
|
|
1182
|
+
declare module "@internal/redis" {
|
|
1183
|
+
interface RedisCommander<Context> {
|
|
1184
|
+
claimMessage(queueKey: string, queueItemsKey: string, inflightKey: string, inflightDataKey: string, queueId: string, consumerId: string, deadline: string): Promise<[string, string] | null>;
|
|
1185
|
+
claimMessageBatch(queueKey: string, queueItemsKey: string, inflightKey: string, inflightDataKey: string, queueId: string, deadline: string, maxCount: string): Promise<string[]>;
|
|
1186
|
+
releaseMessage(inflightKey: string, inflightDataKey: string, queueKey: string, queueItemsKey: string, masterQueueKey: string, member: string, messageId: string, score: string, queueId: string): Promise<number>;
|
|
1187
|
+
releaseMessageBatch(inflightKey: string, inflightDataKey: string, queueKey: string, queueItemsKey: string, masterQueueKey: string, score: string, queueId: string, ...membersAndMessageIds: string[]): Promise<number>;
|
|
1188
|
+
heartbeatMessage(inflightKey: string, member: string, newDeadline: string): Promise<number>;
|
|
1189
|
+
}
|
|
1190
|
+
}
|
|
1191
|
+
|
|
1192
|
+
interface WorkerQueueManagerOptions {
|
|
1193
|
+
redis: RedisOptions;
|
|
1194
|
+
keys: FairQueueKeyProducer;
|
|
1195
|
+
logger?: {
|
|
1196
|
+
debug: (message: string, context?: Record<string, unknown>) => void;
|
|
1197
|
+
error: (message: string, context?: Record<string, unknown>) => void;
|
|
1198
|
+
};
|
|
1199
|
+
}
|
|
1200
|
+
/**
|
|
1201
|
+
* WorkerQueueManager handles the intermediate worker queue layer.
|
|
1202
|
+
*
|
|
1203
|
+
* This provides:
|
|
1204
|
+
* - Low-latency message delivery via blocking pop (BLPOP)
|
|
1205
|
+
* - Routing of messages to specific workers/consumers
|
|
1206
|
+
* - Efficient waiting without polling
|
|
1207
|
+
*
|
|
1208
|
+
* Flow:
|
|
1209
|
+
* 1. Master queue consumer claims message from message queue
|
|
1210
|
+
* 2. Message key is pushed to worker queue
|
|
1211
|
+
* 3. Worker queue consumer does blocking pop to receive message
|
|
1212
|
+
*/
|
|
1213
|
+
declare class WorkerQueueManager {
|
|
1214
|
+
#private;
|
|
1215
|
+
private options;
|
|
1216
|
+
private redis;
|
|
1217
|
+
private keys;
|
|
1218
|
+
private logger;
|
|
1219
|
+
constructor(options: WorkerQueueManagerOptions);
|
|
1220
|
+
/**
|
|
1221
|
+
* Push a message key to a worker queue.
|
|
1222
|
+
* Called after claiming a message from the message queue.
|
|
1223
|
+
*
|
|
1224
|
+
* @param workerQueueId - The worker queue identifier
|
|
1225
|
+
* @param messageKey - The message key to push (typically "messageId:queueId")
|
|
1226
|
+
*/
|
|
1227
|
+
push(workerQueueId: string, messageKey: string): Promise<void>;
|
|
1228
|
+
/**
|
|
1229
|
+
* Push multiple message keys to a worker queue.
|
|
1230
|
+
*
|
|
1231
|
+
* @param workerQueueId - The worker queue identifier
|
|
1232
|
+
* @param messageKeys - The message keys to push
|
|
1233
|
+
*/
|
|
1234
|
+
pushBatch(workerQueueId: string, messageKeys: string[]): Promise<void>;
|
|
1235
|
+
/**
|
|
1236
|
+
* Blocking pop from a worker queue.
|
|
1237
|
+
* Waits until a message is available or timeout expires.
|
|
1238
|
+
*
|
|
1239
|
+
* @param workerQueueId - The worker queue identifier
|
|
1240
|
+
* @param timeoutSeconds - Maximum time to wait (0 = wait forever)
|
|
1241
|
+
* @param signal - Optional abort signal to cancel waiting
|
|
1242
|
+
* @returns The message key, or null if timeout
|
|
1243
|
+
*/
|
|
1244
|
+
blockingPop(workerQueueId: string, timeoutSeconds: number, signal?: AbortSignal): Promise<string | null>;
|
|
1245
|
+
/**
|
|
1246
|
+
* Non-blocking pop from a worker queue.
|
|
1247
|
+
*
|
|
1248
|
+
* @param workerQueueId - The worker queue identifier
|
|
1249
|
+
* @returns The message key and queue length, or null if empty
|
|
1250
|
+
*/
|
|
1251
|
+
pop(workerQueueId: string): Promise<{
|
|
1252
|
+
messageKey: string;
|
|
1253
|
+
queueLength: number;
|
|
1254
|
+
} | null>;
|
|
1255
|
+
/**
|
|
1256
|
+
* Get the current length of a worker queue.
|
|
1257
|
+
*/
|
|
1258
|
+
getLength(workerQueueId: string): Promise<number>;
|
|
1259
|
+
/**
|
|
1260
|
+
* Peek at all messages in a worker queue without removing them.
|
|
1261
|
+
* Useful for debugging and tests.
|
|
1262
|
+
*/
|
|
1263
|
+
peek(workerQueueId: string): Promise<string[]>;
|
|
1264
|
+
/**
|
|
1265
|
+
* Remove a specific message from the worker queue.
|
|
1266
|
+
* Used when a message needs to be removed without processing.
|
|
1267
|
+
*
|
|
1268
|
+
* @param workerQueueId - The worker queue identifier
|
|
1269
|
+
* @param messageKey - The message key to remove
|
|
1270
|
+
* @returns Number of removed items
|
|
1271
|
+
*/
|
|
1272
|
+
remove(workerQueueId: string, messageKey: string): Promise<number>;
|
|
1273
|
+
/**
|
|
1274
|
+
* Clear all messages from a worker queue.
|
|
1275
|
+
*/
|
|
1276
|
+
clear(workerQueueId: string): Promise<void>;
|
|
1277
|
+
/**
|
|
1278
|
+
* Close the Redis connection.
|
|
1279
|
+
*/
|
|
1280
|
+
close(): Promise<void>;
|
|
1281
|
+
/**
|
|
1282
|
+
* Register custom commands on an external Redis client.
|
|
1283
|
+
* Use this when initializing FairQueue with worker queues.
|
|
1284
|
+
*/
|
|
1285
|
+
registerCommands(redis: Redis): void;
|
|
1286
|
+
}
|
|
1287
|
+
declare module "@internal/redis" {
|
|
1288
|
+
interface RedisCommander<Context> {
|
|
1289
|
+
popWithLength(workerQueueKey: string): Promise<[string, string] | null>;
|
|
1290
|
+
}
|
|
1291
|
+
}
|
|
1292
|
+
|
|
1293
|
+
/**
|
|
1294
|
+
* Base class for scheduler implementations.
|
|
1295
|
+
* Provides common utilities and default implementations.
|
|
1296
|
+
*/
|
|
1297
|
+
declare abstract class BaseScheduler implements FairScheduler {
|
|
1298
|
+
/**
|
|
1299
|
+
* Select queues for processing from a master queue shard.
|
|
1300
|
+
* Must be implemented by subclasses.
|
|
1301
|
+
*/
|
|
1302
|
+
abstract selectQueues(masterQueueShard: string, consumerId: string, context: SchedulerContext): Promise<TenantQueues[]>;
|
|
1303
|
+
/**
|
|
1304
|
+
* Called after processing a message to update scheduler state.
|
|
1305
|
+
* Default implementation does nothing.
|
|
1306
|
+
*/
|
|
1307
|
+
recordProcessed(_tenantId: string, _queueId: string): Promise<void>;
|
|
1308
|
+
/**
|
|
1309
|
+
* Called after processing multiple messages to update scheduler state.
|
|
1310
|
+
* Batch variant for efficiency - reduces Redis calls when processing multiple messages.
|
|
1311
|
+
* Default implementation does nothing.
|
|
1312
|
+
*/
|
|
1313
|
+
recordProcessedBatch(_tenantId: string, _queueId: string, _count: number): Promise<void>;
|
|
1314
|
+
/**
|
|
1315
|
+
* Initialize the scheduler.
|
|
1316
|
+
* Default implementation does nothing.
|
|
1317
|
+
*/
|
|
1318
|
+
initialize(): Promise<void>;
|
|
1319
|
+
/**
|
|
1320
|
+
* Cleanup scheduler resources.
|
|
1321
|
+
* Default implementation does nothing.
|
|
1322
|
+
*/
|
|
1323
|
+
close(): Promise<void>;
|
|
1324
|
+
/**
|
|
1325
|
+
* Helper to group queues by tenant.
|
|
1326
|
+
*/
|
|
1327
|
+
protected groupQueuesByTenant(queues: Array<{
|
|
1328
|
+
queueId: string;
|
|
1329
|
+
tenantId: string;
|
|
1330
|
+
}>): Map<string, string[]>;
|
|
1331
|
+
/**
|
|
1332
|
+
* Helper to convert grouped queues to TenantQueues array.
|
|
1333
|
+
*/
|
|
1334
|
+
protected toTenantQueuesArray(grouped: Map<string, string[]>): TenantQueues[];
|
|
1335
|
+
/**
|
|
1336
|
+
* Helper to filter out tenants at capacity.
|
|
1337
|
+
*/
|
|
1338
|
+
protected filterAtCapacity(tenants: TenantQueues[], context: SchedulerContext, groupName?: string): Promise<TenantQueues[]>;
|
|
1339
|
+
}
|
|
1340
|
+
/**
|
|
1341
|
+
* Simple noop scheduler that returns empty results.
|
|
1342
|
+
* Useful for testing or disabling scheduling.
|
|
1343
|
+
*/
|
|
1344
|
+
declare class NoopScheduler extends BaseScheduler {
|
|
1345
|
+
selectQueues(_masterQueueShard: string, _consumerId: string, _context: SchedulerContext): Promise<TenantQueues[]>;
|
|
1346
|
+
}
|
|
1347
|
+
|
|
1348
|
+
/**
|
|
1349
|
+
* Deficit Round Robin (DRR) Scheduler.
|
|
1350
|
+
*
|
|
1351
|
+
* DRR ensures fair processing across tenants by:
|
|
1352
|
+
* - Allocating a "quantum" of credits to each tenant per round
|
|
1353
|
+
* - Accumulating unused credits as "deficit"
|
|
1354
|
+
* - Processing from tenants with available deficit
|
|
1355
|
+
* - Capping deficit to prevent starvation
|
|
1356
|
+
*
|
|
1357
|
+
* Key improvements over basic implementations:
|
|
1358
|
+
* - Atomic deficit operations using Lua scripts
|
|
1359
|
+
* - Efficient iteration through tenants
|
|
1360
|
+
* - Automatic deficit cleanup for inactive tenants
|
|
1361
|
+
*/
|
|
1362
|
+
declare class DRRScheduler extends BaseScheduler {
|
|
1363
|
+
#private;
|
|
1364
|
+
private config;
|
|
1365
|
+
private redis;
|
|
1366
|
+
private keys;
|
|
1367
|
+
private quantum;
|
|
1368
|
+
private maxDeficit;
|
|
1369
|
+
private masterQueueLimit;
|
|
1370
|
+
private logger;
|
|
1371
|
+
constructor(config: DRRSchedulerConfig);
|
|
1372
|
+
/**
|
|
1373
|
+
* Select queues for processing using DRR algorithm.
|
|
1374
|
+
*
|
|
1375
|
+
* Algorithm:
|
|
1376
|
+
* 1. Get all queues from the master shard
|
|
1377
|
+
* 2. Group by tenant
|
|
1378
|
+
* 3. Filter out tenants at concurrency capacity
|
|
1379
|
+
* 4. Add quantum to each tenant's deficit (atomically)
|
|
1380
|
+
* 5. Select queues from tenants with deficit >= 1
|
|
1381
|
+
* 6. Order tenants by deficit (highest first for fairness)
|
|
1382
|
+
*/
|
|
1383
|
+
selectQueues(masterQueueShard: string, consumerId: string, context: SchedulerContext): Promise<TenantQueues[]>;
|
|
1384
|
+
/**
|
|
1385
|
+
* Record that a message was processed from a tenant.
|
|
1386
|
+
* Decrements the tenant's deficit.
|
|
1387
|
+
*/
|
|
1388
|
+
recordProcessed(tenantId: string, _queueId: string): Promise<void>;
|
|
1389
|
+
/**
|
|
1390
|
+
* Record that multiple messages were processed from a tenant.
|
|
1391
|
+
* Decrements the tenant's deficit by count atomically.
|
|
1392
|
+
*/
|
|
1393
|
+
recordProcessedBatch(tenantId: string, _queueId: string, count: number): Promise<void>;
|
|
1394
|
+
close(): Promise<void>;
|
|
1395
|
+
/**
|
|
1396
|
+
* Get the current deficit for a tenant.
|
|
1397
|
+
*/
|
|
1398
|
+
getDeficit(tenantId: string): Promise<number>;
|
|
1399
|
+
/**
|
|
1400
|
+
* Reset deficit for a tenant.
|
|
1401
|
+
* Used when a tenant has no more active queues.
|
|
1402
|
+
*/
|
|
1403
|
+
resetDeficit(tenantId: string): Promise<void>;
|
|
1404
|
+
/**
|
|
1405
|
+
* Get all tenant deficits.
|
|
1406
|
+
*/
|
|
1407
|
+
getAllDeficits(): Promise<Map<string, number>>;
|
|
1408
|
+
}
|
|
1409
|
+
declare module "@internal/redis" {
|
|
1410
|
+
interface RedisCommander<Context> {
|
|
1411
|
+
drrAddQuantum(deficitKey: string, quantum: string, maxDeficit: string, ...tenantIds: string[]): Promise<string[]>;
|
|
1412
|
+
drrDecrementDeficit(deficitKey: string, tenantId: string): Promise<string>;
|
|
1413
|
+
drrDecrementDeficitBatch(deficitKey: string, tenantId: string, count: string): Promise<string>;
|
|
1414
|
+
}
|
|
1415
|
+
}
|
|
1416
|
+
|
|
1417
|
+
/**
|
|
1418
|
+
* Weighted Shuffle Scheduler.
|
|
1419
|
+
*
|
|
1420
|
+
* Uses weighted random selection to balance between:
|
|
1421
|
+
* - Concurrency limit (higher limits get more weight)
|
|
1422
|
+
* - Available capacity (tenants with more capacity get more weight)
|
|
1423
|
+
* - Queue age (older queues get priority, with configurable randomization)
|
|
1424
|
+
*
|
|
1425
|
+
* Features:
|
|
1426
|
+
* - Snapshot caching to reduce Redis calls
|
|
1427
|
+
* - Configurable biases for fine-tuning
|
|
1428
|
+
* - Maximum tenant count to limit iteration
|
|
1429
|
+
*/
|
|
1430
|
+
declare class WeightedScheduler extends BaseScheduler {
|
|
1431
|
+
#private;
|
|
1432
|
+
private config;
|
|
1433
|
+
private redis;
|
|
1434
|
+
private keys;
|
|
1435
|
+
private rng;
|
|
1436
|
+
private biases;
|
|
1437
|
+
private defaultTenantLimit;
|
|
1438
|
+
private masterQueueLimit;
|
|
1439
|
+
private reuseSnapshotCount;
|
|
1440
|
+
private maximumTenantCount;
|
|
1441
|
+
private snapshotCache;
|
|
1442
|
+
constructor(config: WeightedSchedulerConfig);
|
|
1443
|
+
selectQueues(masterQueueShard: string, consumerId: string, context: SchedulerContext): Promise<TenantQueues[]>;
|
|
1444
|
+
close(): Promise<void>;
|
|
1445
|
+
}
|
|
1446
|
+
|
|
1447
|
+
interface RoundRobinSchedulerConfig {
|
|
1448
|
+
redis: RedisOptions;
|
|
1449
|
+
keys: FairQueueKeyProducer;
|
|
1450
|
+
/** Maximum queues to fetch from master queue per iteration */
|
|
1451
|
+
masterQueueLimit?: number;
|
|
1452
|
+
}
|
|
1453
|
+
/**
|
|
1454
|
+
* Round Robin Scheduler.
|
|
1455
|
+
*
|
|
1456
|
+
* Simple scheduler that processes tenants in strict rotation order.
|
|
1457
|
+
* Maintains a "last served" pointer in Redis to track position.
|
|
1458
|
+
*
|
|
1459
|
+
* Features:
|
|
1460
|
+
* - Predictable ordering (good for debugging)
|
|
1461
|
+
* - Fair rotation through all tenants
|
|
1462
|
+
* - No weighting or bias
|
|
1463
|
+
*/
|
|
1464
|
+
declare class RoundRobinScheduler extends BaseScheduler {
|
|
1465
|
+
#private;
|
|
1466
|
+
private config;
|
|
1467
|
+
private redis;
|
|
1468
|
+
private keys;
|
|
1469
|
+
private masterQueueLimit;
|
|
1470
|
+
constructor(config: RoundRobinSchedulerConfig);
|
|
1471
|
+
selectQueues(masterQueueShard: string, consumerId: string, context: SchedulerContext): Promise<TenantQueues[]>;
|
|
1472
|
+
close(): Promise<void>;
|
|
1473
|
+
}
|
|
1474
|
+
|
|
1475
|
+
/**
|
|
1476
|
+
* Semantic attributes for fair queue messaging operations.
|
|
1477
|
+
*/
|
|
1478
|
+
declare const FairQueueAttributes: {
|
|
1479
|
+
readonly QUEUE_ID: "fairqueue.queue_id";
|
|
1480
|
+
readonly TENANT_ID: "fairqueue.tenant_id";
|
|
1481
|
+
readonly MESSAGE_ID: "fairqueue.message_id";
|
|
1482
|
+
readonly SHARD_ID: "fairqueue.shard_id";
|
|
1483
|
+
readonly WORKER_QUEUE: "fairqueue.worker_queue";
|
|
1484
|
+
readonly CONSUMER_ID: "fairqueue.consumer_id";
|
|
1485
|
+
readonly ATTEMPT: "fairqueue.attempt";
|
|
1486
|
+
readonly CONCURRENCY_GROUP: "fairqueue.concurrency_group";
|
|
1487
|
+
readonly MESSAGE_COUNT: "fairqueue.message_count";
|
|
1488
|
+
readonly RESULT: "fairqueue.result";
|
|
1489
|
+
};
|
|
1490
|
+
/**
|
|
1491
|
+
* Standard messaging semantic attributes.
|
|
1492
|
+
*/
|
|
1493
|
+
declare const MessagingAttributes: {
|
|
1494
|
+
readonly SYSTEM: "messaging.system";
|
|
1495
|
+
readonly OPERATION: "messaging.operation";
|
|
1496
|
+
readonly MESSAGE_ID: "messaging.message_id";
|
|
1497
|
+
readonly DESTINATION_NAME: "messaging.destination.name";
|
|
1498
|
+
};
|
|
1499
|
+
/**
|
|
1500
|
+
* FairQueue metrics collection.
|
|
1501
|
+
*/
|
|
1502
|
+
interface FairQueueMetrics {
|
|
1503
|
+
messagesEnqueued: Counter;
|
|
1504
|
+
messagesCompleted: Counter;
|
|
1505
|
+
messagesFailed: Counter;
|
|
1506
|
+
messagesRetried: Counter;
|
|
1507
|
+
messagesToDLQ: Counter;
|
|
1508
|
+
processingTime: Histogram;
|
|
1509
|
+
queueTime: Histogram;
|
|
1510
|
+
queueLength: ObservableGauge;
|
|
1511
|
+
masterQueueLength: ObservableGauge;
|
|
1512
|
+
inflightCount: ObservableGauge;
|
|
1513
|
+
dlqLength: ObservableGauge;
|
|
1514
|
+
}
|
|
1515
|
+
/**
|
|
1516
|
+
* Options for creating FairQueue telemetry.
|
|
1517
|
+
*/
|
|
1518
|
+
interface TelemetryOptions {
|
|
1519
|
+
tracer?: Tracer;
|
|
1520
|
+
meter?: Meter;
|
|
1521
|
+
/** Custom name for metrics prefix */
|
|
1522
|
+
name?: string;
|
|
1523
|
+
}
|
|
1524
|
+
/**
|
|
1525
|
+
* Telemetry helper for FairQueue.
|
|
1526
|
+
*
|
|
1527
|
+
* Provides:
|
|
1528
|
+
* - Span creation with proper attributes
|
|
1529
|
+
* - Metric recording
|
|
1530
|
+
* - Context propagation helpers
|
|
1531
|
+
*/
|
|
1532
|
+
declare class FairQueueTelemetry {
|
|
1533
|
+
#private;
|
|
1534
|
+
private tracer?;
|
|
1535
|
+
private meter?;
|
|
1536
|
+
private metrics?;
|
|
1537
|
+
private name;
|
|
1538
|
+
constructor(options: TelemetryOptions);
|
|
1539
|
+
/**
|
|
1540
|
+
* Create a traced span for an operation.
|
|
1541
|
+
* Returns the result of the function, or throws any error after recording it.
|
|
1542
|
+
*/
|
|
1543
|
+
trace<T>(name: string, fn: (span: Span) => Promise<T>, options?: {
|
|
1544
|
+
kind?: SpanKind;
|
|
1545
|
+
attributes?: Attributes;
|
|
1546
|
+
}): Promise<T>;
|
|
1547
|
+
/**
|
|
1548
|
+
* Synchronous version of trace.
|
|
1549
|
+
*/
|
|
1550
|
+
traceSync<T>(name: string, fn: (span: Span) => T, options?: {
|
|
1551
|
+
kind?: SpanKind;
|
|
1552
|
+
attributes?: Attributes;
|
|
1553
|
+
}): T;
|
|
1554
|
+
/**
|
|
1555
|
+
* Record a message enqueued.
|
|
1556
|
+
*/
|
|
1557
|
+
recordEnqueue(attributes?: Attributes): void;
|
|
1558
|
+
/**
|
|
1559
|
+
* Record a batch of messages enqueued.
|
|
1560
|
+
*/
|
|
1561
|
+
recordEnqueueBatch(count: number, attributes?: Attributes): void;
|
|
1562
|
+
/**
|
|
1563
|
+
* Record a message completed successfully.
|
|
1564
|
+
*/
|
|
1565
|
+
recordComplete(attributes?: Attributes): void;
|
|
1566
|
+
/**
|
|
1567
|
+
* Record a message processing failure.
|
|
1568
|
+
*/
|
|
1569
|
+
recordFailure(attributes?: Attributes): void;
|
|
1570
|
+
/**
|
|
1571
|
+
* Record a message retry.
|
|
1572
|
+
*/
|
|
1573
|
+
recordRetry(attributes?: Attributes): void;
|
|
1574
|
+
/**
|
|
1575
|
+
* Record a message sent to DLQ.
|
|
1576
|
+
*/
|
|
1577
|
+
recordDLQ(attributes?: Attributes): void;
|
|
1578
|
+
/**
|
|
1579
|
+
* Record message processing time.
|
|
1580
|
+
*
|
|
1581
|
+
* @param durationMs - Processing duration in milliseconds
|
|
1582
|
+
*/
|
|
1583
|
+
recordProcessingTime(durationMs: number, attributes?: Attributes): void;
|
|
1584
|
+
/**
|
|
1585
|
+
* Record time a message spent waiting in queue.
|
|
1586
|
+
*
|
|
1587
|
+
* @param durationMs - Queue wait time in milliseconds
|
|
1588
|
+
*/
|
|
1589
|
+
recordQueueTime(durationMs: number, attributes?: Attributes): void;
|
|
1590
|
+
/**
|
|
1591
|
+
* Register observable gauge callbacks.
|
|
1592
|
+
* Call this after FairQueue is initialized to register the gauge callbacks.
|
|
1593
|
+
*/
|
|
1594
|
+
registerGaugeCallbacks(callbacks: {
|
|
1595
|
+
getQueueLength?: (queueId: string) => Promise<number>;
|
|
1596
|
+
getMasterQueueLength?: (shardId: number) => Promise<number>;
|
|
1597
|
+
getInflightCount?: (shardId: number) => Promise<number>;
|
|
1598
|
+
getDLQLength?: (tenantId: string) => Promise<number>;
|
|
1599
|
+
shardCount?: number;
|
|
1600
|
+
observedQueues?: string[];
|
|
1601
|
+
observedTenants?: string[];
|
|
1602
|
+
}): void;
|
|
1603
|
+
/**
|
|
1604
|
+
* Create standard attributes for a message operation (for spans/traces).
|
|
1605
|
+
* Use this for span attributes where high cardinality is acceptable.
|
|
1606
|
+
*/
|
|
1607
|
+
messageAttributes(params: {
|
|
1608
|
+
queueId?: string;
|
|
1609
|
+
tenantId?: string;
|
|
1610
|
+
messageId?: string;
|
|
1611
|
+
attempt?: number;
|
|
1612
|
+
workerQueue?: string;
|
|
1613
|
+
consumerId?: string;
|
|
1614
|
+
}): Attributes;
|
|
1615
|
+
/**
|
|
1616
|
+
* Check if telemetry is enabled.
|
|
1617
|
+
*/
|
|
1618
|
+
get isEnabled(): boolean;
|
|
1619
|
+
/**
|
|
1620
|
+
* Check if tracing is enabled.
|
|
1621
|
+
*/
|
|
1622
|
+
get hasTracer(): boolean;
|
|
1623
|
+
/**
|
|
1624
|
+
* Check if metrics are enabled.
|
|
1625
|
+
*/
|
|
1626
|
+
get hasMetrics(): boolean;
|
|
1627
|
+
}
|
|
1628
|
+
/**
|
|
1629
|
+
* State for tracking a consumer loop's batched span.
|
|
1630
|
+
*/
|
|
1631
|
+
interface ConsumerLoopState {
|
|
1632
|
+
/** Countdown of iterations before starting a new span */
|
|
1633
|
+
perTraceCountdown: number;
|
|
1634
|
+
/** When the current trace started */
|
|
1635
|
+
traceStartedAt: Date;
|
|
1636
|
+
/** The current batched span */
|
|
1637
|
+
currentSpan?: Span;
|
|
1638
|
+
/** The context for the current batched span */
|
|
1639
|
+
currentSpanContext?: Context;
|
|
1640
|
+
/** Number of iterations in the current span */
|
|
1641
|
+
iterationsCount: number;
|
|
1642
|
+
/** Total iterations across all spans */
|
|
1643
|
+
totalIterationsCount: number;
|
|
1644
|
+
/** Running duration in milliseconds for the current span */
|
|
1645
|
+
runningDurationInMs: number;
|
|
1646
|
+
/** Stats counters for the current span */
|
|
1647
|
+
stats: Record<string, number>;
|
|
1648
|
+
/** Flag to force span end on next iteration */
|
|
1649
|
+
endSpanInNextIteration: boolean;
|
|
1650
|
+
}
|
|
1651
|
+
/**
|
|
1652
|
+
* Configuration for the BatchedSpanManager.
|
|
1653
|
+
*/
|
|
1654
|
+
interface BatchedSpanManagerOptions {
|
|
1655
|
+
/** The tracer to use for creating spans */
|
|
1656
|
+
tracer?: Tracer;
|
|
1657
|
+
/** Name prefix for spans */
|
|
1658
|
+
name: string;
|
|
1659
|
+
/** Maximum iterations before rotating the span */
|
|
1660
|
+
maxIterations: number;
|
|
1661
|
+
/** Maximum seconds before rotating the span */
|
|
1662
|
+
timeoutSeconds: number;
|
|
1663
|
+
/** Optional callback to get dynamic attributes when starting a new batched span */
|
|
1664
|
+
getDynamicAttributes?: () => Attributes;
|
|
1665
|
+
}
|
|
1666
|
+
/**
 * Manages batched spans for consumer loops.
 *
 * This allows multiple iterations to be grouped into a single parent span,
 * reducing the volume of spans while maintaining observability.
 */
declare class BatchedSpanManager {
    private tracer?;
    private name;
    private maxIterations;
    private timeoutSeconds;
    // Per-loop state, keyed by loopId (see ConsumerLoopState).
    private loopStates;
    private getDynamicAttributes?;
    constructor(options: BatchedSpanManagerOptions);
    /**
     * Initialize state for a consumer loop.
     */
    initializeLoop(loopId: string): void;
    /**
     * Get the state for a consumer loop.
     *
     * @returns The loop state, or undefined if the loop was never initialized.
     */
    getState(loopId: string): ConsumerLoopState | undefined;
    /**
     * Increment a stat counter for a loop.
     *
     * @param value - Amount to add; presumably defaults to 1 — TODO confirm against implementation.
     */
    incrementStat(loopId: string, statName: string, value?: number): void;
    /**
     * Mark that the span should end on the next iteration.
     */
    markForRotation(loopId: string): void;
    /**
     * Check if the span should be rotated (ended and a new one started).
     */
    shouldRotate(loopId: string): boolean;
    /**
     * End the current span for a loop and record stats.
     */
    endCurrentSpan(loopId: string): void;
    /**
     * Start a new batched span for a loop.
     */
    startNewSpan(loopId: string, attributes?: Attributes): void;
    /**
     * Execute a function within the batched span context.
     * Automatically handles span rotation and iteration tracking.
     */
    withBatchedSpan<T>(loopId: string, fn: (span: Span) => Promise<T>, options?: {
        iterationSpanName?: string;
        attributes?: Attributes;
    }): Promise<T>;
    /**
     * Clean up state for a loop when it's stopped.
     */
    cleanup(loopId: string): void;
    /**
     * Clean up all loop states.
     */
    cleanupAll(): void;
}
|
|
1725
|
+
/**
 * No-op telemetry instance for when telemetry is disabled.
 */
declare const noopTelemetry: FairQueueTelemetry;
|
|
1729
|
+
|
|
1730
|
+
/**
 * FairQueue is the main orchestrator for fair queue message routing.
 *
 * FairQueue handles:
 * - Master queue with sharding (using jump consistent hash)
 * - Fair scheduling via pluggable schedulers
 * - Multi-level concurrency limiting
 * - Visibility timeouts with heartbeats
 * - Routing messages to worker queues
 * - Retry strategies with dead letter queue
 * - OpenTelemetry tracing and metrics
 *
 * External consumers are responsible for:
 * - Running their own worker queue consumer loops
 * - Calling complete/release/fail APIs after processing
 *
 * @typeParam TPayloadSchema - Zod schema for message payload validation
 */
declare class FairQueue<TPayloadSchema extends z.ZodTypeAny = z.ZodUnknown> {
    #private;
    private options;
    private redis;
    private keys;
    private scheduler;
    private masterQueue;
    private concurrencyManager?;
    private visibilityManager;
    private workerQueueManager;
    private telemetry;
    private logger;
    private payloadSchema?;
    private validateOnEnqueue;
    private retryStrategy?;
    private deadLetterQueueEnabled;
    private shardCount;
    private consumerCount;
    private consumerIntervalMs;
    private visibilityTimeoutMs;
    private heartbeatIntervalMs;
    private reclaimIntervalMs;
    private workerQueueResolver;
    private batchClaimSize;
    // Cooloff: queues with repeated failures are temporarily skipped (see getQueueCooloffStatesSize).
    private cooloffEnabled;
    private cooloffThreshold;
    private cooloffPeriodMs;
    private maxCooloffStatesSize;
    private queueCooloffStates;
    private globalRateLimiter?;
    // Batched-span rotation bounds for consumer loop tracing (see BatchedSpanManager).
    private consumerTraceMaxIterations;
    private consumerTraceTimeoutSeconds;
    private batchedSpanManager;
    private isRunning;
    private abortController;
    private masterQueueConsumerLoops;
    private reclaimLoop?;
    private queueDescriptorCache;
    constructor(options: FairQueueOptions<TPayloadSchema>);
    /**
     * Register observable gauge callbacks for telemetry.
     * Call this after FairQueue is created to enable gauge metrics.
     *
     * @param options.observedTenants - List of tenant IDs to observe for DLQ metrics
     */
    registerTelemetryGauges(options?: {
        observedTenants?: string[];
    }): void;
    /**
     * Enqueue a single message to a queue.
     *
     * @returns The ID of the enqueued message.
     */
    enqueue(options: EnqueueOptions<z.infer<TPayloadSchema>>): Promise<string>;
    /**
     * Enqueue multiple messages to a queue.
     *
     * @returns The IDs of the enqueued messages.
     */
    enqueueBatch(options: EnqueueBatchOptions<z.infer<TPayloadSchema>>): Promise<string[]>;
    /**
     * Get messages from the dead letter queue for a tenant.
     */
    getDeadLetterMessages(tenantId: string, limit?: number): Promise<DeadLetterMessage<z.infer<TPayloadSchema>>[]>;
    /**
     * Redrive a message from DLQ back to its original queue.
     *
     * @returns true if the message was redriven.
     */
    redriveMessage(tenantId: string, messageId: string): Promise<boolean>;
    /**
     * Redrive all messages from DLQ back to their original queues.
     *
     * @returns The number of messages redriven.
     */
    redriveAll(tenantId: string): Promise<number>;
    /**
     * Purge all messages from a tenant's DLQ.
     *
     * @returns The number of messages purged.
     */
    purgeDeadLetterQueue(tenantId: string): Promise<number>;
    /**
     * Get the number of messages in a tenant's DLQ.
     */
    getDeadLetterQueueLength(tenantId: string): Promise<number>;
    /**
     * Get the size of the in-memory queue descriptor cache.
     * This cache stores metadata for queues that have been enqueued.
     * The cache is cleaned up when queues are fully processed.
     */
    getQueueDescriptorCacheSize(): number;
    /**
     * Get the size of the in-memory cooloff states cache.
     * This cache tracks queues that are in cooloff due to repeated failures.
     * The cache is cleaned up when queues are fully processed or cooloff expires.
     */
    getQueueCooloffStatesSize(): number;
    /**
     * Get all in-memory cache sizes for monitoring.
     * Useful for adding as span attributes.
     */
    getCacheSizes(): {
        descriptorCacheSize: number;
        cooloffStatesSize: number;
    };
    /**
     * Start the master queue consumer loops and reclaim loop.
     * FairQueue claims messages and pushes them to worker queues.
     * External consumers are responsible for consuming from worker queues.
     */
    start(): void;
    /**
     * Stop the consumer loops gracefully.
     */
    stop(): Promise<void>;
    /**
     * Close all resources.
     */
    close(): Promise<void>;
    /**
     * Get the number of messages in a queue.
     */
    getQueueLength(queueId: string): Promise<number>;
    /**
     * Get total queue count across all shards.
     */
    getTotalQueueCount(): Promise<number>;
    /**
     * Get total in-flight message count.
     */
    getTotalInflightCount(): Promise<number>;
    /**
     * Get the shard ID for a queue.
     */
    getShardForQueue(queueId: string): number;
    /**
     * Get message data from in-flight storage.
     * External consumers use this to retrieve the stored message after popping from worker queue.
     *
     * @param messageId - The ID of the message
     * @param queueId - The queue ID the message belongs to
     * @returns The stored message or null if not found
     */
    getMessageData(messageId: string, queueId: string): Promise<StoredMessage<z.infer<TPayloadSchema>> | null>;
    /**
     * Extend the visibility timeout for a message.
     * External consumers should call this periodically during long-running processing.
     *
     * @param messageId - The ID of the message
     * @param queueId - The queue ID the message belongs to
     * @returns true if heartbeat was successful
     */
    heartbeatMessage(messageId: string, queueId: string): Promise<boolean>;
    /**
     * Mark a message as successfully processed.
     * This removes the message from in-flight and releases concurrency.
     *
     * @param messageId - The ID of the message
     * @param queueId - The queue ID the message belongs to
     */
    completeMessage(messageId: string, queueId: string): Promise<void>;
    /**
     * Release a message back to the queue for processing by another consumer.
     * The message is placed at the back of the queue.
     *
     * @param messageId - The ID of the message
     * @param queueId - The queue ID the message belongs to
     */
    releaseMessage(messageId: string, queueId: string): Promise<void>;
    /**
     * Mark a message as failed. This will trigger retry logic if configured,
     * or move the message to the dead letter queue.
     *
     * @param messageId - The ID of the message
     * @param queueId - The queue ID the message belongs to
     * @param error - Optional error that caused the failure
     */
    failMessage(messageId: string, queueId: string, error?: Error): Promise<void>;
}
|
|
1918
|
+
/**
 * Module augmentation: typed signatures for the custom Lua commands that
 * FairQueue registers on the ioredis client (via defineCommand elsewhere in
 * the package). Key/argument order must match the Lua script definitions —
 * NOTE(review): verify against the script registration before changing.
 */
declare module "@internal/redis" {
    interface RedisCommander<Context> {
        enqueueMessageAtomic(queueKey: string, queueItemsKey: string, masterQueueKey: string, queueId: string, messageId: string, timestamp: string, payload: string): Promise<number>;
        enqueueBatchAtomic(queueKey: string, queueItemsKey: string, masterQueueKey: string, queueId: string, ...args: string[]): Promise<number>;
        updateMasterQueueIfEmpty(masterQueueKey: string, queueKey: string, queueId: string): Promise<number>;
    }
}
|
|
1925
|
+
|
|
1926
|
+
// Public API surface of @trigger.dev/redis-worker (types and values).
export { type AnyMessageCatalog, type AnyQueueItem, BaseScheduler, BatchedSpanManager, type BatchedSpanManagerOptions, CallbackFairQueueKeyProducer, type ClaimResult, type ConcurrencyCheckResult, type ConcurrencyGroupConfig, ConcurrencyManager, type ConcurrencyManagerOptions, type ConcurrencyState, type ConsumerLoopState, type CooloffOptions, CronSchema, CustomRetry, DRRScheduler, type DRRSchedulerConfig, type DeadLetterMessage, DefaultFairQueueKeyProducer, type EnqueueBatchOptions, type EnqueueOptions, ExponentialBackoffRetry, FairQueue, FairQueueAttributes, type FairQueueKeyProducer, type FairQueueMetrics, type FairQueueOptions, FairQueueTelemetry, type FairScheduler, FixedDelayRetry, type GlobalRateLimiter, ImmediateRetry, type InFlightMessage, type JobHandler, type JobHandlerParams, LinearBackoffRetry, MasterQueue, type MasterQueueOptions, type MessageCatalogKey, type MessageCatalogSchema, type MessageCatalogValue, type MessageHandler, type MessageHandlerContext, MessagingAttributes, NoRetry, NoopScheduler, type QueueCooloffState, type QueueDescriptor, type QueueItem, type QueueMessage, type QueueWithScore, type RetryOptions, type RetryStrategy, RoundRobinScheduler, type SchedulerContext, SimpleQueue, type StoredMessage, type TelemetryOptions, type TenantQueues, VisibilityManager, type VisibilityManagerOptions, WeightedScheduler, type WeightedSchedulerBiases, type WeightedSchedulerConfig, Worker, type WorkerCatalog, type WorkerConcurrencyOptions, WorkerQueueManager, type WorkerQueueManagerOptions, type WorkerQueueOptions, createDefaultRetryStrategy, defaultRetryOptions, isAbortError, noopTelemetry };
|