@trigger.dev/redis-worker 4.3.1 → 4.3.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.d.ts CHANGED
@@ -1,7 +1,7 @@
  import { Callback, Result, RedisOptions, Redis } from '@internal/redis';
  import { Logger } from '@trigger.dev/core/logger';
  import { z } from 'zod';
- import { Tracer, Meter, Counter, Histogram, ObservableGauge, Span, SpanKind, Attributes } from '@internal/tracing';
+ import { Tracer, Meter, Counter, Histogram, ObservableGauge, Span, SpanKind, Attributes, Context } from '@internal/tracing';
  import { RetryOptions as RetryOptions$1 } from '@trigger.dev/core/v3/schemas';
 
  interface MessageCatalogSchema {
@@ -229,6 +229,15 @@ declare class Worker<TCatalog extends WorkerCatalog> {
  stop(): Promise<void>;
  }
 
+ /**
+ * Check if an error is an AbortError.
+ *
+ * This handles both:
+ * - Custom abort errors created with `new Error("AbortError")` (sets .message)
+ * - Native Node.js AbortError from timers/promises (sets .name)
+ */
+ declare function isAbortError(error: unknown): boolean;
+
  /**
  * RetryStrategy interface for pluggable retry logic.
  */
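A minimal sketch (not from the package) of how a caller might use the newly exported isAbortError helper to distinguish a shutdown abort from a real failure; the loop body and import path from the package root are assumptions based on the export list at the end of this file.

    // Sketch only: assumes isAbortError is importable from the package root.
    import { setTimeout as sleep } from "node:timers/promises";
    import { isAbortError } from "@trigger.dev/redis-worker";

    async function pollLoop(signal: AbortSignal): Promise<void> {
      try {
        while (!signal.aborted) {
          // ... one iteration of work here ...
          await sleep(1_000, undefined, { signal }); // rejects with an AbortError when aborted
        }
      } catch (error) {
        // Treat shutdown aborts as normal termination; rethrow anything else.
        if (isAbortError(error)) return;
        throw error;
      }
    }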
@@ -481,6 +490,12 @@ interface FairScheduler {
  * Optional - not all schedulers need to track state.
  */
  recordProcessed?(tenantId: string, queueId: string): Promise<void>;
+ /**
+ * Called after processing multiple messages to update scheduler state.
+ * Batch variant for efficiency - reduces Redis calls when processing multiple messages.
+ * Optional - falls back to calling recordProcessed multiple times if not implemented.
+ */
+ recordProcessedBatch?(tenantId: string, queueId: string, count: number): Promise<void>;
  /**
  * Initialize the scheduler (called once on startup).
  */
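A hedged sketch of the documented fallback relationship between the new optional batch hook and the single-message hook; the helper function is illustrative and not part of the package.

    // Sketch only: mirrors the doc comment above - use the batch hook when present,
    // otherwise call recordProcessed once per processed message.
    import type { FairScheduler } from "@trigger.dev/redis-worker";

    async function recordProcessedCount(
      scheduler: FairScheduler,
      tenantId: string,
      queueId: string,
      count: number
    ): Promise<void> {
      if (scheduler.recordProcessedBatch) {
        // One round trip for the whole batch.
        await scheduler.recordProcessedBatch(tenantId, queueId, count);
      } else if (scheduler.recordProcessed) {
        // Fallback: one call per processed message.
        for (let i = 0; i < count; i++) {
          await scheduler.recordProcessed(tenantId, queueId);
        }
      }
    }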
@@ -544,14 +559,15 @@ interface FairQueueKeyProducer {
  }
  /**
  * Worker queue configuration options.
+ * Worker queues are always enabled - FairQueue routes messages to worker queues,
+ * and external consumers are responsible for consuming from those queues.
  */
  interface WorkerQueueOptions<TPayload = unknown> {
- /** Whether to enable worker queues (default: false for backwards compatibility) */
- enabled: boolean;
- /** Blocking pop timeout in seconds (default: 10) */
- blockingTimeoutSeconds?: number;
- /** Function to resolve which worker queue a message should go to */
- resolveWorkerQueue?: (message: StoredMessage<TPayload>) => string;
+ /**
+ * Function to resolve which worker queue a message should go to.
+ * This is called during the claim-and-push phase to determine the target queue.
+ */
+ resolveWorkerQueue: (message: StoredMessage<TPayload>) => string;
  }
  /**
  * Retry and dead letter queue configuration.
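Since `enabled` and `blockingTimeoutSeconds` are gone and `resolveWorkerQueue` is now required, a minimal sketch of the new option shape; the payload type and the constant queue name are illustrative, and a real resolver would typically inspect the message (its fields are not shown in this diff).

    // Sketch only: payload type and returned queue name are illustrative.
    import type { StoredMessage, WorkerQueueOptions } from "@trigger.dev/redis-worker";

    type Payload = { runId: string }; // hypothetical payload shape

    const workerQueue: WorkerQueueOptions<Payload> = {
      // Called during the claim-and-push phase to pick the target worker queue.
      resolveWorkerQueue: (_message: StoredMessage<Payload>) => "worker-queue:default",
    };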
@@ -572,6 +588,8 @@ interface CooloffOptions {
  threshold?: number;
  /** Duration of cooloff period in milliseconds (default: 10000) */
  periodMs?: number;
+ /** Maximum number of cooloff state entries before triggering cleanup (default: 1000) */
+ maxStatesSize?: number;
  }
  /**
  * Options for creating a FairQueue instance.
@@ -593,8 +611,11 @@ interface FairQueueOptions<TPayloadSchema extends z.ZodTypeAny = z.ZodUnknown> {
  shardCount?: number;
  /** Concurrency group configurations */
  concurrencyGroups?: ConcurrencyGroupConfig[];
- /** Worker queue configuration */
- workerQueue?: WorkerQueueOptions<z.infer<TPayloadSchema>>;
+ /**
+ * Worker queue configuration.
+ * FairQueue routes messages to worker queues; external consumers handle consumption.
+ */
+ workerQueue: WorkerQueueOptions<z.infer<TPayloadSchema>>;
  /** Retry and dead letter queue configuration */
  retry?: RetryOptions;
  /** Visibility timeout in milliseconds (default: 30000) */
@@ -609,6 +630,12 @@ interface FairQueueOptions<TPayloadSchema extends z.ZodTypeAny = z.ZodUnknown> {
  consumerIntervalMs?: number;
  /** Whether to start consumers on initialization (default: true) */
  startConsumers?: boolean;
+ /** Maximum number of messages to claim in a single batch operation (default: 10) */
+ batchClaimSize?: number;
+ /** Maximum iterations before starting a new trace span (default: 500) */
+ consumerTraceMaxIterations?: number;
+ /** Maximum seconds before starting a new trace span (default: 60) */
+ consumerTraceTimeoutSeconds?: number;
  /** Queue cooloff configuration */
  cooloff?: CooloffOptions;
  /** Logger instance */
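A hedged sketch showing just the FairQueueOptions fields changed or added in this release (the now-required workerQueue plus the new batch/trace/cooloff knobs); FairQueueOptions has other required members (redis, keys, scheduler, ...) that are outside this diff, so Partial<> is used and the concrete values are illustrative.

    // Sketch only: highlights the new options; other required members are omitted via Partial<>.
    import { z } from "zod";
    import type { FairQueueOptions } from "@trigger.dev/redis-worker";

    const payloadSchema = z.object({ runId: z.string() });

    const tuning: Partial<FairQueueOptions<typeof payloadSchema>> = {
      // Worker queue routing is now always on, so this option is required.
      workerQueue: {
        resolveWorkerQueue: () => "worker-queue:default",
      },
      batchClaimSize: 10,              // max messages claimed per batch operation
      consumerTraceMaxIterations: 500, // rotate the batched consumer span after 500 iterations...
      consumerTraceTimeoutSeconds: 60, // ...or after 60 seconds, whichever comes first
      cooloff: {
        threshold: 5,
        periodMs: 10_000,
        maxStatesSize: 1_000,          // cap on in-memory cooloff entries before cleanup
      },
    };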
@@ -718,6 +745,8 @@ interface DRRSchedulerConfig {
  quantum: number;
  /** Maximum accumulated deficit (prevents starvation) */
  maxDeficit: number;
+ /** Maximum queues to fetch from master queue (default: 1000) */
+ masterQueueLimit?: number;
  /** Redis options for state storage */
  redis: RedisOptions;
  /** Key producer */
@@ -969,6 +998,11 @@ declare class ConcurrencyManager {
  * Get current concurrency for a specific group.
  */
  getCurrentConcurrency(groupName: string, groupId: string): Promise<number>;
+ /**
+ * Get available capacity for a queue across all concurrency groups.
+ * Returns the minimum available capacity across all groups.
+ */
+ getAvailableCapacity(queue: QueueDescriptor): Promise<number>;
  /**
  * Get concurrency limit for a specific group.
  */
@@ -1054,6 +1088,19 @@ declare class VisibilityManager {
  * @returns Claim result with the message if successful
  */
  claim<TPayload = unknown>(queueId: string, queueKey: string, queueItemsKey: string, consumerId: string, timeoutMs?: number): Promise<ClaimResult<TPayload>>;
+ /**
+ * Claim multiple messages for processing (batch claim).
+ * Moves up to maxCount messages from the queue to the in-flight set.
+ *
+ * @param queueId - The queue to claim from
+ * @param queueKey - The Redis key for the queue sorted set
+ * @param queueItemsKey - The Redis key for the queue items hash
+ * @param consumerId - ID of the consumer claiming the messages
+ * @param maxCount - Maximum number of messages to claim
+ * @param timeoutMs - Visibility timeout in milliseconds
+ * @returns Array of claimed messages
+ */
+ claimBatch<TPayload = unknown>(queueId: string, queueKey: string, queueItemsKey: string, consumerId: string, maxCount: number, timeoutMs?: number): Promise<Array<InFlightMessage<TPayload>>>;
  /**
  * Extend the visibility timeout for a message (heartbeat).
  *
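A hedged sketch of calling the new claimBatch method; in the real code the Redis keys come from a FairQueueKeyProducer, which is not shown in this diff, so the key strings and payload type here are illustrative.

    // Sketch only: key shapes and payload type are illustrative, not the package's real keys.
    import type { InFlightMessage, VisibilityManager } from "@trigger.dev/redis-worker";

    async function claimUpToTen(
      visibility: VisibilityManager,
      queueId: string,
      consumerId: string
    ): Promise<Array<InFlightMessage<{ runId: string }>>> {
      return visibility.claimBatch<{ runId: string }>(
        queueId,
        `{queue}:${queueId}`,        // queueKey (sorted set) - illustrative
        `{queue}:${queueId}:items`,  // queueItemsKey (hash) - illustrative
        consumerId,
        10,                          // maxCount
        30_000                       // visibility timeout in ms (optional)
      );
    }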
@@ -1079,9 +1126,25 @@ declare class VisibilityManager {
  * @param queueId - The queue ID
  * @param queueKey - The Redis key for the queue
  * @param queueItemsKey - The Redis key for the queue items hash
+ * @param masterQueueKey - The Redis key for the master queue
  * @param score - Optional score for the message (defaults to now)
  */
- release<TPayload = unknown>(messageId: string, queueId: string, queueKey: string, queueItemsKey: string, score?: number): Promise<void>;
+ release<TPayload = unknown>(messageId: string, queueId: string, queueKey: string, queueItemsKey: string, masterQueueKey: string, score?: number): Promise<void>;
+ /**
+ * Release multiple messages back to their queue in a single operation.
+ * Used when processing fails or consumer wants to retry later.
+ * All messages must belong to the same queue.
+ *
+ * @param messages - Array of messages to release (must all have same queueId)
+ * @param queueId - The queue ID
+ * @param queueKey - The Redis key for the queue
+ * @param queueItemsKey - The Redis key for the queue items hash
+ * @param masterQueueKey - The Redis key for the master queue
+ * @param score - Optional score for the messages (defaults to now)
+ */
+ releaseBatch(messages: Array<{
+ messageId: string;
+ }>, queueId: string, queueKey: string, queueItemsKey: string, masterQueueKey: string, score?: number): Promise<void>;
  /**
  * Reclaim timed-out messages from a shard.
  * Returns messages to their original queues.
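A hedged sketch of the new releaseBatch call, which now (like release) also requires the master queue key; all messages in one call must belong to the same queue, and the key strings below are illustrative.

    // Sketch only: key shapes are illustrative; masterQueueKey is new in this version.
    import type { VisibilityManager } from "@trigger.dev/redis-worker";

    async function releaseAll(
      visibility: VisibilityManager,
      queueId: string,
      messageIds: string[]
    ): Promise<void> {
      await visibility.releaseBatch(
        messageIds.map((messageId) => ({ messageId })),
        queueId,
        `{queue}:${queueId}`,        // queueKey - illustrative
        `{queue}:${queueId}:items`,  // queueItemsKey - illustrative
        "masterQueue",               // masterQueueKey - illustrative
        Date.now()                   // optional score (defaults to now)
      );
    }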
@@ -1093,6 +1156,7 @@ declare class VisibilityManager {
  reclaimTimedOut(shardId: number, getQueueKeys: (queueId: string) => {
  queueKey: string;
  queueItemsKey: string;
+ masterQueueKey: string;
  }): Promise<number>;
  /**
  * Get all in-flight messages for a shard.
@@ -1118,7 +1182,9 @@ declare class VisibilityManager {
  declare module "@internal/redis" {
  interface RedisCommander<Context> {
  claimMessage(queueKey: string, queueItemsKey: string, inflightKey: string, inflightDataKey: string, queueId: string, consumerId: string, deadline: string): Promise<[string, string] | null>;
- releaseMessage(inflightKey: string, inflightDataKey: string, queueKey: string, queueItemsKey: string, member: string, messageId: string, score: string): Promise<number>;
+ claimMessageBatch(queueKey: string, queueItemsKey: string, inflightKey: string, inflightDataKey: string, queueId: string, deadline: string, maxCount: string): Promise<string[]>;
+ releaseMessage(inflightKey: string, inflightDataKey: string, queueKey: string, queueItemsKey: string, masterQueueKey: string, member: string, messageId: string, score: string, queueId: string): Promise<number>;
+ releaseMessageBatch(inflightKey: string, inflightDataKey: string, queueKey: string, queueItemsKey: string, masterQueueKey: string, score: string, queueId: string, ...membersAndMessageIds: string[]): Promise<number>;
  heartbeatMessage(inflightKey: string, member: string, newDeadline: string): Promise<number>;
  }
  }
@@ -1239,6 +1305,12 @@ declare abstract class BaseScheduler implements FairScheduler {
  * Default implementation does nothing.
  */
  recordProcessed(_tenantId: string, _queueId: string): Promise<void>;
+ /**
+ * Called after processing multiple messages to update scheduler state.
+ * Batch variant for efficiency - reduces Redis calls when processing multiple messages.
+ * Default implementation does nothing.
+ */
+ recordProcessedBatch(_tenantId: string, _queueId: string, _count: number): Promise<void>;
  /**
  * Initialize the scheduler.
  * Default implementation does nothing.
@@ -1294,6 +1366,7 @@ declare class DRRScheduler extends BaseScheduler {
  private keys;
  private quantum;
  private maxDeficit;
+ private masterQueueLimit;
  private logger;
  constructor(config: DRRSchedulerConfig);
  /**
@@ -1313,6 +1386,11 @@ declare class DRRScheduler extends BaseScheduler {
  * Decrements the tenant's deficit.
  */
  recordProcessed(tenantId: string, _queueId: string): Promise<void>;
+ /**
+ * Record that multiple messages were processed from a tenant.
+ * Decrements the tenant's deficit by count atomically.
+ */
+ recordProcessedBatch(tenantId: string, _queueId: string, count: number): Promise<void>;
  close(): Promise<void>;
  /**
  * Get the current deficit for a tenant.
@@ -1332,6 +1410,7 @@ declare module "@internal/redis" {
  interface RedisCommander<Context> {
  drrAddQuantum(deficitKey: string, quantum: string, maxDeficit: string, ...tenantIds: string[]): Promise<string[]>;
  drrDecrementDeficit(deficitKey: string, tenantId: string): Promise<string>;
+ drrDecrementDeficitBatch(deficitKey: string, tenantId: string, count: string): Promise<string>;
  }
  }
 
@@ -1522,7 +1601,8 @@ declare class FairQueueTelemetry {
  observedTenants?: string[];
  }): void;
  /**
- * Create standard attributes for a message operation.
+ * Create standard attributes for a message operation (for spans/traces).
+ * Use this for span attributes where high cardinality is acceptable.
  */
  messageAttributes(params: {
  queueId?: string;
@@ -1545,23 +1625,124 @@ declare class FairQueueTelemetry {
  */
  get hasMetrics(): boolean;
  }
+ /**
+ * State for tracking a consumer loop's batched span.
+ */
+ interface ConsumerLoopState {
+ /** Countdown of iterations before starting a new span */
+ perTraceCountdown: number;
+ /** When the current trace started */
+ traceStartedAt: Date;
+ /** The current batched span */
+ currentSpan?: Span;
+ /** The context for the current batched span */
+ currentSpanContext?: Context;
+ /** Number of iterations in the current span */
+ iterationsCount: number;
+ /** Total iterations across all spans */
+ totalIterationsCount: number;
+ /** Running duration in milliseconds for the current span */
+ runningDurationInMs: number;
+ /** Stats counters for the current span */
+ stats: Record<string, number>;
+ /** Flag to force span end on next iteration */
+ endSpanInNextIteration: boolean;
+ }
+ /**
+ * Configuration for the BatchedSpanManager.
+ */
+ interface BatchedSpanManagerOptions {
+ /** The tracer to use for creating spans */
+ tracer?: Tracer;
+ /** Name prefix for spans */
+ name: string;
+ /** Maximum iterations before rotating the span */
+ maxIterations: number;
+ /** Maximum seconds before rotating the span */
+ timeoutSeconds: number;
+ /** Optional callback to get dynamic attributes when starting a new batched span */
+ getDynamicAttributes?: () => Attributes;
+ }
+ /**
+ * Manages batched spans for consumer loops.
+ *
+ * This allows multiple iterations to be grouped into a single parent span,
+ * reducing the volume of spans while maintaining observability.
+ */
+ declare class BatchedSpanManager {
+ private tracer?;
+ private name;
+ private maxIterations;
+ private timeoutSeconds;
+ private loopStates;
+ private getDynamicAttributes?;
+ constructor(options: BatchedSpanManagerOptions);
+ /**
+ * Initialize state for a consumer loop.
+ */
+ initializeLoop(loopId: string): void;
+ /**
+ * Get the state for a consumer loop.
+ */
+ getState(loopId: string): ConsumerLoopState | undefined;
+ /**
+ * Increment a stat counter for a loop.
+ */
+ incrementStat(loopId: string, statName: string, value?: number): void;
+ /**
+ * Mark that the span should end on the next iteration.
+ */
+ markForRotation(loopId: string): void;
+ /**
+ * Check if the span should be rotated (ended and a new one started).
+ */
+ shouldRotate(loopId: string): boolean;
+ /**
+ * End the current span for a loop and record stats.
+ */
+ endCurrentSpan(loopId: string): void;
+ /**
+ * Start a new batched span for a loop.
+ */
+ startNewSpan(loopId: string, attributes?: Attributes): void;
+ /**
+ * Execute a function within the batched span context.
+ * Automatically handles span rotation and iteration tracking.
+ */
+ withBatchedSpan<T>(loopId: string, fn: (span: Span) => Promise<T>, options?: {
+ iterationSpanName?: string;
+ attributes?: Attributes;
+ }): Promise<T>;
+ /**
+ * Clean up state for a loop when it's stopped.
+ */
+ cleanup(loopId: string): void;
+ /**
+ * Clean up all loop states.
+ */
+ cleanupAll(): void;
+ }
  /**
  * No-op telemetry instance for when telemetry is disabled.
  */
  declare const noopTelemetry: FairQueueTelemetry;
 
  /**
- * FairQueue is the main orchestrator for fair queue processing.
+ * FairQueue is the main orchestrator for fair queue message routing.
  *
- * It coordinates:
+ * FairQueue handles:
  * - Master queue with sharding (using jump consistent hash)
  * - Fair scheduling via pluggable schedulers
  * - Multi-level concurrency limiting
  * - Visibility timeouts with heartbeats
- * - Worker queues with blocking pop
+ * - Routing messages to worker queues
  * - Retry strategies with dead letter queue
  * - OpenTelemetry tracing and metrics
  *
+ * External consumers are responsible for:
+ * - Running their own worker queue consumer loops
+ * - Calling complete/release/fail APIs after processing
+ *
  * @typeParam TPayloadSchema - Zod schema for message payload validation
  */
  declare class FairQueue<TPayloadSchema extends z.ZodTypeAny = z.ZodUnknown> {
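A hedged sketch of driving a consumer loop through the new BatchedSpanManager so that many iterations share one parent span; the loop body and stat name are illustrative, and the tracer is omitted here (the options type allows it to be undefined).

    // Sketch only: rotation limits and loop body are illustrative.
    import { BatchedSpanManager } from "@trigger.dev/redis-worker";

    const spans = new BatchedSpanManager({
      name: "fairqueue.consumer",   // span name prefix
      maxIterations: 500,           // rotate the parent span after 500 iterations...
      timeoutSeconds: 60,           // ...or after 60 seconds, whichever comes first
    });

    async function runLoop(loopId: string, signal: AbortSignal): Promise<void> {
      spans.initializeLoop(loopId);
      try {
        while (!signal.aborted) {
          await spans.withBatchedSpan(loopId, async () => {
            // ... one iteration of claim-and-push work, recorded under the batched span ...
            spans.incrementStat(loopId, "iterations");
          });
        }
      } finally {
        spans.cleanup(loopId);
      }
    }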
@@ -1573,7 +1754,7 @@ declare class FairQueue<TPayloadSchema extends z.ZodTypeAny = z.ZodUnknown> {
  private masterQueue;
  private concurrencyManager?;
  private visibilityManager;
- private workerQueueManager?;
+ private workerQueueManager;
  private telemetry;
  private logger;
  private payloadSchema?;
@@ -1586,19 +1767,20 @@ declare class FairQueue<TPayloadSchema extends z.ZodTypeAny = z.ZodUnknown> {
  private visibilityTimeoutMs;
  private heartbeatIntervalMs;
  private reclaimIntervalMs;
- private workerQueueEnabled;
- private workerQueueBlockingTimeoutSeconds;
- private workerQueueResolver?;
+ private workerQueueResolver;
+ private batchClaimSize;
  private cooloffEnabled;
  private cooloffThreshold;
  private cooloffPeriodMs;
+ private maxCooloffStatesSize;
  private queueCooloffStates;
  private globalRateLimiter?;
- private messageHandler?;
+ private consumerTraceMaxIterations;
+ private consumerTraceTimeoutSeconds;
+ private batchedSpanManager;
  private isRunning;
  private abortController;
  private masterQueueConsumerLoops;
- private workerQueueConsumerLoops;
  private reclaimLoop?;
  private queueDescriptorCache;
  constructor(options: FairQueueOptions<TPayloadSchema>);
@@ -1611,10 +1793,6 @@ declare class FairQueue<TPayloadSchema extends z.ZodTypeAny = z.ZodUnknown> {
  registerTelemetryGauges(options?: {
  observedTenants?: string[];
  }): void;
- /**
- * Set the message handler for processing dequeued messages.
- */
- onMessage(handler: MessageHandler<z.infer<TPayloadSchema>>): void;
  /**
  * Enqueue a single message to a queue.
  */
@@ -1644,7 +1822,29 @@ declare class FairQueue<TPayloadSchema extends z.ZodTypeAny = z.ZodUnknown> {
  */
  getDeadLetterQueueLength(tenantId: string): Promise<number>;
  /**
- * Start the consumer loops and reclaim loop.
+ * Get the size of the in-memory queue descriptor cache.
+ * This cache stores metadata for queues that have been enqueued.
+ * The cache is cleaned up when queues are fully processed.
+ */
+ getQueueDescriptorCacheSize(): number;
+ /**
+ * Get the size of the in-memory cooloff states cache.
+ * This cache tracks queues that are in cooloff due to repeated failures.
+ * The cache is cleaned up when queues are fully processed or cooloff expires.
+ */
+ getQueueCooloffStatesSize(): number;
+ /**
+ * Get all in-memory cache sizes for monitoring.
+ * Useful for adding as span attributes.
+ */
+ getCacheSizes(): {
+ descriptorCacheSize: number;
+ cooloffStatesSize: number;
+ };
+ /**
+ * Start the master queue consumer loops and reclaim loop.
+ * FairQueue claims messages and pushes them to worker queues.
+ * External consumers are responsible for consuming from worker queues.
  */
  start(): void;
  /**
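A hedged sketch of reporting the new in-memory cache sizes as span attributes, following the "useful for adding as span attributes" note above; the OpenTelemetry import and attribute names are assumptions, since the package's own tracing types come from @internal/tracing.

    // Sketch only: assumes an OpenTelemetry-style span; attribute names are illustrative.
    import type { Span } from "@opentelemetry/api";
    import type { FairQueue } from "@trigger.dev/redis-worker";

    function recordCacheSizes(queue: FairQueue, span: Span): void {
      const { descriptorCacheSize, cooloffStatesSize } = queue.getCacheSizes();
      span.setAttributes({
        "fairqueue.descriptor_cache_size": descriptorCacheSize,
        "fairqueue.cooloff_states_size": cooloffStatesSize,
      });
    }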
@@ -1671,6 +1871,49 @@ declare class FairQueue<TPayloadSchema extends z.ZodTypeAny = z.ZodUnknown> {
  * Get the shard ID for a queue.
  */
  getShardForQueue(queueId: string): number;
+ /**
+ * Get message data from in-flight storage.
+ * External consumers use this to retrieve the stored message after popping from worker queue.
+ *
+ * @param messageId - The ID of the message
+ * @param queueId - The queue ID the message belongs to
+ * @returns The stored message or null if not found
+ */
+ getMessageData(messageId: string, queueId: string): Promise<StoredMessage<z.infer<TPayloadSchema>> | null>;
+ /**
+ * Extend the visibility timeout for a message.
+ * External consumers should call this periodically during long-running processing.
+ *
+ * @param messageId - The ID of the message
+ * @param queueId - The queue ID the message belongs to
+ * @returns true if heartbeat was successful
+ */
+ heartbeatMessage(messageId: string, queueId: string): Promise<boolean>;
+ /**
+ * Mark a message as successfully processed.
+ * This removes the message from in-flight and releases concurrency.
+ *
+ * @param messageId - The ID of the message
+ * @param queueId - The queue ID the message belongs to
+ */
+ completeMessage(messageId: string, queueId: string): Promise<void>;
+ /**
+ * Release a message back to the queue for processing by another consumer.
+ * The message is placed at the back of the queue.
+ *
+ * @param messageId - The ID of the message
+ * @param queueId - The queue ID the message belongs to
+ */
+ releaseMessage(messageId: string, queueId: string): Promise<void>;
+ /**
+ * Mark a message as failed. This will trigger retry logic if configured,
+ * or move the message to the dead letter queue.
+ *
+ * @param messageId - The ID of the message
+ * @param queueId - The queue ID the message belongs to
+ * @param error - Optional error that caused the failure
+ */
+ failMessage(messageId: string, queueId: string, error?: Error): Promise<void>;
  }
  declare module "@internal/redis" {
  interface RedisCommander<Context> {
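A hedged sketch of an external worker-queue consumer built on the new FairQueue APIs: pop an id from the worker queue, fetch the stored message, heartbeat while processing, then complete, release, or fail it. How ids are popped from the worker queue (and their encoding) is outside this diff, so popNextId is a hypothetical placeholder supplied by the caller.

    // Sketch only: popNextId is hypothetical; the processing step is left as a comment.
    import { isAbortError } from "@trigger.dev/redis-worker";
    import type { FairQueue } from "@trigger.dev/redis-worker";

    async function consume(
      queue: FairQueue,
      popNextId: () => Promise<{ messageId: string; queueId: string } | null>,
      signal: AbortSignal
    ): Promise<void> {
      while (!signal.aborted) {
        const next = await popNextId();
        if (!next) continue;

        const { messageId, queueId } = next;
        const message = await queue.getMessageData(messageId, queueId);
        if (!message) continue; // already reclaimed or completed elsewhere

        // Keep the visibility timeout alive while processing.
        const heartbeat = setInterval(() => {
          void queue.heartbeatMessage(messageId, queueId);
        }, 10_000);

        try {
          // ... process `message` here ...
          await queue.completeMessage(messageId, queueId);
        } catch (error) {
          if (isAbortError(error)) {
            // Shutting down: put the message back for another consumer.
            await queue.releaseMessage(messageId, queueId);
          } else {
            // Trigger retry / dead letter handling.
            await queue.failMessage(messageId, queueId, error as Error);
          }
        } finally {
          clearInterval(heartbeat);
        }
      }
    }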
@@ -1680,4 +1923,4 @@ declare module "@internal/redis" {
  }
  }
 
- export { type AnyMessageCatalog, type AnyQueueItem, BaseScheduler, CallbackFairQueueKeyProducer, type ClaimResult, type ConcurrencyCheckResult, type ConcurrencyGroupConfig, ConcurrencyManager, type ConcurrencyManagerOptions, type ConcurrencyState, type CooloffOptions, CronSchema, CustomRetry, DRRScheduler, type DRRSchedulerConfig, type DeadLetterMessage, DefaultFairQueueKeyProducer, type EnqueueBatchOptions, type EnqueueOptions, ExponentialBackoffRetry, FairQueue, FairQueueAttributes, type FairQueueKeyProducer, type FairQueueMetrics, type FairQueueOptions, FairQueueTelemetry, type FairScheduler, FixedDelayRetry, type GlobalRateLimiter, ImmediateRetry, type InFlightMessage, type JobHandler, type JobHandlerParams, LinearBackoffRetry, MasterQueue, type MasterQueueOptions, type MessageCatalogKey, type MessageCatalogSchema, type MessageCatalogValue, type MessageHandler, type MessageHandlerContext, MessagingAttributes, NoRetry, NoopScheduler, type QueueCooloffState, type QueueDescriptor, type QueueItem, type QueueMessage, type QueueWithScore, type RetryOptions, type RetryStrategy, RoundRobinScheduler, type SchedulerContext, SimpleQueue, type StoredMessage, type TelemetryOptions, type TenantQueues, VisibilityManager, type VisibilityManagerOptions, WeightedScheduler, type WeightedSchedulerBiases, type WeightedSchedulerConfig, Worker, type WorkerCatalog, type WorkerConcurrencyOptions, WorkerQueueManager, type WorkerQueueManagerOptions, type WorkerQueueOptions, createDefaultRetryStrategy, defaultRetryOptions, noopTelemetry };
+ export { type AnyMessageCatalog, type AnyQueueItem, BaseScheduler, BatchedSpanManager, type BatchedSpanManagerOptions, CallbackFairQueueKeyProducer, type ClaimResult, type ConcurrencyCheckResult, type ConcurrencyGroupConfig, ConcurrencyManager, type ConcurrencyManagerOptions, type ConcurrencyState, type ConsumerLoopState, type CooloffOptions, CronSchema, CustomRetry, DRRScheduler, type DRRSchedulerConfig, type DeadLetterMessage, DefaultFairQueueKeyProducer, type EnqueueBatchOptions, type EnqueueOptions, ExponentialBackoffRetry, FairQueue, FairQueueAttributes, type FairQueueKeyProducer, type FairQueueMetrics, type FairQueueOptions, FairQueueTelemetry, type FairScheduler, FixedDelayRetry, type GlobalRateLimiter, ImmediateRetry, type InFlightMessage, type JobHandler, type JobHandlerParams, LinearBackoffRetry, MasterQueue, type MasterQueueOptions, type MessageCatalogKey, type MessageCatalogSchema, type MessageCatalogValue, type MessageHandler, type MessageHandlerContext, MessagingAttributes, NoRetry, NoopScheduler, type QueueCooloffState, type QueueDescriptor, type QueueItem, type QueueMessage, type QueueWithScore, type RetryOptions, type RetryStrategy, RoundRobinScheduler, type SchedulerContext, SimpleQueue, type StoredMessage, type TelemetryOptions, type TenantQueues, VisibilityManager, type VisibilityManagerOptions, WeightedScheduler, type WeightedSchedulerBiases, type WeightedSchedulerConfig, Worker, type WorkerCatalog, type WorkerConcurrencyOptions, WorkerQueueManager, type WorkerQueueManagerOptions, type WorkerQueueOptions, createDefaultRetryStrategy, defaultRetryOptions, isAbortError, noopTelemetry };