@trigger.dev/redis-worker 4.3.1 → 4.3.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.cjs +863 -452
- package/dist/index.cjs.map +1 -1
- package/dist/index.d.cts +270 -27
- package/dist/index.d.ts +270 -27
- package/dist/index.js +862 -453
- package/dist/index.js.map +1 -1
- package/package.json +2 -2
package/dist/index.cjs
CHANGED
@@ -10833,6 +10833,9 @@ var SpanStatusCode;
 SpanStatusCode2[SpanStatusCode2["ERROR"] = 2] = "ERROR";
 })(SpanStatusCode || (SpanStatusCode = {}));
 
+// ../../node_modules/.pnpm/@opentelemetry+api@1.9.0/node_modules/@opentelemetry/api/build/esm/context-api.js
+var context = ContextAPI.getInstance();
+
 // ../../node_modules/.pnpm/@opentelemetry+api@1.9.0/node_modules/@opentelemetry/api/build/esm/metrics/NoopMeterProvider.js
 var NoopMeterProvider = (
 /** @class */
@@ -11730,6 +11733,11 @@ var Worker = class _Worker {
 }
 };
 
+// src/utils.ts
+function isAbortError(error) {
+return error instanceof Error && (error.name === "AbortError" || error.message === "AbortError");
+}
+
 // src/fair-queue/concurrency.ts
 var ConcurrencyManager = class {
 constructor(options) {
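Annotation: the new `isAbortError` helper matches aborts by `name` or by `message`, because both shapes occur in this diff: a signal-driven abort typically carries `name === "AbortError"`, while the consumer loop's abortable sleep (see the `#runMasterQueueConsumerLoop` hunk below) rejects with `new Error("AbortError")`, where only the message matches. A small illustrative sketch:

```ts
// Sketch: the two abort shapes the helper accepts (values are illustrative).
function isAbortError(error: unknown): boolean {
  return (
    error instanceof Error &&
    (error.name === "AbortError" || error.message === "AbortError")
  );
}

const byName = Object.assign(new Error("The operation was aborted."), {
  name: "AbortError", // DOM-style abort: matched by name
});
const byMessage = new Error("AbortError"); // loop-sleep style: matched by message

console.log(isAbortError(byName), isAbortError(byMessage)); // true true
```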
@@ -11806,6 +11814,37 @@ var ConcurrencyManager = class {
 const key = this.keys.concurrencyKey(groupName, groupId);
 return await this.redis.scard(key);
 }
+/**
+* Get available capacity for a queue across all concurrency groups.
+* Returns the minimum available capacity across all groups.
+*/
+async getAvailableCapacity(queue) {
+if (this.groups.length === 0) {
+return 0;
+}
+const groupData = this.groups.map((group) => ({
+group,
+groupId: group.extractGroupId(queue)
+}));
+const [currents, limits] = await Promise.all([
+Promise.all(
+groupData.map(
+({ group, groupId }) => this.redis.scard(this.keys.concurrencyKey(group.name, groupId))
+)
+),
+Promise.all(
+groupData.map(
+({ group, groupId }) => group.getLimit(groupId).then((limit) => limit || group.defaultLimit)
+)
+)
+]);
+let minCapacity = Infinity;
+for (let i = 0; i < groupData.length; i++) {
+const available = Math.max(0, limits[i] - currents[i]);
+minCapacity = Math.min(minCapacity, available);
+}
+return minCapacity === Infinity ? 0 : minCapacity;
+}
 /**
 * Get concurrency limit for a specific group.
 */
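Annotation: `getAvailableCapacity` fans out one SCARD plus one limit lookup per group, then takes the minimum headroom across groups. The same computation with in-memory data, as a hedged sketch (names are illustrative):

```ts
// Illustrative only: compute the bottleneck capacity across concurrency groups,
// mirroring the min-over-groups logic added to ConcurrencyManager.
interface GroupSnapshot {
  name: string;
  current: number; // SCARD of the group's concurrency set
  limit: number;   // per-group limit, falling back to defaultLimit
}

function availableCapacity(groups: GroupSnapshot[]): number {
  if (groups.length === 0) return 0; // matches the diff: no groups => no capacity
  let min = Infinity;
  for (const g of groups) {
    min = Math.min(min, Math.max(0, g.limit - g.current));
  }
  return min === Infinity ? 0 : min;
}

// e.g. tenant group has 3 slots free, env group has 1 => overall capacity is 1
console.log(availableCapacity([
  { name: "tenant", current: 7, limit: 10 },
  { name: "env", current: 9, limit: 10 },
])); // 1
```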
@@ -12312,7 +12351,8 @@ var FairQueueTelemetry = class {
 // Helper Methods
 // ============================================================================
 /**
-* Create standard attributes for a message operation.
+* Create standard attributes for a message operation (for spans/traces).
+* Use this for span attributes where high cardinality is acceptable.
 */
 messageAttributes(params) {
 const attrs = {};
@@ -12398,6 +12438,187 @@ var FairQueueTelemetry = class {
 };
 }
 };
+var BatchedSpanManager = class {
+tracer;
+name;
+maxIterations;
+timeoutSeconds;
+loopStates = /* @__PURE__ */ new Map();
+getDynamicAttributes;
+constructor(options) {
+this.tracer = options.tracer;
+this.name = options.name;
+this.maxIterations = options.maxIterations;
+this.timeoutSeconds = options.timeoutSeconds;
+this.getDynamicAttributes = options.getDynamicAttributes;
+}
+/**
+* Initialize state for a consumer loop.
+*/
+initializeLoop(loopId) {
+this.loopStates.set(loopId, {
+perTraceCountdown: this.maxIterations,
+traceStartedAt: /* @__PURE__ */ new Date(),
+iterationsCount: 0,
+totalIterationsCount: 0,
+runningDurationInMs: 0,
+stats: {},
+endSpanInNextIteration: false
+});
+}
+/**
+* Get the state for a consumer loop.
+*/
+getState(loopId) {
+return this.loopStates.get(loopId);
+}
+/**
+* Increment a stat counter for a loop.
+*/
+incrementStat(loopId, statName, value = 1) {
+const state = this.loopStates.get(loopId);
+if (state) {
+state.stats[statName] = (state.stats[statName] ?? 0) + value;
+}
+}
+/**
+* Mark that the span should end on the next iteration.
+*/
+markForRotation(loopId) {
+const state = this.loopStates.get(loopId);
+if (state) {
+state.endSpanInNextIteration = true;
+}
+}
+/**
+* Check if the span should be rotated (ended and a new one started).
+*/
+shouldRotate(loopId) {
+const state = this.loopStates.get(loopId);
+if (!state) return true;
+return state.perTraceCountdown <= 0 || Date.now() - state.traceStartedAt.getTime() > this.timeoutSeconds * 1e3 || state.currentSpanContext === void 0 || state.endSpanInNextIteration;
+}
+/**
+* End the current span for a loop and record stats.
+*/
+endCurrentSpan(loopId) {
+const state = this.loopStates.get(loopId);
+if (!state?.currentSpan) return;
+for (const [statName, count] of Object.entries(state.stats)) {
+state.currentSpan.setAttribute(`stats.${statName}`, count);
+}
+state.currentSpan.end();
+state.currentSpan = void 0;
+state.currentSpanContext = void 0;
+}
+/**
+* Start a new batched span for a loop.
+*/
+startNewSpan(loopId, attributes) {
+if (!this.tracer) return;
+const state = this.loopStates.get(loopId);
+if (!state) return;
+this.endCurrentSpan(loopId);
+const traceDurationInMs = state.traceStartedAt ? Date.now() - state.traceStartedAt.getTime() : void 0;
+const iterationsPerSecond = traceDurationInMs && traceDurationInMs > 0 ? state.iterationsCount / (traceDurationInMs / 1e3) : void 0;
+const dynamicAttributes = this.getDynamicAttributes?.() ?? {};
+state.currentSpan = this.tracer.startSpan(
+`${this.name}.consumerLoop`,
+{
+kind: 1,
+// SpanKind.CONSUMER
+attributes: {
+loop_id: loopId,
+max_iterations: this.maxIterations,
+timeout_seconds: this.timeoutSeconds,
+previous_iterations: state.iterationsCount,
+previous_duration_ms: traceDurationInMs,
+previous_iterations_per_second: iterationsPerSecond,
+total_iterations: state.totalIterationsCount,
+...dynamicAttributes,
+...attributes
+}
+},
+ROOT_CONTEXT
+);
+state.currentSpanContext = trace.setSpan(ROOT_CONTEXT, state.currentSpan);
+state.perTraceCountdown = this.maxIterations;
+state.traceStartedAt = /* @__PURE__ */ new Date();
+state.iterationsCount = 0;
+state.runningDurationInMs = 0;
+state.stats = {};
+state.endSpanInNextIteration = false;
+}
+/**
+* Execute a function within the batched span context.
+* Automatically handles span rotation and iteration tracking.
+*/
+async withBatchedSpan(loopId, fn, options) {
+let state = this.loopStates.get(loopId);
+if (!state) {
+this.initializeLoop(loopId);
+state = this.loopStates.get(loopId);
+}
+if (this.shouldRotate(loopId)) {
+this.startNewSpan(loopId);
+}
+const startTime = performance.now();
+try {
+if (!this.tracer || !state.currentSpanContext) {
+return await fn(noopSpan);
+}
+return await context.with(state.currentSpanContext, async () => {
+const iterationSpanName = options?.iterationSpanName ?? "iteration";
+return await this.tracer.startActiveSpan(
+`${this.name}.${iterationSpanName}`,
+{
+attributes: {
+loop_id: loopId,
+iteration: state.iterationsCount,
+...options?.attributes
+}
+},
+async (iterationSpan) => {
+try {
+return await fn(iterationSpan);
+} catch (error) {
+if (error instanceof Error) {
+iterationSpan.recordException(error);
+state.currentSpan?.recordException(error);
+}
+iterationSpan.setStatus({ code: SpanStatusCode.ERROR });
+state.endSpanInNextIteration = true;
+throw error;
+} finally {
+iterationSpan.end();
+}
+}
+);
+});
+} finally {
+const duration = performance.now() - startTime;
+state.runningDurationInMs += duration;
+state.iterationsCount++;
+state.totalIterationsCount++;
+state.perTraceCountdown--;
+}
+}
+/**
+* Clean up state for a loop when it's stopped.
+*/
+cleanup(loopId) {
+this.endCurrentSpan(loopId);
+this.loopStates.delete(loopId);
+}
+/**
+* Clean up all loop states.
+*/
+cleanupAll() {
+for (const loopId of this.loopStates.keys()) {
+this.cleanup(loopId);
+}
+}
+};
 var noopSpan = {
 spanContext: () => ({
 traceId: "",
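Annotation: `BatchedSpanManager` addresses a common tracing problem with hot polling loops: one span per iteration floods the backend, while one unbounded span never exports. It keeps a long-lived consumer-loop span and rotates it after `maxIterations` iterations, after `timeoutSeconds`, or after an error. Just the rotation decision in isolation, as a sketch (type names are illustrative, not the package's API):

```ts
// Illustrative rotation rule, mirroring shouldRotate() in the diff above.
interface LoopState {
  perTraceCountdown: number;       // decremented on every iteration
  traceStartedAt: Date;            // when the current batched span began
  hasSpan: boolean;                // stands in for currentSpanContext !== undefined
  endSpanInNextIteration: boolean; // set after an error to force rotation
}

function shouldRotate(state: LoopState | undefined, timeoutSeconds: number): boolean {
  if (!state) return true;
  return (
    state.perTraceCountdown <= 0 ||
    Date.now() - state.traceStartedAt.getTime() > timeoutSeconds * 1000 ||
    !state.hasSpan ||
    state.endSpanInNextIteration
  );
}
```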
@@ -12497,6 +12718,71 @@ var VisibilityManager = class {
 return { claimed: false };
 }
 }
+/**
+* Claim multiple messages for processing (batch claim).
+* Moves up to maxCount messages from the queue to the in-flight set.
+*
+* @param queueId - The queue to claim from
+* @param queueKey - The Redis key for the queue sorted set
+* @param queueItemsKey - The Redis key for the queue items hash
+* @param consumerId - ID of the consumer claiming the messages
+* @param maxCount - Maximum number of messages to claim
+* @param timeoutMs - Visibility timeout in milliseconds
+* @returns Array of claimed messages
+*/
+async claimBatch(queueId, queueKey, queueItemsKey, consumerId, maxCount, timeoutMs) {
+const timeout = timeoutMs ?? this.defaultTimeoutMs;
+const deadline = Date.now() + timeout;
+const shardId = this.#getShardForQueue(queueId);
+const inflightKey = this.keys.inflightKey(shardId);
+const inflightDataKey = this.keys.inflightDataKey(shardId);
+const result = await this.redis.claimMessageBatch(
+queueKey,
+queueItemsKey,
+inflightKey,
+inflightDataKey,
+queueId,
+deadline.toString(),
+maxCount.toString()
+);
+if (!result || result.length === 0) {
+return [];
+}
+const messages = [];
+for (let i = 0; i < result.length; i += 2) {
+const messageId = result[i];
+const payloadJson = result[i + 1];
+if (!messageId || !payloadJson) {
+continue;
+}
+try {
+const payload = JSON.parse(payloadJson);
+messages.push({
+messageId,
+queueId,
+payload,
+deadline,
+consumerId
+});
+} catch (error) {
+this.logger.error("Failed to parse claimed message in batch", {
+messageId,
+queueId,
+error: error instanceof Error ? error.message : String(error)
+});
+await this.#removeFromInflight(shardId, messageId, queueId);
+}
+}
+if (messages.length > 0) {
+this.logger.debug("Batch claimed messages", {
+queueId,
+consumerId,
+count: messages.length,
+deadline
+});
+}
+return messages;
+}
 /**
 * Extend the visibility timeout for a message (heartbeat).
 *
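Annotation: the `claimMessageBatch` Lua script (later in this diff) returns a flat array of alternating `[messageId, payload, ...]` pairs, which `claimBatch` walks two at a time. The decoding step in isolation, as a sketch (types are illustrative):

```ts
// Decode a flat [id, payload, id, payload, ...] reply into typed records,
// skipping malformed entries — the same shape claimBatch parses.
interface Claimed<T> {
  messageId: string;
  payload: T;
}

function decodePairs<T>(flat: string[]): Claimed<T>[] {
  const out: Claimed<T>[] = [];
  for (let i = 0; i < flat.length; i += 2) {
    const messageId = flat[i];
    const payloadJson = flat[i + 1];
    if (!messageId || !payloadJson) continue;
    try {
      out.push({ messageId, payload: JSON.parse(payloadJson) as T });
    } catch {
      // the real code logs and removes the message from in-flight here
    }
  }
  return out;
}
```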
@@ -12510,11 +12796,7 @@ var VisibilityManager = class {
 const inflightKey = this.keys.inflightKey(shardId);
 const member = this.#makeMember(messageId, queueId);
 const newDeadline = Date.now() + extendMs;
-const result = await this.redis.heartbeatMessage(
-inflightKey,
-member,
-newDeadline.toString()
-);
+const result = await this.redis.heartbeatMessage(inflightKey, member, newDeadline.toString());
 const success = result === 1;
 if (success) {
 this.logger.debug("Heartbeat successful", {
@@ -12548,9 +12830,10 @@ var VisibilityManager = class {
 * @param queueId - The queue ID
 * @param queueKey - The Redis key for the queue
 * @param queueItemsKey - The Redis key for the queue items hash
+* @param masterQueueKey - The Redis key for the master queue
 * @param score - Optional score for the message (defaults to now)
 */
-async release(messageId, queueId, queueKey, queueItemsKey, score) {
+async release(messageId, queueId, queueKey, queueItemsKey, masterQueueKey, score) {
 const shardId = this.#getShardForQueue(queueId);
 const inflightKey = this.keys.inflightKey(shardId);
 const inflightDataKey = this.keys.inflightDataKey(shardId);
@@ -12561,9 +12844,11 @@ var VisibilityManager = class {
 inflightDataKey,
 queueKey,
 queueItemsKey,
+masterQueueKey,
 member,
 messageId,
-messageScore.toString()
+messageScore.toString(),
+queueId
 );
 this.logger.debug("Message released", {
 messageId,
@@ -12571,6 +12856,45 @@ var VisibilityManager = class {
 score: messageScore
 });
 }
+/**
+* Release multiple messages back to their queue in a single operation.
+* Used when processing fails or consumer wants to retry later.
+* All messages must belong to the same queue.
+*
+* @param messages - Array of messages to release (must all have same queueId)
+* @param queueId - The queue ID
+* @param queueKey - The Redis key for the queue
+* @param queueItemsKey - The Redis key for the queue items hash
+* @param masterQueueKey - The Redis key for the master queue
+* @param score - Optional score for the messages (defaults to now)
+*/
+async releaseBatch(messages, queueId, queueKey, queueItemsKey, masterQueueKey, score) {
+if (messages.length === 0) {
+return;
+}
+const shardId = this.#getShardForQueue(queueId);
+const inflightKey = this.keys.inflightKey(shardId);
+const inflightDataKey = this.keys.inflightDataKey(shardId);
+const messageScore = score ?? Date.now();
+const messageIds = messages.map((m) => m.messageId);
+const members = messages.map((m) => this.#makeMember(m.messageId, queueId));
+await this.redis.releaseMessageBatch(
+inflightKey,
+inflightDataKey,
+queueKey,
+queueItemsKey,
+masterQueueKey,
+messageScore.toString(),
+queueId,
+...members,
+...messageIds
+);
+this.logger.debug("Batch messages released", {
+queueId,
+count: messages.length,
+score: messageScore
+});
+}
 /**
 * Reclaim timed-out messages from a shard.
 * Returns messages to their original queues.
@@ -12601,7 +12925,7 @@ var VisibilityManager = class {
 continue;
 }
 const { messageId, queueId } = this.#parseMember(member);
-const { queueKey, queueItemsKey } = getQueueKeys(queueId);
+const { queueKey, queueItemsKey, masterQueueKey } = getQueueKeys(queueId);
 try {
 const score = parseFloat(originalScore) || now;
 await this.redis.releaseMessage(
@@ -12609,9 +12933,11 @@ var VisibilityManager = class {
 inflightDataKey,
 queueKey,
 queueItemsKey,
+masterQueueKey,
 member,
 messageId,
-score.toString()
+score.toString(),
+queueId
 );
 reclaimed++;
 this.logger.debug("Reclaimed timed-out message", {
@@ -12745,17 +13071,67 @@ redis.call('HSET', inflightDataKey, messageId, payload)
 return {messageId, payload}
 `
 });
-this.redis.defineCommand("
+this.redis.defineCommand("claimMessageBatch", {
 numberOfKeys: 4,
 lua: `
+local queueKey = KEYS[1]
+local queueItemsKey = KEYS[2]
+local inflightKey = KEYS[3]
+local inflightDataKey = KEYS[4]
+
+local queueId = ARGV[1]
+local deadline = tonumber(ARGV[2])
+local maxCount = tonumber(ARGV[3])
+
+-- Get up to maxCount oldest messages from queue
+local items = redis.call('ZRANGE', queueKey, 0, maxCount - 1)
+if #items == 0 then
+return {}
+end
+
+local results = {}
+
+for i, messageId in ipairs(items) do
+-- Get message data
+local payload = redis.call('HGET', queueItemsKey, messageId)
+
+if payload then
+-- Remove from queue
+redis.call('ZREM', queueKey, messageId)
+redis.call('HDEL', queueItemsKey, messageId)
+
+-- Add to in-flight set with deadline
+local member = messageId .. ':' .. queueId
+redis.call('ZADD', inflightKey, deadline, member)
+
+-- Store message data for potential release
+redis.call('HSET', inflightDataKey, messageId, payload)
+
+-- Add to results
+table.insert(results, messageId)
+table.insert(results, payload)
+else
+-- Message data missing, remove from queue
+redis.call('ZREM', queueKey, messageId)
+end
+end
+
+return results
+`
+});
+this.redis.defineCommand("releaseMessage", {
+numberOfKeys: 5,
+lua: `
 local inflightKey = KEYS[1]
 local inflightDataKey = KEYS[2]
 local queueKey = KEYS[3]
 local queueItemsKey = KEYS[4]
+local masterQueueKey = KEYS[5]
 
 local member = ARGV[1]
 local messageId = ARGV[2]
 local score = tonumber(ARGV[3])
+local queueId = ARGV[4]
 
 -- Get message data from in-flight
 local payload = redis.call('HGET', inflightDataKey, messageId)
@@ -12772,9 +13148,67 @@ redis.call('HDEL', inflightDataKey, messageId)
 redis.call('ZADD', queueKey, score, messageId)
 redis.call('HSET', queueItemsKey, messageId, payload)
 
+-- Update master queue with oldest message timestamp
+-- This ensures delayed messages don't push the queue priority to the future
+-- when there are other ready messages in the queue
+local oldest = redis.call('ZRANGE', queueKey, 0, 0, 'WITHSCORES')
+if #oldest >= 2 then
+redis.call('ZADD', masterQueueKey, oldest[2], queueId)
+end
+
 return 1
 `
 });
+this.redis.defineCommand("releaseMessageBatch", {
+numberOfKeys: 5,
+lua: `
+local inflightKey = KEYS[1]
+local inflightDataKey = KEYS[2]
+local queueKey = KEYS[3]
+local queueItemsKey = KEYS[4]
+local masterQueueKey = KEYS[5]
+
+local score = tonumber(ARGV[1])
+local queueId = ARGV[2]
+
+-- Remaining args are: members..., messageIds...
+-- Calculate how many messages we have
+local numMessages = (table.getn(ARGV) - 2) / 2
+local membersStart = 3
+local messageIdsStart = membersStart + numMessages
+
+local releasedCount = 0
+
+for i = 0, numMessages - 1 do
+local member = ARGV[membersStart + i]
+local messageId = ARGV[messageIdsStart + i]
+
+-- Get message data from in-flight
+local payload = redis.call('HGET', inflightDataKey, messageId)
+if payload then
+-- Remove from in-flight
+redis.call('ZREM', inflightKey, member)
+redis.call('HDEL', inflightDataKey, messageId)
+
+-- Add back to queue
+redis.call('ZADD', queueKey, score, messageId)
+redis.call('HSET', queueItemsKey, messageId, payload)
+
+releasedCount = releasedCount + 1
+end
+end
+
+-- Update master queue with oldest message timestamp (only once at the end)
+if releasedCount > 0 then
+local oldest = redis.call('ZRANGE', queueKey, 0, 0, 'WITHSCORES')
+if #oldest >= 2 then
+redis.call('ZADD', masterQueueKey, oldest[2], queueId)
+end
+end
+
+return releasedCount
+`
+});
 this.redis.defineCommand("heartbeatMessage", {
 numberOfKeys: 1,
 lua: `
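Annotation: these scripts are registered with ioredis's `defineCommand`, which adds each script as a method on the client; the declared KEYS come first, then the ARGV values, all as strings. A hedged sketch of a `claimMessageBatch` call (key names and the elided script body are illustrative):

```ts
import Redis from "ioredis";

async function demo() {
  const redis = new Redis();
  // Register the script; the real body is the claimMessageBatch Lua above.
  redis.defineCommand("claimMessageBatch", {
    numberOfKeys: 4,
    lua: "-- body elided; see the diff above\nreturn {}",
  });
  // ioredis generates a method: 4 KEYS first, then ARGV values.
  const flat: string[] = await (redis as any).claimMessageBatch(
    "queue:q1",        // KEYS[1] queueKey (sorted set of message IDs)
    "queue:q1:items",  // KEYS[2] queueItemsKey (hash of payloads)
    "inflight:0",      // KEYS[3] inflightKey (sorted set by deadline)
    "inflight:0:data", // KEYS[4] inflightDataKey (hash of in-flight payloads)
    "q1",                        // ARGV[1] queueId
    String(Date.now() + 30_000), // ARGV[2] visibility deadline
    "10"                         // ARGV[3] maxCount
  );
  console.log(flat.length / 2, "messages claimed");
  await redis.quit();
}
```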
@@ -12862,11 +13296,11 @@ var WorkerQueueManager = class {
 async blockingPop(workerQueueId, timeoutSeconds, signal) {
 const workerQueueKey = this.keys.workerQueueKey(workerQueueId);
 const blockingClient = this.redis.duplicate();
+const cleanup = signal ? () => {
+blockingClient.disconnect();
+} : null;
 try {
-if (signal) {
-const cleanup = () => {
-blockingClient.disconnect();
-};
+if (signal && cleanup) {
 signal.addEventListener("abort", cleanup, { once: true });
 if (signal.aborted) {
 return null;
@@ -12893,6 +13327,9 @@ var WorkerQueueManager = class {
 });
 throw error;
 } finally {
+if (cleanup && signal) {
+signal.removeEventListener("abort", cleanup);
+}
 await blockingClient.quit().catch(() => {
 });
 }
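Annotation: the refactor hoists the abort handler out of the try block so the finally clause can call `removeEventListener`, preventing listener accumulation when `blockingPop` is called repeatedly with the same long-lived `AbortSignal`. The pattern in isolation, as a sketch (names are illustrative):

```ts
// Sketch of the listener-hygiene pattern: always detach an abort handler
// in finally, so repeated calls with one shared signal don't leak listeners.
async function withAbort<T>(
  signal: AbortSignal | undefined,
  work: () => Promise<T>
): Promise<T | null> {
  const onAbort = signal
    ? () => {/* e.g. disconnect a blocking Redis client */}
    : null;
  try {
    if (signal && onAbort) {
      signal.addEventListener("abort", onAbort, { once: true });
      if (signal.aborted) return null; // handle the pre-aborted case
    }
    return await work();
  } finally {
    if (signal && onAbort) {
      signal.removeEventListener("abort", onAbort); // safe even if already fired
    }
  }
}
```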
@@ -13124,6 +13561,13 @@ var BaseScheduler = class {
 */
 async recordProcessed(_tenantId, _queueId) {
 }
+/**
+* Called after processing multiple messages to update scheduler state.
+* Batch variant for efficiency - reduces Redis calls when processing multiple messages.
+* Default implementation does nothing.
+*/
+async recordProcessedBatch(_tenantId, _queueId, _count) {
+}
 /**
 * Initialize the scheduler.
 * Default implementation does nothing.
@@ -13186,6 +13630,7 @@ var DRRScheduler = class extends BaseScheduler {
 this.keys = config.keys;
 this.quantum = config.quantum;
 this.maxDeficit = config.maxDeficit;
+this.masterQueueLimit = config.masterQueueLimit ?? 1e3;
 this.logger = config.logger ?? {
 debug: () => {
 },
@@ -13198,6 +13643,7 @@ var DRRScheduler = class extends BaseScheduler {
 keys;
 quantum;
 maxDeficit;
+masterQueueLimit;
 logger;
 // ============================================================================
 // FairScheduler Implementation
@@ -13263,6 +13709,13 @@ var DRRScheduler = class extends BaseScheduler {
 async recordProcessed(tenantId, _queueId) {
 await this.#decrementDeficit(tenantId);
 }
+/**
+* Record that multiple messages were processed from a tenant.
+* Decrements the tenant's deficit by count atomically.
+*/
+async recordProcessedBatch(tenantId, _queueId, count) {
+await this.#decrementDeficitBatch(tenantId, count);
+}
 async close() {
 await this.redis.quit();
 }
@@ -13312,8 +13765,7 @@ var DRRScheduler = class extends BaseScheduler {
 "WITHSCORES",
 "LIMIT",
 0,
-
-// Limit for performance
+this.masterQueueLimit
 );
 const queues = [];
 for (let i = 0; i < results.length; i += 2) {
@@ -13354,6 +13806,14 @@ var DRRScheduler = class extends BaseScheduler {
 const result = await this.redis.drrDecrementDeficit(key, tenantId);
 return parseFloat(result);
 }
+/**
+* Decrement deficit for a tenant by a count atomically.
+*/
+async #decrementDeficitBatch(tenantId, count) {
+const key = this.#deficitKey();
+const result = await this.redis.drrDecrementDeficitBatch(key, tenantId, count.toString());
+return parseFloat(result);
+}
 #registerCommands() {
 this.redis.defineCommand("drrAddQuantum", {
 numberOfKeys: 1,
@@ -13397,6 +13857,25 @@ if newDeficit < 0 then
 newDeficit = 0
 end
 
+return tostring(newDeficit)
+`
+});
+this.redis.defineCommand("drrDecrementDeficitBatch", {
+numberOfKeys: 1,
+lua: `
+local deficitKey = KEYS[1]
+local tenantId = ARGV[1]
+local count = tonumber(ARGV[2])
+
+local newDeficit = redis.call('HINCRBYFLOAT', deficitKey, tenantId, -count)
+newDeficit = tonumber(newDeficit)
+
+-- Floor at 0
+if newDeficit < 0 then
+redis.call('HSET', deficitKey, tenantId, 0)
+newDeficit = 0
+end
+
 return tostring(newDeficit)
 `
 });
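Annotation: `drrDecrementDeficitBatch` folds N per-message HINCRBYFLOAT calls into one atomic decrement by the batch count, floored at zero. The same arithmetic in plain TypeScript (illustrative, in-memory stand-in for the Redis hash):

```ts
// Sketch of the deficit update performed by the Lua script:
// one decrement by `count`, floored at zero.
function decrementDeficitBatch(
  deficits: Map<string, number>,
  tenantId: string,
  count: number
): number {
  const next = Math.max(0, (deficits.get(tenantId) ?? 0) - count);
  deficits.set(tenantId, next);
  return next;
}

const deficits = new Map([["tenant-a", 2.5]]);
console.log(decrementDeficitBatch(deficits, "tenant-a", 10)); // 0, not -7.5
```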
@@ -13904,18 +14383,30 @@ var FairQueue = class {
 this.visibilityTimeoutMs = options.visibilityTimeoutMs ?? 3e4;
 this.heartbeatIntervalMs = options.heartbeatIntervalMs ?? this.visibilityTimeoutMs / 3;
 this.reclaimIntervalMs = options.reclaimIntervalMs ?? 5e3;
-this.
-this.
-this.workerQueueResolver = options.workerQueue?.resolveWorkerQueue;
+this.workerQueueResolver = options.workerQueue.resolveWorkerQueue;
+this.batchClaimSize = options.batchClaimSize ?? 10;
 this.cooloffEnabled = options.cooloff?.enabled ?? true;
 this.cooloffThreshold = options.cooloff?.threshold ?? 10;
 this.cooloffPeriodMs = options.cooloff?.periodMs ?? 1e4;
+this.maxCooloffStatesSize = options.cooloff?.maxStatesSize ?? 1e3;
 this.globalRateLimiter = options.globalRateLimiter;
+this.consumerTraceMaxIterations = options.consumerTraceMaxIterations ?? 500;
+this.consumerTraceTimeoutSeconds = options.consumerTraceTimeoutSeconds ?? 60;
 this.telemetry = new FairQueueTelemetry({
 tracer: options.tracer,
 meter: options.meter,
 name: options.name ?? "fairqueue"
 });
+this.batchedSpanManager = new BatchedSpanManager({
+tracer: options.tracer,
+name: options.name ?? "fairqueue",
+maxIterations: this.consumerTraceMaxIterations,
+timeoutSeconds: this.consumerTraceTimeoutSeconds,
+getDynamicAttributes: () => ({
+"cache.descriptor_size": this.queueDescriptorCache.size,
+"cache.cooloff_states_size": this.queueCooloffStates.size
+})
+});
 this.masterQueue = new MasterQueue({
 redis: options.redis,
 keys: options.keys,
@@ -13938,16 +14429,14 @@ var FairQueue = class {
 error: (msg, ctx) => this.logger.error(msg, ctx)
 }
 });
-
-
-
-
-
-
-
-
-});
-}
+this.workerQueueManager = new WorkerQueueManager({
+redis: options.redis,
+keys: options.keys,
+logger: {
+debug: (msg, ctx) => this.logger.debug(msg, ctx),
+error: (msg, ctx) => this.logger.error(msg, ctx)
+}
+});
 this.#registerCommands();
 if (options.startConsumers !== false) {
 this.start();
@@ -13973,22 +14462,24 @@ var FairQueue = class {
 visibilityTimeoutMs;
 heartbeatIntervalMs;
 reclaimIntervalMs;
-workerQueueEnabled;
-workerQueueBlockingTimeoutSeconds;
 workerQueueResolver;
+batchClaimSize;
 // Cooloff state
 cooloffEnabled;
 cooloffThreshold;
 cooloffPeriodMs;
+maxCooloffStatesSize;
 queueCooloffStates = /* @__PURE__ */ new Map();
 // Global rate limiter
 globalRateLimiter;
+// Consumer tracing
+consumerTraceMaxIterations;
+consumerTraceTimeoutSeconds;
+batchedSpanManager;
 // Runtime state
-messageHandler;
 isRunning = false;
 abortController;
 masterQueueConsumerLoops = [];
-workerQueueConsumerLoops = [];
 reclaimLoop;
 // Queue descriptor cache for message processing
 queueDescriptorCache = /* @__PURE__ */ new Map();
@@ -14017,15 +14508,6 @@ var FairQueue = class {
 });
 }
 // ============================================================================
-// Public API - Message Handler
-// ============================================================================
-/**
-* Set the message handler for processing dequeued messages.
-*/
-onMessage(handler) {
-this.messageHandler = handler;
-}
-// ============================================================================
 // Public API - Enqueueing
 // ============================================================================
 /**
@@ -14068,7 +14550,7 @@ var FairQueue = class {
 timestamp,
 attempt: 1,
 metadata: options.metadata
-}) :
+}) : void 0,
 metadata: options.metadata
 };
 await this.redis.enqueueMessageAtomic(
@@ -14086,13 +14568,7 @@ var FairQueue = class {
 [FairQueueAttributes.MESSAGE_ID]: messageId,
 [FairQueueAttributes.SHARD_ID]: shardId.toString()
 });
-this.telemetry.recordEnqueue(
-this.telemetry.messageAttributes({
-queueId: options.queueId,
-tenantId: options.tenantId,
-messageId
-})
-);
+this.telemetry.recordEnqueue();
 this.logger.debug("Message enqueued", {
 queueId: options.queueId,
 messageId,
@@ -14154,7 +14630,7 @@ var FairQueue = class {
 timestamp,
 attempt: 1,
 metadata: options.metadata
-}) :
+}) : void 0,
 metadata: options.metadata
 };
 messageIds.push(messageId);
@@ -14173,13 +14649,7 @@ var FairQueue = class {
 [FairQueueAttributes.MESSAGE_COUNT]: messageIds.length,
 [FairQueueAttributes.SHARD_ID]: shardId.toString()
 });
-this.telemetry.recordEnqueueBatch(
-messageIds.length,
-this.telemetry.messageAttributes({
-queueId: options.queueId,
-tenantId: options.tenantId
-})
-);
+this.telemetry.recordEnqueueBatch(messageIds.length);
 this.logger.debug("Batch enqueued", {
 queueId: options.queueId,
 messageCount: messageIds.length
@@ -14306,40 +14776,54 @@ var FairQueue = class {
 const dlqKey = this.keys.deadLetterQueueKey(tenantId);
 return await this.redis.zcard(dlqKey);
 }
-// ============================================================================
-// Public API - Lifecycle
-// ============================================================================
 /**
-*
+* Get the size of the in-memory queue descriptor cache.
+* This cache stores metadata for queues that have been enqueued.
+* The cache is cleaned up when queues are fully processed.
 */
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+getQueueDescriptorCacheSize() {
+return this.queueDescriptorCache.size;
+}
+/**
+* Get the size of the in-memory cooloff states cache.
+* This cache tracks queues that are in cooloff due to repeated failures.
+* The cache is cleaned up when queues are fully processed or cooloff expires.
+*/
+getQueueCooloffStatesSize() {
+return this.queueCooloffStates.size;
+}
+/**
+* Get all in-memory cache sizes for monitoring.
+* Useful for adding as span attributes.
+*/
+getCacheSizes() {
+return {
+descriptorCacheSize: this.queueDescriptorCache.size,
+cooloffStatesSize: this.queueCooloffStates.size
+};
+}
+// ============================================================================
+// Public API - Lifecycle
+// ============================================================================
+/**
+* Start the master queue consumer loops and reclaim loop.
+* FairQueue claims messages and pushes them to worker queues.
+* External consumers are responsible for consuming from worker queues.
+*/
+start() {
+if (this.isRunning) {
+return;
+}
+this.isRunning = true;
+this.abortController = new AbortController();
+for (let shardId = 0; shardId < this.shardCount; shardId++) {
+const loop = this.#runMasterQueueConsumerLoop(shardId);
+this.masterQueueConsumerLoops.push(loop);
 }
 this.reclaimLoop = this.#runReclaimLoop();
 this.logger.info("FairQueue started", {
 consumerCount: this.consumerCount,
 shardCount: this.shardCount,
-workerQueueEnabled: this.workerQueueEnabled,
 consumerIntervalMs: this.consumerIntervalMs
 });
 }
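Annotation: `getCacheSizes` exposes the two in-memory maps for observability, so callers can attach them as span attributes (the same values the BatchedSpanManager records via `getDynamicAttributes`). A hedged usage sketch (method names from the diff; span API from @opentelemetry/api):

```ts
import { trace } from "@opentelemetry/api";

// Sketch: attach FairQueue's in-memory cache sizes to the active span.
// `fairQueue` is assumed to be a constructed FairQueue instance.
function recordCacheSizes(fairQueue: {
  getCacheSizes(): { descriptorCacheSize: number; cooloffStatesSize: number };
}) {
  const span = trace.getActiveSpan();
  if (!span) return;
  const { descriptorCacheSize, cooloffStatesSize } = fairQueue.getCacheSizes();
  span.setAttribute("cache.descriptor_size", descriptorCacheSize);
  span.setAttribute("cache.cooloff_states_size", cooloffStatesSize);
}
```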
@@ -14352,13 +14836,8 @@ var FairQueue = class {
 }
 this.isRunning = false;
 this.abortController.abort();
-await Promise.allSettled([
-...this.masterQueueConsumerLoops,
-...this.workerQueueConsumerLoops,
-this.reclaimLoop
-]);
+await Promise.allSettled([...this.masterQueueConsumerLoops, this.reclaimLoop]);
 this.masterQueueConsumerLoops = [];
-this.workerQueueConsumerLoops = [];
 this.reclaimLoop = void 0;
 this.logger.info("FairQueue stopped");
 }
@@ -14367,11 +14846,12 @@ var FairQueue = class {
 */
 async close() {
 await this.stop();
+this.batchedSpanManager.cleanupAll();
 await Promise.all([
 this.masterQueue.close(),
 this.concurrencyManager?.close(),
 this.visibilityManager.close(),
-this.workerQueueManager
+this.workerQueueManager.close(),
 this.scheduler.close?.(),
 this.redis.quit()
 ]);
@@ -14409,49 +14889,148 @@ var FairQueue = class {
 // ============================================================================
 async #runMasterQueueConsumerLoop(shardId) {
 const loopId = `master-shard-${shardId}`;
+this.batchedSpanManager.initializeLoop(loopId);
 try {
-
-
-
+while (this.isRunning) {
+if (this.abortController.signal.aborted) {
+break;
+}
+let hadWork = false;
 try {
-await this
+hadWork = await this.batchedSpanManager.withBatchedSpan(
+loopId,
+async (span) => {
+span.setAttribute("shard_id", shardId);
+return await this.#processMasterQueueShard(loopId, shardId, span);
+},
+{
+iterationSpanName: "processMasterQueueShard",
+attributes: { shard_id: shardId }
+}
+);
 } catch (error) {
 this.logger.error("Master queue consumer error", {
 loopId,
 shardId,
 error: error instanceof Error ? error.message : String(error)
 });
+this.batchedSpanManager.markForRotation(loopId);
 }
+const waitMs = hadWork ? 1 : this.consumerIntervalMs;
+await new Promise((resolve, reject) => {
+const abortHandler = () => {
+clearTimeout(timeout);
+reject(new Error("AbortError"));
+};
+const timeout = setTimeout(() => {
+this.abortController.signal.removeEventListener("abort", abortHandler);
+resolve();
+}, waitMs);
+this.abortController.signal.addEventListener("abort", abortHandler, { once: true });
+});
 }
 } catch (error) {
-if (error
+if (isAbortError(error)) {
 this.logger.debug("Master queue consumer aborted", { loopId });
+this.batchedSpanManager.cleanup(loopId);
 return;
 }
 throw error;
+} finally {
+this.batchedSpanManager.cleanup(loopId);
 }
 }
-async #processMasterQueueShard(loopId, shardId) {
+async #processMasterQueueShard(loopId, shardId, parentSpan) {
 const masterQueueKey = this.keys.masterQueueKey(shardId);
-const
-
+const masterQueueSize = await this.masterQueue.getShardQueueCount(shardId);
+parentSpan?.setAttribute("master_queue_size", masterQueueSize);
+this.batchedSpanManager.incrementStat(loopId, "master_queue_size_sum", masterQueueSize);
+const schedulerContext = this.#createSchedulerContext();
+const tenantQueues = await this.telemetry.trace(
+"selectQueues",
+async (span) => {
+span.setAttribute(FairQueueAttributes.SHARD_ID, shardId.toString());
+span.setAttribute(FairQueueAttributes.CONSUMER_ID, loopId);
+span.setAttribute("master_queue_size", masterQueueSize);
+const result = await this.scheduler.selectQueues(masterQueueKey, loopId, schedulerContext);
+span.setAttribute("tenant_count", result.length);
+span.setAttribute(
+"queue_count",
+result.reduce((acc, t) => acc + t.queues.length, 0)
+);
+return result;
+},
+{ kind: SpanKind.INTERNAL }
+);
 if (tenantQueues.length === 0) {
-
+this.batchedSpanManager.incrementStat(loopId, "empty_iterations");
+return false;
 }
+this.batchedSpanManager.incrementStat(loopId, "tenants_selected", tenantQueues.length);
+this.batchedSpanManager.incrementStat(
+loopId,
+"queues_selected",
+tenantQueues.reduce((acc, t) => acc + t.queues.length, 0)
+);
+let messagesProcessed = 0;
 for (const { tenantId, queues } of tenantQueues) {
 for (const queueId of queues) {
 if (this.cooloffEnabled && this.#isInCooloff(queueId)) {
+this.batchedSpanManager.incrementStat(loopId, "cooloff_skipped");
 continue;
 }
-
-
-
-
+if (this.concurrencyManager) {
+const isAtCapacity = await this.concurrencyManager.isAtCapacity("tenant", tenantId);
+if (isAtCapacity) {
+this.batchedSpanManager.incrementStat(loopId, "tenant_capacity_skipped");
+break;
+}
+}
+const processedFromQueue = await this.telemetry.trace(
+"claimAndPushToWorkerQueue",
+async (span) => {
+span.setAttribute(FairQueueAttributes.QUEUE_ID, queueId);
+span.setAttribute(FairQueueAttributes.TENANT_ID, tenantId);
+span.setAttribute(FairQueueAttributes.SHARD_ID, shardId.toString());
+const count = await this.#claimAndPushToWorkerQueue(loopId, queueId, tenantId, shardId);
+span.setAttribute("messages_claimed", count);
+return count;
+},
+{ kind: SpanKind.INTERNAL }
+);
+if (processedFromQueue > 0) {
+messagesProcessed += processedFromQueue;
+this.batchedSpanManager.incrementStat(loopId, "messages_claimed", processedFromQueue);
+if (this.scheduler.recordProcessedBatch) {
+await this.telemetry.trace(
+"recordProcessedBatch",
+async (span) => {
+span.setAttribute(FairQueueAttributes.QUEUE_ID, queueId);
+span.setAttribute(FairQueueAttributes.TENANT_ID, tenantId);
+span.setAttribute("count", processedFromQueue);
+await this.scheduler.recordProcessedBatch(tenantId, queueId, processedFromQueue);
+},
+{ kind: SpanKind.INTERNAL }
+);
+} else if (this.scheduler.recordProcessed) {
+for (let i = 0; i < processedFromQueue; i++) {
+await this.telemetry.trace(
+"recordProcessed",
+async (span) => {
+span.setAttribute(FairQueueAttributes.QUEUE_ID, queueId);
+span.setAttribute(FairQueueAttributes.TENANT_ID, tenantId);
+await this.scheduler.recordProcessed(tenantId, queueId);
+},
+{ kind: SpanKind.INTERNAL }
+);
+}
+}
 } else {
-this
+this.batchedSpanManager.incrementStat(loopId, "claim_skipped");
 }
 }
 }
+return messagesProcessed > 0;
 }
 async #claimAndPushToWorkerQueue(loopId, queueId, tenantId, shardId) {
 const queueKey = this.keys.queueKey(queueId);
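Annotation: the loop replaces a fixed sleep with an abort-aware wait: it resolves after `waitMs` (1 ms when work was found, `consumerIntervalMs` otherwise) or rejects with an Error whose message is "AbortError" the moment the controller aborts, which the `isAbortError` helper upstream recognizes. The pattern in isolation, as a sketch:

```ts
// Sketch: abortable sleep as used by the consumer loop.
// Resolves after ms, or rejects with new Error("AbortError") on abort —
// the shape the isAbortError helper (earlier in this diff) matches by message.
function abortableSleep(ms: number, signal: AbortSignal): Promise<void> {
  return new Promise((resolve, reject) => {
    const abortHandler = () => {
      clearTimeout(timeout);
      reject(new Error("AbortError"));
    };
    const timeout = setTimeout(() => {
      signal.removeEventListener("abort", abortHandler); // avoid listener buildup
      resolve();
    }, ms);
    signal.addEventListener("abort", abortHandler, { once: true });
  });
}
```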
@@ -14462,11 +15041,14 @@ var FairQueue = class {
 tenantId,
 metadata: {}
 };
+let maxClaimCount = this.batchClaimSize;
 if (this.concurrencyManager) {
-const
-if (
-
+const availableCapacity = await this.concurrencyManager.getAvailableCapacity(descriptor);
+if (availableCapacity === 0) {
+this.#incrementCooloff(queueId);
+return 0;
 }
+maxClaimCount = Math.min(maxClaimCount, availableCapacity);
 }
 if (this.globalRateLimiter) {
 const result = await this.globalRateLimiter.limit();
@@ -14478,377 +15060,209 @@ var FairQueue = class {
|
|
|
14478
15060
|
}
|
|
14479
15061
|
}
|
|
14480
15062
|
}
|
|
14481
|
-
const
|
|
14482
|
-
|
|
14483
|
-
queueKey,
|
|
14484
|
-
|
|
14485
|
-
|
|
14486
|
-
|
|
14487
|
-
);
|
|
14488
|
-
if (!claimResult.claimed || !claimResult.message) {
|
|
14489
|
-
await this.redis.updateMasterQueueIfEmpty(masterQueueKey, queueKey, queueId);
|
|
14490
|
-
return false;
|
|
14491
|
-
}
|
|
14492
|
-
const { message } = claimResult;
|
|
14493
|
-
if (this.concurrencyManager) {
|
|
14494
|
-
const reserved = await this.concurrencyManager.reserve(descriptor, message.messageId);
|
|
14495
|
-
if (!reserved) {
|
|
14496
|
-
await this.visibilityManager.release(message.messageId, queueId, queueKey, queueItemsKey);
|
|
14497
|
-
return false;
|
|
15063
|
+
const claimedMessages = await this.visibilityManager.claimBatch(queueId, queueKey, queueItemsKey, loopId, maxClaimCount, this.visibilityTimeoutMs);
|
|
15064
|
+
if (claimedMessages.length === 0) {
|
|
15065
|
+
const removed = await this.redis.updateMasterQueueIfEmpty(masterQueueKey, queueKey, queueId);
|
|
15066
|
+
if (removed === 1) {
|
|
15067
|
+
this.queueDescriptorCache.delete(queueId);
|
|
15068
|
+
this.queueCooloffStates.delete(queueId);
|
|
14498
15069
|
}
|
|
15070
|
+
return 0;
|
|
14499
15071
|
}
|
|
14500
|
-
|
|
14501
|
-
|
|
14502
|
-
|
|
14503
|
-
|
|
14504
|
-
|
|
14505
|
-
|
|
14506
|
-
|
|
14507
|
-
|
|
14508
|
-
|
|
14509
|
-
|
|
14510
|
-
|
|
14511
|
-
|
|
14512
|
-
while (this.isRunning) {
|
|
14513
|
-
if (!this.messageHandler) {
|
|
14514
|
-
await new Promise((resolve) => setTimeout(resolve, this.consumerIntervalMs));
|
|
14515
|
-
continue;
|
|
14516
|
-
}
|
|
14517
|
-
try {
|
|
14518
|
-
const messageKey = await this.workerQueueManager.blockingPop(
|
|
14519
|
-
workerQueueId,
|
|
14520
|
-
this.workerQueueBlockingTimeoutSeconds,
|
|
14521
|
-
this.abortController.signal
|
|
15072
|
+
let processedCount = 0;
|
|
15073
|
+
for (let i = 0; i < claimedMessages.length; i++) {
|
|
15074
|
+
const message = claimedMessages[i];
|
|
15075
|
+
if (this.concurrencyManager) {
|
|
15076
|
+
const reserved = await this.concurrencyManager.reserve(descriptor, message.messageId);
|
|
15077
|
+
if (!reserved) {
|
|
15078
|
+
await this.visibilityManager.releaseBatch(
|
|
15079
|
+
claimedMessages.slice(i),
|
|
15080
|
+
queueId,
|
|
15081
|
+
queueKey,
|
|
15082
|
+
queueItemsKey,
|
|
15083
|
+
masterQueueKey
|
|
14522
15084
|
);
|
|
14523
|
-
|
|
14524
|
-
continue;
|
|
14525
|
-
}
|
|
14526
|
-
const colonIndex = messageKey.indexOf(":");
|
|
14527
|
-
if (colonIndex === -1) {
|
|
14528
|
-
this.logger.error("Invalid message key format", { messageKey });
|
|
14529
|
-
continue;
|
|
14530
|
-
}
|
|
14531
|
-
const messageId = messageKey.substring(0, colonIndex);
|
|
14532
|
-
const queueId = messageKey.substring(colonIndex + 1);
|
|
14533
|
-
await this.#processMessageFromWorkerQueue(loopId, messageId, queueId);
|
|
14534
|
-
} catch (error) {
|
|
14535
|
-
if (this.abortController.signal.aborted) {
|
|
14536
|
-
break;
|
|
14537
|
-
}
|
|
14538
|
-
this.logger.error("Worker queue consumer error", {
|
|
14539
|
-
loopId,
|
|
14540
|
-
error: error instanceof Error ? error.message : String(error)
|
|
14541
|
-
});
|
|
15085
|
+
break;
|
|
14542
15086
|
}
|
|
14543
15087
|
}
|
|
14544
|
-
|
|
14545
|
-
|
|
14546
|
-
|
|
14547
|
-
|
|
14548
|
-
|
|
14549
|
-
|
|
15088
|
+
const workerQueueId = this.workerQueueResolver(message.payload);
|
|
15089
|
+
const messageKey = `${message.messageId}:${queueId}`;
|
|
15090
|
+
await this.workerQueueManager.push(workerQueueId, messageKey);
|
|
15091
|
+
processedCount++;
|
|
15092
|
+
}
|
|
15093
|
+
if (processedCount > 0) {
|
|
15094
|
+
this.#resetCooloff(queueId);
|
|
14550
15095
|
}
|
|
15096
|
+
return processedCount;
|
|
14551
15097
|
}
|
|
14552
|
-
|
|
15098
|
+
// ============================================================================
|
|
15099
|
+
// Public API - Message Lifecycle (for external consumers)
|
|
15100
|
+
// ============================================================================
|
|
15101
|
+
/**
|
|
15102
|
+
* Get message data from in-flight storage.
|
|
15103
|
+
* External consumers use this to retrieve the stored message after popping from worker queue.
|
|
15104
|
+
*
|
|
15105
|
+
* @param messageId - The ID of the message
|
|
15106
|
+
* @param queueId - The queue ID the message belongs to
|
|
15107
|
+
* @returns The stored message or null if not found
|
|
15108
|
+
*/
|
|
15109
|
+
async getMessageData(messageId, queueId) {
|
|
14553
15110
|
const shardId = this.masterQueue.getShardForQueue(queueId);
|
|
14554
15111
|
const inflightDataKey = this.keys.inflightDataKey(shardId);
|
|
14555
15112
|
const dataJson = await this.redis.hget(inflightDataKey, messageId);
|
|
14556
15113
|
if (!dataJson) {
|
|
14557
|
-
|
|
14558
|
-
return;
|
|
15114
|
+
return null;
|
|
14559
15115
|
}
|
|
14560
|
-
let storedMessage;
|
|
14561
15116
|
try {
|
|
14562
|
-
|
|
15117
|
+
return JSON.parse(dataJson);
|
|
14563
15118
|
} catch {
|
|
14564
15119
|
this.logger.error("Failed to parse message data", { messageId, queueId });
|
|
14565
|
-
return;
|
|
15120
|
+
return null;
|
|
14566
15121
|
}
|
|
14567
|
-
await this.#processMessage(loopId, storedMessage, queueId);
|
|
14568
15122
|
}
|
|
14569
|
-
|
|
14570
|
-
|
|
14571
|
-
|
|
14572
|
-
|
|
14573
|
-
|
|
14574
|
-
|
|
14575
|
-
|
|
14576
|
-
|
|
14577
|
-
|
|
14578
|
-
|
|
14579
|
-
continue;
|
|
14580
|
-
}
|
|
14581
|
-
try {
|
|
14582
|
-
await this.#processDirectIteration(loopId, shardId);
|
|
14583
|
-
} catch (error) {
|
|
14584
|
-
this.logger.error("Direct consumer iteration error", {
|
|
14585
|
-
loopId,
|
|
14586
|
-
error: error instanceof Error ? error.message : String(error)
|
|
14587
|
-
});
|
|
14588
|
-
}
|
|
14589
|
-
}
|
|
14590
|
-
} catch (error) {
|
|
14591
|
-
if (error instanceof Error && error.name === "AbortError") {
|
|
14592
|
-
this.logger.debug("Direct consumer loop aborted", { loopId });
|
|
14593
|
-
return;
|
|
14594
|
-
}
|
|
14595
|
-
throw error;
|
|
14596
|
-
}
|
|
15123
|
+
/**
|
|
15124
|
+
* Extend the visibility timeout for a message.
|
|
15125
|
+
* External consumers should call this periodically during long-running processing.
|
|
15126
|
+
*
|
|
15127
|
+
* @param messageId - The ID of the message
|
|
15128
|
+
* @param queueId - The queue ID the message belongs to
|
|
15129
|
+
* @returns true if heartbeat was successful
|
|
15130
|
+
*/
|
|
15131
|
+
async heartbeatMessage(messageId, queueId) {
|
|
15132
|
+
return this.visibilityManager.heartbeat(messageId, queueId, this.heartbeatIntervalMs);
|
|
14597
15133
|
}
|
|
14598
|
-
|
|
15134
|
+
/**
|
|
15135
|
+
* Mark a message as successfully processed.
|
|
15136
|
+
* This removes the message from in-flight and releases concurrency.
|
|
15137
|
+
*
|
|
15138
|
+
* @param messageId - The ID of the message
|
|
15139
|
+
* @param queueId - The queue ID the message belongs to
|
|
15140
|
+
*/
|
|
15141
|
+
async completeMessage(messageId, queueId) {
|
|
15142
|
+
const shardId = this.masterQueue.getShardForQueue(queueId);
|
|
15143
|
+
const queueKey = this.keys.queueKey(queueId);
|
|
14599
15144
|
const masterQueueKey = this.keys.masterQueueKey(shardId);
|
|
14600
|
-
const
|
|
14601
|
-
const
|
|
14602
|
-
|
|
14603
|
-
|
|
14604
|
-
|
|
14605
|
-
|
|
14606
|
-
|
|
14607
|
- if (this.concurrencyManager) {
|
|
14608
|
- const [current, limit] = await Promise.all([
|
|
14609
|
- this.concurrencyManager.getCurrentConcurrency("tenant", tenantId),
|
|
14610
|
- this.concurrencyManager.getConcurrencyLimit("tenant", tenantId)
|
|
14611
|
- ]);
|
|
14612
|
- availableSlots = Math.max(1, limit - current);
|
|
14613
|
- }
|
|
14614
|
- let slotsUsed = 0;
|
|
14615
|
- queueLoop: for (const queueId of queues) {
|
|
14616
|
- while (slotsUsed < availableSlots) {
|
|
14617
|
- if (this.cooloffEnabled && this.#isInCooloff(queueId)) {
|
|
14618
|
- break;
|
|
14619
|
- }
|
|
14620
|
- const processed = await this.#processOneMessage(loopId, queueId, tenantId, shardId);
|
|
14621
|
- if (processed) {
|
|
14622
|
- await this.scheduler.recordProcessed?.(tenantId, queueId);
|
|
14623
|
- this.#resetCooloff(queueId);
|
|
14624
|
- slotsUsed++;
|
|
14625
|
- } else {
|
|
14626
|
- this.#incrementCooloff(queueId);
|
|
14627
|
- break;
|
|
14628
|
- }
|
|
14629
|
- }
|
|
14630
|
- if (slotsUsed >= availableSlots) {
|
|
14631
|
- break queueLoop;
|
|
14632
|
- }
|
|
15145
|
+ const inflightDataKey = this.keys.inflightDataKey(shardId);
|
|
15146
|
+ const dataJson = await this.redis.hget(inflightDataKey, messageId);
|
|
15147
|
+ let storedMessage = null;
|
|
15148
|
+ if (dataJson) {
|
|
15149
|
+ try {
|
|
15150
|
+ storedMessage = JSON.parse(dataJson);
|
|
15151
|
+ } catch {
|
|
14633
15152
|
}
|
|
14634
15153
|
}
|
|
15154
|
+ const descriptor = storedMessage ? this.queueDescriptorCache.get(queueId) ?? {
|
|
15155
|
+ id: queueId,
|
|
15156
|
+ tenantId: storedMessage.tenantId,
|
|
15157
|
+ metadata: storedMessage.metadata ?? {}
|
|
15158
|
+ } : { id: queueId, tenantId: "", metadata: {} };
|
|
15159
|
+ await this.visibilityManager.complete(messageId, queueId);
|
|
15160
|
+ if (this.concurrencyManager && storedMessage) {
|
|
15161
|
+ await this.concurrencyManager.release(descriptor, messageId);
|
|
15162
|
+ }
|
|
15163
|
+ const removed = await this.redis.updateMasterQueueIfEmpty(masterQueueKey, queueKey, queueId);
|
|
15164
|
+ if (removed === 1) {
|
|
15165
|
+ this.queueDescriptorCache.delete(queueId);
|
|
15166
|
+ this.queueCooloffStates.delete(queueId);
|
|
15167
|
+ }
|
|
15168
|
+ this.telemetry.recordComplete();
|
|
15169
|
+ this.logger.debug("Message completed", {
|
|
15170
|
+ messageId,
|
|
15171
|
+ queueId
|
|
15172
|
+ });
|
|
14635
15173
|
}
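completeMessage acknowledges a message from outside the queue's own consumer loop: it drops the in-flight entry, releases the concurrency slot when the stored message is still available, and prunes the descriptor and cooloff caches once updateMasterQueueIfEmpty reports the queue as empty (return value 1). A hedged usage sketch (surrounding names are assumptions):

// Hypothetical external acknowledgement once processing succeeded.
await fairQueue.completeMessage(messageId, queueId);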
|
|
14636
|
-
|
|
15174
|
+ /**
|
|
15175
|
+ * Release a message back to the queue for processing by another consumer.
|
|
15176
|
+ * The message is placed at the back of the queue.
|
|
15177
|
+ *
|
|
15178
|
+ * @param messageId - The ID of the message
|
|
15179
|
+ * @param queueId - The queue ID the message belongs to
|
|
15180
|
+ */
|
|
15181
|
+ async releaseMessage(messageId, queueId) {
|
|
15182
|
+ const shardId = this.masterQueue.getShardForQueue(queueId);
|
|
14637
15183
|
const queueKey = this.keys.queueKey(queueId);
|
|
14638
15184
|
const queueItemsKey = this.keys.queueItemsKey(queueId);
|
|
14639
15185
|
const masterQueueKey = this.keys.masterQueueKey(shardId);
|
|
14640
|
- const
|
|
14641
|
-
|
|
14642
|
-
|
|
14643
|
-
|
|
14644
|
-
|
|
14645
|
-
|
|
14646
|
-
|
|
14647
|
- if (!check.allowed) {
|
|
14648
|
- return false;
|
|
14649
|
- }
|
|
14650
|
- }
|
|
14651
|
- if (this.globalRateLimiter) {
|
|
14652
|
- const result = await this.globalRateLimiter.limit();
|
|
14653
|
- if (!result.allowed && result.resetAt) {
|
|
14654
|
- const waitMs = Math.max(0, result.resetAt - Date.now());
|
|
14655
|
- if (waitMs > 0) {
|
|
14656
|
- this.logger.debug("Global rate limit reached, waiting", { waitMs, loopId });
|
|
14657
|
- await new Promise((resolve) => setTimeout(resolve, waitMs));
|
|
14658
|
- }
|
|
15186
|
+ const inflightDataKey = this.keys.inflightDataKey(shardId);
|
|
15187
|
+ const dataJson = await this.redis.hget(inflightDataKey, messageId);
|
|
15188
|
+ let storedMessage = null;
|
|
15189
|
+ if (dataJson) {
|
|
15190
|
+ try {
|
|
15191
|
+ storedMessage = JSON.parse(dataJson);
|
|
15192
|
+ } catch {
|
|
14659
15193
|
}
|
|
14660
15194
|
}
|
|
14661
|
- const
|
|
15195
|
+ const descriptor = storedMessage ? this.queueDescriptorCache.get(queueId) ?? {
|
|
15196
|
+ id: queueId,
|
|
15197
|
+ tenantId: storedMessage.tenantId,
|
|
15198
|
+ metadata: storedMessage.metadata ?? {}
|
|
15199
|
+ } : { id: queueId, tenantId: "", metadata: {} };
|
|
15200
|
+ await this.visibilityManager.release(
|
|
15201
|
+ messageId,
|
|
14662
15202
|
queueId,
|
|
14663
15203
|
queueKey,
|
|
14664
15204
|
queueItemsKey,
|
|
14665
|
-
|
|
14666
|
-
|
|
15205
|
+ masterQueueKey,
|
|
15206
|
+ Date.now()
|
|
15207
|
+ // Put at back of queue
|
|
14667
15208
|
);
|
|
14668
|
- if (
|
|
14669
|
- await this.
|
|
14670
|
- return false;
|
|
14671
|
- }
|
|
14672
|
- const { message } = claimResult;
|
|
14673
|
- if (this.concurrencyManager) {
|
|
14674
|
- const reserved = await this.concurrencyManager.reserve(descriptor, message.messageId);
|
|
14675
|
- if (!reserved) {
|
|
14676
|
- await this.visibilityManager.release(message.messageId, queueId, queueKey, queueItemsKey);
|
|
14677
|
- return false;
|
|
14678
|
- }
|
|
15209
|
+ if (this.concurrencyManager && storedMessage) {
|
|
15210
|
+ await this.concurrencyManager.release(descriptor, messageId);
|
|
14679
15211
|
}
|
|
14680
|
-
|
|
14681
|
-
|
|
15212
|
+ this.logger.debug("Message released", {
|
|
15213
|
+ messageId,
|
|
15214
|
+ queueId
|
|
15215
|
+ });
|
|
14682
15216
|
}
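releaseMessage re-enqueues an in-flight message with a score of Date.now(), i.e. at the back of the queue, and releases its concurrency slot. A sketch of a shutdown path that hands work back instead of waiting for the visibility timeout to reclaim it (the signal-handler wiring is an assumption, not part of this package):

// Hypothetical graceful shutdown: return the current message to the queue
// so another consumer can claim it without waiting for the timeout.
process.once("SIGTERM", async () => {
  await fairQueue.releaseMessage(messageId, queueId);
  process.exit(0);
});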
|
|
14683
|
-
|
|
14684
|
-
|
|
14685
|
-
|
|
14686
|
-
|
|
14687
|
-
|
|
15217
|
+ /**
|
|
15218
|
+ * Mark a message as failed. This will trigger retry logic if configured,
|
|
15219
|
+ * or move the message to the dead letter queue.
|
|
15220
|
+ *
|
|
15221
|
+ * @param messageId - The ID of the message
|
|
15222
|
+ * @param queueId - The queue ID the message belongs to
|
|
15223
|
+ * @param error - Optional error that caused the failure
|
|
15224
|
+ */
|
|
15225
|
+ async failMessage(messageId, queueId, error) {
|
|
15226
|
+ const shardId = this.masterQueue.getShardForQueue(queueId);
|
|
14688
15227
|
const queueKey = this.keys.queueKey(queueId);
|
|
14689
15228
|
const queueItemsKey = this.keys.queueItemsKey(queueId);
|
|
14690
|
- const shardId = this.masterQueue.getShardForQueue(queueId);
|
|
14691
15229
|
const masterQueueKey = this.keys.masterQueueKey(shardId);
|
|
15230
|
+ const inflightDataKey = this.keys.inflightDataKey(shardId);
|
|
15231
|
+ const dataJson = await this.redis.hget(inflightDataKey, messageId);
|
|
15232
|
+ if (!dataJson) {
|
|
15233
|
+ this.logger.error("Cannot fail message: not found in in-flight data", { messageId, queueId });
|
|
15234
|
+ return;
|
|
15235
|
+ }
|
|
15236
|
+ let storedMessage;
|
|
15237
|
+ try {
|
|
15238
|
+ storedMessage = JSON.parse(dataJson);
|
|
15239
|
+ } catch {
|
|
15240
|
+ this.logger.error("Cannot fail message: failed to parse stored message", {
|
|
15241
|
+ messageId,
|
|
15242
|
+ queueId
|
|
15243
|
+ });
|
|
15244
|
+ return;
|
|
15245
|
+ }
|
|
14692
15246
|
const descriptor = this.queueDescriptorCache.get(queueId) ?? {
|
|
14693
15247
|
id: queueId,
|
|
14694
15248
|
tenantId: storedMessage.tenantId,
|
|
14695
15249
|
metadata: storedMessage.metadata ?? {}
|
|
14696
15250
|
};
|
|
14697
|
-
|
|
14698
|
-
|
|
14699
|
- const result = this.payloadSchema.safeParse(storedMessage.payload);
|
|
14700
|
- if (!result.success) {
|
|
14701
|
- this.logger.error("Payload validation failed on dequeue", {
|
|
14702
|
- messageId: storedMessage.id,
|
|
14703
|
- queueId,
|
|
14704
|
- error: result.error.message
|
|
14705
|
- });
|
|
14706
|
- await this.#moveToDeadLetterQueue(storedMessage, "Payload validation failed");
|
|
14707
|
- if (this.concurrencyManager) {
|
|
14708
|
- try {
|
|
14709
|
- await this.concurrencyManager.release(descriptor, storedMessage.id);
|
|
14710
|
- } catch (releaseError) {
|
|
14711
|
- this.logger.error("Failed to release concurrency slot after payload validation failure", {
|
|
14712
|
- messageId: storedMessage.id,
|
|
14713
|
- queueId,
|
|
14714
|
- error: releaseError instanceof Error ? releaseError.message : String(releaseError)
|
|
14715
|
- });
|
|
14716
|
- }
|
|
14717
|
- }
|
|
14718
|
- return;
|
|
14719
|
- }
|
|
14720
|
- payload = result.data;
|
|
14721
|
- } else {
|
|
14722
|
- payload = storedMessage.payload;
|
|
14723
|
- }
|
|
14724
|
- const queueMessage = {
|
|
14725
|
- id: storedMessage.id,
|
|
14726
|
- queueId,
|
|
14727
|
- payload,
|
|
14728
|
- timestamp: storedMessage.timestamp,
|
|
14729
|
- attempt: storedMessage.attempt,
|
|
14730
|
- metadata: storedMessage.metadata
|
|
14731
|
- };
|
|
14732
|
- const queueTime = startTime - storedMessage.timestamp;
|
|
14733
|
- this.telemetry.recordQueueTime(
|
|
14734
|
- queueTime,
|
|
14735
|
- this.telemetry.messageAttributes({
|
|
14736
|
- queueId,
|
|
14737
|
- tenantId: storedMessage.tenantId,
|
|
14738
|
- messageId: storedMessage.id
|
|
14739
|
- })
|
|
14740
|
- );
|
|
14741
|
- const handlerContext = {
|
|
14742
|
- message: queueMessage,
|
|
14743
|
- queue: descriptor,
|
|
14744
|
- consumerId: loopId,
|
|
14745
|
- heartbeat: async () => {
|
|
14746
|
- return this.visibilityManager.heartbeat(
|
|
14747
|
- storedMessage.id,
|
|
14748
|
- queueId,
|
|
14749
|
- this.heartbeatIntervalMs
|
|
14750
|
- );
|
|
14751
|
- },
|
|
14752
|
- complete: async () => {
|
|
14753
|
- await this.#completeMessage(storedMessage, queueId, queueKey, masterQueueKey, descriptor);
|
|
14754
|
- this.telemetry.recordComplete(
|
|
14755
|
- this.telemetry.messageAttributes({
|
|
14756
|
- queueId,
|
|
14757
|
- tenantId: storedMessage.tenantId,
|
|
14758
|
- messageId: storedMessage.id
|
|
14759
|
- })
|
|
14760
|
- );
|
|
14761
|
- this.telemetry.recordProcessingTime(
|
|
14762
|
- Date.now() - startTime,
|
|
14763
|
- this.telemetry.messageAttributes({
|
|
14764
|
- queueId,
|
|
14765
|
- tenantId: storedMessage.tenantId,
|
|
14766
|
- messageId: storedMessage.id
|
|
14767
|
- })
|
|
14768
|
- );
|
|
14769
|
- },
|
|
14770
|
- release: async () => {
|
|
14771
|
- await this.#releaseMessage(storedMessage, queueId, queueKey, queueItemsKey, descriptor);
|
|
14772
|
- },
|
|
14773
|
- fail: async (error) => {
|
|
14774
|
- await this.#handleMessageFailure(
|
|
14775
|
- storedMessage,
|
|
14776
|
- queueId,
|
|
14777
|
- queueKey,
|
|
14778
|
- queueItemsKey,
|
|
14779
|
- masterQueueKey,
|
|
14780
|
- descriptor,
|
|
14781
|
- error
|
|
14782
|
- );
|
|
14783
|
- }
|
|
14784
|
- };
|
|
14785
|
- try {
|
|
14786
|
- await this.telemetry.trace(
|
|
14787
|
- "processMessage",
|
|
14788
|
- async (span) => {
|
|
14789
|
- span.setAttributes({
|
|
14790
|
- [FairQueueAttributes.QUEUE_ID]: queueId,
|
|
14791
|
- [FairQueueAttributes.TENANT_ID]: storedMessage.tenantId,
|
|
14792
|
- [FairQueueAttributes.MESSAGE_ID]: storedMessage.id,
|
|
14793
|
- [FairQueueAttributes.ATTEMPT]: storedMessage.attempt,
|
|
14794
|
- [FairQueueAttributes.CONSUMER_ID]: loopId
|
|
14795
|
- });
|
|
14796
|
- await this.messageHandler(handlerContext);
|
|
14797
|
- },
|
|
14798
|
- {
|
|
14799
|
- kind: SpanKind.CONSUMER,
|
|
14800
|
- attributes: {
|
|
14801
|
- [MessagingAttributes.OPERATION]: "process"
|
|
14802
|
- }
|
|
14803
|
- }
|
|
14804
|
- );
|
|
14805
|
- } catch (error) {
|
|
14806
|
- this.logger.error("Message handler error", {
|
|
14807
|
- messageId: storedMessage.id,
|
|
14808
|
- queueId,
|
|
14809
|
- error: error instanceof Error ? error.message : String(error)
|
|
14810
|
- });
|
|
14811
|
- await handlerContext.fail(error instanceof Error ? error : new Error(String(error)));
|
|
14812
|
- }
|
|
14813
|
- }
|
|
14814
|
- async #completeMessage(storedMessage, queueId, queueKey, masterQueueKey, descriptor) {
|
|
14815
|
- this.masterQueue.getShardForQueue(queueId);
|
|
14816
|
- await this.visibilityManager.complete(storedMessage.id, queueId);
|
|
14817
|
- if (this.concurrencyManager) {
|
|
14818
|
- await this.concurrencyManager.release(descriptor, storedMessage.id);
|
|
14819
|
- }
|
|
14820
|
- await this.redis.updateMasterQueueIfEmpty(masterQueueKey, queueKey, queueId);
|
|
14821
|
- this.logger.debug("Message completed", {
|
|
14822
|
- messageId: storedMessage.id,
|
|
14823
|
- queueId
|
|
14824
|
- });
|
|
14825
|
- }
|
|
14826
|
- async #releaseMessage(storedMessage, queueId, queueKey, queueItemsKey, descriptor) {
|
|
14827
|
- await this.visibilityManager.release(
|
|
14828
|
- storedMessage.id,
|
|
15251
|
+ await this.#handleMessageFailure(
|
|
15252
|
+ storedMessage,
|
|
14829
15253
|
queueId,
|
|
14830
15254
|
queueKey,
|
|
14831
15255
|
queueItemsKey,
|
|
14832
|
-
|
|
14833
|
-
|
|
15256
|
+ masterQueueKey,
|
|
15257
|
+ descriptor,
|
|
15258
|
+ error
|
|
14834
15259
|
);
|
|
14835
|
- if (this.concurrencyManager) {
|
|
14836
|
- await this.concurrencyManager.release(descriptor, storedMessage.id);
|
|
14837
|
- }
|
|
14838
|
- this.logger.debug("Message released", {
|
|
14839
|
- messageId: storedMessage.id,
|
|
14840
|
- queueId
|
|
14841
|
- });
|
|
14842
15260
|
}
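failMessage looks up and parses the in-flight record, then delegates to #handleMessageFailure, which either schedules a retry via the configured retryStrategy or moves the message to the dead letter queue. A sketch of the external processing loop these new lifecycle methods enable (fairQueue and handlePayload are assumptions, not part of this diff):

// Hypothetical external consumer combining the new lifecycle methods.
const stored = await fairQueue.getMessageData(messageId, queueId);
if (stored) {
  try {
    await handlePayload(stored.payload);
    await fairQueue.completeMessage(messageId, queueId);
  } catch (err) {
    await fairQueue.failMessage(messageId, queueId, err instanceof Error ? err : new Error(String(err)));
  }
}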
|
|
15261
|
+ // ============================================================================
|
|
15262
|
+ // Private - Message Processing Helpers
|
|
15263
|
+ // ============================================================================
|
|
14843
15264
|
async #handleMessageFailure(storedMessage, queueId, queueKey, queueItemsKey, masterQueueKey, descriptor, error) {
|
|
14844
|
- this.telemetry.recordFailure(
|
|
14845
|
- this.telemetry.messageAttributes({
|
|
14846
|
- queueId,
|
|
14847
|
- tenantId: storedMessage.tenantId,
|
|
14848
|
- messageId: storedMessage.id,
|
|
14849
|
- attempt: storedMessage.attempt
|
|
14850
|
- })
|
|
14851
|
- );
|
|
15265
|
+ this.telemetry.recordFailure();
|
|
14852
15266
|
if (this.retryStrategy) {
|
|
14853
15267
|
const nextDelay = this.retryStrategy.getNextDelay(storedMessage.attempt, error);
|
|
14854
15268
|
if (nextDelay !== null) {
|
|
@@ -14861,20 +15275,14 @@ var FairQueue = class {
|
|
|
14861
15275
|
queueId,
|
|
14862
15276
|
queueKey,
|
|
14863
15277
|
queueItemsKey,
|
|
15278
|
+ masterQueueKey,
|
|
14864
15279
|
Date.now() + nextDelay
|
|
14865
15280
|
);
|
|
14866
15281
|
await this.redis.hset(queueItemsKey, storedMessage.id, JSON.stringify(updatedMessage));
|
|
14867
15282
|
if (this.concurrencyManager) {
|
|
14868
15283
|
await this.concurrencyManager.release(descriptor, storedMessage.id);
|
|
14869
15284
|
}
|
|
14870
|
- this.telemetry.recordRetry(
|
|
14871
|
- this.telemetry.messageAttributes({
|
|
14872
|
- queueId,
|
|
14873
|
- tenantId: storedMessage.tenantId,
|
|
14874
|
- messageId: storedMessage.id,
|
|
14875
|
- attempt: storedMessage.attempt + 1
|
|
14876
|
- })
|
|
14877
|
- );
|
|
15285
|
+ this.telemetry.recordRetry();
|
|
14878
15286
|
this.logger.debug("Message scheduled for retry", {
|
|
14879
15287
|
messageId: storedMessage.id,
|
|
14880
15288
|
queueId,
|
|
@@ -14912,14 +15320,7 @@ var FairQueue = class {
|
|
|
14912
15320
|
pipeline.zadd(dlqKey, dlqMessage.deadLetteredAt, storedMessage.id);
|
|
14913
15321
|
pipeline.hset(dlqDataKey, storedMessage.id, JSON.stringify(dlqMessage));
|
|
14914
15322
|
await pipeline.exec();
|
|
14915
|
- this.telemetry.recordDLQ(
|
|
14916
|
- this.telemetry.messageAttributes({
|
|
14917
|
- queueId: storedMessage.queueId,
|
|
14918
|
- tenantId: storedMessage.tenantId,
|
|
14919
|
- messageId: storedMessage.id,
|
|
14920
|
- attempt: storedMessage.attempt
|
|
14921
|
- })
|
|
14922
|
- );
|
|
15323
|
+ this.telemetry.recordDLQ();
|
|
14923
15324
|
this.logger.info("Message moved to DLQ", {
|
|
14924
15325
|
messageId: storedMessage.id,
|
|
14925
15326
|
queueId: storedMessage.queueId,
|
|
@@ -14945,7 +15346,7 @@ var FairQueue = class {
|
|
|
14945
15346
|
}
|
|
14946
15347
|
}
|
|
14947
15348
|
} catch (error) {
|
|
14948
|
- if (error
|
|
15349
|
+ if (isAbortError(error)) {
|
|
14949
15350
|
this.logger.debug("Reclaim loop aborted");
|
|
14950
15351
|
return;
|
|
14951
15352
|
}
|
|
@@ -14957,7 +15358,8 @@ var FairQueue = class {
|
|
|
14957
15358
|
for (let shardId = 0; shardId < this.shardCount; shardId++) {
|
|
14958
15359
|
const reclaimed = await this.visibilityManager.reclaimTimedOut(shardId, (queueId) => ({
|
|
14959
15360
|
queueKey: this.keys.queueKey(queueId),
|
|
14960
|
- queueItemsKey: this.keys.queueItemsKey(queueId)
|
|
15361
|
+ queueItemsKey: this.keys.queueItemsKey(queueId),
|
|
15362
|
+ masterQueueKey: this.keys.masterQueueKey(this.masterQueue.getShardForQueue(queueId))
|
|
14961
15363
|
}));
|
|
14962
15364
|
totalReclaimed += reclaimed;
|
|
14963
15365
|
}
|
|
@@ -14981,6 +15383,13 @@ var FairQueue = class {
|
|
|
14981
15383
|
return false;
|
|
14982
15384
|
}
|
|
14983
15385
|
#incrementCooloff(queueId) {
|
|
15386
|
+ if (this.queueCooloffStates.size >= this.maxCooloffStatesSize) {
|
|
15387
|
+ this.logger.warn("Cooloff states cache hit size cap, clearing all entries", {
|
|
15388
|
+ size: this.queueCooloffStates.size,
|
|
15389
|
+ cap: this.maxCooloffStatesSize
|
|
15390
|
+ });
|
|
15391
|
+ this.queueCooloffStates.clear();
|
|
15392
|
+ }
|
|
14984
15393
|
const state = this.queueCooloffStates.get(queueId) ?? {
|
|
14985
15394
|
tag: "normal",
|
|
14986
15395
|
consecutiveFailures: 0
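The new guard in #incrementCooloff caps queueCooloffStates at maxCooloffStatesSize by clearing the whole Map when the cap is reached: memory stays bounded, at the cost of forgetting every queue's cooloff progress at once. A standalone sketch of the same trade-off (names mirror the diff, but this block is illustrative and not part of the package):

// Sketch of the size-capped cooloff map added above.
function incrementCooloff(states, queueId, cap) {
  if (states.size >= cap) states.clear(); // reset all rather than evict one
  const state = states.get(queueId) ?? { tag: "normal", consecutiveFailures: 0 };
  state.consecutiveFailures += 1;
  states.set(queueId, state);
}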
|
|
@@ -15124,6 +15533,7 @@ end
|
|
|
15124
15533
|
};
|
|
15125
15534
|
|
|
15126
15535
|
exports.BaseScheduler = BaseScheduler;
|
|
15536
|
+exports.BatchedSpanManager = BatchedSpanManager;
|
|
15127
15537
|
exports.CallbackFairQueueKeyProducer = CallbackFairQueueKeyProducer;
|
|
15128
15538
|
exports.ConcurrencyManager = ConcurrencyManager;
|
|
15129
15539
|
exports.CronSchema = CronSchema;
|
|
@@ -15149,6 +15559,7 @@ exports.Worker = Worker;
|
|
|
15149
15559
|
exports.WorkerQueueManager = WorkerQueueManager;
|
|
15150
15560
|
exports.createDefaultRetryStrategy = createDefaultRetryStrategy;
|
|
15151
15561
|
exports.defaultRetryOptions = defaultRetryOptions;
|
|
15562
|
+exports.isAbortError = isAbortError;
|
|
15152
15563
|
exports.noopTelemetry = noopTelemetry;
|
|
15153
15564
|
//# sourceMappingURL=index.cjs.map
|
|
15154
15565
|
//# sourceMappingURL=index.cjs.map
|