@trigger.dev/redis-worker 4.3.1 → 4.3.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.cjs +863 -452
- package/dist/index.cjs.map +1 -1
- package/dist/index.d.cts +270 -27
- package/dist/index.d.ts +270 -27
- package/dist/index.js +862 -453
- package/dist/index.js.map +1 -1
- package/package.json +2 -2
package/dist/index.js
CHANGED
```diff
@@ -10826,6 +10826,9 @@ var SpanStatusCode;
   SpanStatusCode2[SpanStatusCode2["ERROR"] = 2] = "ERROR";
 })(SpanStatusCode || (SpanStatusCode = {}));
 
+// ../../node_modules/.pnpm/@opentelemetry+api@1.9.0/node_modules/@opentelemetry/api/build/esm/context-api.js
+var context = ContextAPI.getInstance();
+
 // ../../node_modules/.pnpm/@opentelemetry+api@1.9.0/node_modules/@opentelemetry/api/build/esm/metrics/NoopMeterProvider.js
 var NoopMeterProvider = (
 /** @class */
```
```diff
@@ -11723,6 +11726,11 @@ var Worker = class _Worker {
   }
 };
 
+// src/utils.ts
+function isAbortError(error) {
+  return error instanceof Error && (error.name === "AbortError" || error.message === "AbortError");
+}
+
 // src/fair-queue/concurrency.ts
 var ConcurrencyManager = class {
   constructor(options) {
```
```diff
@@ -11799,6 +11807,37 @@ var ConcurrencyManager = class {
     const key = this.keys.concurrencyKey(groupName, groupId);
     return await this.redis.scard(key);
   }
+  /**
+   * Get available capacity for a queue across all concurrency groups.
+   * Returns the minimum available capacity across all groups.
+   */
+  async getAvailableCapacity(queue) {
+    if (this.groups.length === 0) {
+      return 0;
+    }
+    const groupData = this.groups.map((group) => ({
+      group,
+      groupId: group.extractGroupId(queue)
+    }));
+    const [currents, limits] = await Promise.all([
+      Promise.all(
+        groupData.map(
+          ({ group, groupId }) => this.redis.scard(this.keys.concurrencyKey(group.name, groupId))
+        )
+      ),
+      Promise.all(
+        groupData.map(
+          ({ group, groupId }) => group.getLimit(groupId).then((limit) => limit || group.defaultLimit)
+        )
+      )
+    ]);
+    let minCapacity = Infinity;
+    for (let i = 0; i < groupData.length; i++) {
+      const available = Math.max(0, limits[i] - currents[i]);
+      minCapacity = Math.min(minCapacity, available);
+    }
+    return minCapacity === Infinity ? 0 : minCapacity;
+  }
   /**
    * Get concurrency limit for a specific group.
    */
```
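The new `getAvailableCapacity` bounds a batch claim by the most constrained concurrency group. A minimal illustration of that min-across-groups rule, with hypothetical group names and plain numbers standing in for the `SCARD` and `getLimit` calls:

```ts
// Illustrative only: mirrors the min-across-groups rule from the compiled
// getAvailableCapacity above, with in-memory numbers instead of Redis calls.
type GroupUsage = { name: string; limit: number; current: number };

function availableCapacity(groups: GroupUsage[]): number {
  if (groups.length === 0) return 0;
  let min = Infinity;
  for (const g of groups) {
    min = Math.min(min, Math.max(0, g.limit - g.current));
  }
  return min === Infinity ? 0 : min;
}

// A tenant group with 3 free slots and a global group with 2 free slots
// yields a batch budget of 2.
console.log(
  availableCapacity([
    { name: "tenant", limit: 10, current: 7 },
    { name: "global", limit: 100, current: 98 },
  ])
); // 2
```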
```diff
@@ -12305,7 +12344,8 @@ var FairQueueTelemetry = class {
   // Helper Methods
   // ============================================================================
   /**
-   * Create standard attributes for a message operation.
+   * Create standard attributes for a message operation (for spans/traces).
+   * Use this for span attributes where high cardinality is acceptable.
    */
   messageAttributes(params) {
     const attrs = {};
```
```diff
@@ -12391,6 +12431,187 @@ var FairQueueTelemetry = class {
     };
   }
 };
+var BatchedSpanManager = class {
+  tracer;
+  name;
+  maxIterations;
+  timeoutSeconds;
+  loopStates = /* @__PURE__ */ new Map();
+  getDynamicAttributes;
+  constructor(options) {
+    this.tracer = options.tracer;
+    this.name = options.name;
+    this.maxIterations = options.maxIterations;
+    this.timeoutSeconds = options.timeoutSeconds;
+    this.getDynamicAttributes = options.getDynamicAttributes;
+  }
+  /**
+   * Initialize state for a consumer loop.
+   */
+  initializeLoop(loopId) {
+    this.loopStates.set(loopId, {
+      perTraceCountdown: this.maxIterations,
+      traceStartedAt: /* @__PURE__ */ new Date(),
+      iterationsCount: 0,
+      totalIterationsCount: 0,
+      runningDurationInMs: 0,
+      stats: {},
+      endSpanInNextIteration: false
+    });
+  }
+  /**
+   * Get the state for a consumer loop.
+   */
+  getState(loopId) {
+    return this.loopStates.get(loopId);
+  }
+  /**
+   * Increment a stat counter for a loop.
+   */
+  incrementStat(loopId, statName, value = 1) {
+    const state = this.loopStates.get(loopId);
+    if (state) {
+      state.stats[statName] = (state.stats[statName] ?? 0) + value;
+    }
+  }
+  /**
+   * Mark that the span should end on the next iteration.
+   */
+  markForRotation(loopId) {
+    const state = this.loopStates.get(loopId);
+    if (state) {
+      state.endSpanInNextIteration = true;
+    }
+  }
+  /**
+   * Check if the span should be rotated (ended and a new one started).
+   */
+  shouldRotate(loopId) {
+    const state = this.loopStates.get(loopId);
+    if (!state) return true;
+    return state.perTraceCountdown <= 0 || Date.now() - state.traceStartedAt.getTime() > this.timeoutSeconds * 1e3 || state.currentSpanContext === void 0 || state.endSpanInNextIteration;
+  }
+  /**
+   * End the current span for a loop and record stats.
+   */
+  endCurrentSpan(loopId) {
+    const state = this.loopStates.get(loopId);
+    if (!state?.currentSpan) return;
+    for (const [statName, count] of Object.entries(state.stats)) {
+      state.currentSpan.setAttribute(`stats.${statName}`, count);
+    }
+    state.currentSpan.end();
+    state.currentSpan = void 0;
+    state.currentSpanContext = void 0;
+  }
+  /**
+   * Start a new batched span for a loop.
+   */
+  startNewSpan(loopId, attributes) {
+    if (!this.tracer) return;
+    const state = this.loopStates.get(loopId);
+    if (!state) return;
+    this.endCurrentSpan(loopId);
+    const traceDurationInMs = state.traceStartedAt ? Date.now() - state.traceStartedAt.getTime() : void 0;
+    const iterationsPerSecond = traceDurationInMs && traceDurationInMs > 0 ? state.iterationsCount / (traceDurationInMs / 1e3) : void 0;
+    const dynamicAttributes = this.getDynamicAttributes?.() ?? {};
+    state.currentSpan = this.tracer.startSpan(
+      `${this.name}.consumerLoop`,
+      {
+        kind: 1,
+        // SpanKind.CONSUMER
+        attributes: {
+          loop_id: loopId,
+          max_iterations: this.maxIterations,
+          timeout_seconds: this.timeoutSeconds,
+          previous_iterations: state.iterationsCount,
+          previous_duration_ms: traceDurationInMs,
+          previous_iterations_per_second: iterationsPerSecond,
+          total_iterations: state.totalIterationsCount,
+          ...dynamicAttributes,
+          ...attributes
+        }
+      },
+      ROOT_CONTEXT
+    );
+    state.currentSpanContext = trace.setSpan(ROOT_CONTEXT, state.currentSpan);
+    state.perTraceCountdown = this.maxIterations;
+    state.traceStartedAt = /* @__PURE__ */ new Date();
+    state.iterationsCount = 0;
+    state.runningDurationInMs = 0;
+    state.stats = {};
+    state.endSpanInNextIteration = false;
+  }
+  /**
+   * Execute a function within the batched span context.
+   * Automatically handles span rotation and iteration tracking.
+   */
+  async withBatchedSpan(loopId, fn, options) {
+    let state = this.loopStates.get(loopId);
+    if (!state) {
+      this.initializeLoop(loopId);
+      state = this.loopStates.get(loopId);
+    }
+    if (this.shouldRotate(loopId)) {
+      this.startNewSpan(loopId);
+    }
+    const startTime = performance.now();
+    try {
+      if (!this.tracer || !state.currentSpanContext) {
+        return await fn(noopSpan);
+      }
+      return await context.with(state.currentSpanContext, async () => {
+        const iterationSpanName = options?.iterationSpanName ?? "iteration";
+        return await this.tracer.startActiveSpan(
+          `${this.name}.${iterationSpanName}`,
+          {
+            attributes: {
+              loop_id: loopId,
+              iteration: state.iterationsCount,
+              ...options?.attributes
+            }
+          },
+          async (iterationSpan) => {
+            try {
+              return await fn(iterationSpan);
+            } catch (error) {
+              if (error instanceof Error) {
+                iterationSpan.recordException(error);
+                state.currentSpan?.recordException(error);
+              }
+              iterationSpan.setStatus({ code: SpanStatusCode.ERROR });
+              state.endSpanInNextIteration = true;
+              throw error;
+            } finally {
+              iterationSpan.end();
+            }
+          }
+        );
+      });
+    } finally {
+      const duration = performance.now() - startTime;
+      state.runningDurationInMs += duration;
+      state.iterationsCount++;
+      state.totalIterationsCount++;
+      state.perTraceCountdown--;
+    }
+  }
+  /**
+   * Clean up state for a loop when it's stopped.
+   */
+  cleanup(loopId) {
+    this.endCurrentSpan(loopId);
+    this.loopStates.delete(loopId);
+  }
+  /**
+   * Clean up all loop states.
+   */
+  cleanupAll() {
+    for (const loopId of this.loopStates.keys()) {
+      this.cleanup(loopId);
+    }
+  }
+};
 var noopSpan = {
   spanContext: () => ({
     traceId: "",
```
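`BatchedSpanManager` keeps hot polling loops from emitting one root trace per iteration: iterations become child spans under a shared `consumerLoop` root span that rotates after `maxIterations` or `timeoutSeconds`, whichever comes first. A minimal usage sketch mirroring how `FairQueue` wires it in the hunks further down; `BatchedSpanManager` is internal to the compiled bundle, and `doOneIteration` and `running` are hypothetical stand-ins:

```ts
import { trace } from "@opentelemetry/api";

let running = true;
async function doOneIteration(): Promise<boolean> {
  return false; // stand-in: report "no work found"
}

const spans = new BatchedSpanManager({
  tracer: trace.getTracer("fairqueue"),
  name: "fairqueue",
  maxIterations: 500, // rotate the root span after 500 iterations...
  timeoutSeconds: 60, // ...or after 60 seconds
});

async function pollLoop(loopId: string) {
  spans.initializeLoop(loopId);
  try {
    while (running) {
      // Each pass becomes a "fairqueue.iteration" child span under the
      // shared, periodically rotated "fairqueue.consumerLoop" root span.
      const hadWork = await spans.withBatchedSpan(loopId, async (span) => {
        span.setAttribute("shard_id", 0);
        return await doOneIteration();
      });
      if (!hadWork) spans.incrementStat(loopId, "empty_iterations");
    }
  } finally {
    spans.cleanup(loopId); // writes accumulated stats.* attributes and ends the span
  }
}
```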
```diff
@@ -12490,6 +12711,71 @@ var VisibilityManager = class {
       return { claimed: false };
     }
   }
+  /**
+   * Claim multiple messages for processing (batch claim).
+   * Moves up to maxCount messages from the queue to the in-flight set.
+   *
+   * @param queueId - The queue to claim from
+   * @param queueKey - The Redis key for the queue sorted set
+   * @param queueItemsKey - The Redis key for the queue items hash
+   * @param consumerId - ID of the consumer claiming the messages
+   * @param maxCount - Maximum number of messages to claim
+   * @param timeoutMs - Visibility timeout in milliseconds
+   * @returns Array of claimed messages
+   */
+  async claimBatch(queueId, queueKey, queueItemsKey, consumerId, maxCount, timeoutMs) {
+    const timeout = timeoutMs ?? this.defaultTimeoutMs;
+    const deadline = Date.now() + timeout;
+    const shardId = this.#getShardForQueue(queueId);
+    const inflightKey = this.keys.inflightKey(shardId);
+    const inflightDataKey = this.keys.inflightDataKey(shardId);
+    const result = await this.redis.claimMessageBatch(
+      queueKey,
+      queueItemsKey,
+      inflightKey,
+      inflightDataKey,
+      queueId,
+      deadline.toString(),
+      maxCount.toString()
+    );
+    if (!result || result.length === 0) {
+      return [];
+    }
+    const messages = [];
+    for (let i = 0; i < result.length; i += 2) {
+      const messageId = result[i];
+      const payloadJson = result[i + 1];
+      if (!messageId || !payloadJson) {
+        continue;
+      }
+      try {
+        const payload = JSON.parse(payloadJson);
+        messages.push({
+          messageId,
+          queueId,
+          payload,
+          deadline,
+          consumerId
+        });
+      } catch (error) {
+        this.logger.error("Failed to parse claimed message in batch", {
+          messageId,
+          queueId,
+          error: error instanceof Error ? error.message : String(error)
+        });
+        await this.#removeFromInflight(shardId, messageId, queueId);
+      }
+    }
+    if (messages.length > 0) {
+      this.logger.debug("Batch claimed messages", {
+        queueId,
+        consumerId,
+        count: messages.length,
+        deadline
+      });
+    }
+    return messages;
+  }
   /**
    * Extend the visibility timeout for a message (heartbeat).
    *
```
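The `claimMessageBatch` Lua script (defined further down) returns a flat `[messageId, payload, messageId, payload, ...]` array, which `claimBatch` walks in steps of two; a payload that fails `JSON.parse` is pulled back out of the in-flight set. A minimal sketch of the pair-wise unpacking:

```ts
// Illustrative: how a flat [id, payload, id, payload, ...] Redis reply
// (the shape returned by the claimMessageBatch Lua script) maps to objects.
function pairUp(reply: string[]): Array<{ messageId: string; payloadJson: string }> {
  const out: Array<{ messageId: string; payloadJson: string }> = [];
  for (let i = 0; i < reply.length; i += 2) {
    if (reply[i] && reply[i + 1]) {
      out.push({ messageId: reply[i], payloadJson: reply[i + 1] });
    }
  }
  return out;
}
```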
```diff
@@ -12503,11 +12789,7 @@ var VisibilityManager = class {
     const inflightKey = this.keys.inflightKey(shardId);
     const member = this.#makeMember(messageId, queueId);
     const newDeadline = Date.now() + extendMs;
-    const result = await this.redis.heartbeatMessage(
-      inflightKey,
-      member,
-      newDeadline.toString()
-    );
+    const result = await this.redis.heartbeatMessage(inflightKey, member, newDeadline.toString());
     const success = result === 1;
     if (success) {
       this.logger.debug("Heartbeat successful", {
```
```diff
@@ -12541,9 +12823,10 @@ var VisibilityManager = class {
    * @param queueId - The queue ID
    * @param queueKey - The Redis key for the queue
    * @param queueItemsKey - The Redis key for the queue items hash
+   * @param masterQueueKey - The Redis key for the master queue
    * @param score - Optional score for the message (defaults to now)
    */
-  async release(messageId, queueId, queueKey, queueItemsKey, score) {
+  async release(messageId, queueId, queueKey, queueItemsKey, masterQueueKey, score) {
     const shardId = this.#getShardForQueue(queueId);
     const inflightKey = this.keys.inflightKey(shardId);
     const inflightDataKey = this.keys.inflightDataKey(shardId);
```
```diff
@@ -12554,9 +12837,11 @@ var VisibilityManager = class {
       inflightDataKey,
       queueKey,
       queueItemsKey,
+      masterQueueKey,
       member,
       messageId,
-      messageScore.toString()
+      messageScore.toString(),
+      queueId
     );
     this.logger.debug("Message released", {
       messageId,
```
```diff
@@ -12564,6 +12849,45 @@ var VisibilityManager = class {
       score: messageScore
     });
   }
+  /**
+   * Release multiple messages back to their queue in a single operation.
+   * Used when processing fails or consumer wants to retry later.
+   * All messages must belong to the same queue.
+   *
+   * @param messages - Array of messages to release (must all have same queueId)
+   * @param queueId - The queue ID
+   * @param queueKey - The Redis key for the queue
+   * @param queueItemsKey - The Redis key for the queue items hash
+   * @param masterQueueKey - The Redis key for the master queue
+   * @param score - Optional score for the messages (defaults to now)
+   */
+  async releaseBatch(messages, queueId, queueKey, queueItemsKey, masterQueueKey, score) {
+    if (messages.length === 0) {
+      return;
+    }
+    const shardId = this.#getShardForQueue(queueId);
+    const inflightKey = this.keys.inflightKey(shardId);
+    const inflightDataKey = this.keys.inflightDataKey(shardId);
+    const messageScore = score ?? Date.now();
+    const messageIds = messages.map((m) => m.messageId);
+    const members = messages.map((m) => this.#makeMember(m.messageId, queueId));
+    await this.redis.releaseMessageBatch(
+      inflightKey,
+      inflightDataKey,
+      queueKey,
+      queueItemsKey,
+      masterQueueKey,
+      messageScore.toString(),
+      queueId,
+      ...members,
+      ...messageIds
+    );
+    this.logger.debug("Batch messages released", {
+      queueId,
+      count: messages.length,
+      score: messageScore
+    });
+  }
   /**
    * Reclaim timed-out messages from a shard.
    * Returns messages to their original queues.
```
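`releaseBatch` passes a variadic argument list to the `releaseMessageBatch` script: two fixed arguments, then all members, then all message IDs, so the Lua side can recover the count as `(#ARGV - 2) / 2`. A sketch of that layout; the `messageId:queueId` member format comes from the claim script shown further down:

```ts
// Illustrative: the ARGV layout releaseBatch sends to releaseMessageBatch.
function buildReleaseArgs(
  score: number,
  queueId: string,
  messages: Array<{ messageId: string }>
): string[] {
  const members = messages.map((m) => `${m.messageId}:${queueId}`);
  const messageIds = messages.map((m) => m.messageId);
  // [score, queueId, member_1..member_n, messageId_1..messageId_n]
  return [score.toString(), queueId, ...members, ...messageIds];
}
```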
```diff
@@ -12594,7 +12918,7 @@ var VisibilityManager = class {
         continue;
       }
       const { messageId, queueId } = this.#parseMember(member);
-      const { queueKey, queueItemsKey } = getQueueKeys(queueId);
+      const { queueKey, queueItemsKey, masterQueueKey } = getQueueKeys(queueId);
       try {
         const score = parseFloat(originalScore) || now;
         await this.redis.releaseMessage(
```
```diff
@@ -12602,9 +12926,11 @@ var VisibilityManager = class {
           inflightDataKey,
           queueKey,
           queueItemsKey,
+          masterQueueKey,
           member,
           messageId,
-          score.toString()
+          score.toString(),
+          queueId
         );
         reclaimed++;
         this.logger.debug("Reclaimed timed-out message", {
```
```diff
@@ -12738,17 +13064,67 @@ redis.call('HSET', inflightDataKey, messageId, payload)
 return {messageId, payload}
 `
     });
-    this.redis.defineCommand("releaseMessage", {
+    this.redis.defineCommand("claimMessageBatch", {
       numberOfKeys: 4,
       lua: `
+local queueKey = KEYS[1]
+local queueItemsKey = KEYS[2]
+local inflightKey = KEYS[3]
+local inflightDataKey = KEYS[4]
+
+local queueId = ARGV[1]
+local deadline = tonumber(ARGV[2])
+local maxCount = tonumber(ARGV[3])
+
+-- Get up to maxCount oldest messages from queue
+local items = redis.call('ZRANGE', queueKey, 0, maxCount - 1)
+if #items == 0 then
+  return {}
+end
+
+local results = {}
+
+for i, messageId in ipairs(items) do
+  -- Get message data
+  local payload = redis.call('HGET', queueItemsKey, messageId)
+
+  if payload then
+    -- Remove from queue
+    redis.call('ZREM', queueKey, messageId)
+    redis.call('HDEL', queueItemsKey, messageId)
+
+    -- Add to in-flight set with deadline
+    local member = messageId .. ':' .. queueId
+    redis.call('ZADD', inflightKey, deadline, member)
+
+    -- Store message data for potential release
+    redis.call('HSET', inflightDataKey, messageId, payload)
+
+    -- Add to results
+    table.insert(results, messageId)
+    table.insert(results, payload)
+  else
+    -- Message data missing, remove from queue
+    redis.call('ZREM', queueKey, messageId)
+  end
+end
+
+return results
+`
+    });
+    this.redis.defineCommand("releaseMessage", {
+      numberOfKeys: 5,
+      lua: `
 local inflightKey = KEYS[1]
 local inflightDataKey = KEYS[2]
 local queueKey = KEYS[3]
 local queueItemsKey = KEYS[4]
+local masterQueueKey = KEYS[5]
 
 local member = ARGV[1]
 local messageId = ARGV[2]
 local score = tonumber(ARGV[3])
+local queueId = ARGV[4]
 
 -- Get message data from in-flight
 local payload = redis.call('HGET', inflightDataKey, messageId)
```
```diff
@@ -12765,9 +13141,67 @@ redis.call('HDEL', inflightDataKey, messageId)
 redis.call('ZADD', queueKey, score, messageId)
 redis.call('HSET', queueItemsKey, messageId, payload)
 
+-- Update master queue with oldest message timestamp
+-- This ensures delayed messages don't push the queue priority to the future
+-- when there are other ready messages in the queue
+local oldest = redis.call('ZRANGE', queueKey, 0, 0, 'WITHSCORES')
+if #oldest >= 2 then
+  redis.call('ZADD', masterQueueKey, oldest[2], queueId)
+end
+
 return 1
 `
     });
+    this.redis.defineCommand("releaseMessageBatch", {
+      numberOfKeys: 5,
+      lua: `
+local inflightKey = KEYS[1]
+local inflightDataKey = KEYS[2]
+local queueKey = KEYS[3]
+local queueItemsKey = KEYS[4]
+local masterQueueKey = KEYS[5]
+
+local score = tonumber(ARGV[1])
+local queueId = ARGV[2]
+
+-- Remaining args are: members..., messageIds...
+-- Calculate how many messages we have
+local numMessages = (table.getn(ARGV) - 2) / 2
+local membersStart = 3
+local messageIdsStart = membersStart + numMessages
+
+local releasedCount = 0
+
+for i = 0, numMessages - 1 do
+  local member = ARGV[membersStart + i]
+  local messageId = ARGV[messageIdsStart + i]
+
+  -- Get message data from in-flight
+  local payload = redis.call('HGET', inflightDataKey, messageId)
+  if payload then
+    -- Remove from in-flight
+    redis.call('ZREM', inflightKey, member)
+    redis.call('HDEL', inflightDataKey, messageId)
+
+    -- Add back to queue
+    redis.call('ZADD', queueKey, score, messageId)
+    redis.call('HSET', queueItemsKey, messageId, payload)
+
+    releasedCount = releasedCount + 1
+  end
+end
+
+-- Update master queue with oldest message timestamp (only once at the end)
+if releasedCount > 0 then
+  local oldest = redis.call('ZRANGE', queueKey, 0, 0, 'WITHSCORES')
+  if #oldest >= 2 then
+    redis.call('ZADD', masterQueueKey, oldest[2], queueId)
+  end
+end
+
+return releasedCount
+`
+    });
     this.redis.defineCommand("heartbeatMessage", {
       numberOfKeys: 1,
       lua: `
```
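Both release scripts now finish by re-scoring the queue in the shard's master queue to the score of its oldest pending message, so releasing a delayed message cannot push an otherwise-ready queue's priority into the future. The same step from the client side, as a sketch with an ioredis client and illustrative key names; unlike the Lua scripts above, this two-call version is not atomic:

```ts
import Redis from "ioredis";

const redis = new Redis();

// Hypothetical keys for illustration; the package derives these from its
// keys helper (queueKey / masterQueueKey).
async function reindexMasterQueue(queueKey: string, masterQueueKey: string, queueId: string) {
  // ZRANGE ... WITHSCORES returns [member, score]; the oldest message's
  // score becomes the queue's priority in the master queue.
  const oldest = await redis.zrange(queueKey, 0, 0, "WITHSCORES");
  if (oldest.length >= 2) {
    await redis.zadd(masterQueueKey, oldest[1], queueId);
  }
}
```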
```diff
@@ -12855,11 +13289,11 @@ var WorkerQueueManager = class {
   async blockingPop(workerQueueId, timeoutSeconds, signal) {
     const workerQueueKey = this.keys.workerQueueKey(workerQueueId);
     const blockingClient = this.redis.duplicate();
+    const cleanup = signal ? () => {
+      blockingClient.disconnect();
+    } : null;
     try {
-      if (signal) {
-        const cleanup = () => {
-          blockingClient.disconnect();
-        };
+      if (signal && cleanup) {
         signal.addEventListener("abort", cleanup, { once: true });
         if (signal.aborted) {
           return null;
```
```diff
@@ -12886,6 +13320,9 @@ var WorkerQueueManager = class {
       });
       throw error;
     } finally {
+      if (cleanup && signal) {
+        signal.removeEventListener("abort", cleanup);
+      }
       await blockingClient.quit().catch(() => {
       });
     }
```
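The refactor hoists `cleanup` out of the `try` block so the `finally` clause can detach it; before this change the `{ once: true }` abort listener stayed registered on the signal after every successful pop, accumulating handlers on a long-lived shared `AbortSignal`. The general shape of the fix as a standalone helper:

```ts
// General pattern: always remove an abort listener you added, even on the
// success path, when the AbortSignal outlives the operation.
async function withAbort<T>(
  signal: AbortSignal | undefined,
  op: () => Promise<T>,
  onAbort: () => void
): Promise<T> {
  if (signal) signal.addEventListener("abort", onAbort, { once: true });
  try {
    return await op();
  } finally {
    if (signal) signal.removeEventListener("abort", onAbort);
  }
}
```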
```diff
@@ -13117,6 +13554,13 @@ var BaseScheduler = class {
    */
   async recordProcessed(_tenantId, _queueId) {
   }
+  /**
+   * Called after processing multiple messages to update scheduler state.
+   * Batch variant for efficiency - reduces Redis calls when processing multiple messages.
+   * Default implementation does nothing.
+   */
+  async recordProcessedBatch(_tenantId, _queueId, _count) {
+  }
   /**
    * Initialize the scheduler.
    * Default implementation does nothing.
```
```diff
@@ -13179,6 +13623,7 @@ var DRRScheduler = class extends BaseScheduler {
     this.keys = config.keys;
     this.quantum = config.quantum;
     this.maxDeficit = config.maxDeficit;
+    this.masterQueueLimit = config.masterQueueLimit ?? 1e3;
     this.logger = config.logger ?? {
       debug: () => {
       },
```
```diff
@@ -13191,6 +13636,7 @@ var DRRScheduler = class extends BaseScheduler {
   keys;
   quantum;
   maxDeficit;
+  masterQueueLimit;
   logger;
   // ============================================================================
   // FairScheduler Implementation
```
```diff
@@ -13256,6 +13702,13 @@ var DRRScheduler = class extends BaseScheduler {
   async recordProcessed(tenantId, _queueId) {
     await this.#decrementDeficit(tenantId);
   }
+  /**
+   * Record that multiple messages were processed from a tenant.
+   * Decrements the tenant's deficit by count atomically.
+   */
+  async recordProcessedBatch(tenantId, _queueId, count) {
+    await this.#decrementDeficitBatch(tenantId, count);
+  }
   async close() {
     await this.redis.quit();
   }
```
```diff
@@ -13305,8 +13758,7 @@ var DRRScheduler = class extends BaseScheduler {
       "WITHSCORES",
       "LIMIT",
       0,
-
-      // Limit for performance
+      this.masterQueueLimit
     );
     const queues = [];
     for (let i = 0; i < results.length; i += 2) {
```
```diff
@@ -13347,6 +13799,14 @@ var DRRScheduler = class extends BaseScheduler {
     const result = await this.redis.drrDecrementDeficit(key, tenantId);
     return parseFloat(result);
   }
+  /**
+   * Decrement deficit for a tenant by a count atomically.
+   */
+  async #decrementDeficitBatch(tenantId, count) {
+    const key = this.#deficitKey();
+    const result = await this.redis.drrDecrementDeficitBatch(key, tenantId, count.toString());
+    return parseFloat(result);
+  }
   #registerCommands() {
     this.redis.defineCommand("drrAddQuantum", {
       numberOfKeys: 1,
```
```diff
@@ -13390,6 +13850,25 @@ if newDeficit < 0 then
   newDeficit = 0
 end
 
+return tostring(newDeficit)
+`
+    });
+    this.redis.defineCommand("drrDecrementDeficitBatch", {
+      numberOfKeys: 1,
+      lua: `
+local deficitKey = KEYS[1]
+local tenantId = ARGV[1]
+local count = tonumber(ARGV[2])
+
+local newDeficit = redis.call('HINCRBYFLOAT', deficitKey, tenantId, -count)
+newDeficit = tonumber(newDeficit)
+
+-- Floor at 0
+if newDeficit < 0 then
+  redis.call('HSET', deficitKey, tenantId, 0)
+  newDeficit = 0
+end
+
 return tostring(newDeficit)
 `
     });
```
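With the batch command, charging a tenant for N claimed messages costs one `HINCRBYFLOAT` (floored at zero) instead of N single decrements. A sketch of the two call paths, mirroring the dispatch `FairQueue` performs in a later hunk; the `Scheduler` interface here is inferred from this diff, not imported from the package:

```ts
interface Scheduler {
  recordProcessed?(tenantId: string, queueId: string): Promise<void>;
  recordProcessedBatch?(tenantId: string, queueId: string, count: number): Promise<void>;
}

// One Redis round trip instead of n when the scheduler supports the new
// batch hook; otherwise fall back to the old per-message decrement.
async function charge(scheduler: Scheduler, tenantId: string, queueId: string, n: number) {
  if (scheduler.recordProcessedBatch) {
    await scheduler.recordProcessedBatch(tenantId, queueId, n); // 1 round trip
  } else if (scheduler.recordProcessed) {
    for (let i = 0; i < n; i++) {
      await scheduler.recordProcessed(tenantId, queueId); // n round trips
    }
  }
}
```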
```diff
@@ -13897,18 +14376,30 @@ var FairQueue = class {
     this.visibilityTimeoutMs = options.visibilityTimeoutMs ?? 3e4;
     this.heartbeatIntervalMs = options.heartbeatIntervalMs ?? this.visibilityTimeoutMs / 3;
     this.reclaimIntervalMs = options.reclaimIntervalMs ?? 5e3;
-    this.
-    this.
-    this.workerQueueResolver = options.workerQueue?.resolveWorkerQueue;
+    this.workerQueueResolver = options.workerQueue.resolveWorkerQueue;
+    this.batchClaimSize = options.batchClaimSize ?? 10;
     this.cooloffEnabled = options.cooloff?.enabled ?? true;
     this.cooloffThreshold = options.cooloff?.threshold ?? 10;
     this.cooloffPeriodMs = options.cooloff?.periodMs ?? 1e4;
+    this.maxCooloffStatesSize = options.cooloff?.maxStatesSize ?? 1e3;
     this.globalRateLimiter = options.globalRateLimiter;
+    this.consumerTraceMaxIterations = options.consumerTraceMaxIterations ?? 500;
+    this.consumerTraceTimeoutSeconds = options.consumerTraceTimeoutSeconds ?? 60;
     this.telemetry = new FairQueueTelemetry({
       tracer: options.tracer,
       meter: options.meter,
       name: options.name ?? "fairqueue"
     });
+    this.batchedSpanManager = new BatchedSpanManager({
+      tracer: options.tracer,
+      name: options.name ?? "fairqueue",
+      maxIterations: this.consumerTraceMaxIterations,
+      timeoutSeconds: this.consumerTraceTimeoutSeconds,
+      getDynamicAttributes: () => ({
+        "cache.descriptor_size": this.queueDescriptorCache.size,
+        "cache.cooloff_states_size": this.queueCooloffStates.size
+      })
+    });
     this.masterQueue = new MasterQueue({
       redis: options.redis,
       keys: options.keys,
```
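A hedged summary of the constructor knobs visible in this hunk, with their compiled defaults; the rest of the `FairQueue` options object (redis, keys, scheduler, and so on) is omitted:

```ts
// Options and defaults taken from the compiled constructor above.
const fairQueueOptions = {
  batchClaimSize: 10,              // max messages claimed per queue visit
  consumerTraceMaxIterations: 500, // rotate the consumer root span after N iterations
  consumerTraceTimeoutSeconds: 60, // ...or after this many seconds
  cooloff: {
    enabled: true,
    threshold: 10,
    periodMs: 1e4,
    maxStatesSize: 1e3,            // new: cap on in-memory cooloff entries
  },
  workerQueue: {
    // Now required (the `?.` was dropped): every claimed message must
    // resolve to a worker queue id. The payload type is assumed here.
    resolveWorkerQueue: (payload: unknown) => "default",
  },
};
```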
```diff
@@ -13931,16 +14422,14 @@ var FairQueue = class {
         error: (msg, ctx) => this.logger.error(msg, ctx)
       }
     });
-
-
-
-
-
-
-
-
-    });
-  }
+    this.workerQueueManager = new WorkerQueueManager({
+      redis: options.redis,
+      keys: options.keys,
+      logger: {
+        debug: (msg, ctx) => this.logger.debug(msg, ctx),
+        error: (msg, ctx) => this.logger.error(msg, ctx)
+      }
+    });
     this.#registerCommands();
     if (options.startConsumers !== false) {
       this.start();
```
```diff
@@ -13966,22 +14455,24 @@ var FairQueue = class {
   visibilityTimeoutMs;
   heartbeatIntervalMs;
   reclaimIntervalMs;
-  workerQueueEnabled;
-  workerQueueBlockingTimeoutSeconds;
   workerQueueResolver;
+  batchClaimSize;
   // Cooloff state
   cooloffEnabled;
   cooloffThreshold;
   cooloffPeriodMs;
+  maxCooloffStatesSize;
   queueCooloffStates = /* @__PURE__ */ new Map();
   // Global rate limiter
   globalRateLimiter;
+  // Consumer tracing
+  consumerTraceMaxIterations;
+  consumerTraceTimeoutSeconds;
+  batchedSpanManager;
   // Runtime state
-  messageHandler;
   isRunning = false;
   abortController;
   masterQueueConsumerLoops = [];
-  workerQueueConsumerLoops = [];
   reclaimLoop;
   // Queue descriptor cache for message processing
   queueDescriptorCache = /* @__PURE__ */ new Map();
```
```diff
@@ -14010,15 +14501,6 @@ var FairQueue = class {
     });
   }
   // ============================================================================
-  // Public API - Message Handler
-  // ============================================================================
-  /**
-   * Set the message handler for processing dequeued messages.
-   */
-  onMessage(handler) {
-    this.messageHandler = handler;
-  }
-  // ============================================================================
   // Public API - Enqueueing
   // ============================================================================
   /**
```
```diff
@@ -14061,7 +14543,7 @@ var FairQueue = class {
       timestamp,
       attempt: 1,
       metadata: options.metadata
-    }) :
+    }) : void 0,
     metadata: options.metadata
   };
   await this.redis.enqueueMessageAtomic(
```
```diff
@@ -14079,13 +14561,7 @@ var FairQueue = class {
       [FairQueueAttributes.MESSAGE_ID]: messageId,
       [FairQueueAttributes.SHARD_ID]: shardId.toString()
     });
-    this.telemetry.recordEnqueue(
-      this.telemetry.messageAttributes({
-        queueId: options.queueId,
-        tenantId: options.tenantId,
-        messageId
-      })
-    );
+    this.telemetry.recordEnqueue();
     this.logger.debug("Message enqueued", {
       queueId: options.queueId,
       messageId,
```
```diff
@@ -14147,7 +14623,7 @@ var FairQueue = class {
       timestamp,
       attempt: 1,
       metadata: options.metadata
-    }) :
+    }) : void 0,
     metadata: options.metadata
   };
   messageIds.push(messageId);
```
```diff
@@ -14166,13 +14642,7 @@ var FairQueue = class {
       [FairQueueAttributes.MESSAGE_COUNT]: messageIds.length,
       [FairQueueAttributes.SHARD_ID]: shardId.toString()
     });
-    this.telemetry.recordEnqueueBatch(
-      messageIds.length,
-      this.telemetry.messageAttributes({
-        queueId: options.queueId,
-        tenantId: options.tenantId
-      })
-    );
+    this.telemetry.recordEnqueueBatch(messageIds.length);
     this.logger.debug("Batch enqueued", {
       queueId: options.queueId,
       messageCount: messageIds.length
```
```diff
@@ -14299,40 +14769,54 @@ var FairQueue = class {
     const dlqKey = this.keys.deadLetterQueueKey(tenantId);
     return await this.redis.zcard(dlqKey);
   }
-  // ============================================================================
-  // Public API - Lifecycle
-  // ============================================================================
   /**
-   *
+   * Get the size of the in-memory queue descriptor cache.
+   * This cache stores metadata for queues that have been enqueued.
+   * The cache is cleaned up when queues are fully processed.
    */
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+  getQueueDescriptorCacheSize() {
+    return this.queueDescriptorCache.size;
+  }
+  /**
+   * Get the size of the in-memory cooloff states cache.
+   * This cache tracks queues that are in cooloff due to repeated failures.
+   * The cache is cleaned up when queues are fully processed or cooloff expires.
+   */
+  getQueueCooloffStatesSize() {
+    return this.queueCooloffStates.size;
+  }
+  /**
+   * Get all in-memory cache sizes for monitoring.
+   * Useful for adding as span attributes.
+   */
+  getCacheSizes() {
+    return {
+      descriptorCacheSize: this.queueDescriptorCache.size,
+      cooloffStatesSize: this.queueCooloffStates.size
+    };
+  }
+  // ============================================================================
+  // Public API - Lifecycle
+  // ============================================================================
+  /**
+   * Start the master queue consumer loops and reclaim loop.
+   * FairQueue claims messages and pushes them to worker queues.
+   * External consumers are responsible for consuming from worker queues.
+   */
+  start() {
+    if (this.isRunning) {
+      return;
+    }
+    this.isRunning = true;
+    this.abortController = new AbortController();
+    for (let shardId = 0; shardId < this.shardCount; shardId++) {
+      const loop = this.#runMasterQueueConsumerLoop(shardId);
+      this.masterQueueConsumerLoops.push(loop);
     }
     this.reclaimLoop = this.#runReclaimLoop();
     this.logger.info("FairQueue started", {
       consumerCount: this.consumerCount,
       shardCount: this.shardCount,
-      workerQueueEnabled: this.workerQueueEnabled,
       consumerIntervalMs: this.consumerIntervalMs
     });
   }
```
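The cache-size getters expose the two in-memory maps; the constructor already attaches them to every rotated consumer span via `getDynamicAttributes`. A hypothetical polling setup for an external metrics sink:

```ts
// Hypothetical periodic monitoring of FairQueue's in-memory caches.
// `fairQueue` is an instance of the FairQueue class from this diff;
// `reportGauge` stands in for whatever metrics sink you use.
declare const fairQueue: {
  getCacheSizes(): { descriptorCacheSize: number; cooloffStatesSize: number };
};
declare function reportGauge(name: string, value: number): void;

setInterval(() => {
  const { descriptorCacheSize, cooloffStatesSize } = fairQueue.getCacheSizes();
  reportGauge("fairqueue.descriptor_cache_size", descriptorCacheSize);
  reportGauge("fairqueue.cooloff_states_size", cooloffStatesSize);
}, 30_000);
```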
```diff
@@ -14345,13 +14829,8 @@ var FairQueue = class {
     }
     this.isRunning = false;
     this.abortController.abort();
-    await Promise.allSettled([
-      ...this.masterQueueConsumerLoops,
-      ...this.workerQueueConsumerLoops,
-      this.reclaimLoop
-    ]);
+    await Promise.allSettled([...this.masterQueueConsumerLoops, this.reclaimLoop]);
     this.masterQueueConsumerLoops = [];
-    this.workerQueueConsumerLoops = [];
     this.reclaimLoop = void 0;
     this.logger.info("FairQueue stopped");
   }
```
```diff
@@ -14360,11 +14839,12 @@ var FairQueue = class {
    */
   async close() {
     await this.stop();
+    this.batchedSpanManager.cleanupAll();
     await Promise.all([
       this.masterQueue.close(),
       this.concurrencyManager?.close(),
       this.visibilityManager.close(),
-      this.workerQueueManager
+      this.workerQueueManager.close(),
       this.scheduler.close?.(),
       this.redis.quit()
     ]);
```
```diff
@@ -14402,49 +14882,148 @@ var FairQueue = class {
   // ============================================================================
   async #runMasterQueueConsumerLoop(shardId) {
     const loopId = `master-shard-${shardId}`;
+    this.batchedSpanManager.initializeLoop(loopId);
     try {
-
-
-
+      while (this.isRunning) {
+        if (this.abortController.signal.aborted) {
+          break;
+        }
+        let hadWork = false;
         try {
-          await this
+          hadWork = await this.batchedSpanManager.withBatchedSpan(
+            loopId,
+            async (span) => {
+              span.setAttribute("shard_id", shardId);
+              return await this.#processMasterQueueShard(loopId, shardId, span);
+            },
+            {
+              iterationSpanName: "processMasterQueueShard",
+              attributes: { shard_id: shardId }
+            }
+          );
         } catch (error) {
           this.logger.error("Master queue consumer error", {
             loopId,
            shardId,
            error: error instanceof Error ? error.message : String(error)
          });
+          this.batchedSpanManager.markForRotation(loopId);
        }
+        const waitMs = hadWork ? 1 : this.consumerIntervalMs;
+        await new Promise((resolve, reject) => {
+          const abortHandler = () => {
+            clearTimeout(timeout);
+            reject(new Error("AbortError"));
+          };
+          const timeout = setTimeout(() => {
+            this.abortController.signal.removeEventListener("abort", abortHandler);
+            resolve();
+          }, waitMs);
+          this.abortController.signal.addEventListener("abort", abortHandler, { once: true });
+        });
       }
     } catch (error) {
-      if (error
+      if (isAbortError(error)) {
         this.logger.debug("Master queue consumer aborted", { loopId });
+        this.batchedSpanManager.cleanup(loopId);
         return;
       }
       throw error;
+    } finally {
+      this.batchedSpanManager.cleanup(loopId);
     }
   }
-  async #processMasterQueueShard(loopId, shardId) {
+  async #processMasterQueueShard(loopId, shardId, parentSpan) {
     const masterQueueKey = this.keys.masterQueueKey(shardId);
-    const
-
+    const masterQueueSize = await this.masterQueue.getShardQueueCount(shardId);
+    parentSpan?.setAttribute("master_queue_size", masterQueueSize);
+    this.batchedSpanManager.incrementStat(loopId, "master_queue_size_sum", masterQueueSize);
+    const schedulerContext = this.#createSchedulerContext();
+    const tenantQueues = await this.telemetry.trace(
+      "selectQueues",
+      async (span) => {
+        span.setAttribute(FairQueueAttributes.SHARD_ID, shardId.toString());
+        span.setAttribute(FairQueueAttributes.CONSUMER_ID, loopId);
+        span.setAttribute("master_queue_size", masterQueueSize);
+        const result = await this.scheduler.selectQueues(masterQueueKey, loopId, schedulerContext);
+        span.setAttribute("tenant_count", result.length);
+        span.setAttribute(
+          "queue_count",
+          result.reduce((acc, t) => acc + t.queues.length, 0)
+        );
+        return result;
+      },
+      { kind: SpanKind.INTERNAL }
+    );
     if (tenantQueues.length === 0) {
-
+      this.batchedSpanManager.incrementStat(loopId, "empty_iterations");
+      return false;
     }
+    this.batchedSpanManager.incrementStat(loopId, "tenants_selected", tenantQueues.length);
+    this.batchedSpanManager.incrementStat(
+      loopId,
+      "queues_selected",
+      tenantQueues.reduce((acc, t) => acc + t.queues.length, 0)
+    );
+    let messagesProcessed = 0;
     for (const { tenantId, queues } of tenantQueues) {
       for (const queueId of queues) {
         if (this.cooloffEnabled && this.#isInCooloff(queueId)) {
+          this.batchedSpanManager.incrementStat(loopId, "cooloff_skipped");
           continue;
         }
-
-
-
-
+        if (this.concurrencyManager) {
+          const isAtCapacity = await this.concurrencyManager.isAtCapacity("tenant", tenantId);
+          if (isAtCapacity) {
+            this.batchedSpanManager.incrementStat(loopId, "tenant_capacity_skipped");
+            break;
+          }
+        }
+        const processedFromQueue = await this.telemetry.trace(
+          "claimAndPushToWorkerQueue",
+          async (span) => {
+            span.setAttribute(FairQueueAttributes.QUEUE_ID, queueId);
+            span.setAttribute(FairQueueAttributes.TENANT_ID, tenantId);
+            span.setAttribute(FairQueueAttributes.SHARD_ID, shardId.toString());
+            const count = await this.#claimAndPushToWorkerQueue(loopId, queueId, tenantId, shardId);
+            span.setAttribute("messages_claimed", count);
+            return count;
+          },
+          { kind: SpanKind.INTERNAL }
+        );
+        if (processedFromQueue > 0) {
+          messagesProcessed += processedFromQueue;
+          this.batchedSpanManager.incrementStat(loopId, "messages_claimed", processedFromQueue);
+          if (this.scheduler.recordProcessedBatch) {
+            await this.telemetry.trace(
+              "recordProcessedBatch",
+              async (span) => {
+                span.setAttribute(FairQueueAttributes.QUEUE_ID, queueId);
+                span.setAttribute(FairQueueAttributes.TENANT_ID, tenantId);
+                span.setAttribute("count", processedFromQueue);
+                await this.scheduler.recordProcessedBatch(tenantId, queueId, processedFromQueue);
+              },
+              { kind: SpanKind.INTERNAL }
+            );
+          } else if (this.scheduler.recordProcessed) {
+            for (let i = 0; i < processedFromQueue; i++) {
+              await this.telemetry.trace(
+                "recordProcessed",
+                async (span) => {
+                  span.setAttribute(FairQueueAttributes.QUEUE_ID, queueId);
+                  span.setAttribute(FairQueueAttributes.TENANT_ID, tenantId);
+                  await this.scheduler.recordProcessed(tenantId, queueId);
+                },
+                { kind: SpanKind.INTERNAL }
+              );
+            }
+          }
         } else {
-          this
+          this.batchedSpanManager.incrementStat(loopId, "claim_skipped");
         }
       }
     }
+    return messagesProcessed > 0;
   }
   async #claimAndPushToWorkerQueue(loopId, queueId, tenantId, shardId) {
     const queueKey = this.keys.queueKey(queueId);
```
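The loop now sleeps 1 ms after a productive pass and `consumerIntervalMs` when idle, using a timer that rejects with `"AbortError"` the moment the controller aborts, so `stop()` is not blocked by a sleeping consumer. The same pattern as a self-contained helper:

```ts
// Abortable sleep: resolves after ms, rejects with "AbortError" as soon as
// the signal fires, and always detaches its listener.
function delay(ms: number, signal: AbortSignal): Promise<void> {
  return new Promise((resolve, reject) => {
    const onAbort = () => {
      clearTimeout(timer);
      reject(new Error("AbortError"));
    };
    const timer = setTimeout(() => {
      signal.removeEventListener("abort", onAbort);
      resolve();
    }, ms);
    signal.addEventListener("abort", onAbort, { once: true });
  });
}
```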
```diff
@@ -14455,11 +15034,14 @@ var FairQueue = class {
       tenantId,
       metadata: {}
     };
+    let maxClaimCount = this.batchClaimSize;
     if (this.concurrencyManager) {
-      const
-      if (
-
+      const availableCapacity = await this.concurrencyManager.getAvailableCapacity(descriptor);
+      if (availableCapacity === 0) {
+        this.#incrementCooloff(queueId);
+        return 0;
       }
+      maxClaimCount = Math.min(maxClaimCount, availableCapacity);
     }
     if (this.globalRateLimiter) {
       const result = await this.globalRateLimiter.limit();
```
@@ -14471,377 +15053,209 @@ var FairQueue = class {
|
|
|
14471
15053
|
}
|
|
14472
15054
|
}
|
|
14473
15055
|
}
|
|
14474
|
-
const
|
|
14475
|
-
|
|
14476
|
-
queueKey,
|
|
14477
|
-
|
|
14478
|
-
|
|
14479
|
-
|
|
14480
|
-
);
|
|
14481
|
-
if (!claimResult.claimed || !claimResult.message) {
|
|
14482
|
-
await this.redis.updateMasterQueueIfEmpty(masterQueueKey, queueKey, queueId);
|
|
14483
|
-
return false;
|
|
14484
|
-
}
|
|
14485
|
-
const { message } = claimResult;
|
|
14486
|
-
if (this.concurrencyManager) {
|
|
14487
|
-
const reserved = await this.concurrencyManager.reserve(descriptor, message.messageId);
|
|
14488
|
-
if (!reserved) {
|
|
14489
|
-
await this.visibilityManager.release(message.messageId, queueId, queueKey, queueItemsKey);
|
|
14490
|
-
return false;
|
|
15056
|
+
const claimedMessages = await this.visibilityManager.claimBatch(queueId, queueKey, queueItemsKey, loopId, maxClaimCount, this.visibilityTimeoutMs);
|
|
15057
|
+
if (claimedMessages.length === 0) {
|
|
15058
|
+
const removed = await this.redis.updateMasterQueueIfEmpty(masterQueueKey, queueKey, queueId);
|
|
15059
|
+
if (removed === 1) {
|
|
15060
|
+
this.queueDescriptorCache.delete(queueId);
|
|
15061
|
+
this.queueCooloffStates.delete(queueId);
|
|
14491
15062
|
}
|
|
15063
|
+
return 0;
|
|
14492
15064
|
}
|
|
14493
|
-
|
|
14494
|
-
|
|
14495
|
-
|
|
14496
|
-
|
|
14497
|
-
|
|
14498
|
-
|
|
14499
|
-
|
|
14500
|
-
|
|
14501
|
-
|
|
14502
|
-
|
|
14503
|
-
|
|
14504
|
-
|
|
14505
|
-
while (this.isRunning) {
|
|
14506
|
-
if (!this.messageHandler) {
|
|
14507
|
-
await new Promise((resolve) => setTimeout(resolve, this.consumerIntervalMs));
|
|
14508
|
-
continue;
|
|
14509
|
-
}
|
|
14510
|
-
try {
|
|
14511
|
-
const messageKey = await this.workerQueueManager.blockingPop(
|
|
14512
|
-
workerQueueId,
|
|
14513
|
-
this.workerQueueBlockingTimeoutSeconds,
|
|
14514
|
-
this.abortController.signal
|
|
15065
|
+
let processedCount = 0;
|
|
15066
|
+
for (let i = 0; i < claimedMessages.length; i++) {
|
|
15067
|
+
const message = claimedMessages[i];
|
|
15068
|
+
if (this.concurrencyManager) {
|
|
15069
|
+
const reserved = await this.concurrencyManager.reserve(descriptor, message.messageId);
|
|
15070
|
+
if (!reserved) {
|
|
15071
|
+
await this.visibilityManager.releaseBatch(
|
|
15072
|
+
claimedMessages.slice(i),
|
|
15073
|
+
queueId,
|
|
15074
|
+
queueKey,
|
|
15075
|
+
queueItemsKey,
|
|
15076
|
+
masterQueueKey
|
|
14515
15077
|
);
|
|
14516
|
-
|
|
14517
|
-
continue;
|
|
14518
|
-
}
|
|
14519
|
-
const colonIndex = messageKey.indexOf(":");
|
|
14520
|
-
if (colonIndex === -1) {
|
|
14521
|
-
this.logger.error("Invalid message key format", { messageKey });
|
|
14522
|
-
continue;
|
|
14523
|
-
}
|
|
14524
|
-
const messageId = messageKey.substring(0, colonIndex);
|
|
14525
|
-
const queueId = messageKey.substring(colonIndex + 1);
|
|
14526
|
-
await this.#processMessageFromWorkerQueue(loopId, messageId, queueId);
|
|
14527
|
-
} catch (error) {
|
|
14528
|
-
if (this.abortController.signal.aborted) {
|
|
14529
|
-
break;
|
|
14530
|
-
}
|
|
14531
|
-
this.logger.error("Worker queue consumer error", {
|
|
14532
|
-
loopId,
|
|
14533
|
-
error: error instanceof Error ? error.message : String(error)
|
|
14534
|
-
});
|
|
15078
|
+
break;
|
|
14535
15079
|
}
|
|
14536
15080
|
}
|
|
14537
|
-
|
|
14538
|
-
|
|
14539
|
-
|
|
14540
|
-
|
|
14541
|
-
|
|
14542
|
-
|
|
15081
|
+
const workerQueueId = this.workerQueueResolver(message.payload);
|
|
15082
|
+
const messageKey = `${message.messageId}:${queueId}`;
|
|
15083
|
+
await this.workerQueueManager.push(workerQueueId, messageKey);
|
|
15084
|
+
processedCount++;
|
|
15085
|
+
}
|
|
15086
|
+
if (processedCount > 0) {
|
|
15087
|
+
this.#resetCooloff(queueId);
|
|
14543
15088
|
}
|
|
15089
|
+
return processedCount;
|
|
14544
15090
|
}
|
|
14545
|
-
|
|
15091
|
+
// ============================================================================
|
|
15092
|
+
// Public API - Message Lifecycle (for external consumers)
|
|
15093
|
+
// ============================================================================
|
|
15094
|
+
/**
|
|
15095
|
+
* Get message data from in-flight storage.
|
|
15096
|
+
* External consumers use this to retrieve the stored message after popping from worker queue.
|
|
15097
|
+
*
|
|
15098
|
+
* @param messageId - The ID of the message
|
|
15099
|
+
* @param queueId - The queue ID the message belongs to
|
|
15100
|
+
* @returns The stored message or null if not found
|
|
15101
|
+
*/
|
|
15102
|
+
async getMessageData(messageId, queueId) {
|
|
14546
15103
|
const shardId = this.masterQueue.getShardForQueue(queueId);
|
|
14547
15104
|
const inflightDataKey = this.keys.inflightDataKey(shardId);
|
|
14548
15105
|
const dataJson = await this.redis.hget(inflightDataKey, messageId);
|
|
14549
15106
|
if (!dataJson) {
|
|
14550
|
-
|
|
14551
|
-
return;
|
|
15107
|
+
return null;
|
|
14552
15108
|
}
|
|
14553
|
-
let storedMessage;
|
|
14554
15109
|
try {
|
|
14555
|
-
|
|
15110
|
+
return JSON.parse(dataJson);
|
|
14556
15111
|
} catch {
|
|
14557
15112
|
this.logger.error("Failed to parse message data", { messageId, queueId });
|
|
14558
|
-
return;
|
|
15113
|
+
return null;
|
|
14559
15114
|
}
|
|
14560
|
-
await this.#processMessage(loopId, storedMessage, queueId);
|
|
14561
15115
|
}
|
|
14562
|
-
|
|
14563
|
-
|
|
14564
|
-
|
|
14565
|
-
|
|
14566
|
-
|
|
14567
|
-
|
|
14568
|
-
|
|
14569
|
-
|
|
14570
|
-
|
|
14571
|
-
|
|
14572
|
-
continue;
|
|
14573
|
-
}
|
|
14574
|
-
try {
|
|
14575
|
-
await this.#processDirectIteration(loopId, shardId);
|
|
14576
|
-
} catch (error) {
|
|
14577
|
-
this.logger.error("Direct consumer iteration error", {
|
|
14578
|
-
loopId,
|
|
14579
|
-
error: error instanceof Error ? error.message : String(error)
|
|
14580
|
-
});
|
|
14581
|
-
}
|
|
14582
|
-
}
|
|
14583
|
-
} catch (error) {
|
|
14584
|
-
if (error instanceof Error && error.name === "AbortError") {
|
|
14585
|
-
this.logger.debug("Direct consumer loop aborted", { loopId });
|
|
14586
|
-
return;
|
|
14587
|
-
}
|
|
14588
|
-
throw error;
|
|
14589
|
-
}
|
|
15116
|
+
/**
|
|
15117
|
+
* Extend the visibility timeout for a message.
|
|
15118
|
+
* External consumers should call this periodically during long-running processing.
|
|
15119
|
+
*
|
|
15120
|
+
* @param messageId - The ID of the message
|
|
15121
|
+
* @param queueId - The queue ID the message belongs to
|
|
15122
|
+
* @returns true if heartbeat was successful
|
|
15123
|
+
*/
|
|
15124
|
+
async heartbeatMessage(messageId, queueId) {
|
|
15125
|
+
return this.visibilityManager.heartbeat(messageId, queueId, this.heartbeatIntervalMs);
|
|
14590
15126
|
}
|
|
14591
|
-
|
|
15127
|
+
/**
|
|
15128
|
+
* Mark a message as successfully processed.
|
|
15129
|
+
* This removes the message from in-flight and releases concurrency.
|
|
15130
|
+
*
|
|
15131
|
+
* @param messageId - The ID of the message
|
|
15132
|
+
* @param queueId - The queue ID the message belongs to
|
|
15133
|
+
*/
|
|
15134
|
+
async completeMessage(messageId, queueId) {
|
|
15135
|
+
const shardId = this.masterQueue.getShardForQueue(queueId);
|
|
15136
|
+
const queueKey = this.keys.queueKey(queueId);
|
|
14592
15137
|
const masterQueueKey = this.keys.masterQueueKey(shardId);
|
|
14593
|
-
const
|
|
14594
|
-
const
|
|
14595
|
-
|
|
14596
|
-
|
|
14597
|
-
|
|
14598
|
-
|
|
14599
|
-
|
|
14600
|
-
if (this.concurrencyManager) {
|
|
14601
|
-
const [current, limit] = await Promise.all([
|
|
14602
|
-
this.concurrencyManager.getCurrentConcurrency("tenant", tenantId),
|
|
14603
|
-
this.concurrencyManager.getConcurrencyLimit("tenant", tenantId)
|
|
14604
|
-
]);
|
|
14605
|
-
availableSlots = Math.max(1, limit - current);
|
|
14606
|
-
}
|
|
14607
|
-
let slotsUsed = 0;
|
|
14608
|
-
queueLoop: for (const queueId of queues) {
|
|
14609
|
-
while (slotsUsed < availableSlots) {
|
|
14610
|
-
if (this.cooloffEnabled && this.#isInCooloff(queueId)) {
|
|
14611
|
-
break;
|
|
14612
|
-
}
|
|
14613
|
-
const processed = await this.#processOneMessage(loopId, queueId, tenantId, shardId);
|
|
14614
|
-
if (processed) {
|
|
14615
|
-
await this.scheduler.recordProcessed?.(tenantId, queueId);
|
|
14616
|
-
this.#resetCooloff(queueId);
|
|
14617
|
-
slotsUsed++;
|
|
14618
|
-
} else {
|
|
14619
|
-
this.#incrementCooloff(queueId);
|
|
14620
|
-
break;
|
|
14621
|
-
}
|
|
14622
|
-
}
|
|
14623
|
-
if (slotsUsed >= availableSlots) {
|
|
14624
|
-
break queueLoop;
|
|
14625
|
-
}
|
|
15138
|
+
const inflightDataKey = this.keys.inflightDataKey(shardId);
|
|
15139
|
+
const dataJson = await this.redis.hget(inflightDataKey, messageId);
|
|
15140
|
+
let storedMessage = null;
|
|
15141
|
+
if (dataJson) {
|
|
15142
|
+
try {
|
|
15143
|
+
storedMessage = JSON.parse(dataJson);
|
|
15144
|
+
} catch {
|
|
14626
15145
|
}
|
|
14627
15146
|
}
|
|
15147
|
+
const descriptor = storedMessage ? this.queueDescriptorCache.get(queueId) ?? {
|
|
15148
|
+
id: queueId,
|
|
15149
|
+
tenantId: storedMessage.tenantId,
|
|
15150
|
+
metadata: storedMessage.metadata ?? {}
|
|
15151
|
+
} : { id: queueId, tenantId: "", metadata: {} };
|
|
15152
|
+
await this.visibilityManager.complete(messageId, queueId);
|
|
15153
|
+
if (this.concurrencyManager && storedMessage) {
|
|
15154
|
+
await this.concurrencyManager.release(descriptor, messageId);
|
|
15155
|
+
}
|
|
15156
|
+
const removed = await this.redis.updateMasterQueueIfEmpty(masterQueueKey, queueKey, queueId);
|
|
15157
|
+
if (removed === 1) {
|
|
15158
|
+
this.queueDescriptorCache.delete(queueId);
|
|
15159
|
+
this.queueCooloffStates.delete(queueId);
|
|
15160
|
+
}
|
|
15161
|
+
this.telemetry.recordComplete();
|
|
15162
|
+
this.logger.debug("Message completed", {
|
|
15163
|
+
messageId,
|
|
15164
|
+
queueId
|
|
15165
|
+
});
|
|
14628
15166
|
}
|
|
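As the rows above show, completeMessage clears the in-flight entry, releases the concurrency slot, and drops the queue from the master queue once it is empty. The happy-path sketch is a single acknowledgement after processing, under the same hypothetical names as before:

await doWork();
await fairQueue.completeMessage(messageId, queueId);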
14629
|
-
|
|
15167
|
+
/**
|
|
15168
|
+
* Release a message back to the queue for processing by another consumer.
|
|
15169
|
+
* The message is placed at the back of the queue.
|
|
15170
|
+
*
|
|
15171
|
+
* @param messageId - The ID of the message
|
|
15172
|
+
* @param queueId - The queue ID the message belongs to
|
|
15173
|
+
*/
|
|
15174
|
+
async releaseMessage(messageId, queueId) {
|
|
15175
|
+
const shardId = this.masterQueue.getShardForQueue(queueId);
|
|
14630
15176
|
const queueKey = this.keys.queueKey(queueId);
|
|
14631
15177
|
const queueItemsKey = this.keys.queueItemsKey(queueId);
|
|
14632
15178
|
const masterQueueKey = this.keys.masterQueueKey(shardId);
|
|
14633
|
-
const
|
|
14634
|
-
|
|
14635
|
-
|
|
14636
|
-
|
|
14637
|
-
|
|
14638
|
-
|
|
14639
|
-
|
|
14640
|
-
if (!check.allowed) {
|
|
14641
|
-
return false;
|
|
14642
|
-
}
|
|
14643
|
-
}
|
|
14644
|
-
if (this.globalRateLimiter) {
|
|
14645
|
-
const result = await this.globalRateLimiter.limit();
|
|
14646
|
-
if (!result.allowed && result.resetAt) {
|
|
14647
|
-
const waitMs = Math.max(0, result.resetAt - Date.now());
|
|
14648
|
-
if (waitMs > 0) {
|
|
14649
|
-
this.logger.debug("Global rate limit reached, waiting", { waitMs, loopId });
|
|
14650
|
-
await new Promise((resolve) => setTimeout(resolve, waitMs));
|
|
14651
|
-
}
|
|
15179
|
+
const inflightDataKey = this.keys.inflightDataKey(shardId);
|
|
15180
|
+
const dataJson = await this.redis.hget(inflightDataKey, messageId);
|
|
15181
|
+
let storedMessage = null;
|
|
15182
|
+
if (dataJson) {
|
|
15183
|
+
try {
|
|
15184
|
+
storedMessage = JSON.parse(dataJson);
|
|
15185
|
+
} catch {
|
|
14652
15186
|
}
|
|
14653
15187
|
}
|
|
14654
|
-
const
|
|
15188
|
+
const descriptor = storedMessage ? this.queueDescriptorCache.get(queueId) ?? {
|
|
15189
|
+
id: queueId,
|
|
15190
|
+
tenantId: storedMessage.tenantId,
|
|
15191
|
+
metadata: storedMessage.metadata ?? {}
|
|
15192
|
+
} : { id: queueId, tenantId: "", metadata: {} };
|
|
15193
|
+
await this.visibilityManager.release(
|
|
15194
|
+
messageId,
|
|
14655
15195
|
queueId,
|
|
14656
15196
|
queueKey,
|
|
14657
15197
|
queueItemsKey,
|
|
14658
|
-
|
|
14659
|
-
|
|
15198
|
+
masterQueueKey,
|
|
15199
|
+
Date.now()
|
|
15200
|
+
// Put at back of queue
|
|
14660
15201
|
);
|
|
14661
|
-
if (
|
|
14662
|
-
await this.
|
|
14663
|
-
return false;
|
|
14664
|
-
}
|
|
14665
|
-
const { message } = claimResult;
|
|
14666
|
-
if (this.concurrencyManager) {
|
|
14667
|
-
const reserved = await this.concurrencyManager.reserve(descriptor, message.messageId);
|
|
14668
|
-
if (!reserved) {
|
|
14669
|
-
await this.visibilityManager.release(message.messageId, queueId, queueKey, queueItemsKey);
|
|
14670
|
-
return false;
|
|
14671
|
-
}
|
|
15202
|
+
if (this.concurrencyManager && storedMessage) {
|
|
15203
|
+
await this.concurrencyManager.release(descriptor, messageId);
|
|
14672
15204
|
}
|
|
14673
|
-
|
|
14674
|
-
|
|
15205
|
+
this.logger.debug("Message released", {
|
|
15206
|
+
messageId,
|
|
15207
|
+
queueId
|
|
15208
|
+
});
|
|
14675
15209
|
}
|
|
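releaseMessage is the cooperative give-back path: the message re-enters at the back of its queue with a Date.now() score and its concurrency slot is freed, so another consumer can claim it. A sketch, where shuttingDown is a hypothetical caller-owned flag:

if (shuttingDown) {
  // Hand the message to another consumer instead of failing it.
  await fairQueue.releaseMessage(messageId, queueId);
}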
14676
|
-
|
|
14677
|
-
|
|
14678
|
-
|
|
14679
|
-
|
|
14680
|
-
|
|
15210
|
+
/**
|
|
15211
|
+
* Mark a message as failed. This will trigger retry logic if configured,
|
|
15212
|
+
* or move the message to the dead letter queue.
|
|
15213
|
+
*
|
|
15214
|
+
* @param messageId - The ID of the message
|
|
15215
|
+
* @param queueId - The queue ID the message belongs to
|
|
15216
|
+
* @param error - Optional error that caused the failure
|
|
15217
|
+
*/
|
|
15218
|
+
async failMessage(messageId, queueId, error) {
|
|
15219
|
+
const shardId = this.masterQueue.getShardForQueue(queueId);
|
|
14681
15220
|
const queueKey = this.keys.queueKey(queueId);
|
|
14682
15221
|
const queueItemsKey = this.keys.queueItemsKey(queueId);
|
|
14683
|
-
const shardId = this.masterQueue.getShardForQueue(queueId);
|
|
14684
15222
|
const masterQueueKey = this.keys.masterQueueKey(shardId);
|
|
15223
|
+
const inflightDataKey = this.keys.inflightDataKey(shardId);
|
|
15224
|
+
const dataJson = await this.redis.hget(inflightDataKey, messageId);
|
|
15225
|
+
if (!dataJson) {
|
|
15226
|
+
this.logger.error("Cannot fail message: not found in in-flight data", { messageId, queueId });
|
|
15227
|
+
return;
|
|
15228
|
+
}
|
|
15229
|
+
let storedMessage;
|
|
15230
|
+
try {
|
|
15231
|
+
storedMessage = JSON.parse(dataJson);
|
|
15232
|
+
} catch {
|
|
15233
|
+
this.logger.error("Cannot fail message: failed to parse stored message", {
|
|
15234
|
+
messageId,
|
|
15235
|
+
queueId
|
|
15236
|
+
});
|
|
15237
|
+
return;
|
|
15238
|
+
}
|
|
14685
15239
|
const descriptor = this.queueDescriptorCache.get(queueId) ?? {
|
|
14686
15240
|
id: queueId,
|
|
14687
15241
|
tenantId: storedMessage.tenantId,
|
|
14688
15242
|
metadata: storedMessage.metadata ?? {}
|
|
14689
15243
|
};
|
|
14690
|
-
|
|
14691
|
-
|
|
14692
|
-
const result = this.payloadSchema.safeParse(storedMessage.payload);
|
|
14693
|
-
if (!result.success) {
|
|
14694
|
-
this.logger.error("Payload validation failed on dequeue", {
|
|
14695
|
-
messageId: storedMessage.id,
|
|
14696
|
-
queueId,
|
|
14697
|
-
error: result.error.message
|
|
14698
|
-
});
|
|
14699
|
-
await this.#moveToDeadLetterQueue(storedMessage, "Payload validation failed");
|
|
14700
|
-
if (this.concurrencyManager) {
|
|
14701
|
-
try {
|
|
14702
|
-
await this.concurrencyManager.release(descriptor, storedMessage.id);
|
|
14703
|
-
} catch (releaseError) {
|
|
14704
|
-
this.logger.error("Failed to release concurrency slot after payload validation failure", {
|
|
14705
|
-
messageId: storedMessage.id,
|
|
14706
|
-
queueId,
|
|
14707
|
-
error: releaseError instanceof Error ? releaseError.message : String(releaseError)
|
|
14708
|
-
});
|
|
14709
|
-
}
|
|
14710
|
-
}
|
|
14711
|
-
return;
|
|
14712
|
-
}
|
|
14713
|
-
payload = result.data;
|
|
14714
|
-
} else {
|
|
14715
|
-
payload = storedMessage.payload;
|
|
14716
|
-
}
|
|
14717
|
-
const queueMessage = {
|
|
14718
|
-
id: storedMessage.id,
|
|
14719
|
-
queueId,
|
|
14720
|
-
payload,
|
|
14721
|
-
timestamp: storedMessage.timestamp,
|
|
14722
|
-
attempt: storedMessage.attempt,
|
|
14723
|
-
metadata: storedMessage.metadata
|
|
14724
|
-
};
|
|
14725
|
-
const queueTime = startTime - storedMessage.timestamp;
|
|
14726
|
-
this.telemetry.recordQueueTime(
|
|
14727
|
-
queueTime,
|
|
14728
|
-
this.telemetry.messageAttributes({
|
|
14729
|
-
queueId,
|
|
14730
|
-
tenantId: storedMessage.tenantId,
|
|
14731
|
-
messageId: storedMessage.id
|
|
14732
|
-
})
|
|
14733
|
-
);
|
|
14734
|
-
const handlerContext = {
|
|
14735
|
-
message: queueMessage,
|
|
14736
|
-
queue: descriptor,
|
|
14737
|
-
consumerId: loopId,
|
|
14738
|
-
heartbeat: async () => {
|
|
14739
|
-
return this.visibilityManager.heartbeat(
|
|
14740
|
-
storedMessage.id,
|
|
14741
|
-
queueId,
|
|
14742
|
-
this.heartbeatIntervalMs
|
|
14743
|
-
);
|
|
14744
|
-
},
|
|
14745
|
-
complete: async () => {
|
|
14746
|
-
await this.#completeMessage(storedMessage, queueId, queueKey, masterQueueKey, descriptor);
|
|
14747
|
-
this.telemetry.recordComplete(
|
|
14748
|
-
this.telemetry.messageAttributes({
|
|
14749
|
-
queueId,
|
|
14750
|
-
tenantId: storedMessage.tenantId,
|
|
14751
|
-
messageId: storedMessage.id
|
|
14752
|
-
})
|
|
14753
|
-
);
|
|
14754
|
-
this.telemetry.recordProcessingTime(
|
|
14755
|
-
Date.now() - startTime,
|
|
14756
|
-
this.telemetry.messageAttributes({
|
|
14757
|
-
queueId,
|
|
14758
|
-
tenantId: storedMessage.tenantId,
|
|
14759
|
-
messageId: storedMessage.id
|
|
14760
|
-
})
|
|
14761
|
-
);
|
|
14762
|
-
},
|
|
14763
|
-
release: async () => {
|
|
14764
|
-
await this.#releaseMessage(storedMessage, queueId, queueKey, queueItemsKey, descriptor);
|
|
14765
|
-
},
|
|
14766
|
-
fail: async (error) => {
|
|
14767
|
-
await this.#handleMessageFailure(
|
|
14768
|
-
storedMessage,
|
|
14769
|
-
queueId,
|
|
14770
|
-
queueKey,
|
|
14771
|
-
queueItemsKey,
|
|
14772
|
-
masterQueueKey,
|
|
14773
|
-
descriptor,
|
|
14774
|
-
error
|
|
14775
|
-
);
|
|
14776
|
-
}
|
|
14777
|
-
};
|
|
14778
|
-
try {
|
|
14779
|
-
await this.telemetry.trace(
|
|
14780
|
-
"processMessage",
|
|
14781
|
-
async (span) => {
|
|
14782
|
-
span.setAttributes({
|
|
14783
|
-
[FairQueueAttributes.QUEUE_ID]: queueId,
|
|
14784
|
-
[FairQueueAttributes.TENANT_ID]: storedMessage.tenantId,
|
|
14785
|
-
[FairQueueAttributes.MESSAGE_ID]: storedMessage.id,
|
|
14786
|
-
[FairQueueAttributes.ATTEMPT]: storedMessage.attempt,
|
|
14787
|
-
[FairQueueAttributes.CONSUMER_ID]: loopId
|
|
14788
|
-
});
|
|
14789
|
-
await this.messageHandler(handlerContext);
|
|
14790
|
-
},
|
|
14791
|
-
{
|
|
14792
|
-
kind: SpanKind.CONSUMER,
|
|
14793
|
-
attributes: {
|
|
14794
|
-
[MessagingAttributes.OPERATION]: "process"
|
|
14795
|
-
}
|
|
14796
|
-
}
|
|
14797
|
-
);
|
|
14798
|
-
} catch (error) {
|
|
14799
|
-
this.logger.error("Message handler error", {
|
|
14800
|
-
messageId: storedMessage.id,
|
|
14801
|
-
queueId,
|
|
14802
|
-
error: error instanceof Error ? error.message : String(error)
|
|
14803
|
-
});
|
|
14804
|
-
await handlerContext.fail(error instanceof Error ? error : new Error(String(error)));
|
|
14805
|
-
}
|
|
14806
|
-
}
|
|
14807
|
-
async #completeMessage(storedMessage, queueId, queueKey, masterQueueKey, descriptor) {
|
|
14808
|
-
this.masterQueue.getShardForQueue(queueId);
|
|
14809
|
-
await this.visibilityManager.complete(storedMessage.id, queueId);
|
|
14810
|
-
if (this.concurrencyManager) {
|
|
14811
|
-
await this.concurrencyManager.release(descriptor, storedMessage.id);
|
|
14812
|
-
}
|
|
14813
|
-
await this.redis.updateMasterQueueIfEmpty(masterQueueKey, queueKey, queueId);
|
|
14814
|
-
this.logger.debug("Message completed", {
|
|
14815
|
-
messageId: storedMessage.id,
|
|
14816
|
-
queueId
|
|
14817
|
-
});
|
|
14818
|
-
}
|
|
14819
|
-
async #releaseMessage(storedMessage, queueId, queueKey, queueItemsKey, descriptor) {
|
|
14820
|
-
await this.visibilityManager.release(
|
|
14821
|
-
storedMessage.id,
|
|
15244
|
+
await this.#handleMessageFailure(
|
|
15245
|
+
storedMessage,
|
|
14822
15246
|
queueId,
|
|
14823
15247
|
queueKey,
|
|
14824
15248
|
queueItemsKey,
|
|
14825
|
-
|
|
14826
|
-
|
|
15249
|
+
masterQueueKey,
|
|
15250
|
+
descriptor,
|
|
15251
|
+
error
|
|
14827
15252
|
);
|
|
14828
|
-
if (this.concurrencyManager) {
|
|
14829
|
-
await this.concurrencyManager.release(descriptor, storedMessage.id);
|
|
14830
|
-
}
|
|
14831
|
-
this.logger.debug("Message released", {
|
|
14832
|
-
messageId: storedMessage.id,
|
|
14833
|
-
queueId
|
|
14834
|
-
});
|
|
14835
15253
|
}
|
|
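Taken together, the four public methods above give an external consumer a full message lifecycle. A sketch of the typical shape, with doWork() and the popped ids hypothetical as before:

try {
  await doWork();
  await fairQueue.completeMessage(messageId, queueId);
} catch (err) {
  // failMessage routes through the failure handler below: retry with the
  // configured strategy while attempts remain, otherwise dead-letter.
  await fairQueue.failMessage(
    messageId,
    queueId,
    err instanceof Error ? err : new Error(String(err))
  );
}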
15254
|
+
// ============================================================================
|
|
15255
|
+
// Private - Message Processing Helpers
|
|
15256
|
+
// ============================================================================
|
|
14836
15257
|
async #handleMessageFailure(storedMessage, queueId, queueKey, queueItemsKey, masterQueueKey, descriptor, error) {
|
|
14837
|
-
this.telemetry.recordFailure(
|
|
14838
|
-
this.telemetry.messageAttributes({
|
|
14839
|
-
queueId,
|
|
14840
|
-
tenantId: storedMessage.tenantId,
|
|
14841
|
-
messageId: storedMessage.id,
|
|
14842
|
-
attempt: storedMessage.attempt
|
|
14843
|
-
})
|
|
14844
|
-
);
|
|
15258
|
+
this.telemetry.recordFailure();
|
|
14845
15259
|
if (this.retryStrategy) {
|
|
14846
15260
|
const nextDelay = this.retryStrategy.getNextDelay(storedMessage.attempt, error);
|
|
14847
15261
|
if (nextDelay !== null) {
|
|
@@ -14854,20 +15268,14 @@ var FairQueue = class {
|
|
|
14854
15268
|
queueId,
|
|
14855
15269
|
queueKey,
|
|
14856
15270
|
queueItemsKey,
|
|
15271
|
+
masterQueueKey,
|
|
14857
15272
|
Date.now() + nextDelay
|
|
14858
15273
|
);
|
|
14859
15274
|
await this.redis.hset(queueItemsKey, storedMessage.id, JSON.stringify(updatedMessage));
|
|
14860
15275
|
if (this.concurrencyManager) {
|
|
14861
15276
|
await this.concurrencyManager.release(descriptor, storedMessage.id);
|
|
14862
15277
|
}
|
|
14863
|
-
this.telemetry.recordRetry(
|
|
14864
|
-
this.telemetry.messageAttributes({
|
|
14865
|
-
queueId,
|
|
14866
|
-
tenantId: storedMessage.tenantId,
|
|
14867
|
-
messageId: storedMessage.id,
|
|
14868
|
-
attempt: storedMessage.attempt + 1
|
|
14869
|
-
})
|
|
14870
|
-
);
|
|
15278
|
+
this.telemetry.recordRetry();
|
|
14871
15279
|
this.logger.debug("Message scheduled for retry", {
|
|
14872
15280
|
messageId: storedMessage.id,
|
|
14873
15281
|
queueId,
|
|
@@ -14905,14 +15313,7 @@ var FairQueue = class {
|
|
|
14905
15313
|
pipeline.zadd(dlqKey, dlqMessage.deadLetteredAt, storedMessage.id);
|
|
14906
15314
|
pipeline.hset(dlqDataKey, storedMessage.id, JSON.stringify(dlqMessage));
|
|
14907
15315
|
await pipeline.exec();
|
|
14908
|
-
this.telemetry.recordDLQ(
|
|
14909
|
-
this.telemetry.messageAttributes({
|
|
14910
|
-
queueId: storedMessage.queueId,
|
|
14911
|
-
tenantId: storedMessage.tenantId,
|
|
14912
|
-
messageId: storedMessage.id,
|
|
14913
|
-
attempt: storedMessage.attempt
|
|
14914
|
-
})
|
|
14915
|
-
);
|
|
15316
|
+
this.telemetry.recordDLQ();
|
|
14916
15317
|
this.logger.info("Message moved to DLQ", {
|
|
14917
15318
|
messageId: storedMessage.id,
|
|
14918
15319
|
queueId: storedMessage.queueId,
|
|
@@ -14938,7 +15339,7 @@ var FairQueue = class {
|
|
|
14938
15339
|
}
|
|
14939
15340
|
}
|
|
14940
15341
|
} catch (error) {
|
|
14941
|
-
if (error instanceof Error && error.name === "AbortError") {
|
|
15342
|
+
if (isAbortError(error)) {
|
|
14942
15343
|
this.logger.debug("Reclaim loop aborted");
|
|
14943
15344
|
return;
|
|
14944
15345
|
}
|
|
@@ -14950,7 +15351,8 @@ var FairQueue = class {
|
|
|
14950
15351
|
for (let shardId = 0; shardId < this.shardCount; shardId++) {
|
|
14951
15352
|
const reclaimed = await this.visibilityManager.reclaimTimedOut(shardId, (queueId) => ({
|
|
14952
15353
|
queueKey: this.keys.queueKey(queueId),
|
|
14953
|
-
queueItemsKey: this.keys.queueItemsKey(queueId)
|
|
15354
|
+
queueItemsKey: this.keys.queueItemsKey(queueId),
|
|
15355
|
+
masterQueueKey: this.keys.masterQueueKey(this.masterQueue.getShardForQueue(queueId))
|
|
14954
15356
|
}));
|
|
14955
15357
|
totalReclaimed += reclaimed;
|
|
14956
15358
|
}
|
|
@@ -14974,6 +15376,13 @@ var FairQueue = class {
|
|
|
14974
15376
|
return false;
|
|
14975
15377
|
}
|
|
14976
15378
|
#incrementCooloff(queueId) {
|
|
15379
|
+
if (this.queueCooloffStates.size >= this.maxCooloffStatesSize) {
|
|
15380
|
+
this.logger.warn("Cooloff states cache hit size cap, clearing all entries", {
|
|
15381
|
+
size: this.queueCooloffStates.size,
|
|
15382
|
+
cap: this.maxCooloffStatesSize
|
|
15383
|
+
});
|
|
15384
|
+
this.queueCooloffStates.clear();
|
|
15385
|
+
}
|
|
14977
15386
|
const state = this.queueCooloffStates.get(queueId) ?? {
|
|
14978
15387
|
tag: "normal",
|
|
14979
15388
|
consecutiveFailures: 0
|
|
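The guard added a few rows above bounds queueCooloffStates with a clear-all eviction: once the map reaches maxCooloffStatesSize, every entry is dropped. That briefly forgets cooloff history, but it keeps memory flat without per-entry bookkeeping. A standalone sketch of the same pattern (names are illustrative, not from this package):

const MAX_STATES = 10_000;
const states = new Map<string, number>();
function bumpFailure(key: string): void {
  if (states.size >= MAX_STATES) states.clear(); // coarse eviction: reset everything
  states.set(key, (states.get(key) ?? 0) + 1);
}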
@@ -15116,6 +15525,6 @@ end
|
|
|
15116
15525
|
}
|
|
15117
15526
|
};
|
|
15118
15527
|
|
|
15119
|
-
export { BaseScheduler, CallbackFairQueueKeyProducer, ConcurrencyManager, CronSchema, CustomRetry, DRRScheduler, DefaultFairQueueKeyProducer, ExponentialBackoffRetry, FairQueue, FairQueueAttributes, FairQueueTelemetry, FixedDelayRetry, ImmediateRetry, LinearBackoffRetry, MasterQueue, MessagingAttributes, NoRetry, NoopScheduler, RoundRobinScheduler, SimpleQueue, VisibilityManager, WeightedScheduler, Worker, WorkerQueueManager, createDefaultRetryStrategy, defaultRetryOptions, noopTelemetry };
|
|
15528
|
+
export { BaseScheduler, BatchedSpanManager, CallbackFairQueueKeyProducer, ConcurrencyManager, CronSchema, CustomRetry, DRRScheduler, DefaultFairQueueKeyProducer, ExponentialBackoffRetry, FairQueue, FairQueueAttributes, FairQueueTelemetry, FixedDelayRetry, ImmediateRetry, LinearBackoffRetry, MasterQueue, MessagingAttributes, NoRetry, NoopScheduler, RoundRobinScheduler, SimpleQueue, VisibilityManager, WeightedScheduler, Worker, WorkerQueueManager, createDefaultRetryStrategy, defaultRetryOptions, isAbortError, noopTelemetry };
|
|
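isAbortError is now part of the public export list, so callers that drive their own consumer loops can separate cooperative aborts from real failures. A sketch, where runConsumerLoop and signal are hypothetical caller code:

import { isAbortError } from "@trigger.dev/redis-worker";

try {
  await runConsumerLoop(signal); // caller-owned loop honouring an AbortSignal
} catch (err) {
  if (!isAbortError(err)) throw err; // aborts are expected on shutdown
}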
15120
15529
|
//# sourceMappingURL=index.js.map
|
|
15121
15530
|
//# sourceMappingURL=index.js.map
|