@trigger.dev/redis-worker 4.3.0 → 4.3.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.cjs +3834 -3
- package/dist/index.cjs.map +1 -1
- package/dist/index.d.cts +1699 -5
- package/dist/index.d.ts +1699 -5
- package/dist/index.js +3806 -2
- package/dist/index.js.map +1 -1
- package/package.json +4 -2
package/dist/index.cjs
CHANGED
|
@@ -4,18 +4,21 @@ var process2 = require('process');
|
|
|
4
4
|
var os = require('os');
|
|
5
5
|
var tty = require('tty');
|
|
6
6
|
var logger$1 = require('@trigger.dev/core/logger');
|
|
7
|
-
var crypto = require('crypto');
|
|
7
|
+
var crypto$1 = require('crypto');
|
|
8
8
|
require('@trigger.dev/core/v3/utils/flattenAttributes');
|
|
9
9
|
var v3 = require('@trigger.dev/core/v3');
|
|
10
10
|
var serverOnly = require('@trigger.dev/core/v3/serverOnly');
|
|
11
11
|
var zod = require('zod');
|
|
12
12
|
var cronParser = require('cron-parser');
|
|
13
|
+
var promises = require('timers/promises');
|
|
14
|
+
var seedrandom = require('seedrandom');
|
|
13
15
|
|
|
14
16
|
function _interopDefault (e) { return e && e.__esModule ? e : { default: e }; }
|
|
15
17
|
|
|
16
18
|
var process2__default = /*#__PURE__*/_interopDefault(process2);
|
|
17
19
|
var os__default = /*#__PURE__*/_interopDefault(os);
|
|
18
20
|
var tty__default = /*#__PURE__*/_interopDefault(tty);
|
|
21
|
+
var seedrandom__default = /*#__PURE__*/_interopDefault(seedrandom);
|
|
19
22
|
|
|
20
23
|
var __create = Object.create;
|
|
21
24
|
var __defProp = Object.defineProperty;
|
|
@@ -9419,10 +9422,10 @@ var poolOffset;
|
|
|
9419
9422
|
function fillPool(bytes) {
|
|
9420
9423
|
if (!pool || pool.length < bytes) {
|
|
9421
9424
|
pool = Buffer.allocUnsafe(bytes * POOL_SIZE_MULTIPLIER);
|
|
9422
|
-
crypto.webcrypto.getRandomValues(pool);
|
|
9425
|
+
crypto$1.webcrypto.getRandomValues(pool);
|
|
9423
9426
|
poolOffset = 0;
|
|
9424
9427
|
} else if (poolOffset + bytes > pool.length) {
|
|
9425
|
-
crypto.webcrypto.getRandomValues(pool);
|
|
9428
|
+
crypto$1.webcrypto.getRandomValues(pool);
|
|
9426
9429
|
poolOffset = 0;
|
|
9427
9430
|
}
|
|
9428
9431
|
poolOffset += bytes;
|
|
@@ -10830,6 +10833,9 @@ var SpanStatusCode;
|
|
|
10830
10833
|
SpanStatusCode2[SpanStatusCode2["ERROR"] = 2] = "ERROR";
|
|
10831
10834
|
})(SpanStatusCode || (SpanStatusCode = {}));
|
|
10832
10835
|
|
|
10836
|
+
// ../../node_modules/.pnpm/@opentelemetry+api@1.9.0/node_modules/@opentelemetry/api/build/esm/context-api.js
|
|
10837
|
+
var context = ContextAPI.getInstance();
|
|
10838
|
+
|
|
10833
10839
|
// ../../node_modules/.pnpm/@opentelemetry+api@1.9.0/node_modules/@opentelemetry/api/build/esm/metrics/NoopMeterProvider.js
|
|
10834
10840
|
var NoopMeterProvider = (
|
|
10835
10841
|
/** @class */
|
|
@@ -11727,8 +11733,3833 @@ var Worker = class _Worker {
|
|
|
11727
11733
|
}
|
|
11728
11734
|
};
|
|
11729
11735
|
|
|
11736
|
+
// src/utils.ts
|
|
11737
|
+
function isAbortError(error) {
|
|
11738
|
+
return error instanceof Error && (error.name === "AbortError" || error.message === "AbortError");
|
|
11739
|
+
}
|
|
11740
|
+
|
|
11741
|
+
// src/fair-queue/concurrency.ts
|
|
11742
|
+
var ConcurrencyManager = class {
|
|
11743
|
+
constructor(options) {
|
|
11744
|
+
this.options = options;
|
|
11745
|
+
this.redis = createRedisClient(options.redis);
|
|
11746
|
+
this.keys = options.keys;
|
|
11747
|
+
this.groups = options.groups;
|
|
11748
|
+
this.groupsByName = new Map(options.groups.map((g) => [g.name, g]));
|
|
11749
|
+
this.#registerCommands();
|
|
11750
|
+
}
|
|
11751
|
+
redis;
|
|
11752
|
+
keys;
|
|
11753
|
+
groups;
|
|
11754
|
+
groupsByName;
|
|
11755
|
+
// ============================================================================
|
|
11756
|
+
// Public Methods
|
|
11757
|
+
// ============================================================================
|
|
11758
|
+
/**
|
|
11759
|
+
* Check if a message can be processed given all concurrency constraints.
|
|
11760
|
+
* Checks all configured groups and returns the first one at capacity.
|
|
11761
|
+
*/
|
|
11762
|
+
async canProcess(queue) {
|
|
11763
|
+
for (const group of this.groups) {
|
|
11764
|
+
const groupId = group.extractGroupId(queue);
|
|
11765
|
+
const isAtCapacity = await this.isAtCapacity(group.name, groupId);
|
|
11766
|
+
if (isAtCapacity) {
|
|
11767
|
+
const state = await this.getState(group.name, groupId);
|
|
11768
|
+
return {
|
|
11769
|
+
allowed: false,
|
|
11770
|
+
blockedBy: state
|
|
11771
|
+
};
|
|
11772
|
+
}
|
|
11773
|
+
}
|
|
11774
|
+
return { allowed: true };
|
|
11775
|
+
}
|
|
11776
|
+
/**
|
|
11777
|
+
* Reserve concurrency slots for a message across all groups.
|
|
11778
|
+
* Atomic - either all groups are reserved or none.
|
|
11779
|
+
*
|
|
11780
|
+
* @returns true if reservation successful, false if any group is at capacity
|
|
11781
|
+
*/
|
|
11782
|
+
async reserve(queue, messageId) {
|
|
11783
|
+
const groupData = await Promise.all(
|
|
11784
|
+
this.groups.map(async (group) => {
|
|
11785
|
+
const groupId = group.extractGroupId(queue);
|
|
11786
|
+
const limit = await group.getLimit(groupId);
|
|
11787
|
+
return {
|
|
11788
|
+
key: this.keys.concurrencyKey(group.name, groupId),
|
|
11789
|
+
limit: limit || group.defaultLimit
|
|
11790
|
+
};
|
|
11791
|
+
})
|
|
11792
|
+
);
|
|
11793
|
+
const keys = groupData.map((g) => g.key);
|
|
11794
|
+
const limits = groupData.map((g) => g.limit.toString());
|
|
11795
|
+
const result = await this.redis.reserveConcurrency(keys.length, keys, messageId, ...limits);
|
|
11796
|
+
return result === 1;
|
|
11797
|
+
}
|
|
11798
|
+
/**
|
|
11799
|
+
* Release concurrency slots for a message across all groups.
|
|
11800
|
+
*/
|
|
11801
|
+
async release(queue, messageId) {
|
|
11802
|
+
const pipeline = this.redis.pipeline();
|
|
11803
|
+
for (const group of this.groups) {
|
|
11804
|
+
const groupId = group.extractGroupId(queue);
|
|
11805
|
+
const key = this.keys.concurrencyKey(group.name, groupId);
|
|
11806
|
+
pipeline.srem(key, messageId);
|
|
11807
|
+
}
|
|
11808
|
+
await pipeline.exec();
|
|
11809
|
+
}
|
|
11810
|
+
/**
|
|
11811
|
+
* Get current concurrency for a specific group.
|
|
11812
|
+
*/
|
|
11813
|
+
async getCurrentConcurrency(groupName, groupId) {
|
|
11814
|
+
const key = this.keys.concurrencyKey(groupName, groupId);
|
|
11815
|
+
return await this.redis.scard(key);
|
|
11816
|
+
}
|
|
11817
|
+
/**
|
|
11818
|
+
* Get available capacity for a queue across all concurrency groups.
|
|
11819
|
+
* Returns the minimum available capacity across all groups.
|
|
11820
|
+
*/
|
|
11821
|
+
async getAvailableCapacity(queue) {
|
|
11822
|
+
if (this.groups.length === 0) {
|
|
11823
|
+
return 0;
|
|
11824
|
+
}
|
|
11825
|
+
const groupData = this.groups.map((group) => ({
|
|
11826
|
+
group,
|
|
11827
|
+
groupId: group.extractGroupId(queue)
|
|
11828
|
+
}));
|
|
11829
|
+
const [currents, limits] = await Promise.all([
|
|
11830
|
+
Promise.all(
|
|
11831
|
+
groupData.map(
|
|
11832
|
+
({ group, groupId }) => this.redis.scard(this.keys.concurrencyKey(group.name, groupId))
|
|
11833
|
+
)
|
|
11834
|
+
),
|
|
11835
|
+
Promise.all(
|
|
11836
|
+
groupData.map(
|
|
11837
|
+
({ group, groupId }) => group.getLimit(groupId).then((limit) => limit || group.defaultLimit)
|
|
11838
|
+
)
|
|
11839
|
+
)
|
|
11840
|
+
]);
|
|
11841
|
+
let minCapacity = Infinity;
|
|
11842
|
+
for (let i = 0; i < groupData.length; i++) {
|
|
11843
|
+
const available = Math.max(0, limits[i] - currents[i]);
|
|
11844
|
+
minCapacity = Math.min(minCapacity, available);
|
|
11845
|
+
}
|
|
11846
|
+
return minCapacity === Infinity ? 0 : minCapacity;
|
|
11847
|
+
}
|
|
11848
|
+
/**
|
|
11849
|
+
* Get concurrency limit for a specific group.
|
|
11850
|
+
*/
|
|
11851
|
+
async getConcurrencyLimit(groupName, groupId) {
|
|
11852
|
+
const group = this.groupsByName.get(groupName);
|
|
11853
|
+
if (!group) {
|
|
11854
|
+
throw new Error(`Unknown concurrency group: ${groupName}`);
|
|
11855
|
+
}
|
|
11856
|
+
return await group.getLimit(groupId) || group.defaultLimit;
|
|
11857
|
+
}
|
|
11858
|
+
/**
|
|
11859
|
+
* Check if a group is at capacity.
|
|
11860
|
+
*/
|
|
11861
|
+
async isAtCapacity(groupName, groupId) {
|
|
11862
|
+
const [current, limit] = await Promise.all([
|
|
11863
|
+
this.getCurrentConcurrency(groupName, groupId),
|
|
11864
|
+
this.getConcurrencyLimit(groupName, groupId)
|
|
11865
|
+
]);
|
|
11866
|
+
return current >= limit;
|
|
11867
|
+
}
|
|
11868
|
+
/**
|
|
11869
|
+
* Get full state for a group.
|
|
11870
|
+
*/
|
|
11871
|
+
async getState(groupName, groupId) {
|
|
11872
|
+
const [current, limit] = await Promise.all([
|
|
11873
|
+
this.getCurrentConcurrency(groupName, groupId),
|
|
11874
|
+
this.getConcurrencyLimit(groupName, groupId)
|
|
11875
|
+
]);
|
|
11876
|
+
return {
|
|
11877
|
+
groupName,
|
|
11878
|
+
groupId,
|
|
11879
|
+
current,
|
|
11880
|
+
limit
|
|
11881
|
+
};
|
|
11882
|
+
}
|
|
11883
|
+
/**
|
|
11884
|
+
* Get all active message IDs for a group.
|
|
11885
|
+
*/
|
|
11886
|
+
async getActiveMessages(groupName, groupId) {
|
|
11887
|
+
const key = this.keys.concurrencyKey(groupName, groupId);
|
|
11888
|
+
return await this.redis.smembers(key);
|
|
11889
|
+
}
|
|
11890
|
+
/**
|
|
11891
|
+
* Force-clear concurrency for a group (use with caution).
|
|
11892
|
+
* Useful for cleanup after crashes.
|
|
11893
|
+
*/
|
|
11894
|
+
async clearGroup(groupName, groupId) {
|
|
11895
|
+
const key = this.keys.concurrencyKey(groupName, groupId);
|
|
11896
|
+
await this.redis.del(key);
|
|
11897
|
+
}
|
|
11898
|
+
/**
|
|
11899
|
+
* Remove a specific message from concurrency tracking.
|
|
11900
|
+
* Useful for cleanup.
|
|
11901
|
+
*/
|
|
11902
|
+
async removeMessage(messageId, queue) {
|
|
11903
|
+
await this.release(queue, messageId);
|
|
11904
|
+
}
|
|
11905
|
+
/**
|
|
11906
|
+
* Get configured group names.
|
|
11907
|
+
*/
|
|
11908
|
+
getGroupNames() {
|
|
11909
|
+
return this.groups.map((g) => g.name);
|
|
11910
|
+
}
|
|
11911
|
+
/**
|
|
11912
|
+
* Close the Redis connection.
|
|
11913
|
+
*/
|
|
11914
|
+
async close() {
|
|
11915
|
+
await this.redis.quit();
|
|
11916
|
+
}
|
|
11917
|
+
// ============================================================================
|
|
11918
|
+
// Private Methods
|
|
11919
|
+
// ============================================================================
|
|
11920
|
+
#registerCommands() {
|
|
11921
|
+
this.redis.defineCommand("reserveConcurrency", {
|
|
11922
|
+
lua: `
|
|
11923
|
+
local numGroups = #KEYS
|
|
11924
|
+
local messageId = ARGV[1]
|
|
11925
|
+
|
|
11926
|
+
-- Check all groups first
|
|
11927
|
+
for i = 1, numGroups do
|
|
11928
|
+
local key = KEYS[i]
|
|
11929
|
+
local limit = tonumber(ARGV[1 + i]) -- Limits start at ARGV[2]
|
|
11930
|
+
local current = redis.call('SCARD', key)
|
|
11931
|
+
|
|
11932
|
+
if current >= limit then
|
|
11933
|
+
return 0 -- At capacity
|
|
11934
|
+
end
|
|
11935
|
+
end
|
|
11936
|
+
|
|
11937
|
+
-- All groups have capacity, add message to all
|
|
11938
|
+
for i = 1, numGroups do
|
|
11939
|
+
local key = KEYS[i]
|
|
11940
|
+
redis.call('SADD', key, messageId)
|
|
11941
|
+
end
|
|
11942
|
+
|
|
11943
|
+
return 1
|
|
11944
|
+
`
|
|
11945
|
+
});
|
|
11946
|
+
}
|
|
11947
|
+
};
|
|
11948
|
+
var MasterQueue = class {
|
|
11949
|
+
constructor(options) {
|
|
11950
|
+
this.options = options;
|
|
11951
|
+
this.redis = createRedisClient(options.redis);
|
|
11952
|
+
this.keys = options.keys;
|
|
11953
|
+
this.shardCount = Math.max(1, options.shardCount);
|
|
11954
|
+
this.#registerCommands();
|
|
11955
|
+
}
|
|
11956
|
+
redis;
|
|
11957
|
+
keys;
|
|
11958
|
+
shardCount;
|
|
11959
|
+
// ============================================================================
|
|
11960
|
+
// Public Methods
|
|
11961
|
+
// ============================================================================
|
|
11962
|
+
/**
|
|
11963
|
+
* Get the shard ID for a queue.
|
|
11964
|
+
* Uses consistent hashing based on queue ID.
|
|
11965
|
+
*/
|
|
11966
|
+
getShardForQueue(queueId) {
|
|
11967
|
+
return this.#hashToShard(queueId);
|
|
11968
|
+
}
|
|
11969
|
+
/**
|
|
11970
|
+
* Add a queue to its master queue shard.
|
|
11971
|
+
* Updates the score to the oldest message timestamp.
|
|
11972
|
+
*
|
|
11973
|
+
* @param queueId - The queue identifier
|
|
11974
|
+
* @param oldestMessageTimestamp - Timestamp of the oldest message in the queue
|
|
11975
|
+
*/
|
|
11976
|
+
async addQueue(queueId, oldestMessageTimestamp) {
|
|
11977
|
+
const shardId = this.getShardForQueue(queueId);
|
|
11978
|
+
const masterKey = this.keys.masterQueueKey(shardId);
|
|
11979
|
+
await this.redis.zadd(masterKey, oldestMessageTimestamp, queueId);
|
|
11980
|
+
}
|
|
11981
|
+
/**
|
|
11982
|
+
* Update a queue's score in the master queue.
|
|
11983
|
+
* This is typically called after dequeuing to update to the new oldest message.
|
|
11984
|
+
*
|
|
11985
|
+
* @param queueId - The queue identifier
|
|
11986
|
+
* @param newOldestTimestamp - New timestamp of the oldest message
|
|
11987
|
+
*/
|
|
11988
|
+
async updateQueueScore(queueId, newOldestTimestamp) {
|
|
11989
|
+
const shardId = this.getShardForQueue(queueId);
|
|
11990
|
+
const masterKey = this.keys.masterQueueKey(shardId);
|
|
11991
|
+
await this.redis.zadd(masterKey, newOldestTimestamp, queueId);
|
|
11992
|
+
}
|
|
11993
|
+
/**
|
|
11994
|
+
* Remove a queue from its master queue shard.
|
|
11995
|
+
* Called when a queue becomes empty.
|
|
11996
|
+
*
|
|
11997
|
+
* @param queueId - The queue identifier
|
|
11998
|
+
*/
|
|
11999
|
+
async removeQueue(queueId) {
|
|
12000
|
+
const shardId = this.getShardForQueue(queueId);
|
|
12001
|
+
const masterKey = this.keys.masterQueueKey(shardId);
|
|
12002
|
+
await this.redis.zrem(masterKey, queueId);
|
|
12003
|
+
}
|
|
12004
|
+
/**
|
|
12005
|
+
* Get queues from a shard, ordered by oldest message (lowest score first).
|
|
12006
|
+
*
|
|
12007
|
+
* @param shardId - The shard to query
|
|
12008
|
+
* @param limit - Maximum number of queues to return (default: 1000)
|
|
12009
|
+
* @param maxScore - Maximum score (timestamp) to include (default: now)
|
|
12010
|
+
*/
|
|
12011
|
+
async getQueuesFromShard(shardId, limit = 1e3, maxScore) {
|
|
12012
|
+
const masterKey = this.keys.masterQueueKey(shardId);
|
|
12013
|
+
const score = maxScore ?? Date.now();
|
|
12014
|
+
const results = await this.redis.zrangebyscore(
|
|
12015
|
+
masterKey,
|
|
12016
|
+
"-inf",
|
|
12017
|
+
score,
|
|
12018
|
+
"WITHSCORES",
|
|
12019
|
+
"LIMIT",
|
|
12020
|
+
0,
|
|
12021
|
+
limit
|
|
12022
|
+
);
|
|
12023
|
+
const queues = [];
|
|
12024
|
+
for (let i = 0; i < results.length; i += 2) {
|
|
12025
|
+
const queueId = results[i];
|
|
12026
|
+
const scoreStr = results[i + 1];
|
|
12027
|
+
if (queueId && scoreStr) {
|
|
12028
|
+
queues.push({
|
|
12029
|
+
queueId,
|
|
12030
|
+
score: parseFloat(scoreStr),
|
|
12031
|
+
tenantId: this.keys.extractTenantId(queueId)
|
|
12032
|
+
});
|
|
12033
|
+
}
|
|
12034
|
+
}
|
|
12035
|
+
return queues;
|
|
12036
|
+
}
|
|
12037
|
+
/**
|
|
12038
|
+
* Get the number of queues in a shard.
|
|
12039
|
+
*/
|
|
12040
|
+
async getShardQueueCount(shardId) {
|
|
12041
|
+
const masterKey = this.keys.masterQueueKey(shardId);
|
|
12042
|
+
return await this.redis.zcard(masterKey);
|
|
12043
|
+
}
|
|
12044
|
+
/**
|
|
12045
|
+
* Get total queue count across all shards.
|
|
12046
|
+
*/
|
|
12047
|
+
async getTotalQueueCount() {
|
|
12048
|
+
const counts = await Promise.all(
|
|
12049
|
+
Array.from({ length: this.shardCount }, (_, i) => this.getShardQueueCount(i))
|
|
12050
|
+
);
|
|
12051
|
+
return counts.reduce((sum, count) => sum + count, 0);
|
|
12052
|
+
}
|
|
12053
|
+
/**
|
|
12054
|
+
* Atomically add a queue to master queue only if queue has messages.
|
|
12055
|
+
* Uses Lua script for atomicity.
|
|
12056
|
+
*
|
|
12057
|
+
* @param queueId - The queue identifier
|
|
12058
|
+
* @param queueKey - The actual queue sorted set key
|
|
12059
|
+
* @returns Whether the queue was added to the master queue
|
|
12060
|
+
*/
|
|
12061
|
+
async addQueueIfNotEmpty(queueId, queueKey) {
|
|
12062
|
+
const shardId = this.getShardForQueue(queueId);
|
|
12063
|
+
const masterKey = this.keys.masterQueueKey(shardId);
|
|
12064
|
+
const result = await this.redis.addQueueIfNotEmpty(masterKey, queueKey, queueId);
|
|
12065
|
+
return result === 1;
|
|
12066
|
+
}
|
|
12067
|
+
/**
|
|
12068
|
+
* Atomically remove a queue from master queue only if queue is empty.
|
|
12069
|
+
* Uses Lua script for atomicity.
|
|
12070
|
+
*
|
|
12071
|
+
* @param queueId - The queue identifier
|
|
12072
|
+
* @param queueKey - The actual queue sorted set key
|
|
12073
|
+
* @returns Whether the queue was removed from the master queue
|
|
12074
|
+
*/
|
|
12075
|
+
async removeQueueIfEmpty(queueId, queueKey) {
|
|
12076
|
+
const shardId = this.getShardForQueue(queueId);
|
|
12077
|
+
const masterKey = this.keys.masterQueueKey(shardId);
|
|
12078
|
+
const result = await this.redis.removeQueueIfEmpty(masterKey, queueKey, queueId);
|
|
12079
|
+
return result === 1;
|
|
12080
|
+
}
|
|
12081
|
+
/**
|
|
12082
|
+
* Close the Redis connection.
|
|
12083
|
+
*/
|
|
12084
|
+
async close() {
|
|
12085
|
+
await this.redis.quit();
|
|
12086
|
+
}
|
|
12087
|
+
// ============================================================================
|
|
12088
|
+
// Private Methods
|
|
12089
|
+
// ============================================================================
|
|
12090
|
+
/**
|
|
12091
|
+
* Map queue ID to shard using Jump Consistent Hash.
|
|
12092
|
+
* Provides better distribution than djb2 and minimal reshuffling when shard count changes.
|
|
12093
|
+
*/
|
|
12094
|
+
#hashToShard(queueId) {
|
|
12095
|
+
return serverOnly.jumpHash(queueId, this.shardCount);
|
|
12096
|
+
}
|
|
12097
|
+
#registerCommands() {
|
|
12098
|
+
this.redis.defineCommand("addQueueIfNotEmpty", {
|
|
12099
|
+
numberOfKeys: 2,
|
|
12100
|
+
lua: `
|
|
12101
|
+
local masterKey = KEYS[1]
|
|
12102
|
+
local queueKey = KEYS[2]
|
|
12103
|
+
local queueId = ARGV[1]
|
|
12104
|
+
|
|
12105
|
+
-- Check if queue has any messages
|
|
12106
|
+
local count = redis.call('ZCARD', queueKey)
|
|
12107
|
+
if count == 0 then
|
|
12108
|
+
return 0
|
|
12109
|
+
end
|
|
12110
|
+
|
|
12111
|
+
-- Get the oldest message timestamp (lowest score)
|
|
12112
|
+
local oldest = redis.call('ZRANGE', queueKey, 0, 0, 'WITHSCORES')
|
|
12113
|
+
if #oldest == 0 then
|
|
12114
|
+
return 0
|
|
12115
|
+
end
|
|
12116
|
+
|
|
12117
|
+
local score = oldest[2]
|
|
12118
|
+
|
|
12119
|
+
-- Add to master queue with the oldest message score
|
|
12120
|
+
redis.call('ZADD', masterKey, score, queueId)
|
|
12121
|
+
return 1
|
|
12122
|
+
`
|
|
12123
|
+
});
|
|
12124
|
+
this.redis.defineCommand("removeQueueIfEmpty", {
|
|
12125
|
+
numberOfKeys: 2,
|
|
12126
|
+
lua: `
|
|
12127
|
+
local masterKey = KEYS[1]
|
|
12128
|
+
local queueKey = KEYS[2]
|
|
12129
|
+
local queueId = ARGV[1]
|
|
12130
|
+
|
|
12131
|
+
-- Check if queue is empty
|
|
12132
|
+
local count = redis.call('ZCARD', queueKey)
|
|
12133
|
+
if count > 0 then
|
|
12134
|
+
return 0
|
|
12135
|
+
end
|
|
12136
|
+
|
|
12137
|
+
-- Remove from master queue
|
|
12138
|
+
redis.call('ZREM', masterKey, queueId)
|
|
12139
|
+
return 1
|
|
12140
|
+
`
|
|
12141
|
+
});
|
|
12142
|
+
}
|
|
12143
|
+
};
|
|
12144
|
+
|
|
12145
|
+
// src/fair-queue/telemetry.ts
|
|
12146
|
+
var FairQueueAttributes = {
|
|
12147
|
+
QUEUE_ID: "fairqueue.queue_id",
|
|
12148
|
+
TENANT_ID: "fairqueue.tenant_id",
|
|
12149
|
+
MESSAGE_ID: "fairqueue.message_id",
|
|
12150
|
+
SHARD_ID: "fairqueue.shard_id",
|
|
12151
|
+
WORKER_QUEUE: "fairqueue.worker_queue",
|
|
12152
|
+
CONSUMER_ID: "fairqueue.consumer_id",
|
|
12153
|
+
ATTEMPT: "fairqueue.attempt",
|
|
12154
|
+
CONCURRENCY_GROUP: "fairqueue.concurrency_group",
|
|
12155
|
+
MESSAGE_COUNT: "fairqueue.message_count",
|
|
12156
|
+
RESULT: "fairqueue.result"
|
|
12157
|
+
};
|
|
12158
|
+
var MessagingAttributes = {
|
|
12159
|
+
SYSTEM: "messaging.system",
|
|
12160
|
+
OPERATION: "messaging.operation",
|
|
12161
|
+
MESSAGE_ID: "messaging.message_id",
|
|
12162
|
+
DESTINATION_NAME: "messaging.destination.name"
|
|
12163
|
+
};
|
|
12164
|
+
var FairQueueTelemetry = class {
|
|
12165
|
+
tracer;
|
|
12166
|
+
meter;
|
|
12167
|
+
metrics;
|
|
12168
|
+
name;
|
|
12169
|
+
constructor(options) {
|
|
12170
|
+
this.tracer = options.tracer;
|
|
12171
|
+
this.meter = options.meter;
|
|
12172
|
+
this.name = options.name ?? "fairqueue";
|
|
12173
|
+
if (this.meter) {
|
|
12174
|
+
this.#initializeMetrics();
|
|
12175
|
+
}
|
|
12176
|
+
}
|
|
12177
|
+
// ============================================================================
|
|
12178
|
+
// Tracing
|
|
12179
|
+
// ============================================================================
|
|
12180
|
+
/**
|
|
12181
|
+
* Create a traced span for an operation.
|
|
12182
|
+
* Returns the result of the function, or throws any error after recording it.
|
|
12183
|
+
*/
|
|
12184
|
+
async trace(name, fn, options) {
|
|
12185
|
+
if (!this.tracer) {
|
|
12186
|
+
return fn(noopSpan);
|
|
12187
|
+
}
|
|
12188
|
+
const spanOptions = {
|
|
12189
|
+
kind: options?.kind,
|
|
12190
|
+
attributes: {
|
|
12191
|
+
[MessagingAttributes.SYSTEM]: this.name,
|
|
12192
|
+
...options?.attributes
|
|
12193
|
+
}
|
|
12194
|
+
};
|
|
12195
|
+
return this.tracer.startActiveSpan(`${this.name}.${name}`, spanOptions, async (span) => {
|
|
12196
|
+
try {
|
|
12197
|
+
const result = await fn(span);
|
|
12198
|
+
return result;
|
|
12199
|
+
} catch (error) {
|
|
12200
|
+
if (error instanceof Error) {
|
|
12201
|
+
span.recordException(error);
|
|
12202
|
+
} else {
|
|
12203
|
+
span.recordException(new Error(String(error)));
|
|
12204
|
+
}
|
|
12205
|
+
throw error;
|
|
12206
|
+
} finally {
|
|
12207
|
+
span.end();
|
|
12208
|
+
}
|
|
12209
|
+
});
|
|
12210
|
+
}
|
|
12211
|
+
/**
|
|
12212
|
+
* Synchronous version of trace.
|
|
12213
|
+
*/
|
|
12214
|
+
traceSync(name, fn, options) {
|
|
12215
|
+
if (!this.tracer) {
|
|
12216
|
+
return fn(noopSpan);
|
|
12217
|
+
}
|
|
12218
|
+
const spanOptions = {
|
|
12219
|
+
kind: options?.kind,
|
|
12220
|
+
attributes: {
|
|
12221
|
+
[MessagingAttributes.SYSTEM]: this.name,
|
|
12222
|
+
...options?.attributes
|
|
12223
|
+
}
|
|
12224
|
+
};
|
|
12225
|
+
return this.tracer.startActiveSpan(`${this.name}.${name}`, spanOptions, (span) => {
|
|
12226
|
+
try {
|
|
12227
|
+
return fn(span);
|
|
12228
|
+
} catch (error) {
|
|
12229
|
+
if (error instanceof Error) {
|
|
12230
|
+
span.recordException(error);
|
|
12231
|
+
} else {
|
|
12232
|
+
span.recordException(new Error(String(error)));
|
|
12233
|
+
}
|
|
12234
|
+
throw error;
|
|
12235
|
+
} finally {
|
|
12236
|
+
span.end();
|
|
12237
|
+
}
|
|
12238
|
+
});
|
|
12239
|
+
}
|
|
12240
|
+
// ============================================================================
|
|
12241
|
+
// Metrics
|
|
12242
|
+
// ============================================================================
|
|
12243
|
+
/**
|
|
12244
|
+
* Record a message enqueued.
|
|
12245
|
+
*/
|
|
12246
|
+
recordEnqueue(attributes) {
|
|
12247
|
+
this.metrics?.messagesEnqueued.add(1, attributes);
|
|
12248
|
+
}
|
|
12249
|
+
/**
|
|
12250
|
+
* Record a batch of messages enqueued.
|
|
12251
|
+
*/
|
|
12252
|
+
recordEnqueueBatch(count, attributes) {
|
|
12253
|
+
this.metrics?.messagesEnqueued.add(count, attributes);
|
|
12254
|
+
}
|
|
12255
|
+
/**
|
|
12256
|
+
* Record a message completed successfully.
|
|
12257
|
+
*/
|
|
12258
|
+
recordComplete(attributes) {
|
|
12259
|
+
this.metrics?.messagesCompleted.add(1, attributes);
|
|
12260
|
+
}
|
|
12261
|
+
/**
|
|
12262
|
+
* Record a message processing failure.
|
|
12263
|
+
*/
|
|
12264
|
+
recordFailure(attributes) {
|
|
12265
|
+
this.metrics?.messagesFailed.add(1, attributes);
|
|
12266
|
+
}
|
|
12267
|
+
/**
|
|
12268
|
+
* Record a message retry.
|
|
12269
|
+
*/
|
|
12270
|
+
recordRetry(attributes) {
|
|
12271
|
+
this.metrics?.messagesRetried.add(1, attributes);
|
|
12272
|
+
}
|
|
12273
|
+
/**
|
|
12274
|
+
* Record a message sent to DLQ.
|
|
12275
|
+
*/
|
|
12276
|
+
recordDLQ(attributes) {
|
|
12277
|
+
this.metrics?.messagesToDLQ.add(1, attributes);
|
|
12278
|
+
}
|
|
12279
|
+
/**
|
|
12280
|
+
* Record message processing time.
|
|
12281
|
+
*
|
|
12282
|
+
* @param durationMs - Processing duration in milliseconds
|
|
12283
|
+
*/
|
|
12284
|
+
recordProcessingTime(durationMs, attributes) {
|
|
12285
|
+
this.metrics?.processingTime.record(durationMs, attributes);
|
|
12286
|
+
}
|
|
12287
|
+
/**
|
|
12288
|
+
* Record time a message spent waiting in queue.
|
|
12289
|
+
*
|
|
12290
|
+
* @param durationMs - Queue wait time in milliseconds
|
|
12291
|
+
*/
|
|
12292
|
+
recordQueueTime(durationMs, attributes) {
|
|
12293
|
+
this.metrics?.queueTime.record(durationMs, attributes);
|
|
12294
|
+
}
|
|
12295
|
+
/**
|
|
12296
|
+
* Register observable gauge callbacks.
|
|
12297
|
+
* Call this after FairQueue is initialized to register the gauge callbacks.
|
|
12298
|
+
*/
|
|
12299
|
+
registerGaugeCallbacks(callbacks) {
|
|
12300
|
+
if (!this.metrics) return;
|
|
12301
|
+
if (callbacks.getQueueLength && callbacks.observedQueues) {
|
|
12302
|
+
const getQueueLength = callbacks.getQueueLength;
|
|
12303
|
+
const queues = callbacks.observedQueues;
|
|
12304
|
+
this.metrics.queueLength.addCallback(async (observableResult) => {
|
|
12305
|
+
for (const queueId of queues) {
|
|
12306
|
+
const length = await getQueueLength(queueId);
|
|
12307
|
+
observableResult.observe(length, {
|
|
12308
|
+
[FairQueueAttributes.QUEUE_ID]: queueId
|
|
12309
|
+
});
|
|
12310
|
+
}
|
|
12311
|
+
});
|
|
12312
|
+
}
|
|
12313
|
+
if (callbacks.getMasterQueueLength && callbacks.shardCount) {
|
|
12314
|
+
const getMasterQueueLength = callbacks.getMasterQueueLength;
|
|
12315
|
+
const shardCount = callbacks.shardCount;
|
|
12316
|
+
this.metrics.masterQueueLength.addCallback(async (observableResult) => {
|
|
12317
|
+
for (let shardId = 0; shardId < shardCount; shardId++) {
|
|
12318
|
+
const length = await getMasterQueueLength(shardId);
|
|
12319
|
+
observableResult.observe(length, {
|
|
12320
|
+
[FairQueueAttributes.SHARD_ID]: shardId.toString()
|
|
12321
|
+
});
|
|
12322
|
+
}
|
|
12323
|
+
});
|
|
12324
|
+
}
|
|
12325
|
+
if (callbacks.getInflightCount && callbacks.shardCount) {
|
|
12326
|
+
const getInflightCount = callbacks.getInflightCount;
|
|
12327
|
+
const shardCount = callbacks.shardCount;
|
|
12328
|
+
this.metrics.inflightCount.addCallback(async (observableResult) => {
|
|
12329
|
+
for (let shardId = 0; shardId < shardCount; shardId++) {
|
|
12330
|
+
const count = await getInflightCount(shardId);
|
|
12331
|
+
observableResult.observe(count, {
|
|
12332
|
+
[FairQueueAttributes.SHARD_ID]: shardId.toString()
|
|
12333
|
+
});
|
|
12334
|
+
}
|
|
12335
|
+
});
|
|
12336
|
+
}
|
|
12337
|
+
if (callbacks.getDLQLength && callbacks.observedTenants) {
|
|
12338
|
+
const getDLQLength = callbacks.getDLQLength;
|
|
12339
|
+
const tenants = callbacks.observedTenants;
|
|
12340
|
+
this.metrics.dlqLength.addCallback(async (observableResult) => {
|
|
12341
|
+
for (const tenantId of tenants) {
|
|
12342
|
+
const length = await getDLQLength(tenantId);
|
|
12343
|
+
observableResult.observe(length, {
|
|
12344
|
+
[FairQueueAttributes.TENANT_ID]: tenantId
|
|
12345
|
+
});
|
|
12346
|
+
}
|
|
12347
|
+
});
|
|
12348
|
+
}
|
|
12349
|
+
}
|
|
12350
|
+
// ============================================================================
|
|
12351
|
+
// Helper Methods
|
|
12352
|
+
// ============================================================================
|
|
12353
|
+
/**
|
|
12354
|
+
* Create standard attributes for a message operation (for spans/traces).
|
|
12355
|
+
* Use this for span attributes where high cardinality is acceptable.
|
|
12356
|
+
*/
|
|
12357
|
+
messageAttributes(params) {
|
|
12358
|
+
const attrs = {};
|
|
12359
|
+
if (params.queueId) attrs[FairQueueAttributes.QUEUE_ID] = params.queueId;
|
|
12360
|
+
if (params.tenantId) attrs[FairQueueAttributes.TENANT_ID] = params.tenantId;
|
|
12361
|
+
if (params.messageId) attrs[FairQueueAttributes.MESSAGE_ID] = params.messageId;
|
|
12362
|
+
if (params.attempt !== void 0) attrs[FairQueueAttributes.ATTEMPT] = params.attempt;
|
|
12363
|
+
if (params.workerQueue) attrs[FairQueueAttributes.WORKER_QUEUE] = params.workerQueue;
|
|
12364
|
+
if (params.consumerId) attrs[FairQueueAttributes.CONSUMER_ID] = params.consumerId;
|
|
12365
|
+
return attrs;
|
|
12366
|
+
}
|
|
12367
|
+
/**
|
|
12368
|
+
* Check if telemetry is enabled.
|
|
12369
|
+
*/
|
|
12370
|
+
get isEnabled() {
|
|
12371
|
+
return !!this.tracer || !!this.meter;
|
|
12372
|
+
}
|
|
12373
|
+
/**
|
|
12374
|
+
* Check if tracing is enabled.
|
|
12375
|
+
*/
|
|
12376
|
+
get hasTracer() {
|
|
12377
|
+
return !!this.tracer;
|
|
12378
|
+
}
|
|
12379
|
+
/**
|
|
12380
|
+
* Check if metrics are enabled.
|
|
12381
|
+
*/
|
|
12382
|
+
get hasMetrics() {
|
|
12383
|
+
return !!this.meter;
|
|
12384
|
+
}
|
|
12385
|
+
// ============================================================================
|
|
12386
|
+
// Private Methods
|
|
12387
|
+
// ============================================================================
|
|
12388
|
+
#initializeMetrics() {
|
|
12389
|
+
if (!this.meter) return;
|
|
12390
|
+
this.metrics = {
|
|
12391
|
+
// Counters
|
|
12392
|
+
messagesEnqueued: this.meter.createCounter(`${this.name}.messages.enqueued`, {
|
|
12393
|
+
description: "Number of messages enqueued",
|
|
12394
|
+
unit: "messages"
|
|
12395
|
+
}),
|
|
12396
|
+
messagesCompleted: this.meter.createCounter(`${this.name}.messages.completed`, {
|
|
12397
|
+
description: "Number of messages completed successfully",
|
|
12398
|
+
unit: "messages"
|
|
12399
|
+
}),
|
|
12400
|
+
messagesFailed: this.meter.createCounter(`${this.name}.messages.failed`, {
|
|
12401
|
+
description: "Number of messages that failed processing",
|
|
12402
|
+
unit: "messages"
|
|
12403
|
+
}),
|
|
12404
|
+
messagesRetried: this.meter.createCounter(`${this.name}.messages.retried`, {
|
|
12405
|
+
description: "Number of message retries",
|
|
12406
|
+
unit: "messages"
|
|
12407
|
+
}),
|
|
12408
|
+
messagesToDLQ: this.meter.createCounter(`${this.name}.messages.dlq`, {
|
|
12409
|
+
description: "Number of messages sent to dead letter queue",
|
|
12410
|
+
unit: "messages"
|
|
12411
|
+
}),
|
|
12412
|
+
// Histograms
|
|
12413
|
+
processingTime: this.meter.createHistogram(`${this.name}.message.processing_time`, {
|
|
12414
|
+
description: "Message processing time",
|
|
12415
|
+
unit: "ms"
|
|
12416
|
+
}),
|
|
12417
|
+
queueTime: this.meter.createHistogram(`${this.name}.message.queue_time`, {
|
|
12418
|
+
description: "Time message spent waiting in queue",
|
|
12419
|
+
unit: "ms"
|
|
12420
|
+
}),
|
|
12421
|
+
// Observable gauges
|
|
12422
|
+
queueLength: this.meter.createObservableGauge(`${this.name}.queue.length`, {
|
|
12423
|
+
description: "Number of messages in a queue",
|
|
12424
|
+
unit: "messages"
|
|
12425
|
+
}),
|
|
12426
|
+
masterQueueLength: this.meter.createObservableGauge(`${this.name}.master_queue.length`, {
|
|
12427
|
+
description: "Number of queues in master queue shard",
|
|
12428
|
+
unit: "queues"
|
|
12429
|
+
}),
|
|
12430
|
+
inflightCount: this.meter.createObservableGauge(`${this.name}.inflight.count`, {
|
|
12431
|
+
description: "Number of messages currently being processed",
|
|
12432
|
+
unit: "messages"
|
|
12433
|
+
}),
|
|
12434
|
+
dlqLength: this.meter.createObservableGauge(`${this.name}.dlq.length`, {
|
|
12435
|
+
description: "Number of messages in dead letter queue",
|
|
12436
|
+
unit: "messages"
|
|
12437
|
+
})
|
|
12438
|
+
};
|
|
12439
|
+
}
|
|
12440
|
+
};
|
|
12441
|
+
var BatchedSpanManager = class {
|
|
12442
|
+
tracer;
|
|
12443
|
+
name;
|
|
12444
|
+
maxIterations;
|
|
12445
|
+
timeoutSeconds;
|
|
12446
|
+
loopStates = /* @__PURE__ */ new Map();
|
|
12447
|
+
getDynamicAttributes;
|
|
12448
|
+
constructor(options) {
|
|
12449
|
+
this.tracer = options.tracer;
|
|
12450
|
+
this.name = options.name;
|
|
12451
|
+
this.maxIterations = options.maxIterations;
|
|
12452
|
+
this.timeoutSeconds = options.timeoutSeconds;
|
|
12453
|
+
this.getDynamicAttributes = options.getDynamicAttributes;
|
|
12454
|
+
}
|
|
12455
|
+
/**
|
|
12456
|
+
* Initialize state for a consumer loop.
|
|
12457
|
+
*/
|
|
12458
|
+
initializeLoop(loopId) {
|
|
12459
|
+
this.loopStates.set(loopId, {
|
|
12460
|
+
perTraceCountdown: this.maxIterations,
|
|
12461
|
+
traceStartedAt: /* @__PURE__ */ new Date(),
|
|
12462
|
+
iterationsCount: 0,
|
|
12463
|
+
totalIterationsCount: 0,
|
|
12464
|
+
runningDurationInMs: 0,
|
|
12465
|
+
stats: {},
|
|
12466
|
+
endSpanInNextIteration: false
|
|
12467
|
+
});
|
|
12468
|
+
}
|
|
12469
|
+
/**
|
|
12470
|
+
* Get the state for a consumer loop.
|
|
12471
|
+
*/
|
|
12472
|
+
getState(loopId) {
|
|
12473
|
+
return this.loopStates.get(loopId);
|
|
12474
|
+
}
|
|
12475
|
+
/**
|
|
12476
|
+
* Increment a stat counter for a loop.
|
|
12477
|
+
*/
|
|
12478
|
+
incrementStat(loopId, statName, value = 1) {
|
|
12479
|
+
const state = this.loopStates.get(loopId);
|
|
12480
|
+
if (state) {
|
|
12481
|
+
state.stats[statName] = (state.stats[statName] ?? 0) + value;
|
|
12482
|
+
}
|
|
12483
|
+
}
|
|
12484
|
+
/**
|
|
12485
|
+
* Mark that the span should end on the next iteration.
|
|
12486
|
+
*/
|
|
12487
|
+
markForRotation(loopId) {
|
|
12488
|
+
const state = this.loopStates.get(loopId);
|
|
12489
|
+
if (state) {
|
|
12490
|
+
state.endSpanInNextIteration = true;
|
|
12491
|
+
}
|
|
12492
|
+
}
|
|
12493
|
+
/**
|
|
12494
|
+
* Check if the span should be rotated (ended and a new one started).
|
|
12495
|
+
*/
|
|
12496
|
+
shouldRotate(loopId) {
|
|
12497
|
+
const state = this.loopStates.get(loopId);
|
|
12498
|
+
if (!state) return true;
|
|
12499
|
+
return state.perTraceCountdown <= 0 || Date.now() - state.traceStartedAt.getTime() > this.timeoutSeconds * 1e3 || state.currentSpanContext === void 0 || state.endSpanInNextIteration;
|
|
12500
|
+
}
|
|
12501
|
+
/**
|
|
12502
|
+
* End the current span for a loop and record stats.
|
|
12503
|
+
*/
|
|
12504
|
+
endCurrentSpan(loopId) {
|
|
12505
|
+
const state = this.loopStates.get(loopId);
|
|
12506
|
+
if (!state?.currentSpan) return;
|
|
12507
|
+
for (const [statName, count] of Object.entries(state.stats)) {
|
|
12508
|
+
state.currentSpan.setAttribute(`stats.${statName}`, count);
|
|
12509
|
+
}
|
|
12510
|
+
state.currentSpan.end();
|
|
12511
|
+
state.currentSpan = void 0;
|
|
12512
|
+
state.currentSpanContext = void 0;
|
|
12513
|
+
}
|
|
12514
|
+
/**
|
|
12515
|
+
* Start a new batched span for a loop.
|
|
12516
|
+
*/
|
|
12517
|
+
startNewSpan(loopId, attributes) {
|
|
12518
|
+
if (!this.tracer) return;
|
|
12519
|
+
const state = this.loopStates.get(loopId);
|
|
12520
|
+
if (!state) return;
|
|
12521
|
+
this.endCurrentSpan(loopId);
|
|
12522
|
+
const traceDurationInMs = state.traceStartedAt ? Date.now() - state.traceStartedAt.getTime() : void 0;
|
|
12523
|
+
const iterationsPerSecond = traceDurationInMs && traceDurationInMs > 0 ? state.iterationsCount / (traceDurationInMs / 1e3) : void 0;
|
|
12524
|
+
const dynamicAttributes = this.getDynamicAttributes?.() ?? {};
|
|
12525
|
+
state.currentSpan = this.tracer.startSpan(
|
|
12526
|
+
`${this.name}.consumerLoop`,
|
|
12527
|
+
{
|
|
12528
|
+
kind: 1,
|
|
12529
|
+
// SpanKind.CONSUMER
|
|
12530
|
+
attributes: {
|
|
12531
|
+
loop_id: loopId,
|
|
12532
|
+
max_iterations: this.maxIterations,
|
|
12533
|
+
timeout_seconds: this.timeoutSeconds,
|
|
12534
|
+
previous_iterations: state.iterationsCount,
|
|
12535
|
+
previous_duration_ms: traceDurationInMs,
|
|
12536
|
+
previous_iterations_per_second: iterationsPerSecond,
|
|
12537
|
+
total_iterations: state.totalIterationsCount,
|
|
12538
|
+
...dynamicAttributes,
|
|
12539
|
+
...attributes
|
|
12540
|
+
}
|
|
12541
|
+
},
|
|
12542
|
+
ROOT_CONTEXT
|
|
12543
|
+
);
|
|
12544
|
+
state.currentSpanContext = trace.setSpan(ROOT_CONTEXT, state.currentSpan);
|
|
12545
|
+
state.perTraceCountdown = this.maxIterations;
|
|
12546
|
+
state.traceStartedAt = /* @__PURE__ */ new Date();
|
|
12547
|
+
state.iterationsCount = 0;
|
|
12548
|
+
state.runningDurationInMs = 0;
|
|
12549
|
+
state.stats = {};
|
|
12550
|
+
state.endSpanInNextIteration = false;
|
|
12551
|
+
}
|
|
12552
|
+
/**
|
|
12553
|
+
* Execute a function within the batched span context.
|
|
12554
|
+
* Automatically handles span rotation and iteration tracking.
|
|
12555
|
+
*/
|
|
12556
|
+
async withBatchedSpan(loopId, fn, options) {
|
|
12557
|
+
let state = this.loopStates.get(loopId);
|
|
12558
|
+
if (!state) {
|
|
12559
|
+
this.initializeLoop(loopId);
|
|
12560
|
+
state = this.loopStates.get(loopId);
|
|
12561
|
+
}
|
|
12562
|
+
if (this.shouldRotate(loopId)) {
|
|
12563
|
+
this.startNewSpan(loopId);
|
|
12564
|
+
}
|
|
12565
|
+
const startTime = performance.now();
|
|
12566
|
+
try {
|
|
12567
|
+
if (!this.tracer || !state.currentSpanContext) {
|
|
12568
|
+
return await fn(noopSpan);
|
|
12569
|
+
}
|
|
12570
|
+
return await context.with(state.currentSpanContext, async () => {
|
|
12571
|
+
const iterationSpanName = options?.iterationSpanName ?? "iteration";
|
|
12572
|
+
return await this.tracer.startActiveSpan(
|
|
12573
|
+
`${this.name}.${iterationSpanName}`,
|
|
12574
|
+
{
|
|
12575
|
+
attributes: {
|
|
12576
|
+
loop_id: loopId,
|
|
12577
|
+
iteration: state.iterationsCount,
|
|
12578
|
+
...options?.attributes
|
|
12579
|
+
}
|
|
12580
|
+
},
|
|
12581
|
+
async (iterationSpan) => {
|
|
12582
|
+
try {
|
|
12583
|
+
return await fn(iterationSpan);
|
|
12584
|
+
} catch (error) {
|
|
12585
|
+
if (error instanceof Error) {
|
|
12586
|
+
iterationSpan.recordException(error);
|
|
12587
|
+
state.currentSpan?.recordException(error);
|
|
12588
|
+
}
|
|
12589
|
+
iterationSpan.setStatus({ code: SpanStatusCode.ERROR });
|
|
12590
|
+
state.endSpanInNextIteration = true;
|
|
12591
|
+
throw error;
|
|
12592
|
+
} finally {
|
|
12593
|
+
iterationSpan.end();
|
|
12594
|
+
}
|
|
12595
|
+
}
|
|
12596
|
+
);
|
|
12597
|
+
});
|
|
12598
|
+
} finally {
|
|
12599
|
+
const duration = performance.now() - startTime;
|
|
12600
|
+
state.runningDurationInMs += duration;
|
|
12601
|
+
state.iterationsCount++;
|
|
12602
|
+
state.totalIterationsCount++;
|
|
12603
|
+
state.perTraceCountdown--;
|
|
12604
|
+
}
|
|
12605
|
+
}
|
|
12606
|
+
/**
|
|
12607
|
+
* Clean up state for a loop when it's stopped.
|
|
12608
|
+
*/
|
|
12609
|
+
cleanup(loopId) {
|
|
12610
|
+
this.endCurrentSpan(loopId);
|
|
12611
|
+
this.loopStates.delete(loopId);
|
|
12612
|
+
}
|
|
12613
|
+
/**
|
|
12614
|
+
* Clean up all loop states.
|
|
12615
|
+
*/
|
|
12616
|
+
cleanupAll() {
|
|
12617
|
+
for (const loopId of this.loopStates.keys()) {
|
|
12618
|
+
this.cleanup(loopId);
|
|
12619
|
+
}
|
|
12620
|
+
}
|
|
12621
|
+
};
|
|
12622
|
+
var noopSpan = {
|
|
12623
|
+
spanContext: () => ({
|
|
12624
|
+
traceId: "",
|
|
12625
|
+
spanId: "",
|
|
12626
|
+
traceFlags: 0
|
|
12627
|
+
}),
|
|
12628
|
+
setAttribute: () => noopSpan,
|
|
12629
|
+
setAttributes: () => noopSpan,
|
|
12630
|
+
addEvent: () => noopSpan,
|
|
12631
|
+
addLink: () => noopSpan,
|
|
12632
|
+
addLinks: () => noopSpan,
|
|
12633
|
+
setStatus: () => noopSpan,
|
|
12634
|
+
updateName: () => noopSpan,
|
|
12635
|
+
end: () => {
|
|
12636
|
+
},
|
|
12637
|
+
isRecording: () => false,
|
|
12638
|
+
recordException: () => {
|
|
12639
|
+
}
|
|
12640
|
+
};
|
|
12641
|
+
var noopTelemetry = new FairQueueTelemetry({});
|
|
12642
|
+
var VisibilityManager = class {
|
|
12643
|
+
constructor(options) {
|
|
12644
|
+
this.options = options;
|
|
12645
|
+
this.redis = createRedisClient(options.redis);
|
|
12646
|
+
this.keys = options.keys;
|
|
12647
|
+
this.shardCount = options.shardCount;
|
|
12648
|
+
this.defaultTimeoutMs = options.defaultTimeoutMs;
|
|
12649
|
+
this.logger = options.logger ?? {
|
|
12650
|
+
debug: () => {
|
|
12651
|
+
},
|
|
12652
|
+
error: () => {
|
|
12653
|
+
}
|
|
12654
|
+
};
|
|
12655
|
+
this.#registerCommands();
|
|
12656
|
+
}
|
|
12657
|
+
redis;
|
|
12658
|
+
keys;
|
|
12659
|
+
shardCount;
|
|
12660
|
+
defaultTimeoutMs;
|
|
12661
|
+
logger;
|
|
12662
|
+
// ============================================================================
|
|
12663
|
+
// Public Methods
|
|
12664
|
+
// ============================================================================
|
|
12665
|
+
/**
|
|
12666
|
+
* Claim a message for processing.
|
|
12667
|
+
* Moves the message from its queue to the in-flight set with a visibility timeout.
|
|
12668
|
+
*
|
|
12669
|
+
* @param queueId - The queue to claim from
|
|
12670
|
+
* @param queueKey - The Redis key for the queue sorted set
|
|
12671
|
+
* @param queueItemsKey - The Redis key for the queue items hash
|
|
12672
|
+
* @param consumerId - ID of the consumer claiming the message
|
|
12673
|
+
* @param timeoutMs - Visibility timeout in milliseconds
|
|
12674
|
+
* @returns Claim result with the message if successful
|
|
12675
|
+
*/
|
|
12676
|
+
async claim(queueId, queueKey, queueItemsKey, consumerId, timeoutMs) {
|
|
12677
|
+
const timeout = timeoutMs ?? this.defaultTimeoutMs;
|
|
12678
|
+
const deadline = Date.now() + timeout;
|
|
12679
|
+
const shardId = this.#getShardForQueue(queueId);
|
|
12680
|
+
const inflightKey = this.keys.inflightKey(shardId);
|
|
12681
|
+
const inflightDataKey = this.keys.inflightDataKey(shardId);
|
|
12682
|
+
const result = await this.redis.claimMessage(
|
|
12683
|
+
queueKey,
|
|
12684
|
+
queueItemsKey,
|
|
12685
|
+
inflightKey,
|
|
12686
|
+
inflightDataKey,
|
|
12687
|
+
queueId,
|
|
12688
|
+
consumerId,
|
|
12689
|
+
deadline.toString()
|
|
12690
|
+
);
|
|
12691
|
+
if (!result) {
|
|
12692
|
+
return { claimed: false };
|
|
12693
|
+
}
|
|
12694
|
+
const [messageId, payloadJson] = result;
|
|
12695
|
+
try {
|
|
12696
|
+
const payload = JSON.parse(payloadJson);
|
|
12697
|
+
const message = {
|
|
12698
|
+
messageId,
|
|
12699
|
+
queueId,
|
|
12700
|
+
payload,
|
|
12701
|
+
deadline,
|
|
12702
|
+
consumerId
|
|
12703
|
+
};
|
|
12704
|
+
this.logger.debug("Message claimed", {
|
|
12705
|
+
messageId,
|
|
12706
|
+
queueId,
|
|
12707
|
+
consumerId,
|
|
12708
|
+
deadline
|
|
12709
|
+
});
|
|
12710
|
+
return { claimed: true, message };
|
|
12711
|
+
} catch (error) {
|
|
12712
|
+
this.logger.error("Failed to parse claimed message", {
|
|
12713
|
+
messageId,
|
|
12714
|
+
queueId,
|
|
12715
|
+
error: error instanceof Error ? error.message : String(error)
|
|
12716
|
+
});
|
|
12717
|
+
await this.#removeFromInflight(shardId, messageId, queueId);
|
|
12718
|
+
return { claimed: false };
|
|
12719
|
+
}
|
|
12720
|
+
}
|
|
12721
|
+
/**
|
|
12722
|
+
* Claim multiple messages for processing (batch claim).
|
|
12723
|
+
* Moves up to maxCount messages from the queue to the in-flight set.
|
|
12724
|
+
*
|
|
12725
|
+
* @param queueId - The queue to claim from
|
|
12726
|
+
* @param queueKey - The Redis key for the queue sorted set
|
|
12727
|
+
* @param queueItemsKey - The Redis key for the queue items hash
|
|
12728
|
+
* @param consumerId - ID of the consumer claiming the messages
|
|
12729
|
+
* @param maxCount - Maximum number of messages to claim
|
|
12730
|
+
* @param timeoutMs - Visibility timeout in milliseconds
|
|
12731
|
+
* @returns Array of claimed messages
|
|
12732
|
+
*/
|
|
12733
|
+
async claimBatch(queueId, queueKey, queueItemsKey, consumerId, maxCount, timeoutMs) {
|
|
12734
|
+
const timeout = timeoutMs ?? this.defaultTimeoutMs;
|
|
12735
|
+
const deadline = Date.now() + timeout;
|
|
12736
|
+
const shardId = this.#getShardForQueue(queueId);
|
|
12737
|
+
const inflightKey = this.keys.inflightKey(shardId);
|
|
12738
|
+
const inflightDataKey = this.keys.inflightDataKey(shardId);
|
|
12739
|
+
const result = await this.redis.claimMessageBatch(
|
|
12740
|
+
queueKey,
|
|
12741
|
+
queueItemsKey,
|
|
12742
|
+
inflightKey,
|
|
12743
|
+
inflightDataKey,
|
|
12744
|
+
queueId,
|
|
12745
|
+
deadline.toString(),
|
|
12746
|
+
maxCount.toString()
|
|
12747
|
+
);
|
|
12748
|
+
if (!result || result.length === 0) {
|
|
12749
|
+
return [];
|
|
12750
|
+
}
|
|
12751
|
+
const messages = [];
|
|
12752
|
+
for (let i = 0; i < result.length; i += 2) {
|
|
12753
|
+
const messageId = result[i];
|
|
12754
|
+
const payloadJson = result[i + 1];
|
|
12755
|
+
if (!messageId || !payloadJson) {
|
|
12756
|
+
continue;
|
|
12757
|
+
}
|
|
12758
|
+
try {
|
|
12759
|
+
const payload = JSON.parse(payloadJson);
|
|
12760
|
+
messages.push({
|
|
12761
|
+
messageId,
|
|
12762
|
+
queueId,
|
|
12763
|
+
payload,
|
|
12764
|
+
deadline,
|
|
12765
|
+
consumerId
|
|
12766
|
+
});
|
|
12767
|
+
} catch (error) {
|
|
12768
|
+
this.logger.error("Failed to parse claimed message in batch", {
|
|
12769
|
+
messageId,
|
|
12770
|
+
queueId,
|
|
12771
|
+
error: error instanceof Error ? error.message : String(error)
|
|
12772
|
+
});
|
|
12773
|
+
await this.#removeFromInflight(shardId, messageId, queueId);
|
|
12774
|
+
}
|
|
12775
|
+
}
|
|
12776
|
+
if (messages.length > 0) {
|
|
12777
|
+
this.logger.debug("Batch claimed messages", {
|
|
12778
|
+
queueId,
|
|
12779
|
+
consumerId,
|
|
12780
|
+
count: messages.length,
|
|
12781
|
+
deadline
|
|
12782
|
+
});
|
|
12783
|
+
}
|
|
12784
|
+
return messages;
|
|
12785
|
+
}
|
|
12786
|
+
/**
|
|
12787
|
+
* Extend the visibility timeout for a message (heartbeat).
|
|
12788
|
+
*
|
|
12789
|
+
* @param messageId - The message ID
|
|
12790
|
+
* @param queueId - The queue ID
|
|
12791
|
+
* @param extendMs - Additional milliseconds to add to the deadline
|
|
12792
|
+
* @returns true if the heartbeat was successful
|
|
12793
|
+
*/
|
|
12794
|
+
async heartbeat(messageId, queueId, extendMs) {
|
|
12795
|
+
const shardId = this.#getShardForQueue(queueId);
|
|
12796
|
+
const inflightKey = this.keys.inflightKey(shardId);
|
|
12797
|
+
const member = this.#makeMember(messageId, queueId);
|
|
12798
|
+
const newDeadline = Date.now() + extendMs;
|
|
12799
|
+
const result = await this.redis.heartbeatMessage(inflightKey, member, newDeadline.toString());
|
|
12800
|
+
const success = result === 1;
|
|
12801
|
+
if (success) {
|
|
12802
|
+
this.logger.debug("Heartbeat successful", {
|
|
12803
|
+
messageId,
|
|
12804
|
+
queueId,
|
|
12805
|
+
newDeadline
|
|
12806
|
+
});
|
|
12807
|
+
}
|
|
12808
|
+
return success;
|
|
12809
|
+
}
|
|
12810
|
+
/**
|
|
12811
|
+
* Mark a message as successfully processed.
|
|
12812
|
+
* Removes the message from in-flight tracking.
|
|
12813
|
+
*
|
|
12814
|
+
* @param messageId - The message ID
|
|
12815
|
+
* @param queueId - The queue ID
|
|
12816
|
+
*/
|
|
12817
|
+
async complete(messageId, queueId) {
|
|
12818
|
+
const shardId = this.#getShardForQueue(queueId);
|
|
12819
|
+
await this.#removeFromInflight(shardId, messageId, queueId);
|
|
12820
|
+
this.logger.debug("Message completed", {
|
|
12821
|
+
messageId,
|
|
12822
|
+
queueId
|
|
12823
|
+
});
|
|
12824
|
+
}
|
|
12825
|
+
/**
|
|
12826
|
+
* Release a message back to its queue.
|
|
12827
|
+
* Used when processing fails or consumer wants to retry later.
|
|
12828
|
+
*
|
|
12829
|
+
* @param messageId - The message ID
|
|
12830
|
+
* @param queueId - The queue ID
|
|
12831
|
+
* @param queueKey - The Redis key for the queue
|
|
12832
|
+
* @param queueItemsKey - The Redis key for the queue items hash
|
|
12833
|
+
* @param masterQueueKey - The Redis key for the master queue
|
|
12834
|
+
* @param score - Optional score for the message (defaults to now)
|
|
12835
|
+
*/
|
|
12836
|
+
async release(messageId, queueId, queueKey, queueItemsKey, masterQueueKey, score) {
|
|
12837
|
+
const shardId = this.#getShardForQueue(queueId);
|
|
12838
|
+
const inflightKey = this.keys.inflightKey(shardId);
|
|
12839
|
+
const inflightDataKey = this.keys.inflightDataKey(shardId);
|
|
12840
|
+
const member = this.#makeMember(messageId, queueId);
|
|
12841
|
+
const messageScore = score ?? Date.now();
|
|
12842
|
+
await this.redis.releaseMessage(
|
|
12843
|
+
inflightKey,
|
|
12844
|
+
inflightDataKey,
|
|
12845
|
+
queueKey,
|
|
12846
|
+
queueItemsKey,
|
|
12847
|
+
masterQueueKey,
|
|
12848
|
+
member,
|
|
12849
|
+
messageId,
|
|
12850
|
+
messageScore.toString(),
|
|
12851
|
+
queueId
|
|
12852
|
+
);
|
|
12853
|
+
this.logger.debug("Message released", {
|
|
12854
|
+
messageId,
|
|
12855
|
+
queueId,
|
|
12856
|
+
score: messageScore
|
|
12857
|
+
});
|
|
12858
|
+
}
|
|
12859
|
+
/**
|
|
12860
|
+
* Release multiple messages back to their queue in a single operation.
|
|
12861
|
+
* Used when processing fails or consumer wants to retry later.
|
|
12862
|
+
* All messages must belong to the same queue.
|
|
12863
|
+
*
|
|
12864
|
+
* @param messages - Array of messages to release (must all have same queueId)
|
|
12865
|
+
* @param queueId - The queue ID
|
|
12866
|
+
* @param queueKey - The Redis key for the queue
|
|
12867
|
+
* @param queueItemsKey - The Redis key for the queue items hash
|
|
12868
|
+
* @param masterQueueKey - The Redis key for the master queue
|
|
12869
|
+
* @param score - Optional score for the messages (defaults to now)
|
|
12870
|
+
*/
|
|
12871
|
+
async releaseBatch(messages, queueId, queueKey, queueItemsKey, masterQueueKey, score) {
|
|
12872
|
+
if (messages.length === 0) {
|
|
12873
|
+
return;
|
|
12874
|
+
}
|
|
12875
|
+
const shardId = this.#getShardForQueue(queueId);
|
|
12876
|
+
const inflightKey = this.keys.inflightKey(shardId);
|
|
12877
|
+
const inflightDataKey = this.keys.inflightDataKey(shardId);
|
|
12878
|
+
const messageScore = score ?? Date.now();
|
|
12879
|
+
const messageIds = messages.map((m) => m.messageId);
|
|
12880
|
+
const members = messages.map((m) => this.#makeMember(m.messageId, queueId));
|
|
12881
|
+
await this.redis.releaseMessageBatch(
|
|
12882
|
+
inflightKey,
|
|
12883
|
+
inflightDataKey,
|
|
12884
|
+
queueKey,
|
|
12885
|
+
queueItemsKey,
|
|
12886
|
+
masterQueueKey,
|
|
12887
|
+
messageScore.toString(),
|
|
12888
|
+
queueId,
|
|
12889
|
+
...members,
|
|
12890
|
+
...messageIds
|
|
12891
|
+
);
|
|
12892
|
+
this.logger.debug("Batch messages released", {
|
|
12893
|
+
queueId,
|
|
12894
|
+
count: messages.length,
|
|
12895
|
+
score: messageScore
|
|
12896
|
+
});
|
|
12897
|
+
}
|
|
12898
|
+
/**
|
|
12899
|
+
* Reclaim timed-out messages from a shard.
|
|
12900
|
+
* Returns messages to their original queues.
|
|
12901
|
+
*
|
|
12902
|
+
* @param shardId - The shard to check
|
|
12903
|
+
* @param getQueueKeys - Function to get queue keys for a queue ID
|
|
12904
|
+
* @returns Number of messages reclaimed
|
|
12905
|
+
*/
|
|
12906
|
+
async reclaimTimedOut(shardId, getQueueKeys) {
|
|
12907
|
+
const inflightKey = this.keys.inflightKey(shardId);
|
|
12908
|
+
const inflightDataKey = this.keys.inflightDataKey(shardId);
|
|
12909
|
+
const now = Date.now();
|
|
12910
|
+
const timedOut = await this.redis.zrangebyscore(
|
|
12911
|
+
inflightKey,
|
|
12912
|
+
"-inf",
|
|
12913
|
+
now,
|
|
12914
|
+
"WITHSCORES",
|
|
12915
|
+
"LIMIT",
|
|
12916
|
+
0,
|
|
12917
|
+
100
|
|
12918
|
+
// Process in batches
|
|
12919
|
+
);
|
|
12920
|
+
let reclaimed = 0;
|
|
12921
|
+
for (let i = 0; i < timedOut.length; i += 2) {
|
|
12922
|
+
const member = timedOut[i];
|
|
12923
|
+
const originalScore = timedOut[i + 1];
|
|
12924
|
+
if (!member || !originalScore) {
|
|
12925
|
+
continue;
|
|
12926
|
+
}
|
|
12927
|
+
const { messageId, queueId } = this.#parseMember(member);
|
|
12928
|
+
const { queueKey, queueItemsKey, masterQueueKey } = getQueueKeys(queueId);
|
|
12929
|
+
try {
|
|
12930
|
+
const score = parseFloat(originalScore) || now;
|
|
12931
|
+
await this.redis.releaseMessage(
|
|
12932
|
+
inflightKey,
|
|
12933
|
+
inflightDataKey,
|
|
12934
|
+
queueKey,
|
|
12935
|
+
queueItemsKey,
|
|
12936
|
+
masterQueueKey,
|
|
12937
|
+
member,
|
|
12938
|
+
messageId,
|
|
12939
|
+
score.toString(),
|
|
12940
|
+
queueId
|
|
12941
|
+
);
|
|
12942
|
+
reclaimed++;
|
|
12943
|
+
this.logger.debug("Reclaimed timed-out message", {
|
|
12944
|
+
messageId,
|
|
12945
|
+
queueId,
|
|
12946
|
+
originalScore
|
|
12947
|
+
});
|
|
12948
|
+
} catch (error) {
|
|
12949
|
+
this.logger.error("Failed to reclaim message", {
|
|
12950
|
+
messageId,
|
|
12951
|
+
queueId,
|
|
12952
|
+
error: error instanceof Error ? error.message : String(error)
|
|
12953
|
+
});
|
|
12954
|
+
}
|
|
12955
|
+
}
|
|
12956
|
+
return reclaimed;
|
|
12957
|
+
}
|
|
12958
|
+
/**
|
|
12959
|
+
* Get all in-flight messages for a shard.
|
|
12960
|
+
*/
|
|
12961
|
+
async getInflightMessages(shardId) {
|
|
12962
|
+
const inflightKey = this.keys.inflightKey(shardId);
|
|
12963
|
+
const results = await this.redis.zrange(inflightKey, 0, -1, "WITHSCORES");
|
|
12964
|
+
const messages = [];
|
|
12965
|
+
for (let i = 0; i < results.length; i += 2) {
|
|
12966
|
+
const member = results[i];
|
|
12967
|
+
const deadlineStr = results[i + 1];
|
|
12968
|
+
if (!member || !deadlineStr) {
|
|
12969
|
+
continue;
|
|
12970
|
+
}
|
|
12971
|
+
const deadline = parseFloat(deadlineStr);
|
|
12972
|
+
const { messageId, queueId } = this.#parseMember(member);
|
|
12973
|
+
messages.push({ messageId, queueId, deadline });
|
|
12974
|
+
}
|
|
12975
|
+
return messages;
|
|
12976
|
+
}
|
|
12977
|
+
/**
|
|
12978
|
+
* Get count of in-flight messages for a shard.
|
|
12979
|
+
*/
|
|
12980
|
+
async getInflightCount(shardId) {
|
|
12981
|
+
const inflightKey = this.keys.inflightKey(shardId);
|
|
12982
|
+
return await this.redis.zcard(inflightKey);
|
|
12983
|
+
}
|
|
12984
|
+
/**
|
|
12985
|
+
* Get total in-flight count across all shards.
|
|
12986
|
+
*/
|
|
12987
|
+
async getTotalInflightCount() {
|
|
12988
|
+
const counts = await Promise.all(
|
|
12989
|
+
Array.from({ length: this.shardCount }, (_, i) => this.getInflightCount(i))
|
|
12990
|
+
);
|
|
12991
|
+
return counts.reduce((sum, count) => sum + count, 0);
|
|
12992
|
+
}
|
|
12993
|
+
/**
|
|
12994
|
+
* Close the Redis connection.
|
|
12995
|
+
*/
|
|
12996
|
+
async close() {
|
|
12997
|
+
await this.redis.quit();
|
|
12998
|
+
}
|
|
12999
|
+
// ============================================================================
|
|
13000
|
+
// Private Methods
|
|
13001
|
+
// ============================================================================
|
|
13002
|
+
/**
|
|
13003
|
+
* Map queue ID to shard using Jump Consistent Hash.
|
|
13004
|
+
* Must use same algorithm as MasterQueue for consistency.
|
|
13005
|
+
*/
|
|
13006
|
+
#getShardForQueue(queueId) {
|
|
13007
|
+
return serverOnly.jumpHash(queueId, this.shardCount);
|
|
13008
|
+
}
|
|
13009
|
+
#makeMember(messageId, queueId) {
|
|
13010
|
+
return `${messageId}:${queueId}`;
|
|
13011
|
+
}
|
|
13012
|
+
#parseMember(member) {
|
|
13013
|
+
const colonIndex = member.indexOf(":");
|
|
13014
|
+
if (colonIndex === -1) {
|
|
13015
|
+
return { messageId: member, queueId: "" };
|
|
13016
|
+
}
|
|
13017
|
+
return {
|
|
13018
|
+
messageId: member.substring(0, colonIndex),
|
|
13019
|
+
queueId: member.substring(colonIndex + 1)
|
|
13020
|
+
};
|
|
13021
|
+
}
|
|
13022
|
+
async #removeFromInflight(shardId, messageId, queueId) {
|
|
13023
|
+
const inflightKey = this.keys.inflightKey(shardId);
|
|
13024
|
+
const inflightDataKey = this.keys.inflightDataKey(shardId);
|
|
13025
|
+
const member = this.#makeMember(messageId, queueId);
|
|
13026
|
+
const pipeline = this.redis.pipeline();
|
|
13027
|
+
pipeline.zrem(inflightKey, member);
|
|
13028
|
+
pipeline.hdel(inflightDataKey, messageId);
|
|
13029
|
+
await pipeline.exec();
|
|
13030
|
+
}
|
|
13031
|
+
#registerCommands() {
|
|
13032
|
+
this.redis.defineCommand("claimMessage", {
|
|
13033
|
+
numberOfKeys: 4,
|
|
13034
|
+
lua: `
|
|
13035
|
+
local queueKey = KEYS[1]
|
|
13036
|
+
local queueItemsKey = KEYS[2]
|
|
13037
|
+
local inflightKey = KEYS[3]
|
|
13038
|
+
local inflightDataKey = KEYS[4]
|
|
13039
|
+
|
|
13040
|
+
local queueId = ARGV[1]
|
|
13041
|
+
local consumerId = ARGV[2]
|
|
13042
|
+
local deadline = tonumber(ARGV[3])
|
|
13043
|
+
|
|
13044
|
+
-- Get oldest message from queue
|
|
13045
|
+
local items = redis.call('ZRANGE', queueKey, 0, 0)
|
|
13046
|
+
if #items == 0 then
|
|
13047
|
+
return nil
|
|
13048
|
+
end
|
|
13049
|
+
|
|
13050
|
+
local messageId = items[1]
|
|
13051
|
+
|
|
13052
|
+
-- Get message data
|
|
13053
|
+
local payload = redis.call('HGET', queueItemsKey, messageId)
|
|
13054
|
+
if not payload then
|
|
13055
|
+
-- Message data missing, remove from queue and return nil
|
|
13056
|
+
redis.call('ZREM', queueKey, messageId)
|
|
13057
|
+
return nil
|
|
13058
|
+
end
|
|
13059
|
+
|
|
13060
|
+
-- Remove from queue
|
|
13061
|
+
redis.call('ZREM', queueKey, messageId)
|
|
13062
|
+
redis.call('HDEL', queueItemsKey, messageId)
|
|
13063
|
+
|
|
13064
|
+
-- Add to in-flight set with deadline
|
|
13065
|
+
local member = messageId .. ':' .. queueId
|
|
13066
|
+
redis.call('ZADD', inflightKey, deadline, member)
|
|
13067
|
+
|
|
13068
|
+
-- Store message data for potential release
|
|
13069
|
+
redis.call('HSET', inflightDataKey, messageId, payload)
|
|
13070
|
+
|
|
13071
|
+
return {messageId, payload}
|
|
13072
|
+
`
|
|
13073
|
+
});
|
|
13074
|
+
this.redis.defineCommand("claimMessageBatch", {
|
|
13075
|
+
numberOfKeys: 4,
|
|
13076
|
+
lua: `
|
|
13077
|
+
local queueKey = KEYS[1]
|
|
13078
|
+
local queueItemsKey = KEYS[2]
|
|
13079
|
+
local inflightKey = KEYS[3]
|
|
13080
|
+
local inflightDataKey = KEYS[4]
|
|
13081
|
+
|
|
13082
|
+
local queueId = ARGV[1]
|
|
13083
|
+
local deadline = tonumber(ARGV[2])
|
|
13084
|
+
local maxCount = tonumber(ARGV[3])
|
|
13085
|
+
|
|
13086
|
+
-- Get up to maxCount oldest messages from queue
|
|
13087
|
+
local items = redis.call('ZRANGE', queueKey, 0, maxCount - 1)
|
|
13088
|
+
if #items == 0 then
|
|
13089
|
+
return {}
|
|
13090
|
+
end
|
|
13091
|
+
|
|
13092
|
+
local results = {}
|
|
13093
|
+
|
|
13094
|
+
for i, messageId in ipairs(items) do
|
|
13095
|
+
-- Get message data
|
|
13096
|
+
local payload = redis.call('HGET', queueItemsKey, messageId)
|
|
13097
|
+
|
|
13098
|
+
if payload then
|
|
13099
|
+
-- Remove from queue
|
|
13100
|
+
redis.call('ZREM', queueKey, messageId)
|
|
13101
|
+
redis.call('HDEL', queueItemsKey, messageId)
|
|
13102
|
+
|
|
13103
|
+
-- Add to in-flight set with deadline
|
|
13104
|
+
local member = messageId .. ':' .. queueId
|
|
13105
|
+
redis.call('ZADD', inflightKey, deadline, member)
|
|
13106
|
+
|
|
13107
|
+
-- Store message data for potential release
|
|
13108
|
+
redis.call('HSET', inflightDataKey, messageId, payload)
|
|
13109
|
+
|
|
13110
|
+
-- Add to results
|
|
13111
|
+
table.insert(results, messageId)
|
|
13112
|
+
table.insert(results, payload)
|
|
13113
|
+
else
|
|
13114
|
+
-- Message data missing, remove from queue
|
|
13115
|
+
redis.call('ZREM', queueKey, messageId)
|
|
13116
|
+
end
|
|
13117
|
+
end
|
|
13118
|
+
|
|
13119
|
+
return results
|
|
13120
|
+
`
|
|
13121
|
+
});
|
|
13122
|
+
this.redis.defineCommand("releaseMessage", {
|
|
13123
|
+
numberOfKeys: 5,
|
|
13124
|
+
lua: `
|
|
13125
|
+
local inflightKey = KEYS[1]
|
|
13126
|
+
local inflightDataKey = KEYS[2]
|
|
13127
|
+
local queueKey = KEYS[3]
|
|
13128
|
+
local queueItemsKey = KEYS[4]
|
|
13129
|
+
local masterQueueKey = KEYS[5]
|
|
13130
|
+
|
|
13131
|
+
local member = ARGV[1]
|
|
13132
|
+
local messageId = ARGV[2]
|
|
13133
|
+
local score = tonumber(ARGV[3])
|
|
13134
|
+
local queueId = ARGV[4]
|
|
13135
|
+
|
|
13136
|
+
-- Get message data from in-flight
|
|
13137
|
+
local payload = redis.call('HGET', inflightDataKey, messageId)
|
|
13138
|
+
if not payload then
|
|
13139
|
+
-- Message not in in-flight or already released
|
|
13140
|
+
return 0
|
|
13141
|
+
end
|
|
13142
|
+
|
|
13143
|
+
-- Remove from in-flight
|
|
13144
|
+
redis.call('ZREM', inflightKey, member)
|
|
13145
|
+
redis.call('HDEL', inflightDataKey, messageId)
|
|
13146
|
+
|
|
13147
|
+
-- Add back to queue
|
|
13148
|
+
redis.call('ZADD', queueKey, score, messageId)
|
|
13149
|
+
redis.call('HSET', queueItemsKey, messageId, payload)
|
|
13150
|
+
|
|
13151
|
+
-- Update master queue with oldest message timestamp
|
|
13152
|
+
-- This ensures delayed messages don't push the queue priority to the future
|
|
13153
|
+
-- when there are other ready messages in the queue
|
|
13154
|
+
local oldest = redis.call('ZRANGE', queueKey, 0, 0, 'WITHSCORES')
|
|
13155
|
+
if #oldest >= 2 then
|
|
13156
|
+
redis.call('ZADD', masterQueueKey, oldest[2], queueId)
|
|
13157
|
+
end
|
|
13158
|
+
|
|
13159
|
+
return 1
|
|
13160
|
+
`
|
|
13161
|
+
});
|
|
13162
|
+
this.redis.defineCommand("releaseMessageBatch", {
|
|
13163
|
+
numberOfKeys: 5,
|
|
13164
|
+
lua: `
|
|
13165
|
+
local inflightKey = KEYS[1]
|
|
13166
|
+
local inflightDataKey = KEYS[2]
|
|
13167
|
+
local queueKey = KEYS[3]
|
|
13168
|
+
local queueItemsKey = KEYS[4]
|
|
13169
|
+
local masterQueueKey = KEYS[5]
|
|
13170
|
+
|
|
13171
|
+
local score = tonumber(ARGV[1])
|
|
13172
|
+
local queueId = ARGV[2]
|
|
13173
|
+
|
|
13174
|
+
-- Remaining args are: members..., messageIds...
|
|
13175
|
+
-- Calculate how many messages we have
|
|
13176
|
+
local numMessages = (table.getn(ARGV) - 2) / 2
|
|
13177
|
+
local membersStart = 3
|
|
13178
|
+
local messageIdsStart = membersStart + numMessages
|
|
13179
|
+
|
|
13180
|
+
local releasedCount = 0
|
|
13181
|
+
|
|
13182
|
+
for i = 0, numMessages - 1 do
|
|
13183
|
+
local member = ARGV[membersStart + i]
|
|
13184
|
+
local messageId = ARGV[messageIdsStart + i]
|
|
13185
|
+
|
|
13186
|
+
-- Get message data from in-flight
|
|
13187
|
+
local payload = redis.call('HGET', inflightDataKey, messageId)
|
|
13188
|
+
if payload then
|
|
13189
|
+
-- Remove from in-flight
|
|
13190
|
+
redis.call('ZREM', inflightKey, member)
|
|
13191
|
+
redis.call('HDEL', inflightDataKey, messageId)
|
|
13192
|
+
|
|
13193
|
+
-- Add back to queue
|
|
13194
|
+
redis.call('ZADD', queueKey, score, messageId)
|
|
13195
|
+
redis.call('HSET', queueItemsKey, messageId, payload)
|
|
13196
|
+
|
|
13197
|
+
releasedCount = releasedCount + 1
|
|
13198
|
+
end
|
|
13199
|
+
end
|
|
13200
|
+
|
|
13201
|
+
-- Update master queue with oldest message timestamp (only once at the end)
|
|
13202
|
+
if releasedCount > 0 then
|
|
13203
|
+
local oldest = redis.call('ZRANGE', queueKey, 0, 0, 'WITHSCORES')
|
|
13204
|
+
if #oldest >= 2 then
|
|
13205
|
+
redis.call('ZADD', masterQueueKey, oldest[2], queueId)
|
|
13206
|
+
end
|
|
13207
|
+
end
|
|
13208
|
+
|
|
13209
|
+
return releasedCount
|
|
13210
|
+
`
|
|
13211
|
+
});
|
|
13212
|
+
this.redis.defineCommand("heartbeatMessage", {
|
|
13213
|
+
numberOfKeys: 1,
|
|
13214
|
+
lua: `
|
|
13215
|
+
local inflightKey = KEYS[1]
|
|
13216
|
+
local member = ARGV[1]
|
|
13217
|
+
local newDeadline = tonumber(ARGV[2])
|
|
13218
|
+
|
|
13219
|
+
-- Check if member exists in the in-flight set
|
|
13220
|
+
local score = redis.call('ZSCORE', inflightKey, member)
|
|
13221
|
+
if not score then
|
|
13222
|
+
return 0
|
|
13223
|
+
end
|
|
13224
|
+
|
|
13225
|
+
-- Update the deadline
|
|
13226
|
+
redis.call('ZADD', inflightKey, 'XX', newDeadline, member)
|
|
13227
|
+
return 1
|
|
13228
|
+
`
|
|
13229
|
+
});
|
|
13230
|
+
}
|
|
13231
|
+
};
|
|
13232
|
+
|
|
13233
|
+
// src/fair-queue/workerQueue.ts
|
|
13234
|
+
// Manages per-consumer "worker queues": plain Redis lists holding claimed
// message keys. Producers push claimed keys here; consumers pop them (either
// blocking or non-blocking via a custom Lua command that also returns the
// remaining length).
var WorkerQueueManager = class {
  constructor(options) {
    this.options = options;
    this.redis = createRedisClient(options.redis);
    this.keys = options.keys;
    // Fall back to a silent logger so callers may omit one.
    this.logger = options.logger ?? {
      debug: () => {
      },
      error: () => {
      }
    };
    this.#registerCommands();
  }
  redis;
  keys;
  logger;
  // ============================================================================
  // Public Methods
  // ============================================================================
  /**
   * Push a message key to a worker queue.
   * Called after claiming a message from the message queue.
   *
   * @param workerQueueId - The worker queue identifier
   * @param messageKey - The message key to push (typically "messageId:queueId")
   */
  async push(workerQueueId, messageKey) {
    const workerQueueKey = this.keys.workerQueueKey(workerQueueId);
    await this.redis.rpush(workerQueueKey, messageKey);
    this.logger.debug("Pushed to worker queue", {
      workerQueueId,
      workerQueueKey,
      messageKey
    });
  }
  /**
   * Push multiple message keys to a worker queue.
   * No-op for an empty array (avoids an invalid zero-argument RPUSH).
   *
   * @param workerQueueId - The worker queue identifier
   * @param messageKeys - The message keys to push
   */
  async pushBatch(workerQueueId, messageKeys) {
    if (messageKeys.length === 0) {
      return;
    }
    const workerQueueKey = this.keys.workerQueueKey(workerQueueId);
    await this.redis.rpush(workerQueueKey, ...messageKeys);
    this.logger.debug("Pushed batch to worker queue", {
      workerQueueId,
      workerQueueKey,
      count: messageKeys.length
    });
  }
  /**
   * Blocking pop from a worker queue.
   * Waits until a message is available or timeout expires.
   *
   * Uses a dedicated duplicated connection, because BLPOP blocks the whole
   * connection; aborting is implemented by disconnecting that duplicate,
   * which makes the in-flight BLPOP reject (then mapped to null below).
   *
   * @param workerQueueId - The worker queue identifier
   * @param timeoutSeconds - Maximum time to wait (0 = wait forever)
   * @param signal - Optional abort signal to cancel waiting
   * @returns The message key, or null if timeout
   */
  async blockingPop(workerQueueId, timeoutSeconds, signal) {
    const workerQueueKey = this.keys.workerQueueKey(workerQueueId);
    const blockingClient = this.redis.duplicate();
    const cleanup = signal ? () => {
      blockingClient.disconnect();
    } : null;
    try {
      if (signal && cleanup) {
        signal.addEventListener("abort", cleanup, { once: true });
        // Re-check after registering: the signal may already be aborted.
        if (signal.aborted) {
          return null;
        }
      }
      const result = await blockingClient.blpop(workerQueueKey, timeoutSeconds);
      if (!result) {
        return null;
      }
      // BLPOP returns [key, value]; only the value is needed.
      const [, messageKey] = result;
      this.logger.debug("Blocking pop received message", {
        workerQueueId,
        workerQueueKey,
        messageKey
      });
      return messageKey;
    } catch (error) {
      // An abort-triggered disconnect surfaces here; treat it as "no message".
      if (signal?.aborted) {
        return null;
      }
      this.logger.error("Blocking pop error", {
        workerQueueId,
        error: error instanceof Error ? error.message : String(error)
      });
      throw error;
    } finally {
      if (cleanup && signal) {
        signal.removeEventListener("abort", cleanup);
      }
      // Best-effort close of the duplicated connection (quit may fail if
      // already disconnected by the abort path).
      await blockingClient.quit().catch(() => {
      });
    }
  }
  /**
   * Non-blocking pop from a worker queue.
   * Atomically pops one key and returns the remaining length (Lua command).
   *
   * @param workerQueueId - The worker queue identifier
   * @returns The message key and queue length, or null if empty
   */
  async pop(workerQueueId) {
    const workerQueueKey = this.keys.workerQueueKey(workerQueueId);
    const result = await this.redis.popWithLength(workerQueueKey);
    if (!result) {
      return null;
    }
    const [messageKey, queueLength] = result;
    this.logger.debug("Non-blocking pop received message", {
      workerQueueId,
      workerQueueKey,
      messageKey,
      queueLength
    });
    return { messageKey, queueLength: Number(queueLength) };
  }
  /**
   * Get the current length of a worker queue.
   */
  async getLength(workerQueueId) {
    const workerQueueKey = this.keys.workerQueueKey(workerQueueId);
    return await this.redis.llen(workerQueueKey);
  }
  /**
   * Peek at all messages in a worker queue without removing them.
   * Useful for debugging and tests.
   */
  async peek(workerQueueId) {
    const workerQueueKey = this.keys.workerQueueKey(workerQueueId);
    return await this.redis.lrange(workerQueueKey, 0, -1);
  }
  /**
   * Remove a specific message from the worker queue.
   * Used when a message needs to be removed without processing.
   * LREM count 0 removes every occurrence of the key.
   *
   * @param workerQueueId - The worker queue identifier
   * @param messageKey - The message key to remove
   * @returns Number of removed items
   */
  async remove(workerQueueId, messageKey) {
    const workerQueueKey = this.keys.workerQueueKey(workerQueueId);
    return await this.redis.lrem(workerQueueKey, 0, messageKey);
  }
  /**
   * Clear all messages from a worker queue.
   */
  async clear(workerQueueId) {
    const workerQueueKey = this.keys.workerQueueKey(workerQueueId);
    await this.redis.del(workerQueueKey);
  }
  /**
   * Close the Redis connection.
   */
  async close() {
    await this.redis.quit();
  }
  // ============================================================================
  // Private - Register Commands
  // ============================================================================
  /**
   * Initialize custom Redis commands.
   */
  #registerCommands() {
    this.redis.defineCommand("popWithLength", {
      numberOfKeys: 1,
      lua: `
        local workerQueueKey = KEYS[1]

        -- Pop the first message
        local messageKey = redis.call('LPOP', workerQueueKey)
        if not messageKey then
          return nil
        end

        -- Get remaining queue length
        local queueLength = redis.call('LLEN', workerQueueKey)

        return {messageKey, queueLength}
      `
    });
  }
  /**
   * Register custom commands on an external Redis client.
   * Use this when initializing FairQueue with worker queues.
   * NOTE: same script as #registerCommands, duplicated so an external
   * client can be prepared without touching this manager's own connection.
   */
  registerCommands(redis) {
    redis.defineCommand("popWithLength", {
      numberOfKeys: 1,
      lua: `
        local workerQueueKey = KEYS[1]

        -- Pop the first message
        local messageKey = redis.call('LPOP', workerQueueKey)
        if not messageKey then
          return nil
        end

        -- Get remaining queue length
        local queueLength = redis.call('LLEN', workerQueueKey)

        return {messageKey, queueLength}
      `
    });
  }
};
|
|
13447
|
+
|
|
13448
|
+
// src/fair-queue/keyProducer.ts
|
|
13449
|
+
// Builds the Redis key names used by the fair queue, all of the form
// "<prefix><sep><part><sep>…". Also provides default parsing of tenant /
// group identifiers back out of a queue ID.
var DefaultFairQueueKeyProducer = class {
  prefix;
  separator;
  constructor(options = {}) {
    const { prefix = "fq", separator = ":" } = options;
    this.prefix = prefix;
    this.separator = separator;
  }
  // ============================================================================
  // Master Queue Keys
  // ============================================================================
  masterQueueKey(shardId) {
    return this.#join("master", shardId.toString());
  }
  // ============================================================================
  // Queue Keys
  // ============================================================================
  queueKey(queueId) {
    return this.#join("queue", queueId);
  }
  queueItemsKey(queueId) {
    return this.#join("queue", queueId, "items");
  }
  // ============================================================================
  // Concurrency Keys
  // ============================================================================
  concurrencyKey(groupName, groupId) {
    return this.#join("concurrency", groupName, groupId);
  }
  // ============================================================================
  // In-Flight Keys
  // ============================================================================
  inflightKey(shardId) {
    return this.#join("inflight", shardId.toString());
  }
  inflightDataKey(shardId) {
    return this.#join("inflight", shardId.toString(), "data");
  }
  // ============================================================================
  // Worker Queue Keys
  // ============================================================================
  workerQueueKey(consumerId) {
    return this.#join("worker", consumerId);
  }
  // ============================================================================
  // Dead Letter Queue Keys
  // ============================================================================
  deadLetterQueueKey(tenantId) {
    return this.#join("dlq", tenantId);
  }
  deadLetterQueueDataKey(tenantId) {
    return this.#join("dlq", tenantId, "data");
  }
  // ============================================================================
  // Extraction Methods
  // ============================================================================
  /**
   * Extract tenant ID from a queue ID.
   * Default implementation assumes queue IDs are formatted as: tenant:{tenantId}:...
   * Falls back to the first segment (or "") for other formats.
   * Override this method for custom queue ID formats.
   */
  extractTenantId(queueId) {
    const segments = queueId.split(this.separator);
    const looksLikeTenantFormat = segments.length >= 2 && segments[0] === "tenant" && segments[1];
    return looksLikeTenantFormat ? segments[1] : segments[0] ?? "";
  }
  /**
   * Extract a group ID from a queue ID.
   * Default implementation looks for pattern: {groupName}:{groupId}:...
   * Returns the first non-empty segment following any occurrence of
   * groupName, or "" if none is found.
   * Override this method for custom queue ID formats.
   */
  extractGroupId(groupName, queueId) {
    const segments = queueId.split(this.separator);
    for (const [idx, segment] of segments.entries()) {
      if (idx === segments.length - 1) {
        break;
      }
      if (segment !== groupName) {
        continue;
      }
      const candidate = segments[idx + 1];
      if (candidate) {
        return candidate;
      }
    }
    return "";
  }
  // ============================================================================
  // Helper Methods
  // ============================================================================
  // Joins the configured prefix plus the given parts with the separator.
  #join(...parts) {
    return [this.prefix, ...parts].join(this.separator);
  }
};
|
|
13540
|
+
// Key producer whose tenant/group extraction is delegated to caller-supplied
// callbacks, while key construction is inherited from the default producer.
var CallbackFairQueueKeyProducer = class extends DefaultFairQueueKeyProducer {
  tenantExtractor;
  groupExtractor;
  constructor(options) {
    const { prefix, separator, extractTenantId, extractGroupId } = options;
    super({ prefix, separator });
    this.tenantExtractor = extractTenantId;
    this.groupExtractor = extractGroupId;
  }
  /** Delegates tenant extraction to the configured callback. */
  extractTenantId(queueId) {
    return this.tenantExtractor(queueId);
  }
  /** Delegates group extraction to the configured callback. */
  extractGroupId(groupName, queueId) {
    return this.groupExtractor(groupName, queueId);
  }
};
|
|
13555
|
+
|
|
13556
|
+
// src/fair-queue/scheduler.ts
|
|
13557
|
+
// Base class for fair-queue schedulers. Provides no-op lifecycle hooks that
// subclasses may override, plus pure helpers for grouping queues by tenant
// and filtering tenants that are at concurrency capacity.
var BaseScheduler = class {
  /**
   * Called after processing a message to update scheduler state.
   * Default implementation does nothing.
   */
  async recordProcessed(_tenantId, _queueId) {
  }
  /**
   * Called after processing multiple messages to update scheduler state.
   * Batch variant for efficiency - reduces Redis calls when processing multiple messages.
   * Default implementation does nothing.
   */
  async recordProcessedBatch(_tenantId, _queueId, _count) {
  }
  /**
   * Initialize the scheduler.
   * Default implementation does nothing.
   */
  async initialize() {
  }
  /**
   * Cleanup scheduler resources.
   * Default implementation does nothing.
   */
  async close() {
  }
  /**
   * Helper to group queues by tenant.
   * Preserves input order of queue IDs within each tenant bucket.
   */
  groupQueuesByTenant(queues) {
    const byTenant = /* @__PURE__ */ new Map();
    for (const entry of queues) {
      const bucket = byTenant.get(entry.tenantId);
      if (bucket) {
        bucket.push(entry.queueId);
      } else {
        byTenant.set(entry.tenantId, [entry.queueId]);
      }
    }
    return byTenant;
  }
  /**
   * Helper to convert grouped queues to TenantQueues array.
   */
  toTenantQueuesArray(grouped) {
    const result = [];
    for (const [tenantId, queues] of grouped) {
      result.push({ tenantId, queues });
    }
    return result;
  }
  /**
   * Helper to filter out tenants at capacity.
   * Checks capacity sequentially (one context call per tenant).
   */
  async filterAtCapacity(tenants, context2, groupName = "tenant") {
    const withHeadroom = [];
    for (const tenant of tenants) {
      const atCapacity = await context2.isAtCapacity(groupName, tenant.tenantId);
      if (atCapacity) {
        continue;
      }
      withHeadroom.push(tenant);
    }
    return withHeadroom;
  }
};
|
|
13618
|
+
// Scheduler that never selects anything; useful as a disabled/default
// placeholder where a FairScheduler implementation is required.
var NoopScheduler = class extends BaseScheduler {
  /** Always yields an empty selection regardless of shard or consumer. */
  async selectQueues(_shard, _consumer, _ctx) {
    return [];
  }
};
|
|
13623
|
+
|
|
13624
|
+
// src/fair-queue/schedulers/drr.ts
|
|
13625
|
+
// Deficit Round Robin (DRR) scheduler. Each tenant accumulates "deficit"
// (quantum added per selection round, capped at maxDeficit) and spends it as
// messages are processed; tenants with deficit >= 1 and spare concurrency are
// eligible, ordered by highest deficit first. All deficit mutations run as
// atomic Lua scripts against a single hash.
var DRRScheduler = class extends BaseScheduler {
  constructor(config) {
    super();
    this.config = config;
    this.redis = createRedisClient(config.redis);
    this.keys = config.keys;
    this.quantum = config.quantum;
    this.maxDeficit = config.maxDeficit;
    // Cap on how many due queues are read from a master shard per round.
    this.masterQueueLimit = config.masterQueueLimit ?? 1e3;
    // Fall back to a silent logger so callers may omit one.
    this.logger = config.logger ?? {
      debug: () => {
      },
      error: () => {
      }
    };
    this.#registerCommands();
  }
  redis;
  keys;
  quantum;
  maxDeficit;
  masterQueueLimit;
  logger;
  // ============================================================================
  // FairScheduler Implementation
  // ============================================================================
  /**
   * Select queues for processing using DRR algorithm.
   *
   * Algorithm:
   * 1. Get all queues from the master shard
   * 2. Group by tenant
   * 3. Filter out tenants at concurrency capacity
   * 4. Add quantum to each tenant's deficit (atomically)
   * 5. Select queues from tenants with deficit >= 1
   * 6. Order tenants by deficit (highest first for fairness)
   *
   * Note: quantum is added to every tenant seen in the shard, including
   * tenants currently at capacity — their deficit keeps accruing (up to the
   * cap) while they are blocked.
   */
  async selectQueues(masterQueueShard, consumerId, context2) {
    const queues = await this.#getQueuesFromShard(masterQueueShard);
    if (queues.length === 0) {
      return [];
    }
    const queuesByTenant = this.groupQueuesByTenant(
      queues.map((q) => ({ queueId: q.queueId, tenantId: q.tenantId }))
    );
    const tenantIds = Array.from(queuesByTenant.keys());
    // One atomic round of quantum for every tenant; returns new deficits
    // index-aligned with tenantIds.
    const deficits = await this.#addQuantumToTenants(tenantIds);
    const tenantData = await Promise.all(
      tenantIds.map(async (tenantId, index) => {
        const isAtCapacity = await context2.isAtCapacity("tenant", tenantId);
        return {
          tenantId,
          deficit: deficits[index] ?? 0,
          queues: queuesByTenant.get(tenantId) ?? [],
          isAtCapacity
        };
      })
    );
    const eligibleTenants = tenantData.filter(
      (t) => !t.isAtCapacity && t.deficit >= 1
    );
    const blockedTenants = tenantData.filter((t) => t.isAtCapacity);
    if (blockedTenants.length > 0) {
      this.logger.debug("DRR: tenants blocked by concurrency", {
        blockedCount: blockedTenants.length,
        blockedTenants: blockedTenants.map((t) => t.tenantId)
      });
    }
    // Highest deficit goes first: the most "starved" tenant is served next.
    eligibleTenants.sort((a, b) => b.deficit - a.deficit);
    this.logger.debug("DRR: queue selection complete", {
      totalQueues: queues.length,
      totalTenants: tenantIds.length,
      eligibleTenants: eligibleTenants.length,
      topTenantDeficit: eligibleTenants[0]?.deficit
    });
    return eligibleTenants.map((t) => ({
      tenantId: t.tenantId,
      queues: t.queues
    }));
  }
  /**
   * Record that a message was processed from a tenant.
   * Decrements the tenant's deficit.
   */
  async recordProcessed(tenantId, _queueId) {
    await this.#decrementDeficit(tenantId);
  }
  /**
   * Record that multiple messages were processed from a tenant.
   * Decrements the tenant's deficit by count atomically.
   */
  async recordProcessedBatch(tenantId, _queueId, count) {
    await this.#decrementDeficitBatch(tenantId, count);
  }
  async close() {
    await this.redis.quit();
  }
  // ============================================================================
  // Public Methods for Deficit Management
  // ============================================================================
  /**
   * Get the current deficit for a tenant. Returns 0 when unset.
   */
  async getDeficit(tenantId) {
    const key = this.#deficitKey();
    const value = await this.redis.hget(key, tenantId);
    return value ? parseFloat(value) : 0;
  }
  /**
   * Reset deficit for a tenant.
   * Used when a tenant has no more active queues.
   */
  async resetDeficit(tenantId) {
    const key = this.#deficitKey();
    await this.redis.hdel(key, tenantId);
  }
  /**
   * Get all tenant deficits as a Map of tenantId -> deficit.
   */
  async getAllDeficits() {
    const key = this.#deficitKey();
    const data = await this.redis.hgetall(key);
    const result = /* @__PURE__ */ new Map();
    for (const [tenantId, value] of Object.entries(data)) {
      result.set(tenantId, parseFloat(value));
    }
    return result;
  }
  // ============================================================================
  // Private Methods
  // ============================================================================
  // Derives the deficit hash key from the key producer's prefix.
  // NOTE(review): splits on a hard-coded ":" rather than the producer's
  // configured separator — with a custom separator this uses the full master
  // key as prefix; confirm whether non-":" separators are supported here.
  #deficitKey() {
    return `${this.keys.masterQueueKey(0).split(":")[0]}:drr:deficit`;
  }
  // Reads up to masterQueueLimit queues whose score (due time) is <= now,
  // tagging each with its tenant via the key producer.
  async #getQueuesFromShard(shardKey) {
    const now = Date.now();
    const results = await this.redis.zrangebyscore(
      shardKey,
      "-inf",
      now,
      "WITHSCORES",
      "LIMIT",
      0,
      this.masterQueueLimit
    );
    const queues = [];
    // WITHSCORES returns a flat [member, score, member, score, ...] array.
    for (let i = 0; i < results.length; i += 2) {
      const queueId = results[i];
      const scoreStr = results[i + 1];
      if (queueId && scoreStr) {
        queues.push({
          queueId,
          score: parseFloat(scoreStr),
          tenantId: this.keys.extractTenantId(queueId)
        });
      }
    }
    return queues;
  }
  /**
   * Add quantum to multiple tenants atomically.
   * Returns the new deficit values (index-aligned with tenantIds).
   */
  async #addQuantumToTenants(tenantIds) {
    if (tenantIds.length === 0) {
      return [];
    }
    const key = this.#deficitKey();
    const results = await this.redis.drrAddQuantum(
      key,
      this.quantum.toString(),
      this.maxDeficit.toString(),
      ...tenantIds
    );
    return results.map((r) => parseFloat(r));
  }
  /**
   * Decrement deficit for a tenant atomically (floored at 0).
   */
  async #decrementDeficit(tenantId) {
    const key = this.#deficitKey();
    const result = await this.redis.drrDecrementDeficit(key, tenantId);
    return parseFloat(result);
  }
  /**
   * Decrement deficit for a tenant by a count atomically (floored at 0).
   */
  async #decrementDeficitBatch(tenantId, count) {
    const key = this.#deficitKey();
    const result = await this.redis.drrDecrementDeficitBatch(key, tenantId, count.toString());
    return parseFloat(result);
  }
  // Defines the three Lua scripts used for atomic deficit updates.
  #registerCommands() {
    this.redis.defineCommand("drrAddQuantum", {
      numberOfKeys: 1,
      lua: `
        local deficitKey = KEYS[1]
        local quantum = tonumber(ARGV[1])
        local maxDeficit = tonumber(ARGV[2])
        local results = {}

        for i = 3, #ARGV do
          local tenantId = ARGV[i]

          -- Add quantum to deficit
          local newDeficit = redis.call('HINCRBYFLOAT', deficitKey, tenantId, quantum)
          newDeficit = tonumber(newDeficit)

          -- Cap at maxDeficit
          if newDeficit > maxDeficit then
            redis.call('HSET', deficitKey, tenantId, maxDeficit)
            newDeficit = maxDeficit
          end

          table.insert(results, tostring(newDeficit))
        end

        return results
      `
    });
    this.redis.defineCommand("drrDecrementDeficit", {
      numberOfKeys: 1,
      lua: `
        local deficitKey = KEYS[1]
        local tenantId = ARGV[1]

        local newDeficit = redis.call('HINCRBYFLOAT', deficitKey, tenantId, -1)
        newDeficit = tonumber(newDeficit)

        -- Floor at 0
        if newDeficit < 0 then
          redis.call('HSET', deficitKey, tenantId, 0)
          newDeficit = 0
        end

        return tostring(newDeficit)
      `
    });
    this.redis.defineCommand("drrDecrementDeficitBatch", {
      numberOfKeys: 1,
      lua: `
        local deficitKey = KEYS[1]
        local tenantId = ARGV[1]
        local count = tonumber(ARGV[2])

        local newDeficit = redis.call('HINCRBYFLOAT', deficitKey, tenantId, -count)
        newDeficit = tonumber(newDeficit)

        -- Floor at 0
        if newDeficit < 0 then
          redis.call('HSET', deficitKey, tenantId, 0)
          newDeficit = 0
        end

        return tostring(newDeficit)
      `
    });
  }
};
|
|
13884
|
+
// Neutral bias defaults for WeightedScheduler: all three weighting factors
// are 0, i.e. no bias is applied unless the caller configures one.
var defaultBiases = {
  concurrencyLimitBias: 0,
  availableCapacityBias: 0,
  queueAgeRandomization: 0
};
|
|
13889
|
+
var WeightedScheduler = class extends BaseScheduler {
|
|
13890
|
+
constructor(config) {
|
|
13891
|
+
super();
|
|
13892
|
+
this.config = config;
|
|
13893
|
+
this.redis = createRedisClient(config.redis);
|
|
13894
|
+
this.keys = config.keys;
|
|
13895
|
+
this.rng = seedrandom__default.default(config.seed);
|
|
13896
|
+
this.biases = config.biases ?? defaultBiases;
|
|
13897
|
+
this.defaultTenantLimit = config.defaultTenantConcurrencyLimit ?? 100;
|
|
13898
|
+
this.masterQueueLimit = config.masterQueueLimit ?? 100;
|
|
13899
|
+
this.reuseSnapshotCount = config.reuseSnapshotCount ?? 0;
|
|
13900
|
+
this.maximumTenantCount = config.maximumTenantCount ?? 0;
|
|
13901
|
+
}
|
|
13902
|
+
redis;
|
|
13903
|
+
keys;
|
|
13904
|
+
rng;
|
|
13905
|
+
biases;
|
|
13906
|
+
defaultTenantLimit;
|
|
13907
|
+
masterQueueLimit;
|
|
13908
|
+
reuseSnapshotCount;
|
|
13909
|
+
maximumTenantCount;
|
|
13910
|
+
// Snapshot cache
|
|
13911
|
+
snapshotCache = /* @__PURE__ */ new Map();
|
|
13912
|
+
// ============================================================================
|
|
13913
|
+
// FairScheduler Implementation
|
|
13914
|
+
// ============================================================================
|
|
13915
|
+
async selectQueues(masterQueueShard, consumerId, context2) {
|
|
13916
|
+
const snapshot = await this.#getOrCreateSnapshot(
|
|
13917
|
+
masterQueueShard,
|
|
13918
|
+
consumerId,
|
|
13919
|
+
context2
|
|
13920
|
+
);
|
|
13921
|
+
if (snapshot.queues.length === 0) {
|
|
13922
|
+
return [];
|
|
13923
|
+
}
|
|
13924
|
+
const shuffledTenants = this.#shuffleTenantsByWeight(snapshot);
|
|
13925
|
+
return shuffledTenants.map((tenantId) => ({
|
|
13926
|
+
tenantId,
|
|
13927
|
+
queues: this.#orderQueuesForTenant(snapshot, tenantId)
|
|
13928
|
+
}));
|
|
13929
|
+
}
|
|
13930
|
+
async close() {
|
|
13931
|
+
this.snapshotCache.clear();
|
|
13932
|
+
await this.redis.quit();
|
|
13933
|
+
}
|
|
13934
|
+
// ============================================================================
|
|
13935
|
+
// Private Methods
|
|
13936
|
+
// ============================================================================
|
|
13937
|
+
async #getOrCreateSnapshot(masterQueueShard, consumerId, context2) {
|
|
13938
|
+
const cacheKey = `${masterQueueShard}:${consumerId}`;
|
|
13939
|
+
if (this.reuseSnapshotCount > 0) {
|
|
13940
|
+
const cached = this.snapshotCache.get(cacheKey);
|
|
13941
|
+
if (cached && cached.reuseCount < this.reuseSnapshotCount) {
|
|
13942
|
+
this.snapshotCache.set(cacheKey, {
|
|
13943
|
+
snapshot: cached.snapshot,
|
|
13944
|
+
reuseCount: cached.reuseCount + 1
|
|
13945
|
+
});
|
|
13946
|
+
return cached.snapshot;
|
|
13947
|
+
}
|
|
13948
|
+
}
|
|
13949
|
+
const snapshot = await this.#createSnapshot(masterQueueShard, context2);
|
|
13950
|
+
if (this.reuseSnapshotCount > 0) {
|
|
13951
|
+
this.snapshotCache.set(cacheKey, { snapshot, reuseCount: 0 });
|
|
13952
|
+
}
|
|
13953
|
+
return snapshot;
|
|
13954
|
+
}
|
|
13955
|
+
async #createSnapshot(masterQueueShard, context2) {
|
|
13956
|
+
const now = Date.now();
|
|
13957
|
+
let rawQueues = await this.#getQueuesFromShard(masterQueueShard, now);
|
|
13958
|
+
if (rawQueues.length === 0) {
|
|
13959
|
+
return { id: crypto.randomUUID(), tenants: /* @__PURE__ */ new Map(), queues: [] };
|
|
13960
|
+
}
|
|
13961
|
+
if (this.maximumTenantCount > 0) {
|
|
13962
|
+
rawQueues = this.#selectTopTenantQueues(rawQueues);
|
|
13963
|
+
}
|
|
13964
|
+
const tenantIds = /* @__PURE__ */ new Set();
|
|
13965
|
+
const queuesByTenant = /* @__PURE__ */ new Map();
|
|
13966
|
+
for (const queue of rawQueues) {
|
|
13967
|
+
tenantIds.add(queue.tenantId);
|
|
13968
|
+
const tenantQueues = queuesByTenant.get(queue.tenantId) ?? [];
|
|
13969
|
+
tenantQueues.push({
|
|
13970
|
+
queueId: queue.queueId,
|
|
13971
|
+
age: now - queue.score
|
|
13972
|
+
});
|
|
13973
|
+
queuesByTenant.set(queue.tenantId, tenantQueues);
|
|
13974
|
+
}
|
|
13975
|
+
const tenants = /* @__PURE__ */ new Map();
|
|
13976
|
+
for (const tenantId of tenantIds) {
|
|
13977
|
+
const [current, limit] = await Promise.all([
|
|
13978
|
+
context2.getCurrentConcurrency("tenant", tenantId),
|
|
13979
|
+
context2.getConcurrencyLimit("tenant", tenantId)
|
|
13980
|
+
]);
|
|
13981
|
+
if (current >= limit) {
|
|
13982
|
+
continue;
|
|
13983
|
+
}
|
|
13984
|
+
tenants.set(tenantId, {
|
|
13985
|
+
tenantId,
|
|
13986
|
+
concurrency: { current, limit },
|
|
13987
|
+
queues: queuesByTenant.get(tenantId) ?? []
|
|
13988
|
+
});
|
|
13989
|
+
}
|
|
13990
|
+
const queues = rawQueues.filter((q) => tenants.has(q.tenantId)).map((q) => ({
|
|
13991
|
+
queueId: q.queueId,
|
|
13992
|
+
tenantId: q.tenantId,
|
|
13993
|
+
age: now - q.score
|
|
13994
|
+
}));
|
|
13995
|
+
return {
|
|
13996
|
+
id: crypto.randomUUID(),
|
|
13997
|
+
tenants,
|
|
13998
|
+
queues
|
|
13999
|
+
};
|
|
14000
|
+
}
|
|
14001
|
+
async #getQueuesFromShard(shardKey, maxScore) {
|
|
14002
|
+
const results = await this.redis.zrangebyscore(
|
|
14003
|
+
shardKey,
|
|
14004
|
+
"-inf",
|
|
14005
|
+
maxScore,
|
|
14006
|
+
"WITHSCORES",
|
|
14007
|
+
"LIMIT",
|
|
14008
|
+
0,
|
|
14009
|
+
this.masterQueueLimit
|
|
14010
|
+
);
|
|
14011
|
+
const queues = [];
|
|
14012
|
+
for (let i = 0; i < results.length; i += 2) {
|
|
14013
|
+
const queueId = results[i];
|
|
14014
|
+
const scoreStr = results[i + 1];
|
|
14015
|
+
if (queueId && scoreStr) {
|
|
14016
|
+
queues.push({
|
|
14017
|
+
queueId,
|
|
14018
|
+
score: parseFloat(scoreStr),
|
|
14019
|
+
tenantId: this.keys.extractTenantId(queueId)
|
|
14020
|
+
});
|
|
14021
|
+
}
|
|
14022
|
+
}
|
|
14023
|
+
return queues;
|
|
14024
|
+
}
|
|
14025
|
+
// Fairness pre-filter: when maximumTenantCount is configured, weighted-sample
// that many tenants and drop every queue belonging to an unselected tenant.
#selectTopTenantQueues(queues) {
  // Bucket the raw queues per tenant.
  const queuesByTenant = /* @__PURE__ */ new Map();
  for (const queue of queues) {
    const tenantQueues = queuesByTenant.get(queue.tenantId) ?? [];
    tenantQueues.push(queue);
    queuesByTenant.set(queue.tenantId, tenantQueues);
  }
  // Average score per tenant drives the sampling weight.
  // NOTE(review): this averages the raw zset *score* (an enqueue timestamp),
  // not an elapsed age — larger values favor tenants with *newer* work, while
  // the snapshot path elsewhere computes age as `now - score`. Confirm this
  // is intentional.
  const tenantAges = Array.from(queuesByTenant.entries()).map(([tenantId, tQueues]) => {
    const avgAge = tQueues.reduce((sum, q) => sum + q.score, 0) / tQueues.length;
    return { tenantId, avgAge };
  });
  const maxAge = Math.max(...tenantAges.map((t) => t.avgAge));
  // Normalize weights to the max; a degenerate all-zero max falls back to a
  // uniform distribution.
  const weightedTenants = maxAge === 0 ? tenantAges.map((t) => ({
    tenantId: t.tenantId,
    weight: 1 / tenantAges.length
  })) : tenantAges.map((t) => ({
    tenantId: t.tenantId,
    weight: t.avgAge / maxAge
  }));
  // Weighted sampling without replacement: draw a point in [0, totalWeight)
  // and walk the cumulative weights to find the owning tenant.
  const selectedTenants = /* @__PURE__ */ new Set();
  let remaining = [...weightedTenants];
  let totalWeight = remaining.reduce((sum, t) => sum + t.weight, 0);
  while (selectedTenants.size < this.maximumTenantCount && remaining.length > 0) {
    let random = this.rng() * totalWeight;
    let index = 0;
    while (random > 0 && index < remaining.length) {
      const item = remaining[index];
      if (item) {
        random -= item.weight;
      }
      index++;
    }
    // The cumulative walk overshoots by one step; clamp back to the hit.
    index = Math.max(0, index - 1);
    const selected = remaining[index];
    if (selected) {
      selectedTenants.add(selected.tenantId);
      totalWeight -= selected.weight;
      remaining.splice(index, 1);
    }
  }
  // Preserve the original queue order; only membership is filtered.
  return queues.filter((q) => selectedTenants.has(q.tenantId));
}
|
|
14067
|
+
// Produce a randomized tenant visit order for a snapshot, optionally biased
// toward tenants with higher concurrency limits and/or more unused capacity.
// With both biases at 0 this degrades to a plain (unweighted) shuffle.
#shuffleTenantsByWeight(snapshot) {
  const tenantIds = Array.from(snapshot.tenants.keys());
  if (tenantIds.length === 0) {
    return [];
  }
  const { concurrencyLimitBias, availableCapacityBias } = this.biases;
  if (concurrencyLimitBias === 0 && availableCapacityBias === 0) {
    return this.#shuffle(tenantIds);
  }
  const maxLimit = Math.max(
    ...tenantIds.map((id) => snapshot.tenants.get(id).concurrency.limit)
  );
  const weightedTenants = tenantIds.map((tenantId) => {
    const tenant = snapshot.tenants.get(tenantId);
    let weight = 1;
    if (concurrencyLimitBias > 0) {
      // Quadratic boost for tenants whose limit approaches the max limit.
      const normalizedLimit = maxLimit > 0 ? tenant.concurrency.limit / maxLimit : 0;
      weight *= 1 + Math.pow(normalizedLimit * concurrencyLimitBias, 2);
    }
    if (availableCapacityBias > 0) {
      // Quadratic boost for headroom; a non-positive limit counts as fully
      // used (no bonus).
      const usedPercentage = tenant.concurrency.limit > 0 ? tenant.concurrency.current / tenant.concurrency.limit : 1;
      const availableBonus = 1 - usedPercentage;
      weight *= 1 + Math.pow(availableBonus * availableCapacityBias, 2);
    }
    return { tenantId, weight };
  });
  return this.#weightedShuffle(weightedTenants);
}
|
|
14095
|
+
// Order a tenant's queues for draining. With queueAgeRandomization = 0 this
// is strictly oldest-first; otherwise it is a weighted shuffle where older
// queues get proportionally higher weights.
#orderQueuesForTenant(snapshot, tenantId) {
  const tenant = snapshot.tenants.get(tenantId);
  if (!tenant || tenant.queues.length === 0) {
    return [];
  }
  const queues = [...tenant.queues];
  const { queueAgeRandomization } = this.biases;
  if (queueAgeRandomization === 0) {
    // Deterministic path: strictly descending age (oldest work first).
    return queues.sort((a, b) => b.age - a.age).map((q) => q.queueId);
  }
  const maxAge = Math.max(...queues.map((q) => q.age));
  // Guard the divisor so an all-zero-age snapshot still yields finite weights.
  const ageDenom = maxAge === 0 ? 1 : maxAge;
  // Base weight 1 plus an age-proportional bonus scaled by the bias knob.
  const weightedQueues = queues.map((q) => ({
    queue: q,
    weight: 1 + q.age / ageDenom * queueAgeRandomization
  }));
  // Weighted sampling without replacement (same walk as #weightedShuffle,
  // but yielding queue IDs).
  const result = [];
  let remaining = [...weightedQueues];
  let totalWeight = remaining.reduce((sum, q) => sum + q.weight, 0);
  while (remaining.length > 0) {
    let random = this.rng() * totalWeight;
    let index = 0;
    while (random > 0 && index < remaining.length) {
      const item = remaining[index];
      if (item) {
        random -= item.weight;
      }
      index++;
    }
    // The cumulative walk overshoots by one step; clamp back to the hit.
    index = Math.max(0, index - 1);
    const selected = remaining[index];
    if (selected) {
      result.push(selected.queue.queueId);
      totalWeight -= selected.weight;
      remaining.splice(index, 1);
    }
  }
  return result;
}
|
|
14134
|
+
#shuffle(array) {
|
|
14135
|
+
const result = [...array];
|
|
14136
|
+
for (let i = result.length - 1; i > 0; i--) {
|
|
14137
|
+
const j = Math.floor(this.rng() * (i + 1));
|
|
14138
|
+
const temp = result[i];
|
|
14139
|
+
const swapValue = result[j];
|
|
14140
|
+
if (temp !== void 0 && swapValue !== void 0) {
|
|
14141
|
+
result[i] = swapValue;
|
|
14142
|
+
result[j] = temp;
|
|
14143
|
+
}
|
|
14144
|
+
}
|
|
14145
|
+
return result;
|
|
14146
|
+
}
|
|
14147
|
+
// Weighted shuffle of { tenantId, weight } items: repeatedly sample one item
// without replacement, each pick proportional to its weight. Returns tenant
// IDs in the sampled order.
#weightedShuffle(items) {
  const result = [];
  let remaining = [...items];
  let totalWeight = remaining.reduce((sum, item) => sum + item.weight, 0);
  while (remaining.length > 0) {
    // Draw a point in [0, totalWeight) and walk the cumulative weights.
    let random = this.rng() * totalWeight;
    let index = 0;
    while (random > 0 && index < remaining.length) {
      const item = remaining[index];
      if (item) {
        random -= item.weight;
      }
      index++;
    }
    // The cumulative walk overshoots by one step; clamp back to the hit.
    index = Math.max(0, index - 1);
    const selected = remaining[index];
    if (selected) {
      result.push(selected.tenantId);
      totalWeight -= selected.weight;
      remaining.splice(index, 1);
    }
  }
  return result;
}
|
|
14171
|
+
};
|
|
14172
|
+
|
|
14173
|
+
// src/fair-queue/schedulers/roundRobin.ts
|
|
14174
|
+
// Round-robin fair scheduler: visits tenants in first-seen order, rotated by
// a per-shard "last served" cursor persisted in Redis, and skips tenants that
// are at their concurrency capacity.
var RoundRobinScheduler = class extends BaseScheduler {
  /**
   * @param config.redis  - redis connection options (a dedicated client is created)
   * @param config.keys   - key builder; must provide extractTenantId()
   * @param config.masterQueueLimit - max queues fetched per shard scan (default 1000)
   */
  constructor(config) {
    super();
    this.config = config;
    this.redis = createRedisClient(config.redis);
    this.keys = config.keys;
    this.masterQueueLimit = config.masterQueueLimit ?? 1e3;
  }
  redis;
  keys;
  masterQueueLimit;
  // ============================================================================
  // FairScheduler Implementation
  // ============================================================================
  async selectQueues(masterQueueShard, consumerId, context2) {
    const now = Date.now();
    const queues = await this.#getQueuesFromShard(masterQueueShard, now);
    if (queues.length === 0) {
      return [];
    }
    // Group queue IDs per tenant, remembering first-seen tenant order
    // (zrangebyscore returns queues oldest-first).
    const queuesByTenant = /* @__PURE__ */ new Map();
    const tenantOrder = [];
    for (const queue of queues) {
      if (!queuesByTenant.has(queue.tenantId)) {
        queuesByTenant.set(queue.tenantId, []);
        tenantOrder.push(queue.tenantId);
      }
      queuesByTenant.get(queue.tenantId).push(queue.queueId);
    }
    // Rotate so the tenant after the last-served one is visited first.
    // NOTE(review): the cursor is read/written with plain GET/SET, so two
    // concurrent consumers on the same shard can race — confirm acceptable.
    const lastServedIndex = await this.#getLastServedIndex(masterQueueShard);
    const rotatedTenants = this.#rotateArray(tenantOrder, lastServedIndex);
    // Keep only tenants with concurrency headroom (sequential capacity checks).
    const eligibleTenants = [];
    for (const tenantId of rotatedTenants) {
      const isAtCapacity = await context2.isAtCapacity("tenant", tenantId);
      if (!isAtCapacity) {
        const tenantQueues = queuesByTenant.get(tenantId) ?? [];
        eligibleTenants.push({
          tenantId,
          queues: tenantQueues
        });
      }
    }
    // Advance the cursor past the tenant we are about to serve first.
    const firstEligible = eligibleTenants[0];
    if (firstEligible) {
      const firstTenantIndex = tenantOrder.indexOf(firstEligible.tenantId);
      await this.#setLastServedIndex(masterQueueShard, firstTenantIndex + 1);
    }
    return eligibleTenants;
  }
  async close() {
    await this.redis.quit();
  }
  // ============================================================================
  // Private Methods
  // ============================================================================
  // Fetch up to masterQueueLimit ready queues (score <= maxScore) from the
  // shard's sorted set, parsed into { queueId, score, tenantId }.
  async #getQueuesFromShard(shardKey, maxScore) {
    const results = await this.redis.zrangebyscore(
      shardKey,
      "-inf",
      maxScore,
      "WITHSCORES",
      "LIMIT",
      0,
      this.masterQueueLimit
    );
    const queues = [];
    // Reply is a flat [member, score, member, score, ...] array.
    for (let i = 0; i < results.length; i += 2) {
      const queueId = results[i];
      const scoreStr = results[i + 1];
      if (queueId && scoreStr) {
        queues.push({
          queueId,
          score: parseFloat(scoreStr),
          tenantId: this.keys.extractTenantId(queueId)
        });
      }
    }
    return queues;
  }
  // Redis key holding this shard's round-robin cursor.
  #lastServedKey(shardKey) {
    return `${shardKey}:rr:lastServed`;
  }
  // Missing/unset cursor defaults to 0 (start of the tenant list).
  async #getLastServedIndex(shardKey) {
    const key = this.#lastServedKey(shardKey);
    const value = await this.redis.get(key);
    return value ? parseInt(value, 10) : 0;
  }
  async #setLastServedIndex(shardKey, index) {
    const key = this.#lastServedKey(shardKey);
    await this.redis.set(key, index.toString());
  }
  // Return a copy of `array` rotated so element `startIndex % length` comes
  // first; an out-of-range cursor wraps around safely.
  #rotateArray(array, startIndex) {
    if (array.length === 0) return [];
    const normalizedIndex = startIndex % array.length;
    return [...array.slice(normalizedIndex), ...array.slice(0, normalizedIndex)];
  }
};
|
|
14271
|
+
// Retry strategy with exponentially growing, optionally jittered delays.
// Delay computation is delegated to v3.calculateNextRetryDelay.
var ExponentialBackoffRetry = class {
  maxAttempts;
  options;
  /**
   * @param options.maxAttempts    - total attempts before giving up (default 12)
   * @param options.factor         - exponential growth factor (default 2)
   * @param options.minTimeoutInMs - floor for a single delay (default 1000)
   * @param options.maxTimeoutInMs - ceiling for a single delay (default 1 hour)
   * @param options.randomize      - apply jitter (default true)
   */
  constructor(options) {
    const resolved = {
      maxAttempts: options?.maxAttempts ?? 12,
      factor: options?.factor ?? 2,
      minTimeoutInMs: options?.minTimeoutInMs ?? 1e3,
      maxTimeoutInMs: options?.maxTimeoutInMs ?? 36e5,
      // 1 hour
      randomize: options?.randomize ?? true
    };
    this.options = resolved;
    this.maxAttempts = resolved.maxAttempts ?? 12;
  }
  /**
   * @returns {number|null} delay in ms before the next attempt, or null to stop.
   */
  getNextDelay(attempt, _error) {
    if (attempt >= this.maxAttempts) {
      return null;
    }
    return v3.calculateNextRetryDelay(this.options, attempt) ?? null;
  }
};
|
|
14293
|
+
// Retry strategy with a constant delay between attempts.
var FixedDelayRetry = class {
  maxAttempts;
  delayMs;
  /**
   * @param options.maxAttempts - total attempts before giving up
   * @param options.delayMs     - fixed delay between attempts, in ms
   */
  constructor({ maxAttempts, delayMs }) {
    this.maxAttempts = maxAttempts;
    this.delayMs = delayMs;
  }
  /**
   * @returns {number|null} the fixed delay, or null once attempts are exhausted.
   */
  getNextDelay(attempt, _error) {
    return attempt >= this.maxAttempts ? null : this.delayMs;
  }
};
|
|
14307
|
+
// Retry strategy whose delay grows linearly with the attempt number,
// capped at maxDelayMs (default: baseDelayMs * maxAttempts).
var LinearBackoffRetry = class {
  maxAttempts;
  baseDelayMs;
  maxDelayMs;
  /**
   * @param options.maxAttempts - total attempts before giving up
   * @param options.baseDelayMs - delay increment per attempt, in ms
   * @param options.maxDelayMs  - optional ceiling on any single delay
   */
  constructor(options) {
    this.maxAttempts = options.maxAttempts;
    this.baseDelayMs = options.baseDelayMs;
    this.maxDelayMs = options.maxDelayMs ?? options.baseDelayMs * options.maxAttempts;
  }
  /**
   * @returns {number|null} attempt * baseDelayMs (capped), or null to stop.
   */
  getNextDelay(attempt, _error) {
    if (attempt >= this.maxAttempts) {
      return null;
    }
    return Math.min(attempt * this.baseDelayMs, this.maxDelayMs);
  }
};
|
|
14324
|
+
// Retry strategy that never retries: exactly one attempt, then give up.
var NoRetry = class {
  maxAttempts = 1;
  /**
   * Always signals "stop retrying".
   * @returns {null}
   */
  getNextDelay(_attempt, _error) {
    return null;
  }
};
|
|
14330
|
+
// Retry strategy with zero delay between attempts, up to a fixed budget.
var ImmediateRetry = class {
  maxAttempts;
  /**
   * @param {number} maxAttempts - total attempts allowed before giving up
   */
  constructor(maxAttempts) {
    this.maxAttempts = maxAttempts;
  }
  /**
   * @returns {number|null} 0 (retry immediately), or null once exhausted.
   */
  getNextDelay(attempt, _error) {
    return attempt >= this.maxAttempts ? null : 0;
  }
};
|
|
14342
|
+
// Retry strategy that delegates delay computation to a user-supplied callback,
// still enforcing the maxAttempts budget.
var CustomRetry = class {
  maxAttempts;
  calculateDelay;
  /**
   * @param options.maxAttempts    - total attempts before giving up
   * @param options.calculateDelay - (attempt, error) => delay in ms (or null)
   */
  constructor({ maxAttempts, calculateDelay }) {
    this.maxAttempts = maxAttempts;
    this.calculateDelay = calculateDelay;
  }
  /**
   * @returns {number|null} callback result, or null once attempts are exhausted.
   */
  getNextDelay(attempt, error) {
    return attempt >= this.maxAttempts ? null : this.calculateDelay(attempt, error);
  }
};
|
|
14356
|
+
// Default retry tuning used by createDefaultRetryStrategy: up to 12 attempts,
// exponential factor 2, delays between 1 s and 1 h, with jitter enabled.
var defaultRetryOptions = {
  maxAttempts: 12,
  factor: 2,
  minTimeoutInMs: 1e3,
  maxTimeoutInMs: 36e5,
  randomize: true
};
|
|
14363
|
+
/**
 * Build the package's default retry strategy: exponential backoff using the
 * shared defaultRetryOptions tuning.
 */
function createDefaultRetryStrategy() {
  const strategy = new ExponentialBackoffRetry(defaultRetryOptions);
  return strategy;
}
|
|
14366
|
+
|
|
14367
|
+
// src/fair-queue/index.ts
|
|
14368
|
+
var FairQueue = class {
|
|
14369
|
+
/**
 * Wire up a FairQueue instance: resolve configuration defaults, create the
 * Redis client and sub-managers (master queue, concurrency, visibility,
 * worker queues, telemetry), register Lua commands, and — unless
 * startConsumers is explicitly false — start the consumer loops immediately.
 */
constructor(options) {
  this.options = options;
  this.redis = createRedisClient(options.redis);
  this.keys = options.keys;
  this.scheduler = options.scheduler;
  this.logger = options.logger ?? new logger$1.Logger("FairQueue", "info");
  this.abortController = new AbortController();
  // Optional payload validation on enqueue (off by default).
  this.payloadSchema = options.payloadSchema;
  this.validateOnEnqueue = options.validateOnEnqueue ?? false;
  this.retryStrategy = options.retry?.strategy;
  this.deadLetterQueueEnabled = options.retry?.deadLetterQueue ?? true;
  // Consumer / timing configuration (all values in ms unless noted).
  this.shardCount = options.shardCount ?? 1;
  this.consumerCount = options.consumerCount ?? 1;
  this.consumerIntervalMs = options.consumerIntervalMs ?? 100;
  this.visibilityTimeoutMs = options.visibilityTimeoutMs ?? 3e4;
  // Heartbeat defaults to a third of the visibility timeout so in-flight
  // messages are refreshed well before they expire.
  this.heartbeatIntervalMs = options.heartbeatIntervalMs ?? this.visibilityTimeoutMs / 3;
  this.reclaimIntervalMs = options.reclaimIntervalMs ?? 5e3;
  this.workerQueueResolver = options.workerQueue.resolveWorkerQueue;
  this.batchClaimSize = options.batchClaimSize ?? 10;
  // Cooloff: back off from queues that repeatedly fail to yield work.
  this.cooloffEnabled = options.cooloff?.enabled ?? true;
  this.cooloffThreshold = options.cooloff?.threshold ?? 10;
  this.cooloffPeriodMs = options.cooloff?.periodMs ?? 1e4;
  this.maxCooloffStatesSize = options.cooloff?.maxStatesSize ?? 1e3;
  this.globalRateLimiter = options.globalRateLimiter;
  // Consumer-loop span batching limits.
  this.consumerTraceMaxIterations = options.consumerTraceMaxIterations ?? 500;
  this.consumerTraceTimeoutSeconds = options.consumerTraceTimeoutSeconds ?? 60;
  this.telemetry = new FairQueueTelemetry({
    tracer: options.tracer,
    meter: options.meter,
    name: options.name ?? "fairqueue"
  });
  this.batchedSpanManager = new BatchedSpanManager({
    tracer: options.tracer,
    name: options.name ?? "fairqueue",
    maxIterations: this.consumerTraceMaxIterations,
    timeoutSeconds: this.consumerTraceTimeoutSeconds,
    // Surface in-memory cache sizes on every batched span.
    getDynamicAttributes: () => ({
      "cache.descriptor_size": this.queueDescriptorCache.size,
      "cache.cooloff_states_size": this.queueCooloffStates.size
    })
  });
  this.masterQueue = new MasterQueue({
    redis: options.redis,
    keys: options.keys,
    shardCount: this.shardCount
  });
  // Concurrency manager is only created when groups are configured.
  if (options.concurrencyGroups && options.concurrencyGroups.length > 0) {
    this.concurrencyManager = new ConcurrencyManager({
      redis: options.redis,
      keys: options.keys,
      groups: options.concurrencyGroups
    });
  }
  this.visibilityManager = new VisibilityManager({
    redis: options.redis,
    keys: options.keys,
    shardCount: this.shardCount,
    defaultTimeoutMs: this.visibilityTimeoutMs,
    logger: {
      debug: (msg, ctx) => this.logger.debug(msg, ctx),
      error: (msg, ctx) => this.logger.error(msg, ctx)
    }
  });
  this.workerQueueManager = new WorkerQueueManager({
    redis: options.redis,
    keys: options.keys,
    logger: {
      debug: (msg, ctx) => this.logger.debug(msg, ctx),
      error: (msg, ctx) => this.logger.error(msg, ctx)
    }
  });
  this.#registerCommands();
  // Auto-start unless the caller explicitly opts out.
  if (options.startConsumers !== false) {
    this.start();
  }
}
|
|
14445
|
+
// Core collaborators (assigned in the constructor).
redis;
keys;
scheduler;
masterQueue;
concurrencyManager;
visibilityManager;
workerQueueManager;
telemetry;
logger;
// Configuration
payloadSchema;
validateOnEnqueue;
retryStrategy;
deadLetterQueueEnabled;
shardCount;
consumerCount;
consumerIntervalMs;
visibilityTimeoutMs;
heartbeatIntervalMs;
reclaimIntervalMs;
workerQueueResolver;
batchClaimSize;
// Cooloff state
cooloffEnabled;
cooloffThreshold;
cooloffPeriodMs;
maxCooloffStatesSize;
// queueId -> cooloff bookkeeping; bounded by maxCooloffStatesSize.
queueCooloffStates = /* @__PURE__ */ new Map();
// Global rate limiter
globalRateLimiter;
// Consumer tracing
consumerTraceMaxIterations;
consumerTraceTimeoutSeconds;
batchedSpanManager;
// Runtime state
isRunning = false;
abortController;
masterQueueConsumerLoops = [];
reclaimLoop;
// Queue descriptor cache for message processing
// (queueId -> { id, tenantId, metadata }; populated on enqueue).
queueDescriptorCache = /* @__PURE__ */ new Map();
|
|
14486
|
+
// ============================================================================
|
|
14487
|
+
// Public API - Telemetry
|
|
14488
|
+
// ============================================================================
|
|
14489
|
+
/**
|
|
14490
|
+
* Register observable gauge callbacks for telemetry.
|
|
14491
|
+
* Call this after FairQueue is created to enable gauge metrics.
|
|
14492
|
+
*
|
|
14493
|
+
* @param options.observedTenants - List of tenant IDs to observe for DLQ metrics
|
|
14494
|
+
*/
|
|
14495
|
+
registerTelemetryGauges(options) {
  // Wire observable-gauge callbacks into the telemetry layer. Each callback
  // is invoked lazily by the meter on collection, not eagerly here.
  this.telemetry.registerGaugeCallbacks({
    // Ready-queue count per master-queue shard.
    getMasterQueueLength: async (shardId) => {
      return await this.masterQueue.getShardQueueCount(shardId);
    },
    // Messages currently claimed but not yet acked, per shard.
    getInflightCount: async (shardId) => {
      return await this.visibilityManager.getInflightCount(shardId);
    },
    // DLQ depth for a given tenant (0 when the DLQ is disabled).
    getDLQLength: async (tenantId) => {
      return await this.getDeadLetterQueueLength(tenantId);
    },
    shardCount: this.shardCount,
    observedTenants: options?.observedTenants
  });
}
|
|
14510
|
+
// ============================================================================
|
|
14511
|
+
// Public API - Enqueueing
|
|
14512
|
+
// ============================================================================
|
|
14513
|
+
/**
|
|
14514
|
+
* Enqueue a single message to a queue.
|
|
14515
|
+
*/
|
|
14516
|
+
async enqueue(options) {
  // Traced as a PRODUCER span; returns the message ID (generated if absent).
  return this.telemetry.trace(
    "enqueue",
    async (span) => {
      const messageId = options.messageId ?? nanoid();
      const timestamp = options.timestamp ?? Date.now();
      const queueKey = this.keys.queueKey(options.queueId);
      const queueItemsKey = this.keys.queueItemsKey(options.queueId);
      // The queue's shard determines which master queue sorted set it lives in.
      const shardId = this.masterQueue.getShardForQueue(options.queueId);
      const masterQueueKey = this.keys.masterQueueKey(shardId);
      // Optional schema validation (throws, aborting the enqueue).
      if (this.validateOnEnqueue && this.payloadSchema) {
        const result = this.payloadSchema.safeParse(options.payload);
        if (!result.success) {
          throw new Error(`Payload validation failed: ${result.error.message}`);
        }
      }
      // Cache the queue descriptor for later message processing.
      const descriptor = {
        id: options.queueId,
        tenantId: options.tenantId,
        metadata: options.metadata ?? {}
      };
      this.queueDescriptorCache.set(options.queueId, descriptor);
      const storedMessage = {
        id: messageId,
        queueId: options.queueId,
        tenantId: options.tenantId,
        payload: options.payload,
        timestamp,
        attempt: 1,
        // Resolve the destination worker queue eagerly (if a resolver is set)
        // so it is persisted with the message.
        workerQueue: this.workerQueueResolver ? this.workerQueueResolver({
          id: messageId,
          queueId: options.queueId,
          tenantId: options.tenantId,
          payload: options.payload,
          timestamp,
          attempt: 1,
          metadata: options.metadata
        }) : void 0,
        metadata: options.metadata
      };
      // Single Lua call: add to queue zset, store payload, update master queue.
      await this.redis.enqueueMessageAtomic(
        queueKey,
        queueItemsKey,
        masterQueueKey,
        options.queueId,
        messageId,
        timestamp.toString(),
        JSON.stringify(storedMessage)
      );
      span.setAttributes({
        [FairQueueAttributes.QUEUE_ID]: options.queueId,
        [FairQueueAttributes.TENANT_ID]: options.tenantId,
        [FairQueueAttributes.MESSAGE_ID]: messageId,
        [FairQueueAttributes.SHARD_ID]: shardId.toString()
      });
      this.telemetry.recordEnqueue();
      this.logger.debug("Message enqueued", {
        queueId: options.queueId,
        messageId,
        timestamp
      });
      return messageId;
    },
    {
      kind: SpanKind.PRODUCER,
      attributes: {
        [MessagingAttributes.OPERATION]: "publish"
      }
    }
  );
}
|
|
14587
|
+
/**
|
|
14588
|
+
* Enqueue multiple messages to a queue.
|
|
14589
|
+
*/
|
|
14590
|
+
async enqueueBatch(options) {
  // Traced as a PRODUCER span; all messages share one queue/tenant and are
  // written in a single atomic Lua call. Returns the list of message IDs.
  return this.telemetry.trace(
    "enqueueBatch",
    async (span) => {
      const queueKey = this.keys.queueKey(options.queueId);
      const queueItemsKey = this.keys.queueItemsKey(options.queueId);
      const shardId = this.masterQueue.getShardForQueue(options.queueId);
      const masterQueueKey = this.keys.masterQueueKey(shardId);
      const now = Date.now();
      // Cache the queue descriptor for later message processing.
      const descriptor = {
        id: options.queueId,
        tenantId: options.tenantId,
        metadata: options.metadata ?? {}
      };
      this.queueDescriptorCache.set(options.queueId, descriptor);
      const messageIds = [];
      // Flat [id, timestamp, json, ...] triples passed to the Lua script.
      const args = [];
      for (const message of options.messages) {
        const messageId = message.messageId ?? nanoid();
        const timestamp = message.timestamp ?? now;
        // Validation failure aborts the whole batch before anything is written.
        if (this.validateOnEnqueue && this.payloadSchema) {
          const result = this.payloadSchema.safeParse(message.payload);
          if (!result.success) {
            throw new Error(
              `Payload validation failed for message ${messageId}: ${result.error.message}`
            );
          }
        }
        const storedMessage = {
          id: messageId,
          queueId: options.queueId,
          tenantId: options.tenantId,
          payload: message.payload,
          timestamp,
          attempt: 1,
          // Destination worker queue resolved eagerly when a resolver is set.
          workerQueue: this.workerQueueResolver ? this.workerQueueResolver({
            id: messageId,
            queueId: options.queueId,
            tenantId: options.tenantId,
            payload: message.payload,
            timestamp,
            attempt: 1,
            metadata: options.metadata
          }) : void 0,
          metadata: options.metadata
        };
        messageIds.push(messageId);
        args.push(messageId, timestamp.toString(), JSON.stringify(storedMessage));
      }
      await this.redis.enqueueBatchAtomic(
        queueKey,
        queueItemsKey,
        masterQueueKey,
        options.queueId,
        ...args
      );
      span.setAttributes({
        [FairQueueAttributes.QUEUE_ID]: options.queueId,
        [FairQueueAttributes.TENANT_ID]: options.tenantId,
        [FairQueueAttributes.MESSAGE_COUNT]: messageIds.length,
        [FairQueueAttributes.SHARD_ID]: shardId.toString()
      });
      this.telemetry.recordEnqueueBatch(messageIds.length);
      this.logger.debug("Batch enqueued", {
        queueId: options.queueId,
        messageCount: messageIds.length
      });
      return messageIds;
    },
    {
      kind: SpanKind.PRODUCER,
      attributes: {
        [MessagingAttributes.OPERATION]: "publish"
      }
    }
  );
}
|
|
14667
|
+
// ============================================================================
|
|
14668
|
+
// Public API - Dead Letter Queue
|
|
14669
|
+
// ============================================================================
|
|
14670
|
+
/**
|
|
14671
|
+
* Get messages from the dead letter queue for a tenant.
|
|
14672
|
+
*/
|
|
14673
|
+
async getDeadLetterMessages(tenantId, limit = 100) {
|
|
14674
|
+
if (!this.deadLetterQueueEnabled) {
|
|
14675
|
+
return [];
|
|
14676
|
+
}
|
|
14677
|
+
const dlqKey = this.keys.deadLetterQueueKey(tenantId);
|
|
14678
|
+
const dlqDataKey = this.keys.deadLetterQueueDataKey(tenantId);
|
|
14679
|
+
const results = await this.redis.zrange(dlqKey, 0, limit - 1, "WITHSCORES");
|
|
14680
|
+
const messages = [];
|
|
14681
|
+
for (let i = 0; i < results.length; i += 2) {
|
|
14682
|
+
const messageId = results[i];
|
|
14683
|
+
const deadLetteredAtStr = results[i + 1];
|
|
14684
|
+
if (!messageId || !deadLetteredAtStr) continue;
|
|
14685
|
+
const dataJson = await this.redis.hget(dlqDataKey, messageId);
|
|
14686
|
+
if (!dataJson) continue;
|
|
14687
|
+
try {
|
|
14688
|
+
const data = JSON.parse(dataJson);
|
|
14689
|
+
data.deadLetteredAt = parseFloat(deadLetteredAtStr);
|
|
14690
|
+
messages.push(data);
|
|
14691
|
+
} catch {
|
|
14692
|
+
this.logger.error("Failed to parse DLQ message", { messageId, tenantId });
|
|
14693
|
+
}
|
|
14694
|
+
}
|
|
14695
|
+
return messages;
|
|
14696
|
+
}
|
|
14697
|
+
/**
|
|
14698
|
+
* Redrive a message from DLQ back to its original queue.
|
|
14699
|
+
*/
|
|
14700
|
+
async redriveMessage(tenantId, messageId) {
  // No-op when the DLQ feature is disabled.
  if (!this.deadLetterQueueEnabled) {
    return false;
  }
  return this.telemetry.trace(
    "redriveMessage",
    async (span) => {
      const dlqKey = this.keys.deadLetterQueueKey(tenantId);
      const dlqDataKey = this.keys.deadLetterQueueDataKey(tenantId);
      const dataJson = await this.redis.hget(dlqDataKey, messageId);
      // Unknown message id (or already redriven) -> false.
      if (!dataJson) {
        return false;
      }
      const dlqMessage = JSON.parse(dataJson);
      // Re-enqueue with the original id/payload but a fresh timestamp.
      await this.enqueue({
        queueId: dlqMessage.queueId,
        tenantId: dlqMessage.tenantId,
        payload: dlqMessage.payload,
        messageId: dlqMessage.id,
        timestamp: Date.now()
      });
      // Only remove from the DLQ after the enqueue succeeded, so a failed
      // redrive never loses the message.
      const pipeline = this.redis.pipeline();
      pipeline.zrem(dlqKey, messageId);
      pipeline.hdel(dlqDataKey, messageId);
      await pipeline.exec();
      span.setAttributes({
        [FairQueueAttributes.TENANT_ID]: tenantId,
        [FairQueueAttributes.MESSAGE_ID]: messageId
      });
      this.logger.info("Redrived message from DLQ", { tenantId, messageId });
      return true;
    },
    {
      kind: SpanKind.PRODUCER,
      attributes: {
        [MessagingAttributes.OPERATION]: "redrive"
      }
    }
  );
}
|
|
14740
|
+
/**
|
|
14741
|
+
* Redrive all messages from DLQ back to their original queues.
|
|
14742
|
+
*/
|
|
14743
|
+
async redriveAll(tenantId) {
|
|
14744
|
+
const messages = await this.getDeadLetterMessages(tenantId, 1e3);
|
|
14745
|
+
let count = 0;
|
|
14746
|
+
for (const message of messages) {
|
|
14747
|
+
const success = await this.redriveMessage(tenantId, message.id);
|
|
14748
|
+
if (success) count++;
|
|
14749
|
+
}
|
|
14750
|
+
return count;
|
|
14751
|
+
}
|
|
14752
|
+
/**
|
|
14753
|
+
* Purge all messages from a tenant's DLQ.
|
|
14754
|
+
*/
|
|
14755
|
+
async purgeDeadLetterQueue(tenantId) {
  if (!this.deadLetterQueueEnabled) {
    return 0;
  }
  const dlqKey = this.keys.deadLetterQueueKey(tenantId);
  const dlqDataKey = this.keys.deadLetterQueueDataKey(tenantId);
  // Capture the count before deleting; returned to the caller.
  // NOTE(review): count and delete are not atomic — entries added between
  // the ZCARD and the pipeline are deleted but not counted.
  const count = await this.redis.zcard(dlqKey);
  const pipeline = this.redis.pipeline();
  pipeline.del(dlqKey);
  pipeline.del(dlqDataKey);
  await pipeline.exec();
  this.logger.info("Purged DLQ", { tenantId, count });
  return count;
}
|
|
14769
|
+
/**
|
|
14770
|
+
* Get the number of messages in a tenant's DLQ.
|
|
14771
|
+
*/
|
|
14772
|
+
async getDeadLetterQueueLength(tenantId) {
|
|
14773
|
+
if (!this.deadLetterQueueEnabled) {
|
|
14774
|
+
return 0;
|
|
14775
|
+
}
|
|
14776
|
+
const dlqKey = this.keys.deadLetterQueueKey(tenantId);
|
|
14777
|
+
return await this.redis.zcard(dlqKey);
|
|
14778
|
+
}
|
|
14779
|
+
/**
|
|
14780
|
+
* Get the size of the in-memory queue descriptor cache.
|
|
14781
|
+
* This cache stores metadata for queues that have been enqueued.
|
|
14782
|
+
* The cache is cleaned up when queues are fully processed.
|
|
14783
|
+
*/
|
|
14784
|
+
getQueueDescriptorCacheSize() {
|
|
14785
|
+
return this.queueDescriptorCache.size;
|
|
14786
|
+
}
|
|
14787
|
+
/**
|
|
14788
|
+
* Get the size of the in-memory cooloff states cache.
|
|
14789
|
+
* This cache tracks queues that are in cooloff due to repeated failures.
|
|
14790
|
+
* The cache is cleaned up when queues are fully processed or cooloff expires.
|
|
14791
|
+
*/
|
|
14792
|
+
getQueueCooloffStatesSize() {
|
|
14793
|
+
return this.queueCooloffStates.size;
|
|
14794
|
+
}
|
|
14795
|
+
/**
|
|
14796
|
+
* Get all in-memory cache sizes for monitoring.
|
|
14797
|
+
* Useful for adding as span attributes.
|
|
14798
|
+
*/
|
|
14799
|
+
getCacheSizes() {
|
|
14800
|
+
return {
|
|
14801
|
+
descriptorCacheSize: this.queueDescriptorCache.size,
|
|
14802
|
+
cooloffStatesSize: this.queueCooloffStates.size
|
|
14803
|
+
};
|
|
14804
|
+
}
|
|
14805
|
+
// ============================================================================
|
|
14806
|
+
// Public API - Lifecycle
|
|
14807
|
+
// ============================================================================
|
|
14808
|
+
/**
|
|
14809
|
+
* Start the master queue consumer loops and reclaim loop.
|
|
14810
|
+
* FairQueue claims messages and pushes them to worker queues.
|
|
14811
|
+
* External consumers are responsible for consuming from worker queues.
|
|
14812
|
+
*/
|
|
14813
|
+
start() {
|
|
14814
|
+
if (this.isRunning) {
|
|
14815
|
+
return;
|
|
14816
|
+
}
|
|
14817
|
+
this.isRunning = true;
|
|
14818
|
+
this.abortController = new AbortController();
|
|
14819
|
+
for (let shardId = 0; shardId < this.shardCount; shardId++) {
|
|
14820
|
+
const loop = this.#runMasterQueueConsumerLoop(shardId);
|
|
14821
|
+
this.masterQueueConsumerLoops.push(loop);
|
|
14822
|
+
}
|
|
14823
|
+
this.reclaimLoop = this.#runReclaimLoop();
|
|
14824
|
+
this.logger.info("FairQueue started", {
|
|
14825
|
+
consumerCount: this.consumerCount,
|
|
14826
|
+
shardCount: this.shardCount,
|
|
14827
|
+
consumerIntervalMs: this.consumerIntervalMs
|
|
14828
|
+
});
|
|
14829
|
+
}
|
|
14830
|
+
/**
|
|
14831
|
+
* Stop the consumer loops gracefully.
|
|
14832
|
+
*/
|
|
14833
|
+
async stop() {
|
|
14834
|
+
if (!this.isRunning) {
|
|
14835
|
+
return;
|
|
14836
|
+
}
|
|
14837
|
+
this.isRunning = false;
|
|
14838
|
+
this.abortController.abort();
|
|
14839
|
+
await Promise.allSettled([...this.masterQueueConsumerLoops, this.reclaimLoop]);
|
|
14840
|
+
this.masterQueueConsumerLoops = [];
|
|
14841
|
+
this.reclaimLoop = void 0;
|
|
14842
|
+
this.logger.info("FairQueue stopped");
|
|
14843
|
+
}
|
|
14844
|
+
/**
|
|
14845
|
+
* Close all resources.
|
|
14846
|
+
*/
|
|
14847
|
+
async close() {
|
|
14848
|
+
await this.stop();
|
|
14849
|
+
this.batchedSpanManager.cleanupAll();
|
|
14850
|
+
await Promise.all([
|
|
14851
|
+
this.masterQueue.close(),
|
|
14852
|
+
this.concurrencyManager?.close(),
|
|
14853
|
+
this.visibilityManager.close(),
|
|
14854
|
+
this.workerQueueManager.close(),
|
|
14855
|
+
this.scheduler.close?.(),
|
|
14856
|
+
this.redis.quit()
|
|
14857
|
+
]);
|
|
14858
|
+
}
|
|
14859
|
+
// ============================================================================
|
|
14860
|
+
// Public API - Inspection
|
|
14861
|
+
// ============================================================================
|
|
14862
|
+
/**
|
|
14863
|
+
* Get the number of messages in a queue.
|
|
14864
|
+
*/
|
|
14865
|
+
async getQueueLength(queueId) {
|
|
14866
|
+
const queueKey = this.keys.queueKey(queueId);
|
|
14867
|
+
return await this.redis.zcard(queueKey);
|
|
14868
|
+
}
|
|
14869
|
+
/**
|
|
14870
|
+
* Get total queue count across all shards.
|
|
14871
|
+
*/
|
|
14872
|
+
async getTotalQueueCount() {
|
|
14873
|
+
return await this.masterQueue.getTotalQueueCount();
|
|
14874
|
+
}
|
|
14875
|
+
/**
|
|
14876
|
+
* Get total in-flight message count.
|
|
14877
|
+
*/
|
|
14878
|
+
async getTotalInflightCount() {
|
|
14879
|
+
return await this.visibilityManager.getTotalInflightCount();
|
|
14880
|
+
}
|
|
14881
|
+
/**
|
|
14882
|
+
* Get the shard ID for a queue.
|
|
14883
|
+
*/
|
|
14884
|
+
getShardForQueue(queueId) {
|
|
14885
|
+
return this.masterQueue.getShardForQueue(queueId);
|
|
14886
|
+
}
|
|
14887
|
+
// ============================================================================
|
|
14888
|
+
// Private - Master Queue Consumer Loop (Two-Stage)
|
|
14889
|
+
// ============================================================================
|
|
14890
|
+
// Long-running consumer loop for one master-queue shard. Runs until stop()
// flips isRunning or the shared abort controller fires. Each iteration
// processes the shard inside a batched telemetry span, then sleeps: 1ms when
// work was found (drain quickly), consumerIntervalMs otherwise.
async #runMasterQueueConsumerLoop(shardId) {
  const loopId = `master-shard-${shardId}`;
  this.batchedSpanManager.initializeLoop(loopId);
  try {
    while (this.isRunning) {
      if (this.abortController.signal.aborted) {
        break;
      }
      let hadWork = false;
      try {
        hadWork = await this.batchedSpanManager.withBatchedSpan(
          loopId,
          async (span) => {
            span.setAttribute("shard_id", shardId);
            return await this.#processMasterQueueShard(loopId, shardId, span);
          },
          {
            iterationSpanName: "processMasterQueueShard",
            attributes: { shard_id: shardId }
          }
        );
      } catch (error) {
        // Per-iteration errors are logged and the batched span rotated;
        // the loop itself keeps running.
        this.logger.error("Master queue consumer error", {
          loopId,
          shardId,
          error: error instanceof Error ? error.message : String(error)
        });
        this.batchedSpanManager.markForRotation(loopId);
      }
      const waitMs = hadWork ? 1 : this.consumerIntervalMs;
      // Abortable sleep: resolves after waitMs, or rejects immediately on
      // abort so stop() never has to wait out a full consumer interval.
      await new Promise((resolve, reject) => {
        const abortHandler = () => {
          clearTimeout(timeout);
          reject(new Error("AbortError"));
        };
        const timeout = setTimeout(() => {
          // Timer won: detach the abort listener to avoid leaking handlers.
          this.abortController.signal.removeEventListener("abort", abortHandler);
          resolve();
        }, waitMs);
        this.abortController.signal.addEventListener("abort", abortHandler, { once: true });
      });
    }
  } catch (error) {
    if (isAbortError(error)) {
      // Normal shutdown path: the sleep above rejected due to abort.
      this.logger.debug("Master queue consumer aborted", { loopId });
      this.batchedSpanManager.cleanup(loopId);
      return;
    }
    throw error;
  } finally {
    this.batchedSpanManager.cleanup(loopId);
  }
}
|
|
14943
|
+
// One scheduling pass over a shard: ask the scheduler which tenant queues to
// service, then claim from each and push to worker queues. Returns true when
// at least one message was claimed (the caller uses this to shorten its sleep).
async #processMasterQueueShard(loopId, shardId, parentSpan) {
  const masterQueueKey = this.keys.masterQueueKey(shardId);
  const masterQueueSize = await this.masterQueue.getShardQueueCount(shardId);
  parentSpan?.setAttribute("master_queue_size", masterQueueSize);
  this.batchedSpanManager.incrementStat(loopId, "master_queue_size_sum", masterQueueSize);
  const schedulerContext = this.#createSchedulerContext();
  // Stage 1: fair selection of queues, grouped by tenant.
  const tenantQueues = await this.telemetry.trace(
    "selectQueues",
    async (span) => {
      span.setAttribute(FairQueueAttributes.SHARD_ID, shardId.toString());
      span.setAttribute(FairQueueAttributes.CONSUMER_ID, loopId);
      span.setAttribute("master_queue_size", masterQueueSize);
      const result = await this.scheduler.selectQueues(masterQueueKey, loopId, schedulerContext);
      span.setAttribute("tenant_count", result.length);
      span.setAttribute(
        "queue_count",
        result.reduce((acc, t) => acc + t.queues.length, 0)
      );
      return result;
    },
    { kind: SpanKind.INTERNAL }
  );
  if (tenantQueues.length === 0) {
    this.batchedSpanManager.incrementStat(loopId, "empty_iterations");
    return false;
  }
  this.batchedSpanManager.incrementStat(loopId, "tenants_selected", tenantQueues.length);
  this.batchedSpanManager.incrementStat(
    loopId,
    "queues_selected",
    tenantQueues.reduce((acc, t) => acc + t.queues.length, 0)
  );
  let messagesProcessed = 0;
  // Stage 2: claim from each selected queue.
  for (const { tenantId, queues } of tenantQueues) {
    for (const queueId of queues) {
      // Queues in cooloff are skipped (continue: other queues of the tenant
      // may still have capacity).
      if (this.cooloffEnabled && this.#isInCooloff(queueId)) {
        this.batchedSpanManager.incrementStat(loopId, "cooloff_skipped");
        continue;
      }
      if (this.concurrencyManager) {
        const isAtCapacity = await this.concurrencyManager.isAtCapacity("tenant", tenantId);
        if (isAtCapacity) {
          // Tenant-wide limit hit: break abandons ALL remaining queues of
          // this tenant, not just the current one.
          this.batchedSpanManager.incrementStat(loopId, "tenant_capacity_skipped");
          break;
        }
      }
      const processedFromQueue = await this.telemetry.trace(
        "claimAndPushToWorkerQueue",
        async (span) => {
          span.setAttribute(FairQueueAttributes.QUEUE_ID, queueId);
          span.setAttribute(FairQueueAttributes.TENANT_ID, tenantId);
          span.setAttribute(FairQueueAttributes.SHARD_ID, shardId.toString());
          const count = await this.#claimAndPushToWorkerQueue(loopId, queueId, tenantId, shardId);
          span.setAttribute("messages_claimed", count);
          return count;
        },
        { kind: SpanKind.INTERNAL }
      );
      if (processedFromQueue > 0) {
        messagesProcessed += processedFromQueue;
        this.batchedSpanManager.incrementStat(loopId, "messages_claimed", processedFromQueue);
        // Feed throughput back into the scheduler; batch API preferred,
        // per-message API used as a fallback.
        if (this.scheduler.recordProcessedBatch) {
          await this.telemetry.trace(
            "recordProcessedBatch",
            async (span) => {
              span.setAttribute(FairQueueAttributes.QUEUE_ID, queueId);
              span.setAttribute(FairQueueAttributes.TENANT_ID, tenantId);
              span.setAttribute("count", processedFromQueue);
              await this.scheduler.recordProcessedBatch(tenantId, queueId, processedFromQueue);
            },
            { kind: SpanKind.INTERNAL }
          );
        } else if (this.scheduler.recordProcessed) {
          for (let i = 0; i < processedFromQueue; i++) {
            await this.telemetry.trace(
              "recordProcessed",
              async (span) => {
                span.setAttribute(FairQueueAttributes.QUEUE_ID, queueId);
                span.setAttribute(FairQueueAttributes.TENANT_ID, tenantId);
                await this.scheduler.recordProcessed(tenantId, queueId);
              },
              { kind: SpanKind.INTERNAL }
            );
          }
        }
      } else {
        this.batchedSpanManager.incrementStat(loopId, "claim_skipped");
      }
    }
  }
  return messagesProcessed > 0;
}
|
|
15035
|
+
// Claim up to batchClaimSize messages from one queue, reserve concurrency for
// each, and push a "<messageId>:<queueId>" key onto the resolved worker queue.
// Returns the number of messages actually handed to worker queues.
async #claimAndPushToWorkerQueue(loopId, queueId, tenantId, shardId) {
  const queueKey = this.keys.queueKey(queueId);
  const queueItemsKey = this.keys.queueItemsKey(queueId);
  const masterQueueKey = this.keys.masterQueueKey(shardId);
  // Fall back to a minimal descriptor when the queue is not in the local cache.
  const descriptor = this.queueDescriptorCache.get(queueId) ?? {
    id: queueId,
    tenantId,
    metadata: {}
  };
  let maxClaimCount = this.batchClaimSize;
  if (this.concurrencyManager) {
    const availableCapacity = await this.concurrencyManager.getAvailableCapacity(descriptor);
    if (availableCapacity === 0) {
      // No capacity at all counts as a failed attempt for cooloff purposes.
      this.#incrementCooloff(queueId);
      return 0;
    }
    maxClaimCount = Math.min(maxClaimCount, availableCapacity);
  }
  if (this.globalRateLimiter) {
    const result = await this.globalRateLimiter.limit();
    if (!result.allowed && result.resetAt) {
      const waitMs = Math.max(0, result.resetAt - Date.now());
      if (waitMs > 0) {
        // NOTE(review): this sleep is not tied to the abort signal, so a
        // shutdown during a rate-limit window waits the full window.
        this.logger.debug("Global rate limit reached, waiting", { waitMs, loopId });
        await new Promise((resolve) => setTimeout(resolve, waitMs));
      }
    }
  }
  const claimedMessages = await this.visibilityManager.claimBatch(queueId, queueKey, queueItemsKey, loopId, maxClaimCount, this.visibilityTimeoutMs);
  if (claimedMessages.length === 0) {
    // Empty claim: if the queue is truly empty, retire it from the master
    // queue and drop its local caches.
    const removed = await this.redis.updateMasterQueueIfEmpty(masterQueueKey, queueKey, queueId);
    if (removed === 1) {
      this.queueDescriptorCache.delete(queueId);
      this.queueCooloffStates.delete(queueId);
    }
    return 0;
  }
  let processedCount = 0;
  for (let i = 0; i < claimedMessages.length; i++) {
    const message = claimedMessages[i];
    if (this.concurrencyManager) {
      const reserved = await this.concurrencyManager.reserve(descriptor, message.messageId);
      if (!reserved) {
        // Capacity ran out mid-batch: everything from this message onward is
        // returned to the queue unprocessed.
        await this.visibilityManager.releaseBatch(
          claimedMessages.slice(i),
          queueId,
          queueKey,
          queueItemsKey,
          masterQueueKey
        );
        break;
      }
    }
    const workerQueueId = this.workerQueueResolver(message.payload);
    const messageKey = `${message.messageId}:${queueId}`;
    await this.workerQueueManager.push(workerQueueId, messageKey);
    processedCount++;
  }
  if (processedCount > 0) {
    // Any successful claim fully resets this queue's cooloff bookkeeping.
    this.#resetCooloff(queueId);
  }
  return processedCount;
}
|
|
15098
|
+
// ============================================================================
|
|
15099
|
+
// Public API - Message Lifecycle (for external consumers)
|
|
15100
|
+
// ============================================================================
|
|
15101
|
+
/**
|
|
15102
|
+
* Get message data from in-flight storage.
|
|
15103
|
+
* External consumers use this to retrieve the stored message after popping from worker queue.
|
|
15104
|
+
*
|
|
15105
|
+
* @param messageId - The ID of the message
|
|
15106
|
+
* @param queueId - The queue ID the message belongs to
|
|
15107
|
+
* @returns The stored message or null if not found
|
|
15108
|
+
*/
|
|
15109
|
+
async getMessageData(messageId, queueId) {
|
|
15110
|
+
const shardId = this.masterQueue.getShardForQueue(queueId);
|
|
15111
|
+
const inflightDataKey = this.keys.inflightDataKey(shardId);
|
|
15112
|
+
const dataJson = await this.redis.hget(inflightDataKey, messageId);
|
|
15113
|
+
if (!dataJson) {
|
|
15114
|
+
return null;
|
|
15115
|
+
}
|
|
15116
|
+
try {
|
|
15117
|
+
return JSON.parse(dataJson);
|
|
15118
|
+
} catch {
|
|
15119
|
+
this.logger.error("Failed to parse message data", { messageId, queueId });
|
|
15120
|
+
return null;
|
|
15121
|
+
}
|
|
15122
|
+
}
|
|
15123
|
+
/**
|
|
15124
|
+
* Extend the visibility timeout for a message.
|
|
15125
|
+
* External consumers should call this periodically during long-running processing.
|
|
15126
|
+
*
|
|
15127
|
+
* @param messageId - The ID of the message
|
|
15128
|
+
* @param queueId - The queue ID the message belongs to
|
|
15129
|
+
* @returns true if heartbeat was successful
|
|
15130
|
+
*/
|
|
15131
|
+
async heartbeatMessage(messageId, queueId) {
|
|
15132
|
+
return this.visibilityManager.heartbeat(messageId, queueId, this.heartbeatIntervalMs);
|
|
15133
|
+
}
|
|
15134
|
+
/**
 * Mark a message as successfully processed.
 * Removes the message from in-flight storage, releases its concurrency slot,
 * and retires the queue from the master queue if it became empty.
 *
 * @param messageId - The ID of the message
 * @param queueId - The queue ID the message belongs to
 */
async completeMessage(messageId, queueId) {
  const shardId = this.masterQueue.getShardForQueue(queueId);
  const queueKey = this.keys.queueKey(queueId);
  const masterQueueKey = this.keys.masterQueueKey(shardId);
  const inflightDataKey = this.keys.inflightDataKey(shardId);
  // Read the stored copy BEFORE completing: completion removes it, and its
  // tenantId/metadata are needed to release the right concurrency slot.
  const dataJson = await this.redis.hget(inflightDataKey, messageId);
  let storedMessage = null;
  if (dataJson) {
    try {
      storedMessage = JSON.parse(dataJson);
    } catch {
      // Unparseable entry: proceed without it (concurrency release is skipped).
    }
  }
  const descriptor = storedMessage ? this.queueDescriptorCache.get(queueId) ?? {
    id: queueId,
    tenantId: storedMessage.tenantId,
    metadata: storedMessage.metadata ?? {}
  } : { id: queueId, tenantId: "", metadata: {} };
  await this.visibilityManager.complete(messageId, queueId);
  if (this.concurrencyManager && storedMessage) {
    await this.concurrencyManager.release(descriptor, messageId);
  }
  // If the queue is now empty, drop it from the master queue and local caches.
  const removed = await this.redis.updateMasterQueueIfEmpty(masterQueueKey, queueKey, queueId);
  if (removed === 1) {
    this.queueDescriptorCache.delete(queueId);
    this.queueCooloffStates.delete(queueId);
  }
  this.telemetry.recordComplete();
  this.logger.debug("Message completed", {
    messageId,
    queueId
  });
}
|
|
15174
|
+
/**
 * Release a message back to the queue for processing by another consumer.
 * The message is re-queued with the current timestamp, i.e. at the back of
 * the queue, and its concurrency slot is released.
 *
 * @param messageId - The ID of the message
 * @param queueId - The queue ID the message belongs to
 */
async releaseMessage(messageId, queueId) {
  const shardId = this.masterQueue.getShardForQueue(queueId);
  const queueKey = this.keys.queueKey(queueId);
  const queueItemsKey = this.keys.queueItemsKey(queueId);
  const masterQueueKey = this.keys.masterQueueKey(shardId);
  const inflightDataKey = this.keys.inflightDataKey(shardId);
  // Read the stored copy first so the concurrency release below can target
  // the correct tenant descriptor.
  const dataJson = await this.redis.hget(inflightDataKey, messageId);
  let storedMessage = null;
  if (dataJson) {
    try {
      storedMessage = JSON.parse(dataJson);
    } catch {
      // Unparseable entry: proceed without it (concurrency release is skipped).
    }
  }
  const descriptor = storedMessage ? this.queueDescriptorCache.get(queueId) ?? {
    id: queueId,
    tenantId: storedMessage.tenantId,
    metadata: storedMessage.metadata ?? {}
  } : { id: queueId, tenantId: "", metadata: {} };
  await this.visibilityManager.release(
    messageId,
    queueId,
    queueKey,
    queueItemsKey,
    masterQueueKey,
    Date.now()
    // Put at back of queue
  );
  if (this.concurrencyManager && storedMessage) {
    await this.concurrencyManager.release(descriptor, messageId);
  }
  this.logger.debug("Message released", {
    messageId,
    queueId
  });
}
|
|
15217
|
+
/**
 * Mark a message as failed. This will trigger retry logic if configured,
 * or move the message to the dead letter queue.
 *
 * @param messageId - The ID of the message
 * @param queueId - The queue ID the message belongs to
 * @param error - Optional error that caused the failure
 */
async failMessage(messageId, queueId, error) {
  const shardId = this.masterQueue.getShardForQueue(queueId);
  const queueKey = this.keys.queueKey(queueId);
  const queueItemsKey = this.keys.queueItemsKey(queueId);
  const masterQueueKey = this.keys.masterQueueKey(shardId);
  const inflightDataKey = this.keys.inflightDataKey(shardId);
  // The stored in-flight copy is required: without it the attempt count and
  // tenant are unknown, so failure handling is skipped entirely.
  const dataJson = await this.redis.hget(inflightDataKey, messageId);
  if (!dataJson) {
    this.logger.error("Cannot fail message: not found in in-flight data", { messageId, queueId });
    return;
  }
  let storedMessage;
  try {
    storedMessage = JSON.parse(dataJson);
  } catch {
    this.logger.error("Cannot fail message: failed to parse stored message", {
      messageId,
      queueId
    });
    return;
  }
  // Fall back to a minimal descriptor when the queue is no longer cached.
  const descriptor = this.queueDescriptorCache.get(queueId) ?? {
    id: queueId,
    tenantId: storedMessage.tenantId,
    metadata: storedMessage.metadata ?? {}
  };
  await this.#handleMessageFailure(
    storedMessage,
    queueId,
    queueKey,
    queueItemsKey,
    masterQueueKey,
    descriptor,
    error
  );
}
|
|
15261
|
+
// ============================================================================
|
|
15262
|
+
// Private - Message Processing Helpers
|
|
15263
|
+
// ============================================================================
|
|
15264
|
+
// Decide the fate of a failed message: schedule a retry when the retry
// strategy yields a next delay, otherwise dead-letter it. Both paths release
// the message's concurrency slot afterwards.
async #handleMessageFailure(storedMessage, queueId, queueKey, queueItemsKey, masterQueueKey, descriptor, error) {
  this.telemetry.recordFailure();
  if (this.retryStrategy) {
    // A null delay from the strategy means retries are exhausted.
    const nextDelay = this.retryStrategy.getNextDelay(storedMessage.attempt, error);
    if (nextDelay !== null) {
      const updatedMessage = {
        ...storedMessage,
        attempt: storedMessage.attempt + 1
      };
      // Re-queue with a future score so the message only becomes visible
      // after the backoff delay has elapsed.
      await this.visibilityManager.release(
        storedMessage.id,
        queueId,
        queueKey,
        queueItemsKey,
        masterQueueKey,
        Date.now() + nextDelay
      );
      // Persist the incremented attempt count alongside the queued item.
      await this.redis.hset(queueItemsKey, storedMessage.id, JSON.stringify(updatedMessage));
      if (this.concurrencyManager) {
        await this.concurrencyManager.release(descriptor, storedMessage.id);
      }
      this.telemetry.recordRetry();
      this.logger.debug("Message scheduled for retry", {
        messageId: storedMessage.id,
        queueId,
        attempt: storedMessage.attempt + 1,
        delayMs: nextDelay
      });
      return;
    }
  }
  // No retry possible: dead-letter (or drop when DLQ is disabled).
  await this.#moveToDeadLetterQueue(storedMessage, error?.message);
  if (this.concurrencyManager) {
    await this.concurrencyManager.release(descriptor, storedMessage.id);
  }
}
|
|
15300
|
+
// Move a terminally-failed message into its tenant's dead letter queue.
// When the DLQ feature is disabled, the message is simply completed (dropped).
async #moveToDeadLetterQueue(storedMessage, errorMessage) {
  if (!this.deadLetterQueueEnabled) {
    await this.visibilityManager.complete(storedMessage.id, storedMessage.queueId);
    return;
  }
  const dlqKey = this.keys.deadLetterQueueKey(storedMessage.tenantId);
  const dlqDataKey = this.keys.deadLetterQueueDataKey(storedMessage.tenantId);
  // NOTE(review): return value unused — looks like a leftover of a removed
  // shard lookup; confirm it has no side effects before deleting.
  this.masterQueue.getShardForQueue(storedMessage.queueId);
  const dlqMessage = {
    id: storedMessage.id,
    queueId: storedMessage.queueId,
    tenantId: storedMessage.tenantId,
    payload: storedMessage.payload,
    deadLetteredAt: Date.now(),
    attempts: storedMessage.attempt,
    lastError: errorMessage,
    originalTimestamp: storedMessage.timestamp
  };
  // Remove from in-flight first, then write the DLQ index entry and payload
  // together in a single pipelined round trip.
  await this.visibilityManager.complete(storedMessage.id, storedMessage.queueId);
  const pipeline = this.redis.pipeline();
  pipeline.zadd(dlqKey, dlqMessage.deadLetteredAt, storedMessage.id);
  pipeline.hset(dlqDataKey, storedMessage.id, JSON.stringify(dlqMessage));
  await pipeline.exec();
  this.telemetry.recordDLQ();
  this.logger.info("Message moved to DLQ", {
    messageId: storedMessage.id,
    queueId: storedMessage.queueId,
    tenantId: storedMessage.tenantId,
    attempts: storedMessage.attempt,
    error: errorMessage
  });
}
|
|
15332
|
+
// ============================================================================
|
|
15333
|
+
// Private - Reclaim Loop
|
|
15334
|
+
// ============================================================================
|
|
15335
|
+
// Periodic loop that reclaims messages whose visibility timeout has lapsed.
// Uses timers/promises setInterval as an async iterator bound to the shared
// abort signal, so stop() terminates the loop promptly.
async #runReclaimLoop() {
  try {
    for await (const _ of promises.setInterval(this.reclaimIntervalMs, null, {
      signal: this.abortController.signal
    })) {
      try {
        await this.#reclaimTimedOutMessages();
      } catch (error) {
        // Keep the loop alive across individual reclaim failures.
        this.logger.error("Reclaim loop error", {
          error: error instanceof Error ? error.message : String(error)
        });
      }
    }
  } catch (error) {
    if (isAbortError(error)) {
      // Expected on shutdown: the interval iterator rejects when aborted.
      this.logger.debug("Reclaim loop aborted");
      return;
    }
    throw error;
  }
}
|
|
15356
|
+
async #reclaimTimedOutMessages() {
|
|
15357
|
+
let totalReclaimed = 0;
|
|
15358
|
+
for (let shardId = 0; shardId < this.shardCount; shardId++) {
|
|
15359
|
+
const reclaimed = await this.visibilityManager.reclaimTimedOut(shardId, (queueId) => ({
|
|
15360
|
+
queueKey: this.keys.queueKey(queueId),
|
|
15361
|
+
queueItemsKey: this.keys.queueItemsKey(queueId),
|
|
15362
|
+
masterQueueKey: this.keys.masterQueueKey(this.masterQueue.getShardForQueue(queueId))
|
|
15363
|
+
}));
|
|
15364
|
+
totalReclaimed += reclaimed;
|
|
15365
|
+
}
|
|
15366
|
+
if (totalReclaimed > 0) {
|
|
15367
|
+
this.logger.info("Reclaimed timed-out messages", { count: totalReclaimed });
|
|
15368
|
+
}
|
|
15369
|
+
}
|
|
15370
|
+
// ============================================================================
|
|
15371
|
+
// Private - Cooloff State
|
|
15372
|
+
// ============================================================================
|
|
15373
|
+
#isInCooloff(queueId) {
|
|
15374
|
+
const state = this.queueCooloffStates.get(queueId);
|
|
15375
|
+
if (!state) return false;
|
|
15376
|
+
if (state.tag === "cooloff") {
|
|
15377
|
+
if (Date.now() >= state.expiresAt) {
|
|
15378
|
+
this.queueCooloffStates.delete(queueId);
|
|
15379
|
+
return false;
|
|
15380
|
+
}
|
|
15381
|
+
return true;
|
|
15382
|
+
}
|
|
15383
|
+
return false;
|
|
15384
|
+
}
|
|
15385
|
+
#incrementCooloff(queueId) {
|
|
15386
|
+
if (this.queueCooloffStates.size >= this.maxCooloffStatesSize) {
|
|
15387
|
+
this.logger.warn("Cooloff states cache hit size cap, clearing all entries", {
|
|
15388
|
+
size: this.queueCooloffStates.size,
|
|
15389
|
+
cap: this.maxCooloffStatesSize
|
|
15390
|
+
});
|
|
15391
|
+
this.queueCooloffStates.clear();
|
|
15392
|
+
}
|
|
15393
|
+
const state = this.queueCooloffStates.get(queueId) ?? {
|
|
15394
|
+
tag: "normal",
|
|
15395
|
+
consecutiveFailures: 0
|
|
15396
|
+
};
|
|
15397
|
+
if (state.tag === "normal") {
|
|
15398
|
+
const newFailures = state.consecutiveFailures + 1;
|
|
15399
|
+
if (newFailures >= this.cooloffThreshold) {
|
|
15400
|
+
this.queueCooloffStates.set(queueId, {
|
|
15401
|
+
tag: "cooloff",
|
|
15402
|
+
expiresAt: Date.now() + this.cooloffPeriodMs
|
|
15403
|
+
});
|
|
15404
|
+
this.logger.debug("Queue entered cooloff", {
|
|
15405
|
+
queueId,
|
|
15406
|
+
cooloffPeriodMs: this.cooloffPeriodMs,
|
|
15407
|
+
consecutiveFailures: newFailures
|
|
15408
|
+
});
|
|
15409
|
+
} else {
|
|
15410
|
+
this.queueCooloffStates.set(queueId, {
|
|
15411
|
+
tag: "normal",
|
|
15412
|
+
consecutiveFailures: newFailures
|
|
15413
|
+
});
|
|
15414
|
+
}
|
|
15415
|
+
}
|
|
15416
|
+
}
|
|
15417
|
+
// Clear all failure/cooloff bookkeeping for a queue; a single successful
// claim batch fully resets the backoff (see #claimAndPushToWorkerQueue).
#resetCooloff(queueId) {
  this.queueCooloffStates.delete(queueId);
}
|
|
15420
|
+
// ============================================================================
|
|
15421
|
+
// Private - Helpers
|
|
15422
|
+
// ============================================================================
|
|
15423
|
+
#createSchedulerContext() {
|
|
15424
|
+
return {
|
|
15425
|
+
getCurrentConcurrency: async (groupName, groupId) => {
|
|
15426
|
+
if (!this.concurrencyManager) return 0;
|
|
15427
|
+
return this.concurrencyManager.getCurrentConcurrency(groupName, groupId);
|
|
15428
|
+
},
|
|
15429
|
+
getConcurrencyLimit: async (groupName, groupId) => {
|
|
15430
|
+
if (!this.concurrencyManager) return Infinity;
|
|
15431
|
+
return this.concurrencyManager.getConcurrencyLimit(groupName, groupId);
|
|
15432
|
+
},
|
|
15433
|
+
isAtCapacity: async (groupName, groupId) => {
|
|
15434
|
+
if (!this.concurrencyManager) return false;
|
|
15435
|
+
return this.concurrencyManager.isAtCapacity(groupName, groupId);
|
|
15436
|
+
},
|
|
15437
|
+
getQueueDescriptor: (queueId) => {
|
|
15438
|
+
return this.queueDescriptorCache.get(queueId) ?? {
|
|
15439
|
+
id: queueId,
|
|
15440
|
+
tenantId: this.keys.extractTenantId(queueId),
|
|
15441
|
+
metadata: {}
|
|
15442
|
+
};
|
|
15443
|
+
}
|
|
15444
|
+
};
|
|
15445
|
+
}
|
|
15446
|
+
// ============================================================================
|
|
15447
|
+
// Private - Redis Commands
|
|
15448
|
+
// ============================================================================
|
|
15449
|
+
// Registers the custom Lua scripts this queue needs on the Redis client
// (ioredis defineCommand). Each script executes atomically on the Redis
// server, so enqueue + master-queue bookkeeping cannot interleave with
// other clients.
#registerCommands() {
  // Enqueue one message: add it to the queue's sorted set (scored by
  // timestamp), store its payload in the items hash, then refresh the
  // master queue's score for this queue to the oldest pending timestamp.
  this.redis.defineCommand("enqueueMessageAtomic", {
    numberOfKeys: 3,
    lua: `
      local queueKey = KEYS[1]
      local queueItemsKey = KEYS[2]
      local masterQueueKey = KEYS[3]

      local queueId = ARGV[1]
      local messageId = ARGV[2]
      local timestamp = tonumber(ARGV[3])
      local payload = ARGV[4]

      -- Add to sorted set (score = timestamp)
      redis.call('ZADD', queueKey, timestamp, messageId)

      -- Store payload in hash
      redis.call('HSET', queueItemsKey, messageId, payload)

      -- Update master queue with oldest message timestamp
      local oldest = redis.call('ZRANGE', queueKey, 0, 0, 'WITHSCORES')
      if #oldest >= 2 then
        redis.call('ZADD', masterQueueKey, oldest[2], queueId)
      end

      return 1
    `
  });
  // Batch variant: ARGV[1] is the queueId, followed by flat triples of
  // [messageId, timestamp, payload]. Returns the number of messages
  // enqueued ((#ARGV - 1) / 3).
  this.redis.defineCommand("enqueueBatchAtomic", {
    numberOfKeys: 3,
    lua: `
      local queueKey = KEYS[1]
      local queueItemsKey = KEYS[2]
      local masterQueueKey = KEYS[3]

      local queueId = ARGV[1]

      -- Args after queueId are triples: [messageId, timestamp, payload, ...]
      for i = 2, #ARGV, 3 do
        local messageId = ARGV[i]
        local timestamp = tonumber(ARGV[i + 1])
        local payload = ARGV[i + 2]

        -- Add to sorted set
        redis.call('ZADD', queueKey, timestamp, messageId)

        -- Store payload in hash
        redis.call('HSET', queueItemsKey, messageId, payload)
      end

      -- Update master queue with oldest message timestamp
      local oldest = redis.call('ZRANGE', queueKey, 0, 0, 'WITHSCORES')
      if #oldest >= 2 then
        redis.call('ZADD', masterQueueKey, oldest[2], queueId)
      end

      return (#ARGV - 1) / 3
    `
  });
  // Reconcile the master queue entry for a queue: drop it when the queue
  // is empty (returns 1), otherwise re-score it with the oldest pending
  // message's timestamp (returns 0).
  this.redis.defineCommand("updateMasterQueueIfEmpty", {
    numberOfKeys: 2,
    lua: `
      local masterQueueKey = KEYS[1]
      local queueKey = KEYS[2]
      local queueId = ARGV[1]

      local count = redis.call('ZCARD', queueKey)
      if count == 0 then
        redis.call('ZREM', masterQueueKey, queueId)
        return 1
      else
        -- Update with oldest message timestamp
        local oldest = redis.call('ZRANGE', queueKey, 0, 0, 'WITHSCORES')
        if #oldest >= 2 then
          redis.call('ZADD', masterQueueKey, oldest[2], queueId)
        end
        return 0
      end
    `
  });
  // The worker-queue manager registers its own scripts on the same client.
  if (this.workerQueueManager) {
    this.workerQueueManager.registerCommands(this.redis);
  }
}
|
|
15533
|
+
};
|
|
15534
|
+
|
|
15535
|
+
// CommonJS export surface of the bundle. Generated by the bundler; the
// assignment order determines the enumeration order of the exports
// object, so do not reorder.
exports.BaseScheduler = BaseScheduler;
exports.BatchedSpanManager = BatchedSpanManager;
exports.CallbackFairQueueKeyProducer = CallbackFairQueueKeyProducer;
exports.ConcurrencyManager = ConcurrencyManager;
exports.CronSchema = CronSchema;
exports.CustomRetry = CustomRetry;
exports.DRRScheduler = DRRScheduler;
exports.DefaultFairQueueKeyProducer = DefaultFairQueueKeyProducer;
exports.ExponentialBackoffRetry = ExponentialBackoffRetry;
exports.FairQueue = FairQueue;
exports.FairQueueAttributes = FairQueueAttributes;
exports.FairQueueTelemetry = FairQueueTelemetry;
exports.FixedDelayRetry = FixedDelayRetry;
exports.ImmediateRetry = ImmediateRetry;
exports.LinearBackoffRetry = LinearBackoffRetry;
exports.MasterQueue = MasterQueue;
exports.MessagingAttributes = MessagingAttributes;
exports.NoRetry = NoRetry;
exports.NoopScheduler = NoopScheduler;
exports.RoundRobinScheduler = RoundRobinScheduler;
exports.SimpleQueue = SimpleQueue;
exports.VisibilityManager = VisibilityManager;
exports.WeightedScheduler = WeightedScheduler;
exports.Worker = Worker;
exports.WorkerQueueManager = WorkerQueueManager;
exports.createDefaultRetryStrategy = createDefaultRetryStrategy;
exports.defaultRetryOptions = defaultRetryOptions;
exports.isAbortError = isAbortError;
exports.noopTelemetry = noopTelemetry;
|
11733
15564
|
//# sourceMappingURL=index.cjs.map
|
|
11734
15565
|
//# sourceMappingURL=index.cjs.map
|