@trigger.dev/redis-worker 4.2.0 → 4.3.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.cjs +3423 -3
- package/dist/index.cjs.map +1 -1
- package/dist/index.d.cts +1456 -5
- package/dist/index.d.ts +1456 -5
- package/dist/index.js +3397 -2
- package/dist/index.js.map +1 -1
- package/package.json +4 -2
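
The bulk of this release is a new fair-queue subsystem bundled into dist/index.cjs: a ConcurrencyManager, a sharded MasterQueue, FairQueueTelemetry, a VisibilityManager for in-flight tracking, a WorkerQueueManager, key producers, and a BaseScheduler. As a minimal sketch of the central reservation pattern, here is the reserveConcurrency Lua script copied from the bundled source below, wired to a plain ioredis client; the TypeScript wrapper around it (client setup, the tryReserve helper, the typings) is illustrative only and not part of the package's public API:

import Redis from "ioredis";

const redis = new Redis();

// Lua script copied from the bundled ConcurrencyManager below. It checks every
// concurrency-group set before adding the message to any of them, so the
// reservation is all-or-nothing. numberOfKeys is omitted, so the key count is
// supplied per call, matching how the bundle invokes it.
redis.defineCommand("reserveConcurrency", {
  lua: `
    local numGroups = #KEYS
    local messageId = ARGV[1]

    -- Check all groups first
    for i = 1, numGroups do
      local key = KEYS[i]
      local limit = tonumber(ARGV[1 + i]) -- Limits start at ARGV[2]
      local current = redis.call('SCARD', key)

      if current >= limit then
        return 0 -- At capacity
      end
    end

    -- All groups have capacity, add message to all
    for i = 1, numGroups do
      local key = KEYS[i]
      redis.call('SADD', key, messageId)
    end

    return 1
  `,
});

// Hypothetical helper (not a package export): reserve one slot per group.
// Returns true only when every group was below its limit.
async function tryReserve(keys: string[], limits: number[], messageId: string): Promise<boolean> {
  const client = redis as Redis & {
    reserveConcurrency(numKeys: number, ...args: (string | number)[]): Promise<number>;
  };
  const result = await client.reserveConcurrency(keys.length, ...keys, messageId, ...limits.map(String));
  return result === 1;
}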
package/dist/index.cjs (CHANGED)
|
@@ -4,18 +4,21 @@ var process2 = require('process');
|
|
|
4
4
|
var os = require('os');
|
|
5
5
|
var tty = require('tty');
|
|
6
6
|
var logger$1 = require('@trigger.dev/core/logger');
|
|
7
|
-
var crypto = require('crypto');
|
|
7
|
+
var crypto$1 = require('crypto');
|
|
8
8
|
require('@trigger.dev/core/v3/utils/flattenAttributes');
|
|
9
9
|
var v3 = require('@trigger.dev/core/v3');
|
|
10
10
|
var serverOnly = require('@trigger.dev/core/v3/serverOnly');
|
|
11
11
|
var zod = require('zod');
|
|
12
12
|
var cronParser = require('cron-parser');
|
|
13
|
+
var promises = require('timers/promises');
|
|
14
|
+
var seedrandom = require('seedrandom');
|
|
13
15
|
|
|
14
16
|
function _interopDefault (e) { return e && e.__esModule ? e : { default: e }; }
|
|
15
17
|
|
|
16
18
|
var process2__default = /*#__PURE__*/_interopDefault(process2);
|
|
17
19
|
var os__default = /*#__PURE__*/_interopDefault(os);
|
|
18
20
|
var tty__default = /*#__PURE__*/_interopDefault(tty);
|
|
21
|
+
var seedrandom__default = /*#__PURE__*/_interopDefault(seedrandom);
|
|
19
22
|
|
|
20
23
|
var __create = Object.create;
|
|
21
24
|
var __defProp = Object.defineProperty;
|
|
@@ -9419,10 +9422,10 @@ var poolOffset;
|
|
|
9419
9422
|
function fillPool(bytes) {
|
|
9420
9423
|
if (!pool || pool.length < bytes) {
|
|
9421
9424
|
pool = Buffer.allocUnsafe(bytes * POOL_SIZE_MULTIPLIER);
|
|
9422
|
-
crypto.webcrypto.getRandomValues(pool);
|
|
9425
|
+
crypto$1.webcrypto.getRandomValues(pool);
|
|
9423
9426
|
poolOffset = 0;
|
|
9424
9427
|
} else if (poolOffset + bytes > pool.length) {
|
|
9425
|
-
crypto.webcrypto.getRandomValues(pool);
|
|
9428
|
+
crypto$1.webcrypto.getRandomValues(pool);
|
|
9426
9429
|
poolOffset = 0;
|
|
9427
9430
|
}
|
|
9428
9431
|
poolOffset += bytes;
|
|
@@ -11727,8 +11730,3425 @@ var Worker = class _Worker {
|
|
|
11727
11730
|
}
|
|
11728
11731
|
};
|
|
11729
11732
|
|
|
11733
|
+
// src/fair-queue/concurrency.ts
|
|
11734
|
+
var ConcurrencyManager = class {
|
|
11735
|
+
constructor(options) {
|
|
11736
|
+
this.options = options;
|
|
11737
|
+
this.redis = createRedisClient(options.redis);
|
|
11738
|
+
this.keys = options.keys;
|
|
11739
|
+
this.groups = options.groups;
|
|
11740
|
+
this.groupsByName = new Map(options.groups.map((g) => [g.name, g]));
|
|
11741
|
+
this.#registerCommands();
|
|
11742
|
+
}
|
|
11743
|
+
redis;
|
|
11744
|
+
keys;
|
|
11745
|
+
groups;
|
|
11746
|
+
groupsByName;
|
|
11747
|
+
// ============================================================================
|
|
11748
|
+
// Public Methods
|
|
11749
|
+
// ============================================================================
|
|
11750
|
+
/**
|
|
11751
|
+
* Check if a message can be processed given all concurrency constraints.
|
|
11752
|
+
* Checks all configured groups and returns the first one at capacity.
|
|
11753
|
+
*/
|
|
11754
|
+
async canProcess(queue) {
|
|
11755
|
+
for (const group of this.groups) {
|
|
11756
|
+
const groupId = group.extractGroupId(queue);
|
|
11757
|
+
const isAtCapacity = await this.isAtCapacity(group.name, groupId);
|
|
11758
|
+
if (isAtCapacity) {
|
|
11759
|
+
const state = await this.getState(group.name, groupId);
|
|
11760
|
+
return {
|
|
11761
|
+
allowed: false,
|
|
11762
|
+
blockedBy: state
|
|
11763
|
+
};
|
|
11764
|
+
}
|
|
11765
|
+
}
|
|
11766
|
+
return { allowed: true };
|
|
11767
|
+
}
|
|
11768
|
+
/**
|
|
11769
|
+
* Reserve concurrency slots for a message across all groups.
|
|
11770
|
+
* Atomic - either all groups are reserved or none.
|
|
11771
|
+
*
|
|
11772
|
+
* @returns true if reservation successful, false if any group is at capacity
|
|
11773
|
+
*/
|
|
11774
|
+
async reserve(queue, messageId) {
|
|
11775
|
+
const groupData = await Promise.all(
|
|
11776
|
+
this.groups.map(async (group) => {
|
|
11777
|
+
const groupId = group.extractGroupId(queue);
|
|
11778
|
+
const limit = await group.getLimit(groupId);
|
|
11779
|
+
return {
|
|
11780
|
+
key: this.keys.concurrencyKey(group.name, groupId),
|
|
11781
|
+
limit: limit || group.defaultLimit
|
|
11782
|
+
};
|
|
11783
|
+
})
|
|
11784
|
+
);
|
|
11785
|
+
const keys = groupData.map((g) => g.key);
|
|
11786
|
+
const limits = groupData.map((g) => g.limit.toString());
|
|
11787
|
+
const result = await this.redis.reserveConcurrency(keys.length, keys, messageId, ...limits);
|
|
11788
|
+
return result === 1;
|
|
11789
|
+
}
|
|
11790
|
+
/**
|
|
11791
|
+
* Release concurrency slots for a message across all groups.
|
|
11792
|
+
*/
|
|
11793
|
+
async release(queue, messageId) {
|
|
11794
|
+
const pipeline = this.redis.pipeline();
|
|
11795
|
+
for (const group of this.groups) {
|
|
11796
|
+
const groupId = group.extractGroupId(queue);
|
|
11797
|
+
const key = this.keys.concurrencyKey(group.name, groupId);
|
|
11798
|
+
pipeline.srem(key, messageId);
|
|
11799
|
+
}
|
|
11800
|
+
await pipeline.exec();
|
|
11801
|
+
}
|
|
11802
|
+
/**
|
|
11803
|
+
* Get current concurrency for a specific group.
|
|
11804
|
+
*/
|
|
11805
|
+
async getCurrentConcurrency(groupName, groupId) {
|
|
11806
|
+
const key = this.keys.concurrencyKey(groupName, groupId);
|
|
11807
|
+
return await this.redis.scard(key);
|
|
11808
|
+
}
|
|
11809
|
+
/**
|
|
11810
|
+
* Get concurrency limit for a specific group.
|
|
11811
|
+
*/
|
|
11812
|
+
async getConcurrencyLimit(groupName, groupId) {
|
|
11813
|
+
const group = this.groupsByName.get(groupName);
|
|
11814
|
+
if (!group) {
|
|
11815
|
+
throw new Error(`Unknown concurrency group: ${groupName}`);
|
|
11816
|
+
}
|
|
11817
|
+
return await group.getLimit(groupId) || group.defaultLimit;
|
|
11818
|
+
}
|
|
11819
|
+
/**
|
|
11820
|
+
* Check if a group is at capacity.
|
|
11821
|
+
*/
|
|
11822
|
+
async isAtCapacity(groupName, groupId) {
|
|
11823
|
+
const [current, limit] = await Promise.all([
|
|
11824
|
+
this.getCurrentConcurrency(groupName, groupId),
|
|
11825
|
+
this.getConcurrencyLimit(groupName, groupId)
|
|
11826
|
+
]);
|
|
11827
|
+
return current >= limit;
|
|
11828
|
+
}
|
|
11829
|
+
/**
|
|
11830
|
+
* Get full state for a group.
|
|
11831
|
+
*/
|
|
11832
|
+
async getState(groupName, groupId) {
|
|
11833
|
+
const [current, limit] = await Promise.all([
|
|
11834
|
+
this.getCurrentConcurrency(groupName, groupId),
|
|
11835
|
+
this.getConcurrencyLimit(groupName, groupId)
|
|
11836
|
+
]);
|
|
11837
|
+
return {
|
|
11838
|
+
groupName,
|
|
11839
|
+
groupId,
|
|
11840
|
+
current,
|
|
11841
|
+
limit
|
|
11842
|
+
};
|
|
11843
|
+
}
|
|
11844
|
+
/**
|
|
11845
|
+
* Get all active message IDs for a group.
|
|
11846
|
+
*/
|
|
11847
|
+
async getActiveMessages(groupName, groupId) {
|
|
11848
|
+
const key = this.keys.concurrencyKey(groupName, groupId);
|
|
11849
|
+
return await this.redis.smembers(key);
|
|
11850
|
+
}
|
|
11851
|
+
/**
|
|
11852
|
+
* Force-clear concurrency for a group (use with caution).
|
|
11853
|
+
* Useful for cleanup after crashes.
|
|
11854
|
+
*/
|
|
11855
|
+
async clearGroup(groupName, groupId) {
|
|
11856
|
+
const key = this.keys.concurrencyKey(groupName, groupId);
|
|
11857
|
+
await this.redis.del(key);
|
|
11858
|
+
}
|
|
11859
|
+
/**
|
|
11860
|
+
* Remove a specific message from concurrency tracking.
|
|
11861
|
+
* Useful for cleanup.
|
|
11862
|
+
*/
|
|
11863
|
+
async removeMessage(messageId, queue) {
|
|
11864
|
+
await this.release(queue, messageId);
|
|
11865
|
+
}
|
|
11866
|
+
/**
|
|
11867
|
+
* Get configured group names.
|
|
11868
|
+
*/
|
|
11869
|
+
getGroupNames() {
|
|
11870
|
+
return this.groups.map((g) => g.name);
|
|
11871
|
+
}
|
|
11872
|
+
/**
|
|
11873
|
+
* Close the Redis connection.
|
|
11874
|
+
*/
|
|
11875
|
+
async close() {
|
|
11876
|
+
await this.redis.quit();
|
|
11877
|
+
}
|
|
11878
|
+
// ============================================================================
|
|
11879
|
+
// Private Methods
|
|
11880
|
+
// ============================================================================
|
|
11881
|
+
#registerCommands() {
|
|
11882
|
+
this.redis.defineCommand("reserveConcurrency", {
|
|
11883
|
+
lua: `
|
|
11884
|
+
local numGroups = #KEYS
|
|
11885
|
+
local messageId = ARGV[1]
|
|
11886
|
+
|
|
11887
|
+
-- Check all groups first
|
|
11888
|
+
for i = 1, numGroups do
|
|
11889
|
+
local key = KEYS[i]
|
|
11890
|
+
local limit = tonumber(ARGV[1 + i]) -- Limits start at ARGV[2]
|
|
11891
|
+
local current = redis.call('SCARD', key)
|
|
11892
|
+
|
|
11893
|
+
if current >= limit then
|
|
11894
|
+
return 0 -- At capacity
|
|
11895
|
+
end
|
|
11896
|
+
end
|
|
11897
|
+
|
|
11898
|
+
-- All groups have capacity, add message to all
|
|
11899
|
+
for i = 1, numGroups do
|
|
11900
|
+
local key = KEYS[i]
|
|
11901
|
+
redis.call('SADD', key, messageId)
|
|
11902
|
+
end
|
|
11903
|
+
|
|
11904
|
+
return 1
|
|
11905
|
+
`
|
|
11906
|
+
});
|
|
11907
|
+
}
|
|
11908
|
+
};
|
|
11909
|
+
var MasterQueue = class {
|
|
11910
|
+
constructor(options) {
|
|
11911
|
+
this.options = options;
|
|
11912
|
+
this.redis = createRedisClient(options.redis);
|
|
11913
|
+
this.keys = options.keys;
|
|
11914
|
+
this.shardCount = Math.max(1, options.shardCount);
|
|
11915
|
+
this.#registerCommands();
|
|
11916
|
+
}
|
|
11917
|
+
redis;
|
|
11918
|
+
keys;
|
|
11919
|
+
shardCount;
|
|
11920
|
+
// ============================================================================
|
|
11921
|
+
// Public Methods
|
|
11922
|
+
// ============================================================================
|
|
11923
|
+
/**
|
|
11924
|
+
* Get the shard ID for a queue.
|
|
11925
|
+
* Uses consistent hashing based on queue ID.
|
|
11926
|
+
*/
|
|
11927
|
+
getShardForQueue(queueId) {
|
|
11928
|
+
return this.#hashToShard(queueId);
|
|
11929
|
+
}
|
|
11930
|
+
/**
|
|
11931
|
+
* Add a queue to its master queue shard.
|
|
11932
|
+
* Updates the score to the oldest message timestamp.
|
|
11933
|
+
*
|
|
11934
|
+
* @param queueId - The queue identifier
|
|
11935
|
+
* @param oldestMessageTimestamp - Timestamp of the oldest message in the queue
|
|
11936
|
+
*/
|
|
11937
|
+
async addQueue(queueId, oldestMessageTimestamp) {
|
|
11938
|
+
const shardId = this.getShardForQueue(queueId);
|
|
11939
|
+
const masterKey = this.keys.masterQueueKey(shardId);
|
|
11940
|
+
await this.redis.zadd(masterKey, oldestMessageTimestamp, queueId);
|
|
11941
|
+
}
|
|
11942
|
+
/**
|
|
11943
|
+
* Update a queue's score in the master queue.
|
|
11944
|
+
* This is typically called after dequeuing to update to the new oldest message.
|
|
11945
|
+
*
|
|
11946
|
+
* @param queueId - The queue identifier
|
|
11947
|
+
* @param newOldestTimestamp - New timestamp of the oldest message
|
|
11948
|
+
*/
|
|
11949
|
+
async updateQueueScore(queueId, newOldestTimestamp) {
|
|
11950
|
+
const shardId = this.getShardForQueue(queueId);
|
|
11951
|
+
const masterKey = this.keys.masterQueueKey(shardId);
|
|
11952
|
+
await this.redis.zadd(masterKey, newOldestTimestamp, queueId);
|
|
11953
|
+
}
|
|
11954
|
+
/**
|
|
11955
|
+
* Remove a queue from its master queue shard.
|
|
11956
|
+
* Called when a queue becomes empty.
|
|
11957
|
+
*
|
|
11958
|
+
* @param queueId - The queue identifier
|
|
11959
|
+
*/
|
|
11960
|
+
async removeQueue(queueId) {
|
|
11961
|
+
const shardId = this.getShardForQueue(queueId);
|
|
11962
|
+
const masterKey = this.keys.masterQueueKey(shardId);
|
|
11963
|
+
await this.redis.zrem(masterKey, queueId);
|
|
11964
|
+
}
|
|
11965
|
+
/**
|
|
11966
|
+
* Get queues from a shard, ordered by oldest message (lowest score first).
|
|
11967
|
+
*
|
|
11968
|
+
* @param shardId - The shard to query
|
|
11969
|
+
* @param limit - Maximum number of queues to return (default: 1000)
|
|
11970
|
+
* @param maxScore - Maximum score (timestamp) to include (default: now)
|
|
11971
|
+
*/
|
|
11972
|
+
async getQueuesFromShard(shardId, limit = 1e3, maxScore) {
|
|
11973
|
+
const masterKey = this.keys.masterQueueKey(shardId);
|
|
11974
|
+
const score = maxScore ?? Date.now();
|
|
11975
|
+
const results = await this.redis.zrangebyscore(
|
|
11976
|
+
masterKey,
|
|
11977
|
+
"-inf",
|
|
11978
|
+
score,
|
|
11979
|
+
"WITHSCORES",
|
|
11980
|
+
"LIMIT",
|
|
11981
|
+
0,
|
|
11982
|
+
limit
|
|
11983
|
+
);
|
|
11984
|
+
const queues = [];
|
|
11985
|
+
for (let i = 0; i < results.length; i += 2) {
|
|
11986
|
+
const queueId = results[i];
|
|
11987
|
+
const scoreStr = results[i + 1];
|
|
11988
|
+
if (queueId && scoreStr) {
|
|
11989
|
+
queues.push({
|
|
11990
|
+
queueId,
|
|
11991
|
+
score: parseFloat(scoreStr),
|
|
11992
|
+
tenantId: this.keys.extractTenantId(queueId)
|
|
11993
|
+
});
|
|
11994
|
+
}
|
|
11995
|
+
}
|
|
11996
|
+
return queues;
|
|
11997
|
+
}
|
|
11998
|
+
/**
|
|
11999
|
+
* Get the number of queues in a shard.
|
|
12000
|
+
*/
|
|
12001
|
+
async getShardQueueCount(shardId) {
|
|
12002
|
+
const masterKey = this.keys.masterQueueKey(shardId);
|
|
12003
|
+
return await this.redis.zcard(masterKey);
|
|
12004
|
+
}
|
|
12005
|
+
/**
|
|
12006
|
+
* Get total queue count across all shards.
|
|
12007
|
+
*/
|
|
12008
|
+
async getTotalQueueCount() {
|
|
12009
|
+
const counts = await Promise.all(
|
|
12010
|
+
Array.from({ length: this.shardCount }, (_, i) => this.getShardQueueCount(i))
|
|
12011
|
+
);
|
|
12012
|
+
return counts.reduce((sum, count) => sum + count, 0);
|
|
12013
|
+
}
|
|
12014
|
+
/**
|
|
12015
|
+
* Atomically add a queue to master queue only if queue has messages.
|
|
12016
|
+
* Uses Lua script for atomicity.
|
|
12017
|
+
*
|
|
12018
|
+
* @param queueId - The queue identifier
|
|
12019
|
+
* @param queueKey - The actual queue sorted set key
|
|
12020
|
+
* @returns Whether the queue was added to the master queue
|
|
12021
|
+
*/
|
|
12022
|
+
async addQueueIfNotEmpty(queueId, queueKey) {
|
|
12023
|
+
const shardId = this.getShardForQueue(queueId);
|
|
12024
|
+
const masterKey = this.keys.masterQueueKey(shardId);
|
|
12025
|
+
const result = await this.redis.addQueueIfNotEmpty(masterKey, queueKey, queueId);
|
|
12026
|
+
return result === 1;
|
|
12027
|
+
}
|
|
12028
|
+
/**
|
|
12029
|
+
* Atomically remove a queue from master queue only if queue is empty.
|
|
12030
|
+
* Uses Lua script for atomicity.
|
|
12031
|
+
*
|
|
12032
|
+
* @param queueId - The queue identifier
|
|
12033
|
+
* @param queueKey - The actual queue sorted set key
|
|
12034
|
+
* @returns Whether the queue was removed from the master queue
|
|
12035
|
+
*/
|
|
12036
|
+
async removeQueueIfEmpty(queueId, queueKey) {
|
|
12037
|
+
const shardId = this.getShardForQueue(queueId);
|
|
12038
|
+
const masterKey = this.keys.masterQueueKey(shardId);
|
|
12039
|
+
const result = await this.redis.removeQueueIfEmpty(masterKey, queueKey, queueId);
|
|
12040
|
+
return result === 1;
|
|
12041
|
+
}
|
|
12042
|
+
/**
|
|
12043
|
+
* Close the Redis connection.
|
|
12044
|
+
*/
|
|
12045
|
+
async close() {
|
|
12046
|
+
await this.redis.quit();
|
|
12047
|
+
}
|
|
12048
|
+
// ============================================================================
|
|
12049
|
+
// Private Methods
|
|
12050
|
+
// ============================================================================
|
|
12051
|
+
/**
|
|
12052
|
+
* Map queue ID to shard using Jump Consistent Hash.
|
|
12053
|
+
* Provides better distribution than djb2 and minimal reshuffling when shard count changes.
|
|
12054
|
+
*/
|
|
12055
|
+
#hashToShard(queueId) {
|
|
12056
|
+
return serverOnly.jumpHash(queueId, this.shardCount);
|
|
12057
|
+
}
|
|
12058
|
+
#registerCommands() {
|
|
12059
|
+
this.redis.defineCommand("addQueueIfNotEmpty", {
|
|
12060
|
+
numberOfKeys: 2,
|
|
12061
|
+
lua: `
|
|
12062
|
+
local masterKey = KEYS[1]
|
|
12063
|
+
local queueKey = KEYS[2]
|
|
12064
|
+
local queueId = ARGV[1]
|
|
12065
|
+
|
|
12066
|
+
-- Check if queue has any messages
|
|
12067
|
+
local count = redis.call('ZCARD', queueKey)
|
|
12068
|
+
if count == 0 then
|
|
12069
|
+
return 0
|
|
12070
|
+
end
|
|
12071
|
+
|
|
12072
|
+
-- Get the oldest message timestamp (lowest score)
|
|
12073
|
+
local oldest = redis.call('ZRANGE', queueKey, 0, 0, 'WITHSCORES')
|
|
12074
|
+
if #oldest == 0 then
|
|
12075
|
+
return 0
|
|
12076
|
+
end
|
|
12077
|
+
|
|
12078
|
+
local score = oldest[2]
|
|
12079
|
+
|
|
12080
|
+
-- Add to master queue with the oldest message score
|
|
12081
|
+
redis.call('ZADD', masterKey, score, queueId)
|
|
12082
|
+
return 1
|
|
12083
|
+
`
|
|
12084
|
+
});
|
|
12085
|
+
this.redis.defineCommand("removeQueueIfEmpty", {
|
|
12086
|
+
numberOfKeys: 2,
|
|
12087
|
+
lua: `
|
|
12088
|
+
local masterKey = KEYS[1]
|
|
12089
|
+
local queueKey = KEYS[2]
|
|
12090
|
+
local queueId = ARGV[1]
|
|
12091
|
+
|
|
12092
|
+
-- Check if queue is empty
|
|
12093
|
+
local count = redis.call('ZCARD', queueKey)
|
|
12094
|
+
if count > 0 then
|
|
12095
|
+
return 0
|
|
12096
|
+
end
|
|
12097
|
+
|
|
12098
|
+
-- Remove from master queue
|
|
12099
|
+
redis.call('ZREM', masterKey, queueId)
|
|
12100
|
+
return 1
|
|
12101
|
+
`
|
|
12102
|
+
});
|
|
12103
|
+
}
|
|
12104
|
+
};
|
|
12105
|
+
|
|
12106
|
+
// src/fair-queue/telemetry.ts
|
|
12107
|
+
var FairQueueAttributes = {
|
|
12108
|
+
QUEUE_ID: "fairqueue.queue_id",
|
|
12109
|
+
TENANT_ID: "fairqueue.tenant_id",
|
|
12110
|
+
MESSAGE_ID: "fairqueue.message_id",
|
|
12111
|
+
SHARD_ID: "fairqueue.shard_id",
|
|
12112
|
+
WORKER_QUEUE: "fairqueue.worker_queue",
|
|
12113
|
+
CONSUMER_ID: "fairqueue.consumer_id",
|
|
12114
|
+
ATTEMPT: "fairqueue.attempt",
|
|
12115
|
+
CONCURRENCY_GROUP: "fairqueue.concurrency_group",
|
|
12116
|
+
MESSAGE_COUNT: "fairqueue.message_count",
|
|
12117
|
+
RESULT: "fairqueue.result"
|
|
12118
|
+
};
|
|
12119
|
+
var MessagingAttributes = {
|
|
12120
|
+
SYSTEM: "messaging.system",
|
|
12121
|
+
OPERATION: "messaging.operation",
|
|
12122
|
+
MESSAGE_ID: "messaging.message_id",
|
|
12123
|
+
DESTINATION_NAME: "messaging.destination.name"
|
|
12124
|
+
};
|
|
12125
|
+
var FairQueueTelemetry = class {
|
|
12126
|
+
tracer;
|
|
12127
|
+
meter;
|
|
12128
|
+
metrics;
|
|
12129
|
+
name;
|
|
12130
|
+
constructor(options) {
|
|
12131
|
+
this.tracer = options.tracer;
|
|
12132
|
+
this.meter = options.meter;
|
|
12133
|
+
this.name = options.name ?? "fairqueue";
|
|
12134
|
+
if (this.meter) {
|
|
12135
|
+
this.#initializeMetrics();
|
|
12136
|
+
}
|
|
12137
|
+
}
|
|
12138
|
+
// ============================================================================
|
|
12139
|
+
// Tracing
|
|
12140
|
+
// ============================================================================
|
|
12141
|
+
/**
|
|
12142
|
+
* Create a traced span for an operation.
|
|
12143
|
+
* Returns the result of the function, or throws any error after recording it.
|
|
12144
|
+
*/
|
|
12145
|
+
async trace(name, fn, options) {
|
|
12146
|
+
if (!this.tracer) {
|
|
12147
|
+
return fn(noopSpan);
|
|
12148
|
+
}
|
|
12149
|
+
const spanOptions = {
|
|
12150
|
+
kind: options?.kind,
|
|
12151
|
+
attributes: {
|
|
12152
|
+
[MessagingAttributes.SYSTEM]: this.name,
|
|
12153
|
+
...options?.attributes
|
|
12154
|
+
}
|
|
12155
|
+
};
|
|
12156
|
+
return this.tracer.startActiveSpan(`${this.name}.${name}`, spanOptions, async (span) => {
|
|
12157
|
+
try {
|
|
12158
|
+
const result = await fn(span);
|
|
12159
|
+
return result;
|
|
12160
|
+
} catch (error) {
|
|
12161
|
+
if (error instanceof Error) {
|
|
12162
|
+
span.recordException(error);
|
|
12163
|
+
} else {
|
|
12164
|
+
span.recordException(new Error(String(error)));
|
|
12165
|
+
}
|
|
12166
|
+
throw error;
|
|
12167
|
+
} finally {
|
|
12168
|
+
span.end();
|
|
12169
|
+
}
|
|
12170
|
+
});
|
|
12171
|
+
}
|
|
12172
|
+
/**
|
|
12173
|
+
* Synchronous version of trace.
|
|
12174
|
+
*/
|
|
12175
|
+
traceSync(name, fn, options) {
|
|
12176
|
+
if (!this.tracer) {
|
|
12177
|
+
return fn(noopSpan);
|
|
12178
|
+
}
|
|
12179
|
+
const spanOptions = {
|
|
12180
|
+
kind: options?.kind,
|
|
12181
|
+
attributes: {
|
|
12182
|
+
[MessagingAttributes.SYSTEM]: this.name,
|
|
12183
|
+
...options?.attributes
|
|
12184
|
+
}
|
|
12185
|
+
};
|
|
12186
|
+
return this.tracer.startActiveSpan(`${this.name}.${name}`, spanOptions, (span) => {
|
|
12187
|
+
try {
|
|
12188
|
+
return fn(span);
|
|
12189
|
+
} catch (error) {
|
|
12190
|
+
if (error instanceof Error) {
|
|
12191
|
+
span.recordException(error);
|
|
12192
|
+
} else {
|
|
12193
|
+
span.recordException(new Error(String(error)));
|
|
12194
|
+
}
|
|
12195
|
+
throw error;
|
|
12196
|
+
} finally {
|
|
12197
|
+
span.end();
|
|
12198
|
+
}
|
|
12199
|
+
});
|
|
12200
|
+
}
|
|
12201
|
+
// ============================================================================
|
|
12202
|
+
// Metrics
|
|
12203
|
+
// ============================================================================
|
|
12204
|
+
/**
|
|
12205
|
+
* Record a message enqueued.
|
|
12206
|
+
*/
|
|
12207
|
+
recordEnqueue(attributes) {
|
|
12208
|
+
this.metrics?.messagesEnqueued.add(1, attributes);
|
|
12209
|
+
}
|
|
12210
|
+
/**
|
|
12211
|
+
* Record a batch of messages enqueued.
|
|
12212
|
+
*/
|
|
12213
|
+
recordEnqueueBatch(count, attributes) {
|
|
12214
|
+
this.metrics?.messagesEnqueued.add(count, attributes);
|
|
12215
|
+
}
|
|
12216
|
+
/**
|
|
12217
|
+
* Record a message completed successfully.
|
|
12218
|
+
*/
|
|
12219
|
+
recordComplete(attributes) {
|
|
12220
|
+
this.metrics?.messagesCompleted.add(1, attributes);
|
|
12221
|
+
}
|
|
12222
|
+
/**
|
|
12223
|
+
* Record a message processing failure.
|
|
12224
|
+
*/
|
|
12225
|
+
recordFailure(attributes) {
|
|
12226
|
+
this.metrics?.messagesFailed.add(1, attributes);
|
|
12227
|
+
}
|
|
12228
|
+
/**
|
|
12229
|
+
* Record a message retry.
|
|
12230
|
+
*/
|
|
12231
|
+
recordRetry(attributes) {
|
|
12232
|
+
this.metrics?.messagesRetried.add(1, attributes);
|
|
12233
|
+
}
|
|
12234
|
+
/**
|
|
12235
|
+
* Record a message sent to DLQ.
|
|
12236
|
+
*/
|
|
12237
|
+
recordDLQ(attributes) {
|
|
12238
|
+
this.metrics?.messagesToDLQ.add(1, attributes);
|
|
12239
|
+
}
|
|
12240
|
+
/**
|
|
12241
|
+
* Record message processing time.
|
|
12242
|
+
*
|
|
12243
|
+
* @param durationMs - Processing duration in milliseconds
|
|
12244
|
+
*/
|
|
12245
|
+
recordProcessingTime(durationMs, attributes) {
|
|
12246
|
+
this.metrics?.processingTime.record(durationMs, attributes);
|
|
12247
|
+
}
|
|
12248
|
+
/**
|
|
12249
|
+
* Record time a message spent waiting in queue.
|
|
12250
|
+
*
|
|
12251
|
+
* @param durationMs - Queue wait time in milliseconds
|
|
12252
|
+
*/
|
|
12253
|
+
recordQueueTime(durationMs, attributes) {
|
|
12254
|
+
this.metrics?.queueTime.record(durationMs, attributes);
|
|
12255
|
+
}
|
|
12256
|
+
/**
|
|
12257
|
+
* Register observable gauge callbacks.
|
|
12258
|
+
* Call this after FairQueue is initialized to register the gauge callbacks.
|
|
12259
|
+
*/
|
|
12260
|
+
registerGaugeCallbacks(callbacks) {
|
|
12261
|
+
if (!this.metrics) return;
|
|
12262
|
+
if (callbacks.getQueueLength && callbacks.observedQueues) {
|
|
12263
|
+
const getQueueLength = callbacks.getQueueLength;
|
|
12264
|
+
const queues = callbacks.observedQueues;
|
|
12265
|
+
this.metrics.queueLength.addCallback(async (observableResult) => {
|
|
12266
|
+
for (const queueId of queues) {
|
|
12267
|
+
const length = await getQueueLength(queueId);
|
|
12268
|
+
observableResult.observe(length, {
|
|
12269
|
+
[FairQueueAttributes.QUEUE_ID]: queueId
|
|
12270
|
+
});
|
|
12271
|
+
}
|
|
12272
|
+
});
|
|
12273
|
+
}
|
|
12274
|
+
if (callbacks.getMasterQueueLength && callbacks.shardCount) {
|
|
12275
|
+
const getMasterQueueLength = callbacks.getMasterQueueLength;
|
|
12276
|
+
const shardCount = callbacks.shardCount;
|
|
12277
|
+
this.metrics.masterQueueLength.addCallback(async (observableResult) => {
|
|
12278
|
+
for (let shardId = 0; shardId < shardCount; shardId++) {
|
|
12279
|
+
const length = await getMasterQueueLength(shardId);
|
|
12280
|
+
observableResult.observe(length, {
|
|
12281
|
+
[FairQueueAttributes.SHARD_ID]: shardId.toString()
|
|
12282
|
+
});
|
|
12283
|
+
}
|
|
12284
|
+
});
|
|
12285
|
+
}
|
|
12286
|
+
if (callbacks.getInflightCount && callbacks.shardCount) {
|
|
12287
|
+
const getInflightCount = callbacks.getInflightCount;
|
|
12288
|
+
const shardCount = callbacks.shardCount;
|
|
12289
|
+
this.metrics.inflightCount.addCallback(async (observableResult) => {
|
|
12290
|
+
for (let shardId = 0; shardId < shardCount; shardId++) {
|
|
12291
|
+
const count = await getInflightCount(shardId);
|
|
12292
|
+
observableResult.observe(count, {
|
|
12293
|
+
[FairQueueAttributes.SHARD_ID]: shardId.toString()
|
|
12294
|
+
});
|
|
12295
|
+
}
|
|
12296
|
+
});
|
|
12297
|
+
}
|
|
12298
|
+
if (callbacks.getDLQLength && callbacks.observedTenants) {
|
|
12299
|
+
const getDLQLength = callbacks.getDLQLength;
|
|
12300
|
+
const tenants = callbacks.observedTenants;
|
|
12301
|
+
this.metrics.dlqLength.addCallback(async (observableResult) => {
|
|
12302
|
+
for (const tenantId of tenants) {
|
|
12303
|
+
const length = await getDLQLength(tenantId);
|
|
12304
|
+
observableResult.observe(length, {
|
|
12305
|
+
[FairQueueAttributes.TENANT_ID]: tenantId
|
|
12306
|
+
});
|
|
12307
|
+
}
|
|
12308
|
+
});
|
|
12309
|
+
}
|
|
12310
|
+
}
|
|
12311
|
+
// ============================================================================
|
|
12312
|
+
// Helper Methods
|
|
12313
|
+
// ============================================================================
|
|
12314
|
+
/**
|
|
12315
|
+
* Create standard attributes for a message operation.
|
|
12316
|
+
*/
|
|
12317
|
+
messageAttributes(params) {
|
|
12318
|
+
const attrs = {};
|
|
12319
|
+
if (params.queueId) attrs[FairQueueAttributes.QUEUE_ID] = params.queueId;
|
|
12320
|
+
if (params.tenantId) attrs[FairQueueAttributes.TENANT_ID] = params.tenantId;
|
|
12321
|
+
if (params.messageId) attrs[FairQueueAttributes.MESSAGE_ID] = params.messageId;
|
|
12322
|
+
if (params.attempt !== void 0) attrs[FairQueueAttributes.ATTEMPT] = params.attempt;
|
|
12323
|
+
if (params.workerQueue) attrs[FairQueueAttributes.WORKER_QUEUE] = params.workerQueue;
|
|
12324
|
+
if (params.consumerId) attrs[FairQueueAttributes.CONSUMER_ID] = params.consumerId;
|
|
12325
|
+
return attrs;
|
|
12326
|
+
}
|
|
12327
|
+
/**
|
|
12328
|
+
* Check if telemetry is enabled.
|
|
12329
|
+
*/
|
|
12330
|
+
get isEnabled() {
|
|
12331
|
+
return !!this.tracer || !!this.meter;
|
|
12332
|
+
}
|
|
12333
|
+
/**
|
|
12334
|
+
* Check if tracing is enabled.
|
|
12335
|
+
*/
|
|
12336
|
+
get hasTracer() {
|
|
12337
|
+
return !!this.tracer;
|
|
12338
|
+
}
|
|
12339
|
+
/**
|
|
12340
|
+
* Check if metrics are enabled.
|
|
12341
|
+
*/
|
|
12342
|
+
get hasMetrics() {
|
|
12343
|
+
return !!this.meter;
|
|
12344
|
+
}
|
|
12345
|
+
// ============================================================================
|
|
12346
|
+
// Private Methods
|
|
12347
|
+
// ============================================================================
|
|
12348
|
+
#initializeMetrics() {
|
|
12349
|
+
if (!this.meter) return;
|
|
12350
|
+
this.metrics = {
|
|
12351
|
+
// Counters
|
|
12352
|
+
messagesEnqueued: this.meter.createCounter(`${this.name}.messages.enqueued`, {
|
|
12353
|
+
description: "Number of messages enqueued",
|
|
12354
|
+
unit: "messages"
|
|
12355
|
+
}),
|
|
12356
|
+
messagesCompleted: this.meter.createCounter(`${this.name}.messages.completed`, {
|
|
12357
|
+
description: "Number of messages completed successfully",
|
|
12358
|
+
unit: "messages"
|
|
12359
|
+
}),
|
|
12360
|
+
messagesFailed: this.meter.createCounter(`${this.name}.messages.failed`, {
|
|
12361
|
+
description: "Number of messages that failed processing",
|
|
12362
|
+
unit: "messages"
|
|
12363
|
+
}),
|
|
12364
|
+
messagesRetried: this.meter.createCounter(`${this.name}.messages.retried`, {
|
|
12365
|
+
description: "Number of message retries",
|
|
12366
|
+
unit: "messages"
|
|
12367
|
+
}),
|
|
12368
|
+
messagesToDLQ: this.meter.createCounter(`${this.name}.messages.dlq`, {
|
|
12369
|
+
description: "Number of messages sent to dead letter queue",
|
|
12370
|
+
unit: "messages"
|
|
12371
|
+
}),
|
|
12372
|
+
// Histograms
|
|
12373
|
+
processingTime: this.meter.createHistogram(`${this.name}.message.processing_time`, {
|
|
12374
|
+
description: "Message processing time",
|
|
12375
|
+
unit: "ms"
|
|
12376
|
+
}),
|
|
12377
|
+
queueTime: this.meter.createHistogram(`${this.name}.message.queue_time`, {
|
|
12378
|
+
description: "Time message spent waiting in queue",
|
|
12379
|
+
unit: "ms"
|
|
12380
|
+
}),
|
|
12381
|
+
// Observable gauges
|
|
12382
|
+
queueLength: this.meter.createObservableGauge(`${this.name}.queue.length`, {
|
|
12383
|
+
description: "Number of messages in a queue",
|
|
12384
|
+
unit: "messages"
|
|
12385
|
+
}),
|
|
12386
|
+
masterQueueLength: this.meter.createObservableGauge(`${this.name}.master_queue.length`, {
|
|
12387
|
+
description: "Number of queues in master queue shard",
|
|
12388
|
+
unit: "queues"
|
|
12389
|
+
}),
|
|
12390
|
+
inflightCount: this.meter.createObservableGauge(`${this.name}.inflight.count`, {
|
|
12391
|
+
description: "Number of messages currently being processed",
|
|
12392
|
+
unit: "messages"
|
|
12393
|
+
}),
|
|
12394
|
+
dlqLength: this.meter.createObservableGauge(`${this.name}.dlq.length`, {
|
|
12395
|
+
description: "Number of messages in dead letter queue",
|
|
12396
|
+
unit: "messages"
|
|
12397
|
+
})
|
|
12398
|
+
};
|
|
12399
|
+
}
|
|
12400
|
+
};
|
|
12401
|
+
var noopSpan = {
|
|
12402
|
+
spanContext: () => ({
|
|
12403
|
+
traceId: "",
|
|
12404
|
+
spanId: "",
|
|
12405
|
+
traceFlags: 0
|
|
12406
|
+
}),
|
|
12407
|
+
setAttribute: () => noopSpan,
|
|
12408
|
+
setAttributes: () => noopSpan,
|
|
12409
|
+
addEvent: () => noopSpan,
|
|
12410
|
+
addLink: () => noopSpan,
|
|
12411
|
+
addLinks: () => noopSpan,
|
|
12412
|
+
setStatus: () => noopSpan,
|
|
12413
|
+
updateName: () => noopSpan,
|
|
12414
|
+
end: () => {
|
|
12415
|
+
},
|
|
12416
|
+
isRecording: () => false,
|
|
12417
|
+
recordException: () => {
|
|
12418
|
+
}
|
|
12419
|
+
};
|
|
12420
|
+
var noopTelemetry = new FairQueueTelemetry({});
|
|
12421
|
+
var VisibilityManager = class {
|
|
12422
|
+
constructor(options) {
|
|
12423
|
+
this.options = options;
|
|
12424
|
+
this.redis = createRedisClient(options.redis);
|
|
12425
|
+
this.keys = options.keys;
|
|
12426
|
+
this.shardCount = options.shardCount;
|
|
12427
|
+
this.defaultTimeoutMs = options.defaultTimeoutMs;
|
|
12428
|
+
this.logger = options.logger ?? {
|
|
12429
|
+
debug: () => {
|
|
12430
|
+
},
|
|
12431
|
+
error: () => {
|
|
12432
|
+
}
|
|
12433
|
+
};
|
|
12434
|
+
this.#registerCommands();
|
|
12435
|
+
}
|
|
12436
|
+
redis;
|
|
12437
|
+
keys;
|
|
12438
|
+
shardCount;
|
|
12439
|
+
defaultTimeoutMs;
|
|
12440
|
+
logger;
|
|
12441
|
+
// ============================================================================
|
|
12442
|
+
// Public Methods
|
|
12443
|
+
// ============================================================================
|
|
12444
|
+
/**
|
|
12445
|
+
* Claim a message for processing.
|
|
12446
|
+
* Moves the message from its queue to the in-flight set with a visibility timeout.
|
|
12447
|
+
*
|
|
12448
|
+
* @param queueId - The queue to claim from
|
|
12449
|
+
* @param queueKey - The Redis key for the queue sorted set
|
|
12450
|
+
* @param queueItemsKey - The Redis key for the queue items hash
|
|
12451
|
+
* @param consumerId - ID of the consumer claiming the message
|
|
12452
|
+
* @param timeoutMs - Visibility timeout in milliseconds
|
|
12453
|
+
* @returns Claim result with the message if successful
|
|
12454
|
+
*/
|
|
12455
|
+
async claim(queueId, queueKey, queueItemsKey, consumerId, timeoutMs) {
|
|
12456
|
+
const timeout = timeoutMs ?? this.defaultTimeoutMs;
|
|
12457
|
+
const deadline = Date.now() + timeout;
|
|
12458
|
+
const shardId = this.#getShardForQueue(queueId);
|
|
12459
|
+
const inflightKey = this.keys.inflightKey(shardId);
|
|
12460
|
+
const inflightDataKey = this.keys.inflightDataKey(shardId);
|
|
12461
|
+
const result = await this.redis.claimMessage(
|
|
12462
|
+
queueKey,
|
|
12463
|
+
queueItemsKey,
|
|
12464
|
+
inflightKey,
|
|
12465
|
+
inflightDataKey,
|
|
12466
|
+
queueId,
|
|
12467
|
+
consumerId,
|
|
12468
|
+
deadline.toString()
|
|
12469
|
+
);
|
|
12470
|
+
if (!result) {
|
|
12471
|
+
return { claimed: false };
|
|
12472
|
+
}
|
|
12473
|
+
const [messageId, payloadJson] = result;
|
|
12474
|
+
try {
|
|
12475
|
+
const payload = JSON.parse(payloadJson);
|
|
12476
|
+
const message = {
|
|
12477
|
+
messageId,
|
|
12478
|
+
queueId,
|
|
12479
|
+
payload,
|
|
12480
|
+
deadline,
|
|
12481
|
+
consumerId
|
|
12482
|
+
};
|
|
12483
|
+
this.logger.debug("Message claimed", {
|
|
12484
|
+
messageId,
|
|
12485
|
+
queueId,
|
|
12486
|
+
consumerId,
|
|
12487
|
+
deadline
|
|
12488
|
+
});
|
|
12489
|
+
return { claimed: true, message };
|
|
12490
|
+
} catch (error) {
|
|
12491
|
+
this.logger.error("Failed to parse claimed message", {
|
|
12492
|
+
messageId,
|
|
12493
|
+
queueId,
|
|
12494
|
+
error: error instanceof Error ? error.message : String(error)
|
|
12495
|
+
});
|
|
12496
|
+
await this.#removeFromInflight(shardId, messageId, queueId);
|
|
12497
|
+
return { claimed: false };
|
|
12498
|
+
}
|
|
12499
|
+
}
|
|
12500
|
+
/**
|
|
12501
|
+
* Extend the visibility timeout for a message (heartbeat).
|
|
12502
|
+
*
|
|
12503
|
+
* @param messageId - The message ID
|
|
12504
|
+
* @param queueId - The queue ID
|
|
12505
|
+
* @param extendMs - Additional milliseconds to add to the deadline
|
|
12506
|
+
* @returns true if the heartbeat was successful
|
|
12507
|
+
*/
|
|
12508
|
+
async heartbeat(messageId, queueId, extendMs) {
|
|
12509
|
+
const shardId = this.#getShardForQueue(queueId);
|
|
12510
|
+
const inflightKey = this.keys.inflightKey(shardId);
|
|
12511
|
+
const member = this.#makeMember(messageId, queueId);
|
|
12512
|
+
const newDeadline = Date.now() + extendMs;
|
|
12513
|
+
const result = await this.redis.heartbeatMessage(
|
|
12514
|
+
inflightKey,
|
|
12515
|
+
member,
|
|
12516
|
+
newDeadline.toString()
|
|
12517
|
+
);
|
|
12518
|
+
const success = result === 1;
|
|
12519
|
+
if (success) {
|
|
12520
|
+
this.logger.debug("Heartbeat successful", {
|
|
12521
|
+
messageId,
|
|
12522
|
+
queueId,
|
|
12523
|
+
newDeadline
|
|
12524
|
+
});
|
|
12525
|
+
}
|
|
12526
|
+
return success;
|
|
12527
|
+
}
|
|
12528
|
+
/**
|
|
12529
|
+
* Mark a message as successfully processed.
|
|
12530
|
+
* Removes the message from in-flight tracking.
|
|
12531
|
+
*
|
|
12532
|
+
* @param messageId - The message ID
|
|
12533
|
+
* @param queueId - The queue ID
|
|
12534
|
+
*/
|
|
12535
|
+
async complete(messageId, queueId) {
|
|
12536
|
+
const shardId = this.#getShardForQueue(queueId);
|
|
12537
|
+
await this.#removeFromInflight(shardId, messageId, queueId);
|
|
12538
|
+
this.logger.debug("Message completed", {
|
|
12539
|
+
messageId,
|
|
12540
|
+
queueId
|
|
12541
|
+
});
|
|
12542
|
+
}
|
|
12543
|
+
/**
|
|
12544
|
+
* Release a message back to its queue.
|
|
12545
|
+
* Used when processing fails or consumer wants to retry later.
|
|
12546
|
+
*
|
|
12547
|
+
* @param messageId - The message ID
|
|
12548
|
+
* @param queueId - The queue ID
|
|
12549
|
+
* @param queueKey - The Redis key for the queue
|
|
12550
|
+
* @param queueItemsKey - The Redis key for the queue items hash
|
|
12551
|
+
* @param score - Optional score for the message (defaults to now)
|
|
12552
|
+
*/
|
|
12553
|
+
async release(messageId, queueId, queueKey, queueItemsKey, score) {
|
|
12554
|
+
const shardId = this.#getShardForQueue(queueId);
|
|
12555
|
+
const inflightKey = this.keys.inflightKey(shardId);
|
|
12556
|
+
const inflightDataKey = this.keys.inflightDataKey(shardId);
|
|
12557
|
+
const member = this.#makeMember(messageId, queueId);
|
|
12558
|
+
const messageScore = score ?? Date.now();
|
|
12559
|
+
await this.redis.releaseMessage(
|
|
12560
|
+
inflightKey,
|
|
12561
|
+
inflightDataKey,
|
|
12562
|
+
queueKey,
|
|
12563
|
+
queueItemsKey,
|
|
12564
|
+
member,
|
|
12565
|
+
messageId,
|
|
12566
|
+
messageScore.toString()
|
|
12567
|
+
);
|
|
12568
|
+
this.logger.debug("Message released", {
|
|
12569
|
+
messageId,
|
|
12570
|
+
queueId,
|
|
12571
|
+
score: messageScore
|
|
12572
|
+
});
|
|
12573
|
+
}
|
|
12574
|
+
/**
|
|
12575
|
+
* Reclaim timed-out messages from a shard.
|
|
12576
|
+
* Returns messages to their original queues.
|
|
12577
|
+
*
|
|
12578
|
+
* @param shardId - The shard to check
|
|
12579
|
+
* @param getQueueKeys - Function to get queue keys for a queue ID
|
|
12580
|
+
* @returns Number of messages reclaimed
|
|
12581
|
+
*/
|
|
12582
|
+
async reclaimTimedOut(shardId, getQueueKeys) {
|
|
12583
|
+
const inflightKey = this.keys.inflightKey(shardId);
|
|
12584
|
+
const inflightDataKey = this.keys.inflightDataKey(shardId);
|
|
12585
|
+
const now = Date.now();
|
|
12586
|
+
const timedOut = await this.redis.zrangebyscore(
|
|
12587
|
+
inflightKey,
|
|
12588
|
+
"-inf",
|
|
12589
|
+
now,
|
|
12590
|
+
"WITHSCORES",
|
|
12591
|
+
"LIMIT",
|
|
12592
|
+
0,
|
|
12593
|
+
100
|
|
12594
|
+
// Process in batches
|
|
12595
|
+
);
|
|
12596
|
+
let reclaimed = 0;
|
|
12597
|
+
for (let i = 0; i < timedOut.length; i += 2) {
|
|
12598
|
+
const member = timedOut[i];
|
|
12599
|
+
const originalScore = timedOut[i + 1];
|
|
12600
|
+
if (!member || !originalScore) {
|
|
12601
|
+
continue;
|
|
12602
|
+
}
|
|
12603
|
+
const { messageId, queueId } = this.#parseMember(member);
|
|
12604
|
+
const { queueKey, queueItemsKey } = getQueueKeys(queueId);
|
|
12605
|
+
try {
|
|
12606
|
+
const score = parseFloat(originalScore) || now;
|
|
12607
|
+
await this.redis.releaseMessage(
|
|
12608
|
+
inflightKey,
|
|
12609
|
+
inflightDataKey,
|
|
12610
|
+
queueKey,
|
|
12611
|
+
queueItemsKey,
|
|
12612
|
+
member,
|
|
12613
|
+
messageId,
|
|
12614
|
+
score.toString()
|
|
12615
|
+
);
|
|
12616
|
+
reclaimed++;
|
|
12617
|
+
this.logger.debug("Reclaimed timed-out message", {
|
|
12618
|
+
messageId,
|
|
12619
|
+
queueId,
|
|
12620
|
+
originalScore
|
|
12621
|
+
});
|
|
12622
|
+
} catch (error) {
|
|
12623
|
+
this.logger.error("Failed to reclaim message", {
|
|
12624
|
+
messageId,
|
|
12625
|
+
queueId,
|
|
12626
|
+
error: error instanceof Error ? error.message : String(error)
|
|
12627
|
+
});
|
|
12628
|
+
}
|
|
12629
|
+
}
|
|
12630
|
+
return reclaimed;
|
|
12631
|
+
}
|
|
12632
|
+
/**
|
|
12633
|
+
* Get all in-flight messages for a shard.
|
|
12634
|
+
*/
|
|
12635
|
+
async getInflightMessages(shardId) {
|
|
12636
|
+
const inflightKey = this.keys.inflightKey(shardId);
|
|
12637
|
+
const results = await this.redis.zrange(inflightKey, 0, -1, "WITHSCORES");
|
|
12638
|
+
const messages = [];
|
|
12639
|
+
for (let i = 0; i < results.length; i += 2) {
|
|
12640
|
+
const member = results[i];
|
|
12641
|
+
const deadlineStr = results[i + 1];
|
|
12642
|
+
if (!member || !deadlineStr) {
|
|
12643
|
+
continue;
|
|
12644
|
+
}
|
|
12645
|
+
const deadline = parseFloat(deadlineStr);
|
|
12646
|
+
const { messageId, queueId } = this.#parseMember(member);
|
|
12647
|
+
messages.push({ messageId, queueId, deadline });
|
|
12648
|
+
}
|
|
12649
|
+
return messages;
|
|
12650
|
+
}
|
|
12651
|
+
/**
|
|
12652
|
+
* Get count of in-flight messages for a shard.
|
|
12653
|
+
*/
|
|
12654
|
+
async getInflightCount(shardId) {
|
|
12655
|
+
const inflightKey = this.keys.inflightKey(shardId);
|
|
12656
|
+
return await this.redis.zcard(inflightKey);
|
|
12657
|
+
}
|
|
12658
|
+
/**
|
|
12659
|
+
* Get total in-flight count across all shards.
|
|
12660
|
+
*/
|
|
12661
|
+
async getTotalInflightCount() {
|
|
12662
|
+
const counts = await Promise.all(
|
|
12663
|
+
Array.from({ length: this.shardCount }, (_, i) => this.getInflightCount(i))
|
|
12664
|
+
);
|
|
12665
|
+
return counts.reduce((sum, count) => sum + count, 0);
|
|
12666
|
+
}
|
|
12667
|
+
/**
|
|
12668
|
+
* Close the Redis connection.
|
|
12669
|
+
*/
|
|
12670
|
+
async close() {
|
|
12671
|
+
await this.redis.quit();
|
|
12672
|
+
}
|
|
12673
|
+
// ============================================================================
|
|
12674
|
+
// Private Methods
|
|
12675
|
+
// ============================================================================
|
|
12676
|
+
/**
|
|
12677
|
+
* Map queue ID to shard using Jump Consistent Hash.
|
|
12678
|
+
* Must use same algorithm as MasterQueue for consistency.
|
|
12679
|
+
*/
|
|
12680
|
+
#getShardForQueue(queueId) {
|
|
12681
|
+
return serverOnly.jumpHash(queueId, this.shardCount);
|
|
12682
|
+
}
|
|
12683
|
+
#makeMember(messageId, queueId) {
|
|
12684
|
+
return `${messageId}:${queueId}`;
|
|
12685
|
+
}
|
|
12686
|
+
#parseMember(member) {
|
|
12687
|
+
const colonIndex = member.indexOf(":");
|
|
12688
|
+
if (colonIndex === -1) {
|
|
12689
|
+
return { messageId: member, queueId: "" };
|
|
12690
|
+
}
|
|
12691
|
+
return {
|
|
12692
|
+
messageId: member.substring(0, colonIndex),
|
|
12693
|
+
queueId: member.substring(colonIndex + 1)
|
|
12694
|
+
};
|
|
12695
|
+
}
|
|
12696
|
+
async #removeFromInflight(shardId, messageId, queueId) {
|
|
12697
|
+
const inflightKey = this.keys.inflightKey(shardId);
|
|
12698
|
+
const inflightDataKey = this.keys.inflightDataKey(shardId);
|
|
12699
|
+
const member = this.#makeMember(messageId, queueId);
|
|
12700
|
+
const pipeline = this.redis.pipeline();
|
|
12701
|
+
pipeline.zrem(inflightKey, member);
|
|
12702
|
+
pipeline.hdel(inflightDataKey, messageId);
|
|
12703
|
+
await pipeline.exec();
|
|
12704
|
+
}
|
|
12705
|
+
#registerCommands() {
|
|
12706
|
+
this.redis.defineCommand("claimMessage", {
|
|
12707
|
+
numberOfKeys: 4,
|
|
12708
|
+
lua: `
|
|
12709
|
+
local queueKey = KEYS[1]
|
|
12710
|
+
local queueItemsKey = KEYS[2]
|
|
12711
|
+
local inflightKey = KEYS[3]
|
|
12712
|
+
local inflightDataKey = KEYS[4]
|
|
12713
|
+
|
|
12714
|
+
local queueId = ARGV[1]
|
|
12715
|
+
local consumerId = ARGV[2]
|
|
12716
|
+
local deadline = tonumber(ARGV[3])
|
|
12717
|
+
|
|
12718
|
+
-- Get oldest message from queue
|
|
12719
|
+
local items = redis.call('ZRANGE', queueKey, 0, 0)
|
|
12720
|
+
if #items == 0 then
|
|
12721
|
+
return nil
|
|
12722
|
+
end
|
|
12723
|
+
|
|
12724
|
+
local messageId = items[1]
|
|
12725
|
+
|
|
12726
|
+
-- Get message data
|
|
12727
|
+
local payload = redis.call('HGET', queueItemsKey, messageId)
|
|
12728
|
+
if not payload then
|
|
12729
|
+
-- Message data missing, remove from queue and return nil
|
|
12730
|
+
redis.call('ZREM', queueKey, messageId)
|
|
12731
|
+
return nil
|
|
12732
|
+
end
|
|
12733
|
+
|
|
12734
|
+
-- Remove from queue
|
|
12735
|
+
redis.call('ZREM', queueKey, messageId)
|
|
12736
|
+
redis.call('HDEL', queueItemsKey, messageId)
|
|
12737
|
+
|
|
12738
|
+
-- Add to in-flight set with deadline
|
|
12739
|
+
local member = messageId .. ':' .. queueId
|
|
12740
|
+
redis.call('ZADD', inflightKey, deadline, member)
|
|
12741
|
+
|
|
12742
|
+
-- Store message data for potential release
|
|
12743
|
+
redis.call('HSET', inflightDataKey, messageId, payload)
|
|
12744
|
+
|
|
12745
|
+
return {messageId, payload}
|
|
12746
|
+
`
|
|
12747
|
+
});
|
|
12748
|
+
this.redis.defineCommand("releaseMessage", {
|
|
12749
|
+
numberOfKeys: 4,
|
|
12750
|
+
lua: `
|
|
12751
|
+
local inflightKey = KEYS[1]
|
|
12752
|
+
local inflightDataKey = KEYS[2]
|
|
12753
|
+
local queueKey = KEYS[3]
|
|
12754
|
+
local queueItemsKey = KEYS[4]
|
|
12755
|
+
|
|
12756
|
+
local member = ARGV[1]
|
|
12757
|
+
local messageId = ARGV[2]
|
|
12758
|
+
local score = tonumber(ARGV[3])
|
|
12759
|
+
|
|
12760
|
+
-- Get message data from in-flight
|
|
12761
|
+
local payload = redis.call('HGET', inflightDataKey, messageId)
|
|
12762
|
+
if not payload then
|
|
12763
|
+
-- Message not in in-flight or already released
|
|
12764
|
+
return 0
|
|
12765
|
+
end
|
|
12766
|
+
|
|
12767
|
+
-- Remove from in-flight
|
|
12768
|
+
redis.call('ZREM', inflightKey, member)
|
|
12769
|
+
redis.call('HDEL', inflightDataKey, messageId)
|
|
12770
|
+
|
|
12771
|
+
-- Add back to queue
|
|
12772
|
+
redis.call('ZADD', queueKey, score, messageId)
|
|
12773
|
+
redis.call('HSET', queueItemsKey, messageId, payload)
|
|
12774
|
+
|
|
12775
|
+
return 1
|
|
12776
|
+
`
|
|
12777
|
+
});
|
|
12778
|
+
this.redis.defineCommand("heartbeatMessage", {
|
|
12779
|
+
numberOfKeys: 1,
|
|
12780
|
+
lua: `
|
|
12781
|
+
local inflightKey = KEYS[1]
|
|
12782
|
+
local member = ARGV[1]
|
|
12783
|
+
local newDeadline = tonumber(ARGV[2])
|
|
12784
|
+
|
|
12785
|
+
-- Check if member exists in the in-flight set
|
|
12786
|
+
local score = redis.call('ZSCORE', inflightKey, member)
|
|
12787
|
+
if not score then
|
|
12788
|
+
return 0
|
|
12789
|
+
end
|
|
12790
|
+
|
|
12791
|
+
-- Update the deadline
|
|
12792
|
+
redis.call('ZADD', inflightKey, 'XX', newDeadline, member)
|
|
12793
|
+
return 1
|
|
12794
|
+
`
|
|
12795
|
+
});
|
|
12796
|
+
}
|
|
12797
|
+
};
|
|
12798
|
+
|
|
12799
|
+
// src/fair-queue/workerQueue.ts
|
|
12800
|
+
var WorkerQueueManager = class {
|
|
12801
|
+
constructor(options) {
|
|
12802
|
+
this.options = options;
|
|
12803
|
+
this.redis = createRedisClient(options.redis);
|
|
12804
|
+
this.keys = options.keys;
|
|
12805
|
+
this.logger = options.logger ?? {
|
|
12806
|
+
debug: () => {
|
|
12807
|
+
},
|
|
12808
|
+
error: () => {
|
|
12809
|
+
}
|
|
12810
|
+
};
|
|
12811
|
+
this.#registerCommands();
|
|
12812
|
+
}
|
|
12813
|
+
redis;
|
|
12814
|
+
keys;
|
|
12815
|
+
logger;
|
|
12816
|
+
// ============================================================================
|
|
12817
|
+
// Public Methods
|
|
12818
|
+
// ============================================================================
|
|
12819
|
+
/**
|
|
12820
|
+
* Push a message key to a worker queue.
|
|
12821
|
+
* Called after claiming a message from the message queue.
|
|
12822
|
+
*
|
|
12823
|
+
* @param workerQueueId - The worker queue identifier
|
|
12824
|
+
* @param messageKey - The message key to push (typically "messageId:queueId")
|
|
12825
|
+
*/
|
|
12826
|
+
async push(workerQueueId, messageKey) {
|
|
12827
|
+
const workerQueueKey = this.keys.workerQueueKey(workerQueueId);
|
|
12828
|
+
await this.redis.rpush(workerQueueKey, messageKey);
|
|
12829
|
+
this.logger.debug("Pushed to worker queue", {
|
|
12830
|
+
workerQueueId,
|
|
12831
|
+
workerQueueKey,
|
|
12832
|
+
messageKey
|
|
12833
|
+
});
|
|
12834
|
+
}
|
|
12835
|
+
/**
|
|
12836
|
+
* Push multiple message keys to a worker queue.
|
|
12837
|
+
*
|
|
12838
|
+
* @param workerQueueId - The worker queue identifier
|
|
12839
|
+
* @param messageKeys - The message keys to push
|
|
12840
|
+
*/
|
|
12841
|
+
async pushBatch(workerQueueId, messageKeys) {
|
|
12842
|
+
if (messageKeys.length === 0) {
|
|
12843
|
+
return;
|
|
12844
|
+
}
|
|
12845
|
+
const workerQueueKey = this.keys.workerQueueKey(workerQueueId);
|
|
12846
|
+
await this.redis.rpush(workerQueueKey, ...messageKeys);
|
|
12847
|
+
this.logger.debug("Pushed batch to worker queue", {
|
|
12848
|
+
workerQueueId,
|
|
12849
|
+
workerQueueKey,
|
|
12850
|
+
count: messageKeys.length
|
|
12851
|
+
});
|
|
12852
|
+
}
|
|
12853
|
+
/**
|
|
12854
|
+
* Blocking pop from a worker queue.
|
|
12855
|
+
* Waits until a message is available or timeout expires.
|
|
12856
|
+
*
|
|
12857
|
+
* @param workerQueueId - The worker queue identifier
|
|
12858
|
+
* @param timeoutSeconds - Maximum time to wait (0 = wait forever)
|
|
12859
|
+
* @param signal - Optional abort signal to cancel waiting
|
|
12860
|
+
* @returns The message key, or null if timeout
|
|
12861
|
+
*/
|
|
12862
|
+
async blockingPop(workerQueueId, timeoutSeconds, signal) {
|
|
12863
|
+
const workerQueueKey = this.keys.workerQueueKey(workerQueueId);
|
|
12864
|
+
const blockingClient = this.redis.duplicate();
|
|
12865
|
+
try {
|
|
12866
|
+
if (signal) {
|
|
12867
|
+
const cleanup = () => {
|
|
12868
|
+
blockingClient.disconnect();
|
|
12869
|
+
};
|
|
12870
|
+
signal.addEventListener("abort", cleanup, { once: true });
|
|
12871
|
+
if (signal.aborted) {
|
|
12872
|
+
return null;
|
|
12873
|
+
}
|
|
12874
|
+
}
|
|
12875
|
+
const result = await blockingClient.blpop(workerQueueKey, timeoutSeconds);
|
|
12876
|
+
if (!result) {
|
|
12877
|
+
return null;
|
|
12878
|
+
}
|
|
12879
|
+
const [, messageKey] = result;
|
|
12880
|
+
this.logger.debug("Blocking pop received message", {
|
|
12881
|
+
workerQueueId,
|
|
12882
|
+
workerQueueKey,
|
|
12883
|
+
messageKey
|
|
12884
|
+
});
|
|
12885
|
+
return messageKey;
|
|
12886
|
+
} catch (error) {
|
|
12887
|
+
if (signal?.aborted) {
|
|
12888
|
+
return null;
|
|
12889
|
+
}
|
|
12890
|
+
this.logger.error("Blocking pop error", {
|
|
12891
|
+
workerQueueId,
|
|
12892
|
+
error: error instanceof Error ? error.message : String(error)
|
|
12893
|
+
});
|
|
12894
|
+
throw error;
|
|
12895
|
+
} finally {
|
|
12896
|
+
await blockingClient.quit().catch(() => {
|
|
12897
|
+
});
|
|
12898
|
+
}
|
|
12899
|
+
}
|
|
12900
|
+
/**
|
|
12901
|
+
* Non-blocking pop from a worker queue.
|
|
12902
|
+
*
|
|
12903
|
+
* @param workerQueueId - The worker queue identifier
|
|
12904
|
+
* @returns The message key and queue length, or null if empty
|
|
12905
|
+
*/
|
|
12906
|
+
async pop(workerQueueId) {
|
|
12907
|
+
const workerQueueKey = this.keys.workerQueueKey(workerQueueId);
|
|
12908
|
+
const result = await this.redis.popWithLength(workerQueueKey);
|
|
12909
|
+
if (!result) {
|
|
12910
|
+
return null;
|
|
12911
|
+
}
|
|
12912
|
+
const [messageKey, queueLength] = result;
|
|
12913
|
+
this.logger.debug("Non-blocking pop received message", {
|
|
12914
|
+
workerQueueId,
|
|
12915
|
+
workerQueueKey,
|
|
12916
|
+
messageKey,
|
|
12917
|
+
queueLength
|
|
12918
|
+
});
|
|
12919
|
+
return { messageKey, queueLength: Number(queueLength) };
|
|
12920
|
+
}
|
|
12921
|
+
/**
|
|
12922
|
+
* Get the current length of a worker queue.
|
|
12923
|
+
*/
|
|
12924
|
+
async getLength(workerQueueId) {
|
|
12925
|
+
const workerQueueKey = this.keys.workerQueueKey(workerQueueId);
|
|
12926
|
+
return await this.redis.llen(workerQueueKey);
|
|
12927
|
+
}
|
|
12928
|
+
/**
|
|
12929
|
+
* Peek at all messages in a worker queue without removing them.
|
|
12930
|
+
* Useful for debugging and tests.
|
|
12931
|
+
*/
|
|
12932
|
+
async peek(workerQueueId) {
|
|
12933
|
+
const workerQueueKey = this.keys.workerQueueKey(workerQueueId);
|
|
12934
|
+
return await this.redis.lrange(workerQueueKey, 0, -1);
|
|
12935
|
+
}
|
|
12936
|
+
/**
|
|
12937
|
+
* Remove a specific message from the worker queue.
|
|
12938
|
+
* Used when a message needs to be removed without processing.
|
|
12939
|
+
*
|
|
12940
|
+
* @param workerQueueId - The worker queue identifier
|
|
12941
|
+
* @param messageKey - The message key to remove
|
|
12942
|
+
* @returns Number of removed items
|
|
12943
|
+
*/
|
|
12944
|
+
async remove(workerQueueId, messageKey) {
|
|
12945
|
+
const workerQueueKey = this.keys.workerQueueKey(workerQueueId);
|
|
12946
|
+
return await this.redis.lrem(workerQueueKey, 0, messageKey);
|
|
12947
|
+
}
|
|
12948
|
+
/**
|
|
12949
|
+
* Clear all messages from a worker queue.
|
|
12950
|
+
*/
|
|
12951
|
+
async clear(workerQueueId) {
|
|
12952
|
+
const workerQueueKey = this.keys.workerQueueKey(workerQueueId);
|
|
12953
|
+
await this.redis.del(workerQueueKey);
|
|
12954
|
+
}
|
|
12955
|
+
/**
|
|
12956
|
+
* Close the Redis connection.
|
|
12957
|
+
*/
|
|
12958
|
+
async close() {
|
|
12959
|
+
await this.redis.quit();
|
|
12960
|
+
}
|
|
12961
|
+
// ============================================================================
|
|
12962
|
+
// Private - Register Commands
|
|
12963
|
+
// ============================================================================
|
|
12964
|
+
/**
|
|
12965
|
+
* Initialize custom Redis commands.
|
|
12966
|
+
*/
|
|
12967
|
+
#registerCommands() {
|
|
12968
|
+
this.redis.defineCommand("popWithLength", {
|
|
12969
|
+
numberOfKeys: 1,
|
|
12970
|
+
lua: `
|
|
12971
|
+
local workerQueueKey = KEYS[1]
|
|
12972
|
+
|
|
12973
|
+
-- Pop the first message
|
|
12974
|
+
local messageKey = redis.call('LPOP', workerQueueKey)
|
|
12975
|
+
if not messageKey then
|
|
12976
|
+
return nil
|
|
12977
|
+
end
|
|
12978
|
+
|
|
12979
|
+
-- Get remaining queue length
|
|
12980
|
+
local queueLength = redis.call('LLEN', workerQueueKey)
|
|
12981
|
+
|
|
12982
|
+
return {messageKey, queueLength}
|
|
12983
|
+
`
|
|
12984
|
+
});
|
|
12985
|
+
}
|
|
12986
|
+
/**
|
|
12987
|
+
* Register custom commands on an external Redis client.
|
|
12988
|
+
* Use this when initializing FairQueue with worker queues.
|
|
12989
|
+
*/
|
|
12990
|
+
registerCommands(redis) {
|
|
12991
|
+
redis.defineCommand("popWithLength", {
|
|
12992
|
+
numberOfKeys: 1,
|
|
12993
|
+
lua: `
|
|
12994
|
+
local workerQueueKey = KEYS[1]
|
|
12995
|
+
|
|
12996
|
+
-- Pop the first message
|
|
12997
|
+
local messageKey = redis.call('LPOP', workerQueueKey)
|
|
12998
|
+
if not messageKey then
|
|
12999
|
+
return nil
|
|
13000
|
+
end
|
|
13001
|
+
|
|
13002
|
+
-- Get remaining queue length
|
|
13003
|
+
local queueLength = redis.call('LLEN', workerQueueKey)
|
|
13004
|
+
|
|
13005
|
+
return {messageKey, queueLength}
|
|
13006
|
+
`
|
|
13007
|
+
});
|
|
13008
|
+
}
|
|
13009
|
+
};
|
|
13010
|
+
|
|
13011
|
+
// src/fair-queue/keyProducer.ts
var DefaultFairQueueKeyProducer = class {
  prefix;
  separator;
  constructor(options = {}) {
    this.prefix = options.prefix ?? "fq";
    this.separator = options.separator ?? ":";
  }
  // ============================================================================
  // Master Queue Keys
  // ============================================================================
  masterQueueKey(shardId) {
    return this.#buildKey("master", shardId.toString());
  }
  // ============================================================================
  // Queue Keys
  // ============================================================================
  queueKey(queueId) {
    return this.#buildKey("queue", queueId);
  }
  queueItemsKey(queueId) {
    return this.#buildKey("queue", queueId, "items");
  }
  // ============================================================================
  // Concurrency Keys
  // ============================================================================
  concurrencyKey(groupName, groupId) {
    return this.#buildKey("concurrency", groupName, groupId);
  }
  // ============================================================================
  // In-Flight Keys
  // ============================================================================
  inflightKey(shardId) {
    return this.#buildKey("inflight", shardId.toString());
  }
  inflightDataKey(shardId) {
    return this.#buildKey("inflight", shardId.toString(), "data");
  }
  // ============================================================================
  // Worker Queue Keys
  // ============================================================================
  workerQueueKey(consumerId) {
    return this.#buildKey("worker", consumerId);
  }
  // ============================================================================
  // Dead Letter Queue Keys
  // ============================================================================
  deadLetterQueueKey(tenantId) {
    return this.#buildKey("dlq", tenantId);
  }
  deadLetterQueueDataKey(tenantId) {
    return this.#buildKey("dlq", tenantId, "data");
  }
  // ============================================================================
  // Extraction Methods
  // ============================================================================
  /**
   * Extract tenant ID from a queue ID.
   * Default implementation assumes queue IDs are formatted as: tenant:{tenantId}:...
   * Override this method for custom queue ID formats.
   */
  extractTenantId(queueId) {
    const parts = queueId.split(this.separator);
    if (parts.length >= 2 && parts[0] === "tenant" && parts[1]) {
      return parts[1];
    }
    return parts[0] ?? "";
  }
  /**
   * Extract a group ID from a queue ID.
   * Default implementation looks for pattern: {groupName}:{groupId}:...
   * Override this method for custom queue ID formats.
   */
  extractGroupId(groupName, queueId) {
    const parts = queueId.split(this.separator);
    for (let i = 0; i < parts.length - 1; i++) {
      if (parts[i] === groupName) {
        const nextPart = parts[i + 1];
        if (nextPart) {
          return nextPart;
        }
      }
    }
    return "";
  }
  // ============================================================================
  // Helper Methods
  // ============================================================================
  #buildKey(...parts) {
    return [this.prefix, ...parts].join(this.separator);
  }
};
var CallbackFairQueueKeyProducer = class extends DefaultFairQueueKeyProducer {
  tenantExtractor;
  groupExtractor;
  constructor(options) {
    super({ prefix: options.prefix, separator: options.separator });
    this.tenantExtractor = options.extractTenantId;
    this.groupExtractor = options.extractGroupId;
  }
  extractTenantId(queueId) {
    return this.tenantExtractor(queueId);
  }
  extractGroupId(groupName, queueId) {
    return this.groupExtractor(groupName, queueId);
  }
};

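// Worked example (illustrative): with the defaults above (prefix "fq",
// separator ":") and a hypothetical queue ID "tenant:acme:emails":
//
//   queueKey("tenant:acme:emails")        -> "fq:queue:tenant:acme:emails"
//   queueItemsKey("tenant:acme:emails")   -> "fq:queue:tenant:acme:emails:items"
//   masterQueueKey(0)                     -> "fq:master:0"
//   deadLetterQueueDataKey("acme")        -> "fq:dlq:acme:data"
//   extractTenantId("tenant:acme:emails") -> "acme"
//
// Queue IDs that do not follow the "tenant:{tenantId}:..." convention fall
// back to the first separator-delimited segment, or can be handled with a
// CallbackFairQueueKeyProducer carrying custom extractors.
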
// src/fair-queue/scheduler.ts
var BaseScheduler = class {
  /**
   * Called after processing a message to update scheduler state.
   * Default implementation does nothing.
   */
  async recordProcessed(_tenantId, _queueId) {
  }
  /**
   * Initialize the scheduler.
   * Default implementation does nothing.
   */
  async initialize() {
  }
  /**
   * Cleanup scheduler resources.
   * Default implementation does nothing.
   */
  async close() {
  }
  /**
   * Helper to group queues by tenant.
   */
  groupQueuesByTenant(queues) {
    const grouped = /* @__PURE__ */ new Map();
    for (const { queueId, tenantId } of queues) {
      const existing = grouped.get(tenantId) ?? [];
      existing.push(queueId);
      grouped.set(tenantId, existing);
    }
    return grouped;
  }
  /**
   * Helper to convert grouped queues to TenantQueues array.
   */
  toTenantQueuesArray(grouped) {
    return Array.from(grouped.entries()).map(([tenantId, queues]) => ({
      tenantId,
      queues
    }));
  }
  /**
   * Helper to filter out tenants at capacity.
   */
  async filterAtCapacity(tenants, context2, groupName = "tenant") {
    const filtered = [];
    for (const tenant of tenants) {
      const isAtCapacity = await context2.isAtCapacity(groupName, tenant.tenantId);
      if (!isAtCapacity) {
        filtered.push(tenant);
      }
    }
    return filtered;
  }
};
var NoopScheduler = class extends BaseScheduler {
  async selectQueues(_masterQueueShard, _consumerId, _context) {
    return [];
  }
};

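// Illustrative sketch (editorial example): composing the BaseScheduler helpers
// above into a custom scheduler. `fetchQueues` is a hypothetical stand-in for
// the shard read a real implementation performs (compare the
// #getQueuesFromShard methods in the schedulers below).
class ExampleFifoScheduler extends BaseScheduler {
  constructor(fetchQueues) {
    super();
    this.fetchQueues = fetchQueues; // (shardKey) => Promise<{ queueId, tenantId }[]>
  }
  async selectQueues(masterQueueShard, _consumerId, context) {
    const queues = await this.fetchQueues(masterQueueShard);
    const grouped = this.groupQueuesByTenant(queues);
    const tenants = this.toTenantQueuesArray(grouped);
    return this.filterAtCapacity(tenants, context);
  }
}
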
// src/fair-queue/schedulers/drr.ts
var DRRScheduler = class extends BaseScheduler {
  constructor(config) {
    super();
    this.config = config;
    this.redis = createRedisClient(config.redis);
    this.keys = config.keys;
    this.quantum = config.quantum;
    this.maxDeficit = config.maxDeficit;
    this.logger = config.logger ?? {
      debug: () => {
      },
      error: () => {
      }
    };
    this.#registerCommands();
  }
  redis;
  keys;
  quantum;
  maxDeficit;
  logger;
  // ============================================================================
  // FairScheduler Implementation
  // ============================================================================
  /**
   * Select queues for processing using DRR algorithm.
   *
   * Algorithm:
   * 1. Get all queues from the master shard
   * 2. Group by tenant
   * 3. Filter out tenants at concurrency capacity
   * 4. Add quantum to each tenant's deficit (atomically)
   * 5. Select queues from tenants with deficit >= 1
   * 6. Order tenants by deficit (highest first for fairness)
   */
  async selectQueues(masterQueueShard, consumerId, context2) {
    const queues = await this.#getQueuesFromShard(masterQueueShard);
    if (queues.length === 0) {
      return [];
    }
    const queuesByTenant = this.groupQueuesByTenant(
      queues.map((q) => ({ queueId: q.queueId, tenantId: q.tenantId }))
    );
    const tenantIds = Array.from(queuesByTenant.keys());
    const deficits = await this.#addQuantumToTenants(tenantIds);
    const tenantData = await Promise.all(
      tenantIds.map(async (tenantId, index) => {
        const isAtCapacity = await context2.isAtCapacity("tenant", tenantId);
        return {
          tenantId,
          deficit: deficits[index] ?? 0,
          queues: queuesByTenant.get(tenantId) ?? [],
          isAtCapacity
        };
      })
    );
    const eligibleTenants = tenantData.filter(
      (t) => !t.isAtCapacity && t.deficit >= 1
    );
    const blockedTenants = tenantData.filter((t) => t.isAtCapacity);
    if (blockedTenants.length > 0) {
      this.logger.debug("DRR: tenants blocked by concurrency", {
        blockedCount: blockedTenants.length,
        blockedTenants: blockedTenants.map((t) => t.tenantId)
      });
    }
    eligibleTenants.sort((a, b) => b.deficit - a.deficit);
    this.logger.debug("DRR: queue selection complete", {
      totalQueues: queues.length,
      totalTenants: tenantIds.length,
      eligibleTenants: eligibleTenants.length,
      topTenantDeficit: eligibleTenants[0]?.deficit
    });
    return eligibleTenants.map((t) => ({
      tenantId: t.tenantId,
      queues: t.queues
    }));
  }
  /**
   * Record that a message was processed from a tenant.
   * Decrements the tenant's deficit.
   */
  async recordProcessed(tenantId, _queueId) {
    await this.#decrementDeficit(tenantId);
  }
  async close() {
    await this.redis.quit();
  }
  // ============================================================================
  // Public Methods for Deficit Management
  // ============================================================================
  /**
   * Get the current deficit for a tenant.
   */
  async getDeficit(tenantId) {
    const key = this.#deficitKey();
    const value = await this.redis.hget(key, tenantId);
    return value ? parseFloat(value) : 0;
  }
  /**
   * Reset deficit for a tenant.
   * Used when a tenant has no more active queues.
   */
  async resetDeficit(tenantId) {
    const key = this.#deficitKey();
    await this.redis.hdel(key, tenantId);
  }
  /**
   * Get all tenant deficits.
   */
  async getAllDeficits() {
    const key = this.#deficitKey();
    const data = await this.redis.hgetall(key);
    const result = /* @__PURE__ */ new Map();
    for (const [tenantId, value] of Object.entries(data)) {
      result.set(tenantId, parseFloat(value));
    }
    return result;
  }
  // ============================================================================
  // Private Methods
  // ============================================================================
  #deficitKey() {
    return `${this.keys.masterQueueKey(0).split(":")[0]}:drr:deficit`;
  }
  async #getQueuesFromShard(shardKey) {
    const now = Date.now();
    const results = await this.redis.zrangebyscore(
      shardKey,
      "-inf",
      now,
      "WITHSCORES",
      "LIMIT",
      0,
      1e3
      // Limit for performance
    );
    const queues = [];
    for (let i = 0; i < results.length; i += 2) {
      const queueId = results[i];
      const scoreStr = results[i + 1];
      if (queueId && scoreStr) {
        queues.push({
          queueId,
          score: parseFloat(scoreStr),
          tenantId: this.keys.extractTenantId(queueId)
        });
      }
    }
    return queues;
  }
  /**
   * Add quantum to multiple tenants atomically.
   * Returns the new deficit values.
   */
  async #addQuantumToTenants(tenantIds) {
    if (tenantIds.length === 0) {
      return [];
    }
    const key = this.#deficitKey();
    const results = await this.redis.drrAddQuantum(
      key,
      this.quantum.toString(),
      this.maxDeficit.toString(),
      ...tenantIds
    );
    return results.map((r) => parseFloat(r));
  }
  /**
   * Decrement deficit for a tenant atomically.
   */
  async #decrementDeficit(tenantId) {
    const key = this.#deficitKey();
    const result = await this.redis.drrDecrementDeficit(key, tenantId);
    return parseFloat(result);
  }
  #registerCommands() {
    this.redis.defineCommand("drrAddQuantum", {
      numberOfKeys: 1,
      lua: `
        local deficitKey = KEYS[1]
        local quantum = tonumber(ARGV[1])
        local maxDeficit = tonumber(ARGV[2])
        local results = {}

        for i = 3, #ARGV do
          local tenantId = ARGV[i]

          -- Add quantum to deficit
          local newDeficit = redis.call('HINCRBYFLOAT', deficitKey, tenantId, quantum)
          newDeficit = tonumber(newDeficit)

          -- Cap at maxDeficit
          if newDeficit > maxDeficit then
            redis.call('HSET', deficitKey, tenantId, maxDeficit)
            newDeficit = maxDeficit
          end

          table.insert(results, tostring(newDeficit))
        end

        return results
      `
    });
    this.redis.defineCommand("drrDecrementDeficit", {
      numberOfKeys: 1,
      lua: `
        local deficitKey = KEYS[1]
        local tenantId = ARGV[1]

        local newDeficit = redis.call('HINCRBYFLOAT', deficitKey, tenantId, -1)
        newDeficit = tonumber(newDeficit)

        -- Floor at 0
        if newDeficit < 0 then
          redis.call('HSET', deficitKey, tenantId, 0)
          newDeficit = 0
        end

        return tostring(newDeficit)
      `
    });
  }
};
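// Worked example (illustrative numbers, editorial addition): with quantum = 1
// and maxDeficit = 10, two backlogged tenants A and B:
//
//   round 1: drrAddQuantum       -> A: 1, B: 1   (both >= 1, both served)
//   A processes a message        -> drrDecrementDeficit(A) -> A: 0
//   round 2: drrAddQuantum       -> A: 1, B: 2   (B sorts first, highest deficit)
//
// A tenant that keeps getting skipped accumulates deficit (capped at
// maxDeficit by the Lua script above), so it is prioritised as soon as it
// becomes eligible again.
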
var defaultBiases = {
  concurrencyLimitBias: 0,
  availableCapacityBias: 0,
  queueAgeRandomization: 0
};
var WeightedScheduler = class extends BaseScheduler {
  constructor(config) {
    super();
    this.config = config;
    this.redis = createRedisClient(config.redis);
    this.keys = config.keys;
    this.rng = seedrandom__default.default(config.seed);
    this.biases = config.biases ?? defaultBiases;
    this.defaultTenantLimit = config.defaultTenantConcurrencyLimit ?? 100;
    this.masterQueueLimit = config.masterQueueLimit ?? 100;
    this.reuseSnapshotCount = config.reuseSnapshotCount ?? 0;
    this.maximumTenantCount = config.maximumTenantCount ?? 0;
  }
  redis;
  keys;
  rng;
  biases;
  defaultTenantLimit;
  masterQueueLimit;
  reuseSnapshotCount;
  maximumTenantCount;
  // Snapshot cache
  snapshotCache = /* @__PURE__ */ new Map();
  // ============================================================================
  // FairScheduler Implementation
  // ============================================================================
  async selectQueues(masterQueueShard, consumerId, context2) {
    const snapshot = await this.#getOrCreateSnapshot(
      masterQueueShard,
      consumerId,
      context2
    );
    if (snapshot.queues.length === 0) {
      return [];
    }
    const shuffledTenants = this.#shuffleTenantsByWeight(snapshot);
    return shuffledTenants.map((tenantId) => ({
      tenantId,
      queues: this.#orderQueuesForTenant(snapshot, tenantId)
    }));
  }
  async close() {
    this.snapshotCache.clear();
    await this.redis.quit();
  }
  // ============================================================================
  // Private Methods
  // ============================================================================
  async #getOrCreateSnapshot(masterQueueShard, consumerId, context2) {
    const cacheKey = `${masterQueueShard}:${consumerId}`;
    if (this.reuseSnapshotCount > 0) {
      const cached = this.snapshotCache.get(cacheKey);
      if (cached && cached.reuseCount < this.reuseSnapshotCount) {
        this.snapshotCache.set(cacheKey, {
          snapshot: cached.snapshot,
          reuseCount: cached.reuseCount + 1
        });
        return cached.snapshot;
      }
    }
    const snapshot = await this.#createSnapshot(masterQueueShard, context2);
    if (this.reuseSnapshotCount > 0) {
      this.snapshotCache.set(cacheKey, { snapshot, reuseCount: 0 });
    }
    return snapshot;
  }
  async #createSnapshot(masterQueueShard, context2) {
    const now = Date.now();
    let rawQueues = await this.#getQueuesFromShard(masterQueueShard, now);
    if (rawQueues.length === 0) {
      return { id: crypto.randomUUID(), tenants: /* @__PURE__ */ new Map(), queues: [] };
    }
    if (this.maximumTenantCount > 0) {
      rawQueues = this.#selectTopTenantQueues(rawQueues);
    }
    const tenantIds = /* @__PURE__ */ new Set();
    const queuesByTenant = /* @__PURE__ */ new Map();
    for (const queue of rawQueues) {
      tenantIds.add(queue.tenantId);
      const tenantQueues = queuesByTenant.get(queue.tenantId) ?? [];
      tenantQueues.push({
        queueId: queue.queueId,
        age: now - queue.score
      });
      queuesByTenant.set(queue.tenantId, tenantQueues);
    }
    const tenants = /* @__PURE__ */ new Map();
    for (const tenantId of tenantIds) {
      const [current, limit] = await Promise.all([
        context2.getCurrentConcurrency("tenant", tenantId),
        context2.getConcurrencyLimit("tenant", tenantId)
      ]);
      if (current >= limit) {
        continue;
      }
      tenants.set(tenantId, {
        tenantId,
        concurrency: { current, limit },
        queues: queuesByTenant.get(tenantId) ?? []
      });
    }
    const queues = rawQueues.filter((q) => tenants.has(q.tenantId)).map((q) => ({
      queueId: q.queueId,
      tenantId: q.tenantId,
      age: now - q.score
    }));
    return {
      id: crypto.randomUUID(),
      tenants,
      queues
    };
  }
  async #getQueuesFromShard(shardKey, maxScore) {
    const results = await this.redis.zrangebyscore(
      shardKey,
      "-inf",
      maxScore,
      "WITHSCORES",
      "LIMIT",
      0,
      this.masterQueueLimit
    );
    const queues = [];
    for (let i = 0; i < results.length; i += 2) {
      const queueId = results[i];
      const scoreStr = results[i + 1];
      if (queueId && scoreStr) {
        queues.push({
          queueId,
          score: parseFloat(scoreStr),
          tenantId: this.keys.extractTenantId(queueId)
        });
      }
    }
    return queues;
  }
  #selectTopTenantQueues(queues) {
    const queuesByTenant = /* @__PURE__ */ new Map();
    for (const queue of queues) {
      const tenantQueues = queuesByTenant.get(queue.tenantId) ?? [];
      tenantQueues.push(queue);
      queuesByTenant.set(queue.tenantId, tenantQueues);
    }
    const tenantAges = Array.from(queuesByTenant.entries()).map(([tenantId, tQueues]) => {
      const avgAge = tQueues.reduce((sum, q) => sum + q.score, 0) / tQueues.length;
      return { tenantId, avgAge };
    });
    const maxAge = Math.max(...tenantAges.map((t) => t.avgAge));
    const weightedTenants = maxAge === 0 ? tenantAges.map((t) => ({
      tenantId: t.tenantId,
      weight: 1 / tenantAges.length
    })) : tenantAges.map((t) => ({
      tenantId: t.tenantId,
      weight: t.avgAge / maxAge
    }));
    const selectedTenants = /* @__PURE__ */ new Set();
    let remaining = [...weightedTenants];
    let totalWeight = remaining.reduce((sum, t) => sum + t.weight, 0);
    while (selectedTenants.size < this.maximumTenantCount && remaining.length > 0) {
      let random = this.rng() * totalWeight;
      let index = 0;
      while (random > 0 && index < remaining.length) {
        const item = remaining[index];
        if (item) {
          random -= item.weight;
        }
        index++;
      }
      index = Math.max(0, index - 1);
      const selected = remaining[index];
      if (selected) {
        selectedTenants.add(selected.tenantId);
        totalWeight -= selected.weight;
        remaining.splice(index, 1);
      }
    }
    return queues.filter((q) => selectedTenants.has(q.tenantId));
  }
  #shuffleTenantsByWeight(snapshot) {
    const tenantIds = Array.from(snapshot.tenants.keys());
    if (tenantIds.length === 0) {
      return [];
    }
    const { concurrencyLimitBias, availableCapacityBias } = this.biases;
    if (concurrencyLimitBias === 0 && availableCapacityBias === 0) {
      return this.#shuffle(tenantIds);
    }
    const maxLimit = Math.max(
      ...tenantIds.map((id) => snapshot.tenants.get(id).concurrency.limit)
    );
    const weightedTenants = tenantIds.map((tenantId) => {
      const tenant = snapshot.tenants.get(tenantId);
      let weight = 1;
      if (concurrencyLimitBias > 0) {
        const normalizedLimit = maxLimit > 0 ? tenant.concurrency.limit / maxLimit : 0;
        weight *= 1 + Math.pow(normalizedLimit * concurrencyLimitBias, 2);
      }
      if (availableCapacityBias > 0) {
        const usedPercentage = tenant.concurrency.limit > 0 ? tenant.concurrency.current / tenant.concurrency.limit : 1;
        const availableBonus = 1 - usedPercentage;
        weight *= 1 + Math.pow(availableBonus * availableCapacityBias, 2);
      }
      return { tenantId, weight };
    });
    return this.#weightedShuffle(weightedTenants);
  }
  #orderQueuesForTenant(snapshot, tenantId) {
    const tenant = snapshot.tenants.get(tenantId);
    if (!tenant || tenant.queues.length === 0) {
      return [];
    }
    const queues = [...tenant.queues];
    const { queueAgeRandomization } = this.biases;
    if (queueAgeRandomization === 0) {
      return queues.sort((a, b) => b.age - a.age).map((q) => q.queueId);
    }
    const maxAge = Math.max(...queues.map((q) => q.age));
    const ageDenom = maxAge === 0 ? 1 : maxAge;
    const weightedQueues = queues.map((q) => ({
      queue: q,
      weight: 1 + q.age / ageDenom * queueAgeRandomization
    }));
    const result = [];
    let remaining = [...weightedQueues];
    let totalWeight = remaining.reduce((sum, q) => sum + q.weight, 0);
    while (remaining.length > 0) {
      let random = this.rng() * totalWeight;
      let index = 0;
      while (random > 0 && index < remaining.length) {
        const item = remaining[index];
        if (item) {
          random -= item.weight;
        }
        index++;
      }
      index = Math.max(0, index - 1);
      const selected = remaining[index];
      if (selected) {
        result.push(selected.queue.queueId);
        totalWeight -= selected.weight;
        remaining.splice(index, 1);
      }
    }
    return result;
  }
  #shuffle(array) {
    const result = [...array];
    for (let i = result.length - 1; i > 0; i--) {
      const j = Math.floor(this.rng() * (i + 1));
      const temp = result[i];
      const swapValue = result[j];
      if (temp !== void 0 && swapValue !== void 0) {
        result[i] = swapValue;
        result[j] = temp;
      }
    }
    return result;
  }
  #weightedShuffle(items) {
    const result = [];
    let remaining = [...items];
    let totalWeight = remaining.reduce((sum, item) => sum + item.weight, 0);
    while (remaining.length > 0) {
      let random = this.rng() * totalWeight;
      let index = 0;
      while (random > 0 && index < remaining.length) {
        const item = remaining[index];
        if (item) {
          random -= item.weight;
        }
        index++;
      }
      index = Math.max(0, index - 1);
      const selected = remaining[index];
      if (selected) {
        result.push(selected.tenantId);
        totalWeight -= selected.weight;
        remaining.splice(index, 1);
      }
    }
    return result;
  }
};

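// Worked example (illustrative numbers, editorial addition): how the two bias
// terms shape a tenant's weight in #shuffleTenantsByWeight. With
// concurrencyLimitBias = 2, availableCapacityBias = 1, and a tenant whose
// limit is 50 of a maxLimit of 100, currently using 10 of its 50 slots:
//
//   normalizedLimit = 50 / 100 = 0.5
//   weight = 1 * (1 + (0.5 * 2)^2)  = 2
//   availableBonus  = 1 - 10 / 50   = 0.8
//   weight = 2 * (1 + (0.8 * 1)^2)  = 2 * 1.64 = 3.28
//
// With both biases at 0 the weighting is skipped entirely and the seeded
// Fisher-Yates #shuffle is used, which is why runs with the same `seed`
// reproduce the same ordering.
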
// src/fair-queue/schedulers/roundRobin.ts
var RoundRobinScheduler = class extends BaseScheduler {
  constructor(config) {
    super();
    this.config = config;
    this.redis = createRedisClient(config.redis);
    this.keys = config.keys;
    this.masterQueueLimit = config.masterQueueLimit ?? 1e3;
  }
  redis;
  keys;
  masterQueueLimit;
  // ============================================================================
  // FairScheduler Implementation
  // ============================================================================
  async selectQueues(masterQueueShard, consumerId, context2) {
    const now = Date.now();
    const queues = await this.#getQueuesFromShard(masterQueueShard, now);
    if (queues.length === 0) {
      return [];
    }
    const queuesByTenant = /* @__PURE__ */ new Map();
    const tenantOrder = [];
    for (const queue of queues) {
      if (!queuesByTenant.has(queue.tenantId)) {
        queuesByTenant.set(queue.tenantId, []);
        tenantOrder.push(queue.tenantId);
      }
      queuesByTenant.get(queue.tenantId).push(queue.queueId);
    }
    const lastServedIndex = await this.#getLastServedIndex(masterQueueShard);
    const rotatedTenants = this.#rotateArray(tenantOrder, lastServedIndex);
    const eligibleTenants = [];
    for (const tenantId of rotatedTenants) {
      const isAtCapacity = await context2.isAtCapacity("tenant", tenantId);
      if (!isAtCapacity) {
        const tenantQueues = queuesByTenant.get(tenantId) ?? [];
        eligibleTenants.push({
          tenantId,
          queues: tenantQueues
        });
      }
    }
    const firstEligible = eligibleTenants[0];
    if (firstEligible) {
      const firstTenantIndex = tenantOrder.indexOf(firstEligible.tenantId);
      await this.#setLastServedIndex(masterQueueShard, firstTenantIndex + 1);
    }
    return eligibleTenants;
  }
  async close() {
    await this.redis.quit();
  }
  // ============================================================================
  // Private Methods
  // ============================================================================
  async #getQueuesFromShard(shardKey, maxScore) {
    const results = await this.redis.zrangebyscore(
      shardKey,
      "-inf",
      maxScore,
      "WITHSCORES",
      "LIMIT",
      0,
      this.masterQueueLimit
    );
    const queues = [];
    for (let i = 0; i < results.length; i += 2) {
      const queueId = results[i];
      const scoreStr = results[i + 1];
      if (queueId && scoreStr) {
        queues.push({
          queueId,
          score: parseFloat(scoreStr),
          tenantId: this.keys.extractTenantId(queueId)
        });
      }
    }
    return queues;
  }
  #lastServedKey(shardKey) {
    return `${shardKey}:rr:lastServed`;
  }
  async #getLastServedIndex(shardKey) {
    const key = this.#lastServedKey(shardKey);
    const value = await this.redis.get(key);
    return value ? parseInt(value, 10) : 0;
  }
  async #setLastServedIndex(shardKey, index) {
    const key = this.#lastServedKey(shardKey);
    await this.redis.set(key, index.toString());
  }
  #rotateArray(array, startIndex) {
    if (array.length === 0) return [];
    const normalizedIndex = startIndex % array.length;
    return [...array.slice(normalizedIndex), ...array.slice(0, normalizedIndex)];
  }
};
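// Worked example (illustrative, editorial addition): #rotateArray is what
// makes the round-robin resume where it left off. With
// tenantOrder = ["a", "b", "c"] and a stored lastServed index of 1:
//
//   #rotateArray(["a", "b", "c"], 1) -> ["b", "c", "a"]
//
// After serving "b", the index persisted back to Redis is
// tenantOrder.indexOf("b") + 1 = 2, so the next pass starts at "c".
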
var ExponentialBackoffRetry = class {
  maxAttempts;
  options;
  constructor(options) {
    this.options = {
      maxAttempts: options?.maxAttempts ?? 12,
      factor: options?.factor ?? 2,
      minTimeoutInMs: options?.minTimeoutInMs ?? 1e3,
      maxTimeoutInMs: options?.maxTimeoutInMs ?? 36e5,
      // 1 hour
      randomize: options?.randomize ?? true
    };
    this.maxAttempts = this.options.maxAttempts ?? 12;
  }
  getNextDelay(attempt, _error) {
    if (attempt >= this.maxAttempts) {
      return null;
    }
    const delay = v3.calculateNextRetryDelay(this.options, attempt);
    return delay ?? null;
  }
};
var FixedDelayRetry = class {
  maxAttempts;
  delayMs;
  constructor(options) {
    this.maxAttempts = options.maxAttempts;
    this.delayMs = options.delayMs;
  }
  getNextDelay(attempt, _error) {
    if (attempt >= this.maxAttempts) {
      return null;
    }
    return this.delayMs;
  }
};
var LinearBackoffRetry = class {
  maxAttempts;
  baseDelayMs;
  maxDelayMs;
  constructor(options) {
    this.maxAttempts = options.maxAttempts;
    this.baseDelayMs = options.baseDelayMs;
    this.maxDelayMs = options.maxDelayMs ?? options.baseDelayMs * options.maxAttempts;
  }
  getNextDelay(attempt, _error) {
    if (attempt >= this.maxAttempts) {
      return null;
    }
    const delay = this.baseDelayMs * attempt;
    return Math.min(delay, this.maxDelayMs);
  }
};
var NoRetry = class {
  maxAttempts = 1;
  getNextDelay(_attempt, _error) {
    return null;
  }
};
var ImmediateRetry = class {
  maxAttempts;
  constructor(maxAttempts) {
    this.maxAttempts = maxAttempts;
  }
  getNextDelay(attempt, _error) {
    if (attempt >= this.maxAttempts) {
      return null;
    }
    return 0;
  }
};
var CustomRetry = class {
  maxAttempts;
  calculateDelay;
  constructor(options) {
    this.maxAttempts = options.maxAttempts;
    this.calculateDelay = options.calculateDelay;
  }
  getNextDelay(attempt, error) {
    if (attempt >= this.maxAttempts) {
      return null;
    }
    return this.calculateDelay(attempt, error);
  }
};
var defaultRetryOptions = {
  maxAttempts: 12,
  factor: 2,
  minTimeoutInMs: 1e3,
  maxTimeoutInMs: 36e5,
  randomize: true
};
function createDefaultRetryStrategy() {
  return new ExponentialBackoffRetry(defaultRetryOptions);
}

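// Worked example (illustrative, editorial addition): the fixed and linear
// strategies above are fully self-contained, so their schedules can be read
// off directly. For new LinearBackoffRetry({ maxAttempts: 4, baseDelayMs: 1000 }):
//
//   attempt 1 -> 1000 ms, attempt 2 -> 2000 ms, attempt 3 -> 3000 ms,
//   attempt 4 -> null (give up; maxDelayMs defaults to 4000 here)
//
// ExponentialBackoffRetry delegates to v3.calculateNextRetryDelay with the
// defaults above (factor 2, 1 s minimum, 1 h maximum, jitter enabled), so its
// exact delays depend on that helper.
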
// src/fair-queue/index.ts
var FairQueue = class {
  constructor(options) {
    this.options = options;
    this.redis = createRedisClient(options.redis);
    this.keys = options.keys;
    this.scheduler = options.scheduler;
    this.logger = options.logger ?? new logger$1.Logger("FairQueue", "info");
    this.abortController = new AbortController();
    this.payloadSchema = options.payloadSchema;
    this.validateOnEnqueue = options.validateOnEnqueue ?? false;
    this.retryStrategy = options.retry?.strategy;
    this.deadLetterQueueEnabled = options.retry?.deadLetterQueue ?? true;
    this.shardCount = options.shardCount ?? 1;
    this.consumerCount = options.consumerCount ?? 1;
    this.consumerIntervalMs = options.consumerIntervalMs ?? 100;
    this.visibilityTimeoutMs = options.visibilityTimeoutMs ?? 3e4;
    this.heartbeatIntervalMs = options.heartbeatIntervalMs ?? this.visibilityTimeoutMs / 3;
    this.reclaimIntervalMs = options.reclaimIntervalMs ?? 5e3;
    this.workerQueueEnabled = options.workerQueue?.enabled ?? false;
    this.workerQueueBlockingTimeoutSeconds = options.workerQueue?.blockingTimeoutSeconds ?? 10;
    this.workerQueueResolver = options.workerQueue?.resolveWorkerQueue;
    this.cooloffEnabled = options.cooloff?.enabled ?? true;
    this.cooloffThreshold = options.cooloff?.threshold ?? 10;
    this.cooloffPeriodMs = options.cooloff?.periodMs ?? 1e4;
    this.globalRateLimiter = options.globalRateLimiter;
    this.telemetry = new FairQueueTelemetry({
      tracer: options.tracer,
      meter: options.meter,
      name: options.name ?? "fairqueue"
    });
    this.masterQueue = new MasterQueue({
      redis: options.redis,
      keys: options.keys,
      shardCount: this.shardCount
    });
    if (options.concurrencyGroups && options.concurrencyGroups.length > 0) {
      this.concurrencyManager = new ConcurrencyManager({
        redis: options.redis,
        keys: options.keys,
        groups: options.concurrencyGroups
      });
    }
    this.visibilityManager = new VisibilityManager({
      redis: options.redis,
      keys: options.keys,
      shardCount: this.shardCount,
      defaultTimeoutMs: this.visibilityTimeoutMs,
      logger: {
        debug: (msg, ctx) => this.logger.debug(msg, ctx),
        error: (msg, ctx) => this.logger.error(msg, ctx)
      }
    });
    if (this.workerQueueEnabled) {
      this.workerQueueManager = new WorkerQueueManager({
        redis: options.redis,
        keys: options.keys,
        logger: {
          debug: (msg, ctx) => this.logger.debug(msg, ctx),
          error: (msg, ctx) => this.logger.error(msg, ctx)
        }
      });
    }
    this.#registerCommands();
    if (options.startConsumers !== false) {
      this.start();
    }
  }
  redis;
  keys;
  scheduler;
  masterQueue;
  concurrencyManager;
  visibilityManager;
  workerQueueManager;
  telemetry;
  logger;
  // Configuration
  payloadSchema;
  validateOnEnqueue;
  retryStrategy;
  deadLetterQueueEnabled;
  shardCount;
  consumerCount;
  consumerIntervalMs;
  visibilityTimeoutMs;
  heartbeatIntervalMs;
  reclaimIntervalMs;
  workerQueueEnabled;
  workerQueueBlockingTimeoutSeconds;
  workerQueueResolver;
  // Cooloff state
  cooloffEnabled;
  cooloffThreshold;
  cooloffPeriodMs;
  queueCooloffStates = /* @__PURE__ */ new Map();
  // Global rate limiter
  globalRateLimiter;
  // Runtime state
  messageHandler;
  isRunning = false;
  abortController;
  masterQueueConsumerLoops = [];
  workerQueueConsumerLoops = [];
  reclaimLoop;
  // Queue descriptor cache for message processing
  queueDescriptorCache = /* @__PURE__ */ new Map();
  // ============================================================================
  // Public API - Telemetry
  // ============================================================================
  /**
   * Register observable gauge callbacks for telemetry.
   * Call this after FairQueue is created to enable gauge metrics.
   *
   * @param options.observedTenants - List of tenant IDs to observe for DLQ metrics
   */
  registerTelemetryGauges(options) {
    this.telemetry.registerGaugeCallbacks({
      getMasterQueueLength: async (shardId) => {
        return await this.masterQueue.getShardQueueCount(shardId);
      },
      getInflightCount: async (shardId) => {
        return await this.visibilityManager.getInflightCount(shardId);
      },
      getDLQLength: async (tenantId) => {
        return await this.getDeadLetterQueueLength(tenantId);
      },
      shardCount: this.shardCount,
      observedTenants: options?.observedTenants
    });
  }
  // ============================================================================
  // Public API - Message Handler
  // ============================================================================
  /**
   * Set the message handler for processing dequeued messages.
   */
  onMessage(handler) {
    this.messageHandler = handler;
  }
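  // Illustrative wiring (editorial example; option shapes inferred from the
  // constructor above, and the Redis connection options are hypothetical):
  //
  //   const keys = new DefaultFairQueueKeyProducer({ prefix: "fq" });
  //   const queue = new FairQueue({
  //     redis: { host: "localhost", port: 6379 },
  //     keys,
  //     scheduler: new DRRScheduler({
  //       redis: { host: "localhost", port: 6379 },
  //       keys,
  //       quantum: 1,
  //       maxDeficit: 10,
  //     }),
  //     startConsumers: false, // start explicitly once a handler is attached
  //   });
  //   queue.onMessage(async (message) => { /* process message */ });
  //   queue.start();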
  // ============================================================================
  // Public API - Enqueueing
  // ============================================================================
  /**
   * Enqueue a single message to a queue.
   */
  async enqueue(options) {
    return this.telemetry.trace(
      "enqueue",
      async (span) => {
        const messageId = options.messageId ?? nanoid();
        const timestamp = options.timestamp ?? Date.now();
        const queueKey = this.keys.queueKey(options.queueId);
        const queueItemsKey = this.keys.queueItemsKey(options.queueId);
        const shardId = this.masterQueue.getShardForQueue(options.queueId);
        const masterQueueKey = this.keys.masterQueueKey(shardId);
        if (this.validateOnEnqueue && this.payloadSchema) {
          const result = this.payloadSchema.safeParse(options.payload);
          if (!result.success) {
            throw new Error(`Payload validation failed: ${result.error.message}`);
          }
        }
        const descriptor = {
          id: options.queueId,
          tenantId: options.tenantId,
          metadata: options.metadata ?? {}
        };
        this.queueDescriptorCache.set(options.queueId, descriptor);
        const storedMessage = {
          id: messageId,
          queueId: options.queueId,
          tenantId: options.tenantId,
          payload: options.payload,
          timestamp,
          attempt: 1,
          workerQueue: this.workerQueueResolver ? this.workerQueueResolver({
            id: messageId,
            queueId: options.queueId,
            tenantId: options.tenantId,
            payload: options.payload,
            timestamp,
            attempt: 1,
            metadata: options.metadata
          }) : options.queueId,
          metadata: options.metadata
        };
        await this.redis.enqueueMessageAtomic(
          queueKey,
          queueItemsKey,
          masterQueueKey,
          options.queueId,
          messageId,
          timestamp.toString(),
          JSON.stringify(storedMessage)
        );
        span.setAttributes({
          [FairQueueAttributes.QUEUE_ID]: options.queueId,
          [FairQueueAttributes.TENANT_ID]: options.tenantId,
          [FairQueueAttributes.MESSAGE_ID]: messageId,
          [FairQueueAttributes.SHARD_ID]: shardId.toString()
        });
        this.telemetry.recordEnqueue(
          this.telemetry.messageAttributes({
            queueId: options.queueId,
            tenantId: options.tenantId,
            messageId
          })
        );
        this.logger.debug("Message enqueued", {
          queueId: options.queueId,
          messageId,
          timestamp
        });
        return messageId;
      },
      {
        kind: SpanKind.PRODUCER,
        attributes: {
          [MessagingAttributes.OPERATION]: "publish"
        }
      }
    );
  }
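  // Illustrative usage (editorial example): enqueueing with the default key
  // conventions, assuming a `queue` built as sketched earlier and the
  // hypothetical queue ID "tenant:acme:emails":
  //
  //   const messageId = await queue.enqueue({
  //     queueId: "tenant:acme:emails",
  //     tenantId: "acme",
  //     payload: { to: "user@example.com" },
  //   });
  //
  // The returned ID is a generated nanoid unless options.messageId is given.
  // The timestamp becomes the message's score, and the schedulers above read
  // the master shard with ZRANGEBYSCORE up to Date.now(), so a future
  // timestamp keeps the queue out of selection until it comes due.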
  /**
   * Enqueue multiple messages to a queue.
   */
  async enqueueBatch(options) {
    return this.telemetry.trace(
      "enqueueBatch",
      async (span) => {
        const queueKey = this.keys.queueKey(options.queueId);
        const queueItemsKey = this.keys.queueItemsKey(options.queueId);
        const shardId = this.masterQueue.getShardForQueue(options.queueId);
        const masterQueueKey = this.keys.masterQueueKey(shardId);
        const now = Date.now();
        const descriptor = {
          id: options.queueId,
          tenantId: options.tenantId,
          metadata: options.metadata ?? {}
        };
        this.queueDescriptorCache.set(options.queueId, descriptor);
        const messageIds = [];
        const args = [];
        for (const message of options.messages) {
          const messageId = message.messageId ?? nanoid();
          const timestamp = message.timestamp ?? now;
          if (this.validateOnEnqueue && this.payloadSchema) {
            const result = this.payloadSchema.safeParse(message.payload);
            if (!result.success) {
              throw new Error(
                `Payload validation failed for message ${messageId}: ${result.error.message}`
              );
            }
          }
          const storedMessage = {
            id: messageId,
            queueId: options.queueId,
            tenantId: options.tenantId,
            payload: message.payload,
            timestamp,
            attempt: 1,
            workerQueue: this.workerQueueResolver ? this.workerQueueResolver({
              id: messageId,
              queueId: options.queueId,
              tenantId: options.tenantId,
              payload: message.payload,
              timestamp,
              attempt: 1,
              metadata: options.metadata
            }) : options.queueId,
            metadata: options.metadata
          };
          messageIds.push(messageId);
          args.push(messageId, timestamp.toString(), JSON.stringify(storedMessage));
        }
        await this.redis.enqueueBatchAtomic(
          queueKey,
          queueItemsKey,
          masterQueueKey,
          options.queueId,
          ...args
        );
        span.setAttributes({
          [FairQueueAttributes.QUEUE_ID]: options.queueId,
          [FairQueueAttributes.TENANT_ID]: options.tenantId,
          [FairQueueAttributes.MESSAGE_COUNT]: messageIds.length,
          [FairQueueAttributes.SHARD_ID]: shardId.toString()
        });
        this.telemetry.recordEnqueueBatch(
          messageIds.length,
          this.telemetry.messageAttributes({
            queueId: options.queueId,
            tenantId: options.tenantId
          })
        );
        this.logger.debug("Batch enqueued", {
          queueId: options.queueId,
          messageCount: messageIds.length
        });
        return messageIds;
      },
      {
        kind: SpanKind.PRODUCER,
        attributes: {
          [MessagingAttributes.OPERATION]: "publish"
        }
      }
    );
  }
  // ============================================================================
  // Public API - Dead Letter Queue
  // ============================================================================
  /**
   * Get messages from the dead letter queue for a tenant.
   */
  async getDeadLetterMessages(tenantId, limit = 100) {
    if (!this.deadLetterQueueEnabled) {
      return [];
    }
    const dlqKey = this.keys.deadLetterQueueKey(tenantId);
    const dlqDataKey = this.keys.deadLetterQueueDataKey(tenantId);
    const results = await this.redis.zrange(dlqKey, 0, limit - 1, "WITHSCORES");
    const messages = [];
    for (let i = 0; i < results.length; i += 2) {
      const messageId = results[i];
      const deadLetteredAtStr = results[i + 1];
      if (!messageId || !deadLetteredAtStr) continue;
      const dataJson = await this.redis.hget(dlqDataKey, messageId);
      if (!dataJson) continue;
      try {
        const data = JSON.parse(dataJson);
        data.deadLetteredAt = parseFloat(deadLetteredAtStr);
        messages.push(data);
      } catch {
        this.logger.error("Failed to parse DLQ message", { messageId, tenantId });
      }
    }
    return messages;
  }
  /**
   * Redrive a message from DLQ back to its original queue.
   */
  async redriveMessage(tenantId, messageId) {
    if (!this.deadLetterQueueEnabled) {
      return false;
    }
    return this.telemetry.trace(
      "redriveMessage",
      async (span) => {
        const dlqKey = this.keys.deadLetterQueueKey(tenantId);
        const dlqDataKey = this.keys.deadLetterQueueDataKey(tenantId);
        const dataJson = await this.redis.hget(dlqDataKey, messageId);
        if (!dataJson) {
          return false;
        }
        const dlqMessage = JSON.parse(dataJson);
        await this.enqueue({
          queueId: dlqMessage.queueId,
          tenantId: dlqMessage.tenantId,
          payload: dlqMessage.payload,
          messageId: dlqMessage.id,
          timestamp: Date.now()
        });
        const pipeline = this.redis.pipeline();
        pipeline.zrem(dlqKey, messageId);
        pipeline.hdel(dlqDataKey, messageId);
        await pipeline.exec();
        span.setAttributes({
          [FairQueueAttributes.TENANT_ID]: tenantId,
          [FairQueueAttributes.MESSAGE_ID]: messageId
        });
        this.logger.info("Redrived message from DLQ", { tenantId, messageId });
        return true;
      },
      {
        kind: SpanKind.PRODUCER,
        attributes: {
          [MessagingAttributes.OPERATION]: "redrive"
        }
      }
    );
  }
  /**
   * Redrive all messages from DLQ back to their original queues.
   */
  async redriveAll(tenantId) {
    const messages = await this.getDeadLetterMessages(tenantId, 1e3);
    let count = 0;
    for (const message of messages) {
      const success = await this.redriveMessage(tenantId, message.id);
      if (success) count++;
    }
    return count;
  }
  /**
   * Purge all messages from a tenant's DLQ.
   */
  async purgeDeadLetterQueue(tenantId) {
    if (!this.deadLetterQueueEnabled) {
      return 0;
    }
    const dlqKey = this.keys.deadLetterQueueKey(tenantId);
    const dlqDataKey = this.keys.deadLetterQueueDataKey(tenantId);
    const count = await this.redis.zcard(dlqKey);
    const pipeline = this.redis.pipeline();
    pipeline.del(dlqKey);
    pipeline.del(dlqDataKey);
    await pipeline.exec();
    this.logger.info("Purged DLQ", { tenantId, count });
    return count;
  }
  /**
   * Get the number of messages in a tenant's DLQ.
   */
  async getDeadLetterQueueLength(tenantId) {
    if (!this.deadLetterQueueEnabled) {
      return 0;
    }
    const dlqKey = this.keys.deadLetterQueueKey(tenantId);
    return await this.redis.zcard(dlqKey);
  }
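  // Illustrative usage (editorial example): a DLQ maintenance pass built only
  // from the methods above.
  //
  //   const pending = await queue.getDeadLetterQueueLength("acme");
  //   if (pending > 0) {
  //     const redriven = await queue.redriveAll("acme");
  //     // each redriven message is re-enqueued with a fresh timestamp and
  //     // attempt 1, then removed from the DLQ sorted set and data hash
  //   }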
  // ============================================================================
  // Public API - Lifecycle
  // ============================================================================
  /**
   * Start the consumer loops and reclaim loop.
   */
  start() {
    if (this.isRunning) {
      return;
    }
    this.isRunning = true;
    this.abortController = new AbortController();
    if (this.workerQueueEnabled && this.workerQueueManager) {
      for (let shardId = 0; shardId < this.shardCount; shardId++) {
        const loop = this.#runMasterQueueConsumerLoop(shardId);
        this.masterQueueConsumerLoops.push(loop);
      }
      for (let consumerId = 0; consumerId < this.consumerCount; consumerId++) {
        const loop = this.#runWorkerQueueConsumerLoop(consumerId);
        this.workerQueueConsumerLoops.push(loop);
      }
    } else {
      for (let consumerId = 0; consumerId < this.consumerCount; consumerId++) {
        for (let shardId = 0; shardId < this.shardCount; shardId++) {
          const loop = this.#runDirectConsumerLoop(consumerId, shardId);
          this.masterQueueConsumerLoops.push(loop);
        }
      }
    }
    this.reclaimLoop = this.#runReclaimLoop();
    this.logger.info("FairQueue started", {
      consumerCount: this.consumerCount,
      shardCount: this.shardCount,
      workerQueueEnabled: this.workerQueueEnabled,
      consumerIntervalMs: this.consumerIntervalMs
    });
  }
  /**
   * Stop the consumer loops gracefully.
   */
  async stop() {
    if (!this.isRunning) {
      return;
    }
    this.isRunning = false;
    this.abortController.abort();
    await Promise.allSettled([
      ...this.masterQueueConsumerLoops,
      ...this.workerQueueConsumerLoops,
      this.reclaimLoop
    ]);
    this.masterQueueConsumerLoops = [];
    this.workerQueueConsumerLoops = [];
    this.reclaimLoop = void 0;
    this.logger.info("FairQueue stopped");
  }
  /**
   * Close all resources.
   */
  async close() {
    await this.stop();
    await Promise.all([
      this.masterQueue.close(),
      this.concurrencyManager?.close(),
      this.visibilityManager.close(),
      this.workerQueueManager?.close(),
      this.scheduler.close?.(),
      this.redis.quit()
    ]);
  }
  // ============================================================================
  // Public API - Inspection
  // ============================================================================
  /**
   * Get the number of messages in a queue.
   */
  async getQueueLength(queueId) {
    const queueKey = this.keys.queueKey(queueId);
    return await this.redis.zcard(queueKey);
  }
  /**
   * Get total queue count across all shards.
   */
  async getTotalQueueCount() {
    return await this.masterQueue.getTotalQueueCount();
  }
  /**
   * Get total in-flight message count.
   */
  async getTotalInflightCount() {
    return await this.visibilityManager.getTotalInflightCount();
  }
  /**
   * Get the shard ID for a queue.
   */
  getShardForQueue(queueId) {
    return this.masterQueue.getShardForQueue(queueId);
  }
// ============================================================================
|
|
14408
|
+
// Private - Master Queue Consumer Loop (Two-Stage)
|
|
14409
|
+
// ============================================================================
|
|
14410
|
+
async #runMasterQueueConsumerLoop(shardId) {
|
|
14411
|
+
const loopId = `master-shard-${shardId}`;
|
|
14412
|
+
try {
|
|
14413
|
+
for await (const _ of promises.setInterval(this.consumerIntervalMs, null, {
|
|
14414
|
+
signal: this.abortController.signal
|
|
14415
|
+
})) {
|
|
14416
|
+
try {
|
|
14417
|
+
await this.#processMasterQueueShard(loopId, shardId);
|
|
14418
|
+
} catch (error) {
|
|
14419
|
+
this.logger.error("Master queue consumer error", {
|
|
14420
|
+
loopId,
|
|
14421
|
+
shardId,
|
|
14422
|
+
error: error instanceof Error ? error.message : String(error)
|
|
14423
|
+
});
|
|
14424
|
+
}
|
|
14425
|
+
}
|
|
14426
|
+
} catch (error) {
|
|
14427
|
+
if (error instanceof Error && error.name === "AbortError") {
|
|
14428
|
+
this.logger.debug("Master queue consumer aborted", { loopId });
|
|
14429
|
+
return;
|
|
14430
|
+
}
|
|
14431
|
+
throw error;
|
|
14432
|
+
}
|
|
14433
|
+
}
+  async #processMasterQueueShard(loopId, shardId) {
+    const masterQueueKey = this.keys.masterQueueKey(shardId);
+    const context2 = this.#createSchedulerContext();
+    const tenantQueues = await this.scheduler.selectQueues(masterQueueKey, loopId, context2);
+    if (tenantQueues.length === 0) {
+      return;
+    }
+    for (const { tenantId, queues } of tenantQueues) {
+      for (const queueId of queues) {
+        if (this.cooloffEnabled && this.#isInCooloff(queueId)) {
+          continue;
+        }
+        const processed = await this.#claimAndPushToWorkerQueue(loopId, queueId, tenantId, shardId);
+        if (processed) {
+          await this.scheduler.recordProcessed?.(tenantId, queueId);
+          this.#resetCooloff(queueId);
+        } else {
+          this.#incrementCooloff(queueId);
+        }
+      }
+    }
+  }
+  async #claimAndPushToWorkerQueue(loopId, queueId, tenantId, shardId) {
+    const queueKey = this.keys.queueKey(queueId);
+    const queueItemsKey = this.keys.queueItemsKey(queueId);
+    const masterQueueKey = this.keys.masterQueueKey(shardId);
+    const descriptor = this.queueDescriptorCache.get(queueId) ?? {
+      id: queueId,
+      tenantId,
+      metadata: {}
+    };
+    if (this.concurrencyManager) {
+      const check = await this.concurrencyManager.canProcess(descriptor);
+      if (!check.allowed) {
+        return false;
+      }
+    }
+    if (this.globalRateLimiter) {
+      const result = await this.globalRateLimiter.limit();
+      if (!result.allowed && result.resetAt) {
+        const waitMs = Math.max(0, result.resetAt - Date.now());
+        if (waitMs > 0) {
+          this.logger.debug("Global rate limit reached, waiting", { waitMs, loopId });
+          await new Promise((resolve) => setTimeout(resolve, waitMs));
+        }
+      }
+    }
+    const claimResult = await this.visibilityManager.claim(
+      queueId,
+      queueKey,
+      queueItemsKey,
+      loopId,
+      this.visibilityTimeoutMs
+    );
+    if (!claimResult.claimed || !claimResult.message) {
+      await this.redis.updateMasterQueueIfEmpty(masterQueueKey, queueKey, queueId);
+      return false;
+    }
+    const { message } = claimResult;
+    if (this.concurrencyManager) {
+      const reserved = await this.concurrencyManager.reserve(descriptor, message.messageId);
+      if (!reserved) {
+        await this.visibilityManager.release(message.messageId, queueId, queueKey, queueItemsKey);
+        return false;
+      }
+    }
+    const workerQueueId = message.payload.workerQueue ?? queueId;
+    const messageKey = `${message.messageId}:${queueId}`;
+    await this.workerQueueManager.push(workerQueueId, messageKey);
+    return true;
+  }
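
The messageKey handed to the worker queue encodes both halves of the claim as `${message.messageId}:${queueId}`; the worker-queue consumer below splits it at the first colon only, so message IDs must not contain ":" while queue IDs may. A sketch of the convention (the example IDs are hypothetical):

// Mirrors the parsing in #runWorkerQueueConsumerLoop below.
function parseMessageKey(messageKey) {
  const colonIndex = messageKey.indexOf(":");
  if (colonIndex === -1) throw new Error(`Invalid message key: ${messageKey}`);
  return {
    messageId: messageKey.substring(0, colonIndex),
    queueId: messageKey.substring(colonIndex + 1), // may itself contain colons
  };
}
// parseMessageKey("msg_123:tenant-a:emails")
// => { messageId: "msg_123", queueId: "tenant-a:emails" }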
+  // ============================================================================
+  // Private - Worker Queue Consumer Loop (Two-Stage)
+  // ============================================================================
+  async #runWorkerQueueConsumerLoop(consumerId) {
+    const loopId = `worker-${consumerId}`;
+    const workerQueueId = loopId;
+    try {
+      while (this.isRunning) {
+        if (!this.messageHandler) {
+          await new Promise((resolve) => setTimeout(resolve, this.consumerIntervalMs));
+          continue;
+        }
+        try {
+          const messageKey = await this.workerQueueManager.blockingPop(
+            workerQueueId,
+            this.workerQueueBlockingTimeoutSeconds,
+            this.abortController.signal
+          );
+          if (!messageKey) {
+            continue;
+          }
+          const colonIndex = messageKey.indexOf(":");
+          if (colonIndex === -1) {
+            this.logger.error("Invalid message key format", { messageKey });
+            continue;
+          }
+          const messageId = messageKey.substring(0, colonIndex);
+          const queueId = messageKey.substring(colonIndex + 1);
+          await this.#processMessageFromWorkerQueue(loopId, messageId, queueId);
+        } catch (error) {
+          if (this.abortController.signal.aborted) {
+            break;
+          }
+          this.logger.error("Worker queue consumer error", {
+            loopId,
+            error: error instanceof Error ? error.message : String(error)
+          });
+        }
+      }
+    } catch (error) {
+      if (error instanceof Error && error.name === "AbortError") {
+        this.logger.debug("Worker queue consumer aborted", { loopId });
+        return;
+      }
+      throw error;
+    }
+  }
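
WorkerQueueManager.blockingPop is defined elsewhere in this file; as a rough sketch of the shape the loop above implies (a guess under stated assumptions, not the shipped implementation), a BLPOP with a bounded timeout lets the loop wake periodically to re-check isRunning and the abort signal:

// Assumption: a dedicated ioredis connection, since BLPOP blocks it.
async function blockingPop(redis, workerQueueKey, timeoutSeconds) {
  const result = await redis.blpop(workerQueueKey, timeoutSeconds);
  return result ? result[1] : null; // BLPOP resolves [key, value], or null on timeout
}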
+  async #processMessageFromWorkerQueue(loopId, messageId, queueId) {
+    const shardId = this.masterQueue.getShardForQueue(queueId);
+    const inflightDataKey = this.keys.inflightDataKey(shardId);
+    const dataJson = await this.redis.hget(inflightDataKey, messageId);
+    if (!dataJson) {
+      this.logger.error("Message not found in in-flight data", { messageId, queueId });
+      return;
+    }
+    let storedMessage;
+    try {
+      storedMessage = JSON.parse(dataJson);
+    } catch {
+      this.logger.error("Failed to parse message data", { messageId, queueId });
+      return;
+    }
+    await this.#processMessage(loopId, storedMessage, queueId);
+  }
+  // ============================================================================
+  // Private - Direct Consumer Loop (No Worker Queue)
+  // ============================================================================
+  async #runDirectConsumerLoop(consumerId, shardId) {
+    const loopId = `consumer-${consumerId}-shard-${shardId}`;
+    try {
+      for await (const _ of promises.setInterval(this.consumerIntervalMs, null, {
+        signal: this.abortController.signal
+      })) {
+        if (!this.messageHandler) {
+          continue;
+        }
+        try {
+          await this.#processDirectIteration(loopId, shardId);
+        } catch (error) {
+          this.logger.error("Direct consumer iteration error", {
+            loopId,
+            error: error instanceof Error ? error.message : String(error)
+          });
+        }
+      }
+    } catch (error) {
+      if (error instanceof Error && error.name === "AbortError") {
+        this.logger.debug("Direct consumer loop aborted", { loopId });
+        return;
+      }
+      throw error;
+    }
+  }
+  async #processDirectIteration(loopId, shardId) {
+    const masterQueueKey = this.keys.masterQueueKey(shardId);
+    const context2 = this.#createSchedulerContext();
+    const tenantQueues = await this.scheduler.selectQueues(masterQueueKey, loopId, context2);
+    if (tenantQueues.length === 0) {
+      return;
+    }
+    for (const { tenantId, queues } of tenantQueues) {
+      let availableSlots = 1;
+      if (this.concurrencyManager) {
+        const [current, limit] = await Promise.all([
+          this.concurrencyManager.getCurrentConcurrency("tenant", tenantId),
+          this.concurrencyManager.getConcurrencyLimit("tenant", tenantId)
+        ]);
+        availableSlots = Math.max(1, limit - current);
+      }
+      let slotsUsed = 0;
+      queueLoop: for (const queueId of queues) {
+        while (slotsUsed < availableSlots) {
+          if (this.cooloffEnabled && this.#isInCooloff(queueId)) {
+            break;
+          }
+          const processed = await this.#processOneMessage(loopId, queueId, tenantId, shardId);
+          if (processed) {
+            await this.scheduler.recordProcessed?.(tenantId, queueId);
+            this.#resetCooloff(queueId);
+            slotsUsed++;
+          } else {
+            this.#incrementCooloff(queueId);
+            break;
+          }
+        }
+        if (slotsUsed >= availableSlots) {
+          break queueLoop;
+        }
+      }
+    }
+  }
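
A worked restatement of the per-tenant slot arithmetic above (the numbers are hypothetical): a tenant with limit 10 and 7 messages in flight gets three slots this iteration.

function availableSlotsFor(limit, current) {
  return Math.max(1, limit - current); // floor of 1: an at-capacity tenant is still probed once
}
// availableSlotsFor(10, 7) === 3; availableSlotsFor(5, 5) === 1
// The floor does not bypass limits: #processOneMessage still runs the
// concurrencyManager.canProcess check before claiming anything.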
+  async #processOneMessage(loopId, queueId, tenantId, shardId) {
+    const queueKey = this.keys.queueKey(queueId);
+    const queueItemsKey = this.keys.queueItemsKey(queueId);
+    const masterQueueKey = this.keys.masterQueueKey(shardId);
+    const descriptor = this.queueDescriptorCache.get(queueId) ?? {
+      id: queueId,
+      tenantId,
+      metadata: {}
+    };
+    if (this.concurrencyManager) {
+      const check = await this.concurrencyManager.canProcess(descriptor);
+      if (!check.allowed) {
+        return false;
+      }
+    }
+    if (this.globalRateLimiter) {
+      const result = await this.globalRateLimiter.limit();
+      if (!result.allowed && result.resetAt) {
+        const waitMs = Math.max(0, result.resetAt - Date.now());
+        if (waitMs > 0) {
+          this.logger.debug("Global rate limit reached, waiting", { waitMs, loopId });
+          await new Promise((resolve) => setTimeout(resolve, waitMs));
+        }
+      }
+    }
+    const claimResult = await this.visibilityManager.claim(
+      queueId,
+      queueKey,
+      queueItemsKey,
+      loopId,
+      this.visibilityTimeoutMs
+    );
+    if (!claimResult.claimed || !claimResult.message) {
+      await this.redis.updateMasterQueueIfEmpty(masterQueueKey, queueKey, queueId);
+      return false;
+    }
+    const { message } = claimResult;
+    if (this.concurrencyManager) {
+      const reserved = await this.concurrencyManager.reserve(descriptor, message.messageId);
+      if (!reserved) {
+        await this.visibilityManager.release(message.messageId, queueId, queueKey, queueItemsKey);
+        return false;
+      }
+    }
+    await this.#processMessage(loopId, message.payload, queueId);
+    return true;
+  }
+  // ============================================================================
+  // Private - Message Processing
+  // ============================================================================
+  async #processMessage(loopId, storedMessage, queueId) {
+    const startTime = Date.now();
+    const queueKey = this.keys.queueKey(queueId);
+    const queueItemsKey = this.keys.queueItemsKey(queueId);
+    const shardId = this.masterQueue.getShardForQueue(queueId);
+    const masterQueueKey = this.keys.masterQueueKey(shardId);
+    const descriptor = this.queueDescriptorCache.get(queueId) ?? {
+      id: queueId,
+      tenantId: storedMessage.tenantId,
+      metadata: storedMessage.metadata ?? {}
+    };
+    let payload;
+    if (this.payloadSchema) {
+      const result = this.payloadSchema.safeParse(storedMessage.payload);
+      if (!result.success) {
+        this.logger.error("Payload validation failed on dequeue", {
+          messageId: storedMessage.id,
+          queueId,
+          error: result.error.message
+        });
+        await this.#moveToDeadLetterQueue(storedMessage, "Payload validation failed");
+        if (this.concurrencyManager) {
+          try {
+            await this.concurrencyManager.release(descriptor, storedMessage.id);
+          } catch (releaseError) {
+            this.logger.error("Failed to release concurrency slot after payload validation failure", {
+              messageId: storedMessage.id,
+              queueId,
+              error: releaseError instanceof Error ? releaseError.message : String(releaseError)
+            });
+          }
+        }
+        return;
+      }
+      payload = result.data;
+    } else {
+      payload = storedMessage.payload;
+    }
+    const queueMessage = {
+      id: storedMessage.id,
+      queueId,
+      payload,
+      timestamp: storedMessage.timestamp,
+      attempt: storedMessage.attempt,
+      metadata: storedMessage.metadata
+    };
+    const queueTime = startTime - storedMessage.timestamp;
+    this.telemetry.recordQueueTime(
+      queueTime,
+      this.telemetry.messageAttributes({
+        queueId,
+        tenantId: storedMessage.tenantId,
+        messageId: storedMessage.id
+      })
+    );
+    const handlerContext = {
+      message: queueMessage,
+      queue: descriptor,
+      consumerId: loopId,
+      heartbeat: async () => {
+        return this.visibilityManager.heartbeat(
+          storedMessage.id,
+          queueId,
+          this.heartbeatIntervalMs
+        );
+      },
+      complete: async () => {
+        await this.#completeMessage(storedMessage, queueId, queueKey, masterQueueKey, descriptor);
+        this.telemetry.recordComplete(
+          this.telemetry.messageAttributes({
+            queueId,
+            tenantId: storedMessage.tenantId,
+            messageId: storedMessage.id
+          })
+        );
+        this.telemetry.recordProcessingTime(
+          Date.now() - startTime,
+          this.telemetry.messageAttributes({
+            queueId,
+            tenantId: storedMessage.tenantId,
+            messageId: storedMessage.id
+          })
+        );
+      },
+      release: async () => {
+        await this.#releaseMessage(storedMessage, queueId, queueKey, queueItemsKey, descriptor);
+      },
+      fail: async (error) => {
+        await this.#handleMessageFailure(
+          storedMessage,
+          queueId,
+          queueKey,
+          queueItemsKey,
+          masterQueueKey,
+          descriptor,
+          error
+        );
+      }
+    };
+    try {
+      await this.telemetry.trace(
+        "processMessage",
+        async (span) => {
+          span.setAttributes({
+            [FairQueueAttributes.QUEUE_ID]: queueId,
+            [FairQueueAttributes.TENANT_ID]: storedMessage.tenantId,
+            [FairQueueAttributes.MESSAGE_ID]: storedMessage.id,
+            [FairQueueAttributes.ATTEMPT]: storedMessage.attempt,
+            [FairQueueAttributes.CONSUMER_ID]: loopId
+          });
+          await this.messageHandler(handlerContext);
+        },
+        {
+          kind: SpanKind.CONSUMER,
+          attributes: {
+            [MessagingAttributes.OPERATION]: "process"
+          }
+        }
+      );
+    } catch (error) {
+      this.logger.error("Message handler error", {
+        messageId: storedMessage.id,
+        queueId,
+        error: error instanceof Error ? error.message : String(error)
+      });
+      await handlerContext.fail(error instanceof Error ? error : new Error(String(error)));
+    }
+  }
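
The handlerContext built above is the entire consumer-side contract: the handler gets the decoded message plus heartbeat/complete/release/fail callbacks. A minimal handler sketch (how the handler is registered is outside this hunk, and doWork is hypothetical):

const handler = async ({ message, heartbeat, complete, fail }) => {
  try {
    await heartbeat();             // extend the visibility timeout before long work
    await doWork(message.payload); // hypothetical application logic
    await complete();              // ack: clears in-flight state and frees the concurrency slot
  } catch (error) {
    await fail(error);             // routes through the retry strategy, then the DLQ
  }
};

The explicit try/catch is optional: as the surrounding catch block shows, a handler that simply throws is routed to fail() automatically, and release() is available to requeue a message without recording a failure.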
+  async #completeMessage(storedMessage, queueId, queueKey, masterQueueKey, descriptor) {
+    this.masterQueue.getShardForQueue(queueId);
+    await this.visibilityManager.complete(storedMessage.id, queueId);
+    if (this.concurrencyManager) {
+      await this.concurrencyManager.release(descriptor, storedMessage.id);
+    }
+    await this.redis.updateMasterQueueIfEmpty(masterQueueKey, queueKey, queueId);
+    this.logger.debug("Message completed", {
+      messageId: storedMessage.id,
+      queueId
+    });
+  }
+  async #releaseMessage(storedMessage, queueId, queueKey, queueItemsKey, descriptor) {
+    await this.visibilityManager.release(
+      storedMessage.id,
+      queueId,
+      queueKey,
+      queueItemsKey,
+      Date.now()
+      // Put at back of queue
+    );
+    if (this.concurrencyManager) {
+      await this.concurrencyManager.release(descriptor, storedMessage.id);
+    }
+    this.logger.debug("Message released", {
+      messageId: storedMessage.id,
+      queueId
+    });
+  }
+  async #handleMessageFailure(storedMessage, queueId, queueKey, queueItemsKey, masterQueueKey, descriptor, error) {
+    this.telemetry.recordFailure(
+      this.telemetry.messageAttributes({
+        queueId,
+        tenantId: storedMessage.tenantId,
+        messageId: storedMessage.id,
+        attempt: storedMessage.attempt
+      })
+    );
+    if (this.retryStrategy) {
+      const nextDelay = this.retryStrategy.getNextDelay(storedMessage.attempt, error);
+      if (nextDelay !== null) {
+        const updatedMessage = {
+          ...storedMessage,
+          attempt: storedMessage.attempt + 1
+        };
+        await this.visibilityManager.release(
+          storedMessage.id,
+          queueId,
+          queueKey,
+          queueItemsKey,
+          Date.now() + nextDelay
+        );
+        await this.redis.hset(queueItemsKey, storedMessage.id, JSON.stringify(updatedMessage));
+        if (this.concurrencyManager) {
+          await this.concurrencyManager.release(descriptor, storedMessage.id);
+        }
+        this.telemetry.recordRetry(
+          this.telemetry.messageAttributes({
+            queueId,
+            tenantId: storedMessage.tenantId,
+            messageId: storedMessage.id,
+            attempt: storedMessage.attempt + 1
+          })
+        );
+        this.logger.debug("Message scheduled for retry", {
+          messageId: storedMessage.id,
+          queueId,
+          attempt: storedMessage.attempt + 1,
+          delayMs: nextDelay
+        });
+        return;
+      }
+    }
+    await this.#moveToDeadLetterQueue(storedMessage, error?.message);
+    if (this.concurrencyManager) {
+      await this.concurrencyManager.release(descriptor, storedMessage.id);
+    }
+  }
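
The retry contract visible above is small: getNextDelay(attempt, error) returns a delay in milliseconds to requeue the message, or null to dead-letter it. A sketch of a capped exponential backoff under that contract (the constants are hypothetical; the package also exports ExponentialBackoffRetry and related strategies below):

const cappedBackoff = {
  getNextDelay(attempt, _error) {
    if (attempt >= 5) return null;               // exhausted -> dead letter queue
    return Math.min(30_000, 250 * 2 ** attempt); // 250ms, 500ms, 1s, ... capped at 30s
  },
};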
+  async #moveToDeadLetterQueue(storedMessage, errorMessage) {
+    if (!this.deadLetterQueueEnabled) {
+      await this.visibilityManager.complete(storedMessage.id, storedMessage.queueId);
+      return;
+    }
+    const dlqKey = this.keys.deadLetterQueueKey(storedMessage.tenantId);
+    const dlqDataKey = this.keys.deadLetterQueueDataKey(storedMessage.tenantId);
+    this.masterQueue.getShardForQueue(storedMessage.queueId);
+    const dlqMessage = {
+      id: storedMessage.id,
+      queueId: storedMessage.queueId,
+      tenantId: storedMessage.tenantId,
+      payload: storedMessage.payload,
+      deadLetteredAt: Date.now(),
+      attempts: storedMessage.attempt,
+      lastError: errorMessage,
+      originalTimestamp: storedMessage.timestamp
+    };
+    await this.visibilityManager.complete(storedMessage.id, storedMessage.queueId);
+    const pipeline = this.redis.pipeline();
+    pipeline.zadd(dlqKey, dlqMessage.deadLetteredAt, storedMessage.id);
+    pipeline.hset(dlqDataKey, storedMessage.id, JSON.stringify(dlqMessage));
+    await pipeline.exec();
+    this.telemetry.recordDLQ(
+      this.telemetry.messageAttributes({
+        queueId: storedMessage.queueId,
+        tenantId: storedMessage.tenantId,
+        messageId: storedMessage.id,
+        attempt: storedMessage.attempt
+      })
+    );
+    this.logger.info("Message moved to DLQ", {
+      messageId: storedMessage.id,
+      queueId: storedMessage.queueId,
+      tenantId: storedMessage.tenantId,
+      attempts: storedMessage.attempt,
+      error: errorMessage
+    });
+  }
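
Given the layout written above, a per-tenant sorted set scored by deadLetteredAt plus a hash of JSON entries, dead-lettered messages can be listed with plain Redis calls. A sketch (key names come from the key producer, so they are passed in rather than guessed):

async function listDeadLetters(redis, dlqKey, dlqDataKey, limit = 10) {
  const ids = await redis.zrange(dlqKey, 0, limit - 1); // oldest first
  if (ids.length === 0) return [];
  const entries = await redis.hmget(dlqDataKey, ...ids);
  return entries.filter(Boolean).map((json) => JSON.parse(json));
}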
+  // ============================================================================
+  // Private - Reclaim Loop
+  // ============================================================================
+  async #runReclaimLoop() {
+    try {
+      for await (const _ of promises.setInterval(this.reclaimIntervalMs, null, {
+        signal: this.abortController.signal
+      })) {
+        try {
+          await this.#reclaimTimedOutMessages();
+        } catch (error) {
+          this.logger.error("Reclaim loop error", {
+            error: error instanceof Error ? error.message : String(error)
+          });
+        }
+      }
+    } catch (error) {
+      if (error instanceof Error && error.name === "AbortError") {
+        this.logger.debug("Reclaim loop aborted");
+        return;
+      }
+      throw error;
+    }
+  }
+  async #reclaimTimedOutMessages() {
+    let totalReclaimed = 0;
+    for (let shardId = 0; shardId < this.shardCount; shardId++) {
+      const reclaimed = await this.visibilityManager.reclaimTimedOut(shardId, (queueId) => ({
+        queueKey: this.keys.queueKey(queueId),
+        queueItemsKey: this.keys.queueItemsKey(queueId)
+      }));
+      totalReclaimed += reclaimed;
+    }
+    if (totalReclaimed > 0) {
+      this.logger.info("Reclaimed timed-out messages", { count: totalReclaimed });
+    }
+  }
+  // ============================================================================
+  // Private - Cooloff State
+  // ============================================================================
+  #isInCooloff(queueId) {
+    const state = this.queueCooloffStates.get(queueId);
+    if (!state) return false;
+    if (state.tag === "cooloff") {
+      if (Date.now() >= state.expiresAt) {
+        this.queueCooloffStates.delete(queueId);
+        return false;
+      }
+      return true;
+    }
+    return false;
+  }
+  #incrementCooloff(queueId) {
+    const state = this.queueCooloffStates.get(queueId) ?? {
+      tag: "normal",
+      consecutiveFailures: 0
+    };
+    if (state.tag === "normal") {
+      const newFailures = state.consecutiveFailures + 1;
+      if (newFailures >= this.cooloffThreshold) {
+        this.queueCooloffStates.set(queueId, {
+          tag: "cooloff",
+          expiresAt: Date.now() + this.cooloffPeriodMs
+        });
+        this.logger.debug("Queue entered cooloff", {
+          queueId,
+          cooloffPeriodMs: this.cooloffPeriodMs,
+          consecutiveFailures: newFailures
+        });
+      } else {
+        this.queueCooloffStates.set(queueId, {
+          tag: "normal",
+          consecutiveFailures: newFailures
+        });
+      }
+    }
+  }
+  #resetCooloff(queueId) {
+    this.queueCooloffStates.delete(queueId);
+  }
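
A timeline sketch of the cooloff state machine above (threshold and period are configuration values; the numbers here are hypothetical, with cooloffThreshold = 3):

// miss #1, #2           -> { tag: "normal", consecutiveFailures: 1..2 }
// miss #3               -> { tag: "cooloff", expiresAt: Date.now() + cooloffPeriodMs }
// while in cooloff      -> #isInCooloff returns true and the queue is skipped
// after expiresAt       -> the state is lazily deleted on the next check
// any successful claim  -> #resetCooloff deletes the state immediately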
+  // ============================================================================
+  // Private - Helpers
+  // ============================================================================
+  #createSchedulerContext() {
+    return {
+      getCurrentConcurrency: async (groupName, groupId) => {
+        if (!this.concurrencyManager) return 0;
+        return this.concurrencyManager.getCurrentConcurrency(groupName, groupId);
+      },
+      getConcurrencyLimit: async (groupName, groupId) => {
+        if (!this.concurrencyManager) return Infinity;
+        return this.concurrencyManager.getConcurrencyLimit(groupName, groupId);
+      },
+      isAtCapacity: async (groupName, groupId) => {
+        if (!this.concurrencyManager) return false;
+        return this.concurrencyManager.isAtCapacity(groupName, groupId);
+      },
+      getQueueDescriptor: (queueId) => {
+        return this.queueDescriptorCache.get(queueId) ?? {
+          id: queueId,
+          tenantId: this.keys.extractTenantId(queueId),
+          metadata: {}
+        };
+      }
+    };
+  }
+  // ============================================================================
+  // Private - Redis Commands
+  // ============================================================================
+  #registerCommands() {
+    this.redis.defineCommand("enqueueMessageAtomic", {
+      numberOfKeys: 3,
+      lua: `
+        local queueKey = KEYS[1]
+        local queueItemsKey = KEYS[2]
+        local masterQueueKey = KEYS[3]
+
+        local queueId = ARGV[1]
+        local messageId = ARGV[2]
+        local timestamp = tonumber(ARGV[3])
+        local payload = ARGV[4]
+
+        -- Add to sorted set (score = timestamp)
+        redis.call('ZADD', queueKey, timestamp, messageId)
+
+        -- Store payload in hash
+        redis.call('HSET', queueItemsKey, messageId, payload)
+
+        -- Update master queue with oldest message timestamp
+        local oldest = redis.call('ZRANGE', queueKey, 0, 0, 'WITHSCORES')
+        if #oldest >= 2 then
+          redis.call('ZADD', masterQueueKey, oldest[2], queueId)
+        end
+
+        return 1
+      `
+    });
+    this.redis.defineCommand("enqueueBatchAtomic", {
+      numberOfKeys: 3,
+      lua: `
+        local queueKey = KEYS[1]
+        local queueItemsKey = KEYS[2]
+        local masterQueueKey = KEYS[3]
+
+        local queueId = ARGV[1]
+
+        -- Args after queueId are triples: [messageId, timestamp, payload, ...]
+        for i = 2, #ARGV, 3 do
+          local messageId = ARGV[i]
+          local timestamp = tonumber(ARGV[i + 1])
+          local payload = ARGV[i + 2]
+
+          -- Add to sorted set
+          redis.call('ZADD', queueKey, timestamp, messageId)
+
+          -- Store payload in hash
+          redis.call('HSET', queueItemsKey, messageId, payload)
+        end
+
+        -- Update master queue with oldest message timestamp
+        local oldest = redis.call('ZRANGE', queueKey, 0, 0, 'WITHSCORES')
+        if #oldest >= 2 then
+          redis.call('ZADD', masterQueueKey, oldest[2], queueId)
+        end
+
+        return (#ARGV - 1) / 3
+      `
+    });
+    this.redis.defineCommand("updateMasterQueueIfEmpty", {
+      numberOfKeys: 2,
+      lua: `
+        local masterQueueKey = KEYS[1]
+        local queueKey = KEYS[2]
+        local queueId = ARGV[1]
+
+        local count = redis.call('ZCARD', queueKey)
+        if count == 0 then
+          redis.call('ZREM', masterQueueKey, queueId)
+          return 1
+        else
+          -- Update with oldest message timestamp
+          local oldest = redis.call('ZRANGE', queueKey, 0, 0, 'WITHSCORES')
+          if #oldest >= 2 then
+            redis.call('ZADD', masterQueueKey, oldest[2], queueId)
+          end
+          return 0
+        end
+      `
+    });
+    if (this.workerQueueManager) {
+      this.workerQueueManager.registerCommands(this.redis);
+    }
+  }
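
ioredis's defineCommand (used above) attaches each script to the client as a method whose first numberOfKeys arguments are KEYS and the rest ARGV. A call to the single-message script therefore looks like this (the key literals depend on the key producer and are hypothetical):

await redis.enqueueMessageAtomic(
  "queue:tenant-a:emails",       // KEYS[1] queueKey (sorted set, score = timestamp)
  "queue:tenant-a:emails:items", // KEYS[2] queueItemsKey (hash of message JSON)
  "masterQueue:0",               // KEYS[3] masterQueueKey (sorted set of queue IDs)
  "tenant-a:emails",             // ARGV[1] queueId
  "msg_123",                     // ARGV[2] messageId
  Date.now(),                    // ARGV[3] timestamp
  JSON.stringify(storedMessage)  // ARGV[4] stored message JSON
);

Each script finishes by syncing the master queue: the enqueue scripts re-score the queue with its oldest message's timestamp, and updateMasterQueueIfEmpty either does the same or removes an empty queue, which is the ordering the schedulers consume.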
+};
+
+exports.BaseScheduler = BaseScheduler;
+exports.CallbackFairQueueKeyProducer = CallbackFairQueueKeyProducer;
+exports.ConcurrencyManager = ConcurrencyManager;
 exports.CronSchema = CronSchema;
+exports.CustomRetry = CustomRetry;
+exports.DRRScheduler = DRRScheduler;
+exports.DefaultFairQueueKeyProducer = DefaultFairQueueKeyProducer;
+exports.ExponentialBackoffRetry = ExponentialBackoffRetry;
+exports.FairQueue = FairQueue;
+exports.FairQueueAttributes = FairQueueAttributes;
+exports.FairQueueTelemetry = FairQueueTelemetry;
+exports.FixedDelayRetry = FixedDelayRetry;
+exports.ImmediateRetry = ImmediateRetry;
+exports.LinearBackoffRetry = LinearBackoffRetry;
+exports.MasterQueue = MasterQueue;
+exports.MessagingAttributes = MessagingAttributes;
+exports.NoRetry = NoRetry;
+exports.NoopScheduler = NoopScheduler;
+exports.RoundRobinScheduler = RoundRobinScheduler;
 exports.SimpleQueue = SimpleQueue;
+exports.VisibilityManager = VisibilityManager;
+exports.WeightedScheduler = WeightedScheduler;
 exports.Worker = Worker;
+exports.WorkerQueueManager = WorkerQueueManager;
+exports.createDefaultRetryStrategy = createDefaultRetryStrategy;
+exports.defaultRetryOptions = defaultRetryOptions;
+exports.noopTelemetry = noopTelemetry;
 //# sourceMappingURL=index.cjs.map
 //# sourceMappingURL=index.cjs.map
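
The new surface area exported above can be pulled in from the CJS build; constructor options live in the shipped typings (index.d.cts) and are not guessed at here:

// Names taken from the export list above; wiring beyond this is up to the caller.
const {
  FairQueue,
  DRRScheduler,
  ExponentialBackoffRetry,
  ConcurrencyManager,
} = require("@trigger.dev/redis-worker");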