bunqueue 2.4.0 → 2.4.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/application/queueManager.d.ts.map +1 -1
- package/dist/application/queueManager.js +1 -37
- package/dist/application/queueManager.js.map +1 -1
- package/dist/application/statsManager.d.ts +10 -0
- package/dist/application/statsManager.d.ts.map +1 -1
- package/dist/application/statsManager.js +42 -0
- package/dist/application/statsManager.js.map +1 -1
- package/dist/client/jobConversion.d.ts +3 -76
- package/dist/client/jobConversion.d.ts.map +1 -1
- package/dist/client/jobConversion.js +2 -100
- package/dist/client/jobConversion.js.map +1 -1
- package/dist/client/jobConversionHelpers.d.ts +13 -0
- package/dist/client/jobConversionHelpers.d.ts.map +1 -0
- package/dist/client/jobConversionHelpers.js +105 -0
- package/dist/client/jobConversionHelpers.js.map +1 -0
- package/dist/client/jobConversionTypes.d.ts +81 -0
- package/dist/client/jobConversionTypes.d.ts.map +1 -0
- package/dist/client/jobConversionTypes.js +6 -0
- package/dist/client/jobConversionTypes.js.map +1 -0
- package/dist/client/worker/worker.d.ts +4 -95
- package/dist/client/worker/worker.d.ts.map +1 -1
- package/dist/client/worker/worker.js +50 -282
- package/dist/client/worker/worker.js.map +1 -1
- package/dist/client/worker/workerHeartbeat.d.ts +16 -0
- package/dist/client/worker/workerHeartbeat.d.ts.map +1 -0
- package/dist/client/worker/workerHeartbeat.js +44 -0
- package/dist/client/worker/workerHeartbeat.js.map +1 -0
- package/dist/client/worker/workerPull.d.ts +21 -0
- package/dist/client/worker/workerPull.d.ts.map +1 -0
- package/dist/client/worker/workerPull.js +60 -0
- package/dist/client/worker/workerPull.js.map +1 -0
- package/dist/client/worker/workerRateLimiter.d.ts +37 -0
- package/dist/client/worker/workerRateLimiter.d.ts.map +1 -0
- package/dist/client/worker/workerRateLimiter.js +84 -0
- package/dist/client/worker/workerRateLimiter.js.map +1 -0
- package/dist/domain/queue/shard.d.ts +8 -23
- package/dist/domain/queue/shard.d.ts.map +1 -1
- package/dist/domain/queue/shard.js +30 -90
- package/dist/domain/queue/shard.js.map +1 -1
- package/dist/domain/queue/shardCounters.d.ts +36 -0
- package/dist/domain/queue/shardCounters.d.ts.map +1 -0
- package/dist/domain/queue/shardCounters.js +68 -0
- package/dist/domain/queue/shardCounters.js.map +1 -0
- package/dist/domain/queue/waiterManager.d.ts +19 -0
- package/dist/domain/queue/waiterManager.d.ts.map +1 -0
- package/dist/domain/queue/waiterManager.js +64 -0
- package/dist/domain/queue/waiterManager.js.map +1 -0
- package/dist/shared/boundedMap.d.ts +31 -0
- package/dist/shared/boundedMap.d.ts.map +1 -0
- package/dist/shared/boundedMap.js +78 -0
- package/dist/shared/boundedMap.js.map +1 -0
- package/dist/shared/boundedSet.d.ts +27 -0
- package/dist/shared/boundedSet.d.ts.map +1 -0
- package/dist/shared/boundedSet.js +64 -0
- package/dist/shared/boundedSet.js.map +1 -0
- package/dist/shared/lru.d.ts +5 -197
- package/dist/shared/lru.d.ts.map +1 -1
- package/dist/shared/lru.js +5 -538
- package/dist/shared/lru.js.map +1 -1
- package/dist/shared/lruMap.d.ts +43 -0
- package/dist/shared/lruMap.d.ts.map +1 -0
- package/dist/shared/lruMap.js +142 -0
- package/dist/shared/lruMap.js.map +1 -0
- package/dist/shared/lruSet.d.ts +37 -0
- package/dist/shared/lruSet.d.ts.map +1 -0
- package/dist/shared/lruSet.js +106 -0
- package/dist/shared/lruSet.js.map +1 -0
- package/dist/shared/ttlMap.d.ts +82 -0
- package/dist/shared/ttlMap.d.ts.map +1 -0
- package/dist/shared/ttlMap.js +169 -0
- package/dist/shared/ttlMap.js.map +1 -0
- package/package.json +1 -1
|
@@ -0,0 +1,16 @@
|
|
|
1
|
+
/**
 * WorkerHeartbeat - Heartbeat sending for TCP worker connections
 * Sends periodic heartbeats to keep locks alive and prevent stall detection
 */
import type { EventEmitter } from 'events';
import type { TcpConnection } from './types';
/** Everything sendHeartbeat needs from the surrounding worker. */
export interface HeartbeatDeps {
    /** IDs of every job this worker has pulled (including buffered ones). */
    readonly pulledJobIds: Set<string>;
    /** Lock token per job id; only consulted when `useLocks` is true. */
    readonly jobTokens: Map<string, string>;
    /** Active TCP connection, or null when disconnected (heartbeat is skipped). */
    readonly tcp: TcpConnection | null;
    /** When true, heartbeats carry tokens so the server can renew locks. */
    readonly useLocks: boolean;
    /** Receives 'error' events (tagged with context 'heartbeat') on send failure. */
    readonly emitter: EventEmitter;
}
/** Start a repeating heartbeat timer; caller is responsible for clearing it. */
export declare function startHeartbeat(deps: HeartbeatDeps, intervalMs: number): ReturnType<typeof setInterval>;
/** Send one heartbeat covering all currently pulled jobs; no-op when idle or offline. */
export declare function sendHeartbeat(deps: HeartbeatDeps): Promise<void>;
//# sourceMappingURL=workerHeartbeat.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"workerHeartbeat.d.ts","sourceRoot":"","sources":["../../../src/client/worker/workerHeartbeat.ts"],"names":[],"mappings":"AAAA;;;GAGG;AAEH,OAAO,KAAK,EAAE,YAAY,EAAE,MAAM,QAAQ,CAAC;AAC3C,OAAO,KAAK,EAAE,aAAa,EAAE,MAAM,SAAS,CAAC;AAE7C,MAAM,WAAW,aAAa;IAC5B,QAAQ,CAAC,YAAY,EAAE,GAAG,CAAC,MAAM,CAAC,CAAC;IACnC,QAAQ,CAAC,SAAS,EAAE,GAAG,CAAC,MAAM,EAAE,MAAM,CAAC,CAAC;IACxC,QAAQ,CAAC,GAAG,EAAE,aAAa,GAAG,IAAI,CAAC;IACnC,QAAQ,CAAC,QAAQ,EAAE,OAAO,CAAC;IAC3B,QAAQ,CAAC,OAAO,EAAE,YAAY,CAAC;CAChC;AAED,wBAAgB,cAAc,CAC5B,IAAI,EAAE,aAAa,EACnB,UAAU,EAAE,MAAM,GACjB,UAAU,CAAC,OAAO,WAAW,CAAC,CAEhC;AAED,wBAAsB,aAAa,CAAC,IAAI,EAAE,aAAa,GAAG,OAAO,CAAC,IAAI,CAAC,CA+BtE"}
|
|
@@ -0,0 +1,44 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* WorkerHeartbeat - Heartbeat sending for TCP worker connections
|
|
3
|
+
* Sends periodic heartbeats to keep locks alive and prevent stall detection
|
|
4
|
+
*/
|
|
5
|
+
export function startHeartbeat(deps, intervalMs) {
|
|
6
|
+
return setInterval(() => void sendHeartbeat(deps), intervalMs);
|
|
7
|
+
}
|
|
8
|
+
export async function sendHeartbeat(deps) {
|
|
9
|
+
// Send heartbeat for ALL pulled jobs (including buffered ones)
|
|
10
|
+
// This is critical: when locks are enabled, we need to renew them
|
|
11
|
+
// even for jobs sitting in the buffer waiting to be processed
|
|
12
|
+
if (deps.pulledJobIds.size === 0 || !deps.tcp)
|
|
13
|
+
return;
|
|
14
|
+
try {
|
|
15
|
+
// Always take a fresh snapshot - avoids race with job start/complete
|
|
16
|
+
const ids = Array.from(deps.pulledJobIds);
|
|
17
|
+
if (ids.length === 0)
|
|
18
|
+
return;
|
|
19
|
+
if (deps.useLocks) {
|
|
20
|
+
// With locks: include tokens for lock renewal
|
|
21
|
+
const tokens = ids.map((id) => deps.jobTokens.get(id) ?? '');
|
|
22
|
+
if (ids.length === 1) {
|
|
23
|
+
await deps.tcp.send({ cmd: 'JobHeartbeat', id: ids[0], token: tokens[0] || undefined });
|
|
24
|
+
}
|
|
25
|
+
else {
|
|
26
|
+
await deps.tcp.send({ cmd: 'JobHeartbeatB', ids, tokens });
|
|
27
|
+
}
|
|
28
|
+
}
|
|
29
|
+
else {
|
|
30
|
+
// Without locks: simple heartbeat for stall detection only
|
|
31
|
+
if (ids.length === 1) {
|
|
32
|
+
await deps.tcp.send({ cmd: 'JobHeartbeat', id: ids[0] });
|
|
33
|
+
}
|
|
34
|
+
else {
|
|
35
|
+
await deps.tcp.send({ cmd: 'JobHeartbeatB', ids });
|
|
36
|
+
}
|
|
37
|
+
}
|
|
38
|
+
}
|
|
39
|
+
catch (err) {
|
|
40
|
+
const error = err instanceof Error ? err : new Error(String(err));
|
|
41
|
+
deps.emitter.emit('error', Object.assign(error, { context: 'heartbeat' }));
|
|
42
|
+
}
|
|
43
|
+
}
|
|
44
|
+
//# sourceMappingURL=workerHeartbeat.js.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"workerHeartbeat.js","sourceRoot":"","sources":["../../../src/client/worker/workerHeartbeat.ts"],"names":[],"mappings":"AAAA;;;GAGG;AAaH,MAAM,UAAU,cAAc,CAC5B,IAAmB,EACnB,UAAkB;IAElB,OAAO,WAAW,CAAC,GAAG,EAAE,CAAC,KAAK,aAAa,CAAC,IAAI,CAAC,EAAE,UAAU,CAAC,CAAC;AACjE,CAAC;AAED,MAAM,CAAC,KAAK,UAAU,aAAa,CAAC,IAAmB;IACrD,+DAA+D;IAC/D,kEAAkE;IAClE,8DAA8D;IAC9D,IAAI,IAAI,CAAC,YAAY,CAAC,IAAI,KAAK,CAAC,IAAI,CAAC,IAAI,CAAC,GAAG;QAAE,OAAO;IAEtD,IAAI,CAAC;QACH,qEAAqE;QACrE,MAAM,GAAG,GAAG,KAAK,CAAC,IAAI,CAAC,IAAI,CAAC,YAAY,CAAC,CAAC;QAC1C,IAAI,GAAG,CAAC,MAAM,KAAK,CAAC;YAAE,OAAO;QAE7B,IAAI,IAAI,CAAC,QAAQ,EAAE,CAAC;YAClB,8CAA8C;YAC9C,MAAM,MAAM,GAAG,GAAG,CAAC,GAAG,CAAC,CAAC,EAAE,EAAE,EAAE,CAAC,IAAI,CAAC,SAAS,CAAC,GAAG,CAAC,EAAE,CAAC,IAAI,EAAE,CAAC,CAAC;YAC7D,IAAI,GAAG,CAAC,MAAM,KAAK,CAAC,EAAE,CAAC;gBACrB,MAAM,IAAI,CAAC,GAAG,CAAC,IAAI,CAAC,EAAE,GAAG,EAAE,cAAc,EAAE,EAAE,EAAE,GAAG,CAAC,CAAC,CAAC,EAAE,KAAK,EAAE,MAAM,CAAC,CAAC,CAAC,IAAI,SAAS,EAAE,CAAC,CAAC;YAC1F,CAAC;iBAAM,CAAC;gBACN,MAAM,IAAI,CAAC,GAAG,CAAC,IAAI,CAAC,EAAE,GAAG,EAAE,eAAe,EAAE,GAAG,EAAE,MAAM,EAAE,CAAC,CAAC;YAC7D,CAAC;QACH,CAAC;aAAM,CAAC;YACN,2DAA2D;YAC3D,IAAI,GAAG,CAAC,MAAM,KAAK,CAAC,EAAE,CAAC;gBACrB,MAAM,IAAI,CAAC,GAAG,CAAC,IAAI,CAAC,EAAE,GAAG,EAAE,cAAc,EAAE,EAAE,EAAE,GAAG,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC;YAC3D,CAAC;iBAAM,CAAC;gBACN,MAAM,IAAI,CAAC,GAAG,CAAC,IAAI,CAAC,EAAE,GAAG,EAAE,eAAe,EAAE,GAAG,EAAE,CAAC,CAAC;YACrD,CAAC;QACH,CAAC;IACH,CAAC;IAAC,OAAO,GAAG,EAAE,CAAC;QACb,MAAM,KAAK,GAAG,GAAG,YAAY,KAAK,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,IAAI,KAAK,CAAC,MAAM,CAAC,GAAG,CAAC,CAAC,CAAC;QAClE,IAAI,CAAC,OAAO,CAAC,IAAI,CAAC,OAAO,EAAE,MAAM,CAAC,MAAM,CAAC,KAAK,EAAE,EAAE,OAAO,EAAE,WAAW,EAAE,CAAC,CAAC,CAAC;IAC7E,CAAC;AACH,CAAC"}
|
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
/**
 * WorkerPull - Job pulling functions for embedded and TCP modes
 * Handles batch pulling with optional lock-based ownership
 */
import type { Job as InternalJob } from '../../domain/types/job';
import type { TcpConnection } from './types';
/** Static settings a worker supplies with every pull request. */
export interface PullConfig {
    /** Queue name to pull from. */
    readonly name: string;
    /** Identifier of this worker; sent as lock owner when `useLocks` is true. */
    readonly workerId: string;
    /** When true, jobs are pulled together with ownership tokens. */
    readonly useLocks: boolean;
    /** Poll timeout (ms) forwarded with TCP pull commands. */
    readonly pollTimeout: number;
}
/**
 * Pull up to `count` jobs from the in-process (embedded) queue manager.
 * Each entry carries the job plus its lock token (null when locks are off).
 */
export declare function pullEmbedded(config: PullConfig, count: number): Promise<Array<{
    job: InternalJob;
    token: string | null;
}>>;
/**
 * Pull up to `count` jobs over an established TCP connection.
 * Returns an empty array when `closing` is true or the server reply is not ok.
 */
export declare function pullTcp(config: PullConfig, tcp: TcpConnection, count: number, closing: boolean): Promise<Array<{
    job: InternalJob;
    token: string | null;
}>>;
//# sourceMappingURL=workerPull.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"workerPull.d.ts","sourceRoot":"","sources":["../../../src/client/worker/workerPull.ts"],"names":[],"mappings":"AAAA;;;GAGG;AAGH,OAAO,KAAK,EAAE,GAAG,IAAI,WAAW,EAAE,MAAM,wBAAwB,CAAC;AACjE,OAAO,KAAK,EAAE,aAAa,EAAE,MAAM,SAAS,CAAC;AAG7C,MAAM,WAAW,UAAU;IACzB,QAAQ,CAAC,IAAI,EAAE,MAAM,CAAC;IACtB,QAAQ,CAAC,QAAQ,EAAE,MAAM,CAAC;IAC1B,QAAQ,CAAC,QAAQ,EAAE,OAAO,CAAC;IAC3B,QAAQ,CAAC,WAAW,EAAE,MAAM,CAAC;CAC9B;AAED,wBAAsB,YAAY,CAChC,MAAM,EAAE,UAAU,EAClB,KAAK,EAAE,MAAM,GACZ,OAAO,CAAC,KAAK,CAAC;IAAE,GAAG,EAAE,WAAW,CAAC;IAAC,KAAK,EAAE,MAAM,GAAG,IAAI,CAAA;CAAE,CAAC,CAAC,CAyB5D;AAED,wBAAsB,OAAO,CAC3B,MAAM,EAAE,UAAU,EAClB,GAAG,EAAE,aAAa,EAClB,KAAK,EAAE,MAAM,EACb,OAAO,EAAE,OAAO,GACf,OAAO,CAAC,KAAK,CAAC;IAAE,GAAG,EAAE,WAAW,CAAC;IAAC,KAAK,EAAE,MAAM,GAAG,IAAI,CAAA;CAAE,CAAC,CAAC,CAuC5D"}
|
|
@@ -0,0 +1,60 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* WorkerPull - Job pulling functions for embedded and TCP modes
|
|
3
|
+
* Handles batch pulling with optional lock-based ownership
|
|
4
|
+
*/
|
|
5
|
+
import { getSharedManager } from '../manager';
|
|
6
|
+
import { parseJobFromResponse } from './jobParser';
|
|
7
|
+
export async function pullEmbedded(config, count) {
|
|
8
|
+
const manager = getSharedManager();
|
|
9
|
+
// Use lock-based pull only when useLocks is enabled
|
|
10
|
+
if (config.useLocks) {
|
|
11
|
+
if (count === 1) {
|
|
12
|
+
const { job, token } = await manager.pullWithLock(config.name, config.workerId, 0);
|
|
13
|
+
return job ? [{ job, token }] : [];
|
|
14
|
+
}
|
|
15
|
+
const { jobs, tokens } = await manager.pullBatchWithLock(config.name, count, config.workerId, 0);
|
|
16
|
+
return jobs.map((job, i) => ({ job, token: tokens[i] || null }));
|
|
17
|
+
}
|
|
18
|
+
// No locks - use regular pull
|
|
19
|
+
if (count === 1) {
|
|
20
|
+
const job = await manager.pull(config.name, 0);
|
|
21
|
+
return job ? [{ job, token: null }] : [];
|
|
22
|
+
}
|
|
23
|
+
const jobs = await manager.pullBatch(config.name, count, 0);
|
|
24
|
+
return jobs.map((job) => ({ job, token: null }));
|
|
25
|
+
}
|
|
26
|
+
export async function pullTcp(config, tcp, count, closing) {
|
|
27
|
+
if (closing)
|
|
28
|
+
return [];
|
|
29
|
+
// Build pull command - only request locks if useLocks is enabled
|
|
30
|
+
const cmd = {
|
|
31
|
+
cmd: count === 1 ? 'PULL' : 'PULLB',
|
|
32
|
+
queue: config.name,
|
|
33
|
+
timeout: config.pollTimeout,
|
|
34
|
+
count,
|
|
35
|
+
};
|
|
36
|
+
// Only request lock ownership when useLocks is enabled
|
|
37
|
+
if (config.useLocks) {
|
|
38
|
+
cmd.owner = config.workerId;
|
|
39
|
+
}
|
|
40
|
+
const response = await tcp.send(cmd);
|
|
41
|
+
if (!response.ok)
|
|
42
|
+
return [];
|
|
43
|
+
if (count === 1) {
|
|
44
|
+
const job = response.job;
|
|
45
|
+
// Only expect token if locks are enabled
|
|
46
|
+
const token = config.useLocks ? (response.token ?? null) : null;
|
|
47
|
+
if (job) {
|
|
48
|
+
return [{ job: parseJobFromResponse(job, config.name), token }];
|
|
49
|
+
}
|
|
50
|
+
return [];
|
|
51
|
+
}
|
|
52
|
+
const jobs = response.jobs;
|
|
53
|
+
// Only expect tokens if locks are enabled
|
|
54
|
+
const tokens = config.useLocks ? (response.tokens ?? []) : [];
|
|
55
|
+
return (jobs?.map((j, i) => ({
|
|
56
|
+
job: parseJobFromResponse(j, config.name),
|
|
57
|
+
token: tokens[i] || null,
|
|
58
|
+
})) ?? []);
|
|
59
|
+
}
|
|
60
|
+
//# sourceMappingURL=workerPull.js.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"workerPull.js","sourceRoot":"","sources":["../../../src/client/worker/workerPull.ts"],"names":[],"mappings":"AAAA;;;GAGG;AAEH,OAAO,EAAE,gBAAgB,EAAE,MAAM,YAAY,CAAC;AAG9C,OAAO,EAAE,oBAAoB,EAAE,MAAM,aAAa,CAAC;AASnD,MAAM,CAAC,KAAK,UAAU,YAAY,CAChC,MAAkB,EAClB,KAAa;IAEb,MAAM,OAAO,GAAG,gBAAgB,EAAE,CAAC;IAEnC,oDAAoD;IACpD,IAAI,MAAM,CAAC,QAAQ,EAAE,CAAC;QACpB,IAAI,KAAK,KAAK,CAAC,EAAE,CAAC;YAChB,MAAM,EAAE,GAAG,EAAE,KAAK,EAAE,GAAG,MAAM,OAAO,CAAC,YAAY,CAAC,MAAM,CAAC,IAAI,EAAE,MAAM,CAAC,QAAQ,EAAE,CAAC,CAAC,CAAC;YACnF,OAAO,GAAG,CAAC,CAAC,CAAC,CAAC,EAAE,GAAG,EAAE,KAAK,EAAE,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC;QACrC,CAAC;QACD,MAAM,EAAE,IAAI,EAAE,MAAM,EAAE,GAAG,MAAM,OAAO,CAAC,iBAAiB,CACtD,MAAM,CAAC,IAAI,EACX,KAAK,EACL,MAAM,CAAC,QAAQ,EACf,CAAC,CACF,CAAC;QACF,OAAO,IAAI,CAAC,GAAG,CAAC,CAAC,GAAG,EAAE,CAAC,EAAE,EAAE,CAAC,CAAC,EAAE,GAAG,EAAE,KAAK,EAAE,MAAM,CAAC,CAAC,CAAC,IAAI,IAAI,EAAE,CAAC,CAAC,CAAC;IACnE,CAAC;IAED,8BAA8B;IAC9B,IAAI,KAAK,KAAK,CAAC,EAAE,CAAC;QAChB,MAAM,GAAG,GAAG,MAAM,OAAO,CAAC,IAAI,CAAC,MAAM,CAAC,IAAI,EAAE,CAAC,CAAC,CAAC;QAC/C,OAAO,GAAG,CAAC,CAAC,CAAC,CAAC,EAAE,GAAG,EAAE,KAAK,EAAE,IAAI,EAAE,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC;IAC3C,CAAC;IACD,MAAM,IAAI,GAAG,MAAM,OAAO,CAAC,SAAS,CAAC,MAAM,CAAC,IAAI,EAAE,KAAK,EAAE,CAAC,CAAC,CAAC;IAC5D,OAAO,IAAI,CAAC,GAAG,CAAC,CAAC,GAAG,EAAE,EAAE,CAAC,CAAC,EAAE,GAAG,EAAE,KAAK,EAAE,IAAI,EAAE,CAAC,CAAC,CAAC;AACnD,CAAC;AAED,MAAM,CAAC,KAAK,UAAU,OAAO,CAC3B,MAAkB,EAClB,GAAkB,EAClB,KAAa,EACb,OAAgB;IAEhB,IAAI,OAAO;QAAE,OAAO,EAAE,CAAC;IAEvB,iEAAiE;IACjE,MAAM,GAAG,GAA4B;QACnC,GAAG,EAAE,KAAK,KAAK,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,OAAO;QACnC,KAAK,EAAE,MAAM,CAAC,IAAI;QAClB,OAAO,EAAE,MAAM,CAAC,WAAW;QAC3B,KAAK;KACN,CAAC;IAEF,uDAAuD;IACvD,IAAI,MAAM,CAAC,QAAQ,EAAE,CAAC;QACpB,GAAG,CAAC,KAAK,GAAG,MAAM,CAAC,QAAQ,CAAC;IAC9B,CAAC;IAED,MAAM,QAAQ,GAAG,MAAM,GAAG,CAAC,IAAI,CAAC,GAAG,CAAC,CAAC;IAErC,IAAI,CAAC,QAAQ,CAAC,EAAE;QAAE,OAAO,EAAE,CAAC;IAE5B,IAAI,KAAK,KAAK,CAAC,EAAE,CAAC;QAChB,MAAM,GAAG,GAAG,QAAQ,CAAC,GAAiD,CAAC;QACvE,yCAAyC;QACzC,MAAM,KAAK,GAAG,MAA
M,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAE,QAAQ,CAAC,KAAmC,IAAI,IAAI,CAAC,CAAC,CAAC,CAAC,IAAI,CAAC;QAC/F,IAAI,GAAG,EAAE,CAAC;YACR,OAAO,CAAC,EAAE,GAAG,EAAE,oBAAoB,CAAC,GAAG,EAAE,MAAM,CAAC,IAAI,CAAC,EAAE,KAAK,EAAE,CAAC,CAAC;QAClE,CAAC;QACD,OAAO,EAAE,CAAC;IACZ,CAAC;IAED,MAAM,IAAI,GAAG,QAAQ,CAAC,IAAkD,CAAC;IACzE,0CAA0C;IAC1C,MAAM,MAAM,GAAG,MAAM,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAE,QAAQ,CAAC,MAA+B,IAAI,EAAE,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC;IACxF,OAAO,CACL,IAAI,EAAE,GAAG,CAAC,CAAC,CAAC,EAAE,CAAC,EAAE,EAAE,CAAC,CAAC;QACnB,GAAG,EAAE,oBAAoB,CAAC,CAAC,EAAE,MAAM,CAAC,IAAI,CAAC;QACzC,KAAK,EAAE,MAAM,CAAC,CAAC,CAAC,IAAI,IAAI;KACzB,CAAC,CAAC,IAAI,EAAE,CACV,CAAC;AACJ,CAAC"}
|
|
@@ -0,0 +1,37 @@
|
|
|
1
|
+
/**
 * WorkerRateLimiter - Rate limiting for worker job processing
 * BullMQ v5 compatible sliding window rate limiter
 */
import type { RateLimiterOptions } from '../types';
/**
 * Sliding-window rate limiter: tracks completion timestamps and reports the
 * worker as limited once `max` jobs have completed within `duration` ms.
 */
export declare class WorkerRateLimiter {
    private readonly limiter;
    /** Completion timestamps inside the current sliding window. */
    private limiterTokens;
    /** Epoch ms until which the worker is explicitly rate limited. */
    private rateLimitExpiration;
    /** @param limiter Window settings, or null to disable rate limiting. */
    constructor(limiter: RateLimiterOptions | null);
    /**
     * Check if rate limiter allows processing another job.
     * Returns true if we can process, false if rate limited.
     */
    canProcessWithinLimit(): boolean;
    /** Record a job completion for rate limiting. */
    recordJobForLimiter(): void;
    /**
     * Get time until rate limiter allows next job (ms).
     * Returns 0 if not rate limited.
     */
    getTimeUntilNextSlot(): number;
    /** Get rate limiter info (for debugging/monitoring). */
    getRateLimiterInfo(): {
        current: number;
        max: number;
        duration: number;
    } | null;
    /**
     * Apply rate limiting (BullMQ v5 compatible).
     * The worker will not process jobs until the rate limit expires.
     */
    rateLimit(expireTimeMs: number): void;
    /** Check if worker is currently rate limited. */
    isRateLimited(): boolean;
}
//# sourceMappingURL=workerRateLimiter.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"workerRateLimiter.d.ts","sourceRoot":"","sources":["../../../src/client/worker/workerRateLimiter.ts"],"names":[],"mappings":"AAAA;;;GAGG;AAEH,OAAO,KAAK,EAAE,kBAAkB,EAAE,MAAM,UAAU,CAAC;AAEnD,qBAAa,iBAAiB;IAIhB,OAAO,CAAC,QAAQ,CAAC,OAAO;IAHpC,OAAO,CAAC,aAAa,CAAgB;IACrC,OAAO,CAAC,mBAAmB,CAAK;gBAEH,OAAO,EAAE,kBAAkB,GAAG,IAAI;IAE/D;;;OAGG;IACH,qBAAqB,IAAI,OAAO;IAahC,iDAAiD;IACjD,mBAAmB,IAAI,IAAI;IAK3B;;;OAGG;IACH,oBAAoB,IAAI,MAAM;IAkB9B,wDAAwD;IACxD,kBAAkB,IAAI;QAAE,OAAO,EAAE,MAAM,CAAC;QAAC,GAAG,EAAE,MAAM,CAAC;QAAC,QAAQ,EAAE,MAAM,CAAA;KAAE,GAAG,IAAI;IAc/E;;;OAGG;IACH,SAAS,CAAC,YAAY,EAAE,MAAM,GAAG,IAAI;IAcrC,iDAAiD;IACjD,aAAa,IAAI,OAAO;CAGzB"}
|
|
@@ -0,0 +1,84 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* WorkerRateLimiter - Rate limiting for worker job processing
|
|
3
|
+
* BullMQ v5 compatible sliding window rate limiter
|
|
4
|
+
*/
|
|
5
|
+
export class WorkerRateLimiter {
|
|
6
|
+
limiter;
|
|
7
|
+
limiterTokens = [];
|
|
8
|
+
rateLimitExpiration = 0;
|
|
9
|
+
constructor(limiter) {
|
|
10
|
+
this.limiter = limiter;
|
|
11
|
+
}
|
|
12
|
+
/**
|
|
13
|
+
* Check if rate limiter allows processing another job.
|
|
14
|
+
* Returns true if we can process, false if rate limited.
|
|
15
|
+
*/
|
|
16
|
+
canProcessWithinLimit() {
|
|
17
|
+
if (!this.limiter)
|
|
18
|
+
return true;
|
|
19
|
+
const now = Date.now();
|
|
20
|
+
const windowStart = now - this.limiter.duration;
|
|
21
|
+
// Remove expired tokens
|
|
22
|
+
this.limiterTokens = this.limiterTokens.filter((t) => t > windowStart);
|
|
23
|
+
// Check if we have capacity
|
|
24
|
+
return this.limiterTokens.length < this.limiter.max;
|
|
25
|
+
}
|
|
26
|
+
/** Record a job completion for rate limiting. */
|
|
27
|
+
recordJobForLimiter() {
|
|
28
|
+
if (!this.limiter)
|
|
29
|
+
return;
|
|
30
|
+
this.limiterTokens.push(Date.now());
|
|
31
|
+
}
|
|
32
|
+
/**
|
|
33
|
+
* Get time until rate limiter allows next job (ms).
|
|
34
|
+
* Returns 0 if not rate limited.
|
|
35
|
+
*/
|
|
36
|
+
getTimeUntilNextSlot() {
|
|
37
|
+
if (!this.limiter)
|
|
38
|
+
return 0;
|
|
39
|
+
const now = Date.now();
|
|
40
|
+
const windowStart = now - this.limiter.duration;
|
|
41
|
+
// Remove expired tokens
|
|
42
|
+
this.limiterTokens = this.limiterTokens.filter((t) => t > windowStart);
|
|
43
|
+
if (this.limiterTokens.length < this.limiter.max) {
|
|
44
|
+
return 0;
|
|
45
|
+
}
|
|
46
|
+
// Find oldest token and calculate when it expires
|
|
47
|
+
const oldestToken = Math.min(...this.limiterTokens);
|
|
48
|
+
return oldestToken + this.limiter.duration - now;
|
|
49
|
+
}
|
|
50
|
+
/** Get rate limiter info (for debugging/monitoring). */
|
|
51
|
+
getRateLimiterInfo() {
|
|
52
|
+
if (!this.limiter)
|
|
53
|
+
return null;
|
|
54
|
+
const now = Date.now();
|
|
55
|
+
const windowStart = now - this.limiter.duration;
|
|
56
|
+
const currentTokens = this.limiterTokens.filter((t) => t > windowStart).length;
|
|
57
|
+
return {
|
|
58
|
+
current: currentTokens,
|
|
59
|
+
max: this.limiter.max,
|
|
60
|
+
duration: this.limiter.duration,
|
|
61
|
+
};
|
|
62
|
+
}
|
|
63
|
+
/**
|
|
64
|
+
* Apply rate limiting (BullMQ v5 compatible).
|
|
65
|
+
* The worker will not process jobs until the rate limit expires.
|
|
66
|
+
*/
|
|
67
|
+
rateLimit(expireTimeMs) {
|
|
68
|
+
if (expireTimeMs <= 0)
|
|
69
|
+
return;
|
|
70
|
+
// Fill rate limiter tokens to block processing
|
|
71
|
+
if (this.limiter) {
|
|
72
|
+
const now = Date.now();
|
|
73
|
+
for (let i = 0; i < this.limiter.max; i++) {
|
|
74
|
+
this.limiterTokens.push(now + expireTimeMs - this.limiter.duration);
|
|
75
|
+
}
|
|
76
|
+
}
|
|
77
|
+
this.rateLimitExpiration = Date.now() + expireTimeMs;
|
|
78
|
+
}
|
|
79
|
+
/** Check if worker is currently rate limited. */
|
|
80
|
+
isRateLimited() {
|
|
81
|
+
return Date.now() < this.rateLimitExpiration;
|
|
82
|
+
}
|
|
83
|
+
}
|
|
84
|
+
//# sourceMappingURL=workerRateLimiter.js.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"workerRateLimiter.js","sourceRoot":"","sources":["../../../src/client/worker/workerRateLimiter.ts"],"names":[],"mappings":"AAAA;;;GAGG;AAIH,MAAM,OAAO,iBAAiB;IAIC;IAHrB,aAAa,GAAa,EAAE,CAAC;IAC7B,mBAAmB,GAAG,CAAC,CAAC;IAEhC,YAA6B,OAAkC;QAAlC,YAAO,GAAP,OAAO,CAA2B;IAAG,CAAC;IAEnE;;;OAGG;IACH,qBAAqB;QACnB,IAAI,CAAC,IAAI,CAAC,OAAO;YAAE,OAAO,IAAI,CAAC;QAE/B,MAAM,GAAG,GAAG,IAAI,CAAC,GAAG,EAAE,CAAC;QACvB,MAAM,WAAW,GAAG,GAAG,GAAG,IAAI,CAAC,OAAO,CAAC,QAAQ,CAAC;QAEhD,wBAAwB;QACxB,IAAI,CAAC,aAAa,GAAG,IAAI,CAAC,aAAa,CAAC,MAAM,CAAC,CAAC,CAAC,EAAE,EAAE,CAAC,CAAC,GAAG,WAAW,CAAC,CAAC;QAEvE,4BAA4B;QAC5B,OAAO,IAAI,CAAC,aAAa,CAAC,MAAM,GAAG,IAAI,CAAC,OAAO,CAAC,GAAG,CAAC;IACtD,CAAC;IAED,iDAAiD;IACjD,mBAAmB;QACjB,IAAI,CAAC,IAAI,CAAC,OAAO;YAAE,OAAO;QAC1B,IAAI,CAAC,aAAa,CAAC,IAAI,CAAC,IAAI,CAAC,GAAG,EAAE,CAAC,CAAC;IACtC,CAAC;IAED;;;OAGG;IACH,oBAAoB;QAClB,IAAI,CAAC,IAAI,CAAC,OAAO;YAAE,OAAO,CAAC,CAAC;QAE5B,MAAM,GAAG,GAAG,IAAI,CAAC,GAAG,EAAE,CAAC;QACvB,MAAM,WAAW,GAAG,GAAG,GAAG,IAAI,CAAC,OAAO,CAAC,QAAQ,CAAC;QAEhD,wBAAwB;QACxB,IAAI,CAAC,aAAa,GAAG,IAAI,CAAC,aAAa,CAAC,MAAM,CAAC,CAAC,CAAC,EAAE,EAAE,CAAC,CAAC,GAAG,WAAW,CAAC,CAAC;QAEvE,IAAI,IAAI,CAAC,aAAa,CAAC,MAAM,GAAG,IAAI,CAAC,OAAO,CAAC,GAAG,EAAE,CAAC;YACjD,OAAO,CAAC,CAAC;QACX,CAAC;QAED,kDAAkD;QAClD,MAAM,WAAW,GAAG,IAAI,CAAC,GAAG,CAAC,GAAG,IAAI,CAAC,aAAa,CAAC,CAAC;QACpD,OAAO,WAAW,GAAG,IAAI,CAAC,OAAO,CAAC,QAAQ,GAAG,GAAG,CAAC;IACnD,CAAC;IAED,wDAAwD;IACxD,kBAAkB;QAChB,IAAI,CAAC,IAAI,CAAC,OAAO;YAAE,OAAO,IAAI,CAAC;QAE/B,MAAM,GAAG,GAAG,IAAI,CAAC,GAAG,EAAE,CAAC;QACvB,MAAM,WAAW,GAAG,GAAG,GAAG,IAAI,CAAC,OAAO,CAAC,QAAQ,CAAC;QAChD,MAAM,aAAa,GAAG,IAAI,CAAC,aAAa,CAAC,MAAM,CAAC,CAAC,CAAC,EAAE,EAAE,CAAC,CAAC,GAAG,WAAW,CAAC,CAAC,MAAM,CAAC;QAE/E,OAAO;YACL,OAAO,EAAE,aAAa;YACtB,GAAG,EAAE,IAAI,CAAC,OAAO,CAAC,GAAG;YACrB,QAAQ,EAAE,IAAI,CAAC,OAAO,CAAC,QAAQ;SAChC,CAAC;IACJ,CAAC;IAED;;;OAGG;IACH,SAAS,CAAC,YAAoB;QAC5B,IAAI,YAAY,IAAI,CAAC;YAAE,OAAO;QAE9B,+CAA+C;QAC/C,IAAI,IAAI,CAAC,OAAO,EAAE,CAAC;YACjB,MAAM,GAAG,GAAG,IAAI,CAAC,GAAG,EAAE,CAAC;YACvB,KAAK,IA
AI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,IAAI,CAAC,OAAO,CAAC,GAAG,EAAE,CAAC,EAAE,EAAE,CAAC;gBAC1C,IAAI,CAAC,aAAa,CAAC,IAAI,CAAC,GAAG,GAAG,YAAY,GAAG,IAAI,CAAC,OAAO,CAAC,QAAQ,CAAC,CAAC;YACtE,CAAC;QACH,CAAC;QAED,IAAI,CAAC,mBAAmB,GAAG,IAAI,CAAC,GAAG,EAAE,GAAG,YAAY,CAAC;IACvD,CAAC;IAED,iDAAiD;IACjD,aAAa;QACX,OAAO,IAAI,CAAC,GAAG,EAAE,GAAG,IAAI,CAAC,mBAAmB,CAAC;IAC/C,CAAC;CACF"}
|
|
@@ -8,6 +8,8 @@
|
|
|
8
8
|
* - LimiterManager: rate limiting + concurrency
|
|
9
9
|
* - DependencyTracker: job dependency tracking
|
|
10
10
|
* - TemporalManager: temporal index + delayed job tracking
|
|
11
|
+
* - WaiterManager: job availability notifications
|
|
12
|
+
* - ShardCounters: running counters for O(1) stats
|
|
11
13
|
*/
|
|
12
14
|
import type { Job, JobId } from '../types/job';
|
|
13
15
|
import type { QueueState } from '../types/queue';
|
|
@@ -16,15 +18,8 @@ import { FailureReason } from '../types/dlq';
|
|
|
16
18
|
import type { StallConfig } from '../types/stall';
|
|
17
19
|
import type { UniqueKeyEntry } from '../types/deduplication';
|
|
18
20
|
import { IndexedPriorityQueue } from './priorityQueue';
|
|
19
|
-
|
|
20
|
-
export
|
|
21
|
-
/** Total jobs in all queues (waiting + delayed) */
|
|
22
|
-
queuedJobs: number;
|
|
23
|
-
/** Jobs with runAt > now at time of push */
|
|
24
|
-
delayedJobs: number;
|
|
25
|
-
/** Total jobs in DLQ */
|
|
26
|
-
dlqJobs: number;
|
|
27
|
-
}
|
|
21
|
+
import { type ShardStats } from './shardCounters';
|
|
22
|
+
export type { ShardStats } from './shardCounters';
|
|
28
23
|
/**
|
|
29
24
|
* Shard contains:
|
|
30
25
|
* - Queues (waiting + delayed jobs)
|
|
@@ -46,22 +41,14 @@ export declare class Shard {
|
|
|
46
41
|
private readonly dependencyTracker;
|
|
47
42
|
/** Temporal manager for index and delayed jobs */
|
|
48
43
|
private readonly temporalManager;
|
|
49
|
-
/**
|
|
50
|
-
private readonly
|
|
44
|
+
/** Waiter manager for job availability notifications */
|
|
45
|
+
private readonly waiterManager;
|
|
46
|
+
/** Running counters for O(1) stats */
|
|
47
|
+
private readonly counters;
|
|
51
48
|
/** Active FIFO groups per queue */
|
|
52
49
|
readonly activeGroups: Map<string, Set<string>>;
|
|
53
|
-
/** Waiter entry with cancellation flag for O(1) cleanup */
|
|
54
|
-
private readonly waiters;
|
|
55
|
-
/** Threshold for triggering full waiters cleanup */
|
|
56
|
-
private static readonly WAITERS_CLEANUP_THRESHOLD;
|
|
57
|
-
/** Pending notification flag - set when notify() is called with no waiters */
|
|
58
|
-
private pendingNotification;
|
|
59
50
|
constructor();
|
|
60
|
-
/** Notify that jobs are available - wakes first non-cancelled waiter */
|
|
61
51
|
notify(): void;
|
|
62
|
-
/** Remove all cancelled waiters from the array */
|
|
63
|
-
private cleanupWaiters;
|
|
64
|
-
/** Wait for a job to become available (with timeout) */
|
|
65
52
|
waitForJob(timeoutMs: number): Promise<void>;
|
|
66
53
|
getQueue(name: string): IndexedPriorityQueue;
|
|
67
54
|
getState(name: string): QueueState;
|
|
@@ -87,7 +74,6 @@ export declare class Shard {
|
|
|
87
74
|
tryAcquireConcurrency(queue: string): boolean;
|
|
88
75
|
releaseConcurrency(queue: string): void;
|
|
89
76
|
get queueState(): Map<string, QueueState>;
|
|
90
|
-
/** Clear limiter data for a queue (rate limits, concurrency) */
|
|
91
77
|
clearQueueLimiters(queue: string): void;
|
|
92
78
|
releaseJobResources(queue: string, uniqueKey: string | null, groupId: string | null): void;
|
|
93
79
|
get waitingDeps(): Map<JobId, Job>;
|
|
@@ -104,7 +90,6 @@ export declare class Shard {
|
|
|
104
90
|
getStallConfig(queue: string): StallConfig;
|
|
105
91
|
setStallConfig(queue: string, config: Partial<StallConfig>): void;
|
|
106
92
|
addToDlq(job: Job, reason?: FailureReason, error?: string | null): DlqEntry;
|
|
107
|
-
/** Restore an existing DlqEntry (for recovery from persistence) */
|
|
108
93
|
restoreDlqEntry(queue: string, entry: DlqEntry): void;
|
|
109
94
|
getDlqEntries(queue: string): DlqEntry[];
|
|
110
95
|
getDlq(queue: string, count?: number): Job[];
|
|
@@ -1 +1 @@
|
|
|
1
|
-
{"version":3,"file":"shard.d.ts","sourceRoot":"","sources":["../../../src/domain/queue/shard.ts"],"names":[],"mappings":"AAAA
|
|
1
|
+
{"version":3,"file":"shard.d.ts","sourceRoot":"","sources":["../../../src/domain/queue/shard.ts"],"names":[],"mappings":"AAAA;;;;;;;;;;;;GAYG;AAEH,OAAO,KAAK,EAAE,GAAG,EAAE,KAAK,EAAE,MAAM,cAAc,CAAC;AAC/C,OAAO,KAAK,EAAE,UAAU,EAAE,MAAM,gBAAgB,CAAC;AACjD,OAAO,KAAK,EAAE,QAAQ,EAAE,SAAS,EAAE,SAAS,EAAE,MAAM,cAAc,CAAC;AACnE,OAAO,EAAE,aAAa,EAAE,MAAM,cAAc,CAAC;AAC7C,OAAO,KAAK,EAAE,WAAW,EAAE,MAAM,gBAAgB,CAAC;AAClD,OAAO,KAAK,EAAE,cAAc,EAAE,MAAM,wBAAwB,CAAC;AAC7D,OAAO,EAAE,oBAAoB,EAAE,MAAM,iBAAiB,CAAC;AAOvD,OAAO,EAAiB,KAAK,UAAU,EAAE,MAAM,iBAAiB,CAAC;AAGjE,YAAY,EAAE,UAAU,EAAE,MAAM,iBAAiB,CAAC;AAElD;;;;;;;GAOG;AACH,qBAAa,KAAK;IAChB,oCAAoC;IACpC,QAAQ,CAAC,MAAM,oCAA2C;IAE1D,2CAA2C;IAC3C,OAAO,CAAC,QAAQ,CAAC,gBAAgB,CAA0B;IAE3D,kBAAkB;IAClB,OAAO,CAAC,QAAQ,CAAC,UAAU,CAAW;IAEtC,mDAAmD;IACnD,OAAO,CAAC,QAAQ,CAAC,cAAc,CAAwB;IAEvD,yBAAyB;IACzB,OAAO,CAAC,QAAQ,CAAC,iBAAiB,CAA2B;IAE7D,kDAAkD;IAClD,OAAO,CAAC,QAAQ,CAAC,eAAe,CAAyB;IAEzD,wDAAwD;IACxD,OAAO,CAAC,QAAQ,CAAC,aAAa,CAAuB;IAErD,sCAAsC;IACtC,OAAO,CAAC,QAAQ,CAAC,QAAQ,CAAgB;IAEzC,mCAAmC;IACnC,QAAQ,CAAC,YAAY,2BAAkC;;IAgBvD,MAAM,IAAI,IAAI;IAId,UAAU,CAAC,SAAS,EAAE,MAAM,GAAG,OAAO,CAAC,IAAI,CAAC;IAM5C,QAAQ,CAAC,IAAI,EAAE,MAAM,GAAG,oBAAoB;IAS5C,QAAQ,CAAC,IAAI,EAAE,MAAM,GAAG,UAAU;IAIlC,QAAQ,CAAC,IAAI,EAAE,MAAM,GAAG,OAAO;IAI/B,KAAK,CAAC,IAAI,EAAE,MAAM,GAAG,IAAI;IAIzB,MAAM,CAAC,IAAI,EAAE,MAAM,GAAG,IAAI;IAO1B,iBAAiB,CAAC,KAAK,EAAE,MAAM,EAAE,GAAG,EAAE,MAAM,GAAG,OAAO;IAItD,iBAAiB,CAAC,KAAK,EAAE,MAAM,EAAE,GAAG,EAAE,MAAM,GAAG,cAAc,GAAG,IAAI;IAIpE,iBAAiB,CAAC,KAAK,EAAE,MAAM,EAAE,GAAG,EAAE,MAAM,EAAE,KAAK,EAAE,KAAK,GAAG,IAAI;IAIjE,wBAAwB,CAAC,KAAK,EAAE,MAAM,EAAE,GAAG,EAAE,MAAM,EAAE,KAAK,EAAE,KAAK,EAAE,GAAG,CAAC,EAAE,MAAM,GAAG,IAAI;IAItF,kBAAkB,CAAC,KAAK,EAAE,MAAM,EAAE,GAAG,EAAE,MAAM,EAAE,GAAG,EAAE,MAAM,GAAG,OAAO;IAIpE,gBAAgB,CAAC,KAAK,EAAE,MAAM,EAAE,GAAG,EAAE,MAAM,GAAG,IAAI;IAIlD,sBAAsB,IAAI,MAAM;IAIhC,IAAI,UAAU,IAAI,GAAG,CAAC,MAAM,EAAE,GAAG,CAAC,MAAM,EAAE,cAAc,CAAC,CAAC,CAEzD;IAID,aAAa,CAAC,KAAK,EAAE,MAAM,EAAE,OAAO,EAAE,MAAM,GAAG,OAAO;IAItD,aAAa,CAAC,KAAK,EAA
E,MAAM,EAAE,OAAO,EAAE,MAAM,GAAG,IAAI;IASnD,YAAY,CAAC,KAAK,EAAE,MAAM,EAAE,OAAO,EAAE,MAAM,GAAG,IAAI;IAMlD,YAAY,CAAC,KAAK,EAAE,MAAM,EAAE,KAAK,EAAE,MAAM,GAAG,IAAI;IAIhD,cAAc,CAAC,KAAK,EAAE,MAAM,GAAG,IAAI;IAInC,mBAAmB,CAAC,KAAK,EAAE,MAAM,GAAG,OAAO;IAI3C,cAAc,CAAC,KAAK,EAAE,MAAM,EAAE,KAAK,EAAE,MAAM,GAAG,IAAI;IAIlD,gBAAgB,CAAC,KAAK,EAAE,MAAM,GAAG,IAAI;IAIrC,qBAAqB,CAAC,KAAK,EAAE,MAAM,GAAG,OAAO;IAI7C,kBAAkB,CAAC,KAAK,EAAE,MAAM,GAAG,IAAI;IAIvC,IAAI,UAAU,IAAI,GAAG,CAAC,MAAM,EAAE,UAAU,CAAC,CAExC;IAED,kBAAkB,CAAC,KAAK,EAAE,MAAM,GAAG,IAAI;IAMvC,mBAAmB,CAAC,KAAK,EAAE,MAAM,EAAE,SAAS,EAAE,MAAM,GAAG,IAAI,EAAE,OAAO,EAAE,MAAM,GAAG,IAAI,GAAG,IAAI;IAQ1F,IAAI,WAAW,IAAI,GAAG,CAAC,KAAK,EAAE,GAAG,CAAC,CAEjC;IAED,IAAI,eAAe,IAAI,GAAG,CAAC,KAAK,EAAE,GAAG,CAAC,KAAK,CAAC,CAAC,CAE5C;IAED,IAAI,eAAe,IAAI,GAAG,CAAC,KAAK,EAAE,GAAG,CAAC,CAErC;IAED,oBAAoB,CAAC,KAAK,EAAE,KAAK,EAAE,SAAS,EAAE,KAAK,EAAE,GAAG,IAAI;IAI5D,sBAAsB,CAAC,KAAK,EAAE,KAAK,EAAE,SAAS,EAAE,KAAK,EAAE,GAAG,IAAI;IAI9D,iBAAiB,CAAC,KAAK,EAAE,KAAK,GAAG,GAAG,CAAC,KAAK,CAAC,GAAG,SAAS;IAMvD,IAAI,GAAG,IAAI,GAAG,CAAC,MAAM,EAAE,QAAQ,EAAE,CAAC,CASjC;IAED,IAAI,SAAS,IAAI,GAAG,CAAC,MAAM,EAAE,SAAS,CAAC,CAMtC;IAED,IAAI,WAAW,IAAI,GAAG,CAAC,MAAM,EAAE,WAAW,CAAC,CAM1C;IAED,YAAY,CAAC,KAAK,EAAE,MAAM,GAAG,SAAS;IAItC,YAAY,CAAC,KAAK,EAAE,MAAM,EAAE,MAAM,EAAE,OAAO,CAAC,SAAS,CAAC,GAAG,IAAI;IAI7D,cAAc,CAAC,KAAK,EAAE,MAAM,GAAG,WAAW;IAI1C,cAAc,CAAC,KAAK,EAAE,MAAM,EAAE,MAAM,EAAE,OAAO,CAAC,WAAW,CAAC,GAAG,IAAI;IAIjE,QAAQ,CACN,GAAG,EAAE,GAAG,EACR,MAAM,GAAE,aAAqC,EAC7C,KAAK,GAAE,MAAM,GAAG,IAAW,GAC1B,QAAQ;IAIX,eAAe,CAAC,KAAK,EAAE,MAAM,EAAE,KAAK,EAAE,QAAQ,GAAG,IAAI;IAIrD,aAAa,CAAC,KAAK,EAAE,MAAM,GAAG,QAAQ,EAAE;IAIxC,MAAM,CAAC,KAAK,EAAE,MAAM,EAAE,KAAK,CAAC,EAAE,MAAM,GAAG,GAAG,EAAE;IAI5C,cAAc,CAAC,KAAK,EAAE,MAAM,EAAE,MAAM,EAAE,SAAS,GAAG,QAAQ,EAAE;IAI5D,aAAa,CAAC,KAAK,EAAE,MAAM,EAAE,KAAK,EAAE,KAAK,GAAG,QAAQ,GAAG,IAAI;IAI3D,mBAAmB,CAAC,KAAK,EAAE,MAAM,EAAE,GAAG,GAAE,MAAmB,GAAG,QAAQ,EAAE;IAIxE,iBAAiB,CAAC,KAAK,EAAE,MAAM,EAAE,GAAG,GAAE,MAAmB,GAAG,QAAQ,EAAE;IAItE,YAAY,CAAC,KAAK,EAAE
,MAAM,EAAE,GAAG,GAAE,MAAmB,GAAG,MAAM;IAI7D,QAAQ,CAAC,KAAK,EAAE,MAAM,GAAG,MAAM;IAM/B,eAAe,CAAC,KAAK,EAAE,MAAM,GAAG,MAAM;IAItC,WAAW,CAAC,KAAK,EAAE,MAAM,GAAG,MAAM;IAIlC,aAAa,IAAI,MAAM,EAAE;IAQzB,oBAAoB,CAAC,KAAK,EAAE,MAAM,GAAG,GAAG,CAAC,MAAM,EAAE,MAAM,CAAC;IAaxD,QAAQ,IAAI,UAAU;IAItB,gBAAgB,IAAI;QAClB,aAAa,EAAE,MAAM,CAAC;QACtB,WAAW,EAAE,MAAM,CAAC;QACpB,YAAY,EAAE,MAAM,CAAC;QACrB,aAAa,EAAE,MAAM,CAAC;QACtB,OAAO,EAAE,MAAM,CAAC;KACjB;IAKD,eAAe,CACb,KAAK,EAAE,KAAK,EACZ,SAAS,EAAE,OAAO,EAClB,SAAS,CAAC,EAAE,MAAM,EAClB,KAAK,CAAC,EAAE,MAAM,EACd,KAAK,CAAC,EAAE,MAAM,GACb,IAAI;IAIP,eAAe,CAAC,KAAK,EAAE,KAAK,GAAG,IAAI;IAInC,YAAY,IAAI,IAAI;IAIpB,YAAY,CAAC,KAAK,GAAE,MAAU,GAAG,IAAI;IAIrC,mBAAmB,CAAC,GAAG,EAAE,MAAM,GAAG,IAAI;IAItC,mBAAmB,IAAI,IAAI;IAI3B,eAAe,IAAI,IAAI;IAMvB,UAAU,CACR,KAAK,EAAE,MAAM,EACb,WAAW,EAAE,MAAM,EACnB,KAAK,EAAE,MAAM,GACZ,KAAK,CAAC;QAAE,KAAK,EAAE,KAAK,CAAC;QAAC,SAAS,EAAE,MAAM,CAAA;KAAE,CAAC;IAI7C,uBAAuB,CAAC,KAAK,EAAE,KAAK,GAAG,IAAI;IAI3C,0BAA0B,CAAC,KAAK,EAAE,MAAM,GAAG,IAAI;IAI/C,4BAA4B,IAAI,MAAM;IActC,KAAK,CAAC,KAAK,EAAE,MAAM,GAAG;QAAE,KAAK,EAAE,MAAM,CAAC;QAAC,MAAM,EAAE,KAAK,EAAE,CAAA;KAAE;IAiBxD,UAAU,CAAC,KAAK,EAAE,MAAM,GAAG,IAAI;CAsBhC"}
|
|
@@ -8,6 +8,8 @@
|
|
|
8
8
|
* - LimiterManager: rate limiting + concurrency
|
|
9
9
|
* - DependencyTracker: job dependency tracking
|
|
10
10
|
* - TemporalManager: temporal index + delayed job tracking
|
|
11
|
+
* - WaiterManager: job availability notifications
|
|
12
|
+
* - ShardCounters: running counters for O(1) stats
|
|
11
13
|
*/
|
|
12
14
|
import { IndexedPriorityQueue } from './priorityQueue';
|
|
13
15
|
import { UniqueKeyManager } from './uniqueKeyManager';
|
|
@@ -15,6 +17,8 @@ import { DlqShard } from './dlqShard';
|
|
|
15
17
|
import { LimiterManager } from './limiterManager';
|
|
16
18
|
import { DependencyTracker } from './dependencyTracker';
|
|
17
19
|
import { TemporalManager } from './temporalManager';
|
|
20
|
+
import { WaiterManager } from './waiterManager';
|
|
21
|
+
import { ShardCounters } from './shardCounters';
|
|
18
22
|
/**
|
|
19
23
|
* Shard contains:
|
|
20
24
|
* - Queues (waiting + delayed jobs)
|
|
@@ -36,76 +40,29 @@ export class Shard {
|
|
|
36
40
|
dependencyTracker = new DependencyTracker();
|
|
37
41
|
/** Temporal manager for index and delayed jobs */
|
|
38
42
|
temporalManager = new TemporalManager();
|
|
39
|
-
/**
|
|
40
|
-
|
|
41
|
-
|
|
42
|
-
|
|
43
|
-
dlqJobs: 0,
|
|
44
|
-
};
|
|
43
|
+
/** Waiter manager for job availability notifications */
|
|
44
|
+
waiterManager = new WaiterManager();
|
|
45
|
+
/** Running counters for O(1) stats */
|
|
46
|
+
counters;
|
|
45
47
|
/** Active FIFO groups per queue */
|
|
46
48
|
activeGroups = new Map();
|
|
47
|
-
/** Waiter entry with cancellation flag for O(1) cleanup */
|
|
48
|
-
waiters = [];
|
|
49
|
-
/** Threshold for triggering full waiters cleanup */
|
|
50
|
-
static WAITERS_CLEANUP_THRESHOLD = 1000;
|
|
51
|
-
/** Pending notification flag - set when notify() is called with no waiters */
|
|
52
|
-
pendingNotification = false;
|
|
53
49
|
constructor() {
|
|
50
|
+
this.counters = new ShardCounters(this.temporalManager);
|
|
54
51
|
this.dlqManager = new DlqShard({
|
|
55
52
|
incrementDlq: () => {
|
|
56
|
-
this.incrementDlq();
|
|
53
|
+
this.counters.incrementDlq();
|
|
57
54
|
},
|
|
58
55
|
decrementDlq: (count) => {
|
|
59
|
-
this.decrementDlq(count);
|
|
56
|
+
this.counters.decrementDlq(count);
|
|
60
57
|
},
|
|
61
58
|
});
|
|
62
59
|
}
|
|
63
|
-
|
|
60
|
+
// ============ Waiter Management (delegated) ============
|
|
64
61
|
notify() {
|
|
65
|
-
|
|
66
|
-
while (this.waiters.length > 0 && this.waiters[0].cancelled) {
|
|
67
|
-
this.waiters.shift();
|
|
68
|
-
}
|
|
69
|
-
// Wake the first active waiter
|
|
70
|
-
const waiter = this.waiters.shift();
|
|
71
|
-
if (waiter && !waiter.cancelled) {
|
|
72
|
-
waiter.resolve();
|
|
73
|
-
}
|
|
74
|
-
else {
|
|
75
|
-
// No active waiter - set pending flag so next waitForJob returns immediately
|
|
76
|
-
this.pendingNotification = true;
|
|
77
|
-
}
|
|
78
|
-
// Periodic full cleanup when array grows too large
|
|
79
|
-
if (this.waiters.length > Shard.WAITERS_CLEANUP_THRESHOLD) {
|
|
80
|
-
this.cleanupWaiters();
|
|
81
|
-
}
|
|
82
|
-
}
|
|
83
|
-
/** Remove all cancelled waiters from the array */
|
|
84
|
-
cleanupWaiters() {
|
|
85
|
-
const active = this.waiters.filter((w) => !w.cancelled);
|
|
86
|
-
this.waiters.length = 0;
|
|
87
|
-
this.waiters.push(...active);
|
|
62
|
+
this.waiterManager.notify();
|
|
88
63
|
}
|
|
89
|
-
/** Wait for a job to become available (with timeout) */
|
|
90
64
|
waitForJob(timeoutMs) {
|
|
91
|
-
|
|
92
|
-
return Promise.resolve();
|
|
93
|
-
// Check for pending notification - if set, clear it and return immediately
|
|
94
|
-
if (this.pendingNotification) {
|
|
95
|
-
this.pendingNotification = false;
|
|
96
|
-
return Promise.resolve();
|
|
97
|
-
}
|
|
98
|
-
return new Promise((resolve) => {
|
|
99
|
-
const waiter = { resolve, cancelled: false };
|
|
100
|
-
const cleanup = () => {
|
|
101
|
-
if (waiter.cancelled)
|
|
102
|
-
return;
|
|
103
|
-
waiter.cancelled = true;
|
|
104
|
-
resolve();
|
|
105
|
-
};
|
|
106
|
-
this.waiters.push(waiter);
|
|
107
|
-
setTimeout(cleanup, timeoutMs);
|
|
108
|
-
});
|
|
65
|
+
return this.waiterManager.waitForJob(timeoutMs);
|
|
109
66
|
}
|
|
110
67
|
// ============ Queue Operations ============
|
|
111
68
|
getQueue(name) {
|
|
@@ -127,7 +84,7 @@ export class Shard {
|
|
|
127
84
|
}
|
|
128
85
|
resume(name) {
|
|
129
86
|
this.limiterManager.resume(name);
|
|
130
|
-
this.notify();
|
|
87
|
+
this.waiterManager.notify();
|
|
131
88
|
}
|
|
132
89
|
// ============ Unique Key Management (delegated) ============
|
|
133
90
|
isUniqueAvailable(queue, key) {
|
|
@@ -194,7 +151,6 @@ export class Shard {
|
|
|
194
151
|
get queueState() {
|
|
195
152
|
return this.limiterManager.getStateMap();
|
|
196
153
|
}
|
|
197
|
-
/** Clear limiter data for a queue (rate limits, concurrency) */
|
|
198
154
|
clearQueueLimiters(queue) {
|
|
199
155
|
this.limiterManager.deleteQueue(queue);
|
|
200
156
|
}
|
|
@@ -266,7 +222,6 @@ export class Shard {
|
|
|
266
222
|
addToDlq(job, reason = "unknown" /* FailureReason.Unknown */, error = null) {
|
|
267
223
|
return this.dlqManager.add(job, reason, error);
|
|
268
224
|
}
|
|
269
|
-
/** Restore an existing DlqEntry (for recovery from persistence) */
|
|
270
225
|
restoreDlqEntry(queue, entry) {
|
|
271
226
|
this.dlqManager.restoreEntry(queue, entry);
|
|
272
227
|
}
|
|
@@ -322,49 +277,34 @@ export class Shard {
|
|
|
322
277
|
}
|
|
323
278
|
return counts;
|
|
324
279
|
}
|
|
325
|
-
// ============ Running Counters (
|
|
280
|
+
// ============ Running Counters (delegated) ============
|
|
326
281
|
getStats() {
|
|
327
|
-
return
|
|
282
|
+
return this.counters.getStats();
|
|
328
283
|
}
|
|
329
284
|
getInternalSizes() {
|
|
330
285
|
const sizes = this.temporalManager.getSizes();
|
|
331
|
-
return { ...sizes, waiters: this.
|
|
286
|
+
return { ...sizes, waiters: this.waiterManager.length };
|
|
332
287
|
}
|
|
333
288
|
incrementQueued(jobId, isDelayed, createdAt, queue, runAt) {
|
|
334
|
-
this.
|
|
335
|
-
if (isDelayed) {
|
|
336
|
-
this.stats.delayedJobs++;
|
|
337
|
-
if (runAt !== undefined) {
|
|
338
|
-
this.temporalManager.addDelayed(jobId, runAt);
|
|
339
|
-
}
|
|
340
|
-
}
|
|
341
|
-
if (createdAt !== undefined && queue !== undefined) {
|
|
342
|
-
this.temporalManager.addToIndex(createdAt, jobId, queue);
|
|
343
|
-
}
|
|
289
|
+
this.counters.incrementQueued(jobId, isDelayed, createdAt, queue, runAt);
|
|
344
290
|
}
|
|
345
291
|
decrementQueued(jobId) {
|
|
346
|
-
this.
|
|
347
|
-
if (this.temporalManager.removeDelayed(jobId)) {
|
|
348
|
-
this.stats.delayedJobs = Math.max(0, this.stats.delayedJobs - 1);
|
|
349
|
-
}
|
|
292
|
+
this.counters.decrementQueued(jobId);
|
|
350
293
|
}
|
|
351
294
|
incrementDlq() {
|
|
352
|
-
this.
|
|
295
|
+
this.counters.incrementDlq();
|
|
353
296
|
}
|
|
354
297
|
decrementDlq(count = 1) {
|
|
355
|
-
this.
|
|
298
|
+
this.counters.decrementDlq(count);
|
|
356
299
|
}
|
|
357
300
|
refreshDelayedCount(now) {
|
|
358
|
-
|
|
359
|
-
this.stats.delayedJobs = Math.max(0, this.stats.delayedJobs - readyCount);
|
|
301
|
+
this.counters.refreshDelayedCount(now);
|
|
360
302
|
}
|
|
361
303
|
resetQueuedCounters() {
|
|
362
|
-
this.
|
|
363
|
-
this.stats.delayedJobs = 0;
|
|
364
|
-
this.temporalManager.clearDelayed();
|
|
304
|
+
this.counters.resetQueuedCounters();
|
|
365
305
|
}
|
|
366
306
|
resetDlqCounter() {
|
|
367
|
-
this.
|
|
307
|
+
this.counters.resetDlqCounter();
|
|
368
308
|
}
|
|
369
309
|
// ============ Temporal Index (delegated) ============
|
|
370
310
|
getOldJobs(queue, thresholdMs, limit) {
|
|
@@ -400,8 +340,8 @@ export class Shard {
|
|
|
400
340
|
}
|
|
401
341
|
q.clear();
|
|
402
342
|
this.temporalManager.clearIndexForQueue(queue);
|
|
403
|
-
this.
|
|
404
|
-
this.
|
|
343
|
+
this.counters.adjustQueued(-count);
|
|
344
|
+
this.counters.syncDelayedCount();
|
|
405
345
|
return { count, jobIds };
|
|
406
346
|
}
|
|
407
347
|
obliterate(queue) {
|
|
@@ -410,13 +350,13 @@ export class Shard {
|
|
|
410
350
|
for (const job of q.values()) {
|
|
411
351
|
this.temporalManager.removeDelayed(job.id);
|
|
412
352
|
}
|
|
413
|
-
this.
|
|
353
|
+
this.counters.adjustQueued(-q.size);
|
|
414
354
|
}
|
|
415
355
|
const dlqCount = this.dlqManager.deleteQueue(queue);
|
|
416
356
|
if (dlqCount > 0) {
|
|
417
|
-
this.
|
|
357
|
+
this.counters.adjustDlq(-dlqCount);
|
|
418
358
|
}
|
|
419
|
-
this.
|
|
359
|
+
this.counters.syncDelayedCount();
|
|
420
360
|
this.temporalManager.clearIndexForQueue(queue);
|
|
421
361
|
this.queues.delete(queue);
|
|
422
362
|
this.uniqueKeyManager.clearQueue(queue);
|