adaptive-concurrency 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/Limit.d.ts +29 -0
- package/dist/Limit.d.ts.map +1 -0
- package/dist/Limit.js +1 -0
- package/dist/LimitAllotment.d.ts +23 -0
- package/dist/LimitAllotment.d.ts.map +1 -0
- package/dist/LimitAllotment.js +1 -0
- package/dist/Limiter.d.ts +175 -0
- package/dist/Limiter.d.ts.map +1 -0
- package/dist/Limiter.js +240 -0
- package/dist/Listener.d.ts +23 -0
- package/dist/Listener.d.ts.map +1 -0
- package/dist/Listener.js +1 -0
- package/dist/ListenerSet.d.ts +12 -0
- package/dist/ListenerSet.d.ts.map +1 -0
- package/dist/ListenerSet.js +35 -0
- package/dist/MetricIds.d.ts +13 -0
- package/dist/MetricIds.d.ts.map +1 -0
- package/dist/MetricIds.js +12 -0
- package/dist/MetricRegistry.d.ts +66 -0
- package/dist/MetricRegistry.d.ts.map +1 -0
- package/dist/MetricRegistry.js +30 -0
- package/dist/RunResult.d.ts +33 -0
- package/dist/RunResult.d.ts.map +1 -0
- package/dist/RunResult.js +35 -0
- package/dist/StreamingLimit.d.ts +26 -0
- package/dist/StreamingLimit.d.ts.map +1 -0
- package/dist/StreamingLimit.js +1 -0
- package/dist/executors/AdaptiveExecutor.d.ts +50 -0
- package/dist/executors/AdaptiveExecutor.d.ts.map +1 -0
- package/dist/executors/AdaptiveExecutor.js +80 -0
- package/dist/index.d.ts +27 -0
- package/dist/index.d.ts.map +1 -0
- package/dist/index.js +28 -0
- package/dist/limit/AIMDLimit.d.ts +37 -0
- package/dist/limit/AIMDLimit.d.ts.map +1 -0
- package/dist/limit/AIMDLimit.js +49 -0
- package/dist/limit/FixedLimit.d.ts +15 -0
- package/dist/limit/FixedLimit.d.ts.map +1 -0
- package/dist/limit/FixedLimit.js +23 -0
- package/dist/limit/Gradient2Limit.d.ts +122 -0
- package/dist/limit/Gradient2Limit.d.ts.map +1 -0
- package/dist/limit/Gradient2Limit.js +107 -0
- package/dist/limit/GradientLimit.d.ts +122 -0
- package/dist/limit/GradientLimit.d.ts.map +1 -0
- package/dist/limit/GradientLimit.js +108 -0
- package/dist/limit/SettableLimit.d.ts +18 -0
- package/dist/limit/SettableLimit.d.ts.map +1 -0
- package/dist/limit/SettableLimit.js +30 -0
- package/dist/limit/StreamingLimit.d.ts +26 -0
- package/dist/limit/StreamingLimit.d.ts.map +1 -0
- package/dist/limit/StreamingLimit.js +1 -0
- package/dist/limit/TracingLimitDecorator.d.ts +16 -0
- package/dist/limit/TracingLimitDecorator.d.ts.map +1 -0
- package/dist/limit/TracingLimitDecorator.js +23 -0
- package/dist/limit/VegasLimit.d.ts +85 -0
- package/dist/limit/VegasLimit.d.ts.map +1 -0
- package/dist/limit/VegasLimit.js +127 -0
- package/dist/limit/WindowedLimit.d.ts +48 -0
- package/dist/limit/WindowedLimit.d.ts.map +1 -0
- package/dist/limit/WindowedLimit.js +67 -0
- package/dist/limit/statistics/ExpMovingAverage.d.ts +21 -0
- package/dist/limit/statistics/ExpMovingAverage.d.ts.map +1 -0
- package/dist/limit/statistics/ExpMovingAverage.js +43 -0
- package/dist/limit/statistics/Minimum.d.ts +12 -0
- package/dist/limit/statistics/Minimum.d.ts.map +1 -0
- package/dist/limit/statistics/Minimum.js +22 -0
- package/dist/limit/statistics/MinimumValue.d.ts +12 -0
- package/dist/limit/statistics/MinimumValue.d.ts.map +1 -0
- package/dist/limit/statistics/MinimumValue.js +22 -0
- package/dist/limit/statistics/SingleMeasurement.d.ts +12 -0
- package/dist/limit/statistics/SingleMeasurement.d.ts.map +1 -0
- package/dist/limit/statistics/SingleMeasurement.js +21 -0
- package/dist/limit/statistics/StreamingStatistic.d.ts +29 -0
- package/dist/limit/statistics/StreamingStatistic.d.ts.map +1 -0
- package/dist/limit/statistics/StreamingStatistic.js +1 -0
- package/dist/limit/utils/index.d.ts +10 -0
- package/dist/limit/utils/index.d.ts.map +1 -0
- package/dist/limit/utils/index.js +19 -0
- package/dist/limit/window/AverageSampleWindow.d.ts +4 -0
- package/dist/limit/window/AverageSampleWindow.d.ts.map +1 -0
- package/dist/limit/window/AverageSampleWindow.js +46 -0
- package/dist/limit/window/PercentileSampleWindow.d.ts +38 -0
- package/dist/limit/window/PercentileSampleWindow.d.ts.map +1 -0
- package/dist/limit/window/PercentileSampleWindow.js +81 -0
- package/dist/limit/window/SampleWindow.d.ts +30 -0
- package/dist/limit/window/SampleWindow.d.ts.map +1 -0
- package/dist/limit/window/SampleWindow.js +1 -0
- package/dist/limiter/AbstractLimiter.d.ts +48 -0
- package/dist/limiter/AbstractLimiter.d.ts.map +1 -0
- package/dist/limiter/AbstractLimiter.js +78 -0
- package/dist/limiter/AbstractPartitionedLimiter.d.ts +66 -0
- package/dist/limiter/AbstractPartitionedLimiter.d.ts.map +1 -0
- package/dist/limiter/AbstractPartitionedLimiter.js +209 -0
- package/dist/limiter/BlockingLimiter.d.ts +55 -0
- package/dist/limiter/BlockingLimiter.d.ts.map +1 -0
- package/dist/limiter/BlockingLimiter.js +111 -0
- package/dist/limiter/DelayedRejectStrategy.d.ts +32 -0
- package/dist/limiter/DelayedRejectStrategy.d.ts.map +1 -0
- package/dist/limiter/DelayedRejectStrategy.js +60 -0
- package/dist/limiter/DelayedThenBlockingRejection.d.ts +19 -0
- package/dist/limiter/DelayedThenBlockingRejection.d.ts.map +1 -0
- package/dist/limiter/DelayedThenBlockingRejection.js +26 -0
- package/dist/limiter/FifoBlockingRejection.d.ts +26 -0
- package/dist/limiter/FifoBlockingRejection.d.ts.map +1 -0
- package/dist/limiter/FifoBlockingRejection.js +77 -0
- package/dist/limiter/LifoBlockingLimiter.d.ts +53 -0
- package/dist/limiter/LifoBlockingLimiter.d.ts.map +1 -0
- package/dist/limiter/LifoBlockingLimiter.js +108 -0
- package/dist/limiter/LifoBlockingRejection.d.ts +31 -0
- package/dist/limiter/LifoBlockingRejection.d.ts.map +1 -0
- package/dist/limiter/LifoBlockingRejection.js +63 -0
- package/dist/limiter/PartitionedStrategy.d.ts +90 -0
- package/dist/limiter/PartitionedStrategy.d.ts.map +1 -0
- package/dist/limiter/PartitionedStrategy.js +183 -0
- package/dist/limiter/SimpleLimiter.d.ts +31 -0
- package/dist/limiter/SimpleLimiter.d.ts.map +1 -0
- package/dist/limiter/SimpleLimiter.js +119 -0
- package/dist/limiter/factories/index.d.ts +7 -0
- package/dist/limiter/factories/index.d.ts.map +1 -0
- package/dist/limiter/factories/index.js +6 -0
- package/dist/limiter/factories/makeBlockingLimiter.d.ts +6 -0
- package/dist/limiter/factories/makeBlockingLimiter.d.ts.map +1 -0
- package/dist/limiter/factories/makeBlockingLimiter.js +8 -0
- package/dist/limiter/factories/makeLifoBlockingLimiter.d.ts +8 -0
- package/dist/limiter/factories/makeLifoBlockingLimiter.d.ts.map +1 -0
- package/dist/limiter/factories/makeLifoBlockingLimiter.js +15 -0
- package/dist/limiter/factories/makePartitionedBlockingLimiter.d.ts +12 -0
- package/dist/limiter/factories/makePartitionedBlockingLimiter.d.ts.map +1 -0
- package/dist/limiter/factories/makePartitionedBlockingLimiter.js +35 -0
- package/dist/limiter/factories/makePartitionedLifoBlockingLimiter.d.ts +14 -0
- package/dist/limiter/factories/makePartitionedLifoBlockingLimiter.d.ts.map +1 -0
- package/dist/limiter/factories/makePartitionedLifoBlockingLimiter.js +38 -0
- package/dist/limiter/factories/makePartitionedLimiter.d.ts +11 -0
- package/dist/limiter/factories/makePartitionedLimiter.d.ts.map +1 -0
- package/dist/limiter/factories/makePartitionedLimiter.js +30 -0
- package/dist/limiter/factories/makeSimpleLimiter.d.ts +3 -0
- package/dist/limiter/factories/makeSimpleLimiter.d.ts.map +1 -0
- package/dist/limiter/factories/makeSimpleLimiter.js +9 -0
- package/dist/limiter/factories.d.ts +31 -0
- package/dist/limiter/factories.d.ts.map +1 -0
- package/dist/limiter/factories.js +74 -0
- package/dist/statistics/ExpMovingAverage.d.ts +21 -0
- package/dist/statistics/ExpMovingAverage.d.ts.map +1 -0
- package/dist/statistics/ExpMovingAverage.js +43 -0
- package/dist/statistics/MinimumValue.d.ts +12 -0
- package/dist/statistics/MinimumValue.d.ts.map +1 -0
- package/dist/statistics/MinimumValue.js +22 -0
- package/dist/statistics/MostRecentValue.d.ts +12 -0
- package/dist/statistics/MostRecentValue.d.ts.map +1 -0
- package/dist/statistics/MostRecentValue.js +21 -0
- package/dist/statistics/StreamingStatistic.d.ts +29 -0
- package/dist/statistics/StreamingStatistic.d.ts.map +1 -0
- package/dist/statistics/StreamingStatistic.js +1 -0
- package/dist/utils/index.d.ts +10 -0
- package/dist/utils/index.d.ts.map +1 -0
- package/dist/utils/index.js +19 -0
- package/package.json +31 -0
|
@@ -0,0 +1,111 @@
|
|
|
1
|
+
/**
 * Limiter that blocks the caller when the limit has been reached. The caller
 * is blocked (via a promise) until the limiter has been released, or a timeout
 * is reached. This limiter is commonly used in batch clients that use the
 * limiter as a back-pressure mechanism.
 *
 * Because JavaScript is single-threaded, "blocking" here means awaiting a
 * promise that resolves when a token becomes available.
 */
export class BlockingLimiter {
    static MAX_TIMEOUT = 60 * 60 * 1000; // 1 hour in ms
    /**
     * Wraps a synchronous limiter in blocking behavior.
     * Throws if the requested timeout exceeds {@link BlockingLimiter.MAX_TIMEOUT}.
     */
    static wrap(delegate, timeout) {
        const effective = timeout ?? BlockingLimiter.MAX_TIMEOUT;
        if (effective > BlockingLimiter.MAX_TIMEOUT) {
            throw new Error(`Timeout cannot be greater than ${BlockingLimiter.MAX_TIMEOUT} ms`);
        }
        return new BlockingLimiter(delegate, effective);
    }
    delegate;
    timeout;
    /** Callers parked until a token is released. */
    waiters = [];
    constructor(delegate, timeout) {
        this.delegate = delegate;
        this.timeout = timeout;
    }
    /** Returns the timeout used when blocking to acquire a permit, in ms. */
    getTimeout() {
        return this.timeout;
    }
    /**
     * Acquires a token, awaiting a release when the limit has been reached.
     * Resolves to an allotment on success, or undefined on timeout/abort.
     */
    async acquire(context, options = {}) {
        if (options.signal?.aborted) {
            return undefined;
        }
        const deadline = performance.now() + this.timeout;
        for (;;) {
            const budget = deadline - performance.now();
            if (budget <= 0) {
                return undefined;
            }
            // Fast path: a token may already be available.
            const allotment = this.delegate.acquire(context);
            if (allotment) {
                return this.wrapAllotment(allotment);
            }
            // Limit reached: park until a token is released, then retry.
            const woken = await this.waitForRelease(budget, options.signal);
            if (!woken) {
                return undefined;
            }
        }
    }
    waitForRelease(timeoutMs, signal) {
        if (signal?.aborted) {
            return Promise.resolve(false);
        }
        return new Promise((resolve) => {
            let done = false;
            const finish = (acquired) => {
                if (done) {
                    return;
                }
                done = true;
                clearTimeout(timer);
                signal?.removeEventListener("abort", onAbort);
                const position = this.waiters.indexOf(entry);
                if (position !== -1) {
                    this.waiters.splice(position, 1);
                }
                resolve(acquired);
            };
            const entry = {
                wake: () => finish(true),
            };
            const onAbort = () => finish(false);
            this.waiters.push(entry);
            const timer = setTimeout(() => finish(false), timeoutMs);
            signal?.addEventListener("abort", onAbort, { once: true });
        });
    }
    /** Decorates an allotment so every report also wakes parked callers. */
    wrapAllotment(delegate) {
        const reportThenUnblock = (report) => {
            report();
            this.unblock();
        };
        return {
            reportSuccess: () => reportThenUnblock(() => delegate.reportSuccess()),
            reportIgnore: () => reportThenUnblock(() => delegate.reportIgnore()),
            reportDropped: () => reportThenUnblock(() => delegate.reportDropped()),
        };
    }
    unblock() {
        // Wake every parked caller so each can retry acquiring.
        for (const entry of this.waiters.splice(0)) {
            entry.wake();
        }
    }
    toString() {
        return `BlockingLimiter [${this.delegate}]`;
    }
}
|
|
@@ -0,0 +1,32 @@
|
|
|
1
|
+
import type { LimitAllotment } from "../LimitAllotment.js";
import type { AllotmentUnavailableStrategy, AsyncAcquireResult, SyncAcquireResult } from "../Limiter.js";
export interface DelayedRejectStrategyOptions<ContextT> {
    /**
     * Milliseconds to wait before resolving with no allotment. Return 0 to reject
     * immediately. With `PartitionedStrategy`, usually mirror `partitionResolver`
     * here and look up the delay from your own per-partition config.
     */
    delayMsForContext: (context: ContextT) => number;
    /**
     * Maximum number of acquire attempts that may be waiting on a delay at once.
     * When exceeded, new rejections skip the delay (immediate `undefined`).
     * Must be a finite number >= 1; the constructor throws otherwise.
     */
    maxConcurrentDelays?: number;
}
/**
 * When the acquire strategy rejects, waits up to `delayMsForContext(context)`,
 * then still returns no allotment. This adds a form of backpressure. Does not
 * call `retry` — unlike {@link FifoBlockingRejection}, this is "delay then
 * reject," not "wait for capacity." Compose with `PartitionedStrategy` via
 * `Limiter.rejectionStrategy`; partition delays are not fields on `PartitionConfig`.
 */
export declare class DelayedRejectStrategy<ContextT> implements AllotmentUnavailableStrategy<ContextT, AsyncAcquireResult> {
    private readonly delayMsForContext;
    private readonly maxConcurrentDelays;
    /** Number of acquire attempts currently sleeping on a delay. */
    private concurrentDelays;
    constructor(options: DelayedRejectStrategyOptions<ContextT>);
    /**
     * Always resolves to `undefined`: after the configured delay, or immediately
     * when aborted, when the delay is <= 0, or when too many delays are in flight.
     * The `_retry` callback is never invoked.
     */
    onAllotmentUnavailable(context: ContextT, _retry: (context: ContextT) => SyncAcquireResult, signal?: AbortSignal): Promise<LimitAllotment | undefined>;
    /** No-op: this strategy keeps no waiters to wake. */
    onAllotmentReleased(): void;
    private run;
}
//# sourceMappingURL=DelayedRejectStrategy.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"DelayedRejectStrategy.d.ts","sourceRoot":"","sources":["../../src/limiter/DelayedRejectStrategy.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,cAAc,EAAE,MAAM,sBAAsB,CAAC;AAC3D,OAAO,KAAK,EACV,4BAA4B,EAC5B,kBAAkB,EAClB,iBAAiB,EAClB,MAAM,eAAe,CAAC;AAEvB,MAAM,WAAW,4BAA4B,CAAC,QAAQ;IACpD;;;;OAIG;IACH,iBAAiB,EAAE,CAAC,OAAO,EAAE,QAAQ,KAAK,MAAM,CAAC;IAEjD;;;OAGG;IACH,mBAAmB,CAAC,EAAE,MAAM,CAAC;CAC9B;AAED;;;;;;GAMG;AACH,qBAAa,qBAAqB,CAChC,QAAQ,CACR,YAAW,4BAA4B,CAAC,QAAQ,EAAE,kBAAkB,CAAC;IACrE,OAAO,CAAC,QAAQ,CAAC,iBAAiB,CAAgC;IAClE,OAAO,CAAC,QAAQ,CAAC,mBAAmB,CAAS;IAC7C,OAAO,CAAC,gBAAgB,CAAK;gBAEjB,OAAO,EAAE,4BAA4B,CAAC,QAAQ,CAAC;IAS3D,sBAAsB,CACpB,OAAO,EAAE,QAAQ,EACjB,MAAM,EAAE,CAAC,OAAO,EAAE,QAAQ,KAAK,iBAAiB,EAChD,MAAM,CAAC,EAAE,WAAW,GACnB,OAAO,CAAC,cAAc,GAAG,SAAS,CAAC;IAItC,mBAAmB,IAAI,IAAI;YAIb,GAAG;CA0BlB"}
|
|
@@ -0,0 +1,60 @@
|
|
|
1
|
+
/**
 * When the acquire strategy rejects, waits up to `delayMsForContext(context)`,
 * then still returns no allotment. This adds a form of backpressure. Does not
 * call `retry` — unlike {@link FifoBlockingRejection}, this is "delay then
 * reject," not "wait for capacity." Compose with `PartitionedStrategy` via
 * `Limiter.rejectionStrategy`; partition delays are not fields on `PartitionConfig`.
 */
export class DelayedRejectStrategy {
    delayMsForContext;
    maxConcurrentDelays;
    concurrentDelays = 0;
    constructor(options) {
        this.delayMsForContext = options.delayMsForContext;
        const limit = options.maxConcurrentDelays ?? 100;
        // Reject NaN/Infinity and anything below 1 up front.
        if (!(Number.isFinite(limit) && limit >= 1)) {
            throw new Error("maxConcurrentDelays must be a finite number >= 1");
        }
        this.maxConcurrentDelays = limit;
    }
    onAllotmentUnavailable(context, _retry, signal) {
        return this.run(context, signal);
    }
    onAllotmentReleased() {
        // No waiters (unlike FIFO/LIFO blocking).
    }
    async run(context, signal) {
        if (signal?.aborted) {
            return undefined;
        }
        const delayMs = this.delayMsForContext(context);
        // Skip the sleep entirely for non-positive delays or when the cap on
        // concurrently-delayed attempts has been hit.
        if (delayMs <= 0 || this.concurrentDelays >= this.maxConcurrentDelays) {
            return undefined;
        }
        this.concurrentDelays += 1;
        try {
            await sleep(delayMs, signal);
        }
        finally {
            this.concurrentDelays -= 1;
        }
        return undefined;
    }
}
/** Resolves after `ms` milliseconds, or immediately once `signal` aborts. */
function sleep(ms, signal) {
    return new Promise((resolve) => {
        if (signal?.aborted) {
            resolve();
            return;
        }
        const timer = setTimeout(resolve, ms);
        signal?.addEventListener(
            "abort",
            () => {
                clearTimeout(timer);
                resolve();
            },
            { once: true },
        );
    });
}
|
|
@@ -0,0 +1,19 @@
|
|
|
1
|
+
import type { LimitAllotment } from "../LimitAllotment.js";
import type { AllotmentUnavailableStrategy, AsyncAcquireResult, SyncAcquireResult } from "../Limiter.js";
import type { DelayedRejectStrategy } from "./DelayedRejectStrategy.js";
/**
 * Composes "delay then reject" backoff with blocking behavior. On rejection:
 * first applies delayed-reject semantics, then delegates to a blocking strategy
 * (FIFO/LIFO) to await capacity.
 */
export declare class DelayedThenBlockingRejection<ContextT> implements AllotmentUnavailableStrategy<ContextT, AsyncAcquireResult> {
    private readonly delayStrategy;
    private readonly blockingStrategy;
    constructor(options: {
        /** Backoff phase: applied first; its result is discarded. */
        delayStrategy: DelayedRejectStrategy<ContextT>;
        /** Fallback phase: awaited when the post-delay retry yields nothing. */
        blockingStrategy: AllotmentUnavailableStrategy<ContextT, AsyncAcquireResult>;
    });
    /**
     * Waits out the delay phase, retries once (capacity may have opened up),
     * then falls back to the blocking strategy. Resolves to `undefined` when
     * `signal` has aborted after the delay.
     */
    onAllotmentUnavailable(context: ContextT, retry: (context: ContextT) => SyncAcquireResult, signal?: AbortSignal): Promise<LimitAllotment | undefined>;
    /** Forwards the release notification to both composed strategies. */
    onAllotmentReleased(): void;
}
//# sourceMappingURL=DelayedThenBlockingRejection.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"DelayedThenBlockingRejection.d.ts","sourceRoot":"","sources":["../../src/limiter/DelayedThenBlockingRejection.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,cAAc,EAAE,MAAM,sBAAsB,CAAC;AAC3D,OAAO,KAAK,EACV,4BAA4B,EAC5B,kBAAkB,EAClB,iBAAiB,EAClB,MAAM,eAAe,CAAC;AACvB,OAAO,KAAK,EAAE,qBAAqB,EAAE,MAAM,4BAA4B,CAAC;AAExE;;;;GAIG;AACH,qBAAa,4BAA4B,CACvC,QAAQ,CACR,YAAW,4BAA4B,CAAC,QAAQ,EAAE,kBAAkB,CAAC;IACrE,OAAO,CAAC,QAAQ,CAAC,aAAa,CAAkC;IAChE,OAAO,CAAC,QAAQ,CAAC,gBAAgB,CAG/B;gBAEU,OAAO,EAAE;QACnB,aAAa,EAAE,qBAAqB,CAAC,QAAQ,CAAC,CAAC;QAC/C,gBAAgB,EAAE,4BAA4B,CAC5C,QAAQ,EACR,kBAAkB,CACnB,CAAC;KACH;IAKK,sBAAsB,CAC1B,OAAO,EAAE,QAAQ,EACjB,KAAK,EAAE,CAAC,OAAO,EAAE,QAAQ,KAAK,iBAAiB,EAC/C,MAAM,CAAC,EAAE,WAAW,GACnB,OAAO,CAAC,cAAc,GAAG,SAAS,CAAC;IAYtC,mBAAmB,IAAI,IAAI;CAI5B"}
|
|
@@ -0,0 +1,26 @@
|
|
|
1
|
+
/**
 * Composes "delay then reject" backoff with blocking behavior. On rejection:
 * first applies delayed-reject semantics, then delegates to a blocking strategy
 * (FIFO/LIFO) to await capacity.
 */
export class DelayedThenBlockingRejection {
    delayStrategy;
    blockingStrategy;
    constructor(options) {
        const { delayStrategy, blockingStrategy } = options;
        this.delayStrategy = delayStrategy;
        this.blockingStrategy = blockingStrategy;
    }
    /**
     * Applies the delay phase, retries once, then falls back to the blocking
     * strategy when the retry still yields no allotment.
     */
    async onAllotmentUnavailable(context, retry, signal) {
        // Phase 1: backoff. The delay strategy's result is intentionally ignored.
        await this.delayStrategy.onAllotmentUnavailable(context, retry, signal);
        if (signal?.aborted) {
            return undefined;
        }
        // Phase 2: capacity may have opened up while we were delayed.
        const retried = retry(context);
        if (retried != null) {
            return retried;
        }
        // Phase 3: park on the blocking strategy until capacity or timeout.
        return this.blockingStrategy.onAllotmentUnavailable(context, retry, signal);
    }
    /** Notifies both composed strategies that an allotment was released. */
    onAllotmentReleased() {
        this.delayStrategy.onAllotmentReleased();
        this.blockingStrategy.onAllotmentReleased();
    }
}
|
|
@@ -0,0 +1,26 @@
|
|
|
1
|
+
import type { LimitAllotment } from "../LimitAllotment.js";
import type { AllotmentUnavailableStrategy, AsyncAcquireResult, SyncAcquireResult } from "../Limiter.js";
/**
 * Rejection strategy that blocks the caller in a FIFO queue when the limit
 * has been reached, waiting for a slot to open up. This strategy favors
 * fairness: callers are served in the order they arrived.
 *
 * Because JavaScript is single-threaded, "blocking" means awaiting a promise
 * that resolves when a token becomes available.
 */
export declare class FifoBlockingRejection<ContextT> implements AllotmentUnavailableStrategy<ContextT, AsyncAcquireResult> {
    private readonly timeout;
    /** FIFO queue of wake callbacks for blocked callers. */
    private readonly waiters;
    constructor(options?: {
        /**
         * Maximum time in milliseconds to wait for a slot to become available.
         * Default: 3_600_000 (1 hour). The constructor throws for larger values.
         */
        timeout?: number | undefined;
    });
    /**
     * Repeatedly invokes `retry` until it yields an allotment, awaiting a
     * release between attempts. Resolves to `undefined` when the timeout
     * elapses or `signal` aborts.
     */
    onAllotmentUnavailable(_context: ContextT, retry: (context: ContextT) => SyncAcquireResult, signal?: AbortSignal): Promise<LimitAllotment | undefined>;
    /** Wakes every queued waiter so each can retry acquiring. */
    onAllotmentReleased(): void;
    private acquireAsync;
    private waitForRelease;
}
//# sourceMappingURL=FifoBlockingRejection.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"FifoBlockingRejection.d.ts","sourceRoot":"","sources":["../../src/limiter/FifoBlockingRejection.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,cAAc,EAAE,MAAM,sBAAsB,CAAC;AAC3D,OAAO,KAAK,EACV,4BAA4B,EAC5B,kBAAkB,EAClB,iBAAiB,EAClB,MAAM,eAAe,CAAC;AAMvB;;;;;;;GAOG;AACH,qBAAa,qBAAqB,CAChC,QAAQ,CACR,YAAW,4BAA4B,CAAC,QAAQ,EAAE,kBAAkB,CAAC;IACrE,OAAO,CAAC,QAAQ,CAAC,OAAO,CAAS;IACjC,OAAO,CAAC,QAAQ,CAAC,OAAO,CAAqB;gBAG3C,OAAO,GAAE;QACP;;;WAGG;QACH,OAAO,CAAC,EAAE,MAAM,GAAG,SAAS,CAAC;KACzB;IASR,sBAAsB,CACpB,QAAQ,EAAE,QAAQ,EAClB,KAAK,EAAE,CAAC,OAAO,EAAE,QAAQ,KAAK,iBAAiB,EAC/C,MAAM,CAAC,EAAE,WAAW,GACnB,OAAO,CAAC,cAAc,GAAG,SAAS,CAAC;IAItC,mBAAmB,IAAI,IAAI;YAOb,YAAY;IA6B1B,OAAO,CAAC,cAAc;CAoCvB"}
|
|
@@ -0,0 +1,77 @@
|
|
|
1
|
+
// Upper bound on how long a caller may stay parked waiting for capacity.
const MAX_TIMEOUT = 60 * 60 * 1000; // 1 hour
/**
 * Rejection strategy that blocks the caller in a FIFO queue when the limit
 * has been reached, waiting for a slot to open up. This strategy favors
 * fairness: callers are served in the order they arrived.
 *
 * Because JavaScript is single-threaded, "blocking" means awaiting a promise
 * that resolves when a token becomes available.
 */
export class FifoBlockingRejection {
    timeout;
    waiters = [];
    constructor(options = {}) {
        const configured = options.timeout ?? MAX_TIMEOUT;
        if (configured > MAX_TIMEOUT) {
            throw new Error(`Timeout cannot be greater than ${MAX_TIMEOUT} ms`);
        }
        this.timeout = configured;
    }
    onAllotmentUnavailable(_context, retry, signal) {
        return this.acquireAsync(_context, retry, signal);
    }
    onAllotmentReleased() {
        // Wake every parked caller; each retries acquiring in arrival order.
        for (const wake of this.waiters.splice(0)) {
            wake();
        }
    }
    async acquireAsync(context, retry, signal) {
        const deadline = performance.now() + this.timeout;
        for (;;) {
            const budget = deadline - performance.now();
            // Give up once the deadline passes or the caller aborts.
            if (budget <= 0 || signal?.aborted) {
                return undefined;
            }
            const allotment = retry(context);
            if (allotment) {
                return allotment;
            }
            // Park until the next release; bail out on timeout/abort.
            const woken = await this.waitForRelease(budget, signal);
            if (!woken) {
                return undefined;
            }
        }
    }
    waitForRelease(timeoutMs, signal) {
        if (signal?.aborted) {
            return Promise.resolve(false);
        }
        return new Promise((resolve) => {
            let done = false;
            const finish = (gotToken) => {
                if (done) {
                    return;
                }
                done = true;
                clearTimeout(timer);
                signal?.removeEventListener("abort", onAbort);
                const position = this.waiters.indexOf(wake);
                if (position !== -1) {
                    this.waiters.splice(position, 1);
                }
                resolve(gotToken);
            };
            const wake = () => finish(true);
            const onAbort = () => finish(false);
            this.waiters.push(wake);
            const timer = setTimeout(() => finish(false), timeoutMs);
            signal?.addEventListener("abort", onAbort, { once: true });
        });
    }
}
|
|
@@ -0,0 +1,53 @@
|
|
|
1
|
+
import type { Limiter } from "../Limiter.js";
import type { LimitAllotment } from "../LimitAllotment.js";
import type { AcquireOptions, AsyncLimiter } from "./BlockingLimiter.js";
/**
 * Limiter decorator that blocks the caller when the limit has been reached.
 * This strategy ensures the resource is properly protected but favors
 * availability over latency by not fast failing requests when the limit has
 * been reached. To help keep success latencies low and minimize timeouts any
 * blocked requests are processed in last in/first out order.
 *
 * Use this limiter only when the calling model allows the limiter to be
 * blocked (i.e. the caller can await).
 */
export interface LifoBlockingLimiterOptions<ContextT> {
    /**
     * Maximum number of blocked callers in the backlog. Default: 100
     */
    backlogSize?: number;
    /**
     * Maximum timeout for callers blocked on the limiter, in milliseconds.
     * Can be a fixed number or a function that derives the timeout from the
     * request context (e.g. from a deadline). Default: 1000
     */
    backlogTimeout?: number | ((context: ContextT) => number);
}
export declare class LifoBlockingLimiter<ContextT> implements AsyncLimiter<ContextT> {
    private readonly delegate;
    private readonly backlogSize;
    /** Resolves the backlog timeout (in ms) for a given request context. */
    private readonly getBacklogTimeout;
    /**
     * Fixed backlog timeout in milliseconds, or undefined if the timeout is
     * derived from the request context.
     */
    readonly fixedBacklogTimeoutMillis: number | undefined;
    /**
     * LIFO backlog of waiters. The front of the array is the most recently
     * added (highest priority).
     */
    private readonly backlog;
    constructor(delegate: Limiter<ContextT>, options?: LifoBlockingLimiterOptions<ContextT>);
    /**
     * Acquire a token, waiting in a LIFO queue if the limit has been reached.
     * Returns a promise that resolves to a LimitAllotment if acquired, or undefined
     * if the backlog is full, timeout elapses, or the operation is aborted.
     */
    acquire(this: LifoBlockingLimiter<void>): Promise<LimitAllotment | undefined>;
    acquire(context: ContextT, options?: AcquireOptions): Promise<LimitAllotment | undefined>;
    private waitForBacklog;
    private wrapAllotment;
    private unblock;
    toString(): string;
}
//# sourceMappingURL=LifoBlockingLimiter.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"LifoBlockingLimiter.d.ts","sourceRoot":"","sources":["../../src/limiter/LifoBlockingLimiter.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,OAAO,EAAE,MAAM,eAAe,CAAC;AAC7C,OAAO,KAAK,EAAE,cAAc,EAAE,MAAM,sBAAsB,CAAC;AAC3D,OAAO,KAAK,EAAE,cAAc,EAAE,YAAY,EAAE,MAAM,sBAAsB,CAAC;AAEzE;;;;;;;;;GASG;AACH,MAAM,WAAW,0BAA0B,CAAC,QAAQ;IAClD;;OAEG;IACH,WAAW,CAAC,EAAE,MAAM,CAAC;IAErB;;;;OAIG;IACH,cAAc,CAAC,EAAE,MAAM,GAAG,CAAC,CAAC,OAAO,EAAE,QAAQ,KAAK,MAAM,CAAC,CAAC;CAC3D;AAOD,qBAAa,mBAAmB,CAAC,QAAQ,CAAE,YAAW,YAAY,CAAC,QAAQ,CAAC;IAC1E,OAAO,CAAC,QAAQ,CAAC,QAAQ,CAAoB;IAC7C,OAAO,CAAC,QAAQ,CAAC,WAAW,CAAS;IACrC,OAAO,CAAC,QAAQ,CAAC,iBAAiB,CAAgC;IAElE;;;OAGG;IACH,QAAQ,CAAC,yBAAyB,EAAE,MAAM,GAAG,SAAS,CAAC;IAEvD;;;OAGG;IACH,OAAO,CAAC,QAAQ,CAAC,OAAO,CAA+B;gBAGrD,QAAQ,EAAE,OAAO,CAAC,QAAQ,CAAC,EAC3B,OAAO,GAAE,0BAA0B,CAAC,QAAQ,CAAM;IAepD;;;;OAIG;IACH,OAAO,CAAC,IAAI,EAAE,mBAAmB,CAAC,IAAI,CAAC,GAAG,OAAO,CAAC,cAAc,GAAG,SAAS,CAAC;IAC7E,OAAO,CAAC,OAAO,EAAE,QAAQ,EAAE,OAAO,CAAC,EAAE,cAAc,GAAG,OAAO,CAAC,cAAc,GAAG,SAAS,CAAC;IAwBzF,OAAO,CAAC,cAAc;IAsCtB,OAAO,CAAC,aAAa;IAiBrB,OAAO,CAAC,OAAO;IAcf,QAAQ,IAAI,MAAM;CAGnB"}
|
|
@@ -0,0 +1,108 @@
|
|
|
1
|
+
export class LifoBlockingLimiter {
|
|
2
|
+
delegate;
|
|
3
|
+
backlogSize;
|
|
4
|
+
getBacklogTimeout;
|
|
5
|
+
/**
|
|
6
|
+
* Fixed backlog timeout in milliseconds, or undefined if the timeout is
|
|
7
|
+
* derived from the request context.
|
|
8
|
+
*/
|
|
9
|
+
fixedBacklogTimeoutMillis;
|
|
10
|
+
/**
|
|
11
|
+
* LIFO backlog of waiters. The front of the array is the most recently
|
|
12
|
+
* added (highest priority).
|
|
13
|
+
*/
|
|
14
|
+
backlog = [];
|
|
15
|
+
constructor(delegate, options = {}) {
|
|
16
|
+
this.delegate = delegate;
|
|
17
|
+
this.backlogSize = options.backlogSize ?? 100;
|
|
18
|
+
const timeout = options.backlogTimeout ?? 1_000;
|
|
19
|
+
if (typeof timeout === "number") {
|
|
20
|
+
this.getBacklogTimeout = () => timeout;
|
|
21
|
+
this.fixedBacklogTimeoutMillis = timeout;
|
|
22
|
+
}
|
|
23
|
+
else {
|
|
24
|
+
this.getBacklogTimeout = timeout;
|
|
25
|
+
this.fixedBacklogTimeoutMillis = undefined;
|
|
26
|
+
}
|
|
27
|
+
}
|
|
28
|
+
async acquire(context, options = {}) {
|
|
29
|
+
const requestContext = context;
|
|
30
|
+
if (options.signal?.aborted) {
|
|
31
|
+
return undefined;
|
|
32
|
+
}
|
|
33
|
+
// Try to acquire a token and return immediately if successful
|
|
34
|
+
const allotment = this.delegate.acquire(requestContext);
|
|
35
|
+
if (allotment) {
|
|
36
|
+
return this.wrapAllotment(allotment);
|
|
37
|
+
}
|
|
38
|
+
// Restrict backlog size so the queue doesn't grow unbounded during an outage
|
|
39
|
+
if (this.backlog.length >= this.backlogSize) {
|
|
40
|
+
return undefined;
|
|
41
|
+
}
|
|
42
|
+
// Create a waiter and block until an allotment is released by another
|
|
43
|
+
// operation. Waiters will be unblocked in LIFO order.
|
|
44
|
+
const result = await this.waitForBacklog(requestContext, options.signal);
|
|
45
|
+
return result ? this.wrapAllotment(result) : undefined;
|
|
46
|
+
}
|
|
47
|
+
waitForBacklog(context, signal) {
|
|
48
|
+
return new Promise((resolve) => {
|
|
49
|
+
let settled = false;
|
|
50
|
+
const settle = (allotment) => {
|
|
51
|
+
if (settled) {
|
|
52
|
+
return;
|
|
53
|
+
}
|
|
54
|
+
settled = true;
|
|
55
|
+
cleanup();
|
|
56
|
+
resolve(allotment);
|
|
57
|
+
};
|
|
58
|
+
const waiter = {
|
|
59
|
+
context,
|
|
60
|
+
resolve: (allotment) => settle(allotment),
|
|
61
|
+
};
|
|
62
|
+
this.backlog.unshift(waiter);
|
|
63
|
+
const timer = setTimeout(() => settle(undefined), this.getBacklogTimeout(context));
|
|
64
|
+
const onAbort = () => settle(undefined);
|
|
65
|
+
const cleanup = () => {
|
|
66
|
+
clearTimeout(timer);
|
|
67
|
+
signal?.removeEventListener("abort", onAbort);
|
|
68
|
+
const idx = this.backlog.indexOf(waiter);
|
|
69
|
+
if (idx !== -1) {
|
|
70
|
+
this.backlog.splice(idx, 1);
|
|
71
|
+
}
|
|
72
|
+
};
|
|
73
|
+
signal?.addEventListener("abort", onAbort, { once: true });
|
|
74
|
+
});
|
|
75
|
+
}
|
|
76
|
+
wrapAllotment(delegate) {
|
|
77
|
+
return {
|
|
78
|
+
reportSuccess: () => {
|
|
79
|
+
delegate.reportSuccess();
|
|
80
|
+
this.unblock();
|
|
81
|
+
},
|
|
82
|
+
reportIgnore: () => {
|
|
83
|
+
delegate.reportIgnore();
|
|
84
|
+
this.unblock();
|
|
85
|
+
},
|
|
86
|
+
reportDropped: () => {
|
|
87
|
+
delegate.reportDropped();
|
|
88
|
+
this.unblock();
|
|
89
|
+
},
|
|
90
|
+
};
|
|
91
|
+
}
|
|
92
|
+
unblock() {
|
|
93
|
+
if (this.backlog.length === 0)
|
|
94
|
+
return;
|
|
95
|
+
// Peek at the front (most recently added) waiter
|
|
96
|
+
const waiter = this.backlog[0];
|
|
97
|
+
const allotment = this.delegate.acquire(waiter.context);
|
|
98
|
+
if (allotment) {
|
|
99
|
+
this.backlog.shift();
|
|
100
|
+
waiter.resolve(allotment);
|
|
101
|
+
}
|
|
102
|
+
// Still can't acquire the limit. unblock will be called again next time
|
|
103
|
+
// the limit is released.
|
|
104
|
+
}
|
|
105
|
+
    /** Human-readable description that includes the delegate limiter. */
    toString() {
        return `LifoBlockingLimiter [${this.delegate}]`;
    }
|
|
108
|
+
}
|
|
@@ -0,0 +1,31 @@
|
|
|
1
|
+
import type { LimitAllotment } from "../LimitAllotment.js";
import type { AllotmentUnavailableStrategy, AsyncAcquireResult, SyncAcquireResult } from "../Limiter.js";
export interface LifoBlockingRejectionOptions<ContextT> {
    /**
     * Maximum number of blocked callers in the backlog. Default: 100
     */
    backlogSize?: number | undefined;
    /**
     * Maximum timeout for callers blocked on the limiter, in milliseconds.
     * Can be a fixed number or a function that derives the timeout from the
     * request context (e.g. from a deadline). Default: 1000
     */
    backlogTimeout?: number | ((context: ContextT) => number) | undefined;
}
/**
 * Rejection strategy that blocks the caller in a LIFO queue when the limit
 * has been reached. This strategy favors availability over latency by
 * processing the most recently blocked request first, keeping success
 * latencies low and minimizing timeouts.
 */
export declare class LifoBlockingRejection<ContextT> implements AllotmentUnavailableStrategy<ContextT, AsyncAcquireResult> {
    /** Maximum number of queued waiters; callers beyond this are rejected. */
    private readonly backlogSize;
    /** Maps a request context to its backlog wait timeout in milliseconds. */
    private readonly getBacklogTimeout;
    /** LIFO queue of blocked callers; index 0 is the most recently added. */
    private readonly backlog;
    /** Latest retry callback passed to onAllotmentUnavailable. */
    private retry;
    constructor(options?: LifoBlockingRejectionOptions<ContextT>);
    onAllotmentUnavailable(context: ContextT, retry: (context: ContextT) => SyncAcquireResult, signal?: AbortSignal): Promise<LimitAllotment | undefined>;
    onAllotmentReleased(): void;
    private waitForBacklog;
}
//# sourceMappingURL=LifoBlockingRejection.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"LifoBlockingRejection.d.ts","sourceRoot":"","sources":["../../src/limiter/LifoBlockingRejection.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,cAAc,EAAE,MAAM,sBAAsB,CAAC;AAC3D,OAAO,KAAK,EACV,4BAA4B,EAC5B,kBAAkB,EAClB,iBAAiB,EAClB,MAAM,eAAe,CAAC;AAOvB,MAAM,WAAW,4BAA4B,CAAC,QAAQ;IACpD;;OAEG;IACH,WAAW,CAAC,EAAE,MAAM,GAAG,SAAS,CAAC;IAEjC;;;;OAIG;IACH,cAAc,CAAC,EAAE,MAAM,GAAG,CAAC,CAAC,OAAO,EAAE,QAAQ,KAAK,MAAM,CAAC,GAAG,SAAS,CAAC;CACvE;AAED;;;;;GAKG;AACH,qBAAa,qBAAqB,CAChC,QAAQ,CACR,YAAW,4BAA4B,CAAC,QAAQ,EAAE,kBAAkB,CAAC;IACrE,OAAO,CAAC,QAAQ,CAAC,WAAW,CAAS;IACrC,OAAO,CAAC,QAAQ,CAAC,iBAAiB,CAAgC;IAClE,OAAO,CAAC,QAAQ,CAAC,OAAO,CAA+B;IAEvD,OAAO,CAAC,KAAK,CAAyD;gBAE1D,OAAO,GAAE,4BAA4B,CAAC,QAAQ,CAAM;IAQhE,sBAAsB,CACpB,OAAO,EAAE,QAAQ,EACjB,KAAK,EAAE,CAAC,OAAO,EAAE,QAAQ,KAAK,iBAAiB,EAC/C,MAAM,CAAC,EAAE,WAAW,GACnB,OAAO,CAAC,cAAc,GAAG,SAAS,CAAC;IAUtC,mBAAmB,IAAI,IAAI;IAW3B,OAAO,CAAC,cAAc;CAsCvB"}
|
|
@@ -0,0 +1,63 @@
|
|
|
1
|
+
/**
 * Rejection strategy that blocks the caller in a LIFO queue when the limit
 * has been reached. This strategy favors availability over latency by
 * processing the most recently blocked request first, keeping success
 * latencies low and minimizing timeouts.
 */
export class LifoBlockingRejection {
    // Maximum number of queued waiters; callers beyond this are rejected.
    backlogSize;
    // Maps a request context to its backlog wait timeout in milliseconds.
    getBacklogTimeout;
    // LIFO queue of blocked callers. Index 0 is the most recently added.
    backlog = [];
    // Latest retry callback handed to onAllotmentUnavailable; reused when
    // an allotment is released to re-attempt acquisition for a waiter.
    retry;
    /**
     * @param options backlogSize (default 100) and backlogTimeout
     *   (default 1000 ms; either a number or a per-context function).
     */
    constructor(options = {}) {
        this.backlogSize = options.backlogSize ?? 100;
        const timeout = options.backlogTimeout ?? 1_000;
        this.getBacklogTimeout =
            typeof timeout === "number" ? () => timeout : timeout;
    }
    /**
     * Invoked when the limit has no free allotment. Parks the caller in the
     * LIFO backlog until onAllotmentReleased hands it an allotment, its
     * timeout elapses, or `signal` aborts. Resolves with an allotment, or
     * undefined when rejected (backlog full), timed out, or aborted.
     */
    onAllotmentUnavailable(context, retry, signal) {
        this.retry = retry;
        // An already-aborted signal never fires its "abort" event, so the
        // listener registered in waitForBacklog would never run and the
        // caller would stall for the full backlog timeout. Fail fast.
        if (signal?.aborted) {
            return Promise.resolve(undefined);
        }
        // Cap the backlog so the queue cannot grow unbounded during an outage.
        if (this.backlog.length >= this.backlogSize) {
            return Promise.resolve(undefined);
        }
        return this.waitForBacklog(context, signal);
    }
    /**
     * Invoked when an allotment is released. Re-attempts acquisition on
     * behalf of the most recently queued waiter (LIFO); if the retry still
     * fails, the waiter stays queued and is retried on the next release.
     */
    onAllotmentReleased() {
        if (this.backlog.length === 0 || !this.retry) {
            return;
        }
        // Peek at the front (most recently added) waiter.
        const waiter = this.backlog[0];
        const allotment = this.retry(waiter.context);
        if (allotment) {
            this.backlog.shift();
            waiter.resolve(allotment);
        }
    }
    /**
     * Create the promise a blocked caller awaits. The waiter is pushed onto
     * the front of the backlog; it settles exactly once — with an allotment
     * (via onAllotmentReleased) or undefined on timeout/abort — and always
     * cleans up its timer, abort listener, and backlog slot.
     */
    waitForBacklog(context, signal) {
        return new Promise((resolve) => {
            let settled = false;
            const settle = (allotment) => {
                if (settled)
                    return;
                settled = true;
                cleanup();
                resolve(allotment);
            };
            const waiter = {
                context,
                resolve: (allotment) => settle(allotment),
            };
            // LIFO: the newest waiter sits at the front of the array.
            this.backlog.unshift(waiter);
            const timer = setTimeout(() => settle(undefined), this.getBacklogTimeout(context));
            const onAbort = () => settle(undefined);
            const cleanup = () => {
                clearTimeout(timer);
                signal?.removeEventListener("abort", onAbort);
                // Remove the waiter wherever it currently sits in the queue.
                const idx = this.backlog.indexOf(waiter);
                if (idx !== -1) {
                    this.backlog.splice(idx, 1);
                }
            };
            signal?.addEventListener("abort", onAbort, { once: true });
        });
    }
}
|