adaptive-concurrency 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/Limit.d.ts +29 -0
- package/dist/Limit.d.ts.map +1 -0
- package/dist/Limit.js +1 -0
- package/dist/LimitAllotment.d.ts +23 -0
- package/dist/LimitAllotment.d.ts.map +1 -0
- package/dist/LimitAllotment.js +1 -0
- package/dist/Limiter.d.ts +175 -0
- package/dist/Limiter.d.ts.map +1 -0
- package/dist/Limiter.js +240 -0
- package/dist/Listener.d.ts +23 -0
- package/dist/Listener.d.ts.map +1 -0
- package/dist/Listener.js +1 -0
- package/dist/ListenerSet.d.ts +12 -0
- package/dist/ListenerSet.d.ts.map +1 -0
- package/dist/ListenerSet.js +35 -0
- package/dist/MetricIds.d.ts +13 -0
- package/dist/MetricIds.d.ts.map +1 -0
- package/dist/MetricIds.js +12 -0
- package/dist/MetricRegistry.d.ts +66 -0
- package/dist/MetricRegistry.d.ts.map +1 -0
- package/dist/MetricRegistry.js +30 -0
- package/dist/RunResult.d.ts +33 -0
- package/dist/RunResult.d.ts.map +1 -0
- package/dist/RunResult.js +35 -0
- package/dist/StreamingLimit.d.ts +26 -0
- package/dist/StreamingLimit.d.ts.map +1 -0
- package/dist/StreamingLimit.js +1 -0
- package/dist/executors/AdaptiveExecutor.d.ts +50 -0
- package/dist/executors/AdaptiveExecutor.d.ts.map +1 -0
- package/dist/executors/AdaptiveExecutor.js +80 -0
- package/dist/index.d.ts +27 -0
- package/dist/index.d.ts.map +1 -0
- package/dist/index.js +28 -0
- package/dist/limit/AIMDLimit.d.ts +37 -0
- package/dist/limit/AIMDLimit.d.ts.map +1 -0
- package/dist/limit/AIMDLimit.js +49 -0
- package/dist/limit/FixedLimit.d.ts +15 -0
- package/dist/limit/FixedLimit.d.ts.map +1 -0
- package/dist/limit/FixedLimit.js +23 -0
- package/dist/limit/Gradient2Limit.d.ts +122 -0
- package/dist/limit/Gradient2Limit.d.ts.map +1 -0
- package/dist/limit/Gradient2Limit.js +107 -0
- package/dist/limit/GradientLimit.d.ts +122 -0
- package/dist/limit/GradientLimit.d.ts.map +1 -0
- package/dist/limit/GradientLimit.js +108 -0
- package/dist/limit/SettableLimit.d.ts +18 -0
- package/dist/limit/SettableLimit.d.ts.map +1 -0
- package/dist/limit/SettableLimit.js +30 -0
- package/dist/limit/StreamingLimit.d.ts +26 -0
- package/dist/limit/StreamingLimit.d.ts.map +1 -0
- package/dist/limit/StreamingLimit.js +1 -0
- package/dist/limit/TracingLimitDecorator.d.ts +16 -0
- package/dist/limit/TracingLimitDecorator.d.ts.map +1 -0
- package/dist/limit/TracingLimitDecorator.js +23 -0
- package/dist/limit/VegasLimit.d.ts +85 -0
- package/dist/limit/VegasLimit.d.ts.map +1 -0
- package/dist/limit/VegasLimit.js +127 -0
- package/dist/limit/WindowedLimit.d.ts +48 -0
- package/dist/limit/WindowedLimit.d.ts.map +1 -0
- package/dist/limit/WindowedLimit.js +67 -0
- package/dist/limit/statistics/ExpMovingAverage.d.ts +21 -0
- package/dist/limit/statistics/ExpMovingAverage.d.ts.map +1 -0
- package/dist/limit/statistics/ExpMovingAverage.js +43 -0
- package/dist/limit/statistics/Minimum.d.ts +12 -0
- package/dist/limit/statistics/Minimum.d.ts.map +1 -0
- package/dist/limit/statistics/Minimum.js +22 -0
- package/dist/limit/statistics/MinimumValue.d.ts +12 -0
- package/dist/limit/statistics/MinimumValue.d.ts.map +1 -0
- package/dist/limit/statistics/MinimumValue.js +22 -0
- package/dist/limit/statistics/SingleMeasurement.d.ts +12 -0
- package/dist/limit/statistics/SingleMeasurement.d.ts.map +1 -0
- package/dist/limit/statistics/SingleMeasurement.js +21 -0
- package/dist/limit/statistics/StreamingStatistic.d.ts +29 -0
- package/dist/limit/statistics/StreamingStatistic.d.ts.map +1 -0
- package/dist/limit/statistics/StreamingStatistic.js +1 -0
- package/dist/limit/utils/index.d.ts +10 -0
- package/dist/limit/utils/index.d.ts.map +1 -0
- package/dist/limit/utils/index.js +19 -0
- package/dist/limit/window/AverageSampleWindow.d.ts +4 -0
- package/dist/limit/window/AverageSampleWindow.d.ts.map +1 -0
- package/dist/limit/window/AverageSampleWindow.js +46 -0
- package/dist/limit/window/PercentileSampleWindow.d.ts +38 -0
- package/dist/limit/window/PercentileSampleWindow.d.ts.map +1 -0
- package/dist/limit/window/PercentileSampleWindow.js +81 -0
- package/dist/limit/window/SampleWindow.d.ts +30 -0
- package/dist/limit/window/SampleWindow.d.ts.map +1 -0
- package/dist/limit/window/SampleWindow.js +1 -0
- package/dist/limiter/AbstractLimiter.d.ts +48 -0
- package/dist/limiter/AbstractLimiter.d.ts.map +1 -0
- package/dist/limiter/AbstractLimiter.js +78 -0
- package/dist/limiter/AbstractPartitionedLimiter.d.ts +66 -0
- package/dist/limiter/AbstractPartitionedLimiter.d.ts.map +1 -0
- package/dist/limiter/AbstractPartitionedLimiter.js +209 -0
- package/dist/limiter/BlockingLimiter.d.ts +55 -0
- package/dist/limiter/BlockingLimiter.d.ts.map +1 -0
- package/dist/limiter/BlockingLimiter.js +111 -0
- package/dist/limiter/DelayedRejectStrategy.d.ts +32 -0
- package/dist/limiter/DelayedRejectStrategy.d.ts.map +1 -0
- package/dist/limiter/DelayedRejectStrategy.js +60 -0
- package/dist/limiter/DelayedThenBlockingRejection.d.ts +19 -0
- package/dist/limiter/DelayedThenBlockingRejection.d.ts.map +1 -0
- package/dist/limiter/DelayedThenBlockingRejection.js +26 -0
- package/dist/limiter/FifoBlockingRejection.d.ts +26 -0
- package/dist/limiter/FifoBlockingRejection.d.ts.map +1 -0
- package/dist/limiter/FifoBlockingRejection.js +77 -0
- package/dist/limiter/LifoBlockingLimiter.d.ts +53 -0
- package/dist/limiter/LifoBlockingLimiter.d.ts.map +1 -0
- package/dist/limiter/LifoBlockingLimiter.js +108 -0
- package/dist/limiter/LifoBlockingRejection.d.ts +31 -0
- package/dist/limiter/LifoBlockingRejection.d.ts.map +1 -0
- package/dist/limiter/LifoBlockingRejection.js +63 -0
- package/dist/limiter/PartitionedStrategy.d.ts +90 -0
- package/dist/limiter/PartitionedStrategy.d.ts.map +1 -0
- package/dist/limiter/PartitionedStrategy.js +183 -0
- package/dist/limiter/SimpleLimiter.d.ts +31 -0
- package/dist/limiter/SimpleLimiter.d.ts.map +1 -0
- package/dist/limiter/SimpleLimiter.js +119 -0
- package/dist/limiter/factories/index.d.ts +7 -0
- package/dist/limiter/factories/index.d.ts.map +1 -0
- package/dist/limiter/factories/index.js +6 -0
- package/dist/limiter/factories/makeBlockingLimiter.d.ts +6 -0
- package/dist/limiter/factories/makeBlockingLimiter.d.ts.map +1 -0
- package/dist/limiter/factories/makeBlockingLimiter.js +8 -0
- package/dist/limiter/factories/makeLifoBlockingLimiter.d.ts +8 -0
- package/dist/limiter/factories/makeLifoBlockingLimiter.d.ts.map +1 -0
- package/dist/limiter/factories/makeLifoBlockingLimiter.js +15 -0
- package/dist/limiter/factories/makePartitionedBlockingLimiter.d.ts +12 -0
- package/dist/limiter/factories/makePartitionedBlockingLimiter.d.ts.map +1 -0
- package/dist/limiter/factories/makePartitionedBlockingLimiter.js +35 -0
- package/dist/limiter/factories/makePartitionedLifoBlockingLimiter.d.ts +14 -0
- package/dist/limiter/factories/makePartitionedLifoBlockingLimiter.d.ts.map +1 -0
- package/dist/limiter/factories/makePartitionedLifoBlockingLimiter.js +38 -0
- package/dist/limiter/factories/makePartitionedLimiter.d.ts +11 -0
- package/dist/limiter/factories/makePartitionedLimiter.d.ts.map +1 -0
- package/dist/limiter/factories/makePartitionedLimiter.js +30 -0
- package/dist/limiter/factories/makeSimpleLimiter.d.ts +3 -0
- package/dist/limiter/factories/makeSimpleLimiter.d.ts.map +1 -0
- package/dist/limiter/factories/makeSimpleLimiter.js +9 -0
- package/dist/limiter/factories.d.ts +31 -0
- package/dist/limiter/factories.d.ts.map +1 -0
- package/dist/limiter/factories.js +74 -0
- package/dist/statistics/ExpMovingAverage.d.ts +21 -0
- package/dist/statistics/ExpMovingAverage.d.ts.map +1 -0
- package/dist/statistics/ExpMovingAverage.js +43 -0
- package/dist/statistics/MinimumValue.d.ts +12 -0
- package/dist/statistics/MinimumValue.d.ts.map +1 -0
- package/dist/statistics/MinimumValue.js +22 -0
- package/dist/statistics/MostRecentValue.d.ts +12 -0
- package/dist/statistics/MostRecentValue.d.ts.map +1 -0
- package/dist/statistics/MostRecentValue.js +21 -0
- package/dist/statistics/StreamingStatistic.d.ts +29 -0
- package/dist/statistics/StreamingStatistic.d.ts.map +1 -0
- package/dist/statistics/StreamingStatistic.js +1 -0
- package/dist/utils/index.d.ts +10 -0
- package/dist/utils/index.d.ts.map +1 -0
- package/dist/utils/index.js +19 -0
- package/package.json +31 -0
|
@@ -0,0 +1,107 @@
|
|
|
1
|
+
import { ListenerSet } from "../ListenerSet.js";
|
|
2
|
+
import { MetricIds } from "../MetricIds.js";
|
|
3
|
+
import { NoopMetricRegistry } from "../MetricRegistry.js";
|
|
4
|
+
import { ExpMovingAverage } from "../statistics/ExpMovingAverage.js";
|
|
5
|
+
/**
 * Gradient2 concurrency limit. Compares each short-term RTT sample against a
 * long-term exponentially smoothed baseline RTT: when current latency signals
 * queuing the limit shrinks, while a small fixed queue allowance lets the
 * limit grow when latencies stay low.
 */
export class Gradient2Limit {
    _limit;
    limitListeners = new ListenerSet();
    /** Estimated (unrounded) concurrency limit produced by the algorithm. */
    estimatedLimit;
    /** Most recent RTT sample fed into the algorithm. */
    lastRtt = 0;
    /** Long-term smoothed baseline RTT; trends higher while under load. */
    longRtt;
    /** Upper-bound failsafe on the limit. */
    maxLimit;
    /** Lower bound on the limit. */
    minLimit;
    /** Queue allowance as a function of the current (floored) estimate. */
    queueSize;
    /** Smoothing factor in (0, 1]; 1 fully replaces the estimate each sample. */
    smoothing;
    /** How much baseline-latency drift is tolerated before shrinking (>= 1). */
    tolerance;
    longRttSampleListener;
    shortRttSampleListener;
    queueSizeSampleListener;
    constructor(options = {}) {
        const startingLimit = options.initialLimit ?? 20;
        this._limit = startingLimit;
        this.estimatedLimit = startingLimit;
        this.maxLimit = options.maxConcurrency ?? 200;
        this.minLimit = options.minLimit ?? 20;
        this.smoothing = options.smoothing ?? 0.2;
        this.tolerance = options.rttTolerance ?? 1.5;
        this.longRtt = new ExpMovingAverage(options.longWindow ?? 600, 10);
        if (options.rttTolerance !== undefined && options.rttTolerance < 1.0) {
            throw new Error("Tolerance must be >= 1.0");
        }
        const configuredQueueSize = options.queueSize ?? 4;
        this.queueSize =
            typeof configuredQueueSize === "number"
                ? () => configuredQueueSize
                : configuredQueueSize;
        const metrics = options.metricRegistry ?? NoopMetricRegistry;
        this.longRttSampleListener = metrics.distribution(MetricIds.MIN_RTT_NAME);
        this.shortRttSampleListener = metrics.distribution(MetricIds.WINDOW_MIN_RTT_NAME);
        this.queueSizeSampleListener = metrics.distribution(MetricIds.WINDOW_QUEUE_SIZE_NAME);
    }
    /** Record one completed request sample and recompute the published limit. */
    addSample(_startTime, rtt, inflight, _didDrop) {
        const unrounded = this.computeNextLimitUnrounded(rtt, inflight);
        this.estimatedLimit = unrounded;
        this.applyNewLimit(Math.floor(unrounded));
    }
    get currentLimit() {
        return this._limit;
    }
    // Publish the floored limit, notifying subscribers only when it changes.
    applyNewLimit(newLimit) {
        if (newLimit === this._limit) {
            return;
        }
        this._limit = newLimit;
        this.limitListeners.notify(newLimit);
    }
    subscribe(consumer, options = {}) {
        return this.limitListeners.subscribe(consumer, options);
    }
    /**
     * Core Gradient2 update step. Returns the next unrounded limit estimate
     * for the given RTT sample and inflight count.
     */
    computeNextLimitUnrounded(rtt, inflight) {
        const currentEstimate = this.estimatedLimit;
        const allowedQueue = this.queueSize(Math.floor(currentEstimate));
        this.lastRtt = rtt;
        const baselineRtt = this.longRtt.addSample(rtt);
        this.shortRttSampleListener.addSample(rtt);
        this.longRttSampleListener.addSample(baselineRtt);
        this.queueSizeSampleListener.addSample(allowedQueue);
        // When the baseline drifts far above the current sample — e.g. load has
        // just subsided after a long overload — decay the baseline faster than
        // the exponential smoothing alone would, to return to steady state.
        if (baselineRtt / rtt > 2) {
            this.longRtt.update((current) => current * 0.95);
        }
        // App-limited: fewer than half the permitted requests are actually in
        // flight, so there is no signal on which to grow the limit.
        if (inflight < currentEstimate / 2) {
            return currentEstimate;
        }
        // Gradient clamped to [0.5, 1.0]: the ratio alone never grows the limit,
        // and a single outlier can shed at most half of it.
        const gradient = Math.max(0.5, Math.min(1.0, (this.tolerance * baselineRtt) / rtt));
        const target = currentEstimate * gradient + allowedQueue;
        const smoothed = currentEstimate * (1 - this.smoothing) + target * this.smoothing;
        // Clamp into [minLimit, maxLimit] (min wins if the bounds ever cross,
        // matching the max(min, min(max, x)) form).
        return Math.max(this.minLimit, Math.min(this.maxLimit, smoothed));
    }
    getLastRtt() {
        return this.lastRtt;
    }
    getRttNoLoad() {
        return this.longRtt.currentValue;
    }
    toString() {
        return `Gradient2Limit [limit=${Math.floor(this.estimatedLimit)}]`;
    }
}
|
|
@@ -0,0 +1,122 @@
|
|
|
1
|
+
import type { MetricRegistry } from "../MetricRegistry.js";
|
|
2
|
+
import type { AdaptiveLimit } from "./StreamingLimit.js";
|
|
3
|
+
/**
 * Concurrency limit algorithm that adjusts the limit based on the gradient of
 * change of the current average RTT and a long term exponentially smoothed
 * average RTT. Unlike traditional congestion control algorithms we use average
 * instead of minimum since RPC methods can be very bursty due to various
 * factors such as non-homogenous request processing complexity as well as a
 * wide distribution of data size. We have also found that using minimum can
 * result in a bias towards an impractically low base RTT resulting in excessive
 * load shedding. An exponential decay is applied to the base RTT so that the
 * value is kept stable yet is allowed to adapt to long term changes in latency
 * characteristics.
 *
 * The core algorithm re-calculates the limit every sampling window
 * (e.g. 1 second) using the formula:
 *
 * // Calculate the gradient limiting to the range [0.5, 1.0] to filter outliers
 * gradient = max(0.5, min(1.0, longtermRtt / currentRtt));
 *
 * // Calculate the new limit by applying the gradient and allowing for some queuing
 * newLimit = gradient * currentLimit + queueSize;
 *
 * // Update the limit using a smoothing factor (default 0.2)
 * newLimit = currentLimit * (1 - smoothing) + newLimit * smoothing
 *
 * The limit can be in one of three main states:
 *
 * 1. Steady state
 *    The average RTT is very stable and the current measurement whipsaws around
 *    this value, sometimes reducing the limit, sometimes increasing it.
 *
 * 2. Transition from steady state to load
 *    Either the RPS or latency has spiked. The gradient is < 1.0 due to a
 *    growing request queue that cannot be handled by the system. Excessive
 *    requests are rejected due to the low limit. The baseline RTT grows using
 *    exponential decay but lags the current measurement, which keeps the
 *    gradient < 1.0 and limit low.
 *
 * 3. Transition from load to steady state
 *    The system goes back to steady state after a prolonged period of excessive
 *    load. Requests aren't rejected and the sample RTT remains low. During this
 *    state the long term RTT may take some time to go back to normal and could
 *    potentially be several multiples higher than the current RTT.
 */
export interface Gradient2LimitOptions {
    /** Initial limit used by the limiter. Default: 20 */
    initialLimit?: number;
    /**
     * Minimum concurrency limit allowed. The minimum helps prevent the algorithm
     * from adjusting the limit too far down. Note that this limit is not
     * desirable when used as backpressure for batch apps. Default: 20
     */
    minLimit?: number;
    /**
     * Maximum allowable concurrency. Any estimated concurrency will be capped at
     * this value. Default: 200
     */
    maxConcurrency?: number;
    /**
     * Smoothing factor to limit how aggressively the estimated limit can shrink
     * when queuing has been detected. Value of 0.0 to 1.0 where 1.0 means the
     * limit is completely replaced by the new estimate. Default: 0.2
     */
    smoothing?: number;
    /**
     * Fixed amount the estimated limit can grow while latencies remain low.
     * Can be a constant or a function of the current limit. Default: 4
     */
    queueSize?: number | ((concurrency: number) => number);
    /**
     * Tolerance for changes in minimum latency. Value >= 1.0 indicating how
     * much change in minimum latency is acceptable before reducing the limit.
     * For example, a value of 2.0 means that a 2x increase in latency is
     * acceptable. Default: 1.5
     */
    rttTolerance?: number;
    /**
     * Number of samples in the long-term exponential average window.
     * Default: 600
     */
    longWindow?: number;
    /**
     * Registry that receives the RTT and queue-size distribution samples
     * published by the limit. Default: NoopMetricRegistry (samples discarded).
     */
    metricRegistry?: MetricRegistry;
}
|
|
85
|
+
export declare class GradientLimit implements AdaptiveLimit {
    private _limit;
    private readonly limitListeners;
    /** Estimated concurrency limit based on our algorithm */
    private estimatedLimit;
    /**
     * Tracks a measurement of the short time, and more volatile, RTT meant to
     * represent the current system latency.
     */
    private lastRtt;
    /**
     * Tracks a measurement of the long term, less volatile, RTT meant to
     * represent the baseline latency. When the system is under load this number
     * is expected to trend higher.
     */
    private readonly longRtt;
    /** Maximum allowed limit providing an upper bound failsafe */
    private readonly maxLimit;
    /** Minimum allowed limit, preventing the estimate from collapsing. */
    private readonly minLimit;
    /** Queue allowance; evaluated against the current (floored) estimate. */
    private readonly queueSize;
    private readonly smoothing;
    private readonly tolerance;
    private readonly longRttSampleListener;
    private readonly shortRttSampleListener;
    private readonly queueSizeSampleListener;
    constructor(options?: Gradient2LimitOptions);
    /** Feed one completed-request sample into the algorithm; may update currentLimit. */
    addSample(_startTime: number, rtt: number, inflight: number, _didDrop: boolean): void;
    /** Current published (floored) concurrency limit. */
    get currentLimit(): number;
    private applyNewLimit;
    /** Subscribe to limit changes; returns an unsubscribe function. */
    subscribe(consumer: (newLimit: number) => void, options?: {
        signal?: AbortSignal;
    }): () => void;
    private computeNextLimitUnrounded;
    /** Most recent RTT sample passed to addSample. */
    getLastRtt(): number;
    /** Current long-term (baseline) RTT estimate. */
    getRttNoLoad(): number;
    toString(): string;
}
//# sourceMappingURL=GradientLimit.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"GradientLimit.d.ts","sourceRoot":"","sources":["../../src/limit/GradientLimit.ts"],"names":[],"mappings":"AAEA,OAAO,KAAK,EAAsB,cAAc,EAAE,MAAM,sBAAsB,CAAC;AAI/E,OAAO,KAAK,EAAE,aAAa,EAAE,MAAM,qBAAqB,CAAC;AAEzD;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;GA0CG;AACH,MAAM,WAAW,qBAAqB;IACpC,qDAAqD;IACrD,YAAY,CAAC,EAAE,MAAM,CAAC;IAEtB;;;;OAIG;IACH,QAAQ,CAAC,EAAE,MAAM,CAAC;IAElB;;;OAGG;IACH,cAAc,CAAC,EAAE,MAAM,CAAC;IAExB;;;;OAIG;IACH,SAAS,CAAC,EAAE,MAAM,CAAC;IAEnB;;;OAGG;IACH,SAAS,CAAC,EAAE,MAAM,GAAG,CAAC,CAAC,WAAW,EAAE,MAAM,KAAK,MAAM,CAAC,CAAC;IAEvD;;;;;OAKG;IACH,YAAY,CAAC,EAAE,MAAM,CAAC;IAEtB;;;OAGG;IACH,UAAU,CAAC,EAAE,MAAM,CAAC;IAEpB,cAAc,CAAC,EAAE,cAAc,CAAC;CACjC;AAED,qBAAa,aAAc,YAAW,aAAa;IACjD,OAAO,CAAC,MAAM,CAAS;IACvB,OAAO,CAAC,QAAQ,CAAC,cAAc,CAAqB;IAEpD,yDAAyD;IACzD,OAAO,CAAC,cAAc,CAAS;IAE/B;;;OAGG;IACH,OAAO,CAAC,OAAO,CAAK;IAEpB;;;;OAIG;IACH,OAAO,CAAC,QAAQ,CAAC,OAAO,CAAqB;IAE7C,8DAA8D;IAC9D,OAAO,CAAC,QAAQ,CAAC,QAAQ,CAAS;IAElC,OAAO,CAAC,QAAQ,CAAC,QAAQ,CAAS;IAClC,OAAO,CAAC,QAAQ,CAAC,SAAS,CAAkC;IAC5D,OAAO,CAAC,QAAQ,CAAC,SAAS,CAAS;IACnC,OAAO,CAAC,QAAQ,CAAC,SAAS,CAAS;IAEnC,OAAO,CAAC,QAAQ,CAAC,qBAAqB,CAAqB;IAC3D,OAAO,CAAC,QAAQ,CAAC,sBAAsB,CAAqB;IAC5D,OAAO,CAAC,QAAQ,CAAC,uBAAuB,CAAqB;gBAEjD,OAAO,GAAE,qBAA0B;IA4B/C,SAAS,CACP,UAAU,EAAE,MAAM,EAClB,GAAG,EAAE,MAAM,EACX,QAAQ,EAAE,MAAM,EAChB,QAAQ,EAAE,OAAO,GAChB,IAAI;IASP,IAAI,YAAY,IAAI,MAAM,CAEzB;IAED,OAAO,CAAC,aAAa;IAOrB,SAAS,CACP,QAAQ,EAAE,CAAC,QAAQ,EAAE,MAAM,KAAK,IAAI,EACpC,OAAO,GAAE;QAAE,MAAM,CAAC,EAAE,WAAW,CAAA;KAAO,GACrC,MAAM,IAAI;IAIb,OAAO,CAAC,yBAAyB;IA0CjC,UAAU,IAAI,MAAM;IAIpB,YAAY,IAAI,MAAM;IAItB,QAAQ,IAAI,MAAM;CAGnB"}
|
|
@@ -0,0 +1,108 @@
|
|
|
1
|
+
import { ListenerSet } from "../ListenerSet.js";
// FIX: MetricIds is defined in its own module (dist/MetricIds.js); the sibling
// Gradient2Limit.js imports it from there. Importing it from MetricRegistry.js
// resolves only if that module re-exports MetricIds, which nothing here shows.
import { MetricIds } from "../MetricIds.js";
import { NoopMetricRegistry } from "../MetricRegistry.js";
import { ExpMovingAverage } from "../statistics/ExpMovingAverage.js";
/**
 * Gradient-based concurrency limit. Compares the current RTT sample against a
 * long-term exponentially smoothed baseline RTT; when the current latency
 * indicates queuing the limit shrinks, and while latencies remain low a small
 * queue allowance lets the limit grow.
 */
export class GradientLimit {
    _limit;
    limitListeners = new ListenerSet();
    /** Estimated concurrency limit based on our algorithm */
    estimatedLimit;
    /**
     * Tracks a measurement of the short time, and more volatile, RTT meant to
     * represent the current system latency.
     */
    lastRtt = 0;
    /**
     * Tracks a measurement of the long term, less volatile, RTT meant to
     * represent the baseline latency. When the system is under load this number
     * is expected to trend higher.
     */
    longRtt;
    /** Maximum allowed limit providing an upper bound failsafe */
    maxLimit;
    /** Minimum allowed limit, preventing the estimate from collapsing. */
    minLimit;
    /** Queue allowance as a function of the current (floored) estimate. */
    queueSize;
    /** Smoothing factor (0..1]; 1 fully replaces the estimate each sample. */
    smoothing;
    /** How much baseline-latency drift is tolerated before shrinking (>= 1). */
    tolerance;
    longRttSampleListener;
    shortRttSampleListener;
    queueSizeSampleListener;
    constructor(options = {}) {
        const initialLimit = options.initialLimit ?? 20;
        this._limit = initialLimit;
        this.estimatedLimit = initialLimit;
        this.maxLimit = options.maxConcurrency ?? 200;
        this.minLimit = options.minLimit ?? 20;
        this.smoothing = options.smoothing ?? 0.2;
        this.tolerance = options.rttTolerance ?? 1.5;
        this.longRtt = new ExpMovingAverage(options.longWindow ?? 600, 10);
        if (options.rttTolerance !== undefined && options.rttTolerance < 1.0) {
            throw new Error("Tolerance must be >= 1.0");
        }
        const qs = options.queueSize ?? 4;
        this.queueSize = typeof qs === "number" ? () => qs : qs;
        const registry = options.metricRegistry ?? NoopMetricRegistry;
        this.longRttSampleListener = registry.distribution(MetricIds.MIN_RTT_NAME);
        this.shortRttSampleListener = registry.distribution(MetricIds.WINDOW_MIN_RTT_NAME);
        this.queueSizeSampleListener = registry.distribution(MetricIds.WINDOW_QUEUE_SIZE_NAME);
    }
    /** Record one completed request sample and recompute the published limit. */
    addSample(_startTime, rtt, inflight, _didDrop) {
        const newLimitNoFloor = this.computeNextLimitUnrounded(rtt, inflight);
        this.estimatedLimit = newLimitNoFloor;
        const newLimit = Math.floor(newLimitNoFloor);
        this.applyNewLimit(newLimit);
    }
    get currentLimit() {
        return this._limit;
    }
    // Publish the floored limit, notifying subscribers only when it changes.
    applyNewLimit(newLimit) {
        if (newLimit !== this._limit) {
            this._limit = newLimit;
            this.limitListeners.notify(newLimit);
        }
    }
    subscribe(consumer, options = {}) {
        return this.limitListeners.subscribe(consumer, options);
    }
    /** Core update step: returns the next unrounded limit estimate. */
    computeNextLimitUnrounded(rtt, inflight) {
        let estimatedLimit = this.estimatedLimit;
        const queueSize = this.queueSize(Math.floor(estimatedLimit));
        this.lastRtt = rtt;
        const shortRtt = rtt;
        const longRtt = this.longRtt.addSample(rtt);
        this.shortRttSampleListener.addSample(shortRtt);
        this.longRttSampleListener.addSample(longRtt);
        this.queueSizeSampleListener.addSample(queueSize);
        // If the long RTT is substantially larger than the short RTT then reduce
        // the long RTT measurement. This can happen when latency returns to normal
        // after a prolonged period of excessive load. Reducing the long RTT without
        // waiting for the exponential smoothing helps bring the system back to
        // steady state.
        if (longRtt / shortRtt > 2) {
            this.longRtt.update((current) => current * 0.95);
        }
        // Don't grow the limit if we are app limited
        if (inflight < estimatedLimit / 2) {
            return estimatedLimit;
        }
        // Rtt could be higher than rtt_noload because of smoothing rtt noload
        // updates so set to 1.0 to indicate no queuing. Otherwise calculate the
        // slope and don't allow it to be reduced by more than half to avoid
        // aggressive load-shedding due to outliers.
        const gradient = Math.max(0.5, Math.min(1.0, (this.tolerance * longRtt) / shortRtt));
        let newLimit = estimatedLimit * gradient + queueSize;
        newLimit =
            estimatedLimit * (1 - this.smoothing) + newLimit * this.smoothing;
        newLimit = Math.max(this.minLimit, Math.min(this.maxLimit, newLimit));
        return newLimit;
    }
    /** Most recent RTT sample passed to addSample. */
    getLastRtt() {
        return this.lastRtt;
    }
    /** Current long-term (baseline) RTT estimate. */
    getRttNoLoad() {
        return this.longRtt.currentValue;
    }
    toString() {
        // FIX: was labelled "Gradient2Limit" — a copy/paste slip from the sibling class.
        return `GradientLimit [limit=${Math.floor(this.estimatedLimit)}]`;
    }
}
|
|
@@ -0,0 +1,18 @@
|
|
|
1
|
+
import type { AdaptiveLimit } from "./StreamingLimit.js";
|
|
2
|
+
/**
 * Limit to be used mostly for testing where the limit can be manually
 * adjusted.
 */
export declare class SettableLimit implements AdaptiveLimit {
    private _limit;
    private readonly limitListeners;
    constructor(limit: number);
    /** Current manually configured limit. */
    get currentLimit(): number;
    /** No-op: request samples never change this limit; use setLimit instead. */
    addSample(_startTime: number, _rtt: number, _inflight: number, _didDrop: boolean): void;
    /** Replace the limit; subscribers are notified only when the value changes. */
    setLimit(limit: number): void;
    /** Subscribe to limit changes; returns an unsubscribe function. */
    subscribe(consumer: (newLimit: number) => void, options?: {
        signal?: AbortSignal;
    }): () => void;
    toString(): string;
}
//# sourceMappingURL=SettableLimit.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"SettableLimit.d.ts","sourceRoot":"","sources":["../../src/limit/SettableLimit.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAAE,aAAa,EAAE,MAAM,qBAAqB,CAAC;AAEzD;;;GAGG;AACH,qBAAa,aAAc,YAAW,aAAa;IACjD,OAAO,CAAC,MAAM,CAAS;IACvB,OAAO,CAAC,QAAQ,CAAC,cAAc,CAAiD;gBAEpE,KAAK,EAAE,MAAM;IAIzB,IAAI,YAAY,IAAI,MAAM,CAEzB;IAED,SAAS,CACP,UAAU,EAAE,MAAM,EAClB,IAAI,EAAE,MAAM,EACZ,SAAS,EAAE,MAAM,EACjB,QAAQ,EAAE,OAAO,GAChB,IAAI;IAIP,QAAQ,CAAC,KAAK,EAAE,MAAM,GAAG,IAAI;IAO7B,SAAS,CACP,QAAQ,EAAE,CAAC,QAAQ,EAAE,MAAM,KAAK,IAAI,EACpC,OAAO,GAAE;QAAE,MAAM,CAAC,EAAE,WAAW,CAAA;KAAO,GACrC,MAAM,IAAI;IAIb,QAAQ,IAAI,MAAM;CAGnB"}
|
|
@@ -0,0 +1,30 @@
|
|
|
1
|
+
import { ListenerSet } from "../ListenerSet.js";
|
|
2
|
+
/**
 * Limit to be used mostly for testing where the limit can be manually
 * adjusted.
 */
export class SettableLimit {
    _limit;
    limitListeners = new ListenerSet();
    constructor(limit) {
        this._limit = limit;
    }
    get currentLimit() {
        return this._limit;
    }
    /** Samples are ignored — this limit only ever changes via setLimit(). */
    addSample(_startTime, _rtt, _inflight, _didDrop) {
        // Intentionally a no-op.
    }
    /** Replace the limit, notifying subscribers only on an actual change. */
    setLimit(limit) {
        if (limit === this._limit) {
            return;
        }
        this._limit = limit;
        this.limitListeners.notify(limit);
    }
    subscribe(consumer, options = {}) {
        return this.limitListeners.subscribe(consumer, options);
    }
    toString() {
        return `SettableLimit [limit=${this.currentLimit}]`;
    }
}
|
|
@@ -0,0 +1,26 @@
|
|
|
1
|
+
/**
 * Contract for an algorithm that maintains a concurrency limit from a set of
 * RTT samples.
 */
export interface AdaptiveLimit {
    /** Current estimated concurrency limit. */
    get currentLimit(): number;
    /**
     * Subscribe to limit changes. The callback runs whenever the limit updates.
     *
     * Optional AbortSignal support is provided for ergonomic cancellation.
     *
     * @returns A function that unsubscribes the consumer when called.
     */
    subscribe(consumer: (newLimit: number) => void, options?: {
        signal?: AbortSignal;
    }): () => void;
    /**
     * Adjust the estimated limit using a completed request sample.
     * @param startTime Start time in fractional milliseconds (from performance.now())
     * @param rtt Round trip time in fractional milliseconds
     * @param inflight Number of inflight requests at the time the request started
     * @param didDrop Whether the request was dropped (timeout or rejection)
     */
    addSample(startTime: number, rtt: number, inflight: number, didDrop: boolean): void;
}
//# sourceMappingURL=StreamingLimit.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"StreamingLimit.d.ts","sourceRoot":"","sources":["../../src/limit/StreamingLimit.ts"],"names":[],"mappings":"AAAA;;;GAGG;AACH,MAAM,WAAW,aAAa;IAC5B,2CAA2C;IAC3C,IAAI,YAAY,IAAI,MAAM,CAAC;IAE3B;;;;;OAKG;IACH,SAAS,CACP,QAAQ,EAAE,CAAC,QAAQ,EAAE,MAAM,KAAK,IAAI,EACpC,OAAO,CAAC,EAAE;QAAE,MAAM,CAAC,EAAE,WAAW,CAAA;KAAE,GACjC,MAAM,IAAI,CAAC;IAEd;;;;;;OAMG;IACH,SAAS,CAAC,SAAS,EAAE,MAAM,EAAE,GAAG,EAAE,MAAM,EAAE,QAAQ,EAAE,MAAM,EAAE,OAAO,EAAE,OAAO,GAAG,IAAI,CAAC;CACrF"}
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
export {};
|
|
@@ -0,0 +1,16 @@
|
|
|
1
|
+
import type { AdaptiveLimit } from "./StreamingLimit.js";
|
|
2
|
+
/**
 * Limit decorator that logs each sample to the console at debug level
 * before forwarding to the delegate.
 */
export declare class TracingLimitDecorator implements AdaptiveLimit {
    private readonly delegate;
    /** Convenience factory; equivalent to `new TracingLimitDecorator(delegate)`. */
    static wrap(delegate: AdaptiveLimit): TracingLimitDecorator;
    constructor(delegate: AdaptiveLimit);
    /** Current limit as reported by the wrapped delegate. */
    get currentLimit(): number;
    /** Logs the sample via console.debug, then forwards it to the delegate. */
    addSample(startTime: number, rtt: number, inflight: number, didDrop: boolean): void;
    /** Delegates subscription to the wrapped limit; returns its unsubscribe function. */
    subscribe(consumer: (newLimit: number) => void, options?: {
        signal?: AbortSignal;
    }): () => void;
}
//# sourceMappingURL=TracingLimitDecorator.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"TracingLimitDecorator.d.ts","sourceRoot":"","sources":["../../src/limit/TracingLimitDecorator.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,aAAa,EAAE,MAAM,qBAAqB,CAAC;AAEzD;;;GAGG;AACH,qBAAa,qBAAsB,YAAW,aAAa;IACzD,OAAO,CAAC,QAAQ,CAAC,QAAQ,CAAgB;IAEzC,MAAM,CAAC,IAAI,CAAC,QAAQ,EAAE,aAAa,GAAG,qBAAqB;gBAI/C,QAAQ,EAAE,aAAa;IAInC,IAAI,YAAY,IAAI,MAAM,CAEzB;IAED,SAAS,CAAC,SAAS,EAAE,MAAM,EAAE,GAAG,EAAE,MAAM,EAAE,QAAQ,EAAE,MAAM,EAAE,OAAO,EAAE,OAAO,GAAG,IAAI;IAKnF,SAAS,CACP,QAAQ,EAAE,CAAC,QAAQ,EAAE,MAAM,KAAK,IAAI,EACpC,OAAO,CAAC,EAAE;QAAE,MAAM,CAAC,EAAE,WAAW,CAAA;KAAE,GACjC,MAAM,IAAI;CAGd"}
|
|
@@ -0,0 +1,23 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Limit decorator that logs each sample to the console at debug level
|
|
3
|
+
* before forwarding to the delegate.
|
|
4
|
+
*/
|
|
5
|
+
export class TracingLimitDecorator {
|
|
6
|
+
delegate;
|
|
7
|
+
static wrap(delegate) {
|
|
8
|
+
return new TracingLimitDecorator(delegate);
|
|
9
|
+
}
|
|
10
|
+
constructor(delegate) {
|
|
11
|
+
this.delegate = delegate;
|
|
12
|
+
}
|
|
13
|
+
get currentLimit() {
|
|
14
|
+
return this.delegate.currentLimit;
|
|
15
|
+
}
|
|
16
|
+
addSample(startTime, rtt, inflight, didDrop) {
|
|
17
|
+
console.debug(`maxInFlight=${inflight} rtt=${rtt.toFixed(3)} ms`);
|
|
18
|
+
this.delegate.addSample(startTime, rtt, inflight, didDrop);
|
|
19
|
+
}
|
|
20
|
+
subscribe(consumer, options) {
|
|
21
|
+
return this.delegate.subscribe(consumer, options);
|
|
22
|
+
}
|
|
23
|
+
}
|
|
@@ -0,0 +1,85 @@
|
|
|
1
|
+
import type { MetricRegistry } from "../MetricRegistry.js";
import type { AdaptiveLimit } from "./StreamingLimit.js";
/**
 * Limiter based on TCP Vegas where the limit increases by alpha if the
 * queue_use is small (< alpha) and decreases by alpha if the queue_use is
 * large (> beta).
 *
 * Queue size is calculated using the formula:
 * queue_use = limit - BWE * RTTnoLoad = limit * (1 - RTTnoLoad / RTTactual)
 *
 * For traditional TCP Vegas alpha is typically 2-3 and beta is typically 4-6.
 * To allow for better growth and stability at higher limits we set
 * alpha = max(3, 10% of the current limit)
 * and
 * beta = max(6, 20% of the current limit)
 */
export interface VegasLimitOptions {
    /** Starting concurrency limit before any samples arrive. */
    initialLimit?: number;
    /** Hard upper bound the adaptive limit will never exceed. */
    maxConcurrency?: number;
    /** Smoothing factor applied when updating the estimated limit. */
    smoothing?: number;
    /**
     * Optional strategy hooks that control threshold and growth/decay behavior.
     */
    policy?: {
        /**
         * Function to compute the alpha threshold as a function of the current
         * estimated limit. Alpha is the lower threshold for the estimated queue
         * size; below it the limit is increased aggressively.
         */
        alpha?(limit: number): number;
        /**
         * Function to compute the beta threshold as a function of the current
         * estimated limit. Beta is the upper threshold; above it the limit is
         * decreased.
         */
        beta?(limit: number): number;
        /**
         * Threshold below which the limit is increased by beta (aggressive).
         */
        threshold?(limit: number): number;
        /**
         * Compute the new limit when increasing.
         */
        increase?(limit: number): number;
        /**
         * Compute the new limit when decreasing.
         */
        decrease?(limit: number): number;
    };
    /**
     * The limiter will probe for a new noload RTT every
     * probeMultiplier * current_limit iterations. Default value is 30.
     */
    probeMultiplier?: number;
    /** Optional registry used to publish limiter metrics. */
    metricRegistry?: MetricRegistry;
}
/** The fully-resolved policy: every hook present (defaults filled in). */
export type VegasLimitPolicy = Required<NonNullable<VegasLimitOptions["policy"]>>;
export declare class VegasLimit implements AdaptiveLimit {
    // Last limit published to subscribers.
    private _limit;
    // Subscribers notified when the published limit changes.
    private readonly limitListeners;
    /** Estimated concurrency limit based on our algorithm */
    private estimatedLimit;
    // Smallest RTT observed — the "no load" round-trip-time baseline.
    private rttNoload;
    /** Maximum allowed limit providing an upper bound failsafe */
    private readonly maxLimit;
    // Smoothing factor from the options (see VegasLimitOptions.smoothing).
    private readonly smoothing;
    // Resolved policy hooks (alpha/beta/threshold/increase/decrease).
    private readonly policy;
    // Metric hook fed with RTT samples — presumably via metricRegistry; see implementation.
    private readonly rttSampleListener;
    // See VegasLimitOptions.probeMultiplier (default 30).
    private readonly probeMultiplier;
    // Samples seen since the last noload-RTT probe.
    private probeCount;
    // Randomized offset so probes don't synchronize across instances.
    private probeJitter;
    constructor(options?: VegasLimitOptions);
    private resetProbeJitter;
    private shouldProbe;
    /** Feed one completed-request sample into the Vegas algorithm. */
    addSample(startTime: number, rtt: number, inflight: number, didDrop: boolean): void;
    /** Current adaptive concurrency limit. */
    get currentLimit(): number;
    private applyNewLimit;
    /**
     * Subscribes to limit changes. Pass `options.signal` to auto-unsubscribe
     * on abort; the returned function also unsubscribes.
     */
    subscribe(consumer: (newLimit: number) => void, options?: {
        signal?: AbortSignal;
    }): () => void;
    private computeNextLimit;
    private updateEstimatedLimit;
    toString(): string;
}
//# sourceMappingURL=VegasLimit.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"VegasLimit.d.ts","sourceRoot":"","sources":["../../src/limit/VegasLimit.ts"],"names":[],"mappings":"AAEA,OAAO,KAAK,EAAsB,cAAc,EAAE,MAAM,sBAAsB,CAAC;AAE/E,OAAO,KAAK,EAAE,aAAa,EAAE,MAAM,qBAAqB,CAAC;AAezD;;;;;;;;;;;;;GAaG;AACH,MAAM,WAAW,iBAAiB;IAChC,YAAY,CAAC,EAAE,MAAM,CAAC;IACtB,cAAc,CAAC,EAAE,MAAM,CAAC;IACxB,SAAS,CAAC,EAAE,MAAM,CAAC;IAEnB;;OAEG;IACH,MAAM,CAAC,EAAE;QACP;;;;WAIG;QACH,KAAK,CAAC,CAAC,KAAK,EAAE,MAAM,GAAG,MAAM,CAAC;QAE9B;;;;WAIG;QACH,IAAI,CAAC,CAAC,KAAK,EAAE,MAAM,GAAG,MAAM,CAAC;QAE7B;;WAEG;QACH,SAAS,CAAC,CAAC,KAAK,EAAE,MAAM,GAAG,MAAM,CAAC;QAElC;;WAEG;QACH,QAAQ,CAAC,CAAC,KAAK,EAAE,MAAM,GAAG,MAAM,CAAC;QAEjC;;WAEG;QACH,QAAQ,CAAC,CAAC,KAAK,EAAE,MAAM,GAAG,MAAM,CAAC;KAClC,CAAC;IAEF;;;OAGG;IACH,eAAe,CAAC,EAAE,MAAM,CAAC;IAEzB,cAAc,CAAC,EAAE,cAAc,CAAC;CACjC;AAED,MAAM,MAAM,gBAAgB,GAAG,QAAQ,CAAC,WAAW,CAAC,iBAAiB,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC;AAElF,qBAAa,UAAW,YAAW,aAAa;IAC9C,OAAO,CAAC,MAAM,CAAS;IACvB,OAAO,CAAC,QAAQ,CAAC,cAAc,CAAqB;IAEpD,yDAAyD;IACzD,OAAO,CAAC,cAAc,CAAS;IAE/B,OAAO,CAAC,SAAS,CAAK;IAEtB,8DAA8D;IAC9D,OAAO,CAAC,QAAQ,CAAC,QAAQ,CAAS;IAElC,OAAO,CAAC,QAAQ,CAAC,SAAS,CAAS;IACnC,OAAO,CAAC,QAAQ,CAAC,MAAM,CAAmB;IAC1C,OAAO,CAAC,QAAQ,CAAC,iBAAiB,CAAqB;IACvD,OAAO,CAAC,QAAQ,CAAC,eAAe,CAAS;IACzC,OAAO,CAAC,UAAU,CAAK;IACvB,OAAO,CAAC,WAAW,CAAS;gBAEhB,OAAO,GAAE,iBAAsB;IAuB3C,OAAO,CAAC,gBAAgB;IAKxB,OAAO,CAAC,WAAW;IAInB,SAAS,CACP,SAAS,EAAE,MAAM,EACjB,GAAG,EAAE,MAAM,EACX,QAAQ,EAAE,MAAM,EAChB,OAAO,EAAE,OAAO,GACf,IAAI;IAIP,IAAI,YAAY,IAAI,MAAM,CAEzB;IAED,OAAO,CAAC,aAAa;IAOrB,SAAS,CACP,QAAQ,EAAE,CAAC,QAAQ,EAAE,MAAM,KAAK,IAAI,EACpC,OAAO,GAAE;QAAE,MAAM,CAAC,EAAE,WAAW,CAAA;KAAO,GACrC,MAAM,IAAI;IAIb,OAAO,CAAC,gBAAgB;IA6BxB,OAAO,CAAC,oBAAoB;IA0C5B,QAAQ,IAAI,MAAM;CAGnB"}
|