@stimulcross/rate-limiter 0.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.editorconfig +21 -0
- package/.github/workflows/node.yml +87 -0
- package/.husky/commit-msg +1 -0
- package/.husky/pre-commit +1 -0
- package/.megaignore +8 -0
- package/.prettierignore +3 -0
- package/LICENSE +21 -0
- package/README.md +7 -0
- package/commitlint.config.js +8 -0
- package/eslint.config.js +65 -0
- package/lint-staged.config.js +4 -0
- package/package.json +89 -0
- package/prettier.config.cjs +1 -0
- package/src/core/cancellable.ts +4 -0
- package/src/core/clock.ts +9 -0
- package/src/core/decision.ts +27 -0
- package/src/core/rate-limit-policy.ts +15 -0
- package/src/core/rate-limiter-status.ts +14 -0
- package/src/core/rate-limiter.ts +37 -0
- package/src/core/state-storage.ts +51 -0
- package/src/enums/rate-limit-error-code.ts +29 -0
- package/src/errors/custom.error.ts +14 -0
- package/src/errors/invalid-cost.error.ts +33 -0
- package/src/errors/rate-limit.error.ts +91 -0
- package/src/errors/rate-limiter-destroyed.error.ts +8 -0
- package/src/index.ts +11 -0
- package/src/interfaces/rate-limiter-options.ts +84 -0
- package/src/interfaces/rate-limiter-queue-options.ts +45 -0
- package/src/interfaces/rate-limiter-run-options.ts +58 -0
- package/src/limiters/abstract-rate-limiter.ts +206 -0
- package/src/limiters/composite.policy.ts +102 -0
- package/src/limiters/fixed-window/fixed-window.limiter.ts +121 -0
- package/src/limiters/fixed-window/fixed-window.options.ts +29 -0
- package/src/limiters/fixed-window/fixed-window.policy.ts +159 -0
- package/src/limiters/fixed-window/fixed-window.state.ts +10 -0
- package/src/limiters/fixed-window/fixed-window.status.ts +46 -0
- package/src/limiters/fixed-window/index.ts +4 -0
- package/src/limiters/generic-cell/generic-cell.limiter.ts +108 -0
- package/src/limiters/generic-cell/generic-cell.options.ts +23 -0
- package/src/limiters/generic-cell/generic-cell.policy.ts +115 -0
- package/src/limiters/generic-cell/generic-cell.state.ts +8 -0
- package/src/limiters/generic-cell/generic-cell.status.ts +54 -0
- package/src/limiters/generic-cell/index.ts +4 -0
- package/src/limiters/http-response-based/http-limit-info.extractor.ts +20 -0
- package/src/limiters/http-response-based/http-limit.info.ts +41 -0
- package/src/limiters/http-response-based/http-response-based-limiter.options.ts +18 -0
- package/src/limiters/http-response-based/http-response-based-limiter.state.ts +13 -0
- package/src/limiters/http-response-based/http-response-based-limiter.status.ts +74 -0
- package/src/limiters/http-response-based/http-response-based.limiter.ts +512 -0
- package/src/limiters/http-response-based/index.ts +6 -0
- package/src/limiters/leaky-bucket/index.ts +4 -0
- package/src/limiters/leaky-bucket/leaky-bucket.limiter.ts +105 -0
- package/src/limiters/leaky-bucket/leaky-bucket.options.ts +23 -0
- package/src/limiters/leaky-bucket/leaky-bucket.policy.ts +134 -0
- package/src/limiters/leaky-bucket/leaky-bucket.state.ts +9 -0
- package/src/limiters/leaky-bucket/leaky-bucket.status.ts +36 -0
- package/src/limiters/sliding-window-counter/index.ts +7 -0
- package/src/limiters/sliding-window-counter/sliding-window-counter.limiter.ts +76 -0
- package/src/limiters/sliding-window-counter/sliding-window-counter.options.ts +20 -0
- package/src/limiters/sliding-window-counter/sliding-window-counter.policy.ts +167 -0
- package/src/limiters/sliding-window-counter/sliding-window-counter.state.ts +10 -0
- package/src/limiters/sliding-window-counter/sliding-window-counter.status.ts +53 -0
- package/src/limiters/sliding-window-log/index.ts +4 -0
- package/src/limiters/sliding-window-log/sliding-window-log.limiter.ts +65 -0
- package/src/limiters/sliding-window-log/sliding-window-log.options.ts +20 -0
- package/src/limiters/sliding-window-log/sliding-window-log.policy.ts +166 -0
- package/src/limiters/sliding-window-log/sliding-window-log.state.ts +19 -0
- package/src/limiters/sliding-window-log/sliding-window-log.status.ts +44 -0
- package/src/limiters/token-bucket/index.ts +4 -0
- package/src/limiters/token-bucket/token-bucket.limiter.ts +110 -0
- package/src/limiters/token-bucket/token-bucket.options.ts +17 -0
- package/src/limiters/token-bucket/token-bucket.policy.ts +155 -0
- package/src/limiters/token-bucket/token-bucket.state.ts +10 -0
- package/src/limiters/token-bucket/token-bucket.status.ts +36 -0
- package/src/runtime/default-clock.ts +8 -0
- package/src/runtime/execution-tickets.ts +34 -0
- package/src/runtime/in-memory-state-store.ts +135 -0
- package/src/runtime/rate-limiter.executor.ts +286 -0
- package/src/runtime/semaphore.ts +31 -0
- package/src/runtime/task.ts +141 -0
- package/src/types/limit-behavior.ts +8 -0
- package/src/utils/generate-random-string.ts +16 -0
- package/src/utils/promise-with-resolvers.ts +23 -0
- package/src/utils/sanitize-error.ts +4 -0
- package/src/utils/sanitize-priority.ts +22 -0
- package/src/utils/validate-cost.ts +16 -0
- package/tests/integration/limiters/fixed-window.limiter.spec.ts +371 -0
- package/tests/integration/limiters/generic-cell.limiter.spec.ts +361 -0
- package/tests/integration/limiters/http-response-based.limiter.spec.ts +833 -0
- package/tests/integration/limiters/leaky-bucket.spec.ts +357 -0
- package/tests/integration/limiters/sliding-window-counter.limiter.spec.ts +175 -0
- package/tests/integration/limiters/sliding-window-log.spec.ts +185 -0
- package/tests/integration/limiters/token-bucket.limiter.spec.ts +363 -0
- package/tests/tsconfig.json +4 -0
- package/tests/unit/policies/composite.policy.spec.ts +244 -0
- package/tests/unit/policies/fixed-window.policy.spec.ts +260 -0
- package/tests/unit/policies/generic-cell.policy.spec.ts +178 -0
- package/tests/unit/policies/leaky-bucket.policy.spec.ts +215 -0
- package/tests/unit/policies/sliding-window-counter.policy.spec.ts +209 -0
- package/tests/unit/policies/sliding-window-log.policy.spec.ts +285 -0
- package/tests/unit/policies/token-bucket.policy.spec.ts +371 -0
- package/tests/unit/runtime/execution-tickets.spec.ts +121 -0
- package/tests/unit/runtime/in-memory-state-store.spec.ts +238 -0
- package/tests/unit/runtime/rate-limiter.executor.spec.ts +353 -0
- package/tests/unit/runtime/semaphore.spec.ts +98 -0
- package/tests/unit/runtime/task.spec.ts +182 -0
- package/tests/unit/utils/generate-random-string.spec.ts +51 -0
- package/tests/unit/utils/promise-with-resolvers.spec.ts +57 -0
- package/tests/unit/utils/sanitize-priority.spec.ts +46 -0
- package/tests/unit/utils/validate-cost.spec.ts +48 -0
- package/tsconfig.json +14 -0
- package/vitest.config.js +22 -0
|
/**
 * The status of the HTTP Response Based rate limiter.
 *
 * This interface represents rate limit information extracted from HTTP response headers,
 * such as the standardized `RateLimit-*` headers (IETF "RateLimit header fields for HTTP"
 * draft; note that RFC 6585 defines the 429 status code itself, not these headers)
 * or custom headers provided by APIs (`X-RateLimit-*`).
 *
 * @remarks
 * The status is updated after each API response and reflects the server-side rate limit state.
 * Can be useful to check how many requests remain before hitting the rate limit.
 */
export interface HttpResponseBasedLimiterStatus {
	/**
	 * Indicates whether the rate limiter is currently probing for the server's limit state.
	 *
	 * This is `true` when the rate limiter is waiting for the first response from the server
	 * to extract rate limit headers.
	 */
	isProbing: boolean;

	/**
	 * Indicates whether the rate limiter is in unlimited mode.
	 *
	 * This is `true` if a server did not send any rate limit headers.
	 *
	 * If this is `true`, the `lastKnownLimit`, `lastKnownRemaining`, and `lastKnownResetAt` values are `null`.
	 */
	isUnlimited: boolean;

	/**
	 * The number of requests remaining in the current rate limit window.
	 *
	 * @remarks
	 * This value is extracted from response headers such as `X-RateLimit-Remaining`
	 * or `RateLimit-Remaining`. It decrements with each request and resets when
	 * the time window expires.
	 *
	 * When this reaches 0, later requests may be delayed or rejected until the reset time.
	 */
	lastKnownRemaining: number | null;

	/**
	 * The maximum number of requests allowed in the current rate limit window.
	 *
	 * @remarks
	 * This value is extracted from response headers such as `X-RateLimit-Limit`
	 * or `RateLimit-Limit`. It represents the total quota allocated by the API
	 * and typically remains constant across requests within the same window.
	 */
	lastKnownLimit: number | null;

	/**
	 * The timestamp (in milliseconds since Unix epoch) when the rate limit window resets.
	 *
	 * `null` if the reset time is not known or not applicable.
	 *
	 * @remarks
	 * This value is extracted from response headers such as `X-RateLimit-Reset`
	 * or `RateLimit-Reset`. The exact semantics depend on the API specification:
	 *
	 * - **Full window reset**: When the entire rate limit quota is restored
	 * - **Next request availability**: When the next single request slot becomes available
	 * - **Sliding window**: When the oldest request in the window expires
	 *
	 * Always refer to your API's documentation to understand the reset behavior.
	 */
	lastKnownResetAt: number | null;

	/**
	 * The timestamp (in milliseconds since Unix epoch) when the status was last synced with the server.
	 *
	 * `null` if the status has never been synced.
	 */
	lastSyncedAt: number | null;
}
@@ -0,0 +1,512 @@
|
|
|
1
|
+
import { Priority } from '@stimulcross/ds-policy-priority-queue';
|
|
2
|
+
import { createLogger, type Logger, LogLevel } from '@stimulcross/logger';
|
|
3
|
+
import { type HttpLimitInfoExtractor } from './http-limit-info.extractor.js';
|
|
4
|
+
import { type HttpLimitInfo } from './http-limit.info.js';
|
|
5
|
+
import { type HttpResponseBasedLimiterOptions } from './http-response-based-limiter.options.js';
|
|
6
|
+
import { type HttpResponseBasedLimiterState } from './http-response-based-limiter.state.js';
|
|
7
|
+
import { type HttpResponseBasedLimiterStatus } from './http-response-based-limiter.status.js';
|
|
8
|
+
import { type Clock } from '../../core/clock.js';
|
|
9
|
+
import { type RateLimiter } from '../../core/rate-limiter.js';
|
|
10
|
+
import { type StateStorage } from '../../core/state-storage.js';
|
|
11
|
+
import { RateLimitErrorCode } from '../../enums/rate-limit-error-code.js';
|
|
12
|
+
import { RateLimitError } from '../../errors/rate-limit.error.js';
|
|
13
|
+
import { RateLimiterDestroyedError } from '../../errors/rate-limiter-destroyed.error.js';
|
|
14
|
+
import { type IdGenerator, type KeyResolver } from '../../interfaces/rate-limiter-options.js';
|
|
15
|
+
import { type RateLimiterRunOptions } from '../../interfaces/rate-limiter-run-options.js';
|
|
16
|
+
import { defaultClock } from '../../runtime/default-clock.js';
|
|
17
|
+
import { InMemoryStateStore } from '../../runtime/in-memory-state-store.js';
|
|
18
|
+
import { RateLimiterExecutor } from '../../runtime/rate-limiter.executor.js';
|
|
19
|
+
import { type LimitBehavior } from '../../types/limit-behavior.js';
|
|
20
|
+
import { generateRandomString } from '../../utils/generate-random-string.js';
|
|
21
|
+
import { sanitizeError } from '../../utils/sanitize-error.js';
|
|
22
|
+
|
|
23
|
+
const TOO_MANY_REQUESTS_ERROR_CODE = 429;
|
|
24
|
+
|
|
25
|
+
const enum TokenReservationAction {
|
|
26
|
+
Probe = 1,
|
|
27
|
+
Follow = 2,
|
|
28
|
+
Wait = 3,
|
|
29
|
+
}
|
|
30
|
+
|
|
31
|
+
interface RequestContext {
|
|
32
|
+
readonly id: string;
|
|
33
|
+
readonly key: string;
|
|
34
|
+
readonly signal?: AbortSignal;
|
|
35
|
+
startedAt: number;
|
|
36
|
+
isProbing: boolean;
|
|
37
|
+
}
|
|
38
|
+
|
|
39
|
+
/**
|
|
40
|
+
* The options for single request execution.
|
|
41
|
+
*/
|
|
42
|
+
export type HttpHeadersLimiterRunOptions = Omit<RateLimiterRunOptions, 'cost'>;
|
|
43
|
+
|
|
44
|
+
/**
|
|
45
|
+
* HTTP Response Based rate limiter.
|
|
46
|
+
*
|
|
47
|
+
* Designed for outbound requests to safely respect dynamic third-party API limits.
|
|
48
|
+
*
|
|
49
|
+
* This limiter synchronizes its internal state by extracting rate limit headers directly from the HTTP responses.
|
|
50
|
+
*
|
|
51
|
+
* Key features:
|
|
52
|
+
* - **Dynamic synchronization** - updates local capacity and reset schedules based on actual server responses
|
|
53
|
+
* - **Probing** - prevents 429 floods by pausing queued requests while a single "probe" fetches the latest limits
|
|
54
|
+
* - **Queueing & overflow** - optionally enqueues excess requests up to a maximum allowed overflow capacity
|
|
55
|
+
* - **Concurrency** - limits how many requests can be executed simultaneously
|
|
56
|
+
* - **Priority** - supports task priorities (with fairness and custom policy) to execute critical requests first
|
|
57
|
+
* - **Cancellation** - supports `AbortSignal` to safely remove pending requests from the queue
|
|
58
|
+
* - **Expiration** - automatically drops queued requests that wait longer than the allowed `maxWaitMs`
|
|
59
|
+
*/
|
|
60
|
+
export class HttpResponseBasedLimiter<TResponse> implements RateLimiter<HttpResponseBasedLimiterStatus> {
|
|
61
|
+
private readonly _logger: Logger;
|
|
62
|
+
private readonly _clock: Clock;
|
|
63
|
+
private readonly _store: StateStorage<HttpResponseBasedLimiterState>;
|
|
64
|
+
private readonly _executor: RateLimiterExecutor;
|
|
65
|
+
|
|
66
|
+
private readonly _pendingSyncs = new Map<string, { promise: Promise<void>; resolve: () => void }>();
|
|
67
|
+
private readonly _getStoreKey: KeyResolver;
|
|
68
|
+
private readonly _generateId: IdGenerator;
|
|
69
|
+
private readonly _extractLimitInfo: HttpLimitInfoExtractor<TResponse>;
|
|
70
|
+
private readonly _defaultLimitBehavior: LimitBehavior;
|
|
71
|
+
private readonly _maxWaitMs: number | undefined;
|
|
72
|
+
private readonly _fallbackResetDelayMs: number;
|
|
73
|
+
|
|
74
|
+
private _isDestroyed = false;
|
|
75
|
+
|
|
76
|
+
constructor(options: HttpResponseBasedLimiterOptions<TResponse>) {
|
|
77
|
+
this._logger = createLogger(new.target.name, { minLevel: 'WARNING', ...options.loggerOptions });
|
|
78
|
+
this._clock = options.clock ?? defaultClock;
|
|
79
|
+
this._store = options.store ?? new InMemoryStateStore<HttpResponseBasedLimiterState>(this._clock);
|
|
80
|
+
this._executor = new RateLimiterExecutor(this._logger, this._clock, options.queue);
|
|
81
|
+
|
|
82
|
+
this._getStoreKey =
|
|
83
|
+
typeof options.key === 'function'
|
|
84
|
+
? options.key
|
|
85
|
+
: (key?: string): string => (key ? `limiter:${key}` : 'limiter');
|
|
86
|
+
this._generateId = options.idGenerator ?? generateRandomString;
|
|
87
|
+
|
|
88
|
+
this._extractLimitInfo = options.limitInfoExtractor;
|
|
89
|
+
this._defaultLimitBehavior = options.limitBehavior ?? 'reject';
|
|
90
|
+
this._maxWaitMs = options.queue?.maxWaitMs;
|
|
91
|
+
this._fallbackResetDelayMs = options.fallbackResetDelayMs ?? 60_000;
|
|
92
|
+
}
|
|
93
|
+
|
|
94
|
+
public async getStatus(key?: string): Promise<HttpResponseBasedLimiterStatus> {
|
|
95
|
+
const storeKey = this._getStoreKey(key);
|
|
96
|
+
const state = await this._store.get(storeKey);
|
|
97
|
+
|
|
98
|
+
if (state?.isUnlimited) {
|
|
99
|
+
return {
|
|
100
|
+
isProbing: false,
|
|
101
|
+
isUnlimited: true,
|
|
102
|
+
lastKnownLimit: null,
|
|
103
|
+
lastKnownRemaining: null,
|
|
104
|
+
lastKnownResetAt: null,
|
|
105
|
+
lastSyncedAt: state.lastSyncedAt,
|
|
106
|
+
};
|
|
107
|
+
}
|
|
108
|
+
|
|
109
|
+
return {
|
|
110
|
+
isProbing: state?.isProbing ?? false,
|
|
111
|
+
isUnlimited: false,
|
|
112
|
+
lastKnownLimit: state?.lastKnownLimit ?? null,
|
|
113
|
+
lastKnownRemaining: state?.lastKnownRemaining ?? null,
|
|
114
|
+
lastKnownResetAt: state?.lastKnownResetAt ?? null,
|
|
115
|
+
lastSyncedAt: state?.lastSyncedAt ?? null,
|
|
116
|
+
};
|
|
117
|
+
}
|
|
118
|
+
|
|
119
|
+
public async run<T = TResponse>(fn: () => T | Promise<T>, options: HttpHeadersLimiterRunOptions = {}): Promise<T> {
|
|
120
|
+
const ctx: RequestContext = {
|
|
121
|
+
id: options.id ?? this._generateId(),
|
|
122
|
+
key: this._getStoreKey(options.key),
|
|
123
|
+
signal: options.signal,
|
|
124
|
+
startedAt: 0,
|
|
125
|
+
isProbing: false,
|
|
126
|
+
};
|
|
127
|
+
|
|
128
|
+
this._ensureCanExecute(ctx);
|
|
129
|
+
|
|
130
|
+
const { priority, limitBehavior, maxWaitMs } = options;
|
|
131
|
+
const finalLimitBehavior = limitBehavior ?? this._defaultLimitBehavior;
|
|
132
|
+
const finalMaxWaitMs = maxWaitMs ?? this._maxWaitMs;
|
|
133
|
+
const expiresAt = finalMaxWaitMs ? this._clock.now() + finalMaxWaitMs : undefined;
|
|
134
|
+
|
|
135
|
+
let currentRunAt = this._clock.now();
|
|
136
|
+
|
|
137
|
+
while (true) {
|
|
138
|
+
this._ensureCanExecute(ctx, priority);
|
|
139
|
+
|
|
140
|
+
if (ctx.signal?.aborted) {
|
|
141
|
+
throw new RateLimitError(RateLimitErrorCode.Cancelled);
|
|
142
|
+
}
|
|
143
|
+
|
|
144
|
+
try {
|
|
145
|
+
return await this._executor.execute<T>(() => this._executeSingleRequest<T>(fn, ctx), currentRunAt, {
|
|
146
|
+
id: ctx.id,
|
|
147
|
+
key: ctx.key,
|
|
148
|
+
expiresAt,
|
|
149
|
+
priority,
|
|
150
|
+
signal: ctx.signal,
|
|
151
|
+
});
|
|
152
|
+
} catch (e) {
|
|
153
|
+
if (e instanceof RateLimitError && e.code === RateLimitErrorCode.LimitExceeded) {
|
|
154
|
+
if (finalLimitBehavior === 'reject') {
|
|
155
|
+
throw e;
|
|
156
|
+
}
|
|
157
|
+
|
|
158
|
+
currentRunAt = e.retryAt ?? this._clock.now() + this._fallbackResetDelayMs;
|
|
159
|
+
|
|
160
|
+
this._shouldLogDebug &&
|
|
161
|
+
this._logger.debug(`[REQUEUE] [id: ${ctx.id}, key: ${ctx.key}] - Requeued to ${currentRunAt}`);
|
|
162
|
+
|
|
163
|
+
continue;
|
|
164
|
+
}
|
|
165
|
+
|
|
166
|
+
throw e;
|
|
167
|
+
}
|
|
168
|
+
}
|
|
169
|
+
}
|
|
170
|
+
|
|
171
|
+
public async clear(key?: string): Promise<void> {
|
|
172
|
+
this._executor.clear();
|
|
173
|
+
const storeKey = this._getStoreKey(key);
|
|
174
|
+
|
|
175
|
+
await this._store.acquireLock?.(storeKey);
|
|
176
|
+
|
|
177
|
+
try {
|
|
178
|
+
await this._store.delete(storeKey);
|
|
179
|
+
} finally {
|
|
180
|
+
await this._store.releaseLock?.(storeKey);
|
|
181
|
+
}
|
|
182
|
+
}
|
|
183
|
+
|
|
184
|
+
public async destroy(): Promise<void> {
|
|
185
|
+
if (this._isDestroyed) {
|
|
186
|
+
return;
|
|
187
|
+
}
|
|
188
|
+
|
|
189
|
+
this._isDestroyed = true;
|
|
190
|
+
this._executor.clear();
|
|
191
|
+
|
|
192
|
+
for (const pending of this._pendingSyncs.values()) {
|
|
193
|
+
pending.resolve();
|
|
194
|
+
}
|
|
195
|
+
|
|
196
|
+
this._pendingSyncs.clear();
|
|
197
|
+
|
|
198
|
+
await this._store.destroy?.();
|
|
199
|
+
}
|
|
200
|
+
|
|
201
|
+
private get _shouldLogDebug(): boolean {
|
|
202
|
+
return this._logger.minLevel >= LogLevel.DEBUG;
|
|
203
|
+
}
|
|
204
|
+
|
|
205
|
+
private async _executeSingleRequest<T>(fn: () => T | Promise<T>, ctx: RequestContext): Promise<T> {
|
|
206
|
+
while (true) {
|
|
207
|
+
await this._waitForSync(ctx);
|
|
208
|
+
|
|
209
|
+
ctx.startedAt = this._clock.now();
|
|
210
|
+
const action = await this._reserveLocalToken(ctx);
|
|
211
|
+
|
|
212
|
+
if (action === TokenReservationAction.Wait) {
|
|
213
|
+
continue;
|
|
214
|
+
}
|
|
215
|
+
|
|
216
|
+
ctx.isProbing = action === TokenReservationAction.Probe;
|
|
217
|
+
break;
|
|
218
|
+
}
|
|
219
|
+
|
|
220
|
+
try {
|
|
221
|
+
let response: TResponse | null = null;
|
|
222
|
+
let responseError: Error | null = null;
|
|
223
|
+
|
|
224
|
+
try {
|
|
225
|
+
response = (await fn()) as TResponse;
|
|
226
|
+
} catch (e) {
|
|
227
|
+
responseError = sanitizeError(e);
|
|
228
|
+
}
|
|
229
|
+
|
|
230
|
+
const extractFinishedAt = this._clock.now();
|
|
231
|
+
const limitInfo = this._extractLimitInfo(response, responseError, extractFinishedAt);
|
|
232
|
+
|
|
233
|
+
await this._processLimitHeaders(ctx, limitInfo, responseError, extractFinishedAt);
|
|
234
|
+
|
|
235
|
+
if (responseError) {
|
|
236
|
+
throw responseError;
|
|
237
|
+
}
|
|
238
|
+
|
|
239
|
+
return response as T;
|
|
240
|
+
} finally {
|
|
241
|
+
if (ctx.isProbing) {
|
|
242
|
+
this._resolvePendingSync(ctx);
|
|
243
|
+
}
|
|
244
|
+
}
|
|
245
|
+
}
|
|
246
|
+
|
|
247
|
+
private async _waitForSync(ctx: RequestContext): Promise<void> {
|
|
248
|
+
while (this._pendingSyncs.has(ctx.key)) {
|
|
249
|
+
if (ctx.signal?.aborted) {
|
|
250
|
+
throw new RateLimitError(RateLimitErrorCode.Cancelled);
|
|
251
|
+
}
|
|
252
|
+
|
|
253
|
+
const pending = this._pendingSyncs.get(ctx.key);
|
|
254
|
+
|
|
255
|
+
if (!pending) {
|
|
256
|
+
break;
|
|
257
|
+
}
|
|
258
|
+
|
|
259
|
+
this._shouldLogDebug &&
|
|
260
|
+
this._logger.debug(`[WAIT] [id: ${ctx.id}, key: ${ctx.key}] - Waiting for probe to sync state`);
|
|
261
|
+
|
|
262
|
+
await new Promise<void>((resolve, reject) => {
|
|
263
|
+
const onAbort = (): void => reject(new RateLimitError(RateLimitErrorCode.Cancelled));
|
|
264
|
+
|
|
265
|
+
if (ctx.signal) {
|
|
266
|
+
ctx.signal.addEventListener('abort', onAbort, { once: true });
|
|
267
|
+
}
|
|
268
|
+
|
|
269
|
+
void pending.promise.then(() => {
|
|
270
|
+
if (ctx.signal) {
|
|
271
|
+
ctx.signal.removeEventListener('abort', onAbort);
|
|
272
|
+
}
|
|
273
|
+
|
|
274
|
+
resolve();
|
|
275
|
+
});
|
|
276
|
+
});
|
|
277
|
+
}
|
|
278
|
+
}
|
|
279
|
+
|
|
280
|
+
private async _reserveLocalToken(ctx: RequestContext): Promise<TokenReservationAction> {
|
|
281
|
+
await this._store.acquireLock?.(ctx.key);
|
|
282
|
+
|
|
283
|
+
try {
|
|
284
|
+
const state = await this._store.get(ctx.key);
|
|
285
|
+
|
|
286
|
+
if (state?.isUnlimited) {
|
|
287
|
+
return TokenReservationAction.Follow;
|
|
288
|
+
}
|
|
289
|
+
|
|
290
|
+
const lastKnownRemaining = state?.lastKnownRemaining ?? null;
|
|
291
|
+
const lastKnownResetAt = state?.lastKnownResetAt ?? Infinity;
|
|
292
|
+
const lastKnownLimit = state?.lastKnownLimit ?? 1;
|
|
293
|
+
|
|
294
|
+
const hasLocalProbe = this._pendingSyncs.has(ctx.key);
|
|
295
|
+
|
|
296
|
+
if (!state || ctx.startedAt >= lastKnownResetAt) {
|
|
297
|
+
this._setupLocalProbeLock(ctx.key);
|
|
298
|
+
|
|
299
|
+
const probeState: HttpResponseBasedLimiterState = {
|
|
300
|
+
isProbing: true,
|
|
301
|
+
isUnlimited: false,
|
|
302
|
+
lastKnownLimit,
|
|
303
|
+
lastKnownRemaining: 0,
|
|
304
|
+
lastKnownResetAt: ctx.startedAt + this._fallbackResetDelayMs,
|
|
305
|
+
lastSyncedAt: ctx.startedAt,
|
|
306
|
+
};
|
|
307
|
+
|
|
308
|
+
await this._store.set(ctx.key, probeState, this._fallbackResetDelayMs);
|
|
309
|
+
|
|
310
|
+
this._shouldLogDebug &&
|
|
311
|
+
this._logger.debug(
|
|
312
|
+
`[PROBE] [id: ${ctx.id}, key: ${ctx.key}] - Probing API for limits - ${this._getDebugStateString(probeState)}`,
|
|
313
|
+
);
|
|
314
|
+
|
|
315
|
+
return TokenReservationAction.Probe;
|
|
316
|
+
}
|
|
317
|
+
|
|
318
|
+
if (state.isProbing) {
|
|
319
|
+
if (hasLocalProbe) {
|
|
320
|
+
return TokenReservationAction.Wait;
|
|
321
|
+
}
|
|
322
|
+
|
|
323
|
+
throw new RateLimitError(RateLimitErrorCode.LimitExceeded, lastKnownResetAt);
|
|
324
|
+
}
|
|
325
|
+
|
|
326
|
+
if (lastKnownRemaining !== null && lastKnownRemaining <= 0) {
|
|
327
|
+
throw new RateLimitError(RateLimitErrorCode.LimitExceeded, lastKnownResetAt);
|
|
328
|
+
}
|
|
329
|
+
|
|
330
|
+
const newState: HttpResponseBasedLimiterState = {
|
|
331
|
+
isProbing: false,
|
|
332
|
+
isUnlimited: state.isUnlimited,
|
|
333
|
+
lastKnownLimit,
|
|
334
|
+
lastKnownRemaining: (lastKnownRemaining ?? 1) - 1,
|
|
335
|
+
lastKnownResetAt,
|
|
336
|
+
lastSyncedAt: state.lastSyncedAt,
|
|
337
|
+
};
|
|
338
|
+
|
|
339
|
+
const ttl = Math.max(1000, lastKnownResetAt - ctx.startedAt);
|
|
340
|
+
await this._store.set(ctx.key, newState, ttl);
|
|
341
|
+
|
|
342
|
+
this._shouldLogDebug &&
|
|
343
|
+
this._logger.debug(
|
|
344
|
+
`[RSRV] [id: ${ctx.id}, key: ${ctx.key}] - Local state - ${this._getDebugStateString(newState)}`,
|
|
345
|
+
);
|
|
346
|
+
|
|
347
|
+
return TokenReservationAction.Follow;
|
|
348
|
+
} finally {
|
|
349
|
+
await this._store.releaseLock?.(ctx.key);
|
|
350
|
+
}
|
|
351
|
+
}
|
|
352
|
+
|
|
353
|
+
private _setupLocalProbeLock(key: string): void {
|
|
354
|
+
if (!this._pendingSyncs.has(key)) {
|
|
355
|
+
let resolveSync!: () => void;
|
|
356
|
+
|
|
357
|
+
const promise = new Promise<void>(resolve => {
|
|
358
|
+
resolveSync = resolve;
|
|
359
|
+
});
|
|
360
|
+
|
|
361
|
+
this._pendingSyncs.set(key, { promise, resolve: resolveSync });
|
|
362
|
+
}
|
|
363
|
+
}
|
|
364
|
+
|
|
365
|
+
private _resolvePendingSync(ctx: RequestContext): void {
|
|
366
|
+
const pending = this._pendingSyncs.get(ctx.key);
|
|
367
|
+
|
|
368
|
+
if (pending) {
|
|
369
|
+
this._shouldLogDebug && this._logger.debug(`[UNLOCK] [id: ${ctx.id}, key: ${ctx.key}] - Probing completed`);
|
|
370
|
+
|
|
371
|
+
pending.resolve();
|
|
372
|
+
this._pendingSyncs.delete(ctx.key);
|
|
373
|
+
}
|
|
374
|
+
}
|
|
375
|
+
|
|
376
|
+
private async _processLimitHeaders(
|
|
377
|
+
ctx: RequestContext,
|
|
378
|
+
limitInfo: HttpLimitInfo | null,
|
|
379
|
+
responseError: Error | null,
|
|
380
|
+
extractFinishedAt: number,
|
|
381
|
+
): Promise<void> {
|
|
382
|
+
if (limitInfo) {
|
|
383
|
+
await this._syncStateWithServer(ctx, limitInfo, extractFinishedAt);
|
|
384
|
+
|
|
385
|
+
if (limitInfo.statusCode === TOO_MANY_REQUESTS_ERROR_CODE) {
|
|
386
|
+
const resetAt = limitInfo.resetAt ?? 0;
|
|
387
|
+
const retryAt = extractFinishedAt >= resetAt ? extractFinishedAt + this._fallbackResetDelayMs : resetAt;
|
|
388
|
+
|
|
389
|
+
throw new RateLimitError(RateLimitErrorCode.LimitExceeded, retryAt);
|
|
390
|
+
}
|
|
391
|
+
} else if (responseError) {
|
|
392
|
+
if (ctx.isProbing) {
|
|
393
|
+
await this._rollbackProbingState(ctx);
|
|
394
|
+
}
|
|
395
|
+
} else {
|
|
396
|
+
await this._setUnlimited(ctx, extractFinishedAt);
|
|
397
|
+
}
|
|
398
|
+
}
|
|
399
|
+
|
|
400
|
+
private async _rollbackProbingState(ctx: RequestContext): Promise<void> {
|
|
401
|
+
await this._store.acquireLock?.(ctx.key);
|
|
402
|
+
|
|
403
|
+
try {
|
|
404
|
+
const state = await this._store.get(ctx.key);
|
|
405
|
+
|
|
406
|
+
if (state?.isProbing) {
|
|
407
|
+
await this._store.delete(ctx.key);
|
|
408
|
+
|
|
409
|
+
this._shouldLogDebug &&
|
|
410
|
+
this._logger.debug(
|
|
411
|
+
`[ROLLBACK] [id: ${ctx.id}, key: ${ctx.key}] - Probing failed, state cleared for next follower`,
|
|
412
|
+
);
|
|
413
|
+
}
|
|
414
|
+
} finally {
|
|
415
|
+
await this._store.releaseLock?.(ctx.key);
|
|
416
|
+
}
|
|
417
|
+
}
|
|
418
|
+
|
|
419
|
+
private async _syncStateWithServer(ctx: RequestContext, info: Partial<HttpLimitInfo>, now: number): Promise<void> {
|
|
420
|
+
await this._store.acquireLock?.(ctx.key);
|
|
421
|
+
|
|
422
|
+
try {
|
|
423
|
+
const currentState = await this._store.get(ctx.key);
|
|
424
|
+
|
|
425
|
+
if (currentState && (currentState.lastSyncedAt ?? 0) > ctx.startedAt) {
|
|
426
|
+
this._shouldLogDebug &&
|
|
427
|
+
this._logger.trace(
|
|
428
|
+
`[SYNC SKIP] [id: ${ctx.id}, key: ${ctx.key}] - Newer request already updated state - ${this._getDebugStateString(currentState)}`,
|
|
429
|
+
);
|
|
430
|
+
return;
|
|
431
|
+
}
|
|
432
|
+
|
|
433
|
+
const actualResetAt =
|
|
434
|
+
info.resetAt ??
|
|
435
|
+
(currentState?.isProbing ? undefined : currentState?.lastKnownResetAt) ??
|
|
436
|
+
now + this._fallbackResetDelayMs;
|
|
437
|
+
const isExhausted = info.statusCode === TOO_MANY_REQUESTS_ERROR_CODE || info.remaining === 0;
|
|
438
|
+
|
|
439
|
+
const newState: HttpResponseBasedLimiterState = {
|
|
440
|
+
isProbing: false,
|
|
441
|
+
isUnlimited: false,
|
|
442
|
+
lastKnownLimit: info.limit ?? currentState?.lastKnownLimit ?? 1,
|
|
443
|
+
lastKnownRemaining: isExhausted ? 0 : (info.remaining ?? currentState?.lastKnownRemaining ?? 1),
|
|
444
|
+
lastKnownResetAt: actualResetAt,
|
|
445
|
+
lastSyncedAt: ctx.startedAt,
|
|
446
|
+
};
|
|
447
|
+
|
|
448
|
+
const ttl = Math.max(1000, actualResetAt - now + 60_000);
|
|
449
|
+
await this._store.set(ctx.key, newState, ttl);
|
|
450
|
+
|
|
451
|
+
this._shouldLogDebug &&
|
|
452
|
+
this._logger.debug(`[SYNC] [id: ${ctx.id}, key: ${ctx.key}] - ${this._getDebugStateString(newState)}`);
|
|
453
|
+
} catch (e) {
|
|
454
|
+
this._logger.error(`[ERR] [id: ${ctx.id}, key: ${ctx.key}] - Failed to sync state with server limits}`, e);
|
|
455
|
+
} finally {
|
|
456
|
+
await this._store.releaseLock?.(ctx.key);
|
|
457
|
+
}
|
|
458
|
+
}
|
|
459
|
+
|
|
460
|
+
private async _setUnlimited(ctx: RequestContext, now: number): Promise<void> {
|
|
461
|
+
await this._store.acquireLock?.(ctx.key);
|
|
462
|
+
|
|
463
|
+
try {
|
|
464
|
+
const currentState = await this._store.get(ctx.key);
|
|
465
|
+
|
|
466
|
+
if (currentState && (currentState.lastSyncedAt ?? 0) > ctx.startedAt) {
|
|
467
|
+
this._shouldLogDebug &&
|
|
468
|
+
this._logger.debug(
|
|
469
|
+
`[SYNC SKIP] [id: ${ctx.id}, key: ${ctx.key}] - Newer request already updated state - ${this._getDebugStateString(currentState)}`,
|
|
470
|
+
);
|
|
471
|
+
return;
|
|
472
|
+
}
|
|
473
|
+
|
|
474
|
+
const ttl = this._fallbackResetDelayMs;
|
|
475
|
+
const unlimitedState: HttpResponseBasedLimiterState = {
|
|
476
|
+
isProbing: false,
|
|
477
|
+
isUnlimited: true,
|
|
478
|
+
lastKnownLimit: null,
|
|
479
|
+
lastKnownRemaining: null,
|
|
480
|
+
lastKnownResetAt: now + ttl,
|
|
481
|
+
lastSyncedAt: ctx.startedAt,
|
|
482
|
+
};
|
|
483
|
+
await this._store.set(ctx.key, unlimitedState, ttl);
|
|
484
|
+
|
|
485
|
+
this._shouldLogDebug &&
|
|
486
|
+
this._logger.debug(
|
|
487
|
+
`[UNLM] [id: ${ctx.id}, key: ${ctx.key}] - Set to unlimited - ${this._getDebugStateString(unlimitedState)}`,
|
|
488
|
+
);
|
|
489
|
+
} finally {
|
|
490
|
+
await this._store.releaseLock?.(ctx.key);
|
|
491
|
+
}
|
|
492
|
+
}
|
|
493
|
+
|
|
494
|
+
private _ensureCanExecute(ctx: RequestContext, priority?: Priority): void {
|
|
495
|
+
if (this._isDestroyed) {
|
|
496
|
+
throw new RateLimiterDestroyedError();
|
|
497
|
+
}
|
|
498
|
+
|
|
499
|
+
if (this._executor.isQueueFull) {
|
|
500
|
+
this._shouldLogDebug &&
|
|
501
|
+
this._logger.debug(
|
|
502
|
+
`[DROP OVERFLOW] [id: ${ctx.id}, key: ${ctx.key}] - prt: ${priority ?? Priority.Normal} | q: ${this._executor.queueSize}/${this._executor.queueCapacity}`,
|
|
503
|
+
);
|
|
504
|
+
|
|
505
|
+
throw new RateLimitError(RateLimitErrorCode.QueueOverflow);
|
|
506
|
+
}
|
|
507
|
+
}
|
|
508
|
+
|
|
509
|
+
private _getDebugStateString(state: HttpResponseBasedLimiterState): string {
|
|
510
|
+
return `probe: ${state.isProbing}, unl: ${state.isUnlimited}, lim: ${state.lastKnownLimit}, rem: ${state.lastKnownRemaining}, rst: ${state.lastKnownResetAt}, sync: ${state.lastSyncedAt}`;
|
|
511
|
+
}
|
|
512
|
+
}
|
|
// Barrel file: public entry point for the HTTP Response Based limiter module.
export type { HttpResponseBasedLimiterState } from './http-response-based-limiter.state.js';
export type { HttpResponseBasedLimiterStatus } from './http-response-based-limiter.status.js';
export type { HttpResponseBasedLimiterOptions } from './http-response-based-limiter.options.js';
export type { HttpLimitInfo } from './http-limit.info.js';
export type { HttpLimitInfoExtractor } from './http-limit-info.extractor.js';
export { type HttpHeadersLimiterRunOptions, HttpResponseBasedLimiter } from './http-response-based.limiter.js';
@@ -0,0 +1,105 @@
|
|
|
1
|
+
import { LogLevel } from '@stimulcross/logger';
|
|
2
|
+
import { type LeakyBucketOptions } from './leaky-bucket.options.js';
|
|
3
|
+
import { LeakyBucketPolicy } from './leaky-bucket.policy.js';
|
|
4
|
+
import { type LeakyBucketState } from './leaky-bucket.state.js';
|
|
5
|
+
import { type LeakyBucketStatus } from './leaky-bucket.status.js';
|
|
6
|
+
import { type Decision } from '../../core/decision.js';
|
|
7
|
+
import { RateLimitErrorCode } from '../../enums/rate-limit-error-code.js';
|
|
8
|
+
import { RateLimitError } from '../../errors/rate-limit.error.js';
|
|
9
|
+
import { type LimitBehavior } from '../../types/limit-behavior.js';
|
|
10
|
+
import { AbstractRateLimiter, type ExecutionContext } from '../abstract-rate-limiter.js';
|
|
11
|
+
|
|
12
|
+
/**
|
|
13
|
+
* Leaky Bucket rate limiter.
|
|
14
|
+
*
|
|
15
|
+
* Designed primarily for client-side use to respect third-party limits or protect resources.
|
|
16
|
+
* While this can be used as a server-side limiter with custom distributed storage
|
|
17
|
+
* (e.g., Redis), it is best-effort and not recommended due to high network round-trip latency.
|
|
18
|
+
*
|
|
19
|
+
* Key features:
|
|
20
|
+
* - **Queueing & overflow** - optionally enqueues excess requests up to a maximum allowed overflow capacity
|
|
21
|
+
* - **Concurrency** - limits how many requests can be executed simultaneously
|
|
22
|
+
* - **Priority** - supports task priorities (with fairness and custom policy) to execute critical requests first
|
|
23
|
+
* - **Cancellation** - supports `AbortSignal` to safely remove pending requests from the queue
|
|
24
|
+
* - **Expiration** - automatically drops queued requests that wait longer than the allowed `maxWaitMs`
|
|
25
|
+
* - **Auto-rollback** - reverts spent quota if an enqueued task is canceled or expired
|
|
26
|
+
*/
|
|
27
|
+
export class LeakyBucketLimiter extends AbstractRateLimiter<LeakyBucketState, LeakyBucketStatus> {
|
|
28
|
+
private readonly _defaultLimitBehaviour: LimitBehavior;
|
|
29
|
+
private readonly _maxWaitMs: number | undefined;
|
|
30
|
+
|
|
31
|
+
protected override readonly _policy: LeakyBucketPolicy;
|
|
32
|
+
|
|
33
|
+
constructor(options: LeakyBucketOptions) {
|
|
34
|
+
super(options);
|
|
35
|
+
|
|
36
|
+
this._defaultLimitBehaviour = options.limitBehavior ?? 'reject';
|
|
37
|
+
|
|
38
|
+
if (options.queue?.maxWaitMs) {
|
|
39
|
+
this._maxWaitMs = options.queue.maxWaitMs;
|
|
40
|
+
}
|
|
41
|
+
|
|
42
|
+
this._policy = new LeakyBucketPolicy(options.capacity, options.leakRate);
|
|
43
|
+
}
|
|
44
|
+
|
|
45
|
+
protected override async _runInternal<T>(fn: () => T | Promise<T>, ctx: ExecutionContext): Promise<T> {
|
|
46
|
+
const now = this._clock.now();
|
|
47
|
+
const baseTtlMs = Math.ceil(this._policy.capacity / (this._policy.leakRate / 1000));
|
|
48
|
+
|
|
49
|
+
let runAt: number;
|
|
50
|
+
let storeTtlMs: number;
|
|
51
|
+
|
|
52
|
+
await this._store.acquireLock?.(ctx.key);
|
|
53
|
+
|
|
54
|
+
try {
|
|
55
|
+
const state = (await this._store.get(ctx.key)) ?? this._policy.getInitialState();
|
|
56
|
+
const finalLimitBehavior = ctx.limitBehavior ?? this._defaultLimitBehaviour;
|
|
57
|
+
|
|
58
|
+
const { decision, nextState } = this._policy.evaluate(
|
|
59
|
+
state,
|
|
60
|
+
now,
|
|
61
|
+
ctx.cost,
|
|
62
|
+
finalLimitBehavior === 'enqueue',
|
|
63
|
+
);
|
|
64
|
+
|
|
65
|
+
if (decision.kind === 'deny') {
|
|
66
|
+
this._logger.debug(`[DENY] [id: ${ctx.id}, key: ${ctx.key}] - Retry: +${decision.retryAt - now}ms`);
|
|
67
|
+
throw new RateLimitError(RateLimitErrorCode.LimitExceeded, decision.retryAt);
|
|
68
|
+
}
|
|
69
|
+
|
|
70
|
+
runAt = decision.kind === 'delay' ? decision.runAt : now;
|
|
71
|
+
storeTtlMs = Math.max(baseTtlMs, runAt - now + baseTtlMs);
|
|
72
|
+
|
|
73
|
+
await this._store.set(ctx.key, nextState, storeTtlMs);
|
|
74
|
+
|
|
75
|
+
this._printDebug(decision, nextState, now, ctx);
|
|
76
|
+
} finally {
|
|
77
|
+
await this._store.releaseLock?.(ctx.key);
|
|
78
|
+
}
|
|
79
|
+
|
|
80
|
+
const finalMaxWaitMs = ctx.maxWaitMs ?? this._maxWaitMs;
|
|
81
|
+
const expiresAt = finalMaxWaitMs ? now + finalMaxWaitMs : undefined;
|
|
82
|
+
|
|
83
|
+
return await this._execute(fn, runAt, storeTtlMs, ctx, expiresAt);
|
|
84
|
+
}
|
|
85
|
+
|
|
86
|
+
protected override _getDebugStateString(state: LeakyBucketState): string {
|
|
87
|
+
return `lvl: ${state.level.toFixed(2)}/${this._policy.capacity}`;
|
|
88
|
+
}
|
|
89
|
+
|
|
90
|
+
private _printDebug(decision: Decision, nextState: LeakyBucketState, now: number, ctx: ExecutionContext): void {
|
|
91
|
+
if (this._logger.minLevel < LogLevel.DEBUG) {
|
|
92
|
+
return;
|
|
93
|
+
}
|
|
94
|
+
|
|
95
|
+
const debugStateString = `lvl: ${nextState.level.toFixed(2)}/${this._policy.capacity}`;
|
|
96
|
+
|
|
97
|
+
if (decision.kind === 'delay') {
|
|
98
|
+
this._logger.debug(
|
|
99
|
+
`[DELAY] [id: ${ctx.id}, key: ${ctx.key}] +${decision.runAt - now}ms - ${debugStateString}`,
|
|
100
|
+
);
|
|
101
|
+
} else {
|
|
102
|
+
this._logger.debug(`[ALLOW] [id: ${ctx.id}, key: ${ctx.key}] - ${debugStateString} `);
|
|
103
|
+
}
|
|
104
|
+
}
|
|
105
|
+
}
|