@stimulcross/rate-limiter 0.0.6 → 0.0.8
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/lib/interfaces/rate-limiter-run-options.d.ts +6 -0
- package/lib/limiters/abstract-rate-limiter.d.ts +1 -0
- package/lib/limiters/abstract-rate-limiter.js +4 -2
- package/lib/limiters/http-response-based/http-response-based.limiter.js +1 -1
- package/lib/runtime/rate-limiter.executor.d.ts +1 -0
- package/lib/runtime/rate-limiter.executor.js +6 -1
- package/package.json +1 -2
|
@@ -48,5 +48,11 @@ export interface RateLimiterRunOptions {
|
|
|
48
48
|
* @default Infinity
|
|
49
49
|
*/
|
|
50
50
|
maxWaitMs?: number;
|
|
51
|
+
/**
|
|
52
|
+
* Forces the task to be enqueued even if the queue has reached maximum capacity.
|
|
53
|
+
*
|
|
54
|
+
* @default false
|
|
55
|
+
*/
|
|
56
|
+
shouldForceEnqueue?: boolean;
|
|
51
57
|
}
|
|
52
58
|
//# sourceMappingURL=rate-limiter-run-options.d.ts.map
|
|
@@ -18,6 +18,7 @@ export interface ExecutionContext {
|
|
|
18
18
|
readonly priority?: Priority;
|
|
19
19
|
readonly signal?: AbortSignal;
|
|
20
20
|
readonly maxWaitMs?: number;
|
|
21
|
+
readonly shouldForceEnqueue?: boolean;
|
|
21
22
|
}
|
|
22
23
|
/** @internal */
|
|
23
24
|
export declare abstract class AbstractRateLimiter<TState extends object, TStatus extends RateLimiterStatus | RateLimiterStatus[], TResult = unknown> implements RateLimiter<TStatus> {
|
|
@@ -18,7 +18,7 @@ export class AbstractRateLimiter {
|
|
|
18
18
|
_generateId;
|
|
19
19
|
_isDestroyed = false;
|
|
20
20
|
constructor(options) {
|
|
21
|
-
this._logger = createLogger({ context: new.target.name });
|
|
21
|
+
this._logger = createLogger({ context: new.target.name, ...options?.loggerOptions });
|
|
22
22
|
this._clock = options?.clock ?? defaultClock;
|
|
23
23
|
this._store = options?.store ?? new InMemoryStateStore(this._clock);
|
|
24
24
|
this._executor = new RateLimiterExecutor(this._logger, this._clock, options?.queue);
|
|
@@ -37,6 +37,7 @@ export class AbstractRateLimiter {
|
|
|
37
37
|
priority: options.priority,
|
|
38
38
|
signal: options.signal,
|
|
39
39
|
maxWaitMs: options.maxWaitMs,
|
|
40
|
+
shouldForceEnqueue: options.shouldForceEnqueue,
|
|
40
41
|
};
|
|
41
42
|
await this._ensureCanExecute(ctx);
|
|
42
43
|
return await this._runInternal(fn, ctx);
|
|
@@ -79,6 +80,7 @@ export class AbstractRateLimiter {
|
|
|
79
80
|
priority: ctx.priority,
|
|
80
81
|
signal: ctx.signal,
|
|
81
82
|
expiresAt,
|
|
83
|
+
shouldForceEnqueue: ctx.shouldForceEnqueue,
|
|
82
84
|
});
|
|
83
85
|
}
|
|
84
86
|
catch (e) {
|
|
@@ -107,7 +109,7 @@ export class AbstractRateLimiter {
|
|
|
107
109
|
return e instanceof RateLimitError && e.code !== RateLimitErrorCode.LimitExceeded;
|
|
108
110
|
}
|
|
109
111
|
async _ensureCanExecute(ctx) {
|
|
110
|
-
if (this._executor.isQueueFull) {
|
|
112
|
+
if (this._executor.isQueueFull && !ctx.shouldForceEnqueue) {
|
|
111
113
|
this._shouldPrintDebug &&
|
|
112
114
|
this._logger.debug(`[DROP OVERFLOW] [id: ${ctx.id}, key: ${ctx.key}] - prt: ${ctx.priority ?? Priority.Normal} | q: ${this._executor.queueSize}/${this._executor.queueCapacity}`);
|
|
113
115
|
let retryAt;
|
|
@@ -45,7 +45,7 @@ export class HttpResponseBasedLimiter {
|
|
|
45
45
|
_fallbackResetDelayMs;
|
|
46
46
|
_isDestroyed = false;
|
|
47
47
|
constructor(options) {
|
|
48
|
-
this._logger = createLogger({ context: new.target.name });
|
|
48
|
+
this._logger = createLogger({ context: new.target.name, ...options.loggerOptions });
|
|
49
49
|
this._clock = options.clock ?? defaultClock;
|
|
50
50
|
this._store = options.store ?? new InMemoryStateStore(this._clock);
|
|
51
51
|
this._executor = new RateLimiterExecutor(this._logger, this._clock, options.queue);
|
|
@@ -46,8 +46,13 @@ export class RateLimiterExecutor {
|
|
|
46
46
|
priorityQueue.remove(task);
|
|
47
47
|
this._drain();
|
|
48
48
|
});
|
|
49
|
+
const isEnqueued = this._queue.enqueue(task, task.priority, options.shouldForceEnqueue);
|
|
50
|
+
if (!isEnqueued) {
|
|
51
|
+
this._shouldPrintDebug &&
|
|
52
|
+
this._logger.debug(`[DROP OVERFLOW] [id: ${options.id}, key: ${options.key}] - ${this._getStateDebugString(task.priority)}`);
|
|
53
|
+
throw new RateLimitError(RateLimitErrorCode.QueueOverflow);
|
|
54
|
+
}
|
|
49
55
|
this._tickets.add(runAt);
|
|
50
|
-
this._queue.enqueue(task, task.priority);
|
|
51
56
|
if (task.expiresAt !== undefined) {
|
|
52
57
|
this._expiryHeap.push(task);
|
|
53
58
|
}
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@stimulcross/rate-limiter",
|
|
3
|
-
"version": "0.0.6",
|
|
3
|
+
"version": "0.0.8",
|
|
4
4
|
"description": "A collection of client-side rate limiters for Node.js and browsers.",
|
|
5
5
|
"type": "module",
|
|
6
6
|
"engines": {
|
|
@@ -56,7 +56,6 @@
|
|
|
56
56
|
"@commitlint/cli": "^20.4.4",
|
|
57
57
|
"@leancodepl/resolve-eslint-flat-config": "^9.7.4",
|
|
58
58
|
"@stimulcross/commitlint-config": "^2.0.0",
|
|
59
|
-
"@stimulcross/eslint-config-node": "^2.0.0",
|
|
60
59
|
"@stimulcross/eslint-config-typescript": "^2.0.0",
|
|
61
60
|
"@stimulcross/prettier-config": "^2.0.0",
|
|
62
61
|
"@types/node": "^25.5.0",
|