@limitkit/core 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,702 @@
1
+ /**
2
+ * String literals identifying the supported rate limiting algorithms.
3
+ *
4
+ * Each algorithm offers different trade-offs in terms of accuracy, memory usage, and behavior:
5
+ * - **FixedWindow**: Simple, resets at fixed intervals (e.g., every minute)
6
+ * - **SlidingWindow**: Memory-intensive but more accurate than fixed window
7
+ * - **SlidingWindowCounter**: Hybrid approach with better accuracy than fixed window
8
+ * - **TokenBucket**: Allows burst traffic while maintaining average rate
9
+ * - **LeakyBucket**: Smooths traffic flow, good for queue management
10
+ * - **GCRA**: Generic Cell Rate Algorithm, precise and memory-efficient for telecom use cases
11
+ * - **Custom**: User-defined rate limiting algorithm
12
+ */
13
+ type AlgorithmName = "fixed-window" | "sliding-window" | "sliding-window-counter" | "token-bucket" | "leaky-bucket" | "gcra" | (string & {});
14
+ interface BaseConfig {
15
+ name: AlgorithmName;
16
+ }
17
+ /**
18
+ * Configuration shared by window-based algorithms (FixedWindow, SlidingWindow, SlidingWindowCounter).
19
+ */
20
+ interface WindowConfig {
21
+ /**
22
+ * Window duration in seconds. Resets occur at this interval.
23
+ */
24
+ window: number;
25
+ /**
26
+ * Maximum number of requests allowed within the window.
27
+ */
28
+ limit: number;
29
+ }
30
+ interface FixedWindowConfig extends BaseConfig, WindowConfig {
31
+ name: "fixed-window";
32
+ }
33
+ interface SlidingWindowConfig extends BaseConfig, WindowConfig {
34
+ name: "sliding-window";
35
+ }
36
+ interface SlidingWindowCounterConfig extends BaseConfig, WindowConfig {
37
+ name: "sliding-window-counter";
38
+ }
39
+ interface TokenBucketConfig extends BaseConfig {
40
+ name: "token-bucket";
41
+ /**
42
+ * Number of tokens to add back to the bucket per second.
43
+ */
44
+ refillRate: number;
45
+ /**
46
+ * Maximum capacity of the bucket (total tokens it can hold).
47
+ */
48
+ capacity: number;
49
+ }
50
+ interface LeakyBucketConfig extends BaseConfig {
51
+ name: "leaky-bucket";
52
+ /**
53
+ * Number of requests to process and leak from the queue per second.
54
+ */
55
+ leakRate: number;
56
+ /**
57
+ * Maximum number of requests that can be queued at once.
58
+ */
59
+ capacity: number;
60
+ }
61
+ interface GCRAConfig extends BaseConfig {
62
+ name: "gcra";
63
+ /**
64
+ * Time interval between request allowances in seconds (i.e., 1 / maximum request rate).
65
+ */
66
+ interval: number;
67
+ /**
68
+ * Number of requests that can arrive simultaneously without penalty.
69
+ */
70
+ burst: number;
71
+ }
72
+ /**
73
+ * Configuration for custom rate limiting algorithms
74
+ */
75
+ interface CustomConfig extends BaseConfig {
76
+ [key: string]: any;
77
+ }
78
+ /**
79
+ * Configuration for supported rate limiting algorithms.
80
+ *
81
+ * Use the appropriate algorithm configuration based on your use case:
82
+ * - Window-based (FixedWindow, SlidingWindow, SlidingWindowCounter): Simple rate limiting
83
+ * - TokenBucket: Supports traffic bursts while maintaining average rate
84
+ * - LeakyBucket: Smooths traffic, prevents bursts
85
+ * - GCRA: Precise rate limiting with low memory overhead
86
+ * - Custom: Custom algorithm
87
+ */
88
+ type AlgorithmConfig = FixedWindowConfig | SlidingWindowConfig | SlidingWindowCounterConfig | TokenBucketConfig | LeakyBucketConfig | GCRAConfig | CustomConfig;
89
+
90
+ /**
91
+ * Represents an algorithm that can be executed
92
+ * @template TConfig - The configuration schema for the algorithm.
93
+ */
94
+ interface Algorithm<TConfig extends AlgorithmConfig> {
95
+ /**
96
+ * Readonly algorithm configuration
97
+ */
98
+ readonly config: TConfig;
99
+ /**
100
+ * Validate algorithm configuration values
101
+ * @returns {void} If the configuration is valid
102
+ * @throws BadArgumentsException If any of the values violate conditions
103
+ */
104
+ validate(): void;
105
+ }
106
+
107
+ /**
108
+ * Defines a single rate limiting rule with its associated algorithm and constraints.
109
+ *
110
+ * Rules are evaluated in order, and the rate limiter returns the result of the first
111
+ * rule that reaches its limit. This allows for layered rate limiting (e.g., per-user
112
+ * and per-IP limits simultaneously).
113
+ *
114
+ * @template C The context type used to dynamically determine rule parameters.
115
+ */
116
+ interface LimitRule<C = unknown> {
117
+ /**
118
+ * Unique name/identifier for this rule, used for tracking which rule caused a rate limit.
119
+ * Appears in debug results when the rule is exceeded.
120
+ */
121
+ name: string;
122
+ /**
123
+ * The rate limiting key that groups requests together.
124
+ *
125
+ * Can be:
126
+ * - A **fixed string**: All requests use the same limit (e.g., "global-api-limit")
127
+ * - A **function**: Dynamically determines the key per request (e.g., extract user ID from context)
128
+ * - An **async function**: For async key resolution (e.g., lookup user tier from database)
129
+ *
130
+ * Example: `(ctx) => ctx.userId` to apply per-user rate limits
131
+ */
132
+ key: string | ((ctx: C) => string | Promise<string>);
133
+ /**
134
+ * Optional cost/weight of each request. Defaults to 1 if not specified.
135
+ *
136
+ * Can be:
137
+ * - A **fixed number**: Every request costs the same (e.g., 1)
138
+ * - A **function**: Different requests have different costs (e.g., expensive operations cost more)
139
+ * - An **async function**: For async cost calculation
140
+ *
141
+ * Useful for implementing tiered request costs where some operations are more resource-intensive
142
+ * and should count as multiple requests against the rate limit.
143
+ */
144
+ cost?: number | ((ctx: C) => number | Promise<number>);
145
+ /**
146
+ * The rate limiting algorithm and its configuration.
147
+ *
148
+ * Can be:
149
+ * - A **fixed policy**: Same algorithm for all requests (e.g., 100 requests per minute)
150
+ * - A **function**: Dynamically choose algorithm per request (e.g., stricter limits for free tier users)
151
+ * - An **async function**: For async policy resolution (e.g., fetch limits from a service)
152
+ */
153
+ policy: PolicyResolver<C>;
154
+ }
155
+ /**
156
+ * Resolver function type for rate limit policies.
157
+ */
158
+ type PolicyResolver<C> = Algorithm<AlgorithmConfig> | ((ctx: C) => Algorithm<AlgorithmConfig> | Promise<Algorithm<AlgorithmConfig>>);
159
+
160
+ /**
161
+ * Result of a rate limit check for a single request.
162
+ *
163
+ * Indicates whether the request is allowed, the maximum number of requests that can be made, how many requests remain in the current
164
+ * window, when the limit resets and how many seconds to wait before retrying.
165
+ */
166
+ interface RateLimitResult {
167
+ /**
168
+ * Whether the request is allowed (true = within limits, false = limit exceeded).
169
+ */
170
+ allowed: boolean;
171
+ /**
172
+ * The maximum number of requests the client can make
173
+ */
174
+ limit: number;
175
+ /**
176
+ * Number of requests remaining in the current rate limit window.
177
+ * When `allowed` is false, this is typically 0.
178
+ */
179
+ remaining: number;
180
+ /**
181
+ * Unix timestamp (in milliseconds) when the rate limit counter fully resets.
182
+ * Useful for implementing client-side backoff strategies.
183
+ */
184
+ reset: number;
185
+ /**
186
+ * If the request is rate limited, suggests how many seconds to wait before retrying.
187
+ * Clients should use exponential backoff and add jitter, rather than strictly following this value.
188
+ * Only present when `allowed` is false.
189
+ */
190
+ retryAfter?: number;
191
+ }
192
+ /**
193
+ * Extended rate limit result with debug information.
194
+ *
195
+ * Returned when debug mode is enabled on the RateLimiter. Includes details about
196
+ * all evaluated rules and which rule caused the rate limit (if any).
197
+ */
198
+ interface DebugLimitResult extends RateLimitResult {
199
+ /**
200
+ * The name of the rule that caused the rate limit to be exceeded.
201
+ * If the request was allowed, this is null.
202
+ */
203
+ failedRule: string | null;
204
+ /**
205
+ * An array of per-rule results for each evaluated rule, in order, from the first rule up to and including the first rule that failed
206
+ */
207
+ details: (RateLimitResult & {
208
+ name: string;
209
+ })[];
210
+ }
211
+
212
+ /**
213
+ * Interface for a rate limiter that enforces rate limit rules.
214
+ *
215
+ * @template C The context type passed to the limiter to determine dynamic rule values.
216
+ */
217
+ interface Limiter<C = unknown> {
218
+ /**
219
+ * Check if a request is allowed under the configured rate limits.
220
+ *
221
+ * Evaluates all configured rules in order and returns the result of the first rule
222
+ * that limits the request. If all rules allow the request, returns a positive result.
223
+ *
224
+ * @param ctx - Context object containing information about the request (e.g., user ID, IP address).
225
+ * Used to dynamically determine rule keys, costs, and policies.
226
+ * @returns A promise that resolves to the result of the rate limit check, including
227
+ * whether the request is allowed and when the limit resets.
228
+ */
229
+ consume(ctx: C): Promise<RateLimitResult>;
230
+ }
231
+
232
+ /**
233
+ * Interface for a storage backend that tracks rate limiting state.
234
+ *
235
+ * Implementations can use various backends (in-memory, Redis, DynamoDB, etc.)
236
+ * to persist the request counts and windows needed by rate limiting algorithms.
237
+ */
238
+ interface Store {
239
+ /**
240
+ * Process a request and update the stored rate limit state for the given key.
241
+ *
242
+ * Atomically updates the counter for the given key based on the specified algorithm
243
+ * and returns the result (whether the request is allowed and when the limit resets).
244
+ * @template TConfig - Algorithm-dependent configuration schema
245
+ * @param key - The rate limiting key that identifies what entity is being limited
246
+ * (e.g., "user-123", "ip-192.168.1.1"). Requests with the same key
247
+ * share the same rate limit quota.
248
+ * @param algorithm - The rate limiting algorithm configuration to apply.
249
+ * @param now - Unix timestamp in millisecond
250
+ * @param cost - The cost/weight of this request. Defaults to 1. Higher costs consume
251
+ * more quota (useful for charging different amounts for different operations).
252
+ * @returns A promise that resolves to the rate limit check result.
253
+ */
254
+ consume<TConfig extends AlgorithmConfig>(key: string, algorithm: Algorithm<TConfig>, now: number, cost?: number): Promise<RateLimitResult>;
255
+ }
256
+
257
+ /**
258
+ * Represents a configuration object for the rate limiter
259
+ */
260
+ interface RateLimitConfig<C = unknown> {
261
+ /**
262
+ * A set of rate limiting rules to apply.
263
+ */
264
+ rules: LimitRule<C>[];
265
+ /**
266
+ * The storage backend for tracking rate limit state.
267
+ */
268
+ store: Store;
269
+ /**
270
+ * Optional. When true, returns detailed information about
271
+ * each evaluated rule. Useful for troubleshooting. Defaults to false.
272
+ */
273
+ debug?: boolean;
274
+ }
275
+
276
+ /**
277
+ * Core rate limiter implementation that enforces rate limiting rules.
278
+ *
279
+ * The RateLimiter evaluates rules in order and returns the result of the first rule
280
+ * that limits the request. If all rules allow the request, it returns a positive result.
281
+ *
282
+ * Use cases:
283
+ * - API rate limiting (requests per second/minute)
284
+ * - Preventing brute force attacks
285
+ * - Protecting backend resources from traffic spikes
286
+ * - Multi-tier rate limiting (e.g., per-user AND per-IP limits simultaneously)
287
+ *
288
+ * @template C The context type that contains information about each request.
289
+ * Passed to rule resolvers to dynamically determine keys, costs, and policies.
290
+ *
291
+ * @example
292
+ * ```typescript
293
+ * const limiter = new RateLimiter({
294
+ * store: redisStore,
295
+ * rules: [
296
+ * {
297
+ * name: 'per-user-limit',
298
+ * key: (ctx) => ctx.userId,
299
+ * policy: new RedisFixedWindow({ name: 'fixed-window', window: 60, limit: 100 })
300
+ * }
301
+ * ]
302
+ * });
303
+ *
304
+ * const result = await limiter.consume({ userId: 'user-123' });
305
+ * if (!result.allowed) {
306
+ * return 429 with headers: Retry-After: result.retryAfter
307
+ * }
308
+ * ```
309
+ * @see Limiter
310
+ * @see LimitRule
311
+ * @see Store
312
+ */
313
+ declare class RateLimiter<C = unknown> implements Limiter<C> {
314
+ private rules;
315
+ private debug;
316
+ private store;
317
+ /**
318
+ * Create a new rate limiter instance.
319
+ * @throws {EmptyRulesException} If the list of rules is empty
320
+ * @param config - Configuration for the rate limiter
321
+ * @see RateLimitConfig
322
+ */
323
+ constructor({ rules, debug, store }: RateLimitConfig<C>);
324
+ /**
325
+ * Return the configuration object
326
+ * @returns {RateLimitConfig<C>}
327
+ */
328
+ get config(): RateLimitConfig<C>;
329
+ /**
330
+ * Check if a request should be allowed under the configured rate limits.
331
+ *
332
+ * Evaluates each rule in order from left to right. Returns as soon as a rule is exceeded (request denied).
333
+ * If all rules allow the request, returns the result of the last rule evaluated.
334
+ *
335
+ * Each rule resolution (key, cost, policy) can be static or dynamic:
336
+ * - Static: evaluated once and reused
337
+ * - Dynamic: evaluated per request based on context
338
+ * - Async: evaluated asynchronously (e.g., database lookups)
339
+ *
340
+ * @param ctx - Request context passed to rule resolvers to determine dynamic values.
341
+ * @returns Promise resolving to the rate limit result. If debug mode is enabled,
342
+ * includes details about each evaluated rule and which rule failed (if any).
343
+ *
344
+ * @example
345
+ * ```typescript
346
+ * const result = await limiter.consume({
347
+ * userId: 'user-123',
348
+ * ip: '192.168.1.1',
349
+ * endpoint: '/api/search'
350
+ * });
351
+ *
352
+ * if (!result.allowed) {
353
+ * console.log(`Rate limited. Retry in ${result.retryAfter} seconds`);
354
+ * }
355
+ * ```
356
+ */
357
+ consume(ctx: C): Promise<RateLimitResult>;
358
+ }
359
+
360
+ declare class BadArgumentsException extends Error {
361
+ constructor(message: string);
362
+ }
363
+
364
+ declare class EmptyRulesException extends Error {
365
+ constructor();
366
+ }
367
+
368
+ declare class UnknownAlgorithmException extends Error {
369
+ constructor(algorithm: string);
370
+ }
371
+
372
+ /**
373
+ * Prepend additional data to user-defined rate limiting keys; the prepended data includes:
374
+ * * Rate limiting algorithm name e.g., `"fixed-window"`, `"sliding-window"`
375
+ * * SHA-256 hash of the algorithm config object (deterministic order guaranteed)
376
+ *
377
+ * @warning Avoid nested or non-primitive key-value pairs to ensure deterministic hash value
378
+ *
379
+ * The modified key will have the format: `ratelimit:{algorithm_name}:{sha256_hash}:{key}`
380
+ * @param config The algorithm config object
381
+ * @param key The user-defined key
382
+ * @returns {string} A modified key with the format above
383
+ */
384
+ declare function addConfigToKey(config: AlgorithmConfig, key: string): string;
385
+
386
+ /**
387
+ * Merge two arrays of rules by name such that:
388
+ * * Local rules override global rules if the name matches
389
+ * * New local rules are appended
390
+ * @param globalRules The global rules to be overridden
391
+ * @param localRules The local rules to be appended or to override global rules
392
+ * @returns {LimitRule<C>[]} A new list of rules merged from `globalRules` and `localRules`
393
+ */
394
+ declare function mergeRules<C>(globalRules?: LimitRule<C>[], localRules?: LimitRule<C>[]): LimitRule<C>[];
395
+
396
+ /**
397
+ * Base implementation of the **Fixed Window** rate limiting algorithm.
398
+ *
399
+ * This class provides the **shared configuration and validation logic**
400
+ * for fixed window rate limiting but does **not perform rate limiting itself**.
401
+ *
402
+ * Concrete implementations must extend this class and provide the execution
403
+ * logic for a specific storage backend (e.g. Redis, in-memory, database).
404
+ *
405
+ * ## Purpose
406
+ * Separating algorithm configuration from storage execution allows the same
407
+ * algorithm definition to be reused across multiple stores.
408
+ *
409
+ * For example:
410
+ *
411
+ * - `InMemoryFixedWindow` — executes the algorithm using in-memory state
412
+ * - `RedisFixedWindow` — executes the algorithm using Redis + Lua scripts
413
+ *
414
+ * ## Usage
415
+ * End users typically **do not use this class directly**. Instead they should
416
+ * use a store-specific implementation:
417
+ *
418
+ * ```ts
419
+ * import { InMemoryFixedWindow } from "@limitkit/memory";
420
+ *
421
+ * const limiter = new InMemoryFixedWindow({
422
+ * name: "fixed-window",
423
+ * limit: 100,
424
+ * window: 60
425
+ * });
426
+ * ```
427
+ *
428
+ * @abstract
429
+ * @implements {Algorithm<FixedWindowConfig>}
430
+ */
431
+ declare abstract class FixedWindow implements Algorithm<FixedWindowConfig> {
432
+ readonly config: FixedWindowConfig;
433
+ constructor(config: FixedWindowConfig);
434
+ /**
435
+ * Validates the fixed window configuration.
436
+ *
437
+ * Ensures the configured window size and request limit are positive values.
438
+ *
439
+ * @throws BadArgumentsException
440
+ * Thrown if:
441
+ * - `limit <= 0`
442
+ * - `window <= 0`
443
+ */
444
+ validate(): void;
445
+ }
446
+
447
+ /**
448
+ * Base implementation of the **Sliding Window** rate limiting algorithm.
449
+ *
450
+ * This class provides the **shared configuration and validation logic**
451
+ * for sliding window rate limiting but does **not perform rate limiting itself**.
452
+ *
453
+ * Concrete implementations must extend this class and provide the execution
454
+ * logic for a specific storage backend (e.g. Redis, in-memory, database).
455
+ *
456
+ * ## Purpose
457
+ * Separating algorithm configuration from storage execution allows the same
458
+ * algorithm definition to be reused across multiple stores.
459
+ *
460
+ * For example:
461
+ *
462
+ * - `InMemorySlidingWindow` — executes the algorithm using in-memory state
463
+ * - `RedisSlidingWindow` — executes the algorithm using Redis + Lua scripts
464
+ *
465
+ * ## Usage
466
+ * End users typically **do not use this class directly**. Instead they should
467
+ * use a store-specific implementation:
468
+ *
469
+ * ```ts
470
+ * import { InMemorySlidingWindow } from "@limitkit/memory";
471
+ *
472
+ * const limiter = new InMemorySlidingWindow({
473
+ * name: "sliding-window",
474
+ * limit: 100,
475
+ * window: 60
476
+ * });
477
+ * ```
478
+ *
479
+ * @abstract
480
+ * @implements {Algorithm<SlidingWindowConfig>}
481
+ */
482
+ declare abstract class SlidingWindow implements Algorithm<SlidingWindowConfig> {
483
+ readonly config: SlidingWindowConfig;
484
+ constructor(config: SlidingWindowConfig);
485
+ /**
486
+ * Validates the sliding window configuration.
487
+ *
488
+ * Ensures the configured window size and request limit are positive values.
489
+ *
490
+ * @throws BadArgumentsException
491
+ * Thrown if:
492
+ * - `limit <= 0`
493
+ * - `window <= 0`
494
+ */
495
+ validate(): void;
496
+ }
497
+
498
+ /**
499
+ * Base implementation of the **Sliding Window Counter** rate limiting algorithm.
500
+ *
501
+ * This class provides the **shared configuration and validation logic**
502
+ * for sliding window counter rate limiting but does **not perform rate limiting itself**.
503
+ *
504
+ * Concrete implementations must extend this class and provide the execution
505
+ * logic for a specific storage backend (e.g. Redis, in-memory, database).
506
+ *
507
+ * ## Purpose
508
+ * Separating algorithm configuration from storage execution allows the same
509
+ * algorithm definition to be reused across multiple stores.
510
+ *
511
+ * For example:
512
+ *
513
+ * - `InMemorySlidingWindowCounter` — executes the algorithm using in-memory state
514
+ * - `RedisSlidingWindowCounter` — executes the algorithm using Redis + Lua scripts
515
+ *
516
+ * ## Usage
517
+ * End users typically **do not use this class directly**. Instead they should
518
+ * use a store-specific implementation:
519
+ *
520
+ * ```ts
521
+ * import { InMemorySlidingWindowCounter } from "@limitkit/memory";
522
+ *
523
+ * const limiter = new InMemorySlidingWindowCounter({
524
+ * name: "sliding-window-counter",
525
+ * limit: 100,
526
+ * window: 60
527
+ * });
528
+ * ```
529
+ *
530
+ * @abstract
531
+ * @implements {Algorithm<SlidingWindowCounterConfig>}
532
+ */
533
+ declare abstract class SlidingWindowCounter implements Algorithm<SlidingWindowCounterConfig> {
534
+ readonly config: SlidingWindowCounterConfig;
535
+ constructor(config: SlidingWindowCounterConfig);
536
+ /**
537
+ * Validates the sliding window counter configuration.
538
+ *
539
+ * Ensures the configured window size and request limit are positive values.
540
+ *
541
+ * @throws BadArgumentsException
542
+ * Thrown if:
543
+ * - `limit <= 0`
544
+ * - `window <= 0`
545
+ */
546
+ validate(): void;
547
+ }
548
+
549
+ /**
550
+ * Base implementation of the **Token Bucket** rate limiting algorithm.
551
+ *
552
+ * This class provides the **shared configuration and validation logic**
553
+ * for token bucket rate limiting but does **not perform rate limiting itself**.
554
+ *
555
+ * Concrete implementations must extend this class and provide the execution
556
+ * logic for a specific storage backend (e.g. Redis, in-memory, database).
557
+ *
558
+ * ## Purpose
559
+ * Separating algorithm configuration from storage execution allows the same
560
+ * algorithm definition to be reused across multiple stores.
561
+ *
562
+ * For example:
563
+ *
564
+ * - `InMemoryTokenBucket` — executes the algorithm using in-memory state
565
+ * - `RedisTokenBucket` — executes the algorithm using Redis + Lua scripts
566
+ *
567
+ * ## Usage
568
+ * End users typically **do not use this class directly**. Instead they should
569
+ * use a store-specific implementation:
570
+ *
571
+ * ```ts
572
+ * import { InMemoryTokenBucket } from "@limitkit/memory";
573
+ *
574
+ * const limiter = new InMemoryTokenBucket({
575
+ * name: "token-bucket",
576
+ * capacity: 100,
577
+ * refillRate: 5
578
+ * });
579
+ * ```
580
+ *
581
+ * @abstract
582
+ * @implements {Algorithm<TokenBucketConfig>}
583
+ */
584
+ declare abstract class TokenBucket implements Algorithm<TokenBucketConfig> {
585
+ readonly config: TokenBucketConfig;
586
+ constructor(config: TokenBucketConfig);
587
+ /**
588
+ * Validates the token bucket configuration.
589
+ *
590
+ * Ensures the configured capacity and refill rate are positive values.
591
+ *
592
+ * @throws BadArgumentsException
593
+ * Thrown if:
594
+ * - `capacity <= 0`
595
+ * - `refillRate <= 0`
596
+ */
597
+ validate(): void;
598
+ }
599
+
600
+ /**
601
+ * Base implementation of the **Leaky Bucket** rate limiting algorithm.
602
+ *
603
+ * This class provides the **shared configuration and validation logic**
604
+ * for leaky bucket rate limiting but does **not perform rate limiting itself**.
605
+ *
606
+ * Concrete implementations must extend this class and provide the execution
607
+ * logic for a specific storage backend (e.g. Redis, in-memory, database).
608
+ *
609
+ * ## Purpose
610
+ * Separating algorithm configuration from storage execution allows the same
611
+ * algorithm definition to be reused across multiple stores.
612
+ *
613
+ * For example:
614
+ *
615
+ * - `InMemoryLeakyBucket` — executes the algorithm using in-memory state
616
+ * - `RedisLeakyBucket` — executes the algorithm using Redis + Lua scripts
617
+ *
618
+ * ## Usage
619
+ * End users typically **do not use this class directly**. Instead they should
620
+ * use a store-specific implementation:
621
+ *
622
+ * ```ts
623
+ * import { InMemoryLeakyBucket } from "@limitkit/memory";
624
+ *
625
+ * const limiter = new InMemoryLeakyBucket({
626
+ * name: "leaky-bucket",
627
+ * capacity: 100,
628
+ * leakRate: 5
629
+ * });
630
+ * ```
631
+ *
632
+ * @abstract
633
+ * @implements {Algorithm<LeakyBucketConfig>}
634
+ */
635
+ declare abstract class LeakyBucket implements Algorithm<LeakyBucketConfig> {
636
+ readonly config: LeakyBucketConfig;
637
+ constructor(config: LeakyBucketConfig);
638
+ /**
639
+ * Validates the leaky bucket configuration.
640
+ *
641
+ * Ensures the configured capacity and leak rate are positive values.
642
+ *
643
+ * @throws BadArgumentsException
644
+ * Thrown if:
645
+ * - `capacity <= 0`
646
+ * - `leakRate <= 0`
647
+ */
648
+ validate(): void;
649
+ }
650
+
651
+ /**
652
+ * Base implementation of the **GCRA** rate limiting algorithm.
653
+ *
654
+ * This class provides the **shared configuration and validation logic**
655
+ * for GCRA rate limiting but does **not perform rate limiting itself**.
656
+ *
657
+ * Concrete implementations must extend this class and provide the execution
658
+ * logic for a specific storage backend (e.g. Redis, in-memory, database).
659
+ *
660
+ * ## Purpose
661
+ * Separating algorithm configuration from storage execution allows the same
662
+ * algorithm definition to be reused across multiple stores.
663
+ *
664
+ * For example:
665
+ *
666
+ * - `InMemoryGCRA` — executes the algorithm using in-memory state
667
+ * - `RedisGCRA` — executes the algorithm using Redis + Lua scripts
668
+ *
669
+ * ## Usage
670
+ * End users typically **do not use this class directly**. Instead they should
671
+ * use a store-specific implementation:
672
+ *
673
+ * ```ts
674
+ * import { InMemoryGCRA } from "@limitkit/memory";
675
+ *
676
+ * const limiter = new InMemoryGCRA({
677
+ * name: "gcra",
678
+ * burst: 5,
679
+ * interval: 1
680
+ * });
681
+ * ```
682
+ *
683
+ * @abstract
684
+ * @implements {Algorithm<GCRAConfig>}
685
+ */
686
+ declare abstract class GCRA implements Algorithm<GCRAConfig> {
687
+ readonly config: GCRAConfig;
688
+ constructor(config: GCRAConfig);
689
+ /**
690
+ * Validates the GCRA configuration.
691
+ *
692
+ * Ensures the configured burst and interval are positive values.
693
+ *
694
+ * @throws BadArgumentsException
695
+ * Thrown if:
696
+ * - `burst <= 0`
697
+ * - `interval <= 0`
698
+ */
699
+ validate(): void;
700
+ }
701
+
702
+ export { type Algorithm, type AlgorithmConfig, type AlgorithmName, BadArgumentsException, type BaseConfig, type CustomConfig, type DebugLimitResult, EmptyRulesException, FixedWindow, type FixedWindowConfig, GCRA, type GCRAConfig, LeakyBucket, type LeakyBucketConfig, type LimitRule, type Limiter, type RateLimitConfig, type RateLimitResult, RateLimiter, SlidingWindow, type SlidingWindowConfig, SlidingWindowCounter, type SlidingWindowCounterConfig, type Store, TokenBucket, type TokenBucketConfig, UnknownAlgorithmException, type WindowConfig, addConfigToKey, mergeRules };