@upstash/ratelimit 0.4.1 → 0.4.3-canary.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -1,4 +1,4 @@
1
- # Upstash RateLimit
1
+ # Upstash Rate Limit
2
2
 
3
3
  [![Tests](https://github.com/upstash/ratelimit/actions/workflows/tests.yaml/badge.svg)](https://github.com/upstash/ratelimit/actions/workflows/tests.yaml)
4
4
  ![npm (scoped)](https://img.shields.io/npm/v/@upstash/ratelimit)
@@ -86,7 +86,13 @@ import { Redis } from "@upstash/redis";
86
86
  const ratelimit = new Ratelimit({
87
87
  redis: Redis.fromEnv(),
88
88
  limiter: Ratelimit.slidingWindow(10, "10 s"),
89
- analytics: true
89
+ analytics: true,
90
+ /**
91
+ * Optional prefix for the keys used in redis. This is useful if you want to share a redis
92
+ * instance with other applications and want to avoid key collisions. The default prefix is
93
+ * "@upstash/ratelimit"
94
+ */
95
+ prefix: "@upstash/ratelimit",
90
96
  });
91
97
 
92
98
  // Use a constant string to limit all requests with a single ratelimit
@@ -224,6 +230,41 @@ instance outside of your handler function. While the function is still hot, the
224
230
  ratelimiter can block requests without having to request data from redis, thus
225
231
  saving time and money.
226
232
 
233
+
234
+ ## Using multiple limits
235
+
236
+ Sometimes you might want to apply different limits to different users. For example you might want to allow 10 requests per 10 seconds for free users, but 60 requests per 10 seconds for paid users.
237
+
238
+ Here's how you could do that:
239
+
240
+ ```ts
241
+ import { Redis } from "@upstash/redis"
242
+ import { Ratelimit } from "@upstash/ratelimit"
243
+
244
+ const redis = Redis.fromEnv()
245
+
246
+ const ratelimit = {
247
+ free: new Ratelimit({
248
+ redis,
249
+ analytics: true,
250
+ prefix: "ratelimit:free",
251
+ limiter: Ratelimit.slidingWindow(10, "10s"),
252
+ }),
253
+ paid: new Ratelimit({
254
+ redis,
255
+ analytics: true,
256
+ prefix: "ratelimit:paid",
257
+ limiter: Ratelimit.slidingWindow(60, "10s"),
258
+ })
259
+ }
260
+
261
+
262
+ await ratelimit.free.limit(ip)
263
+ // or for a paid user you might have an email or userId available:
264
+ await ratelimit.paid.limit(userId)
265
+
266
+ ```
267
+
227
268
  ## MultiRegion replicated ratelimiting
228
269
 
229
270
  Using a single redis instance has the downside of providing low latencies only
@@ -411,7 +452,7 @@ const ratelimit = new Ratelimit({
411
452
  You can enable analytics to get a better understanding of how your ratelimiting
412
453
  is performing. This is done by setting `analytics: true` in the options.
413
454
 
414
- All data is stored in the same Redis database.
455
+ All data is stored in the same Redis database and writing analytics uses 1 command per `.limit` invocation.
415
456
 
416
457
  ```ts
417
458
  const ratelimit = new Ratelimit({
package/dist/index.d.ts CHANGED
@@ -1,8 +1,3 @@
1
- import { Redis } from '@upstash/redis';
2
-
3
- type Unit = "ms" | "s" | "m" | "h" | "d";
4
- type Duration = `${number} ${Unit}` | `${number}${Unit}`;
5
-
6
1
  /**
7
2
  * EphemeralCache is used to block certain identifiers right away in case they have already exceeded the ratelimit.
8
3
  */
@@ -69,6 +64,13 @@ type RatelimitResponse = {
69
64
  type Algorithm<TContext> = (ctx: TContext, identifier: string, opts?: {
70
65
  cache?: EphemeralCache;
71
66
  }) => Promise<RatelimitResponse>;
67
+ /**
68
+ * This is all we need from the redis sdk.
69
+ */
70
+ interface Redis {
71
+ sadd: <TData>(key: string, ...members: TData[]) => Promise<number>;
72
+ eval: <TArgs extends unknown[], TData = unknown>(...args: [script: string, keys: string[], args: TArgs]) => Promise<TData>;
73
+ }
72
74
 
73
75
  type Geo = {
74
76
  country?: string;
@@ -113,6 +115,9 @@ declare class Analytics {
113
115
  }>>;
114
116
  }
115
117
 
118
+ type Unit = "ms" | "s" | "m" | "h" | "d";
119
+ type Duration = `${number} ${Unit}` | `${number}${Unit}`;
120
+
116
121
  type RatelimitConfig<TContext> = {
117
122
  /**
118
123
  * The ratelimiter function to use.
@@ -161,7 +166,7 @@ type RatelimitConfig<TContext> = {
161
166
  * If enabled, the ratelimiter will store analytics data in redis, which you can check out at
162
167
  * https://upstash.com/ratelimit
163
168
  *
164
- * @default true
169
+ * @default false
165
170
  */
166
171
  analytics?: boolean;
167
172
  };
@@ -212,7 +217,7 @@ declare abstract class Ratelimit<TContext extends Context> {
212
217
  /**
213
218
  * Block until the request may pass or timeout is reached.
214
219
  *
215
- * This method returns a promsie that resolves as soon as the request may be processed
220
+ * This method returns a promise that resolves as soon as the request may be processed
216
221
  * or after the timeout has been reached.
217
222
  *
218
223
  * Use this if you want to delay the request until it is ready to get processed.
@@ -234,23 +239,20 @@ declare abstract class Ratelimit<TContext extends Context> {
234
239
  blockUntilReady: (identifier: string, timeout: number) => Promise<RatelimitResponse>;
235
240
  }
236
241
 
237
- type RegionRatelimitConfig = {
242
+ type MultiRegionRatelimitConfig = {
238
243
  /**
239
- * Instance of `@upstash/redis`
244
+ * Instances of `@upstash/redis`
240
245
  * @see https://github.com/upstash/upstash-redis#quick-start
241
246
  */
242
- redis: Redis;
247
+ redis: Redis[];
243
248
  /**
244
249
  * The ratelimiter function to use.
245
250
  *
246
251
  * Choose one of the predefined ones or implement your own.
247
252
  * Available algorithms are exposed via static methods:
248
- * - Ratelimiter.fixedWindow
249
- * - Ratelimiter.slidingLogs
250
- * - Ratelimiter.slidingWindow
251
- * - Ratelimiter.tokenBucket
253
+ * - MultiRegionRatelimit.fixedWindow
252
254
  */
253
- limiter: Algorithm<RegionContext>;
255
+ limiter: Algorithm<MultiRegionContext>;
254
256
  /**
255
257
  * All keys in redis are prefixed with this.
256
258
  *
@@ -271,7 +273,7 @@ type RegionRatelimitConfig = {
271
273
  * Set to `false` to disable.
272
274
  *
273
275
  * If left undefined, a map is created automatically, but it can only work
274
- * if the map or the ratelimit instance is created outside your serverless function handler.
276
+ * if the map or the ratelimit instance is created outside your serverless function handler.
275
277
  */
276
278
  ephemeralCache?: Map<string, number> | false;
277
279
  /**
@@ -293,21 +295,21 @@ type RegionRatelimitConfig = {
293
295
  *
294
296
  * @example
295
297
  * ```ts
296
- * const { limit } = new Ratelimit({
298
+ * const { limit } = new MultiRegionRatelimit({
297
299
  * redis: Redis.fromEnv(),
298
- * limiter: Ratelimit.slidingWindow(
299
- * "30 m", // interval of 30 minutes
300
+ * limiter: MultiRegionRatelimit.fixedWindow(
300
301
  * 10, // Allow 10 requests per window of 30 minutes
302
+ * "30 m", // interval of 30 minutes
301
303
  * )
302
304
  * })
303
305
  *
304
306
  * ```
305
307
  */
306
- declare class RegionRatelimit extends Ratelimit<RegionContext> {
308
+ declare class MultiRegionRatelimit extends Ratelimit<MultiRegionContext> {
307
309
  /**
308
310
  * Create a new Ratelimit instance by providing a `@upstash/redis` instance and the algorithm of your choice.
309
311
  */
310
- constructor(config: RegionRatelimitConfig);
312
+ constructor(config: MultiRegionRatelimitConfig);
311
313
  /**
312
314
  * Each request inside a fixed time increases a counter.
313
315
  * Once the counter reaches a maximum allowed number, all further requests are
@@ -334,7 +336,7 @@ declare class RegionRatelimit extends Ratelimit<RegionContext> {
334
336
  /**
335
337
  * The duration in which `tokens` requests are allowed.
336
338
  */
337
- window: Duration): Algorithm<RegionContext>;
339
+ window: Duration): Algorithm<MultiRegionContext>;
338
340
  /**
339
341
  * Combined approach of `slidingLogs` and `fixedWindow` with lower storage
340
342
  * costs than `slidingLogs` and improved boundary behavior by calculating a
@@ -359,86 +361,26 @@ declare class RegionRatelimit extends Ratelimit<RegionContext> {
359
361
  /**
360
362
  * The duration in which `tokens` requests are allowed.
361
363
  */
362
- window: Duration): Algorithm<RegionContext>;
363
- /**
364
- * You have a bucket filled with `{maxTokens}` tokens that refills constantly
365
- * at `{refillRate}` per `{interval}`.
366
- * Every request will remove one token from the bucket and if there is no
367
- * token to take, the request is rejected.
368
- *
369
- * **Pro:**
370
- *
371
- * - Bursts of requests are smoothed out and you can process them at a constant
372
- * rate.
373
- * - Allows to set a higher initial burst limit by setting `maxTokens` higher
374
- * than `refillRate`
375
- */
376
- static tokenBucket(
377
- /**
378
- * How many tokens are refilled per `interval`
379
- *
380
- * An interval of `10s` and refillRate of 5 will cause a new token to be added every 2 seconds.
381
- */
382
- refillRate: number,
383
- /**
384
- * The interval for the `refillRate`
385
- */
386
- interval: Duration,
387
- /**
388
- * Maximum number of tokens.
389
- * A newly created bucket starts with this many tokens.
390
- * Useful to allow higher burst limits.
391
- */
392
- maxTokens: number): Algorithm<RegionContext>;
393
- /**
394
- * cachedFixedWindow first uses the local cache to decide if a request may pass and then updates
395
- * it asynchronously.
396
- * This is experimental and not yet recommended for production use.
397
- *
398
- * @experimental
399
- *
400
- * Each requests inside a fixed time increases a counter.
401
- * Once the counter reaches a maxmimum allowed number, all further requests are
402
- * rejected.
403
- *
404
- * **Pro:**
405
- *
406
- * - Newer requests are not starved by old ones.
407
- * - Low storage cost.
408
- *
409
- * **Con:**
410
- *
411
- * A burst of requests near the boundary of a window can result in a very
412
- * high request rate because two windows will be filled with requests quickly.
413
- *
414
- * @param tokens - How many requests a user can make in each time window.
415
- * @param window - A fixed timeframe
416
- */
417
- static cachedFixedWindow(
418
- /**
419
- * How many requests are allowed per window.
420
- */
421
- tokens: number,
422
- /**
423
- * The duration in which `tokens` requests are allowed.
424
- */
425
- window: Duration): Algorithm<RegionContext>;
364
+ window: Duration): Algorithm<MultiRegionContext>;
426
365
  }
427
366
 
428
- type MultiRegionRatelimitConfig = {
367
+ type RegionRatelimitConfig = {
429
368
  /**
430
- * Instances of `@upstash/redis`
369
+ * Instance of `@upstash/redis`
431
370
  * @see https://github.com/upstash/upstash-redis#quick-start
432
371
  */
433
- redis: Redis[];
372
+ redis: Redis;
434
373
  /**
435
374
  * The ratelimiter function to use.
436
375
  *
437
376
  * Choose one of the predefined ones or implement your own.
438
377
  * Available algorithms are exposed via static methods:
439
- * - MultiRegionRatelimit.fixedWindow
378
+ * - Ratelimiter.fixedWindow
379
+ * - Ratelimiter.slidingLogs
380
+ * - Ratelimiter.slidingWindow
381
+ * - Ratelimiter.tokenBucket
440
382
  */
441
- limiter: Algorithm<MultiRegionContext>;
383
+ limiter: Algorithm<RegionContext>;
442
384
  /**
443
385
  * All keys in redis are prefixed with this.
444
386
  *
@@ -459,7 +401,7 @@ type MultiRegionRatelimitConfig = {
459
401
  * Set to `false` to disable.
460
402
  *
461
403
  * If left undefined, a map is created automatically, but it can only work
462
- * if the map or th ratelimit instance is created outside your serverless function handler.
404
+ * if the map or the ratelimit instance is created outside your serverless function handler.
463
405
  */
464
406
  ephemeralCache?: Map<string, number> | false;
465
407
  /**
@@ -481,21 +423,21 @@ type MultiRegionRatelimitConfig = {
481
423
  *
482
424
  * @example
483
425
  * ```ts
484
- * const { limit } = new MultiRegionRatelimit({
426
+ * const { limit } = new Ratelimit({
485
427
  * redis: Redis.fromEnv(),
486
- * limiter: MultiRegionRatelimit.fixedWindow(
487
- * 10, // Allow 10 requests per window of 30 minutes
428
+ * limiter: Ratelimit.slidingWindow(
488
429
  * "30 m", // interval of 30 minutes
430
+ * 10, // Allow 10 requests per window of 30 minutes
489
431
  * )
490
432
  * })
491
433
  *
492
434
  * ```
493
435
  */
494
- declare class MultiRegionRatelimit extends Ratelimit<MultiRegionContext> {
436
+ declare class RegionRatelimit extends Ratelimit<RegionContext> {
495
437
  /**
496
438
  * Create a new Ratelimit instance by providing a `@upstash/redis` instance and the algorithm of your choice.
497
439
  */
498
- constructor(config: MultiRegionRatelimitConfig);
440
+ constructor(config: RegionRatelimitConfig);
499
441
  /**
500
442
  * Each request inside a fixed time increases a counter.
501
443
  * Once the counter reaches a maximum allowed number, all further requests are
@@ -522,7 +464,7 @@ declare class MultiRegionRatelimit extends Ratelimit<MultiRegionContext> {
522
464
  /**
523
465
  * The duration in which `tokens` requests are allowed.
524
466
  */
525
- window: Duration): Algorithm<MultiRegionContext>;
467
+ window: Duration): Algorithm<RegionContext>;
526
468
  /**
527
469
  * Combined approach of `slidingLogs` and `fixedWindow` with lower storage
528
470
  * costs than `slidingLogs` and improved boundary behavior by calculating a
@@ -547,7 +489,70 @@ declare class MultiRegionRatelimit extends Ratelimit<MultiRegionContext> {
547
489
  /**
548
490
  * The duration in which `tokens` requests are allowed.
549
491
  */
550
- window: Duration): Algorithm<MultiRegionContext>;
492
+ window: Duration): Algorithm<RegionContext>;
493
+ /**
494
+ * You have a bucket filled with `{maxTokens}` tokens that refills constantly
495
+ * at `{refillRate}` per `{interval}`.
496
+ * Every request will remove one token from the bucket and if there is no
497
+ * token to take, the request is rejected.
498
+ *
499
+ * **Pro:**
500
+ *
501
+ * - Bursts of requests are smoothed out and you can process them at a constant
502
+ * rate.
503
+ * - Allows to set a higher initial burst limit by setting `maxTokens` higher
504
+ * than `refillRate`
505
+ */
506
+ static tokenBucket(
507
+ /**
508
+ * How many tokens are refilled per `interval`
509
+ *
510
+ * An interval of `10s` and refillRate of 5 will cause a new token to be added every 2 seconds.
511
+ */
512
+ refillRate: number,
513
+ /**
514
+ * The interval for the `refillRate`
515
+ */
516
+ interval: Duration,
517
+ /**
518
+ * Maximum number of tokens.
519
+ * A newly created bucket starts with this many tokens.
520
+ * Useful to allow higher burst limits.
521
+ */
522
+ maxTokens: number): Algorithm<RegionContext>;
523
+ /**
524
+ * cachedFixedWindow first uses the local cache to decide if a request may pass and then updates
525
+ * it asynchronously.
526
+ * This is experimental and not yet recommended for production use.
527
+ *
528
+ * @experimental
529
+ *
530
+ * Each request inside a fixed time increases a counter.
531
+ * Once the counter reaches a maximum allowed number, all further requests are
532
+ * rejected.
533
+ *
534
+ * **Pro:**
535
+ *
536
+ * - Newer requests are not starved by old ones.
537
+ * - Low storage cost.
538
+ *
539
+ * **Con:**
540
+ *
541
+ * A burst of requests near the boundary of a window can result in a very
542
+ * high request rate because two windows will be filled with requests quickly.
543
+ *
544
+ * @param tokens - How many requests a user can make in each time window.
545
+ * @param window - A fixed timeframe
546
+ */
547
+ static cachedFixedWindow(
548
+ /**
549
+ * How many requests are allowed per window.
550
+ */
551
+ tokens: number,
552
+ /**
553
+ * The duration in which `tokens` requests are allowed.
554
+ */
555
+ window: Duration): Algorithm<RegionContext>;
551
556
  }
552
557
 
553
558
  export { Algorithm, Analytics, AnalyticsConfig, MultiRegionRatelimit, MultiRegionRatelimitConfig, RegionRatelimit as Ratelimit, RegionRatelimitConfig as RatelimitConfig };