@upstash/ratelimit 1.0.1 → 1.0.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -3,6 +3,10 @@
3
3
  [![Tests](https://github.com/upstash/ratelimit/actions/workflows/tests.yaml/badge.svg)](https://github.com/upstash/ratelimit/actions/workflows/tests.yaml)
4
4
  ![npm (scoped)](https://img.shields.io/npm/v/@upstash/ratelimit)
5
5
 
6
+ > [!NOTE]
7
+ > **This project is in GA Stage.**
8
+ > The Upstash Professional Support fully covers this project. It receives regular updates and bug fixes. The Upstash team is committed to maintaining and improving its functionality.
9
+
6
10
  It is the only connectionless (HTTP based) rate limiting library and designed
7
11
  for:
8
12
 
@@ -15,44 +19,6 @@ for:
15
19
  - WebAssembly
16
20
  - and other environments where HTTP is preferred over TCP.
17
21
 
18
- <!-- toc -->
19
-
20
- - [Docs](#docs)
21
- - [Quick Start](#quick-start)
22
- - [Install](#install)
23
- - [npm](#npm)
24
- - [Deno](#deno)
25
- - [Create database](#create-database)
26
- - [Use it](#use-it)
27
- - [Block until ready](#block-until-ready)
28
- - [Ephemeral Cache](#ephemeral-cache)
29
- - [MultiRegion replicated ratelimiting](#multiregion-replicated-ratelimiting)
30
- - [Usage](#usage)
31
- - [Asynchronous synchronization between databases](#asynchronous-synchronization-between-databases)
32
- - [Example](#example)
33
- - [Ratelimiting algorithms](#ratelimiting-algorithms)
34
- - [Fixed Window](#fixed-window)
35
- - [Pros:](#pros)
36
- - [Cons:](#cons)
37
- - [Usage:](#usage)
38
- - [Sliding Window](#sliding-window)
39
- - [Pros:](#pros-1)
40
- - [Cons:](#cons-1)
41
- - [Usage:](#usage-1)
42
- - [Token Bucket](#token-bucket)
43
- - [Pros:](#pros-2)
44
- - [Cons:](#cons-2)
45
- - [Usage:](#usage-2)
46
- - [Contributing](#contributing)
47
- - [Database](#database)
48
- - [Running tests](#running-tests)
49
-
50
- <!-- tocstop -->
51
-
52
- ## Docs
53
-
54
- [doc.deno.land](https://deno.land/x/upstash_ratelimit/packages/sdk/src/index.ts)
55
-
56
22
  ## Quick Start
57
23
 
58
24
  ### Install
@@ -73,7 +39,7 @@ import { Ratelimit } from "https://cdn.skypack.dev/@upstash/ratelimit@latest"
73
39
 
74
40
  Create a new redis database on [upstash](https://console.upstash.com/)
75
41
 
76
- ### Use it
42
+ ### Basic Usage
77
43
 
78
44
  See [here](https://github.com/upstash/upstash-redis#quick-start) for
79
45
  documentation on how to create a redis instance.
@@ -163,322 +129,11 @@ export type RatelimitResponse = {
163
129
  };
164
130
  ````
165
131
 
166
- ### Timeout
167
-
168
- You can define an optional timeout in milliseconds, after which the request will
169
- be allowed to pass regardless of what the current limit is. This can be useful
170
- if you don't want network issues to cause your application to reject requests.
171
-
172
- ```ts
173
- const ratelimit = new Ratelimit({
174
- redis: Redis.fromEnv(),
175
- limiter: Ratelimit.slidingWindow(10, "10 s"),
176
- timeout: 1000, // 1 second
177
- analytics: true
178
- });
179
- ```
180
-
181
- ### Block until ready
182
-
183
- In case you don't want to reject a request immediately but wait until it can be
184
- processed, we also provide
185
-
186
- ```ts
187
- ratelimit.blockUntilReady(identifier: string, timeout: number): Promise<RatelimitResponse>
188
- ```
189
-
190
- It is very similar to the `limit` method and takes an identifier and returns the
191
- same response. However if the current limit has already been exceeded, it will
192
- automatically wait until the next window starts and will try again. Setting the
193
- timeout parameter (in milliseconds) will cause the returned Promise to resolve
194
- in a finite amount of time.
195
-
196
- ```ts
197
- // Create a new ratelimiter, that allows 10 requests per 10 seconds
198
- const ratelimit = new Ratelimit({
199
- redis: Redis.fromEnv(),
200
- limiter: Ratelimit.slidingWindow(10, "10 s"),
201
- analytics: true
202
- });
203
-
204
- // `blockUntilReady` returns a promise that resolves as soon as the request is allowed to be processed, or after 30 seconds
205
- const { success } = await ratelimit.blockUntilReady("id", 30_000);
206
-
207
- if (!success) {
208
- return "Unable to process, even after 30 seconds";
209
- }
210
- doExpensiveCalculation();
211
- return "Here you go!";
212
- ```
213
-
214
- ### Ephemeral Cache
215
-
216
- For extreme load or denial of service attacks, it might be too expensive to call
217
- redis for every incoming request, just to find out it should be blocked because
218
- they have exceeded the limit.
219
-
220
- You can use an ephemeral in memory cache by passing the `ephemeralCache` option:
221
-
222
- ```ts
223
- const cache = new Map(); // must be outside of your serverless function handler
224
-
225
- // ...
226
-
227
- const ratelimit = new Ratelimit({
228
- // ...
229
- ephemeralCache: cache,
230
- });
231
- ```
232
-
233
- If enabled, the ratelimiter will keep a global cache of identifiers and their
234
- reset timestamps, that have exhausted their ratelimit. In serverless
235
- environments this is only possible if you create the cache or ratelimiter
236
- instance outside of your handler function. While the function is still hot, the
237
- ratelimiter can block requests without having to request data from redis, thus
238
- saving time and money.
239
-
240
-
241
- ## Using multiple limits
242
-
243
- Sometimes you might want to apply different limits to different users. For example you might want to allow 10 requests per 10 seconds for free users, but 60 requests per 10 seconds for paid users.
244
-
245
- Here's how you could do that:
246
-
247
- ```ts
248
- import { Redis } from "@upstash/redis"
249
- import { Ratelimit } from "@upstash/ratelimit"
250
-
251
- const redis = Redis.fromEnv()
252
-
253
- const ratelimit = {
254
- free: new Ratelimit({
255
- redis,
256
- analytics: true,
257
- prefix: "ratelimit:free",
258
- limiter: Ratelimit.slidingWindow(10, "10s"),
259
- }),
260
- paid: new Ratelimit({
261
- redis,
262
- analytics: true,
263
- prefix: "ratelimit:paid",
264
- limiter: Ratelimit.slidingWindow(60, "10s"),
265
- })
266
- }
267
-
268
-
269
- await ratelimit.free.limit(ip)
270
- // or for a paid user you might have an email or userId available:
271
- await ratelimit.paid.limit(userId)
272
-
273
- ```
274
-
275
- ## MultiRegion replicated ratelimiting
276
-
277
- Using a single redis instance has the downside of providing low latencies only
278
- to the part of your userbase closest to the deployed db. That's why we also
279
- built `MultiRegionRatelimit` which replicates the state across multiple redis
280
- databases as well as offering lower latencies to more of your users.
281
-
282
- `MultiRegionRatelimit` does this by checking the current limit in the closest db
283
- and returning immediately. Only afterwards will the state be asynchronously
284
- replicated to the other databases leveraging
285
- [CRDTs](https://en.wikipedia.org/wiki/Conflict-free_replicated_data_type). Due
286
- to the nature of distributed systems, there is no way to guarantee the set
287
- ratelimit is not exceeded by a small margin. This is the tradeoff for reduced
288
- global latency.
289
-
290
- ### Usage
291
-
292
- The api is the same, except for asking for multiple redis instances:
293
-
294
- ```ts
295
- import { MultiRegionRatelimit } from "@upstash/ratelimit"; // for deno: see above
296
- import { Redis } from "@upstash/redis";
297
-
298
- // Create a new ratelimiter, that allows 10 requests per 10 seconds
299
- const ratelimit = new MultiRegionRatelimit({
300
- redis: [
301
- new Redis({
302
- /* auth */
303
- }),
304
- new Redis({
305
- /* auth */
306
- }),
307
- new Redis({
308
- /* auth */
309
- }),
310
- ],
311
- limiter: MultiRegionRatelimit.slidingWindow(10, "10 s"),
312
- analytics: true
313
- });
314
-
315
- // Use a constant string to limit all requests with a single ratelimit
316
- // Or use a userID, apiKey or ip address for individual limits.
317
- const identifier = "api";
318
- const { success } = await ratelimit.limit(identifier);
319
- ```
320
-
321
- ### Asynchronous synchronization between databases
322
-
323
- The MultiRegion setup will do some synchronization between databases after
324
- returning the current limit. This can lead to problems on Cloudflare Workers and
325
- therefore Vercel Edge functions, because dangling promises must be taken care
326
- of:
327
-
328
- **Vercel Edge:**
329
- [docs](https://nextjs.org/docs/api-reference/next/server#nextfetchevent)
330
-
331
- ```ts
332
- const { pending } = await ratelimit.limit("id");
333
- event.waitUntil(pending);
334
- ```
335
-
336
- **Cloudflare Worker:**
337
- [docs](https://developers.cloudflare.com/workers/runtime-apis/fetch-event/#syntax-module-worker)
338
-
339
- ```ts
340
- const { pending } = await ratelimit.limit("id");
341
- context.waitUntil(pending);
342
- ```
343
-
344
- ### Example
345
-
346
- Let's assume you have customers in the US and Europe. In this case you can
347
- create 2 regional redis databases on [Upstash](https://console.upstash.com) and
348
- your users will enjoy the latency of whichever db is closest to them.
349
-
350
- ## Ratelimiting algorithms
351
-
352
- We provide different algorithms to use out of the box. Each has pros and cons.
353
-
354
- ### Fixed Window
355
-
356
- This algorithm divides time into fixed durations/windows. For example each
357
- window is 10 seconds long. When a new request comes in, the current time is used
358
- to determine the window and a counter is increased. If the counter is larger
359
- than the set limit, the request is rejected.
360
-
361
- #### Pros:
362
-
363
- - Very cheap in terms of data size and computation
364
- - Newer requests are not starved due to a high burst in the past
365
-
366
- #### Cons:
367
-
368
- - Can cause high bursts at the window boundaries to leak through
369
- - Causes request stampedes if many users are trying to access your server,
370
- whenever a new window begins
371
-
372
- #### Usage:
373
-
374
- Create a new ratelimiter, that allows 10 requests per 10 seconds.
375
-
376
- ```ts
377
- const ratelimit = new Ratelimit({
378
- redis: Redis.fromEnv(),
379
- limiter: Ratelimit.fixedWindow(10, "10 s"),
380
- analytics: true
381
- });
382
- ```
383
-
384
- ### Sliding Window
385
-
386
- Builds on top of fixed window but instead of a fixed window, we use a rolling
387
- window. Take this example: We have a rate limit of 10 requests per 1 minute. We
388
- divide time into 1 minute slices, just like in the fixed window algorithm.
389
- Window 1 will be from 00:00:00 to 00:01:00 (HH:MM:SS). Let's assume it is
390
- currently 00:01:15 and we have received 4 requests in the first window and 5
391
- requests so far in the current window. The approximation to determine if the
392
- request should pass works like this:
393
-
394
- ```ts
395
- limit = 10
396
-
397
- // 4 request from the old window, weighted + requests in current window
398
- rate = 4 * ((60 - 15) / 60) + 5 = 8
399
-
400
- return rate < limit // True means we should allow the request
401
- ```
402
-
403
- #### Pros:
404
-
405
- - Solves the issue near boundary from fixed window.
406
-
407
- #### Cons:
408
-
409
- - More expensive in terms of storage and computation
410
- - Is only an approximation, because it assumes a uniform request flow in the
411
- previous window, but this is fine in most cases
412
-
413
- #### Usage:
414
-
415
- Create a new ratelimiter, that allows 10 requests per 10 seconds.
416
-
417
- ```ts
418
- const ratelimit = new Ratelimit({
419
- redis: Redis.fromEnv(),
420
- limiter: Ratelimit.slidingWindow(10, "10 s"),
421
- analytics: true
422
- });
423
- ```
424
-
425
- ### Token Bucket
426
-
427
- _Not yet supported for `MultiRegionRatelimit`_
428
-
429
- Consider a bucket filled with `{maxTokens}` tokens that refills constantly at
430
- `{refillRate}` per `{interval}`. Every request will remove one token from the
431
- bucket and if there is no token to take, the request is rejected.
432
-
433
- #### Pros:
434
-
435
- - Bursts of requests are smoothed out and you can process them at a constant
436
- rate.
437
- - Allows to set a higher initial burst limit by setting `maxTokens` higher than
438
- `refillRate`
439
-
440
- #### Cons:
441
-
442
- - Expensive in terms of computation
443
-
444
- #### Usage:
445
-
446
- Create a new bucket, that refills 5 tokens every 10 seconds and has a maximum
447
- size of 10.
448
-
449
- ```ts
450
- const ratelimit = new Ratelimit({
451
- redis: Redis.fromEnv(),
452
- limiter: Ratelimit.tokenBucket(5, "10 s", 10),
453
- analytics: true
454
- });
455
- ```
456
-
457
- ## Analytics
458
-
459
- You can enable analytics to get a better understanding of how your ratelimiting
460
- is performing. This is done by setting `analytics: true` in the options.
461
-
462
- All data is stored in the same Redis database and writing analytics uses 1 command per `.limit` invocation.
463
-
464
- ```ts
465
- const ratelimit = new Ratelimit({
466
- redis: Redis.fromEnv(),
467
- limiter: Ratelimit.tokenBucket(5, "10 s", 10),
468
- analytics: true // <- Enable analytics
469
- });
470
- ```
471
-
472
- Go to the [Ratelimit Dashboard](https://console.upstash.com/ratelimit) and select the database you are using.
473
-
474
- If you are using a custom prefix, you need to use the same in the dashboard's top right corner.
475
-
476
- ![Ratelimit Dashboard](/.github/img/dashboard.png)
477
-
132
+ ### Docs
133
+ See [the documentation](https://upstash.com/docs/oss/sdks/ts/ratelimit/overview) for details.
478
134
 
479
135
  ## Contributing
480
136
 
481
-
482
137
  ### Database
483
138
 
484
139
  Create a new redis database on [upstash](https://console.upstash.com/) and copy
package/dist/index.d.mts CHANGED
@@ -61,7 +61,7 @@ type RatelimitResponse = {
61
61
  */
62
62
  pending: Promise<unknown>;
63
63
  };
64
- type Algorithm<TContext> = (ctx: TContext, identifier: string, opts?: {
64
+ type Algorithm<TContext> = (ctx: TContext, identifier: string, rate?: number, opts?: {
65
65
  cache?: EphemeralCache;
66
66
  }) => Promise<RatelimitResponse>;
67
67
  /**
@@ -69,6 +69,9 @@ type Algorithm<TContext> = (ctx: TContext, identifier: string, opts?: {
69
69
  */
70
70
  interface Redis {
71
71
  sadd: <TData>(key: string, ...members: TData[]) => Promise<number>;
72
+ hset: <TValue>(key: string, obj: {
73
+ [key: string]: TValue;
74
+ }) => Promise<number>;
72
75
  eval: <TArgs extends unknown[], TData = unknown>(...args: [script: string, keys: string[], args: TArgs]) => Promise<TData>;
73
76
  }
74
77
 
@@ -209,9 +212,27 @@ declare abstract class Ratelimit<TContext extends Context> {
209
212
  * }
210
213
  * return "Yes"
211
214
  * ```
215
+ *
216
+ * @param req.rate - The rate at which tokens will be added or consumed from the token bucket. A higher rate allows for more requests to be processed. Defaults to 1 token per interval if not specified.
217
+ *
218
+ * Usage with `req.rate`
219
+ * @example
220
+ * ```ts
221
+ * const ratelimit = new Ratelimit({
222
+ * redis: Redis.fromEnv(),
223
+ * limiter: Ratelimit.slidingWindow(100, "10 s")
224
+ * })
225
+ *
226
+ * const { success } = await ratelimit.limit(id, {rate: 10})
227
+ * if (!success){
228
+ * return "Nope"
229
+ * }
230
+ * return "Yes"
231
+ * ```
212
232
  */
213
233
  limit: (identifier: string, req?: {
214
234
  geo?: Geo;
235
+ rate?: number;
215
236
  }) => Promise<RatelimitResponse>;
216
237
  /**
217
238
  * Block until the request may pass or timeout is reached.
@@ -285,7 +306,7 @@ type MultiRegionRatelimitConfig = {
285
306
  * If enabled, the ratelimiter will store analytics data in redis, which you can check out at
286
307
  * https://console.upstash.com/ratelimit
287
308
  *
288
- * @default true
309
+ * @default false
289
310
  */
290
311
  analytics?: boolean;
291
312
  };
@@ -412,7 +433,7 @@ type RegionRatelimitConfig = {
412
433
  * If enabled, the ratelimiter will store analytics data in redis, which you can check out at
413
434
  * https://console.upstash.com/ratelimit
414
435
  *
415
- * @default true
436
+ * @default false
416
437
  */
417
438
  analytics?: boolean;
418
439
  };
package/dist/index.d.ts CHANGED
@@ -61,7 +61,7 @@ type RatelimitResponse = {
61
61
  */
62
62
  pending: Promise<unknown>;
63
63
  };
64
- type Algorithm<TContext> = (ctx: TContext, identifier: string, opts?: {
64
+ type Algorithm<TContext> = (ctx: TContext, identifier: string, rate?: number, opts?: {
65
65
  cache?: EphemeralCache;
66
66
  }) => Promise<RatelimitResponse>;
67
67
  /**
@@ -69,6 +69,9 @@ type Algorithm<TContext> = (ctx: TContext, identifier: string, opts?: {
69
69
  */
70
70
  interface Redis {
71
71
  sadd: <TData>(key: string, ...members: TData[]) => Promise<number>;
72
+ hset: <TValue>(key: string, obj: {
73
+ [key: string]: TValue;
74
+ }) => Promise<number>;
72
75
  eval: <TArgs extends unknown[], TData = unknown>(...args: [script: string, keys: string[], args: TArgs]) => Promise<TData>;
73
76
  }
74
77
 
@@ -209,9 +212,27 @@ declare abstract class Ratelimit<TContext extends Context> {
209
212
  * }
210
213
  * return "Yes"
211
214
  * ```
215
+ *
216
+ * @param req.rate - The rate at which tokens will be added or consumed from the token bucket. A higher rate allows for more requests to be processed. Defaults to 1 token per interval if not specified.
217
+ *
218
+ * Usage with `req.rate`
219
+ * @example
220
+ * ```ts
221
+ * const ratelimit = new Ratelimit({
222
+ * redis: Redis.fromEnv(),
223
+ * limiter: Ratelimit.slidingWindow(100, "10 s")
224
+ * })
225
+ *
226
+ * const { success } = await ratelimit.limit(id, {rate: 10})
227
+ * if (!success){
228
+ * return "Nope"
229
+ * }
230
+ * return "Yes"
231
+ * ```
212
232
  */
213
233
  limit: (identifier: string, req?: {
214
234
  geo?: Geo;
235
+ rate?: number;
215
236
  }) => Promise<RatelimitResponse>;
216
237
  /**
217
238
  * Block until the request may pass or timeout is reached.
@@ -285,7 +306,7 @@ type MultiRegionRatelimitConfig = {
285
306
  * If enabled, the ratelimiter will store analytics data in redis, which you can check out at
286
307
  * https://console.upstash.com/ratelimit
287
308
  *
288
- * @default true
309
+ * @default false
289
310
  */
290
311
  analytics?: boolean;
291
312
  };
@@ -412,7 +433,7 @@ type RegionRatelimitConfig = {
412
433
  * If enabled, the ratelimiter will store analytics data in redis, which you can check out at
413
434
  * https://console.upstash.com/ratelimit
414
435
  *
415
- * @default true
436
+ * @default false
416
437
  */
417
438
  analytics?: boolean;
418
439
  };