@upstash/ratelimit 1.0.1 → 1.1.0-canary-1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -1,7 +1,10 @@
  # Upstash Rate Limit

  [![Tests](https://github.com/upstash/ratelimit/actions/workflows/tests.yaml/badge.svg)](https://github.com/upstash/ratelimit/actions/workflows/tests.yaml)
- ![npm (scoped)](https://img.shields.io/npm/v/@upstash/ratelimit)
+ [![npm (scoped)](https://img.shields.io/npm/v/@upstash/ratelimit)](https://www.npmjs.com/package/@upstash/ratelimit)
+
+ > [!NOTE]
+ > **This project is in GA Stage.**
+ > The Upstash Professional Support fully covers this project. It receives regular updates and bug fixes. The Upstash team is committed to maintaining and improving its functionality.

  It is the only connectionless (HTTP-based) rate limiting library, designed
  for:
@@ -15,44 +18,6 @@ for:
  - WebAssembly
  - and other environments where HTTP is preferred over TCP.

- <!-- toc -->
-
- - [Docs](#docs)
- - [Quick Start](#quick-start)
- - [Install](#install)
- - [npm](#npm)
- - [Deno](#deno)
- - [Create database](#create-database)
- - [Use it](#use-it)
- - [Block until ready](#block-until-ready)
- - [Ephemeral Cache](#ephemeral-cache)
- - [MultiRegion replicated ratelimiting](#multiregion-replicated-ratelimiting)
- - [Usage](#usage)
- - [Asynchronous synchronization between databases](#asynchronous-synchronization-between-databases)
- - [Example](#example)
- - [Ratelimiting algorithms](#ratelimiting-algorithms)
- - [Fixed Window](#fixed-window)
- - [Pros:](#pros)
- - [Cons:](#cons)
- - [Usage:](#usage)
- - [Sliding Window](#sliding-window)
- - [Pros:](#pros-1)
- - [Cons:](#cons-1)
- - [Usage:](#usage-1)
- - [Token Bucket](#token-bucket)
- - [Pros:](#pros-2)
- - [Cons:](#cons-2)
- - [Usage:](#usage-2)
- - [Contributing](#contributing)
- - [Database](#database)
- - [Running tests](#running-tests)
-
- <!-- tocstop -->
-
- ## Docs
-
- [doc.deno.land](https://deno.land/x/upstash_ratelimit/packages/sdk/src/index.ts)
-

  ## Quick Start

  ### Install
@@ -66,14 +31,14 @@ npm install @upstash/ratelimit
  #### Deno

  ```ts
- import { Ratelimit } from "https://cdn.skypack.dev/@upstash/ratelimit@latest"
+ import { Ratelimit } from "https://cdn.skypack.dev/@upstash/ratelimit@latest";
  ```

  ### Create database

  Create a new redis database on [upstash](https://console.upstash.com/)

- ### Use it
+ ### Basic Usage

  See [here](https://github.com/upstash/upstash-redis#quick-start) for
  documentation on how to create a redis instance.
@@ -139,346 +104,54 @@ export type RatelimitResponse = {

  /**
   * For the MultiRegion setup we do some synchronizing in the background, after returning the current limit.
+ * Or when analytics is enabled, we send the analytics asynchronously after returning the limit.
  * In most cases you can simply ignore this.
  *
- * On Vercel Edge or Cloudflare workers, you need to explicitely handle the pending Promise like this:
- *
- * **Vercel Edge:**
- * https://nextjs.org/docs/api-reference/next/server#nextfetchevent
+ * On Vercel Edge or Cloudflare workers, you need to explicitly handle the pending Promise like this:
  *
  * ```ts
  * const { pending } = await ratelimit.limit("id")
- * event.waitUntil(pending)
+ * context.waitUntil(pending)
  * ```
  *
- * **Cloudflare Worker:**
- * https://developers.cloudflare.com/workers/runtime-apis/fetch-event/#syntax-module-worker
- *
- * ```ts
- * const { pending } = await ratelimit.limit("id")
- * context.waitUntil(pending)
+ * See `waitUntil` documentation in
+ * [Cloudflare](https://developers.cloudflare.com/workers/runtime-apis/handlers/fetch/#contextwaituntil)
+ * and [Vercel](https://vercel.com/docs/functions/edge-middleware/middleware-api#waituntil)
+ * for more details.
  * ```
  */
  pending: Promise<unknown>;
  };
  ````

- ### Timeout
-
- You can define an optional timeout in milliseconds, after which the request will
- be allowed to pass regardless of what the current limit is. This can be useful
- if you don't want network issues to cause your application to reject requests.
-
- ```ts
- const ratelimit = new Ratelimit({
-   redis: Redis.fromEnv(),
-   limiter: Ratelimit.slidingWindow(10, "10 s"),
-   timeout: 1000, // 1 second
-   analytics: true
- });
- ```
-
- ### Block until ready
-
- In case you don't want to reject a request immediately but wait until it can be
- processed, we also provide
+ ### Using with Cloudflare Workers and Vercel Edge

- ```ts
- ratelimit.blockUntilReady(identifier: string, timeout: number): Promise<RatelimitResponse>
- ```
-
- It is very similar to the `limit` method and takes an identifier and returns the
- same response. However if the current limit has already been exceeded, it will
- automatically wait until the next window starts and will try again. Setting the
- timeout parameter (in milliseconds) will cause the returned Promise to resolve
- in a finite amount of time.
-
- ```ts
- // Create a new ratelimiter, that allows 10 requests per 10 seconds
- const ratelimit = new Ratelimit({
-   redis: Redis.fromEnv(),
-   limiter: Ratelimit.slidingWindow(10, "10 s"),
-   analytics: true
- });
+ When using Cloudflare Workers and Vercel Edge, we need to make sure that the
+ rate limiting operations complete correctly before the runtime ends after
+ returning the response.

- // `blockUntilReady` returns a promise that resolves as soon as the request is allowed to be processed, or after 30 seconds
- const { success } = await ratelimit.blockUntilReady("id", 30_000);
+ This is important in two cases where we do some operations in the background asynchronously after `limit` is called:

- if (!success) {
-   return "Unable to process, even after 30 seconds";
- }
- doExpensiveCalculation();
- return "Here you go!";
- ```
-
- ### Ephemeral Cache
+ 1. Using MultiRegion: synchronize Redis instances in different regions
+ 2. Enabling analytics: send analytics to Redis

- For extreme load or denial of service attacks, it might be too expensive to call
- redis for every incoming request, just to find out it should be blocked because
- they have exceeded the limit.
+ In these cases, we need to wait for these operations to finish before sending the response to the user. Otherwise, the runtime will end before the background work completes.

- You can use an ephemeral in memory cache by passing the `ephemeralCache` option:
-
- ```ts
- const cache = new Map(); // must be outside of your serverless function handler
-
- // ...
-
- const ratelimit = new Ratelimit({
-   // ...
-   ephemeralCache: cache,
- });
- ```
-
- If enabled, the ratelimiter will keep a global cache of identifiers and their
- reset timestamps, that have exhausted their ratelimit. In serverless
- environments this is only possible if you create the cache or ratelimiter
- instance outside of your handler function. While the function is still hot, the
- ratelimiter can block requests without having to request data from redis, thus
- saving time and money.
-
-
- ## Using multiple limits
-
- Sometimes you might want to apply different limits to different users. For example you might want to allow 10 requests per 10 seconds for free users, but 60 requests per 10 seconds for paid users.
-
- Here's how you could do that:
-
- ```ts
- import { Redis } from "@upstash/redis"
- import { Ratelimit } from "@upstash/ratelimit"
-
- const redis = Redis.fromEnv()
-
- const ratelimit = {
-   free: new Ratelimit({
-     redis,
-     analytics: true,
-     prefix: "ratelimit:free",
-     limiter: Ratelimit.slidingWindow(10, "10s"),
-   }),
-   paid: new Ratelimit({
-     redis,
-     analytics: true,
-     prefix: "ratelimit:paid",
-     limiter: Ratelimit.slidingWindow(60, "10s"),
-   })
- }
-
-
- await ratelimit.free.limit(ip)
- // or for a paid user you might have an email or userId available:
- await ratelimit.paid.limit(userId)
-
- ```
-
- ## MultiRegion replicated ratelimiting
-
- Using a single redis instance has the downside of providing low latencies only
- to the part of your userbase closest to the deployed db. That's why we also
- built `MultiRegionRatelimit` which replicates the state across multiple redis
- databases as well as offering lower latencies to more of your users.
-
- `MultiRegionRatelimit` does this by checking the current limit in the closest db
- and returning immediately. Only afterwards will the state be asynchronously
- replicated to the other databases leveraging
- [CRDTs](https://en.wikipedia.org/wiki/Conflict-free_replicated_data_type). Due
- to the nature of distributed systems, there is no way to guarantee the set
- ratelimit is not exceeded by a small margin. This is the tradeoff for reduced
- global latency.
-
- ### Usage
-
- The api is the same, except for asking for multiple redis instances:
-
- ```ts
- import { MultiRegionRatelimit } from "@upstash/ratelimit"; // for deno: see above
- import { Redis } from "@upstash/redis";
-
- // Create a new ratelimiter, that allows 10 requests per 10 seconds
- const ratelimit = new MultiRegionRatelimit({
-   redis: [
-     new Redis({
-       /* auth */
-     }),
-     new Redis({
-       /* auth */
-     }),
-     new Redis({
-       /* auth */
-     }),
-   ],
-   limiter: MultiRegionRatelimit.slidingWindow(10, "10 s"),
-   analytics: true
- });
-
- // Use a constant string to limit all requests with a single ratelimit
- // Or use a userID, apiKey or ip address for individual limits.
- const identifier = "api";
- const { success } = await ratelimit.limit(identifier);
- ```
-
- ### Asynchronous synchronization between databases
-
- The MultiRegion setup will do some synchronization between databases after
- returning the current limit. This can lead to problems on Cloudflare Workers and
- therefore Vercel Edge functions, because dangling promises must be taken care
- of:
-
- **Vercel Edge:**
- [docs](https://nextjs.org/docs/api-reference/next/server#nextfetchevent)
-
- ```ts
- const { pending } = await ratelimit.limit("id");
- event.waitUntil(pending);
- ```
-
- **Cloudflare Worker:**
- [docs](https://developers.cloudflare.com/workers/runtime-apis/fetch-event/#syntax-module-worker)
+ To wait for these operations to finish, use the `pending` promise:

  ```ts
  const { pending } = await ratelimit.limit("id");
  context.waitUntil(pending);
  ```

- ### Example
-
- Let's assume you have customers in the US and Europe. In this case you can
- create 2 regional redis databases on [Upstash](https://console.upstash.com) and
- your users will enjoy the latency of whichever db is closest to them.
-
- ## Ratelimiting algorithms
-
- We provide different algorithms to use out of the box. Each has pros and cons.
-
- ### Fixed Window
-
- This algorithm divides time into fixed durations/windows. For example each
- window is 10 seconds long. When a new request comes in, the current time is used
- to determine the window and a counter is increased. If the counter is larger
- than the set limit, the request is rejected.
-
- #### Pros:
-
- - Very cheap in terms of data size and computation
- - Newer requests are not starved due to a high burst in the past
-
- #### Cons:
-
- - Can cause high bursts at the window boundaries to leak through
- - Causes request stampedes if many users are trying to access your server,
-   whenever a new window begins
-
- #### Usage:
-
- Create a new ratelimiter, that allows 10 requests per 10 seconds.
-
- ```ts
- const ratelimit = new Ratelimit({
-   redis: Redis.fromEnv(),
-   limiter: Ratelimit.fixedWindow(10, "10 s"),
-   analytics: true
- });
- ```
-
- ### Sliding Window
-
- Builds on top of fixed window but instead of a fixed window, we use a rolling
- window. Take this example: We have a rate limit of 10 requests per 1 minute. We
- divide time into 1 minute slices, just like in the fixed window algorithm.
- Window 1 will be from 00:00:00 to 00:01:00 (HH:MM:SS). Let's assume it is
- currently 00:01:15 and we have received 4 requests in the first window and 5
- requests so far in the current window. The approximation to determine if the
- request should pass works like this:
-
- ```ts
- limit = 10
-
- // 4 request from the old window, weighted + requests in current window
- rate = 4 * ((60 - 15) / 60) + 5 = 8
-
- return rate < limit // True means we should allow the request
- ```
-
- #### Pros:
-
- - Solves the issue near boundary from fixed window.
-
- #### Cons:
-
- - More expensive in terms of storage and computation
- - Is only an approximation, because it assumes a uniform request flow in the
-   previous window, but this is fine in most cases
-
- #### Usage:
-
- Create a new ratelimiter, that allows 10 requests per 10 seconds.
-
- ```ts
- const ratelimit = new Ratelimit({
-   redis: Redis.fromEnv(),
-   limiter: Ratelimit.slidingWindow(10, "10 s"),
-   analytics: true
- });
- ```
-
- ### Token Bucket
+ See `waitUntil` documentation in [Cloudflare](https://developers.cloudflare.com/workers/runtime-apis/handlers/fetch/#contextwaituntil) and [Vercel](https://vercel.com/docs/functions/edge-middleware/middleware-api#waituntil) for more details.

- _Not yet supported for `MultiRegionRatelimit`_
-
- Consider a bucket filled with `{maxTokens}` tokens that refills constantly at
- `{refillRate}` per `{interval}`. Every request will remove one token from the
- bucket and if there is no token to take, the request is rejected.
-
- #### Pros:
-
- - Bursts of requests are smoothed out and you can process them at a constant
-   rate.
- - Allows to set a higher initial burst limit by setting `maxTokens` higher than
-   `refillRate`
-
- #### Cons:
-
- - Expensive in terms of computation
-
- #### Usage:
-
- Create a new bucket, that refills 5 tokens every 10 seconds and has a maximum
- size of 10.
-
- ```ts
- const ratelimit = new Ratelimit({
-   redis: Redis.fromEnv(),
-   limiter: Ratelimit.tokenBucket(5, "10 s", 10),
-   analytics: true
- });
- ```
-
- ## Analytics
-
- You can enable analytics to get a better understanding of how your ratelimiting
- is performing. This is done by setting `analytics: true` in the options.
-
- All data is stored in the same Redis database and writing analytics uses 1 command per `.limit` invocation.
-
- ```ts
- const ratelimit = new Ratelimit({
-   redis: Redis.fromEnv(),
-   limiter: Ratelimit.tokenBucket(5, "10 s", 10),
-   analytics: true // <- Enable analytics
- });
- ```
-
- Go to the [Ratelimit Dashboard](https://console.upstash.com/ratelimit) and select the database you are using.
-
- If you are using a custom prefix, you need to use the same in the dashboard's top right corner.
-
- ![Ratelimit Dashboard](/.github/img/dashboard.png)
+ ### Docs

+ See [the documentation](https://upstash.com/docs/oss/sdks/ts/ratelimit/overview) for details.

  ## Contributing

-
  ### Database

  Create a new redis database on [upstash](https://console.upstash.com/) and copy
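Before moving on to the type declarations, here is a minimal sketch of the pattern the new README section above describes, assuming a module-syntax Cloudflare Worker (the `Env` binding names are the defaults expected by `Redis.fromEnv`, and `ExecutionContext` comes from `@cloudflare/workers-types`):

```ts
import { Ratelimit } from "@upstash/ratelimit";
import { Redis } from "@upstash/redis/cloudflare";

type Env = {
  UPSTASH_REDIS_REST_URL: string;
  UPSTASH_REDIS_REST_TOKEN: string;
};

export default {
  async fetch(request: Request, env: Env, ctx: ExecutionContext): Promise<Response> {
    const ratelimit = new Ratelimit({
      redis: Redis.fromEnv(env),
      limiter: Ratelimit.slidingWindow(10, "10 s"),
      analytics: true, // analytics writes now happen in the background via `pending`
    });

    const { success, pending } = await ratelimit.limit("api");
    // Keep the Worker alive until the background work (multi-region sync,
    // analytics) finishes, without delaying the response itself.
    ctx.waitUntil(pending);

    return success
      ? new Response("Here you go!")
      : new Response("Too many requests", { status: 429 });
  },
};
```

Vercel Edge middleware exposes an equivalent `waitUntil` on its event argument, as the links above describe.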
package/dist/index.d.mts CHANGED
@@ -10,6 +10,8 @@ interface EphemeralCache {
  set: (key: string, value: number) => void;
  get: (key: string) => number | null;
  incr: (key: string) => number;
+ pop: (key: string) => void;
+ empty: () => void;
  }
  type RegionContext = {
  redis: Redis;
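The `EphemeralCache` interface gains `pop` and `empty` in the hunk above. As a structural illustration only (the library ships its own implementation; this sketch merely satisfies the expanded shape with a `Map`):

```ts
// Hypothetical Map-backed cache matching the expanded EphemeralCache shape.
const store = new Map<string, number>();

const cache = {
  set: (key: string, value: number) => void store.set(key, value),
  get: (key: string) => store.get(key) ?? null,
  incr: (key: string) => {
    const next = (store.get(key) ?? 0) + 1;
    store.set(key, next);
    return next;
  },
  pop: (key: string) => void store.delete(key), // new: evict one identifier
  empty: () => store.clear(),                   // new: clear the whole cache
};
```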
@@ -39,36 +41,39 @@ type RatelimitResponse = {
  reset: number;
  /**
   * For the MultiRegion setup we do some synchronizing in the background, after returning the current limit.
+ * Or when analytics is enabled, we send the analytics asynchronously after returning the limit.
  * In most cases you can simply ignore this.
  *
  * On Vercel Edge or Cloudflare workers, you need to explicitly handle the pending Promise like this:
  *
- * **Vercel Edge:**
- * https://nextjs.org/docs/api-reference/next/server#nextfetchevent
- *
  * ```ts
  * const { pending } = await ratelimit.limit("id")
- * event.waitUntil(pending)
+ * context.waitUntil(pending)
  * ```
  *
- * **Cloudflare Worker:**
- * https://developers.cloudflare.com/workers/runtime-apis/fetch-event/#syntax-module-worker
- *
- * ```ts
- * const { pending } = await ratelimit.limit("id")
- * context.waitUntil(pending)
+ * See `waitUntil` documentation in
+ * [Cloudflare](https://developers.cloudflare.com/workers/runtime-apis/handlers/fetch/#contextwaituntil)
+ * and [Vercel](https://vercel.com/docs/functions/edge-middleware/middleware-api#waituntil)
+ * for more details.
  * ```
  */
  pending: Promise<unknown>;
  };
- type Algorithm<TContext> = (ctx: TContext, identifier: string, opts?: {
-   cache?: EphemeralCache;
- }) => Promise<RatelimitResponse>;
+ type Algorithm<TContext> = () => {
+   limit: (ctx: TContext, identifier: string, rate?: number, opts?: {
+     cache?: EphemeralCache;
+   }) => Promise<RatelimitResponse>;
+   getRemaining: (ctx: TContext, identifier: string) => Promise<number>;
+   resetTokens: (ctx: TContext, identifier: string) => void;
+ };
  /**
   * This is all we need from the redis sdk.
   */
  interface Redis {
  sadd: <TData>(key: string, ...members: TData[]) => Promise<number>;
+ hset: <TValue>(key: string, obj: {
+   [key: string]: TValue;
+ }) => Promise<number>;
  eval: <TArgs extends unknown[], TData = unknown>(...args: [script: string, keys: string[], args: TArgs]) => Promise<TData>;
  }

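The reshaped `Algorithm` type above turns each limiter from a bare function into a factory returning `limit`, `getRemaining`, and `resetTokens`. A minimal conforming sketch, purely illustrative: these types are internal declarations (not exported), and real limiters run Lua scripts against Redis rather than allowing everything.

```ts
// Illustrative "always allow" limiter matching the new Algorithm<TContext>
// factory shape; Algorithm and RegionContext refer to the declarations above.
const allowAll: Algorithm<RegionContext> = () => ({
  limit: async (_ctx, _identifier, _rate, _opts) => ({
    success: true,
    limit: Number.POSITIVE_INFINITY,
    remaining: Number.POSITIVE_INFINITY,
    reset: Date.now(),
    pending: Promise.resolve(),
  }),
  getRemaining: async (_ctx, _identifier) => Number.POSITIVE_INFINITY,
  resetTokens: (_ctx, _identifier) => {},
});
```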
@@ -209,9 +214,27 @@ declare abstract class Ratelimit<TContext extends Context> {
  * }
  * return "Yes"
  * ```
+ *
+ * @param req.rate - The rate at which tokens will be added or consumed from the token bucket. A higher rate allows for more requests to be processed. Defaults to 1 token per interval if not specified.
+ *
+ * Usage with `req.rate`:
+ * @example
+ * ```ts
+ * const ratelimit = new Ratelimit({
+ *   redis: Redis.fromEnv(),
+ *   limiter: Ratelimit.slidingWindow(100, "10 s")
+ * })
+ *
+ * const { success } = await ratelimit.limit(id, { rate: 10 })
+ * if (!success) {
+ *   return "Nope"
+ * }
+ * return "Yes"
+ * ```
  */
  limit: (identifier: string, req?: {
  geo?: Geo;
+ rate?: number;
  }) => Promise<RatelimitResponse>;
  /**
   * Block until the request may pass or timeout is reached.
@@ -236,6 +259,8 @@ declare abstract class Ratelimit<TContext extends Context> {
  * ```
  */
  blockUntilReady: (identifier: string, timeout: number) => Promise<RatelimitResponse>;
+ resetUsedTokens: (identifier: string) => Promise<void>;
+ getRemaining: (identifier: string) => Promise<number>;
  }

  type MultiRegionRatelimitConfig = {
@@ -285,7 +310,7 @@ type MultiRegionRatelimitConfig = {
  * If enabled, the ratelimiter will store analytics data in redis, which you can check out at
  * https://console.upstash.com/ratelimit
  *
- * @default true
+ * @default false
  */
  analytics?: boolean;
  };
@@ -412,7 +437,7 @@ type RegionRatelimitConfig = {
  * If enabled, the ratelimiter will store analytics data in redis, which you can check out at
  * https://console.upstash.com/ratelimit
  *
- * @default true
+ * @default false
  */
  analytics?: boolean;
  };
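Taken together, this file's changes surface three user-visible additions: a `rate` option on `limit`, plus `getRemaining` and `resetUsedTokens` on the `Ratelimit` class, while `analytics` now defaults to `false`. A minimal usage sketch built only from the signatures above (`"user_123"` is a placeholder identifier):

```ts
import { Ratelimit } from "@upstash/ratelimit";
import { Redis } from "@upstash/redis";

const ratelimit = new Ratelimit({
  redis: Redis.fromEnv(),
  limiter: Ratelimit.slidingWindow(100, "10 s"),
  analytics: true, // must now be opted into explicitly; the default changed to false
});

// Consume 10 units with a single call instead of the default 1.
const { success } = await ratelimit.limit("user_123", { rate: 10 });

// New in this release: inspect and reset the state of an identifier.
const remaining = await ratelimit.getRemaining("user_123");
await ratelimit.resetUsedTokens("user_123");
console.log({ success, remaining });
```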
package/dist/index.d.ts CHANGED
@@ -10,6 +10,8 @@ interface EphemeralCache {
  set: (key: string, value: number) => void;
  get: (key: string) => number | null;
  incr: (key: string) => number;
+ pop: (key: string) => void;
+ empty: () => void;
  }
  type RegionContext = {
  redis: Redis;
@@ -39,36 +41,39 @@ type RatelimitResponse = {
  reset: number;
  /**
   * For the MultiRegion setup we do some synchronizing in the background, after returning the current limit.
+ * Or when analytics is enabled, we send the analytics asynchronously after returning the limit.
  * In most cases you can simply ignore this.
  *
  * On Vercel Edge or Cloudflare workers, you need to explicitly handle the pending Promise like this:
  *
- * **Vercel Edge:**
- * https://nextjs.org/docs/api-reference/next/server#nextfetchevent
- *
  * ```ts
  * const { pending } = await ratelimit.limit("id")
- * event.waitUntil(pending)
+ * context.waitUntil(pending)
  * ```
  *
- * **Cloudflare Worker:**
- * https://developers.cloudflare.com/workers/runtime-apis/fetch-event/#syntax-module-worker
- *
- * ```ts
- * const { pending } = await ratelimit.limit("id")
- * context.waitUntil(pending)
+ * See `waitUntil` documentation in
+ * [Cloudflare](https://developers.cloudflare.com/workers/runtime-apis/handlers/fetch/#contextwaituntil)
+ * and [Vercel](https://vercel.com/docs/functions/edge-middleware/middleware-api#waituntil)
+ * for more details.
  * ```
  */
  pending: Promise<unknown>;
  };
- type Algorithm<TContext> = (ctx: TContext, identifier: string, opts?: {
-   cache?: EphemeralCache;
- }) => Promise<RatelimitResponse>;
+ type Algorithm<TContext> = () => {
+   limit: (ctx: TContext, identifier: string, rate?: number, opts?: {
+     cache?: EphemeralCache;
+   }) => Promise<RatelimitResponse>;
+   getRemaining: (ctx: TContext, identifier: string) => Promise<number>;
+   resetTokens: (ctx: TContext, identifier: string) => void;
+ };
  /**
   * This is all we need from the redis sdk.
   */
  interface Redis {
  sadd: <TData>(key: string, ...members: TData[]) => Promise<number>;
+ hset: <TValue>(key: string, obj: {
+   [key: string]: TValue;
+ }) => Promise<number>;
  eval: <TArgs extends unknown[], TData = unknown>(...args: [script: string, keys: string[], args: TArgs]) => Promise<TData>;
  }

@@ -209,9 +214,27 @@ declare abstract class Ratelimit<TContext extends Context> {
  * }
  * return "Yes"
  * ```
+ *
+ * @param req.rate - The rate at which tokens will be added or consumed from the token bucket. A higher rate allows for more requests to be processed. Defaults to 1 token per interval if not specified.
+ *
+ * Usage with `req.rate`:
+ * @example
+ * ```ts
+ * const ratelimit = new Ratelimit({
+ *   redis: Redis.fromEnv(),
+ *   limiter: Ratelimit.slidingWindow(100, "10 s")
+ * })
+ *
+ * const { success } = await ratelimit.limit(id, { rate: 10 })
+ * if (!success) {
+ *   return "Nope"
+ * }
+ * return "Yes"
+ * ```
  */
  limit: (identifier: string, req?: {
  geo?: Geo;
+ rate?: number;
  }) => Promise<RatelimitResponse>;
  /**
   * Block until the request may pass or timeout is reached.
@@ -236,6 +259,8 @@ declare abstract class Ratelimit<TContext extends Context> {
  * ```
  */
  blockUntilReady: (identifier: string, timeout: number) => Promise<RatelimitResponse>;
+ resetUsedTokens: (identifier: string) => Promise<void>;
+ getRemaining: (identifier: string) => Promise<number>;
  }

  type MultiRegionRatelimitConfig = {
@@ -285,7 +310,7 @@ type MultiRegionRatelimitConfig = {
  * If enabled, the ratelimiter will store analytics data in redis, which you can check out at
  * https://console.upstash.com/ratelimit
  *
- * @default true
+ * @default false
  */
  analytics?: boolean;
  };
@@ -412,7 +437,7 @@ type RegionRatelimitConfig = {
  * If enabled, the ratelimiter will store analytics data in redis, which you can check out at
  * https://console.upstash.com/ratelimit
  *
- * @default true
+ * @default false
  */
  analytics?: boolean;
  };