@upstash/ratelimit 1.0.3 → 1.1.0-canary-1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -1,10 +1,9 @@
1
1
  # Upstash Rate Limit
2
2
 
3
3
  [![Tests](https://github.com/upstash/ratelimit/actions/workflows/tests.yaml/badge.svg)](https://github.com/upstash/ratelimit/actions/workflows/tests.yaml)
4
- ![npm (scoped)](https://img.shields.io/npm/v/@upstash/ratelimit)
4
+ [![npm (scoped)](https://img.shields.io/npm/v/@upstash/ratelimit)](https://www.npmjs.com/package/@upstash/ratelimit)
5
5
 
6
- > [!NOTE]
7
- > **This project is in GA Stage.**
6
+ > [!NOTE] > **This project is in GA Stage.**
8
7
  > The Upstash Professional Support fully covers this project. It receives regular updates, and bug fixes. The Upstash team is committed to maintaining and improving its functionality.
9
8
 
10
9
  It is the only connectionless (HTTP based) rate limiting library and designed
@@ -32,7 +31,7 @@ npm install @upstash/ratelimit
32
31
  #### Deno
33
32
 
34
33
  ```ts
35
- import { Ratelimit } from "https://cdn.skypack.dev/@upstash/ratelimit@latest"
34
+ import { Ratelimit } from "https://cdn.skypack.dev/@upstash/ratelimit@latest";
36
35
  ```
37
36
 
38
37
  ### Create database
@@ -105,31 +104,50 @@ export type RatelimitResponse = {
105
104
 
106
105
  /**
107
106
  * For the MultiRegion setup we do some synchronizing in the background, after returning the current limit.
107
+ * Or when analytics is enabled, we send the analytics asynchronously after returning the limit.
108
108
  * In most cases you can simply ignore this.
109
109
  *
110
- * On Vercel Edge or Cloudflare workers, you need to explicitely handle the pending Promise like this:
111
- *
112
- * **Vercel Edge:**
113
- * https://nextjs.org/docs/api-reference/next/server#nextfetchevent
110
+ * On Vercel Edge or Cloudflare workers, you need to explicitly handle the pending Promise like this:
114
111
  *
115
112
  * ```ts
116
113
  * const { pending } = await ratelimit.limit("id")
117
- * event.waitUntil(pending)
114
+ * context.waitUntil(pending)
118
115
  * ```
119
116
  *
120
- * **Cloudflare Worker:**
121
- * https://developers.cloudflare.com/workers/runtime-apis/fetch-event/#syntax-module-worker
122
- *
123
- * ```ts
124
- * const { pending } = await ratelimit.limit("id")
125
- * context.waitUntil(pending)
117
+ * See `waitUntil` documentation in
118
+ * [Cloudflare](https://developers.cloudflare.com/workers/runtime-apis/handlers/fetch/#contextwaituntil)
119
+ * and [Vercel](https://vercel.com/docs/functions/edge-middleware/middleware-api#waituntil)
120
+ * for more details.
126
121
  * ```
127
122
  */
128
123
  pending: Promise<unknown>;
129
124
  };
130
125
  ````
131
126
 
127
+ ### Using with CloudFlare Workers and Vercel Edge
128
+
129
+ When we use CloudFlare Workers and Vercel Edge, we need to be careful about
130
+ making sure that the rate limiting operations complete correctly before the runtime ends
131
+ after returning the response.
132
+
133
+ This is important in two cases where we do some operations in the background asynchronously after `limit` is called:
134
+
135
+ 1. Using MultiRegion: synchronize Redis instances in different regions
136
+ 2. Enabling analytics: send analytics to Redis
137
+
138
+ In these cases, we need to wait for these operations to finish before sending the response to the user. Otherwise, the runtime will end and we won't be able to complete our chores.
139
+
140
+ In order to wait for these operations to finish, use the `pending` promise:
141
+
142
+ ```ts
143
+ const { pending } = await ratelimit.limit("id");
144
+ context.waitUntil(pending);
145
+ ```
146
+
147
+ See `waitUntil` documentation in [Cloudflare](https://developers.cloudflare.com/workers/runtime-apis/handlers/fetch/#contextwaituntil) and [Vercel](https://vercel.com/docs/functions/edge-middleware/middleware-api#waituntil) for more details.
148
+
132
149
  ### Docs
150
+
133
151
  See [the documentation](https://upstash.com/docs/oss/sdks/ts/ratelimit/overview) for details.
134
152
 
135
153
  ## Contributing
package/dist/index.d.mts CHANGED
@@ -10,6 +10,8 @@ interface EphemeralCache {
10
10
  set: (key: string, value: number) => void;
11
11
  get: (key: string) => number | null;
12
12
  incr: (key: string) => number;
13
+ pop: (key: string) => void;
14
+ empty: () => void;
13
15
  }
14
16
  type RegionContext = {
15
17
  redis: Redis;
@@ -39,31 +41,31 @@ type RatelimitResponse = {
39
41
  reset: number;
40
42
  /**
41
43
  * For the MultiRegion setup we do some synchronizing in the background, after returning the current limit.
44
+ * Or when analytics is enabled, we send the analytics asynchronously after returning the limit.
42
45
  * In most cases you can simply ignore this.
43
46
  *
44
47
  * On Vercel Edge or Cloudflare workers, you need to explicitly handle the pending Promise like this:
45
48
  *
46
- * **Vercel Edge:**
47
- * https://nextjs.org/docs/api-reference/next/server#nextfetchevent
48
- *
49
49
  * ```ts
50
50
  * const { pending } = await ratelimit.limit("id")
51
- * event.waitUntil(pending)
51
+ * context.waitUntil(pending)
52
52
  * ```
53
53
  *
54
- * **Cloudflare Worker:**
55
- * https://developers.cloudflare.com/workers/runtime-apis/fetch-event/#syntax-module-worker
56
- *
57
- * ```ts
58
- * const { pending } = await ratelimit.limit("id")
59
- * context.waitUntil(pending)
54
+ * See `waitUntil` documentation in
55
+ * [Cloudflare](https://developers.cloudflare.com/workers/runtime-apis/handlers/fetch/#contextwaituntil)
56
+ * and [Vercel](https://vercel.com/docs/functions/edge-middleware/middleware-api#waituntil)
57
+ * for more details.
60
58
  * ```
61
59
  */
62
60
  pending: Promise<unknown>;
63
61
  };
64
- type Algorithm<TContext> = (ctx: TContext, identifier: string, rate?: number, opts?: {
65
- cache?: EphemeralCache;
66
- }) => Promise<RatelimitResponse>;
62
+ type Algorithm<TContext> = () => {
63
+ limit: (ctx: TContext, identifier: string, rate?: number, opts?: {
64
+ cache?: EphemeralCache;
65
+ }) => Promise<RatelimitResponse>;
66
+ getRemaining: (ctx: TContext, identifier: string) => Promise<number>;
67
+ resetTokens: (ctx: TContext, identifier: string) => void;
68
+ };
67
69
  /**
68
70
  * This is all we need from the redis sdk.
69
71
  */
@@ -257,6 +259,8 @@ declare abstract class Ratelimit<TContext extends Context> {
257
259
  * ```
258
260
  */
259
261
  blockUntilReady: (identifier: string, timeout: number) => Promise<RatelimitResponse>;
262
+ resetUsedTokens: (identifier: string) => Promise<void>;
263
+ getRemaining: (identifier: string) => Promise<number>;
260
264
  }
261
265
 
262
266
  type MultiRegionRatelimitConfig = {
package/dist/index.d.ts CHANGED
@@ -10,6 +10,8 @@ interface EphemeralCache {
10
10
  set: (key: string, value: number) => void;
11
11
  get: (key: string) => number | null;
12
12
  incr: (key: string) => number;
13
+ pop: (key: string) => void;
14
+ empty: () => void;
13
15
  }
14
16
  type RegionContext = {
15
17
  redis: Redis;
@@ -39,31 +41,31 @@ type RatelimitResponse = {
39
41
  reset: number;
40
42
  /**
41
43
  * For the MultiRegion setup we do some synchronizing in the background, after returning the current limit.
44
+ * Or when analytics is enabled, we send the analytics asynchronously after returning the limit.
42
45
  * In most cases you can simply ignore this.
43
46
  *
44
47
  * On Vercel Edge or Cloudflare workers, you need to explicitly handle the pending Promise like this:
45
48
  *
46
- * **Vercel Edge:**
47
- * https://nextjs.org/docs/api-reference/next/server#nextfetchevent
48
- *
49
49
  * ```ts
50
50
  * const { pending } = await ratelimit.limit("id")
51
- * event.waitUntil(pending)
51
+ * context.waitUntil(pending)
52
52
  * ```
53
53
  *
54
- * **Cloudflare Worker:**
55
- * https://developers.cloudflare.com/workers/runtime-apis/fetch-event/#syntax-module-worker
56
- *
57
- * ```ts
58
- * const { pending } = await ratelimit.limit("id")
59
- * context.waitUntil(pending)
54
+ * See `waitUntil` documentation in
55
+ * [Cloudflare](https://developers.cloudflare.com/workers/runtime-apis/handlers/fetch/#contextwaituntil)
56
+ * and [Vercel](https://vercel.com/docs/functions/edge-middleware/middleware-api#waituntil)
57
+ * for more details.
60
58
  * ```
61
59
  */
62
60
  pending: Promise<unknown>;
63
61
  };
64
- type Algorithm<TContext> = (ctx: TContext, identifier: string, rate?: number, opts?: {
65
- cache?: EphemeralCache;
66
- }) => Promise<RatelimitResponse>;
62
+ type Algorithm<TContext> = () => {
63
+ limit: (ctx: TContext, identifier: string, rate?: number, opts?: {
64
+ cache?: EphemeralCache;
65
+ }) => Promise<RatelimitResponse>;
66
+ getRemaining: (ctx: TContext, identifier: string) => Promise<number>;
67
+ resetTokens: (ctx: TContext, identifier: string) => void;
68
+ };
67
69
  /**
68
70
  * This is all we need from the redis sdk.
69
71
  */
@@ -257,6 +259,8 @@ declare abstract class Ratelimit<TContext extends Context> {
257
259
  * ```
258
260
  */
259
261
  blockUntilReady: (identifier: string, timeout: number) => Promise<RatelimitResponse>;
262
+ resetUsedTokens: (identifier: string) => Promise<void>;
263
+ getRemaining: (identifier: string) => Promise<number>;
260
264
  }
261
265
 
262
266
  type MultiRegionRatelimitConfig = {