@upstash/ratelimit 1.0.3 → 1.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +33 -15
- package/dist/index.d.mts +16 -13
- package/dist/index.d.ts +16 -13
- package/dist/index.js +543 -291
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +543 -291
- package/dist/index.mjs.map +1 -1
- package/package.json +1 -1
package/README.md
CHANGED
|
@@ -1,10 +1,9 @@
|
|
|
1
1
|
# Upstash Rate Limit
|
|
2
2
|
|
|
3
3
|
[](https://github.com/upstash/ratelimit/actions/workflows/tests.yaml)
|
|
4
|
-

|
|
4
|
+
[](https://www.npmjs.com/package/ratelimit)
|
|
5
5
|
|
|
6
|
-
> [!NOTE]
|
|
7
|
-
> **This project is in GA Stage.**
|
|
6
|
+
> [!NOTE] > **This project is in GA Stage.**
|
|
8
7
|
> The Upstash Professional Support fully covers this project. It receives regular updates and bug fixes. The Upstash team is committed to maintaining and improving its functionality.
|
|
9
8
|
|
|
10
9
|
It is the only connectionless (HTTP based) rate limiting library and designed
|
|
@@ -32,7 +31,7 @@ npm install @upstash/ratelimit
|
|
|
32
31
|
#### Deno
|
|
33
32
|
|
|
34
33
|
```ts
|
|
35
|
-
import { Ratelimit } from "https://cdn.skypack.dev/@upstash/ratelimit@latest"
|
|
34
|
+
import { Ratelimit } from "https://cdn.skypack.dev/@upstash/ratelimit@latest";
|
|
36
35
|
```
|
|
37
36
|
|
|
38
37
|
### Create database
|
|
@@ -105,31 +104,50 @@ export type RatelimitResponse = {
|
|
|
105
104
|
|
|
106
105
|
/**
|
|
107
106
|
* For the MultiRegion setup we do some synchronizing in the background, after returning the current limit.
|
|
107
|
+
* Or when analytics is enabled, we send the analytics asynchronously after returning the limit.
|
|
108
108
|
* In most cases you can simply ignore this.
|
|
109
109
|
*
|
|
110
|
-
* On Vercel Edge or Cloudflare workers, you need to
|
|
111
|
-
*
|
|
112
|
-
* **Vercel Edge:**
|
|
113
|
-
* https://nextjs.org/docs/api-reference/next/server#nextfetchevent
|
|
110
|
+
* On Vercel Edge or Cloudflare workers, you need to explicitly handle the pending Promise like this:
|
|
114
111
|
*
|
|
115
112
|
* ```ts
|
|
116
113
|
* const { pending } = await ratelimit.limit("id")
|
|
117
|
-
*
|
|
114
|
+
* context.waitUntil(pending)
|
|
118
115
|
* ```
|
|
119
116
|
*
|
|
120
|
-
*
|
|
121
|
-
* https://developers.cloudflare.com/workers/runtime-apis/fetch
|
|
122
|
-
*
|
|
123
|
-
*
|
|
124
|
-
* const { pending } = await ratelimit.limit("id")
|
|
125
|
-
* context.waitUntil(pending)
|
|
117
|
+
* See `waitUntil` documentation in
|
|
118
|
+
* [Cloudflare](https://developers.cloudflare.com/workers/runtime-apis/handlers/fetch/#contextwaituntil)
|
|
119
|
+
* and [Vercel](https://vercel.com/docs/functions/edge-middleware/middleware-api#waituntil)
|
|
120
|
+
* for more details.
|
|
126
121
|
* ```
|
|
127
122
|
*/
|
|
128
123
|
pending: Promise<unknown>;
|
|
129
124
|
};
|
|
130
125
|
````
|
|
131
126
|
|
|
127
|
+
### Using with CloudFlare Workers and Vercel Edge
|
|
128
|
+
|
|
129
|
+
When we use CloudFlare Workers and Vercel Edge, we need to be careful about
|
|
130
|
+
making sure that the rate limiting operations complete correctly before the runtime ends
|
|
131
|
+
after returning the response.
|
|
132
|
+
|
|
133
|
+
This is important in two cases where we do some operations in the background asynchronously after `limit` is called:
|
|
134
|
+
|
|
135
|
+
1. Using MultiRegion: synchronize Redis instances in different regions
|
|
136
|
+
2. Enabling analytics: send analytics to Redis
|
|
137
|
+
|
|
138
|
+
In these cases, we need to wait for these operations to finish before sending the response to the user. Otherwise, the runtime will end and we won't be able to complete our chores.
|
|
139
|
+
|
|
140
|
+
In order to wait for these operations to finish, use the `pending` promise:
|
|
141
|
+
|
|
142
|
+
```ts
|
|
143
|
+
const { pending } = await ratelimit.limit("id");
|
|
144
|
+
context.waitUntil(pending);
|
|
145
|
+
```
|
|
146
|
+
|
|
147
|
+
See `waitUntil` documentation in [Cloudflare](https://developers.cloudflare.com/workers/runtime-apis/handlers/fetch/#contextwaituntil) and [Vercel](https://vercel.com/docs/functions/edge-middleware/middleware-api#waituntil) for more details.
|
|
148
|
+
|
|
132
149
|
### Docs
|
|
150
|
+
|
|
133
151
|
See [the documentation](https://upstash.com/docs/oss/sdks/ts/ratelimit/overview) for details.
|
|
134
152
|
|
|
135
153
|
## Contributing
|
package/dist/index.d.mts
CHANGED
|
@@ -10,6 +10,7 @@ interface EphemeralCache {
|
|
|
10
10
|
set: (key: string, value: number) => void;
|
|
11
11
|
get: (key: string) => number | null;
|
|
12
12
|
incr: (key: string) => number;
|
|
13
|
+
empty: () => void;
|
|
13
14
|
}
|
|
14
15
|
type RegionContext = {
|
|
15
16
|
redis: Redis;
|
|
@@ -39,31 +40,31 @@ type RatelimitResponse = {
|
|
|
39
40
|
reset: number;
|
|
40
41
|
/**
|
|
41
42
|
* For the MultiRegion setup we do some synchronizing in the background, after returning the current limit.
|
|
43
|
+
* Or when analytics is enabled, we send the analytics asynchronously after returning the limit.
|
|
42
44
|
* In most cases you can simply ignore this.
|
|
43
45
|
*
|
|
44
46
|
* On Vercel Edge or Cloudflare workers, you need to explicitly handle the pending Promise like this:
|
|
45
47
|
*
|
|
46
|
-
* **Vercel Edge:**
|
|
47
|
-
* https://nextjs.org/docs/api-reference/next/server#nextfetchevent
|
|
48
|
-
*
|
|
49
48
|
* ```ts
|
|
50
49
|
* const { pending } = await ratelimit.limit("id")
|
|
51
|
-
*
|
|
50
|
+
* context.waitUntil(pending)
|
|
52
51
|
* ```
|
|
53
52
|
*
|
|
54
|
-
*
|
|
55
|
-
* https://developers.cloudflare.com/workers/runtime-apis/fetch
|
|
56
|
-
*
|
|
57
|
-
*
|
|
58
|
-
* const { pending } = await ratelimit.limit("id")
|
|
59
|
-
* context.waitUntil(pending)
|
|
53
|
+
* See `waitUntil` documentation in
|
|
54
|
+
* [Cloudflare](https://developers.cloudflare.com/workers/runtime-apis/handlers/fetch/#contextwaituntil)
|
|
55
|
+
* and [Vercel](https://vercel.com/docs/functions/edge-middleware/middleware-api#waituntil)
|
|
56
|
+
* for more details.
|
|
60
57
|
* ```
|
|
61
58
|
*/
|
|
62
59
|
pending: Promise<unknown>;
|
|
63
60
|
};
|
|
64
|
-
type Algorithm<TContext> = (
|
|
65
|
-
|
|
66
|
-
|
|
61
|
+
type Algorithm<TContext> = () => {
|
|
62
|
+
limit: (ctx: TContext, identifier: string, rate?: number, opts?: {
|
|
63
|
+
cache?: EphemeralCache;
|
|
64
|
+
}) => Promise<RatelimitResponse>;
|
|
65
|
+
getRemaining: (ctx: TContext, identifier: string) => Promise<number>;
|
|
66
|
+
resetTokens: (ctx: TContext, identifier: string) => void;
|
|
67
|
+
};
|
|
67
68
|
/**
|
|
68
69
|
* This is all we need from the redis sdk.
|
|
69
70
|
*/
|
|
@@ -257,6 +258,8 @@ declare abstract class Ratelimit<TContext extends Context> {
|
|
|
257
258
|
* ```
|
|
258
259
|
*/
|
|
259
260
|
blockUntilReady: (identifier: string, timeout: number) => Promise<RatelimitResponse>;
|
|
261
|
+
resetUsedTokens: (identifier: string) => Promise<void>;
|
|
262
|
+
getRemaining: (identifier: string) => Promise<number>;
|
|
260
263
|
}
|
|
261
264
|
|
|
262
265
|
type MultiRegionRatelimitConfig = {
|
package/dist/index.d.ts
CHANGED
|
@@ -10,6 +10,7 @@ interface EphemeralCache {
|
|
|
10
10
|
set: (key: string, value: number) => void;
|
|
11
11
|
get: (key: string) => number | null;
|
|
12
12
|
incr: (key: string) => number;
|
|
13
|
+
empty: () => void;
|
|
13
14
|
}
|
|
14
15
|
type RegionContext = {
|
|
15
16
|
redis: Redis;
|
|
@@ -39,31 +40,31 @@ type RatelimitResponse = {
|
|
|
39
40
|
reset: number;
|
|
40
41
|
/**
|
|
41
42
|
* For the MultiRegion setup we do some synchronizing in the background, after returning the current limit.
|
|
43
|
+
* Or when analytics is enabled, we send the analytics asynchronously after returning the limit.
|
|
42
44
|
* In most cases you can simply ignore this.
|
|
43
45
|
*
|
|
44
46
|
* On Vercel Edge or Cloudflare workers, you need to explicitly handle the pending Promise like this:
|
|
45
47
|
*
|
|
46
|
-
* **Vercel Edge:**
|
|
47
|
-
* https://nextjs.org/docs/api-reference/next/server#nextfetchevent
|
|
48
|
-
*
|
|
49
48
|
* ```ts
|
|
50
49
|
* const { pending } = await ratelimit.limit("id")
|
|
51
|
-
*
|
|
50
|
+
* context.waitUntil(pending)
|
|
52
51
|
* ```
|
|
53
52
|
*
|
|
54
|
-
*
|
|
55
|
-
* https://developers.cloudflare.com/workers/runtime-apis/fetch
|
|
56
|
-
*
|
|
57
|
-
*
|
|
58
|
-
* const { pending } = await ratelimit.limit("id")
|
|
59
|
-
* context.waitUntil(pending)
|
|
53
|
+
* See `waitUntil` documentation in
|
|
54
|
+
* [Cloudflare](https://developers.cloudflare.com/workers/runtime-apis/handlers/fetch/#contextwaituntil)
|
|
55
|
+
* and [Vercel](https://vercel.com/docs/functions/edge-middleware/middleware-api#waituntil)
|
|
56
|
+
* for more details.
|
|
60
57
|
* ```
|
|
61
58
|
*/
|
|
62
59
|
pending: Promise<unknown>;
|
|
63
60
|
};
|
|
64
|
-
type Algorithm<TContext> = (
|
|
65
|
-
|
|
66
|
-
|
|
61
|
+
type Algorithm<TContext> = () => {
|
|
62
|
+
limit: (ctx: TContext, identifier: string, rate?: number, opts?: {
|
|
63
|
+
cache?: EphemeralCache;
|
|
64
|
+
}) => Promise<RatelimitResponse>;
|
|
65
|
+
getRemaining: (ctx: TContext, identifier: string) => Promise<number>;
|
|
66
|
+
resetTokens: (ctx: TContext, identifier: string) => void;
|
|
67
|
+
};
|
|
67
68
|
/**
|
|
68
69
|
* This is all we need from the redis sdk.
|
|
69
70
|
*/
|
|
@@ -257,6 +258,8 @@ declare abstract class Ratelimit<TContext extends Context> {
|
|
|
257
258
|
* ```
|
|
258
259
|
*/
|
|
259
260
|
blockUntilReady: (identifier: string, timeout: number) => Promise<RatelimitResponse>;
|
|
261
|
+
resetUsedTokens: (identifier: string) => Promise<void>;
|
|
262
|
+
getRemaining: (identifier: string) => Promise<number>;
|
|
260
263
|
}
|
|
261
264
|
|
|
262
265
|
type MultiRegionRatelimitConfig = {
|