@upstash/ratelimit 0.1.1 → 0.1.2

package/README.md CHANGED
@@ -5,7 +5,6 @@ An HTTP/REST based Redis client built on top of Upstash REST API.
 
  [![Tests](https://github.com/upstash/ratelimit/actions/workflows/tests.yaml/badge.svg)](https://github.com/upstash/ratelimit/actions/workflows/tests.yaml)
  ![npm (scoped)](https://img.shields.io/npm/v/@upstash/ratelimit)
- ![npm bundle size](https://img.shields.io/bundlephobia/minzip/@upstash/ratelimit)
 
  It is the only connectionless (HTTP based) ratelimiter and designed for:
 
@@ -17,6 +16,38 @@ It is the only connectionless (HTTP based) ratelimiter and designed for:
  - WebAssembly
  - and other environments where HTTP is preferred over TCP.
 
+ <!-- toc -->
+
+ - [Quick Start](#quick-start)
+ - [Install](#install)
+ - [npm](#npm)
+ - [Deno](#deno)
+ - [Create database](#create-database)
+ - [Use it](#use-it)
+ - [Block until ready](#block-until-ready)
+ - [Globally replicated ratelimiting](#globally-replicated-ratelimiting)
+ - [Usage](#usage)
+ - [Example](#example)
+ - [Ratelimiting algorithms](#ratelimiting-algorithms)
+ - [Fixed Window](#fixed-window)
+ - [Pros:](#pros)
+ - [Cons:](#cons)
+ - [Usage:](#usage)
+ - [Sliding Window](#sliding-window)
+ - [Pros:](#pros-1)
+ - [Cons:](#cons-1)
+ - [Usage:](#usage-1)
+ - [Token Bucket](#token-bucket)
+ - [Pros:](#pros-2)
+ - [Cons:](#cons-2)
+ - [Usage:](#usage-2)
+ - [Contributing](#contributing)
+ - [Install Deno](#install-deno)
+ - [Database](#database)
+ - [Running tests](#running-tests)
+
+ <!-- tocstop -->
+
  ## Quick Start
 
  ### Install
@@ -30,7 +61,7 @@ npm install @upstash/ratelimit
  #### Deno
 
  ```ts
- import { Redis } from "https://deno.land/x/upstash_ratelimit/mod.ts";
+ import { Ratelimit } from "https://deno.land/x/upstash_ratelimit/mod.ts";
  ```
 
  ### Create database
@@ -124,6 +155,51 @@ doExpensiveCalculation();
  return "Here you go!";
  ```
 
+ ## Globally replicated ratelimiting
+
+ Using a single redis instance means that only the part of your userbase closest
+ to the deployed db gets low latencies. That's why we also built
+ `GlobalRatelimit`, which replicates the state across multiple redis databases
+ and offers lower latencies to more of your users.
+
+ `GlobalRatelimit` does this by checking the current limit in the closest db and
+ returning immediately. Only afterwards will the state be asynchronously
+ replicated to the other databases leveraging
+ [CRDTs](https://en.wikipedia.org/wiki/Conflict-free_replicated_data_type). Due
+ to the nature of distributed systems, there is no way to guarantee the set
+ ratelimit is not exceeded by a small margin. This is the tradeoff for reduced
+ global latency.
+
+ ### Usage
+
+ The API is the same, except you provide multiple redis instances:
+
+ ```ts
+ import { GlobalRatelimit } from "@upstash/ratelimit"; // for deno: see above
+ import { Redis } from "@upstash/redis";
+
+ // Create a new ratelimiter that allows 10 requests per 10 seconds
+ const ratelimit = new GlobalRatelimit({
+   redis: [
+     new Redis({/* auth */}),
+     new Redis({/* auth */}),
+     new Redis({/* auth */}),
+   ],
+   limiter: GlobalRatelimit.slidingWindow(10, "10 s"),
+ });
+
+ // Use a constant string to limit all requests with a single ratelimit
+ // Or use a userID, apiKey or ip address for individual limits.
+ const identifier = "api";
+ const { success } = await ratelimit.limit(identifier);
+ ```
+
+ ### Example
+
+ Let's assume you have customers in the US and Europe. In this case you can
+ create 2 regional redis databases on [Upstash](https://console.upstash.com) and
+ your users will enjoy the latency of whichever db is closest to them.
+
  ## Ratelimiting algorithms
 
  We provide different algorithms to use out of the box. Each has pros and cons.
@@ -199,6 +275,8 @@ const ratelimit = new Ratelimit({
 
  ### Token Bucket
 
+ _Not yet supported for `GlobalRatelimit`_
+
  Consider a bucket filled with `{maxTokens}` tokens that refills constantly at
  `{refillRate}` per `{interval}`. Every request will remove one token from the
  bucket and if there is no token to take, the request is rejected.
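As a usage sketch for the new README section above: a hypothetical HTTP handler that applies the globally replicated limiter per client. The handler shape, region URLs, tokens, and response headers are assumptions for illustration; `GlobalRatelimit`, `slidingWindow`, and the `success`/`limit`/`remaining`/`reset` fields come from this release, assuming `limit()` passes the limiter's result through.

```ts
import { GlobalRatelimit } from "@upstash/ratelimit";
import { Redis } from "@upstash/redis";

// Two regional databases (placeholder URLs/tokens), as in the US/Europe example above.
const ratelimit = new GlobalRatelimit({
  redis: [
    new Redis({ url: "https://us1-example.upstash.io", token: "<token>" }),
    new Redis({ url: "https://eu1-example.upstash.io", token: "<token>" }),
  ],
  limiter: GlobalRatelimit.slidingWindow(10, "10 s"),
});

export async function handler(req: Request): Promise<Response> {
  // Per-client identifier; a constant string would instead enforce one shared limit.
  const identifier = req.headers.get("x-forwarded-for") ?? "anonymous";
  const { success, limit, remaining, reset } = await ratelimit.limit(identifier);
  if (!success) {
    return new Response("Too many requests", {
      status: 429,
      headers: {
        "RateLimit-Limit": String(limit),
        "RateLimit-Remaining": String(remaining),
        "RateLimit-Reset": String(reset),
      },
    });
  }
  return new Response("Here you go!");
}
```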
package/esm/global.js ADDED
@@ -0,0 +1,228 @@
+ import { ms } from "./duration.js";
+ import { Ratelimit } from "./ratelimit.js";
+ /**
+  * Ratelimiter using serverless redis from https://upstash.com/
+  *
+  * @example
+  * ```ts
+  * const { limit } = new GlobalRatelimit({
+  *   redis: Redis.fromEnv(),
+  *   limiter: GlobalRatelimit.fixedWindow(
+  *     10, // Allow 10 requests per window of 30 minutes
+  *     "30 m", // interval of 30 minutes
+  *   )
+  * })
+  *
+  * ```
+  */
+ export class GlobalRatelimit extends Ratelimit {
+   /**
+    * Create a new Ratelimit instance by providing a `@upstash/redis` instance and the algorithm of your choice.
+    */
+   constructor(config) {
+     super({
+       prefix: config.prefix,
+       limiter: config.limiter,
+       ctx: { redis: config.redis },
+     });
+   }
+   /**
+    * Each request inside a fixed time window increases a counter.
+    * Once the counter reaches a maximum allowed number, all further requests are
+    * rejected.
+    *
+    * **Pro:**
+    *
+    * - Newer requests are not starved by old ones.
+    * - Low storage cost.
+    *
+    * **Con:**
+    *
+    * A burst of requests near the boundary of a window can result in a very
+    * high request rate because two windows will be filled with requests quickly.
+    *
+    * @param tokens - How many requests a user can make in each time window.
+    * @param window - A fixed timeframe
+    */
+   static fixedWindow(
+     /**
+      * How many requests are allowed per window.
+      */
+     tokens,
+     /**
+      * The duration in which `tokens` requests are allowed.
+      */
+     window) {
+     const windowDuration = ms(window);
+     const script = `
+       local key = KEYS[1]
+       local id = ARGV[1]
+       local window = ARGV[2]
+
+       redis.call("SADD", key, id)
+       local members = redis.call("SMEMBERS", key)
+       if #members == 1 then
+         -- The first time this key is set, the value will be 1.
+         -- So we only need the expire command once
+         redis.call("PEXPIRE", key, window)
+       end
+
+       return members
+     `;
+     return async function (ctx, identifier) {
+       const requestID = crypto.randomUUID();
+       const bucket = Math.floor(Date.now() / windowDuration);
+       const key = [identifier, bucket].join(":");
+       const dbs = ctx.redis.map((redis) => ({
+         redis,
+         request: redis.eval(script, [key], [requestID, windowDuration]),
+       }));
+       const firstResponse = await Promise.any(dbs.map((s) => s.request));
+       const usedTokens = firstResponse.length;
+       const remaining = tokens - usedTokens - 1;
+       /**
+        * If the length between two databases does not match, we sync the two databases
+        */
+       async function sync() {
+         const individualIDs = await Promise.all(dbs.map((s) => s.request));
+         const allIDs = Array.from(new Set(individualIDs.flatMap((_) => _)).values());
+         for (const db of dbs) {
+           const ids = await db.request;
+           /**
+            * If the bucket in this db is already full, it doesn't matter which ids it contains.
+            * So we do not have to sync.
+            */
+           if (ids.length >= tokens) {
+             continue;
+           }
+           const diff = allIDs.filter((id) => !ids.includes(id));
+           /**
+            * Don't waste a request if there is nothing to send
+            */
+           if (diff.length === 0) {
+             continue;
+           }
+           await db.redis.sadd(key, ...allIDs);
+         }
+       }
+       /**
+        * Do not await sync. This should not run in the critical path.
+        */
+       sync();
+       return {
+         success: remaining > 0,
+         limit: tokens,
+         remaining,
+         reset: (bucket + 1) * windowDuration,
+       };
+     };
+   }
+   /**
+    * Combined approach of `slidingLogs` and `fixedWindow` with lower storage
+    * costs than `slidingLogs` and improved boundary behavior by calculating a
+    * weighted score between two windows.
+    *
+    * **Pro:**
+    *
+    * Good performance allows this to scale to very high loads.
+    *
+    * **Con:**
+    *
+    * Nothing major.
+    *
+    * @param tokens - How many requests a user can make in each time window.
+    * @param window - The duration in which the user can max X requests.
+    */
+   static slidingWindow(
+     /**
+      * How many requests are allowed per window.
+      */
+     tokens,
+     /**
+      * The duration in which `tokens` requests are allowed.
+      */
+     window) {
+     const windowSize = ms(window);
+     const script = `
+       local currentKey = KEYS[1] -- identifier including prefixes
+       local previousKey = KEYS[2] -- key of the previous bucket
+       local tokens = tonumber(ARGV[1]) -- tokens per window
+       local now = ARGV[2] -- current timestamp in milliseconds
+       local window = ARGV[3] -- interval in milliseconds
+       local requestID = ARGV[4] -- uuid for this request
+
+
+       local currentMembers = redis.call("SMEMBERS", currentKey)
+       local requestsInCurrentWindow = #currentMembers
+       local previousMembers = redis.call("SMEMBERS", previousKey)
+       local requestsInPreviousWindow = #previousMembers
+
+       local percentageInCurrent = ( now % window) / window
+       if requestsInPreviousWindow * ( 1 - percentageInCurrent ) + requestsInCurrentWindow >= tokens then
+         return {currentMembers, previousMembers}
+       end
+
+       redis.call("SADD", currentKey, requestID)
+       table.insert(currentMembers, requestID)
+       if requestsInCurrentWindow == 0 then
+         -- The first time this key is set, the value will be 1.
+         -- So we only need the expire command once
+         redis.call("PEXPIRE", currentKey, window * 2 + 1000) -- Enough time to overlap with a new window + 1 second
+       end
+       return {currentMembers, previousMembers}
+     `;
+     const windowDuration = ms(window);
+     return async function (ctx, identifier) {
+       const requestID = crypto.randomUUID();
+       const now = Date.now();
+       const currentWindow = Math.floor(now / windowSize);
+       const currentKey = [identifier, currentWindow].join(":");
+       const previousWindow = currentWindow - windowSize;
+       const previousKey = [identifier, previousWindow].join(":");
+       const dbs = ctx.redis.map((redis) => ({
+         redis,
+         request: redis.eval(script, [currentKey, previousKey], [tokens, now, windowDuration, requestID]),
+       }));
+       const percentageInCurrent = (now % windowDuration) / windowDuration;
+       const [current, previous] = await Promise.any(dbs.map((s) => s.request));
+       const usedTokens = previous.length * (1 - percentageInCurrent) +
+         current.length;
+       const remaining = tokens - usedTokens;
+       /**
+        * If a database differs from the consensus, we sync it
+        */
+       async function sync() {
+         const [individualIDs] = await Promise.all(dbs.map((s) => s.request));
+         const allIDs = Array.from(new Set(individualIDs.flatMap((_) => _)).values());
+         for (const db of dbs) {
+           const [ids] = await db.request;
+           /**
+            * If the bucket in this db is already full, it doesn't matter which ids it contains.
+            * So we do not have to sync.
+            */
+           if (ids.length >= tokens) {
+             continue;
+           }
+           const diff = allIDs.filter((id) => !ids.includes(id));
+           /**
+            * Don't waste a request if there is nothing to send
+            */
+           if (diff.length === 0) {
+             continue;
+           }
+           await db.redis.sadd(currentKey, ...allIDs);
+         }
+       }
+       /**
+        * Do not await sync. This should not run in the critical path.
+        */
+       sync();
+       return {
+         success: remaining > 0,
+         limit: tokens,
+         remaining,
+         reset: (currentWindow + 1) * windowDuration,
+       };
+     };
+   }
+ }
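The weighted count used by `slidingWindow` above is `requestsInPreviousWindow * (1 - percentageInCurrent) + requestsInCurrentWindow`. A small worked example with made-up numbers only:

```ts
// Example numbers only: 10-token limit, 10 s window, 4 s into the current window.
const tokens = 10;
const windowMs = 10_000;
const elapsedInCurrentMs = 4_000; // now % window
const percentageInCurrent = elapsedInCurrentMs / windowMs; // 0.4

const requestsInPreviousWindow = 6;
const requestsInCurrentWindow = 3;

// 6 * (1 - 0.4) + 3 = 6.6, still below the 10-token limit, so the request passes.
const weighted =
  requestsInPreviousWindow * (1 - percentageInCurrent) + requestsInCurrentWindow;
console.log(weighted, weighted >= tokens ? "rejected" : "allowed");
```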
package/esm/mod.js CHANGED
@@ -1 +1,2 @@
  export { RegionRatelimit as Ratelimit } from "./region.js";
+ export { GlobalRatelimit } from "./global.js";
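The ESM entry point now re-exports both limiters, so (assuming a runtime or bundler that resolves this ESM build) consumers can import them side by side:

```ts
// `Ratelimit` is still the single-region limiter (RegionRatelimit re-exported),
// `GlobalRatelimit` is the new multi-db limiter added in this release.
import { Ratelimit, GlobalRatelimit } from "@upstash/ratelimit";
```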
package/package.json CHANGED
@@ -3,7 +3,7 @@
    "main": "./script/mod.js",
    "types": "./types/mod.d.ts",
    "name": "@upstash/ratelimit",
-   "version": "v0.1.1",
+   "version": "v0.1.2",
    "description": "A serverless ratelimiter built on top of Upstash REST API.",
    "repository": {
      "type": "git",
package/script/global.js ADDED
@@ -0,0 +1,232 @@
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.GlobalRatelimit = void 0;
+ const duration_js_1 = require("./duration.js");
+ const ratelimit_js_1 = require("./ratelimit.js");
+ /**
+  * Ratelimiter using serverless redis from https://upstash.com/
+  *
+  * @example
+  * ```ts
+  * const { limit } = new GlobalRatelimit({
+  *   redis: Redis.fromEnv(),
+  *   limiter: GlobalRatelimit.fixedWindow(
+  *     10, // Allow 10 requests per window of 30 minutes
+  *     "30 m", // interval of 30 minutes
+  *   )
+  * })
+  *
+  * ```
+  */
+ class GlobalRatelimit extends ratelimit_js_1.Ratelimit {
+   /**
+    * Create a new Ratelimit instance by providing a `@upstash/redis` instance and the algorithm of your choice.
+    */
+   constructor(config) {
+     super({
+       prefix: config.prefix,
+       limiter: config.limiter,
+       ctx: { redis: config.redis },
+     });
+   }
+   /**
+    * Each request inside a fixed time window increases a counter.
+    * Once the counter reaches a maximum allowed number, all further requests are
+    * rejected.
+    *
+    * **Pro:**
+    *
+    * - Newer requests are not starved by old ones.
+    * - Low storage cost.
+    *
+    * **Con:**
+    *
+    * A burst of requests near the boundary of a window can result in a very
+    * high request rate because two windows will be filled with requests quickly.
+    *
+    * @param tokens - How many requests a user can make in each time window.
+    * @param window - A fixed timeframe
+    */
+   static fixedWindow(
+     /**
+      * How many requests are allowed per window.
+      */
+     tokens,
+     /**
+      * The duration in which `tokens` requests are allowed.
+      */
+     window) {
+     const windowDuration = (0, duration_js_1.ms)(window);
+     const script = `
+       local key = KEYS[1]
+       local id = ARGV[1]
+       local window = ARGV[2]
+
+       redis.call("SADD", key, id)
+       local members = redis.call("SMEMBERS", key)
+       if #members == 1 then
+         -- The first time this key is set, the value will be 1.
+         -- So we only need the expire command once
+         redis.call("PEXPIRE", key, window)
+       end
+
+       return members
+     `;
+     return async function (ctx, identifier) {
+       const requestID = crypto.randomUUID();
+       const bucket = Math.floor(Date.now() / windowDuration);
+       const key = [identifier, bucket].join(":");
+       const dbs = ctx.redis.map((redis) => ({
+         redis,
+         request: redis.eval(script, [key], [requestID, windowDuration]),
+       }));
+       const firstResponse = await Promise.any(dbs.map((s) => s.request));
+       const usedTokens = firstResponse.length;
+       const remaining = tokens - usedTokens - 1;
+       /**
+        * If the length between two databases does not match, we sync the two databases
+        */
+       async function sync() {
+         const individualIDs = await Promise.all(dbs.map((s) => s.request));
+         const allIDs = Array.from(new Set(individualIDs.flatMap((_) => _)).values());
+         for (const db of dbs) {
+           const ids = await db.request;
+           /**
+            * If the bucket in this db is already full, it doesn't matter which ids it contains.
+            * So we do not have to sync.
+            */
+           if (ids.length >= tokens) {
+             continue;
+           }
+           const diff = allIDs.filter((id) => !ids.includes(id));
+           /**
+            * Don't waste a request if there is nothing to send
+            */
+           if (diff.length === 0) {
+             continue;
+           }
+           await db.redis.sadd(key, ...allIDs);
+         }
+       }
+       /**
+        * Do not await sync. This should not run in the critical path.
+        */
+       sync();
+       return {
+         success: remaining > 0,
+         limit: tokens,
+         remaining,
+         reset: (bucket + 1) * windowDuration,
+       };
+     };
+   }
+   /**
+    * Combined approach of `slidingLogs` and `fixedWindow` with lower storage
+    * costs than `slidingLogs` and improved boundary behavior by calculating a
+    * weighted score between two windows.
+    *
+    * **Pro:**
+    *
+    * Good performance allows this to scale to very high loads.
+    *
+    * **Con:**
+    *
+    * Nothing major.
+    *
+    * @param tokens - How many requests a user can make in each time window.
+    * @param window - The duration in which the user can max X requests.
+    */
+   static slidingWindow(
+     /**
+      * How many requests are allowed per window.
+      */
+     tokens,
+     /**
+      * The duration in which `tokens` requests are allowed.
+      */
+     window) {
+     const windowSize = (0, duration_js_1.ms)(window);
+     const script = `
+       local currentKey = KEYS[1] -- identifier including prefixes
+       local previousKey = KEYS[2] -- key of the previous bucket
+       local tokens = tonumber(ARGV[1]) -- tokens per window
+       local now = ARGV[2] -- current timestamp in milliseconds
+       local window = ARGV[3] -- interval in milliseconds
+       local requestID = ARGV[4] -- uuid for this request
+
+
+       local currentMembers = redis.call("SMEMBERS", currentKey)
+       local requestsInCurrentWindow = #currentMembers
+       local previousMembers = redis.call("SMEMBERS", previousKey)
+       local requestsInPreviousWindow = #previousMembers
+
+       local percentageInCurrent = ( now % window) / window
+       if requestsInPreviousWindow * ( 1 - percentageInCurrent ) + requestsInCurrentWindow >= tokens then
+         return {currentMembers, previousMembers}
+       end
+
+       redis.call("SADD", currentKey, requestID)
+       table.insert(currentMembers, requestID)
+       if requestsInCurrentWindow == 0 then
+         -- The first time this key is set, the value will be 1.
+         -- So we only need the expire command once
+         redis.call("PEXPIRE", currentKey, window * 2 + 1000) -- Enough time to overlap with a new window + 1 second
+       end
+       return {currentMembers, previousMembers}
+     `;
+     const windowDuration = (0, duration_js_1.ms)(window);
+     return async function (ctx, identifier) {
+       const requestID = crypto.randomUUID();
+       const now = Date.now();
+       const currentWindow = Math.floor(now / windowSize);
+       const currentKey = [identifier, currentWindow].join(":");
+       const previousWindow = currentWindow - windowSize;
+       const previousKey = [identifier, previousWindow].join(":");
+       const dbs = ctx.redis.map((redis) => ({
+         redis,
+         request: redis.eval(script, [currentKey, previousKey], [tokens, now, windowDuration, requestID]),
+       }));
+       const percentageInCurrent = (now % windowDuration) / windowDuration;
+       const [current, previous] = await Promise.any(dbs.map((s) => s.request));
+       const usedTokens = previous.length * (1 - percentageInCurrent) +
+         current.length;
+       const remaining = tokens - usedTokens;
+       /**
+        * If a database differs from the consensus, we sync it
+        */
+       async function sync() {
+         const [individualIDs] = await Promise.all(dbs.map((s) => s.request));
+         const allIDs = Array.from(new Set(individualIDs.flatMap((_) => _)).values());
+         for (const db of dbs) {
+           const [ids] = await db.request;
+           /**
+            * If the bucket in this db is already full, it doesn't matter which ids it contains.
+            * So we do not have to sync.
+            */
+           if (ids.length >= tokens) {
+             continue;
+           }
+           const diff = allIDs.filter((id) => !ids.includes(id));
+           /**
+            * Don't waste a request if there is nothing to send
+            */
+           if (diff.length === 0) {
+             continue;
+           }
+           await db.redis.sadd(currentKey, ...allIDs);
+         }
+       }
+       /**
+        * Do not await sync. This should not run in the critical path.
+        */
+       sync();
+       return {
+         success: remaining > 0,
+         limit: tokens,
+         remaining,
+         reset: (currentWindow + 1) * windowDuration,
+       };
+     };
+   }
+ }
+ exports.GlobalRatelimit = GlobalRatelimit;
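Both algorithms in this file return `reset` as `(bucket + 1) * windowDuration`, i.e. the epoch-millisecond timestamp at which the next window starts. A minimal sketch (not part of the package) of turning that into a retry delay; the single-database setup and the identifier are placeholders:

```ts
import { GlobalRatelimit } from "@upstash/ratelimit";
import { Redis } from "@upstash/redis";

const ratelimit = new GlobalRatelimit({
  redis: [Redis.fromEnv()], // a single db just to keep the sketch short
  limiter: GlobalRatelimit.fixedWindow(10, "30 m"),
});

const { success, reset } = await ratelimit.limit("api");
if (!success) {
  // `reset` is an absolute epoch-ms timestamp, so the wait is reset minus now.
  const retryAfterSeconds = Math.max(0, Math.ceil((reset - Date.now()) / 1000));
  console.log(`Rate limited, retry in ${retryAfterSeconds}s`);
}
```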
package/script/mod.js CHANGED
@@ -1,5 +1,7 @@
  "use strict";
  Object.defineProperty(exports, "__esModule", { value: true });
- exports.Ratelimit = void 0;
+ exports.GlobalRatelimit = exports.Ratelimit = void 0;
  var region_js_1 = require("./region.js");
  Object.defineProperty(exports, "Ratelimit", { enumerable: true, get: function () { return region_js_1.RegionRatelimit; } });
+ var global_js_1 = require("./global.js");
+ Object.defineProperty(exports, "GlobalRatelimit", { enumerable: true, get: function () { return global_js_1.GlobalRatelimit; } });
package/types/global.d.ts ADDED
@@ -0,0 +1,98 @@
+ import type { Duration } from "./duration.js";
+ import type { Algorithm, GlobalContext } from "./types.js";
+ import { Ratelimit } from "./ratelimit.js";
+ import type { Redis } from "./types.js";
+ export declare type GlobalRatelimitConfig = {
+   /**
+    * Instances of `@upstash/redis`
+    * @see https://github.com/upstash/upstash-redis#quick-start
+    */
+   redis: Redis[];
+   /**
+    * The ratelimiter function to use.
+    *
+    * Choose one of the predefined ones or implement your own.
+    * Available algorithms are exposed via static methods:
+    * - GlobalRatelimit.fixedWindow
+    */
+   limiter: Algorithm<GlobalContext>;
+   /**
+    * All keys in redis are prefixed with this.
+    *
+    * @default `@upstash/ratelimit`
+    */
+   prefix?: string;
+ };
+ /**
+  * Ratelimiter using serverless redis from https://upstash.com/
+  *
+  * @example
+  * ```ts
+  * const { limit } = new GlobalRatelimit({
+  *   redis: Redis.fromEnv(),
+  *   limiter: GlobalRatelimit.fixedWindow(
+  *     10, // Allow 10 requests per window of 30 minutes
+  *     "30 m", // interval of 30 minutes
+  *   )
+  * })
+  *
+  * ```
+  */
+ export declare class GlobalRatelimit extends Ratelimit<GlobalContext> {
+   /**
+    * Create a new Ratelimit instance by providing a `@upstash/redis` instance and the algorithm of your choice.
+    */
+   constructor(config: GlobalRatelimitConfig);
+   /**
+    * Each request inside a fixed time window increases a counter.
+    * Once the counter reaches a maximum allowed number, all further requests are
+    * rejected.
+    *
+    * **Pro:**
+    *
+    * - Newer requests are not starved by old ones.
+    * - Low storage cost.
+    *
+    * **Con:**
+    *
+    * A burst of requests near the boundary of a window can result in a very
+    * high request rate because two windows will be filled with requests quickly.
+    *
+    * @param tokens - How many requests a user can make in each time window.
+    * @param window - A fixed timeframe
+    */
+   static fixedWindow(
+     /**
+      * How many requests are allowed per window.
+      */
+     tokens: number,
+     /**
+      * The duration in which `tokens` requests are allowed.
+      */
+     window: Duration): Algorithm<GlobalContext>;
+   /**
+    * Combined approach of `slidingLogs` and `fixedWindow` with lower storage
+    * costs than `slidingLogs` and improved boundary behavior by calculating a
+    * weighted score between two windows.
+    *
+    * **Pro:**
+    *
+    * Good performance allows this to scale to very high loads.
+    *
+    * **Con:**
+    *
+    * Nothing major.
+    *
+    * @param tokens - How many requests a user can make in each time window.
+    * @param window - The duration in which the user can max X requests.
+    */
+   static slidingWindow(
+     /**
+      * How many requests are allowed per window.
+      */
+     tokens: number,
+     /**
+      * The duration in which `tokens` requests are allowed.
+      */
+     window: Duration): Algorithm<GlobalContext>;
+ }
package/types/mod.d.ts CHANGED
@@ -1,3 +1,5 @@
  export { RegionRatelimit as Ratelimit } from "./region.js";
  export type { RegionRatelimitConfig as RatelimitConfig } from "./region.js";
+ export { GlobalRatelimit } from "./global.js";
+ export type { GlobalRatelimitConfig } from "./global.js";
  export type { Algorithm } from "./types.js";
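Since `GlobalRatelimitConfig` is now re-exported from the package root (see the `types/mod.d.ts` change above), configs can be typed explicitly. A minimal sketch; the second database URL, token, and prefix are placeholders:

```ts
import { GlobalRatelimit } from "@upstash/ratelimit";
import type { GlobalRatelimitConfig } from "@upstash/ratelimit";
import { Redis } from "@upstash/redis";

const config: GlobalRatelimitConfig = {
  redis: [
    Redis.fromEnv(), // reads UPSTASH_REDIS_REST_URL / UPSTASH_REDIS_REST_TOKEN
    new Redis({ url: "https://eu1-example.upstash.io", token: "<token>" }),
  ],
  limiter: GlobalRatelimit.fixedWindow(10, "30 m"),
  prefix: "my-app", // optional, defaults to "@upstash/ratelimit"
};

const ratelimit = new GlobalRatelimit(config);
```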