@upstash/ratelimit 0.1.0-alpha.0 → 0.1.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -5,7 +5,6 @@ An HTTP/REST based Redis client built on top of Upstash REST API.
 
  [![Tests](https://github.com/upstash/ratelimit/actions/workflows/tests.yaml/badge.svg)](https://github.com/upstash/ratelimit/actions/workflows/tests.yaml)
  ![npm (scoped)](https://img.shields.io/npm/v/@upstash/ratelimit)
- ![npm bundle size](https://img.shields.io/bundlephobia/minzip/@upstash/ratelimit)
 
  It is the only connectionless (HTTP based) ratelimiter and designed for:
 
@@ -17,6 +16,38 @@ It is the only connectionless (HTTP based) ratelimiter and designed for:
  - WebAssembly
  - and other environments where HTTP is preferred over TCP.
 
+ <!-- toc -->
+
+ - [Quick Start](#quick-start)
+   - [Install](#install)
+     - [npm](#npm)
+     - [Deno](#deno)
+   - [Create database](#create-database)
+   - [Use it](#use-it)
+     - [Block until ready](#block-until-ready)
+ - [Globally replicated ratelimiting](#globally-replicated-ratelimiting)
+   - [Usage](#usage)
+   - [Example](#example)
+ - [Ratelimiting algorithms](#ratelimiting-algorithms)
+   - [Fixed Window](#fixed-window)
+     - [Pros:](#pros)
+     - [Cons:](#cons)
+     - [Usage:](#usage)
+   - [Sliding Window](#sliding-window)
+     - [Pros:](#pros-1)
+     - [Cons:](#cons-1)
+     - [Usage:](#usage-1)
+   - [Token Bucket](#token-bucket)
+     - [Pros:](#pros-2)
+     - [Cons:](#cons-2)
+     - [Usage:](#usage-2)
+ - [Contributing](#contributing)
+   - [Install Deno](#install-deno)
+   - [Database](#database)
+   - [Running tests](#running-tests)
+
+ <!-- tocstop -->
+
  ## Quick Start
 
  ### Install
@@ -30,7 +61,7 @@ npm install @upstash/ratelimit
  #### Deno
 
  ```ts
- import { Redis } from "https://deno.land/x/upstash_ratelimit/mod.ts";
+ import { Ratelimit } from "https://deno.land/x/upstash_ratelimit/mod.ts";
  ```
 
  ### Create database
@@ -96,7 +127,10 @@ export type RatelimitResponse = {
 
  In case you don't want to reject a request immediately but wait until it can be
  processed, we also provide
- `ratelimit.blockUntilReady(identifier: stirng, timeout: number): Promise<RatelimitResponse>`
+
+ ```ts
+ ratelimit.blockUntilReady(identifier: string, timeout: number): Promise<RatelimitResponse>
+ ```
 
  It is very similar to the `limit` method and takes an identifier and returns the
  same response. However if the current limit has already been exceeded, it will
@@ -121,6 +155,51 @@ doExpensiveCalculation();
  return "Here you go!";
  ```
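
For reference, a self-contained sketch of that wait-instead-of-reject pattern (the identifier, limiter settings, and 60-second timeout below are illustrative choices, not values prescribed by the package):

```ts
import { Ratelimit } from "@upstash/ratelimit";
import { Redis } from "@upstash/redis";

const ratelimit = new Ratelimit({
  redis: Redis.fromEnv(),
  limiter: Ratelimit.slidingWindow(10, "10 s"),
});

async function handle(identifier: string): Promise<string> {
  // Wait up to 60 seconds for a free slot instead of rejecting immediately.
  const { success } = await ratelimit.blockUntilReady(identifier, 60_000);
  if (!success) {
    // The deadline passed before the limit reset.
    return "Unable to process, even after 60 seconds";
  }
  // doExpensiveCalculation() would run here, as in the README example.
  return "Here you go!";
}
```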
 
+ ## Globally replicated ratelimiting
+
+ Using a single redis instance has the downside of providing low latencies only
+ to the part of your userbase closest to the deployed db. That's why we also
+ built `GlobalRatelimit`, which replicates the state across multiple redis
+ databases and offers lower latencies to more of your users.
+
+ `GlobalRatelimit` does this by checking the current limit in the closest db and
+ returning immediately. Only afterwards will the state be asynchronously
+ replicated to the other databases, leveraging
+ [CRDTs](https://en.wikipedia.org/wiki/Conflict-free_replicated_data_type). Due
+ to the nature of distributed systems, there is no way to guarantee that the set
+ ratelimit is not exceeded by a small margin. This is the tradeoff for reduced
+ global latency.
+
+ ### Usage
+
+ The API is the same, except that it asks for multiple redis instances:
+
+ ```ts
+ import { GlobalRatelimit } from "@upstash/ratelimit"; // for deno: see above
+ import { Redis } from "@upstash/redis";
+
+ // Create a new ratelimiter that allows 10 requests per 10 seconds
+ const ratelimit = new GlobalRatelimit({
+   redis: [
+     new Redis({ /* auth */ }),
+     new Redis({ /* auth */ }),
+     new Redis({ /* auth */ }),
+   ],
+   limiter: GlobalRatelimit.slidingWindow(10, "10 s"),
+ });
+
+ // Use a constant string to limit all requests with a single ratelimit,
+ // or use a userID, apiKey or ip address for individual limits.
+ const identifier = "api";
+ const { success } = await ratelimit.limit(identifier);
+ ```
+
+ ### Example
+
+ Let's assume you have customers in the US and Europe. In this case you can
+ create 2 regional redis databases on [Upstash](https://console.upstash.com) and
+ your users will enjoy the latency of whichever db is closest to them.
+
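
As a concrete sketch of that regional setup (the environment variable names below are hypothetical; any two regional Upstash Redis databases and their REST credentials would work):

```ts
import { GlobalRatelimit } from "@upstash/ratelimit";
import { Redis } from "@upstash/redis";

// One database per region; the one closest to the caller answers first,
// and the state is replicated to the other region afterwards.
const ratelimit = new GlobalRatelimit({
  redis: [
    new Redis({
      url: process.env.US_EAST_REDIS_REST_URL!, // hypothetical env var names
      token: process.env.US_EAST_REDIS_REST_TOKEN!,
    }),
    new Redis({
      url: process.env.EU_WEST_REDIS_REST_URL!,
      token: process.env.EU_WEST_REDIS_REST_TOKEN!,
    }),
  ],
  limiter: GlobalRatelimit.slidingWindow(10, "10 s"),
});

const { success } = await ratelimit.limit("api");
```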
  ## Ratelimiting algorithms
 
  We provide different algorithms to use out of the box. Each has pros and cons.
@@ -158,7 +237,7 @@ const ratelimit = new Ratelimit({
 
  Builds on top of fixed window but instead of a fixed window, we use a rolling
  window. Take this example: We have a rate limit of 10 requests per 1 minute. We
- dividie time into 1 minute slices, just like in the fixed window algorithm.
+ divide time into 1 minute slices, just like in the fixed window algorithm.
  Window 1 will be from 00:00:00 to 00:01:00 (HH:MM:SS). Let's assume it is
  currently 00:01:15 and we have received 4 requests in the first window and 5
  requests so far in the current window. The approximation to determine if the
@@ -196,6 +275,8 @@ const ratelimit = new Ratelimit({
 
  ### Token Bucket
 
+ _Not yet supported for `GlobalRatelimit`_
+
  Consider a bucket filled with `{maxTokens}` tokens that refills constantly at
  `{refillRate}` per `{interval}`. Every request will remove one token from the
  bucket and if there is no token to take, the request is rejected.
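
For illustration, a minimal token-bucket sketch with the regular `Ratelimit` client (the `tokenBucket(refillRate, interval, maxTokens)` signature shown here is an assumption inferred from the parameter names above; check the package's typings before relying on it):

```ts
import { Ratelimit } from "@upstash/ratelimit";
import { Redis } from "@upstash/redis";

// Assumed signature: tokenBucket(refillRate, interval, maxTokens).
// The bucket holds at most 10 tokens and gains 5 fresh tokens every 10 seconds;
// each request consumes one token and is rejected once the bucket is empty.
const ratelimit = new Ratelimit({
  redis: Redis.fromEnv(),
  limiter: Ratelimit.tokenBucket(5, "10 s", 10),
});

const { success } = await ratelimit.limit("user-123");
```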
package/esm/global.js ADDED
@@ -0,0 +1,228 @@
+ import { ms } from "./duration.js";
+ import { Ratelimit } from "./ratelimit.js";
+ /**
+  * Ratelimiter using serverless redis from https://upstash.com/
+  *
+  * @example
+  * ```ts
+  * const { limit } = new GlobalRatelimit({
+  *   redis: Redis.fromEnv(),
+  *   limiter: GlobalRatelimit.fixedWindow(
+  *     10,     // Allow 10 requests per window of 30 minutes
+  *     "30 m", // interval of 30 minutes
+  *   )
+  * })
+  *
+  * ```
+  */
+ export class GlobalRatelimit extends Ratelimit {
+   /**
+    * Create a new Ratelimit instance by providing a `@upstash/redis` instance and the algorithm of your choice.
+    */
+   constructor(config) {
+     super({
+       prefix: config.prefix,
+       limiter: config.limiter,
+       ctx: { redis: config.redis },
+     });
+   }
+   /**
+    * Each request inside a fixed time window increases a counter.
+    * Once the counter reaches a maximum allowed number, all further requests are
+    * rejected.
+    *
+    * **Pro:**
+    *
+    * - Newer requests are not starved by old ones.
+    * - Low storage cost.
+    *
+    * **Con:**
+    *
+    * A burst of requests near the boundary of a window can result in a very
+    * high request rate because two windows will be filled with requests quickly.
+    *
+    * @param tokens - How many requests a user can make in each time window.
+    * @param window - A fixed timeframe
+    */
+   static fixedWindow(
+     /**
+      * How many requests are allowed per window.
+      */
+     tokens,
+     /**
+      * The duration in which `tokens` requests are allowed.
+      */
+     window) {
+     const windowDuration = ms(window);
+     const script = `
+       local key = KEYS[1]
+       local id = ARGV[1]
+       local window = ARGV[2]
+
+       redis.call("SADD", key, id)
+       local members = redis.call("SMEMBERS", key)
+       if #members == 1 then
+         -- The first time this key is set, the value will be 1.
+         -- So we only need the expire command once
+         redis.call("PEXPIRE", key, window)
+       end
+
+       return members
+     `;
+     return async function (ctx, identifier) {
+       const requestID = crypto.randomUUID();
+       const bucket = Math.floor(Date.now() / windowDuration);
+       const key = [identifier, bucket].join(":");
+       const dbs = ctx.redis.map((redis) => ({
+         redis,
+         request: redis.eval(script, [key], [requestID, windowDuration]),
+       }));
+       const firstResponse = await Promise.any(dbs.map((s) => s.request));
+       const usedTokens = firstResponse.length;
+       const remaining = tokens - usedTokens - 1;
+       /**
+        * If the length between two databases does not match, we sync the two databases
+        */
+       async function sync() {
+         const individualIDs = await Promise.all(dbs.map((s) => s.request));
+         const allIDs = Array.from(new Set(individualIDs.flatMap((_) => _)).values());
+         for (const db of dbs) {
+           const ids = await db.request;
+           /**
+            * If the bucket in this db is already full, it doesn't matter which ids it contains.
+            * So we do not have to sync.
+            */
+           if (ids.length >= tokens) {
+             continue;
+           }
+           const diff = allIDs.filter((id) => !ids.includes(id));
+           /**
+            * Don't waste a request if there is nothing to send
+            */
+           if (diff.length === 0) {
+             continue;
+           }
+           await db.redis.sadd(key, ...allIDs);
+         }
+       }
+       /**
+        * Do not await sync. This should not run in the critical path.
+        */
+       sync();
+       return {
+         success: remaining > 0,
+         limit: tokens,
+         remaining,
+         reset: (bucket + 1) * windowDuration,
+       };
+     };
+   }
+   /**
+    * Combined approach of `slidingLogs` and `fixedWindow` with lower storage
+    * costs than `slidingLogs` and improved boundary behavior by calculating a
+    * weighted score between two windows.
+    *
+    * **Pro:**
+    *
+    * Good performance allows this to scale to very high loads.
+    *
+    * **Con:**
+    *
+    * Nothing major.
+    *
+    * @param tokens - How many requests a user can make in each time window.
+    * @param window - The duration in which the user can max X requests.
+    */
+   static slidingWindow(
+     /**
+      * How many requests are allowed per window.
+      */
+     tokens,
+     /**
+      * The duration in which `tokens` requests are allowed.
+      */
+     window) {
+     const windowSize = ms(window);
+     const script = `
+       local currentKey = KEYS[1] -- identifier including prefixes
+       local previousKey = KEYS[2] -- key of the previous bucket
+       local tokens = tonumber(ARGV[1]) -- tokens per window
+       local now = ARGV[2] -- current timestamp in milliseconds
+       local window = ARGV[3] -- interval in milliseconds
+       local requestID = ARGV[4] -- uuid for this request
+
+       local currentMembers = redis.call("SMEMBERS", currentKey)
+       local requestsInCurrentWindow = #currentMembers
+       local previousMembers = redis.call("SMEMBERS", previousKey)
+       local requestsInPreviousWindow = #previousMembers
+
+       local percentageInCurrent = ( now % window) / window
+       if requestsInPreviousWindow * ( 1 - percentageInCurrent ) + requestsInCurrentWindow >= tokens then
+         return {currentMembers, previousMembers}
+       end
+
+       redis.call("SADD", currentKey, requestID)
+       table.insert(currentMembers, requestID)
+       if requestsInCurrentWindow == 0 then
+         -- The first time this key is set, the value will be 1.
+         -- So we only need the expire command once
+         redis.call("PEXPIRE", currentKey, window * 2 + 1000) -- Enough time to overlap with a new window + 1 second
+       end
+       return {currentMembers, previousMembers}
+     `;
+     const windowDuration = ms(window);
+     return async function (ctx, identifier) {
+       const requestID = crypto.randomUUID();
+       const now = Date.now();
+       const currentWindow = Math.floor(now / windowSize);
+       const currentKey = [identifier, currentWindow].join(":");
+       const previousWindow = currentWindow - windowSize;
+       const previousKey = [identifier, previousWindow].join(":");
+       const dbs = ctx.redis.map((redis) => ({
+         redis,
+         request: redis.eval(script, [currentKey, previousKey], [tokens, now, windowDuration, requestID]),
+       }));
+       const percentageInCurrent = (now % windowDuration) / windowDuration;
+       const [current, previous] = await Promise.any(dbs.map((s) => s.request));
+       const usedTokens = previous.length * (1 - percentageInCurrent) + current.length;
+       const remaining = tokens - usedTokens;
+       /**
+        * If a database differs from the consensus, we sync it
+        */
+       async function sync() {
+         const [individualIDs] = await Promise.all(dbs.map((s) => s.request));
+         const allIDs = Array.from(new Set(individualIDs.flatMap((_) => _)).values());
+         for (const db of dbs) {
+           const [ids] = await db.request;
+           /**
+            * If the bucket in this db is already full, it doesn't matter which ids it contains.
+            * So we do not have to sync.
+            */
+           if (ids.length >= tokens) {
+             continue;
+           }
+           const diff = allIDs.filter((id) => !ids.includes(id));
+           /**
+            * Don't waste a request if there is nothing to send
+            */
+           if (diff.length === 0) {
+             continue;
+           }
+           await db.redis.sadd(currentKey, ...allIDs);
+         }
+       }
+       /**
+        * Do not await sync. This should not run in the critical path.
+        */
+       sync();
+       return {
+         success: remaining > 0,
+         limit: tokens,
+         remaining,
+         reset: (currentWindow + 1) * windowDuration,
+       };
+     };
+   }
+ }
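
To make the weighted sliding-window estimate used in the script above concrete, here is a small standalone sketch (not part of the package) that reproduces the README's example: 4 requests in the previous window, 5 in the current one, 15 seconds into a 1-minute window:

```ts
// Standalone illustration of the sliding-window approximation used above:
// previousCount * (1 - percentageInCurrent) + currentCount
function approximateRequests(
  previousCount: number, // requests seen in the previous window
  currentCount: number,  // requests seen so far in the current window
  nowMs: number,         // current timestamp in milliseconds
  windowMs: number,      // window size in milliseconds
): number {
  const percentageInCurrent = (nowMs % windowMs) / windowMs;
  return previousCount * (1 - percentageInCurrent) + currentCount;
}

// 00:01:15 in a 1-minute window: 75_000 % 60_000 = 15_000, i.e. 25% into the window.
// 4 * (1 - 0.25) + 5 = 8, which is still below a limit of 10, so the request passes.
console.log(approximateRequests(4, 5, 75_000, 60_000)); // 8
```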
package/esm/mod.js CHANGED
@@ -1,2 +1,2 @@
- export * from "./ratelimiter.js";
- export * from "./types.js";
+ export { RegionRatelimit as Ratelimit } from "./region.js";
+ export { GlobalRatelimit } from "./global.js";
package/esm/ratelimit.js ADDED
@@ -0,0 +1,129 @@
+ /**
+  * Ratelimiter using serverless redis from https://upstash.com/
+  *
+  * @example
+  * ```ts
+  * const { limit } = new Ratelimit({
+  *   redis: Redis.fromEnv(),
+  *   limiter: Ratelimit.slidingWindow(
+  *     10,     // Allow 10 requests per window of 30 minutes
+  *     "30 m", // interval of 30 minutes
+  *   )
+  * })
+  *
+  * ```
+  */
+ export class Ratelimit {
+   constructor(config) {
+     Object.defineProperty(this, "limiter", {
+       enumerable: true,
+       configurable: true,
+       writable: true,
+       value: void 0
+     });
+     Object.defineProperty(this, "ctx", {
+       enumerable: true,
+       configurable: true,
+       writable: true,
+       value: void 0
+     });
+     Object.defineProperty(this, "prefix", {
+       enumerable: true,
+       configurable: true,
+       writable: true,
+       value: void 0
+     });
+     /**
+      * Determine if a request should pass or be rejected based on the identifier and previously chosen ratelimit.
+      *
+      * Use this if you want to reject all requests that you can not handle right now.
+      *
+      * @example
+      * ```ts
+      * const ratelimit = new Ratelimit({
+      *   redis: Redis.fromEnv(),
+      *   limiter: Ratelimit.slidingWindow(10, "10 s")
+      * })
+      *
+      * const { success } = await ratelimit.limit(id)
+      * if (!success){
+      *   return "Nope"
+      * }
+      * return "Yes"
+      * ```
+      */
+     Object.defineProperty(this, "limit", {
+       enumerable: true,
+       configurable: true,
+       writable: true,
+       value: async (identifier) => {
+         const key = [this.prefix, identifier].join(":");
+         return await this.limiter(this.ctx, key);
+       }
+     });
+     /**
+      * Block until the request may pass or timeout is reached.
+      *
+      * This method returns a promise that resolves as soon as the request may be processed
+      * or after the timeout has been reached.
+      *
+      * Use this if you want to delay the request until it is ready to get processed.
+      *
+      * @example
+      * ```ts
+      * const ratelimit = new Ratelimit({
+      *   redis: Redis.fromEnv(),
+      *   limiter: Ratelimit.slidingWindow(10, "10 s")
+      * })
+      *
+      * const { success } = await ratelimit.blockUntilReady(id, 60_000)
+      * if (!success){
+      *   return "Nope"
+      * }
+      * return "Yes"
+      * ```
+      */
+     Object.defineProperty(this, "blockUntilReady", {
+       enumerable: true,
+       configurable: true,
+       writable: true,
+       value: async (
+       /**
+        * An identifier per user or api.
+        * Choose a userID, or api token, or ip address.
+        *
+        * If you want to globally limit your api, you can set a constant string.
+        */
+       identifier,
+       /**
+        * Maximum duration to wait in milliseconds.
+        * After this time the request will be denied.
+        */
+       timeout) => {
+         if (timeout <= 0) {
+           throw new Error("timeout must be positive");
+         }
+         let res;
+         const deadline = Date.now() + timeout;
+         while (true) {
+           res = await this.limit(identifier);
+           if (res.success) {
+             break;
+           }
+           if (res.reset === 0) {
+             throw new Error("This should not happen");
+           }
+           const wait = Math.min(res.reset, deadline) - Date.now();
+           await new Promise((r) => setTimeout(r, wait));
+           if (Date.now() > deadline) {
+             break;
+           }
+         }
+         return res;
+       }
+     });
+     this.ctx = config.ctx;
+     this.limiter = config.limiter;
+     this.prefix = config.prefix ?? "@upstash/ratelimit";
+   }
+ }
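
The constructor above only stores `config.limiter` and later calls it as `this.limiter(this.ctx, key)`, so a limiter is just an async function from a context and a key to a response object. As a hedged sketch of that contract (the `allowEverything` limiter below is a made-up example, not part of the package):

```ts
// Illustrative only: anything with this shape can be passed as `limiter`.
// The fields mirror the objects returned by fixedWindow/slidingWindow above.
const allowEverything = async (_ctx: unknown, _key: string) => ({
  success: true,       // the request may pass
  limit: Infinity,     // configured maximum per window
  remaining: Infinity, // how many requests are left in the current window
  reset: Date.now(),   // unix timestamp in milliseconds when the limit resets
});

// Plugged in like a built-in algorithm (sketch):
// const ratelimit = new Ratelimit({ redis: Redis.fromEnv(), limiter: allowEverything });
```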
@@ -1,4 +1,5 @@
  import { ms } from "./duration.js";
+ import { Ratelimit } from "./ratelimit.js";
  /**
   * Ratelimiter using serverless redis from https://upstash.com/
   *
@@ -14,121 +15,16 @@ import { ms } from "./duration.js";
   *
   * ```
   */
- export class Ratelimit {
+ export class RegionRatelimit extends Ratelimit {
    /**
     * Create a new Ratelimit instance by providing a `@upstash/redis` instance and the algorithm of your choice.
     */
    constructor(config) {
-     Object.defineProperty(this, "redis", {
-       enumerable: true,
-       configurable: true,
-       writable: true,
-       value: void 0
+     super({
+       prefix: config.prefix,
+       limiter: config.limiter,
+       ctx: { redis: config.redis },
      });
-     Object.defineProperty(this, "limiter", {
-       enumerable: true,
-       configurable: true,
-       writable: true,
-       value: void 0
-     });
-     Object.defineProperty(this, "prefix", {
-       enumerable: true,
-       configurable: true,
-       writable: true,
-       value: void 0
-     });
-     /**
-      * Determine if a request should pass or be rejected based on the identifier and previously chosen ratelimit.
-      *
-      * Use this if you want to reject all requests that you can not handle right now.
-      *
-      * @example
-      * ```ts
-      * const ratelimit = new Ratelimit({
-      *   redis: Redis.fromEnv(),
-      *   limiter: Ratelimit.slidingWindow(10, "10 s")
-      * })
-      *
-      * const { success } = await ratelimit.limit(id)
-      * if (!success){
-      *   return "Nope"
-      * }
-      * return "Yes"
-      * ```
-      */
-     Object.defineProperty(this, "limit", {
-       enumerable: true,
-       configurable: true,
-       writable: true,
-       value: async (identifier) => {
-         const key = [this.prefix, identifier].join(":");
-         return await this.limiter({ redis: this.redis }, key);
-       }
-     });
-     /**
-      * Block until the request may pass or timeout is reached.
-      *
-      * This method returns a promsie that resolves as soon as the request may be processed
-      * or after the timeoue has been reached.
-      *
-      * Use this if you want to delay the request until it is ready to get processed.
-      *
-      * @example
-      * ```ts
-      * const ratelimit = new Ratelimit({
-      *   redis: Redis.fromEnv(),
-      *   limiter: Ratelimit.slidingWindow(10, "10 s")
-      * })
-      *
-      * const { success } = await ratelimit.blockUntilReady(id, 60_000)
-      * if (!success){
-      *   return "Nope"
-      * }
-      * return "Yes"
-      * ```
-      */
-     Object.defineProperty(this, "blockUntilReady", {
-       enumerable: true,
-       configurable: true,
-       writable: true,
-       value: async (
-       /**
-        * An identifier per user or api.
-        * Choose a userID, or api token, or ip address.
-        *
-        * If you want to globally limit your api, you can set a constant string.
-        */
-       identifier,
-       /**
-        * Maximum duration to wait in milliseconds.
-        * After this time the request will be denied.
-        */
-       timeout) => {
-         if (timeout <= 0) {
-           throw new Error("timeout must be positive");
-         }
-         let res;
-         const deadline = Date.now() + timeout;
-         while (true) {
-           res = await this.limit(identifier);
-           if (res.success) {
-             break;
-           }
-           if (res.reset === 0) {
-             throw new Error("This should not happen");
-           }
-           const wait = Math.min(res.reset, deadline) - Date.now();
-           await new Promise((r) => setTimeout(r, wait));
-           if (Date.now() > deadline) {
-             break;
-           }
-         }
-         return res;
-       }
-     });
-     this.redis = config.redis;
-     this.limiter = config.limiter;
-     this.prefix = config.prefix ?? "@upstash/ratelimit";
    }
    /**
     * Each request inside a fixed time window increases a counter.
@@ -183,54 +79,6 @@ export class Ratelimit {
        };
      };
    }
-   // /**
-   //  * For each request all past requests in the last `{window}` are summed up and
-   //  * if they exceed `{tokens}`, the request will be rejected.
-   //  *
-   //  * **Pro:**
-   //  *
-   //  * Does not have the problem of `fixedWindow` at the window boundaries.
-   //  *
-   //  * **Con:**
-   //  *
-   //  * More expensive to store and compute, which makes this unsuitable for apis
-   //  * with very high traffic.
-   //  *
-   //  * @param window - The duration in which the user can max X requests.
-   //  * @param tokens - How many requests a user can make in each time window.
-   //  */
-   // static slidingLogs(window: Duration, tokens: number): Ratelimiter {
-   //   const script = `
-   //     local key = KEYS[1] -- identifier including prefixes
-   //     local windowStart = ARGV[1] -- timestamp of window start
-   //     local windowEnd = ARGV[2] -- timestamp of window end
-   //     local tokens = ARGV[3] -- tokens per window
-   //     local now = ARGV[4] -- current timestamp
-   //     local count = redis.call("ZCOUNT", key, windowStart, windowEnd)
-   //     if count < tonumber(tokens) then
-   //       -- Log the current request
-   //       redis.call("ZADD", key, now, now)
-   //       -- Remove all previous requests that are outside the window
-   //       redis.call("ZREMRANGEBYSCORE", key, "-inf", windowStart - 1)
-   //     end
-   //     return count
-   //   `;
-   //   return async function (ctx: Context, identifier: string) {
-   //     const windowEnd = Date.now();
-   //     const windowStart = windowEnd - ms(window);
-   //     const count = (await ctx.redis.eval(
-   //       script,
-   //       [identifier],
-   //       [windowStart, windowEnd, tokens, Date.now()]
-   //     )) as number;
-   //     return {
-   //       success: count < tokens,
-   //       limit: tokens,
-   //       remaining: Math.max(0, tokens - count - 1),
-   //       reset: windowEnd,
-   //     };
-   //   };
-   // }
    /**
     * Combined approach of `slidingLogs` and `fixedWindow` with lower storage
     * costs than `slidingLogs` and improved boundary behavior by calculating a