@upstash/ratelimit 0.4.1 → 0.4.3-canary.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.js CHANGED
@@ -26,30 +26,6 @@ __export(src_exports, {
  });
  module.exports = __toCommonJS(src_exports);

- // src/duration.ts
- function ms(d) {
-   const match = d.match(/^(\d+)\s?(ms|s|m|h|d)$/);
-   if (!match) {
-     throw new Error(`Unable to parse window size: ${d}`);
-   }
-   const time = parseInt(match[1]);
-   const unit = match[2];
-   switch (unit) {
-     case "ms":
-       return time;
-     case "s":
-       return time * 1e3;
-     case "m":
-       return time * 1e3 * 60;
-     case "h":
-       return time * 1e3 * 60 * 60;
-     case "d":
-       return time * 1e3 * 60 * 60 * 24;
-     default:
-       throw new Error(`Unable to parse window size: ${d}`);
-   }
- }
-
  // src/analytics.ts
  var import_core_analytics = require("@upstash/core-analytics");
  var Analytics = class {
@@ -57,6 +33,7 @@ var Analytics = class {
    table = "events";
    constructor(config) {
      this.analytics = new import_core_analytics.Analytics({
+       // @ts-expect-error we need to fix the types in core-analytics, it should only require the methods it needs, not the whole sdk
        redis: config.redis,
        window: "1h",
        prefix: config.prefix ?? "@upstash/ratelimit",
@@ -147,6 +124,30 @@ var Cache = class {
    }
  };

+ // src/duration.ts
+ function ms(d) {
+   const match = d.match(/^(\d+)\s?(ms|s|m|h|d)$/);
+   if (!match) {
+     throw new Error(`Unable to parse window size: ${d}`);
+   }
+   const time = parseInt(match[1]);
+   const unit = match[2];
+   switch (unit) {
+     case "ms":
+       return time;
+     case "s":
+       return time * 1e3;
+     case "m":
+       return time * 1e3 * 60;
+     case "h":
+       return time * 1e3 * 60 * 60;
+     case "d":
+       return time * 1e3 * 60 * 60 * 24;
+     default:
+       throw new Error(`Unable to parse window size: ${d}`);
+   }
+ }
+
  // src/ratelimit.ts
  var Ratelimit = class {
    limiter;
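
The `ms` helper above (only moved within the bundle by this release) is what turns the duration strings accepted by the limiter factories into milliseconds. An illustrative sketch, not part of the published diff, assuming the package's public `Ratelimit.slidingWindow` factory:

import { Ratelimit } from "@upstash/ratelimit";

// Accepted formats per the regex /^(\d+)\s?(ms|s|m|h|d)$/ above:
//   "100 ms" -> 100, "10 s" -> 10000, "5 m" -> 300000, "1 h" -> 3600000, "1 d" -> 86400000
// Anything else, e.g. "10 seconds", throws `Unable to parse window size: 10 seconds`.
const limiter = Ratelimit.slidingWindow(10, "10 s"); // window resolved internally via ms("10 s") === 10000
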
@@ -159,7 +160,7 @@ var Ratelimit = class {
      this.limiter = config.limiter;
      this.timeout = config.timeout ?? 5e3;
      this.prefix = config.prefix ?? "@upstash/ratelimit";
-     this.analytics = config.analytics !== false ? new Analytics({
+     this.analytics = config.analytics ? new Analytics({
        redis: Array.isArray(this.ctx.redis) ? this.ctx.redis[0] : this.ctx.redis,
        prefix: this.prefix
      }) : void 0;
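
The one-line change above flips the analytics default: `config.analytics !== false` treated a missing option as enabled, while `config.analytics` only creates the `Analytics` client when the option is explicitly truthy. A sketch of keeping the old behavior, assuming the public constructor options of `@upstash/ratelimit` and `@upstash/redis`:

import { Ratelimit } from "@upstash/ratelimit";
import { Redis } from "@upstash/redis";

// As of this version, analytics are opt-in. Pass `analytics: true` to keep
// collecting events as 0.4.1 did by default; omit it to skip the Analytics client entirely.
const ratelimit = new Ratelimit({
  redis: Redis.fromEnv(),
  limiter: Ratelimit.slidingWindow(10, "10 s"),
  analytics: true,
});
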
@@ -235,7 +236,7 @@ var Ratelimit = class {
    /**
     * Block until the request may pass or timeout is reached.
     *
-    * This method returns a promsie that resolves as soon as the request may be processed
+    * This method returns a promise that resolves as soon as the request may be processed
     * or after the timeoue has been reached.
     *
     * Use this if you want to delay the request until it is ready to get processed.
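
The corrected doc comment above describes `blockUntilReady`, which resolves once the request may proceed or the timeout elapses, rather than rejecting immediately like `limit`. A minimal sketch, assuming the documented `(identifier, timeout in milliseconds)` signature:

import { Ratelimit } from "@upstash/ratelimit";
import { Redis } from "@upstash/redis";

const ratelimit = new Ratelimit({
  redis: Redis.fromEnv(),
  limiter: Ratelimit.fixedWindow(10, "30 s"),
});

// Wait up to 5 seconds for a free slot instead of failing the request right away.
const { success } = await ratelimit.blockUntilReady("user_123", 5000);
if (!success) {
  // Still rate limited after the timeout; handle the rejection here.
}
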
@@ -278,6 +279,224 @@ var Ratelimit = class {
    };
  };

+ // src/multi.ts
+ function randomId() {
+   let result = "";
+   const characters = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789";
+   const charactersLength = characters.length;
+   for (let i = 0; i < 16; i++) {
+     result += characters.charAt(Math.floor(Math.random() * charactersLength));
+   }
+   return result;
+ }
+ var MultiRegionRatelimit = class extends Ratelimit {
+   /**
+    * Create a new Ratelimit instance by providing a `@upstash/redis` instance and the algorithn of your choice.
+    */
+   constructor(config) {
+     super({
+       prefix: config.prefix,
+       limiter: config.limiter,
+       timeout: config.timeout,
+       analytics: config.analytics,
+       ctx: {
+         redis: config.redis,
+         cache: config.ephemeralCache ? new Cache(config.ephemeralCache) : void 0
+       }
+     });
+   }
+   /**
+    * Each requests inside a fixed time increases a counter.
+    * Once the counter reaches a maxmimum allowed number, all further requests are
+    * rejected.
+    *
+    * **Pro:**
+    *
+    * - Newer requests are not starved by old ones.
+    * - Low storage cost.
+    *
+    * **Con:**
+    *
+    * A burst of requests near the boundary of a window can result in a very
+    * high request rate because two windows will be filled with requests quickly.
+    *
+    * @param tokens - How many requests a user can make in each time window.
+    * @param window - A fixed timeframe
+    */
+   static fixedWindow(tokens, window) {
+     const windowDuration = ms(window);
+     const script = `
+     local key = KEYS[1]
+     local id = ARGV[1]
+     local window = ARGV[2]
+
+     redis.call("SADD", key, id)
+     local members = redis.call("SMEMBERS", key)
+     if #members == 1 then
+       -- The first time this key is set, the value will be 1.
+       -- So we only need the expire command once
+       redis.call("PEXPIRE", key, window)
+     end
+
+     return members
+     `;
+     return async function(ctx, identifier) {
+       if (ctx.cache) {
+         const { blocked, reset: reset2 } = ctx.cache.isBlocked(identifier);
+         if (blocked) {
+           return {
+             success: false,
+             limit: tokens,
+             remaining: 0,
+             reset: reset2,
+             pending: Promise.resolve()
+           };
+         }
+       }
+       const requestID = randomId();
+       const bucket = Math.floor(Date.now() / windowDuration);
+       const key = [identifier, bucket].join(":");
+       const dbs = ctx.redis.map((redis) => ({
+         redis,
+         request: redis.eval(script, [key], [requestID, windowDuration])
+       }));
+       const firstResponse = await Promise.any(dbs.map((s) => s.request));
+       const usedTokens = firstResponse.length;
+       const remaining = tokens - usedTokens - 1;
+       async function sync() {
+         const individualIDs = await Promise.all(dbs.map((s) => s.request));
+         const allIDs = Array.from(new Set(individualIDs.flatMap((_) => _)).values());
+         for (const db of dbs) {
+           const ids = await db.request;
+           if (ids.length >= tokens) {
+             continue;
+           }
+           const diff = allIDs.filter((id) => !ids.includes(id));
+           if (diff.length === 0) {
+             continue;
+           }
+           await db.redis.sadd(key, ...allIDs);
+         }
+       }
+       const success = remaining > 0;
+       const reset = (bucket + 1) * windowDuration;
+       if (ctx.cache && !success) {
+         ctx.cache.blockUntil(identifier, reset);
+       }
+       return {
+         success,
+         limit: tokens,
+         remaining,
+         reset,
+         pending: sync()
+       };
+     };
+   }
+   /**
+    * Combined approach of `slidingLogs` and `fixedWindow` with lower storage
+    * costs than `slidingLogs` and improved boundary behavior by calcualting a
+    * weighted score between two windows.
+    *
+    * **Pro:**
+    *
+    * Good performance allows this to scale to very high loads.
+    *
+    * **Con:**
+    *
+    * Nothing major.
+    *
+    * @param tokens - How many requests a user can make in each time window.
+    * @param window - The duration in which the user can max X requests.
+    */
+   static slidingWindow(tokens, window) {
+     const windowSize = ms(window);
+     const script = `
+     local currentKey = KEYS[1] -- identifier including prefixes
+     local previousKey = KEYS[2] -- key of the previous bucket
+     local tokens = tonumber(ARGV[1]) -- tokens per window
+     local now = ARGV[2] -- current timestamp in milliseconds
+     local window = ARGV[3] -- interval in milliseconds
+     local requestID = ARGV[4] -- uuid for this request
+
+
+     local currentMembers = redis.call("SMEMBERS", currentKey)
+     local requestsInCurrentWindow = #currentMembers
+     local previousMembers = redis.call("SMEMBERS", previousKey)
+     local requestsInPreviousWindow = #previousMembers
+
+     local percentageInCurrent = ( now % window) / window
+     if requestsInPreviousWindow * ( 1 - percentageInCurrent ) + requestsInCurrentWindow >= tokens then
+       return {currentMembers, previousMembers}
+     end
+
+     redis.call("SADD", currentKey, requestID)
+     table.insert(currentMembers, requestID)
+     if requestsInCurrentWindow == 0 then
+       -- The first time this key is set, the value will be 1.
+       -- So we only need the expire command once
+       redis.call("PEXPIRE", currentKey, window * 2 + 1000) -- Enough time to overlap with a new window + 1 second
+     end
+     return {currentMembers, previousMembers}
+     `;
+     const windowDuration = ms(window);
+     return async function(ctx, identifier) {
+       if (ctx.cache) {
+         const { blocked, reset: reset2 } = ctx.cache.isBlocked(identifier);
+         if (blocked) {
+           return {
+             success: false,
+             limit: tokens,
+             remaining: 0,
+             reset: reset2,
+             pending: Promise.resolve()
+           };
+         }
+       }
+       const requestID = randomId();
+       const now = Date.now();
+       const currentWindow = Math.floor(now / windowSize);
+       const currentKey = [identifier, currentWindow].join(":");
+       const previousWindow = currentWindow - windowSize;
+       const previousKey = [identifier, previousWindow].join(":");
+       const dbs = ctx.redis.map((redis) => ({
+         redis,
+         request: redis.eval(script, [currentKey, previousKey], [tokens, now, windowDuration, requestID])
+       }));
+       const percentageInCurrent = now % windowDuration / windowDuration;
+       const [current, previous] = await Promise.any(dbs.map((s) => s.request));
+       const usedTokens = previous.length * (1 - percentageInCurrent) + current.length;
+       const remaining = tokens - usedTokens;
+       async function sync() {
+         const [individualIDs] = await Promise.all(dbs.map((s) => s.request));
+         const allIDs = Array.from(new Set(individualIDs.flatMap((_) => _)).values());
+         for (const db of dbs) {
+           const [ids] = await db.request;
+           if (ids.length >= tokens) {
+             continue;
+           }
+           const diff = allIDs.filter((id) => !ids.includes(id));
+           if (diff.length === 0) {
+             continue;
+           }
+           await db.redis.sadd(currentKey, ...allIDs);
+         }
+       }
+       const success = remaining > 0;
+       const reset = (currentWindow + 1) * windowDuration;
+       if (ctx.cache && !success) {
+         ctx.cache.blockUntil(identifier, reset);
+       }
+       return {
+         success,
+         limit: tokens,
+         remaining,
+         reset,
+         pending: sync()
+       };
+     };
+   }
+ };
+
  // src/single.ts
  var RegionRatelimit = class extends Ratelimit {
    /**
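
The block added above is the `MultiRegionRatelimit` implementation, which this release moves earlier in the bundle (the matching removal follows in the next hunk). Each regional Redis stores the request IDs for the current window, `Promise.any` takes the fastest region's answer, and `sync()` back-fills missing IDs to the slower regions. In the sliding-window script, a request is rejected once `requestsInPreviousWindow * (1 - percentageInCurrent) + requestsInCurrentWindow >= tokens`; for example, with `tokens = 10`, 8 requests in the previous window and 4 in the current one at the halfway point count as 8 * 0.5 + 4 = 8, so the next request still passes. A usage sketch (URLs and tokens are placeholders, not values from this diff):

import { MultiRegionRatelimit } from "@upstash/ratelimit";
import { Redis } from "@upstash/redis";

const ratelimit = new MultiRegionRatelimit({
  redis: [
    new Redis({ url: "<region-1-rest-url>", token: "<region-1-token>" }),
    new Redis({ url: "<region-2-rest-url>", token: "<region-2-token>" }),
  ],
  limiter: MultiRegionRatelimit.slidingWindow(10, "10 s"),
  ephemeralCache: new Map(), // optional in-memory cache backing ctx.cache above
});

const { success, pending } = await ratelimit.limit("user_123");
// `pending` resolves once the winning region's request IDs have been replicated
// to the other regions by sync(); await it before the runtime shuts down.
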
@@ -600,224 +819,6 @@ var RegionRatelimit = class extends Ratelimit {
      };
    }
  };
-
- // src/multi.ts
- function randomId() {
-   let result = "";
-   const characters = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789";
-   const charactersLength = characters.length;
-   for (let i = 0; i < 16; i++) {
-     result += characters.charAt(Math.floor(Math.random() * charactersLength));
-   }
-   return result;
- }
- var MultiRegionRatelimit = class extends Ratelimit {
-   /**
-    * Create a new Ratelimit instance by providing a `@upstash/redis` instance and the algorithn of your choice.
-    */
-   constructor(config) {
-     super({
-       prefix: config.prefix,
-       limiter: config.limiter,
-       timeout: config.timeout,
-       analytics: config.analytics,
-       ctx: {
-         redis: config.redis,
-         cache: config.ephemeralCache ? new Cache(config.ephemeralCache) : void 0
-       }
-     });
-   }
-   /**
-    * Each requests inside a fixed time increases a counter.
-    * Once the counter reaches a maxmimum allowed number, all further requests are
-    * rejected.
-    *
-    * **Pro:**
-    *
-    * - Newer requests are not starved by old ones.
-    * - Low storage cost.
-    *
-    * **Con:**
-    *
-    * A burst of requests near the boundary of a window can result in a very
-    * high request rate because two windows will be filled with requests quickly.
-    *
-    * @param tokens - How many requests a user can make in each time window.
-    * @param window - A fixed timeframe
-    */
-   static fixedWindow(tokens, window) {
-     const windowDuration = ms(window);
-     const script = `
-     local key = KEYS[1]
-     local id = ARGV[1]
-     local window = ARGV[2]
-
-     redis.call("SADD", key, id)
-     local members = redis.call("SMEMBERS", key)
-     if #members == 1 then
-       -- The first time this key is set, the value will be 1.
-       -- So we only need the expire command once
-       redis.call("PEXPIRE", key, window)
-     end
-
-     return members
-     `;
-     return async function(ctx, identifier) {
-       if (ctx.cache) {
-         const { blocked, reset: reset2 } = ctx.cache.isBlocked(identifier);
-         if (blocked) {
-           return {
-             success: false,
-             limit: tokens,
-             remaining: 0,
-             reset: reset2,
-             pending: Promise.resolve()
-           };
-         }
-       }
-       const requestID = randomId();
-       const bucket = Math.floor(Date.now() / windowDuration);
-       const key = [identifier, bucket].join(":");
-       const dbs = ctx.redis.map((redis) => ({
-         redis,
-         request: redis.eval(script, [key], [requestID, windowDuration])
-       }));
-       const firstResponse = await Promise.any(dbs.map((s) => s.request));
-       const usedTokens = firstResponse.length;
-       const remaining = tokens - usedTokens - 1;
-       async function sync() {
-         const individualIDs = await Promise.all(dbs.map((s) => s.request));
-         const allIDs = Array.from(new Set(individualIDs.flatMap((_) => _)).values());
-         for (const db of dbs) {
-           const ids = await db.request;
-           if (ids.length >= tokens) {
-             continue;
-           }
-           const diff = allIDs.filter((id) => !ids.includes(id));
-           if (diff.length === 0) {
-             continue;
-           }
-           await db.redis.sadd(key, ...allIDs);
-         }
-       }
-       const success = remaining > 0;
-       const reset = (bucket + 1) * windowDuration;
-       if (ctx.cache && !success) {
-         ctx.cache.blockUntil(identifier, reset);
-       }
-       return {
-         success,
-         limit: tokens,
-         remaining,
-         reset,
-         pending: sync()
-       };
-     };
-   }
-   /**
-    * Combined approach of `slidingLogs` and `fixedWindow` with lower storage
-    * costs than `slidingLogs` and improved boundary behavior by calcualting a
-    * weighted score between two windows.
-    *
-    * **Pro:**
-    *
-    * Good performance allows this to scale to very high loads.
-    *
-    * **Con:**
-    *
-    * Nothing major.
-    *
-    * @param tokens - How many requests a user can make in each time window.
-    * @param window - The duration in which the user can max X requests.
-    */
-   static slidingWindow(tokens, window) {
-     const windowSize = ms(window);
-     const script = `
-     local currentKey = KEYS[1] -- identifier including prefixes
-     local previousKey = KEYS[2] -- key of the previous bucket
-     local tokens = tonumber(ARGV[1]) -- tokens per window
-     local now = ARGV[2] -- current timestamp in milliseconds
-     local window = ARGV[3] -- interval in milliseconds
-     local requestID = ARGV[4] -- uuid for this request
-
-
-     local currentMembers = redis.call("SMEMBERS", currentKey)
-     local requestsInCurrentWindow = #currentMembers
-     local previousMembers = redis.call("SMEMBERS", previousKey)
-     local requestsInPreviousWindow = #previousMembers
-
-     local percentageInCurrent = ( now % window) / window
-     if requestsInPreviousWindow * ( 1 - percentageInCurrent ) + requestsInCurrentWindow >= tokens then
-       return {currentMembers, previousMembers}
-     end
-
-     redis.call("SADD", currentKey, requestID)
-     table.insert(currentMembers, requestID)
-     if requestsInCurrentWindow == 0 then
-       -- The first time this key is set, the value will be 1.
-       -- So we only need the expire command once
-       redis.call("PEXPIRE", currentKey, window * 2 + 1000) -- Enough time to overlap with a new window + 1 second
-     end
-     return {currentMembers, previousMembers}
-     `;
-     const windowDuration = ms(window);
-     return async function(ctx, identifier) {
-       if (ctx.cache) {
-         const { blocked, reset: reset2 } = ctx.cache.isBlocked(identifier);
-         if (blocked) {
-           return {
-             success: false,
-             limit: tokens,
-             remaining: 0,
-             reset: reset2,
-             pending: Promise.resolve()
-           };
-         }
-       }
-       const requestID = randomId();
-       const now = Date.now();
-       const currentWindow = Math.floor(now / windowSize);
-       const currentKey = [identifier, currentWindow].join(":");
-       const previousWindow = currentWindow - windowSize;
-       const previousKey = [identifier, previousWindow].join(":");
-       const dbs = ctx.redis.map((redis) => ({
-         redis,
-         request: redis.eval(script, [currentKey, previousKey], [tokens, now, windowDuration, requestID])
-       }));
-       const percentageInCurrent = now % windowDuration / windowDuration;
-       const [current, previous] = await Promise.any(dbs.map((s) => s.request));
-       const usedTokens = previous.length * (1 - percentageInCurrent) + current.length;
-       const remaining = tokens - usedTokens;
-       async function sync() {
-         const [individualIDs] = await Promise.all(dbs.map((s) => s.request));
-         const allIDs = Array.from(new Set(individualIDs.flatMap((_) => _)).values());
-         for (const db of dbs) {
-           const [ids] = await db.request;
-           if (ids.length >= tokens) {
-             continue;
-           }
-           const diff = allIDs.filter((id) => !ids.includes(id));
-           if (diff.length === 0) {
-             continue;
-           }
-           await db.redis.sadd(currentKey, ...allIDs);
-         }
-       }
-       const success = remaining > 0;
-       const reset = (currentWindow + 1) * windowDuration;
-       if (ctx.cache && !success) {
-         ctx.cache.blockUntil(identifier, reset);
-       }
-       return {
-         success,
-         limit: tokens,
-         remaining,
-         reset,
-         pending: sync()
-       };
-     };
-   }
- };
  // Annotate the CommonJS export names for ESM import in node:
  0 && (module.exports = {
    Analytics,