@upstash/ratelimit 1.1.0 → 1.1.2-canary

package/README.md CHANGED
@@ -1,7 +1,7 @@
  # Upstash Rate Limit
 
  [![Tests](https://github.com/upstash/ratelimit/actions/workflows/tests.yaml/badge.svg)](https://github.com/upstash/ratelimit/actions/workflows/tests.yaml)
- [![npm (scoped)](https://img.shields.io/npm/v/@upstash/ratelimit)](https://www.npmjs.com/package/ratelimit)
+ [![npm (scoped)](https://img.shields.io/npm/v/@upstash/ratelimit)](https://www.npmjs.com/package/@upstash/ratelimit)
 
  > [!NOTE] > **This project is in GA Stage.**
  > The Upstash Professional Support fully covers this project. It receives regular updates, and bug fixes. The Upstash team is committed to maintaining and improving its functionality.
@@ -36,13 +36,10 @@ import { Ratelimit } from "https://cdn.skypack.dev/@upstash/ratelimit@latest";
 
  ### Create database
 
- Create a new redis database on [upstash](https://console.upstash.com/)
+ Create a new redis database on [upstash](https://console.upstash.com/). See [here](https://github.com/upstash/upstash-redis#quick-start) for documentation on how to create a redis instance.
 
  ### Basic Usage
 
- See [here](https://github.com/upstash/upstash-redis#quick-start) for
- documentation on how to create a redis instance.
-
  ```ts
  import { Ratelimit } from "@upstash/ratelimit"; // for deno: see above
  import { Redis } from "@upstash/redis"; // see below for cloudflare and fastly adapters
@@ -124,6 +121,11 @@ export type RatelimitResponse = {
  };
  ````
 
+ ### Docs
+
+ See [the documentation](https://upstash.com/docs/oss/sdks/ts/ratelimit/overview) for more information details.
+
+
  ### Using with CloudFlare Workers and Vercel Edge
 
  When we use CloudFlare Workers and Vercel Edge, we need to be careful about
@@ -146,10 +148,6 @@ context.waitUntil(pending);
 
  See `waitUntil` documentation in [Cloudflare](https://developers.cloudflare.com/workers/runtime-apis/handlers/fetch/#contextwaituntil) and [Vercel](https://vercel.com/docs/functions/edge-middleware/middleware-api#waituntil) for more details.
 
- ### Docs
-
- See [the documentation](https://upstash.com/docs/oss/sdks/ts/ratelimit/overview) for details.
-
  ## Contributing
 
  ### Database
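The reorganized quick-start above still centers on the same flow: create a Redis database, then construct a `Ratelimit` with one of the bundled limiters. A minimal sketch of that usage, assuming `UPSTASH_REDIS_REST_URL` and `UPSTASH_REDIS_REST_TOKEN` are set in the environment (the handler shape is illustrative, not part of the package):

```ts
import { Ratelimit } from "@upstash/ratelimit"; // for deno: see the README note above
import { Redis } from "@upstash/redis";

// Allow 10 requests per 10 seconds per identifier.
const ratelimit = new Ratelimit({
  redis: Redis.fromEnv(),
  limiter: Ratelimit.slidingWindow(10, "10 s"),
  analytics: true,
  prefix: "@upstash/ratelimit",
});

export async function handleRequest(userId: string) {
  const { success, remaining, reset } = await ratelimit.limit(userId);
  if (!success) {
    // Blocked until the current window resets (`reset` is epoch milliseconds).
    return new Response("Too Many Requests", { status: 429 });
  }
  return new Response(`OK, ${remaining} requests left before ${reset}`);
}
```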
package/dist/index.d.mts CHANGED
@@ -1,3 +1,5 @@
+ import { Aggregate } from '@upstash/core-analytics';
+
  /**
  * EphemeralCache is used to block certain identifiers right away in case they have already exceeded the ratelimit.
  */
@@ -10,6 +12,7 @@ interface EphemeralCache {
  set: (key: string, value: number) => void;
  get: (key: string) => number | null;
  incr: (key: string) => number;
+ pop: (key: string) => void;
  empty: () => void;
  }
  type RegionContext = {
@@ -110,13 +113,22 @@ declare class Analytics {
  cf?: Geo;
  }): Geo;
  record(event: Event): Promise<void>;
- series<TFilter extends keyof Omit<Event, "time">>(filter: TFilter, cutoff: number): Promise<({
- time: number;
- } & Record<string, number>)[]>;
+ series<TFilter extends keyof Omit<Event, "time">>(filter: TFilter, cutoff: number): Promise<Aggregate[]>;
  getUsage(cutoff?: number): Promise<Record<string, {
  success: number;
  blocked: number;
  }>>;
+ getUsageOverTime<TFilter extends keyof Omit<Event, "time">>(timestampCount: number, groupby: TFilter): Promise<Aggregate[]>;
+ getMostAllowedBlocked(timestampCount: number, getTop?: number): Promise<{
+ allowed: {
+ identifier: string;
+ count: number;
+ }[];
+ blocked: {
+ identifier: string;
+ count: number;
+ }[];
+ }>;
  }
 
  type Unit = "ms" | "s" | "m" | "h" | "d";
package/dist/index.d.ts CHANGED
@@ -1,3 +1,5 @@
+ import { Aggregate } from '@upstash/core-analytics';
+
  /**
  * EphemeralCache is used to block certain identifiers right away in case they have already exceeded the ratelimit.
  */
@@ -10,6 +12,7 @@ interface EphemeralCache {
  set: (key: string, value: number) => void;
  get: (key: string) => number | null;
  incr: (key: string) => number;
+ pop: (key: string) => void;
  empty: () => void;
  }
  type RegionContext = {
@@ -110,13 +113,22 @@ declare class Analytics {
  cf?: Geo;
  }): Geo;
  record(event: Event): Promise<void>;
- series<TFilter extends keyof Omit<Event, "time">>(filter: TFilter, cutoff: number): Promise<({
- time: number;
- } & Record<string, number>)[]>;
+ series<TFilter extends keyof Omit<Event, "time">>(filter: TFilter, cutoff: number): Promise<Aggregate[]>;
  getUsage(cutoff?: number): Promise<Record<string, {
  success: number;
  blocked: number;
  }>>;
+ getUsageOverTime<TFilter extends keyof Omit<Event, "time">>(timestampCount: number, groupby: TFilter): Promise<Aggregate[]>;
+ getMostAllowedBlocked(timestampCount: number, getTop?: number): Promise<{
+ allowed: {
+ identifier: string;
+ count: number;
+ }[];
+ blocked: {
+ identifier: string;
+ count: number;
+ }[];
+ }>;
  }
 
  type Unit = "ms" | "s" | "m" | "h" | "d";
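The declaration changes above replace the inline `series` return shape with `Aggregate` from `@upstash/core-analytics` and add two aggregation helpers. A rough sketch of how the new surface can be called, based only on the signatures shown here (the Analytics class is documented as experimental; the 24-bucket and top-10 values are illustrative):

```ts
import { Analytics } from "@upstash/ratelimit";
import { Redis } from "@upstash/redis";
import type { Aggregate } from "@upstash/core-analytics";

async function inspectUsage() {
  const analytics = new Analytics({ redis: Redis.fromEnv() });

  // series() now resolves to Aggregate[] instead of the previous
  // `({ time: number } & Record<string, number>)[]` shape.
  const hourAgo = Date.now() - 60 * 60 * 1000;
  const series: Aggregate[] = await analytics.series("identifier", hourAgo);

  // Aggregated buckets over the last 24 timestamps, grouped by identifier.
  const overTime = await analytics.getUsageOverTime(24, "identifier");

  // Top 10 most allowed and most blocked identifiers
  // (getTop defaults to 5 in the bundled implementation below).
  const { allowed, blocked } = await analytics.getMostAllowedBlocked(24, 10);

  return { series, overTime, allowed, blocked };
}
```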
package/dist/index.js CHANGED
@@ -60,30 +60,27 @@ var Analytics = class {
  await this.analytics.ingest(this.table, event);
  }
  async series(filter, cutoff) {
- const records = await this.analytics.query(this.table, {
- filter: [filter],
- range: [cutoff, Date.now()]
- });
- return records;
+ const timestampCount = Math.min(
+ (this.analytics.getBucket(Date.now()) - this.analytics.getBucket(cutoff)) / (60 * 60 * 1e3),
+ 256
+ );
+ return this.analytics.aggregateBucketsWithPipeline(this.table, filter, timestampCount);
  }
  async getUsage(cutoff = 0) {
- const records = await this.analytics.aggregateBy(this.table, "identifier", {
- range: [cutoff, Date.now()]
- });
- const usage = {};
- for (const bucket of records) {
- for (const [k, v] of Object.entries(bucket)) {
- if (k === "time") {
- continue;
- }
- if (!usage[k]) {
- usage[k] = { success: 0, blocked: 0 };
- }
- usage[k].success += v.true ?? 0;
- usage[k].blocked += v.false ?? 0;
- }
- }
- return usage;
+ const timestampCount = Math.min(
+ (this.analytics.getBucket(Date.now()) - this.analytics.getBucket(cutoff)) / (60 * 60 * 1e3),
+ 256
+ );
+ const records = await this.analytics.getAllowedBlocked(this.table, timestampCount);
+ return records;
+ }
+ async getUsageOverTime(timestampCount, groupby) {
+ const result = await this.analytics.aggregateBucketsWithPipeline(this.table, groupby, timestampCount);
+ return result;
+ }
+ async getMostAllowedBlocked(timestampCount, getTop) {
+ getTop = getTop ?? 5;
+ return this.analytics.getMostAllowedBlocked(this.table, timestampCount, getTop);
  }
  };
 
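Both rewritten methods above derive a bucket count from the `cutoff` timestamp before delegating to `@upstash/core-analytics`. A small illustration of that arithmetic, assuming the package's one-hour analytics window (`bucketCountForCutoff` and `bucketOf` are names made up for this sketch, not package API):

```ts
const hourMs = 60 * 60 * 1000;

// Assumed to mirror the core-analytics bucketing for a "1h" window:
// round the timestamp down to the start of its hour.
const bucketOf = (t: number) => Math.floor(t / hourMs) * hourMs;

function bucketCountForCutoff(cutoff: number, now = Date.now()): number {
  // Number of hourly buckets between cutoff and now, capped at 256,
  // matching the Math.min(..., 256) guard in the diff above.
  return Math.min((bucketOf(now) - bucketOf(cutoff)) / hourMs, 256);
}

// A cutoff 24 hours ago spans about 24 hourly buckets:
// bucketCountForCutoff(Date.now() - 24 * hourMs) === 24
```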
@@ -122,6 +119,9 @@ var Cache = class {
  this.cache.set(key, value);
  return value;
  }
+ pop(key) {
+ this.cache.delete(key);
+ }
  empty() {
  this.cache.clear();
  }
@@ -160,7 +160,7 @@ var fixedWindowLimitScript = `
 
  redis.call("HSET", key, id, incrementBy)
  local fields = redis.call("HGETALL", key)
- if #fields == 1 and tonumber(fields[1])==incrementBy then
+ if #fields == 2 and tonumber(fields[2])==incrementBy then
  -- The first time this key is set, and the value will be equal to incrementBy.
  -- So we only need the expire command once
  redis.call("PEXPIRE", key, window)
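The `#fields == 2` fix above follows from how `HGETALL` replies are shaped: a flat array of alternating field names and values, so a hash holding a single request has length 2, not 1. A small sketch of the same parsing convention the `index % 2` reducers in this bundle rely on (illustrative values, not package code):

```ts
// Hypothetical flattened HGETALL reply for a window with one request:
// field names at even indexes, per-request increment values at odd indexes.
const fields = ["request-abc123", "1"];

let usedTokens = 0;
for (let index = 0; index < fields.length; index++) {
  if (index % 2) {
    // Odd indexes carry the increment values.
    usedTokens += Number.parseInt(fields[index]);
  }
}
// fields.length === 2 and usedTokens === 1 for the very first request,
// which is the case the corrected Lua check tests before calling PEXPIRE.
```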
@@ -570,6 +570,9 @@ var MultiRegionRatelimit = class extends Ratelimit {
  },
  async resetTokens(ctx, identifier) {
  const pattern = [identifier, "*"].join(":");
+ if (ctx.cache) {
+ ctx.cache.pop(identifier);
+ }
  for (const db of ctx.redis) {
  await db.eval(resetScript, [pattern], [null]);
  }
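Taken together with the new `Cache.pop` above, these `resetTokens` changes mean that resetting an identifier also drops its block entry from the ephemeral cache, not only its Redis keys. A minimal sketch of how that surfaces through the public `resetUsedTokens` method (assumes Upstash credentials in the environment; the `unblock` handler name is illustrative):

```ts
import { Ratelimit } from "@upstash/ratelimit";
import { Redis } from "@upstash/redis";

const ratelimit = new Ratelimit({
  redis: Redis.fromEnv(),
  limiter: Ratelimit.fixedWindow(5, "60 s"),
  ephemeralCache: new Map(), // kept warm across invocations of the same instance
});

// Illustrative admin handler: lift the limit for one identifier.
export async function unblock(identifier: string) {
  // Deletes the identifier's Redis keys and, as of this release, also pops
  // its entry from the ephemeral cache so a warm instance stops rejecting it.
  await ratelimit.resetUsedTokens(identifier);
}
```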
@@ -615,6 +618,9 @@ var MultiRegionRatelimit = class extends Ratelimit {
  }));
  const percentageInCurrent = now % windowDuration / windowDuration;
  const [current, previous, success] = await Promise.any(dbs.map((s) => s.request));
+ if (success) {
+ current.push(requestId, incrementBy.toString());
+ }
  const previousUsedTokens = previous.reduce((accTokens, usedToken, index) => {
  let parsedToken = 0;
  if (index % 2) {
@@ -629,26 +635,30 @@ var MultiRegionRatelimit = class extends Ratelimit {
  }
  return accTokens + parsedToken;
  }, 0);
- const previousPartialUsed = previousUsedTokens * (1 - percentageInCurrent);
+ const previousPartialUsed = Math.ceil(previousUsedTokens * (1 - percentageInCurrent));
  const usedTokens = previousPartialUsed + currentUsedTokens;
  const remaining = tokens - usedTokens;
  async function sync() {
  const res = await Promise.all(dbs.map((s) => s.request));
- const allCurrentIds = res.flatMap(([current2]) => current2).reduce((accCurrentIds, curr, index) => {
- if (index % 2 === 0) {
- accCurrentIds.push(curr);
- }
- return accCurrentIds;
- }, []);
+ const allCurrentIds = Array.from(
+ new Set(
+ res.flatMap(([current2]) => current2).reduce((acc, curr, index) => {
+ if (index % 2 === 0) {
+ acc.push(curr);
+ }
+ return acc;
+ }, [])
+ ).values()
+ );
  for (const db of dbs) {
- const [_current, previous2, _success] = await db.request;
- const dbIds = previous2.reduce((ids, currentId, index) => {
+ const [current2, _previous, _success] = await db.request;
+ const dbIds = current2.reduce((ids, currentId, index) => {
  if (index % 2 === 0) {
  ids.push(currentId);
  }
  return ids;
  }, []);
- const usedDbTokens = previous2.reduce((accTokens, usedToken, index) => {
+ const usedDbTokens = current2.reduce((accTokens, usedToken, index) => {
  let parsedToken = 0;
  if (index % 2) {
  parsedToken = Number.parseInt(usedToken);
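The `Math.ceil` change above makes the weighted contribution of the previous window an integer, which slightly tightens the limit near window boundaries. A worked example with made-up numbers (names mirror the bundled code; none of this is public API):

```ts
const tokens = 10;              // limit per window
const windowDuration = 10_000;  // "10 s" window in milliseconds
const now = 1_700_000_003_000;  // hypothetical timestamp, 3 s into the window

const percentageInCurrent = (now % windowDuration) / windowDuration; // 0.3
const previousUsedTokens = 7;   // hypothetical usage in the previous window
const currentUsedTokens = 2;    // hypothetical usage in the current window

// Previously kept fractional (7 * 0.7 = 4.9); now rounded up to 5.
const previousPartialUsed = Math.ceil(previousUsedTokens * (1 - percentageInCurrent));
const remaining = tokens - (previousPartialUsed + currentUsedTokens); // 10 - 7 = 3
```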
@@ -699,6 +709,9 @@ var MultiRegionRatelimit = class extends Ratelimit {
  },
  async resetTokens(ctx, identifier) {
  const pattern = [identifier, "*"].join(":");
+ if (ctx.cache) {
+ ctx.cache.pop(identifier);
+ }
  for (const db of ctx.redis) {
  await db.eval(resetScript, [pattern], [null]);
  }
@@ -947,6 +960,9 @@ var RegionRatelimit = class extends Ratelimit {
  },
  async resetTokens(ctx, identifier) {
  const pattern = [identifier, "*"].join(":");
+ if (ctx.cache) {
+ ctx.cache.pop(identifier);
+ }
  await ctx.redis.eval(resetScript, [pattern], [null]);
  }
  });
@@ -1022,6 +1038,9 @@ var RegionRatelimit = class extends Ratelimit {
  },
  async resetTokens(ctx, identifier) {
  const pattern = [identifier, "*"].join(":");
+ if (ctx.cache) {
+ ctx.cache.pop(identifier);
+ }
  await ctx.redis.eval(resetScript, [pattern], [null]);
  }
  });
@@ -1084,6 +1103,9 @@ var RegionRatelimit = class extends Ratelimit {
  },
  async resetTokens(ctx, identifier) {
  const pattern = identifier;
+ if (ctx.cache) {
+ ctx.cache.pop(identifier);
+ }
  await ctx.redis.eval(resetScript, [pattern], [null]);
  }
  });
@@ -1176,7 +1198,7 @@ var RegionRatelimit = class extends Ratelimit {
  if (!ctx.cache) {
  throw new Error("This algorithm requires a cache");
  }
- ctx.cache.empty();
+ ctx.cache.pop(identifier);
  await ctx.redis.eval(resetScript, [pattern], [null]);
  }
  });
package/dist/index.js.map CHANGED
@@ -1 +1 @@
- {"version":3,"sources":["../src/index.ts","../src/analytics.ts","../src/cache.ts","../src/duration.ts","../src/lua-scripts/multi.ts","../src/lua-scripts/reset.ts","../src/ratelimit.ts","../src/multi.ts","../src/lua-scripts/single.ts","../src/single.ts"], …} (single-line minified source map with embedded sources; the remainder of the old map and the regenerated map are omitted in this view)
AAAA;AAAA;;;ACkFpB,IAAe,YAAf,MAAmD;AAAA,EACrC;AAAA,EAEA;AAAA,EAEA;AAAA,EAEA;AAAA,EAEA;AAAA,EAEnB,YAAY,QAAmC;AAC7C,SAAK,MAAM,OAAO;AAClB,SAAK,UAAU,OAAO;AACtB,SAAK,UAAU,OAAO,WAAW;AACjC,SAAK,SAAS,OAAO,UAAU;AAC/B,SAAK,YAAY,OAAO,YACpB,IAAI,UAAU;AAAA,MACZ,OAAO,MAAM,QAAQ,KAAK,IAAI,KAAK,IAAI,KAAK,IAAI,MAAM,CAAC,IAAI,KAAK,IAAI;AAAA,MACpE,QAAQ,KAAK;AAAA,IACf,CAAC,IACD;AAEJ,QAAI,OAAO,0BAA0B,KAAK;AACxC,WAAK,IAAI,QAAQ,IAAI,MAAM,OAAO,cAAc;AAAA,IAClD,WAAW,OAAO,OAAO,mBAAmB,aAAa;AACvD,WAAK,IAAI,QAAQ,IAAI,MAAM,oBAAI,IAAI,CAAC;AAAA,IACtC;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAsCO,QAAQ,OACb,YACA,QAC+B;AAC/B,UAAM,MAAM,CAAC,KAAK,QAAQ,UAAU,EAAE,KAAK,GAAG;AAC9C,QAAI,YAAiB;AACrB,QAAI;AACF,YAAM,MAAoC,CAAC,KAAK,QAAQ,EAAE,MAAM,KAAK,KAAK,KAAK,KAAK,IAAI,CAAC;AACzF,UAAI,KAAK,UAAU,GAAG;AACpB,YAAI;AAAA,UACF,IAAI,QAAQ,CAAC,YAAY;AACvB,wBAAY,WAAW,MAAM;AAC3B,sBAAQ;AAAA,gBACN,SAAS;AAAA,gBACT,OAAO;AAAA,gBACP,WAAW;AAAA,gBACX,OAAO;AAAA,gBACP,SAAS,QAAQ,QAAQ;AAAA,cAC3B,CAAC;AAAA,YACH,GAAG,KAAK,OAAO;AAAA,UACjB,CAAC;AAAA,QACH;AAAA,MACF;AAEA,YAAM,MAAM,MAAM,QAAQ,KAAK,GAAG;AAClC,UAAI,KAAK,WAAW;AAClB,YAAI;AACF,gBAAM,MAAM,MAAM,KAAK,UAAU,WAAW,GAAG,IAAI;AACnD,gBAAM,aAAa,KAAK,UACrB,OAAO;AAAA,YACN;AAAA,YACA,MAAM,KAAK,IAAI;AAAA,YACf,SAAS,IAAI;AAAA,YACb,GAAG;AAAA,UACL,CAAC,EACA,MAAM,CAAC,QAAQ;AACd,oBAAQ,KAAK,8BAA8B,GAAG;AAAA,UAChD,CAAC;AACH,cAAI,UAAU,QAAQ,IAAI,CAAC,IAAI,SAAS,UAAU,CAAC;AAAA,QACrD,SAAS,KAAK;AACZ,kBAAQ,KAAK,8BAA8B,GAAG;AAAA,QAChD;AAAA,MACF;AACA,aAAO;AAAA,IACT,UAAE;AACA,UAAI,WAAW;AACb,qBAAa,SAAS;AAAA,MACxB;AAAA,IACF;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAwBO,kBAAkB,OAOvB,YAKA,YAC+B;AAC/B,QAAI,WAAW,GAAG;AAChB,YAAM,IAAI,MAAM,0BAA0B;AAAA,IAC5C;AACA,QAAI;AAEJ,UAAM,WAAW,KAAK,IAAI,IAAI;AAC9B,WAAO,MAAM;AACX,YAAM,MAAM,KAAK,MAAM,UAAU;AACjC,UAAI,IAAI,SAAS;AACf;AAAA,MACF;AACA,UAAI,IAAI,UAAU,GAAG;AACnB,cAAM,IAAI,MAAM,wBAAwB;AAAA,MAC1C;AAEA,YAAM,OAAO,KAAK,IAAI,IAAI,OAAO,QAAQ,IAAI,KAAK,IAAI;AACtD,YAAM,IAAI,QAAQ,CAAC,MAAM,WAAW,GAAG,IAAI,CAAC;AAE5C,UAAI,KAAK,IAAI,IAAI,UAAU;AACzB;AAAA,MACF;AAAA,IACF;AACA,WAAO;AAAA,EACT;AAAA,EAEO,kBAAkB,OAAO,eAAuB;AACrD,UAAM,UAAU,CAAC,KAAK,QAAQ,UAAU,EAAE,KAAK,GAAG;AAClD,UAAM,KAAK,QAAQ,EAAE,YAAY,KAAK,KAAK,OAAO;AAAA,EACpD;AAAA,EAEO,eAAe,OAAO,eAAwC;AACnE,UAAM,UAAU,CAAC,KAAK,QAAQ,UAAU,EAAE,KAAK,GAAG;AAElD,WAAO,MAAM,KAAK,QAAQ,EAAE,aAAa,KAAK,KAAK,OAAO;AAAA,EAC5D;AACF;;;AC/PA,SAAS,WAAmB;AAC1B,MAAI,SAAS;AACb,QAAM,aAAa;AACnB,QAAM,mBAAmB,WAAW;AACpC,WAAS,IAAI,GAAG,IAAI,IAAI,KAAK;AAC3B,cAAU,WAAW,OAAO,KAAK,MAAM,KAAK,OAAO,IAAI,gBAAgB,CAAC;AAAA,EAC1E;AACA,SAAO;AACT;AAwEO,IAAM,uBAAN,cAAmC,UAA8B;AAAA;AAAA;AAAA;AAAA,EAItE,YAAY,QAAoC;AAC9C,UAAM;AAAA,MACJ,QAAQ,OAAO;AAAA,MACf,SAAS,OAAO;AAAA,MAChB,SAAS,OAAO;AAAA,MAChB,WAAW,OAAO;AAAA,MAClB,KAAK;AAAA,QACH,OAAO,OAAO;AAAA,QACd,OAAO,OAAO,iBAAiB,IAAI,MAAM,OAAO,cAAc,IAAI;AAAA,MACpE;AAAA,IACF,CAAC;AAAA,EACH;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAoBA,OAAO,YAIL,QAIA,QAC+B;AAC/B,UAAM,iBAAiB,GAAG,MAAM;AAEhC,WAAO,OAAO;AAAA,MACZ,MAAM,MAAM,KAAyB,YAAoB,MAAe;AACtE,YAAI,IAAI,OAAO;AACb,gBAAM,EAAE,SAAS,OAAAC,OAAM,IAAI,IAAI,MAAM,UAAU,UAAU;AACzD,cAAI,SAAS;AACX,mBAAO;AAAA,cACL,SAAS;AAAA,cACT,OAAO;AAAA,cACP,WAAW;AAAA,cACX,OAAOA;AAAA,cACP,SAAS,QAAQ,QAAQ;AAAA,YAC3B;AAAA,UACF;AAAA,QACF;AAEA,cAAM,YAAY,SAAS;AAC3B,cAAM,SAAS,KAAK,MAAM,KAAK,IAAI,IAAI,cAAc;AACrD,cAAM,MAAM,CAAC,YAAY,MAAM,EAAE,KAAK,GAAG;AACzC,cAAM,cAAc,OAAO,K
AAK,IAAI,GAAG,IAAI,IAAI;AAE/C,cAAM,MAAsD,IAAI,MAAM,IAAI,CAAC,WAAW;AAAA,UACpF;AAAA,UACA,SAAS,MAAM;AAAA,YACb;AAAA,YACA,CAAC,GAAG;AAAA,YACJ,CAAC,WAAW,gBAAgB,WAAW;AAAA,UACzC;AAAA,QACF,EAAE;AAGF,cAAM,gBAAgB,MAAM,QAAQ,IAAI,IAAI,IAAI,CAAC,MAAM,EAAE,OAAO,CAAC;AAEjE,cAAM,aAAa,cAAc,OAAO,CAAC,WAAmB,WAAW,UAAU;AAC/E,cAAI,cAAc;AAClB,cAAI,QAAQ,GAAG;AACb,0BAAc,OAAO,SAAS,SAAS;AAAA,UACzC;AAEA,iBAAO,YAAY;AAAA,QACrB,GAAG,CAAC;AAEJ,cAAM,YAAY,SAAS;AAK3B,uBAAe,OAAO;AACpB,gBAAM,gBAAgB,MAAM,QAAQ,IAAI,IAAI,IAAI,CAAC,MAAM,EAAE,OAAO,CAAC;AAEjE,gBAAM,SAAS,MAAM;AAAA,YACnB,IAAI;AAAA,cACF,cACG,QAAQ,CAAC,MAAM,CAAC,EAChB,OAAO,CAAC,KAAe,MAAM,UAAU;AACtC,oBAAI,QAAQ,MAAM,GAAG;AACnB,sBAAI,KAAK,IAAI;AAAA,gBACf;AACA,uBAAO;AAAA,cACT,GAAG,CAAC,CAAC;AAAA,YACT,EAAE,OAAO;AAAA,UACX;AAEA,qBAAW,MAAM,KAAK;AACpB,kBAAM,gBAAgB,MAAM,GAAG,SAAS;AAAA,cACtC,CAAC,WAAmB,WAAW,UAAU;AACvC,oBAAI,cAAc;AAClB,oBAAI,QAAQ,GAAG;AACb,gCAAc,OAAO,SAAS,SAAS;AAAA,gBACzC;AAEA,uBAAO,YAAY;AAAA,cACrB;AAAA,cACA;AAAA,YACF;AAEA,kBAAM,SAAS,MAAM,GAAG,SAAS,OAAO,CAAC,KAAe,WAAW,UAAU;AAC3E,kBAAI,QAAQ,MAAM,GAAG;AACnB,oBAAI,KAAK,SAAS;AAAA,cACpB;AACA,qBAAO;AAAA,YACT,GAAG,CAAC,CAAC;AAKL,gBAAI,gBAAgB,QAAQ;AAC1B;AAAA,YACF;AACA,kBAAM,OAAO,OAAO,OAAO,CAAC,OAAO,CAAC,MAAM,SAAS,EAAE,CAAC;AAItD,gBAAI,KAAK,WAAW,GAAG;AACrB;AAAA,YACF;AAEA,uBAAWC,cAAa,MAAM;AAC5B,oBAAM,GAAG,MAAM,KAAK,KAAK,EAAE,CAACA,UAAS,GAAG,YAAY,CAAC;AAAA,YACvD;AAAA,UACF;AAAA,QACF;AAMA,cAAM,UAAU,YAAY;AAC5B,cAAM,SAAS,SAAS,KAAK;AAE7B,YAAI,IAAI,SAAS,CAAC,SAAS;AACzB,cAAI,MAAM,WAAW,YAAY,KAAK;AAAA,QACxC;AACA,eAAO;AAAA,UACL;AAAA,UACA,OAAO;AAAA,UACP;AAAA,UACA;AAAA,UACA,SAAS,KAAK;AAAA,QAChB;AAAA,MACF;AAAA,MACA,MAAM,aAAa,KAAyB,YAAoB;AAC9D,cAAM,SAAS,KAAK,MAAM,KAAK,IAAI,IAAI,cAAc;AACrD,cAAM,MAAM,CAAC,YAAY,MAAM,EAAE,KAAK,GAAG;AAEzC,cAAM,MAAsD,IAAI,MAAM,IAAI,CAAC,WAAW;AAAA,UACpF;AAAA,UACA,SAAS,MAAM,KAAK,kCAAkC,CAAC,GAAG,GAAG,CAAC,IAAI,CAAC;AAAA,QACrE,EAAE;AAGF,cAAM,gBAAgB,MAAM,QAAQ,IAAI,IAAI,IAAI,CAAC,MAAM,EAAE,OAAO,CAAC;AACjE,cAAM,aAAa,cAAc,OAAO,CAAC,WAAmB,WAAW,UAAU;AAC/E,cAAI,cAAc;AAClB,cAAI,QAAQ,GAAG;AACb,0BAAc,OAAO,SAAS,SAAS;AAAA,UACzC;AAEA,iBAAO,YAAY;AAAA,QACrB,GAAG,CAAC;AAEJ,eAAO,KAAK,IAAI,GAAG,SAAS,UAAU;AAAA,MACxC;AAAA,MACA,MAAM,YAAY,KAAyB,YAAoB;AAC7D,cAAM,UAAU,CAAC,YAAY,GAAG,EAAE,KAAK,GAAG;AAC1C,mBAAW,MAAM,IAAI,OAAO;AAC1B,gBAAM,GAAG,KAAK,aAAa,CAAC,OAAO,GAAG,CAAC,IAAI,CAAC;AAAA,QAC9C;AAAA,MACF;AAAA,IACF;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAkBA,OAAO,cAIL,QAIA,QAC+B;AAC/B,UAAM,aAAa,GAAG,MAAM;AAE5B,UAAM,iBAAiB,GAAG,MAAM;AAEhC,WAAO,OAAO;AAAA,MACZ,MAAM,MAAM,KAAyB,YAAoB,MAAe;AActE,cAAM,YAAY,SAAS;AAC3B,cAAM,MAAM,KAAK,IAAI;AAErB,cAAM,gBAAgB,KAAK,MAAM,MAAM,UAAU;AACjD,cAAM,aAAa,CAAC,YAAY,aAAa,EAAE,KAAK,GAAG;AACvD,cAAM,iBAAiB,gBAAgB;AACvC,cAAM,cAAc,CAAC,YAAY,cAAc,EAAE,KAAK,GAAG;AACzD,cAAM,cAAc,OAAO,KAAK,IAAI,GAAG,IAAI,IAAI;AAE/C,cAAM,MAAM,IAAI,MAAM,IAAI,CAAC,WAAW;AAAA,UACpC;AAAA,UACA,SAAS,MAAM;AAAA,YACb;AAAA,YACA,CAAC,YAAY,WAAW;AAAA,YACxB,CAAC,QAAQ,KAAK,gBAAgB,WAAW,WAAW;AAAA;AAAA,UAEtD;AAAA,QACF,EAAE;AAEF,cAAM,sBAAuB,MAAM,iBAAkB;AACrD,cAAM,CAAC,SAAS,UAAU,OAAO,IAAI,MAAM,QAAQ,IAAI,IAAI,IAAI,CAAC,MAAM,EAAE,OAAO,CAAC;AAEhF,cAAM,qBAAqB,SAAS,OAAO,CAAC,WAAmB,WAAW,UAAU;AAClF,cAAI,cAAc;AAClB,cAAI,QAAQ,GAAG;AACb,0BAAc,OAAO,SAAS,SAAS;AAAA,UACzC;AAEA,iBAAO,YAAY;AAAA,QACrB,GAAG,CAAC;AAEJ,cAAM,oBAAoB,QAAQ,OAAO,CAAC,WAAmB,WAAW,UAAU;AAChF,cAAI,cAAc;AAClB,cAAI,QAAQ,GAAG;AACb,0BAAc,OAAO,SAAS,SAAS;AAAA,UACzC;AAEA,iBAAO,YAAY;AAAA,QACrB,GAAG,CAAC;AAEJ,cAAM,sBAAsB,sBAAsB,IAAI;AAEtD,cAAM,aAAa,sBAAsB;AAEzC,cAAM,YAAY,SAAS;AAK3B,uBAAe,OAAO;AACpB,gBAAM,MAAM,MAAM,QAAQ,IAAI,IAAI,IAAI,CAAC,MAAM,EAAE,OAAO,CAAC;AACv
D,gBAAM,gBAAgB,IACnB,QAAQ,CAAC,CAACC,QAAO,MAAMA,QAAO,EAC9B,OAAO,CAAC,eAAyB,MAAM,UAAU;AAChD,gBAAI,QAAQ,MAAM,GAAG;AACnB,4BAAc,KAAK,IAAI;AAAA,YACzB;AACA,mBAAO;AAAA,UACT,GAAG,CAAC,CAAC;AAEP,qBAAW,MAAM,KAAK;AACpB,kBAAM,CAAC,UAAUC,WAAU,QAAQ,IAAI,MAAM,GAAG;AAChD,kBAAM,QAAQA,UAAS,OAAO,CAAC,KAAe,WAAW,UAAU;AACjE,kBAAI,QAAQ,MAAM,GAAG;AACnB,oBAAI,KAAK,SAAS;AAAA,cACpB;AACA,qBAAO;AAAA,YACT,GAAG,CAAC,CAAC;AAEL,kBAAM,eAAeA,UAAS,OAAO,CAAC,WAAmB,WAAW,UAAU;AAC5E,kBAAI,cAAc;AAClB,kBAAI,QAAQ,GAAG;AACb,8BAAc,OAAO,SAAS,SAAS;AAAA,cACzC;AAEA,qBAAO,YAAY;AAAA,YACrB,GAAG,CAAC;AAKJ,gBAAI,gBAAgB,QAAQ;AAC1B;AAAA,YACF;AACA,kBAAM,OAAO,cAAc,OAAO,CAAC,OAAO,CAAC,MAAM,SAAS,EAAE,CAAC;AAI7D,gBAAI,KAAK,WAAW,GAAG;AACrB;AAAA,YACF;AAEA,uBAAWF,cAAa,MAAM;AAC5B,oBAAM,GAAG,MAAM,KAAK,YAAY,EAAE,CAACA,UAAS,GAAG,YAAY,CAAC;AAAA,YAC9D;AAAA,UACF;AAAA,QACF;AAGA,cAAM,SAAS,gBAAgB,KAAK;AACpC,YAAI,IAAI,SAAS,CAAC,SAAS;AACzB,cAAI,MAAM,WAAW,YAAY,KAAK;AAAA,QACxC;AACA,eAAO;AAAA,UACL,SAAS,QAAQ,OAAO;AAAA,UACxB,OAAO;AAAA,UACP,WAAW,KAAK,IAAI,GAAG,SAAS;AAAA,UAChC;AAAA,UACA,SAAS,KAAK;AAAA,QAChB;AAAA,MACF;AAAA,MACA,MAAM,aAAa,KAAyB,YAAoB;AAC9D,cAAM,MAAM,KAAK,IAAI;AAErB,cAAM,gBAAgB,KAAK,MAAM,MAAM,UAAU;AACjD,cAAM,aAAa,CAAC,YAAY,aAAa,EAAE,KAAK,GAAG;AACvD,cAAM,iBAAiB,gBAAgB;AACvC,cAAM,cAAc,CAAC,YAAY,cAAc,EAAE,KAAK,GAAG;AAEzD,cAAM,MAAM,IAAI,MAAM,IAAI,CAAC,WAAW;AAAA,UACpC;AAAA,UACA,SAAS,MAAM;AAAA,YACb;AAAA,YACA,CAAC,YAAY,WAAW;AAAA,YACxB,CAAC,KAAK,UAAU;AAAA;AAAA,UAElB;AAAA,QACF,EAAE;AAEF,cAAM,aAAa,MAAM,QAAQ,IAAI,IAAI,IAAI,CAAC,MAAM,EAAE,OAAO,CAAC;AAC9D,eAAO,KAAK,IAAI,GAAG,SAAS,UAAU;AAAA,MACxC;AAAA,MACA,MAAM,YAAY,KAAyB,YAAoB;AAC7D,cAAM,UAAU,CAAC,YAAY,GAAG,EAAE,KAAK,GAAG;AAC1C,mBAAW,MAAM,IAAI,OAAO;AAC1B,gBAAM,GAAG,KAAK,aAAa,CAAC,OAAO,GAAG,CAAC,IAAI,CAAC;AAAA,QAC9C;AAAA,MACF;AAAA,IACF;AAAA,EACF;AACF;;;AC9dO,IAAMG,0BAAyB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAe/B,IAAMC,oCAAmC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAWzC,IAAMC,4BAA2B;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAiCjC,IAAMC,sCAAqC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAuB3C,IAAM,yBAAyB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAwC/B,IAAM,mCAAmC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAazC,IAAM,+BAA+B;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAerC,IAAM,wCAAwC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;;;AC7D9C,IAAM,kBAAN,cAA8B,UAAyB;AAAA;AAAA;AAAA;AAAA,EAK5D,YAAY,QAA+B;AACzC,UAAM;AAAA,MACJ,QAAQ,OAAO;AAAA,MACf,SAAS,OAAO;AAAA,MAChB,SAAS,OAAO;AAAA,MAChB,WAAW,OAAO;AAAA,MAClB,KAAK;AAAA,QACH,OAAO,OAAO;AAAA,MAChB;AAAA,MACA,gBAAgB,OAAO;AAAA,IACzB,CAAC;AAAA,EACH;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAoBA,OAAO,YAIL,QAIA,QAC0B;AAC1B,UAAM,iBAAiB,GAAG,MAAM;AAChC,WAAO,OAAO;AAAA,MACZ,MAAM,MAAM,KAAoB,YAAoB,MAAe;AACjE,cAAM,SAAS,KAAK,MAAM,KAAK,IAAI,IAAI,cAAc;AACrD,cAAM,MAAM,CAAC,YAAY,MAAM,EAAE,KAAK,GAAG;AACzC,YAAI,IAAI,OAAO;AACb,gBAAM,EAAE,SAAS,OAAAC,OAAM,IAAI,IAAI,MAAM,UAAU,UAAU;AACzD,cAAI,SAAS;AACX,mBAAO;AAAA,cACL,SAAS;AAAA,cACT,OAAO;AAAA,cACP,WAAW;AAAA,cACX,OAAOA;AAAA,cACP,SAAS,QAAQ,QAAQ;AAAA,YAC3B;AAAA,UACF;AAAA,QACF;AAEA,cAAM,cAAc,OAAO,KAAK,IAAI,GAAG,IAAI,IAAI;AAE/C,cAAM,wBAAyB,MAAM,IAAI,MAAM;AAAA,UAC7CC;AAAA,UACA,CAAC,GAAG;AAAA,UACJ,CAAC,gBAAgB,WAAW;AAAA,QAC9B;AAEA,c
AAM,UAAU,yBAAyB;AAEzC,cAAM,kBAAkB,KAAK,IAAI,GAAG,SAAS,qBAAqB;AAElE,cAAM,SAAS,SAAS,KAAK;AAC7B,YAAI,IAAI,SAAS,CAAC,SAAS;AACzB,cAAI,MAAM,WAAW,YAAY,KAAK;AAAA,QACxC;AAEA,eAAO;AAAA,UACL;AAAA,UACA,OAAO;AAAA,UACP,WAAW;AAAA,UACX;AAAA,UACA,SAAS,QAAQ,QAAQ;AAAA,QAC3B;AAAA,MACF;AAAA,MACA,MAAM,aAAa,KAAoB,YAAoB;AACzD,cAAM,SAAS,KAAK,MAAM,KAAK,IAAI,IAAI,cAAc;AACrD,cAAM,MAAM,CAAC,YAAY,MAAM,EAAE,KAAK,GAAG;AAEzC,cAAM,aAAc,MAAM,IAAI,MAAM;AAAA,UAClCC;AAAA,UACA,CAAC,GAAG;AAAA,UACJ,CAAC,IAAI;AAAA,QACP;AAEA,eAAO,KAAK,IAAI,GAAG,SAAS,UAAU;AAAA,MACxC;AAAA,MACA,MAAM,YAAY,KAAoB,YAAoB;AACxD,cAAM,UAAU,CAAC,YAAY,GAAG,EAAE,KAAK,GAAG;AAE1C,cAAM,IAAI,MAAM,KAAK,aAAa,CAAC,OAAO,GAAG,CAAC,IAAI,CAAC;AAAA,MACrD;AAAA,IACF;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAkBA,OAAO,cAIL,QAIA,QAC0B;AAC1B,UAAM,aAAa,GAAG,MAAM;AAC5B,WAAO,OAAO;AAAA,MACZ,MAAM,MAAM,KAAoB,YAAoB,MAAe;AACjE,cAAM,MAAM,KAAK,IAAI;AAErB,cAAM,gBAAgB,KAAK,MAAM,MAAM,UAAU;AACjD,cAAM,aAAa,CAAC,YAAY,aAAa,EAAE,KAAK,GAAG;AACvD,cAAM,iBAAiB,gBAAgB;AACvC,cAAM,cAAc,CAAC,YAAY,cAAc,EAAE,KAAK,GAAG;AAEzD,YAAI,IAAI,OAAO;AACb,gBAAM,EAAE,SAAS,OAAAF,OAAM,IAAI,IAAI,MAAM,UAAU,UAAU;AACzD,cAAI,SAAS;AACX,mBAAO;AAAA,cACL,SAAS;AAAA,cACT,OAAO;AAAA,cACP,WAAW;AAAA,cACX,OAAOA;AAAA,cACP,SAAS,QAAQ,QAAQ;AAAA,YAC3B;AAAA,UACF;AAAA,QACF;AAEA,cAAM,cAAc,OAAO,KAAK,IAAI,GAAG,IAAI,IAAI;AAE/C,cAAM,kBAAmB,MAAM,IAAI,MAAM;AAAA,UACvCG;AAAA,UACA,CAAC,YAAY,WAAW;AAAA,UACxB,CAAC,QAAQ,KAAK,YAAY,WAAW;AAAA,QACvC;AAEA,cAAM,UAAU,mBAAmB;AAEnC,cAAM,SAAS,gBAAgB,KAAK;AACpC,YAAI,IAAI,SAAS,CAAC,SAAS;AACzB,cAAI,MAAM,WAAW,YAAY,KAAK;AAAA,QACxC;AACA,eAAO;AAAA,UACL;AAAA,UACA,OAAO;AAAA,UACP,WAAW,KAAK,IAAI,GAAG,eAAe;AAAA,UACtC;AAAA,UACA,SAAS,QAAQ,QAAQ;AAAA,QAC3B;AAAA,MACF;AAAA,MACA,MAAM,aAAa,KAAoB,YAAoB;AACzD,cAAM,MAAM,KAAK,IAAI;AACrB,cAAM,gBAAgB,KAAK,MAAM,MAAM,UAAU;AACjD,cAAM,aAAa,CAAC,YAAY,aAAa,EAAE,KAAK,GAAG;AACvD,cAAM,iBAAiB,gBAAgB;AACvC,cAAM,cAAc,CAAC,YAAY,cAAc,EAAE,KAAK,GAAG;AAEzD,cAAM,aAAc,MAAM,IAAI,MAAM;AAAA,UAClCC;AAAA,UACA,CAAC,YAAY,WAAW;AAAA,UACxB,CAAC,KAAK,UAAU;AAAA,QAClB;AAEA,eAAO,KAAK,IAAI,GAAG,SAAS,UAAU;AAAA,MACxC;AAAA,MACA,MAAM,YAAY,KAAoB,YAAoB;AACxD,cAAM,UAAU,CAAC,YAAY,GAAG,EAAE,KAAK,GAAG;AAE1C,cAAM,IAAI,MAAM,KAAK,aAAa,CAAC,OAAO,GAAG,CAAC,IAAI,CAAC;AAAA,MACrD;AAAA,IACF;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAeA,OAAO,YAML,YAIA,UAMA,WAC0B;AAC1B,UAAM,mBAAmB,GAAG,QAAQ;AACpC,WAAO,OAAO;AAAA,MACZ,MAAM,MAAM,KAAoB,YAAoB,MAAe;AACjE,YAAI,IAAI,OAAO;AACb,gBAAM,EAAE,SAAS,OAAAJ,OAAM,IAAI,IAAI,MAAM,UAAU,UAAU;AACzD,cAAI,SAAS;AACX,mBAAO;AAAA,cACL,SAAS;AAAA,cACT,OAAO;AAAA,cACP,WAAW;AAAA,cACX,OAAOA;AAAA,cACP,SAAS,QAAQ,QAAQ;AAAA,YAC3B;AAAA,UACF;AAAA,QACF;AAEA,cAAM,MAAM,KAAK,IAAI;AAErB,cAAM,cAAc,OAAO,KAAK,IAAI,GAAG,IAAI,IAAI;AAE/C,cAAM,CAAC,WAAW,KAAK,IAAK,MAAM,IAAI,MAAM;AAAA,UAC1C;AAAA,UACA,CAAC,UAAU;AAAA,UACX,CAAC,WAAW,kBAAkB,YAAY,KAAK,WAAW;AAAA,QAC5D;AAEA,cAAM,UAAU,aAAa;AAC7B,YAAI,IAAI,SAAS,CAAC,SAAS;AACzB,cAAI,MAAM,WAAW,YAAY,KAAK;AAAA,QACxC;AAEA,eAAO;AAAA,UACL;AAAA,UACA,OAAO;AAAA,UACP;AAAA,UACA;AAAA,UACA,SAAS,QAAQ,QAAQ;AAAA,QAC3B;AAAA,MACF;AAAA,MACA,MAAM,aAAa,KAAoB,YAAoB;AACzD,cAAM,kBAAmB,MAAM,IAAI,MAAM;AAAA,UACvC;AAAA,UACA,CAAC,UAAU;AAAA,UACX,CAAC,SAAS;AAAA,QACZ;AACA,eAAO;AAAA,MACT;AAAA,MACA,MAAM,YAAY,KAAoB,YAAoB;AACxD,cAAM,UAAU;AAEhB,cAAM,IAAI,MAAM,KAAK,aAAa,CAAC,OAAO,GAAG,CAAC,IAAI,CAAC;AAAA,MACrD;AAAA,IACF;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAyBA,OAAO,kBAIL,QAIA,QAC0B;AAC1B,UAAM,iBAAiB,GAAG,MAAM;AAEhC,WAAO,OAAO;AAAA,MACZ,MAAM,MAAM,KAAoB,YAAoB,MA
Ae;AACjE,YAAI,CAAC,IAAI,OAAO;AACd,gBAAM,IAAI,MAAM,iCAAiC;AAAA,QACnD;AACA,cAAM,SAAS,KAAK,MAAM,KAAK,IAAI,IAAI,cAAc;AACrD,cAAM,MAAM,CAAC,YAAY,MAAM,EAAE,KAAK,GAAG;AACzC,cAAM,SAAS,SAAS,KAAK;AAC7B,cAAM,cAAc,OAAO,KAAK,IAAI,GAAG,IAAI,IAAI;AAE/C,cAAM,MAAM,OAAO,IAAI,MAAM,IAAI,GAAG,MAAM;AAC1C,YAAI,KAAK;AACP,gBAAM,0BAA0B,IAAI,MAAM,KAAK,GAAG;AAClD,gBAAM,UAAU,0BAA0B;AAE1C,gBAAM,UAAU,UACZ,IAAI,MACH,KAAK,8BAA8B,CAAC,GAAG,GAAG,CAAC,gBAAgB,WAAW,CAAC,EACvE,KAAK,CAAC,MAAM;AACX,gBAAI,MAAO,IAAI,KAAK,CAAW;AAAA,UACjC,CAAC,IACD,QAAQ,QAAQ;AAEpB,iBAAO;AAAA,YACL;AAAA,YACA,OAAO;AAAA,YACP,WAAW,SAAS;AAAA,YACpB;AAAA,YACA;AAAA,UACF;AAAA,QACF;AAEA,cAAM,wBAAyB,MAAM,IAAI,MAAM;AAAA,UAC7C;AAAA,UACA,CAAC,GAAG;AAAA,UACJ,CAAC,gBAAgB,WAAW;AAAA,QAC9B;AACA,YAAI,MAAM,IAAI,KAAK,qBAAqB;AACxC,cAAM,YAAY,SAAS;AAE3B,eAAO;AAAA,UACL,SAAS,aAAa;AAAA,UACtB,OAAO;AAAA,UACP;AAAA,UACA;AAAA,UACA,SAAS,QAAQ,QAAQ;AAAA,QAC3B;AAAA,MACF;AAAA,MACA,MAAM,aAAa,KAAoB,YAAoB;AACzD,YAAI,CAAC,IAAI,OAAO;AACd,gBAAM,IAAI,MAAM,iCAAiC;AAAA,QACnD;AAEA,cAAM,SAAS,KAAK,MAAM,KAAK,IAAI,IAAI,cAAc;AACrD,cAAM,MAAM,CAAC,YAAY,MAAM,EAAE,KAAK,GAAG;AAEzC,cAAM,MAAM,OAAO,IAAI,MAAM,IAAI,GAAG,MAAM;AAC1C,YAAI,KAAK;AACP,gBAAM,mBAAmB,IAAI,MAAM,IAAI,GAAG,KAAK;AAC/C,iBAAO,KAAK,IAAI,GAAG,SAAS,gBAAgB;AAAA,QAC9C;AAEA,cAAM,aAAc,MAAM,IAAI,MAAM;AAAA,UAClC;AAAA,UACA,CAAC,GAAG;AAAA,UACJ,CAAC,IAAI;AAAA,QACP;AACA,eAAO,KAAK,IAAI,GAAG,SAAS,UAAU;AAAA,MACxC;AAAA,MACA,MAAM,YAAY,KAAoB,YAAoB;AACxD,cAAM,UAAU,CAAC,YAAY,GAAG,EAAE,KAAK,GAAG;AAE1C,YAAI,CAAC,IAAI,OAAO;AACd,gBAAM,IAAI,MAAM,iCAAiC;AAAA,QACnD;AACA,YAAI,MAAM,MAAM;AAChB,cAAM,IAAI,MAAM,KAAK,aAAa,CAAC,OAAO,GAAG,CAAC,IAAI,CAAC;AAAA,MACrD;AAAA,IACF;AAAA,EACF;AACF;","names":["CoreAnalytics","reset","requestId","current","previous","fixedWindowLimitScript","fixedWindowRemainingTokensScript","slidingWindowLimitScript","slidingWindowRemainingTokensScript","reset","fixedWindowLimitScript","fixedWindowRemainingTokensScript","slidingWindowLimitScript","slidingWindowRemainingTokensScript"]}
1
+ {"version":3,"sources":["../src/index.ts","../src/analytics.ts","../src/cache.ts","../src/duration.ts","../src/lua-scripts/multi.ts","../src/lua-scripts/reset.ts","../src/ratelimit.ts","../src/multi.ts","../src/lua-scripts/single.ts","../src/single.ts"],"sourcesContent":["import { Analytics } from \"./analytics\";\nimport type { AnalyticsConfig } from \"./analytics\";\nimport { MultiRegionRatelimit } from \"./multi\";\nimport type { MultiRegionRatelimitConfig } from \"./multi\";\nimport { RegionRatelimit as Ratelimit } from \"./single\";\nimport type { RegionRatelimitConfig as RatelimitConfig } from \"./single\";\nimport type { Algorithm } from \"./types\";\n\nexport {\n Ratelimit,\n type RatelimitConfig,\n MultiRegionRatelimit,\n type MultiRegionRatelimitConfig,\n type Algorithm,\n Analytics,\n type AnalyticsConfig,\n};\n","import { Analytics as CoreAnalytics, Aggregate } from \"@upstash/core-analytics\";\nimport type { Redis } from \"./types\";\n\nexport type Geo = {\n country?: string;\n city?: string;\n region?: string;\n ip?: string;\n};\nexport type Event = Geo & {\n identifier: string;\n time: number;\n success: boolean;\n};\n\nexport type AnalyticsConfig = {\n redis: Redis;\n prefix?: string;\n};\n\n/**\n * The Analytics package is experimental and can change at any time.\n */\nexport class Analytics {\n private readonly analytics: CoreAnalytics;\n private readonly table = \"events\";\n\n constructor(config: AnalyticsConfig) {\n this.analytics = new CoreAnalytics({\n // @ts-expect-error we need to fix the types in core-analytics, it should only require the methods it needs, not the whole sdk\n redis: config.redis,\n window: \"1h\",\n prefix: config.prefix ?? \"@upstash/ratelimit\",\n retention: \"90d\",\n });\n }\n\n /**\n * Try to extract the geo information from the request\n *\n * This handles Vercel's `req.geo` and and Cloudflare's `request.cf` properties\n * @param req\n * @returns\n */\n public extractGeo(req: { geo?: Geo; cf?: Geo }): Geo {\n if (typeof req.geo !== \"undefined\") {\n return req.geo;\n }\n if (typeof req.cf !== \"undefined\") {\n return req.cf;\n }\n\n return {};\n }\n\n public async record(event: Event): Promise<void> {\n await this.analytics.ingest(this.table, event);\n }\n\n public async series<TFilter extends keyof Omit<Event, \"time\">>(\n filter: TFilter,\n cutoff: number,\n ): Promise<Aggregate[]> {\n const timestampCount = Math.min(\n (\n this.analytics.getBucket(Date.now())\n - this.analytics.getBucket(cutoff)\n ) / (60 * 60 * 1000),\n 256\n )\n return this.analytics.aggregateBucketsWithPipeline(this.table, filter, timestampCount)\n }\n\n public async getUsage(cutoff = 0): Promise<Record<string, { success: number; blocked: number }>> {\n \n const timestampCount = Math.min(\n (\n this.analytics.getBucket(Date.now())\n - this.analytics.getBucket(cutoff)\n ) / (60 * 60 * 1000),\n 256\n )\n const records = await this.analytics.getAllowedBlocked(this.table, timestampCount)\n return records;\n }\n\n public async getUsageOverTime<TFilter extends keyof Omit<Event, \"time\">>(\n timestampCount: number, groupby: TFilter\n ): Promise<Aggregate[]> {\n const result = await this.analytics.aggregateBucketsWithPipeline(this.table, groupby, timestampCount)\n return result\n }\n\n public async getMostAllowedBlocked(timestampCount: number, getTop?: number) {\n getTop = getTop ?? 
5\n return this.analytics.getMostAllowedBlocked(this.table, timestampCount, getTop)\n }\n}\n","import type { EphemeralCache } from \"./types\";\n\nexport class Cache implements EphemeralCache {\n /**\n * Stores identifier -> reset (in milliseconds)\n */\n private readonly cache: Map<string, number>;\n\n constructor(cache: Map<string, number>) {\n this.cache = cache;\n }\n\n public isBlocked(identifier: string): { blocked: boolean; reset: number } {\n if (!this.cache.has(identifier)) {\n return { blocked: false, reset: 0 };\n }\n const reset = this.cache.get(identifier)!;\n if (reset < Date.now()) {\n this.cache.delete(identifier);\n return { blocked: false, reset: 0 };\n }\n\n return { blocked: true, reset: reset };\n }\n\n public blockUntil(identifier: string, reset: number): void {\n this.cache.set(identifier, reset);\n }\n\n public set(key: string, value: number): void {\n this.cache.set(key, value);\n }\n public get(key: string): number | null {\n return this.cache.get(key) || null;\n }\n\n public incr(key: string): number {\n let value = this.cache.get(key) ?? 0;\n value += 1;\n this.cache.set(key, value);\n return value;\n }\n\n public pop(key: string): void {\n this.cache.delete(key)\n }\n\n public empty(): void {\n this.cache.clear()\n }\n}\n","type Unit = \"ms\" | \"s\" | \"m\" | \"h\" | \"d\";\nexport type Duration = `${number} ${Unit}` | `${number}${Unit}`;\n\n/**\n * Convert a human readable duration to milliseconds\n */\nexport function ms(d: Duration): number {\n const match = d.match(/^(\\d+)\\s?(ms|s|m|h|d)$/);\n if (!match) {\n throw new Error(`Unable to parse window size: ${d}`);\n }\n const time = Number.parseInt(match[1]);\n const unit = match[2] as Unit;\n\n switch (unit) {\n case \"ms\":\n return time;\n case \"s\":\n return time * 1000;\n case \"m\":\n return time * 1000 * 60;\n case \"h\":\n return time * 1000 * 60 * 60;\n case \"d\":\n return time * 1000 * 60 * 60 * 24;\n\n default:\n throw new Error(`Unable to parse window size: ${d}`);\n }\n}\n","export const fixedWindowLimitScript = `\n\tlocal key = KEYS[1]\n\tlocal id = ARGV[1]\n\tlocal window = ARGV[2]\n\tlocal incrementBy = tonumber(ARGV[3])\n\n\tredis.call(\"HSET\", key, id, incrementBy)\n\tlocal fields = redis.call(\"HGETALL\", key)\n\tif #fields == 2 and tonumber(fields[2])==incrementBy then\n\t-- The first time this key is set, and the value will be equal to incrementBy.\n\t-- So we only need the expire command once\n\t redis.call(\"PEXPIRE\", key, window)\n\tend\n\n\treturn fields\n`;\nexport const fixedWindowRemainingTokensScript = `\n local key = KEYS[1]\n local tokens = 0\n\n local fields = redis.call(\"HGETALL\", key)\n\n return fields\n `;\n\nexport const slidingWindowLimitScript = `\n\tlocal currentKey = KEYS[1] -- identifier including prefixes\n\tlocal previousKey = KEYS[2] -- key of the previous bucket\n\tlocal tokens = tonumber(ARGV[1]) -- tokens per window\n\tlocal now = ARGV[2] -- current timestamp in milliseconds\n\tlocal window = ARGV[3] -- interval in milliseconds\n\tlocal requestId = ARGV[4] -- uuid for this request\n\tlocal incrementBy = tonumber(ARGV[5]) -- custom rate, default is 1\n\n\tlocal currentFields = redis.call(\"HGETALL\", currentKey)\n\tlocal requestsInCurrentWindow = 0\n\tfor i = 2, #currentFields, 2 do\n\trequestsInCurrentWindow = requestsInCurrentWindow + tonumber(currentFields[i])\n\tend\n\n\tlocal previousFields = redis.call(\"HGETALL\", previousKey)\n\tlocal requestsInPreviousWindow = 0\n\tfor i = 2, #previousFields, 2 do\n\trequestsInPreviousWindow = 
requestsInPreviousWindow + tonumber(previousFields[i])\n\tend\n\n\tlocal percentageInCurrent = ( now % window) / window\n\tif requestsInPreviousWindow * (1 - percentageInCurrent ) + requestsInCurrentWindow >= tokens then\n\t return {currentFields, previousFields, false}\n\tend\n\n\tredis.call(\"HSET\", currentKey, requestId, incrementBy)\n\n\tif requestsInCurrentWindow == 0 then \n\t -- The first time this key is set, the value will be equal to incrementBy.\n\t -- So we only need the expire command once\n\t redis.call(\"PEXPIRE\", currentKey, window * 2 + 1000) -- Enough time to overlap with a new window + 1 second\n\tend\n\treturn {currentFields, previousFields, true}\n`;\n\nexport const slidingWindowRemainingTokensScript = `\n\tlocal currentKey = KEYS[1] -- identifier including prefixes\n\tlocal previousKey = KEYS[2] -- key of the previous bucket\n\tlocal now \t= ARGV[1] -- current timestamp in milliseconds\n \tlocal window \t= ARGV[2] -- interval in milliseconds\n\n\tlocal currentFields = redis.call(\"HGETALL\", currentKey)\n\tlocal requestsInCurrentWindow = 0\n\tfor i = 2, #currentFields, 2 do\n\trequestsInCurrentWindow = requestsInCurrentWindow + tonumber(currentFields[i])\n\tend\n\n\tlocal previousFields = redis.call(\"HGETALL\", previousKey)\n\tlocal requestsInPreviousWindow = 0\n\tfor i = 2, #previousFields, 2 do\n\trequestsInPreviousWindow = requestsInPreviousWindow + tonumber(previousFields[i])\n\tend\n\n\tlocal percentageInCurrent = ( now % window) / window\n \trequestsInPreviousWindow = math.floor(( 1 - percentageInCurrent ) * requestsInPreviousWindow)\n\t\n\treturn requestsInCurrentWindow + requestsInPreviousWindow\n`;\n","export const resetScript = `\n local pattern = KEYS[1]\n\n -- Initialize cursor to start from 0\n local cursor = \"0\"\n\n repeat\n -- Scan for keys matching the pattern\n local scan_result = redis.call('SCAN', cursor, 'MATCH', pattern)\n\n -- Extract cursor for the next iteration\n cursor = scan_result[1]\n\n -- Extract keys from the scan result\n local keys = scan_result[2]\n\n for i=1, #keys do\n redis.call('DEL', keys[i])\n end\n\n -- Continue scanning until cursor is 0 (end of keyspace)\n until cursor == \"0\"\n `;\n","import { Analytics, type Geo } from \"./analytics\";\nimport { Cache } from \"./cache\";\nimport type { Algorithm, Context, RatelimitResponse } from \"./types\";\n\nexport class TimeoutError extends Error {\n constructor() {\n super(\"Timeout\");\n this.name = \"TimeoutError\";\n }\n}\nexport type RatelimitConfig<TContext> = {\n /**\n * The ratelimiter function to use.\n *\n * Choose one of the predefined ones or implement your own.\n * Available algorithms are exposed via static methods:\n * - Ratelimiter.fixedWindow\n * - Ratelimiter.slidingWindow\n * - Ratelimiter.tokenBucket\n */\n\n limiter: Algorithm<TContext>;\n\n ctx: TContext;\n /**\n * All keys in redis are prefixed with this.\n *\n * @default `@upstash/ratelimit`\n */\n prefix?: string;\n\n /**\n * If enabled, the ratelimiter will keep a global cache of identifiers, that have\n * exhausted their ratelimit. In serverless environments this is only possible if\n * you create the ratelimiter instance outside of your handler function. While the\n * function is still hot, the ratelimiter can block requests without having to\n * request data from redis, thus saving time and money.\n *\n * Whenever an identifier has exceeded its limit, the ratelimiter will add it to an\n * internal list together with its reset timestamp. 
If the same identifier makes a\n * new request before it is reset, we can immediately reject it.\n *\n * Set to `false` to disable.\n *\n * If left undefined, a map is created automatically, but it can only work\n * if the map or the ratelimit instance is created outside your serverless function handler.\n */\n ephemeralCache?: Map<string, number> | false;\n\n /**\n * If set, the ratelimiter will allow requests to pass after this many milliseconds.\n *\n * Use this if you want to allow requests in case of network problems\n *\n * @default 5000\n */\n timeout?: number;\n\n /**\n * If enabled, the ratelimiter will store analytics data in redis, which you can check out at\n * https://console.upstash.com/ratelimit\n *\n * @default false\n */\n analytics?: boolean;\n};\n\n/**\n * Ratelimiter using serverless redis from https://upstash.com/\n *\n * @example\n * ```ts\n * const { limit } = new Ratelimit({\n * redis: Redis.fromEnv(),\n * limiter: Ratelimit.slidingWindow(\n * 10, // Allow 10 requests per window of 30 minutes\n * \"30 m\", // interval of 30 minutes\n * ),\n * })\n *\n * ```\n */\nexport abstract class Ratelimit<TContext extends Context> {\n protected readonly limiter: Algorithm<TContext>;\n\n protected readonly ctx: TContext;\n\n protected readonly prefix: string;\n\n protected readonly timeout: number;\n\n protected readonly analytics?: Analytics;\n\n constructor(config: RatelimitConfig<TContext>) {\n this.ctx = config.ctx;\n this.limiter = config.limiter;\n this.timeout = config.timeout ?? 5000;\n this.prefix = config.prefix ?? \"@upstash/ratelimit\";\n this.analytics = config.analytics\n ? new Analytics({\n redis: Array.isArray(this.ctx.redis) ? this.ctx.redis[0] : this.ctx.redis,\n prefix: this.prefix,\n })\n : undefined;\n\n if (config.ephemeralCache instanceof Map) {\n this.ctx.cache = new Cache(config.ephemeralCache);\n } else if (typeof config.ephemeralCache === \"undefined\") {\n this.ctx.cache = new Cache(new Map());\n }\n }\n\n /**\n * Determine if a request should pass or be rejected based on the identifier and previously chosen ratelimit.\n *\n * Use this if you want to reject all requests that you can not handle right now.\n *\n * @example\n * ```ts\n * const ratelimit = new Ratelimit({\n * redis: Redis.fromEnv(),\n * limiter: Ratelimit.slidingWindow(10, \"10 s\")\n * })\n *\n * const { success } = await ratelimit.limit(id)\n * if (!success){\n * return \"Nope\"\n * }\n * return \"Yes\"\n * ```\n *\n * @param req.rate - The rate at which tokens will be added or consumed from the token bucket. A higher rate allows for more requests to be processed. 
Defaults to 1 token per interval if not specified.\n *\n * Usage with `req.rate`\n * @example\n * ```ts\n * const ratelimit = new Ratelimit({\n * redis: Redis.fromEnv(),\n * limiter: Ratelimit.slidingWindow(100, \"10 s\")\n * })\n *\n * const { success } = await ratelimit.limit(id, {rate: 10})\n * if (!success){\n * return \"Nope\"\n * }\n * return \"Yes\"\n * ```\n */\n public limit = async (\n identifier: string,\n req?: { geo?: Geo; rate?: number },\n ): Promise<RatelimitResponse> => {\n const key = [this.prefix, identifier].join(\":\");\n let timeoutId: any = null;\n try {\n const arr: Promise<RatelimitResponse>[] = [this.limiter().limit(this.ctx, key, req?.rate)];\n if (this.timeout > 0) {\n arr.push(\n new Promise((resolve) => {\n timeoutId = setTimeout(() => {\n resolve({\n success: true,\n limit: 0,\n remaining: 0,\n reset: 0,\n pending: Promise.resolve(),\n });\n }, this.timeout);\n }),\n );\n }\n\n const res = await Promise.race(arr);\n if (this.analytics) {\n try {\n const geo = req ? this.analytics.extractGeo(req) : undefined;\n const analyticsP = this.analytics\n .record({\n identifier,\n time: Date.now(),\n success: res.success,\n ...geo,\n })\n .catch((err) => {\n console.warn(\"Failed to record analytics\", err);\n });\n res.pending = Promise.all([res.pending, analyticsP]);\n } catch (err) {\n console.warn(\"Failed to record analytics\", err);\n }\n }\n return res;\n } finally {\n if (timeoutId) {\n clearTimeout(timeoutId);\n }\n }\n };\n\n /**\n * Block until the request may pass or timeout is reached.\n *\n * This method returns a promise that resolves as soon as the request may be processed\n * or after the timeout has been reached.\n *\n * Use this if you want to delay the request until it is ready to get processed.\n *\n * @example\n * ```ts\n * const ratelimit = new Ratelimit({\n * redis: Redis.fromEnv(),\n * limiter: Ratelimit.slidingWindow(10, \"10 s\")\n * })\n *\n * const { success } = await ratelimit.blockUntilReady(id, 60_000)\n * if (!success){\n * return \"Nope\"\n * }\n * return \"Yes\"\n * ```\n */\n public blockUntilReady = async (\n /**\n * An identifier per user or api.\n * Choose a userID, or api token, or ip address.\n *\n * If you want to limit your api across all users, you can set a constant string.\n */\n identifier: string,\n /**\n * Maximum duration to wait in milliseconds.\n * After this time the request will be denied.\n */\n timeout: number,\n ): Promise<RatelimitResponse> => {\n if (timeout <= 0) {\n throw new Error(\"timeout must be positive\");\n }\n let res: RatelimitResponse;\n\n const deadline = Date.now() + timeout;\n while (true) {\n res = await this.limit(identifier);\n if (res.success) {\n break;\n }\n if (res.reset === 0) {\n throw new Error(\"This should not happen\");\n }\n\n const wait = Math.min(res.reset, deadline) - Date.now();\n await new Promise((r) => setTimeout(r, wait));\n\n if (Date.now() > deadline) {\n break;\n }\n }\n return res!;\n };\n\n public resetUsedTokens = async (identifier: string) => {\n const pattern = [this.prefix, identifier].join(\":\");\n await this.limiter().resetTokens(this.ctx, pattern);\n };\n\n public getRemaining = async (identifier: string): Promise<number> => {\n const pattern = [this.prefix, identifier].join(\":\");\n\n return await this.limiter().getRemaining(this.ctx, pattern);\n };\n}\n","import { Cache } from \"./cache\";\nimport type { Duration } from \"./duration\";\nimport { ms } from \"./duration\";\nimport {\n fixedWindowLimitScript,\n fixedWindowRemainingTokensScript,\n 
slidingWindowLimitScript,\n slidingWindowRemainingTokensScript,\n} from \"./lua-scripts/multi\";\nimport { resetScript } from \"./lua-scripts/reset\";\nimport { Ratelimit } from \"./ratelimit\";\nimport type { Algorithm, MultiRegionContext } from \"./types\";\n\nimport type { Redis } from \"./types\";\n\nfunction randomId(): string {\n let result = \"\";\n const characters = \"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789\";\n const charactersLength = characters.length;\n for (let i = 0; i < 16; i++) {\n result += characters.charAt(Math.floor(Math.random() * charactersLength));\n }\n return result;\n}\n\nexport type MultiRegionRatelimitConfig = {\n /**\n * Instances of `@upstash/redis`\n * @see https://github.com/upstash/upstash-redis#quick-start\n */\n redis: Redis[];\n /**\n * The ratelimiter function to use.\n *\n * Choose one of the predefined ones or implement your own.\n * Available algorithms are exposed via static methods:\n * - MultiRegionRatelimit.fixedWindow\n */\n limiter: Algorithm<MultiRegionContext>;\n /**\n * All keys in redis are prefixed with this.\n *\n * @default `@upstash/ratelimit`\n */\n prefix?: string;\n\n /**\n * If enabled, the ratelimiter will keep a global cache of identifiers, that have\n * exhausted their ratelimit. In serverless environments this is only possible if\n * you create the ratelimiter instance outside of your handler function. While the\n * function is still hot, the ratelimiter can block requests without having to\n * request data from redis, thus saving time and money.\n *\n * Whenever an identifier has exceeded its limit, the ratelimiter will add it to an\n * internal list together with its reset timestamp. If the same identifier makes a\n * new request before it is reset, we can immediately reject it.\n *\n * Set to `false` to disable.\n *\n * If left undefined, a map is created automatically, but it can only work\n * if the map or the ratelimit instance is created outside your serverless function handler.\n */\n ephemeralCache?: Map<string, number> | false;\n\n /**\n * If set, the ratelimiter will allow requests to pass after this many milliseconds.\n *\n * Use this if you want to allow requests in case of network problems\n */\n timeout?: number;\n\n /**\n * If enabled, the ratelimiter will store analytics data in redis, which you can check out at\n * https://console.upstash.com/ratelimit\n *\n * @default false\n */\n analytics?: boolean;\n};\n\n/**\n * Ratelimiter using serverless redis from https://upstash.com/\n *\n * @example\n * ```ts\n * const { limit } = new MultiRegionRatelimit({\n * redis: Redis.fromEnv(),\n * limiter: MultiRegionRatelimit.fixedWindow(\n * 10, // Allow 10 requests per window of 30 minutes\n * \"30 m\", // interval of 30 minutes\n * )\n * })\n *\n * ```\n */\nexport class MultiRegionRatelimit extends Ratelimit<MultiRegionContext> {\n /**\n * Create a new Ratelimit instance by providing a `@upstash/redis` instance and the algorithn of your choice.\n */\n constructor(config: MultiRegionRatelimitConfig) {\n super({\n prefix: config.prefix,\n limiter: config.limiter,\n timeout: config.timeout,\n analytics: config.analytics,\n ctx: {\n redis: config.redis,\n cache: config.ephemeralCache ? 
new Cache(config.ephemeralCache) : undefined,\n },\n });\n }\n\n /**\n * Each request inside a fixed time increases a counter.\n * Once the counter reaches the maximum allowed number, all further requests are\n * rejected.\n *\n * **Pro:**\n *\n * - Newer requests are not starved by old ones.\n * - Low storage cost.\n *\n * **Con:**\n *\n * A burst of requests near the boundary of a window can result in a very\n * high request rate because two windows will be filled with requests quickly.\n *\n * @param tokens - How many requests a user can make in each time window.\n * @param window - A fixed timeframe\n */\n static fixedWindow(\n /**\n * How many requests are allowed per window.\n */\n tokens: number,\n /**\n * The duration in which `tokens` requests are allowed.\n */\n window: Duration,\n ): Algorithm<MultiRegionContext> {\n const windowDuration = ms(window);\n\n return () => ({\n async limit(ctx: MultiRegionContext, identifier: string, rate?: number) {\n if (ctx.cache) {\n const { blocked, reset } = ctx.cache.isBlocked(identifier);\n if (blocked) {\n return {\n success: false,\n limit: tokens,\n remaining: 0,\n reset: reset,\n pending: Promise.resolve(),\n };\n }\n }\n\n const requestId = randomId();\n const bucket = Math.floor(Date.now() / windowDuration);\n const key = [identifier, bucket].join(\":\");\n const incrementBy = rate ? Math.max(1, rate) : 1;\n\n const dbs: { redis: Redis; request: Promise<string[]> }[] = ctx.redis.map((redis) => ({\n redis,\n request: redis.eval(\n fixedWindowLimitScript,\n [key],\n [requestId, windowDuration, incrementBy],\n ) as Promise<string[]>,\n }));\n\n // The firstResponse is an array of string at every EVEN indexes and rate at which the tokens are used at every ODD indexes\n const firstResponse = await Promise.any(dbs.map((s) => s.request));\n\n const usedTokens = firstResponse.reduce((accTokens: number, usedToken, index) => {\n let parsedToken = 0;\n if (index % 2) {\n parsedToken = Number.parseInt(usedToken);\n }\n\n return accTokens + parsedToken;\n }, 0);\n\n const remaining = tokens - usedTokens;\n\n /**\n * If the length between two databases does not match, we sync the two databases\n */\n async function sync() {\n const individualIDs = await Promise.all(dbs.map((s) => s.request));\n\n const allIDs = Array.from(\n new Set(\n individualIDs\n .flatMap((_) => _)\n .reduce((acc: string[], curr, index) => {\n if (index % 2 === 0) {\n acc.push(curr);\n }\n return acc;\n }, []),\n ).values(),\n );\n\n for (const db of dbs) {\n const usedDbTokens = (await db.request).reduce(\n (accTokens: number, usedToken, index) => {\n let parsedToken = 0;\n if (index % 2) {\n parsedToken = Number.parseInt(usedToken);\n }\n\n return accTokens + parsedToken;\n },\n 0,\n );\n\n const dbIds = (await db.request).reduce((ids: string[], currentId, index) => {\n if (index % 2 === 0) {\n ids.push(currentId);\n }\n return ids;\n }, []);\n /**\n * If the bucket in this db is already full, it doesn't matter which ids it contains.\n * So we do not have to sync.\n */\n if (usedDbTokens >= tokens) {\n continue;\n }\n const diff = allIDs.filter((id) => !dbIds.includes(id));\n /**\n * Don't waste a request if there is nothing to send\n */\n if (diff.length === 0) {\n continue;\n }\n\n for (const requestId of diff) {\n await db.redis.hset(key, { [requestId]: incrementBy });\n }\n }\n }\n\n /**\n * Do not await sync. 
This should not run in the critical path.\n */\n\n const success = remaining > 0;\n const reset = (bucket + 1) * windowDuration;\n\n if (ctx.cache && !success) {\n ctx.cache.blockUntil(identifier, reset);\n }\n return {\n success,\n limit: tokens,\n remaining,\n reset,\n pending: sync(),\n };\n },\n async getRemaining(ctx: MultiRegionContext, identifier: string) {\n const bucket = Math.floor(Date.now() / windowDuration);\n const key = [identifier, bucket].join(\":\");\n\n const dbs: { redis: Redis; request: Promise<string[]> }[] = ctx.redis.map((redis) => ({\n redis,\n request: redis.eval(fixedWindowRemainingTokensScript, [key], [null]) as Promise<string[]>,\n }));\n\n // The firstResponse is an array of string at every EVEN indexes and rate at which the tokens are used at every ODD indexes\n const firstResponse = await Promise.any(dbs.map((s) => s.request));\n const usedTokens = firstResponse.reduce((accTokens: number, usedToken, index) => {\n let parsedToken = 0;\n if (index % 2) {\n parsedToken = Number.parseInt(usedToken);\n }\n\n return accTokens + parsedToken;\n }, 0);\n\n return Math.max(0, tokens - usedTokens);\n },\n async resetTokens(ctx: MultiRegionContext, identifier: string) {\n const pattern = [identifier, \"*\"].join(\":\");\n if (ctx.cache) {\n ctx.cache.pop(identifier)\n }\n for (const db of ctx.redis) {\n await db.eval(resetScript, [pattern], [null]);\n }\n },\n });\n }\n\n /**\n * Combined approach of `slidingLogs` and `fixedWindow` with lower storage\n * costs than `slidingLogs` and improved boundary behavior by calculating a\n * weighted score between two windows.\n *\n * **Pro:**\n *\n * Good performance allows this to scale to very high loads.\n *\n * **Con:**\n *\n * Nothing major.\n *\n * @param tokens - How many requests a user can make in each time window.\n * @param window - The duration in which the user can max X requests.\n */\n static slidingWindow(\n /**\n * How many requests are allowed per window.\n */\n tokens: number,\n /**\n * The duration in which `tokens` requests are allowed.\n */\n window: Duration,\n ): Algorithm<MultiRegionContext> {\n const windowSize = ms(window);\n\n const windowDuration = ms(window);\n\n return () => ({\n async limit(ctx: MultiRegionContext, identifier: string, rate?: number) {\n // if (ctx.cache) {\n // const { blocked, reset } = ctx.cache.isBlocked(identifier);\n // if (blocked) {\n // return {\n // success: false,\n // limit: tokens,\n // remaining: 0,\n // reset: reset,\n // pending: Promise.resolve(),\n // };\n // }\n // }\n\n const requestId = randomId();\n const now = Date.now();\n\n const currentWindow = Math.floor(now / windowSize);\n const currentKey = [identifier, currentWindow].join(\":\");\n const previousWindow = currentWindow - 1;\n const previousKey = [identifier, previousWindow].join(\":\");\n const incrementBy = rate ? 
Math.max(1, rate) : 1;\n\n const dbs = ctx.redis.map((redis) => ({\n redis,\n request: redis.eval(\n slidingWindowLimitScript,\n [currentKey, previousKey],\n [tokens, now, windowDuration, requestId, incrementBy],\n // lua seems to return `1` for true and `null` for false\n ) as Promise<[string[], string[], 1 | null]>,\n }));\n\n const percentageInCurrent = (now % windowDuration) / windowDuration;\n const [current, previous, success] = await Promise.any(dbs.map((s) => s.request));\n\n // in the case of success, the new request is not included in the current array.\n // add it manually\n if (success) {\n current.push(requestId, incrementBy.toString())\n }\n\n const previousUsedTokens = previous.reduce((accTokens: number, usedToken, index) => {\n let parsedToken = 0;\n if (index % 2) {\n parsedToken = Number.parseInt(usedToken);\n }\n\n return accTokens + parsedToken;\n }, 0);\n\n const currentUsedTokens = current.reduce((accTokens: number, usedToken, index) => {\n let parsedToken = 0;\n if (index % 2) {\n parsedToken = Number.parseInt(usedToken);\n }\n\n return accTokens + parsedToken;\n }, 0);\n\n const previousPartialUsed = Math.ceil(previousUsedTokens * (1 - percentageInCurrent));\n\n const usedTokens = previousPartialUsed + currentUsedTokens;\n\n const remaining = tokens - usedTokens;\n\n /**\n * If a database differs from the consensus, we sync it\n */\n async function sync() {\n const res = await Promise.all(dbs.map((s) => s.request));\n\n const allCurrentIds = Array.from(\n new Set(\n res\n .flatMap(([current]) => current)\n .reduce((acc: string[], curr, index) => {\n if (index % 2 === 0) {\n acc.push(curr);\n }\n return acc;\n }, []),\n ).values(),\n );\n\n for (const db of dbs) {\n const [current, _previous, _success] = await db.request;\n const dbIds = current.reduce((ids: string[], currentId, index) => {\n if (index % 2 === 0) {\n ids.push(currentId);\n }\n return ids;\n }, []);\n\n const usedDbTokens = current.reduce((accTokens: number, usedToken, index) => {\n let parsedToken = 0;\n if (index % 2) {\n parsedToken = Number.parseInt(usedToken);\n }\n\n return accTokens + parsedToken;\n }, 0);\n /**\n * If the bucket in this db is already full, it doesn't matter which ids it contains.\n * So we do not have to sync.\n */\n if (usedDbTokens >= tokens) {\n continue;\n }\n const diff = allCurrentIds.filter((id) => !dbIds.includes(id));\n /**\n * Don't waste a request if there is nothing to send\n */\n if (diff.length === 0) {\n continue;\n }\n\n for (const requestId of diff) {\n await db.redis.hset(currentKey, { [requestId]: incrementBy });\n }\n }\n }\n\n // const success = remaining >= 0;\n const reset = (currentWindow + 1) * windowDuration;\n if (ctx.cache && !success) {\n ctx.cache.blockUntil(identifier, reset);\n }\n return {\n success: Boolean(success),\n limit: tokens,\n remaining: Math.max(0, remaining),\n reset,\n pending: sync(),\n };\n },\n async getRemaining(ctx: MultiRegionContext, identifier: string) {\n const now = Date.now();\n\n const currentWindow = Math.floor(now / windowSize);\n const currentKey = [identifier, currentWindow].join(\":\");\n const previousWindow = currentWindow - 1;\n const previousKey = [identifier, previousWindow].join(\":\");\n\n const dbs = ctx.redis.map((redis) => ({\n redis,\n request: redis.eval(\n slidingWindowRemainingTokensScript,\n [currentKey, previousKey],\n [now, windowSize],\n // lua seems to return `1` for true and `null` for false\n ) as Promise<number>,\n }));\n\n const usedTokens = await Promise.any(dbs.map((s) => s.request));\n 
return Math.max(0, tokens - usedTokens);\n },\n async resetTokens(ctx: MultiRegionContext, identifier: string) {\n const pattern = [identifier, \"*\"].join(\":\");\n if (ctx.cache) {\n ctx.cache.pop(identifier)\n }\n for (const db of ctx.redis) {\n await db.eval(resetScript, [pattern], [null]);\n }\n },\n });\n }\n}\n","export const fixedWindowLimitScript = `\n local key = KEYS[1]\n local window = ARGV[1]\n local incrementBy = ARGV[2] -- increment rate per request at a given value, default is 1\n\n local r = redis.call(\"INCRBY\", key, incrementBy)\n if r == tonumber(incrementBy) then\n -- The first time this key is set, the value will be equal to incrementBy.\n -- So we only need the expire command once\n redis.call(\"PEXPIRE\", key, window)\n end\n\n return r\n`;\n\nexport const fixedWindowRemainingTokensScript = `\n local key = KEYS[1]\n local tokens = 0\n\n local value = redis.call('GET', key)\n if value then\n tokens = value\n end\n return tokens\n `;\n\nexport const slidingWindowLimitScript = `\n local currentKey = KEYS[1] -- identifier including prefixes\n local previousKey = KEYS[2] -- key of the previous bucket\n local tokens = tonumber(ARGV[1]) -- tokens per window\n local now = ARGV[2] -- current timestamp in milliseconds\n local window = ARGV[3] -- interval in milliseconds\n local incrementBy = ARGV[4] -- increment rate per request at a given value, default is 1\n\n local requestsInCurrentWindow = redis.call(\"GET\", currentKey)\n if requestsInCurrentWindow == false then\n requestsInCurrentWindow = 0\n end\n\n local requestsInPreviousWindow = redis.call(\"GET\", previousKey)\n if requestsInPreviousWindow == false then\n requestsInPreviousWindow = 0\n end\n local percentageInCurrent = ( now % window ) / window\n -- weighted requests to consider from the previous window\n requestsInPreviousWindow = math.floor(( 1 - percentageInCurrent ) * requestsInPreviousWindow)\n if requestsInPreviousWindow + requestsInCurrentWindow >= tokens then\n return -1\n end\n\n local newValue = redis.call(\"INCRBY\", currentKey, incrementBy)\n if newValue == tonumber(incrementBy) then\n -- The first time this key is set, the value will be equal to incrementBy.\n -- So we only need the expire command once\n redis.call(\"PEXPIRE\", currentKey, window * 2 + 1000) -- Enough time to overlap with a new window + 1 second\n end\n return tokens - ( newValue + requestsInPreviousWindow )\n`;\n\nexport const slidingWindowRemainingTokensScript = `\n local currentKey = KEYS[1] -- identifier including prefixes\n local previousKey = KEYS[2] -- key of the previous bucket\n local now = ARGV[1] -- current timestamp in milliseconds\n local window = ARGV[2] -- interval in milliseconds\n\n local requestsInCurrentWindow = redis.call(\"GET\", currentKey)\n if requestsInCurrentWindow == false then\n requestsInCurrentWindow = 0\n end\n\n local requestsInPreviousWindow = redis.call(\"GET\", previousKey)\n if requestsInPreviousWindow == false then\n requestsInPreviousWindow = 0\n end\n\n local percentageInCurrent = ( now % window ) / window\n -- weighted requests to consider from the previous window\n requestsInPreviousWindow = math.floor(( 1 - percentageInCurrent ) * requestsInPreviousWindow)\n\n return requestsInPreviousWindow + requestsInCurrentWindow\n`;\n\nexport const tokenBucketLimitScript = `\n local key = KEYS[1] -- identifier including prefixes\n local maxTokens = tonumber(ARGV[1]) -- maximum number of tokens\n local interval = tonumber(ARGV[2]) -- size of the window in milliseconds\n local refillRate = 
tonumber(ARGV[3]) -- how many tokens are refilled after each interval\n local now = tonumber(ARGV[4]) -- current timestamp in milliseconds\n local incrementBy = tonumber(ARGV[5]) -- how many tokens to consume, default is 1\n \n local bucket = redis.call(\"HMGET\", key, \"refilledAt\", \"tokens\")\n \n local refilledAt\n local tokens\n\n if bucket[1] == false then\n refilledAt = now\n tokens = maxTokens\n else\n refilledAt = tonumber(bucket[1])\n tokens = tonumber(bucket[2])\n end\n \n if now >= refilledAt + interval then\n local numRefills = math.floor((now - refilledAt) / interval)\n tokens = math.min(maxTokens, tokens + numRefills * refillRate)\n\n refilledAt = refilledAt + numRefills * interval\n end\n\n if tokens == 0 then\n return {-1, refilledAt + interval}\n end\n\n local remaining = tokens - incrementBy\n local expireAt = math.ceil(((maxTokens - remaining) / refillRate)) * interval\n \n redis.call(\"HSET\", key, \"refilledAt\", refilledAt, \"tokens\", remaining)\n redis.call(\"PEXPIRE\", key, expireAt)\n return {remaining, refilledAt + interval}\n`;\n\nexport const tokenBucketRemainingTokensScript = `\n local key = KEYS[1]\n local maxTokens = tonumber(ARGV[1])\n \n local bucket = redis.call(\"HMGET\", key, \"tokens\")\n\n if bucket[1] == false then\n return maxTokens\n end\n \n return tonumber(bucket[1])\n`;\n\nexport const cachedFixedWindowLimitScript = `\n local key = KEYS[1]\n local window = ARGV[1]\n local incrementBy = ARGV[2] -- increment rate per request at a given value, default is 1\n\n local r = redis.call(\"INCRBY\", key, incrementBy)\n if r == incrementBy then\n -- The first time this key is set, the value will be equal to incrementBy.\n -- So we only need the expire command once\n redis.call(\"PEXPIRE\", key, window)\n end\n \n return r\n`;\n\nexport const cachedFixedWindowRemainingTokenScript = `\n local key = KEYS[1]\n local tokens = 0\n\n local value = redis.call('GET', key)\n if value then\n tokens = value\n end\n return tokens\n`;\n","import type { Duration } from \"./duration\";\nimport { ms } from \"./duration\";\nimport { resetScript } from \"./lua-scripts/reset\";\nimport {\n cachedFixedWindowLimitScript,\n cachedFixedWindowRemainingTokenScript,\n fixedWindowLimitScript,\n fixedWindowRemainingTokensScript,\n slidingWindowLimitScript,\n slidingWindowRemainingTokensScript,\n tokenBucketLimitScript,\n tokenBucketRemainingTokensScript,\n} from \"./lua-scripts/single\";\nimport { Ratelimit } from \"./ratelimit\";\nimport type { Algorithm, RegionContext } from \"./types\";\nimport type { Redis } from \"./types\";\n\nexport type RegionRatelimitConfig = {\n /**\n * Instance of `@upstash/redis`\n * @see https://github.com/upstash/upstash-redis#quick-start\n */\n redis: Redis;\n /**\n * The ratelimiter function to use.\n *\n * Choose one of the predefined ones or implement your own.\n * Available algorithms are exposed via static methods:\n * - Ratelimiter.fixedWindow\n * - Ratelimiter.slidingWindow\n * - Ratelimiter.tokenBucket\n */\n limiter: Algorithm<RegionContext>;\n /**\n * All keys in redis are prefixed with this.\n *\n * @default `@upstash/ratelimit`\n */\n prefix?: string;\n\n /**\n * If enabled, the ratelimiter will keep a global cache of identifiers, that have\n * exhausted their ratelimit. In serverless environments this is only possible if\n * you create the ratelimiter instance outside of your handler function. 
While the\n * function is still hot, the ratelimiter can block requests without having to\n * request data from redis, thus saving time and money.\n *\n * Whenever an identifier has exceeded its limit, the ratelimiter will add it to an\n * internal list together with its reset timestamp. If the same identifier makes a\n * new request before it is reset, we can immediately reject it.\n *\n * Set to `false` to disable.\n *\n * If left undefined, a map is created automatically, but it can only work\n * if the map or the ratelimit instance is created outside your serverless function handler.\n */\n ephemeralCache?: Map<string, number> | false;\n\n /**\n * If set, the ratelimiter will allow requests to pass after this many milliseconds.\n *\n * Use this if you want to allow requests in case of network problems\n */\n timeout?: number;\n\n /**\n * If enabled, the ratelimiter will store analytics data in redis, which you can check out at\n * https://console.upstash.com/ratelimit\n *\n * @default false\n */\n analytics?: boolean;\n};\n\n/**\n * Ratelimiter using serverless redis from https://upstash.com/\n *\n * @example\n * ```ts\n * const { limit } = new Ratelimit({\n * redis: Redis.fromEnv(),\n * limiter: Ratelimit.slidingWindow(\n * \"30 m\", // interval of 30 minutes\n * 10, // Allow 10 requests per window of 30 minutes\n * )\n * })\n *\n * ```\n */\nexport class RegionRatelimit extends Ratelimit<RegionContext> {\n /**\n * Create a new Ratelimit instance by providing a `@upstash/redis` instance and the algorithm of your choice.\n */\n\n constructor(config: RegionRatelimitConfig) {\n super({\n prefix: config.prefix,\n limiter: config.limiter,\n timeout: config.timeout,\n analytics: config.analytics,\n ctx: {\n redis: config.redis,\n },\n ephemeralCache: config.ephemeralCache,\n });\n }\n\n /**\n * Each request inside a fixed time increases a counter.\n * Once the counter reaches the maximum allowed number, all further requests are\n * rejected.\n *\n * **Pro:**\n *\n * - Newer requests are not starved by old ones.\n * - Low storage cost.\n *\n * **Con:**\n *\n * A burst of requests near the boundary of a window can result in a very\n * high request rate because two windows will be filled with requests quickly.\n *\n * @param tokens - How many requests a user can make in each time window.\n * @param window - A fixed timeframe\n */\n static fixedWindow(\n /**\n * How many requests are allowed per window.\n */\n tokens: number,\n /**\n * The duration in which `tokens` requests are allowed.\n */\n window: Duration,\n ): Algorithm<RegionContext> {\n const windowDuration = ms(window);\n return () => ({\n async limit(ctx: RegionContext, identifier: string, rate?: number) {\n const bucket = Math.floor(Date.now() / windowDuration);\n const key = [identifier, bucket].join(\":\");\n if (ctx.cache) {\n const { blocked, reset } = ctx.cache.isBlocked(identifier);\n if (blocked) {\n return {\n success: false,\n limit: tokens,\n remaining: 0,\n reset: reset,\n pending: Promise.resolve(),\n };\n }\n }\n\n const incrementBy = rate ? 
Math.max(1, rate) : 1;\n\n const usedTokensAfterUpdate = (await ctx.redis.eval(\n fixedWindowLimitScript,\n [key],\n [windowDuration, incrementBy],\n )) as number;\n\n const success = usedTokensAfterUpdate <= tokens;\n\n const remainingTokens = Math.max(0, tokens - usedTokensAfterUpdate);\n\n const reset = (bucket + 1) * windowDuration;\n if (ctx.cache && !success) {\n ctx.cache.blockUntil(identifier, reset);\n }\n\n return {\n success,\n limit: tokens,\n remaining: remainingTokens,\n reset,\n pending: Promise.resolve(),\n };\n },\n async getRemaining(ctx: RegionContext, identifier: string) {\n const bucket = Math.floor(Date.now() / windowDuration);\n const key = [identifier, bucket].join(\":\");\n\n const usedTokens = (await ctx.redis.eval(\n fixedWindowRemainingTokensScript,\n [key],\n [null],\n )) as number;\n\n return Math.max(0, tokens - usedTokens);\n },\n async resetTokens(ctx: RegionContext, identifier: string) {\n const pattern = [identifier, \"*\"].join(\":\");\n if (ctx.cache) {\n ctx.cache.pop(identifier)\n }\n await ctx.redis.eval(resetScript, [pattern], [null]);\n },\n });\n }\n\n /**\n * Combined approach of `slidingLogs` and `fixedWindow` with lower storage\n * costs than `slidingLogs` and improved boundary behavior by calculating a\n * weighted score between two windows.\n *\n * **Pro:**\n *\n * Good performance allows this to scale to very high loads.\n *\n * **Con:**\n *\n * Nothing major.\n *\n * @param tokens - How many requests a user can make in each time window.\n * @param window - The duration in which the user can max X requests.\n */\n static slidingWindow(\n /**\n * How many requests are allowed per window.\n */\n tokens: number,\n /**\n * The duration in which `tokens` requests are allowed.\n */\n window: Duration,\n ): Algorithm<RegionContext> {\n const windowSize = ms(window);\n return () => ({\n async limit(ctx: RegionContext, identifier: string, rate?: number) {\n const now = Date.now();\n\n const currentWindow = Math.floor(now / windowSize);\n const currentKey = [identifier, currentWindow].join(\":\");\n const previousWindow = currentWindow - 1;\n const previousKey = [identifier, previousWindow].join(\":\");\n\n if (ctx.cache) {\n const { blocked, reset } = ctx.cache.isBlocked(identifier);\n if (blocked) {\n return {\n success: false,\n limit: tokens,\n remaining: 0,\n reset: reset,\n pending: Promise.resolve(),\n };\n }\n }\n\n const incrementBy = rate ? 
Math.max(1, rate) : 1;\n\n const remainingTokens = (await ctx.redis.eval(\n slidingWindowLimitScript,\n [currentKey, previousKey],\n [tokens, now, windowSize, incrementBy],\n )) as number;\n\n const success = remainingTokens >= 0;\n\n const reset = (currentWindow + 1) * windowSize;\n if (ctx.cache && !success) {\n ctx.cache.blockUntil(identifier, reset);\n }\n return {\n success,\n limit: tokens,\n remaining: Math.max(0, remainingTokens),\n reset,\n pending: Promise.resolve(),\n };\n },\n async getRemaining(ctx: RegionContext, identifier: string) {\n const now = Date.now();\n const currentWindow = Math.floor(now / windowSize);\n const currentKey = [identifier, currentWindow].join(\":\");\n const previousWindow = currentWindow - 1;\n const previousKey = [identifier, previousWindow].join(\":\");\n\n const usedTokens = (await ctx.redis.eval(\n slidingWindowRemainingTokensScript,\n [currentKey, previousKey],\n [now, windowSize],\n )) as number;\n\n return Math.max(0, tokens - usedTokens);\n },\n async resetTokens(ctx: RegionContext, identifier: string) {\n const pattern = [identifier, \"*\"].join(\":\");\n if (ctx.cache) {\n ctx.cache.pop(identifier)\n }\n await ctx.redis.eval(resetScript, [pattern], [null]);\n },\n });\n }\n\n /**\n * You have a bucket filled with `{maxTokens}` tokens that refills constantly\n * at `{refillRate}` per `{interval}`.\n * Every request will remove one token from the bucket and if there is no\n * token to take, the request is rejected.\n *\n * **Pro:**\n *\n * - Bursts of requests are smoothed out and you can process them at a constant\n * rate.\n * - Allows to set a higher initial burst limit by setting `maxTokens` higher\n * than `refillRate`\n */\n static tokenBucket(\n /**\n * How many tokens are refilled per `interval`\n *\n * An interval of `10s` and refillRate of 5 will cause a new token to be added every 2 seconds.\n */\n refillRate: number,\n /**\n * The interval for the `refillRate`\n */\n interval: Duration,\n /**\n * Maximum number of tokens.\n * A newly created bucket starts with this many tokens.\n * Useful to allow higher burst limits.\n */\n maxTokens: number,\n ): Algorithm<RegionContext> {\n const intervalDuration = ms(interval);\n return () => ({\n async limit(ctx: RegionContext, identifier: string, rate?: number) {\n if (ctx.cache) {\n const { blocked, reset } = ctx.cache.isBlocked(identifier);\n if (blocked) {\n return {\n success: false,\n limit: maxTokens,\n remaining: 0,\n reset: reset,\n pending: Promise.resolve(),\n };\n }\n }\n\n const now = Date.now();\n\n const incrementBy = rate ? 
Math.max(1, rate) : 1;\n\n const [remaining, reset] = (await ctx.redis.eval(\n tokenBucketLimitScript,\n [identifier],\n [maxTokens, intervalDuration, refillRate, now, incrementBy],\n )) as [number, number];\n\n const success = remaining >= 0;\n if (ctx.cache && !success) {\n ctx.cache.blockUntil(identifier, reset);\n }\n\n return {\n success,\n limit: maxTokens,\n remaining,\n reset,\n pending: Promise.resolve(),\n };\n },\n async getRemaining(ctx: RegionContext, identifier: string) {\n const remainingTokens = (await ctx.redis.eval(\n tokenBucketRemainingTokensScript,\n [identifier],\n [maxTokens],\n )) as number;\n return remainingTokens;\n },\n async resetTokens(ctx: RegionContext, identifier: string) {\n const pattern = identifier;\n if (ctx.cache) {\n ctx.cache.pop(identifier)\n }\n await ctx.redis.eval(resetScript, [pattern], [null]);\n },\n });\n }\n /**\n * cachedFixedWindow first uses the local cache to decide if a request may pass and then updates\n * it asynchronously.\n * This is experimental and not yet recommended for production use.\n *\n * @experimental\n *\n * Each request inside a fixed time increases a counter.\n * Once the counter reaches the maximum allowed number, all further requests are\n * rejected.\n *\n * **Pro:**\n *\n * - Newer requests are not starved by old ones.\n * - Low storage cost.\n *\n * **Con:**\n *\n * A burst of requests near the boundary of a window can result in a very\n * high request rate because two windows will be filled with requests quickly.\n *\n * @param tokens - How many requests a user can make in each time window.\n * @param window - A fixed timeframe\n */\n static cachedFixedWindow(\n /**\n * How many requests are allowed per window.\n */\n tokens: number,\n /**\n * The duration in which `tokens` requests are allowed.\n */\n window: Duration,\n ): Algorithm<RegionContext> {\n const windowDuration = ms(window);\n\n return () => ({\n async limit(ctx: RegionContext, identifier: string, rate?: number) {\n if (!ctx.cache) {\n throw new Error(\"This algorithm requires a cache\");\n }\n const bucket = Math.floor(Date.now() / windowDuration);\n const key = [identifier, bucket].join(\":\");\n const reset = (bucket + 1) * windowDuration;\n const incrementBy = rate ? Math.max(1, rate) : 1;\n\n const hit = typeof ctx.cache.get(key) === \"number\";\n if (hit) {\n const cachedTokensAfterUpdate = ctx.cache.incr(key);\n const success = cachedTokensAfterUpdate < tokens;\n\n const pending = success\n ? ctx.redis\n .eval(cachedFixedWindowLimitScript, [key], [windowDuration, incrementBy])\n .then((t) => {\n ctx.cache!.set(key, t as number);\n })\n : Promise.resolve();\n\n return {\n success,\n limit: tokens,\n remaining: tokens - cachedTokensAfterUpdate,\n reset: reset,\n pending,\n };\n }\n\n const usedTokensAfterUpdate = (await ctx.redis.eval(\n cachedFixedWindowLimitScript,\n [key],\n [windowDuration, incrementBy],\n )) as number;\n ctx.cache.set(key, usedTokensAfterUpdate);\n const remaining = tokens - usedTokensAfterUpdate;\n\n return {\n success: remaining >= 0,\n limit: tokens,\n remaining,\n reset: reset,\n pending: Promise.resolve(),\n };\n },\n async getRemaining(ctx: RegionContext, identifier: string) {\n if (!ctx.cache) {\n throw new Error(\"This algorithm requires a cache\");\n }\n\n const bucket = Math.floor(Date.now() / windowDuration);\n const key = [identifier, bucket].join(\":\");\n\n const hit = typeof ctx.cache.get(key) === \"number\";\n if (hit) {\n const cachedUsedTokens = ctx.cache.get(key) ?? 
0;\n return Math.max(0, tokens - cachedUsedTokens);\n }\n\n const usedTokens = (await ctx.redis.eval(\n cachedFixedWindowRemainingTokenScript,\n [key],\n [null],\n )) as number;\n return Math.max(0, tokens - usedTokens);\n },\n async resetTokens(ctx: RegionContext, identifier: string) {\n const pattern = [identifier, \"*\"].join(\":\");\n // Empty the cache\n if (!ctx.cache) {\n throw new Error(\"This algorithm requires a cache\");\n }\n ctx.cache.pop(identifier)\n await ctx.redis.eval(resetScript, [pattern], [null]);\n },\n });\n }\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;;;ACAA,4BAAsD;AAuB/C,IAAM,YAAN,MAAgB;AAAA,EACJ;AAAA,EACA,QAAQ;AAAA,EAEzB,YAAY,QAAyB;AACnC,SAAK,YAAY,IAAI,sBAAAA,UAAc;AAAA;AAAA,MAEjC,OAAO,OAAO;AAAA,MACd,QAAQ;AAAA,MACR,QAAQ,OAAO,UAAU;AAAA,MACzB,WAAW;AAAA,IACb,CAAC;AAAA,EACH;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EASO,WAAW,KAAmC;AACnD,QAAI,OAAO,IAAI,QAAQ,aAAa;AAClC,aAAO,IAAI;AAAA,IACb;AACA,QAAI,OAAO,IAAI,OAAO,aAAa;AACjC,aAAO,IAAI;AAAA,IACb;AAEA,WAAO,CAAC;AAAA,EACV;AAAA,EAEA,MAAa,OAAO,OAA6B;AAC/C,UAAM,KAAK,UAAU,OAAO,KAAK,OAAO,KAAK;AAAA,EAC/C;AAAA,EAEA,MAAa,OACX,QACA,QACsB;AACtB,UAAM,iBAAiB,KAAK;AAAA,OAExB,KAAK,UAAU,UAAU,KAAK,IAAI,CAAC,IACjC,KAAK,UAAU,UAAU,MAAM,MAC9B,KAAK,KAAK;AAAA,MACf;AAAA,IACF;AACA,WAAO,KAAK,UAAU,6BAA6B,KAAK,OAAO,QAAQ,cAAc;AAAA,EACvF;AAAA,EAEA,MAAa,SAAS,SAAS,GAAkE;AAE/F,UAAM,iBAAiB,KAAK;AAAA,OAExB,KAAK,UAAU,UAAU,KAAK,IAAI,CAAC,IACjC,KAAK,UAAU,UAAU,MAAM,MAC9B,KAAK,KAAK;AAAA,MACf;AAAA,IACF;AACA,UAAM,UAAU,MAAM,KAAK,UAAU,kBAAkB,KAAK,OAAO,cAAc;AACjF,WAAO;AAAA,EACT;AAAA,EAEA,MAAa,iBACX,gBAAwB,SACF;AACtB,UAAM,SAAS,MAAM,KAAK,UAAU,6BAA6B,KAAK,OAAO,SAAS,cAAc;AACpG,WAAO;AAAA,EACT;AAAA,EAEA,MAAa,sBAAsB,gBAAwB,QAAiB;AAC1E,aAAS,UAAU;AACnB,WAAO,KAAK,UAAU,sBAAsB,KAAK,OAAO,gBAAgB,MAAM;AAAA,EAChF;AACF;;;AC/FO,IAAM,QAAN,MAAsC;AAAA;AAAA;AAAA;AAAA,EAI1B;AAAA,EAEjB,YAAY,OAA4B;AACtC,SAAK,QAAQ;AAAA,EACf;AAAA,EAEO,UAAU,YAAyD;AACxE,QAAI,CAAC,KAAK,MAAM,IAAI,UAAU,GAAG;AAC/B,aAAO,EAAE,SAAS,OAAO,OAAO,EAAE;AAAA,IACpC;AACA,UAAM,QAAQ,KAAK,MAAM,IAAI,UAAU;AACvC,QAAI,QAAQ,KAAK,IAAI,GAAG;AACtB,WAAK,MAAM,OAAO,UAAU;AAC5B,aAAO,EAAE,SAAS,OAAO,OAAO,EAAE;AAAA,IACpC;AAEA,WAAO,EAAE,SAAS,MAAM,MAAa;AAAA,EACvC;AAAA,EAEO,WAAW,YAAoB,OAAqB;AACzD,SAAK,MAAM,IAAI,YAAY,KAAK;AAAA,EAClC;AAAA,EAEO,IAAI,KAAa,OAAqB;AAC3C,SAAK,MAAM,IAAI,KAAK,KAAK;AAAA,EAC3B;AAAA,EACO,IAAI,KAA4B;AACrC,WAAO,KAAK,MAAM,IAAI,GAAG,KAAK;AAAA,EAChC;AAAA,EAEO,KAAK,KAAqB;AAC/B,QAAI,QAAQ,KAAK,MAAM,IAAI,GAAG,KAAK;AACnC,aAAS;AACT,SAAK,MAAM,IAAI,KAAK,KAAK;AACzB,WAAO;AAAA,EACT;AAAA,EAEO,IAAI,KAAmB;AAC5B,SAAK,MAAM,OAAO,GAAG;AAAA,EACvB;AAAA,EAEO,QAAc;AACnB,SAAK,MAAM,MAAM;AAAA,EACnB;AACF;;;AC5CO,SAAS,GAAG,GAAqB;AACtC,QAAM,QAAQ,EAAE,MAAM,wBAAwB;AAC9C,MAAI,CAAC,OAAO;AACV,UAAM,IAAI,MAAM,gCAAgC,CAAC,EAAE;AAAA,EACrD;AACA,QAAM,OAAO,OAAO,SAAS,MAAM,CAAC,CAAC;AACrC,QAAM,OAAO,MAAM,CAAC;AAEpB,UAAQ,MAAM;AAAA,IACZ,KAAK;AACH,aAAO;AAAA,IACT,KAAK;AACH,aAAO,OAAO;AAAA,IAChB,KAAK;AACH,aAAO,OAAO,MAAO;AAAA,IACvB,KAAK;AACH,aAAO,OAAO,MAAO,KAAK;AAAA,IAC5B,KAAK;AACH,aAAO,OAAO,MAAO,KAAK,KAAK;AAAA,IAEjC;AACE,YAAM,IAAI,MAAM,gCAAgC,CAAC,EAAE;AAAA,EACvD;AACF;;;AC7BO,IAAM,yBAAyB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAgB/B,IAAM,mCAAmC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AASzC,IAAM,2BAA2B;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAoCjC,IAAM,qCAAqC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;;;AC7D3C,IAAM,cAAc;AAAA;AAAA;A
AAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;;;ACkFpB,IAAe,YAAf,MAAmD;AAAA,EACrC;AAAA,EAEA;AAAA,EAEA;AAAA,EAEA;AAAA,EAEA;AAAA,EAEnB,YAAY,QAAmC;AAC7C,SAAK,MAAM,OAAO;AAClB,SAAK,UAAU,OAAO;AACtB,SAAK,UAAU,OAAO,WAAW;AACjC,SAAK,SAAS,OAAO,UAAU;AAC/B,SAAK,YAAY,OAAO,YACpB,IAAI,UAAU;AAAA,MACZ,OAAO,MAAM,QAAQ,KAAK,IAAI,KAAK,IAAI,KAAK,IAAI,MAAM,CAAC,IAAI,KAAK,IAAI;AAAA,MACpE,QAAQ,KAAK;AAAA,IACf,CAAC,IACD;AAEJ,QAAI,OAAO,0BAA0B,KAAK;AACxC,WAAK,IAAI,QAAQ,IAAI,MAAM,OAAO,cAAc;AAAA,IAClD,WAAW,OAAO,OAAO,mBAAmB,aAAa;AACvD,WAAK,IAAI,QAAQ,IAAI,MAAM,oBAAI,IAAI,CAAC;AAAA,IACtC;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAsCO,QAAQ,OACb,YACA,QAC+B;AAC/B,UAAM,MAAM,CAAC,KAAK,QAAQ,UAAU,EAAE,KAAK,GAAG;AAC9C,QAAI,YAAiB;AACrB,QAAI;AACF,YAAM,MAAoC,CAAC,KAAK,QAAQ,EAAE,MAAM,KAAK,KAAK,KAAK,KAAK,IAAI,CAAC;AACzF,UAAI,KAAK,UAAU,GAAG;AACpB,YAAI;AAAA,UACF,IAAI,QAAQ,CAAC,YAAY;AACvB,wBAAY,WAAW,MAAM;AAC3B,sBAAQ;AAAA,gBACN,SAAS;AAAA,gBACT,OAAO;AAAA,gBACP,WAAW;AAAA,gBACX,OAAO;AAAA,gBACP,SAAS,QAAQ,QAAQ;AAAA,cAC3B,CAAC;AAAA,YACH,GAAG,KAAK,OAAO;AAAA,UACjB,CAAC;AAAA,QACH;AAAA,MACF;AAEA,YAAM,MAAM,MAAM,QAAQ,KAAK,GAAG;AAClC,UAAI,KAAK,WAAW;AAClB,YAAI;AACF,gBAAM,MAAM,MAAM,KAAK,UAAU,WAAW,GAAG,IAAI;AACnD,gBAAM,aAAa,KAAK,UACrB,OAAO;AAAA,YACN;AAAA,YACA,MAAM,KAAK,IAAI;AAAA,YACf,SAAS,IAAI;AAAA,YACb,GAAG;AAAA,UACL,CAAC,EACA,MAAM,CAAC,QAAQ;AACd,oBAAQ,KAAK,8BAA8B,GAAG;AAAA,UAChD,CAAC;AACH,cAAI,UAAU,QAAQ,IAAI,CAAC,IAAI,SAAS,UAAU,CAAC;AAAA,QACrD,SAAS,KAAK;AACZ,kBAAQ,KAAK,8BAA8B,GAAG;AAAA,QAChD;AAAA,MACF;AACA,aAAO;AAAA,IACT,UAAE;AACA,UAAI,WAAW;AACb,qBAAa,SAAS;AAAA,MACxB;AAAA,IACF;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAwBO,kBAAkB,OAOvB,YAKA,YAC+B;AAC/B,QAAI,WAAW,GAAG;AAChB,YAAM,IAAI,MAAM,0BAA0B;AAAA,IAC5C;AACA,QAAI;AAEJ,UAAM,WAAW,KAAK,IAAI,IAAI;AAC9B,WAAO,MAAM;AACX,YAAM,MAAM,KAAK,MAAM,UAAU;AACjC,UAAI,IAAI,SAAS;AACf;AAAA,MACF;AACA,UAAI,IAAI,UAAU,GAAG;AACnB,cAAM,IAAI,MAAM,wBAAwB;AAAA,MAC1C;AAEA,YAAM,OAAO,KAAK,IAAI,IAAI,OAAO,QAAQ,IAAI,KAAK,IAAI;AACtD,YAAM,IAAI,QAAQ,CAAC,MAAM,WAAW,GAAG,IAAI,CAAC;AAE5C,UAAI,KAAK,IAAI,IAAI,UAAU;AACzB;AAAA,MACF;AAAA,IACF;AACA,WAAO;AAAA,EACT;AAAA,EAEO,kBAAkB,OAAO,eAAuB;AACrD,UAAM,UAAU,CAAC,KAAK,QAAQ,UAAU,EAAE,KAAK,GAAG;AAClD,UAAM,KAAK,QAAQ,EAAE,YAAY,KAAK,KAAK,OAAO;AAAA,EACpD;AAAA,EAEO,eAAe,OAAO,eAAwC;AACnE,UAAM,UAAU,CAAC,KAAK,QAAQ,UAAU,EAAE,KAAK,GAAG;AAElD,WAAO,MAAM,KAAK,QAAQ,EAAE,aAAa,KAAK,KAAK,OAAO;AAAA,EAC5D;AACF;;;AC/PA,SAAS,WAAmB;AAC1B,MAAI,SAAS;AACb,QAAM,aAAa;AACnB,QAAM,mBAAmB,WAAW;AACpC,WAAS,IAAI,GAAG,IAAI,IAAI,KAAK;AAC3B,cAAU,WAAW,OAAO,KAAK,MAAM,KAAK,OAAO,IAAI,gBAAgB,CAAC;AAAA,EAC1E;AACA,SAAO;AACT;AAwEO,IAAM,uBAAN,cAAmC,UAA8B;AAAA;AAAA;AAAA;AAAA,EAItE,YAAY,QAAoC;AAC9C,UAAM;AAAA,MACJ,QAAQ,OAAO;AAAA,MACf,SAAS,OAAO;AAAA,MAChB,SAAS,OAAO;AAAA,MAChB,WAAW,OAAO;AAAA,MAClB,KAAK;AAAA,QACH,OAAO,OAAO;AAAA,QACd,OAAO,OAAO,iBAAiB,IAAI,MAAM,OAAO,cAAc,IAAI;AAAA,MACpE;AAAA,IACF,CAAC;AAAA,EACH;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAoBA,OAAO,YAIL,QAIA,QAC+B;AAC/B,UAAM,iBAAiB,GAAG,MAAM;AAEhC,WAAO,OAAO;AAAA,MACZ,MAAM,MAAM,KAAyB,YAAoB,MAAe;AACtE,YAAI,IAAI,OAAO;AACb,gBAAM,EAAE,SAAS,OAAAC,OAAM,IAAI,IAAI,MAAM,UAAU,UAAU;AACzD,cAAI,SAAS;AACX,mBAAO;AAAA,cACL,SAAS;AAAA,cACT,OAAO;AAAA,cACP,WAAW;AAAA,cACX,OAAOA;AAAA,cACP,SAAS,QAAQ,QAAQ;AAAA,YAC3B;AAAA,UACF;AAAA,QACF;AAEA,cAAM,YAAY,SAAS;AAC3B,cAAM,SAAS,KAAK,MAAM
,KAAK,IAAI,IAAI,cAAc;AACrD,cAAM,MAAM,CAAC,YAAY,MAAM,EAAE,KAAK,GAAG;AACzC,cAAM,cAAc,OAAO,KAAK,IAAI,GAAG,IAAI,IAAI;AAE/C,cAAM,MAAsD,IAAI,MAAM,IAAI,CAAC,WAAW;AAAA,UACpF;AAAA,UACA,SAAS,MAAM;AAAA,YACb;AAAA,YACA,CAAC,GAAG;AAAA,YACJ,CAAC,WAAW,gBAAgB,WAAW;AAAA,UACzC;AAAA,QACF,EAAE;AAGF,cAAM,gBAAgB,MAAM,QAAQ,IAAI,IAAI,IAAI,CAAC,MAAM,EAAE,OAAO,CAAC;AAEjE,cAAM,aAAa,cAAc,OAAO,CAAC,WAAmB,WAAW,UAAU;AAC/E,cAAI,cAAc;AAClB,cAAI,QAAQ,GAAG;AACb,0BAAc,OAAO,SAAS,SAAS;AAAA,UACzC;AAEA,iBAAO,YAAY;AAAA,QACrB,GAAG,CAAC;AAEJ,cAAM,YAAY,SAAS;AAK3B,uBAAe,OAAO;AACpB,gBAAM,gBAAgB,MAAM,QAAQ,IAAI,IAAI,IAAI,CAAC,MAAM,EAAE,OAAO,CAAC;AAEjE,gBAAM,SAAS,MAAM;AAAA,YACnB,IAAI;AAAA,cACF,cACG,QAAQ,CAAC,MAAM,CAAC,EAChB,OAAO,CAAC,KAAe,MAAM,UAAU;AACtC,oBAAI,QAAQ,MAAM,GAAG;AACnB,sBAAI,KAAK,IAAI;AAAA,gBACf;AACA,uBAAO;AAAA,cACT,GAAG,CAAC,CAAC;AAAA,YACT,EAAE,OAAO;AAAA,UACX;AAEA,qBAAW,MAAM,KAAK;AACpB,kBAAM,gBAAgB,MAAM,GAAG,SAAS;AAAA,cACtC,CAAC,WAAmB,WAAW,UAAU;AACvC,oBAAI,cAAc;AAClB,oBAAI,QAAQ,GAAG;AACb,gCAAc,OAAO,SAAS,SAAS;AAAA,gBACzC;AAEA,uBAAO,YAAY;AAAA,cACrB;AAAA,cACA;AAAA,YACF;AAEA,kBAAM,SAAS,MAAM,GAAG,SAAS,OAAO,CAAC,KAAe,WAAW,UAAU;AAC3E,kBAAI,QAAQ,MAAM,GAAG;AACnB,oBAAI,KAAK,SAAS;AAAA,cACpB;AACA,qBAAO;AAAA,YACT,GAAG,CAAC,CAAC;AAKL,gBAAI,gBAAgB,QAAQ;AAC1B;AAAA,YACF;AACA,kBAAM,OAAO,OAAO,OAAO,CAAC,OAAO,CAAC,MAAM,SAAS,EAAE,CAAC;AAItD,gBAAI,KAAK,WAAW,GAAG;AACrB;AAAA,YACF;AAEA,uBAAWC,cAAa,MAAM;AAC5B,oBAAM,GAAG,MAAM,KAAK,KAAK,EAAE,CAACA,UAAS,GAAG,YAAY,CAAC;AAAA,YACvD;AAAA,UACF;AAAA,QACF;AAMA,cAAM,UAAU,YAAY;AAC5B,cAAM,SAAS,SAAS,KAAK;AAE7B,YAAI,IAAI,SAAS,CAAC,SAAS;AACzB,cAAI,MAAM,WAAW,YAAY,KAAK;AAAA,QACxC;AACA,eAAO;AAAA,UACL;AAAA,UACA,OAAO;AAAA,UACP;AAAA,UACA;AAAA,UACA,SAAS,KAAK;AAAA,QAChB;AAAA,MACF;AAAA,MACA,MAAM,aAAa,KAAyB,YAAoB;AAC9D,cAAM,SAAS,KAAK,MAAM,KAAK,IAAI,IAAI,cAAc;AACrD,cAAM,MAAM,CAAC,YAAY,MAAM,EAAE,KAAK,GAAG;AAEzC,cAAM,MAAsD,IAAI,MAAM,IAAI,CAAC,WAAW;AAAA,UACpF;AAAA,UACA,SAAS,MAAM,KAAK,kCAAkC,CAAC,GAAG,GAAG,CAAC,IAAI,CAAC;AAAA,QACrE,EAAE;AAGF,cAAM,gBAAgB,MAAM,QAAQ,IAAI,IAAI,IAAI,CAAC,MAAM,EAAE,OAAO,CAAC;AACjE,cAAM,aAAa,cAAc,OAAO,CAAC,WAAmB,WAAW,UAAU;AAC/E,cAAI,cAAc;AAClB,cAAI,QAAQ,GAAG;AACb,0BAAc,OAAO,SAAS,SAAS;AAAA,UACzC;AAEA,iBAAO,YAAY;AAAA,QACrB,GAAG,CAAC;AAEJ,eAAO,KAAK,IAAI,GAAG,SAAS,UAAU;AAAA,MACxC;AAAA,MACA,MAAM,YAAY,KAAyB,YAAoB;AAC7D,cAAM,UAAU,CAAC,YAAY,GAAG,EAAE,KAAK,GAAG;AAC1C,YAAI,IAAI,OAAO;AACb,cAAI,MAAM,IAAI,UAAU;AAAA,QAC1B;AACA,mBAAW,MAAM,IAAI,OAAO;AAC1B,gBAAM,GAAG,KAAK,aAAa,CAAC,OAAO,GAAG,CAAC,IAAI,CAAC;AAAA,QAC9C;AAAA,MACF;AAAA,IACF;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAkBA,OAAO,cAIL,QAIA,QAC+B;AAC/B,UAAM,aAAa,GAAG,MAAM;AAE5B,UAAM,iBAAiB,GAAG,MAAM;AAEhC,WAAO,OAAO;AAAA,MACZ,MAAM,MAAM,KAAyB,YAAoB,MAAe;AActE,cAAM,YAAY,SAAS;AAC3B,cAAM,MAAM,KAAK,IAAI;AAErB,cAAM,gBAAgB,KAAK,MAAM,MAAM,UAAU;AACjD,cAAM,aAAa,CAAC,YAAY,aAAa,EAAE,KAAK,GAAG;AACvD,cAAM,iBAAiB,gBAAgB;AACvC,cAAM,cAAc,CAAC,YAAY,cAAc,EAAE,KAAK,GAAG;AACzD,cAAM,cAAc,OAAO,KAAK,IAAI,GAAG,IAAI,IAAI;AAE/C,cAAM,MAAM,IAAI,MAAM,IAAI,CAAC,WAAW;AAAA,UACpC;AAAA,UACA,SAAS,MAAM;AAAA,YACb;AAAA,YACA,CAAC,YAAY,WAAW;AAAA,YACxB,CAAC,QAAQ,KAAK,gBAAgB,WAAW,WAAW;AAAA;AAAA,UAEtD;AAAA,QACF,EAAE;AAEF,cAAM,sBAAuB,MAAM,iBAAkB;AACrD,cAAM,CAAC,SAAS,UAAU,OAAO,IAAI,MAAM,QAAQ,IAAI,IAAI,IAAI,CAAC,MAAM,EAAE,OAAO,CAAC;AAIhF,YAAI,SAAS;AACX,kBAAQ,KAAK,WAAW,YAAY,SAAS,CAAC;AAAA,QAChD;AAEA,cAAM,qBAAqB,SAAS,OAAO,CAAC,WAAmB,WAAW,UAAU;AAClF,cAAI,cAAc;AAClB,cAAI,QAAQ,GAAG;AACb,0BAAc,OAAO,SAAS,SAAS;AAAA,UACzC;AAEA,iBAAO,YAAY;AAAA,QACrB,GAAG,CAAC;AAEJ,cAAM,oBAAoB,QAAQ,OAAO,CAAC,WAAmB,WAAW,UAAU;AAChF,cAAI,cAAc;AAClB,cAAI,QAAQ,GAAG;AACb,0BAAc,OAAO,SAAS,SAAS;AA
AA,UACzC;AAEA,iBAAO,YAAY;AAAA,QACrB,GAAG,CAAC;AAEJ,cAAM,sBAAsB,KAAK,KAAK,sBAAsB,IAAI,oBAAoB;AAEpF,cAAM,aAAa,sBAAsB;AAEzC,cAAM,YAAY,SAAS;AAK3B,uBAAe,OAAO;AACpB,gBAAM,MAAM,MAAM,QAAQ,IAAI,IAAI,IAAI,CAAC,MAAM,EAAE,OAAO,CAAC;AAEvD,gBAAM,gBAAgB,MAAM;AAAA,YAC1B,IAAI;AAAA,cACF,IACG,QAAQ,CAAC,CAACC,QAAO,MAAMA,QAAO,EAC9B,OAAO,CAAC,KAAe,MAAM,UAAU;AACtC,oBAAI,QAAQ,MAAM,GAAG;AACnB,sBAAI,KAAK,IAAI;AAAA,gBACf;AACA,uBAAO;AAAA,cACT,GAAG,CAAC,CAAC;AAAA,YACT,EAAE,OAAO;AAAA,UACX;AAEA,qBAAW,MAAM,KAAK;AACpB,kBAAM,CAACA,UAAS,WAAW,QAAQ,IAAI,MAAM,GAAG;AAChD,kBAAM,QAAQA,SAAQ,OAAO,CAAC,KAAe,WAAW,UAAU;AAChE,kBAAI,QAAQ,MAAM,GAAG;AACnB,oBAAI,KAAK,SAAS;AAAA,cACpB;AACA,qBAAO;AAAA,YACT,GAAG,CAAC,CAAC;AAEL,kBAAM,eAAeA,SAAQ,OAAO,CAAC,WAAmB,WAAW,UAAU;AAC3E,kBAAI,cAAc;AAClB,kBAAI,QAAQ,GAAG;AACb,8BAAc,OAAO,SAAS,SAAS;AAAA,cACzC;AAEA,qBAAO,YAAY;AAAA,YACrB,GAAG,CAAC;AAKJ,gBAAI,gBAAgB,QAAQ;AAC1B;AAAA,YACF;AACA,kBAAM,OAAO,cAAc,OAAO,CAAC,OAAO,CAAC,MAAM,SAAS,EAAE,CAAC;AAI7D,gBAAI,KAAK,WAAW,GAAG;AACrB;AAAA,YACF;AAEA,uBAAWD,cAAa,MAAM;AAC5B,oBAAM,GAAG,MAAM,KAAK,YAAY,EAAE,CAACA,UAAS,GAAG,YAAY,CAAC;AAAA,YAC9D;AAAA,UACF;AAAA,QACF;AAGA,cAAM,SAAS,gBAAgB,KAAK;AACpC,YAAI,IAAI,SAAS,CAAC,SAAS;AACzB,cAAI,MAAM,WAAW,YAAY,KAAK;AAAA,QACxC;AACA,eAAO;AAAA,UACL,SAAS,QAAQ,OAAO;AAAA,UACxB,OAAO;AAAA,UACP,WAAW,KAAK,IAAI,GAAG,SAAS;AAAA,UAChC;AAAA,UACA,SAAS,KAAK;AAAA,QAChB;AAAA,MACF;AAAA,MACA,MAAM,aAAa,KAAyB,YAAoB;AAC9D,cAAM,MAAM,KAAK,IAAI;AAErB,cAAM,gBAAgB,KAAK,MAAM,MAAM,UAAU;AACjD,cAAM,aAAa,CAAC,YAAY,aAAa,EAAE,KAAK,GAAG;AACvD,cAAM,iBAAiB,gBAAgB;AACvC,cAAM,cAAc,CAAC,YAAY,cAAc,EAAE,KAAK,GAAG;AAEzD,cAAM,MAAM,IAAI,MAAM,IAAI,CAAC,WAAW;AAAA,UACpC;AAAA,UACA,SAAS,MAAM;AAAA,YACb;AAAA,YACA,CAAC,YAAY,WAAW;AAAA,YACxB,CAAC,KAAK,UAAU;AAAA;AAAA,UAElB;AAAA,QACF,EAAE;AAEF,cAAM,aAAa,MAAM,QAAQ,IAAI,IAAI,IAAI,CAAC,MAAM,EAAE,OAAO,CAAC;AAC9D,eAAO,KAAK,IAAI,GAAG,SAAS,UAAU;AAAA,MACxC;AAAA,MACA,MAAM,YAAY,KAAyB,YAAoB;AAC7D,cAAM,UAAU,CAAC,YAAY,GAAG,EAAE,KAAK,GAAG;AAC1C,YAAI,IAAI,OAAO;AACb,cAAI,MAAM,IAAI,UAAU;AAAA,QAC1B;AACA,mBAAW,MAAM,IAAI,OAAO;AAC1B,gBAAM,GAAG,KAAK,aAAa,CAAC,OAAO,GAAG,CAAC,IAAI,CAAC;AAAA,QAC9C;AAAA,MACF;AAAA,IACF;AAAA,EACF;AACF;;;AC/eO,IAAME,0BAAyB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAe/B,IAAMC,oCAAmC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAWzC,IAAMC,4BAA2B;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAiCjC,IAAMC,sCAAqC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAuB3C,IAAM,yBAAyB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAwC/B,IAAM,mCAAmC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAazC,IAAM,+BAA+B;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAerC,IAAM,wCAAwC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;;;AC7D9C,IAAM,kBAAN,cAA8B,UAAyB;AAAA;AAAA;AAAA;AAAA,EAK5D,YAAY,QAA+B;AACzC,UAAM;AAAA,MACJ,QAAQ,OAAO;AAAA,MACf,SAAS,OAAO;AAAA,MAChB,SAAS,OAAO;AAAA,MAChB,WAAW,OAAO;AAAA,MAClB,KAAK;AAAA,QACH,OAAO,OAAO;AAAA,MAChB;AAAA,MACA,gBAAgB,OAAO;AAAA,IACzB,CAAC;AAAA,EACH;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAoBA,OAAO,YAIL,QAIA,QAC0B;AAC1B,UAAM,iBAAiB,GAAG,MAAM;AAChC,WAAO,OAAO;AAAA,MACZ,MAAM,MAAM,KAAoB,YAAoB,MAAe;AACjE,cAAM,SAAS,KAAK,MAAM,KAAK,IAAI,IAAI,cAAc;AACrD,cAAM,MAAM,CAAC,YAAY,MAAM,EAAE,KAAK,GAAG;AACzC,YAAI,IAAI,OAAO;AACb,gBAAM,EAAE,
SAAS,OAAAC,OAAM,IAAI,IAAI,MAAM,UAAU,UAAU;AACzD,cAAI,SAAS;AACX,mBAAO;AAAA,cACL,SAAS;AAAA,cACT,OAAO;AAAA,cACP,WAAW;AAAA,cACX,OAAOA;AAAA,cACP,SAAS,QAAQ,QAAQ;AAAA,YAC3B;AAAA,UACF;AAAA,QACF;AAEA,cAAM,cAAc,OAAO,KAAK,IAAI,GAAG,IAAI,IAAI;AAE/C,cAAM,wBAAyB,MAAM,IAAI,MAAM;AAAA,UAC7CC;AAAA,UACA,CAAC,GAAG;AAAA,UACJ,CAAC,gBAAgB,WAAW;AAAA,QAC9B;AAEA,cAAM,UAAU,yBAAyB;AAEzC,cAAM,kBAAkB,KAAK,IAAI,GAAG,SAAS,qBAAqB;AAElE,cAAM,SAAS,SAAS,KAAK;AAC7B,YAAI,IAAI,SAAS,CAAC,SAAS;AACzB,cAAI,MAAM,WAAW,YAAY,KAAK;AAAA,QACxC;AAEA,eAAO;AAAA,UACL;AAAA,UACA,OAAO;AAAA,UACP,WAAW;AAAA,UACX;AAAA,UACA,SAAS,QAAQ,QAAQ;AAAA,QAC3B;AAAA,MACF;AAAA,MACA,MAAM,aAAa,KAAoB,YAAoB;AACzD,cAAM,SAAS,KAAK,MAAM,KAAK,IAAI,IAAI,cAAc;AACrD,cAAM,MAAM,CAAC,YAAY,MAAM,EAAE,KAAK,GAAG;AAEzC,cAAM,aAAc,MAAM,IAAI,MAAM;AAAA,UAClCC;AAAA,UACA,CAAC,GAAG;AAAA,UACJ,CAAC,IAAI;AAAA,QACP;AAEA,eAAO,KAAK,IAAI,GAAG,SAAS,UAAU;AAAA,MACxC;AAAA,MACA,MAAM,YAAY,KAAoB,YAAoB;AACxD,cAAM,UAAU,CAAC,YAAY,GAAG,EAAE,KAAK,GAAG;AAC1C,YAAI,IAAI,OAAO;AACb,cAAI,MAAM,IAAI,UAAU;AAAA,QAC1B;AACA,cAAM,IAAI,MAAM,KAAK,aAAa,CAAC,OAAO,GAAG,CAAC,IAAI,CAAC;AAAA,MACrD;AAAA,IACF;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAkBA,OAAO,cAIL,QAIA,QAC0B;AAC1B,UAAM,aAAa,GAAG,MAAM;AAC5B,WAAO,OAAO;AAAA,MACZ,MAAM,MAAM,KAAoB,YAAoB,MAAe;AACjE,cAAM,MAAM,KAAK,IAAI;AAErB,cAAM,gBAAgB,KAAK,MAAM,MAAM,UAAU;AACjD,cAAM,aAAa,CAAC,YAAY,aAAa,EAAE,KAAK,GAAG;AACvD,cAAM,iBAAiB,gBAAgB;AACvC,cAAM,cAAc,CAAC,YAAY,cAAc,EAAE,KAAK,GAAG;AAEzD,YAAI,IAAI,OAAO;AACb,gBAAM,EAAE,SAAS,OAAAF,OAAM,IAAI,IAAI,MAAM,UAAU,UAAU;AACzD,cAAI,SAAS;AACX,mBAAO;AAAA,cACL,SAAS;AAAA,cACT,OAAO;AAAA,cACP,WAAW;AAAA,cACX,OAAOA;AAAA,cACP,SAAS,QAAQ,QAAQ;AAAA,YAC3B;AAAA,UACF;AAAA,QACF;AAEA,cAAM,cAAc,OAAO,KAAK,IAAI,GAAG,IAAI,IAAI;AAE/C,cAAM,kBAAmB,MAAM,IAAI,MAAM;AAAA,UACvCG;AAAA,UACA,CAAC,YAAY,WAAW;AAAA,UACxB,CAAC,QAAQ,KAAK,YAAY,WAAW;AAAA,QACvC;AAEA,cAAM,UAAU,mBAAmB;AAEnC,cAAM,SAAS,gBAAgB,KAAK;AACpC,YAAI,IAAI,SAAS,CAAC,SAAS;AACzB,cAAI,MAAM,WAAW,YAAY,KAAK;AAAA,QACxC;AACA,eAAO;AAAA,UACL;AAAA,UACA,OAAO;AAAA,UACP,WAAW,KAAK,IAAI,GAAG,eAAe;AAAA,UACtC;AAAA,UACA,SAAS,QAAQ,QAAQ;AAAA,QAC3B;AAAA,MACF;AAAA,MACA,MAAM,aAAa,KAAoB,YAAoB;AACzD,cAAM,MAAM,KAAK,IAAI;AACrB,cAAM,gBAAgB,KAAK,MAAM,MAAM,UAAU;AACjD,cAAM,aAAa,CAAC,YAAY,aAAa,EAAE,KAAK,GAAG;AACvD,cAAM,iBAAiB,gBAAgB;AACvC,cAAM,cAAc,CAAC,YAAY,cAAc,EAAE,KAAK,GAAG;AAEzD,cAAM,aAAc,MAAM,IAAI,MAAM;AAAA,UAClCC;AAAA,UACA,CAAC,YAAY,WAAW;AAAA,UACxB,CAAC,KAAK,UAAU;AAAA,QAClB;AAEA,eAAO,KAAK,IAAI,GAAG,SAAS,UAAU;AAAA,MACxC;AAAA,MACA,MAAM,YAAY,KAAoB,YAAoB;AACxD,cAAM,UAAU,CAAC,YAAY,GAAG,EAAE,KAAK,GAAG;AAC1C,YAAI,IAAI,OAAO;AACb,cAAI,MAAM,IAAI,UAAU;AAAA,QAC1B;AACA,cAAM,IAAI,MAAM,KAAK,aAAa,CAAC,OAAO,GAAG,CAAC,IAAI,CAAC;AAAA,MACrD;AAAA,IACF;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAeA,OAAO,YAML,YAIA,UAMA,WAC0B;AAC1B,UAAM,mBAAmB,GAAG,QAAQ;AACpC,WAAO,OAAO;AAAA,MACZ,MAAM,MAAM,KAAoB,YAAoB,MAAe;AACjE,YAAI,IAAI,OAAO;AACb,gBAAM,EAAE,SAAS,OAAAJ,OAAM,IAAI,IAAI,MAAM,UAAU,UAAU;AACzD,cAAI,SAAS;AACX,mBAAO;AAAA,cACL,SAAS;AAAA,cACT,OAAO;AAAA,cACP,WAAW;AAAA,cACX,OAAOA;AAAA,cACP,SAAS,QAAQ,QAAQ;AAAA,YAC3B;AAAA,UACF;AAAA,QACF;AAEA,cAAM,MAAM,KAAK,IAAI;AAErB,cAAM,cAAc,OAAO,KAAK,IAAI,GAAG,IAAI,IAAI;AAE/C,cAAM,CAAC,WAAW,KAAK,IAAK,MAAM,IAAI,MAAM;AAAA,UAC1C;AAAA,UACA,CAAC,UAAU;AAAA,UACX,CAAC,WAAW,kBAAkB,YAAY,KAAK,WAAW;AAAA,QAC5D;AAEA,cAAM,UAAU,aAAa;AAC7B,YAAI,IAAI,SAAS,CAAC,SAAS;AACzB,cAAI,MAAM,WAAW,YAAY,KAAK;AAAA,QACxC;AAEA,eAAO;AAAA,UACL;AAAA,UACA,OAAO;AAAA,UACP;AAAA,UACA;AAAA,UACA,SAAS,QAAQ,QAAQ;AAAA,QAC3B;AAAA,MACF;AAAA,MACA,MAAM,aAAa,KAAoB,YAAoB;AACzD,cAAM,kBAAmB,MAAM,IAAI,MAAM;A
AAA,UACvC;AAAA,UACA,CAAC,UAAU;AAAA,UACX,CAAC,SAAS;AAAA,QACZ;AACA,eAAO;AAAA,MACT;AAAA,MACA,MAAM,YAAY,KAAoB,YAAoB;AACxD,cAAM,UAAU;AAChB,YAAI,IAAI,OAAO;AACb,cAAI,MAAM,IAAI,UAAU;AAAA,QAC1B;AACA,cAAM,IAAI,MAAM,KAAK,aAAa,CAAC,OAAO,GAAG,CAAC,IAAI,CAAC;AAAA,MACrD;AAAA,IACF;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAyBA,OAAO,kBAIL,QAIA,QAC0B;AAC1B,UAAM,iBAAiB,GAAG,MAAM;AAEhC,WAAO,OAAO;AAAA,MACZ,MAAM,MAAM,KAAoB,YAAoB,MAAe;AACjE,YAAI,CAAC,IAAI,OAAO;AACd,gBAAM,IAAI,MAAM,iCAAiC;AAAA,QACnD;AACA,cAAM,SAAS,KAAK,MAAM,KAAK,IAAI,IAAI,cAAc;AACrD,cAAM,MAAM,CAAC,YAAY,MAAM,EAAE,KAAK,GAAG;AACzC,cAAM,SAAS,SAAS,KAAK;AAC7B,cAAM,cAAc,OAAO,KAAK,IAAI,GAAG,IAAI,IAAI;AAE/C,cAAM,MAAM,OAAO,IAAI,MAAM,IAAI,GAAG,MAAM;AAC1C,YAAI,KAAK;AACP,gBAAM,0BAA0B,IAAI,MAAM,KAAK,GAAG;AAClD,gBAAM,UAAU,0BAA0B;AAE1C,gBAAM,UAAU,UACZ,IAAI,MACH,KAAK,8BAA8B,CAAC,GAAG,GAAG,CAAC,gBAAgB,WAAW,CAAC,EACvE,KAAK,CAAC,MAAM;AACX,gBAAI,MAAO,IAAI,KAAK,CAAW;AAAA,UACjC,CAAC,IACD,QAAQ,QAAQ;AAEpB,iBAAO;AAAA,YACL;AAAA,YACA,OAAO;AAAA,YACP,WAAW,SAAS;AAAA,YACpB;AAAA,YACA;AAAA,UACF;AAAA,QACF;AAEA,cAAM,wBAAyB,MAAM,IAAI,MAAM;AAAA,UAC7C;AAAA,UACA,CAAC,GAAG;AAAA,UACJ,CAAC,gBAAgB,WAAW;AAAA,QAC9B;AACA,YAAI,MAAM,IAAI,KAAK,qBAAqB;AACxC,cAAM,YAAY,SAAS;AAE3B,eAAO;AAAA,UACL,SAAS,aAAa;AAAA,UACtB,OAAO;AAAA,UACP;AAAA,UACA;AAAA,UACA,SAAS,QAAQ,QAAQ;AAAA,QAC3B;AAAA,MACF;AAAA,MACA,MAAM,aAAa,KAAoB,YAAoB;AACzD,YAAI,CAAC,IAAI,OAAO;AACd,gBAAM,IAAI,MAAM,iCAAiC;AAAA,QACnD;AAEA,cAAM,SAAS,KAAK,MAAM,KAAK,IAAI,IAAI,cAAc;AACrD,cAAM,MAAM,CAAC,YAAY,MAAM,EAAE,KAAK,GAAG;AAEzC,cAAM,MAAM,OAAO,IAAI,MAAM,IAAI,GAAG,MAAM;AAC1C,YAAI,KAAK;AACP,gBAAM,mBAAmB,IAAI,MAAM,IAAI,GAAG,KAAK;AAC/C,iBAAO,KAAK,IAAI,GAAG,SAAS,gBAAgB;AAAA,QAC9C;AAEA,cAAM,aAAc,MAAM,IAAI,MAAM;AAAA,UAClC;AAAA,UACA,CAAC,GAAG;AAAA,UACJ,CAAC,IAAI;AAAA,QACP;AACA,eAAO,KAAK,IAAI,GAAG,SAAS,UAAU;AAAA,MACxC;AAAA,MACA,MAAM,YAAY,KAAoB,YAAoB;AACxD,cAAM,UAAU,CAAC,YAAY,GAAG,EAAE,KAAK,GAAG;AAE1C,YAAI,CAAC,IAAI,OAAO;AACd,gBAAM,IAAI,MAAM,iCAAiC;AAAA,QACnD;AACA,YAAI,MAAM,IAAI,UAAU;AACxB,cAAM,IAAI,MAAM,KAAK,aAAa,CAAC,OAAO,GAAG,CAAC,IAAI,CAAC;AAAA,MACrD;AAAA,IACF;AAAA,EACF;AACF;","names":["CoreAnalytics","reset","requestId","current","fixedWindowLimitScript","fixedWindowRemainingTokensScript","slidingWindowLimitScript","slidingWindowRemainingTokensScript","reset","fixedWindowLimitScript","fixedWindowRemainingTokensScript","slidingWindowLimitScript","slidingWindowRemainingTokensScript"]}
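The token-bucket Lua script embedded in the source map above refills the bucket in whole intervals (`numRefills = floor((now - refilledAt) / interval)`), caps it at `maxTokens`, and returns `{-1, refilledAt + interval}` when no tokens remain. A minimal TypeScript sketch of that refill arithmetic, for illustration only — in the package the accounting runs atomically inside Redis via this script, and the names below are paraphrased from the embedded source:

```ts
// Sketch of the refill math from tokenBucketLimitScript above (illustrative only).
type Bucket = { refilledAt: number; tokens: number };

function refill(
  bucket: Bucket,
  now: number,        // current timestamp in ms
  interval: number,   // ms between refills
  refillRate: number, // tokens added per interval
  maxTokens: number,
): Bucket {
  if (now < bucket.refilledAt + interval) return bucket; // no full interval has passed
  const numRefills = Math.floor((now - bucket.refilledAt) / interval);
  return {
    tokens: Math.min(maxTokens, bucket.tokens + numRefills * refillRate),
    refilledAt: bucket.refilledAt + numRefills * interval,
  };
}

// Example: 5 tokens per 10 s, max 10, last refilled 25 s ago with 2 tokens left.
// Two whole intervals have passed, so 2 + 2 * 5 = 12, capped at 10.
const b = refill({ refilledAt: 0, tokens: 2 }, 25_000, 10_000, 5, 10);
// b.tokens === 10, b.refilledAt === 20_000
```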
package/dist/index.mjs CHANGED
@@ -32,30 +32,27 @@ var Analytics = class {
32
32
  await this.analytics.ingest(this.table, event);
33
33
  }
34
34
  async series(filter, cutoff) {
35
- const records = await this.analytics.query(this.table, {
36
- filter: [filter],
37
- range: [cutoff, Date.now()]
38
- });
39
- return records;
35
+ const timestampCount = Math.min(
36
+ (this.analytics.getBucket(Date.now()) - this.analytics.getBucket(cutoff)) / (60 * 60 * 1e3),
37
+ 256
38
+ );
39
+ return this.analytics.aggregateBucketsWithPipeline(this.table, filter, timestampCount);
40
40
  }
41
41
  async getUsage(cutoff = 0) {
42
- const records = await this.analytics.aggregateBy(this.table, "identifier", {
43
- range: [cutoff, Date.now()]
44
- });
45
- const usage = {};
46
- for (const bucket of records) {
47
- for (const [k, v] of Object.entries(bucket)) {
48
- if (k === "time") {
49
- continue;
50
- }
51
- if (!usage[k]) {
52
- usage[k] = { success: 0, blocked: 0 };
53
- }
54
- usage[k].success += v.true ?? 0;
55
- usage[k].blocked += v.false ?? 0;
56
- }
57
- }
58
- return usage;
42
+ const timestampCount = Math.min(
43
+ (this.analytics.getBucket(Date.now()) - this.analytics.getBucket(cutoff)) / (60 * 60 * 1e3),
44
+ 256
45
+ );
46
+ const records = await this.analytics.getAllowedBlocked(this.table, timestampCount);
47
+ return records;
48
+ }
49
+ async getUsageOverTime(timestampCount, groupby) {
50
+ const result = await this.analytics.aggregateBucketsWithPipeline(this.table, groupby, timestampCount);
51
+ return result;
52
+ }
53
+ async getMostAllowedBlocked(timestampCount, getTop) {
54
+ getTop = getTop ?? 5;
55
+ return this.analytics.getMostAllowedBlocked(this.table, timestampCount, getTop);
59
56
  }
60
57
  };
61
58
 
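The rewritten `series` and `getUsage` above no longer query raw events; they derive how many hourly analytics buckets lie between `cutoff` and now (capped at 256) and hand that count to the pipeline-based aggregation helpers from `@upstash/core-analytics`. A small sketch of that bucket-count arithmetic, assuming `getBucket` returns a bucket's start timestamp in milliseconds (which is what the division by one hour in the hunk implies):

```ts
// Sketch of the timestampCount calculation used by series() and getUsage() above.
// Assumption: getBucket(t) floors t to the start of its 1 h analytics bucket, in ms.
const HOUR_MS = 60 * 60 * 1000;
const getBucket = (t: number) => Math.floor(t / HOUR_MS) * HOUR_MS;

function timestampCount(cutoff: number, now = Date.now()): number {
  // Number of hourly buckets between cutoff and now, never more than 256.
  return Math.min((getBucket(now) - getBucket(cutoff)) / HOUR_MS, 256);
}

// Example: a cutoff 24 hours ago aggregates 24 buckets;
// the default cutoff of 0 hits the 256-bucket cap.
timestampCount(Date.now() - 24 * HOUR_MS); // 24
timestampCount(0);                          // 256
```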
@@ -94,6 +91,9 @@ var Cache = class {
94
91
  this.cache.set(key, value);
95
92
  return value;
96
93
  }
94
+ pop(key) {
95
+ this.cache.delete(key);
96
+ }
97
97
  empty() {
98
98
  this.cache.clear();
99
99
  }
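`pop` is new on the ephemeral cache: it evicts a single identifier instead of wiping the whole map the way `empty` does. A minimal Map-backed sketch with the same surface, mirroring only the methods visible in this hunk and in the bundled `Cache` class:

```ts
// Minimal Map-backed ephemeral cache sketch (subset of the bundled Cache class).
class EphemeralCacheSketch {
  private readonly cache = new Map<string, number>();

  set(key: string, value: number): void {
    this.cache.set(key, value);
  }
  get(key: string): number | null {
    return this.cache.get(key) ?? null;
  }
  incr(key: string): number {
    const value = (this.cache.get(key) ?? 0) + 1;
    this.cache.set(key, value);
    return value;
  }
  // Added in this diff: drop a single identifier's entry...
  pop(key: string): void {
    this.cache.delete(key);
  }
  // ...as opposed to clearing every cached identifier.
  empty(): void {
    this.cache.clear();
  }
}
```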
@@ -132,7 +132,7 @@ var fixedWindowLimitScript = `
132
132
 
133
133
  redis.call("HSET", key, id, incrementBy)
134
134
  local fields = redis.call("HGETALL", key)
135
- if #fields == 1 and tonumber(fields[1])==incrementBy then
135
+ if #fields == 2 and tonumber(fields[2])==incrementBy then
136
136
  -- The first time this key is set, and the value will be equal to incrementBy.
137
137
  -- So we only need the expire command once
138
138
  redis.call("PEXPIRE", key, window)
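The `#fields == 1` → `#fields == 2` change above matters because `HGETALL` returns a flat, alternating `field, value, field, value, …` array: a hash holding exactly one request id therefore has length 2, with its value at (1-based) index 2. With the old `== 1` check the condition could never hold, so the `PEXPIRE` branch that should run on the window's first write never fired. The same flat layout is what the bundled JS parses with its `index % 2` reducers; a small sketch of that parsing, with names chosen here for illustration:

```ts
// HGETALL reply for a window hash: ["<requestId>", "<count>", "<requestId>", "<count>", ...]
// Sum the counts (odd 0-based indexes), mirroring the index % 2 reducers in the bundle.
function usedTokens(fields: string[]): number {
  return fields.reduce(
    (acc, value, index) => (index % 2 ? acc + Number.parseInt(value, 10) : acc),
    0,
  );
}

// A freshly created window with a single request and incrementBy = 1:
usedTokens(["req_abc", "1"]); // 1 — fields.length === 2, value at Lua index 2
```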
@@ -542,6 +542,9 @@ var MultiRegionRatelimit = class extends Ratelimit {
542
542
  },
543
543
  async resetTokens(ctx, identifier) {
544
544
  const pattern = [identifier, "*"].join(":");
545
+ if (ctx.cache) {
546
+ ctx.cache.pop(identifier);
547
+ }
545
548
  for (const db of ctx.redis) {
546
549
  await db.eval(resetScript, [pattern], [null]);
547
550
  }
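With this change, `resetTokens` also evicts the identifier from the ephemeral cache, so an identifier that was blocked in memory becomes usable immediately after a manual reset instead of only once its cached `reset` timestamp expires. A hedged usage sketch (the identifier is hypothetical; `resetUsedTokens` is the public wrapper defined in the bundled `Ratelimit` base class):

```ts
import { MultiRegionRatelimit } from "@upstash/ratelimit";
import { Redis } from "@upstash/redis";

// Sketch: reset one (hypothetical) identifier across all regions.
const ratelimit = new MultiRegionRatelimit({
  redis: [Redis.fromEnv()], // one or more regional Redis instances
  limiter: MultiRegionRatelimit.fixedWindow(10, "10 s"),
});

// Deletes the identifier's keys in every region and, as of this diff,
// also pops it from the in-memory ephemeral cache.
await ratelimit.resetUsedTokens("user_123");
```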
@@ -587,6 +590,9 @@ var MultiRegionRatelimit = class extends Ratelimit {
587
590
  }));
588
591
  const percentageInCurrent = now % windowDuration / windowDuration;
589
592
  const [current, previous, success] = await Promise.any(dbs.map((s) => s.request));
593
+ if (success) {
594
+ current.push(requestId, incrementBy.toString());
595
+ }
590
596
  const previousUsedTokens = previous.reduce((accTokens, usedToken, index) => {
591
597
  let parsedToken = 0;
592
598
  if (index % 2) {
@@ -601,26 +607,30 @@ var MultiRegionRatelimit = class extends Ratelimit {
601
607
  }
602
608
  return accTokens + parsedToken;
603
609
  }, 0);
604
- const previousPartialUsed = previousUsedTokens * (1 - percentageInCurrent);
610
+ const previousPartialUsed = Math.ceil(previousUsedTokens * (1 - percentageInCurrent));
605
611
  const usedTokens = previousPartialUsed + currentUsedTokens;
606
612
  const remaining = tokens - usedTokens;
607
613
  async function sync() {
608
614
  const res = await Promise.all(dbs.map((s) => s.request));
609
- const allCurrentIds = res.flatMap(([current2]) => current2).reduce((accCurrentIds, curr, index) => {
610
- if (index % 2 === 0) {
611
- accCurrentIds.push(curr);
612
- }
613
- return accCurrentIds;
614
- }, []);
615
+ const allCurrentIds = Array.from(
616
+ new Set(
617
+ res.flatMap(([current2]) => current2).reduce((acc, curr, index) => {
618
+ if (index % 2 === 0) {
619
+ acc.push(curr);
620
+ }
621
+ return acc;
622
+ }, [])
623
+ ).values()
624
+ );
615
625
  for (const db of dbs) {
616
- const [_current, previous2, _success] = await db.request;
617
- const dbIds = previous2.reduce((ids, currentId, index) => {
626
+ const [current2, _previous, _success] = await db.request;
627
+ const dbIds = current2.reduce((ids, currentId, index) => {
618
628
  if (index % 2 === 0) {
619
629
  ids.push(currentId);
620
630
  }
621
631
  return ids;
622
632
  }, []);
623
- const usedDbTokens = previous2.reduce((accTokens, usedToken, index) => {
633
+ const usedDbTokens = current2.reduce((accTokens, usedToken, index) => {
624
634
  let parsedToken = 0;
625
635
  if (index % 2) {
626
636
  parsedToken = Number.parseInt(usedToken);
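Two behavioral fixes appear in the sliding-window hunks above: on a successful hit the local copy of the current window now has this request's own `requestId`/`incrementBy` pair pushed onto it (the Lua script reads the hash before writing this request's entry, so the reply alone undercounts by one request), and the previous window's contribution is rounded up with `Math.ceil` rather than left fractional. The resulting weighted count, sketched as plain arithmetic with illustrative numbers:

```ts
// Weighted sliding-window count, mirroring the updated bundle logic above.
// Illustrative numbers: limit of 10 tokens, 40% of the way into the current window.
const tokens = 10;
const percentageInCurrent = 0.4;
const previousUsedTokens = 6; // summed from the previous window's hash
const currentUsedTokens = 3;  // summed from the current hash, incl. this request

// The previous window counts only for the share that still overlaps, rounded up.
const previousPartialUsed = Math.ceil(previousUsedTokens * (1 - percentageInCurrent)); // ceil(3.6) = 4
const remaining = tokens - (previousPartialUsed + currentUsedTokens); // 10 - 7 = 3
```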
@@ -671,6 +681,9 @@ var MultiRegionRatelimit = class extends Ratelimit {
671
681
  },
672
682
  async resetTokens(ctx, identifier) {
673
683
  const pattern = [identifier, "*"].join(":");
684
+ if (ctx.cache) {
685
+ ctx.cache.pop(identifier);
686
+ }
674
687
  for (const db of ctx.redis) {
675
688
  await db.eval(resetScript, [pattern], [null]);
676
689
  }
@@ -919,6 +932,9 @@ var RegionRatelimit = class extends Ratelimit {
919
932
  },
920
933
  async resetTokens(ctx, identifier) {
921
934
  const pattern = [identifier, "*"].join(":");
935
+ if (ctx.cache) {
936
+ ctx.cache.pop(identifier);
937
+ }
922
938
  await ctx.redis.eval(resetScript, [pattern], [null]);
923
939
  }
924
940
  });
@@ -994,6 +1010,9 @@ var RegionRatelimit = class extends Ratelimit {
994
1010
  },
995
1011
  async resetTokens(ctx, identifier) {
996
1012
  const pattern = [identifier, "*"].join(":");
1013
+ if (ctx.cache) {
1014
+ ctx.cache.pop(identifier);
1015
+ }
997
1016
  await ctx.redis.eval(resetScript, [pattern], [null]);
998
1017
  }
999
1018
  });
@@ -1056,6 +1075,9 @@ var RegionRatelimit = class extends Ratelimit {
1056
1075
  },
1057
1076
  async resetTokens(ctx, identifier) {
1058
1077
  const pattern = identifier;
1078
+ if (ctx.cache) {
1079
+ ctx.cache.pop(identifier);
1080
+ }
1059
1081
  await ctx.redis.eval(resetScript, [pattern], [null]);
1060
1082
  }
1061
1083
  });
@@ -1148,7 +1170,7 @@ var RegionRatelimit = class extends Ratelimit {
1148
1170
  if (!ctx.cache) {
1149
1171
  throw new Error("This algorithm requires a cache");
1150
1172
  }
1151
- ctx.cache.empty();
1173
+ ctx.cache.pop(identifier);
1152
1174
  await ctx.redis.eval(resetScript, [pattern], [null]);
1153
1175
  }
1154
1176
  });
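For the experimental `cachedFixedWindow` algorithm, resetting one identifier previously called `ctx.cache.empty()`, discarding every identifier's in-memory state; it now calls `ctx.cache.pop(identifier)`, removing only the entry stored under the reset identifier. In terms of the Map-backed cache shown earlier in this diff, that is the difference between `clear()` and a single `delete()` (keys and values below are illustrative):

```ts
// Sketch: in-memory cache state before a reset of "user_a" (entries are illustrative).
const cache = new Map<string, number>([
  ["user_a", 7],
  ["user_b", 2],
]);

// Previously: cache.clear()   -> every identifier's cached entry is dropped.
// Now:        cache.delete()  -> only the reset identifier is evicted.
cache.delete("user_a"); // "user_b" keeps its cached entry
```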
@@ -1 +1 @@
1
- {"version":3,"sources":["../src/analytics.ts","../src/cache.ts","../src/duration.ts","../src/lua-scripts/multi.ts","../src/lua-scripts/reset.ts","../src/ratelimit.ts","../src/multi.ts","../src/lua-scripts/single.ts","../src/single.ts"],"sourcesContent":["import { Analytics as CoreAnalytics } from \"@upstash/core-analytics\";\nimport type { Redis } from \"./types\";\n\nexport type Geo = {\n country?: string;\n city?: string;\n region?: string;\n ip?: string;\n};\nexport type Event = Geo & {\n identifier: string;\n time: number;\n success: boolean;\n};\n\nexport type AnalyticsConfig = {\n redis: Redis;\n prefix?: string;\n};\n\n/**\n * The Analytics package is experimental and can change at any time.\n */\nexport class Analytics {\n private readonly analytics: CoreAnalytics;\n private readonly table = \"events\";\n\n constructor(config: AnalyticsConfig) {\n this.analytics = new CoreAnalytics({\n // @ts-expect-error we need to fix the types in core-analytics, it should only require the methods it needs, not the whole sdk\n redis: config.redis,\n window: \"1h\",\n prefix: config.prefix ?? \"@upstash/ratelimit\",\n retention: \"90d\",\n });\n }\n\n /**\n * Try to extract the geo information from the request\n *\n * This handles Vercel's `req.geo` and and Cloudflare's `request.cf` properties\n * @param req\n * @returns\n */\n public extractGeo(req: { geo?: Geo; cf?: Geo }): Geo {\n if (typeof req.geo !== \"undefined\") {\n return req.geo;\n }\n if (typeof req.cf !== \"undefined\") {\n return req.cf;\n }\n\n return {};\n }\n\n public async record(event: Event): Promise<void> {\n await this.analytics.ingest(this.table, event);\n }\n\n async series<TFilter extends keyof Omit<Event, \"time\">>(\n filter: TFilter,\n cutoff: number,\n ): Promise<({ time: number } & Record<string, number>)[]> {\n const records = await this.analytics.query(this.table, {\n filter: [filter],\n range: [cutoff, Date.now()],\n });\n return records;\n }\n public async getUsage(cutoff = 0): Promise<Record<string, { success: number; blocked: number }>> {\n const records = await this.analytics.aggregateBy(this.table, \"identifier\", {\n range: [cutoff, Date.now()],\n });\n const usage = {} as Record<string, { success: number; blocked: number }>;\n for (const bucket of records) {\n for (const [k, v] of Object.entries(bucket)) {\n if (k === \"time\") {\n continue;\n }\n\n if (!usage[k]) {\n usage[k] = { success: 0, blocked: 0 };\n }\n // @ts-ignore\n usage[k].success += v.true ?? 0;\n // @ts-ignore\n usage[k].blocked += v.false ?? 
0;\n }\n }\n return usage;\n }\n}\n","import type { EphemeralCache } from \"./types\";\n\nexport class Cache implements EphemeralCache {\n /**\n * Stores identifier -> reset (in milliseconds)\n */\n private readonly cache: Map<string, number>;\n\n constructor(cache: Map<string, number>) {\n this.cache = cache;\n }\n\n public isBlocked(identifier: string): { blocked: boolean; reset: number } {\n if (!this.cache.has(identifier)) {\n return { blocked: false, reset: 0 };\n }\n const reset = this.cache.get(identifier)!;\n if (reset < Date.now()) {\n this.cache.delete(identifier);\n return { blocked: false, reset: 0 };\n }\n\n return { blocked: true, reset: reset };\n }\n\n public blockUntil(identifier: string, reset: number): void {\n this.cache.set(identifier, reset);\n }\n\n public set(key: string, value: number): void {\n this.cache.set(key, value);\n }\n public get(key: string): number | null {\n return this.cache.get(key) || null;\n }\n\n public incr(key: string): number {\n let value = this.cache.get(key) ?? 0;\n value += 1;\n this.cache.set(key, value);\n return value;\n }\n\n public empty(): void {\n this.cache.clear()\n }\n}\n","type Unit = \"ms\" | \"s\" | \"m\" | \"h\" | \"d\";\nexport type Duration = `${number} ${Unit}` | `${number}${Unit}`;\n\n/**\n * Convert a human readable duration to milliseconds\n */\nexport function ms(d: Duration): number {\n const match = d.match(/^(\\d+)\\s?(ms|s|m|h|d)$/);\n if (!match) {\n throw new Error(`Unable to parse window size: ${d}`);\n }\n const time = Number.parseInt(match[1]);\n const unit = match[2] as Unit;\n\n switch (unit) {\n case \"ms\":\n return time;\n case \"s\":\n return time * 1000;\n case \"m\":\n return time * 1000 * 60;\n case \"h\":\n return time * 1000 * 60 * 60;\n case \"d\":\n return time * 1000 * 60 * 60 * 24;\n\n default:\n throw new Error(`Unable to parse window size: ${d}`);\n }\n}\n","export const fixedWindowLimitScript = `\n\tlocal key = KEYS[1]\n\tlocal id = ARGV[1]\n\tlocal window = ARGV[2]\n\tlocal incrementBy = tonumber(ARGV[3])\n\n\tredis.call(\"HSET\", key, id, incrementBy)\n\tlocal fields = redis.call(\"HGETALL\", key)\n\tif #fields == 1 and tonumber(fields[1])==incrementBy then\n\t-- The first time this key is set, and the value will be equal to incrementBy.\n\t-- So we only need the expire command once\n\t redis.call(\"PEXPIRE\", key, window)\n\tend\n\n\treturn fields\n`;\nexport const fixedWindowRemainingTokensScript = `\n local key = KEYS[1]\n local tokens = 0\n\n local fields = redis.call(\"HGETALL\", key)\n\n return fields\n `;\n\nexport const slidingWindowLimitScript = `\n\tlocal currentKey = KEYS[1] -- identifier including prefixes\n\tlocal previousKey = KEYS[2] -- key of the previous bucket\n\tlocal tokens = tonumber(ARGV[1]) -- tokens per window\n\tlocal now = ARGV[2] -- current timestamp in milliseconds\n\tlocal window = ARGV[3] -- interval in milliseconds\n\tlocal requestId = ARGV[4] -- uuid for this request\n\tlocal incrementBy = tonumber(ARGV[5]) -- custom rate, default is 1\n\n\tlocal currentFields = redis.call(\"HGETALL\", currentKey)\n\tlocal requestsInCurrentWindow = 0\n\tfor i = 2, #currentFields, 2 do\n\trequestsInCurrentWindow = requestsInCurrentWindow + tonumber(currentFields[i])\n\tend\n\n\tlocal previousFields = redis.call(\"HGETALL\", previousKey)\n\tlocal requestsInPreviousWindow = 0\n\tfor i = 2, #previousFields, 2 do\n\trequestsInPreviousWindow = requestsInPreviousWindow + tonumber(previousFields[i])\n\tend\n\n\tlocal percentageInCurrent = ( now % window) / window\n\tif 
requestsInPreviousWindow * (1 - percentageInCurrent ) + requestsInCurrentWindow >= tokens then\n\t return {currentFields, previousFields, false}\n\tend\n\n\tredis.call(\"HSET\", currentKey, requestId, incrementBy)\n\n\tif requestsInCurrentWindow == 0 then \n\t -- The first time this key is set, the value will be equal to incrementBy.\n\t -- So we only need the expire command once\n\t redis.call(\"PEXPIRE\", currentKey, window * 2 + 1000) -- Enough time to overlap with a new window + 1 second\n\tend\n\treturn {currentFields, previousFields, true}\n`;\n\nexport const slidingWindowRemainingTokensScript = `\n\tlocal currentKey = KEYS[1] -- identifier including prefixes\n\tlocal previousKey = KEYS[2] -- key of the previous bucket\n\tlocal now \t= ARGV[1] -- current timestamp in milliseconds\n \tlocal window \t= ARGV[2] -- interval in milliseconds\n\n\tlocal currentFields = redis.call(\"HGETALL\", currentKey)\n\tlocal requestsInCurrentWindow = 0\n\tfor i = 2, #currentFields, 2 do\n\trequestsInCurrentWindow = requestsInCurrentWindow + tonumber(currentFields[i])\n\tend\n\n\tlocal previousFields = redis.call(\"HGETALL\", previousKey)\n\tlocal requestsInPreviousWindow = 0\n\tfor i = 2, #previousFields, 2 do\n\trequestsInPreviousWindow = requestsInPreviousWindow + tonumber(previousFields[i])\n\tend\n\n\tlocal percentageInCurrent = ( now % window) / window\n \trequestsInPreviousWindow = math.floor(( 1 - percentageInCurrent ) * requestsInPreviousWindow)\n\t\n\treturn requestsInCurrentWindow + requestsInPreviousWindow\n`;\n","export const resetScript = `\n local pattern = KEYS[1]\n\n -- Initialize cursor to start from 0\n local cursor = \"0\"\n\n repeat\n -- Scan for keys matching the pattern\n local scan_result = redis.call('SCAN', cursor, 'MATCH', pattern)\n\n -- Extract cursor for the next iteration\n cursor = scan_result[1]\n\n -- Extract keys from the scan result\n local keys = scan_result[2]\n\n for i=1, #keys do\n redis.call('DEL', keys[i])\n end\n\n -- Continue scanning until cursor is 0 (end of keyspace)\n until cursor == \"0\"\n `;\n","import { Analytics, type Geo } from \"./analytics\";\nimport { Cache } from \"./cache\";\nimport type { Algorithm, Context, RatelimitResponse } from \"./types\";\n\nexport class TimeoutError extends Error {\n constructor() {\n super(\"Timeout\");\n this.name = \"TimeoutError\";\n }\n}\nexport type RatelimitConfig<TContext> = {\n /**\n * The ratelimiter function to use.\n *\n * Choose one of the predefined ones or implement your own.\n * Available algorithms are exposed via static methods:\n * - Ratelimiter.fixedWindow\n * - Ratelimiter.slidingWindow\n * - Ratelimiter.tokenBucket\n */\n\n limiter: Algorithm<TContext>;\n\n ctx: TContext;\n /**\n * All keys in redis are prefixed with this.\n *\n * @default `@upstash/ratelimit`\n */\n prefix?: string;\n\n /**\n * If enabled, the ratelimiter will keep a global cache of identifiers, that have\n * exhausted their ratelimit. In serverless environments this is only possible if\n * you create the ratelimiter instance outside of your handler function. While the\n * function is still hot, the ratelimiter can block requests without having to\n * request data from redis, thus saving time and money.\n *\n * Whenever an identifier has exceeded its limit, the ratelimiter will add it to an\n * internal list together with its reset timestamp. 
If the same identifier makes a\n * new request before it is reset, we can immediately reject it.\n *\n * Set to `false` to disable.\n *\n * If left undefined, a map is created automatically, but it can only work\n * if the map or the ratelimit instance is created outside your serverless function handler.\n */\n ephemeralCache?: Map<string, number> | false;\n\n /**\n * If set, the ratelimiter will allow requests to pass after this many milliseconds.\n *\n * Use this if you want to allow requests in case of network problems\n *\n * @default 5000\n */\n timeout?: number;\n\n /**\n * If enabled, the ratelimiter will store analytics data in redis, which you can check out at\n * https://console.upstash.com/ratelimit\n *\n * @default false\n */\n analytics?: boolean;\n};\n\n/**\n * Ratelimiter using serverless redis from https://upstash.com/\n *\n * @example\n * ```ts\n * const { limit } = new Ratelimit({\n * redis: Redis.fromEnv(),\n * limiter: Ratelimit.slidingWindow(\n * 10, // Allow 10 requests per window of 30 minutes\n * \"30 m\", // interval of 30 minutes\n * ),\n * })\n *\n * ```\n */\nexport abstract class Ratelimit<TContext extends Context> {\n protected readonly limiter: Algorithm<TContext>;\n\n protected readonly ctx: TContext;\n\n protected readonly prefix: string;\n\n protected readonly timeout: number;\n\n protected readonly analytics?: Analytics;\n\n constructor(config: RatelimitConfig<TContext>) {\n this.ctx = config.ctx;\n this.limiter = config.limiter;\n this.timeout = config.timeout ?? 5000;\n this.prefix = config.prefix ?? \"@upstash/ratelimit\";\n this.analytics = config.analytics\n ? new Analytics({\n redis: Array.isArray(this.ctx.redis) ? this.ctx.redis[0] : this.ctx.redis,\n prefix: this.prefix,\n })\n : undefined;\n\n if (config.ephemeralCache instanceof Map) {\n this.ctx.cache = new Cache(config.ephemeralCache);\n } else if (typeof config.ephemeralCache === \"undefined\") {\n this.ctx.cache = new Cache(new Map());\n }\n }\n\n /**\n * Determine if a request should pass or be rejected based on the identifier and previously chosen ratelimit.\n *\n * Use this if you want to reject all requests that you can not handle right now.\n *\n * @example\n * ```ts\n * const ratelimit = new Ratelimit({\n * redis: Redis.fromEnv(),\n * limiter: Ratelimit.slidingWindow(10, \"10 s\")\n * })\n *\n * const { success } = await ratelimit.limit(id)\n * if (!success){\n * return \"Nope\"\n * }\n * return \"Yes\"\n * ```\n *\n * @param req.rate - The rate at which tokens will be added or consumed from the token bucket. A higher rate allows for more requests to be processed. 
Defaults to 1 token per interval if not specified.\n *\n * Usage with `req.rate`\n * @example\n * ```ts\n * const ratelimit = new Ratelimit({\n * redis: Redis.fromEnv(),\n * limiter: Ratelimit.slidingWindow(100, \"10 s\")\n * })\n *\n * const { success } = await ratelimit.limit(id, {rate: 10})\n * if (!success){\n * return \"Nope\"\n * }\n * return \"Yes\"\n * ```\n */\n public limit = async (\n identifier: string,\n req?: { geo?: Geo; rate?: number },\n ): Promise<RatelimitResponse> => {\n const key = [this.prefix, identifier].join(\":\");\n let timeoutId: any = null;\n try {\n const arr: Promise<RatelimitResponse>[] = [this.limiter().limit(this.ctx, key, req?.rate)];\n if (this.timeout > 0) {\n arr.push(\n new Promise((resolve) => {\n timeoutId = setTimeout(() => {\n resolve({\n success: true,\n limit: 0,\n remaining: 0,\n reset: 0,\n pending: Promise.resolve(),\n });\n }, this.timeout);\n }),\n );\n }\n\n const res = await Promise.race(arr);\n if (this.analytics) {\n try {\n const geo = req ? this.analytics.extractGeo(req) : undefined;\n const analyticsP = this.analytics\n .record({\n identifier,\n time: Date.now(),\n success: res.success,\n ...geo,\n })\n .catch((err) => {\n console.warn(\"Failed to record analytics\", err);\n });\n res.pending = Promise.all([res.pending, analyticsP]);\n } catch (err) {\n console.warn(\"Failed to record analytics\", err);\n }\n }\n return res;\n } finally {\n if (timeoutId) {\n clearTimeout(timeoutId);\n }\n }\n };\n\n /**\n * Block until the request may pass or timeout is reached.\n *\n * This method returns a promise that resolves as soon as the request may be processed\n * or after the timeout has been reached.\n *\n * Use this if you want to delay the request until it is ready to get processed.\n *\n * @example\n * ```ts\n * const ratelimit = new Ratelimit({\n * redis: Redis.fromEnv(),\n * limiter: Ratelimit.slidingWindow(10, \"10 s\")\n * })\n *\n * const { success } = await ratelimit.blockUntilReady(id, 60_000)\n * if (!success){\n * return \"Nope\"\n * }\n * return \"Yes\"\n * ```\n */\n public blockUntilReady = async (\n /**\n * An identifier per user or api.\n * Choose a userID, or api token, or ip address.\n *\n * If you want to limit your api across all users, you can set a constant string.\n */\n identifier: string,\n /**\n * Maximum duration to wait in milliseconds.\n * After this time the request will be denied.\n */\n timeout: number,\n ): Promise<RatelimitResponse> => {\n if (timeout <= 0) {\n throw new Error(\"timeout must be positive\");\n }\n let res: RatelimitResponse;\n\n const deadline = Date.now() + timeout;\n while (true) {\n res = await this.limit(identifier);\n if (res.success) {\n break;\n }\n if (res.reset === 0) {\n throw new Error(\"This should not happen\");\n }\n\n const wait = Math.min(res.reset, deadline) - Date.now();\n await new Promise((r) => setTimeout(r, wait));\n\n if (Date.now() > deadline) {\n break;\n }\n }\n return res!;\n };\n\n public resetUsedTokens = async (identifier: string) => {\n const pattern = [this.prefix, identifier].join(\":\");\n await this.limiter().resetTokens(this.ctx, pattern);\n };\n\n public getRemaining = async (identifier: string): Promise<number> => {\n const pattern = [this.prefix, identifier].join(\":\");\n\n return await this.limiter().getRemaining(this.ctx, pattern);\n };\n}\n","import { Cache } from \"./cache\";\nimport type { Duration } from \"./duration\";\nimport { ms } from \"./duration\";\nimport {\n fixedWindowLimitScript,\n fixedWindowRemainingTokensScript,\n 
slidingWindowLimitScript,\n slidingWindowRemainingTokensScript,\n} from \"./lua-scripts/multi\";\nimport { resetScript } from \"./lua-scripts/reset\";\nimport { Ratelimit } from \"./ratelimit\";\nimport type { Algorithm, MultiRegionContext } from \"./types\";\n\nimport type { Redis } from \"./types\";\n\nfunction randomId(): string {\n let result = \"\";\n const characters = \"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789\";\n const charactersLength = characters.length;\n for (let i = 0; i < 16; i++) {\n result += characters.charAt(Math.floor(Math.random() * charactersLength));\n }\n return result;\n}\n\nexport type MultiRegionRatelimitConfig = {\n /**\n * Instances of `@upstash/redis`\n * @see https://github.com/upstash/upstash-redis#quick-start\n */\n redis: Redis[];\n /**\n * The ratelimiter function to use.\n *\n * Choose one of the predefined ones or implement your own.\n * Available algorithms are exposed via static methods:\n * - MultiRegionRatelimit.fixedWindow\n */\n limiter: Algorithm<MultiRegionContext>;\n /**\n * All keys in redis are prefixed with this.\n *\n * @default `@upstash/ratelimit`\n */\n prefix?: string;\n\n /**\n * If enabled, the ratelimiter will keep a global cache of identifiers, that have\n * exhausted their ratelimit. In serverless environments this is only possible if\n * you create the ratelimiter instance outside of your handler function. While the\n * function is still hot, the ratelimiter can block requests without having to\n * request data from redis, thus saving time and money.\n *\n * Whenever an identifier has exceeded its limit, the ratelimiter will add it to an\n * internal list together with its reset timestamp. If the same identifier makes a\n * new request before it is reset, we can immediately reject it.\n *\n * Set to `false` to disable.\n *\n * If left undefined, a map is created automatically, but it can only work\n * if the map or the ratelimit instance is created outside your serverless function handler.\n */\n ephemeralCache?: Map<string, number> | false;\n\n /**\n * If set, the ratelimiter will allow requests to pass after this many milliseconds.\n *\n * Use this if you want to allow requests in case of network problems\n */\n timeout?: number;\n\n /**\n * If enabled, the ratelimiter will store analytics data in redis, which you can check out at\n * https://console.upstash.com/ratelimit\n *\n * @default false\n */\n analytics?: boolean;\n};\n\n/**\n * Ratelimiter using serverless redis from https://upstash.com/\n *\n * @example\n * ```ts\n * const { limit } = new MultiRegionRatelimit({\n * redis: Redis.fromEnv(),\n * limiter: MultiRegionRatelimit.fixedWindow(\n * 10, // Allow 10 requests per window of 30 minutes\n * \"30 m\", // interval of 30 minutes\n * )\n * })\n *\n * ```\n */\nexport class MultiRegionRatelimit extends Ratelimit<MultiRegionContext> {\n /**\n * Create a new Ratelimit instance by providing a `@upstash/redis` instance and the algorithn of your choice.\n */\n constructor(config: MultiRegionRatelimitConfig) {\n super({\n prefix: config.prefix,\n limiter: config.limiter,\n timeout: config.timeout,\n analytics: config.analytics,\n ctx: {\n redis: config.redis,\n cache: config.ephemeralCache ? 
new Cache(config.ephemeralCache) : undefined,\n },\n });\n }\n\n /**\n * Each request inside a fixed time increases a counter.\n * Once the counter reaches the maximum allowed number, all further requests are\n * rejected.\n *\n * **Pro:**\n *\n * - Newer requests are not starved by old ones.\n * - Low storage cost.\n *\n * **Con:**\n *\n * A burst of requests near the boundary of a window can result in a very\n * high request rate because two windows will be filled with requests quickly.\n *\n * @param tokens - How many requests a user can make in each time window.\n * @param window - A fixed timeframe\n */\n static fixedWindow(\n /**\n * How many requests are allowed per window.\n */\n tokens: number,\n /**\n * The duration in which `tokens` requests are allowed.\n */\n window: Duration,\n ): Algorithm<MultiRegionContext> {\n const windowDuration = ms(window);\n\n return () => ({\n async limit(ctx: MultiRegionContext, identifier: string, rate?: number) {\n if (ctx.cache) {\n const { blocked, reset } = ctx.cache.isBlocked(identifier);\n if (blocked) {\n return {\n success: false,\n limit: tokens,\n remaining: 0,\n reset: reset,\n pending: Promise.resolve(),\n };\n }\n }\n\n const requestId = randomId();\n const bucket = Math.floor(Date.now() / windowDuration);\n const key = [identifier, bucket].join(\":\");\n const incrementBy = rate ? Math.max(1, rate) : 1;\n\n const dbs: { redis: Redis; request: Promise<string[]> }[] = ctx.redis.map((redis) => ({\n redis,\n request: redis.eval(\n fixedWindowLimitScript,\n [key],\n [requestId, windowDuration, incrementBy],\n ) as Promise<string[]>,\n }));\n\n // The firstResponse is an array of string at every EVEN indexes and rate at which the tokens are used at every ODD indexes\n const firstResponse = await Promise.any(dbs.map((s) => s.request));\n\n const usedTokens = firstResponse.reduce((accTokens: number, usedToken, index) => {\n let parsedToken = 0;\n if (index % 2) {\n parsedToken = Number.parseInt(usedToken);\n }\n\n return accTokens + parsedToken;\n }, 0);\n\n const remaining = tokens - usedTokens;\n\n /**\n * If the length between two databases does not match, we sync the two databases\n */\n async function sync() {\n const individualIDs = await Promise.all(dbs.map((s) => s.request));\n\n const allIDs = Array.from(\n new Set(\n individualIDs\n .flatMap((_) => _)\n .reduce((acc: string[], curr, index) => {\n if (index % 2 === 0) {\n acc.push(curr);\n }\n return acc;\n }, []),\n ).values(),\n );\n\n for (const db of dbs) {\n const usedDbTokens = (await db.request).reduce(\n (accTokens: number, usedToken, index) => {\n let parsedToken = 0;\n if (index % 2) {\n parsedToken = Number.parseInt(usedToken);\n }\n\n return accTokens + parsedToken;\n },\n 0,\n );\n\n const dbIds = (await db.request).reduce((ids: string[], currentId, index) => {\n if (index % 2 === 0) {\n ids.push(currentId);\n }\n return ids;\n }, []);\n /**\n * If the bucket in this db is already full, it doesn't matter which ids it contains.\n * So we do not have to sync.\n */\n if (usedDbTokens >= tokens) {\n continue;\n }\n const diff = allIDs.filter((id) => !dbIds.includes(id));\n /**\n * Don't waste a request if there is nothing to send\n */\n if (diff.length === 0) {\n continue;\n }\n\n for (const requestId of diff) {\n await db.redis.hset(key, { [requestId]: incrementBy });\n }\n }\n }\n\n /**\n * Do not await sync. 
This should not run in the critical path.\n */\n\n const success = remaining > 0;\n const reset = (bucket + 1) * windowDuration;\n\n if (ctx.cache && !success) {\n ctx.cache.blockUntil(identifier, reset);\n }\n return {\n success,\n limit: tokens,\n remaining,\n reset,\n pending: sync(),\n };\n },\n async getRemaining(ctx: MultiRegionContext, identifier: string) {\n const bucket = Math.floor(Date.now() / windowDuration);\n const key = [identifier, bucket].join(\":\");\n\n const dbs: { redis: Redis; request: Promise<string[]> }[] = ctx.redis.map((redis) => ({\n redis,\n request: redis.eval(fixedWindowRemainingTokensScript, [key], [null]) as Promise<string[]>,\n }));\n\n // The firstResponse is an array of string at every EVEN indexes and rate at which the tokens are used at every ODD indexes\n const firstResponse = await Promise.any(dbs.map((s) => s.request));\n const usedTokens = firstResponse.reduce((accTokens: number, usedToken, index) => {\n let parsedToken = 0;\n if (index % 2) {\n parsedToken = Number.parseInt(usedToken);\n }\n\n return accTokens + parsedToken;\n }, 0);\n\n return Math.max(0, tokens - usedTokens);\n },\n async resetTokens(ctx: MultiRegionContext, identifier: string) {\n const pattern = [identifier, \"*\"].join(\":\");\n for (const db of ctx.redis) {\n await db.eval(resetScript, [pattern], [null]);\n }\n },\n });\n }\n\n /**\n * Combined approach of `slidingLogs` and `fixedWindow` with lower storage\n * costs than `slidingLogs` and improved boundary behavior by calculating a\n * weighted score between two windows.\n *\n * **Pro:**\n *\n * Good performance allows this to scale to very high loads.\n *\n * **Con:**\n *\n * Nothing major.\n *\n * @param tokens - How many requests a user can make in each time window.\n * @param window - The duration in which the user can max X requests.\n */\n static slidingWindow(\n /**\n * How many requests are allowed per window.\n */\n tokens: number,\n /**\n * The duration in which `tokens` requests are allowed.\n */\n window: Duration,\n ): Algorithm<MultiRegionContext> {\n const windowSize = ms(window);\n\n const windowDuration = ms(window);\n\n return () => ({\n async limit(ctx: MultiRegionContext, identifier: string, rate?: number) {\n // if (ctx.cache) {\n // const { blocked, reset } = ctx.cache.isBlocked(identifier);\n // if (blocked) {\n // return {\n // success: false,\n // limit: tokens,\n // remaining: 0,\n // reset: reset,\n // pending: Promise.resolve(),\n // };\n // }\n // }\n\n const requestId = randomId();\n const now = Date.now();\n\n const currentWindow = Math.floor(now / windowSize);\n const currentKey = [identifier, currentWindow].join(\":\");\n const previousWindow = currentWindow - 1;\n const previousKey = [identifier, previousWindow].join(\":\");\n const incrementBy = rate ? 
Math.max(1, rate) : 1;\n\n const dbs = ctx.redis.map((redis) => ({\n redis,\n request: redis.eval(\n slidingWindowLimitScript,\n [currentKey, previousKey],\n [tokens, now, windowDuration, requestId, incrementBy],\n // lua seems to return `1` for true and `null` for false\n ) as Promise<[string[], string[], 1 | null]>,\n }));\n\n const percentageInCurrent = (now % windowDuration) / windowDuration;\n const [current, previous, success] = await Promise.any(dbs.map((s) => s.request));\n\n const previousUsedTokens = previous.reduce((accTokens: number, usedToken, index) => {\n let parsedToken = 0;\n if (index % 2) {\n parsedToken = Number.parseInt(usedToken);\n }\n\n return accTokens + parsedToken;\n }, 0);\n\n const currentUsedTokens = current.reduce((accTokens: number, usedToken, index) => {\n let parsedToken = 0;\n if (index % 2) {\n parsedToken = Number.parseInt(usedToken);\n }\n\n return accTokens + parsedToken;\n }, 0);\n\n const previousPartialUsed = previousUsedTokens * (1 - percentageInCurrent);\n\n const usedTokens = previousPartialUsed + currentUsedTokens;\n\n const remaining = tokens - usedTokens;\n\n /**\n * If a database differs from the consensus, we sync it\n */\n async function sync() {\n const res = await Promise.all(dbs.map((s) => s.request));\n const allCurrentIds = res\n .flatMap(([current]) => current)\n .reduce((accCurrentIds: string[], curr, index) => {\n if (index % 2 === 0) {\n accCurrentIds.push(curr);\n }\n return accCurrentIds;\n }, []);\n\n for (const db of dbs) {\n const [_current, previous, _success] = await db.request;\n const dbIds = previous.reduce((ids: string[], currentId, index) => {\n if (index % 2 === 0) {\n ids.push(currentId);\n }\n return ids;\n }, []);\n\n const usedDbTokens = previous.reduce((accTokens: number, usedToken, index) => {\n let parsedToken = 0;\n if (index % 2) {\n parsedToken = Number.parseInt(usedToken);\n }\n\n return accTokens + parsedToken;\n }, 0);\n /**\n * If the bucket in this db is already full, it doesn't matter which ids it contains.\n * So we do not have to sync.\n */\n if (usedDbTokens >= tokens) {\n continue;\n }\n const diff = allCurrentIds.filter((id) => !dbIds.includes(id));\n /**\n * Don't waste a request if there is nothing to send\n */\n if (diff.length === 0) {\n continue;\n }\n\n for (const requestId of diff) {\n await db.redis.hset(currentKey, { [requestId]: incrementBy });\n }\n }\n }\n\n // const success = remaining >= 0;\n const reset = (currentWindow + 1) * windowDuration;\n if (ctx.cache && !success) {\n ctx.cache.blockUntil(identifier, reset);\n }\n return {\n success: Boolean(success),\n limit: tokens,\n remaining: Math.max(0, remaining),\n reset,\n pending: sync(),\n };\n },\n async getRemaining(ctx: MultiRegionContext, identifier: string) {\n const now = Date.now();\n\n const currentWindow = Math.floor(now / windowSize);\n const currentKey = [identifier, currentWindow].join(\":\");\n const previousWindow = currentWindow - 1;\n const previousKey = [identifier, previousWindow].join(\":\");\n\n const dbs = ctx.redis.map((redis) => ({\n redis,\n request: redis.eval(\n slidingWindowRemainingTokensScript,\n [currentKey, previousKey],\n [now, windowSize],\n // lua seems to return `1` for true and `null` for false\n ) as Promise<number>,\n }));\n\n const usedTokens = await Promise.any(dbs.map((s) => s.request));\n return Math.max(0, tokens - usedTokens);\n },\n async resetTokens(ctx: MultiRegionContext, identifier: string) {\n const pattern = [identifier, \"*\"].join(\":\");\n for (const db of ctx.redis) {\n await 
db.eval(resetScript, [pattern], [null]);\n }\n },\n });\n }\n}\n","export const fixedWindowLimitScript = `\n local key = KEYS[1]\n local window = ARGV[1]\n local incrementBy = ARGV[2] -- increment rate per request at a given value, default is 1\n\n local r = redis.call(\"INCRBY\", key, incrementBy)\n if r == tonumber(incrementBy) then\n -- The first time this key is set, the value will be equal to incrementBy.\n -- So we only need the expire command once\n redis.call(\"PEXPIRE\", key, window)\n end\n\n return r\n`;\n\nexport const fixedWindowRemainingTokensScript = `\n local key = KEYS[1]\n local tokens = 0\n\n local value = redis.call('GET', key)\n if value then\n tokens = value\n end\n return tokens\n `;\n\nexport const slidingWindowLimitScript = `\n local currentKey = KEYS[1] -- identifier including prefixes\n local previousKey = KEYS[2] -- key of the previous bucket\n local tokens = tonumber(ARGV[1]) -- tokens per window\n local now = ARGV[2] -- current timestamp in milliseconds\n local window = ARGV[3] -- interval in milliseconds\n local incrementBy = ARGV[4] -- increment rate per request at a given value, default is 1\n\n local requestsInCurrentWindow = redis.call(\"GET\", currentKey)\n if requestsInCurrentWindow == false then\n requestsInCurrentWindow = 0\n end\n\n local requestsInPreviousWindow = redis.call(\"GET\", previousKey)\n if requestsInPreviousWindow == false then\n requestsInPreviousWindow = 0\n end\n local percentageInCurrent = ( now % window ) / window\n -- weighted requests to consider from the previous window\n requestsInPreviousWindow = math.floor(( 1 - percentageInCurrent ) * requestsInPreviousWindow)\n if requestsInPreviousWindow + requestsInCurrentWindow >= tokens then\n return -1\n end\n\n local newValue = redis.call(\"INCRBY\", currentKey, incrementBy)\n if newValue == tonumber(incrementBy) then\n -- The first time this key is set, the value will be equal to incrementBy.\n -- So we only need the expire command once\n redis.call(\"PEXPIRE\", currentKey, window * 2 + 1000) -- Enough time to overlap with a new window + 1 second\n end\n return tokens - ( newValue + requestsInPreviousWindow )\n`;\n\nexport const slidingWindowRemainingTokensScript = `\n local currentKey = KEYS[1] -- identifier including prefixes\n local previousKey = KEYS[2] -- key of the previous bucket\n local now = ARGV[1] -- current timestamp in milliseconds\n local window = ARGV[2] -- interval in milliseconds\n\n local requestsInCurrentWindow = redis.call(\"GET\", currentKey)\n if requestsInCurrentWindow == false then\n requestsInCurrentWindow = 0\n end\n\n local requestsInPreviousWindow = redis.call(\"GET\", previousKey)\n if requestsInPreviousWindow == false then\n requestsInPreviousWindow = 0\n end\n\n local percentageInCurrent = ( now % window ) / window\n -- weighted requests to consider from the previous window\n requestsInPreviousWindow = math.floor(( 1 - percentageInCurrent ) * requestsInPreviousWindow)\n\n return requestsInPreviousWindow + requestsInCurrentWindow\n`;\n\nexport const tokenBucketLimitScript = `\n local key = KEYS[1] -- identifier including prefixes\n local maxTokens = tonumber(ARGV[1]) -- maximum number of tokens\n local interval = tonumber(ARGV[2]) -- size of the window in milliseconds\n local refillRate = tonumber(ARGV[3]) -- how many tokens are refilled after each interval\n local now = tonumber(ARGV[4]) -- current timestamp in milliseconds\n local incrementBy = tonumber(ARGV[5]) -- how many tokens to consume, default is 1\n \n local bucket = redis.call(\"HMGET\", key, 
\"refilledAt\", \"tokens\")\n \n local refilledAt\n local tokens\n\n if bucket[1] == false then\n refilledAt = now\n tokens = maxTokens\n else\n refilledAt = tonumber(bucket[1])\n tokens = tonumber(bucket[2])\n end\n \n if now >= refilledAt + interval then\n local numRefills = math.floor((now - refilledAt) / interval)\n tokens = math.min(maxTokens, tokens + numRefills * refillRate)\n\n refilledAt = refilledAt + numRefills * interval\n end\n\n if tokens == 0 then\n return {-1, refilledAt + interval}\n end\n\n local remaining = tokens - incrementBy\n local expireAt = math.ceil(((maxTokens - remaining) / refillRate)) * interval\n \n redis.call(\"HSET\", key, \"refilledAt\", refilledAt, \"tokens\", remaining)\n redis.call(\"PEXPIRE\", key, expireAt)\n return {remaining, refilledAt + interval}\n`;\n\nexport const tokenBucketRemainingTokensScript = `\n local key = KEYS[1]\n local maxTokens = tonumber(ARGV[1])\n \n local bucket = redis.call(\"HMGET\", key, \"tokens\")\n\n if bucket[1] == false then\n return maxTokens\n end\n \n return tonumber(bucket[1])\n`;\n\nexport const cachedFixedWindowLimitScript = `\n local key = KEYS[1]\n local window = ARGV[1]\n local incrementBy = ARGV[2] -- increment rate per request at a given value, default is 1\n\n local r = redis.call(\"INCRBY\", key, incrementBy)\n if r == incrementBy then\n -- The first time this key is set, the value will be equal to incrementBy.\n -- So we only need the expire command once\n redis.call(\"PEXPIRE\", key, window)\n end\n \n return r\n`;\n\nexport const cachedFixedWindowRemainingTokenScript = `\n local key = KEYS[1]\n local tokens = 0\n\n local value = redis.call('GET', key)\n if value then\n tokens = value\n end\n return tokens\n`;\n","import type { Duration } from \"./duration\";\nimport { ms } from \"./duration\";\nimport { resetScript } from \"./lua-scripts/reset\";\nimport {\n cachedFixedWindowLimitScript,\n cachedFixedWindowRemainingTokenScript,\n fixedWindowLimitScript,\n fixedWindowRemainingTokensScript,\n slidingWindowLimitScript,\n slidingWindowRemainingTokensScript,\n tokenBucketLimitScript,\n tokenBucketRemainingTokensScript,\n} from \"./lua-scripts/single\";\nimport { Ratelimit } from \"./ratelimit\";\nimport type { Algorithm, RegionContext } from \"./types\";\nimport type { Redis } from \"./types\";\n\nexport type RegionRatelimitConfig = {\n /**\n * Instance of `@upstash/redis`\n * @see https://github.com/upstash/upstash-redis#quick-start\n */\n redis: Redis;\n /**\n * The ratelimiter function to use.\n *\n * Choose one of the predefined ones or implement your own.\n * Available algorithms are exposed via static methods:\n * - Ratelimiter.fixedWindow\n * - Ratelimiter.slidingWindow\n * - Ratelimiter.tokenBucket\n */\n limiter: Algorithm<RegionContext>;\n /**\n * All keys in redis are prefixed with this.\n *\n * @default `@upstash/ratelimit`\n */\n prefix?: string;\n\n /**\n * If enabled, the ratelimiter will keep a global cache of identifiers, that have\n * exhausted their ratelimit. In serverless environments this is only possible if\n * you create the ratelimiter instance outside of your handler function. While the\n * function is still hot, the ratelimiter can block requests without having to\n * request data from redis, thus saving time and money.\n *\n * Whenever an identifier has exceeded its limit, the ratelimiter will add it to an\n * internal list together with its reset timestamp. 
If the same identifier makes a\n * new request before it is reset, we can immediately reject it.\n *\n * Set to `false` to disable.\n *\n * If left undefined, a map is created automatically, but it can only work\n * if the map or the ratelimit instance is created outside your serverless function handler.\n */\n ephemeralCache?: Map<string, number> | false;\n\n /**\n * If set, the ratelimiter will allow requests to pass after this many milliseconds.\n *\n * Use this if you want to allow requests in case of network problems\n */\n timeout?: number;\n\n /**\n * If enabled, the ratelimiter will store analytics data in redis, which you can check out at\n * https://console.upstash.com/ratelimit\n *\n * @default false\n */\n analytics?: boolean;\n};\n\n/**\n * Ratelimiter using serverless redis from https://upstash.com/\n *\n * @example\n * ```ts\n * const { limit } = new Ratelimit({\n * redis: Redis.fromEnv(),\n * limiter: Ratelimit.slidingWindow(\n * \"30 m\", // interval of 30 minutes\n * 10, // Allow 10 requests per window of 30 minutes\n * )\n * })\n *\n * ```\n */\nexport class RegionRatelimit extends Ratelimit<RegionContext> {\n /**\n * Create a new Ratelimit instance by providing a `@upstash/redis` instance and the algorithm of your choice.\n */\n\n constructor(config: RegionRatelimitConfig) {\n super({\n prefix: config.prefix,\n limiter: config.limiter,\n timeout: config.timeout,\n analytics: config.analytics,\n ctx: {\n redis: config.redis,\n },\n ephemeralCache: config.ephemeralCache,\n });\n }\n\n /**\n * Each request inside a fixed time increases a counter.\n * Once the counter reaches the maximum allowed number, all further requests are\n * rejected.\n *\n * **Pro:**\n *\n * - Newer requests are not starved by old ones.\n * - Low storage cost.\n *\n * **Con:**\n *\n * A burst of requests near the boundary of a window can result in a very\n * high request rate because two windows will be filled with requests quickly.\n *\n * @param tokens - How many requests a user can make in each time window.\n * @param window - A fixed timeframe\n */\n static fixedWindow(\n /**\n * How many requests are allowed per window.\n */\n tokens: number,\n /**\n * The duration in which `tokens` requests are allowed.\n */\n window: Duration,\n ): Algorithm<RegionContext> {\n const windowDuration = ms(window);\n return () => ({\n async limit(ctx: RegionContext, identifier: string, rate?: number) {\n const bucket = Math.floor(Date.now() / windowDuration);\n const key = [identifier, bucket].join(\":\");\n if (ctx.cache) {\n const { blocked, reset } = ctx.cache.isBlocked(identifier);\n if (blocked) {\n return {\n success: false,\n limit: tokens,\n remaining: 0,\n reset: reset,\n pending: Promise.resolve(),\n };\n }\n }\n\n const incrementBy = rate ? 
Math.max(1, rate) : 1;\n\n const usedTokensAfterUpdate = (await ctx.redis.eval(\n fixedWindowLimitScript,\n [key],\n [windowDuration, incrementBy],\n )) as number;\n\n const success = usedTokensAfterUpdate <= tokens;\n\n const remainingTokens = Math.max(0, tokens - usedTokensAfterUpdate);\n\n const reset = (bucket + 1) * windowDuration;\n if (ctx.cache && !success) {\n ctx.cache.blockUntil(identifier, reset);\n }\n\n return {\n success,\n limit: tokens,\n remaining: remainingTokens,\n reset,\n pending: Promise.resolve(),\n };\n },\n async getRemaining(ctx: RegionContext, identifier: string) {\n const bucket = Math.floor(Date.now() / windowDuration);\n const key = [identifier, bucket].join(\":\");\n\n const usedTokens = (await ctx.redis.eval(\n fixedWindowRemainingTokensScript,\n [key],\n [null],\n )) as number;\n\n return Math.max(0, tokens - usedTokens);\n },\n async resetTokens(ctx: RegionContext, identifier: string) {\n const pattern = [identifier, \"*\"].join(\":\");\n\n await ctx.redis.eval(resetScript, [pattern], [null]);\n },\n });\n }\n\n /**\n * Combined approach of `slidingLogs` and `fixedWindow` with lower storage\n * costs than `slidingLogs` and improved boundary behavior by calculating a\n * weighted score between two windows.\n *\n * **Pro:**\n *\n * Good performance allows this to scale to very high loads.\n *\n * **Con:**\n *\n * Nothing major.\n *\n * @param tokens - How many requests a user can make in each time window.\n * @param window - The duration in which the user can max X requests.\n */\n static slidingWindow(\n /**\n * How many requests are allowed per window.\n */\n tokens: number,\n /**\n * The duration in which `tokens` requests are allowed.\n */\n window: Duration,\n ): Algorithm<RegionContext> {\n const windowSize = ms(window);\n return () => ({\n async limit(ctx: RegionContext, identifier: string, rate?: number) {\n const now = Date.now();\n\n const currentWindow = Math.floor(now / windowSize);\n const currentKey = [identifier, currentWindow].join(\":\");\n const previousWindow = currentWindow - 1;\n const previousKey = [identifier, previousWindow].join(\":\");\n\n if (ctx.cache) {\n const { blocked, reset } = ctx.cache.isBlocked(identifier);\n if (blocked) {\n return {\n success: false,\n limit: tokens,\n remaining: 0,\n reset: reset,\n pending: Promise.resolve(),\n };\n }\n }\n\n const incrementBy = rate ? 
Math.max(1, rate) : 1;\n\n const remainingTokens = (await ctx.redis.eval(\n slidingWindowLimitScript,\n [currentKey, previousKey],\n [tokens, now, windowSize, incrementBy],\n )) as number;\n\n const success = remainingTokens >= 0;\n\n const reset = (currentWindow + 1) * windowSize;\n if (ctx.cache && !success) {\n ctx.cache.blockUntil(identifier, reset);\n }\n return {\n success,\n limit: tokens,\n remaining: Math.max(0, remainingTokens),\n reset,\n pending: Promise.resolve(),\n };\n },\n async getRemaining(ctx: RegionContext, identifier: string) {\n const now = Date.now();\n const currentWindow = Math.floor(now / windowSize);\n const currentKey = [identifier, currentWindow].join(\":\");\n const previousWindow = currentWindow - 1;\n const previousKey = [identifier, previousWindow].join(\":\");\n\n const usedTokens = (await ctx.redis.eval(\n slidingWindowRemainingTokensScript,\n [currentKey, previousKey],\n [now, windowSize],\n )) as number;\n\n return Math.max(0, tokens - usedTokens);\n },\n async resetTokens(ctx: RegionContext, identifier: string) {\n const pattern = [identifier, \"*\"].join(\":\");\n\n await ctx.redis.eval(resetScript, [pattern], [null]);\n },\n });\n }\n\n /**\n * You have a bucket filled with `{maxTokens}` tokens that refills constantly\n * at `{refillRate}` per `{interval}`.\n * Every request will remove one token from the bucket and if there is no\n * token to take, the request is rejected.\n *\n * **Pro:**\n *\n * - Bursts of requests are smoothed out and you can process them at a constant\n * rate.\n * - Allows to set a higher initial burst limit by setting `maxTokens` higher\n * than `refillRate`\n */\n static tokenBucket(\n /**\n * How many tokens are refilled per `interval`\n *\n * An interval of `10s` and refillRate of 5 will cause a new token to be added every 2 seconds.\n */\n refillRate: number,\n /**\n * The interval for the `refillRate`\n */\n interval: Duration,\n /**\n * Maximum number of tokens.\n * A newly created bucket starts with this many tokens.\n * Useful to allow higher burst limits.\n */\n maxTokens: number,\n ): Algorithm<RegionContext> {\n const intervalDuration = ms(interval);\n return () => ({\n async limit(ctx: RegionContext, identifier: string, rate?: number) {\n if (ctx.cache) {\n const { blocked, reset } = ctx.cache.isBlocked(identifier);\n if (blocked) {\n return {\n success: false,\n limit: maxTokens,\n remaining: 0,\n reset: reset,\n pending: Promise.resolve(),\n };\n }\n }\n\n const now = Date.now();\n\n const incrementBy = rate ? 
Math.max(1, rate) : 1;\n\n const [remaining, reset] = (await ctx.redis.eval(\n tokenBucketLimitScript,\n [identifier],\n [maxTokens, intervalDuration, refillRate, now, incrementBy],\n )) as [number, number];\n\n const success = remaining >= 0;\n if (ctx.cache && !success) {\n ctx.cache.blockUntil(identifier, reset);\n }\n\n return {\n success,\n limit: maxTokens,\n remaining,\n reset,\n pending: Promise.resolve(),\n };\n },\n async getRemaining(ctx: RegionContext, identifier: string) {\n const remainingTokens = (await ctx.redis.eval(\n tokenBucketRemainingTokensScript,\n [identifier],\n [maxTokens],\n )) as number;\n return remainingTokens;\n },\n async resetTokens(ctx: RegionContext, identifier: string) {\n const pattern = identifier;\n\n await ctx.redis.eval(resetScript, [pattern], [null]);\n },\n });\n }\n /**\n * cachedFixedWindow first uses the local cache to decide if a request may pass and then updates\n * it asynchronously.\n * This is experimental and not yet recommended for production use.\n *\n * @experimental\n *\n * Each request inside a fixed time increases a counter.\n * Once the counter reaches the maximum allowed number, all further requests are\n * rejected.\n *\n * **Pro:**\n *\n * - Newer requests are not starved by old ones.\n * - Low storage cost.\n *\n * **Con:**\n *\n * A burst of requests near the boundary of a window can result in a very\n * high request rate because two windows will be filled with requests quickly.\n *\n * @param tokens - How many requests a user can make in each time window.\n * @param window - A fixed timeframe\n */\n static cachedFixedWindow(\n /**\n * How many requests are allowed per window.\n */\n tokens: number,\n /**\n * The duration in which `tokens` requests are allowed.\n */\n window: Duration,\n ): Algorithm<RegionContext> {\n const windowDuration = ms(window);\n\n return () => ({\n async limit(ctx: RegionContext, identifier: string, rate?: number) {\n if (!ctx.cache) {\n throw new Error(\"This algorithm requires a cache\");\n }\n const bucket = Math.floor(Date.now() / windowDuration);\n const key = [identifier, bucket].join(\":\");\n const reset = (bucket + 1) * windowDuration;\n const incrementBy = rate ? Math.max(1, rate) : 1;\n\n const hit = typeof ctx.cache.get(key) === \"number\";\n if (hit) {\n const cachedTokensAfterUpdate = ctx.cache.incr(key);\n const success = cachedTokensAfterUpdate < tokens;\n\n const pending = success\n ? ctx.redis\n .eval(cachedFixedWindowLimitScript, [key], [windowDuration, incrementBy])\n .then((t) => {\n ctx.cache!.set(key, t as number);\n })\n : Promise.resolve();\n\n return {\n success,\n limit: tokens,\n remaining: tokens - cachedTokensAfterUpdate,\n reset: reset,\n pending,\n };\n }\n\n const usedTokensAfterUpdate = (await ctx.redis.eval(\n cachedFixedWindowLimitScript,\n [key],\n [windowDuration, incrementBy],\n )) as number;\n ctx.cache.set(key, usedTokensAfterUpdate);\n const remaining = tokens - usedTokensAfterUpdate;\n\n return {\n success: remaining >= 0,\n limit: tokens,\n remaining,\n reset: reset,\n pending: Promise.resolve(),\n };\n },\n async getRemaining(ctx: RegionContext, identifier: string) {\n if (!ctx.cache) {\n throw new Error(\"This algorithm requires a cache\");\n }\n\n const bucket = Math.floor(Date.now() / windowDuration);\n const key = [identifier, bucket].join(\":\");\n\n const hit = typeof ctx.cache.get(key) === \"number\";\n if (hit) {\n const cachedUsedTokens = ctx.cache.get(key) ?? 
0;\n return Math.max(0, tokens - cachedUsedTokens);\n }\n\n const usedTokens = (await ctx.redis.eval(\n cachedFixedWindowRemainingTokenScript,\n [key],\n [null],\n )) as number;\n return Math.max(0, tokens - usedTokens);\n },\n async resetTokens(ctx: RegionContext, identifier: string) {\n const pattern = [identifier, \"*\"].join(\":\");\n // Empty the cache\n if (!ctx.cache) {\n throw new Error(\"This algorithm requires a cache\");\n }\n ctx.cache.empty()\n await ctx.redis.eval(resetScript, [pattern], [null]);\n },\n });\n }\n}\n"],"mappings":";AAAA,SAAS,aAAa,qBAAqB;AAuBpC,IAAM,YAAN,MAAgB;AAAA,EACJ;AAAA,EACA,QAAQ;AAAA,EAEzB,YAAY,QAAyB;AACnC,SAAK,YAAY,IAAI,cAAc;AAAA;AAAA,MAEjC,OAAO,OAAO;AAAA,MACd,QAAQ;AAAA,MACR,QAAQ,OAAO,UAAU;AAAA,MACzB,WAAW;AAAA,IACb,CAAC;AAAA,EACH;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EASO,WAAW,KAAmC;AACnD,QAAI,OAAO,IAAI,QAAQ,aAAa;AAClC,aAAO,IAAI;AAAA,IACb;AACA,QAAI,OAAO,IAAI,OAAO,aAAa;AACjC,aAAO,IAAI;AAAA,IACb;AAEA,WAAO,CAAC;AAAA,EACV;AAAA,EAEA,MAAa,OAAO,OAA6B;AAC/C,UAAM,KAAK,UAAU,OAAO,KAAK,OAAO,KAAK;AAAA,EAC/C;AAAA,EAEA,MAAM,OACJ,QACA,QACwD;AACxD,UAAM,UAAU,MAAM,KAAK,UAAU,MAAM,KAAK,OAAO;AAAA,MACrD,QAAQ,CAAC,MAAM;AAAA,MACf,OAAO,CAAC,QAAQ,KAAK,IAAI,CAAC;AAAA,IAC5B,CAAC;AACD,WAAO;AAAA,EACT;AAAA,EACA,MAAa,SAAS,SAAS,GAAkE;AAC/F,UAAM,UAAU,MAAM,KAAK,UAAU,YAAY,KAAK,OAAO,cAAc;AAAA,MACzE,OAAO,CAAC,QAAQ,KAAK,IAAI,CAAC;AAAA,IAC5B,CAAC;AACD,UAAM,QAAQ,CAAC;AACf,eAAW,UAAU,SAAS;AAC5B,iBAAW,CAAC,GAAG,CAAC,KAAK,OAAO,QAAQ,MAAM,GAAG;AAC3C,YAAI,MAAM,QAAQ;AAChB;AAAA,QACF;AAEA,YAAI,CAAC,MAAM,CAAC,GAAG;AACb,gBAAM,CAAC,IAAI,EAAE,SAAS,GAAG,SAAS,EAAE;AAAA,QACtC;AAEA,cAAM,CAAC,EAAE,WAAW,EAAE,QAAQ;AAE9B,cAAM,CAAC,EAAE,WAAW,EAAE,SAAS;AAAA,MACjC;AAAA,IACF;AACA,WAAO;AAAA,EACT;AACF;;;ACzFO,IAAM,QAAN,MAAsC;AAAA;AAAA;AAAA;AAAA,EAI1B;AAAA,EAEjB,YAAY,OAA4B;AACtC,SAAK,QAAQ;AAAA,EACf;AAAA,EAEO,UAAU,YAAyD;AACxE,QAAI,CAAC,KAAK,MAAM,IAAI,UAAU,GAAG;AAC/B,aAAO,EAAE,SAAS,OAAO,OAAO,EAAE;AAAA,IACpC;AACA,UAAM,QAAQ,KAAK,MAAM,IAAI,UAAU;AACvC,QAAI,QAAQ,KAAK,IAAI,GAAG;AACtB,WAAK,MAAM,OAAO,UAAU;AAC5B,aAAO,EAAE,SAAS,OAAO,OAAO,EAAE;AAAA,IACpC;AAEA,WAAO,EAAE,SAAS,MAAM,MAAa;AAAA,EACvC;AAAA,EAEO,WAAW,YAAoB,OAAqB;AACzD,SAAK,MAAM,IAAI,YAAY,KAAK;AAAA,EAClC;AAAA,EAEO,IAAI,KAAa,OAAqB;AAC3C,SAAK,MAAM,IAAI,KAAK,KAAK;AAAA,EAC3B;AAAA,EACO,IAAI,KAA4B;AACrC,WAAO,KAAK,MAAM,IAAI,GAAG,KAAK;AAAA,EAChC;AAAA,EAEO,KAAK,KAAqB;AAC/B,QAAI,QAAQ,KAAK,MAAM,IAAI,GAAG,KAAK;AACnC,aAAS;AACT,SAAK,MAAM,IAAI,KAAK,KAAK;AACzB,WAAO;AAAA,EACT;AAAA,EAEO,QAAc;AACnB,SAAK,MAAM,MAAM;AAAA,EACnB;AACF;;;ACxCO,SAAS,GAAG,GAAqB;AACtC,QAAM,QAAQ,EAAE,MAAM,wBAAwB;AAC9C,MAAI,CAAC,OAAO;AACV,UAAM,IAAI,MAAM,gCAAgC,CAAC,EAAE;AAAA,EACrD;AACA,QAAM,OAAO,OAAO,SAAS,MAAM,CAAC,CAAC;AACrC,QAAM,OAAO,MAAM,CAAC;AAEpB,UAAQ,MAAM;AAAA,IACZ,KAAK;AACH,aAAO;AAAA,IACT,KAAK;AACH,aAAO,OAAO;AAAA,IAChB,KAAK;AACH,aAAO,OAAO,MAAO;AAAA,IACvB,KAAK;AACH,aAAO,OAAO,MAAO,KAAK;AAAA,IAC5B,KAAK;AACH,aAAO,OAAO,MAAO,KAAK,KAAK;AAAA,IAEjC;AACE,YAAM,IAAI,MAAM,gCAAgC,CAAC,EAAE;AAAA,EACvD;AACF;;;AC7BO,IAAM,yBAAyB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAgB/B,IAAM,mCAAmC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AASzC,IAAM,2BAA2B;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAoCjC,IAAM,qCAAqC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;;;AC7D3C,IAAM,cAAc;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;;;ACkFpB,IAAe,YAAf,MAAmD;AAAA,EACrC;AAAA,EA
EA;AAAA,EAEA;AAAA,EAEA;AAAA,EAEA;AAAA,EAEnB,YAAY,QAAmC;AAC7C,SAAK,MAAM,OAAO;AAClB,SAAK,UAAU,OAAO;AACtB,SAAK,UAAU,OAAO,WAAW;AACjC,SAAK,SAAS,OAAO,UAAU;AAC/B,SAAK,YAAY,OAAO,YACpB,IAAI,UAAU;AAAA,MACZ,OAAO,MAAM,QAAQ,KAAK,IAAI,KAAK,IAAI,KAAK,IAAI,MAAM,CAAC,IAAI,KAAK,IAAI;AAAA,MACpE,QAAQ,KAAK;AAAA,IACf,CAAC,IACD;AAEJ,QAAI,OAAO,0BAA0B,KAAK;AACxC,WAAK,IAAI,QAAQ,IAAI,MAAM,OAAO,cAAc;AAAA,IAClD,WAAW,OAAO,OAAO,mBAAmB,aAAa;AACvD,WAAK,IAAI,QAAQ,IAAI,MAAM,oBAAI,IAAI,CAAC;AAAA,IACtC;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAsCO,QAAQ,OACb,YACA,QAC+B;AAC/B,UAAM,MAAM,CAAC,KAAK,QAAQ,UAAU,EAAE,KAAK,GAAG;AAC9C,QAAI,YAAiB;AACrB,QAAI;AACF,YAAM,MAAoC,CAAC,KAAK,QAAQ,EAAE,MAAM,KAAK,KAAK,KAAK,KAAK,IAAI,CAAC;AACzF,UAAI,KAAK,UAAU,GAAG;AACpB,YAAI;AAAA,UACF,IAAI,QAAQ,CAAC,YAAY;AACvB,wBAAY,WAAW,MAAM;AAC3B,sBAAQ;AAAA,gBACN,SAAS;AAAA,gBACT,OAAO;AAAA,gBACP,WAAW;AAAA,gBACX,OAAO;AAAA,gBACP,SAAS,QAAQ,QAAQ;AAAA,cAC3B,CAAC;AAAA,YACH,GAAG,KAAK,OAAO;AAAA,UACjB,CAAC;AAAA,QACH;AAAA,MACF;AAEA,YAAM,MAAM,MAAM,QAAQ,KAAK,GAAG;AAClC,UAAI,KAAK,WAAW;AAClB,YAAI;AACF,gBAAM,MAAM,MAAM,KAAK,UAAU,WAAW,GAAG,IAAI;AACnD,gBAAM,aAAa,KAAK,UACrB,OAAO;AAAA,YACN;AAAA,YACA,MAAM,KAAK,IAAI;AAAA,YACf,SAAS,IAAI;AAAA,YACb,GAAG;AAAA,UACL,CAAC,EACA,MAAM,CAAC,QAAQ;AACd,oBAAQ,KAAK,8BAA8B,GAAG;AAAA,UAChD,CAAC;AACH,cAAI,UAAU,QAAQ,IAAI,CAAC,IAAI,SAAS,UAAU,CAAC;AAAA,QACrD,SAAS,KAAK;AACZ,kBAAQ,KAAK,8BAA8B,GAAG;AAAA,QAChD;AAAA,MACF;AACA,aAAO;AAAA,IACT,UAAE;AACA,UAAI,WAAW;AACb,qBAAa,SAAS;AAAA,MACxB;AAAA,IACF;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAwBO,kBAAkB,OAOvB,YAKA,YAC+B;AAC/B,QAAI,WAAW,GAAG;AAChB,YAAM,IAAI,MAAM,0BAA0B;AAAA,IAC5C;AACA,QAAI;AAEJ,UAAM,WAAW,KAAK,IAAI,IAAI;AAC9B,WAAO,MAAM;AACX,YAAM,MAAM,KAAK,MAAM,UAAU;AACjC,UAAI,IAAI,SAAS;AACf;AAAA,MACF;AACA,UAAI,IAAI,UAAU,GAAG;AACnB,cAAM,IAAI,MAAM,wBAAwB;AAAA,MAC1C;AAEA,YAAM,OAAO,KAAK,IAAI,IAAI,OAAO,QAAQ,IAAI,KAAK,IAAI;AACtD,YAAM,IAAI,QAAQ,CAAC,MAAM,WAAW,GAAG,IAAI,CAAC;AAE5C,UAAI,KAAK,IAAI,IAAI,UAAU;AACzB;AAAA,MACF;AAAA,IACF;AACA,WAAO;AAAA,EACT;AAAA,EAEO,kBAAkB,OAAO,eAAuB;AACrD,UAAM,UAAU,CAAC,KAAK,QAAQ,UAAU,EAAE,KAAK,GAAG;AAClD,UAAM,KAAK,QAAQ,EAAE,YAAY,KAAK,KAAK,OAAO;AAAA,EACpD;AAAA,EAEO,eAAe,OAAO,eAAwC;AACnE,UAAM,UAAU,CAAC,KAAK,QAAQ,UAAU,EAAE,KAAK,GAAG;AAElD,WAAO,MAAM,KAAK,QAAQ,EAAE,aAAa,KAAK,KAAK,OAAO;AAAA,EAC5D;AACF;;;AC/PA,SAAS,WAAmB;AAC1B,MAAI,SAAS;AACb,QAAM,aAAa;AACnB,QAAM,mBAAmB,WAAW;AACpC,WAAS,IAAI,GAAG,IAAI,IAAI,KAAK;AAC3B,cAAU,WAAW,OAAO,KAAK,MAAM,KAAK,OAAO,IAAI,gBAAgB,CAAC;AAAA,EAC1E;AACA,SAAO;AACT;AAwEO,IAAM,uBAAN,cAAmC,UAA8B;AAAA;AAAA;AAAA;AAAA,EAItE,YAAY,QAAoC;AAC9C,UAAM;AAAA,MACJ,QAAQ,OAAO;AAAA,MACf,SAAS,OAAO;AAAA,MAChB,SAAS,OAAO;AAAA,MAChB,WAAW,OAAO;AAAA,MAClB,KAAK;AAAA,QACH,OAAO,OAAO;AAAA,QACd,OAAO,OAAO,iBAAiB,IAAI,MAAM,OAAO,cAAc,IAAI;AAAA,MACpE;AAAA,IACF,CAAC;AAAA,EACH;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAoBA,OAAO,YAIL,QAIA,QAC+B;AAC/B,UAAM,iBAAiB,GAAG,MAAM;AAEhC,WAAO,OAAO;AAAA,MACZ,MAAM,MAAM,KAAyB,YAAoB,MAAe;AACtE,YAAI,IAAI,OAAO;AACb,gBAAM,EAAE,SAAS,OAAAA,OAAM,IAAI,IAAI,MAAM,UAAU,UAAU;AACzD,cAAI,SAAS;AACX,mBAAO;AAAA,cACL,SAAS;AAAA,cACT,OAAO;AAAA,cACP,WAAW;AAAA,cACX,OAAOA;AAAA,cACP,SAAS,QAAQ,QAAQ;AAAA,YAC3B;AAAA,UACF;AAAA,QACF;AAEA,cAAM,YAAY,SAAS;AAC3B,cAAM,SAAS,KAAK,MAAM,KAAK,IAAI,IAAI,cAAc;AACrD,cAAM,MAAM,CAAC,YAAY,MAAM,EAAE,KAAK,GAAG;AACzC,cAAM,cAAc,OAAO,KAAK,IAAI,GAAG,IAAI,IAAI;AAE/C,cAAM,MAAsD,IAAI,MAAM,IA
AI,CAAC,WAAW;AAAA,UACpF;AAAA,UACA,SAAS,MAAM;AAAA,YACb;AAAA,YACA,CAAC,GAAG;AAAA,YACJ,CAAC,WAAW,gBAAgB,WAAW;AAAA,UACzC;AAAA,QACF,EAAE;AAGF,cAAM,gBAAgB,MAAM,QAAQ,IAAI,IAAI,IAAI,CAAC,MAAM,EAAE,OAAO,CAAC;AAEjE,cAAM,aAAa,cAAc,OAAO,CAAC,WAAmB,WAAW,UAAU;AAC/E,cAAI,cAAc;AAClB,cAAI,QAAQ,GAAG;AACb,0BAAc,OAAO,SAAS,SAAS;AAAA,UACzC;AAEA,iBAAO,YAAY;AAAA,QACrB,GAAG,CAAC;AAEJ,cAAM,YAAY,SAAS;AAK3B,uBAAe,OAAO;AACpB,gBAAM,gBAAgB,MAAM,QAAQ,IAAI,IAAI,IAAI,CAAC,MAAM,EAAE,OAAO,CAAC;AAEjE,gBAAM,SAAS,MAAM;AAAA,YACnB,IAAI;AAAA,cACF,cACG,QAAQ,CAAC,MAAM,CAAC,EAChB,OAAO,CAAC,KAAe,MAAM,UAAU;AACtC,oBAAI,QAAQ,MAAM,GAAG;AACnB,sBAAI,KAAK,IAAI;AAAA,gBACf;AACA,uBAAO;AAAA,cACT,GAAG,CAAC,CAAC;AAAA,YACT,EAAE,OAAO;AAAA,UACX;AAEA,qBAAW,MAAM,KAAK;AACpB,kBAAM,gBAAgB,MAAM,GAAG,SAAS;AAAA,cACtC,CAAC,WAAmB,WAAW,UAAU;AACvC,oBAAI,cAAc;AAClB,oBAAI,QAAQ,GAAG;AACb,gCAAc,OAAO,SAAS,SAAS;AAAA,gBACzC;AAEA,uBAAO,YAAY;AAAA,cACrB;AAAA,cACA;AAAA,YACF;AAEA,kBAAM,SAAS,MAAM,GAAG,SAAS,OAAO,CAAC,KAAe,WAAW,UAAU;AAC3E,kBAAI,QAAQ,MAAM,GAAG;AACnB,oBAAI,KAAK,SAAS;AAAA,cACpB;AACA,qBAAO;AAAA,YACT,GAAG,CAAC,CAAC;AAKL,gBAAI,gBAAgB,QAAQ;AAC1B;AAAA,YACF;AACA,kBAAM,OAAO,OAAO,OAAO,CAAC,OAAO,CAAC,MAAM,SAAS,EAAE,CAAC;AAItD,gBAAI,KAAK,WAAW,GAAG;AACrB;AAAA,YACF;AAEA,uBAAWC,cAAa,MAAM;AAC5B,oBAAM,GAAG,MAAM,KAAK,KAAK,EAAE,CAACA,UAAS,GAAG,YAAY,CAAC;AAAA,YACvD;AAAA,UACF;AAAA,QACF;AAMA,cAAM,UAAU,YAAY;AAC5B,cAAM,SAAS,SAAS,KAAK;AAE7B,YAAI,IAAI,SAAS,CAAC,SAAS;AACzB,cAAI,MAAM,WAAW,YAAY,KAAK;AAAA,QACxC;AACA,eAAO;AAAA,UACL;AAAA,UACA,OAAO;AAAA,UACP;AAAA,UACA;AAAA,UACA,SAAS,KAAK;AAAA,QAChB;AAAA,MACF;AAAA,MACA,MAAM,aAAa,KAAyB,YAAoB;AAC9D,cAAM,SAAS,KAAK,MAAM,KAAK,IAAI,IAAI,cAAc;AACrD,cAAM,MAAM,CAAC,YAAY,MAAM,EAAE,KAAK,GAAG;AAEzC,cAAM,MAAsD,IAAI,MAAM,IAAI,CAAC,WAAW;AAAA,UACpF;AAAA,UACA,SAAS,MAAM,KAAK,kCAAkC,CAAC,GAAG,GAAG,CAAC,IAAI,CAAC;AAAA,QACrE,EAAE;AAGF,cAAM,gBAAgB,MAAM,QAAQ,IAAI,IAAI,IAAI,CAAC,MAAM,EAAE,OAAO,CAAC;AACjE,cAAM,aAAa,cAAc,OAAO,CAAC,WAAmB,WAAW,UAAU;AAC/E,cAAI,cAAc;AAClB,cAAI,QAAQ,GAAG;AACb,0BAAc,OAAO,SAAS,SAAS;AAAA,UACzC;AAEA,iBAAO,YAAY;AAAA,QACrB,GAAG,CAAC;AAEJ,eAAO,KAAK,IAAI,GAAG,SAAS,UAAU;AAAA,MACxC;AAAA,MACA,MAAM,YAAY,KAAyB,YAAoB;AAC7D,cAAM,UAAU,CAAC,YAAY,GAAG,EAAE,KAAK,GAAG;AAC1C,mBAAW,MAAM,IAAI,OAAO;AAC1B,gBAAM,GAAG,KAAK,aAAa,CAAC,OAAO,GAAG,CAAC,IAAI,CAAC;AAAA,QAC9C;AAAA,MACF;AAAA,IACF;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAkBA,OAAO,cAIL,QAIA,QAC+B;AAC/B,UAAM,aAAa,GAAG,MAAM;AAE5B,UAAM,iBAAiB,GAAG,MAAM;AAEhC,WAAO,OAAO;AAAA,MACZ,MAAM,MAAM,KAAyB,YAAoB,MAAe;AActE,cAAM,YAAY,SAAS;AAC3B,cAAM,MAAM,KAAK,IAAI;AAErB,cAAM,gBAAgB,KAAK,MAAM,MAAM,UAAU;AACjD,cAAM,aAAa,CAAC,YAAY,aAAa,EAAE,KAAK,GAAG;AACvD,cAAM,iBAAiB,gBAAgB;AACvC,cAAM,cAAc,CAAC,YAAY,cAAc,EAAE,KAAK,GAAG;AACzD,cAAM,cAAc,OAAO,KAAK,IAAI,GAAG,IAAI,IAAI;AAE/C,cAAM,MAAM,IAAI,MAAM,IAAI,CAAC,WAAW;AAAA,UACpC;AAAA,UACA,SAAS,MAAM;AAAA,YACb;AAAA,YACA,CAAC,YAAY,WAAW;AAAA,YACxB,CAAC,QAAQ,KAAK,gBAAgB,WAAW,WAAW;AAAA;AAAA,UAEtD;AAAA,QACF,EAAE;AAEF,cAAM,sBAAuB,MAAM,iBAAkB;AACrD,cAAM,CAAC,SAAS,UAAU,OAAO,IAAI,MAAM,QAAQ,IAAI,IAAI,IAAI,CAAC,MAAM,EAAE,OAAO,CAAC;AAEhF,cAAM,qBAAqB,SAAS,OAAO,CAAC,WAAmB,WAAW,UAAU;AAClF,cAAI,cAAc;AAClB,cAAI,QAAQ,GAAG;AACb,0BAAc,OAAO,SAAS,SAAS;AAAA,UACzC;AAEA,iBAAO,YAAY;AAAA,QACrB,GAAG,CAAC;AAEJ,cAAM,oBAAoB,QAAQ,OAAO,CAAC,WAAmB,WAAW,UAAU;AAChF,cAAI,cAAc;AAClB,cAAI,QAAQ,GAAG;AACb,0BAAc,OAAO,SAAS,SAAS;AAAA,UACzC;AAEA,iBAAO,YAAY;AAAA,QACrB,GAAG,CAAC;AAEJ,cAAM,sBAAsB,sBAAsB,IAAI;AAEtD,cAAM,aAAa,sBAAsB;AAEzC,cAAM,YAAY,SAAS;AAK3B,uBAAe,OAAO;AACpB,gBAAM,MAAM,MAAM,QAAQ,IAAI,IAAI,IAAI,CAAC,MAAM,EAAE,OAAO,CAAC;AACvD,gBAAM,gBAAgB,IACnB,QAAQ,CAAC,CAACC,QAAO,MAAMA,QAAO,
EAC9B,OAAO,CAAC,eAAyB,MAAM,UAAU;AAChD,gBAAI,QAAQ,MAAM,GAAG;AACnB,4BAAc,KAAK,IAAI;AAAA,YACzB;AACA,mBAAO;AAAA,UACT,GAAG,CAAC,CAAC;AAEP,qBAAW,MAAM,KAAK;AACpB,kBAAM,CAAC,UAAUC,WAAU,QAAQ,IAAI,MAAM,GAAG;AAChD,kBAAM,QAAQA,UAAS,OAAO,CAAC,KAAe,WAAW,UAAU;AACjE,kBAAI,QAAQ,MAAM,GAAG;AACnB,oBAAI,KAAK,SAAS;AAAA,cACpB;AACA,qBAAO;AAAA,YACT,GAAG,CAAC,CAAC;AAEL,kBAAM,eAAeA,UAAS,OAAO,CAAC,WAAmB,WAAW,UAAU;AAC5E,kBAAI,cAAc;AAClB,kBAAI,QAAQ,GAAG;AACb,8BAAc,OAAO,SAAS,SAAS;AAAA,cACzC;AAEA,qBAAO,YAAY;AAAA,YACrB,GAAG,CAAC;AAKJ,gBAAI,gBAAgB,QAAQ;AAC1B;AAAA,YACF;AACA,kBAAM,OAAO,cAAc,OAAO,CAAC,OAAO,CAAC,MAAM,SAAS,EAAE,CAAC;AAI7D,gBAAI,KAAK,WAAW,GAAG;AACrB;AAAA,YACF;AAEA,uBAAWF,cAAa,MAAM;AAC5B,oBAAM,GAAG,MAAM,KAAK,YAAY,EAAE,CAACA,UAAS,GAAG,YAAY,CAAC;AAAA,YAC9D;AAAA,UACF;AAAA,QACF;AAGA,cAAM,SAAS,gBAAgB,KAAK;AACpC,YAAI,IAAI,SAAS,CAAC,SAAS;AACzB,cAAI,MAAM,WAAW,YAAY,KAAK;AAAA,QACxC;AACA,eAAO;AAAA,UACL,SAAS,QAAQ,OAAO;AAAA,UACxB,OAAO;AAAA,UACP,WAAW,KAAK,IAAI,GAAG,SAAS;AAAA,UAChC;AAAA,UACA,SAAS,KAAK;AAAA,QAChB;AAAA,MACF;AAAA,MACA,MAAM,aAAa,KAAyB,YAAoB;AAC9D,cAAM,MAAM,KAAK,IAAI;AAErB,cAAM,gBAAgB,KAAK,MAAM,MAAM,UAAU;AACjD,cAAM,aAAa,CAAC,YAAY,aAAa,EAAE,KAAK,GAAG;AACvD,cAAM,iBAAiB,gBAAgB;AACvC,cAAM,cAAc,CAAC,YAAY,cAAc,EAAE,KAAK,GAAG;AAEzD,cAAM,MAAM,IAAI,MAAM,IAAI,CAAC,WAAW;AAAA,UACpC;AAAA,UACA,SAAS,MAAM;AAAA,YACb;AAAA,YACA,CAAC,YAAY,WAAW;AAAA,YACxB,CAAC,KAAK,UAAU;AAAA;AAAA,UAElB;AAAA,QACF,EAAE;AAEF,cAAM,aAAa,MAAM,QAAQ,IAAI,IAAI,IAAI,CAAC,MAAM,EAAE,OAAO,CAAC;AAC9D,eAAO,KAAK,IAAI,GAAG,SAAS,UAAU;AAAA,MACxC;AAAA,MACA,MAAM,YAAY,KAAyB,YAAoB;AAC7D,cAAM,UAAU,CAAC,YAAY,GAAG,EAAE,KAAK,GAAG;AAC1C,mBAAW,MAAM,IAAI,OAAO;AAC1B,gBAAM,GAAG,KAAK,aAAa,CAAC,OAAO,GAAG,CAAC,IAAI,CAAC;AAAA,QAC9C;AAAA,MACF;AAAA,IACF;AAAA,EACF;AACF;;;AC9dO,IAAMG,0BAAyB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAe/B,IAAMC,oCAAmC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAWzC,IAAMC,4BAA2B;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAiCjC,IAAMC,sCAAqC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAuB3C,IAAM,yBAAyB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAwC/B,IAAM,mCAAmC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAazC,IAAM,+BAA+B;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAerC,IAAM,wCAAwC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;;;AC7D9C,IAAM,kBAAN,cAA8B,UAAyB;AAAA;AAAA;AAAA;AAAA,EAK5D,YAAY,QAA+B;AACzC,UAAM;AAAA,MACJ,QAAQ,OAAO;AAAA,MACf,SAAS,OAAO;AAAA,MAChB,SAAS,OAAO;AAAA,MAChB,WAAW,OAAO;AAAA,MAClB,KAAK;AAAA,QACH,OAAO,OAAO;AAAA,MAChB;AAAA,MACA,gBAAgB,OAAO;AAAA,IACzB,CAAC;AAAA,EACH;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAoBA,OAAO,YAIL,QAIA,QAC0B;AAC1B,UAAM,iBAAiB,GAAG,MAAM;AAChC,WAAO,OAAO;AAAA,MACZ,MAAM,MAAM,KAAoB,YAAoB,MAAe;AACjE,cAAM,SAAS,KAAK,MAAM,KAAK,IAAI,IAAI,cAAc;AACrD,cAAM,MAAM,CAAC,YAAY,MAAM,EAAE,KAAK,GAAG;AACzC,YAAI,IAAI,OAAO;AACb,gBAAM,EAAE,SAAS,OAAAC,OAAM,IAAI,IAAI,MAAM,UAAU,UAAU;AACzD,cAAI,SAAS;AACX,mBAAO;AAAA,cACL,SAAS;AAAA,cACT,OAAO;AAAA,cACP,WAAW;AAAA,cACX,OAAOA;AAAA,cACP,SAAS,QAAQ,QAAQ;AAAA,YAC3B;AAAA,UACF;AAAA,QACF;AAEA,cAAM,cAAc,OAAO,KAAK,IAAI,GAAG,IAAI,IAAI;AAE/C,cAAM,wBAAyB,MAAM,IAAI,MAAM;AAAA,UAC7CC;AAAA,UACA,CAAC,GAAG;AAAA,UACJ,CAAC,gBAAgB,WAAW;AAAA,QAC9B;AAEA,cAAM,UAAU,yBAAyB;AAEzC,cAAM,kBAAkB,KAAK,IAAI,GAAG,SAAS
,qBAAqB;AAElE,cAAM,SAAS,SAAS,KAAK;AAC7B,YAAI,IAAI,SAAS,CAAC,SAAS;AACzB,cAAI,MAAM,WAAW,YAAY,KAAK;AAAA,QACxC;AAEA,eAAO;AAAA,UACL;AAAA,UACA,OAAO;AAAA,UACP,WAAW;AAAA,UACX;AAAA,UACA,SAAS,QAAQ,QAAQ;AAAA,QAC3B;AAAA,MACF;AAAA,MACA,MAAM,aAAa,KAAoB,YAAoB;AACzD,cAAM,SAAS,KAAK,MAAM,KAAK,IAAI,IAAI,cAAc;AACrD,cAAM,MAAM,CAAC,YAAY,MAAM,EAAE,KAAK,GAAG;AAEzC,cAAM,aAAc,MAAM,IAAI,MAAM;AAAA,UAClCC;AAAA,UACA,CAAC,GAAG;AAAA,UACJ,CAAC,IAAI;AAAA,QACP;AAEA,eAAO,KAAK,IAAI,GAAG,SAAS,UAAU;AAAA,MACxC;AAAA,MACA,MAAM,YAAY,KAAoB,YAAoB;AACxD,cAAM,UAAU,CAAC,YAAY,GAAG,EAAE,KAAK,GAAG;AAE1C,cAAM,IAAI,MAAM,KAAK,aAAa,CAAC,OAAO,GAAG,CAAC,IAAI,CAAC;AAAA,MACrD;AAAA,IACF;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAkBA,OAAO,cAIL,QAIA,QAC0B;AAC1B,UAAM,aAAa,GAAG,MAAM;AAC5B,WAAO,OAAO;AAAA,MACZ,MAAM,MAAM,KAAoB,YAAoB,MAAe;AACjE,cAAM,MAAM,KAAK,IAAI;AAErB,cAAM,gBAAgB,KAAK,MAAM,MAAM,UAAU;AACjD,cAAM,aAAa,CAAC,YAAY,aAAa,EAAE,KAAK,GAAG;AACvD,cAAM,iBAAiB,gBAAgB;AACvC,cAAM,cAAc,CAAC,YAAY,cAAc,EAAE,KAAK,GAAG;AAEzD,YAAI,IAAI,OAAO;AACb,gBAAM,EAAE,SAAS,OAAAF,OAAM,IAAI,IAAI,MAAM,UAAU,UAAU;AACzD,cAAI,SAAS;AACX,mBAAO;AAAA,cACL,SAAS;AAAA,cACT,OAAO;AAAA,cACP,WAAW;AAAA,cACX,OAAOA;AAAA,cACP,SAAS,QAAQ,QAAQ;AAAA,YAC3B;AAAA,UACF;AAAA,QACF;AAEA,cAAM,cAAc,OAAO,KAAK,IAAI,GAAG,IAAI,IAAI;AAE/C,cAAM,kBAAmB,MAAM,IAAI,MAAM;AAAA,UACvCG;AAAA,UACA,CAAC,YAAY,WAAW;AAAA,UACxB,CAAC,QAAQ,KAAK,YAAY,WAAW;AAAA,QACvC;AAEA,cAAM,UAAU,mBAAmB;AAEnC,cAAM,SAAS,gBAAgB,KAAK;AACpC,YAAI,IAAI,SAAS,CAAC,SAAS;AACzB,cAAI,MAAM,WAAW,YAAY,KAAK;AAAA,QACxC;AACA,eAAO;AAAA,UACL;AAAA,UACA,OAAO;AAAA,UACP,WAAW,KAAK,IAAI,GAAG,eAAe;AAAA,UACtC;AAAA,UACA,SAAS,QAAQ,QAAQ;AAAA,QAC3B;AAAA,MACF;AAAA,MACA,MAAM,aAAa,KAAoB,YAAoB;AACzD,cAAM,MAAM,KAAK,IAAI;AACrB,cAAM,gBAAgB,KAAK,MAAM,MAAM,UAAU;AACjD,cAAM,aAAa,CAAC,YAAY,aAAa,EAAE,KAAK,GAAG;AACvD,cAAM,iBAAiB,gBAAgB;AACvC,cAAM,cAAc,CAAC,YAAY,cAAc,EAAE,KAAK,GAAG;AAEzD,cAAM,aAAc,MAAM,IAAI,MAAM;AAAA,UAClCC;AAAA,UACA,CAAC,YAAY,WAAW;AAAA,UACxB,CAAC,KAAK,UAAU;AAAA,QAClB;AAEA,eAAO,KAAK,IAAI,GAAG,SAAS,UAAU;AAAA,MACxC;AAAA,MACA,MAAM,YAAY,KAAoB,YAAoB;AACxD,cAAM,UAAU,CAAC,YAAY,GAAG,EAAE,KAAK,GAAG;AAE1C,cAAM,IAAI,MAAM,KAAK,aAAa,CAAC,OAAO,GAAG,CAAC,IAAI,CAAC;AAAA,MACrD;AAAA,IACF;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAeA,OAAO,YAML,YAIA,UAMA,WAC0B;AAC1B,UAAM,mBAAmB,GAAG,QAAQ;AACpC,WAAO,OAAO;AAAA,MACZ,MAAM,MAAM,KAAoB,YAAoB,MAAe;AACjE,YAAI,IAAI,OAAO;AACb,gBAAM,EAAE,SAAS,OAAAJ,OAAM,IAAI,IAAI,MAAM,UAAU,UAAU;AACzD,cAAI,SAAS;AACX,mBAAO;AAAA,cACL,SAAS;AAAA,cACT,OAAO;AAAA,cACP,WAAW;AAAA,cACX,OAAOA;AAAA,cACP,SAAS,QAAQ,QAAQ;AAAA,YAC3B;AAAA,UACF;AAAA,QACF;AAEA,cAAM,MAAM,KAAK,IAAI;AAErB,cAAM,cAAc,OAAO,KAAK,IAAI,GAAG,IAAI,IAAI;AAE/C,cAAM,CAAC,WAAW,KAAK,IAAK,MAAM,IAAI,MAAM;AAAA,UAC1C;AAAA,UACA,CAAC,UAAU;AAAA,UACX,CAAC,WAAW,kBAAkB,YAAY,KAAK,WAAW;AAAA,QAC5D;AAEA,cAAM,UAAU,aAAa;AAC7B,YAAI,IAAI,SAAS,CAAC,SAAS;AACzB,cAAI,MAAM,WAAW,YAAY,KAAK;AAAA,QACxC;AAEA,eAAO;AAAA,UACL;AAAA,UACA,OAAO;AAAA,UACP;AAAA,UACA;AAAA,UACA,SAAS,QAAQ,QAAQ;AAAA,QAC3B;AAAA,MACF;AAAA,MACA,MAAM,aAAa,KAAoB,YAAoB;AACzD,cAAM,kBAAmB,MAAM,IAAI,MAAM;AAAA,UACvC;AAAA,UACA,CAAC,UAAU;AAAA,UACX,CAAC,SAAS;AAAA,QACZ;AACA,eAAO;AAAA,MACT;AAAA,MACA,MAAM,YAAY,KAAoB,YAAoB;AACxD,cAAM,UAAU;AAEhB,cAAM,IAAI,MAAM,KAAK,aAAa,CAAC,OAAO,GAAG,CAAC,IAAI,CAAC;AAAA,MACrD;AAAA,IACF;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAyBA,OAAO,kBAIL,QAIA,QAC0B;AAC1B,UAAM,iBAAiB,GAAG,MAAM;AAEhC,WAAO,OAAO;AAAA,MACZ,MAAM,MAAM,KAAoB,YAAoB,MAAe;AACjE,YAAI,CAAC,IAAI,OAAO;AACd,gBAAM,IAAI,MAAM,iCA
AiC;AAAA,QACnD;AACA,cAAM,SAAS,KAAK,MAAM,KAAK,IAAI,IAAI,cAAc;AACrD,cAAM,MAAM,CAAC,YAAY,MAAM,EAAE,KAAK,GAAG;AACzC,cAAM,SAAS,SAAS,KAAK;AAC7B,cAAM,cAAc,OAAO,KAAK,IAAI,GAAG,IAAI,IAAI;AAE/C,cAAM,MAAM,OAAO,IAAI,MAAM,IAAI,GAAG,MAAM;AAC1C,YAAI,KAAK;AACP,gBAAM,0BAA0B,IAAI,MAAM,KAAK,GAAG;AAClD,gBAAM,UAAU,0BAA0B;AAE1C,gBAAM,UAAU,UACZ,IAAI,MACH,KAAK,8BAA8B,CAAC,GAAG,GAAG,CAAC,gBAAgB,WAAW,CAAC,EACvE,KAAK,CAAC,MAAM;AACX,gBAAI,MAAO,IAAI,KAAK,CAAW;AAAA,UACjC,CAAC,IACD,QAAQ,QAAQ;AAEpB,iBAAO;AAAA,YACL;AAAA,YACA,OAAO;AAAA,YACP,WAAW,SAAS;AAAA,YACpB;AAAA,YACA;AAAA,UACF;AAAA,QACF;AAEA,cAAM,wBAAyB,MAAM,IAAI,MAAM;AAAA,UAC7C;AAAA,UACA,CAAC,GAAG;AAAA,UACJ,CAAC,gBAAgB,WAAW;AAAA,QAC9B;AACA,YAAI,MAAM,IAAI,KAAK,qBAAqB;AACxC,cAAM,YAAY,SAAS;AAE3B,eAAO;AAAA,UACL,SAAS,aAAa;AAAA,UACtB,OAAO;AAAA,UACP;AAAA,UACA;AAAA,UACA,SAAS,QAAQ,QAAQ;AAAA,QAC3B;AAAA,MACF;AAAA,MACA,MAAM,aAAa,KAAoB,YAAoB;AACzD,YAAI,CAAC,IAAI,OAAO;AACd,gBAAM,IAAI,MAAM,iCAAiC;AAAA,QACnD;AAEA,cAAM,SAAS,KAAK,MAAM,KAAK,IAAI,IAAI,cAAc;AACrD,cAAM,MAAM,CAAC,YAAY,MAAM,EAAE,KAAK,GAAG;AAEzC,cAAM,MAAM,OAAO,IAAI,MAAM,IAAI,GAAG,MAAM;AAC1C,YAAI,KAAK;AACP,gBAAM,mBAAmB,IAAI,MAAM,IAAI,GAAG,KAAK;AAC/C,iBAAO,KAAK,IAAI,GAAG,SAAS,gBAAgB;AAAA,QAC9C;AAEA,cAAM,aAAc,MAAM,IAAI,MAAM;AAAA,UAClC;AAAA,UACA,CAAC,GAAG;AAAA,UACJ,CAAC,IAAI;AAAA,QACP;AACA,eAAO,KAAK,IAAI,GAAG,SAAS,UAAU;AAAA,MACxC;AAAA,MACA,MAAM,YAAY,KAAoB,YAAoB;AACxD,cAAM,UAAU,CAAC,YAAY,GAAG,EAAE,KAAK,GAAG;AAE1C,YAAI,CAAC,IAAI,OAAO;AACd,gBAAM,IAAI,MAAM,iCAAiC;AAAA,QACnD;AACA,YAAI,MAAM,MAAM;AAChB,cAAM,IAAI,MAAM,KAAK,aAAa,CAAC,OAAO,GAAG,CAAC,IAAI,CAAC;AAAA,MACrD;AAAA,IACF;AAAA,EACF;AACF;","names":["reset","requestId","current","previous","fixedWindowLimitScript","fixedWindowRemainingTokensScript","slidingWindowLimitScript","slidingWindowRemainingTokensScript","reset","fixedWindowLimitScript","fixedWindowRemainingTokensScript","slidingWindowLimitScript","slidingWindowRemainingTokensScript"]}
1
+ {"version":3,"sources":["../src/analytics.ts","../src/cache.ts","../src/duration.ts","../src/lua-scripts/multi.ts","../src/lua-scripts/reset.ts","../src/ratelimit.ts","../src/multi.ts","../src/lua-scripts/single.ts","../src/single.ts"],"sourcesContent":["import { Analytics as CoreAnalytics, Aggregate } from \"@upstash/core-analytics\";\nimport type { Redis } from \"./types\";\n\nexport type Geo = {\n country?: string;\n city?: string;\n region?: string;\n ip?: string;\n};\nexport type Event = Geo & {\n identifier: string;\n time: number;\n success: boolean;\n};\n\nexport type AnalyticsConfig = {\n redis: Redis;\n prefix?: string;\n};\n\n/**\n * The Analytics package is experimental and can change at any time.\n */\nexport class Analytics {\n private readonly analytics: CoreAnalytics;\n private readonly table = \"events\";\n\n constructor(config: AnalyticsConfig) {\n this.analytics = new CoreAnalytics({\n // @ts-expect-error we need to fix the types in core-analytics, it should only require the methods it needs, not the whole sdk\n redis: config.redis,\n window: \"1h\",\n prefix: config.prefix ?? \"@upstash/ratelimit\",\n retention: \"90d\",\n });\n }\n\n /**\n * Try to extract the geo information from the request\n *\n * This handles Vercel's `req.geo` and and Cloudflare's `request.cf` properties\n * @param req\n * @returns\n */\n public extractGeo(req: { geo?: Geo; cf?: Geo }): Geo {\n if (typeof req.geo !== \"undefined\") {\n return req.geo;\n }\n if (typeof req.cf !== \"undefined\") {\n return req.cf;\n }\n\n return {};\n }\n\n public async record(event: Event): Promise<void> {\n await this.analytics.ingest(this.table, event);\n }\n\n public async series<TFilter extends keyof Omit<Event, \"time\">>(\n filter: TFilter,\n cutoff: number,\n ): Promise<Aggregate[]> {\n const timestampCount = Math.min(\n (\n this.analytics.getBucket(Date.now())\n - this.analytics.getBucket(cutoff)\n ) / (60 * 60 * 1000),\n 256\n )\n return this.analytics.aggregateBucketsWithPipeline(this.table, filter, timestampCount)\n }\n\n public async getUsage(cutoff = 0): Promise<Record<string, { success: number; blocked: number }>> {\n \n const timestampCount = Math.min(\n (\n this.analytics.getBucket(Date.now())\n - this.analytics.getBucket(cutoff)\n ) / (60 * 60 * 1000),\n 256\n )\n const records = await this.analytics.getAllowedBlocked(this.table, timestampCount)\n return records;\n }\n\n public async getUsageOverTime<TFilter extends keyof Omit<Event, \"time\">>(\n timestampCount: number, groupby: TFilter\n ): Promise<Aggregate[]> {\n const result = await this.analytics.aggregateBucketsWithPipeline(this.table, groupby, timestampCount)\n return result\n }\n\n public async getMostAllowedBlocked(timestampCount: number, getTop?: number) {\n getTop = getTop ?? 
5\n return this.analytics.getMostAllowedBlocked(this.table, timestampCount, getTop)\n }\n}\n","import type { EphemeralCache } from \"./types\";\n\nexport class Cache implements EphemeralCache {\n /**\n * Stores identifier -> reset (in milliseconds)\n */\n private readonly cache: Map<string, number>;\n\n constructor(cache: Map<string, number>) {\n this.cache = cache;\n }\n\n public isBlocked(identifier: string): { blocked: boolean; reset: number } {\n if (!this.cache.has(identifier)) {\n return { blocked: false, reset: 0 };\n }\n const reset = this.cache.get(identifier)!;\n if (reset < Date.now()) {\n this.cache.delete(identifier);\n return { blocked: false, reset: 0 };\n }\n\n return { blocked: true, reset: reset };\n }\n\n public blockUntil(identifier: string, reset: number): void {\n this.cache.set(identifier, reset);\n }\n\n public set(key: string, value: number): void {\n this.cache.set(key, value);\n }\n public get(key: string): number | null {\n return this.cache.get(key) || null;\n }\n\n public incr(key: string): number {\n let value = this.cache.get(key) ?? 0;\n value += 1;\n this.cache.set(key, value);\n return value;\n }\n\n public pop(key: string): void {\n this.cache.delete(key)\n }\n\n public empty(): void {\n this.cache.clear()\n }\n}\n","type Unit = \"ms\" | \"s\" | \"m\" | \"h\" | \"d\";\nexport type Duration = `${number} ${Unit}` | `${number}${Unit}`;\n\n/**\n * Convert a human readable duration to milliseconds\n */\nexport function ms(d: Duration): number {\n const match = d.match(/^(\\d+)\\s?(ms|s|m|h|d)$/);\n if (!match) {\n throw new Error(`Unable to parse window size: ${d}`);\n }\n const time = Number.parseInt(match[1]);\n const unit = match[2] as Unit;\n\n switch (unit) {\n case \"ms\":\n return time;\n case \"s\":\n return time * 1000;\n case \"m\":\n return time * 1000 * 60;\n case \"h\":\n return time * 1000 * 60 * 60;\n case \"d\":\n return time * 1000 * 60 * 60 * 24;\n\n default:\n throw new Error(`Unable to parse window size: ${d}`);\n }\n}\n","export const fixedWindowLimitScript = `\n\tlocal key = KEYS[1]\n\tlocal id = ARGV[1]\n\tlocal window = ARGV[2]\n\tlocal incrementBy = tonumber(ARGV[3])\n\n\tredis.call(\"HSET\", key, id, incrementBy)\n\tlocal fields = redis.call(\"HGETALL\", key)\n\tif #fields == 2 and tonumber(fields[2])==incrementBy then\n\t-- The first time this key is set, and the value will be equal to incrementBy.\n\t-- So we only need the expire command once\n\t redis.call(\"PEXPIRE\", key, window)\n\tend\n\n\treturn fields\n`;\nexport const fixedWindowRemainingTokensScript = `\n local key = KEYS[1]\n local tokens = 0\n\n local fields = redis.call(\"HGETALL\", key)\n\n return fields\n `;\n\nexport const slidingWindowLimitScript = `\n\tlocal currentKey = KEYS[1] -- identifier including prefixes\n\tlocal previousKey = KEYS[2] -- key of the previous bucket\n\tlocal tokens = tonumber(ARGV[1]) -- tokens per window\n\tlocal now = ARGV[2] -- current timestamp in milliseconds\n\tlocal window = ARGV[3] -- interval in milliseconds\n\tlocal requestId = ARGV[4] -- uuid for this request\n\tlocal incrementBy = tonumber(ARGV[5]) -- custom rate, default is 1\n\n\tlocal currentFields = redis.call(\"HGETALL\", currentKey)\n\tlocal requestsInCurrentWindow = 0\n\tfor i = 2, #currentFields, 2 do\n\trequestsInCurrentWindow = requestsInCurrentWindow + tonumber(currentFields[i])\n\tend\n\n\tlocal previousFields = redis.call(\"HGETALL\", previousKey)\n\tlocal requestsInPreviousWindow = 0\n\tfor i = 2, #previousFields, 2 do\n\trequestsInPreviousWindow = 
requestsInPreviousWindow + tonumber(previousFields[i])\n\tend\n\n\tlocal percentageInCurrent = ( now % window) / window\n\tif requestsInPreviousWindow * (1 - percentageInCurrent ) + requestsInCurrentWindow >= tokens then\n\t return {currentFields, previousFields, false}\n\tend\n\n\tredis.call(\"HSET\", currentKey, requestId, incrementBy)\n\n\tif requestsInCurrentWindow == 0 then \n\t -- The first time this key is set, the value will be equal to incrementBy.\n\t -- So we only need the expire command once\n\t redis.call(\"PEXPIRE\", currentKey, window * 2 + 1000) -- Enough time to overlap with a new window + 1 second\n\tend\n\treturn {currentFields, previousFields, true}\n`;\n\nexport const slidingWindowRemainingTokensScript = `\n\tlocal currentKey = KEYS[1] -- identifier including prefixes\n\tlocal previousKey = KEYS[2] -- key of the previous bucket\n\tlocal now \t= ARGV[1] -- current timestamp in milliseconds\n \tlocal window \t= ARGV[2] -- interval in milliseconds\n\n\tlocal currentFields = redis.call(\"HGETALL\", currentKey)\n\tlocal requestsInCurrentWindow = 0\n\tfor i = 2, #currentFields, 2 do\n\trequestsInCurrentWindow = requestsInCurrentWindow + tonumber(currentFields[i])\n\tend\n\n\tlocal previousFields = redis.call(\"HGETALL\", previousKey)\n\tlocal requestsInPreviousWindow = 0\n\tfor i = 2, #previousFields, 2 do\n\trequestsInPreviousWindow = requestsInPreviousWindow + tonumber(previousFields[i])\n\tend\n\n\tlocal percentageInCurrent = ( now % window) / window\n \trequestsInPreviousWindow = math.floor(( 1 - percentageInCurrent ) * requestsInPreviousWindow)\n\t\n\treturn requestsInCurrentWindow + requestsInPreviousWindow\n`;\n","export const resetScript = `\n local pattern = KEYS[1]\n\n -- Initialize cursor to start from 0\n local cursor = \"0\"\n\n repeat\n -- Scan for keys matching the pattern\n local scan_result = redis.call('SCAN', cursor, 'MATCH', pattern)\n\n -- Extract cursor for the next iteration\n cursor = scan_result[1]\n\n -- Extract keys from the scan result\n local keys = scan_result[2]\n\n for i=1, #keys do\n redis.call('DEL', keys[i])\n end\n\n -- Continue scanning until cursor is 0 (end of keyspace)\n until cursor == \"0\"\n `;\n","import { Analytics, type Geo } from \"./analytics\";\nimport { Cache } from \"./cache\";\nimport type { Algorithm, Context, RatelimitResponse } from \"./types\";\n\nexport class TimeoutError extends Error {\n constructor() {\n super(\"Timeout\");\n this.name = \"TimeoutError\";\n }\n}\nexport type RatelimitConfig<TContext> = {\n /**\n * The ratelimiter function to use.\n *\n * Choose one of the predefined ones or implement your own.\n * Available algorithms are exposed via static methods:\n * - Ratelimiter.fixedWindow\n * - Ratelimiter.slidingWindow\n * - Ratelimiter.tokenBucket\n */\n\n limiter: Algorithm<TContext>;\n\n ctx: TContext;\n /**\n * All keys in redis are prefixed with this.\n *\n * @default `@upstash/ratelimit`\n */\n prefix?: string;\n\n /**\n * If enabled, the ratelimiter will keep a global cache of identifiers, that have\n * exhausted their ratelimit. In serverless environments this is only possible if\n * you create the ratelimiter instance outside of your handler function. While the\n * function is still hot, the ratelimiter can block requests without having to\n * request data from redis, thus saving time and money.\n *\n * Whenever an identifier has exceeded its limit, the ratelimiter will add it to an\n * internal list together with its reset timestamp. 
If the same identifier makes a\n * new request before it is reset, we can immediately reject it.\n *\n * Set to `false` to disable.\n *\n * If left undefined, a map is created automatically, but it can only work\n * if the map or the ratelimit instance is created outside your serverless function handler.\n */\n ephemeralCache?: Map<string, number> | false;\n\n /**\n * If set, the ratelimiter will allow requests to pass after this many milliseconds.\n *\n * Use this if you want to allow requests in case of network problems\n *\n * @default 5000\n */\n timeout?: number;\n\n /**\n * If enabled, the ratelimiter will store analytics data in redis, which you can check out at\n * https://console.upstash.com/ratelimit\n *\n * @default false\n */\n analytics?: boolean;\n};\n\n/**\n * Ratelimiter using serverless redis from https://upstash.com/\n *\n * @example\n * ```ts\n * const { limit } = new Ratelimit({\n * redis: Redis.fromEnv(),\n * limiter: Ratelimit.slidingWindow(\n * 10, // Allow 10 requests per window of 30 minutes\n * \"30 m\", // interval of 30 minutes\n * ),\n * })\n *\n * ```\n */\nexport abstract class Ratelimit<TContext extends Context> {\n protected readonly limiter: Algorithm<TContext>;\n\n protected readonly ctx: TContext;\n\n protected readonly prefix: string;\n\n protected readonly timeout: number;\n\n protected readonly analytics?: Analytics;\n\n constructor(config: RatelimitConfig<TContext>) {\n this.ctx = config.ctx;\n this.limiter = config.limiter;\n this.timeout = config.timeout ?? 5000;\n this.prefix = config.prefix ?? \"@upstash/ratelimit\";\n this.analytics = config.analytics\n ? new Analytics({\n redis: Array.isArray(this.ctx.redis) ? this.ctx.redis[0] : this.ctx.redis,\n prefix: this.prefix,\n })\n : undefined;\n\n if (config.ephemeralCache instanceof Map) {\n this.ctx.cache = new Cache(config.ephemeralCache);\n } else if (typeof config.ephemeralCache === \"undefined\") {\n this.ctx.cache = new Cache(new Map());\n }\n }\n\n /**\n * Determine if a request should pass or be rejected based on the identifier and previously chosen ratelimit.\n *\n * Use this if you want to reject all requests that you can not handle right now.\n *\n * @example\n * ```ts\n * const ratelimit = new Ratelimit({\n * redis: Redis.fromEnv(),\n * limiter: Ratelimit.slidingWindow(10, \"10 s\")\n * })\n *\n * const { success } = await ratelimit.limit(id)\n * if (!success){\n * return \"Nope\"\n * }\n * return \"Yes\"\n * ```\n *\n * @param req.rate - The rate at which tokens will be added or consumed from the token bucket. A higher rate allows for more requests to be processed. 
Defaults to 1 token per interval if not specified.\n *\n * Usage with `req.rate`\n * @example\n * ```ts\n * const ratelimit = new Ratelimit({\n * redis: Redis.fromEnv(),\n * limiter: Ratelimit.slidingWindow(100, \"10 s\")\n * })\n *\n * const { success } = await ratelimit.limit(id, {rate: 10})\n * if (!success){\n * return \"Nope\"\n * }\n * return \"Yes\"\n * ```\n */\n public limit = async (\n identifier: string,\n req?: { geo?: Geo; rate?: number },\n ): Promise<RatelimitResponse> => {\n const key = [this.prefix, identifier].join(\":\");\n let timeoutId: any = null;\n try {\n const arr: Promise<RatelimitResponse>[] = [this.limiter().limit(this.ctx, key, req?.rate)];\n if (this.timeout > 0) {\n arr.push(\n new Promise((resolve) => {\n timeoutId = setTimeout(() => {\n resolve({\n success: true,\n limit: 0,\n remaining: 0,\n reset: 0,\n pending: Promise.resolve(),\n });\n }, this.timeout);\n }),\n );\n }\n\n const res = await Promise.race(arr);\n if (this.analytics) {\n try {\n const geo = req ? this.analytics.extractGeo(req) : undefined;\n const analyticsP = this.analytics\n .record({\n identifier,\n time: Date.now(),\n success: res.success,\n ...geo,\n })\n .catch((err) => {\n console.warn(\"Failed to record analytics\", err);\n });\n res.pending = Promise.all([res.pending, analyticsP]);\n } catch (err) {\n console.warn(\"Failed to record analytics\", err);\n }\n }\n return res;\n } finally {\n if (timeoutId) {\n clearTimeout(timeoutId);\n }\n }\n };\n\n /**\n * Block until the request may pass or timeout is reached.\n *\n * This method returns a promise that resolves as soon as the request may be processed\n * or after the timeout has been reached.\n *\n * Use this if you want to delay the request until it is ready to get processed.\n *\n * @example\n * ```ts\n * const ratelimit = new Ratelimit({\n * redis: Redis.fromEnv(),\n * limiter: Ratelimit.slidingWindow(10, \"10 s\")\n * })\n *\n * const { success } = await ratelimit.blockUntilReady(id, 60_000)\n * if (!success){\n * return \"Nope\"\n * }\n * return \"Yes\"\n * ```\n */\n public blockUntilReady = async (\n /**\n * An identifier per user or api.\n * Choose a userID, or api token, or ip address.\n *\n * If you want to limit your api across all users, you can set a constant string.\n */\n identifier: string,\n /**\n * Maximum duration to wait in milliseconds.\n * After this time the request will be denied.\n */\n timeout: number,\n ): Promise<RatelimitResponse> => {\n if (timeout <= 0) {\n throw new Error(\"timeout must be positive\");\n }\n let res: RatelimitResponse;\n\n const deadline = Date.now() + timeout;\n while (true) {\n res = await this.limit(identifier);\n if (res.success) {\n break;\n }\n if (res.reset === 0) {\n throw new Error(\"This should not happen\");\n }\n\n const wait = Math.min(res.reset, deadline) - Date.now();\n await new Promise((r) => setTimeout(r, wait));\n\n if (Date.now() > deadline) {\n break;\n }\n }\n return res!;\n };\n\n public resetUsedTokens = async (identifier: string) => {\n const pattern = [this.prefix, identifier].join(\":\");\n await this.limiter().resetTokens(this.ctx, pattern);\n };\n\n public getRemaining = async (identifier: string): Promise<number> => {\n const pattern = [this.prefix, identifier].join(\":\");\n\n return await this.limiter().getRemaining(this.ctx, pattern);\n };\n}\n","import { Cache } from \"./cache\";\nimport type { Duration } from \"./duration\";\nimport { ms } from \"./duration\";\nimport {\n fixedWindowLimitScript,\n fixedWindowRemainingTokensScript,\n 
slidingWindowLimitScript,\n slidingWindowRemainingTokensScript,\n} from \"./lua-scripts/multi\";\nimport { resetScript } from \"./lua-scripts/reset\";\nimport { Ratelimit } from \"./ratelimit\";\nimport type { Algorithm, MultiRegionContext } from \"./types\";\n\nimport type { Redis } from \"./types\";\n\nfunction randomId(): string {\n let result = \"\";\n const characters = \"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789\";\n const charactersLength = characters.length;\n for (let i = 0; i < 16; i++) {\n result += characters.charAt(Math.floor(Math.random() * charactersLength));\n }\n return result;\n}\n\nexport type MultiRegionRatelimitConfig = {\n /**\n * Instances of `@upstash/redis`\n * @see https://github.com/upstash/upstash-redis#quick-start\n */\n redis: Redis[];\n /**\n * The ratelimiter function to use.\n *\n * Choose one of the predefined ones or implement your own.\n * Available algorithms are exposed via static methods:\n * - MultiRegionRatelimit.fixedWindow\n */\n limiter: Algorithm<MultiRegionContext>;\n /**\n * All keys in redis are prefixed with this.\n *\n * @default `@upstash/ratelimit`\n */\n prefix?: string;\n\n /**\n * If enabled, the ratelimiter will keep a global cache of identifiers, that have\n * exhausted their ratelimit. In serverless environments this is only possible if\n * you create the ratelimiter instance outside of your handler function. While the\n * function is still hot, the ratelimiter can block requests without having to\n * request data from redis, thus saving time and money.\n *\n * Whenever an identifier has exceeded its limit, the ratelimiter will add it to an\n * internal list together with its reset timestamp. If the same identifier makes a\n * new request before it is reset, we can immediately reject it.\n *\n * Set to `false` to disable.\n *\n * If left undefined, a map is created automatically, but it can only work\n * if the map or the ratelimit instance is created outside your serverless function handler.\n */\n ephemeralCache?: Map<string, number> | false;\n\n /**\n * If set, the ratelimiter will allow requests to pass after this many milliseconds.\n *\n * Use this if you want to allow requests in case of network problems\n */\n timeout?: number;\n\n /**\n * If enabled, the ratelimiter will store analytics data in redis, which you can check out at\n * https://console.upstash.com/ratelimit\n *\n * @default false\n */\n analytics?: boolean;\n};\n\n/**\n * Ratelimiter using serverless redis from https://upstash.com/\n *\n * @example\n * ```ts\n * const { limit } = new MultiRegionRatelimit({\n * redis: Redis.fromEnv(),\n * limiter: MultiRegionRatelimit.fixedWindow(\n * 10, // Allow 10 requests per window of 30 minutes\n * \"30 m\", // interval of 30 minutes\n * )\n * })\n *\n * ```\n */\nexport class MultiRegionRatelimit extends Ratelimit<MultiRegionContext> {\n /**\n * Create a new Ratelimit instance by providing a `@upstash/redis` instance and the algorithn of your choice.\n */\n constructor(config: MultiRegionRatelimitConfig) {\n super({\n prefix: config.prefix,\n limiter: config.limiter,\n timeout: config.timeout,\n analytics: config.analytics,\n ctx: {\n redis: config.redis,\n cache: config.ephemeralCache ? 
new Cache(config.ephemeralCache) : undefined,\n },\n });\n }\n\n /**\n * Each request inside a fixed time increases a counter.\n * Once the counter reaches the maximum allowed number, all further requests are\n * rejected.\n *\n * **Pro:**\n *\n * - Newer requests are not starved by old ones.\n * - Low storage cost.\n *\n * **Con:**\n *\n * A burst of requests near the boundary of a window can result in a very\n * high request rate because two windows will be filled with requests quickly.\n *\n * @param tokens - How many requests a user can make in each time window.\n * @param window - A fixed timeframe\n */\n static fixedWindow(\n /**\n * How many requests are allowed per window.\n */\n tokens: number,\n /**\n * The duration in which `tokens` requests are allowed.\n */\n window: Duration,\n ): Algorithm<MultiRegionContext> {\n const windowDuration = ms(window);\n\n return () => ({\n async limit(ctx: MultiRegionContext, identifier: string, rate?: number) {\n if (ctx.cache) {\n const { blocked, reset } = ctx.cache.isBlocked(identifier);\n if (blocked) {\n return {\n success: false,\n limit: tokens,\n remaining: 0,\n reset: reset,\n pending: Promise.resolve(),\n };\n }\n }\n\n const requestId = randomId();\n const bucket = Math.floor(Date.now() / windowDuration);\n const key = [identifier, bucket].join(\":\");\n const incrementBy = rate ? Math.max(1, rate) : 1;\n\n const dbs: { redis: Redis; request: Promise<string[]> }[] = ctx.redis.map((redis) => ({\n redis,\n request: redis.eval(\n fixedWindowLimitScript,\n [key],\n [requestId, windowDuration, incrementBy],\n ) as Promise<string[]>,\n }));\n\n // The firstResponse is an array of string at every EVEN indexes and rate at which the tokens are used at every ODD indexes\n const firstResponse = await Promise.any(dbs.map((s) => s.request));\n\n const usedTokens = firstResponse.reduce((accTokens: number, usedToken, index) => {\n let parsedToken = 0;\n if (index % 2) {\n parsedToken = Number.parseInt(usedToken);\n }\n\n return accTokens + parsedToken;\n }, 0);\n\n const remaining = tokens - usedTokens;\n\n /**\n * If the length between two databases does not match, we sync the two databases\n */\n async function sync() {\n const individualIDs = await Promise.all(dbs.map((s) => s.request));\n\n const allIDs = Array.from(\n new Set(\n individualIDs\n .flatMap((_) => _)\n .reduce((acc: string[], curr, index) => {\n if (index % 2 === 0) {\n acc.push(curr);\n }\n return acc;\n }, []),\n ).values(),\n );\n\n for (const db of dbs) {\n const usedDbTokens = (await db.request).reduce(\n (accTokens: number, usedToken, index) => {\n let parsedToken = 0;\n if (index % 2) {\n parsedToken = Number.parseInt(usedToken);\n }\n\n return accTokens + parsedToken;\n },\n 0,\n );\n\n const dbIds = (await db.request).reduce((ids: string[], currentId, index) => {\n if (index % 2 === 0) {\n ids.push(currentId);\n }\n return ids;\n }, []);\n /**\n * If the bucket in this db is already full, it doesn't matter which ids it contains.\n * So we do not have to sync.\n */\n if (usedDbTokens >= tokens) {\n continue;\n }\n const diff = allIDs.filter((id) => !dbIds.includes(id));\n /**\n * Don't waste a request if there is nothing to send\n */\n if (diff.length === 0) {\n continue;\n }\n\n for (const requestId of diff) {\n await db.redis.hset(key, { [requestId]: incrementBy });\n }\n }\n }\n\n /**\n * Do not await sync. 
This should not run in the critical path.\n */\n\n const success = remaining > 0;\n const reset = (bucket + 1) * windowDuration;\n\n if (ctx.cache && !success) {\n ctx.cache.blockUntil(identifier, reset);\n }\n return {\n success,\n limit: tokens,\n remaining,\n reset,\n pending: sync(),\n };\n },\n async getRemaining(ctx: MultiRegionContext, identifier: string) {\n const bucket = Math.floor(Date.now() / windowDuration);\n const key = [identifier, bucket].join(\":\");\n\n const dbs: { redis: Redis; request: Promise<string[]> }[] = ctx.redis.map((redis) => ({\n redis,\n request: redis.eval(fixedWindowRemainingTokensScript, [key], [null]) as Promise<string[]>,\n }));\n\n // The firstResponse is an array of string at every EVEN indexes and rate at which the tokens are used at every ODD indexes\n const firstResponse = await Promise.any(dbs.map((s) => s.request));\n const usedTokens = firstResponse.reduce((accTokens: number, usedToken, index) => {\n let parsedToken = 0;\n if (index % 2) {\n parsedToken = Number.parseInt(usedToken);\n }\n\n return accTokens + parsedToken;\n }, 0);\n\n return Math.max(0, tokens - usedTokens);\n },\n async resetTokens(ctx: MultiRegionContext, identifier: string) {\n const pattern = [identifier, \"*\"].join(\":\");\n if (ctx.cache) {\n ctx.cache.pop(identifier)\n }\n for (const db of ctx.redis) {\n await db.eval(resetScript, [pattern], [null]);\n }\n },\n });\n }\n\n /**\n * Combined approach of `slidingLogs` and `fixedWindow` with lower storage\n * costs than `slidingLogs` and improved boundary behavior by calculating a\n * weighted score between two windows.\n *\n * **Pro:**\n *\n * Good performance allows this to scale to very high loads.\n *\n * **Con:**\n *\n * Nothing major.\n *\n * @param tokens - How many requests a user can make in each time window.\n * @param window - The duration in which the user can max X requests.\n */\n static slidingWindow(\n /**\n * How many requests are allowed per window.\n */\n tokens: number,\n /**\n * The duration in which `tokens` requests are allowed.\n */\n window: Duration,\n ): Algorithm<MultiRegionContext> {\n const windowSize = ms(window);\n\n const windowDuration = ms(window);\n\n return () => ({\n async limit(ctx: MultiRegionContext, identifier: string, rate?: number) {\n // if (ctx.cache) {\n // const { blocked, reset } = ctx.cache.isBlocked(identifier);\n // if (blocked) {\n // return {\n // success: false,\n // limit: tokens,\n // remaining: 0,\n // reset: reset,\n // pending: Promise.resolve(),\n // };\n // }\n // }\n\n const requestId = randomId();\n const now = Date.now();\n\n const currentWindow = Math.floor(now / windowSize);\n const currentKey = [identifier, currentWindow].join(\":\");\n const previousWindow = currentWindow - 1;\n const previousKey = [identifier, previousWindow].join(\":\");\n const incrementBy = rate ? 
Math.max(1, rate) : 1;\n\n const dbs = ctx.redis.map((redis) => ({\n redis,\n request: redis.eval(\n slidingWindowLimitScript,\n [currentKey, previousKey],\n [tokens, now, windowDuration, requestId, incrementBy],\n // lua seems to return `1` for true and `null` for false\n ) as Promise<[string[], string[], 1 | null]>,\n }));\n\n const percentageInCurrent = (now % windowDuration) / windowDuration;\n const [current, previous, success] = await Promise.any(dbs.map((s) => s.request));\n\n // in the case of success, the new request is not included in the current array.\n // add it manually\n if (success) {\n current.push(requestId, incrementBy.toString())\n }\n\n const previousUsedTokens = previous.reduce((accTokens: number, usedToken, index) => {\n let parsedToken = 0;\n if (index % 2) {\n parsedToken = Number.parseInt(usedToken);\n }\n\n return accTokens + parsedToken;\n }, 0);\n\n const currentUsedTokens = current.reduce((accTokens: number, usedToken, index) => {\n let parsedToken = 0;\n if (index % 2) {\n parsedToken = Number.parseInt(usedToken);\n }\n\n return accTokens + parsedToken;\n }, 0);\n\n const previousPartialUsed = Math.ceil(previousUsedTokens * (1 - percentageInCurrent));\n\n const usedTokens = previousPartialUsed + currentUsedTokens;\n\n const remaining = tokens - usedTokens;\n\n /**\n * If a database differs from the consensus, we sync it\n */\n async function sync() {\n const res = await Promise.all(dbs.map((s) => s.request));\n\n const allCurrentIds = Array.from(\n new Set(\n res\n .flatMap(([current]) => current)\n .reduce((acc: string[], curr, index) => {\n if (index % 2 === 0) {\n acc.push(curr);\n }\n return acc;\n }, []),\n ).values(),\n );\n\n for (const db of dbs) {\n const [current, _previous, _success] = await db.request;\n const dbIds = current.reduce((ids: string[], currentId, index) => {\n if (index % 2 === 0) {\n ids.push(currentId);\n }\n return ids;\n }, []);\n\n const usedDbTokens = current.reduce((accTokens: number, usedToken, index) => {\n let parsedToken = 0;\n if (index % 2) {\n parsedToken = Number.parseInt(usedToken);\n }\n\n return accTokens + parsedToken;\n }, 0);\n /**\n * If the bucket in this db is already full, it doesn't matter which ids it contains.\n * So we do not have to sync.\n */\n if (usedDbTokens >= tokens) {\n continue;\n }\n const diff = allCurrentIds.filter((id) => !dbIds.includes(id));\n /**\n * Don't waste a request if there is nothing to send\n */\n if (diff.length === 0) {\n continue;\n }\n\n for (const requestId of diff) {\n await db.redis.hset(currentKey, { [requestId]: incrementBy });\n }\n }\n }\n\n // const success = remaining >= 0;\n const reset = (currentWindow + 1) * windowDuration;\n if (ctx.cache && !success) {\n ctx.cache.blockUntil(identifier, reset);\n }\n return {\n success: Boolean(success),\n limit: tokens,\n remaining: Math.max(0, remaining),\n reset,\n pending: sync(),\n };\n },\n async getRemaining(ctx: MultiRegionContext, identifier: string) {\n const now = Date.now();\n\n const currentWindow = Math.floor(now / windowSize);\n const currentKey = [identifier, currentWindow].join(\":\");\n const previousWindow = currentWindow - 1;\n const previousKey = [identifier, previousWindow].join(\":\");\n\n const dbs = ctx.redis.map((redis) => ({\n redis,\n request: redis.eval(\n slidingWindowRemainingTokensScript,\n [currentKey, previousKey],\n [now, windowSize],\n // lua seems to return `1` for true and `null` for false\n ) as Promise<number>,\n }));\n\n const usedTokens = await Promise.any(dbs.map((s) => s.request));\n 
return Math.max(0, tokens - usedTokens);\n },\n async resetTokens(ctx: MultiRegionContext, identifier: string) {\n const pattern = [identifier, \"*\"].join(\":\");\n if (ctx.cache) {\n ctx.cache.pop(identifier)\n }\n for (const db of ctx.redis) {\n await db.eval(resetScript, [pattern], [null]);\n }\n },\n });\n }\n}\n","export const fixedWindowLimitScript = `\n local key = KEYS[1]\n local window = ARGV[1]\n local incrementBy = ARGV[2] -- increment rate per request at a given value, default is 1\n\n local r = redis.call(\"INCRBY\", key, incrementBy)\n if r == tonumber(incrementBy) then\n -- The first time this key is set, the value will be equal to incrementBy.\n -- So we only need the expire command once\n redis.call(\"PEXPIRE\", key, window)\n end\n\n return r\n`;\n\nexport const fixedWindowRemainingTokensScript = `\n local key = KEYS[1]\n local tokens = 0\n\n local value = redis.call('GET', key)\n if value then\n tokens = value\n end\n return tokens\n `;\n\nexport const slidingWindowLimitScript = `\n local currentKey = KEYS[1] -- identifier including prefixes\n local previousKey = KEYS[2] -- key of the previous bucket\n local tokens = tonumber(ARGV[1]) -- tokens per window\n local now = ARGV[2] -- current timestamp in milliseconds\n local window = ARGV[3] -- interval in milliseconds\n local incrementBy = ARGV[4] -- increment rate per request at a given value, default is 1\n\n local requestsInCurrentWindow = redis.call(\"GET\", currentKey)\n if requestsInCurrentWindow == false then\n requestsInCurrentWindow = 0\n end\n\n local requestsInPreviousWindow = redis.call(\"GET\", previousKey)\n if requestsInPreviousWindow == false then\n requestsInPreviousWindow = 0\n end\n local percentageInCurrent = ( now % window ) / window\n -- weighted requests to consider from the previous window\n requestsInPreviousWindow = math.floor(( 1 - percentageInCurrent ) * requestsInPreviousWindow)\n if requestsInPreviousWindow + requestsInCurrentWindow >= tokens then\n return -1\n end\n\n local newValue = redis.call(\"INCRBY\", currentKey, incrementBy)\n if newValue == tonumber(incrementBy) then\n -- The first time this key is set, the value will be equal to incrementBy.\n -- So we only need the expire command once\n redis.call(\"PEXPIRE\", currentKey, window * 2 + 1000) -- Enough time to overlap with a new window + 1 second\n end\n return tokens - ( newValue + requestsInPreviousWindow )\n`;\n\nexport const slidingWindowRemainingTokensScript = `\n local currentKey = KEYS[1] -- identifier including prefixes\n local previousKey = KEYS[2] -- key of the previous bucket\n local now = ARGV[1] -- current timestamp in milliseconds\n local window = ARGV[2] -- interval in milliseconds\n\n local requestsInCurrentWindow = redis.call(\"GET\", currentKey)\n if requestsInCurrentWindow == false then\n requestsInCurrentWindow = 0\n end\n\n local requestsInPreviousWindow = redis.call(\"GET\", previousKey)\n if requestsInPreviousWindow == false then\n requestsInPreviousWindow = 0\n end\n\n local percentageInCurrent = ( now % window ) / window\n -- weighted requests to consider from the previous window\n requestsInPreviousWindow = math.floor(( 1 - percentageInCurrent ) * requestsInPreviousWindow)\n\n return requestsInPreviousWindow + requestsInCurrentWindow\n`;\n\nexport const tokenBucketLimitScript = `\n local key = KEYS[1] -- identifier including prefixes\n local maxTokens = tonumber(ARGV[1]) -- maximum number of tokens\n local interval = tonumber(ARGV[2]) -- size of the window in milliseconds\n local refillRate = 
tonumber(ARGV[3]) -- how many tokens are refilled after each interval\n local now = tonumber(ARGV[4]) -- current timestamp in milliseconds\n local incrementBy = tonumber(ARGV[5]) -- how many tokens to consume, default is 1\n \n local bucket = redis.call(\"HMGET\", key, \"refilledAt\", \"tokens\")\n \n local refilledAt\n local tokens\n\n if bucket[1] == false then\n refilledAt = now\n tokens = maxTokens\n else\n refilledAt = tonumber(bucket[1])\n tokens = tonumber(bucket[2])\n end\n \n if now >= refilledAt + interval then\n local numRefills = math.floor((now - refilledAt) / interval)\n tokens = math.min(maxTokens, tokens + numRefills * refillRate)\n\n refilledAt = refilledAt + numRefills * interval\n end\n\n if tokens == 0 then\n return {-1, refilledAt + interval}\n end\n\n local remaining = tokens - incrementBy\n local expireAt = math.ceil(((maxTokens - remaining) / refillRate)) * interval\n \n redis.call(\"HSET\", key, \"refilledAt\", refilledAt, \"tokens\", remaining)\n redis.call(\"PEXPIRE\", key, expireAt)\n return {remaining, refilledAt + interval}\n`;\n\nexport const tokenBucketRemainingTokensScript = `\n local key = KEYS[1]\n local maxTokens = tonumber(ARGV[1])\n \n local bucket = redis.call(\"HMGET\", key, \"tokens\")\n\n if bucket[1] == false then\n return maxTokens\n end\n \n return tonumber(bucket[1])\n`;\n\nexport const cachedFixedWindowLimitScript = `\n local key = KEYS[1]\n local window = ARGV[1]\n local incrementBy = ARGV[2] -- increment rate per request at a given value, default is 1\n\n local r = redis.call(\"INCRBY\", key, incrementBy)\n if r == incrementBy then\n -- The first time this key is set, the value will be equal to incrementBy.\n -- So we only need the expire command once\n redis.call(\"PEXPIRE\", key, window)\n end\n \n return r\n`;\n\nexport const cachedFixedWindowRemainingTokenScript = `\n local key = KEYS[1]\n local tokens = 0\n\n local value = redis.call('GET', key)\n if value then\n tokens = value\n end\n return tokens\n`;\n","import type { Duration } from \"./duration\";\nimport { ms } from \"./duration\";\nimport { resetScript } from \"./lua-scripts/reset\";\nimport {\n cachedFixedWindowLimitScript,\n cachedFixedWindowRemainingTokenScript,\n fixedWindowLimitScript,\n fixedWindowRemainingTokensScript,\n slidingWindowLimitScript,\n slidingWindowRemainingTokensScript,\n tokenBucketLimitScript,\n tokenBucketRemainingTokensScript,\n} from \"./lua-scripts/single\";\nimport { Ratelimit } from \"./ratelimit\";\nimport type { Algorithm, RegionContext } from \"./types\";\nimport type { Redis } from \"./types\";\n\nexport type RegionRatelimitConfig = {\n /**\n * Instance of `@upstash/redis`\n * @see https://github.com/upstash/upstash-redis#quick-start\n */\n redis: Redis;\n /**\n * The ratelimiter function to use.\n *\n * Choose one of the predefined ones or implement your own.\n * Available algorithms are exposed via static methods:\n * - Ratelimiter.fixedWindow\n * - Ratelimiter.slidingWindow\n * - Ratelimiter.tokenBucket\n */\n limiter: Algorithm<RegionContext>;\n /**\n * All keys in redis are prefixed with this.\n *\n * @default `@upstash/ratelimit`\n */\n prefix?: string;\n\n /**\n * If enabled, the ratelimiter will keep a global cache of identifiers, that have\n * exhausted their ratelimit. In serverless environments this is only possible if\n * you create the ratelimiter instance outside of your handler function. 
While the\n * function is still hot, the ratelimiter can block requests without having to\n * request data from redis, thus saving time and money.\n *\n * Whenever an identifier has exceeded its limit, the ratelimiter will add it to an\n * internal list together with its reset timestamp. If the same identifier makes a\n * new request before it is reset, we can immediately reject it.\n *\n * Set to `false` to disable.\n *\n * If left undefined, a map is created automatically, but it can only work\n * if the map or the ratelimit instance is created outside your serverless function handler.\n */\n ephemeralCache?: Map<string, number> | false;\n\n /**\n * If set, the ratelimiter will allow requests to pass after this many milliseconds.\n *\n * Use this if you want to allow requests in case of network problems\n */\n timeout?: number;\n\n /**\n * If enabled, the ratelimiter will store analytics data in redis, which you can check out at\n * https://console.upstash.com/ratelimit\n *\n * @default false\n */\n analytics?: boolean;\n};\n\n/**\n * Ratelimiter using serverless redis from https://upstash.com/\n *\n * @example\n * ```ts\n * const { limit } = new Ratelimit({\n * redis: Redis.fromEnv(),\n * limiter: Ratelimit.slidingWindow(\n * \"30 m\", // interval of 30 minutes\n * 10, // Allow 10 requests per window of 30 minutes\n * )\n * })\n *\n * ```\n */\nexport class RegionRatelimit extends Ratelimit<RegionContext> {\n /**\n * Create a new Ratelimit instance by providing a `@upstash/redis` instance and the algorithm of your choice.\n */\n\n constructor(config: RegionRatelimitConfig) {\n super({\n prefix: config.prefix,\n limiter: config.limiter,\n timeout: config.timeout,\n analytics: config.analytics,\n ctx: {\n redis: config.redis,\n },\n ephemeralCache: config.ephemeralCache,\n });\n }\n\n /**\n * Each request inside a fixed time increases a counter.\n * Once the counter reaches the maximum allowed number, all further requests are\n * rejected.\n *\n * **Pro:**\n *\n * - Newer requests are not starved by old ones.\n * - Low storage cost.\n *\n * **Con:**\n *\n * A burst of requests near the boundary of a window can result in a very\n * high request rate because two windows will be filled with requests quickly.\n *\n * @param tokens - How many requests a user can make in each time window.\n * @param window - A fixed timeframe\n */\n static fixedWindow(\n /**\n * How many requests are allowed per window.\n */\n tokens: number,\n /**\n * The duration in which `tokens` requests are allowed.\n */\n window: Duration,\n ): Algorithm<RegionContext> {\n const windowDuration = ms(window);\n return () => ({\n async limit(ctx: RegionContext, identifier: string, rate?: number) {\n const bucket = Math.floor(Date.now() / windowDuration);\n const key = [identifier, bucket].join(\":\");\n if (ctx.cache) {\n const { blocked, reset } = ctx.cache.isBlocked(identifier);\n if (blocked) {\n return {\n success: false,\n limit: tokens,\n remaining: 0,\n reset: reset,\n pending: Promise.resolve(),\n };\n }\n }\n\n const incrementBy = rate ? 
Math.max(1, rate) : 1;\n\n const usedTokensAfterUpdate = (await ctx.redis.eval(\n fixedWindowLimitScript,\n [key],\n [windowDuration, incrementBy],\n )) as number;\n\n const success = usedTokensAfterUpdate <= tokens;\n\n const remainingTokens = Math.max(0, tokens - usedTokensAfterUpdate);\n\n const reset = (bucket + 1) * windowDuration;\n if (ctx.cache && !success) {\n ctx.cache.blockUntil(identifier, reset);\n }\n\n return {\n success,\n limit: tokens,\n remaining: remainingTokens,\n reset,\n pending: Promise.resolve(),\n };\n },\n async getRemaining(ctx: RegionContext, identifier: string) {\n const bucket = Math.floor(Date.now() / windowDuration);\n const key = [identifier, bucket].join(\":\");\n\n const usedTokens = (await ctx.redis.eval(\n fixedWindowRemainingTokensScript,\n [key],\n [null],\n )) as number;\n\n return Math.max(0, tokens - usedTokens);\n },\n async resetTokens(ctx: RegionContext, identifier: string) {\n const pattern = [identifier, \"*\"].join(\":\");\n if (ctx.cache) {\n ctx.cache.pop(identifier)\n }\n await ctx.redis.eval(resetScript, [pattern], [null]);\n },\n });\n }\n\n /**\n * Combined approach of `slidingLogs` and `fixedWindow` with lower storage\n * costs than `slidingLogs` and improved boundary behavior by calculating a\n * weighted score between two windows.\n *\n * **Pro:**\n *\n * Good performance allows this to scale to very high loads.\n *\n * **Con:**\n *\n * Nothing major.\n *\n * @param tokens - How many requests a user can make in each time window.\n * @param window - The duration in which the user can max X requests.\n */\n static slidingWindow(\n /**\n * How many requests are allowed per window.\n */\n tokens: number,\n /**\n * The duration in which `tokens` requests are allowed.\n */\n window: Duration,\n ): Algorithm<RegionContext> {\n const windowSize = ms(window);\n return () => ({\n async limit(ctx: RegionContext, identifier: string, rate?: number) {\n const now = Date.now();\n\n const currentWindow = Math.floor(now / windowSize);\n const currentKey = [identifier, currentWindow].join(\":\");\n const previousWindow = currentWindow - 1;\n const previousKey = [identifier, previousWindow].join(\":\");\n\n if (ctx.cache) {\n const { blocked, reset } = ctx.cache.isBlocked(identifier);\n if (blocked) {\n return {\n success: false,\n limit: tokens,\n remaining: 0,\n reset: reset,\n pending: Promise.resolve(),\n };\n }\n }\n\n const incrementBy = rate ? 
Math.max(1, rate) : 1;\n\n const remainingTokens = (await ctx.redis.eval(\n slidingWindowLimitScript,\n [currentKey, previousKey],\n [tokens, now, windowSize, incrementBy],\n )) as number;\n\n const success = remainingTokens >= 0;\n\n const reset = (currentWindow + 1) * windowSize;\n if (ctx.cache && !success) {\n ctx.cache.blockUntil(identifier, reset);\n }\n return {\n success,\n limit: tokens,\n remaining: Math.max(0, remainingTokens),\n reset,\n pending: Promise.resolve(),\n };\n },\n async getRemaining(ctx: RegionContext, identifier: string) {\n const now = Date.now();\n const currentWindow = Math.floor(now / windowSize);\n const currentKey = [identifier, currentWindow].join(\":\");\n const previousWindow = currentWindow - 1;\n const previousKey = [identifier, previousWindow].join(\":\");\n\n const usedTokens = (await ctx.redis.eval(\n slidingWindowRemainingTokensScript,\n [currentKey, previousKey],\n [now, windowSize],\n )) as number;\n\n return Math.max(0, tokens - usedTokens);\n },\n async resetTokens(ctx: RegionContext, identifier: string) {\n const pattern = [identifier, \"*\"].join(\":\");\n if (ctx.cache) {\n ctx.cache.pop(identifier)\n }\n await ctx.redis.eval(resetScript, [pattern], [null]);\n },\n });\n }\n\n /**\n * You have a bucket filled with `{maxTokens}` tokens that refills constantly\n * at `{refillRate}` per `{interval}`.\n * Every request will remove one token from the bucket and if there is no\n * token to take, the request is rejected.\n *\n * **Pro:**\n *\n * - Bursts of requests are smoothed out and you can process them at a constant\n * rate.\n * - Allows to set a higher initial burst limit by setting `maxTokens` higher\n * than `refillRate`\n */\n static tokenBucket(\n /**\n * How many tokens are refilled per `interval`\n *\n * An interval of `10s` and refillRate of 5 will cause a new token to be added every 2 seconds.\n */\n refillRate: number,\n /**\n * The interval for the `refillRate`\n */\n interval: Duration,\n /**\n * Maximum number of tokens.\n * A newly created bucket starts with this many tokens.\n * Useful to allow higher burst limits.\n */\n maxTokens: number,\n ): Algorithm<RegionContext> {\n const intervalDuration = ms(interval);\n return () => ({\n async limit(ctx: RegionContext, identifier: string, rate?: number) {\n if (ctx.cache) {\n const { blocked, reset } = ctx.cache.isBlocked(identifier);\n if (blocked) {\n return {\n success: false,\n limit: maxTokens,\n remaining: 0,\n reset: reset,\n pending: Promise.resolve(),\n };\n }\n }\n\n const now = Date.now();\n\n const incrementBy = rate ? 
Math.max(1, rate) : 1;\n\n const [remaining, reset] = (await ctx.redis.eval(\n tokenBucketLimitScript,\n [identifier],\n [maxTokens, intervalDuration, refillRate, now, incrementBy],\n )) as [number, number];\n\n const success = remaining >= 0;\n if (ctx.cache && !success) {\n ctx.cache.blockUntil(identifier, reset);\n }\n\n return {\n success,\n limit: maxTokens,\n remaining,\n reset,\n pending: Promise.resolve(),\n };\n },\n async getRemaining(ctx: RegionContext, identifier: string) {\n const remainingTokens = (await ctx.redis.eval(\n tokenBucketRemainingTokensScript,\n [identifier],\n [maxTokens],\n )) as number;\n return remainingTokens;\n },\n async resetTokens(ctx: RegionContext, identifier: string) {\n const pattern = identifier;\n if (ctx.cache) {\n ctx.cache.pop(identifier)\n }\n await ctx.redis.eval(resetScript, [pattern], [null]);\n },\n });\n }\n /**\n * cachedFixedWindow first uses the local cache to decide if a request may pass and then updates\n * it asynchronously.\n * This is experimental and not yet recommended for production use.\n *\n * @experimental\n *\n * Each request inside a fixed time increases a counter.\n * Once the counter reaches the maximum allowed number, all further requests are\n * rejected.\n *\n * **Pro:**\n *\n * - Newer requests are not starved by old ones.\n * - Low storage cost.\n *\n * **Con:**\n *\n * A burst of requests near the boundary of a window can result in a very\n * high request rate because two windows will be filled with requests quickly.\n *\n * @param tokens - How many requests a user can make in each time window.\n * @param window - A fixed timeframe\n */\n static cachedFixedWindow(\n /**\n * How many requests are allowed per window.\n */\n tokens: number,\n /**\n * The duration in which `tokens` requests are allowed.\n */\n window: Duration,\n ): Algorithm<RegionContext> {\n const windowDuration = ms(window);\n\n return () => ({\n async limit(ctx: RegionContext, identifier: string, rate?: number) {\n if (!ctx.cache) {\n throw new Error(\"This algorithm requires a cache\");\n }\n const bucket = Math.floor(Date.now() / windowDuration);\n const key = [identifier, bucket].join(\":\");\n const reset = (bucket + 1) * windowDuration;\n const incrementBy = rate ? Math.max(1, rate) : 1;\n\n const hit = typeof ctx.cache.get(key) === \"number\";\n if (hit) {\n const cachedTokensAfterUpdate = ctx.cache.incr(key);\n const success = cachedTokensAfterUpdate < tokens;\n\n const pending = success\n ? ctx.redis\n .eval(cachedFixedWindowLimitScript, [key], [windowDuration, incrementBy])\n .then((t) => {\n ctx.cache!.set(key, t as number);\n })\n : Promise.resolve();\n\n return {\n success,\n limit: tokens,\n remaining: tokens - cachedTokensAfterUpdate,\n reset: reset,\n pending,\n };\n }\n\n const usedTokensAfterUpdate = (await ctx.redis.eval(\n cachedFixedWindowLimitScript,\n [key],\n [windowDuration, incrementBy],\n )) as number;\n ctx.cache.set(key, usedTokensAfterUpdate);\n const remaining = tokens - usedTokensAfterUpdate;\n\n return {\n success: remaining >= 0,\n limit: tokens,\n remaining,\n reset: reset,\n pending: Promise.resolve(),\n };\n },\n async getRemaining(ctx: RegionContext, identifier: string) {\n if (!ctx.cache) {\n throw new Error(\"This algorithm requires a cache\");\n }\n\n const bucket = Math.floor(Date.now() / windowDuration);\n const key = [identifier, bucket].join(\":\");\n\n const hit = typeof ctx.cache.get(key) === \"number\";\n if (hit) {\n const cachedUsedTokens = ctx.cache.get(key) ?? 
0;\n return Math.max(0, tokens - cachedUsedTokens);\n }\n\n const usedTokens = (await ctx.redis.eval(\n cachedFixedWindowRemainingTokenScript,\n [key],\n [null],\n )) as number;\n return Math.max(0, tokens - usedTokens);\n },\n async resetTokens(ctx: RegionContext, identifier: string) {\n const pattern = [identifier, \"*\"].join(\":\");\n // Empty the cache\n if (!ctx.cache) {\n throw new Error(\"This algorithm requires a cache\");\n }\n ctx.cache.pop(identifier)\n await ctx.redis.eval(resetScript, [pattern], [null]);\n },\n });\n }\n}\n"],"mappings":";AAAA,SAAS,aAAa,qBAAgC;AAuB/C,IAAM,YAAN,MAAgB;AAAA,EACJ;AAAA,EACA,QAAQ;AAAA,EAEzB,YAAY,QAAyB;AACnC,SAAK,YAAY,IAAI,cAAc;AAAA;AAAA,MAEjC,OAAO,OAAO;AAAA,MACd,QAAQ;AAAA,MACR,QAAQ,OAAO,UAAU;AAAA,MACzB,WAAW;AAAA,IACb,CAAC;AAAA,EACH;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EASO,WAAW,KAAmC;AACnD,QAAI,OAAO,IAAI,QAAQ,aAAa;AAClC,aAAO,IAAI;AAAA,IACb;AACA,QAAI,OAAO,IAAI,OAAO,aAAa;AACjC,aAAO,IAAI;AAAA,IACb;AAEA,WAAO,CAAC;AAAA,EACV;AAAA,EAEA,MAAa,OAAO,OAA6B;AAC/C,UAAM,KAAK,UAAU,OAAO,KAAK,OAAO,KAAK;AAAA,EAC/C;AAAA,EAEA,MAAa,OACX,QACA,QACsB;AACtB,UAAM,iBAAiB,KAAK;AAAA,OAExB,KAAK,UAAU,UAAU,KAAK,IAAI,CAAC,IACjC,KAAK,UAAU,UAAU,MAAM,MAC9B,KAAK,KAAK;AAAA,MACf;AAAA,IACF;AACA,WAAO,KAAK,UAAU,6BAA6B,KAAK,OAAO,QAAQ,cAAc;AAAA,EACvF;AAAA,EAEA,MAAa,SAAS,SAAS,GAAkE;AAE/F,UAAM,iBAAiB,KAAK;AAAA,OAExB,KAAK,UAAU,UAAU,KAAK,IAAI,CAAC,IACjC,KAAK,UAAU,UAAU,MAAM,MAC9B,KAAK,KAAK;AAAA,MACf;AAAA,IACF;AACA,UAAM,UAAU,MAAM,KAAK,UAAU,kBAAkB,KAAK,OAAO,cAAc;AACjF,WAAO;AAAA,EACT;AAAA,EAEA,MAAa,iBACX,gBAAwB,SACF;AACtB,UAAM,SAAS,MAAM,KAAK,UAAU,6BAA6B,KAAK,OAAO,SAAS,cAAc;AACpG,WAAO;AAAA,EACT;AAAA,EAEA,MAAa,sBAAsB,gBAAwB,QAAiB;AAC1E,aAAS,UAAU;AACnB,WAAO,KAAK,UAAU,sBAAsB,KAAK,OAAO,gBAAgB,MAAM;AAAA,EAChF;AACF;;;AC/FO,IAAM,QAAN,MAAsC;AAAA;AAAA;AAAA;AAAA,EAI1B;AAAA,EAEjB,YAAY,OAA4B;AACtC,SAAK,QAAQ;AAAA,EACf;AAAA,EAEO,UAAU,YAAyD;AACxE,QAAI,CAAC,KAAK,MAAM,IAAI,UAAU,GAAG;AAC/B,aAAO,EAAE,SAAS,OAAO,OAAO,EAAE;AAAA,IACpC;AACA,UAAM,QAAQ,KAAK,MAAM,IAAI,UAAU;AACvC,QAAI,QAAQ,KAAK,IAAI,GAAG;AACtB,WAAK,MAAM,OAAO,UAAU;AAC5B,aAAO,EAAE,SAAS,OAAO,OAAO,EAAE;AAAA,IACpC;AAEA,WAAO,EAAE,SAAS,MAAM,MAAa;AAAA,EACvC;AAAA,EAEO,WAAW,YAAoB,OAAqB;AACzD,SAAK,MAAM,IAAI,YAAY,KAAK;AAAA,EAClC;AAAA,EAEO,IAAI,KAAa,OAAqB;AAC3C,SAAK,MAAM,IAAI,KAAK,KAAK;AAAA,EAC3B;AAAA,EACO,IAAI,KAA4B;AACrC,WAAO,KAAK,MAAM,IAAI,GAAG,KAAK;AAAA,EAChC;AAAA,EAEO,KAAK,KAAqB;AAC/B,QAAI,QAAQ,KAAK,MAAM,IAAI,GAAG,KAAK;AACnC,aAAS;AACT,SAAK,MAAM,IAAI,KAAK,KAAK;AACzB,WAAO;AAAA,EACT;AAAA,EAEO,IAAI,KAAmB;AAC5B,SAAK,MAAM,OAAO,GAAG;AAAA,EACvB;AAAA,EAEO,QAAc;AACnB,SAAK,MAAM,MAAM;AAAA,EACnB;AACF;;;AC5CO,SAAS,GAAG,GAAqB;AACtC,QAAM,QAAQ,EAAE,MAAM,wBAAwB;AAC9C,MAAI,CAAC,OAAO;AACV,UAAM,IAAI,MAAM,gCAAgC,CAAC,EAAE;AAAA,EACrD;AACA,QAAM,OAAO,OAAO,SAAS,MAAM,CAAC,CAAC;AACrC,QAAM,OAAO,MAAM,CAAC;AAEpB,UAAQ,MAAM;AAAA,IACZ,KAAK;AACH,aAAO;AAAA,IACT,KAAK;AACH,aAAO,OAAO;AAAA,IAChB,KAAK;AACH,aAAO,OAAO,MAAO;AAAA,IACvB,KAAK;AACH,aAAO,OAAO,MAAO,KAAK;AAAA,IAC5B,KAAK;AACH,aAAO,OAAO,MAAO,KAAK,KAAK;AAAA,IAEjC;AACE,YAAM,IAAI,MAAM,gCAAgC,CAAC,EAAE;AAAA,EACvD;AACF;;;AC7BO,IAAM,yBAAyB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAgB/B,IAAM,mCAAmC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AASzC,IAAM,2BAA2B;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAoCjC,IAAM,qCAAqC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;;;AC7D3C,IAAM,cAAc;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA
;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;;;ACkFpB,IAAe,YAAf,MAAmD;AAAA,EACrC;AAAA,EAEA;AAAA,EAEA;AAAA,EAEA;AAAA,EAEA;AAAA,EAEnB,YAAY,QAAmC;AAC7C,SAAK,MAAM,OAAO;AAClB,SAAK,UAAU,OAAO;AACtB,SAAK,UAAU,OAAO,WAAW;AACjC,SAAK,SAAS,OAAO,UAAU;AAC/B,SAAK,YAAY,OAAO,YACpB,IAAI,UAAU;AAAA,MACZ,OAAO,MAAM,QAAQ,KAAK,IAAI,KAAK,IAAI,KAAK,IAAI,MAAM,CAAC,IAAI,KAAK,IAAI;AAAA,MACpE,QAAQ,KAAK;AAAA,IACf,CAAC,IACD;AAEJ,QAAI,OAAO,0BAA0B,KAAK;AACxC,WAAK,IAAI,QAAQ,IAAI,MAAM,OAAO,cAAc;AAAA,IAClD,WAAW,OAAO,OAAO,mBAAmB,aAAa;AACvD,WAAK,IAAI,QAAQ,IAAI,MAAM,oBAAI,IAAI,CAAC;AAAA,IACtC;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAsCO,QAAQ,OACb,YACA,QAC+B;AAC/B,UAAM,MAAM,CAAC,KAAK,QAAQ,UAAU,EAAE,KAAK,GAAG;AAC9C,QAAI,YAAiB;AACrB,QAAI;AACF,YAAM,MAAoC,CAAC,KAAK,QAAQ,EAAE,MAAM,KAAK,KAAK,KAAK,KAAK,IAAI,CAAC;AACzF,UAAI,KAAK,UAAU,GAAG;AACpB,YAAI;AAAA,UACF,IAAI,QAAQ,CAAC,YAAY;AACvB,wBAAY,WAAW,MAAM;AAC3B,sBAAQ;AAAA,gBACN,SAAS;AAAA,gBACT,OAAO;AAAA,gBACP,WAAW;AAAA,gBACX,OAAO;AAAA,gBACP,SAAS,QAAQ,QAAQ;AAAA,cAC3B,CAAC;AAAA,YACH,GAAG,KAAK,OAAO;AAAA,UACjB,CAAC;AAAA,QACH;AAAA,MACF;AAEA,YAAM,MAAM,MAAM,QAAQ,KAAK,GAAG;AAClC,UAAI,KAAK,WAAW;AAClB,YAAI;AACF,gBAAM,MAAM,MAAM,KAAK,UAAU,WAAW,GAAG,IAAI;AACnD,gBAAM,aAAa,KAAK,UACrB,OAAO;AAAA,YACN;AAAA,YACA,MAAM,KAAK,IAAI;AAAA,YACf,SAAS,IAAI;AAAA,YACb,GAAG;AAAA,UACL,CAAC,EACA,MAAM,CAAC,QAAQ;AACd,oBAAQ,KAAK,8BAA8B,GAAG;AAAA,UAChD,CAAC;AACH,cAAI,UAAU,QAAQ,IAAI,CAAC,IAAI,SAAS,UAAU,CAAC;AAAA,QACrD,SAAS,KAAK;AACZ,kBAAQ,KAAK,8BAA8B,GAAG;AAAA,QAChD;AAAA,MACF;AACA,aAAO;AAAA,IACT,UAAE;AACA,UAAI,WAAW;AACb,qBAAa,SAAS;AAAA,MACxB;AAAA,IACF;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAwBO,kBAAkB,OAOvB,YAKA,YAC+B;AAC/B,QAAI,WAAW,GAAG;AAChB,YAAM,IAAI,MAAM,0BAA0B;AAAA,IAC5C;AACA,QAAI;AAEJ,UAAM,WAAW,KAAK,IAAI,IAAI;AAC9B,WAAO,MAAM;AACX,YAAM,MAAM,KAAK,MAAM,UAAU;AACjC,UAAI,IAAI,SAAS;AACf;AAAA,MACF;AACA,UAAI,IAAI,UAAU,GAAG;AACnB,cAAM,IAAI,MAAM,wBAAwB;AAAA,MAC1C;AAEA,YAAM,OAAO,KAAK,IAAI,IAAI,OAAO,QAAQ,IAAI,KAAK,IAAI;AACtD,YAAM,IAAI,QAAQ,CAAC,MAAM,WAAW,GAAG,IAAI,CAAC;AAE5C,UAAI,KAAK,IAAI,IAAI,UAAU;AACzB;AAAA,MACF;AAAA,IACF;AACA,WAAO;AAAA,EACT;AAAA,EAEO,kBAAkB,OAAO,eAAuB;AACrD,UAAM,UAAU,CAAC,KAAK,QAAQ,UAAU,EAAE,KAAK,GAAG;AAClD,UAAM,KAAK,QAAQ,EAAE,YAAY,KAAK,KAAK,OAAO;AAAA,EACpD;AAAA,EAEO,eAAe,OAAO,eAAwC;AACnE,UAAM,UAAU,CAAC,KAAK,QAAQ,UAAU,EAAE,KAAK,GAAG;AAElD,WAAO,MAAM,KAAK,QAAQ,EAAE,aAAa,KAAK,KAAK,OAAO;AAAA,EAC5D;AACF;;;AC/PA,SAAS,WAAmB;AAC1B,MAAI,SAAS;AACb,QAAM,aAAa;AACnB,QAAM,mBAAmB,WAAW;AACpC,WAAS,IAAI,GAAG,IAAI,IAAI,KAAK;AAC3B,cAAU,WAAW,OAAO,KAAK,MAAM,KAAK,OAAO,IAAI,gBAAgB,CAAC;AAAA,EAC1E;AACA,SAAO;AACT;AAwEO,IAAM,uBAAN,cAAmC,UAA8B;AAAA;AAAA;AAAA;AAAA,EAItE,YAAY,QAAoC;AAC9C,UAAM;AAAA,MACJ,QAAQ,OAAO;AAAA,MACf,SAAS,OAAO;AAAA,MAChB,SAAS,OAAO;AAAA,MAChB,WAAW,OAAO;AAAA,MAClB,KAAK;AAAA,QACH,OAAO,OAAO;AAAA,QACd,OAAO,OAAO,iBAAiB,IAAI,MAAM,OAAO,cAAc,IAAI;AAAA,MACpE;AAAA,IACF,CAAC;AAAA,EACH;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAoBA,OAAO,YAIL,QAIA,QAC+B;AAC/B,UAAM,iBAAiB,GAAG,MAAM;AAEhC,WAAO,OAAO;AAAA,MACZ,MAAM,MAAM,KAAyB,YAAoB,MAAe;AACtE,YAAI,IAAI,OAAO;AACb,gBAAM,EAAE,SAAS,OAAAA,OAAM,IAAI,IAAI,MAAM,UAAU,UAAU;AACzD,cAAI,SAAS;AACX,mBAAO;AAAA,cACL,SAAS;AAAA,cACT,OAAO;AAAA,cACP,WAAW;AAAA,cACX,OAAOA;AAAA,cACP,SAAS,QAAQ,QAAQ;AAAA,YAC3B;AAAA,UACF;AAAA,QACF;AAEA,cAAM,YAAY,SAAS;AAC3B,cAAM,SAAS,KAAK,MAAM,KAAK,IAAI,IAAI,cAAc;AACrD,cAAM,MAAM,CAAC,YAAY,MAAM,E
AAE,KAAK,GAAG;AACzC,cAAM,cAAc,OAAO,KAAK,IAAI,GAAG,IAAI,IAAI;AAE/C,cAAM,MAAsD,IAAI,MAAM,IAAI,CAAC,WAAW;AAAA,UACpF;AAAA,UACA,SAAS,MAAM;AAAA,YACb;AAAA,YACA,CAAC,GAAG;AAAA,YACJ,CAAC,WAAW,gBAAgB,WAAW;AAAA,UACzC;AAAA,QACF,EAAE;AAGF,cAAM,gBAAgB,MAAM,QAAQ,IAAI,IAAI,IAAI,CAAC,MAAM,EAAE,OAAO,CAAC;AAEjE,cAAM,aAAa,cAAc,OAAO,CAAC,WAAmB,WAAW,UAAU;AAC/E,cAAI,cAAc;AAClB,cAAI,QAAQ,GAAG;AACb,0BAAc,OAAO,SAAS,SAAS;AAAA,UACzC;AAEA,iBAAO,YAAY;AAAA,QACrB,GAAG,CAAC;AAEJ,cAAM,YAAY,SAAS;AAK3B,uBAAe,OAAO;AACpB,gBAAM,gBAAgB,MAAM,QAAQ,IAAI,IAAI,IAAI,CAAC,MAAM,EAAE,OAAO,CAAC;AAEjE,gBAAM,SAAS,MAAM;AAAA,YACnB,IAAI;AAAA,cACF,cACG,QAAQ,CAAC,MAAM,CAAC,EAChB,OAAO,CAAC,KAAe,MAAM,UAAU;AACtC,oBAAI,QAAQ,MAAM,GAAG;AACnB,sBAAI,KAAK,IAAI;AAAA,gBACf;AACA,uBAAO;AAAA,cACT,GAAG,CAAC,CAAC;AAAA,YACT,EAAE,OAAO;AAAA,UACX;AAEA,qBAAW,MAAM,KAAK;AACpB,kBAAM,gBAAgB,MAAM,GAAG,SAAS;AAAA,cACtC,CAAC,WAAmB,WAAW,UAAU;AACvC,oBAAI,cAAc;AAClB,oBAAI,QAAQ,GAAG;AACb,gCAAc,OAAO,SAAS,SAAS;AAAA,gBACzC;AAEA,uBAAO,YAAY;AAAA,cACrB;AAAA,cACA;AAAA,YACF;AAEA,kBAAM,SAAS,MAAM,GAAG,SAAS,OAAO,CAAC,KAAe,WAAW,UAAU;AAC3E,kBAAI,QAAQ,MAAM,GAAG;AACnB,oBAAI,KAAK,SAAS;AAAA,cACpB;AACA,qBAAO;AAAA,YACT,GAAG,CAAC,CAAC;AAKL,gBAAI,gBAAgB,QAAQ;AAC1B;AAAA,YACF;AACA,kBAAM,OAAO,OAAO,OAAO,CAAC,OAAO,CAAC,MAAM,SAAS,EAAE,CAAC;AAItD,gBAAI,KAAK,WAAW,GAAG;AACrB;AAAA,YACF;AAEA,uBAAWC,cAAa,MAAM;AAC5B,oBAAM,GAAG,MAAM,KAAK,KAAK,EAAE,CAACA,UAAS,GAAG,YAAY,CAAC;AAAA,YACvD;AAAA,UACF;AAAA,QACF;AAMA,cAAM,UAAU,YAAY;AAC5B,cAAM,SAAS,SAAS,KAAK;AAE7B,YAAI,IAAI,SAAS,CAAC,SAAS;AACzB,cAAI,MAAM,WAAW,YAAY,KAAK;AAAA,QACxC;AACA,eAAO;AAAA,UACL;AAAA,UACA,OAAO;AAAA,UACP;AAAA,UACA;AAAA,UACA,SAAS,KAAK;AAAA,QAChB;AAAA,MACF;AAAA,MACA,MAAM,aAAa,KAAyB,YAAoB;AAC9D,cAAM,SAAS,KAAK,MAAM,KAAK,IAAI,IAAI,cAAc;AACrD,cAAM,MAAM,CAAC,YAAY,MAAM,EAAE,KAAK,GAAG;AAEzC,cAAM,MAAsD,IAAI,MAAM,IAAI,CAAC,WAAW;AAAA,UACpF;AAAA,UACA,SAAS,MAAM,KAAK,kCAAkC,CAAC,GAAG,GAAG,CAAC,IAAI,CAAC;AAAA,QACrE,EAAE;AAGF,cAAM,gBAAgB,MAAM,QAAQ,IAAI,IAAI,IAAI,CAAC,MAAM,EAAE,OAAO,CAAC;AACjE,cAAM,aAAa,cAAc,OAAO,CAAC,WAAmB,WAAW,UAAU;AAC/E,cAAI,cAAc;AAClB,cAAI,QAAQ,GAAG;AACb,0BAAc,OAAO,SAAS,SAAS;AAAA,UACzC;AAEA,iBAAO,YAAY;AAAA,QACrB,GAAG,CAAC;AAEJ,eAAO,KAAK,IAAI,GAAG,SAAS,UAAU;AAAA,MACxC;AAAA,MACA,MAAM,YAAY,KAAyB,YAAoB;AAC7D,cAAM,UAAU,CAAC,YAAY,GAAG,EAAE,KAAK,GAAG;AAC1C,YAAI,IAAI,OAAO;AACb,cAAI,MAAM,IAAI,UAAU;AAAA,QAC1B;AACA,mBAAW,MAAM,IAAI,OAAO;AAC1B,gBAAM,GAAG,KAAK,aAAa,CAAC,OAAO,GAAG,CAAC,IAAI,CAAC;AAAA,QAC9C;AAAA,MACF;AAAA,IACF;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAkBA,OAAO,cAIL,QAIA,QAC+B;AAC/B,UAAM,aAAa,GAAG,MAAM;AAE5B,UAAM,iBAAiB,GAAG,MAAM;AAEhC,WAAO,OAAO;AAAA,MACZ,MAAM,MAAM,KAAyB,YAAoB,MAAe;AActE,cAAM,YAAY,SAAS;AAC3B,cAAM,MAAM,KAAK,IAAI;AAErB,cAAM,gBAAgB,KAAK,MAAM,MAAM,UAAU;AACjD,cAAM,aAAa,CAAC,YAAY,aAAa,EAAE,KAAK,GAAG;AACvD,cAAM,iBAAiB,gBAAgB;AACvC,cAAM,cAAc,CAAC,YAAY,cAAc,EAAE,KAAK,GAAG;AACzD,cAAM,cAAc,OAAO,KAAK,IAAI,GAAG,IAAI,IAAI;AAE/C,cAAM,MAAM,IAAI,MAAM,IAAI,CAAC,WAAW;AAAA,UACpC;AAAA,UACA,SAAS,MAAM;AAAA,YACb;AAAA,YACA,CAAC,YAAY,WAAW;AAAA,YACxB,CAAC,QAAQ,KAAK,gBAAgB,WAAW,WAAW;AAAA;AAAA,UAEtD;AAAA,QACF,EAAE;AAEF,cAAM,sBAAuB,MAAM,iBAAkB;AACrD,cAAM,CAAC,SAAS,UAAU,OAAO,IAAI,MAAM,QAAQ,IAAI,IAAI,IAAI,CAAC,MAAM,EAAE,OAAO,CAAC;AAIhF,YAAI,SAAS;AACX,kBAAQ,KAAK,WAAW,YAAY,SAAS,CAAC;AAAA,QAChD;AAEA,cAAM,qBAAqB,SAAS,OAAO,CAAC,WAAmB,WAAW,UAAU;AAClF,cAAI,cAAc;AAClB,cAAI,QAAQ,GAAG;AACb,0BAAc,OAAO,SAAS,SAAS;AAAA,UACzC;AAEA,iBAAO,YAAY;AAAA,QACrB,GAAG,CAAC;AAEJ,cAAM,oBAAoB,QAAQ,OAAO,CAAC,WAAmB,WAAW,UAAU;AAChF,cAAI,cAAc;AAClB,cAAI,QAAQ,GAAG;AACb,0BAAc,OAAO,SAAS,SAAS;AAAA,UACzC;AAEA,iBAAO,YAAY;AAAA,QACrB,GAAG,CAAC;AAEJ,cA
AM,sBAAsB,KAAK,KAAK,sBAAsB,IAAI,oBAAoB;AAEpF,cAAM,aAAa,sBAAsB;AAEzC,cAAM,YAAY,SAAS;AAK3B,uBAAe,OAAO;AACpB,gBAAM,MAAM,MAAM,QAAQ,IAAI,IAAI,IAAI,CAAC,MAAM,EAAE,OAAO,CAAC;AAEvD,gBAAM,gBAAgB,MAAM;AAAA,YAC1B,IAAI;AAAA,cACF,IACG,QAAQ,CAAC,CAACC,QAAO,MAAMA,QAAO,EAC9B,OAAO,CAAC,KAAe,MAAM,UAAU;AACtC,oBAAI,QAAQ,MAAM,GAAG;AACnB,sBAAI,KAAK,IAAI;AAAA,gBACf;AACA,uBAAO;AAAA,cACT,GAAG,CAAC,CAAC;AAAA,YACT,EAAE,OAAO;AAAA,UACX;AAEA,qBAAW,MAAM,KAAK;AACpB,kBAAM,CAACA,UAAS,WAAW,QAAQ,IAAI,MAAM,GAAG;AAChD,kBAAM,QAAQA,SAAQ,OAAO,CAAC,KAAe,WAAW,UAAU;AAChE,kBAAI,QAAQ,MAAM,GAAG;AACnB,oBAAI,KAAK,SAAS;AAAA,cACpB;AACA,qBAAO;AAAA,YACT,GAAG,CAAC,CAAC;AAEL,kBAAM,eAAeA,SAAQ,OAAO,CAAC,WAAmB,WAAW,UAAU;AAC3E,kBAAI,cAAc;AAClB,kBAAI,QAAQ,GAAG;AACb,8BAAc,OAAO,SAAS,SAAS;AAAA,cACzC;AAEA,qBAAO,YAAY;AAAA,YACrB,GAAG,CAAC;AAKJ,gBAAI,gBAAgB,QAAQ;AAC1B;AAAA,YACF;AACA,kBAAM,OAAO,cAAc,OAAO,CAAC,OAAO,CAAC,MAAM,SAAS,EAAE,CAAC;AAI7D,gBAAI,KAAK,WAAW,GAAG;AACrB;AAAA,YACF;AAEA,uBAAWD,cAAa,MAAM;AAC5B,oBAAM,GAAG,MAAM,KAAK,YAAY,EAAE,CAACA,UAAS,GAAG,YAAY,CAAC;AAAA,YAC9D;AAAA,UACF;AAAA,QACF;AAGA,cAAM,SAAS,gBAAgB,KAAK;AACpC,YAAI,IAAI,SAAS,CAAC,SAAS;AACzB,cAAI,MAAM,WAAW,YAAY,KAAK;AAAA,QACxC;AACA,eAAO;AAAA,UACL,SAAS,QAAQ,OAAO;AAAA,UACxB,OAAO;AAAA,UACP,WAAW,KAAK,IAAI,GAAG,SAAS;AAAA,UAChC;AAAA,UACA,SAAS,KAAK;AAAA,QAChB;AAAA,MACF;AAAA,MACA,MAAM,aAAa,KAAyB,YAAoB;AAC9D,cAAM,MAAM,KAAK,IAAI;AAErB,cAAM,gBAAgB,KAAK,MAAM,MAAM,UAAU;AACjD,cAAM,aAAa,CAAC,YAAY,aAAa,EAAE,KAAK,GAAG;AACvD,cAAM,iBAAiB,gBAAgB;AACvC,cAAM,cAAc,CAAC,YAAY,cAAc,EAAE,KAAK,GAAG;AAEzD,cAAM,MAAM,IAAI,MAAM,IAAI,CAAC,WAAW;AAAA,UACpC;AAAA,UACA,SAAS,MAAM;AAAA,YACb;AAAA,YACA,CAAC,YAAY,WAAW;AAAA,YACxB,CAAC,KAAK,UAAU;AAAA;AAAA,UAElB;AAAA,QACF,EAAE;AAEF,cAAM,aAAa,MAAM,QAAQ,IAAI,IAAI,IAAI,CAAC,MAAM,EAAE,OAAO,CAAC;AAC9D,eAAO,KAAK,IAAI,GAAG,SAAS,UAAU;AAAA,MACxC;AAAA,MACA,MAAM,YAAY,KAAyB,YAAoB;AAC7D,cAAM,UAAU,CAAC,YAAY,GAAG,EAAE,KAAK,GAAG;AAC1C,YAAI,IAAI,OAAO;AACb,cAAI,MAAM,IAAI,UAAU;AAAA,QAC1B;AACA,mBAAW,MAAM,IAAI,OAAO;AAC1B,gBAAM,GAAG,KAAK,aAAa,CAAC,OAAO,GAAG,CAAC,IAAI,CAAC;AAAA,QAC9C;AAAA,MACF;AAAA,IACF;AAAA,EACF;AACF;;;AC/eO,IAAME,0BAAyB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAe/B,IAAMC,oCAAmC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAWzC,IAAMC,4BAA2B;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAiCjC,IAAMC,sCAAqC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAuB3C,IAAM,yBAAyB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAwC/B,IAAM,mCAAmC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAazC,IAAM,+BAA+B;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAerC,IAAM,wCAAwC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;;;AC7D9C,IAAM,kBAAN,cAA8B,UAAyB;AAAA;AAAA;AAAA;AAAA,EAK5D,YAAY,QAA+B;AACzC,UAAM;AAAA,MACJ,QAAQ,OAAO;AAAA,MACf,SAAS,OAAO;AAAA,MAChB,SAAS,OAAO;AAAA,MAChB,WAAW,OAAO;AAAA,MAClB,KAAK;AAAA,QACH,OAAO,OAAO;AAAA,MAChB;AAAA,MACA,gBAAgB,OAAO;AAAA,IACzB,CAAC;AAAA,EACH;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAoBA,OAAO,YAIL,QAIA,QAC0B;AAC1B,UAAM,iBAAiB,GAAG,MAAM;AAChC,WAAO,OAAO;AAAA,MACZ,MAAM,MAAM,KAAoB,YAAoB,MAAe;AACjE,cAAM,SAAS,KAAK,MAAM,KAAK,IAAI,IAAI,cAAc;AACrD,cAAM,MAAM,CAAC,YAAY,MAAM,EAAE,KAAK,GAAG;AACzC,YAAI,IAAI,OAAO;AACb,gBAAM,EAAE,SAAS,OAAAC,OAAM,IAAI,IAAI,MAAM,UAAU,UAAU;AACzD,cAAI,S
AAS;AACX,mBAAO;AAAA,cACL,SAAS;AAAA,cACT,OAAO;AAAA,cACP,WAAW;AAAA,cACX,OAAOA;AAAA,cACP,SAAS,QAAQ,QAAQ;AAAA,YAC3B;AAAA,UACF;AAAA,QACF;AAEA,cAAM,cAAc,OAAO,KAAK,IAAI,GAAG,IAAI,IAAI;AAE/C,cAAM,wBAAyB,MAAM,IAAI,MAAM;AAAA,UAC7CC;AAAA,UACA,CAAC,GAAG;AAAA,UACJ,CAAC,gBAAgB,WAAW;AAAA,QAC9B;AAEA,cAAM,UAAU,yBAAyB;AAEzC,cAAM,kBAAkB,KAAK,IAAI,GAAG,SAAS,qBAAqB;AAElE,cAAM,SAAS,SAAS,KAAK;AAC7B,YAAI,IAAI,SAAS,CAAC,SAAS;AACzB,cAAI,MAAM,WAAW,YAAY,KAAK;AAAA,QACxC;AAEA,eAAO;AAAA,UACL;AAAA,UACA,OAAO;AAAA,UACP,WAAW;AAAA,UACX;AAAA,UACA,SAAS,QAAQ,QAAQ;AAAA,QAC3B;AAAA,MACF;AAAA,MACA,MAAM,aAAa,KAAoB,YAAoB;AACzD,cAAM,SAAS,KAAK,MAAM,KAAK,IAAI,IAAI,cAAc;AACrD,cAAM,MAAM,CAAC,YAAY,MAAM,EAAE,KAAK,GAAG;AAEzC,cAAM,aAAc,MAAM,IAAI,MAAM;AAAA,UAClCC;AAAA,UACA,CAAC,GAAG;AAAA,UACJ,CAAC,IAAI;AAAA,QACP;AAEA,eAAO,KAAK,IAAI,GAAG,SAAS,UAAU;AAAA,MACxC;AAAA,MACA,MAAM,YAAY,KAAoB,YAAoB;AACxD,cAAM,UAAU,CAAC,YAAY,GAAG,EAAE,KAAK,GAAG;AAC1C,YAAI,IAAI,OAAO;AACb,cAAI,MAAM,IAAI,UAAU;AAAA,QAC1B;AACA,cAAM,IAAI,MAAM,KAAK,aAAa,CAAC,OAAO,GAAG,CAAC,IAAI,CAAC;AAAA,MACrD;AAAA,IACF;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAkBA,OAAO,cAIL,QAIA,QAC0B;AAC1B,UAAM,aAAa,GAAG,MAAM;AAC5B,WAAO,OAAO;AAAA,MACZ,MAAM,MAAM,KAAoB,YAAoB,MAAe;AACjE,cAAM,MAAM,KAAK,IAAI;AAErB,cAAM,gBAAgB,KAAK,MAAM,MAAM,UAAU;AACjD,cAAM,aAAa,CAAC,YAAY,aAAa,EAAE,KAAK,GAAG;AACvD,cAAM,iBAAiB,gBAAgB;AACvC,cAAM,cAAc,CAAC,YAAY,cAAc,EAAE,KAAK,GAAG;AAEzD,YAAI,IAAI,OAAO;AACb,gBAAM,EAAE,SAAS,OAAAF,OAAM,IAAI,IAAI,MAAM,UAAU,UAAU;AACzD,cAAI,SAAS;AACX,mBAAO;AAAA,cACL,SAAS;AAAA,cACT,OAAO;AAAA,cACP,WAAW;AAAA,cACX,OAAOA;AAAA,cACP,SAAS,QAAQ,QAAQ;AAAA,YAC3B;AAAA,UACF;AAAA,QACF;AAEA,cAAM,cAAc,OAAO,KAAK,IAAI,GAAG,IAAI,IAAI;AAE/C,cAAM,kBAAmB,MAAM,IAAI,MAAM;AAAA,UACvCG;AAAA,UACA,CAAC,YAAY,WAAW;AAAA,UACxB,CAAC,QAAQ,KAAK,YAAY,WAAW;AAAA,QACvC;AAEA,cAAM,UAAU,mBAAmB;AAEnC,cAAM,SAAS,gBAAgB,KAAK;AACpC,YAAI,IAAI,SAAS,CAAC,SAAS;AACzB,cAAI,MAAM,WAAW,YAAY,KAAK;AAAA,QACxC;AACA,eAAO;AAAA,UACL;AAAA,UACA,OAAO;AAAA,UACP,WAAW,KAAK,IAAI,GAAG,eAAe;AAAA,UACtC;AAAA,UACA,SAAS,QAAQ,QAAQ;AAAA,QAC3B;AAAA,MACF;AAAA,MACA,MAAM,aAAa,KAAoB,YAAoB;AACzD,cAAM,MAAM,KAAK,IAAI;AACrB,cAAM,gBAAgB,KAAK,MAAM,MAAM,UAAU;AACjD,cAAM,aAAa,CAAC,YAAY,aAAa,EAAE,KAAK,GAAG;AACvD,cAAM,iBAAiB,gBAAgB;AACvC,cAAM,cAAc,CAAC,YAAY,cAAc,EAAE,KAAK,GAAG;AAEzD,cAAM,aAAc,MAAM,IAAI,MAAM;AAAA,UAClCC;AAAA,UACA,CAAC,YAAY,WAAW;AAAA,UACxB,CAAC,KAAK,UAAU;AAAA,QAClB;AAEA,eAAO,KAAK,IAAI,GAAG,SAAS,UAAU;AAAA,MACxC;AAAA,MACA,MAAM,YAAY,KAAoB,YAAoB;AACxD,cAAM,UAAU,CAAC,YAAY,GAAG,EAAE,KAAK,GAAG;AAC1C,YAAI,IAAI,OAAO;AACb,cAAI,MAAM,IAAI,UAAU;AAAA,QAC1B;AACA,cAAM,IAAI,MAAM,KAAK,aAAa,CAAC,OAAO,GAAG,CAAC,IAAI,CAAC;AAAA,MACrD;AAAA,IACF;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAeA,OAAO,YAML,YAIA,UAMA,WAC0B;AAC1B,UAAM,mBAAmB,GAAG,QAAQ;AACpC,WAAO,OAAO;AAAA,MACZ,MAAM,MAAM,KAAoB,YAAoB,MAAe;AACjE,YAAI,IAAI,OAAO;AACb,gBAAM,EAAE,SAAS,OAAAJ,OAAM,IAAI,IAAI,MAAM,UAAU,UAAU;AACzD,cAAI,SAAS;AACX,mBAAO;AAAA,cACL,SAAS;AAAA,cACT,OAAO;AAAA,cACP,WAAW;AAAA,cACX,OAAOA;AAAA,cACP,SAAS,QAAQ,QAAQ;AAAA,YAC3B;AAAA,UACF;AAAA,QACF;AAEA,cAAM,MAAM,KAAK,IAAI;AAErB,cAAM,cAAc,OAAO,KAAK,IAAI,GAAG,IAAI,IAAI;AAE/C,cAAM,CAAC,WAAW,KAAK,IAAK,MAAM,IAAI,MAAM;AAAA,UAC1C;AAAA,UACA,CAAC,UAAU;AAAA,UACX,CAAC,WAAW,kBAAkB,YAAY,KAAK,WAAW;AAAA,QAC5D;AAEA,cAAM,UAAU,aAAa;AAC7B,YAAI,IAAI,SAAS,CAAC,SAAS;AACzB,cAAI,MAAM,WAAW,YAAY,KAAK;AAAA,QACxC;AAEA,eAAO;AAAA,UACL;AAAA,UACA,OAAO;AAAA,UACP;AAAA,UACA;AAAA,UACA,SAAS,QAAQ,QAAQ;AAAA,QAC3B;AAAA,MACF;AAAA,MACA,MAAM,aAAa,KAAoB,YAAoB;AACzD,cAAM,kBAAmB,MAAM,IAAI,MAAM;AAAA,UACvC;AAAA,UACA,CAAC,UAAU;AAAA,UACX,CAAC,SAAS;AAA
A,QACZ;AACA,eAAO;AAAA,MACT;AAAA,MACA,MAAM,YAAY,KAAoB,YAAoB;AACxD,cAAM,UAAU;AAChB,YAAI,IAAI,OAAO;AACb,cAAI,MAAM,IAAI,UAAU;AAAA,QAC1B;AACA,cAAM,IAAI,MAAM,KAAK,aAAa,CAAC,OAAO,GAAG,CAAC,IAAI,CAAC;AAAA,MACrD;AAAA,IACF;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAyBA,OAAO,kBAIL,QAIA,QAC0B;AAC1B,UAAM,iBAAiB,GAAG,MAAM;AAEhC,WAAO,OAAO;AAAA,MACZ,MAAM,MAAM,KAAoB,YAAoB,MAAe;AACjE,YAAI,CAAC,IAAI,OAAO;AACd,gBAAM,IAAI,MAAM,iCAAiC;AAAA,QACnD;AACA,cAAM,SAAS,KAAK,MAAM,KAAK,IAAI,IAAI,cAAc;AACrD,cAAM,MAAM,CAAC,YAAY,MAAM,EAAE,KAAK,GAAG;AACzC,cAAM,SAAS,SAAS,KAAK;AAC7B,cAAM,cAAc,OAAO,KAAK,IAAI,GAAG,IAAI,IAAI;AAE/C,cAAM,MAAM,OAAO,IAAI,MAAM,IAAI,GAAG,MAAM;AAC1C,YAAI,KAAK;AACP,gBAAM,0BAA0B,IAAI,MAAM,KAAK,GAAG;AAClD,gBAAM,UAAU,0BAA0B;AAE1C,gBAAM,UAAU,UACZ,IAAI,MACH,KAAK,8BAA8B,CAAC,GAAG,GAAG,CAAC,gBAAgB,WAAW,CAAC,EACvE,KAAK,CAAC,MAAM;AACX,gBAAI,MAAO,IAAI,KAAK,CAAW;AAAA,UACjC,CAAC,IACD,QAAQ,QAAQ;AAEpB,iBAAO;AAAA,YACL;AAAA,YACA,OAAO;AAAA,YACP,WAAW,SAAS;AAAA,YACpB;AAAA,YACA;AAAA,UACF;AAAA,QACF;AAEA,cAAM,wBAAyB,MAAM,IAAI,MAAM;AAAA,UAC7C;AAAA,UACA,CAAC,GAAG;AAAA,UACJ,CAAC,gBAAgB,WAAW;AAAA,QAC9B;AACA,YAAI,MAAM,IAAI,KAAK,qBAAqB;AACxC,cAAM,YAAY,SAAS;AAE3B,eAAO;AAAA,UACL,SAAS,aAAa;AAAA,UACtB,OAAO;AAAA,UACP;AAAA,UACA;AAAA,UACA,SAAS,QAAQ,QAAQ;AAAA,QAC3B;AAAA,MACF;AAAA,MACA,MAAM,aAAa,KAAoB,YAAoB;AACzD,YAAI,CAAC,IAAI,OAAO;AACd,gBAAM,IAAI,MAAM,iCAAiC;AAAA,QACnD;AAEA,cAAM,SAAS,KAAK,MAAM,KAAK,IAAI,IAAI,cAAc;AACrD,cAAM,MAAM,CAAC,YAAY,MAAM,EAAE,KAAK,GAAG;AAEzC,cAAM,MAAM,OAAO,IAAI,MAAM,IAAI,GAAG,MAAM;AAC1C,YAAI,KAAK;AACP,gBAAM,mBAAmB,IAAI,MAAM,IAAI,GAAG,KAAK;AAC/C,iBAAO,KAAK,IAAI,GAAG,SAAS,gBAAgB;AAAA,QAC9C;AAEA,cAAM,aAAc,MAAM,IAAI,MAAM;AAAA,UAClC;AAAA,UACA,CAAC,GAAG;AAAA,UACJ,CAAC,IAAI;AAAA,QACP;AACA,eAAO,KAAK,IAAI,GAAG,SAAS,UAAU;AAAA,MACxC;AAAA,MACA,MAAM,YAAY,KAAoB,YAAoB;AACxD,cAAM,UAAU,CAAC,YAAY,GAAG,EAAE,KAAK,GAAG;AAE1C,YAAI,CAAC,IAAI,OAAO;AACd,gBAAM,IAAI,MAAM,iCAAiC;AAAA,QACnD;AACA,YAAI,MAAM,IAAI,UAAU;AACxB,cAAM,IAAI,MAAM,KAAK,aAAa,CAAC,OAAO,GAAG,CAAC,IAAI,CAAC;AAAA,MACrD;AAAA,IACF;AAAA,EACF;AACF;","names":["reset","requestId","current","fixedWindowLimitScript","fixedWindowRemainingTokensScript","slidingWindowLimitScript","slidingWindowRemainingTokensScript","reset","fixedWindowLimitScript","fixedWindowRemainingTokensScript","slidingWindowLimitScript","slidingWindowRemainingTokensScript"]}
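The bundled source embedded above documents the public surface of the `Ratelimit` class: `limit()` accepts an optional `rate` to consume more than one token per call, `blockUntilReady()` waits until a request may pass or a deadline expires, and `resetUsedTokens()` / `getRemaining()` manage usage without consuming tokens. A minimal sketch of how these fit together, assuming Upstash Redis credentials in the environment and a hypothetical `identifier`:

```ts
import { Ratelimit } from "@upstash/ratelimit";
import { Redis } from "@upstash/redis";

// Hypothetical identifier (user id, API token, or IP address).
const identifier = "user_123";

const ratelimit = new Ratelimit({
  redis: Redis.fromEnv(),
  limiter: Ratelimit.slidingWindow(10, "10 s"),
});

export async function handle() {
  // Consume 2 tokens in one call instead of the default 1.
  const { success, remaining, reset, pending } = await ratelimit.limit(identifier, { rate: 2 });
  // In serverless runtimes, await `pending` (or hand it to waitUntil) so that
  // background work such as analytics or multi-region sync is not cut off.
  await pending;

  if (!success) {
    // Alternatively, wait up to 30 seconds for the limit to reset.
    const retry = await ratelimit.blockUntilReady(identifier, 30_000);
    return retry.success;
  }

  // Inspect or reset usage without consuming tokens.
  const left = await ratelimit.getRemaining(identifier);
  await ratelimit.resetUsedTokens(identifier);
  return { remaining, reset, left };
}
```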
package/package.json CHANGED
@@ -1 +1 @@
1
- { "name": "@upstash/ratelimit", "version": "v1.1.0", "main": "./dist/index.js", "types": "./dist/index.d.ts", "files": [ "dist" ], "scripts": { "build": "tsup", "test": "bun test src --coverage", "fmt": "bunx @biomejs/biome check --apply ./src" }, "devDependencies": { "@upstash/redis": "^1.28.3", "bun-types": "latest", "rome": "^11.0.0", "turbo": "^1.10.15", "tsup": "^7.2.0", "typescript": "^5.0.0" }, "dependencies": { "@upstash/core-analytics": "^0.0.7" }, "license": "MIT" }
1
+ { "name": "@upstash/ratelimit", "version": "v1.1.2-canary", "main": "./dist/index.js", "types": "./dist/index.d.ts", "files": [ "dist" ], "scripts": { "build": "tsup", "test": "bun test src --coverage", "fmt": "bunx @biomejs/biome check --apply ./src" }, "devDependencies": { "@upstash/redis": "^1.28.3", "bun-types": "latest", "rome": "^11.0.0", "turbo": "^1.10.15", "tsup": "^7.2.0", "typescript": "^5.0.0" }, "dependencies": { "@upstash/core-analytics": "^0.0.8" }, "license": "MIT" }