@opennextjs/cloudflare 1.0.4 → 1.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/api/cloudflare-context.d.ts +5 -0
- package/dist/api/config.d.ts +5 -1
- package/dist/api/config.js +8 -1
- package/dist/api/durable-objects/bucket-cache-purge.d.ts +7 -0
- package/dist/api/durable-objects/bucket-cache-purge.js +75 -0
- package/dist/api/durable-objects/bucket-cache-purge.spec.d.ts +1 -0
- package/dist/api/durable-objects/bucket-cache-purge.spec.js +121 -0
- package/dist/api/overrides/cache-purge/index.d.ts +12 -0
- package/dist/api/overrides/cache-purge/index.js +26 -0
- package/dist/api/overrides/internal.d.ts +2 -0
- package/dist/api/overrides/internal.js +52 -0
- package/dist/api/overrides/queue/do-queue.js +1 -1
- package/dist/api/overrides/queue/queue-cache.d.ts +36 -0
- package/dist/api/overrides/queue/queue-cache.js +93 -0
- package/dist/api/overrides/queue/queue-cache.spec.d.ts +1 -0
- package/dist/api/overrides/queue/queue-cache.spec.js +92 -0
- package/dist/api/overrides/tag-cache/d1-next-tag-cache.js +2 -1
- package/dist/api/overrides/tag-cache/do-sharded-tag-cache.d.ts +20 -0
- package/dist/api/overrides/tag-cache/do-sharded-tag-cache.js +70 -7
- package/dist/api/overrides/tag-cache/do-sharded-tag-cache.spec.js +81 -1
- package/dist/cli/build/open-next/compileDurableObjects.js +1 -0
- package/dist/cli/templates/worker.d.ts +1 -0
- package/dist/cli/templates/worker.js +2 -0
- package/package.json +2 -2
package/dist/api/cloudflare-context.d.ts
CHANGED
@@ -1,4 +1,5 @@
  import type { GetPlatformProxyOptions } from "wrangler";
+ import type { BucketCachePurge } from "./durable-objects/bucket-cache-purge.js";
  import type { DOQueueHandler } from "./durable-objects/queue.js";
  import type { DOShardedTagCache } from "./durable-objects/sharded-tag-cache.js";
  import type { PREFIX_ENV_NAME as KV_CACHE_PREFIX_ENV_NAME } from "./overrides/incremental-cache/kv-incremental-cache.js";
@@ -21,6 +22,10 @@ declare global {
      NEXT_CACHE_DO_QUEUE_RETRY_INTERVAL_MS?: string;
      NEXT_CACHE_DO_QUEUE_MAX_RETRIES?: string;
      NEXT_CACHE_DO_QUEUE_DISABLE_SQLITE?: string;
+     NEXT_CACHE_DO_PURGE?: DurableObjectNamespace<BucketCachePurge>;
+     NEXT_CACHE_DO_PURGE_BUFFER_TIME_IN_SECONDS?: string;
+     CACHE_PURGE_ZONE_ID?: string;
+     CACHE_PURGE_API_TOKEN?: string;
    }
  }
  export type CloudflareContext<CfProperties extends Record<string, unknown> = IncomingRequestCfProperties, Context = ExecutionContext> = {
package/dist/api/config.d.ts
CHANGED
@@ -1,6 +1,6 @@
  import type { BuildOptions } from "@opennextjs/aws/build/helper";
  import { BaseOverride, LazyLoadedOverride, OpenNextConfig as AwsOpenNextConfig } from "@opennextjs/aws/types/open-next";
- import type { IncrementalCache, Queue, TagCache } from "@opennextjs/aws/types/overrides";
+ import type { CDNInvalidationHandler, IncrementalCache, Queue, TagCache } from "@opennextjs/aws/types/overrides";
  export type Override<T extends BaseOverride> = "dummy" | T | LazyLoadedOverride<T>;
  /**
   * Cloudflare specific overrides.
@@ -20,6 +20,10 @@ export type CloudflareOverrides = {
       * Sets the revalidation queue implementation
       */
      queue?: "direct" | Override<Queue>;
+     /**
+      * Sets the automatic cache purge implementation
+      */
+     cachePurge?: Override<CDNInvalidationHandler>;
      /**
       * Enable cache interception
       * Should be `false` when PPR is used
package/dist/api/config.js
CHANGED
@@ -5,7 +5,7 @@
   * @returns the OpenNext configuration object
   */
  export function defineCloudflareConfig(config = {}) {
-     const { incrementalCache, tagCache, queue, enableCacheInterception = false } = config;
+     const { incrementalCache, tagCache, queue, cachePurge, enableCacheInterception = false } = config;
      return {
          default: {
              override: {
@@ -15,6 +15,7 @@ export function defineCloudflareConfig(config = {}) {
                  incrementalCache: resolveIncrementalCache(incrementalCache),
                  tagCache: resolveTagCache(tagCache),
                  queue: resolveQueue(queue),
+                 cdnInvalidation: resolveCdnInvalidation(cachePurge),
              },
              routePreloadingBehavior: "withWaitUntil",
          },
@@ -57,6 +58,12 @@ function resolveQueue(value = "dummy") {
      }
      return typeof value === "function" ? value : () => value;
  }
+ function resolveCdnInvalidation(value = "dummy") {
+     if (typeof value === "string") {
+         return value;
+     }
+     return typeof value === "function" ? value : () => value;
+ }
  /**
   * @param buildOpts build options from AWS
   * @returns The OpenConfig specific to cloudflare
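For orientation, here is a minimal sketch of how the new `cachePurge` option could be wired up in an `open-next.config.ts`. The option name, the `purgeCache` factory, and its `type` values come from this diff; the import specifiers and the exact wiring are assumptions, not something this diff confirms.

```ts
// open-next.config.ts — hypothetical sketch; import paths are assumed.
import { defineCloudflareConfig } from "@opennextjs/cloudflare";
import { purgeCache } from "@opennextjs/cloudflare/overrides/cache-purge";

export default defineCloudflareConfig({
  // "direct" calls the Cloudflare purge API on every invalidation;
  // "durableObject" buffers tags in the new BucketCachePurge Durable Object
  // (requires the NEXT_CACHE_DO_PURGE binding declared in cloudflare-context.d.ts above).
  cachePurge: purgeCache({ type: "durableObject" }),
});
```

In either mode the purge is skipped when no `CACHE_PURGE_ZONE_ID` / `CACHE_PURGE_API_TOKEN` are configured, as `internalPurgeCacheByTags` further down in this diff shows.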
package/dist/api/durable-objects/bucket-cache-purge.d.ts
ADDED
@@ -0,0 +1,7 @@
+ import { DurableObject } from "cloudflare:workers";
+ export declare class BucketCachePurge extends DurableObject<CloudflareEnv> {
+     bufferTimeInSeconds: number;
+     constructor(state: DurableObjectState, env: CloudflareEnv);
+     purgeCacheByTags(tags: string[]): Promise<void>;
+     alarm(): Promise<void>;
+ }
package/dist/api/durable-objects/bucket-cache-purge.js
ADDED
@@ -0,0 +1,75 @@
+ import { DurableObject } from "cloudflare:workers";
+ import { internalPurgeCacheByTags } from "../overrides/internal";
+ const DEFAULT_BUFFER_TIME_IN_SECONDS = 5;
+ // https://developers.cloudflare.com/cache/how-to/purge-cache/#hostname-tag-prefix-url-and-purge-everything-limits
+ const MAX_NUMBER_OF_TAGS_PER_PURGE = 100;
+ export class BucketCachePurge extends DurableObject {
+     bufferTimeInSeconds;
+     constructor(state, env) {
+         super(state, env);
+         this.bufferTimeInSeconds = env.NEXT_CACHE_DO_PURGE_BUFFER_TIME_IN_SECONDS
+             ? parseInt(env.NEXT_CACHE_DO_PURGE_BUFFER_TIME_IN_SECONDS)
+             : DEFAULT_BUFFER_TIME_IN_SECONDS; // Default buffer time
+         // Initialize the sql table if it doesn't exist
+         state.blockConcurrencyWhile(async () => {
+             state.storage.sql.exec(`
+                 CREATE TABLE IF NOT EXISTS cache_purge (
+                     tag TEXT NOT NULL
+                 );
+                 CREATE UNIQUE INDEX IF NOT EXISTS tag_index ON cache_purge (tag);
+             `);
+         });
+     }
+     async purgeCacheByTags(tags) {
+         for (const tag of tags) {
+             // Insert the tag into the sql table
+             this.ctx.storage.sql.exec(`
+                 INSERT OR REPLACE INTO cache_purge (tag)
+                 VALUES (?)`, [tag]);
+         }
+         const nextAlarm = await this.ctx.storage.getAlarm();
+         if (!nextAlarm) {
+             // Set an alarm to trigger the cache purge
+             this.ctx.storage.setAlarm(Date.now() + this.bufferTimeInSeconds * 1000);
+         }
+     }
+     async alarm() {
+         let tags = this.ctx.storage.sql
+             .exec(`
+                 SELECT * FROM cache_purge LIMIT ${MAX_NUMBER_OF_TAGS_PER_PURGE}
+             `)
+             .toArray();
+         do {
+             if (tags.length === 0) {
+                 // No tags to purge, we can stop
+                 return;
+             }
+             const result = await internalPurgeCacheByTags(this.env, tags.map((row) => row.tag));
+             // For every other error, we just remove the tags from the sql table
+             // and continue
+             if (result === "rate-limit-exceeded") {
+                 // Rate limit exceeded, we need to wait for the next alarm
+                 // and try again
+                 // We throw here to take advantage of the built-in retry
+                 throw new Error("Rate limit exceeded");
+             }
+             // Delete the tags from the sql table
+             this.ctx.storage.sql.exec(`
+                 DELETE FROM cache_purge
+                 WHERE tag IN (${tags.map(() => "?").join(",")})
+             `, tags.map((row) => row.tag));
+             if (tags.length < MAX_NUMBER_OF_TAGS_PER_PURGE) {
+                 // If we have less than MAX_NUMBER_OF_TAGS_PER_PURGE tags, we can stop
+                 tags = [];
+             }
+             else {
+                 // Otherwise, we need to get the next 100 tags
+                 tags = this.ctx.storage.sql
+                     .exec(`
+                         SELECT * FROM cache_purge LIMIT ${MAX_NUMBER_OF_TAGS_PER_PURGE}
+                     `)
+                     .toArray();
+             }
+         } while (tags.length >= 0);
+     }
+ }
package/dist/api/durable-objects/bucket-cache-purge.spec.d.ts
ADDED
@@ -0,0 +1 @@
+ export {};
package/dist/api/durable-objects/bucket-cache-purge.spec.js
ADDED
@@ -0,0 +1,121 @@
+ import { describe, expect, it, vi } from "vitest";
+ import * as internal from "../overrides/internal";
+ import { BucketCachePurge } from "./bucket-cache-purge";
+ vi.mock("cloudflare:workers", () => ({
+     DurableObject: class {
+         ctx;
+         env;
+         constructor(ctx, env) {
+             this.ctx = ctx;
+             this.env = env;
+         }
+     },
+ }));
+ const createBucketCachePurge = () => {
+     const mockState = {
+         waitUntil: vi.fn(),
+         blockConcurrencyWhile: vi.fn().mockImplementation(async (fn) => fn()),
+         storage: {
+             setAlarm: vi.fn(),
+             getAlarm: vi.fn(),
+             sql: {
+                 exec: vi.fn().mockImplementation(() => ({
+                     one: vi.fn(),
+                     toArray: vi.fn().mockReturnValue([]),
+                 })),
+             },
+         },
+     };
+     // eslint-disable-next-line @typescript-eslint/no-explicit-any
+     return new BucketCachePurge(mockState, {});
+ };
+ describe("BucketCachePurge", () => {
+     it("should block concurrency while creating the table", async () => {
+         const cache = createBucketCachePurge();
+         // @ts-expect-error - testing private method
+         expect(cache.ctx.blockConcurrencyWhile).toHaveBeenCalled();
+         // @ts-expect-error - testing private method
+         expect(cache.ctx.storage.sql.exec).toHaveBeenCalledWith(expect.stringContaining("CREATE TABLE IF NOT EXISTS cache_purge"));
+     });
+     describe("purgeCacheByTags", () => {
+         it("should insert tags into the sql table", async () => {
+             const cache = createBucketCachePurge();
+             const tags = ["tag1", "tag2"];
+             await cache.purgeCacheByTags(tags);
+             // @ts-expect-error - testing private method
+             expect(cache.ctx.storage.sql.exec).toHaveBeenCalledWith(expect.stringContaining("INSERT OR REPLACE INTO cache_purge"), [tags[0]]);
+             // @ts-expect-error - testing private method
+             expect(cache.ctx.storage.sql.exec).toHaveBeenCalledWith(expect.stringContaining("INSERT OR REPLACE INTO cache_purge"), [tags[1]]);
+         });
+         it("should set an alarm if no alarm is set", async () => {
+             const cache = createBucketCachePurge();
+             // @ts-expect-error - testing private method
+             cache.ctx.storage.getAlarm.mockResolvedValueOnce(null);
+             await cache.purgeCacheByTags(["tag"]);
+             // @ts-expect-error - testing private method
+             expect(cache.ctx.storage.setAlarm).toHaveBeenCalled();
+         });
+         it("should not set an alarm if one is already set", async () => {
+             const cache = createBucketCachePurge();
+             // @ts-expect-error - testing private method
+             cache.ctx.storage.getAlarm.mockResolvedValueOnce(true);
+             await cache.purgeCacheByTags(["tag"]);
+             // @ts-expect-error - testing private method
+             expect(cache.ctx.storage.setAlarm).not.toHaveBeenCalled();
+         });
+     });
+     describe("alarm", () => {
+         it("should purge cache by tags and delete them from the sql table", async () => {
+             const cache = createBucketCachePurge();
+             // @ts-expect-error - testing private method
+             cache.ctx.storage.sql.exec.mockReturnValueOnce({
+                 toArray: () => [{ tag: "tag1" }, { tag: "tag2" }],
+             });
+             await cache.alarm();
+             // @ts-expect-error - testing private method
+             expect(cache.ctx.storage.sql.exec).toHaveBeenCalledWith(expect.stringContaining("DELETE FROM cache_purge"), ["tag1", "tag2"]);
+         });
+         it("should not purge cache if no tags are found", async () => {
+             const cache = createBucketCachePurge();
+             // @ts-expect-error - testing private method
+             cache.ctx.storage.sql.exec.mockReturnValueOnce({
+                 toArray: () => [],
+             });
+             await cache.alarm();
+             // @ts-expect-error - testing private method
+             expect(cache.ctx.storage.sql.exec).not.toHaveBeenCalledWith(expect.stringContaining("DELETE FROM cache_purge"), []);
+         });
+         it("should call internalPurgeCacheByTags with the correct tags", async () => {
+             const cache = createBucketCachePurge();
+             const tags = ["tag1", "tag2"];
+             // @ts-expect-error - testing private method
+             cache.ctx.storage.sql.exec.mockReturnValueOnce({
+                 toArray: () => tags.map((tag) => ({ tag })),
+             });
+             const internalPurgeCacheByTagsSpy = vi.spyOn(internal, "internalPurgeCacheByTags");
+             await cache.alarm();
+             expect(internalPurgeCacheByTagsSpy).toHaveBeenCalledWith(
+             // @ts-expect-error - testing private method
+             cache.env, tags);
+             // @ts-expect-error - testing private method 1st is constructor, 2nd is to get the tags and 3rd is to delete them
+             expect(cache.ctx.storage.sql.exec).toHaveBeenCalledTimes(3);
+         });
+         it("should continue until all tags are purged", async () => {
+             const cache = createBucketCachePurge();
+             const tags = Array.from({ length: 100 }, (_, i) => `tag${i}`);
+             // @ts-expect-error - testing private method
+             cache.ctx.storage.sql.exec.mockReturnValueOnce({
+                 toArray: () => tags.map((tag) => ({ tag })),
+             });
+             const internalPurgeCacheByTagsSpy = vi.spyOn(internal, "internalPurgeCacheByTags");
+             await cache.alarm();
+             expect(internalPurgeCacheByTagsSpy).toHaveBeenCalledWith(
+             // @ts-expect-error - testing private method
+             cache.env, tags);
+             // @ts-expect-error - testing private method 1st is constructor, 2nd is to get the tags and 3rd is to delete them, 4th is to get the next 100 tags
+             expect(cache.ctx.storage.sql.exec).toHaveBeenCalledTimes(4);
+             // @ts-expect-error - testing private method
+             expect(cache.ctx.storage.sql.exec).toHaveBeenLastCalledWith(expect.stringContaining("SELECT * FROM cache_purge LIMIT 100"));
+         });
+     });
+ });
package/dist/api/overrides/cache-purge/index.d.ts
ADDED
@@ -0,0 +1,12 @@
+ interface PurgeOptions {
+     type: "durableObject" | "direct";
+ }
+ export declare const purgeCache: ({ type }: PurgeOptions) => {
+     name: string;
+     invalidatePaths(paths: {
+         initialPath: string;
+         rawPath: string;
+         resolvedRoutes: import("@opennextjs/aws/types/open-next").ResolvedRoute[];
+     }[]): Promise<void>;
+ };
+ export {};
package/dist/api/overrides/cache-purge/index.js
ADDED
@@ -0,0 +1,26 @@
+ import { getCloudflareContext } from "../../cloudflare-context";
+ import { debugCache, internalPurgeCacheByTags } from "../internal.js";
+ export const purgeCache = ({ type = "direct" }) => {
+     return {
+         name: "cloudflare",
+         async invalidatePaths(paths) {
+             const { env } = getCloudflareContext();
+             const tags = paths.map((path) => `_N_T_${path.rawPath}`);
+             debugCache("cdnInvalidation", "Invalidating paths:", tags);
+             if (type === "direct") {
+                 await internalPurgeCacheByTags(env, tags);
+             }
+             else {
+                 const durableObject = env.NEXT_CACHE_DO_PURGE;
+                 if (!durableObject) {
+                     debugCache("cdnInvalidation", "No durable object found. Skipping cache purge.");
+                     return;
+                 }
+                 const id = durableObject.idFromName("cache-purge");
+                 const obj = durableObject.get(id);
+                 await obj.purgeCacheByTags(tags);
+             }
+             debugCache("cdnInvalidation", "Invalidated paths:", tags);
+         },
+     };
+ };
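To illustrate what `invalidatePaths` does with its input, a small worked example (the path values are hypothetical; the shape matches the declaration in `index.d.ts` above):

```ts
// Hypothetical input to invalidatePaths().
const paths = [{ initialPath: "/blog/[slug]", rawPath: "/blog/hello-world", resolvedRoutes: [] }];
// The override turns each rawPath into the soft tag Next.js assigns to that route...
const tags = paths.map((path) => `_N_T_${path.rawPath}`);
// ...so the purge ends up targeting ["_N_T_/blog/hello-world"], either directly or via the Durable Object.
```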
package/dist/api/overrides/internal.d.ts
CHANGED
@@ -12,3 +12,5 @@ export type KeyOptions = {
      buildId: string | undefined;
  };
  export declare function computeCacheKey(key: string, options: KeyOptions): string;
+ export declare function purgeCacheByTags(tags: string[]): Promise<void>;
+ export declare function internalPurgeCacheByTags(env: CloudflareEnv, tags: string[]): Promise<"missing-credentials" | "rate-limit-exceeded" | "purge-failed" | "purge-success">;
package/dist/api/overrides/internal.js
CHANGED
@@ -1,4 +1,5 @@
  import { createHash } from "node:crypto";
+ import { getCloudflareContext } from "../cloudflare-context.js";
  export const debugCache = (name, ...args) => {
      if (process.env.NEXT_PRIVATE_DEBUG_CACHE) {
          console.log(`[${name}] `, ...args);
@@ -11,3 +12,54 @@ export function computeCacheKey(key, options) {
      const hash = createHash("sha256").update(key).digest("hex");
      return `${prefix}/${buildId}/${hash}.${cacheType}`.replace(/\/+/g, "/");
  }
+ export async function purgeCacheByTags(tags) {
+     const { env } = getCloudflareContext();
+     // We have a durable object for purging cache
+     // We should use it
+     if (env.NEXT_CACHE_DO_PURGE) {
+         const durableObject = env.NEXT_CACHE_DO_PURGE;
+         const id = durableObject.idFromName("cache-purge");
+         const obj = durableObject.get(id);
+         await obj.purgeCacheByTags(tags);
+     }
+     else {
+         // We don't have a durable object for purging cache
+         // We should use the API directly
+         await internalPurgeCacheByTags(env, tags);
+     }
+ }
+ export async function internalPurgeCacheByTags(env, tags) {
+     if (!env.CACHE_PURGE_ZONE_ID && !env.CACHE_PURGE_API_TOKEN) {
+         // THIS IS A NO-OP
+         debugCache("purgeCacheByTags", "No cache zone ID or API token provided. Skipping cache purge.");
+         return "missing-credentials";
+     }
+     try {
+         const response = await fetch(`https://api.cloudflare.com/client/v4/zones/${env.CACHE_PURGE_ZONE_ID}/purge_cache`, {
+             headers: {
+                 Authorization: `Bearer ${env.CACHE_PURGE_API_TOKEN}`,
+                 "Content-Type": "application/json",
+             },
+             method: "POST",
+             body: JSON.stringify({
+                 tags,
+             }),
+         });
+         if (response.status === 429) {
+             // Rate limit exceeded
+             debugCache("purgeCacheByTags", "Rate limit exceeded. Skipping cache purge.");
+             return "rate-limit-exceeded";
+         }
+         const bodyResponse = (await response.json());
+         if (!bodyResponse.success) {
+             debugCache("purgeCacheByTags", "Cache purge failed. Errors:", bodyResponse.errors.map((error) => `${error.code}: ${error.message}`));
+             return "purge-failed";
+         }
+         debugCache("purgeCacheByTags", "Cache purged successfully for tags:", tags);
+         return "purge-success";
+     }
+     catch (error) {
+         console.error("Error purging cache by tags:", error);
+         return "purge-failed";
+     }
+ }
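As a reference for the return values, a sketch of calling the exported helper directly (the import specifier is an assumption based on the dist path; the result strings are the ones declared in internal.d.ts above):

```ts
// Hypothetical direct use of the helper; import path assumed.
import { internalPurgeCacheByTags } from "@opennextjs/cloudflare/overrides/internal";

export async function purgeBlogIndex(env: CloudflareEnv) {
  // Requires CACHE_PURGE_ZONE_ID and CACHE_PURGE_API_TOKEN, otherwise returns "missing-credentials".
  const result = await internalPurgeCacheByTags(env, ["_N_T_/blog"]);
  if (result === "rate-limit-exceeded") {
    // The caller decides how to retry; BucketCachePurge does so by throwing from alarm().
  }
  return result;
}
```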
package/dist/api/overrides/queue/do-queue.js
CHANGED
@@ -1,7 +1,7 @@
  import { IgnorableError } from "@opennextjs/aws/utils/error.js";
  import { getCloudflareContext } from "../../cloudflare-context";
  export default {
-     name: "
+     name: "durable-queue",
      send: async (msg) => {
          const durableObject = getCloudflareContext().env.NEXT_CACHE_DO_QUEUE;
          if (!durableObject)
package/dist/api/overrides/queue/queue-cache.d.ts
ADDED
@@ -0,0 +1,36 @@
+ import type { Queue, QueueMessage } from "@opennextjs/aws/types/overrides";
+ interface QueueCachingOptions {
+     /**
+      * The TTL for the regional cache in seconds.
+      * @default 5
+      */
+     regionalCacheTtlSec?: number;
+     /**
+      * Whether to wait for the queue ack before returning.
+      * When set to false, the cache will be populated asap and the queue will be called after.
+      * When set to true, the cache will be populated only after the queue ack is received.
+      * @default false
+      */
+     waitForQueueAck?: boolean;
+ }
+ declare class QueueCache implements Queue {
+     private originalQueue;
+     readonly name: string;
+     readonly regionalCacheTtlSec: number;
+     readonly waitForQueueAck: boolean;
+     cache: Cache | undefined;
+     localCache: Map<string, number>;
+     constructor(originalQueue: Queue, options: QueueCachingOptions);
+     send(msg: QueueMessage): Promise<void>;
+     private getCache;
+     private getCacheUrlString;
+     private getCacheKey;
+     private putToCache;
+     private isInCache;
+     /**
+      * Remove any value older than the TTL from the local cache
+      */
+     private clearLocalCache;
+ }
+ declare const _default: (originalQueue: Queue, opts?: QueueCachingOptions) => QueueCache;
+ export default _default;
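A minimal sketch of how this queue cache wrapper could be combined with the existing Durable Object queue override in `open-next.config.ts`. The option names come from `QueueCachingOptions` above; the import specifiers and local names for the default exports are assumptions based on the dist layout.

```ts
// open-next.config.ts — hypothetical sketch; import paths assumed.
import { defineCloudflareConfig } from "@opennextjs/cloudflare";
import doQueue from "@opennextjs/cloudflare/overrides/queue/do-queue";
import queueCache from "@opennextjs/cloudflare/overrides/queue/queue-cache";

export default defineCloudflareConfig({
  // Wrap the Durable Object queue so duplicate revalidation messages
  // are de-duplicated for regionalCacheTtlSec seconds per region.
  queue: queueCache(doQueue, {
    regionalCacheTtlSec: 5, // default per QueueCachingOptions
    waitForQueueAck: false, // populate the cache before waiting for the queue ack
  }),
});
```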
package/dist/api/overrides/queue/queue-cache.js
ADDED
@@ -0,0 +1,93 @@
+ import { error } from "@opennextjs/aws/adapters/logger.js";
+ const DEFAULT_QUEUE_CACHE_TTL_SEC = 5;
+ class QueueCache {
+     originalQueue;
+     name;
+     regionalCacheTtlSec;
+     waitForQueueAck;
+     cache;
+     // Local mapping from key to insertedAtSec
+     localCache = new Map();
+     constructor(originalQueue, options) {
+         this.originalQueue = originalQueue;
+         this.name = `cached-${originalQueue.name}`;
+         this.regionalCacheTtlSec = options.regionalCacheTtlSec ?? DEFAULT_QUEUE_CACHE_TTL_SEC;
+         this.waitForQueueAck = options.waitForQueueAck ?? false;
+     }
+     async send(msg) {
+         try {
+             const isCached = await this.isInCache(msg);
+             if (isCached) {
+                 return;
+             }
+             if (!this.waitForQueueAck) {
+                 await this.putToCache(msg);
+                 await this.originalQueue.send(msg);
+             }
+             else {
+                 await this.originalQueue.send(msg);
+                 await this.putToCache(msg);
+             }
+         }
+         catch (e) {
+             error("Error sending message to queue", e);
+         }
+         finally {
+             this.clearLocalCache();
+         }
+     }
+     async getCache() {
+         if (!this.cache) {
+             this.cache = await caches.open("durable-queue");
+         }
+         return this.cache;
+     }
+     getCacheUrlString(msg) {
+         return `queue/${msg.MessageGroupId}/${msg.MessageDeduplicationId}`;
+     }
+     getCacheKey(msg) {
+         return "http://local.cache" + this.getCacheUrlString(msg);
+     }
+     async putToCache(msg) {
+         this.localCache.set(this.getCacheUrlString(msg), Date.now());
+         const cacheKey = this.getCacheKey(msg);
+         const cache = await this.getCache();
+         await cache.put(cacheKey, new Response(null, {
+             status: 200,
+             headers: {
+                 "Cache-Control": `max-age=${this.regionalCacheTtlSec}`,
+                 // Tag cache is set to the value of the soft tag assigned by Next.js
+                 // This way you can invalidate this cache as well as any other regional cache
+                 "Cache-Tag": `_N_T_/${msg.MessageBody.url}`,
+             },
+         }));
+     }
+     async isInCache(msg) {
+         if (this.localCache.has(this.getCacheUrlString(msg))) {
+             const insertedAt = this.localCache.get(this.getCacheUrlString(msg));
+             if (Date.now() - insertedAt < this.regionalCacheTtlSec * 1000) {
+                 return true;
+             }
+             this.localCache.delete(this.getCacheUrlString(msg));
+             return false;
+         }
+         const cacheKey = this.getCacheKey(msg);
+         const cache = await this.getCache();
+         const cachedResponse = await cache.match(cacheKey);
+         if (cachedResponse) {
+             return true;
+         }
+     }
+     /**
+      * Remove any value older than the TTL from the local cache
+      */
+     clearLocalCache() {
+         const insertAtSecMax = Date.now() - this.regionalCacheTtlSec * 1000;
+         for (const [key, insertAtSec] of this.localCache.entries()) {
+             if (insertAtSec < insertAtSecMax) {
+                 this.localCache.delete(key);
+             }
+         }
+     }
+ }
+ export default (originalQueue, opts = {}) => new QueueCache(originalQueue, opts);
package/dist/api/overrides/queue/queue-cache.spec.d.ts
ADDED
@@ -0,0 +1 @@
+ export {};
package/dist/api/overrides/queue/queue-cache.spec.js
ADDED
@@ -0,0 +1,92 @@
+ import { afterEach, beforeEach, describe, expect, test, vi } from "vitest";
+ import queueCache from "./queue-cache";
+ const mockedQueue = {
+     name: "mocked-queue",
+     send: vi.fn(),
+ };
+ const generateMessage = () => ({
+     MessageGroupId: "test",
+     MessageBody: {
+         eTag: "test",
+         url: "test",
+         host: "test",
+         lastModified: Date.now(),
+     },
+     MessageDeduplicationId: "test",
+ });
+ const mockedPut = vi.fn();
+ const mockedMatch = vi.fn().mockReturnValue(null);
+ describe("queue-cache", () => {
+     beforeEach(() => {
+         // @ts-ignore
+         globalThis.caches = {
+             open: vi.fn().mockReturnValue({
+                 put: mockedPut,
+                 match: mockedMatch,
+             }),
+         };
+     });
+     afterEach(() => {
+         vi.resetAllMocks();
+     });
+     test("should send the message to the original queue", async () => {
+         const msg = generateMessage();
+         const queue = queueCache(mockedQueue, {});
+         expect(queue.name).toBe("cached-mocked-queue");
+         await queue.send(msg);
+         expect(mockedQueue.send).toHaveBeenCalledWith(msg);
+     });
+     test("should use the local cache", async () => {
+         const msg = generateMessage();
+         const queue = queueCache(mockedQueue, {});
+         await queue.send(msg);
+         expect(queue.localCache.size).toBe(1);
+         expect(queue.localCache.has(`queue/test/test`)).toBe(true);
+         expect(mockedPut).toHaveBeenCalled();
+         const spiedHas = vi.spyOn(queue.localCache, "has");
+         await queue.send(msg);
+         expect(spiedHas).toHaveBeenCalled();
+         expect(mockedQueue.send).toHaveBeenCalledTimes(1);
+         expect(mockedMatch).toHaveBeenCalledTimes(1);
+     });
+     test("should clear the local cache after 5s", async () => {
+         vi.useFakeTimers();
+         const msg = generateMessage();
+         const queue = queueCache(mockedQueue, {});
+         await queue.send(msg);
+         expect(queue.localCache.size).toBe(1);
+         expect(queue.localCache.has(`queue/test/test`)).toBe(true);
+         vi.advanceTimersByTime(5001);
+         const alteredMsg = generateMessage();
+         alteredMsg.MessageGroupId = "test2";
+         await queue.send(alteredMsg);
+         expect(queue.localCache.size).toBe(1);
+         console.log(queue.localCache);
+         expect(queue.localCache.has(`queue/test2/test`)).toBe(true);
+         expect(queue.localCache.has(`queue/test/test`)).toBe(false);
+         vi.useRealTimers();
+     });
+     test("should use the regional cache if not in local cache", async () => {
+         const msg = generateMessage();
+         const queue = queueCache(mockedQueue, {});
+         await queue.send(msg);
+         expect(mockedMatch).toHaveBeenCalledTimes(1);
+         expect(mockedPut).toHaveBeenCalledTimes(1);
+         expect(queue.localCache.size).toBe(1);
+         expect(queue.localCache.has(`queue/test/test`)).toBe(true);
+         // We need to delete the local cache to test the regional cache
+         queue.localCache.delete(`queue/test/test`);
+         const spiedHas = vi.spyOn(queue.localCache, "has");
+         await queue.send(msg);
+         expect(spiedHas).toHaveBeenCalled();
+         expect(mockedMatch).toHaveBeenCalledTimes(2);
+     });
+     test("should return early if the message is in the regional cache", async () => {
+         const msg = generateMessage();
+         const queue = queueCache(mockedQueue, {});
+         mockedMatch.mockReturnValueOnce(new Response(null, { status: 200 }));
+         const spiedSend = mockedQueue.send;
+         await queue.send(msg);
+         expect(spiedSend).not.toHaveBeenCalled();
+     });
+ });
package/dist/api/overrides/tag-cache/d1-next-tag-cache.js
CHANGED
@@ -1,6 +1,6 @@
  import { error } from "@opennextjs/aws/adapters/logger.js";
  import { getCloudflareContext } from "../../cloudflare-context.js";
- import { debugCache, FALLBACK_BUILD_ID } from "../internal.js";
+ import { debugCache, FALLBACK_BUILD_ID, purgeCacheByTags } from "../internal.js";
  export const NAME = "d1-next-mode-tag-cache";
  export const BINDING_NAME = "NEXT_TAG_CACHE_D1";
  export class D1NextModeTagCache {
@@ -52,6 +52,7 @@ export class D1NextModeTagCache {
      await db.batch(tags.map((tag) => db
          .prepare(`INSERT INTO revalidations (tag, revalidatedAt) VALUES (?, ?)`)
          .bind(this.getCacheKey(tag), Date.now())));
+     await purgeCacheByTags(tags);
  }
  getConfig() {
      const db = getCloudflareContext().env[BINDING_NAME];
package/dist/api/overrides/tag-cache/do-sharded-tag-cache.d.ts
CHANGED
@@ -2,6 +2,9 @@ import type { NextModeTagCache } from "@opennextjs/aws/types/overrides.js";
  export declare const DEFAULT_WRITE_RETRIES = 3;
  export declare const DEFAULT_NUM_SHARDS = 4;
  export declare const NAME = "do-sharded-tag-cache";
+ export declare const DEFAULT_REGION: "enam";
+ export declare const AVAILABLE_REGIONS: readonly ["enam", "weur", "apac", "sam", "afr", "oc"];
+ type AllowedDurableObjectRegion = (typeof AVAILABLE_REGIONS)[number];
  interface ShardedDOTagCacheOptions {
      /**
       * The number of shards that will be used.
@@ -49,6 +52,18 @@ interface ShardedDOTagCacheOptions {
      shardReplication?: {
          numberOfSoftReplicas: number;
          numberOfHardReplicas: number;
+         /**
+          * Enable regional replication for the shards.
+          *
+          * If not set, no regional replication will be performed and durable objects will be created without a location hint
+          *
+          * Can be used to reduce latency for users in different regions and to spread the load across multiple regions.
+          *
+          * This will increase the number of durable objects created, as each shard will be replicated in all regions.
+          */
+         regionalReplication?: {
+             defaultRegion: AllowedDurableObjectRegion;
+         };
      };
      /**
       * The number of retries to perform when writing tags
@@ -62,11 +77,13 @@ interface DOIdOptions {
      numberOfReplicas: number;
      shardType: "soft" | "hard";
      replicaId?: number;
+     region?: DurableObjectLocationHint;
  }
  export declare class DOId {
      options: DOIdOptions;
      shardId: string;
      replicaId: number;
+     region?: DurableObjectLocationHint;
      constructor(options: DOIdOptions);
      private generateRandomNumberBetween;
      get key(): string;
@@ -83,6 +100,8 @@ declare class ShardedDOTagCache implements NextModeTagCache {
      readonly numSoftReplicas: number;
      readonly numHardReplicas: number;
      readonly maxWriteRetries: number;
+     readonly enableRegionalReplication: boolean;
+     readonly defaultRegion: AllowedDurableObjectRegion;
      localCache?: Cache;
      constructor(opts?: ShardedDOTagCacheOptions);
      private getDurableObjectStub;
@@ -94,6 +113,7 @@ declare class ShardedDOTagCache implements NextModeTagCache {
       * @returns An array of TagCacheDOId and tag
       */
      private generateDOIdArray;
+     getClosestRegion(): "enam" | "sam" | "weur" | "apac" | "oc" | "afr";
      /**
       * Same tags are guaranteed to be in the same shard
       * @param tags
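A minimal sketch of how the new regional replication option could be enabled on the sharded tag cache in `open-next.config.ts`. The option names come from `ShardedDOTagCacheOptions` above; the import specifier for the default export is an assumption based on the dist layout.

```ts
// open-next.config.ts — hypothetical sketch; the import path is assumed.
import { defineCloudflareConfig } from "@opennextjs/cloudflare";
import shardedDOTagCache from "@opennextjs/cloudflare/overrides/tag-cache/do-sharded-tag-cache";

export default defineCloudflareConfig({
  tagCache: shardedDOTagCache({
    baseShardSize: 4,
    shardReplication: {
      numberOfSoftReplicas: 2,
      numberOfHardReplicas: 2,
      // New in 1.1.0: replicate every shard in all available regions and read
      // from the region closest to the request, falling back to defaultRegion.
      regionalReplication: { defaultRegion: "weur" },
    },
  }),
});
```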
package/dist/api/overrides/tag-cache/do-sharded-tag-cache.js
CHANGED
@@ -2,26 +2,30 @@ import { debug, error } from "@opennextjs/aws/adapters/logger.js";
  import { generateShardId } from "@opennextjs/aws/core/routing/queue.js";
  import { IgnorableError } from "@opennextjs/aws/utils/error.js";
  import { getCloudflareContext } from "../../cloudflare-context";
- import { debugCache } from "../internal";
+ import { debugCache, purgeCacheByTags } from "../internal";
  export const DEFAULT_WRITE_RETRIES = 3;
  export const DEFAULT_NUM_SHARDS = 4;
  export const NAME = "do-sharded-tag-cache";
  const SOFT_TAG_PREFIX = "_N_T_/";
+ export const DEFAULT_REGION = "enam";
+ export const AVAILABLE_REGIONS = ["enam", "weur", "apac", "sam", "afr", "oc"];
  export class DOId {
      options;
      shardId;
      replicaId;
+     region;
      constructor(options) {
          this.options = options;
-         const { baseShardId, shardType, numberOfReplicas, replicaId } = options;
+         const { baseShardId, shardType, numberOfReplicas, replicaId, region } = options;
          this.shardId = `tag-${shardType};${baseShardId}`;
          this.replicaId = replicaId ?? this.generateRandomNumberBetween(1, numberOfReplicas);
+         this.region = region;
      }
      generateRandomNumberBetween(min, max) {
          return Math.floor(Math.random() * (max - min + 1) + min);
      }
      get key() {
-         return `${this.shardId};replica-${this.replicaId}`;
+         return `${this.shardId};replica-${this.replicaId}${this.region ? `;region-${this.region}` : ""}`;
      }
  }
  class ShardedDOTagCache {
@@ -31,19 +35,27 @@ class ShardedDOTagCache {
      numSoftReplicas;
      numHardReplicas;
      maxWriteRetries;
+     enableRegionalReplication;
+     defaultRegion;
      localCache;
      constructor(opts = { baseShardSize: DEFAULT_NUM_SHARDS }) {
          this.opts = opts;
          this.numSoftReplicas = opts.shardReplication?.numberOfSoftReplicas ?? 1;
          this.numHardReplicas = opts.shardReplication?.numberOfHardReplicas ?? 1;
          this.maxWriteRetries = opts.maxWriteRetries ?? DEFAULT_WRITE_RETRIES;
+         this.enableRegionalReplication = Boolean(opts.shardReplication?.regionalReplication);
+         this.defaultRegion = opts.shardReplication?.regionalReplication?.defaultRegion ?? DEFAULT_REGION;
      }
      getDurableObjectStub(doId) {
          const durableObject = getCloudflareContext().env.NEXT_TAG_CACHE_DO_SHARDED;
          if (!durableObject)
              throw new IgnorableError("No durable object binding for cache revalidation");
          const id = durableObject.idFromName(doId.key);
-
+         debug("[shardedTagCache] - Accessing Durable Object : ", {
+             key: doId.key,
+             region: doId.region,
+         });
+         return durableObject.get(id, { locationHint: doId.region });
      }
      /**
       * Generates a list of DO ids for the shards and replicas
@@ -55,9 +67,14 @@ class ShardedDOTagCache {
      generateDOIdArray({ tags, shardType, generateAllReplicas = false, }) {
          let replicaIndexes = [1];
          const isSoft = shardType === "soft";
-
-
-
+         let numReplicas = 1;
+         if (this.opts.shardReplication) {
+             numReplicas = isSoft ? this.numSoftReplicas : this.numHardReplicas;
+             replicaIndexes = generateAllReplicas
+                 ? Array.from({ length: numReplicas }, (_, i) => i + 1)
+                 : [undefined];
+         }
+         const regionalReplicas = replicaIndexes.flatMap((replicaId) => {
              return tags
                  .filter((tag) => (isSoft ? tag.startsWith(SOFT_TAG_PREFIX) : !tag.startsWith(SOFT_TAG_PREFIX)))
                  .map((tag) => {
@@ -72,6 +89,51 @@ class ShardedDOTagCache {
                  };
              });
          });
+         if (!this.enableRegionalReplication)
+             return regionalReplicas;
+         // If we have regional replication enabled, we need to further duplicate the shards in all the regions
+         const regionalReplicasInAllRegions = generateAllReplicas
+             ? regionalReplicas.flatMap(({ doId, tag }) => {
+                 return AVAILABLE_REGIONS.map((region) => {
+                     return {
+                         doId: new DOId({
+                             baseShardId: doId.options.baseShardId,
+                             numberOfReplicas: numReplicas,
+                             shardType,
+                             replicaId: doId.replicaId,
+                             region,
+                         }),
+                         tag,
+                     };
+                 });
+             })
+             : regionalReplicas.map(({ doId, tag }) => {
+                 doId.region = this.getClosestRegion();
+                 return { doId, tag };
+             });
+         return regionalReplicasInAllRegions;
+     }
+     getClosestRegion() {
+         const continent = getCloudflareContext().cf?.continent;
+         if (!continent)
+             return this.defaultRegion;
+         debug("[shardedTagCache] - Continent : ", continent);
+         switch (continent) {
+             case "AF":
+                 return "afr";
+             case "AS":
+                 return "apac";
+             case "EU":
+                 return "weur";
+             case "NA":
+                 return "enam";
+             case "OC":
+                 return "oc";
+             case "SA":
+                 return "sam";
+             default:
+                 return this.defaultRegion;
+         }
      }
      /**
       * Same tags are guaranteed to be in the same shard
@@ -196,6 +258,7 @@ class ShardedDOTagCache {
      await Promise.all(shardedTagGroups.map(async ({ doId, tags }) => {
          await this.performWriteTagsWithRetry(doId, tags, currentTime);
      }));
+     await purgeCacheByTags(tags);
  }
  async performWriteTagsWithRetry(doId, tags, lastModified, retryNumber = 0) {
      try {
package/dist/api/overrides/tag-cache/do-sharded-tag-cache.spec.js
CHANGED
@@ -1,5 +1,5 @@
  import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";
- import shardedDOTagCache, { DOId } from "./do-sharded-tag-cache";
+ import shardedDOTagCache, { AVAILABLE_REGIONS, DOId } from "./do-sharded-tag-cache";
  const hasBeenRevalidatedMock = vi.fn();
  const writeTagsMock = vi.fn();
  const idFromNameMock = vi.fn();
@@ -7,6 +7,8 @@ const getMock = vi
      .fn()
      .mockReturnValue({ hasBeenRevalidated: hasBeenRevalidatedMock, writeTags: writeTagsMock });
  const waitUntilMock = vi.fn().mockImplementation(async (fn) => fn());
+ // @ts-expect-error - We define it here only for the test
+ globalThis.continent = undefined;
  const sendDLQMock = vi.fn();
  vi.mock("../../cloudflare-context", () => ({
      getCloudflareContext: () => ({
@@ -17,6 +19,10 @@ vi.mock("../../cloudflare-context", () => ({
          },
      },
      ctx: { waitUntil: waitUntilMock },
+     cf: {
+         // @ts-expect-error - We define it here only for the test
+         continent: globalThis.continent,
+     },
  }),
  }));
  describe("DOShardedTagCache", () => {
@@ -96,6 +102,80 @@ describe("DOShardedTagCache", () => {
      expect(secondDOId?.replicaId).toBeGreaterThanOrEqual(1);
      expect(secondDOId?.replicaId).toBeLessThanOrEqual(2);
  });
+ it("should generate one doIds, but in the default region", () => {
+     const cache = shardedDOTagCache({
+         baseShardSize: 4,
+         shardReplication: {
+             numberOfSoftReplicas: 2,
+             numberOfHardReplicas: 2,
+             regionalReplication: {
+                 defaultRegion: "enam",
+             },
+         },
+     });
+     const shardedTagCollection = cache.groupTagsByDO({
+         tags: ["tag1", "_N_T_/tag1"],
+         generateAllReplicas: false,
+     });
+     expect(shardedTagCollection.length).toBe(2);
+     const firstDOId = shardedTagCollection[0]?.doId;
+     const secondDOId = shardedTagCollection[1]?.doId;
+     expect(firstDOId?.shardId).toBe("tag-soft;shard-3");
+     expect(firstDOId?.region).toBe("enam");
+     expect(secondDOId?.shardId).toBe("tag-hard;shard-1");
+     expect(secondDOId?.region).toBe("enam");
+     // We still need to check if the last part is between the correct boundaries
+     expect(firstDOId?.replicaId).toBeGreaterThanOrEqual(1);
+     expect(firstDOId?.replicaId).toBeLessThanOrEqual(2);
+     expect(secondDOId?.replicaId).toBeGreaterThanOrEqual(1);
+     expect(secondDOId?.replicaId).toBeLessThanOrEqual(2);
+ });
+ it("should generate one doIds, but in the correct region", () => {
+     // @ts-expect-error - We define it here only for the test
+     globalThis.continent = "EU";
+     const cache = shardedDOTagCache({
+         baseShardSize: 4,
+         shardReplication: {
+             numberOfSoftReplicas: 2,
+             numberOfHardReplicas: 2,
+             regionalReplication: {
+                 defaultRegion: "enam",
+             },
+         },
+     });
+     const shardedTagCollection = cache.groupTagsByDO({
+         tags: ["tag1", "_N_T_/tag1"],
+         generateAllReplicas: false,
+     });
+     expect(shardedTagCollection.length).toBe(2);
+     expect(shardedTagCollection[0]?.doId.region).toBe("weur");
+     expect(shardedTagCollection[1]?.doId.region).toBe("weur");
+     //@ts-expect-error - We need to reset the global variable
+     globalThis.continent = undefined;
+ });
+ it("should generate all the appropriate replicas in all the regions with enableRegionalReplication", () => {
+     const cache = shardedDOTagCache({
+         baseShardSize: 4,
+         shardReplication: {
+             numberOfSoftReplicas: 2,
+             numberOfHardReplicas: 2,
+             regionalReplication: {
+                 defaultRegion: "enam",
+             },
+         },
+     });
+     const shardedTagCollection = cache.groupTagsByDO({
+         tags: ["tag1", "_N_T_/tag1"],
+         generateAllReplicas: true,
+     });
+     // 6 regions times 4 shards replica
+     expect(shardedTagCollection.length).toBe(24);
+     shardedTagCollection.forEach(({ doId }) => {
+         expect(AVAILABLE_REGIONS).toContain(doId.region);
+         // It should end with the region
+         expect(doId.key).toMatch(/tag-(soft|hard);shard-\d;replica-\d;region-(enam|weur|sam|afr|apac|oc)$/);
+     });
+ });
  });
  });
  describe("hasBeenRevalidated", () => {
package/dist/cli/build/open-next/compileDurableObjects.js
CHANGED
@@ -7,6 +7,7 @@ export function compileDurableObjects(buildOpts) {
      const entryPoints = [
          _require.resolve("@opennextjs/cloudflare/durable-objects/queue"),
          _require.resolve("@opennextjs/cloudflare/durable-objects/sharded-tag-cache"),
+         _require.resolve("@opennextjs/cloudflare/durable-objects/bucket-cache-purge"),
      ];
      const { outputDir } = buildOpts;
      const baseManifestPath = path.join(outputDir, "server-functions/default", getPackagePath(buildOpts), ".next");
package/dist/cli/templates/worker.d.ts
CHANGED
@@ -1,5 +1,6 @@
  export { DOQueueHandler } from "./.build/durable-objects/queue.js";
  export { DOShardedTagCache } from "./.build/durable-objects/sharded-tag-cache.js";
+ export { BucketCachePurge } from "./.build/durable-objects/bucket-cache-purge.js";
  declare const _default: {
      fetch(request: Request<unknown, IncomingRequestCfProperties<unknown>>, env: CloudflareEnv, ctx: ExecutionContext): Promise<any>;
  };
package/dist/cli/templates/worker.js
CHANGED
@@ -6,6 +6,8 @@ import { handler as middlewareHandler } from "./middleware/handler.mjs";
  export { DOQueueHandler } from "./.build/durable-objects/queue.js";
  //@ts-expect-error: Will be resolved by wrangler build
  export { DOShardedTagCache } from "./.build/durable-objects/sharded-tag-cache.js";
+ //@ts-expect-error: Will be resolved by wrangler build
+ export { BucketCachePurge } from "./.build/durable-objects/bucket-cache-purge.js";
  export default {
      async fetch(request, env, ctx) {
          return runWithCloudflareRequestContext(request, env, ctx, async () => {
package/package.json
CHANGED
@@ -1,7 +1,7 @@
  {
      "name": "@opennextjs/cloudflare",
      "description": "Cloudflare builder for next apps",
-     "version": "1.0.4",
+     "version": "1.1.0",
      "type": "module",
      "bin": {
          "opennextjs-cloudflare": "dist/cli/index.js"
@@ -43,7 +43,7 @@
      "homepage": "https://github.com/opennextjs/opennextjs-cloudflare",
      "dependencies": {
          "@dotenvx/dotenvx": "1.31.0",
-         "@opennextjs/aws": "
+         "@opennextjs/aws": "3.6.4",
          "enquirer": "^2.4.1",
          "glob": "^11.0.0",
          "ts-tqdm": "^0.8.6"