@opennextjs/cloudflare 0.6.5 → 1.0.0-beta.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,4 +1,5 @@
1
- import type { DurableObjectQueueHandler } from "./durable-objects/queue";
1
+ import type { GetPlatformProxyOptions } from "wrangler";
2
+ import type { DOQueueHandler } from "./durable-objects/queue";
2
3
  import { DOShardedTagCache } from "./durable-objects/sharded-tag-cache";
3
4
  declare global {
4
5
  interface CloudflareEnv {
@@ -11,11 +12,11 @@ declare global {
11
12
  NEXT_TAG_CACHE_D1?: D1Database;
12
13
  NEXT_TAG_CACHE_DO_SHARDED?: DurableObjectNamespace<DOShardedTagCache>;
13
14
  NEXT_TAG_CACHE_DO_SHARDED_DLQ?: Queue;
14
- NEXT_CACHE_DO_QUEUE?: DurableObjectNamespace<DurableObjectQueueHandler>;
15
+ NEXT_CACHE_DO_QUEUE?: DurableObjectNamespace<DOQueueHandler>;
15
16
  NEXT_CACHE_DO_QUEUE_MAX_REVALIDATION?: string;
16
17
  NEXT_CACHE_DO_QUEUE_REVALIDATION_TIMEOUT_MS?: string;
17
18
  NEXT_CACHE_DO_QUEUE_RETRY_INTERVAL_MS?: string;
18
- NEXT_CACHE_DO_QUEUE_MAX_NUM_REVALIDATIONS?: string;
19
+ NEXT_CACHE_DO_QUEUE_MAX_RETRIES?: string;
19
20
  NEXT_CACHE_DO_QUEUE_DISABLE_SQLITE?: string;
20
21
  }
21
22
  }
@@ -49,5 +50,6 @@ export declare function getCloudflareContext<CfProperties extends Record<string,
49
50
  * with the open-next Cloudflare adapter
50
51
  *
51
52
  * Note: this function should only be called inside the Next.js config file, and although async it doesn't need to be `await`ed
53
+ * @param options options on how the function should operate and if/where to persist the platform data
52
54
  */
53
- export declare function initOpenNextCloudflareForDev(): Promise<void>;
55
+ export declare function initOpenNextCloudflareForDev(options?: GetPlatformProxyOptions): Promise<void>;
@@ -71,12 +71,17 @@ async function getCloudflareContextAsync() {
71
71
  * with the open-next Cloudflare adapter
72
72
  *
73
73
  * Note: this function should only be called inside the Next.js config file, and although async it doesn't need to be `await`ed
74
+ * @param options options on how the function should operate and if/where to persist the platform data
74
75
  */
75
- export async function initOpenNextCloudflareForDev() {
76
+ export async function initOpenNextCloudflareForDev(options) {
76
77
  const shouldInitializationRun = shouldContextInitializationRun();
77
78
  if (!shouldInitializationRun)
78
79
  return;
79
- const context = await getCloudflareContextFromWrangler();
80
+ if (options?.environment && process.env.NEXT_DEV_WRANGLER_ENV) {
81
+ console.warn(`'initOpenNextCloudflareForDev' has been called with an environment option while NEXT_DEV_WRANGLER_ENV is set.` +
82
+ ` NEXT_DEV_WRANGLER_ENV will be ignored and the environment will be set to: '${options.environment}'`);
83
+ }
84
+ const context = await getCloudflareContextFromWrangler(options);
80
85
  addCloudflareContextToNodejsGlobal(context);
81
86
  await monkeyPatchVmModuleEdgeContext(context);
82
87
  }
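The two hunks above add an optional `options` argument (wrangler's `GetPlatformProxyOptions`) to `initOpenNextCloudflareForDev`, with an explicitly passed `environment` taking precedence over `NEXT_DEV_WRANGLER_ENV`. A minimal next.config sketch of the new call; the entry point is the package's documented import, and the option values are illustrative rather than taken from this diff:

// next.config.ts (sketch; option names come from wrangler's GetPlatformProxyOptions)
import type { NextConfig } from "next";
import { initOpenNextCloudflareForDev } from "@opennextjs/cloudflare";

// Select the "dev" wrangler environment during `next dev` and persist platform data locally.
// When `environment` is passed here, NEXT_DEV_WRANGLER_ENV is ignored (see the warning above).
initOpenNextCloudflareForDev({
  environment: "dev",
  persist: { path: ".wrangler/state/v3" },
});

const nextConfig: NextConfig = {};

export default nextConfig;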
@@ -131,12 +136,14 @@ async function monkeyPatchVmModuleEdgeContext(cloudflareContext) {
131
136
  *
132
137
  * @returns the cloudflare context ready for use
133
138
  */
134
- async function getCloudflareContextFromWrangler() {
139
+ async function getCloudflareContextFromWrangler(options) {
135
140
  // Note: we never want wrangler to be bundled in the Next.js app, that's why the import below looks like it does
136
141
  const { getPlatformProxy } = await import(/* webpackIgnore: true */ `${"__wrangler".replaceAll("_", "")}`);
142
+ // This allows the selection of a wrangler environment while running in next dev mode
143
+ const environment = options?.environment ?? process.env.NEXT_DEV_WRANGLER_ENV;
137
144
  const { env, cf, ctx } = await getPlatformProxy({
138
- // This allows the selection of a wrangler environment while running in next dev mode
139
- environment: process.env.NEXT_DEV_WRANGLER_ENV,
145
+ ...options,
146
+ environment,
140
147
  });
141
148
  return {
142
149
  env,
@@ -5,7 +5,7 @@ interface FailedState {
5
5
  retryCount: number;
6
6
  nextAlarmMs: number;
7
7
  }
8
- export declare class DurableObjectQueueHandler extends DurableObject<CloudflareEnv> {
8
+ export declare class DOQueueHandler extends DurableObject<CloudflareEnv> {
9
9
  ongoingRevalidations: Map<string, Promise<void>>;
10
10
  sql: SqlStorage;
11
11
  routeInFailedState: Map<string, FailedState>;
@@ -13,7 +13,7 @@ export declare class DurableObjectQueueHandler extends DurableObject<CloudflareE
13
13
  readonly maxRevalidations: number;
14
14
  readonly revalidationTimeout: number;
15
15
  readonly revalidationRetryInterval: number;
16
- readonly maxRevalidationAttempts: number;
16
+ readonly maxRetries: number;
17
17
  readonly disableSQLite: boolean;
18
18
  constructor(ctx: DurableObjectState, env: CloudflareEnv);
19
19
  revalidate(msg: QueueMessage): Promise<void>;
@@ -4,8 +4,8 @@ import { DurableObject } from "cloudflare:workers";
4
4
  const DEFAULT_MAX_REVALIDATION = 5;
5
5
  const DEFAULT_REVALIDATION_TIMEOUT_MS = 10_000;
6
6
  const DEFAULT_RETRY_INTERVAL_MS = 2_000;
7
- const DEFAULT_MAX_NUM_REVALIDATIONS = 6;
8
- export class DurableObjectQueueHandler extends DurableObject {
7
+ const DEFAULT_MAX_RETRIES = 6;
8
+ export class DOQueueHandler extends DurableObject {
9
9
  // Ongoing revalidations are deduped by the deduplication id
10
10
  // Since this is running in waitUntil, we expect the durable object state to persist this during the duration of the revalidation
11
11
  // TODO: handle incremental cache with only eventual consistency (i.e. KV or R2/D1 with the optional cache layer on top)
@@ -17,7 +17,7 @@ export class DurableObjectQueueHandler extends DurableObject {
17
17
  maxRevalidations;
18
18
  revalidationTimeout;
19
19
  revalidationRetryInterval;
20
- maxRevalidationAttempts;
20
+ maxRetries;
21
21
  disableSQLite;
22
22
  constructor(ctx, env) {
23
23
  super(ctx, env);
@@ -35,9 +35,9 @@ export class DurableObjectQueueHandler extends DurableObject {
35
35
  this.revalidationRetryInterval = env.NEXT_CACHE_DO_QUEUE_RETRY_INTERVAL_MS
36
36
  ? parseInt(env.NEXT_CACHE_DO_QUEUE_RETRY_INTERVAL_MS)
37
37
  : DEFAULT_RETRY_INTERVAL_MS;
38
- this.maxRevalidationAttempts = env.NEXT_CACHE_DO_QUEUE_MAX_NUM_REVALIDATIONS
39
- ? parseInt(env.NEXT_CACHE_DO_QUEUE_MAX_NUM_REVALIDATIONS)
40
- : DEFAULT_MAX_NUM_REVALIDATIONS;
38
+ this.maxRetries = env.NEXT_CACHE_DO_QUEUE_MAX_RETRIES
39
+ ? parseInt(env.NEXT_CACHE_DO_QUEUE_MAX_RETRIES)
40
+ : DEFAULT_MAX_RETRIES;
41
41
  this.disableSQLite = env.NEXT_CACHE_DO_QUEUE_DISABLE_SQLITE === "true";
42
42
  // We restore the state
43
43
  ctx.blockConcurrencyWhile(async () => {
@@ -152,9 +152,8 @@ export class DurableObjectQueueHandler extends DurableObject {
152
152
  const existingFailedState = this.routeInFailedState.get(msg.MessageDeduplicationId);
153
153
  let updatedFailedState;
154
154
  if (existingFailedState) {
155
- if (existingFailedState.retryCount >= this.maxRevalidationAttempts) {
156
- // We give up after 6 retries and log the error
157
- error(`The revalidation for ${msg.MessageBody.host}${msg.MessageBody.url} has failed after 6 retries. It will not be tried again, but subsequent ISR requests will retry.`);
155
+ if (existingFailedState.retryCount >= this.maxRetries) {
156
+ error(`The revalidation for ${msg.MessageBody.host}${msg.MessageBody.url} has failed after ${this.maxRetries} retries. It will not be tried again, but subsequent ISR requests will retry.`);
158
157
  this.routeInFailedState.delete(msg.MessageDeduplicationId);
159
158
  return;
160
159
  }
@@ -1,5 +1,5 @@
1
1
  import { describe, expect, it, vi } from "vitest";
2
- import { DurableObjectQueueHandler } from "./queue";
2
+ import { DOQueueHandler } from "./queue";
3
3
  vi.mock("cloudflare:workers", () => ({
4
4
  DurableObject: class {
5
5
  ctx;
@@ -25,7 +25,7 @@ const createDurableObjectQueue = ({ fetchDuration, statusCode, headers, disableS
25
25
  },
26
26
  };
27
27
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
28
- return new DurableObjectQueueHandler(mockState, {
28
+ return new DOQueueHandler(mockState, {
29
29
  WORKER_SELF_REFERENCE: {
30
30
  fetch: vi.fn().mockReturnValue(new Promise((res) => setTimeout(() => res(new Response(null, {
31
31
  status: statusCode,
@@ -1,6 +1,7 @@
1
1
  import type { CacheValue, IncrementalCache, WithLastModified } from "@opennextjs/aws/types/overrides";
2
2
  export declare const CACHE_ASSET_DIR = "cdn-cgi/_next_cache";
3
3
  export declare const STATUS_DELETED = 1;
4
+ export declare const NAME = "cf-kv-incremental-cache";
4
5
  /**
5
6
  * Open Next cache based on cloudflare KV and Assets.
6
7
  *
@@ -8,8 +9,8 @@ export declare const STATUS_DELETED = 1;
8
9
  * The cloudflare context and process.env are not initialized yet
9
10
  * when the constructor is called.
10
11
  */
11
- declare class Cache implements IncrementalCache {
12
- readonly name = "cloudflare-kv";
12
+ declare class KVIncrementalCache implements IncrementalCache {
13
+ readonly name = "cf-kv-incremental-cache";
13
14
  get<IsFetch extends boolean = false>(key: string, isFetch?: IsFetch): Promise<WithLastModified<CacheValue<IsFetch>> | null>;
14
15
  set<IsFetch extends boolean = false>(key: string, value: CacheValue<IsFetch>, isFetch?: IsFetch): Promise<void>;
15
16
  delete(key: string): Promise<void>;
@@ -18,5 +19,5 @@ declare class Cache implements IncrementalCache {
18
19
  protected debug(...args: unknown[]): void;
19
20
  protected getBuildId(): string;
20
21
  }
21
- declare const _default: Cache;
22
+ declare const _default: KVIncrementalCache;
22
23
  export default _default;
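The KV-backed incremental cache is renamed from `Cache` to `KVIncrementalCache` and its `name` changes from "cloudflare-kv" to "cf-kv-incremental-cache", which is also exported as `NAME` so other modules (the regional cache guard and the populate-cache step later in this diff) can match it without hardcoding the string. A small sketch, assuming the override is published under the package's `overrides` subpath:

// sketch (import path assumed, not shown in this diff)
import kvIncrementalCache, { NAME as KV_CACHE_NAME } from "@opennextjs/cloudflare/overrides/incremental-cache/kv-incremental-cache";

// The exported constant mirrors the instance's `name`.
console.log(KV_CACHE_NAME); // "cf-kv-incremental-cache"
console.log(kvIncrementalCache.name === KV_CACHE_NAME); // true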
@@ -2,6 +2,7 @@ import { IgnorableError, RecoverableError } from "@opennextjs/aws/utils/error.js
2
2
  import { getCloudflareContext } from "../../cloudflare-context.js";
3
3
  export const CACHE_ASSET_DIR = "cdn-cgi/_next_cache";
4
4
  export const STATUS_DELETED = 1;
5
+ export const NAME = "cf-kv-incremental-cache";
5
6
  /**
6
7
  * Open Next cache based on cloudflare KV and Assets.
7
8
  *
@@ -9,8 +10,8 @@ export const STATUS_DELETED = 1;
9
10
  * The cloudflare context and process.env are not initialized yet
10
11
  * when the constructor is called.
11
12
  */
12
- class Cache {
13
- name = "cloudflare-kv";
13
+ class KVIncrementalCache {
14
+ name = NAME;
14
15
  async get(key, isFetch) {
15
16
  const cfEnv = getCloudflareContext().env;
16
17
  const kv = cfEnv.NEXT_INC_CACHE_KV;
@@ -120,4 +121,4 @@ class Cache {
120
121
  return process.env.NEXT_BUILD_ID ?? "no-build-id";
121
122
  }
122
123
  }
123
- export default new Cache();
124
+ export default new KVIncrementalCache();
@@ -1,4 +1,5 @@
1
1
  import type { CacheValue, IncrementalCache, WithLastModified } from "@opennextjs/aws/types/overrides.js";
2
+ export declare const NAME = "cf-r2-incremental-cache";
2
3
  /**
3
4
  * An instance of the Incremental Cache that uses an R2 bucket (`NEXT_INC_CACHE_R2_BUCKET`) as its
4
5
  * underlying data store.
@@ -7,7 +8,7 @@ import type { CacheValue, IncrementalCache, WithLastModified } from "@opennextjs
7
8
  * environment variable, and defaults to `incremental-cache`.
8
9
  */
9
10
  declare class R2IncrementalCache implements IncrementalCache {
10
- readonly name = "r2-incremental-cache";
11
+ readonly name = "cf-r2-incremental-cache";
11
12
  get<IsFetch extends boolean = false>(key: string, isFetch?: IsFetch): Promise<WithLastModified<CacheValue<IsFetch>> | null>;
12
13
  set<IsFetch extends boolean = false>(key: string, value: CacheValue<IsFetch>, isFetch?: IsFetch): Promise<void>;
13
14
  delete(key: string): Promise<void>;
@@ -1,6 +1,7 @@
1
1
  import { debug, error } from "@opennextjs/aws/adapters/logger.js";
2
2
  import { IgnorableError } from "@opennextjs/aws/utils/error.js";
3
3
  import { getCloudflareContext } from "../../cloudflare-context.js";
4
+ export const NAME = "cf-r2-incremental-cache";
4
5
  /**
5
6
  * An instance of the Incremental Cache that uses an R2 bucket (`NEXT_INC_CACHE_R2_BUCKET`) as its
6
7
  * underlying data store.
@@ -9,7 +10,7 @@ import { getCloudflareContext } from "../../cloudflare-context.js";
9
10
  * environment variable, and defaults to `incremental-cache`.
10
11
  */
11
12
  class R2IncrementalCache {
12
- name = "r2-incremental-cache";
13
+ name = NAME;
13
14
  async get(key, isFetch) {
14
15
  const r2 = getCloudflareContext().env.NEXT_INC_CACHE_R2_BUCKET;
15
16
  if (!r2)
@@ -5,17 +5,21 @@ type Options = {
5
5
  * The mode to use for the regional cache.
6
6
  *
7
7
  * - `short-lived`: Re-use a cache entry for up to a minute after it has been retrieved.
8
- * - `long-lived`: Re-use a fetch cache entry until it is revalidated (per-region), or an ISR/SSG entry for up to 30 minutes.
8
+ * - `long-lived`: Re-use a fetch cache entry until it is revalidated (per-region),
9
+ * or an ISR/SSG entry for up to 30 minutes.
9
10
  */
10
11
  mode: "short-lived" | "long-lived";
11
12
  /**
12
13
  * Whether the regional cache entry should be updated in the background or not when it experiences
13
14
  * a cache hit.
14
15
  *
15
- * Defaults to `false` for the `short-lived` mode, and `true` for the `long-lived` mode.
16
+ * @default `false` for the `short-lived` mode, and `true` for the `long-lived` mode.
16
17
  */
17
18
  shouldLazilyUpdateOnCacheHit?: boolean;
18
19
  };
20
+ /**
21
+ * Wrapper adding a regional cache on an `IncrementalCache` implementation
22
+ */
19
23
  declare class RegionalCache implements IncrementalCache {
20
24
  private store;
21
25
  private opts;
@@ -35,17 +39,19 @@ declare class RegionalCache implements IncrementalCache {
35
39
  *
36
40
  * The regional cache uses the Cache API.
37
41
  *
38
- * **WARNING:** If an entry is revalidated in one region, it will trigger an additional revalidation if
42
+ * **WARNING:**
43
+ * If an entry is revalidated on demand in one region (using either `revalidateTag`, `revalidatePath` or `res.revalidate`), it will trigger an additional revalidation if
39
44
  * a request is made to another region that has an entry stored in its regional cache.
40
45
  *
41
- * @param cache - Incremental cache instance.
42
- * @param opts.mode - The mode to use for the regional cache.
43
- * - `short-lived`: Re-use a cache entry for up to a minute after it has been retrieved.
44
- * - `long-lived`: Re-use a fetch cache entry until it is revalidated (per-region), or an ISR/SSG entry for up to 30 minutes.
45
- * @param opts.shouldLazilyUpdateOnCacheHit - Whether the regional cache entry should be updated in
46
- * the background or not when it experiences a cache hit.
46
+ * @param cache Incremental cache instance.
47
+ * @param opts.mode The mode to use for the regional cache.
48
+ * - `short-lived`: Re-use a cache entry for up to a minute after it has been retrieved.
49
+ * - `long-lived`: Re-use a fetch cache entry until it is revalidated (per-region),
50
+ * or an ISR/SSG entry for up to 30 minutes.
51
+ * @param opts.shouldLazilyUpdateOnCacheHit Whether the regional cache entry should be updated in
52
+ * the background or not when it experiences a cache hit.
47
53
  *
48
- * Defaults to `false` for the `short-lived` mode, and `true` for the `long-lived` mode.
54
+ * @default `false` for the `short-lived` mode, and `true` for the `long-lived` mode.
49
55
  */
50
56
  export declare function withRegionalCache(cache: IncrementalCache, opts: Options): RegionalCache;
51
57
  export {};
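`withRegionalCache` wraps an existing `IncrementalCache` with a per-region layer backed by the Cache API; the rewritten JSDoc above documents the two modes and the lazy-update default. A minimal sketch of wrapping the R2 cache, assuming the overrides are published under the package's `overrides` subpath (the paths are not part of this diff):

// open-next.config.ts excerpt (sketch)
import r2IncrementalCache from "@opennextjs/cloudflare/overrides/incremental-cache/r2-incremental-cache";
import { withRegionalCache } from "@opennextjs/cloudflare/overrides/incremental-cache/regional-cache";

// "long-lived": fetch entries are re-used until revalidated (per region) and ISR/SSG entries
// for up to 30 minutes; `shouldLazilyUpdateOnCacheHit` defaults to `true` in this mode.
export const incrementalCache = withRegionalCache(r2IncrementalCache, { mode: "long-lived" });

Note that, per the constructor guard added further down in this diff, wrapping the KV incremental cache ("cf-kv-incremental-cache") now throws, since it does not need a regional layer.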
@@ -1,7 +1,11 @@
1
1
  import { debug, error } from "@opennextjs/aws/adapters/logger.js";
2
2
  import { getCloudflareContext } from "../../cloudflare-context.js";
3
+ import { NAME as KV_CACHE_NAME } from "./kv-incremental-cache.js";
3
4
  const ONE_MINUTE_IN_SECONDS = 60;
4
5
  const THIRTY_MINUTES_IN_SECONDS = ONE_MINUTE_IN_SECONDS * 30;
6
+ /**
7
+ * Wrapper adding a regional cache on an `IncrementalCache` implementation
8
+ */
5
9
  class RegionalCache {
6
10
  store;
7
11
  opts;
@@ -10,6 +14,9 @@ class RegionalCache {
10
14
  constructor(store, opts) {
11
15
  this.store = store;
12
16
  this.opts = opts;
17
+ if (this.store.name === KV_CACHE_NAME) {
18
+ throw new Error("The KV incremental cache does not need a regional cache.");
19
+ }
13
20
  this.name = this.store.name;
14
21
  this.opts.shouldLazilyUpdateOnCacheHit ??= this.opts.mode === "long-lived";
15
22
  }
@@ -94,17 +101,19 @@ class RegionalCache {
94
101
  *
95
102
  * The regional cache uses the Cache API.
96
103
  *
97
- * **WARNING:** If an entry is revalidated in one region, it will trigger an additional revalidation if
104
+ * **WARNING:**
105
+ * If an entry is revalidated on demand in one region (using either `revalidateTag`, `revalidatePath` or `res.revalidate`), it will trigger an additional revalidation if
98
106
  * a request is made to another region that has an entry stored in its regional cache.
99
107
  *
100
- * @param cache - Incremental cache instance.
101
- * @param opts.mode - The mode to use for the regional cache.
102
- * - `short-lived`: Re-use a cache entry for up to a minute after it has been retrieved.
103
- * - `long-lived`: Re-use a fetch cache entry until it is revalidated (per-region), or an ISR/SSG entry for up to 30 minutes.
104
- * @param opts.shouldLazilyUpdateOnCacheHit - Whether the regional cache entry should be updated in
105
- * the background or not when it experiences a cache hit.
108
+ * @param cache Incremental cache instance.
109
+ * @param opts.mode The mode to use for the regional cache.
110
+ * - `short-lived`: Re-use a cache entry for up to a minute after it has been retrieved.
111
+ * - `long-lived`: Re-use a fetch cache entry until it is revalidated (per-region),
112
+ * or an ISR/SSG entry for up to 30 minutes.
113
+ * @param opts.shouldLazilyUpdateOnCacheHit Whether the regional cache entry should be updated in
114
+ * the background or not when it experiences a cache hit.
106
115
  *
107
- * Defaults to `false` for the `short-lived` mode, and `true` for the `long-lived` mode.
116
+ * @default `false` for the `short-lived` mode, and `true` for the `long-lived` mode.
108
117
  */
109
118
  export function withRegionalCache(cache, opts) {
110
119
  return new RegionalCache(cache, opts);
@@ -1,4 +1,5 @@
1
1
  import type { NextModeTagCache } from "@opennextjs/aws/types/overrides.js";
2
+ export declare const NAME = "d1-next-mode-tag-cache";
2
3
  export declare class D1NextModeTagCache implements NextModeTagCache {
3
4
  readonly mode: "nextMode";
4
5
  readonly name = "d1-next-mode-tag-cache";
@@ -1,9 +1,10 @@
1
1
  import { debug, error } from "@opennextjs/aws/adapters/logger.js";
2
2
  import { RecoverableError } from "@opennextjs/aws/utils/error.js";
3
3
  import { getCloudflareContext } from "../../cloudflare-context.js";
4
+ export const NAME = "d1-next-mode-tag-cache";
4
5
  export class D1NextModeTagCache {
5
6
  mode = "nextMode";
6
- name = "d1-next-mode-tag-cache";
7
+ name = NAME;
7
8
  async hasBeenRevalidated(tags, lastModified) {
8
9
  const { isDisabled, db } = this.getConfig();
9
10
  if (isDisabled)
@@ -1,67 +1,73 @@
1
1
  import type { NextModeTagCache } from "@opennextjs/aws/types/overrides.js";
2
- export declare const DEFAULT_SOFT_REPLICAS = 4;
3
- export declare const DEFAULT_HARD_REPLICAS = 2;
4
2
  export declare const DEFAULT_WRITE_RETRIES = 3;
5
3
  export declare const DEFAULT_NUM_SHARDS = 4;
4
+ export declare const NAME = "do-sharded-tag-cache";
6
5
  interface ShardedDOTagCacheOptions {
7
6
  /**
8
7
  * The number of shards that will be used.
8
+ *
9
9
  1 shard means 1 durable object instance.
10
- * Soft (internal next tags used for `revalidatePath`) and hard tags (the one you define in your app) will be split in different shards.
10
+ Soft (internal next tags used for `revalidatePath`) and hard tags (the ones you define in your app)
11
+ will be split into different shards.
12
+ *
11
13
  * The number of requests made to Durable Objects will scale linearly with the number of shards.
12
- * For example, a request involving 5 tags may access between 1 and 5 shards, with the upper limit being the lesser of the number of tags or the number of shards
14
+ * For example, a request involving 5 tags may access between 1 and 5 shards, with the upper limit being
15
+ * the lesser of the number of tags or the number of shards
16
+ *
13
17
  * @default 4
14
18
  */
15
19
  baseShardSize: number;
16
20
  /**
17
21
  * Whether to enable a regional cache on a per-shard basis
18
22
  * Because of the way tags are implemented in Next.js, some shards will have more requests than others. For these cases, it is recommended to enable the regional cache.
23
+ *
19
24
  * @default false
20
25
  */
21
26
  regionalCache?: boolean;
22
27
  /**
23
28
  * The TTL for the regional cache in seconds
24
29
  * Increasing this value will reduce the number of requests to the Durable Object, but it could make `revalidateTags`/`revalidatePath` calls take longer to take effect
30
+ *
25
31
  * @default 5
26
32
  */
27
33
  regionalCacheTtlSec?: number;
28
34
  /**
29
- * Whether to enable shard replication
30
- * Shard replication will duplicate each shards into N replicas to spread the load even more
31
- * All replicas of the a shard contain the same value - write are sent to all of the replicas.
32
- * This allows most frequent read operations to be sent to only one replica to spread the load.
33
- * For example with N being 2, tag `tag1` could be read from 2 different durable object instance
34
- * On read you only need to read from one of the shards, but on write you need to write to all shards
35
- * @default false
36
- */
37
- enableShardReplication?: boolean;
38
- /**
39
- * The number of replicas that will be used for shard replication
40
- * Soft shard replicas are more often accessed than hard shard replicas, so it is recommended to have more soft replicas than hard replicas
41
- * Soft replicas are for internal next tags used for `revalidatePath` (i.e. `_N_T_/layout`, `_N_T_/page1`), hard replicas are the tags defined in your app
42
- * @default { numberOfSoftReplicas: 4, numberOfHardReplicas: 2 }
35
+ * Enable shard replication to handle higher load.
36
+ *
37
+ * By default shards are not replicated (both `numberOfSoftReplicas` and `numberOfHardReplicas` default to `1`).
38
+ *
39
+ * Setting the number of replicas to a number greater than 1 will replicate the shards.
40
+ * Write operations always apply to all of the shards.
41
+ * However, read operations read from a single shard to spread the load.
42
+ *
43
+ * Soft replicas are for internal next tags used for `revalidatePath` (i.e. `_N_T_/layout`, `_N_T_/page1`).
44
+ * Hard replicas are the tags defined in your app.
45
+ *
46
+ * Soft replicas are accessed more often than hard replicas, so it is recommended to have more soft replicas
47
+ * than hard replicas (2x is a good rule of thumb)
43
48
  */
44
- shardReplicationOptions?: {
49
+ shardReplication?: {
45
50
  numberOfSoftReplicas: number;
46
51
  numberOfHardReplicas: number;
47
52
  };
48
53
  /**
49
54
  * The number of retries to perform when writing tags
55
+ *
50
56
  * @default 3
51
57
  */
52
58
  maxWriteRetries?: number;
53
59
  }
54
- interface TagCacheDOIdOptions {
60
+ interface DOIdOptions {
55
61
  baseShardId: string;
56
62
  numberOfReplicas: number;
57
63
  shardType: "soft" | "hard";
58
64
  replicaId?: number;
59
65
  }
60
- export declare class TagCacheDOId {
61
- options: TagCacheDOIdOptions;
66
+ export declare class DOId {
67
+ options: DOIdOptions;
62
68
  shardId: string;
63
69
  replicaId: number;
64
- constructor(options: TagCacheDOIdOptions);
70
+ constructor(options: DOIdOptions);
65
71
  private generateRandomNumberBetween;
66
72
  get key(): string;
67
73
  }
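The sharded DO tag cache drops the `enableShardReplication` flag and the implicit defaults (4 soft / 2 hard replicas) in favor of an explicit `shardReplication` option, so shards stay unreplicated unless replica counts are provided; `TagCacheDOId` also becomes `DOId`. A configuration sketch, assuming the override is published under the package's `overrides` subpath (the replica counts mirror the values used in the tests below):

// sketch (import path assumed, not shown in this diff)
import doShardedTagCache from "@opennextjs/cloudflare/overrides/tag-cache/do-sharded-tag-cache";

const tagCache = doShardedTagCache({
  baseShardSize: 4,
  regionalCache: true,
  // Replaces `enableShardReplication: true`; omit this to keep a single replica per shard.
  shardReplication: { numberOfSoftReplicas: 4, numberOfHardReplicas: 2 },
});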
@@ -92,7 +98,7 @@ declare class ShardedDOTagCache implements NextModeTagCache {
92
98
  tags: string[];
93
99
  generateAllReplicas?: boolean;
94
100
  }): {
95
- doId: TagCacheDOId;
101
+ doId: DOId;
96
102
  tags: string[];
97
103
  }[];
98
104
  private getConfig;
@@ -111,12 +117,12 @@ declare class ShardedDOTagCache implements NextModeTagCache {
111
117
  * @returns
112
118
  */
113
119
  writeTags(tags: string[]): Promise<void>;
114
- performWriteTagsWithRetry(doId: TagCacheDOId, tags: string[], lastModified: number, retryNumber?: number): Promise<void>;
120
+ performWriteTagsWithRetry(doId: DOId, tags: string[], lastModified: number, retryNumber?: number): Promise<void>;
115
121
  getCacheInstance(): Promise<Cache | undefined>;
116
- getCacheKey(doId: TagCacheDOId, tags: string[]): Promise<Request<unknown, CfProperties<unknown>>>;
117
- getFromRegionalCache(doId: TagCacheDOId, tags: string[]): Promise<Response | undefined>;
118
- putToRegionalCache(doId: TagCacheDOId, tags: string[], hasBeenRevalidated: boolean): Promise<void>;
119
- deleteRegionalCache(doId: TagCacheDOId, tags: string[]): Promise<void>;
122
+ getCacheKey(doId: DOId, tags: string[]): Promise<Request<unknown, CfProperties<unknown>>>;
123
+ getFromRegionalCache(doId: DOId, tags: string[]): Promise<Response | undefined>;
124
+ putToRegionalCache(doId: DOId, tags: string[], hasBeenRevalidated: boolean): Promise<void>;
125
+ deleteRegionalCache(doId: DOId, tags: string[]): Promise<void>;
120
126
  }
121
127
  declare const _default: (opts?: ShardedDOTagCacheOptions) => ShardedDOTagCache;
122
128
  export default _default;
@@ -2,12 +2,11 @@ import { debug, error } from "@opennextjs/aws/adapters/logger.js";
2
2
  import { generateShardId } from "@opennextjs/aws/core/routing/queue.js";
3
3
  import { IgnorableError } from "@opennextjs/aws/utils/error.js";
4
4
  import { getCloudflareContext } from "../../cloudflare-context";
5
- const SOFT_TAG_PREFIX = "_N_T_/";
6
- export const DEFAULT_SOFT_REPLICAS = 4;
7
- export const DEFAULT_HARD_REPLICAS = 2;
8
5
  export const DEFAULT_WRITE_RETRIES = 3;
9
6
  export const DEFAULT_NUM_SHARDS = 4;
10
- export class TagCacheDOId {
7
+ export const NAME = "do-sharded-tag-cache";
8
+ const SOFT_TAG_PREFIX = "_N_T_/";
9
+ export class DOId {
11
10
  options;
12
11
  shardId;
13
12
  replicaId;
@@ -27,15 +26,15 @@ export class TagCacheDOId {
27
26
  class ShardedDOTagCache {
28
27
  opts;
29
28
  mode = "nextMode";
30
- name = "do-sharded-tag-cache";
29
+ name = NAME;
31
30
  numSoftReplicas;
32
31
  numHardReplicas;
33
32
  maxWriteRetries;
34
33
  localCache;
35
34
  constructor(opts = { baseShardSize: DEFAULT_NUM_SHARDS }) {
36
35
  this.opts = opts;
37
- this.numSoftReplicas = opts.shardReplicationOptions?.numberOfSoftReplicas ?? DEFAULT_SOFT_REPLICAS;
38
- this.numHardReplicas = opts.shardReplicationOptions?.numberOfHardReplicas ?? DEFAULT_HARD_REPLICAS;
36
+ this.numSoftReplicas = opts.shardReplication?.numberOfSoftReplicas ?? 1;
37
+ this.numHardReplicas = opts.shardReplication?.numberOfHardReplicas ?? 1;
39
38
  this.maxWriteRetries = opts.maxWriteRetries ?? DEFAULT_WRITE_RETRIES;
40
39
  }
41
40
  getDurableObjectStub(doId) {
@@ -55,19 +54,14 @@ class ShardedDOTagCache {
55
54
  generateDOIdArray({ tags, shardType, generateAllReplicas = false, }) {
56
55
  let replicaIndexes = [1];
57
56
  const isSoft = shardType === "soft";
58
- let numReplicas = 1;
59
- if (this.opts.enableShardReplication) {
60
- numReplicas = isSoft ? this.numSoftReplicas : this.numHardReplicas;
61
- replicaIndexes = generateAllReplicas
62
- ? Array.from({ length: numReplicas }, (_, i) => i + 1)
63
- : [undefined];
64
- }
57
+ const numReplicas = isSoft ? this.numSoftReplicas : this.numHardReplicas;
58
+ replicaIndexes = generateAllReplicas ? Array.from({ length: numReplicas }, (_, i) => i + 1) : [undefined];
65
59
  return replicaIndexes.flatMap((replicaId) => {
66
60
  return tags
67
61
  .filter((tag) => (isSoft ? tag.startsWith(SOFT_TAG_PREFIX) : !tag.startsWith(SOFT_TAG_PREFIX)))
68
62
  .map((tag) => {
69
63
  return {
70
- doId: new TagCacheDOId({
64
+ doId: new DOId({
71
65
  baseShardId: generateShardId(tag, this.opts.baseShardSize, "shard"),
72
66
  numberOfReplicas: numReplicas,
73
67
  shardType,
@@ -1,5 +1,5 @@
1
1
  import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";
2
- import doShardedTagCache, { DEFAULT_HARD_REPLICAS, DEFAULT_SOFT_REPLICAS, TagCacheDOId, } from "./do-sharded-tag-cache";
2
+ import shardedDOTagCache, { DOId } from "./do-sharded-tag-cache";
3
3
  const hasBeenRevalidatedMock = vi.fn();
4
4
  const writeTagsMock = vi.fn();
5
5
  const idFromNameMock = vi.fn();
@@ -23,7 +23,7 @@ describe("DOShardedTagCache", () => {
23
23
  afterEach(() => vi.clearAllMocks());
24
24
  describe("generateShardId", () => {
25
25
  it("should generate a shardId", () => {
26
- const cache = doShardedTagCache();
26
+ const cache = shardedDOTagCache();
27
27
  const expectedResult = [
28
28
  { doId: expect.objectContaining({ shardId: "tag-hard;shard-1" }), tags: ["tag1"] },
29
29
  { doId: expect.objectContaining({ shardId: "tag-hard;shard-2" }), tags: ["tag2"] },
@@ -34,7 +34,7 @@ describe("DOShardedTagCache", () => {
34
34
  expect(result[1]?.doId.key).toBe("tag-hard;shard-2;replica-1");
35
35
  });
36
36
  it("should group tags by shard", () => {
37
- const cache = doShardedTagCache();
37
+ const cache = shardedDOTagCache();
38
38
  const expectedResult = [
39
39
  { doId: expect.objectContaining({ shardId: "tag-hard;shard-1" }), tags: ["tag1", "tag6"] },
40
40
  ];
@@ -43,13 +43,13 @@ describe("DOShardedTagCache", () => {
43
43
  expect(result[0]?.doId.key).toBe("tag-hard;shard-1;replica-1");
44
44
  });
45
45
  it("should generate the same shardId for the same tag", () => {
46
- const cache = doShardedTagCache();
46
+ const cache = shardedDOTagCache();
47
47
  const firstResult = cache.groupTagsByDO({ tags: ["tag1"] });
48
48
  const secondResult = cache.groupTagsByDO({ tags: ["tag1", "tag3", "tag4"] });
49
49
  expect(firstResult[0]).toEqual(secondResult[0]);
50
50
  });
51
51
  it("should split hard and soft tags", () => {
52
- const cache = doShardedTagCache();
52
+ const cache = shardedDOTagCache();
53
53
  const expectedResult = [
54
54
  { doId: expect.objectContaining({ shardId: "tag-soft;shard-3" }), tags: ["_N_T_/tag1"] },
55
55
  { doId: expect.objectContaining({ shardId: "tag-hard;shard-1", replicaId: 1 }), tags: ["tag1"] },
@@ -61,7 +61,10 @@ describe("DOShardedTagCache", () => {
61
61
  });
62
62
  describe("with shard replication", () => {
63
63
  it("should generate all doIds if generateAllReplicas is true", () => {
64
- const cache = doShardedTagCache({ baseShardSize: 4, enableShardReplication: true });
64
+ const cache = shardedDOTagCache({
65
+ baseShardSize: 4,
66
+ shardReplication: { numberOfSoftReplicas: 4, numberOfHardReplicas: 2 },
67
+ });
65
68
  const expectedResult = [
66
69
  { doId: expect.objectContaining({ shardId: "tag-soft;shard-3" }), tags: ["_N_T_/tag1"] },
67
70
  { doId: expect.objectContaining({ shardId: "tag-soft;shard-3" }), tags: ["_N_T_/tag1"] },
@@ -74,7 +77,10 @@ describe("DOShardedTagCache", () => {
74
77
  expect(result).toEqual(expectedResult);
75
78
  });
76
79
  it("should generate only one doIds by tag type if generateAllReplicas is false", () => {
77
- const cache = doShardedTagCache({ baseShardSize: 4, enableShardReplication: true });
80
+ const cache = shardedDOTagCache({
81
+ baseShardSize: 4,
82
+ shardReplication: { numberOfSoftReplicas: 4, numberOfHardReplicas: 2 },
83
+ });
78
84
  const shardedTagCollection = cache.groupTagsByDO({
79
85
  tags: ["tag1", "_N_T_/tag1"],
80
86
  generateAllReplicas: false,
@@ -86,9 +92,9 @@ describe("DOShardedTagCache", () => {
86
92
  expect(secondDOId?.shardId).toBe("tag-hard;shard-1");
87
93
  // We still need to check if the last part is between the correct boundaries
88
94
  expect(firstDOId?.replicaId).toBeGreaterThanOrEqual(1);
89
- expect(firstDOId?.replicaId).toBeLessThanOrEqual(DEFAULT_SOFT_REPLICAS);
95
+ expect(firstDOId?.replicaId).toBeLessThanOrEqual(4);
90
96
  expect(secondDOId?.replicaId).toBeGreaterThanOrEqual(1);
91
- expect(secondDOId?.replicaId).toBeLessThanOrEqual(DEFAULT_HARD_REPLICAS);
97
+ expect(secondDOId?.replicaId).toBeLessThanOrEqual(2);
92
98
  });
93
99
  });
94
100
  });
@@ -102,13 +108,13 @@ describe("DOShardedTagCache", () => {
102
108
  globalThis.openNextConfig = {
103
109
  dangerous: { disableTagCache: true },
104
110
  };
105
- const cache = doShardedTagCache();
111
+ const cache = shardedDOTagCache();
106
112
  const result = await cache.hasBeenRevalidated(["tag1"]);
107
113
  expect(result).toBe(false);
108
114
  expect(idFromNameMock).not.toHaveBeenCalled();
109
115
  });
110
116
  it("should return false if stub return false", async () => {
111
- const cache = doShardedTagCache();
117
+ const cache = shardedDOTagCache();
112
118
  cache.getFromRegionalCache = vi.fn();
113
119
  hasBeenRevalidatedMock.mockImplementationOnce(() => false);
114
120
  const result = await cache.hasBeenRevalidated(["tag1"], 123456);
@@ -118,7 +124,7 @@ describe("DOShardedTagCache", () => {
118
124
  expect(result).toBe(false);
119
125
  });
120
126
  it("should return true if stub return true", async () => {
121
- const cache = doShardedTagCache();
127
+ const cache = shardedDOTagCache();
122
128
  cache.getFromRegionalCache = vi.fn();
123
129
  hasBeenRevalidatedMock.mockImplementationOnce(() => true);
124
130
  const result = await cache.hasBeenRevalidated(["tag1"], 123456);
@@ -128,7 +134,7 @@ describe("DOShardedTagCache", () => {
128
134
  expect(result).toBe(true);
129
135
  });
130
136
  it("should return false if it throws", async () => {
131
- const cache = doShardedTagCache();
137
+ const cache = shardedDOTagCache();
132
138
  cache.getFromRegionalCache = vi.fn();
133
139
  hasBeenRevalidatedMock.mockImplementationOnce(() => {
134
140
  throw new Error("error");
@@ -140,7 +146,7 @@ describe("DOShardedTagCache", () => {
140
146
  expect(result).toBe(false);
141
147
  });
142
148
  it("Should return from the cache if it was found there", async () => {
143
- const cache = doShardedTagCache();
149
+ const cache = shardedDOTagCache();
144
150
  cache.getFromRegionalCache = vi.fn().mockReturnValueOnce(new Response("true"));
145
151
  const result = await cache.hasBeenRevalidated(["tag1"], 123456);
146
152
  expect(result).toBe(true);
@@ -148,7 +154,7 @@ describe("DOShardedTagCache", () => {
148
154
  expect(hasBeenRevalidatedMock).not.toHaveBeenCalled();
149
155
  });
150
156
  it("should try to put the result in the cache if it was not revalidated", async () => {
151
- const cache = doShardedTagCache();
157
+ const cache = shardedDOTagCache();
152
158
  cache.getFromRegionalCache = vi.fn();
153
159
  cache.putToRegionalCache = vi.fn();
154
160
  hasBeenRevalidatedMock.mockImplementationOnce(() => false);
@@ -158,7 +164,7 @@ describe("DOShardedTagCache", () => {
158
164
  expect(cache.putToRegionalCache).toHaveBeenCalled();
159
165
  });
160
166
  it("should call all the durable object instance", async () => {
161
- const cache = doShardedTagCache();
167
+ const cache = shardedDOTagCache();
162
168
  cache.getFromRegionalCache = vi.fn();
163
169
  const result = await cache.hasBeenRevalidated(["tag1", "tag2"], 123456);
164
170
  expect(result).toBe(false);
@@ -181,20 +187,20 @@ describe("DOShardedTagCache", () => {
181
187
  globalThis.openNextConfig = {
182
188
  dangerous: { disableTagCache: true },
183
189
  };
184
- const cache = doShardedTagCache();
190
+ const cache = shardedDOTagCache();
185
191
  await cache.writeTags(["tag1"]);
186
192
  expect(idFromNameMock).not.toHaveBeenCalled();
187
193
  expect(writeTagsMock).not.toHaveBeenCalled();
188
194
  });
189
195
  it("should write the tags to the cache", async () => {
190
- const cache = doShardedTagCache();
196
+ const cache = shardedDOTagCache();
191
197
  await cache.writeTags(["tag1"]);
192
198
  expect(idFromNameMock).toHaveBeenCalled();
193
199
  expect(writeTagsMock).toHaveBeenCalled();
194
200
  expect(writeTagsMock).toHaveBeenCalledWith(["tag1"], 1000);
195
201
  });
196
202
  it("should write the tags to the cache for multiple shards", async () => {
197
- const cache = doShardedTagCache();
203
+ const cache = shardedDOTagCache();
198
204
  await cache.writeTags(["tag1", "tag2"]);
199
205
  expect(idFromNameMock).toHaveBeenCalledTimes(2);
200
206
  expect(writeTagsMock).toHaveBeenCalledTimes(2);
@@ -202,7 +208,10 @@ describe("DOShardedTagCache", () => {
202
208
  expect(writeTagsMock).toHaveBeenCalledWith(["tag2"], 1000);
203
209
  });
204
210
  it('should write to all the replicated shards if "generateAllReplicas" is true', async () => {
205
- const cache = doShardedTagCache({ baseShardSize: 4, enableShardReplication: true });
211
+ const cache = shardedDOTagCache({
212
+ baseShardSize: 4,
213
+ shardReplication: { numberOfSoftReplicas: 4, numberOfHardReplicas: 2 },
214
+ });
206
215
  await cache.writeTags(["tag1", "_N_T_/tag1"]);
207
216
  expect(idFromNameMock).toHaveBeenCalledTimes(6);
208
217
  expect(writeTagsMock).toHaveBeenCalledTimes(6);
@@ -210,7 +219,7 @@ describe("DOShardedTagCache", () => {
210
219
  expect(writeTagsMock).toHaveBeenCalledWith(["_N_T_/tag1"], 1000);
211
220
  });
212
221
  it("should call deleteRegionalCache", async () => {
213
- const cache = doShardedTagCache();
222
+ const cache = shardedDOTagCache();
214
223
  cache.deleteRegionalCache = vi.fn();
215
224
  await cache.writeTags(["tag1"]);
216
225
  expect(cache.deleteRegionalCache).toHaveBeenCalled();
@@ -220,7 +229,7 @@ describe("DOShardedTagCache", () => {
220
229
  });
221
230
  describe("getCacheInstance", () => {
222
231
  it("should return undefined by default", async () => {
223
- const cache = doShardedTagCache();
232
+ const cache = shardedDOTagCache();
224
233
  expect(await cache.getCacheInstance()).toBeUndefined();
225
234
  });
226
235
  it("should try to return the cache instance if regional cache is enabled", async () => {
@@ -228,7 +237,7 @@ describe("DOShardedTagCache", () => {
228
237
  globalThis.caches = {
229
238
  open: vi.fn().mockResolvedValue("cache"),
230
239
  };
231
- const cache = doShardedTagCache({ baseShardSize: 4, regionalCache: true });
240
+ const cache = shardedDOTagCache({ baseShardSize: 4, regionalCache: true });
232
241
  expect(cache.localCache).toBeUndefined();
233
242
  expect(await cache.getCacheInstance()).toBe("cache");
234
243
  expect(cache.localCache).toBe("cache");
@@ -238,8 +247,8 @@ describe("DOShardedTagCache", () => {
238
247
  });
239
248
  describe("getFromRegionalCache", () => {
240
249
  it("should return undefined if regional cache is disabled", async () => {
241
- const cache = doShardedTagCache();
242
- const doId = new TagCacheDOId({
250
+ const cache = shardedDOTagCache();
251
+ const doId = new DOId({
243
252
  baseShardId: "shard-1",
244
253
  numberOfReplicas: 1,
245
254
  shardType: "hard",
@@ -253,8 +262,8 @@ describe("DOShardedTagCache", () => {
253
262
  match: vi.fn().mockResolvedValue("response"),
254
263
  }),
255
264
  };
256
- const cache = doShardedTagCache({ baseShardSize: 4, regionalCache: true });
257
- const doId = new TagCacheDOId({
265
+ const cache = shardedDOTagCache({ baseShardSize: 4, regionalCache: true });
266
+ const doId = new DOId({
258
267
  baseShardId: "shard-1",
259
268
  numberOfReplicas: 1,
260
269
  shardType: "hard",
@@ -266,11 +275,11 @@ describe("DOShardedTagCache", () => {
266
275
  });
267
276
  describe("getCacheKey", () => {
268
277
  it("should return the cache key without the random part", async () => {
269
- const cache = doShardedTagCache();
270
- const doId1 = new TagCacheDOId({ baseShardId: "shard-0", numberOfReplicas: 1, shardType: "hard" });
278
+ const cache = shardedDOTagCache();
279
+ const doId1 = new DOId({ baseShardId: "shard-0", numberOfReplicas: 1, shardType: "hard" });
271
280
  const reqKey = await cache.getCacheKey(doId1, ["_N_T_/tag1"]);
272
281
  expect(reqKey.url).toBe("http://local.cache/shard/tag-hard;shard-0?tags=_N_T_%2Ftag1");
273
- const doId2 = new TagCacheDOId({
282
+ const doId2 = new DOId({
274
283
  baseShardId: "shard-1",
275
284
  numberOfReplicas: 1,
276
285
  shardType: "hard",
@@ -283,12 +292,12 @@ describe("DOShardedTagCache", () => {
283
292
  it("should retry if it fails", async () => {
284
293
  vi.useFakeTimers();
285
294
  vi.setSystemTime(1000);
286
- const cache = doShardedTagCache();
295
+ const cache = shardedDOTagCache();
287
296
  writeTagsMock.mockImplementationOnce(() => {
288
297
  throw new Error("error");
289
298
  });
290
299
  const spiedFn = vi.spyOn(cache, "performWriteTagsWithRetry");
291
- const doId = new TagCacheDOId({
300
+ const doId = new DOId({
292
301
  baseShardId: "shard-1",
293
302
  numberOfReplicas: 1,
294
303
  shardType: "hard",
@@ -303,12 +312,12 @@ describe("DOShardedTagCache", () => {
303
312
  it("should stop retrying after 3 times", async () => {
304
313
  vi.useFakeTimers();
305
314
  vi.setSystemTime(1000);
306
- const cache = doShardedTagCache();
315
+ const cache = shardedDOTagCache();
307
316
  writeTagsMock.mockImplementationOnce(() => {
308
317
  throw new Error("error");
309
318
  });
310
319
  const spiedFn = vi.spyOn(cache, "performWriteTagsWithRetry");
311
- await cache.performWriteTagsWithRetry(new TagCacheDOId({ baseShardId: "shard-1", numberOfReplicas: 1, shardType: "hard" }), ["tag1"], Date.now(), 3);
320
+ await cache.performWriteTagsWithRetry(new DOId({ baseShardId: "shard-1", numberOfReplicas: 1, shardType: "hard" }), ["tag1"], Date.now(), 3);
312
321
  expect(writeTagsMock).toHaveBeenCalledTimes(1);
313
322
  expect(spiedFn).toHaveBeenCalledTimes(1);
314
323
  expect(sendDLQMock).toHaveBeenCalledWith({
@@ -82,7 +82,14 @@ rule:
82
82
  regex: ^NodeModuleLoader$
83
83
  fix: |
84
84
  async load($ID) {
85
- ${getRequires("$ID", files, serverDir)}
85
+ ${buildOpts.debug
86
+ ? ` try {
87
+ ${getRequires("$ID", files, serverDir)}
88
+ } catch (e) {
89
+ console.error('Exception in NodeModuleLoader', e);
90
+ throw e;
91
+ }`
92
+ : getRequires("$ID", files, serverDir)}
86
93
  }`;
87
94
  }
88
95
  async function getRequirePageRule(buildOpts) {
@@ -112,7 +119,14 @@ function requirePage($PAGE, $DIST_DIR, $IS_APP_PATH) {
112
119
  process.env.__NEXT_PRIVATE_RUNTIME_TYPE = $IS_APP_PATH ? 'app' : 'pages';
113
120
  try {
114
121
  ${getRequires("pagePath", jsFiles, serverDir)}
115
- } finally {
122
+ } ${buildOpts.debug
123
+ ? `
124
+ catch (e) {
125
+ console.error("Exception in requirePage", e);
126
+ throw e;
127
+ }`
128
+ : ``}
129
+ finally {
116
130
  process.env.__NEXT_PRIVATE_RUNTIME_TYPE = '';
117
131
  }
118
132
  }`,
@@ -1,8 +1,15 @@
1
1
  import { patchCode } from "@opennextjs/aws/build/patch/astCodePatcher.js";
2
+ // Remove an instantiation of `AbortController` from the runtime.
3
+ //
4
+ // Solves https://github.com/cloudflare/workerd/issues/3657:
5
+ // - The `AbortController` is meant for the client side, but ends up in the server code somehow.
6
+ //   That's why we can get rid of it. See https://github.com/vercel/next.js/pull/73975/files.
7
+ // - Top level instantiations of `AbortController` are not supported by workerd as of March 2025.
8
+ // See https://github.com/cloudflare/workerd/issues/3657
9
+ // - As Next code is no longer executed at top level, we no longer need to apply this patch.
10
+ // See https://github.com/opennextjs/opennextjs-cloudflare/pull/497
11
+ //
2
12
  // We try to be as specific as possible to avoid patching the wrong thing here
3
- // It seems that there is a bug in the worker runtime. When the AbortController is created outside of the request context it throws an error (not sure if it's expected or not) except in this case. https://github.com/cloudflare/workerd/issues/3657
4
- // It fails while requiring the `app-page.runtime.prod.js` file, but instead of throwing an error, it just return an empty object for the `require('app-page.runtime.prod.js')` call which makes every request to an app router page fail.
5
- // If it's a bug in workerd and it's not expected to throw an error, we can remove this patch.
6
13
  export const abortControllerRule = `
7
14
  rule:
8
15
  all:
@@ -65,18 +72,7 @@ fix:
65
72
  'true'
66
73
  `;
67
74
  export function patchNextMinimal(updater) {
68
- updater.updateContent("patch-abortController-next15.2", [
69
- {
70
- field: {
71
- filter: /app-page(-experimental)?\.runtime\.prod\.js$/,
72
- contentFilter: /new AbortController/,
73
- callback: ({ contents }) => {
74
- return patchCode(contents, abortControllerRule);
75
- },
76
- },
77
- },
78
- ]);
79
- updater.updateContent("patch-next-minimal", [
75
+ return updater.updateContent("patch-next-minimal", [
80
76
  {
81
77
  field: {
82
78
  filter: /next-server\.(js)$/,
@@ -87,8 +83,4 @@ export function patchNextMinimal(updater) {
87
83
  },
88
84
  },
89
85
  ]);
90
- return {
91
- name: "patch-abortController",
92
- setup() { },
93
- };
94
86
  }
@@ -2,6 +2,8 @@ import { existsSync } from "node:fs";
2
2
  import path from "node:path";
3
3
  import logger from "@opennextjs/aws/logger.js";
4
4
  import { globSync } from "glob";
5
+ import { NAME as R2_CACHE_NAME } from "../../api/overrides/incremental-cache/r2-incremental-cache.js";
6
+ import { NAME as D1_TAG_NAME } from "../../api/overrides/tag-cache/d1-next-tag-cache.js";
5
7
  import { runWrangler } from "../utils/run-wrangler.js";
6
8
  async function resolveCacheName(value) {
7
9
  return typeof value === "function" ? (await value()).name : value;
@@ -31,7 +33,7 @@ export async function populateCache(options, config, populateCacheOptions) {
31
33
  if (!config.dangerous?.disableIncrementalCache && incrementalCache) {
32
34
  const name = await resolveCacheName(incrementalCache);
33
35
  switch (name) {
34
- case "r2-incremental-cache": {
36
+ case R2_CACHE_NAME: {
35
37
  logger.info("\nPopulating R2 incremental cache...");
36
38
  const assets = getCacheAssetPaths(options);
37
39
  assets.forEach(({ fsPath, destPath }) => {
@@ -51,7 +53,7 @@ export async function populateCache(options, config, populateCacheOptions) {
51
53
  if (!config.dangerous?.disableTagCache && !config.dangerous?.disableIncrementalCache && tagCache) {
52
54
  const name = await resolveCacheName(tagCache);
53
55
  switch (name) {
54
- case "d1-next-mode-tag-cache": {
56
+ case D1_TAG_NAME: {
55
57
  logger.info("\nCreating D1 table if necessary...");
56
58
  runWrangler(options, [
57
59
  "d1 execute",
@@ -61,16 +63,6 @@ export async function populateCache(options, config, populateCacheOptions) {
61
63
  logger.info("\nSuccessfully created D1 table");
62
64
  break;
63
65
  }
64
- case "d1-tag-cache": {
65
- logger.info("\nPopulating D1 tag cache...");
66
- runWrangler(options, [
67
- "d1 execute",
68
- "NEXT_TAG_CACHE_D1",
69
- `--file ${JSON.stringify(path.join(options.outputDir, "cloudflare/cache-assets-manifest.sql"))}`,
70
- ], { ...populateCacheOptions, logging: "error" });
71
- logger.info("Successfully populated cache");
72
- break;
73
- }
74
66
  default:
75
67
  logger.info("Tag cache does not need populating");
76
68
  }
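`populateCache` now matches overrides against the exported `NAME` constants rather than string literals, and the branch that populated the removed original-mode `d1-tag-cache` is gone; the remaining D1 branch only creates the table for `D1NextModeTagCache` if necessary. A hedged `open-next.config.ts` sketch whose overrides resolve to those names (`defineCloudflareConfig` and the import paths are assumptions, not shown in this diff):

// open-next.config.ts (sketch)
import { defineCloudflareConfig } from "@opennextjs/cloudflare";
import r2IncrementalCache from "@opennextjs/cloudflare/overrides/incremental-cache/r2-incremental-cache";
import { D1NextModeTagCache } from "@opennextjs/cloudflare/overrides/tag-cache/d1-next-tag-cache";

export default defineCloudflareConfig({
  incrementalCache: r2IncrementalCache, // resolves to "cf-r2-incremental-cache": R2 cache assets are uploaded
  tagCache: new D1NextModeTagCache(),   // resolves to "d1-next-mode-tag-cache": the D1 table is created
});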
@@ -1,4 +1,4 @@
1
- export { DurableObjectQueueHandler } from "./.build/durable-objects/queue.js";
1
+ export { DOQueueHandler } from "./.build/durable-objects/queue.js";
2
2
  export { DOShardedTagCache } from "./.build/durable-objects/sharded-tag-cache.js";
3
3
  declare const _default: ExportedHandler<CloudflareEnv>;
4
4
  export default _default;
@@ -1,4 +1,5 @@
1
1
  import { AsyncLocalStorage } from "node:async_hooks";
2
+ import process from "node:process";
2
3
  // @ts-expect-error: resolved by wrangler build
3
4
  import * as nextEnvVars from "./env/next-env.mjs";
4
5
  const cloudflareContextALS = new AsyncLocalStorage();
@@ -9,7 +10,7 @@ Object.defineProperty(globalThis, Symbol.for("__cloudflare-context__"), {
9
10
  },
10
11
  });
11
12
  //@ts-expect-error: Will be resolved by wrangler build
12
- export { DurableObjectQueueHandler } from "./.build/durable-objects/queue.js";
13
+ export { DOQueueHandler } from "./.build/durable-objects/queue.js";
13
14
  //@ts-expect-error: Will be resolved by wrangler build
14
15
  export { DOShardedTagCache } from "./.build/durable-objects/sharded-tag-cache.js";
15
16
  // Populate process.env on the first request
@@ -54,6 +55,11 @@ function populateProcessEnv(url, env) {
54
55
  if (processEnvPopulated) {
55
56
  return;
56
57
  }
58
+ // Some packages rely on `process.version` and `process.versions.node` (e.g. Jose@4)
59
+ // TODO: Remove when https://github.com/unjs/unenv/pull/493 is merged
60
+ Object.assign(process, { version: process.version || "v22.14.0" });
61
+ // @ts-expect-error Node type does not match workerd
62
+ Object.assign(process.versions, { node: "22.14.0", ...process.versions });
57
63
  processEnvPopulated = true;
58
64
  for (const [key, value] of Object.entries(env)) {
59
65
  if (typeof value === "string") {
@@ -63,7 +69,7 @@ function populateProcessEnv(url, env) {
63
69
  const mode = env.NEXTJS_ENV ?? "production";
64
70
  if (nextEnvVars[mode]) {
65
71
  for (const key in nextEnvVars[mode]) {
66
- process.env[key] = nextEnvVars[mode][key];
72
+ process.env[key] ??= nextEnvVars[mode][key];
67
73
  }
68
74
  }
69
75
  // Set the default Origin for the origin resolver.
package/package.json CHANGED
@@ -1,7 +1,7 @@
1
1
  {
2
2
  "name": "@opennextjs/cloudflare",
3
3
  "description": "Cloudflare builder for next apps",
4
- "version": "0.6.5",
4
+ "version": "1.0.0-beta.0",
5
5
  "type": "module",
6
6
  "bin": {
7
7
  "opennextjs-cloudflare": "dist/cli/index.js"
@@ -27,7 +27,7 @@
27
27
  ],
28
28
  "repository": {
29
29
  "type": "git",
30
- "url": "https://github.com/opennextjs/opennextjs-cloudflare.git",
30
+ "url": "git+https://github.com/opennextjs/opennextjs-cloudflare.git",
31
31
  "directory": "packages/cloudflare"
32
32
  },
33
33
  "keywords": [
@@ -43,7 +43,7 @@
43
43
  "homepage": "https://github.com/opennextjs/opennextjs-cloudflare",
44
44
  "dependencies": {
45
45
  "@dotenvx/dotenvx": "1.31.0",
46
- "@opennextjs/aws": "https://pkg.pr.new/@opennextjs/aws@802",
46
+ "@opennextjs/aws": "3.5.4",
47
47
  "enquirer": "^2.4.1",
48
48
  "glob": "^11.0.0"
49
49
  },
@@ -67,7 +67,7 @@
67
67
  "vitest": "^2.1.1"
68
68
  },
69
69
  "peerDependencies": {
70
- "wrangler": "^3.114.1 || ^4.6.0"
70
+ "wrangler": "^3.114.3 || ^4.7.0"
71
71
  },
72
72
  "scripts": {
73
73
  "clean": "rimraf dist",
@@ -1,33 +0,0 @@
1
- import type { OriginalTagCache } from "@opennextjs/aws/types/overrides.js";
2
- /**
3
- * An instance of the Tag Cache that uses a D1 binding (`NEXT_TAG_CACHE_D1`) as it's underlying data store.
4
- *
5
- * **Tag/path mappings table**
6
- *
7
- * Information about the relation between tags and paths is stored in a `tags` table that contains
8
- * two columns; `tag`, and `path`.
9
- *
10
- * This table should be populated using an SQL file that is generated during the build process.
11
- *
12
- * **Tag revalidations table**
13
- *
14
- * Revalidation times for tags are stored in a `revalidations` table that contains two columns; `tags`,
15
- * and `revalidatedAt`.
16
- */
17
- declare class D1TagCache implements OriginalTagCache {
18
- readonly name = "d1-tag-cache";
19
- getByPath(rawPath: string): Promise<string[]>;
20
- getByTag(rawTag: string): Promise<string[]>;
21
- getLastModified(path: string, lastModified?: number): Promise<number>;
22
- writeTags(tags: {
23
- tag: string;
24
- path: string;
25
- revalidatedAt?: number;
26
- }[]): Promise<void>;
27
- private getConfig;
28
- protected removeBuildId(key: string): string;
29
- protected getCacheKey(key: string): string;
30
- protected getBuildId(): string;
31
- }
32
- declare const _default: D1TagCache;
33
- export default _default;
@@ -1,140 +0,0 @@
1
- import { debug, error } from "@opennextjs/aws/adapters/logger.js";
2
- import { RecoverableError } from "@opennextjs/aws/utils/error.js";
3
- import { getCloudflareContext } from "../../cloudflare-context.js";
4
- /**
5
- * An instance of the Tag Cache that uses a D1 binding (`NEXT_TAG_CACHE_D1`) as it's underlying data store.
6
- *
7
- * **Tag/path mappings table**
8
- *
9
- * Information about the relation between tags and paths is stored in a `tags` table that contains
10
- * two columns; `tag`, and `path`.
11
- *
12
- * This table should be populated using an SQL file that is generated during the build process.
13
- *
14
- * **Tag revalidations table**
15
- *
16
- * Revalidation times for tags are stored in a `revalidations` table that contains two columns; `tags`,
17
- * and `revalidatedAt`.
18
- */
19
- class D1TagCache {
20
- name = "d1-tag-cache";
21
- async getByPath(rawPath) {
22
- const { isDisabled, db } = this.getConfig();
23
- if (isDisabled)
24
- return [];
25
- const path = this.getCacheKey(rawPath);
26
- try {
27
- const { success, results } = await db
28
- .prepare(`SELECT tag FROM tags WHERE path = ?`)
29
- .bind(path)
30
- .all();
31
- if (!success)
32
- throw new RecoverableError(`D1 select failed for ${path}`);
33
- const tags = results?.map((item) => this.removeBuildId(item.tag));
34
- debug("tags for path", path, tags);
35
- return tags;
36
- }
37
- catch (e) {
38
- error("Failed to get tags by path", e);
39
- return [];
40
- }
41
- }
42
- async getByTag(rawTag) {
43
- const { isDisabled, db } = this.getConfig();
44
- if (isDisabled)
45
- return [];
46
- const tag = this.getCacheKey(rawTag);
47
- try {
48
- const { success, results } = await db
49
- .prepare(`SELECT path FROM tags WHERE tag = ?`)
50
- .bind(tag)
51
- .all();
52
- if (!success)
53
- throw new RecoverableError(`D1 select failed for ${tag}`);
54
- const paths = results?.map((item) => this.removeBuildId(item.path));
55
- debug("paths for tag", tag, paths);
56
- return paths;
57
- }
58
- catch (e) {
59
- error("Failed to get by tag", e);
60
- return [];
61
- }
62
- }
63
- async getLastModified(path, lastModified) {
64
- const { isDisabled, db } = this.getConfig();
65
- if (isDisabled)
66
- return lastModified ?? Date.now();
67
- try {
68
- const { success, results } = await db
69
- .prepare(`SELECT revalidations.tag FROM revalidations
70
- INNER JOIN tags ON revalidations.tag = tags.tag
71
- WHERE tags.path = ? AND revalidations.revalidatedAt > ?;`)
72
- .bind(this.getCacheKey(path), lastModified ?? 0)
73
- .all();
74
- if (!success)
75
- throw new RecoverableError(`D1 select failed for ${path} - ${lastModified ?? 0}`);
76
- debug("revalidatedTags", results);
77
- return results?.length > 0 ? -1 : (lastModified ?? Date.now());
78
- }
79
- catch (e) {
80
- error("Failed to get revalidated tags", e);
81
- return lastModified ?? Date.now();
82
- }
83
- }
84
- async writeTags(tags) {
85
- const { isDisabled, db } = this.getConfig();
86
- if (isDisabled || tags.length === 0)
87
- return;
88
- try {
89
- const uniqueTags = new Set();
90
- const results = await db.batch(tags
91
- .map(({ tag, path, revalidatedAt }) => {
92
- if (revalidatedAt === 1) {
93
- // new tag/path mapping from set
94
- return db
95
- .prepare(`INSERT INTO tags (tag, path) VALUES (?, ?)`)
96
- .bind(this.getCacheKey(tag), this.getCacheKey(path));
97
- }
98
- if (!uniqueTags.has(tag) && revalidatedAt !== -1) {
99
- // tag was revalidated
100
- uniqueTags.add(tag);
101
- return db
102
- .prepare(`INSERT INTO revalidations (tag, revalidatedAt) VALUES (?, ?)`)
103
- .bind(this.getCacheKey(tag), revalidatedAt ?? Date.now());
104
- }
105
- })
106
- .filter((stmt) => !!stmt));
107
- const failedResults = results.filter((res) => !res.success);
108
- if (failedResults.length > 0) {
109
- throw new RecoverableError(`${failedResults.length} tags failed to write`);
110
- }
111
- }
112
- catch (e) {
113
- error("Failed to batch write tags", e);
114
- }
115
- }
116
- getConfig() {
117
- const cfEnv = getCloudflareContext().env;
118
- const db = cfEnv.NEXT_TAG_CACHE_D1;
119
- if (!db)
120
- debug("No D1 database found");
121
- const isDisabled = !!globalThis.openNextConfig
122
- .dangerous?.disableTagCache;
123
- return !db || isDisabled
124
- ? { isDisabled: true }
125
- : {
126
- isDisabled: false,
127
- db,
128
- };
129
- }
130
- removeBuildId(key) {
131
- return key.replace(`${this.getBuildId()}/`, "");
132
- }
133
- getCacheKey(key) {
134
- return `${this.getBuildId()}/${key}`.replaceAll("//", "/");
135
- }
136
- getBuildId() {
137
- return process.env.NEXT_BUILD_ID ?? "no-build-id";
138
- }
139
- }
140
- export default new D1TagCache();