@rocicorp/zero 0.26.0 → 0.26.1-canary.11

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (149):
  1. package/out/analyze-query/src/run-ast.d.ts.map +1 -1
  2. package/out/analyze-query/src/run-ast.js +4 -1
  3. package/out/analyze-query/src/run-ast.js.map +1 -1
  4. package/out/replicache/src/btree/node.js +4 -4
  5. package/out/replicache/src/btree/node.js.map +1 -1
  6. package/out/replicache/src/btree/write.js +2 -2
  7. package/out/replicache/src/btree/write.js.map +1 -1
  8. package/out/replicache/src/dag/gc.js +5 -2
  9. package/out/replicache/src/dag/gc.js.map +1 -1
  10. package/out/replicache/src/db/write.d.ts.map +1 -1
  11. package/out/replicache/src/db/write.js +21 -6
  12. package/out/replicache/src/db/write.js.map +1 -1
  13. package/out/replicache/src/error-responses.d.ts.map +1 -1
  14. package/out/replicache/src/error-responses.js +4 -1
  15. package/out/replicache/src/error-responses.js.map +1 -1
  16. package/out/replicache/src/persist/clients.d.ts.map +1 -1
  17. package/out/replicache/src/persist/clients.js +4 -1
  18. package/out/replicache/src/persist/clients.js.map +1 -1
  19. package/out/replicache/src/persist/collect-idb-databases.d.ts.map +1 -1
  20. package/out/replicache/src/persist/collect-idb-databases.js +2 -1
  21. package/out/replicache/src/persist/collect-idb-databases.js.map +1 -1
  22. package/out/replicache/src/persist/idb-databases-store.d.ts.map +1 -1
  23. package/out/replicache/src/persist/idb-databases-store.js +4 -1
  24. package/out/replicache/src/persist/idb-databases-store.js.map +1 -1
  25. package/out/replicache/src/process-scheduler.js +4 -1
  26. package/out/replicache/src/process-scheduler.js.map +1 -1
  27. package/out/replicache/src/replicache-impl.js +2 -2
  28. package/out/replicache/src/replicache-impl.js.map +1 -1
  29. package/out/replicache/src/subscriptions.d.ts.map +1 -1
  30. package/out/replicache/src/subscriptions.js +5 -2
  31. package/out/replicache/src/subscriptions.js.map +1 -1
  32. package/out/replicache/src/sync/diff.d.ts.map +1 -1
  33. package/out/replicache/src/sync/diff.js +4 -1
  34. package/out/replicache/src/sync/diff.js.map +1 -1
  35. package/out/replicache/src/sync/pull.d.ts.map +1 -1
  36. package/out/replicache/src/sync/pull.js +4 -1
  37. package/out/replicache/src/sync/pull.js.map +1 -1
  38. package/out/replicache/src/sync/push.d.ts.map +1 -1
  39. package/out/replicache/src/sync/push.js +5 -2
  40. package/out/replicache/src/sync/push.js.map +1 -1
  41. package/out/shared/src/asserts.d.ts +1 -1
  42. package/out/shared/src/asserts.d.ts.map +1 -1
  43. package/out/shared/src/asserts.js +1 -1
  44. package/out/shared/src/asserts.js.map +1 -1
  45. package/out/z2s/src/compiler.d.ts.map +1 -1
  46. package/out/z2s/src/compiler.js +8 -2
  47. package/out/z2s/src/compiler.js.map +1 -1
  48. package/out/zero/package.json.js +1 -1
  49. package/out/zero-cache/src/config/zero-config.d.ts +4 -0
  50. package/out/zero-cache/src/config/zero-config.d.ts.map +1 -1
  51. package/out/zero-cache/src/config/zero-config.js +17 -0
  52. package/out/zero-cache/src/config/zero-config.js.map +1 -1
  53. package/out/zero-cache/src/db/transaction-pool.d.ts.map +1 -1
  54. package/out/zero-cache/src/db/transaction-pool.js +17 -11
  55. package/out/zero-cache/src/db/transaction-pool.js.map +1 -1
  56. package/out/zero-cache/src/observability/events.d.ts.map +1 -1
  57. package/out/zero-cache/src/observability/events.js +28 -9
  58. package/out/zero-cache/src/observability/events.js.map +1 -1
  59. package/out/zero-cache/src/server/change-streamer.d.ts.map +1 -1
  60. package/out/zero-cache/src/server/change-streamer.js +3 -1
  61. package/out/zero-cache/src/server/change-streamer.js.map +1 -1
  62. package/out/zero-cache/src/services/analyze.js +1 -0
  63. package/out/zero-cache/src/services/analyze.js.map +1 -1
  64. package/out/zero-cache/src/services/change-source/pg/backfill-stream.d.ts.map +1 -1
  65. package/out/zero-cache/src/services/change-source/pg/backfill-stream.js +29 -14
  66. package/out/zero-cache/src/services/change-source/pg/backfill-stream.js.map +1 -1
  67. package/out/zero-cache/src/services/change-source/pg/initial-sync.d.ts +6 -1
  68. package/out/zero-cache/src/services/change-source/pg/initial-sync.d.ts.map +1 -1
  69. package/out/zero-cache/src/services/change-source/pg/initial-sync.js +69 -25
  70. package/out/zero-cache/src/services/change-source/pg/initial-sync.js.map +1 -1
  71. package/out/zero-cache/src/services/change-source/pg/schema/ddl.d.ts.map +1 -1
  72. package/out/zero-cache/src/services/change-source/pg/schema/ddl.js +6 -1
  73. package/out/zero-cache/src/services/change-source/pg/schema/ddl.js.map +1 -1
  74. package/out/zero-cache/src/services/change-source/pg/schema/init.d.ts.map +1 -1
  75. package/out/zero-cache/src/services/change-source/pg/schema/init.js +12 -8
  76. package/out/zero-cache/src/services/change-source/pg/schema/init.js.map +1 -1
  77. package/out/zero-cache/src/services/change-source/protocol/current/data.d.ts +26 -0
  78. package/out/zero-cache/src/services/change-source/protocol/current/data.d.ts.map +1 -1
  79. package/out/zero-cache/src/services/change-source/protocol/current/data.js +15 -3
  80. package/out/zero-cache/src/services/change-source/protocol/current/data.js.map +1 -1
  81. package/out/zero-cache/src/services/change-source/protocol/current/downstream.d.ts +30 -0
  82. package/out/zero-cache/src/services/change-source/protocol/current/downstream.d.ts.map +1 -1
  83. package/out/zero-cache/src/services/change-source/protocol/current.js +2 -1
  84. package/out/zero-cache/src/services/change-streamer/broadcast.d.ts +100 -0
  85. package/out/zero-cache/src/services/change-streamer/broadcast.d.ts.map +1 -0
  86. package/out/zero-cache/src/services/change-streamer/broadcast.js +171 -0
  87. package/out/zero-cache/src/services/change-streamer/broadcast.js.map +1 -0
  88. package/out/zero-cache/src/services/change-streamer/change-streamer-service.d.ts +1 -1
  89. package/out/zero-cache/src/services/change-streamer/change-streamer-service.d.ts.map +1 -1
  90. package/out/zero-cache/src/services/change-streamer/change-streamer-service.js +22 -9
  91. package/out/zero-cache/src/services/change-streamer/change-streamer-service.js.map +1 -1
  92. package/out/zero-cache/src/services/change-streamer/change-streamer.d.ts +10 -0
  93. package/out/zero-cache/src/services/change-streamer/change-streamer.d.ts.map +1 -1
  94. package/out/zero-cache/src/services/change-streamer/forwarder.d.ts +17 -1
  95. package/out/zero-cache/src/services/change-streamer/forwarder.d.ts.map +1 -1
  96. package/out/zero-cache/src/services/change-streamer/forwarder.js +52 -4
  97. package/out/zero-cache/src/services/change-streamer/forwarder.js.map +1 -1
  98. package/out/zero-cache/src/services/change-streamer/subscriber.d.ts +18 -0
  99. package/out/zero-cache/src/services/change-streamer/subscriber.d.ts.map +1 -1
  100. package/out/zero-cache/src/services/change-streamer/subscriber.js +68 -12
  101. package/out/zero-cache/src/services/change-streamer/subscriber.js.map +1 -1
  102. package/out/zero-cache/src/services/replicator/change-processor.d.ts +2 -0
  103. package/out/zero-cache/src/services/replicator/change-processor.d.ts.map +1 -1
  104. package/out/zero-cache/src/services/replicator/change-processor.js +8 -6
  105. package/out/zero-cache/src/services/replicator/change-processor.js.map +1 -1
  106. package/out/zero-cache/src/services/replicator/incremental-sync.d.ts.map +1 -1
  107. package/out/zero-cache/src/services/replicator/incremental-sync.js +39 -1
  108. package/out/zero-cache/src/services/replicator/incremental-sync.js.map +1 -1
  109. package/out/zero-cache/src/services/replicator/replication-status.d.ts +4 -3
  110. package/out/zero-cache/src/services/replicator/replication-status.d.ts.map +1 -1
  111. package/out/zero-cache/src/services/replicator/replication-status.js +25 -10
  112. package/out/zero-cache/src/services/replicator/replication-status.js.map +1 -1
  113. package/out/zero-cache/src/services/run-ast.d.ts.map +1 -1
  114. package/out/zero-cache/src/services/run-ast.js +22 -2
  115. package/out/zero-cache/src/services/run-ast.js.map +1 -1
  116. package/out/zero-cache/src/services/running-state.d.ts +1 -0
  117. package/out/zero-cache/src/services/running-state.d.ts.map +1 -1
  118. package/out/zero-cache/src/services/running-state.js +4 -0
  119. package/out/zero-cache/src/services/running-state.js.map +1 -1
  120. package/out/zero-cache/src/services/view-syncer/cvr.d.ts.map +1 -1
  121. package/out/zero-cache/src/services/view-syncer/cvr.js +8 -2
  122. package/out/zero-cache/src/services/view-syncer/cvr.js.map +1 -1
  123. package/out/zero-cache/src/services/view-syncer/pipeline-driver.d.ts.map +1 -1
  124. package/out/zero-cache/src/services/view-syncer/pipeline-driver.js +10 -1
  125. package/out/zero-cache/src/services/view-syncer/pipeline-driver.js.map +1 -1
  126. package/out/zero-cache/src/services/view-syncer/snapshotter.d.ts +1 -1
  127. package/out/zero-cache/src/services/view-syncer/snapshotter.d.ts.map +1 -1
  128. package/out/zero-cache/src/services/view-syncer/snapshotter.js +15 -7
  129. package/out/zero-cache/src/services/view-syncer/snapshotter.js.map +1 -1
  130. package/out/zero-cache/src/types/subscription.d.ts +3 -1
  131. package/out/zero-cache/src/types/subscription.d.ts.map +1 -1
  132. package/out/zero-cache/src/types/subscription.js +21 -9
  133. package/out/zero-cache/src/types/subscription.js.map +1 -1
  134. package/out/zero-client/src/client/http-string.js.map +1 -1
  135. package/out/zero-client/src/client/version.js +1 -1
  136. package/out/zero-client/src/client/zero.js.map +1 -1
  137. package/out/zero-events/src/status.d.ts +8 -0
  138. package/out/zero-events/src/status.d.ts.map +1 -1
  139. package/out/zero-schema/src/permissions.d.ts.map +1 -1
  140. package/out/zero-schema/src/permissions.js +4 -1
  141. package/out/zero-schema/src/permissions.js.map +1 -1
  142. package/out/zero-server/src/process-mutations.d.ts.map +1 -1
  143. package/out/zero-server/src/process-mutations.js +13 -19
  144. package/out/zero-server/src/process-mutations.js.map +1 -1
  145. package/out/zql/src/builder/filter.d.ts.map +1 -1
  146. package/out/zql/src/builder/filter.js +5 -2
  147. package/out/zql/src/builder/filter.js.map +1 -1
  148. package/out/zql/src/ivm/constraint.js.map +1 -1
  149. package/package.json +1 -1
@@ -1 +1 @@
1
- {"version":3,"file":"zero-config.js","sources":["../../../../../zero-cache/src/config/zero-config.ts"],"sourcesContent":["/**\n * These types represent the _compiled_ config whereas `define-config` types represent the _source_ config.\n */\n\nimport type {LogContext} from '@rocicorp/logger';\nimport {timingSafeEqual} from 'node:crypto';\nimport {logOptions} from '../../../otel/src/log-options.ts';\nimport {\n flagToEnv,\n parseOptions,\n type Config,\n type ParseOptions,\n} from '../../../shared/src/options.ts';\nimport * as v from '../../../shared/src/valita.ts';\n// @circular-dep-ignore - importing package.json for version info only\nimport packageJson from '../../../zero/package.json' with {type: 'json'};\nimport {runtimeDebugFlags} from '../../../zql/src/builder/debug-delegate.ts';\nimport {singleProcessMode} from '../types/processes.ts';\nimport {\n ALLOWED_APP_ID_CHARACTERS,\n INVALID_APP_ID_MESSAGE,\n} from '../types/shards.ts';\nimport {DEFAULT_PREFERRED_PREFIXES} from './network.ts';\nimport {\n assertNormalized,\n isDevelopmentMode,\n type NormalizedZeroConfig,\n} from './normalize.ts';\nexport type {LogConfig} from '../../../otel/src/log-options.ts';\n\nexport const ZERO_ENV_VAR_PREFIX = 'ZERO_';\n\nexport const appOptions = {\n id: {\n type: v\n .string()\n .default('zero')\n .assert(id => ALLOWED_APP_ID_CHARACTERS.test(id), INVALID_APP_ID_MESSAGE),\n desc: [\n 'Unique identifier for the app.',\n '',\n 'Multiple zero-cache apps can run on a single upstream database, each of which',\n 'is isolated from the others, with its own permissions, sharding (future feature),',\n 'and change/cvr databases.',\n '',\n 'The metadata of an app is stored in an upstream schema with the same name,',\n 'e.g. \"zero\", and the metadata for each app shard, e.g. client and mutation',\n 'ids, is stored in the \"\\\\{app-id\\\\}_\\\\{#\\\\}\" schema. 
(Currently there is only a single',\n '\"0\" shard, but this will change with sharding).',\n '',\n 'The CVR and Change data are managed in schemas named \"\\\\{app-id\\\\}_\\\\{shard-num\\\\}/cvr\"',\n 'and \"\\\\{app-id\\\\}_\\\\{shard-num\\\\}/cdc\", respectively, allowing multiple apps and shards',\n 'to share the same database instance (e.g. a Postgres \"cluster\") for CVR and Change management.',\n '',\n 'Due to constraints on replication slot names, an App ID may only consist of',\n 'lower-case letters, numbers, and the underscore character.',\n '',\n 'Note that this option is used by both {bold zero-cache} and {bold zero-deploy-permissions}.',\n ],\n },\n\n publications: {\n type: v.array(v.string()).optional(() => []),\n desc: [\n `Postgres {bold PUBLICATION}s that define the tables and columns to`,\n `replicate. Publication names may not begin with an underscore,`,\n `as zero reserves that prefix for internal use.`,\n ``,\n `If unspecified, zero-cache will create and use an internal publication that`,\n `publishes all tables in the {bold public} schema, i.e.:`,\n ``,\n `CREATE PUBLICATION _\\\\{app-id\\\\}_public_0 FOR TABLES IN SCHEMA public;`,\n ``,\n `Note that changing the set of publications will result in resyncing the replica,`,\n `which may involve downtime (replication lag) while the new replica is initializing.`,\n `To change the set of publications without disrupting an existing app, a new app`,\n `should be created.`,\n ],\n },\n};\n\nexport const shardOptions = {\n id: {\n type: v\n .string()\n .assert(() => {\n throw new Error(\n `ZERO_SHARD_ID is no longer an option. Please use ZERO_APP_ID instead.`,\n // TODO: Link to release / migration notes?\n );\n })\n .optional(),\n hidden: true,\n },\n\n num: {\n type: v.number().default(0),\n desc: [\n `The shard number (from 0 to NUM_SHARDS) of the App. 
zero will eventually`,\n `support data sharding as a first-class primitive; until then, deploying`,\n `multiple shard-nums creates functionally identical shards. Until sharding is`,\n `actually meaningful, this flag is hidden but available for testing.`,\n ],\n hidden: true,\n },\n};\n\nconst replicaOptions = {\n file: {\n type: v.string().default('zero.db'),\n desc: [\n `File path to the SQLite replica that zero-cache maintains.`,\n `This can be lost, but if it is, zero-cache will have to re-replicate next`,\n `time it starts up.`,\n ],\n },\n\n vacuumIntervalHours: {\n type: v.number().optional(),\n desc: [\n `Performs a VACUUM at server startup if the specified number of hours has elapsed`,\n `since the last VACUUM (or initial-sync). The VACUUM operation is heavyweight`,\n `and requires double the size of the db in disk space. If unspecified, VACUUM`,\n `operations are not performed.`,\n ],\n },\n};\n\nexport type ReplicaOptions = Config<typeof replicaOptions>;\n\nconst perUserMutationLimit = {\n max: {\n type: v.number().optional(),\n desc: [\n `The maximum mutations per user within the specified {bold windowMs}.`,\n `If unset, no rate limiting is enforced.`,\n ],\n },\n windowMs: {\n type: v.number().default(60_000),\n desc: [\n `The sliding window over which the {bold perUserMutationLimitMax} is enforced.`,\n ],\n },\n};\n\nexport type RateLimit = Config<typeof perUserMutationLimit>;\n\nconst authOptions = {\n jwk: {\n type: v.string().optional(),\n desc: [\n `A public key in JWK format used to verify JWTs. Only one of {bold jwk}, {bold jwksUrl} and {bold secret} may be set.`,\n ],\n deprecated: [\n `Use cookie-based authentication or an auth token instead - see https://zero.rocicorp.dev/docs/auth.`,\n ],\n },\n jwksUrl: {\n type: v.string().optional(),\n desc: [\n `A URL that returns a JWK set used to verify JWTs. 
Only one of {bold jwk}, {bold jwksUrl} and {bold secret} may be set.`,\n ],\n deprecated: [\n `Use cookie-based authentication or an auth token instead - see https://zero.rocicorp.dev/docs/auth.`,\n ],\n },\n secret: {\n type: v.string().optional(),\n desc: [\n `A symmetric key used to verify JWTs. Only one of {bold jwk}, {bold jwksUrl} and {bold secret} may be set.`,\n ],\n deprecated: [\n `Use cookie-based authentication or an auth token instead - see https://zero.rocicorp.dev/docs/auth.`,\n ],\n },\n issuer: {\n type: v.string().optional(),\n desc: [\n `Expected issuer ({bold iss} claim) for JWT validation.`,\n `If set, tokens with a different or missing issuer will be rejected.`,\n ],\n deprecated: [\n `Use cookie-based authentication or an auth token instead - see https://zero.rocicorp.dev/docs/auth.`,\n ],\n },\n audience: {\n type: v.string().optional(),\n desc: [\n `Expected audience ({bold aud} claim) for JWT validation.`,\n `If set, tokens with a different or missing audience will be rejected.`,\n ],\n deprecated: [\n `Use cookie-based authentication or an auth token instead - see https://zero.rocicorp.dev/docs/auth.`,\n ],\n },\n};\n\nconst makeDeprecationMessage = (flag: string) =>\n `Use {bold ${flagToEnv(ZERO_ENV_VAR_PREFIX, flag)}} (or {bold --${flag}}) instead.`;\n\nconst makeMutatorQueryOptions = (\n replacement: 'mutate' | 'query' | undefined,\n suffix: string,\n) => ({\n url: {\n type: v.array(v.string()).optional(), // optional until we remove CRUD mutations\n desc: [\n `The URL of the API server to which zero-cache will ${suffix}.`,\n ``,\n `{bold IMPORTANT:} URLs are matched using {bold URLPattern}, a standard Web API.`,\n ``,\n `{bold Pattern Syntax:}`,\n ` URLPattern uses a simple and intuitive syntax similar to Express routes.`,\n ` Wildcards and named parameters make it easy to match multiple URLs.`,\n ``,\n `{bold Basic Examples:}`,\n ` Exact URL match:`,\n ` \"https://api.example.com/mutate\"`,\n ` `,\n ` Any subdomain using 
wildcard:`,\n ` \"https://*.example.com/mutate\"`,\n ` `,\n ` Multiple subdomain levels:`,\n ` \"https://*.*.example.com/mutate\"`,\n ` `,\n ` Any path under a domain:`,\n ` \"https://api.example.com/*\"`,\n ` `,\n ` Named path parameters:`,\n ` \"https://api.example.com/:version/mutate\"`,\n ` ↳ Matches \"https://api.example.com/v1/mutate\", \"https://api.example.com/v2/mutate\", etc.`,\n ``,\n `{bold Advanced Patterns:}`,\n ` Optional path segments:`,\n ` \"https://api.example.com/:path?\"`,\n ` `,\n ` Regex in segments (for specific patterns):`,\n ` \"https://api.example.com/:version(v\\\\\\\\d+)/mutate\"`,\n ` ↳ Matches only \"v\" followed by digits`,\n ``,\n `{bold Multiple patterns:}`,\n ` [\"https://api1.example.com/mutate\", \"https://api2.example.com/mutate\"]`,\n ``,\n `{bold Note:} Query parameters and URL fragments (#) are automatically ignored during matching.`,\n ``,\n `For full URLPattern syntax, see: https://developer.mozilla.org/en-US/docs/Web/API/URLPattern`,\n ],\n ...(replacement\n ? {deprecated: [makeDeprecationMessage(`${replacement}-url`)]}\n : {}),\n },\n apiKey: {\n type: v.string().optional(),\n desc: [\n `An optional secret used to authorize zero-cache to call the API server handling writes.`,\n ],\n ...(replacement\n ? {deprecated: [makeDeprecationMessage(`${replacement}-api-key`)]}\n : {}),\n },\n forwardCookies: {\n type: v.boolean().default(false),\n desc: [\n `If true, zero-cache will forward cookies from the request.`,\n `This is useful for passing authentication cookies to the API server.`,\n `If false, cookies are not forwarded.`,\n ],\n ...(replacement\n ? {deprecated: [makeDeprecationMessage(`${replacement}-forward-cookies`)]}\n : {}),\n },\n allowedClientHeaders: {\n type: v.array(v.string()).optional(),\n desc: [\n `A list of header names that clients are allowed to set via custom headers.`,\n `If specified, only headers in this list will be forwarded to the ${suffix === 'push mutations' ? 
'push' : 'query'} URL.`,\n `Header names are case-insensitive.`,\n `If not specified, no client-provided headers are forwarded (secure by default).`,\n `Example: ZERO_${replacement ? replacement.toUpperCase() : suffix === 'push mutations' ? 'MUTATE' : 'QUERY'}_ALLOWED_CLIENT_HEADERS=x-request-id,x-correlation-id`,\n ],\n ...(replacement\n ? {\n deprecated: [\n makeDeprecationMessage(`${replacement}-allowed-client-headers`),\n ],\n }\n : {}),\n },\n});\n\nconst mutateOptions = makeMutatorQueryOptions(undefined, 'push mutations');\nconst pushOptions = makeMutatorQueryOptions('mutate', 'push mutations');\nconst queryOptions = makeMutatorQueryOptions(undefined, 'send synced queries');\nconst getQueriesOptions = makeMutatorQueryOptions(\n 'query',\n 'send synced queries',\n);\n\n/** @deprecated */\nexport type AuthConfig = Config<typeof authOptions>;\n\n// Note: --help will list flags in the order in which they are defined here,\n// so order the fields such that the important (e.g. required) ones are first.\n// (Exported for testing)\nexport const zeroOptions = {\n upstream: {\n db: {\n type: v.string(),\n desc: [\n `The \"upstream\" authoritative postgres database.`,\n `In the future we will support other types of upstream besides PG.`,\n ],\n },\n\n type: {\n type: v.literalUnion('pg', 'custom').default('pg'),\n desc: [\n `The meaning of the {bold upstream-db} depends on the upstream type:`,\n `* {bold pg}: The connection database string, e.g. \"postgres://...\"`,\n `* {bold custom}: The base URI of the change source \"endpoint, e.g.`,\n ` \"https://my-change-source.dev/changes/v0/stream?apiKey=...\"`,\n ],\n hidden: true, // TODO: Unhide when ready to officially support.\n },\n\n maxConns: {\n type: v.number().default(20),\n desc: [\n `The maximum number of connections to open to the upstream database`,\n `for committing mutations. 
This is divided evenly amongst sync workers.`,\n `In addition to this number, zero-cache uses one connection for the`,\n `replication stream.`,\n ``,\n `Note that this number must allow for at least one connection per`,\n `sync worker, or zero-cache will fail to start. See {bold num-sync-workers}`,\n ],\n },\n\n maxConnsPerWorker: {\n type: v.number().optional(),\n hidden: true, // Passed from main thread to sync workers\n },\n },\n\n /** @deprecated */\n push: pushOptions,\n mutate: mutateOptions,\n /** @deprecated */\n getQueries: getQueriesOptions,\n query: queryOptions,\n\n cvr: {\n db: {\n type: v.string().optional(),\n desc: [\n `The Postgres database used to store CVRs. CVRs (client view records) keep track`,\n `of the data synced to clients in order to determine the diff to send on reconnect.`,\n `If unspecified, the {bold upstream-db} will be used.`,\n ],\n },\n\n maxConns: {\n type: v.number().default(30),\n desc: [\n `The maximum number of connections to open to the CVR database.`,\n `This is divided evenly amongst sync workers.`,\n ``,\n `Note that this number must allow for at least one connection per`,\n `sync worker, or zero-cache will fail to start. 
See {bold num-sync-workers}`,\n ],\n },\n\n maxConnsPerWorker: {\n type: v.number().optional(),\n hidden: true, // Passed from main thread to sync workers\n },\n\n garbageCollectionInactivityThresholdHours: {\n type: v.number().default(48),\n desc: [\n `The duration after which an inactive CVR is eligible for garbage collection.`,\n `Note that garbage collection is an incremental, periodic process which does not`,\n `necessarily purge all eligible CVRs immediately.`,\n ],\n },\n\n garbageCollectionInitialIntervalSeconds: {\n type: v.number().default(60),\n desc: [\n `The initial interval at which to check and garbage collect inactive CVRs.`,\n `This interval is increased exponentially (up to 16 minutes) when there is`,\n `nothing to purge.`,\n ],\n },\n\n garbageCollectionInitialBatchSize: {\n type: v.number().default(25),\n desc: [\n `The initial number of CVRs to purge per garbage collection interval.`,\n `This number is increased linearly if the rate of new CVRs exceeds the rate of`,\n `purged CVRs, in order to reach a steady state.`,\n ``,\n `Setting this to 0 effectively disables CVR garbage collection.`,\n ],\n },\n },\n\n queryHydrationStats: {\n type: v.boolean().optional(),\n desc: [\n `Track and log the number of rows considered by query hydrations which`,\n `take longer than {bold log-slow-hydrate-threshold} milliseconds.`,\n `This is useful for debugging and performance tuning.`,\n ],\n },\n\n enableQueryPlanner: {\n type: v.boolean().default(true),\n desc: [\n `Enable the query planner for optimizing ZQL queries.`,\n ``,\n `The query planner analyzes and optimizes query execution by determining`,\n `the most efficient join strategies.`,\n ``,\n `You can disable the planner if it is picking bad strategies.`,\n ],\n },\n\n yieldThresholdMs: {\n type: v.number().default(10),\n desc: [\n `The maximum amount of time in milliseconds that a sync worker will`,\n `spend in IVM (processing query hydration and advancement) before yielding`,\n `to the event loop. 
Lower values increase responsiveness and fairness at`,\n `the cost of reduced throughput.`,\n ],\n },\n\n change: {\n db: {\n type: v.string().optional(),\n desc: [\n `The Postgres database used to store recent replication log entries, in order`,\n `to sync multiple view-syncers without requiring multiple replication slots on`,\n `the upstream database. If unspecified, the {bold upstream-db} will be used.`,\n ],\n },\n\n maxConns: {\n type: v.number().default(5),\n desc: [\n `The maximum number of connections to open to the change database.`,\n `This is used by the {bold change-streamer} for catching up`,\n `{bold zero-cache} replication subscriptions.`,\n ],\n },\n },\n\n replica: replicaOptions,\n\n log: logOptions,\n\n app: appOptions,\n\n shard: shardOptions,\n\n /** @deprecated */\n auth: authOptions,\n\n port: {\n type: v.number().default(4848),\n desc: [`The port for sync connections.`],\n },\n\n changeStreamer: {\n uri: {\n type: v.string().optional(),\n desc: [\n `When set, connects to the {bold change-streamer} at the given URI.`,\n `In a multi-node setup, this should be specified in {bold view-syncer} options,`,\n `pointing to the {bold replication-manager} URI, which runs a {bold change-streamer}`,\n `on port 4849.`,\n ],\n },\n\n mode: {\n type: v.literalUnion('dedicated', 'discover').default('dedicated'),\n desc: [\n `As an alternative to {bold ZERO_CHANGE_STREAMER_URI}, the {bold ZERO_CHANGE_STREAMER_MODE}`,\n `can be set to \"{bold discover}\" to instruct the {bold view-syncer} to connect to the `,\n `ip address registered by the {bold replication-manager} upon startup.`,\n ``,\n `This may not work in all networking configurations, e.g. certain private `,\n `networking or port forwarding configurations. 
Using the {bold ZERO_CHANGE_STREAMER_URI}`,\n `with an explicit routable hostname is recommended instead.`,\n ``,\n `Note: This option is ignored if the {bold ZERO_CHANGE_STREAMER_URI} is set.`,\n ],\n },\n\n port: {\n type: v.number().optional(),\n desc: [\n `The port on which the {bold change-streamer} runs. This is an internal`,\n `protocol between the {bold replication-manager} and {bold view-syncers}, which`,\n `runs in the same process tree in local development or a single-node configuration.`,\n ``,\n `If unspecified, defaults to {bold --port} + 1.`,\n ],\n },\n\n /** @deprecated */\n address: {\n type: v.string().optional(),\n deprecated: [\n `Set the {bold ZERO_CHANGE_STREAMER_URI} on view-syncers instead.`,\n ],\n hidden: true,\n },\n\n /** @deprecated */\n protocol: {\n type: v.literalUnion('ws', 'wss').default('ws'),\n deprecated: [\n `Set the {bold ZERO_CHANGE_STREAMER_URI} on view-syncers instead.`,\n ],\n hidden: true,\n },\n\n discoveryInterfacePreferences: {\n type: v.array(v.string()).default([...DEFAULT_PREFERRED_PREFIXES]),\n desc: [\n `The name prefixes to prefer when introspecting the network interfaces to determine`,\n `the externally reachable IP address for change-streamer discovery. This defaults`,\n `to commonly used names for standard ethernet interfaces in order to prevent selecting`,\n `special interfaces such as those for VPNs.`,\n ],\n // More confusing than it's worth to advertise this. The default list should be\n // adjusted to make things work for all environments; it is controlled as a\n // hidden flag as an emergency to unblock people with outlier network configs.\n hidden: true,\n },\n\n startupDelayMs: {\n type: v.number().default(15000),\n desc: [\n `The delay to wait before the change-streamer takes over the replication stream`,\n `(i.e. the handoff during replication-manager updates), to allow loadbalancers to register`,\n `the task as healthy based on healthcheck parameters. 
Note that if a change stream request`,\n `is received during this interval, the delay will be canceled and the takeover will happen`,\n `immediately, since the incoming request indicates that the task is registered as a target.`,\n ],\n },\n\n backPressureLimitHeapProportion: {\n type: v.number().default(0.04),\n desc: [\n `The percentage of {bold --max-old-space-size} to use as a buffer for absorbing replication`,\n `stream spikes. When the estimated amount of queued data exceeds this threshold, back pressure`,\n `is applied to the replication stream, delaying downstream sync as a result.`,\n ``,\n `The threshold was determined empirically with load testing. Higher thresholds have resulted`,\n `in OOMs. Note also that the byte-counting logic in the queue is strictly an underestimate of`,\n `actual memory usage (but importantly, proportionally correct), so the queue is actually`,\n `using more than what this proportion suggests.`,\n ``,\n `This parameter is exported as an emergency knob to reduce the size of the buffer in the`,\n `event that the server OOMs from back pressure. 
Resist the urge to {italic increase} this`,\n `proportion, as it is mainly useful for absorbing periodic spikes and does not meaningfully`,\n `affect steady-state replication throughput; the latter is determined by other factors such`,\n `as object serialization and PG throughput`,\n ``,\n `In other words, the back pressure limit does not constrain replication throughput;`,\n `rather, it protects the system when the upstream throughput exceeds the downstream`,\n `throughput.`,\n ],\n },\n },\n\n taskID: {\n type: v.string().optional(),\n desc: [\n `Globally unique identifier for the zero-cache instance.`,\n ``,\n `Setting this to a platform specific task identifier can be useful for debugging.`,\n `If unspecified, zero-cache will attempt to extract the TaskARN if run from within`,\n `an AWS ECS container, and otherwise use a random string.`,\n ],\n },\n\n perUserMutationLimit,\n\n numSyncWorkers: {\n type: v.number().optional(),\n desc: [\n `The number of processes to use for view syncing.`,\n `Leave this unset to use the maximum available parallelism.`,\n `If set to 0, the server runs without sync workers, which is the`,\n `configuration for running the {bold replication-manager}.`,\n ],\n },\n\n autoReset: {\n type: v.boolean().default(true),\n desc: [\n `Automatically wipe and resync the replica when replication is halted.`,\n `This situation can occur for configurations in which the upstream database`,\n `provider prohibits event trigger creation, preventing the zero-cache from`,\n `being able to correctly replicate schema changes. For such configurations,`,\n `an upstream schema change will instead result in halting replication with an`,\n `error indicating that the replica needs to be reset.`,\n ``,\n `When {bold auto-reset} is enabled, zero-cache will respond to such situations`,\n `by shutting down, and when restarted, resetting the replica and all synced `,\n `clients. 
This is a heavy-weight operation and can result in user-visible`,\n `slowness or downtime if compute resources are scarce.`,\n ],\n },\n\n adminPassword: {\n type: v.string().optional(),\n desc: [\n `A password used to administer zero-cache server, for example to access the`,\n `/statz endpoint.`,\n '',\n 'A password is optional in development mode but {bold required in production} mode.',\n ],\n },\n\n websocketCompression: {\n type: v.boolean().default(false),\n desc: [\n 'Enable WebSocket per-message deflate compression.',\n '',\n 'Compression can reduce bandwidth usage for sync traffic but',\n 'increases CPU usage on both client and server. Disabled by default.',\n '',\n 'See: https://github.com/websockets/ws#websocket-compression',\n ],\n },\n\n websocketCompressionOptions: {\n type: v.string().optional(),\n desc: [\n 'JSON string containing WebSocket compression options.',\n '',\n 'Only used if websocketCompression is enabled.',\n '',\n 'Example: \\\\{\"zlibDeflateOptions\":\\\\{\"level\":3\\\\},\"threshold\":1024\\\\}',\n '',\n 'See https://github.com/websockets/ws/blob/master/doc/ws.md#new-websocketserveroptions-callback for available options.',\n ],\n },\n\n websocketMaxPayloadBytes: {\n type: v.number().default(10 * 1024 * 1024),\n desc: [\n 'Maximum size of incoming WebSocket messages in bytes.',\n '',\n 'Messages exceeding this limit are rejected before parsing.',\n 'Default: 10MB (10 * 1024 * 1024 = 10485760)',\n ],\n },\n\n litestream: {\n executable: {\n type: v.string().optional(),\n desc: [`Path to the {bold litestream} executable.`],\n },\n\n configPath: {\n type: v.string().default('./src/services/litestream/config.yml'),\n desc: [\n `Path to the litestream yaml config file. 
zero-cache will run this with its`,\n `environment variables, which can be referenced in the file via $\\\\{ENV\\\\}`,\n `substitution, for example:`,\n `* {bold ZERO_REPLICA_FILE} for the db path`,\n `* {bold ZERO_LITESTREAM_BACKUP_LOCATION} for the db replica url`,\n `* {bold ZERO_LITESTREAM_LOG_LEVEL} for the log level`,\n `* {bold ZERO_LOG_FORMAT} for the log type`,\n ],\n },\n\n logLevel: {\n type: v.literalUnion('debug', 'info', 'warn', 'error').default('warn'),\n },\n\n backupURL: {\n type: v.string().optional(),\n desc: [\n `The location of the litestream backup, usually an {bold s3://} URL.`,\n `This is only consulted by the {bold replication-manager}.`,\n `{bold view-syncers} receive this information from the {bold replication-manager}.`,\n ],\n },\n\n endpoint: {\n type: v.string().optional(),\n desc: [\n `The S3-compatible endpoint URL to use for the litestream backup. Only required for non-AWS services.`,\n `The {bold replication-manager} and {bold view-syncers} must have the same endpoint.`,\n ],\n },\n\n port: {\n type: v.number().optional(),\n desc: [\n `Port on which litestream exports metrics, used to determine the replication`,\n `watermark up to which it is safe to purge change log records.`,\n ``,\n `If unspecified, defaults to {bold --port} + 2.`,\n ],\n },\n\n checkpointThresholdMB: {\n type: v.number().default(40),\n desc: [\n `The size of the WAL file at which to perform an SQlite checkpoint to apply`,\n `the writes in the WAL to the main database file. Each checkpoint creates`,\n `a new WAL segment file that will be backed up by litestream. 
Smaller thresholds`,\n `may improve read performance, at the expense of creating more files to download`,\n `when restoring the replica from the backup.`,\n ],\n },\n\n minCheckpointPageCount: {\n type: v.number().optional(),\n desc: [\n `The WAL page count at which SQLite attempts a PASSIVE checkpoint, which`,\n `transfers pages to the main database file without blocking writers.`,\n `Defaults to {bold checkpointThresholdMB * 250} (since SQLite page size is 4KB).`,\n ],\n },\n\n maxCheckpointPageCount: {\n type: v.number().optional(),\n desc: [\n `The WAL page count at which SQLite performs a RESTART checkpoint, which`,\n `blocks writers until complete. Defaults to {bold minCheckpointPageCount * 10}.`,\n `Set to {bold 0} to disable RESTART checkpoints entirely.`,\n ],\n },\n\n incrementalBackupIntervalMinutes: {\n type: v.number().default(15),\n desc: [\n `The interval between incremental backups of the replica. Shorter intervals`,\n `reduce the amount of change history that needs to be replayed when catching`,\n `up a new view-syncer, at the expense of increasing the number of files needed`,\n `to download for the initial litestream restore.`,\n ],\n },\n\n snapshotBackupIntervalHours: {\n type: v.number().default(12),\n desc: [\n `The interval between snapshot backups of the replica. Snapshot backups`,\n `make a full copy of the database to a new litestream generation. This`,\n `improves restore time at the expense of bandwidth. 
Applications with a`,\n `large database and low write rate can increase this interval to reduce`,\n `network usage for backups (litestream defaults to 24 hours).`,\n ],\n },\n\n restoreParallelism: {\n type: v.number().default(48),\n desc: [\n `The number of WAL files to download in parallel when performing the`,\n `initial restore of the replica from the backup.`,\n ],\n },\n\n multipartConcurrency: {\n type: v.number().default(48),\n desc: [\n `The number of parts (of size {bold --litestream-multipart-size} bytes)`,\n `to upload or download in parallel when backing up or restoring the snapshot.`,\n ],\n },\n\n multipartSize: {\n type: v.number().default(16 * 1024 * 1024),\n desc: [\n `The size of each part when uploading or downloading the snapshot with`,\n `{bold --multipart-concurrency}. Note that up to {bold concurrency * size}`,\n `bytes of memory are used when backing up or restoring the snapshot.`,\n ],\n },\n },\n\n storageDBTmpDir: {\n type: v.string().optional(),\n desc: [\n `tmp directory for IVM operator storage. Leave unset to use os.tmpdir()`,\n ],\n },\n\n initialSync: {\n tableCopyWorkers: {\n type: v.number().default(5),\n desc: [\n `The number of parallel workers used to copy tables during initial sync.`,\n `Each worker uses a database connection and will buffer up to (approximately)`,\n `10 MB of table data in memory during initial sync. Increasing the number of`,\n `workers may improve initial sync speed; however, note that local disk throughput`,\n `(i.e. 
IOPS), upstream CPU, and network bandwidth may also be bottlenecks.`,\n ],\n },\n\n profileCopy: {\n type: v.boolean().optional(),\n hidden: true,\n desc: [\n `Takes a cpu profile during the copy phase initial-sync, storing it as a JSON file`,\n `initial-copy.cpuprofile in the tmp directory.`,\n ],\n },\n },\n\n /** @deprecated */\n targetClientRowCount: {\n type: v.number().default(20_000),\n deprecated: [\n 'This option is no longer used and will be removed in a future version.',\n 'The client-side cache no longer enforces a row limit. Instead, TTL-based expiration',\n 'automatically manages cache size to prevent unbounded growth.',\n ],\n hidden: true,\n },\n\n lazyStartup: {\n type: v.boolean().default(false),\n desc: [\n 'Delay starting the majority of zero-cache until first request.',\n '',\n 'This is mainly intended to avoid connecting to Postgres replication stream',\n 'until the first request is received, which can be useful i.e., for preview instances.',\n '',\n 'Currently only supported in single-node mode.',\n ],\n },\n\n serverVersion: {\n type: v.string().optional(),\n desc: [`The version string outputted to logs when the server starts up.`],\n },\n\n enableTelemetry: {\n type: v.boolean().default(true),\n desc: [\n `Set to false to opt out of telemetry collection.`,\n ``,\n `This helps us improve Zero by collecting anonymous usage data.`,\n `Setting the DO_NOT_TRACK environment variable also disables telemetry.`,\n ],\n },\n\n cloudEvent: {\n sinkEnv: {\n type: v.string().optional(),\n desc: [\n `ENV variable containing a URI to a CloudEvents sink. When set, ZeroEvents`,\n `will be published to the sink as the {bold data} field of CloudEvents.`,\n `The {bold source} field of the CloudEvents will be set to the {bold ZERO_TASK_ID},`,\n `along with any extension attributes specified by the {bold ZERO_CLOUD_EVENT_EXTENSION_OVERRIDES_ENV}.`,\n ``,\n `This configuration is modeled to easily integrate with a knative K_SINK binding,`,\n `(i.e. 
https://github.com/knative/eventing/blob/main/docs/spec/sources.md#sinkbinding).`,\n `However, any CloudEvents sink can be used.`,\n ],\n },\n\n extensionOverridesEnv: {\n type: v.string().optional(),\n desc: [\n `ENV variable containing a JSON stringified object with an {bold extensions} field`,\n `containing attributes that should be added or overridden on outbound CloudEvents.`,\n ``,\n `This configuration is modeled to easily integrate with a knative K_CE_OVERRIDES binding,`,\n `(i.e. https://github.com/knative/eventing/blob/main/docs/spec/sources.md#sinkbinding).`,\n ],\n },\n },\n};\n\nexport type ZeroConfig = Config<typeof zeroOptions>;\n\nlet loadedConfig: Config<typeof zeroOptions> | undefined;\n\nexport function getZeroConfig(\n opts: Omit<ParseOptions, 'envNamePrefix'> = {},\n): ZeroConfig {\n if (!loadedConfig || singleProcessMode()) {\n loadedConfig = parseOptions(zeroOptions, {\n envNamePrefix: ZERO_ENV_VAR_PREFIX,\n emitDeprecationWarnings: false, // overridden at the top level parse\n ...opts,\n });\n\n if (loadedConfig.queryHydrationStats) {\n runtimeDebugFlags.trackRowCountsVended = true;\n }\n }\n return loadedConfig;\n}\n\n/**\n * Same as {@link getZeroConfig}, with an additional check that the\n * config has already been normalized (i.e. by the top level server/runner).\n */\nexport function getNormalizedZeroConfig(\n opts: Omit<ParseOptions, 'envNamePrefix'> = {},\n): NormalizedZeroConfig {\n const config = getZeroConfig(opts);\n assertNormalized(config);\n return config;\n}\n\n/**\n * Gets the server version from the config if provided. Otherwise it gets it\n * from the Zero package.json.\n */\nexport function getServerVersion(\n config: Pick<ZeroConfig, 'serverVersion'> | undefined,\n): string {\n return config?.serverVersion ?? 
packageJson.version;\n}\n\nexport function isAdminPasswordValid(\n lc: LogContext,\n config: Pick<NormalizedZeroConfig, 'adminPassword'>,\n password: string | undefined,\n) {\n // If development mode, password is optional\n // We use process.env.NODE_ENV === 'development' as a sign that we're in\n // development mode, rather than a custom env var like ZERO_DEVELOPMENT_MODE,\n // because NODE_ENV is more standard and is already used by many tools.\n // Note that if NODE_ENV is not set, we assume production mode.\n\n if (!password && !config.adminPassword && isDevelopmentMode()) {\n warnOnce(\n lc,\n 'No admin password set; allowing access in development mode only',\n );\n return true;\n }\n\n if (!config.adminPassword) {\n lc.warn?.('No admin password set; denying access');\n return false;\n }\n\n // Use constant-time comparison to prevent timing attacks\n const passwordBuffer = Buffer.from(password ?? '');\n const configBuffer = Buffer.from(config.adminPassword);\n\n // Handle length mismatch in constant time\n if (passwordBuffer.length !== configBuffer.length) {\n // Perform dummy comparison to maintain constant timing\n timingSafeEqual(configBuffer, configBuffer);\n lc.warn?.('Invalid admin password');\n return false;\n }\n\n if (!timingSafeEqual(passwordBuffer, configBuffer)) {\n lc.warn?.('Invalid admin password');\n return false;\n }\n\n lc.debug?.('Admin password accepted');\n return true;\n}\n\nlet hasWarned = false;\n\nfunction warnOnce(lc: LogContext, msg: string) {\n if (!hasWarned) {\n lc.warn?.(msg);\n hasWarned = true;\n }\n}\n\n// For testing purposes - reset the warning state\nexport function resetWarnOnceState() {\n hasWarned = 
false;\n}\n"],"names":["v.string","v.array","v.number","v.boolean","v.literalUnion"],"mappings":";;;;;;;;;;;AA8BO,MAAM,sBAAsB;AAE5B,MAAM,aAAa;AAAA,EACxB,IAAI;AAAA,IACF,MAAMA,OACH,EACA,QAAQ,MAAM,EACd,OAAO,CAAA,OAAM,0BAA0B,KAAK,EAAE,GAAG,sBAAsB;AAAA,IAC1E,MAAM;AAAA,MACJ;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IAAA;AAAA,EACF;AAAA,EAGF,cAAc;AAAA,IACZ,MAAMC,MAAQD,OAAE,CAAQ,EAAE,SAAS,MAAM,CAAA,CAAE;AAAA,IAC3C,MAAM;AAAA,MACJ;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IAAA;AAAA,EACF;AAEJ;AAEO,MAAM,eAAe;AAAA,EAC1B,IAAI;AAAA,IACF,MAAMA,SAEH,OAAO,MAAM;AACZ,YAAM,IAAI;AAAA,QACR;AAAA;AAAA,MAAA;AAAA,IAGJ,CAAC,EACA,SAAA;AAAA,IACH,QAAQ;AAAA,EAAA;AAAA,EAGV,KAAK;AAAA,IACH,MAAME,OAAE,EAAS,QAAQ,CAAC;AAAA,IAC1B,MAAM;AAAA,MACJ;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IAAA;AAAA,IAEF,QAAQ;AAAA,EAAA;AAEZ;AAEA,MAAM,iBAAiB;AAAA,EACrB,MAAM;AAAA,IACJ,MAAMF,OAAE,EAAS,QAAQ,SAAS;AAAA,IAClC,MAAM;AAAA,MACJ;AAAA,MACA;AAAA,MACA;AAAA,IAAA;AAAA,EACF;AAAA,EAGF,qBAAqB;AAAA,IACnB,MAAME,OAAE,EAAS,SAAA;AAAA,IACjB,MAAM;AAAA,MACJ;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IAAA;AAAA,EACF;AAEJ;AAIA,MAAM,uBAAuB;AAAA,EAC3B,KAAK;AAAA,IACH,MAAMA,OAAE,EAAS,SAAA;AAAA,IACjB,MAAM;AAAA,MACJ;AAAA,MACA;AAAA,IAAA;AAAA,EACF;AAAA,EAEF,UAAU;AAAA,IACR,MAAMA,OAAE,EAAS,QAAQ,GAAM;AAAA,IAC/B,MAAM;AAAA,MACJ;AAAA,IAAA;AAAA,EACF;AAEJ;AAIA,MAAM,cAAc;AAAA,EAClB,KAAK;AAAA,IACH,MAAMF,OAAE,EAAS,SAAA;AAAA,IACjB,MAAM;AAAA,MACJ;AAAA,IAAA;AAAA,IAEF,YAAY;AAAA,MACV;AAAA,IAAA;AAAA,EACF;AAAA,EAEF,SAAS;AAAA,IACP,MAAMA,OAAE,EAAS,SAAA;AAAA,IACjB,MAAM;AAAA,MACJ;AAAA,IAAA;AAAA,IAEF,YAAY;AAAA,MACV;AAAA,IAAA;AAAA,EACF;AAAA,EAEF,QAAQ;AAAA,IACN,MAAMA,OAAE,EAAS,SAAA;AAAA,IACjB,MAAM;AAAA,MACJ;AAAA,IAAA;AAAA,IAEF,YAAY;AAAA,MACV;AAAA,IAAA;AAAA,EACF;AAAA,EAEF,QAAQ;AAAA,IACN,MAAMA,OAAE,EAAS,SAAA;AAAA,IACjB,MAAM;AAAA,MACJ;AAAA,MACA;AAAA,IAAA;AAAA,IAEF,YAA
Y;AAAA,MACV;AAAA,IAAA;AAAA,EACF;AAAA,EAEF,UAAU;AAAA,IACR,MAAMA,OAAE,EAAS,SAAA;AAAA,IACjB,MAAM;AAAA,MACJ;AAAA,MACA;AAAA,IAAA;AAAA,IAEF,YAAY;AAAA,MACV;AAAA,IAAA;AAAA,EACF;AAEJ;AAEA,MAAM,yBAAyB,CAAC,SAC9B,aAAa,UAAU,qBAAqB,IAAI,CAAC,iBAAiB,IAAI;AAExE,MAAM,0BAA0B,CAC9B,aACA,YACI;AAAA,EACJ,KAAK;AAAA,IACH,MAAMC,MAAQD,OAAE,CAAQ,EAAE,SAAA;AAAA;AAAA,IAC1B,MAAM;AAAA,MACJ,sDAAsD,MAAM;AAAA,MAC5D;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IAAA;AAAA,IAEF,GAAI,cACA,EAAC,YAAY,CAAC,uBAAuB,GAAG,WAAW,MAAM,CAAC,MAC1D,CAAA;AAAA,EAAC;AAAA,EAEP,QAAQ;AAAA,IACN,MAAMA,OAAE,EAAS,SAAA;AAAA,IACjB,MAAM;AAAA,MACJ;AAAA,IAAA;AAAA,IAEF,GAAI,cACA,EAAC,YAAY,CAAC,uBAAuB,GAAG,WAAW,UAAU,CAAC,MAC9D,CAAA;AAAA,EAAC;AAAA,EAEP,gBAAgB;AAAA,IACd,MAAMG,QAAE,EAAU,QAAQ,KAAK;AAAA,IAC/B,MAAM;AAAA,MACJ;AAAA,MACA;AAAA,MACA;AAAA,IAAA;AAAA,IAEF,GAAI,cACA,EAAC,YAAY,CAAC,uBAAuB,GAAG,WAAW,kBAAkB,CAAC,MACtE,CAAA;AAAA,EAAC;AAAA,EAEP,sBAAsB;AAAA,IACpB,MAAMF,MAAQD,OAAE,CAAQ,EAAE,SAAA;AAAA,IAC1B,MAAM;AAAA,MACJ;AAAA,MACA,oEAAoE,WAAW,mBAAmB,SAAS,OAAO;AAAA,MAClH;AAAA,MACA;AAAA,MACA,iBAAiB,cAAc,YAAY,YAAA,IAAgB,WAAW,mBAAmB,WAAW,OAAO;AAAA,IAAA;AAAA,IAE7G,GAAI,cACA;AAAA,MACE,YAAY;AAAA,QACV,uBAAuB,GAAG,WAAW,yBAAyB;AAAA,MAAA;AAAA,IAChE,IAEF,CAAA;AAAA,EAAC;AAET;AAEA,MAAM,gBAAgB,wBAAwB,QAAW,gBAAgB;AACzE,MAAM,cAAc,wBAAwB,UAAU,gBAAgB;AACtE,MAAM,eAAe,wBAAwB,QAAW,qBAAqB;AAC7E,MAAM,oBAAoB;AAAA,EACxB;AAAA,EACA;AACF;AAQO,MAAM,cAAc;AAAA,EACzB,UAAU;AAAA,IACR,IAAI;AAAA,MACF,MAAMA,OAAE;AAAA,MACR,MAAM;AAAA,QACJ;AAAA,QACA;AAAA,MAAA;AAAA,IACF;AAAA,IAGF,MAAM;AAAA,MACJ,MAAMI,aAAe,MAAM,QAAQ,EAAE,QAAQ,IAAI;AAAA,MACjD,MAAM;AAAA,QACJ;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,MAAA;AAAA,MAEF,QAAQ;AAAA;AAAA,IAAA;AAAA,IAGV,UAAU;AAAA,MACR,MAAMF,
OAAE,EAAS,QAAQ,EAAE;AAAA,MAC3B,MAAM;AAAA,QACJ;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,MAAA;AAAA,IACF;AAAA,IAGF,mBAAmB;AAAA,MACjB,MAAMA,OAAE,EAAS,SAAA;AAAA,MACjB,QAAQ;AAAA;AAAA,IAAA;AAAA,EACV;AAAA;AAAA,EAIF,MAAM;AAAA,EACN,QAAQ;AAAA;AAAA,EAER,YAAY;AAAA,EACZ,OAAO;AAAA,EAEP,KAAK;AAAA,IACH,IAAI;AAAA,MACF,MAAMF,OAAE,EAAS,SAAA;AAAA,MACjB,MAAM;AAAA,QACJ;AAAA,QACA;AAAA,QACA;AAAA,MAAA;AAAA,IACF;AAAA,IAGF,UAAU;AAAA,MACR,MAAME,OAAE,EAAS,QAAQ,EAAE;AAAA,MAC3B,MAAM;AAAA,QACJ;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,MAAA;AAAA,IACF;AAAA,IAGF,mBAAmB;AAAA,MACjB,MAAMA,OAAE,EAAS,SAAA;AAAA,MACjB,QAAQ;AAAA;AAAA,IAAA;AAAA,IAGV,2CAA2C;AAAA,MACzC,MAAMA,OAAE,EAAS,QAAQ,EAAE;AAAA,MAC3B,MAAM;AAAA,QACJ;AAAA,QACA;AAAA,QACA;AAAA,MAAA;AAAA,IACF;AAAA,IAGF,yCAAyC;AAAA,MACvC,MAAMA,OAAE,EAAS,QAAQ,EAAE;AAAA,MAC3B,MAAM;AAAA,QACJ;AAAA,QACA;AAAA,QACA;AAAA,MAAA;AAAA,IACF;AAAA,IAGF,mCAAmC;AAAA,MACjC,MAAMA,OAAE,EAAS,QAAQ,EAAE;AAAA,MAC3B,MAAM;AAAA,QACJ;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,MAAA;AAAA,IACF;AAAA,EACF;AAAA,EAGF,qBAAqB;AAAA,IACnB,MAAMC,QAAE,EAAU,SAAA;AAAA,IAClB,MAAM;AAAA,MACJ;AAAA,MACA;AAAA,MACA;AAAA,IAAA;AAAA,EACF;AAAA,EAGF,oBAAoB;AAAA,IAClB,MAAMA,QAAE,EAAU,QAAQ,IAAI;AAAA,IAC9B,MAAM;AAAA,MACJ;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IAAA;AAAA,EACF;AAAA,EAGF,kBAAkB;AAAA,IAChB,MAAMD,OAAE,EAAS,QAAQ,EAAE;AAAA,IAC3B,MAAM;AAAA,MACJ;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IAAA;AAAA,EACF;AAAA,EAGF,QAAQ;AAAA,IACN,IAAI;AAAA,MACF,MAAMF,OAAE,EAAS,SAAA;AAAA,MACjB,MAAM;AAAA,QACJ;AAAA,QACA;AAAA,QACA;AAAA,MAAA;AAAA,IACF;AAAA,IAGF,UAAU;AAAA,MACR,MAAME,OAAE,EAAS,QAAQ,CAAC;AAAA,MAC1B,MAAM;AAAA,QACJ;AAAA,QACA;AAAA,QACA;AAAA,MAAA;AAAA,IACF;AAAA,EACF;AAAA,EAGF,SAAS;AAAA,EAET,KAAK;AAAA,EAEL,KAAK;AAAA,EAEL,OAAO;AAAA;AAAA,EAGP,MAAM;AAAA,EAEN,MAAM;AAAA,IACJ,MAAMA,OAAE,EAAS,QAAQ,IAAI;AAAA,IAC7B,MAAM,CAAC,gCAAgC;AAAA,EAAA;AAAA,EAGzC,gBAAgB;AAAA,IACd,KAAK;AAAA,MACH,MAAMF,OAAE,EAAS,SAAA;AAAA,MACjB,MAAM;AAAA,QACJ;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,MAAA;AAAA,IACF;AAAA,IAGF,MAAM;AAAA,MA
CJ,MAAMI,aAAe,aAAa,UAAU,EAAE,QAAQ,WAAW;AAAA,MACjE,MAAM;AAAA,QACJ;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,MAAA;AAAA,IACF;AAAA,IAGF,MAAM;AAAA,MACJ,MAAMF,OAAE,EAAS,SAAA;AAAA,MACjB,MAAM;AAAA,QACJ;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,MAAA;AAAA,IACF;AAAA;AAAA,IAIF,SAAS;AAAA,MACP,MAAMF,OAAE,EAAS,SAAA;AAAA,MACjB,YAAY;AAAA,QACV;AAAA,MAAA;AAAA,MAEF,QAAQ;AAAA,IAAA;AAAA;AAAA,IAIV,UAAU;AAAA,MACR,MAAMI,aAAe,MAAM,KAAK,EAAE,QAAQ,IAAI;AAAA,MAC9C,YAAY;AAAA,QACV;AAAA,MAAA;AAAA,MAEF,QAAQ;AAAA,IAAA;AAAA,IAGV,+BAA+B;AAAA,MAC7B,MAAMH,MAAQD,OAAE,CAAQ,EAAE,QAAQ,CAAC,GAAG,0BAA0B,CAAC;AAAA,MACjE,MAAM;AAAA,QACJ;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,MAAA;AAAA;AAAA;AAAA;AAAA,MAKF,QAAQ;AAAA,IAAA;AAAA,IAGV,gBAAgB;AAAA,MACd,MAAME,OAAE,EAAS,QAAQ,IAAK;AAAA,MAC9B,MAAM;AAAA,QACJ;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,MAAA;AAAA,IACF;AAAA,IAGF,iCAAiC;AAAA,MAC/B,MAAMA,OAAE,EAAS,QAAQ,IAAI;AAAA,MAC7B,MAAM;AAAA,QACJ;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,MAAA;AAAA,IACF;AAAA,EACF;AAAA,EAGF,QAAQ;AAAA,IACN,MAAMF,OAAE,EAAS,SAAA;AAAA,IACjB,MAAM;AAAA,MACJ;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IAAA;AAAA,EACF;AAAA,EAGF;AAAA,EAEA,gBAAgB;AAAA,IACd,MAAME,OAAE,EAAS,SAAA;AAAA,IACjB,MAAM;AAAA,MACJ;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IAAA;AAAA,EACF;AAAA,EAGF,WAAW;AAAA,IACT,MAAMC,QAAE,EAAU,QAAQ,IAAI;AAAA,IAC9B,MAAM;AAAA,MACJ;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IAAA;AAAA,EACF;AAAA,EAGF,eAAe;AAAA,IACb,MAAMH,OAAE,EAAS,SAAA;AAAA,IACjB,MAAM;AAAA,MACJ;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IAAA;AAAA,EACF;AAAA,EAGF,sBAAsB;AAAA,IACpB,MAAMG,QAAE,EAAU,QAAQ,KAAK;AAAA,IAC/B,MAAM;AAAA,MACJ;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IAAA;AAAA,EACF;AAAA,EAGF,6BAA6B;AAAA,IAC3B,MAAMH,OAAE,EAAS,SAAA;AAAA,IACjB,MAAM;AAAA,MACJ;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MA
CA;AAAA,MACA;AAAA,IAAA;AAAA,EACF;AAAA,EAGF,0BAA0B;AAAA,IACxB,MAAME,OAAE,EAAS,QAAQ,KAAK,OAAO,IAAI;AAAA,IACzC,MAAM;AAAA,MACJ;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IAAA;AAAA,EACF;AAAA,EAGF,YAAY;AAAA,IACV,YAAY;AAAA,MACV,MAAMF,OAAE,EAAS,SAAA;AAAA,MACjB,MAAM,CAAC,2CAA2C;AAAA,IAAA;AAAA,IAGpD,YAAY;AAAA,MACV,MAAMA,OAAE,EAAS,QAAQ,sCAAsC;AAAA,MAC/D,MAAM;AAAA,QACJ;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,MAAA;AAAA,IACF;AAAA,IAGF,UAAU;AAAA,MACR,MAAMI,aAAe,SAAS,QAAQ,QAAQ,OAAO,EAAE,QAAQ,MAAM;AAAA,IAAA;AAAA,IAGvE,WAAW;AAAA,MACT,MAAMJ,OAAE,EAAS,SAAA;AAAA,MACjB,MAAM;AAAA,QACJ;AAAA,QACA;AAAA,QACA;AAAA,MAAA;AAAA,IACF;AAAA,IAGF,UAAU;AAAA,MACR,MAAMA,OAAE,EAAS,SAAA;AAAA,MACjB,MAAM;AAAA,QACJ;AAAA,QACA;AAAA,MAAA;AAAA,IACF;AAAA,IAGF,MAAM;AAAA,MACJ,MAAME,OAAE,EAAS,SAAA;AAAA,MACjB,MAAM;AAAA,QACJ;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,MAAA;AAAA,IACF;AAAA,IAGF,uBAAuB;AAAA,MACrB,MAAMA,OAAE,EAAS,QAAQ,EAAE;AAAA,MAC3B,MAAM;AAAA,QACJ;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,MAAA;AAAA,IACF;AAAA,IAGF,wBAAwB;AAAA,MACtB,MAAMA,OAAE,EAAS,SAAA;AAAA,MACjB,MAAM;AAAA,QACJ;AAAA,QACA;AAAA,QACA;AAAA,MAAA;AAAA,IACF;AAAA,IAGF,wBAAwB;AAAA,MACtB,MAAMA,OAAE,EAAS,SAAA;AAAA,MACjB,MAAM;AAAA,QACJ;AAAA,QACA;AAAA,QACA;AAAA,MAAA;AAAA,IACF;AAAA,IAGF,kCAAkC;AAAA,MAChC,MAAMA,OAAE,EAAS,QAAQ,EAAE;AAAA,MAC3B,MAAM;AAAA,QACJ;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,MAAA;AAAA,IACF;AAAA,IAGF,6BAA6B;AAAA,MAC3B,MAAMA,OAAE,EAAS,QAAQ,EAAE;AAAA,MAC3B,MAAM;AAAA,QACJ;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,MAAA;AAAA,IACF;AAAA,IAGF,oBAAoB;AAAA,MAClB,MAAMA,OAAE,EAAS,QAAQ,EAAE;AAAA,MAC3B,MAAM;AAAA,QACJ;AAAA,QACA;AAAA,MAAA;AAAA,IACF;AAAA,IAGF,sBAAsB;AAAA,MACpB,MAAMA,OAAE,EAAS,QAAQ,EAAE;AAAA,MAC3B,MAAM;AAAA,QACJ;AAAA,QACA;AAAA,MAAA;AAAA,IACF;AAAA,IAGF,eAAe;AAAA,MACb,MAAMA,OAAE,EAAS,QAAQ,KAAK,OAAO,IAAI;AAAA,MACzC,MAAM;AAAA,QACJ;AAAA,QACA;AAAA,QACA;AAAA,MAAA;AAAA,IACF;AAAA,EACF;AAAA,EAGF,iBAAiB;AAAA,IACf,MAAMF,OAAE,EAAS,SAAA;AAAA,IACjB,MAAM;AAAA,MACJ;AAAA,IAAA;AAAA,EACF;AAAA,EAGF,aAAa;AAAA,IACX,kBAAkB;AAAA,MAChB,MAAME,OAAE,EAAS,
QAAQ,CAAC;AAAA,MAC1B,MAAM;AAAA,QACJ;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,MAAA;AAAA,IACF;AAAA,IAGF,aAAa;AAAA,MACX,MAAMC,QAAE,EAAU,SAAA;AAAA,MAClB,QAAQ;AAAA,MACR,MAAM;AAAA,QACJ;AAAA,QACA;AAAA,MAAA;AAAA,IACF;AAAA,EACF;AAAA;AAAA,EAIF,sBAAsB;AAAA,IACpB,MAAMD,OAAE,EAAS,QAAQ,GAAM;AAAA,IAC/B,YAAY;AAAA,MACV;AAAA,MACA;AAAA,MACA;AAAA,IAAA;AAAA,IAEF,QAAQ;AAAA,EAAA;AAAA,EAGV,aAAa;AAAA,IACX,MAAMC,QAAE,EAAU,QAAQ,KAAK;AAAA,IAC/B,MAAM;AAAA,MACJ;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IAAA;AAAA,EACF;AAAA,EAGF,eAAe;AAAA,IACb,MAAMH,OAAE,EAAS,SAAA;AAAA,IACjB,MAAM,CAAC,iEAAiE;AAAA,EAAA;AAAA,EAG1E,iBAAiB;AAAA,IACf,MAAMG,QAAE,EAAU,QAAQ,IAAI;AAAA,IAC9B,MAAM;AAAA,MACJ;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IAAA;AAAA,EACF;AAAA,EAGF,YAAY;AAAA,IACV,SAAS;AAAA,MACP,MAAMH,OAAE,EAAS,SAAA;AAAA,MACjB,MAAM;AAAA,QACJ;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,MAAA;AAAA,IACF;AAAA,IAGF,uBAAuB;AAAA,MACrB,MAAMA,OAAE,EAAS,SAAA;AAAA,MACjB,MAAM;AAAA,QACJ;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,MAAA;AAAA,IACF;AAAA,EACF;AAEJ;AAIA,IAAI;AAEG,SAAS,cACd,OAA4C,IAChC;AACZ,MAAI,CAAC,gBAAgB,qBAAqB;AACxC,mBAAe,aAAa,aAAa;AAAA,MACvC,eAAe;AAAA,MACf,yBAAyB;AAAA;AAAA,MACzB,GAAG;AAAA,IAAA,CACJ;AAED,QAAI,aAAa,qBAAqB;AACpC,wBAAkB,uBAAuB;AAAA,IAC3C;AAAA,EACF;AACA,SAAO;AACT;AAMO,SAAS,wBACd,OAA4C,IACtB;AACtB,QAAM,SAAS,cAAc,IAAI;AACjC,mBAAiB,MAAM;AACvB,SAAO;AACT;AAMO,SAAS,iBACd,QACQ;AACR,SAAO,QAAQ,iBAAiB,YAAY;AAC9C;AAEO,SAAS,qBACd,IACA,QACA,UACA;AAOA,MAAI,CAAC,YAAY,CAAC,OAAO,iBAAiB,qBAAqB;AAC7D;AAAA,MACE;AAAA,MACA;AAAA,IAAA;AAEF,WAAO;AAAA,EACT;AAEA,MAAI,CAAC,OAAO,eAAe;AACzB,OAAG,OAAO,uCAAuC;AACjD,WAAO;AAAA,EACT;AAGA,QAAM,iBAAiB,OAAO,KAAK,YAAY,EAAE;AACjD,QAAM,eAAe,OAAO,KAAK,OAAO,aAAa;AAGrD,MAAI,eAAe,WAAW,aAAa,QAAQ;AAEjD,oBAAgB,cAAc,YAAY;AAC1C,OAAG,OAAO,wBAAwB;AAClC,WAAO;AAAA,EACT;AAEA,MAAI,CAAC,gBAAgB,gBAAgB,YAAY,GAAG;AAClD,OAAG,OAAO,wBAAwB;AAClC,WAAO;AAAA,EACT;AAEA,KAAG,QAAQ,yBAAyB;AACpC,SAAO;AACT;AAEA,IAAI,YAAY;AAEhB,SAAS,SAAS,IAAgB,KAAa;AAC7C,MAAI,CAAC,WAAW;AACd,OAAG,OAAO,GA
AG;AACb,gBAAY;AAAA,EACd;AACF;"}
1
+ {"version":3,"file":"zero-config.js","sources":["../../../../../zero-cache/src/config/zero-config.ts"],"sourcesContent":["/**\n * These types represent the _compiled_ config whereas `define-config` types represent the _source_ config.\n */\n\nimport type {LogContext} from '@rocicorp/logger';\nimport {timingSafeEqual} from 'node:crypto';\nimport {logOptions} from '../../../otel/src/log-options.ts';\nimport {\n flagToEnv,\n parseOptions,\n type Config,\n type ParseOptions,\n} from '../../../shared/src/options.ts';\nimport * as v from '../../../shared/src/valita.ts';\n// @circular-dep-ignore - importing package.json for version info only\nimport packageJson from '../../../zero/package.json' with {type: 'json'};\nimport {runtimeDebugFlags} from '../../../zql/src/builder/debug-delegate.ts';\nimport {singleProcessMode} from '../types/processes.ts';\nimport {\n ALLOWED_APP_ID_CHARACTERS,\n INVALID_APP_ID_MESSAGE,\n} from '../types/shards.ts';\nimport {DEFAULT_PREFERRED_PREFIXES} from './network.ts';\nimport {\n assertNormalized,\n isDevelopmentMode,\n type NormalizedZeroConfig,\n} from './normalize.ts';\nexport type {LogConfig} from '../../../otel/src/log-options.ts';\n\nexport const ZERO_ENV_VAR_PREFIX = 'ZERO_';\n\nexport const appOptions = {\n id: {\n type: v\n .string()\n .default('zero')\n .assert(id => ALLOWED_APP_ID_CHARACTERS.test(id), INVALID_APP_ID_MESSAGE),\n desc: [\n 'Unique identifier for the app.',\n '',\n 'Multiple zero-cache apps can run on a single upstream database, each of which',\n 'is isolated from the others, with its own permissions, sharding (future feature),',\n 'and change/cvr databases.',\n '',\n 'The metadata of an app is stored in an upstream schema with the same name,',\n 'e.g. \"zero\", and the metadata for each app shard, e.g. client and mutation',\n 'ids, is stored in the \"\\\\{app-id\\\\}_\\\\{#\\\\}\" schema. 
(Currently there is only a single',\n '\"0\" shard, but this will change with sharding).',\n '',\n 'The CVR and Change data are managed in schemas named \"\\\\{app-id\\\\}_\\\\{shard-num\\\\}/cvr\"',\n 'and \"\\\\{app-id\\\\}_\\\\{shard-num\\\\}/cdc\", respectively, allowing multiple apps and shards',\n 'to share the same database instance (e.g. a Postgres \"cluster\") for CVR and Change management.',\n '',\n 'Due to constraints on replication slot names, an App ID may only consist of',\n 'lower-case letters, numbers, and the underscore character.',\n '',\n 'Note that this option is used by both {bold zero-cache} and {bold zero-deploy-permissions}.',\n ],\n },\n\n publications: {\n type: v.array(v.string()).optional(() => []),\n desc: [\n `Postgres {bold PUBLICATION}s that define the tables and columns to`,\n `replicate. Publication names may not begin with an underscore,`,\n `as zero reserves that prefix for internal use.`,\n ``,\n `If unspecified, zero-cache will create and use an internal publication that`,\n `publishes all tables in the {bold public} schema, i.e.:`,\n ``,\n `CREATE PUBLICATION _\\\\{app-id\\\\}_public_0 FOR TABLES IN SCHEMA public;`,\n ``,\n `Note that changing the set of publications will result in resyncing the replica,`,\n `which may involve downtime (replication lag) while the new replica is initializing.`,\n `To change the set of publications without disrupting an existing app, a new app`,\n `should be created.`,\n ],\n },\n};\n\nexport const shardOptions = {\n id: {\n type: v\n .string()\n .assert(() => {\n throw new Error(\n `ZERO_SHARD_ID is no longer an option. Please use ZERO_APP_ID instead.`,\n // TODO: Link to release / migration notes?\n );\n })\n .optional(),\n hidden: true,\n },\n\n num: {\n type: v.number().default(0),\n desc: [\n `The shard number (from 0 to NUM_SHARDS) of the App. 
zero will eventually`,\n `support data sharding as a first-class primitive; until then, deploying`,\n `multiple shard-nums creates functionally identical shards. Until sharding is`,\n `actually meaningful, this flag is hidden but available for testing.`,\n ],\n hidden: true,\n },\n};\n\nconst replicaOptions = {\n file: {\n type: v.string().default('zero.db'),\n desc: [\n `File path to the SQLite replica that zero-cache maintains.`,\n `This can be lost, but if it is, zero-cache will have to re-replicate next`,\n `time it starts up.`,\n ],\n },\n\n vacuumIntervalHours: {\n type: v.number().optional(),\n desc: [\n `Performs a VACUUM at server startup if the specified number of hours has elapsed`,\n `since the last VACUUM (or initial-sync). The VACUUM operation is heavyweight`,\n `and requires double the size of the db in disk space. If unspecified, VACUUM`,\n `operations are not performed.`,\n ],\n },\n};\n\nexport type ReplicaOptions = Config<typeof replicaOptions>;\n\nconst perUserMutationLimit = {\n max: {\n type: v.number().optional(),\n desc: [\n `The maximum mutations per user within the specified {bold windowMs}.`,\n `If unset, no rate limiting is enforced.`,\n ],\n },\n windowMs: {\n type: v.number().default(60_000),\n desc: [\n `The sliding window over which the {bold perUserMutationLimitMax} is enforced.`,\n ],\n },\n};\n\nexport type RateLimit = Config<typeof perUserMutationLimit>;\n\nconst authOptions = {\n jwk: {\n type: v.string().optional(),\n desc: [\n `A public key in JWK format used to verify JWTs. Only one of {bold jwk}, {bold jwksUrl} and {bold secret} may be set.`,\n ],\n deprecated: [\n `Use cookie-based authentication or an auth token instead - see https://zero.rocicorp.dev/docs/auth.`,\n ],\n },\n jwksUrl: {\n type: v.string().optional(),\n desc: [\n `A URL that returns a JWK set used to verify JWTs. 
Only one of {bold jwk}, {bold jwksUrl} and {bold secret} may be set.`,\n ],\n deprecated: [\n `Use cookie-based authentication or an auth token instead - see https://zero.rocicorp.dev/docs/auth.`,\n ],\n },\n secret: {\n type: v.string().optional(),\n desc: [\n `A symmetric key used to verify JWTs. Only one of {bold jwk}, {bold jwksUrl} and {bold secret} may be set.`,\n ],\n deprecated: [\n `Use cookie-based authentication or an auth token instead - see https://zero.rocicorp.dev/docs/auth.`,\n ],\n },\n issuer: {\n type: v.string().optional(),\n desc: [\n `Expected issuer ({bold iss} claim) for JWT validation.`,\n `If set, tokens with a different or missing issuer will be rejected.`,\n ],\n deprecated: [\n `Use cookie-based authentication or an auth token instead - see https://zero.rocicorp.dev/docs/auth.`,\n ],\n },\n audience: {\n type: v.string().optional(),\n desc: [\n `Expected audience ({bold aud} claim) for JWT validation.`,\n `If set, tokens with a different or missing audience will be rejected.`,\n ],\n deprecated: [\n `Use cookie-based authentication or an auth token instead - see https://zero.rocicorp.dev/docs/auth.`,\n ],\n },\n};\n\nconst makeDeprecationMessage = (flag: string) =>\n `Use {bold ${flagToEnv(ZERO_ENV_VAR_PREFIX, flag)}} (or {bold --${flag}}) instead.`;\n\nconst makeMutatorQueryOptions = (\n replacement: 'mutate' | 'query' | undefined,\n suffix: string,\n) => ({\n url: {\n type: v.array(v.string()).optional(), // optional until we remove CRUD mutations\n desc: [\n `The URL of the API server to which zero-cache will ${suffix}.`,\n ``,\n `{bold IMPORTANT:} URLs are matched using {bold URLPattern}, a standard Web API.`,\n ``,\n `{bold Pattern Syntax:}`,\n ` URLPattern uses a simple and intuitive syntax similar to Express routes.`,\n ` Wildcards and named parameters make it easy to match multiple URLs.`,\n ``,\n `{bold Basic Examples:}`,\n ` Exact URL match:`,\n ` \"https://api.example.com/mutate\"`,\n ` `,\n ` Any subdomain using 
wildcard:`,\n ` \"https://*.example.com/mutate\"`,\n ` `,\n ` Multiple subdomain levels:`,\n ` \"https://*.*.example.com/mutate\"`,\n ` `,\n ` Any path under a domain:`,\n ` \"https://api.example.com/*\"`,\n ` `,\n ` Named path parameters:`,\n ` \"https://api.example.com/:version/mutate\"`,\n ` ↳ Matches \"https://api.example.com/v1/mutate\", \"https://api.example.com/v2/mutate\", etc.`,\n ``,\n `{bold Advanced Patterns:}`,\n ` Optional path segments:`,\n ` \"https://api.example.com/:path?\"`,\n ` `,\n ` Regex in segments (for specific patterns):`,\n ` \"https://api.example.com/:version(v\\\\\\\\d+)/mutate\"`,\n ` ↳ Matches only \"v\" followed by digits`,\n ``,\n `{bold Multiple patterns:}`,\n ` [\"https://api1.example.com/mutate\", \"https://api2.example.com/mutate\"]`,\n ``,\n `{bold Note:} Query parameters and URL fragments (#) are automatically ignored during matching.`,\n ``,\n `For full URLPattern syntax, see: https://developer.mozilla.org/en-US/docs/Web/API/URLPattern`,\n ],\n ...(replacement\n ? {deprecated: [makeDeprecationMessage(`${replacement}-url`)]}\n : {}),\n },\n apiKey: {\n type: v.string().optional(),\n desc: [\n `An optional secret used to authorize zero-cache to call the API server handling writes.`,\n ],\n ...(replacement\n ? {deprecated: [makeDeprecationMessage(`${replacement}-api-key`)]}\n : {}),\n },\n forwardCookies: {\n type: v.boolean().default(false),\n desc: [\n `If true, zero-cache will forward cookies from the request.`,\n `This is useful for passing authentication cookies to the API server.`,\n `If false, cookies are not forwarded.`,\n ],\n ...(replacement\n ? {deprecated: [makeDeprecationMessage(`${replacement}-forward-cookies`)]}\n : {}),\n },\n allowedClientHeaders: {\n type: v.array(v.string()).optional(),\n desc: [\n `A list of header names that clients are allowed to set via custom headers.`,\n `If specified, only headers in this list will be forwarded to the ${suffix === 'push mutations' ? 
'push' : 'query'} URL.`,\n `Header names are case-insensitive.`,\n `If not specified, no client-provided headers are forwarded (secure by default).`,\n `Example: ZERO_${replacement ? replacement.toUpperCase() : suffix === 'push mutations' ? 'MUTATE' : 'QUERY'}_ALLOWED_CLIENT_HEADERS=x-request-id,x-correlation-id`,\n ],\n ...(replacement\n ? {\n deprecated: [\n makeDeprecationMessage(`${replacement}-allowed-client-headers`),\n ],\n }\n : {}),\n },\n});\n\nconst mutateOptions = makeMutatorQueryOptions(undefined, 'push mutations');\nconst pushOptions = makeMutatorQueryOptions('mutate', 'push mutations');\nconst queryOptions = makeMutatorQueryOptions(undefined, 'send synced queries');\nconst getQueriesOptions = makeMutatorQueryOptions(\n 'query',\n 'send synced queries',\n);\n\n/** @deprecated */\nexport type AuthConfig = Config<typeof authOptions>;\n\n// Note: --help will list flags in the order in which they are defined here,\n// so order the fields such that the important (e.g. required) ones are first.\n// (Exported for testing)\nexport const zeroOptions = {\n upstream: {\n db: {\n type: v.string(),\n desc: [\n `The \"upstream\" authoritative postgres database.`,\n `In the future we will support other types of upstream besides PG.`,\n ],\n },\n\n type: {\n type: v.literalUnion('pg', 'custom').default('pg'),\n desc: [\n `The meaning of the {bold upstream-db} depends on the upstream type:`,\n `* {bold pg}: The connection database string, e.g. \"postgres://...\"`,\n `* {bold custom}: The base URI of the change source \"endpoint, e.g.`,\n ` \"https://my-change-source.dev/changes/v0/stream?apiKey=...\"`,\n ],\n hidden: true, // TODO: Unhide when ready to officially support.\n },\n\n maxConns: {\n type: v.number().default(20),\n desc: [\n `The maximum number of connections to open to the upstream database`,\n `for committing mutations. 
This is divided evenly amongst sync workers.`,\n `In addition to this number, zero-cache uses one connection for the`,\n `replication stream.`,\n ``,\n `Note that this number must allow for at least one connection per`,\n `sync worker, or zero-cache will fail to start. See {bold num-sync-workers}`,\n ],\n },\n\n maxConnsPerWorker: {\n type: v.number().optional(),\n hidden: true, // Passed from main thread to sync workers\n },\n },\n\n /** @deprecated */\n push: pushOptions,\n mutate: mutateOptions,\n /** @deprecated */\n getQueries: getQueriesOptions,\n query: queryOptions,\n\n cvr: {\n db: {\n type: v.string().optional(),\n desc: [\n `The Postgres database used to store CVRs. CVRs (client view records) keep track`,\n `of the data synced to clients in order to determine the diff to send on reconnect.`,\n `If unspecified, the {bold upstream-db} will be used.`,\n ],\n },\n\n maxConns: {\n type: v.number().default(30),\n desc: [\n `The maximum number of connections to open to the CVR database.`,\n `This is divided evenly amongst sync workers.`,\n ``,\n `Note that this number must allow for at least one connection per`,\n `sync worker, or zero-cache will fail to start. 
See {bold num-sync-workers}`,\n ],\n },\n\n maxConnsPerWorker: {\n type: v.number().optional(),\n hidden: true, // Passed from main thread to sync workers\n },\n\n garbageCollectionInactivityThresholdHours: {\n type: v.number().default(48),\n desc: [\n `The duration after which an inactive CVR is eligible for garbage collection.`,\n `Note that garbage collection is an incremental, periodic process which does not`,\n `necessarily purge all eligible CVRs immediately.`,\n ],\n },\n\n garbageCollectionInitialIntervalSeconds: {\n type: v.number().default(60),\n desc: [\n `The initial interval at which to check and garbage collect inactive CVRs.`,\n `This interval is increased exponentially (up to 16 minutes) when there is`,\n `nothing to purge.`,\n ],\n },\n\n garbageCollectionInitialBatchSize: {\n type: v.number().default(25),\n desc: [\n `The initial number of CVRs to purge per garbage collection interval.`,\n `This number is increased linearly if the rate of new CVRs exceeds the rate of`,\n `purged CVRs, in order to reach a steady state.`,\n ``,\n `Setting this to 0 effectively disables CVR garbage collection.`,\n ],\n },\n },\n\n queryHydrationStats: {\n type: v.boolean().optional(),\n desc: [\n `Track and log the number of rows considered by query hydrations which`,\n `take longer than {bold log-slow-hydrate-threshold} milliseconds.`,\n `This is useful for debugging and performance tuning.`,\n ],\n },\n\n enableQueryPlanner: {\n type: v.boolean().default(true),\n desc: [\n `Enable the query planner for optimizing ZQL queries.`,\n ``,\n `The query planner analyzes and optimizes query execution by determining`,\n `the most efficient join strategies.`,\n ``,\n `You can disable the planner if it is picking bad strategies.`,\n ],\n },\n\n yieldThresholdMs: {\n type: v.number().default(10),\n desc: [\n `The maximum amount of time in milliseconds that a sync worker will`,\n `spend in IVM (processing query hydration and advancement) before yielding`,\n `to the event loop. 
Lower values increase responsiveness and fairness at`,\n `the cost of reduced throughput.`,\n ],\n },\n\n change: {\n db: {\n type: v.string().optional(),\n desc: [\n `The Postgres database used to store recent replication log entries, in order`,\n `to sync multiple view-syncers without requiring multiple replication slots on`,\n `the upstream database. If unspecified, the {bold upstream-db} will be used.`,\n ],\n },\n\n maxConns: {\n type: v.number().default(5),\n desc: [\n `The maximum number of connections to open to the change database.`,\n `This is used by the {bold change-streamer} for catching up`,\n `{bold zero-cache} replication subscriptions.`,\n ],\n },\n },\n\n replica: replicaOptions,\n\n log: logOptions,\n\n app: appOptions,\n\n shard: shardOptions,\n\n /** @deprecated */\n auth: authOptions,\n\n port: {\n type: v.number().default(4848),\n desc: [`The port for sync connections.`],\n },\n\n changeStreamer: {\n uri: {\n type: v.string().optional(),\n desc: [\n `When set, connects to the {bold change-streamer} at the given URI.`,\n `In a multi-node setup, this should be specified in {bold view-syncer} options,`,\n `pointing to the {bold replication-manager} URI, which runs a {bold change-streamer}`,\n `on port 4849.`,\n ],\n },\n\n mode: {\n type: v.literalUnion('dedicated', 'discover').default('dedicated'),\n desc: [\n `As an alternative to {bold ZERO_CHANGE_STREAMER_URI}, the {bold ZERO_CHANGE_STREAMER_MODE}`,\n `can be set to \"{bold discover}\" to instruct the {bold view-syncer} to connect to the `,\n `ip address registered by the {bold replication-manager} upon startup.`,\n ``,\n `This may not work in all networking configurations, e.g. certain private `,\n `networking or port forwarding configurations. 
Using the {bold ZERO_CHANGE_STREAMER_URI}`,\n `with an explicit routable hostname is recommended instead.`,\n ``,\n `Note: This option is ignored if the {bold ZERO_CHANGE_STREAMER_URI} is set.`,\n ],\n },\n\n port: {\n type: v.number().optional(),\n desc: [\n `The port on which the {bold change-streamer} runs. This is an internal`,\n `protocol between the {bold replication-manager} and {bold view-syncers}, which`,\n `runs in the same process tree in local development or a single-node configuration.`,\n ``,\n `If unspecified, defaults to {bold --port} + 1.`,\n ],\n },\n\n /** @deprecated */\n address: {\n type: v.string().optional(),\n deprecated: [\n `Set the {bold ZERO_CHANGE_STREAMER_URI} on view-syncers instead.`,\n ],\n hidden: true,\n },\n\n /** @deprecated */\n protocol: {\n type: v.literalUnion('ws', 'wss').default('ws'),\n deprecated: [\n `Set the {bold ZERO_CHANGE_STREAMER_URI} on view-syncers instead.`,\n ],\n hidden: true,\n },\n\n discoveryInterfacePreferences: {\n type: v.array(v.string()).default([...DEFAULT_PREFERRED_PREFIXES]),\n desc: [\n `The name prefixes to prefer when introspecting the network interfaces to determine`,\n `the externally reachable IP address for change-streamer discovery. This defaults`,\n `to commonly used names for standard ethernet interfaces in order to prevent selecting`,\n `special interfaces such as those for VPNs.`,\n ],\n // More confusing than it's worth to advertise this. The default list should be\n // adjusted to make things work for all environments; it is controlled as a\n // hidden flag as an emergency to unblock people with outlier network configs.\n hidden: true,\n },\n\n startupDelayMs: {\n type: v.number().default(15000),\n desc: [\n `The delay to wait before the change-streamer takes over the replication stream`,\n `(i.e. the handoff during replication-manager updates), to allow loadbalancers to register`,\n `the task as healthy based on healthcheck parameters. 
Note that if a change stream request`,\n `is received during this interval, the delay will be canceled and the takeover will happen`,\n `immediately, since the incoming request indicates that the task is registered as a target.`,\n ],\n },\n\n backPressureLimitHeapProportion: {\n type: v.number().default(0.04),\n desc: [\n `The percentage of {bold --max-old-space-size} to use as a buffer for absorbing replication`,\n `stream spikes. When the estimated amount of queued data exceeds this threshold, back pressure`,\n `is applied to the replication stream, delaying downstream sync as a result.`,\n ``,\n `The threshold was determined empirically with load testing. Higher thresholds have resulted`,\n `in OOMs. Note also that the byte-counting logic in the queue is strictly an underestimate of`,\n `actual memory usage (but importantly, proportionally correct), so the queue is actually`,\n `using more than what this proportion suggests.`,\n ``,\n `This parameter is exported as an emergency knob to reduce the size of the buffer in the`,\n `event that the server OOMs from back pressure. Resist the urge to {italic increase} this`,\n `proportion, as it is mainly useful for absorbing periodic spikes and does not meaningfully`,\n `affect steady-state replication throughput; the latter is determined by other factors such`,\n `as object serialization and PG throughput`,\n ``,\n `In other words, the back pressure limit does not constrain replication throughput;`,\n `rather, it protects the system when the upstream throughput exceeds the downstream`,\n `throughput.`,\n ],\n },\n\n flowControlConsensusPaddingSeconds: {\n type: v.number().default(1),\n desc: [\n `During periodic flow control checks (every 64kb), the amount of time to wait after the`,\n `majority of subscribers have acked, after which replication will continue even if`,\n `some subscribers have yet to ack. 
(Note that this is not a timeout for the {italic entire} send,`,\n `but a timeout that starts {italic after} the majority of receivers have acked.)`,\n ``,\n `This allows a bounded amount of time for backlogged subscribers to catch up on each flush`,\n `without forcing all subscribers to wait for the entire backlog to be processed. It is also`,\n `useful for mitigating the effect of unresponsive subscribers due to severed websocket`,\n `connections (until liveness checks disconnect them).`,\n ``,\n `Set this to a negative number to disable early flow control releases. (Not recommended, but`,\n `available as an emergency measure.)`,\n ],\n },\n },\n\n taskID: {\n type: v.string().optional(),\n desc: [\n `Globally unique identifier for the zero-cache instance.`,\n ``,\n `Setting this to a platform specific task identifier can be useful for debugging.`,\n `If unspecified, zero-cache will attempt to extract the TaskARN if run from within`,\n `an AWS ECS container, and otherwise use a random string.`,\n ],\n },\n\n perUserMutationLimit,\n\n numSyncWorkers: {\n type: v.number().optional(),\n desc: [\n `The number of processes to use for view syncing.`,\n `Leave this unset to use the maximum available parallelism.`,\n `If set to 0, the server runs without sync workers, which is the`,\n `configuration for running the {bold replication-manager}.`,\n ],\n },\n\n autoReset: {\n type: v.boolean().default(true),\n desc: [\n `Automatically wipe and resync the replica when replication is halted.`,\n `This situation can occur for configurations in which the upstream database`,\n `provider prohibits event trigger creation, preventing the zero-cache from`,\n `being able to correctly replicate schema changes. 
For such configurations,`,\n `an upstream schema change will instead result in halting replication with an`,\n `error indicating that the replica needs to be reset.`,\n ``,\n `When {bold auto-reset} is enabled, zero-cache will respond to such situations`,\n `by shutting down, and when restarted, resetting the replica and all synced `,\n `clients. This is a heavy-weight operation and can result in user-visible`,\n `slowness or downtime if compute resources are scarce.`,\n ],\n },\n\n adminPassword: {\n type: v.string().optional(),\n desc: [\n `A password used to administer zero-cache server, for example to access the`,\n `/statz endpoint.`,\n '',\n 'A password is optional in development mode but {bold required in production} mode.',\n ],\n },\n\n websocketCompression: {\n type: v.boolean().default(false),\n desc: [\n 'Enable WebSocket per-message deflate compression.',\n '',\n 'Compression can reduce bandwidth usage for sync traffic but',\n 'increases CPU usage on both client and server. 
Disabled by default.',\n '',\n 'See: https://github.com/websockets/ws#websocket-compression',\n ],\n },\n\n websocketCompressionOptions: {\n type: v.string().optional(),\n desc: [\n 'JSON string containing WebSocket compression options.',\n '',\n 'Only used if websocketCompression is enabled.',\n '',\n 'Example: \\\\{\"zlibDeflateOptions\":\\\\{\"level\":3\\\\},\"threshold\":1024\\\\}',\n '',\n 'See https://github.com/websockets/ws/blob/master/doc/ws.md#new-websocketserveroptions-callback for available options.',\n ],\n },\n\n websocketMaxPayloadBytes: {\n type: v.number().default(10 * 1024 * 1024),\n desc: [\n 'Maximum size of incoming WebSocket messages in bytes.',\n '',\n 'Messages exceeding this limit are rejected before parsing.',\n 'Default: 10MB (10 * 1024 * 1024 = 10485760)',\n ],\n },\n\n litestream: {\n executable: {\n type: v.string().optional(),\n desc: [`Path to the {bold litestream} executable.`],\n },\n\n configPath: {\n type: v.string().default('./src/services/litestream/config.yml'),\n desc: [\n `Path to the litestream yaml config file. zero-cache will run this with its`,\n `environment variables, which can be referenced in the file via $\\\\{ENV\\\\}`,\n `substitution, for example:`,\n `* {bold ZERO_REPLICA_FILE} for the db path`,\n `* {bold ZERO_LITESTREAM_BACKUP_LOCATION} for the db replica url`,\n `* {bold ZERO_LITESTREAM_LOG_LEVEL} for the log level`,\n `* {bold ZERO_LOG_FORMAT} for the log type`,\n ],\n },\n\n logLevel: {\n type: v.literalUnion('debug', 'info', 'warn', 'error').default('warn'),\n },\n\n backupURL: {\n type: v.string().optional(),\n desc: [\n `The location of the litestream backup, usually an {bold s3://} URL.`,\n `This is only consulted by the {bold replication-manager}.`,\n `{bold view-syncers} receive this information from the {bold replication-manager}.`,\n ],\n },\n\n endpoint: {\n type: v.string().optional(),\n desc: [\n `The S3-compatible endpoint URL to use for the litestream backup. 
Only required for non-AWS services.`,\n `The {bold replication-manager} and {bold view-syncers} must have the same endpoint.`,\n ],\n },\n\n port: {\n type: v.number().optional(),\n desc: [\n `Port on which litestream exports metrics, used to determine the replication`,\n `watermark up to which it is safe to purge change log records.`,\n ``,\n `If unspecified, defaults to {bold --port} + 2.`,\n ],\n },\n\n checkpointThresholdMB: {\n type: v.number().default(40),\n desc: [\n `The size of the WAL file at which to perform an SQlite checkpoint to apply`,\n `the writes in the WAL to the main database file. Each checkpoint creates`,\n `a new WAL segment file that will be backed up by litestream. Smaller thresholds`,\n `may improve read performance, at the expense of creating more files to download`,\n `when restoring the replica from the backup.`,\n ],\n },\n\n minCheckpointPageCount: {\n type: v.number().optional(),\n desc: [\n `The WAL page count at which SQLite attempts a PASSIVE checkpoint, which`,\n `transfers pages to the main database file without blocking writers.`,\n `Defaults to {bold checkpointThresholdMB * 250} (since SQLite page size is 4KB).`,\n ],\n },\n\n maxCheckpointPageCount: {\n type: v.number().optional(),\n desc: [\n `The WAL page count at which SQLite performs a RESTART checkpoint, which`,\n `blocks writers until complete. Defaults to {bold minCheckpointPageCount * 10}.`,\n `Set to {bold 0} to disable RESTART checkpoints entirely.`,\n ],\n },\n\n incrementalBackupIntervalMinutes: {\n type: v.number().default(15),\n desc: [\n `The interval between incremental backups of the replica. 
Shorter intervals`,\n `reduce the amount of change history that needs to be replayed when catching`,\n `up a new view-syncer, at the expense of increasing the number of files needed`,\n `to download for the initial litestream restore.`,\n ],\n },\n\n snapshotBackupIntervalHours: {\n type: v.number().default(12),\n desc: [\n `The interval between snapshot backups of the replica. Snapshot backups`,\n `make a full copy of the database to a new litestream generation. This`,\n `improves restore time at the expense of bandwidth. Applications with a`,\n `large database and low write rate can increase this interval to reduce`,\n `network usage for backups (litestream defaults to 24 hours).`,\n ],\n },\n\n restoreParallelism: {\n type: v.number().default(48),\n desc: [\n `The number of WAL files to download in parallel when performing the`,\n `initial restore of the replica from the backup.`,\n ],\n },\n\n multipartConcurrency: {\n type: v.number().default(48),\n desc: [\n `The number of parts (of size {bold --litestream-multipart-size} bytes)`,\n `to upload or download in parallel when backing up or restoring the snapshot.`,\n ],\n },\n\n multipartSize: {\n type: v.number().default(16 * 1024 * 1024),\n desc: [\n `The size of each part when uploading or downloading the snapshot with`,\n `{bold --multipart-concurrency}. Note that up to {bold concurrency * size}`,\n `bytes of memory are used when backing up or restoring the snapshot.`,\n ],\n },\n },\n\n storageDBTmpDir: {\n type: v.string().optional(),\n desc: [\n `tmp directory for IVM operator storage. Leave unset to use os.tmpdir()`,\n ],\n },\n\n initialSync: {\n tableCopyWorkers: {\n type: v.number().default(5),\n desc: [\n `The number of parallel workers used to copy tables during initial sync.`,\n `Each worker uses a database connection and will buffer up to (approximately)`,\n `10 MB of table data in memory during initial sync. 
Increasing the number of`,\n `workers may improve initial sync speed; however, note that local disk throughput`,\n `(i.e. IOPS), upstream CPU, and network bandwidth may also be bottlenecks.`,\n ],\n },\n\n profileCopy: {\n type: v.boolean().optional(),\n hidden: true,\n desc: [\n `Takes a cpu profile during the copy phase initial-sync, storing it as a JSON file`,\n `initial-copy.cpuprofile in the tmp directory.`,\n ],\n },\n },\n\n /** @deprecated */\n targetClientRowCount: {\n type: v.number().default(20_000),\n deprecated: [\n 'This option is no longer used and will be removed in a future version.',\n 'The client-side cache no longer enforces a row limit. Instead, TTL-based expiration',\n 'automatically manages cache size to prevent unbounded growth.',\n ],\n hidden: true,\n },\n\n lazyStartup: {\n type: v.boolean().default(false),\n desc: [\n 'Delay starting the majority of zero-cache until first request.',\n '',\n 'This is mainly intended to avoid connecting to Postgres replication stream',\n 'until the first request is received, which can be useful i.e., for preview instances.',\n '',\n 'Currently only supported in single-node mode.',\n ],\n },\n\n serverVersion: {\n type: v.string().optional(),\n desc: [`The version string outputted to logs when the server starts up.`],\n },\n\n enableTelemetry: {\n type: v.boolean().default(true),\n desc: [\n `Set to false to opt out of telemetry collection.`,\n ``,\n `This helps us improve Zero by collecting anonymous usage data.`,\n `Setting the DO_NOT_TRACK environment variable also disables telemetry.`,\n ],\n },\n\n cloudEvent: {\n sinkEnv: {\n type: v.string().optional(),\n desc: [\n `ENV variable containing a URI to a CloudEvents sink. 
When set, ZeroEvents`,\n `will be published to the sink as the {bold data} field of CloudEvents.`,\n `The {bold source} field of the CloudEvents will be set to the {bold ZERO_TASK_ID},`,\n `along with any extension attributes specified by the {bold ZERO_CLOUD_EVENT_EXTENSION_OVERRIDES_ENV}.`,\n ``,\n `This configuration is modeled to easily integrate with a knative K_SINK binding,`,\n `(i.e. https://github.com/knative/eventing/blob/main/docs/spec/sources.md#sinkbinding).`,\n `However, any CloudEvents sink can be used.`,\n ],\n },\n\n extensionOverridesEnv: {\n type: v.string().optional(),\n desc: [\n `ENV variable containing a JSON stringified object with an {bold extensions} field`,\n `containing attributes that should be added or overridden on outbound CloudEvents.`,\n ``,\n `This configuration is modeled to easily integrate with a knative K_CE_OVERRIDES binding,`,\n `(i.e. https://github.com/knative/eventing/blob/main/docs/spec/sources.md#sinkbinding).`,\n ],\n },\n },\n};\n\nexport type ZeroConfig = Config<typeof zeroOptions>;\n\nlet loadedConfig: Config<typeof zeroOptions> | undefined;\n\nexport function getZeroConfig(\n opts: Omit<ParseOptions, 'envNamePrefix'> = {},\n): ZeroConfig {\n if (!loadedConfig || singleProcessMode()) {\n loadedConfig = parseOptions(zeroOptions, {\n envNamePrefix: ZERO_ENV_VAR_PREFIX,\n emitDeprecationWarnings: false, // overridden at the top level parse\n ...opts,\n });\n\n if (loadedConfig.queryHydrationStats) {\n runtimeDebugFlags.trackRowCountsVended = true;\n }\n }\n return loadedConfig;\n}\n\n/**\n * Same as {@link getZeroConfig}, with an additional check that the\n * config has already been normalized (i.e. by the top level server/runner).\n */\nexport function getNormalizedZeroConfig(\n opts: Omit<ParseOptions, 'envNamePrefix'> = {},\n): NormalizedZeroConfig {\n const config = getZeroConfig(opts);\n assertNormalized(config);\n return config;\n}\n\n/**\n * Gets the server version from the config if provided. 
Otherwise it gets it\n * from the Zero package.json.\n */\nexport function getServerVersion(\n config: Pick<ZeroConfig, 'serverVersion'> | undefined,\n): string {\n return config?.serverVersion ?? packageJson.version;\n}\n\nexport function isAdminPasswordValid(\n lc: LogContext,\n config: Pick<NormalizedZeroConfig, 'adminPassword'>,\n password: string | undefined,\n) {\n // If development mode, password is optional\n // We use process.env.NODE_ENV === 'development' as a sign that we're in\n // development mode, rather than a custom env var like ZERO_DEVELOPMENT_MODE,\n // because NODE_ENV is more standard and is already used by many tools.\n // Note that if NODE_ENV is not set, we assume production mode.\n\n if (!password && !config.adminPassword && isDevelopmentMode()) {\n warnOnce(\n lc,\n 'No admin password set; allowing access in development mode only',\n );\n return true;\n }\n\n if (!config.adminPassword) {\n lc.warn?.('No admin password set; denying access');\n return false;\n }\n\n // Use constant-time comparison to prevent timing attacks\n const passwordBuffer = Buffer.from(password ?? 
'');\n const configBuffer = Buffer.from(config.adminPassword);\n\n // Handle length mismatch in constant time\n if (passwordBuffer.length !== configBuffer.length) {\n // Perform dummy comparison to maintain constant timing\n timingSafeEqual(configBuffer, configBuffer);\n lc.warn?.('Invalid admin password');\n return false;\n }\n\n if (!timingSafeEqual(passwordBuffer, configBuffer)) {\n lc.warn?.('Invalid admin password');\n return false;\n }\n\n lc.debug?.('Admin password accepted');\n return true;\n}\n\nlet hasWarned = false;\n\nfunction warnOnce(lc: LogContext, msg: string) {\n if (!hasWarned) {\n lc.warn?.(msg);\n hasWarned = true;\n }\n}\n\n// For testing purposes - reset the warning state\nexport function resetWarnOnceState() {\n hasWarned = false;\n}\n"],"names":["v.string","v.array","v.number","v.boolean","v.literalUnion"],"mappings":";;;;;;;;;;;AA8BO,MAAM,sBAAsB;AAE5B,MAAM,aAAa;AAAA,EACxB,IAAI;AAAA,IACF,MAAMA,OACH,EACA,QAAQ,MAAM,EACd,OAAO,CAAA,OAAM,0BAA0B,KAAK,EAAE,GAAG,sBAAsB;AAAA,IAC1E,MAAM;AAAA,MACJ;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IAAA;AAAA,EACF;AAAA,EAGF,cAAc;AAAA,IACZ,MAAMC,MAAQD,OAAE,CAAQ,EAAE,SAAS,MAAM,CAAA,CAAE;AAAA,IAC3C,MAAM;AAAA,MACJ;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IAAA;AAAA,EACF;AAEJ;AAEO,MAAM,eAAe;AAAA,EAC1B,IAAI;AAAA,IACF,MAAMA,SAEH,OAAO,MAAM;AACZ,YAAM,IAAI;AAAA,QACR;AAAA;AAAA,MAAA;AAAA,IAGJ,CAAC,EACA,SAAA;AAAA,IACH,QAAQ;AAAA,EAAA;AAAA,EAGV,KAAK;AAAA,IACH,MAAME,OAAE,EAAS,QAAQ,CAAC;AAAA,IAC1B,MAAM;AAAA,MACJ;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IAAA;AAAA,IAEF,QAAQ;AAAA,EAAA;AAEZ;AAEA,MAAM,iBAAiB;AAAA,EACrB,MAAM;AAAA,IACJ,MAAMF,OAAE,EAAS,QAAQ,SAAS;AAAA,IAClC,MAAM;AAAA,MACJ;AAAA,MACA;AAAA,MACA;AAAA,IAAA;AAAA,EACF;AAAA,EAGF,qBAAqB;AAAA,IACnB,MAAME,OAAE,EAAS,SAAA;AAAA,IACjB,MAAM;AAAA,MACJ;AAAA,MACA;AAAA,MACA;AA
AA,MACA;AAAA,IAAA;AAAA,EACF;AAEJ;AAIA,MAAM,uBAAuB;AAAA,EAC3B,KAAK;AAAA,IACH,MAAMA,OAAE,EAAS,SAAA;AAAA,IACjB,MAAM;AAAA,MACJ;AAAA,MACA;AAAA,IAAA;AAAA,EACF;AAAA,EAEF,UAAU;AAAA,IACR,MAAMA,OAAE,EAAS,QAAQ,GAAM;AAAA,IAC/B,MAAM;AAAA,MACJ;AAAA,IAAA;AAAA,EACF;AAEJ;AAIA,MAAM,cAAc;AAAA,EAClB,KAAK;AAAA,IACH,MAAMF,OAAE,EAAS,SAAA;AAAA,IACjB,MAAM;AAAA,MACJ;AAAA,IAAA;AAAA,IAEF,YAAY;AAAA,MACV;AAAA,IAAA;AAAA,EACF;AAAA,EAEF,SAAS;AAAA,IACP,MAAMA,OAAE,EAAS,SAAA;AAAA,IACjB,MAAM;AAAA,MACJ;AAAA,IAAA;AAAA,IAEF,YAAY;AAAA,MACV;AAAA,IAAA;AAAA,EACF;AAAA,EAEF,QAAQ;AAAA,IACN,MAAMA,OAAE,EAAS,SAAA;AAAA,IACjB,MAAM;AAAA,MACJ;AAAA,IAAA;AAAA,IAEF,YAAY;AAAA,MACV;AAAA,IAAA;AAAA,EACF;AAAA,EAEF,QAAQ;AAAA,IACN,MAAMA,OAAE,EAAS,SAAA;AAAA,IACjB,MAAM;AAAA,MACJ;AAAA,MACA;AAAA,IAAA;AAAA,IAEF,YAAY;AAAA,MACV;AAAA,IAAA;AAAA,EACF;AAAA,EAEF,UAAU;AAAA,IACR,MAAMA,OAAE,EAAS,SAAA;AAAA,IACjB,MAAM;AAAA,MACJ;AAAA,MACA;AAAA,IAAA;AAAA,IAEF,YAAY;AAAA,MACV;AAAA,IAAA;AAAA,EACF;AAEJ;AAEA,MAAM,yBAAyB,CAAC,SAC9B,aAAa,UAAU,qBAAqB,IAAI,CAAC,iBAAiB,IAAI;AAExE,MAAM,0BAA0B,CAC9B,aACA,YACI;AAAA,EACJ,KAAK;AAAA,IACH,MAAMC,MAAQD,OAAE,CAAQ,EAAE,SAAA;AAAA;AAAA,IAC1B,MAAM;AAAA,MACJ,sDAAsD,MAAM;AAAA,MAC5D;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IAAA;AAAA,IAEF,GAAI,cACA,EAAC,YAAY,CAAC,uBAAuB,GAAG,WAAW,MAAM,CAAC,MAC1D,CAAA;AAAA,EAAC;AAAA,EAEP,QAAQ;AAAA,IACN,MAAMA,OAAE,EAAS,SAAA;AAAA,IACjB,MAAM;AAAA,MACJ;AAAA,IAAA;AAAA,IAEF,GAAI,cACA,EAAC,YAAY,CAAC,uBAAuB,GAAG,WAAW,UAAU,CAAC,MAC9D,CAAA;AAAA,EAAC;AAAA,EAEP,gBAAgB;AAAA,IACd,MAAMG,QAAE,EAAU,QAAQ,KAAK;AAAA,IAC/B,MAAM;AAAA,MACJ;AAAA,MACA;AAAA,MACA;AAAA,IAAA;AAAA,IAEF,GAAI,cACA,EAAC,YAAY,CAAC,uBAAuB,GAAG,WAAW,kBAAkB,CAAC,MACtE,CAAA;AAAA,EAAC;AAAA,EAEP,sBAAsB;AAAA,IACpB,MAAMF,M
AAQD,OAAE,CAAQ,EAAE,SAAA;AAAA,IAC1B,MAAM;AAAA,MACJ;AAAA,MACA,oEAAoE,WAAW,mBAAmB,SAAS,OAAO;AAAA,MAClH;AAAA,MACA;AAAA,MACA,iBAAiB,cAAc,YAAY,YAAA,IAAgB,WAAW,mBAAmB,WAAW,OAAO;AAAA,IAAA;AAAA,IAE7G,GAAI,cACA;AAAA,MACE,YAAY;AAAA,QACV,uBAAuB,GAAG,WAAW,yBAAyB;AAAA,MAAA;AAAA,IAChE,IAEF,CAAA;AAAA,EAAC;AAET;AAEA,MAAM,gBAAgB,wBAAwB,QAAW,gBAAgB;AACzE,MAAM,cAAc,wBAAwB,UAAU,gBAAgB;AACtE,MAAM,eAAe,wBAAwB,QAAW,qBAAqB;AAC7E,MAAM,oBAAoB;AAAA,EACxB;AAAA,EACA;AACF;AAQO,MAAM,cAAc;AAAA,EACzB,UAAU;AAAA,IACR,IAAI;AAAA,MACF,MAAMA,OAAE;AAAA,MACR,MAAM;AAAA,QACJ;AAAA,QACA;AAAA,MAAA;AAAA,IACF;AAAA,IAGF,MAAM;AAAA,MACJ,MAAMI,aAAe,MAAM,QAAQ,EAAE,QAAQ,IAAI;AAAA,MACjD,MAAM;AAAA,QACJ;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,MAAA;AAAA,MAEF,QAAQ;AAAA;AAAA,IAAA;AAAA,IAGV,UAAU;AAAA,MACR,MAAMF,OAAE,EAAS,QAAQ,EAAE;AAAA,MAC3B,MAAM;AAAA,QACJ;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,MAAA;AAAA,IACF;AAAA,IAGF,mBAAmB;AAAA,MACjB,MAAMA,OAAE,EAAS,SAAA;AAAA,MACjB,QAAQ;AAAA;AAAA,IAAA;AAAA,EACV;AAAA;AAAA,EAIF,MAAM;AAAA,EACN,QAAQ;AAAA;AAAA,EAER,YAAY;AAAA,EACZ,OAAO;AAAA,EAEP,KAAK;AAAA,IACH,IAAI;AAAA,MACF,MAAMF,OAAE,EAAS,SAAA;AAAA,MACjB,MAAM;AAAA,QACJ;AAAA,QACA;AAAA,QACA;AAAA,MAAA;AAAA,IACF;AAAA,IAGF,UAAU;AAAA,MACR,MAAME,OAAE,EAAS,QAAQ,EAAE;AAAA,MAC3B,MAAM;AAAA,QACJ;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,MAAA;AAAA,IACF;AAAA,IAGF,mBAAmB;AAAA,MACjB,MAAMA,OAAE,EAAS,SAAA;AAAA,MACjB,QAAQ;AAAA;AAAA,IAAA;AAAA,IAGV,2CAA2C;AAAA,MACzC,MAAMA,OAAE,EAAS,QAAQ,EAAE;AAAA,MAC3B,MAAM;AAAA,QACJ;AAAA,QACA;AAAA,QACA;AAAA,MAAA;AAAA,IACF;AAAA,IAGF,yCAAyC;AAAA,MACvC,MAAMA,OAAE,EAAS,QAAQ,EAAE;AAAA,MAC3B,MAAM;AAAA,QACJ;AAAA,QACA;AAAA,QACA;AAAA,MAAA;AAAA,IACF;AAAA,IAGF,mCAAmC;AAAA,MACjC,MAAMA,OAAE,EAAS,QAAQ,EAAE;AAAA,MAC3B,MAAM;AAAA,QACJ;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,MAAA;AAAA,IACF;AAAA,EACF;AAAA,EAGF,qBAAqB;AAAA,IACnB,MAAMC,QAAE,EAAU,SAAA;AAAA,IAClB,MAAM;AAAA,MACJ;AAAA,MACA;AAAA,MACA;AAAA,IAAA;AAAA,EACF;AAAA,EAGF,oBAAoB;AAAA,IAClB,MAAMA,QAAE,EAAU,QAAQ,IAAI;AAAA,IAC9B,MAAM;AAAA,MACJ;AAAA,MACA;AAAA,MACA;AAAA,MAC
A;AAAA,MACA;AAAA,MACA;AAAA,IAAA;AAAA,EACF;AAAA,EAGF,kBAAkB;AAAA,IAChB,MAAMD,OAAE,EAAS,QAAQ,EAAE;AAAA,IAC3B,MAAM;AAAA,MACJ;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IAAA;AAAA,EACF;AAAA,EAGF,QAAQ;AAAA,IACN,IAAI;AAAA,MACF,MAAMF,OAAE,EAAS,SAAA;AAAA,MACjB,MAAM;AAAA,QACJ;AAAA,QACA;AAAA,QACA;AAAA,MAAA;AAAA,IACF;AAAA,IAGF,UAAU;AAAA,MACR,MAAME,OAAE,EAAS,QAAQ,CAAC;AAAA,MAC1B,MAAM;AAAA,QACJ;AAAA,QACA;AAAA,QACA;AAAA,MAAA;AAAA,IACF;AAAA,EACF;AAAA,EAGF,SAAS;AAAA,EAET,KAAK;AAAA,EAEL,KAAK;AAAA,EAEL,OAAO;AAAA;AAAA,EAGP,MAAM;AAAA,EAEN,MAAM;AAAA,IACJ,MAAMA,OAAE,EAAS,QAAQ,IAAI;AAAA,IAC7B,MAAM,CAAC,gCAAgC;AAAA,EAAA;AAAA,EAGzC,gBAAgB;AAAA,IACd,KAAK;AAAA,MACH,MAAMF,OAAE,EAAS,SAAA;AAAA,MACjB,MAAM;AAAA,QACJ;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,MAAA;AAAA,IACF;AAAA,IAGF,MAAM;AAAA,MACJ,MAAMI,aAAe,aAAa,UAAU,EAAE,QAAQ,WAAW;AAAA,MACjE,MAAM;AAAA,QACJ;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,MAAA;AAAA,IACF;AAAA,IAGF,MAAM;AAAA,MACJ,MAAMF,OAAE,EAAS,SAAA;AAAA,MACjB,MAAM;AAAA,QACJ;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,MAAA;AAAA,IACF;AAAA;AAAA,IAIF,SAAS;AAAA,MACP,MAAMF,OAAE,EAAS,SAAA;AAAA,MACjB,YAAY;AAAA,QACV;AAAA,MAAA;AAAA,MAEF,QAAQ;AAAA,IAAA;AAAA;AAAA,IAIV,UAAU;AAAA,MACR,MAAMI,aAAe,MAAM,KAAK,EAAE,QAAQ,IAAI;AAAA,MAC9C,YAAY;AAAA,QACV;AAAA,MAAA;AAAA,MAEF,QAAQ;AAAA,IAAA;AAAA,IAGV,+BAA+B;AAAA,MAC7B,MAAMH,MAAQD,OAAE,CAAQ,EAAE,QAAQ,CAAC,GAAG,0BAA0B,CAAC;AAAA,MACjE,MAAM;AAAA,QACJ;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,MAAA;AAAA;AAAA;AAAA;AAAA,MAKF,QAAQ;AAAA,IAAA;AAAA,IAGV,gBAAgB;AAAA,MACd,MAAME,OAAE,EAAS,QAAQ,IAAK;AAAA,MAC9B,MAAM;AAAA,QACJ;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,MAAA;AAAA,IACF;AAAA,IAGF,iCAAiC;AAAA,MAC/B,MAAMA,OAAE,EAAS,QAAQ,IAAI;AAAA,MAC7B,MAAM;AAAA,QACJ;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,MAAA;AAAA,IACF;AAAA,IAGF,oCAAoC;AAAA,MAClC,MAAMA,OAAE,EAAS,QAAQ,CAAC;AAAA,MAC1B,MAAM;AAAA,QACJ;AAAA,QACA;AAAA,QACA;AAAA,QACA;
AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,MAAA;AAAA,IACF;AAAA,EACF;AAAA,EAGF,QAAQ;AAAA,IACN,MAAMF,OAAE,EAAS,SAAA;AAAA,IACjB,MAAM;AAAA,MACJ;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IAAA;AAAA,EACF;AAAA,EAGF;AAAA,EAEA,gBAAgB;AAAA,IACd,MAAME,OAAE,EAAS,SAAA;AAAA,IACjB,MAAM;AAAA,MACJ;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IAAA;AAAA,EACF;AAAA,EAGF,WAAW;AAAA,IACT,MAAMC,QAAE,EAAU,QAAQ,IAAI;AAAA,IAC9B,MAAM;AAAA,MACJ;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IAAA;AAAA,EACF;AAAA,EAGF,eAAe;AAAA,IACb,MAAMH,OAAE,EAAS,SAAA;AAAA,IACjB,MAAM;AAAA,MACJ;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IAAA;AAAA,EACF;AAAA,EAGF,sBAAsB;AAAA,IACpB,MAAMG,QAAE,EAAU,QAAQ,KAAK;AAAA,IAC/B,MAAM;AAAA,MACJ;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IAAA;AAAA,EACF;AAAA,EAGF,6BAA6B;AAAA,IAC3B,MAAMH,OAAE,EAAS,SAAA;AAAA,IACjB,MAAM;AAAA,MACJ;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IAAA;AAAA,EACF;AAAA,EAGF,0BAA0B;AAAA,IACxB,MAAME,OAAE,EAAS,QAAQ,KAAK,OAAO,IAAI;AAAA,IACzC,MAAM;AAAA,MACJ;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IAAA;AAAA,EACF;AAAA,EAGF,YAAY;AAAA,IACV,YAAY;AAAA,MACV,MAAMF,OAAE,EAAS,SAAA;AAAA,MACjB,MAAM,CAAC,2CAA2C;AAAA,IAAA;AAAA,IAGpD,YAAY;AAAA,MACV,MAAMA,OAAE,EAAS,QAAQ,sCAAsC;AAAA,MAC/D,MAAM;AAAA,QACJ;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,MAAA;AAAA,IACF;AAAA,IAGF,UAAU;AAAA,MACR,MAAMI,aAAe,SAAS,QAAQ,QAAQ,OAAO,EAAE,QAAQ,MAAM;AAAA,IAAA;AAAA,IAGvE,WAAW;AAAA,MACT,MAAMJ,OAAE,EAAS,SAAA;AAAA,MACjB,MAAM;AAAA,QACJ;AAAA,QACA;AAAA,QACA;AAAA,MAAA;AAAA,IACF;AAAA,IAGF,UAAU;AAAA,MACR,MAAMA,OAAE,EAAS,SAAA;AAAA,MACjB,MAAM;AAAA,QACJ;AAAA,QACA;AAAA,MAAA;AAAA,IACF;AAAA,IAGF,MAAM;AAAA,MACJ,MAAME,OAAE,EAAS,SAAA;AAAA,MACjB,MAAM;AAAA,QACJ;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,MAAA;AAAA,IACF;AAAA,IAGF,uBAAuB;AAAA,MACrB,MAAMA,OAAE,EAAS,QAAQ,EAAE;AAAA,MAC3B,MAAM;AAAA,QACJ;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,MAAA;AAAA,IACF;AAAA,IAGF,wBAAwB;AAAA,MACtB,MAAMA,OAAE,EAAS
,SAAA;AAAA,MACjB,MAAM;AAAA,QACJ;AAAA,QACA;AAAA,QACA;AAAA,MAAA;AAAA,IACF;AAAA,IAGF,wBAAwB;AAAA,MACtB,MAAMA,OAAE,EAAS,SAAA;AAAA,MACjB,MAAM;AAAA,QACJ;AAAA,QACA;AAAA,QACA;AAAA,MAAA;AAAA,IACF;AAAA,IAGF,kCAAkC;AAAA,MAChC,MAAMA,OAAE,EAAS,QAAQ,EAAE;AAAA,MAC3B,MAAM;AAAA,QACJ;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,MAAA;AAAA,IACF;AAAA,IAGF,6BAA6B;AAAA,MAC3B,MAAMA,OAAE,EAAS,QAAQ,EAAE;AAAA,MAC3B,MAAM;AAAA,QACJ;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,MAAA;AAAA,IACF;AAAA,IAGF,oBAAoB;AAAA,MAClB,MAAMA,OAAE,EAAS,QAAQ,EAAE;AAAA,MAC3B,MAAM;AAAA,QACJ;AAAA,QACA;AAAA,MAAA;AAAA,IACF;AAAA,IAGF,sBAAsB;AAAA,MACpB,MAAMA,OAAE,EAAS,QAAQ,EAAE;AAAA,MAC3B,MAAM;AAAA,QACJ;AAAA,QACA;AAAA,MAAA;AAAA,IACF;AAAA,IAGF,eAAe;AAAA,MACb,MAAMA,OAAE,EAAS,QAAQ,KAAK,OAAO,IAAI;AAAA,MACzC,MAAM;AAAA,QACJ;AAAA,QACA;AAAA,QACA;AAAA,MAAA;AAAA,IACF;AAAA,EACF;AAAA,EAGF,iBAAiB;AAAA,IACf,MAAMF,OAAE,EAAS,SAAA;AAAA,IACjB,MAAM;AAAA,MACJ;AAAA,IAAA;AAAA,EACF;AAAA,EAGF,aAAa;AAAA,IACX,kBAAkB;AAAA,MAChB,MAAME,OAAE,EAAS,QAAQ,CAAC;AAAA,MAC1B,MAAM;AAAA,QACJ;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,MAAA;AAAA,IACF;AAAA,IAGF,aAAa;AAAA,MACX,MAAMC,QAAE,EAAU,SAAA;AAAA,MAClB,QAAQ;AAAA,MACR,MAAM;AAAA,QACJ;AAAA,QACA;AAAA,MAAA;AAAA,IACF;AAAA,EACF;AAAA;AAAA,EAIF,sBAAsB;AAAA,IACpB,MAAMD,OAAE,EAAS,QAAQ,GAAM;AAAA,IAC/B,YAAY;AAAA,MACV;AAAA,MACA;AAAA,MACA;AAAA,IAAA;AAAA,IAEF,QAAQ;AAAA,EAAA;AAAA,EAGV,aAAa;AAAA,IACX,MAAMC,QAAE,EAAU,QAAQ,KAAK;AAAA,IAC/B,MAAM;AAAA,MACJ;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IAAA;AAAA,EACF;AAAA,EAGF,eAAe;AAAA,IACb,MAAMH,OAAE,EAAS,SAAA;AAAA,IACjB,MAAM,CAAC,iEAAiE;AAAA,EAAA;AAAA,EAG1E,iBAAiB;AAAA,IACf,MAAMG,QAAE,EAAU,QAAQ,IAAI;AAAA,IAC9B,MAAM;AAAA,MACJ;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IAAA;AAAA,EACF;AAAA,EAGF,YAAY;AAAA,IACV,SAAS;AAAA,MACP,MAAMH,OAAE,EAAS,SAAA;AAAA,MACjB,MAAM;AAAA,QACJ;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,MAAA;AAAA,IACF;AAAA,IAGF,uBAAuB;AAAA,MACrB,MAAMA,OAAE,EAAS,SAAA;AAAA,MACjB,MAAM;AAAA,QACJ;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,MAAA;AAAA,IA
CF;AAAA,EACF;AAEJ;AAIA,IAAI;AAEG,SAAS,cACd,OAA4C,IAChC;AACZ,MAAI,CAAC,gBAAgB,qBAAqB;AACxC,mBAAe,aAAa,aAAa;AAAA,MACvC,eAAe;AAAA,MACf,yBAAyB;AAAA;AAAA,MACzB,GAAG;AAAA,IAAA,CACJ;AAED,QAAI,aAAa,qBAAqB;AACpC,wBAAkB,uBAAuB;AAAA,IAC3C;AAAA,EACF;AACA,SAAO;AACT;AAMO,SAAS,wBACd,OAA4C,IACtB;AACtB,QAAM,SAAS,cAAc,IAAI;AACjC,mBAAiB,MAAM;AACvB,SAAO;AACT;AAMO,SAAS,iBACd,QACQ;AACR,SAAO,QAAQ,iBAAiB,YAAY;AAC9C;AAEO,SAAS,qBACd,IACA,QACA,UACA;AAOA,MAAI,CAAC,YAAY,CAAC,OAAO,iBAAiB,qBAAqB;AAC7D;AAAA,MACE;AAAA,MACA;AAAA,IAAA;AAEF,WAAO;AAAA,EACT;AAEA,MAAI,CAAC,OAAO,eAAe;AACzB,OAAG,OAAO,uCAAuC;AACjD,WAAO;AAAA,EACT;AAGA,QAAM,iBAAiB,OAAO,KAAK,YAAY,EAAE;AACjD,QAAM,eAAe,OAAO,KAAK,OAAO,aAAa;AAGrD,MAAI,eAAe,WAAW,aAAa,QAAQ;AAEjD,oBAAgB,cAAc,YAAY;AAC1C,OAAG,OAAO,wBAAwB;AAClC,WAAO;AAAA,EACT;AAEA,MAAI,CAAC,gBAAgB,gBAAgB,YAAY,GAAG;AAClD,OAAG,OAAO,wBAAwB;AAClC,WAAO;AAAA,EACT;AAEA,KAAG,QAAQ,yBAAyB;AACpC,SAAO;AACT;AAEA,IAAI,YAAY;AAEhB,SAAS,SAAS,IAAgB,KAAa;AAC7C,MAAI,CAAC,WAAW;AACd,OAAG,OAAO,GAAG;AACb,gBAAY;AAAA,EACd;AACF;"}
@@ -1 +1 @@
1
- {"version":3,"file":"transaction-pool.d.ts","sourceRoot":"","sources":["../../../../../zero-cache/src/db/transaction-pool.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,kBAAkB,CAAC;AAEjD,OAAO,KAAK,QAAQ,MAAM,UAAU,CAAC;AAErC,OAAO,KAAK,EAAC,IAAI,EAAC,MAAM,6BAA6B,CAAC;AAGtD,OAAO,EAAC,KAAK,UAAU,EAAE,KAAK,mBAAmB,EAAC,MAAM,gBAAgB,CAAC;AACzE,OAAO,KAAK,KAAK,IAAI,MAAM,gBAAgB,CAAC;AAG5C,KAAK,IAAI,GAAG,IAAI,CAAC,OAAO,IAAI,CAAC,CAAC;AAE9B,KAAK,YAAY,CAAC,CAAC,IAAI,OAAO,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC;AAEtC,MAAM,MAAM,SAAS,GACjB,QAAQ,CAAC,YAAY,CAAC,CAAC,QAAQ,CAAC,GAAG,GAAG,QAAQ,CAAC,QAAQ,CAAC,GAAG,CAAC,CAAC,EAAE,CAAC,GAChE,QAAQ,CAAC,YAAY,CAAC,QAAQ,CAAC,GAAG,EAAE,CAAC,CAAC;AAE1C;;;;;GAKG;AACH,MAAM,MAAM,IAAI,GAAG,CACjB,EAAE,EAAE,mBAAmB,EACvB,EAAE,EAAE,UAAU,KACX,YAAY,CAAC,SAAS,EAAE,CAAC,CAAC;AAE/B;;;;GAIG;AACH,MAAM,MAAM,QAAQ,CAAC,CAAC,IAAI,CACxB,EAAE,EAAE,mBAAmB,EACvB,EAAE,EAAE,UAAU,KACX,YAAY,CAAC,CAAC,CAAC,CAAC;AAErB;;;;;;;GAOG;AACH,qBAAa,eAAe;;IAkB1B;;;;;;;;;;;;;;;OAeG;gBAED,EAAE,EAAE,UAAU,EACd,IAAI,EAAE,IAAI,EACV,IAAI,CAAC,EAAE,IAAI,EACX,OAAO,CAAC,EAAE,IAAI,EACd,cAAc,SAAI,EAClB,UAAU,SAAiB,EAC3B,YAAY,eAAgB;IAkB9B;;;OAGG;IACH,GAAG,CAAC,EAAE,EAAE,UAAU,GAAG,IAAI;IASzB;;;;;;OAMG;IACH,iBAAiB,CAAC,GAAG,EAAE,MAAM,EAAE,KAAK,EAAE,MAAM;iCAIf,MAAM,SAAS,MAAM;;IAKlD;;;;;;;;;;;;;;;;;;;OAmBG;IACG,IAAI;IAqGV;;;;;;;;;;;OAWG;IACH,OAAO,CAAC,IAAI,EAAE,IAAI,GAAG,OAAO,CAAC,IAAI,CAAC;IA+DlC;;;;;OAKG;IACH,eAAe,CAAC,CAAC,EAAE,QAAQ,EAAE,QAAQ,CAAC,CAAC,CAAC,GAAG,OAAO,CAAC,CAAC,CAAC;IAsDrD;;;OAGG;IACH,KAAK;IAIL;;;OAGG;IACH,OAAO;IASP;;;;;;;;;;;;;;;;;OAiBG;IAEH,GAAG,CAAC,KAAK,SAAI;IAQb;;OAEG;IACH,KAAK,CAAC,KAAK,SAAI;IAYf,SAAS,IAAI,OAAO;IAIpB;;OAEG;IACH,IAAI,CAAC,GAAG,EAAE,OAAO;CAelB;AAED,KAAK,wBAAwB,GAAG;IAC9B;;;;;OAKG;IACH,cAAc,EAAE,IAAI,CAAC;IAErB;;;;;OAKG;IACH,aAAa,EAAE,IAAI,CAAC;IAEpB;;;;OAIG;IACH,WAAW,EAAE,IAAI,CAAC;IAElB,qCAAqC;IACrC,UAAU,EAAE,OAAO,CAAC,MAAM,CAAC,CAAC;CAC7B,CAAC;AAEF;;;;GAIG;AACH,wBAAgB,qBAAqB,IAAI,wBAAwB,CAoDhE;AAED;;;;GAIG;AACH,wBAAgB,cAAc,IAAI;IAChC,IAAI,EAAE,IAAI,CAAC;I
ACX,OAAO,EAAE,IAAI,CAAC;IACd,UAAU,EAAE,OAAO,CAAC,MAAM,CAAC,CAAC;CAC7B,CAyCA;AAED;;GAEG;AACH,wBAAgB,cAAc,CAAC,UAAU,EAAE,MAAM,GAAG;IAClD,IAAI,EAAE,IAAI,CAAC;IACX,QAAQ,EAAE,OAAO,CAAC,IAAI,CAAC,CAAC;CACzB,CAYA;AAED;;;;;GAKG;AACH,qBAAa,gBAAiB,SAAQ,KAAK;gBAC7B,KAAK,CAAC,EAAE,OAAO;CAI5B;AAoED,KAAK,WAAW,GAAG;IACjB,SAAS,EAAE,MAAM,CAAC;IAClB,IAAI,EAAE,IAAI,GAAG,MAAM,CAAC;CACrB,CAAC;AAEF,KAAK,YAAY,GAAG;IAClB,iBAAiB,EAAE,WAAW,CAAC;IAC/B,eAAe,EAAE,WAAW,CAAC;CAC9B,CAAC;AAGF,eAAO,MAAM,aAAa,EAAE,YAS3B,CAAC"}
1
+ {"version":3,"file":"transaction-pool.d.ts","sourceRoot":"","sources":["../../../../../zero-cache/src/db/transaction-pool.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,kBAAkB,CAAC;AAEjD,OAAO,KAAK,QAAQ,MAAM,UAAU,CAAC;AAErC,OAAO,KAAK,EAAC,IAAI,EAAC,MAAM,6BAA6B,CAAC;AAGtD,OAAO,EAAC,KAAK,UAAU,EAAE,KAAK,mBAAmB,EAAC,MAAM,gBAAgB,CAAC;AACzE,OAAO,KAAK,KAAK,IAAI,MAAM,gBAAgB,CAAC;AAG5C,KAAK,IAAI,GAAG,IAAI,CAAC,OAAO,IAAI,CAAC,CAAC;AAE9B,KAAK,YAAY,CAAC,CAAC,IAAI,OAAO,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC;AAEtC,MAAM,MAAM,SAAS,GACjB,QAAQ,CAAC,YAAY,CAAC,CAAC,QAAQ,CAAC,GAAG,GAAG,QAAQ,CAAC,QAAQ,CAAC,GAAG,CAAC,CAAC,EAAE,CAAC,GAChE,QAAQ,CAAC,YAAY,CAAC,QAAQ,CAAC,GAAG,EAAE,CAAC,CAAC;AAE1C;;;;;GAKG;AACH,MAAM,MAAM,IAAI,GAAG,CACjB,EAAE,EAAE,mBAAmB,EACvB,EAAE,EAAE,UAAU,KACX,YAAY,CAAC,SAAS,EAAE,CAAC,CAAC;AAE/B;;;;GAIG;AACH,MAAM,MAAM,QAAQ,CAAC,CAAC,IAAI,CACxB,EAAE,EAAE,mBAAmB,EACvB,EAAE,EAAE,UAAU,KACX,YAAY,CAAC,CAAC,CAAC,CAAC;AAErB;;;;;;;GAOG;AACH,qBAAa,eAAe;;IAkB1B;;;;;;;;;;;;;;;OAeG;gBAED,EAAE,EAAE,UAAU,EACd,IAAI,EAAE,IAAI,EACV,IAAI,CAAC,EAAE,IAAI,EACX,OAAO,CAAC,EAAE,IAAI,EACd,cAAc,SAAI,EAClB,UAAU,SAAiB,EAC3B,YAAY,eAAgB;IAkB9B;;;OAGG;IACH,GAAG,CAAC,EAAE,EAAE,UAAU,GAAG,IAAI;IASzB;;;;;;OAMG;IACH,iBAAiB,CAAC,GAAG,EAAE,MAAM,EAAE,KAAK,EAAE,MAAM;iCAIf,MAAM,SAAS,MAAM;;IAKlD;;;;;;;;;;;;;;;;;;;OAmBG;IACG,IAAI;IA0GV;;;;;;;;;;;OAWG;IACH,OAAO,CAAC,IAAI,EAAE,IAAI,GAAG,OAAO,CAAC,IAAI,CAAC;IA+DlC;;;;;OAKG;IACH,eAAe,CAAC,CAAC,EAAE,QAAQ,EAAE,QAAQ,CAAC,CAAC,CAAC,GAAG,OAAO,CAAC,CAAC,CAAC;IAsDrD;;;OAGG;IACH,KAAK;IAIL;;;OAGG;IACH,OAAO;IASP;;;;;;;;;;;;;;;;;OAiBG;IAEH,GAAG,CAAC,KAAK,SAAI;IAQb;;OAEG;IACH,KAAK,CAAC,KAAK,SAAI;IAYf,SAAS,IAAI,OAAO;IAIpB;;OAEG;IACH,IAAI,CAAC,GAAG,EAAE,OAAO;CAelB;AAED,KAAK,wBAAwB,GAAG;IAC9B;;;;;OAKG;IACH,cAAc,EAAE,IAAI,CAAC;IAErB;;;;;OAKG;IACH,aAAa,EAAE,IAAI,CAAC;IAEpB;;;;OAIG;IACH,WAAW,EAAE,IAAI,CAAC;IAElB,qCAAqC;IACrC,UAAU,EAAE,OAAO,CAAC,MAAM,CAAC,CAAC;CAC7B,CAAC;AAEF;;;;GAIG;AACH,wBAAgB,qBAAqB,IAAI,wBAAwB,CAoDhE;AAED;;;;GAIG;AACH,wBAAgB,cAAc,IAAI;IAChC,IAAI,EAAE,IAAI,CAAC;I
ACX,OAAO,EAAE,IAAI,CAAC;IACd,UAAU,EAAE,OAAO,CAAC,MAAM,CAAC,CAAC;CAC7B,CAyCA;AAED;;GAEG;AACH,wBAAgB,cAAc,CAAC,UAAU,EAAE,MAAM,GAAG;IAClD,IAAI,EAAE,IAAI,CAAC;IACX,QAAQ,EAAE,OAAO,CAAC,IAAI,CAAC,CAAC;CACzB,CAYA;AAED;;;;;GAKG;AACH,qBAAa,gBAAiB,SAAQ,KAAK;gBAC7B,KAAK,CAAC,EAAE,OAAO;CAI5B;AAgFD,KAAK,WAAW,GAAG;IACjB,SAAS,EAAE,MAAM,CAAC;IAClB,IAAI,EAAE,IAAI,GAAG,MAAM,CAAC;CACrB,CAAC;AAEF,KAAK,YAAY,GAAG;IAClB,iBAAiB,EAAE,WAAW,CAAC;IAC/B,eAAe,EAAE,WAAW,CAAC;CAC9B,CAAC;AAGF,eAAO,MAAM,aAAa,EAAE,YAS3B,CAAC"}
@@ -147,15 +147,16 @@ class TransactionPool {
147
147
  throw e;
148
148
  }
149
149
  };
150
- this.#workers.push(
151
- runTx(db, worker, { mode: this.#mode }).catch((e) => {
152
- if (e instanceof RollbackSignal) {
153
- lc.debug?.("aborted transaction");
154
- } else {
155
- throw e;
156
- }
157
- }).finally(() => this.#numWorkers--)
158
- );
150
+ const workerTx = runTx(db, worker, { mode: this.#mode }).catch((e) => {
151
+ if (e instanceof RollbackSignal) {
152
+ lc.debug?.("aborted transaction");
153
+ } else {
154
+ throw e;
155
+ }
156
+ }).finally(() => this.#numWorkers--);
157
+ workerTx.catch(() => {
158
+ });
159
+ this.#workers.push(workerTx);
159
160
  if (this.#done) {
160
161
  this.#tasks.enqueue("done");
161
162
  }
@@ -383,8 +384,13 @@ function ensureError(err) {
383
384
  return error;
384
385
  }
385
386
  const IDLE_TIMEOUT_MS = 5e3;
386
- const KEEPALIVE_TIMEOUT_MS = 6e4;
387
- const KEEPALIVE_TASK = (tx) => [tx`SELECT 1`.simple()];
387
+ const KEEPALIVE_TIMEOUT_MS = parseInt(
388
+ process.env.ZERO_TRANSACTION_POOL_KEEPALIVE_MS ?? "5000"
389
+ );
390
+ const KEEPALIVE_TASK = (tx, lc) => {
391
+ lc.debug?.(`sending tx keepalive`);
392
+ return [tx`SELECT 1`.simple()];
393
+ };
388
394
  const TIMEOUT_TASKS = {
389
395
  forInitialWorkers: {
390
396
  timeoutMs: KEEPALIVE_TIMEOUT_MS,
@@ -1 +1 @@
1
- {"version":3,"file":"transaction-pool.js","sources":["../../../../../zero-cache/src/db/transaction-pool.ts"],"sourcesContent":["import type {LogContext} from '@rocicorp/logger';\nimport {type Resolver, resolver} from '@rocicorp/resolver';\nimport type postgres from 'postgres';\nimport {assert} from '../../../shared/src/asserts.ts';\nimport type {Enum} from '../../../shared/src/enum.ts';\nimport {Queue} from '../../../shared/src/queue.ts';\nimport {promiseVoid} from '../../../shared/src/resolved-promises.ts';\nimport {type PostgresDB, type PostgresTransaction} from '../types/pg.ts';\nimport type * as Mode from './mode-enum.ts';\nimport {runTx} from './run-transaction.ts';\n\ntype Mode = Enum<typeof Mode>;\n\ntype MaybePromise<T> = Promise<T> | T;\n\nexport type Statement =\n | postgres.PendingQuery<(postgres.Row & Iterable<postgres.Row>)[]>\n | postgres.PendingQuery<postgres.Row[]>;\n\n/**\n * A {@link Task} is logic run from within a transaction in a {@link TransactionPool}.\n * It returns a list of `Statements` that the transaction executes asynchronously and\n * awaits when it receives the 'done' signal.\n *\n */\nexport type Task = (\n tx: PostgresTransaction,\n lc: LogContext,\n) => MaybePromise<Statement[]>;\n\n/**\n * A {@link ReadTask} is run from within a transaction, but unlike a {@link Task},\n * the results of a ReadTask are opaque to the TransactionPool and returned to the\n * caller of {@link TransactionPool.processReadTask}.\n */\nexport type ReadTask<T> = (\n tx: PostgresTransaction,\n lc: LogContext,\n) => MaybePromise<T>;\n\n/**\n * A TransactionPool is a pool of one or more {@link postgres.TransactionSql}\n * objects that participate in processing a dynamic queue of tasks.\n *\n * This can be used for serializing a set of tasks that arrive asynchronously\n * to a single transaction (for writing) or performing parallel reads across\n * multiple connections at the same snapshot (e.g. 
read only snapshot transactions).\n */\nexport class TransactionPool {\n #lc: LogContext;\n readonly #mode: Mode;\n readonly #init: TaskRunner | undefined;\n readonly #cleanup: TaskRunner | undefined;\n readonly #tasks = new Queue<TaskRunner | Error | 'done'>();\n readonly #workers: Promise<unknown>[] = [];\n readonly #initialWorkers: number;\n readonly #maxWorkers: number;\n readonly #timeoutTask: TimeoutTasks;\n #numWorkers: number;\n #numWorking = 0;\n #db: PostgresDB | undefined; // set when running. stored to allow adaptive pool sizing.\n\n #refCount = 1;\n #done = false;\n #failure: Error | undefined;\n\n /**\n * @param init A {@link Task} that is run in each Transaction before it begins\n * processing general tasks. This can be used to to set the transaction\n * mode, export/set snapshots, etc. This will be run even if\n * {@link fail} has been called on the pool.\n * @param cleanup A {@link Task} that is run in each Transaction before it closes.\n * This will be run even if {@link fail} has been called, or if a\n * preceding Task threw an Error.\n * @param initialWorkers The initial number of transaction workers to process tasks.\n * This is the steady state number of workers that will be kept\n * alive if the TransactionPool is long lived.\n * This must be greater than 0. Defaults to 1.\n * @param maxWorkers When specified, allows the pool to grow to `maxWorkers`. This\n * must be greater than or equal to `initialWorkers`. On-demand\n * workers will be shut down after an idle timeout of 5 seconds.\n */\n constructor(\n lc: LogContext,\n mode: Mode,\n init?: Task,\n cleanup?: Task,\n initialWorkers = 1,\n maxWorkers = initialWorkers,\n timeoutTasks = TIMEOUT_TASKS, // Overridden for tests.\n ) {\n assert(initialWorkers > 0, 'initialWorkers must be positive');\n assert(\n maxWorkers >= initialWorkers,\n 'maxWorkers must be >= initialWorkers',\n );\n\n this.#lc = lc;\n this.#mode = mode;\n this.#init = init ? 
this.#stmtRunner(init) : undefined;\n this.#cleanup = cleanup ? this.#stmtRunner(cleanup) : undefined;\n this.#initialWorkers = initialWorkers;\n this.#numWorkers = initialWorkers;\n this.#maxWorkers = maxWorkers;\n this.#timeoutTask = timeoutTasks;\n }\n\n /**\n * Starts the pool of workers to process Tasks with transactions opened from the\n * specified {@link db}.\n */\n run(db: PostgresDB): this {\n assert(!this.#db, 'already running');\n this.#db = db;\n for (let i = 0; i < this.#numWorkers; i++) {\n this.#addWorker(db);\n }\n return this;\n }\n\n /**\n * Adds context parameters to internal LogContext. This is useful for context values that\n * are not known when the TransactionPool is constructed (e.g. determined after a database\n * call when the pool is running).\n *\n * Returns an object that can be used to add more parameters.\n */\n addLoggingContext(key: string, value: string) {\n this.#lc = this.#lc.withContext(key, value);\n\n return {\n addLoggingContext: (key: string, value: string) =>\n this.addLoggingContext(key, value),\n };\n }\n\n /**\n * Returns a promise that:\n *\n * * resolves after {@link setDone} has been called (or the the pool as been {@link unref}ed\n * to a 0 ref count), once all added tasks have been processed and all transactions have been\n * committed or closed.\n *\n * * rejects if processing was aborted with {@link fail} or if processing any of\n * the tasks resulted in an error. All uncommitted transactions will have been\n * rolled back.\n *\n * Note that partial failures are possible if processing writes with multiple workers\n * (e.g. `setDone` is called, allowing some workers to commit, after which other\n * workers encounter errors). Using a TransactionPool in this manner does not make\n * sense in terms of transactional semantics, and is thus not recommended.\n *\n * For reads, however, multiple workers is useful for performing parallel reads\n * at the same snapshot. 
See {@link synchronizedSnapshots} for an example.\n * Resolves or rejects when all workers are done or failed.\n */\n async done() {\n const numWorkers = this.#workers.length;\n await Promise.all(this.#workers);\n\n if (numWorkers < this.#workers.length) {\n // If workers were added after the initial set, they must be awaited to ensure\n // that the results (i.e. rejections) of all workers are accounted for. This only\n // needs to be re-done once, because the fact that the first `await` completed\n // guarantees that the pool is in a terminal state and no new workers can be added.\n await Promise.all(this.#workers);\n }\n this.#lc.debug?.('transaction pool done');\n }\n\n #addWorker(db: PostgresDB) {\n const id = this.#workers.length + 1;\n const lc = this.#lc.withContext('tx', id);\n\n const tt: TimeoutTask =\n this.#workers.length < this.#initialWorkers\n ? this.#timeoutTask.forInitialWorkers\n : this.#timeoutTask.forExtraWorkers;\n const {timeoutMs} = tt;\n const timeoutTask = tt.task === 'done' ? 'done' : this.#stmtRunner(tt.task);\n\n const worker = async (tx: PostgresTransaction) => {\n const start = performance.now();\n try {\n lc.debug?.('started transaction');\n\n let last: Promise<void> = promiseVoid;\n\n const executeTask = async (runner: TaskRunner) => {\n runner !== this.#init && this.#numWorking++;\n const {pending} = await runner.run(tx, lc, () => {\n runner !== this.#init && this.#numWorking--;\n });\n last = pending ?? last;\n };\n\n let task: TaskRunner | Error | 'done' =\n this.#init ?? (await this.#tasks.dequeue(timeoutTask, timeoutMs));\n\n try {\n while (task !== 'done') {\n if (\n task instanceof Error ||\n (task !== this.#init && this.#failure)\n ) {\n throw this.#failure ?? 
task;\n }\n await executeTask(task);\n\n // await the next task.\n task = await this.#tasks.dequeue(timeoutTask, timeoutMs);\n }\n } finally {\n // Execute the cleanup task even on failure.\n if (this.#cleanup) {\n await executeTask(this.#cleanup);\n }\n }\n\n const elapsed = performance.now() - start;\n lc.debug?.(`closing transaction (${elapsed.toFixed(3)} ms)`);\n // Given the semantics of a Postgres transaction, the last statement\n // will only succeed if all of the preceding statements succeeded.\n return last;\n } catch (e) {\n if (e !== this.#failure) {\n this.fail(e); // A failure in any worker should fail the pool.\n }\n throw e;\n }\n };\n\n this.#workers.push(\n runTx(db, worker, {mode: this.#mode})\n .catch(e => {\n if (e instanceof RollbackSignal) {\n // A RollbackSignal is used to gracefully rollback the postgres.js\n // transaction block. It should not be thrown up to the application.\n lc.debug?.('aborted transaction');\n } else {\n throw e;\n }\n })\n .finally(() => this.#numWorkers--),\n );\n\n // After adding the worker, enqueue a terminal signal if we are in either of the\n // terminal states (both of which prevent more tasks from being enqueued), to ensure\n // that the added worker eventually exits.\n if (this.#done) {\n this.#tasks.enqueue('done');\n }\n if (this.#failure) {\n this.#tasks.enqueue(this.#failure);\n }\n }\n\n /**\n * Processes the statements produced by the specified {@link Task},\n * returning a Promise that resolves when the statements are either processed\n * by the database or rejected.\n *\n * Note that statement failures will result in failing the entire\n * TransactionPool (per transaction semantics). However, the returned Promise\n * itself will resolve rather than reject. As such, it is fine to ignore\n * returned Promises in order to pipeline requests to the database. It is\n * recommended to occasionally await them (e.g. 
after some threshold) in\n * order to avoid memory blowup in the case of database slowness.\n */\n process(task: Task): Promise<void> {\n const r = resolver<void>();\n this.#process(this.#stmtRunner(task, r));\n return r.promise;\n }\n\n readonly #start = performance.now();\n #stmts = 0;\n\n /**\n * Implements the semantics specified in {@link process()}.\n *\n * Specifically:\n * * `freeWorker()` is called as soon as the statements are produced,\n * allowing them to be pipelined to the database.\n * * Statement errors result in failing the transaction pool.\n * * The client-supplied Resolver resolves on success or failure;\n * it is never rejected.\n */\n #stmtRunner(task: Task, r: {resolve: () => void} = resolver()): TaskRunner {\n return {\n run: async (tx, lc, freeWorker) => {\n let stmts: Statement[];\n try {\n stmts = await task(tx, lc);\n } catch (e) {\n r.resolve();\n throw e;\n } finally {\n freeWorker();\n }\n\n if (stmts.length === 0) {\n r.resolve();\n return {pending: null};\n }\n\n // Execute the statements (i.e. send to the db) immediately.\n // The last result is returned for the worker to await before\n // closing the transaction.\n const last = stmts.reduce(\n (_, stmt) =>\n stmt\n .execute()\n .then(() => {\n if (++this.#stmts % 1000 === 0) {\n const log = this.#stmts % 10000 === 0 ? 'info' : 'debug';\n const q = stmt as unknown as Query;\n lc[log]?.(\n `executed ${this.#stmts}th statement (${(performance.now() - this.#start).toFixed(3)} ms)`,\n {statement: q.string},\n );\n }\n })\n .catch(e => this.fail(e)),\n promiseVoid,\n );\n return {pending: last.then(r.resolve)};\n },\n rejected: r.resolve,\n };\n }\n\n /**\n * Processes and returns the result of executing the {@link ReadTask} from\n * within the transaction. 
An error thrown by the task will result in\n * rejecting the returned Promise, but will not affect the transaction pool\n * itself.\n */\n processReadTask<T>(readTask: ReadTask<T>): Promise<T> {\n const r = resolver<T>();\n this.#process(this.#readRunner(readTask, r));\n return r.promise;\n }\n\n /**\n * Implements the semantics specified in {@link processReadTask()}.\n *\n * Specifically:\n * * `freeWorker()` is called as soon as the result is produced,\n * before resolving the client-supplied Resolver.\n * * Errors result in rejecting the client-supplied Resolver but\n * do not affect transaction pool.\n */\n #readRunner<T>(readTask: ReadTask<T>, r: Resolver<T>): TaskRunner {\n return {\n run: async (tx, lc, freeWorker) => {\n let result: T;\n try {\n result = await readTask(tx, lc);\n freeWorker();\n r.resolve(result);\n } catch (e) {\n freeWorker();\n r.reject(e);\n }\n return {pending: null};\n },\n rejected: r.reject,\n };\n }\n\n #process(runner: TaskRunner): void {\n assert(!this.#done, 'already set done');\n if (this.#failure) {\n runner.rejected(this.#failure);\n return;\n }\n\n this.#tasks.enqueue(runner);\n\n // Check if the pool size can and should be increased.\n if (this.#numWorkers < this.#maxWorkers) {\n const outstanding = this.#tasks.size();\n\n if (outstanding > this.#numWorkers - this.#numWorking) {\n this.#db && this.#addWorker(this.#db);\n this.#numWorkers++;\n this.#lc.debug?.(`Increased pool size to ${this.#numWorkers}`);\n }\n }\n }\n\n /**\n * Ends all workers with a ROLLBACK. Throws if the pool is already done\n * or aborted.\n */\n abort() {\n this.fail(new RollbackSignal());\n }\n\n /**\n * Signals to all workers to end their transaction once all pending tasks have\n * been completed. 
Throws if the pool is already done or aborted.\n */\n setDone() {\n assert(!this.#done, 'already set done');\n this.#done = true;\n\n for (let i = 0; i < this.#numWorkers; i++) {\n this.#tasks.enqueue('done');\n }\n }\n\n /**\n * An alternative to explicitly calling {@link setDone}, `ref()` increments an internal reference\n * count, and {@link unref} decrements it. When the reference count reaches 0, {@link setDone} is\n * automatically called. A TransactionPool is initialized with a reference count of 1.\n *\n * `ref()` should be called before sharing the pool with another component, and only after the\n * pool has been started with {@link run()}. It must not be called on a TransactionPool that is\n * already done (either via {@link unref()} or {@link setDone()}. (Doing so indicates a logical\n * error in the code.)\n *\n * It follows that:\n * * The creator of the TransactionPool is responsible for running it.\n * * The TransactionPool should be ref'ed before being sharing.\n * * The receiver of the TransactionPool is only responsible for unref'ing it.\n *\n * On the other hand, a transaction pool that fails with a runtime error can still be ref'ed;\n * attempts to use the pool will result in the runtime error as expected.\n */\n // TODO: Get rid of the ref-counting stuff. 
It's no longer needed.\n ref(count = 1) {\n assert(\n this.#db !== undefined && !this.#done,\n `Cannot ref() a TransactionPool that is not running`,\n );\n this.#refCount += count;\n }\n\n /**\n * Decrements the internal reference count, automatically invoking {@link setDone} when it reaches 0.\n */\n unref(count = 1) {\n assert(\n count <= this.#refCount,\n () => `Cannot unref ${count} when refCount is ${this.#refCount}`,\n );\n\n this.#refCount -= count;\n if (this.#refCount === 0) {\n this.setDone();\n }\n }\n\n isRunning(): boolean {\n return this.#db !== undefined && !this.#done && this.#failure === undefined;\n }\n\n /**\n * Signals all workers to fail their transactions with the given {@link err}.\n */\n fail(err: unknown) {\n if (!this.#failure) {\n this.#failure = ensureError(err); // Fail fast: this is checked in the worker loop.\n // Logged for informational purposes. It is the responsibility of\n // higher level logic to classify and handle the exception.\n const level =\n this.#failure instanceof ControlFlowError ? 'debug' : 'info';\n this.#lc[level]?.(this.#failure);\n\n for (let i = 0; i < this.#numWorkers; i++) {\n // Enqueue the Error to terminate any workers waiting for tasks.\n this.#tasks.enqueue(this.#failure);\n }\n }\n }\n}\n\ntype SynchronizeSnapshotTasks = {\n /**\n * The `init` Task for the TransactionPool from which the snapshot originates.\n * The pool must have Mode.SERIALIZABLE, and will be set to READ ONLY by the\n * `exportSnapshot` init task. If the TransactionPool has multiple workers, the\n * first worker will export a snapshot that the others set.\n */\n exportSnapshot: Task;\n\n /**\n * The `cleanup` Task for the TransactionPool from which the snapshot\n * originates. 
This Task will wait for the follower pool to `setSnapshot`\n * to ensure that the snapshot is successfully shared before the originating\n * transaction is closed.\n */\n cleanupExport: Task;\n\n /**\n * The `init` Task for the TransactionPool in which workers will\n * consequently see the same snapshot as that of the first pool. The pool\n * must have Mode.SERIALIZABLE, and will have the ability to perform writes.\n */\n setSnapshot: Task;\n\n /** The ID of the shared snapshot. */\n snapshotID: Promise<string>;\n};\n\n/**\n * Init Tasks for Postgres snapshot synchronization across transactions.\n *\n * https://www.postgresql.org/docs/9.3/functions-admin.html#:~:text=Snapshot%20Synchronization%20Functions,identical%20content%20in%20the%20database.\n */\nexport function synchronizedSnapshots(): SynchronizeSnapshotTasks {\n const {\n promise: snapshotExported,\n resolve: exportSnapshot,\n reject: failExport,\n } = resolver<string>();\n\n const {\n promise: snapshotCaptured,\n resolve: captureSnapshot,\n reject: failCapture,\n } = resolver<unknown>();\n\n // Set by the first worker to run its initTask, who becomes responsible for\n // exporting the snapshot. TODO: Plumb the workerNum and use that instead.\n let firstWorkerRun = false;\n\n // Note: Neither init task should `await`, as processing in each pool can proceed\n // as soon as the statements have been sent to the db. 
However, the `cleanupExport`\n // task must `await` the result of `setSnapshot` to ensure that exporting transaction\n // does not close before the snapshot has been captured.\n return {\n exportSnapshot: tx => {\n if (!firstWorkerRun) {\n firstWorkerRun = true;\n const stmt =\n tx`SELECT pg_export_snapshot() AS snapshot; SET TRANSACTION READ ONLY;`.simple();\n // Intercept the promise to propagate the information to `snapshotExported`.\n stmt.then(result => exportSnapshot(result[0].snapshot), failExport);\n return [stmt]; // Also return the stmt so that it gets awaited (and errors handled).\n }\n return snapshotExported.then(snapshotID => [\n tx.unsafe(`SET TRANSACTION SNAPSHOT '${snapshotID}'`),\n tx`SET TRANSACTION READ ONLY`.simple(),\n ]);\n },\n\n setSnapshot: tx =>\n snapshotExported.then(snapshotID => {\n const stmt = tx.unsafe(`SET TRANSACTION SNAPSHOT '${snapshotID}'`);\n // Intercept the promise to propagate the information to `cleanupExport`.\n stmt.then(captureSnapshot, failCapture);\n return [stmt];\n }),\n\n cleanupExport: async () => {\n await snapshotCaptured;\n return [];\n },\n\n snapshotID: snapshotExported,\n };\n}\n\n/**\n * Returns `init` and `cleanup` {@link Task}s for a TransactionPool that ensure its workers\n * share a single view of the database. 
This is used for View Notifier and View Syncer logic\n * that allows multiple entities to perform parallel reads on the same snapshot of the database.\n */\nexport function sharedSnapshot(): {\n init: Task;\n cleanup: Task;\n snapshotID: Promise<string>;\n} {\n const {\n promise: snapshotExported,\n resolve: exportSnapshot,\n reject: failExport,\n } = resolver<string>();\n\n // Set by the first worker to run its initTask, who becomes responsible for\n // exporting the snapshot.\n let firstWorkerRun = false;\n\n // Set when any worker is done, signalling that all non-sentinel Tasks have been\n // dequeued, and thus any subsequently spawned workers should skip their initTask\n // since the snapshot is no longer needed (and soon to become invalid).\n let firstWorkerDone = false;\n\n return {\n init: (tx, lc) => {\n if (!firstWorkerRun) {\n firstWorkerRun = true;\n const stmt = tx`SELECT pg_export_snapshot() AS snapshot;`.simple();\n // Intercept the promise to propagate the information to `snapshotExported`.\n stmt.then(result => exportSnapshot(result[0].snapshot), failExport);\n return [stmt]; // Also return the stmt so that it gets awaited (and errors handled).\n }\n if (!firstWorkerDone) {\n return snapshotExported.then(snapshotID => [\n tx.unsafe(`SET TRANSACTION SNAPSHOT '${snapshotID}'`),\n ]);\n }\n lc.debug?.('All work is done. 
No need to set snapshot');\n return [];\n },\n\n cleanup: () => {\n firstWorkerDone = true;\n return [];\n },\n\n snapshotID: snapshotExported,\n };\n}\n\n/**\n * @returns An `init` Task for importing a snapshot from another transaction.\n */\nexport function importSnapshot(snapshotID: string): {\n init: Task;\n imported: Promise<void>;\n} {\n const {promise: imported, resolve, reject} = resolver<void>();\n\n return {\n init: tx => {\n const stmt = tx.unsafe(`SET TRANSACTION SNAPSHOT '${snapshotID}'`);\n stmt.then(() => resolve(), reject);\n return [stmt];\n },\n\n imported,\n };\n}\n\n/**\n * A superclass of Errors used for control flow that is needed to handle\n * another Error but does not constitute an error condition itself (e.g.\n * aborting transactions after a previous one fails). Subclassing this Error\n * will result in lowering the log level from `error` to `debug`.\n */\nexport class ControlFlowError extends Error {\n constructor(cause?: unknown) {\n super();\n this.cause = cause;\n }\n}\n\n/**\n * Internal error used to rollback the worker transaction. This is used\n * instead of executing a `ROLLBACK` statement because the postgres.js\n * library will otherwise try to execute an extraneous `COMMIT`, which\n * results in outputting a \"no transaction in progress\" warning to the\n * database logs.\n *\n * Throwing an exception, on the other hand, executes the postgres.js\n * codepath that calls `ROLLBACK` instead.\n */\nclass RollbackSignal extends ControlFlowError {\n readonly name = 'RollbackSignal';\n readonly message = 'rolling back transaction';\n}\n\nfunction ensureError(err: unknown): Error {\n if (err instanceof Error) {\n return err;\n }\n const error = new Error();\n error.cause = err;\n return error;\n}\n\ninterface TaskRunner {\n /**\n * Manages the running of a Task or ReadTask in two phases:\n *\n * - If the task involves blocking, this is done in the worker. 
Once the\n * blocking is done, `freeWorker()` is invoked to signal that the worker\n * is available to run another task. Note that this should be invoked\n * *before* resolving the result to the calling thread so that a\n * subsequent task can reuse the same worker.\n *\n * - Task statements are executed on the database asynchronously. The final\n * result of this processing is encapsulated in the returned `pending`\n * Promise. The worker will await the last pending Promise before closing\n * the transaction.\n *\n * @param freeWorker should be called as soon as all blocking operations are\n * completed in order to return the transaction to the pool.\n * @returns A `pending` Promise indicating when the statements have been\n * processed by the database, allowing the transaction to be closed.\n * This should be `null` if there are no transaction-dependent\n * statements to await.\n */\n run(\n tx: PostgresTransaction,\n lc: LogContext,\n freeWorker: () => void,\n ): Promise<{pending: Promise<void> | null}>;\n\n /**\n * Invoked if the TransactionPool is already in a failed state when the task\n * is requested.\n */\n rejected(reason: unknown): void;\n}\n\n// TODO: Get rid of the timeout stuff. It's no longer needed.\nconst IDLE_TIMEOUT_MS = 5_000;\n\nconst KEEPALIVE_TIMEOUT_MS = 60_000;\n\nconst KEEPALIVE_TASK: Task = tx => [tx`SELECT 1`.simple()];\n\ntype TimeoutTask = {\n timeoutMs: number;\n task: Task | 'done';\n};\n\ntype TimeoutTasks = {\n forInitialWorkers: TimeoutTask;\n forExtraWorkers: TimeoutTask;\n};\n\n// Production timeout tasks. 
Overridden in tests.\nexport const TIMEOUT_TASKS: TimeoutTasks = {\n forInitialWorkers: {\n timeoutMs: KEEPALIVE_TIMEOUT_MS,\n task: KEEPALIVE_TASK,\n },\n forExtraWorkers: {\n timeoutMs: IDLE_TIMEOUT_MS,\n task: 'done',\n },\n};\n\n// The slice of information from the Query object in Postgres.js that gets logged for debugging.\n// https://github.com/porsager/postgres/blob/f58cd4f3affd3e8ce8f53e42799672d86cd2c70b/src/connection.js#L219\ntype Query = {string: string; parameters: object[]};\n"],"names":["key","value"],"mappings":";;;;;;AAgDO,MAAM,gBAAgB;AAAA,EAC3B;AAAA,EACS;AAAA,EACA;AAAA,EACA;AAAA,EACA,SAAS,IAAI,MAAA;AAAA,EACb,WAA+B,CAAA;AAAA,EAC/B;AAAA,EACA;AAAA,EACA;AAAA,EACT;AAAA,EACA,cAAc;AAAA,EACd;AAAA;AAAA,EAEA,YAAY;AAAA,EACZ,QAAQ;AAAA,EACR;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAkBA,YACE,IACA,MACA,MACA,SACA,iBAAiB,GACjB,aAAa,gBACb,eAAe,eACf;AACA,WAAO,iBAAiB,GAAG,iCAAiC;AAC5D;AAAA,MACE,cAAc;AAAA,MACd;AAAA,IAAA;AAGF,SAAK,MAAM;AACX,SAAK,QAAQ;AACb,SAAK,QAAQ,OAAO,KAAK,YAAY,IAAI,IAAI;AAC7C,SAAK,WAAW,UAAU,KAAK,YAAY,OAAO,IAAI;AACtD,SAAK,kBAAkB;AACvB,SAAK,cAAc;AACnB,SAAK,cAAc;AACnB,SAAK,eAAe;AAAA,EACtB;AAAA;AAAA;AAAA;AAAA;AAAA,EAMA,IAAI,IAAsB;AACxB,WAAO,CAAC,KAAK,KAAK,iBAAiB;AACnC,SAAK,MAAM;AACX,aAAS,IAAI,GAAG,IAAI,KAAK,aAAa,KAAK;AACzC,WAAK,WAAW,EAAE;AAAA,IACpB;AACA,WAAO;AAAA,EACT;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EASA,kBAAkB,KAAa,OAAe;AAC5C,SAAK,MAAM,KAAK,IAAI,YAAY,KAAK,KAAK;AAE1C,WAAO;AAAA,MACL,mBAAmB,CAACA,MAAaC,WAC/B,KAAK,kBAAkBD,MAAKC,MAAK;AAAA,IAAA;AAAA,EAEvC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAsBA,MAAM,OAAO;AACX,UAAM,aAAa,KAAK,SAAS;AACjC,UAAM,QAAQ,IAAI,KAAK,QAAQ;AAE/B,QAAI,aAAa,KAAK,SAAS,QAAQ;AAKrC,YAAM,QAAQ,IAAI,KAAK,QAAQ;AAAA,IACjC;AACA,SAAK,IAAI,QAAQ,uBAAuB;AAAA,EAC1C;AAAA,EAEA,WAAW,IAAgB;AACzB,UAAM,KAAK,KAAK,SAAS,SAAS;AAClC,UAAM,KAAK,KAAK,IAAI,YAAY,MAAM,EAAE;AAExC,UAAM,KACJ,KAAK,SAAS,SAAS,KAAK,kBACxB,KAAK,aAAa,oBAClB,KAAK,aAAa;AACxB,UAA
M,EAAC,cAAa;AACpB,UAAM,cAAc,GAAG,SAAS,SAAS,SAAS,KAAK,YAAY,GAAG,IAAI;AAE1E,UAAM,SAAS,OAAO,OAA4B;AAChD,YAAM,QAAQ,YAAY,IAAA;AAC1B,UAAI;AACF,WAAG,QAAQ,qBAAqB;AAEhC,YAAI,OAAsB;AAE1B,cAAM,cAAc,OAAO,WAAuB;AAChD,qBAAW,KAAK,SAAS,KAAK;AAC9B,gBAAM,EAAC,YAAW,MAAM,OAAO,IAAI,IAAI,IAAI,MAAM;AAC/C,uBAAW,KAAK,SAAS,KAAK;AAAA,UAChC,CAAC;AACD,iBAAO,WAAW;AAAA,QACpB;AAEA,YAAI,OACF,KAAK,SAAU,MAAM,KAAK,OAAO,QAAQ,aAAa,SAAS;AAEjE,YAAI;AACF,iBAAO,SAAS,QAAQ;AACtB,gBACE,gBAAgB,SACf,SAAS,KAAK,SAAS,KAAK,UAC7B;AACA,oBAAM,KAAK,YAAY;AAAA,YACzB;AACA,kBAAM,YAAY,IAAI;AAGtB,mBAAO,MAAM,KAAK,OAAO,QAAQ,aAAa,SAAS;AAAA,UACzD;AAAA,QACF,UAAA;AAEE,cAAI,KAAK,UAAU;AACjB,kBAAM,YAAY,KAAK,QAAQ;AAAA,UACjC;AAAA,QACF;AAEA,cAAM,UAAU,YAAY,IAAA,IAAQ;AACpC,WAAG,QAAQ,wBAAwB,QAAQ,QAAQ,CAAC,CAAC,MAAM;AAG3D,eAAO;AAAA,MACT,SAAS,GAAG;AACV,YAAI,MAAM,KAAK,UAAU;AACvB,eAAK,KAAK,CAAC;AAAA,QACb;AACA,cAAM;AAAA,MACR;AAAA,IACF;AAEA,SAAK,SAAS;AAAA,MACZ,MAAM,IAAI,QAAQ,EAAC,MAAM,KAAK,OAAM,EACjC,MAAM,CAAA,MAAK;AACV,YAAI,aAAa,gBAAgB;AAG/B,aAAG,QAAQ,qBAAqB;AAAA,QAClC,OAAO;AACL,gBAAM;AAAA,QACR;AAAA,MACF,CAAC,EACA,QAAQ,MAAM,KAAK,aAAa;AAAA,IAAA;AAMrC,QAAI,KAAK,OAAO;AACd,WAAK,OAAO,QAAQ,MAAM;AAAA,IAC5B;AACA,QAAI,KAAK,UAAU;AACjB,WAAK,OAAO,QAAQ,KAAK,QAAQ;AAAA,IACnC;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAcA,QAAQ,MAA2B;AACjC,UAAM,IAAI,SAAA;AACV,SAAK,SAAS,KAAK,YAAY,MAAM,CAAC,CAAC;AACvC,WAAO,EAAE;AAAA,EACX;AAAA,EAES,SAAS,YAAY,IAAA;AAAA,EAC9B,SAAS;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAYT,YAAY,MAAY,IAA2B,YAAwB;AACzE,WAAO;AAAA,MACL,KAAK,OAAO,IAAI,IAAI,eAAe;AACjC,YAAI;AACJ,YAAI;AACF,kBAAQ,MAAM,KAAK,IAAI,EAAE;AAAA,QAC3B,SAAS,GAAG;AACV,YAAE,QAAA;AACF,gBAAM;AAAA,QACR,UAAA;AACE,qBAAA;AAAA,QACF;AAEA,YAAI,MAAM,WAAW,GAAG;AACtB,YAAE,QAAA;AACF,iBAAO,EAAC,SAAS,KAAA;AAAA,QACnB;AAKA,cAAM,OAAO,MAAM;AAAA,UACjB,CAAC,GAAG,SACF,KACG,QAAA,EACA,KAAK,MAAM;AACV,gBAAI,EAAE,KAAK,SAAS,QAAS,GAAG;AAC9B,oBAAM,MAAM,KAAK,SAAS,QAAU,IAAI,SAAS;AACjD,oBAAM,IAAI;AACV,iBAAG,GAAG;AAAA,gBACJ,YAAY,KAAK,MAAM,kBAAkB,YAAY,IAAA,IAAQ,KAAK,QAAQ,
QAAQ,CAAC,CAAC;AAAA,gBACpF,EAAC,WAAW,EAAE,OAAA;AAAA,cAAM;AAAA,YAExB;AAAA,UACF,CAAC,EACA,MAAM,OAAK,KAAK,KAAK,CAAC,CAAC;AAAA,UAC5B;AAAA,QAAA;AAEF,eAAO,EAAC,SAAS,KAAK,KAAK,EAAE,OAAO,EAAA;AAAA,MACtC;AAAA,MACA,UAAU,EAAE;AAAA,IAAA;AAAA,EAEhB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQA,gBAAmB,UAAmC;AACpD,UAAM,IAAI,SAAA;AACV,SAAK,SAAS,KAAK,YAAY,UAAU,CAAC,CAAC;AAC3C,WAAO,EAAE;AAAA,EACX;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAWA,YAAe,UAAuB,GAA4B;AAChE,WAAO;AAAA,MACL,KAAK,OAAO,IAAI,IAAI,eAAe;AACjC,YAAI;AACJ,YAAI;AACF,mBAAS,MAAM,SAAS,IAAI,EAAE;AAC9B,qBAAA;AACA,YAAE,QAAQ,MAAM;AAAA,QAClB,SAAS,GAAG;AACV,qBAAA;AACA,YAAE,OAAO,CAAC;AAAA,QACZ;AACA,eAAO,EAAC,SAAS,KAAA;AAAA,MACnB;AAAA,MACA,UAAU,EAAE;AAAA,IAAA;AAAA,EAEhB;AAAA,EAEA,SAAS,QAA0B;AACjC,WAAO,CAAC,KAAK,OAAO,kBAAkB;AACtC,QAAI,KAAK,UAAU;AACjB,aAAO,SAAS,KAAK,QAAQ;AAC7B;AAAA,IACF;AAEA,SAAK,OAAO,QAAQ,MAAM;AAG1B,QAAI,KAAK,cAAc,KAAK,aAAa;AACvC,YAAM,cAAc,KAAK,OAAO,KAAA;AAEhC,UAAI,cAAc,KAAK,cAAc,KAAK,aAAa;AACrD,aAAK,OAAO,KAAK,WAAW,KAAK,GAAG;AACpC,aAAK;AACL,aAAK,IAAI,QAAQ,0BAA0B,KAAK,WAAW,EAAE;AAAA,MAC/D;AAAA,IACF;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA,EAMA,QAAQ;AACN,SAAK,KAAK,IAAI,gBAAgB;AAAA,EAChC;AAAA;AAAA;AAAA;AAAA;AAAA,EAMA,UAAU;AACR,WAAO,CAAC,KAAK,OAAO,kBAAkB;AACtC,SAAK,QAAQ;AAEb,aAAS,IAAI,GAAG,IAAI,KAAK,aAAa,KAAK;AACzC,WAAK,OAAO,QAAQ,MAAM;AAAA,IAC5B;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAqBA,IAAI,QAAQ,GAAG;AACb;AAAA,MACE,KAAK,QAAQ,UAAa,CAAC,KAAK;AAAA,MAChC;AAAA,IAAA;AAEF,SAAK,aAAa;AAAA,EACpB;AAAA;AAAA;AAAA;AAAA,EAKA,MAAM,QAAQ,GAAG;AACf;AAAA,MACE,SAAS,KAAK;AAAA,MACd,MAAM,gBAAgB,KAAK,qBAAqB,KAAK,SAAS;AAAA,IAAA;AAGhE,SAAK,aAAa;AAClB,QAAI,KAAK,cAAc,GAAG;AACxB,WAAK,QAAA;AAAA,IACP;AAAA,EACF;AAAA,EAEA,YAAqB;AACnB,WAAO,KAAK,QAAQ,UAAa,CAAC,KAAK,SAAS,KAAK,aAAa;AAAA,EACpE;AAAA;AAAA;AAAA;AAAA,EAKA,KAAK,KAAc;AACjB,QAAI,CAAC,KAAK,UAAU;AAClB,WAAK,WAAW,YAAY,GAAG;AAG/B,YAAM,QACJ,KAAK,oBAAoB,mBAAmB,UAAU;AACxD,WAAK,IAAI,KAAK,IAAI,KAAK,QAAQ;AAE/B,eAAS,IAAI,GAAG,IAAI,KAAK,aAA
a,KAAK;AAEzC,aAAK,OAAO,QAAQ,KAAK,QAAQ;AAAA,MACnC;AAAA,IACF;AAAA,EACF;AACF;AAgJO,SAAS,eAAe,YAG7B;AACA,QAAM,EAAC,SAAS,UAAU,SAAS,OAAA,IAAU,SAAA;AAE7C,SAAO;AAAA,IACL,MAAM,CAAA,OAAM;AACV,YAAM,OAAO,GAAG,OAAO,6BAA6B,UAAU,GAAG;AACjE,WAAK,KAAK,MAAM,QAAA,GAAW,MAAM;AACjC,aAAO,CAAC,IAAI;AAAA,IACd;AAAA,IAEA;AAAA,EAAA;AAEJ;AAQO,MAAM,yBAAyB,MAAM;AAAA,EAC1C,YAAY,OAAiB;AAC3B,UAAA;AACA,SAAK,QAAQ;AAAA,EACf;AACF;AAYA,MAAM,uBAAuB,iBAAiB;AAAA,EACnC,OAAO;AAAA,EACP,UAAU;AACrB;AAEA,SAAS,YAAY,KAAqB;AACxC,MAAI,eAAe,OAAO;AACxB,WAAO;AAAA,EACT;AACA,QAAM,QAAQ,IAAI,MAAA;AAClB,QAAM,QAAQ;AACd,SAAO;AACT;AAsCA,MAAM,kBAAkB;AAExB,MAAM,uBAAuB;AAE7B,MAAM,iBAAuB,CAAA,OAAM,CAAC,aAAa,QAAQ;AAalD,MAAM,gBAA8B;AAAA,EACzC,mBAAmB;AAAA,IACjB,WAAW;AAAA,IACX,MAAM;AAAA,EAAA;AAAA,EAER,iBAAiB;AAAA,IACf,WAAW;AAAA,IACX,MAAM;AAAA,EAAA;AAEV;"}
1
+ {"version":3,"file":"transaction-pool.js","sources":["../../../../../zero-cache/src/db/transaction-pool.ts"],"sourcesContent":["import type {LogContext} from '@rocicorp/logger';\nimport {type Resolver, resolver} from '@rocicorp/resolver';\nimport type postgres from 'postgres';\nimport {assert} from '../../../shared/src/asserts.ts';\nimport type {Enum} from '../../../shared/src/enum.ts';\nimport {Queue} from '../../../shared/src/queue.ts';\nimport {promiseVoid} from '../../../shared/src/resolved-promises.ts';\nimport {type PostgresDB, type PostgresTransaction} from '../types/pg.ts';\nimport type * as Mode from './mode-enum.ts';\nimport {runTx} from './run-transaction.ts';\n\ntype Mode = Enum<typeof Mode>;\n\ntype MaybePromise<T> = Promise<T> | T;\n\nexport type Statement =\n | postgres.PendingQuery<(postgres.Row & Iterable<postgres.Row>)[]>\n | postgres.PendingQuery<postgres.Row[]>;\n\n/**\n * A {@link Task} is logic run from within a transaction in a {@link TransactionPool}.\n * It returns a list of `Statements` that the transaction executes asynchronously and\n * awaits when it receives the 'done' signal.\n *\n */\nexport type Task = (\n tx: PostgresTransaction,\n lc: LogContext,\n) => MaybePromise<Statement[]>;\n\n/**\n * A {@link ReadTask} is run from within a transaction, but unlike a {@link Task},\n * the results of a ReadTask are opaque to the TransactionPool and returned to the\n * caller of {@link TransactionPool.processReadTask}.\n */\nexport type ReadTask<T> = (\n tx: PostgresTransaction,\n lc: LogContext,\n) => MaybePromise<T>;\n\n/**\n * A TransactionPool is a pool of one or more {@link postgres.TransactionSql}\n * objects that participate in processing a dynamic queue of tasks.\n *\n * This can be used for serializing a set of tasks that arrive asynchronously\n * to a single transaction (for writing) or performing parallel reads across\n * multiple connections at the same snapshot (e.g. 
read only snapshot transactions).\n */\nexport class TransactionPool {\n #lc: LogContext;\n readonly #mode: Mode;\n readonly #init: TaskRunner | undefined;\n readonly #cleanup: TaskRunner | undefined;\n readonly #tasks = new Queue<TaskRunner | Error | 'done'>();\n readonly #workers: Promise<unknown>[] = [];\n readonly #initialWorkers: number;\n readonly #maxWorkers: number;\n readonly #timeoutTask: TimeoutTasks;\n #numWorkers: number;\n #numWorking = 0;\n #db: PostgresDB | undefined; // set when running. stored to allow adaptive pool sizing.\n\n #refCount = 1;\n #done = false;\n #failure: Error | undefined;\n\n /**\n * @param init A {@link Task} that is run in each Transaction before it begins\n * processing general tasks. This can be used to to set the transaction\n * mode, export/set snapshots, etc. This will be run even if\n * {@link fail} has been called on the pool.\n * @param cleanup A {@link Task} that is run in each Transaction before it closes.\n * This will be run even if {@link fail} has been called, or if a\n * preceding Task threw an Error.\n * @param initialWorkers The initial number of transaction workers to process tasks.\n * This is the steady state number of workers that will be kept\n * alive if the TransactionPool is long lived.\n * This must be greater than 0. Defaults to 1.\n * @param maxWorkers When specified, allows the pool to grow to `maxWorkers`. This\n * must be greater than or equal to `initialWorkers`. On-demand\n * workers will be shut down after an idle timeout of 5 seconds.\n */\n constructor(\n lc: LogContext,\n mode: Mode,\n init?: Task,\n cleanup?: Task,\n initialWorkers = 1,\n maxWorkers = initialWorkers,\n timeoutTasks = TIMEOUT_TASKS, // Overridden for tests.\n ) {\n assert(initialWorkers > 0, 'initialWorkers must be positive');\n assert(\n maxWorkers >= initialWorkers,\n 'maxWorkers must be >= initialWorkers',\n );\n\n this.#lc = lc;\n this.#mode = mode;\n this.#init = init ? 
this.#stmtRunner(init) : undefined;\n this.#cleanup = cleanup ? this.#stmtRunner(cleanup) : undefined;\n this.#initialWorkers = initialWorkers;\n this.#numWorkers = initialWorkers;\n this.#maxWorkers = maxWorkers;\n this.#timeoutTask = timeoutTasks;\n }\n\n /**\n * Starts the pool of workers to process Tasks with transactions opened from the\n * specified {@link db}.\n */\n run(db: PostgresDB): this {\n assert(!this.#db, 'already running');\n this.#db = db;\n for (let i = 0; i < this.#numWorkers; i++) {\n this.#addWorker(db);\n }\n return this;\n }\n\n /**\n * Adds context parameters to internal LogContext. This is useful for context values that\n * are not known when the TransactionPool is constructed (e.g. determined after a database\n * call when the pool is running).\n *\n * Returns an object that can be used to add more parameters.\n */\n addLoggingContext(key: string, value: string) {\n this.#lc = this.#lc.withContext(key, value);\n\n return {\n addLoggingContext: (key: string, value: string) =>\n this.addLoggingContext(key, value),\n };\n }\n\n /**\n * Returns a promise that:\n *\n * * resolves after {@link setDone} has been called (or the the pool as been {@link unref}ed\n * to a 0 ref count), once all added tasks have been processed and all transactions have been\n * committed or closed.\n *\n * * rejects if processing was aborted with {@link fail} or if processing any of\n * the tasks resulted in an error. All uncommitted transactions will have been\n * rolled back.\n *\n * Note that partial failures are possible if processing writes with multiple workers\n * (e.g. `setDone` is called, allowing some workers to commit, after which other\n * workers encounter errors). Using a TransactionPool in this manner does not make\n * sense in terms of transactional semantics, and is thus not recommended.\n *\n * For reads, however, multiple workers is useful for performing parallel reads\n * at the same snapshot. 
See {@link synchronizedSnapshots} for an example.\n * Resolves or rejects when all workers are done or failed.\n */\n async done() {\n const numWorkers = this.#workers.length;\n await Promise.all(this.#workers);\n\n if (numWorkers < this.#workers.length) {\n // If workers were added after the initial set, they must be awaited to ensure\n // that the results (i.e. rejections) of all workers are accounted for. This only\n // needs to be re-done once, because the fact that the first `await` completed\n // guarantees that the pool is in a terminal state and no new workers can be added.\n await Promise.all(this.#workers);\n }\n this.#lc.debug?.('transaction pool done');\n }\n\n #addWorker(db: PostgresDB) {\n const id = this.#workers.length + 1;\n const lc = this.#lc.withContext('tx', id);\n\n const tt: TimeoutTask =\n this.#workers.length < this.#initialWorkers\n ? this.#timeoutTask.forInitialWorkers\n : this.#timeoutTask.forExtraWorkers;\n const {timeoutMs} = tt;\n const timeoutTask = tt.task === 'done' ? 'done' : this.#stmtRunner(tt.task);\n\n const worker = async (tx: PostgresTransaction) => {\n const start = performance.now();\n try {\n lc.debug?.('started transaction');\n\n let last: Promise<void> = promiseVoid;\n\n const executeTask = async (runner: TaskRunner) => {\n runner !== this.#init && this.#numWorking++;\n const {pending} = await runner.run(tx, lc, () => {\n runner !== this.#init && this.#numWorking--;\n });\n last = pending ?? last;\n };\n\n let task: TaskRunner | Error | 'done' =\n this.#init ?? (await this.#tasks.dequeue(timeoutTask, timeoutMs));\n\n try {\n while (task !== 'done') {\n if (\n task instanceof Error ||\n (task !== this.#init && this.#failure)\n ) {\n throw this.#failure ?? 
task;\n }\n await executeTask(task);\n\n // await the next task.\n task = await this.#tasks.dequeue(timeoutTask, timeoutMs);\n }\n } finally {\n // Execute the cleanup task even on failure.\n if (this.#cleanup) {\n await executeTask(this.#cleanup);\n }\n }\n\n const elapsed = performance.now() - start;\n lc.debug?.(`closing transaction (${elapsed.toFixed(3)} ms)`);\n // Given the semantics of a Postgres transaction, the last statement\n // will only succeed if all of the preceding statements succeeded.\n return last;\n } catch (e) {\n if (e !== this.#failure) {\n this.fail(e); // A failure in any worker should fail the pool.\n }\n throw e;\n }\n };\n\n const workerTx = runTx(db, worker, {mode: this.#mode})\n .catch(e => {\n if (e instanceof RollbackSignal) {\n // A RollbackSignal is used to gracefully rollback the postgres.js\n // transaction block. It should not be thrown up to the application.\n lc.debug?.('aborted transaction');\n } else {\n throw e;\n }\n })\n .finally(() => this.#numWorkers--);\n\n // Attach a rejection handler immediately to prevent unhandledRejections.\n // The application will handle errors when it awaits processReadTask()\n // or done().\n workerTx.catch(() => {});\n\n this.#workers.push(workerTx);\n\n // After adding the worker, enqueue a terminal signal if we are in either of the\n // terminal states (both of which prevent more tasks from being enqueued), to ensure\n // that the added worker eventually exits.\n if (this.#done) {\n this.#tasks.enqueue('done');\n }\n if (this.#failure) {\n this.#tasks.enqueue(this.#failure);\n }\n }\n\n /**\n * Processes the statements produced by the specified {@link Task},\n * returning a Promise that resolves when the statements are either processed\n * by the database or rejected.\n *\n * Note that statement failures will result in failing the entire\n * TransactionPool (per transaction semantics). However, the returned Promise\n * itself will resolve rather than reject. 
As such, it is fine to ignore\n * returned Promises in order to pipeline requests to the database. It is\n * recommended to occasionally await them (e.g. after some threshold) in\n * order to avoid memory blowup in the case of database slowness.\n */\n process(task: Task): Promise<void> {\n const r = resolver<void>();\n this.#process(this.#stmtRunner(task, r));\n return r.promise;\n }\n\n readonly #start = performance.now();\n #stmts = 0;\n\n /**\n * Implements the semantics specified in {@link process()}.\n *\n * Specifically:\n * * `freeWorker()` is called as soon as the statements are produced,\n * allowing them to be pipelined to the database.\n * * Statement errors result in failing the transaction pool.\n * * The client-supplied Resolver resolves on success or failure;\n * it is never rejected.\n */\n #stmtRunner(task: Task, r: {resolve: () => void} = resolver()): TaskRunner {\n return {\n run: async (tx, lc, freeWorker) => {\n let stmts: Statement[];\n try {\n stmts = await task(tx, lc);\n } catch (e) {\n r.resolve();\n throw e;\n } finally {\n freeWorker();\n }\n\n if (stmts.length === 0) {\n r.resolve();\n return {pending: null};\n }\n\n // Execute the statements (i.e. send to the db) immediately.\n // The last result is returned for the worker to await before\n // closing the transaction.\n const last = stmts.reduce(\n (_, stmt) =>\n stmt\n .execute()\n .then(() => {\n if (++this.#stmts % 1000 === 0) {\n const log = this.#stmts % 10000 === 0 ? 'info' : 'debug';\n const q = stmt as unknown as Query;\n lc[log]?.(\n `executed ${this.#stmts}th statement (${(performance.now() - this.#start).toFixed(3)} ms)`,\n {statement: q.string},\n );\n }\n })\n .catch(e => this.fail(e)),\n promiseVoid,\n );\n return {pending: last.then(r.resolve)};\n },\n rejected: r.resolve,\n };\n }\n\n /**\n * Processes and returns the result of executing the {@link ReadTask} from\n * within the transaction. 
An error thrown by the task will result in\n * rejecting the returned Promise, but will not affect the transaction pool\n * itself.\n */\n processReadTask<T>(readTask: ReadTask<T>): Promise<T> {\n const r = resolver<T>();\n this.#process(this.#readRunner(readTask, r));\n return r.promise;\n }\n\n /**\n * Implements the semantics specified in {@link processReadTask()}.\n *\n * Specifically:\n * * `freeWorker()` is called as soon as the result is produced,\n * before resolving the client-supplied Resolver.\n * * Errors result in rejecting the client-supplied Resolver but\n * do not affect transaction pool.\n */\n #readRunner<T>(readTask: ReadTask<T>, r: Resolver<T>): TaskRunner {\n return {\n run: async (tx, lc, freeWorker) => {\n let result: T;\n try {\n result = await readTask(tx, lc);\n freeWorker();\n r.resolve(result);\n } catch (e) {\n freeWorker();\n r.reject(e);\n }\n return {pending: null};\n },\n rejected: r.reject,\n };\n }\n\n #process(runner: TaskRunner): void {\n assert(!this.#done, 'already set done');\n if (this.#failure) {\n runner.rejected(this.#failure);\n return;\n }\n\n this.#tasks.enqueue(runner);\n\n // Check if the pool size can and should be increased.\n if (this.#numWorkers < this.#maxWorkers) {\n const outstanding = this.#tasks.size();\n\n if (outstanding > this.#numWorkers - this.#numWorking) {\n this.#db && this.#addWorker(this.#db);\n this.#numWorkers++;\n this.#lc.debug?.(`Increased pool size to ${this.#numWorkers}`);\n }\n }\n }\n\n /**\n * Ends all workers with a ROLLBACK. Throws if the pool is already done\n * or aborted.\n */\n abort() {\n this.fail(new RollbackSignal());\n }\n\n /**\n * Signals to all workers to end their transaction once all pending tasks have\n * been completed. 
Throws if the pool is already done or aborted.\n */\n setDone() {\n assert(!this.#done, 'already set done');\n this.#done = true;\n\n for (let i = 0; i < this.#numWorkers; i++) {\n this.#tasks.enqueue('done');\n }\n }\n\n /**\n * An alternative to explicitly calling {@link setDone}, `ref()` increments an internal reference\n * count, and {@link unref} decrements it. When the reference count reaches 0, {@link setDone} is\n * automatically called. A TransactionPool is initialized with a reference count of 1.\n *\n * `ref()` should be called before sharing the pool with another component, and only after the\n * pool has been started with {@link run()}. It must not be called on a TransactionPool that is\n * already done (either via {@link unref()} or {@link setDone()}. (Doing so indicates a logical\n * error in the code.)\n *\n * It follows that:\n * * The creator of the TransactionPool is responsible for running it.\n * * The TransactionPool should be ref'ed before being sharing.\n * * The receiver of the TransactionPool is only responsible for unref'ing it.\n *\n * On the other hand, a transaction pool that fails with a runtime error can still be ref'ed;\n * attempts to use the pool will result in the runtime error as expected.\n */\n // TODO: Get rid of the ref-counting stuff. 
It's no longer needed.\n ref(count = 1) {\n assert(\n this.#db !== undefined && !this.#done,\n `Cannot ref() a TransactionPool that is not running`,\n );\n this.#refCount += count;\n }\n\n /**\n * Decrements the internal reference count, automatically invoking {@link setDone} when it reaches 0.\n */\n unref(count = 1) {\n assert(\n count <= this.#refCount,\n () => `Cannot unref ${count} when refCount is ${this.#refCount}`,\n );\n\n this.#refCount -= count;\n if (this.#refCount === 0) {\n this.setDone();\n }\n }\n\n isRunning(): boolean {\n return this.#db !== undefined && !this.#done && this.#failure === undefined;\n }\n\n /**\n * Signals all workers to fail their transactions with the given {@link err}.\n */\n fail(err: unknown) {\n if (!this.#failure) {\n this.#failure = ensureError(err); // Fail fast: this is checked in the worker loop.\n // Logged for informational purposes. It is the responsibility of\n // higher level logic to classify and handle the exception.\n const level =\n this.#failure instanceof ControlFlowError ? 'debug' : 'info';\n this.#lc[level]?.(this.#failure);\n\n for (let i = 0; i < this.#numWorkers; i++) {\n // Enqueue the Error to terminate any workers waiting for tasks.\n this.#tasks.enqueue(this.#failure);\n }\n }\n }\n}\n\ntype SynchronizeSnapshotTasks = {\n /**\n * The `init` Task for the TransactionPool from which the snapshot originates.\n * The pool must have Mode.SERIALIZABLE, and will be set to READ ONLY by the\n * `exportSnapshot` init task. If the TransactionPool has multiple workers, the\n * first worker will export a snapshot that the others set.\n */\n exportSnapshot: Task;\n\n /**\n * The `cleanup` Task for the TransactionPool from which the snapshot\n * originates. 
This Task will wait for the follower pool to `setSnapshot`\n * to ensure that the snapshot is successfully shared before the originating\n * transaction is closed.\n */\n cleanupExport: Task;\n\n /**\n * The `init` Task for the TransactionPool in which workers will\n * consequently see the same snapshot as that of the first pool. The pool\n * must have Mode.SERIALIZABLE, and will have the ability to perform writes.\n */\n setSnapshot: Task;\n\n /** The ID of the shared snapshot. */\n snapshotID: Promise<string>;\n};\n\n/**\n * Init Tasks for Postgres snapshot synchronization across transactions.\n *\n * https://www.postgresql.org/docs/9.3/functions-admin.html#:~:text=Snapshot%20Synchronization%20Functions,identical%20content%20in%20the%20database.\n */\nexport function synchronizedSnapshots(): SynchronizeSnapshotTasks {\n const {\n promise: snapshotExported,\n resolve: exportSnapshot,\n reject: failExport,\n } = resolver<string>();\n\n const {\n promise: snapshotCaptured,\n resolve: captureSnapshot,\n reject: failCapture,\n } = resolver<unknown>();\n\n // Set by the first worker to run its initTask, who becomes responsible for\n // exporting the snapshot. TODO: Plumb the workerNum and use that instead.\n let firstWorkerRun = false;\n\n // Note: Neither init task should `await`, as processing in each pool can proceed\n // as soon as the statements have been sent to the db. 
However, the `cleanupExport`\n // task must `await` the result of `setSnapshot` to ensure that exporting transaction\n // does not close before the snapshot has been captured.\n return {\n exportSnapshot: tx => {\n if (!firstWorkerRun) {\n firstWorkerRun = true;\n const stmt =\n tx`SELECT pg_export_snapshot() AS snapshot; SET TRANSACTION READ ONLY;`.simple();\n // Intercept the promise to propagate the information to `snapshotExported`.\n stmt.then(result => exportSnapshot(result[0].snapshot), failExport);\n return [stmt]; // Also return the stmt so that it gets awaited (and errors handled).\n }\n return snapshotExported.then(snapshotID => [\n tx.unsafe(`SET TRANSACTION SNAPSHOT '${snapshotID}'`),\n tx`SET TRANSACTION READ ONLY`.simple(),\n ]);\n },\n\n setSnapshot: tx =>\n snapshotExported.then(snapshotID => {\n const stmt = tx.unsafe(`SET TRANSACTION SNAPSHOT '${snapshotID}'`);\n // Intercept the promise to propagate the information to `cleanupExport`.\n stmt.then(captureSnapshot, failCapture);\n return [stmt];\n }),\n\n cleanupExport: async () => {\n await snapshotCaptured;\n return [];\n },\n\n snapshotID: snapshotExported,\n };\n}\n\n/**\n * Returns `init` and `cleanup` {@link Task}s for a TransactionPool that ensure its workers\n * share a single view of the database. 
This is used for View Notifier and View Syncer logic\n * that allows multiple entities to perform parallel reads on the same snapshot of the database.\n */\nexport function sharedSnapshot(): {\n init: Task;\n cleanup: Task;\n snapshotID: Promise<string>;\n} {\n const {\n promise: snapshotExported,\n resolve: exportSnapshot,\n reject: failExport,\n } = resolver<string>();\n\n // Set by the first worker to run its initTask, who becomes responsible for\n // exporting the snapshot.\n let firstWorkerRun = false;\n\n // Set when any worker is done, signalling that all non-sentinel Tasks have been\n // dequeued, and thus any subsequently spawned workers should skip their initTask\n // since the snapshot is no longer needed (and soon to become invalid).\n let firstWorkerDone = false;\n\n return {\n init: (tx, lc) => {\n if (!firstWorkerRun) {\n firstWorkerRun = true;\n const stmt = tx`SELECT pg_export_snapshot() AS snapshot;`.simple();\n // Intercept the promise to propagate the information to `snapshotExported`.\n stmt.then(result => exportSnapshot(result[0].snapshot), failExport);\n return [stmt]; // Also return the stmt so that it gets awaited (and errors handled).\n }\n if (!firstWorkerDone) {\n return snapshotExported.then(snapshotID => [\n tx.unsafe(`SET TRANSACTION SNAPSHOT '${snapshotID}'`),\n ]);\n }\n lc.debug?.('All work is done. 
No need to set snapshot');\n return [];\n },\n\n cleanup: () => {\n firstWorkerDone = true;\n return [];\n },\n\n snapshotID: snapshotExported,\n };\n}\n\n/**\n * @returns An `init` Task for importing a snapshot from another transaction.\n */\nexport function importSnapshot(snapshotID: string): {\n init: Task;\n imported: Promise<void>;\n} {\n const {promise: imported, resolve, reject} = resolver<void>();\n\n return {\n init: tx => {\n const stmt = tx.unsafe(`SET TRANSACTION SNAPSHOT '${snapshotID}'`);\n stmt.then(() => resolve(), reject);\n return [stmt];\n },\n\n imported,\n };\n}\n\n/**\n * A superclass of Errors used for control flow that is needed to handle\n * another Error but does not constitute an error condition itself (e.g.\n * aborting transactions after a previous one fails). Subclassing this Error\n * will result in lowering the log level from `error` to `debug`.\n */\nexport class ControlFlowError extends Error {\n constructor(cause?: unknown) {\n super();\n this.cause = cause;\n }\n}\n\n/**\n * Internal error used to rollback the worker transaction. This is used\n * instead of executing a `ROLLBACK` statement because the postgres.js\n * library will otherwise try to execute an extraneous `COMMIT`, which\n * results in outputting a \"no transaction in progress\" warning to the\n * database logs.\n *\n * Throwing an exception, on the other hand, executes the postgres.js\n * codepath that calls `ROLLBACK` instead.\n */\nclass RollbackSignal extends ControlFlowError {\n readonly name = 'RollbackSignal';\n readonly message = 'rolling back transaction';\n}\n\nfunction ensureError(err: unknown): Error {\n if (err instanceof Error) {\n return err;\n }\n const error = new Error();\n error.cause = err;\n return error;\n}\n\ninterface TaskRunner {\n /**\n * Manages the running of a Task or ReadTask in two phases:\n *\n * - If the task involves blocking, this is done in the worker. 
Once the\n * blocking is done, `freeWorker()` is invoked to signal that the worker\n * is available to run another task. Note that this should be invoked\n * *before* resolving the result to the calling thread so that a\n * subsequent task can reuse the same worker.\n *\n * - Task statements are executed on the database asynchronously. The final\n * result of this processing is encapsulated in the returned `pending`\n * Promise. The worker will await the last pending Promise before closing\n * the transaction.\n *\n * @param freeWorker should be called as soon as all blocking operations are\n * completed in order to return the transaction to the pool.\n * @returns A `pending` Promise indicating when the statements have been\n * processed by the database, allowing the transaction to be closed.\n * This should be `null` if there are no transaction-dependent\n * statements to await.\n */\n run(\n tx: PostgresTransaction,\n lc: LogContext,\n freeWorker: () => void,\n ): Promise<{pending: Promise<void> | null}>;\n\n /**\n * Invoked if the TransactionPool is already in a failed state when the task\n * is requested.\n */\n rejected(reason: unknown): void;\n}\n\nconst IDLE_TIMEOUT_MS = 5_000;\n\n// The keepalive interval is settable by ZERO_TRANSACTION_POOL_KEEPALIVE_MS\n// as an emergency measure and is explicitly not made available as a server\n// option. This value is function of how the zero-cache uses transactions, and\n// should never need to be \"tuned\" or adjusted for different environments.\n//\n// Note that it must be shorter than IDLE_IN_TRANSACTION_SESSION_TIMEOUT_MS\n// with sufficient buffering to account for when the process is blocked by\n// synchronous calls (e.g. to the replica).\nconst KEEPALIVE_TIMEOUT_MS = parseInt(\n process.env.ZERO_TRANSACTION_POOL_KEEPALIVE_MS ?? 
'5000',\n);\n\nconst KEEPALIVE_TASK: Task = (tx, lc) => {\n lc.debug?.(`sending tx keepalive`);\n return [tx`SELECT 1`.simple()];\n};\n\ntype TimeoutTask = {\n timeoutMs: number;\n task: Task | 'done';\n};\n\ntype TimeoutTasks = {\n forInitialWorkers: TimeoutTask;\n forExtraWorkers: TimeoutTask;\n};\n\n// Production timeout tasks. Overridden in tests.\nexport const TIMEOUT_TASKS: TimeoutTasks = {\n forInitialWorkers: {\n timeoutMs: KEEPALIVE_TIMEOUT_MS,\n task: KEEPALIVE_TASK,\n },\n forExtraWorkers: {\n timeoutMs: IDLE_TIMEOUT_MS,\n task: 'done',\n },\n};\n\n// The slice of information from the Query object in Postgres.js that gets logged for debugging.\n// https://github.com/porsager/postgres/blob/f58cd4f3affd3e8ce8f53e42799672d86cd2c70b/src/connection.js#L219\ntype Query = {string: string; parameters: object[]};\n"],"names":["key","value"],"mappings":";;;;;;AAgDO,MAAM,gBAAgB;AAAA,EAC3B;AAAA,EACS;AAAA,EACA;AAAA,EACA;AAAA,EACA,SAAS,IAAI,MAAA;AAAA,EACb,WAA+B,CAAA;AAAA,EAC/B;AAAA,EACA;AAAA,EACA;AAAA,EACT;AAAA,EACA,cAAc;AAAA,EACd;AAAA;AAAA,EAEA,YAAY;AAAA,EACZ,QAAQ;AAAA,EACR;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAkBA,YACE,IACA,MACA,MACA,SACA,iBAAiB,GACjB,aAAa,gBACb,eAAe,eACf;AACA,WAAO,iBAAiB,GAAG,iCAAiC;AAC5D;AAAA,MACE,cAAc;AAAA,MACd;AAAA,IAAA;AAGF,SAAK,MAAM;AACX,SAAK,QAAQ;AACb,SAAK,QAAQ,OAAO,KAAK,YAAY,IAAI,IAAI;AAC7C,SAAK,WAAW,UAAU,KAAK,YAAY,OAAO,IAAI;AACtD,SAAK,kBAAkB;AACvB,SAAK,cAAc;AACnB,SAAK,cAAc;AACnB,SAAK,eAAe;AAAA,EACtB;AAAA;AAAA;AAAA;AAAA;AAAA,EAMA,IAAI,IAAsB;AACxB,WAAO,CAAC,KAAK,KAAK,iBAAiB;AACnC,SAAK,MAAM;AACX,aAAS,IAAI,GAAG,IAAI,KAAK,aAAa,KAAK;AACzC,WAAK,WAAW,EAAE;AAAA,IACpB;AACA,WAAO;AAAA,EACT;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EASA,kBAAkB,KAAa,OAAe;AAC5C,SAAK,MAAM,KAAK,IAAI,YAAY,KAAK,KAAK;AAE1C,WAAO;AAAA,MACL,mBAAmB,CAACA,MAAaC,WAC/B,KAAK,kBAAkBD,MAAKC,MAAK;AAAA,IAAA;AAAA,EAEvC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAsBA,MAAM,OAAO;AACX,U
AAM,aAAa,KAAK,SAAS;AACjC,UAAM,QAAQ,IAAI,KAAK,QAAQ;AAE/B,QAAI,aAAa,KAAK,SAAS,QAAQ;AAKrC,YAAM,QAAQ,IAAI,KAAK,QAAQ;AAAA,IACjC;AACA,SAAK,IAAI,QAAQ,uBAAuB;AAAA,EAC1C;AAAA,EAEA,WAAW,IAAgB;AACzB,UAAM,KAAK,KAAK,SAAS,SAAS;AAClC,UAAM,KAAK,KAAK,IAAI,YAAY,MAAM,EAAE;AAExC,UAAM,KACJ,KAAK,SAAS,SAAS,KAAK,kBACxB,KAAK,aAAa,oBAClB,KAAK,aAAa;AACxB,UAAM,EAAC,cAAa;AACpB,UAAM,cAAc,GAAG,SAAS,SAAS,SAAS,KAAK,YAAY,GAAG,IAAI;AAE1E,UAAM,SAAS,OAAO,OAA4B;AAChD,YAAM,QAAQ,YAAY,IAAA;AAC1B,UAAI;AACF,WAAG,QAAQ,qBAAqB;AAEhC,YAAI,OAAsB;AAE1B,cAAM,cAAc,OAAO,WAAuB;AAChD,qBAAW,KAAK,SAAS,KAAK;AAC9B,gBAAM,EAAC,YAAW,MAAM,OAAO,IAAI,IAAI,IAAI,MAAM;AAC/C,uBAAW,KAAK,SAAS,KAAK;AAAA,UAChC,CAAC;AACD,iBAAO,WAAW;AAAA,QACpB;AAEA,YAAI,OACF,KAAK,SAAU,MAAM,KAAK,OAAO,QAAQ,aAAa,SAAS;AAEjE,YAAI;AACF,iBAAO,SAAS,QAAQ;AACtB,gBACE,gBAAgB,SACf,SAAS,KAAK,SAAS,KAAK,UAC7B;AACA,oBAAM,KAAK,YAAY;AAAA,YACzB;AACA,kBAAM,YAAY,IAAI;AAGtB,mBAAO,MAAM,KAAK,OAAO,QAAQ,aAAa,SAAS;AAAA,UACzD;AAAA,QACF,UAAA;AAEE,cAAI,KAAK,UAAU;AACjB,kBAAM,YAAY,KAAK,QAAQ;AAAA,UACjC;AAAA,QACF;AAEA,cAAM,UAAU,YAAY,IAAA,IAAQ;AACpC,WAAG,QAAQ,wBAAwB,QAAQ,QAAQ,CAAC,CAAC,MAAM;AAG3D,eAAO;AAAA,MACT,SAAS,GAAG;AACV,YAAI,MAAM,KAAK,UAAU;AACvB,eAAK,KAAK,CAAC;AAAA,QACb;AACA,cAAM;AAAA,MACR;AAAA,IACF;AAEA,UAAM,WAAW,MAAM,IAAI,QAAQ,EAAC,MAAM,KAAK,MAAA,CAAM,EAClD,MAAM,CAAA,MAAK;AACV,UAAI,aAAa,gBAAgB;AAG/B,WAAG,QAAQ,qBAAqB;AAAA,MAClC,OAAO;AACL,cAAM;AAAA,MACR;AAAA,IACF,CAAC,EACA,QAAQ,MAAM,KAAK,aAAa;AAKnC,aAAS,MAAM,MAAM;AAAA,IAAC,CAAC;AAEvB,SAAK,SAAS,KAAK,QAAQ;AAK3B,QAAI,KAAK,OAAO;AACd,WAAK,OAAO,QAAQ,MAAM;AAAA,IAC5B;AACA,QAAI,KAAK,UAAU;AACjB,WAAK,OAAO,QAAQ,KAAK,QAAQ;AAAA,IACnC;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAcA,QAAQ,MAA2B;AACjC,UAAM,IAAI,SAAA;AACV,SAAK,SAAS,KAAK,YAAY,MAAM,CAAC,CAAC;AACvC,WAAO,EAAE;AAAA,EACX;AAAA,EAES,SAAS,YAAY,IAAA;AAAA,EAC9B,SAAS;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAYT,YAAY,MAAY,IAA2B,YAAwB;AACzE,WAAO;AAAA,MACL,KAAK,OAAO,IAAI,IAAI,eAAe;AACjC,YAAI;AACJ,YAAI;AACF,kBAAQ,MAAM,KAAK,IAAI,EAAE;AAAA,QAC3B,SAAS,GAAG;A
ACV,YAAE,QAAA;AACF,gBAAM;AAAA,QACR,UAAA;AACE,qBAAA;AAAA,QACF;AAEA,YAAI,MAAM,WAAW,GAAG;AACtB,YAAE,QAAA;AACF,iBAAO,EAAC,SAAS,KAAA;AAAA,QACnB;AAKA,cAAM,OAAO,MAAM;AAAA,UACjB,CAAC,GAAG,SACF,KACG,QAAA,EACA,KAAK,MAAM;AACV,gBAAI,EAAE,KAAK,SAAS,QAAS,GAAG;AAC9B,oBAAM,MAAM,KAAK,SAAS,QAAU,IAAI,SAAS;AACjD,oBAAM,IAAI;AACV,iBAAG,GAAG;AAAA,gBACJ,YAAY,KAAK,MAAM,kBAAkB,YAAY,IAAA,IAAQ,KAAK,QAAQ,QAAQ,CAAC,CAAC;AAAA,gBACpF,EAAC,WAAW,EAAE,OAAA;AAAA,cAAM;AAAA,YAExB;AAAA,UACF,CAAC,EACA,MAAM,OAAK,KAAK,KAAK,CAAC,CAAC;AAAA,UAC5B;AAAA,QAAA;AAEF,eAAO,EAAC,SAAS,KAAK,KAAK,EAAE,OAAO,EAAA;AAAA,MACtC;AAAA,MACA,UAAU,EAAE;AAAA,IAAA;AAAA,EAEhB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQA,gBAAmB,UAAmC;AACpD,UAAM,IAAI,SAAA;AACV,SAAK,SAAS,KAAK,YAAY,UAAU,CAAC,CAAC;AAC3C,WAAO,EAAE;AAAA,EACX;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAWA,YAAe,UAAuB,GAA4B;AAChE,WAAO;AAAA,MACL,KAAK,OAAO,IAAI,IAAI,eAAe;AACjC,YAAI;AACJ,YAAI;AACF,mBAAS,MAAM,SAAS,IAAI,EAAE;AAC9B,qBAAA;AACA,YAAE,QAAQ,MAAM;AAAA,QAClB,SAAS,GAAG;AACV,qBAAA;AACA,YAAE,OAAO,CAAC;AAAA,QACZ;AACA,eAAO,EAAC,SAAS,KAAA;AAAA,MACnB;AAAA,MACA,UAAU,EAAE;AAAA,IAAA;AAAA,EAEhB;AAAA,EAEA,SAAS,QAA0B;AACjC,WAAO,CAAC,KAAK,OAAO,kBAAkB;AACtC,QAAI,KAAK,UAAU;AACjB,aAAO,SAAS,KAAK,QAAQ;AAC7B;AAAA,IACF;AAEA,SAAK,OAAO,QAAQ,MAAM;AAG1B,QAAI,KAAK,cAAc,KAAK,aAAa;AACvC,YAAM,cAAc,KAAK,OAAO,KAAA;AAEhC,UAAI,cAAc,KAAK,cAAc,KAAK,aAAa;AACrD,aAAK,OAAO,KAAK,WAAW,KAAK,GAAG;AACpC,aAAK;AACL,aAAK,IAAI,QAAQ,0BAA0B,KAAK,WAAW,EAAE;AAAA,MAC/D;AAAA,IACF;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA,EAMA,QAAQ;AACN,SAAK,KAAK,IAAI,gBAAgB;AAAA,EAChC;AAAA;AAAA;AAAA;AAAA;AAAA,EAMA,UAAU;AACR,WAAO,CAAC,KAAK,OAAO,kBAAkB;AACtC,SAAK,QAAQ;AAEb,aAAS,IAAI,GAAG,IAAI,KAAK,aAAa,KAAK;AACzC,WAAK,OAAO,QAAQ,MAAM;AAAA,IAC5B;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAqBA,IAAI,QAAQ,GAAG;AACb;AAAA,MACE,KAAK,QAAQ,UAAa,CAAC,KAAK;AAAA,MAChC;AAAA,IAAA;AAEF,SAAK,aAAa;AAAA,EACpB;AAAA;AAAA;AAAA;AAAA,EAKA,MAAM,QAAQ,GAAG;AACf;AAAA,MACE,SAAS,KAAK;AAAA,MACd,MAAM,gBAAgB,KAAK,q
BAAqB,KAAK,SAAS;AAAA,IAAA;AAGhE,SAAK,aAAa;AAClB,QAAI,KAAK,cAAc,GAAG;AACxB,WAAK,QAAA;AAAA,IACP;AAAA,EACF;AAAA,EAEA,YAAqB;AACnB,WAAO,KAAK,QAAQ,UAAa,CAAC,KAAK,SAAS,KAAK,aAAa;AAAA,EACpE;AAAA;AAAA;AAAA;AAAA,EAKA,KAAK,KAAc;AACjB,QAAI,CAAC,KAAK,UAAU;AAClB,WAAK,WAAW,YAAY,GAAG;AAG/B,YAAM,QACJ,KAAK,oBAAoB,mBAAmB,UAAU;AACxD,WAAK,IAAI,KAAK,IAAI,KAAK,QAAQ;AAE/B,eAAS,IAAI,GAAG,IAAI,KAAK,aAAa,KAAK;AAEzC,aAAK,OAAO,QAAQ,KAAK,QAAQ;AAAA,MACnC;AAAA,IACF;AAAA,EACF;AACF;AAgJO,SAAS,eAAe,YAG7B;AACA,QAAM,EAAC,SAAS,UAAU,SAAS,OAAA,IAAU,SAAA;AAE7C,SAAO;AAAA,IACL,MAAM,CAAA,OAAM;AACV,YAAM,OAAO,GAAG,OAAO,6BAA6B,UAAU,GAAG;AACjE,WAAK,KAAK,MAAM,QAAA,GAAW,MAAM;AACjC,aAAO,CAAC,IAAI;AAAA,IACd;AAAA,IAEA;AAAA,EAAA;AAEJ;AAQO,MAAM,yBAAyB,MAAM;AAAA,EAC1C,YAAY,OAAiB;AAC3B,UAAA;AACA,SAAK,QAAQ;AAAA,EACf;AACF;AAYA,MAAM,uBAAuB,iBAAiB;AAAA,EACnC,OAAO;AAAA,EACP,UAAU;AACrB;AAEA,SAAS,YAAY,KAAqB;AACxC,MAAI,eAAe,OAAO;AACxB,WAAO;AAAA,EACT;AACA,QAAM,QAAQ,IAAI,MAAA;AAClB,QAAM,QAAQ;AACd,SAAO;AACT;AAqCA,MAAM,kBAAkB;AAUxB,MAAM,uBAAuB;AAAA,EAC3B,QAAQ,IAAI,sCAAsC;AACpD;AAEA,MAAM,iBAAuB,CAAC,IAAI,OAAO;AACvC,KAAG,QAAQ,sBAAsB;AACjC,SAAO,CAAC,aAAa,OAAA,CAAQ;AAC/B;AAaO,MAAM,gBAA8B;AAAA,EACzC,mBAAmB;AAAA,IACjB,WAAW;AAAA,IACX,MAAM;AAAA,EAAA;AAAA,EAER,iBAAiB;AAAA,IACf,WAAW;AAAA,IACX,MAAM;AAAA,EAAA;AAEV;"}
@@ -1 +1 @@
1
- {"version":3,"file":"events.d.ts","sourceRoot":"","sources":["../../../../../zero-cache/src/observability/events.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,kBAAkB,CAAC;AAGjD,OAAO,EAAc,KAAK,UAAU,EAAC,MAAM,6BAA6B,CAAC;AAKzE,OAAO,EAAC,KAAK,SAAS,EAAC,MAAM,mCAAmC,CAAC;AACjE,OAAO,KAAK,EAAC,oBAAoB,EAAC,MAAM,wBAAwB,CAAC;AAyBjE;;;;GAIG;AACH,wBAAgB,aAAa,CAC3B,EAAE,EAAE,UAAU,EACd,EAAC,MAAM,EAAE,UAAU,EAAC,EAAE,IAAI,CAAC,oBAAoB,EAAE,QAAQ,GAAG,YAAY,CAAC,QAqD1E;AAED,wBAAgB,uBAAuB,CAAC,IAAI,EAAE,SAAS,EAAE,EAAE,GAAG,OAAa,QAO1E;AAED,wBAAgB,YAAY,CAAC,CAAC,SAAS,SAAS,EAAE,EAAE,EAAE,UAAU,EAAE,KAAK,EAAE,CAAC,QAEzE;AAED,wBAAsB,oBAAoB,CAAC,CAAC,SAAS,SAAS,EAC5D,EAAE,EAAE,UAAU,EACd,KAAK,EAAE,CAAC,iBAGT;AAED,wBAAgB,gBAAgB,CAAC,CAAC,EAAE,OAAO,GAAG,UAAU,CAevD"}
1
+ {"version":3,"file":"events.d.ts","sourceRoot":"","sources":["../../../../../zero-cache/src/observability/events.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,kBAAkB,CAAC;AAMjD,OAAO,EAAc,KAAK,UAAU,EAAC,MAAM,6BAA6B,CAAC;AAKzE,OAAO,EAAC,KAAK,SAAS,EAAC,MAAM,mCAAmC,CAAC;AACjE,OAAO,KAAK,EAAC,oBAAoB,EAAC,MAAM,wBAAwB,CAAC;AA+BjE;;;;GAIG;AACH,wBAAgB,aAAa,CAC3B,EAAE,EAAE,UAAU,EACd,EAAC,MAAM,EAAE,UAAU,EAAC,EAAE,IAAI,CAAC,oBAAoB,EAAE,QAAQ,GAAG,YAAY,CAAC,QAsE1E;AAED,wBAAgB,uBAAuB,CAAC,IAAI,EAAE,SAAS,EAAE,EAAE,GAAG,OAAa,QAO1E;AAED,wBAAgB,YAAY,CAAC,CAAC,SAAS,SAAS,EAAE,EAAE,EAAE,UAAU,EAAE,KAAK,EAAE,CAAC,QAEzE;AAED,wBAAsB,oBAAoB,CAAC,CAAC,SAAS,SAAS,EAC5D,EAAE,EAAE,UAAU,EACd,KAAK,EAAE,CAAC,iBAGT;AAED,wBAAgB,gBAAgB,CAAC,CAAC,EAAE,OAAO,GAAG,UAAU,CAevD"}
@@ -1,5 +1,8 @@
1
+ import { resolver } from "@rocicorp/resolver";
1
2
  import { emitterFor, httpTransport, CloudEvent } from "cloudevents";
2
3
  import { nanoid } from "nanoid";
4
+ import { gzip } from "node:zlib";
5
+ import { stringify } from "../../../shared/src/bigint-json.js";
3
6
  import { isJSONValue } from "../../../shared/src/json.js";
4
7
  import { must } from "../../../shared/src/must.js";
5
8
  import { promiseVoid } from "../../../shared/src/resolved-promises.js";
@@ -17,6 +20,11 @@ let publishFn = (lc, { type }) => {
17
20
  const attributeValueSchema = union(string(), number(), boolean());
18
21
  const eventSchema = record(attributeValueSchema);
19
22
  const extensionsObjectSchema = object({ extensions: eventSchema });
23
+ async function base64gzip(str) {
24
+ const { promise: gzipped, resolve, reject } = resolver();
25
+ gzip(Buffer.from(str), (err, buf) => err ? reject(err) : resolve(buf));
26
+ return (await gzipped).toString("base64");
27
+ }
20
28
  function initEventSink(lc, { taskID, cloudEvent }) {
21
29
  if (!cloudEvent.sinkEnv) {
22
30
  publishFn = (lc2, event) => {
@@ -31,13 +39,21 @@ function initEventSink(lc, { taskID, cloudEvent }) {
31
39
  const { extensions } = parse(JSON.parse(strVal), extensionsObjectSchema);
32
40
  overrides = extensions;
33
41
  }
34
- function createCloudEvent(data) {
35
- const { type, time } = data;
42
+ async function createCloudEvent(event) {
43
+ const { type, time } = event;
44
+ const json = stringify(event);
45
+ const data = await base64gzip(json);
36
46
  return new CloudEvent({
37
47
  id: nanoid(),
38
48
  source: taskID,
39
49
  type,
40
50
  time,
51
+ // Pass `data` as text/plain to prevent intermediaries from
52
+ // base64-decoding it. It is the responsibility of the final processor
53
+ // to recognize that datacontentencoding === "gzip" and unpack the
54
+ // `data` accordingly before parsing it.
55
+ datacontenttype: "text/plain",
56
+ datacontentencoding: "gzip",
41
57
  data,
42
58
  ...overrides
43
59
  });
@@ -46,7 +62,13 @@ function initEventSink(lc, { taskID, cloudEvent }) {
46
62
  const emit = emitterFor(httpTransport(sinkURI));
47
63
  lc.debug?.(`Publishing ZeroEvents to ${sinkURI}`);
48
64
  publishFn = async (lc2, event) => {
49
- const cloudEvent2 = createCloudEvent(event);
65
+ let cloudEvent2;
66
+ try {
67
+ cloudEvent2 = await createCloudEvent(event);
68
+ } catch (e) {
69
+ lc2.error?.(`Error creating CloudEvent ${event.type}`, e);
70
+ return;
71
+ }
50
72
  lc2.debug?.(`Publishing CloudEvent: ${cloudEvent2.type}`);
51
73
  for (let i = 0; i < MAX_PUBLISH_ATTEMPTS; i++) {
52
74
  if (i > 0) {
@@ -54,7 +76,8 @@ function initEventSink(lc, { taskID, cloudEvent }) {
54
76
  }
55
77
  try {
56
78
  await emit(cloudEvent2);
57
- lc2.info?.(`Published CloudEvent: ${cloudEvent2.type}`, cloudEvent2);
79
+ const { data: _, ...event2 } = cloudEvent2;
80
+ lc2.info?.(`Published CloudEvent: ${cloudEvent2.type}`, { event: event2 });
58
81
  return;
59
82
  } catch (e) {
60
83
  lc2.warn?.(`Error publishing ${cloudEvent2.type} (attempt ${i + 1})`, e);
@@ -62,9 +85,6 @@ function initEventSink(lc, { taskID, cloudEvent }) {
62
85
  }
63
86
  };
64
87
  }
65
- function publishEvent(lc, event) {
66
- void publishFn(lc, event);
67
- }
68
88
  async function publishCriticalEvent(lc, event) {
69
89
  await publishFn(lc, event);
70
90
  }
@@ -86,7 +106,6 @@ function makeErrorDetails(e) {
86
106
  export {
87
107
  initEventSink,
88
108
  makeErrorDetails,
89
- publishCriticalEvent,
90
- publishEvent
109
+ publishCriticalEvent
91
110
  };
92
111
  //# sourceMappingURL=events.js.map
@@ -1 +1 @@
1
- {"version":3,"file":"events.js","sources":["../../../../../zero-cache/src/observability/events.ts"],"sourcesContent":["import type {LogContext} from '@rocicorp/logger';\nimport {CloudEvent, emitterFor, httpTransport} from 'cloudevents';\nimport {nanoid} from 'nanoid';\nimport {isJSONValue, type JSONObject} from '../../../shared/src/json.ts';\nimport {must} from '../../../shared/src/must.ts';\nimport {promiseVoid} from '../../../shared/src/resolved-promises.ts';\nimport {sleep} from '../../../shared/src/sleep.ts';\nimport * as v from '../../../shared/src/valita.ts';\nimport {type ZeroEvent} from '../../../zero-events/src/index.ts';\nimport type {NormalizedZeroConfig} from '../config/normalize.ts';\n\nconst MAX_PUBLISH_ATTEMPTS = 6;\nconst INITIAL_PUBLISH_BACKOFF_MS = 500;\n\ntype PublisherFn = (lc: LogContext, event: ZeroEvent) => Promise<void>;\n\nlet publishFn: PublisherFn = (lc, {type}) => {\n lc.warn?.(\n `Cannot publish \"${type}\" event before initEventSink(). ` +\n `This is only expected in unit tests.`,\n );\n return promiseVoid;\n};\n\nconst attributeValueSchema = v.union(v.string(), v.number(), v.boolean());\n\nconst eventSchema = v.record(attributeValueSchema);\n\ntype PartialEvent = v.Infer<typeof eventSchema>;\n\n// Note: This conforms to the format of the knative K_CE_OVERRIDES binding:\n// https://github.com/knative/eventing/blob/main/docs/spec/sources.md#sinkbinding\nconst extensionsObjectSchema = v.object({extensions: eventSchema});\n\n/**\n * Initializes a per-process event sink according to the cloud event\n * parameters in the ZeroConfig. 
This must be called at the beginning\n * of the process, before any ZeroEvents are generated / published.\n */\nexport function initEventSink(\n lc: LogContext,\n {taskID, cloudEvent}: Pick<NormalizedZeroConfig, 'taskID' | 'cloudEvent'>,\n) {\n if (!cloudEvent.sinkEnv) {\n // The default implementation just outputs the events to logs.\n publishFn = (lc, event) => {\n lc.info?.(`ZeroEvent: ${event.type}`, event);\n return promiseVoid;\n };\n return;\n }\n\n let overrides: PartialEvent = {};\n\n if (cloudEvent.extensionOverridesEnv) {\n const strVal = must(process.env[cloudEvent.extensionOverridesEnv]);\n const {extensions} = v.parse(JSON.parse(strVal), extensionsObjectSchema);\n overrides = extensions;\n }\n\n function createCloudEvent(data: ZeroEvent) {\n const {type, time} = data;\n return new CloudEvent({\n id: nanoid(),\n source: taskID,\n type,\n time,\n data,\n ...overrides,\n });\n }\n\n const sinkURI = must(process.env[cloudEvent.sinkEnv]);\n const emit = emitterFor(httpTransport(sinkURI));\n lc.debug?.(`Publishing ZeroEvents to ${sinkURI}`);\n\n publishFn = async (lc, event) => {\n const cloudEvent = createCloudEvent(event);\n lc.debug?.(`Publishing CloudEvent: ${cloudEvent.type}`);\n\n for (let i = 0; i < MAX_PUBLISH_ATTEMPTS; i++) {\n if (i > 0) {\n // exponential backoff on retries\n await sleep(INITIAL_PUBLISH_BACKOFF_MS * 2 ** (i - 1));\n }\n try {\n await emit(cloudEvent);\n lc.info?.(`Published CloudEvent: ${cloudEvent.type}`, cloudEvent);\n return;\n } catch (e) {\n lc.warn?.(`Error publishing ${cloudEvent.type} (attempt ${i + 1})`, e);\n }\n }\n };\n}\n\nexport function initEventSinkForTesting(sink: ZeroEvent[], now = new Date()) {\n publishFn = (lc, event) => {\n lc.info?.(`Testing event sink received ${event.type} event`, event);\n // Replace the default Date.now() with the test instance for determinism.\n sink.push({...event, time: now.toISOString()});\n return promiseVoid;\n };\n}\n\nexport function publishEvent<E extends ZeroEvent>(lc: 
LogContext, event: E) {\n void publishFn(lc, event);\n}\n\nexport async function publishCriticalEvent<E extends ZeroEvent>(\n lc: LogContext,\n event: E,\n) {\n await publishFn(lc, event);\n}\n\nexport function makeErrorDetails(e: unknown): JSONObject {\n const err = e instanceof Error ? e : new Error(String(e));\n const errorDetails: JSONObject = {\n name: err.name,\n message: err.message,\n stack: err.stack,\n cause: err.cause ? makeErrorDetails(err.cause) : undefined,\n };\n // Include any enumerable properties (e.g. of Error subtypes).\n for (const [field, value] of Object.entries(err)) {\n if (isJSONValue(value, [])) {\n errorDetails[field] = value;\n }\n }\n return errorDetails;\n}\n"],"names":["v.union","v.string","v.number","v.boolean","v.record","v.object","lc","v.parse","cloudEvent"],"mappings":";;;;;;;;AAWA,MAAM,uBAAuB;AAC7B,MAAM,6BAA6B;AAInC,IAAI,YAAyB,CAAC,IAAI,EAAC,WAAU;AAC3C,KAAG;AAAA,IACD,mBAAmB,IAAI;AAAA,EAAA;AAGzB,SAAO;AACT;AAEA,MAAM,uBAAuBA,MAAQC,OAAE,GAAUC,OAAE,GAAUC,QAAE,CAAS;AAExE,MAAM,cAAcC,OAAS,oBAAoB;AAMjD,MAAM,yBAAyBC,OAAS,EAAC,YAAY,aAAY;AAO1D,SAAS,cACd,IACA,EAAC,QAAQ,cACT;AACA,MAAI,CAAC,WAAW,SAAS;AAEvB,gBAAY,CAACC,KAAI,UAAU;AACzBA,UAAG,OAAO,cAAc,MAAM,IAAI,IAAI,KAAK;AAC3C,aAAO;AAAA,IACT;AACA;AAAA,EACF;AAEA,MAAI,YAA0B,CAAA;AAE9B,MAAI,WAAW,uBAAuB;AACpC,UAAM,SAAS,KAAK,QAAQ,IAAI,WAAW,qBAAqB,CAAC;AACjE,UAAM,EAAC,eAAcC,MAAQ,KAAK,MAAM,MAAM,GAAG,sBAAsB;AACvE,gBAAY;AAAA,EACd;AAEA,WAAS,iBAAiB,MAAiB;AACzC,UAAM,EAAC,MAAM,KAAA,IAAQ;AACrB,WAAO,IAAI,WAAW;AAAA,MACpB,IAAI,OAAA;AAAA,MACJ,QAAQ;AAAA,MACR;AAAA,MACA;AAAA,MACA;AAAA,MACA,GAAG;AAAA,IAAA,CACJ;AAAA,EACH;AAEA,QAAM,UAAU,KAAK,QAAQ,IAAI,WAAW,OAAO,CAAC;AACpD,QAAM,OAAO,WAAW,cAAc,OAAO,CAAC;AAC9C,KAAG,QAAQ,4BAA4B,OAAO,EAAE;AAEhD,cAAY,OAAOD,KAAI,UAAU;AAC/B,UAAME,cAAa,iBAAiB,KAAK;AACzCF,QAAG,QAAQ,0BAA0BE,YAAW,IAAI,EAAE;AAEtD,aAAS,IAAI,GAAG,IAAI,sBAAsB,KAAK;AAC7C,UAAI,IAAI,GAAG;AAET,cAAM,MAAM,6BAA6B,MAAM,IAAI,EAAE;AAAA,MACvD;AACA,UAAI;AACF,cAAM,KAAKA,WAAU;AACrBF,YAAG,OAAO,yBAAyBE,YAAW,IAAI,IAAIA,WAAU;AAChE;AAAA
,MACF,SAAS,GAAG;AACVF,YAAG,OAAO,oBAAoBE,YAAW,IAAI,aAAa,IAAI,CAAC,KAAK,CAAC;AAAA,MACvE;AAAA,IACF;AAAA,EACF;AACF;AAWO,SAAS,aAAkC,IAAgB,OAAU;AAC1E,OAAK,UAAU,IAAI,KAAK;AAC1B;AAEA,eAAsB,qBACpB,IACA,OACA;AACA,QAAM,UAAU,IAAI,KAAK;AAC3B;AAEO,SAAS,iBAAiB,GAAwB;AACvD,QAAM,MAAM,aAAa,QAAQ,IAAI,IAAI,MAAM,OAAO,CAAC,CAAC;AACxD,QAAM,eAA2B;AAAA,IAC/B,MAAM,IAAI;AAAA,IACV,SAAS,IAAI;AAAA,IACb,OAAO,IAAI;AAAA,IACX,OAAO,IAAI,QAAQ,iBAAiB,IAAI,KAAK,IAAI;AAAA,EAAA;AAGnD,aAAW,CAAC,OAAO,KAAK,KAAK,OAAO,QAAQ,GAAG,GAAG;AAChD,QAAI,YAAY,OAAO,CAAA,CAAE,GAAG;AAC1B,mBAAa,KAAK,IAAI;AAAA,IACxB;AAAA,EACF;AACA,SAAO;AACT;"}
1
+ {"version":3,"file":"events.js","sources":["../../../../../zero-cache/src/observability/events.ts"],"sourcesContent":["import type {LogContext} from '@rocicorp/logger';\nimport {resolver} from '@rocicorp/resolver';\nimport {CloudEvent, emitterFor, httpTransport} from 'cloudevents';\nimport {nanoid} from 'nanoid';\nimport {gzip} from 'node:zlib';\nimport {stringify} from '../../../shared/src/bigint-json.ts';\nimport {isJSONValue, type JSONObject} from '../../../shared/src/json.ts';\nimport {must} from '../../../shared/src/must.ts';\nimport {promiseVoid} from '../../../shared/src/resolved-promises.ts';\nimport {sleep} from '../../../shared/src/sleep.ts';\nimport * as v from '../../../shared/src/valita.ts';\nimport {type ZeroEvent} from '../../../zero-events/src/index.ts';\nimport type {NormalizedZeroConfig} from '../config/normalize.ts';\n\nconst MAX_PUBLISH_ATTEMPTS = 6;\nconst INITIAL_PUBLISH_BACKOFF_MS = 500;\n\ntype PublisherFn = (lc: LogContext, event: ZeroEvent) => Promise<void>;\n\nlet publishFn: PublisherFn = (lc, {type}) => {\n lc.warn?.(\n `Cannot publish \"${type}\" event before initEventSink(). ` +\n `This is only expected in unit tests.`,\n );\n return promiseVoid;\n};\n\nconst attributeValueSchema = v.union(v.string(), v.number(), v.boolean());\n\nconst eventSchema = v.record(attributeValueSchema);\n\ntype PartialEvent = v.Infer<typeof eventSchema>;\n\n// Note: This conforms to the format of the knative K_CE_OVERRIDES binding:\n// https://github.com/knative/eventing/blob/main/docs/spec/sources.md#sinkbinding\nconst extensionsObjectSchema = v.object({extensions: eventSchema});\n\nasync function base64gzip(str: string): Promise<string> {\n const {promise: gzipped, resolve, reject} = resolver<Buffer>();\n gzip(Buffer.from(str), (err, buf) => (err ? reject(err) : resolve(buf)));\n return (await gzipped).toString('base64');\n}\n\n/**\n * Initializes a per-process event sink according to the cloud event\n * parameters in the ZeroConfig. 
This must be called at the beginning\n * of the process, before any ZeroEvents are generated / published.\n */\nexport function initEventSink(\n lc: LogContext,\n {taskID, cloudEvent}: Pick<NormalizedZeroConfig, 'taskID' | 'cloudEvent'>,\n) {\n if (!cloudEvent.sinkEnv) {\n // The default implementation just outputs the events to logs.\n publishFn = (lc, event) => {\n lc.info?.(`ZeroEvent: ${event.type}`, event);\n return promiseVoid;\n };\n return;\n }\n\n let overrides: PartialEvent = {};\n\n if (cloudEvent.extensionOverridesEnv) {\n const strVal = must(process.env[cloudEvent.extensionOverridesEnv]);\n const {extensions} = v.parse(JSON.parse(strVal), extensionsObjectSchema);\n overrides = extensions;\n }\n\n async function createCloudEvent(event: ZeroEvent) {\n const {type, time} = event;\n const json = stringify(event);\n const data = await base64gzip(json);\n\n return new CloudEvent({\n id: nanoid(),\n source: taskID,\n type,\n time,\n // Pass `data` as text/plain to prevent intermediaries from\n // base64-decoding it. 
It is the responsibility of the final processor\n // to recognize that datacontentencoding === \"gzip\" and unpack the\n // `data` accordingly before parsing it.\n datacontenttype: 'text/plain',\n datacontentencoding: 'gzip',\n data,\n ...overrides,\n });\n }\n\n const sinkURI = must(process.env[cloudEvent.sinkEnv]);\n const emit = emitterFor(httpTransport(sinkURI));\n lc.debug?.(`Publishing ZeroEvents to ${sinkURI}`);\n\n publishFn = async (lc, event) => {\n let cloudEvent: CloudEvent<string>;\n try {\n cloudEvent = await createCloudEvent(event);\n } catch (e) {\n lc.error?.(`Error creating CloudEvent ${event.type}`, e);\n return;\n }\n lc.debug?.(`Publishing CloudEvent: ${cloudEvent.type}`);\n\n for (let i = 0; i < MAX_PUBLISH_ATTEMPTS; i++) {\n if (i > 0) {\n // exponential backoff on retries\n await sleep(INITIAL_PUBLISH_BACKOFF_MS * 2 ** (i - 1));\n }\n try {\n await emit(cloudEvent);\n // Avoid logging the (possibly large and) unreadable data field.\n const {data: _, ...event} = cloudEvent;\n lc.info?.(`Published CloudEvent: ${cloudEvent.type}`, {event});\n return;\n } catch (e) {\n lc.warn?.(`Error publishing ${cloudEvent.type} (attempt ${i + 1})`, e);\n }\n }\n };\n}\n\nexport function initEventSinkForTesting(sink: ZeroEvent[], now = new Date()) {\n publishFn = (lc, event) => {\n lc.info?.(`Testing event sink received ${event.type} event`, event);\n // Replace the default Date.now() with the test instance for determinism.\n sink.push({...event, time: now.toISOString()});\n return promiseVoid;\n };\n}\n\nexport function publishEvent<E extends ZeroEvent>(lc: LogContext, event: E) {\n void publishFn(lc, event);\n}\n\nexport async function publishCriticalEvent<E extends ZeroEvent>(\n lc: LogContext,\n event: E,\n) {\n await publishFn(lc, event);\n}\n\nexport function makeErrorDetails(e: unknown): JSONObject {\n const err = e instanceof Error ? 
e : new Error(String(e));\n const errorDetails: JSONObject = {\n name: err.name,\n message: err.message,\n stack: err.stack,\n cause: err.cause ? makeErrorDetails(err.cause) : undefined,\n };\n // Include any enumerable properties (e.g. of Error subtypes).\n for (const [field, value] of Object.entries(err)) {\n if (isJSONValue(value, [])) {\n errorDetails[field] = value;\n }\n }\n return errorDetails;\n}\n"],"names":["v.union","v.string","v.number","v.boolean","v.record","v.object","lc","v.parse","cloudEvent","event"],"mappings":";;;;;;;;;;;AAcA,MAAM,uBAAuB;AAC7B,MAAM,6BAA6B;AAInC,IAAI,YAAyB,CAAC,IAAI,EAAC,WAAU;AAC3C,KAAG;AAAA,IACD,mBAAmB,IAAI;AAAA,EAAA;AAGzB,SAAO;AACT;AAEA,MAAM,uBAAuBA,MAAQC,OAAE,GAAUC,OAAE,GAAUC,QAAE,CAAS;AAExE,MAAM,cAAcC,OAAS,oBAAoB;AAMjD,MAAM,yBAAyBC,OAAS,EAAC,YAAY,aAAY;AAEjE,eAAe,WAAW,KAA8B;AACtD,QAAM,EAAC,SAAS,SAAS,SAAS,OAAA,IAAU,SAAA;AAC5C,OAAK,OAAO,KAAK,GAAG,GAAG,CAAC,KAAK,QAAS,MAAM,OAAO,GAAG,IAAI,QAAQ,GAAG,CAAE;AACvE,UAAQ,MAAM,SAAS,SAAS,QAAQ;AAC1C;AAOO,SAAS,cACd,IACA,EAAC,QAAQ,cACT;AACA,MAAI,CAAC,WAAW,SAAS;AAEvB,gBAAY,CAACC,KAAI,UAAU;AACzBA,UAAG,OAAO,cAAc,MAAM,IAAI,IAAI,KAAK;AAC3C,aAAO;AAAA,IACT;AACA;AAAA,EACF;AAEA,MAAI,YAA0B,CAAA;AAE9B,MAAI,WAAW,uBAAuB;AACpC,UAAM,SAAS,KAAK,QAAQ,IAAI,WAAW,qBAAqB,CAAC;AACjE,UAAM,EAAC,eAAcC,MAAQ,KAAK,MAAM,MAAM,GAAG,sBAAsB;AACvE,gBAAY;AAAA,EACd;AAEA,iBAAe,iBAAiB,OAAkB;AAChD,UAAM,EAAC,MAAM,KAAA,IAAQ;AACrB,UAAM,OAAO,UAAU,KAAK;AAC5B,UAAM,OAAO,MAAM,WAAW,IAAI;AAElC,WAAO,IAAI,WAAW;AAAA,MACpB,IAAI,OAAA;AAAA,MACJ,QAAQ;AAAA,MACR;AAAA,MACA;AAAA;AAAA;AAAA;AAAA;AAAA,MAKA,iBAAiB;AAAA,MACjB,qBAAqB;AAAA,MACrB;AAAA,MACA,GAAG;AAAA,IAAA,CACJ;AAAA,EACH;AAEA,QAAM,UAAU,KAAK,QAAQ,IAAI,WAAW,OAAO,CAAC;AACpD,QAAM,OAAO,WAAW,cAAc,OAAO,CAAC;AAC9C,KAAG,QAAQ,4BAA4B,OAAO,EAAE;AAEhD,cAAY,OAAOD,KAAI,UAAU;AAC/B,QAAIE;AACJ,QAAI;AACFA,oBAAa,MAAM,iBAAiB,KAAK;AAAA,IAC3C,SAAS,GAAG;AACVF,UAAG,QAAQ,6BAA6B,MAAM,IAAI,IAAI,CAAC;AACvD;AAAA,IACF;AACAA,QAAG,QAAQ,0BAA0BE,YAAW,IAAI,EAAE;AAEtD,aAAS,IAAI,GAAG,IAAI,sBAAsB,KAAK;AAC7C,UAAI,IAAI,GAAG;AAET,cAAM,MAA
M,6BAA6B,MAAM,IAAI,EAAE;AAAA,MACvD;AACA,UAAI;AACF,cAAM,KAAKA,WAAU;AAErB,cAAM,EAAC,MAAM,GAAG,GAAGC,WAASD;AAC5BF,YAAG,OAAO,yBAAyBE,YAAW,IAAI,IAAI,EAAC,OAAAC,QAAM;AAC7D;AAAA,MACF,SAAS,GAAG;AACVH,YAAG,OAAO,oBAAoBE,YAAW,IAAI,aAAa,IAAI,CAAC,KAAK,CAAC;AAAA,MACvE;AAAA,IACF;AAAA,EACF;AACF;AAeA,eAAsB,qBACpB,IACA,OACA;AACA,QAAM,UAAU,IAAI,KAAK;AAC3B;AAEO,SAAS,iBAAiB,GAAwB;AACvD,QAAM,MAAM,aAAa,QAAQ,IAAI,IAAI,MAAM,OAAO,CAAC,CAAC;AACxD,QAAM,eAA2B;AAAA,IAC/B,MAAM,IAAI;AAAA,IACV,SAAS,IAAI;AAAA,IACb,OAAO,IAAI;AAAA,IACX,OAAO,IAAI,QAAQ,iBAAiB,IAAI,KAAK,IAAI;AAAA,EAAA;AAGnD,aAAW,CAAC,OAAO,KAAK,KAAK,OAAO,QAAQ,GAAG,GAAG;AAChD,QAAI,YAAY,OAAO,CAAA,CAAE,GAAG;AAC1B,mBAAa,KAAK,IAAI;AAAA,IACxB;AAAA,EACF;AACA,SAAO;AACT;"}
@@ -1 +1 @@
1
- {"version":3,"file":"change-streamer.d.ts","sourceRoot":"","sources":["../../../../../zero-cache/src/server/change-streamer.ts"],"names":[],"mappings":"AAmBA,OAAO,EAGL,KAAK,MAAM,EACZ,MAAM,uBAAuB,CAAC;AAK/B,wBAA8B,SAAS,CACrC,MAAM,EAAE,MAAM,EACd,GAAG,EAAE,MAAM,CAAC,UAAU,EACtB,GAAG,IAAI,EAAE,MAAM,EAAE,GAChB,OAAO,CAAC,IAAI,CAAC,CAuIf"}
1
+ {"version":3,"file":"change-streamer.d.ts","sourceRoot":"","sources":["../../../../../zero-cache/src/server/change-streamer.ts"],"names":[],"mappings":"AAmBA,OAAO,EAGL,KAAK,MAAM,EACZ,MAAM,uBAAuB,CAAC;AAK/B,wBAA8B,SAAS,CACrC,MAAM,EAAE,MAAM,EACd,GAAG,EAAE,MAAM,CAAC,UAAU,EACtB,GAAG,IAAI,EAAE,MAAM,EAAE,GAChB,OAAO,CAAC,IAAI,CAAC,CAyIf"}