@rocicorp/zero 1.4.0-canary.1 → 1.4.0-canary.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/out/analyze-query/src/analyze-cli.d.ts +0 -1
- package/out/analyze-query/src/analyze-cli.d.ts.map +1 -1
- package/out/analyze-query/src/analyze-cli.js +0 -1
- package/out/analyze-query/src/analyze-cli.js.map +1 -1
- package/out/analyze-query/src/bin-analyze.js +11 -10
- package/out/analyze-query/src/bin-analyze.js.map +1 -1
- package/out/analyze-query/src/bin-transform.js +1 -1
- package/out/analyze-query/src/bin-transform.js.map +1 -1
- package/out/replicache/src/btree/node.d.ts +1 -1
- package/out/replicache/src/btree/node.d.ts.map +1 -1
- package/out/replicache/src/btree/node.js +34 -21
- package/out/replicache/src/btree/node.js.map +1 -1
- package/out/replicache/src/btree/write.js +1 -2
- package/out/replicache/src/btree/write.js.map +1 -1
- package/out/replicache/src/kv/sqlite-store.d.ts.map +1 -1
- package/out/replicache/src/kv/sqlite-store.js +7 -1
- package/out/replicache/src/kv/sqlite-store.js.map +1 -1
- package/out/replicache/src/with-transactions.d.ts.map +1 -1
- package/out/replicache/src/with-transactions.js +16 -2
- package/out/replicache/src/with-transactions.js.map +1 -1
- package/out/shared/src/btree-set.d.ts +6 -0
- package/out/shared/src/btree-set.d.ts.map +1 -1
- package/out/shared/src/btree-set.js +34 -0
- package/out/shared/src/btree-set.js.map +1 -1
- package/out/zero/package.js +8 -2
- package/out/zero/package.js.map +1 -1
- package/out/zero/src/adapters/kysely.d.ts +2 -0
- package/out/zero/src/adapters/kysely.d.ts.map +1 -0
- package/out/zero/src/adapters/kysely.js +2 -0
- package/out/zero/src/zero.js +2 -1
- package/out/zero-cache/src/auth/write-authorizer.d.ts.map +1 -1
- package/out/zero-cache/src/auth/write-authorizer.js +14 -1
- package/out/zero-cache/src/auth/write-authorizer.js.map +1 -1
- package/out/zero-cache/src/config/zero-config.d.ts +18 -0
- package/out/zero-cache/src/config/zero-config.d.ts.map +1 -1
- package/out/zero-cache/src/config/zero-config.js +35 -3
- package/out/zero-cache/src/config/zero-config.js.map +1 -1
- package/out/zero-cache/src/db/migration-lite.js +8 -1
- package/out/zero-cache/src/db/migration-lite.js.map +1 -1
- package/out/zero-cache/src/db/pg-to-lite.d.ts +1 -1
- package/out/zero-cache/src/db/pg-to-lite.d.ts.map +1 -1
- package/out/zero-cache/src/db/pg-to-lite.js +13 -13
- package/out/zero-cache/src/db/pg-to-lite.js.map +1 -1
- package/out/zero-cache/src/observability/metrics.d.ts +36 -6
- package/out/zero-cache/src/observability/metrics.d.ts.map +1 -1
- package/out/zero-cache/src/observability/metrics.js +55 -10
- package/out/zero-cache/src/observability/metrics.js.map +1 -1
- package/out/zero-cache/src/scripts/decommission.d.ts.map +1 -1
- package/out/zero-cache/src/scripts/decommission.js +3 -3
- package/out/zero-cache/src/scripts/decommission.js.map +1 -1
- package/out/zero-cache/src/scripts/deploy-permissions.js +1 -1
- package/out/zero-cache/src/scripts/deploy-permissions.js.map +1 -1
- package/out/zero-cache/src/server/change-streamer.d.ts.map +1 -1
- package/out/zero-cache/src/server/change-streamer.js +4 -5
- package/out/zero-cache/src/server/change-streamer.js.map +1 -1
- package/out/zero-cache/src/server/main.d.ts.map +1 -1
- package/out/zero-cache/src/server/main.js +6 -1
- package/out/zero-cache/src/server/main.js.map +1 -1
- package/out/zero-cache/src/server/reaper.d.ts.map +1 -1
- package/out/zero-cache/src/server/reaper.js +1 -4
- package/out/zero-cache/src/server/reaper.js.map +1 -1
- package/out/zero-cache/src/server/shadow-syncer.js +35 -0
- package/out/zero-cache/src/server/shadow-syncer.js.map +1 -0
- package/out/zero-cache/src/server/syncer.d.ts.map +1 -1
- package/out/zero-cache/src/server/syncer.js +2 -8
- package/out/zero-cache/src/server/syncer.js.map +1 -1
- package/out/zero-cache/src/server/worker-urls.d.ts +1 -0
- package/out/zero-cache/src/server/worker-urls.d.ts.map +1 -1
- package/out/zero-cache/src/server/worker-urls.js +2 -1
- package/out/zero-cache/src/server/worker-urls.js.map +1 -1
- package/out/zero-cache/src/services/analyze.d.ts.map +1 -1
- package/out/zero-cache/src/services/analyze.js +1 -1
- package/out/zero-cache/src/services/analyze.js.map +1 -1
- package/out/zero-cache/src/services/change-source/pg/backfill-stream.d.ts +8 -1
- package/out/zero-cache/src/services/change-source/pg/backfill-stream.d.ts.map +1 -1
- package/out/zero-cache/src/services/change-source/pg/backfill-stream.js +31 -18
- package/out/zero-cache/src/services/change-source/pg/backfill-stream.js.map +1 -1
- package/out/zero-cache/src/services/change-source/pg/change-source.d.ts.map +1 -1
- package/out/zero-cache/src/services/change-source/pg/change-source.js +48 -47
- package/out/zero-cache/src/services/change-source/pg/change-source.js.map +1 -1
- package/out/zero-cache/src/services/change-source/pg/initial-sync.d.ts +6 -1
- package/out/zero-cache/src/services/change-source/pg/initial-sync.d.ts.map +1 -1
- package/out/zero-cache/src/services/change-source/pg/initial-sync.js +64 -22
- package/out/zero-cache/src/services/change-source/pg/initial-sync.js.map +1 -1
- package/out/zero-cache/src/services/change-streamer/change-streamer-http.d.ts.map +1 -1
- package/out/zero-cache/src/services/change-streamer/change-streamer-http.js +2 -3
- package/out/zero-cache/src/services/change-streamer/change-streamer-http.js.map +1 -1
- package/out/zero-cache/src/services/change-streamer/schema/tables.js +1 -1
- package/out/zero-cache/src/services/replicator/change-processor.d.ts.map +1 -1
- package/out/zero-cache/src/services/replicator/change-processor.js +10 -3
- package/out/zero-cache/src/services/replicator/change-processor.js.map +1 -1
- package/out/zero-cache/src/services/shadow-sync/shadow-sync-service.js +49 -0
- package/out/zero-cache/src/services/shadow-sync/shadow-sync-service.js.map +1 -0
- package/out/zero-cache/src/services/statz.js +3 -3
- package/out/zero-cache/src/services/statz.js.map +1 -1
- package/out/zero-cache/src/services/view-syncer/client-handler.js +3 -6
- package/out/zero-cache/src/services/view-syncer/client-handler.js.map +1 -1
- package/out/zero-cache/src/services/view-syncer/cvr-store.d.ts +1 -0
- package/out/zero-cache/src/services/view-syncer/cvr-store.d.ts.map +1 -1
- package/out/zero-cache/src/services/view-syncer/cvr-store.js +34 -11
- package/out/zero-cache/src/services/view-syncer/cvr-store.js.map +1 -1
- package/out/zero-cache/src/services/view-syncer/cvr.d.ts +16 -1
- package/out/zero-cache/src/services/view-syncer/cvr.d.ts.map +1 -1
- package/out/zero-cache/src/services/view-syncer/cvr.js +19 -1
- package/out/zero-cache/src/services/view-syncer/cvr.js.map +1 -1
- package/out/zero-cache/src/services/view-syncer/inspect-handler.js +1 -1
- package/out/zero-cache/src/services/view-syncer/inspect-handler.js.map +1 -1
- package/out/zero-cache/src/services/view-syncer/pipeline-driver.d.ts +8 -2
- package/out/zero-cache/src/services/view-syncer/pipeline-driver.d.ts.map +1 -1
- package/out/zero-cache/src/services/view-syncer/pipeline-driver.js +50 -10
- package/out/zero-cache/src/services/view-syncer/pipeline-driver.js.map +1 -1
- package/out/zero-cache/src/services/view-syncer/row-record-cache.js +4 -7
- package/out/zero-cache/src/services/view-syncer/row-record-cache.js.map +1 -1
- package/out/zero-cache/src/services/view-syncer/row-set-signature.d.ts +17 -0
- package/out/zero-cache/src/services/view-syncer/row-set-signature.d.ts.map +1 -0
- package/out/zero-cache/src/services/view-syncer/row-set-signature.js +29 -0
- package/out/zero-cache/src/services/view-syncer/row-set-signature.js.map +1 -0
- package/out/zero-cache/src/services/view-syncer/schema/cvr.d.ts +1 -0
- package/out/zero-cache/src/services/view-syncer/schema/cvr.d.ts.map +1 -1
- package/out/zero-cache/src/services/view-syncer/schema/cvr.js +1 -0
- package/out/zero-cache/src/services/view-syncer/schema/cvr.js.map +1 -1
- package/out/zero-cache/src/services/view-syncer/schema/init.d.ts.map +1 -1
- package/out/zero-cache/src/services/view-syncer/schema/init.js +5 -1
- package/out/zero-cache/src/services/view-syncer/schema/init.js.map +1 -1
- package/out/zero-cache/src/services/view-syncer/schema/types.d.ts +105 -0
- package/out/zero-cache/src/services/view-syncer/schema/types.d.ts.map +1 -1
- package/out/zero-cache/src/services/view-syncer/schema/types.js +8 -4
- package/out/zero-cache/src/services/view-syncer/schema/types.js.map +1 -1
- package/out/zero-cache/src/services/view-syncer/view-syncer.d.ts.map +1 -1
- package/out/zero-cache/src/services/view-syncer/view-syncer.js +18 -28
- package/out/zero-cache/src/services/view-syncer/view-syncer.js.map +1 -1
- package/out/zero-cache/src/types/pg.d.ts +1 -1
- package/out/zero-cache/src/types/pg.d.ts.map +1 -1
- package/out/zero-cache/src/types/pg.js +8 -2
- package/out/zero-cache/src/types/pg.js.map +1 -1
- package/out/zero-cache/src/types/timeout.d.ts +11 -0
- package/out/zero-cache/src/types/timeout.d.ts.map +1 -0
- package/out/zero-cache/src/types/timeout.js +26 -0
- package/out/zero-cache/src/types/timeout.js.map +1 -0
- package/out/zero-cache/src/workers/connection.js +3 -3
- package/out/zero-cache/src/workers/connection.js.map +1 -1
- package/out/zero-client/src/client/version.js +1 -1
- package/out/zero-client/src/mod.d.ts +1 -0
- package/out/zero-client/src/mod.d.ts.map +1 -1
- package/out/zero-client/src/mod.js +1 -0
- package/out/zero-react/src/zero.js +1 -0
- package/out/zero-server/src/adapters/kysely.d.ts +69 -0
- package/out/zero-server/src/adapters/kysely.d.ts.map +1 -0
- package/out/zero-server/src/adapters/kysely.js +82 -0
- package/out/zero-server/src/adapters/kysely.js.map +1 -0
- package/out/zero-server/src/adapters/postgresjs.d.ts.map +1 -1
- package/out/zero-server/src/adapters/postgresjs.js +1 -1
- package/out/zero-server/src/adapters/postgresjs.js.map +1 -1
- package/out/zero-solid/src/zero.js +1 -0
- package/out/zql/src/ivm/memory-source.d.ts.map +1 -1
- package/out/zql/src/ivm/memory-source.js +3 -3
- package/out/zql/src/ivm/memory-source.js.map +1 -1
- package/out/zql/src/query/query-internals.d.ts.map +1 -1
- package/out/zql/src/query/query-internals.js +1 -1
- package/out/zql/src/query/query-internals.js.map +1 -1
- package/out/zql/src/query/validate-input.d.ts +8 -0
- package/out/zql/src/query/validate-input.d.ts.map +1 -1
- package/out/zql/src/query/validate-input.js +15 -2
- package/out/zql/src/query/validate-input.js.map +1 -1
- package/out/zqlite/src/query-builder.js +19 -7
- package/out/zqlite/src/query-builder.js.map +1 -1
- package/package.json +10 -2
- package/out/analyze-query/src/explain-queries.d.ts +0 -4
- package/out/analyze-query/src/explain-queries.d.ts.map +0 -1
- package/out/analyze-query/src/explain-queries.js +0 -13
- package/out/analyze-query/src/explain-queries.js.map +0 -1
- package/out/otel/src/test-log-config.d.ts +0 -8
- package/out/otel/src/test-log-config.d.ts.map +0 -1
- package/out/otel/src/test-log-config.js +0 -12
- package/out/otel/src/test-log-config.js.map +0 -1
package/out/zero-cache/src/auth/write-authorizer.js.map
@@ -1 +1 @@
- {"version":3,"file":"write-authorizer.js", ...}
+ {"version":3,"file":"write-authorizer.js", ...}

Both sides of this hunk are the single-line source map for write-authorizer.js; the full maps are elided here. In the regenerated map, the embedded write-authorizer.ts source shows the substantive change to canPostMutation: the operation error is now captured before the finally block rolls back the concurrent statement, and if the rollback itself also fails, a combined error is thrown with the original operation error attached as its cause, so a rollback failure no longer masks the operation failure.
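For reference, a minimal sketch of that rollback error-handling pattern, simplified from the embedded source. The wrapper name withSpeculativeWrites and the StatementRunner interface below are stand-ins for illustration, not the package's actual API:

interface StatementRunner {
  beginConcurrent(): void;
  rollback(): void;
}

// Runs `body` inside a speculative (always rolled-back) transaction and makes
// sure a failing rollback does not hide the original failure from `body`.
async function withSpeculativeWrites<T>(
  runner: StatementRunner,
  body: () => Promise<T>,
): Promise<T> {
  runner.beginConcurrent();
  let opError: unknown;
  try {
    return await body();
  } catch (e) {
    opError = e;
    throw e;
  } finally {
    try {
      runner.rollback();
    } catch (rollbackError) {
      if (opError !== undefined) {
        // Report both failures; keep the original error as `cause`.
        const combined = new Error(
          `operation failed and rollback also failed: ` +
            `operation error = ${String(opError)}; rollback error = ${String(rollbackError)}`,
        );
        (combined as Error & {cause?: unknown}).cause = opError;
        throw combined;
      }
      throw rollbackError;
    }
  }
}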
package/out/zero-cache/src/config/zero-config.d.ts
@@ -519,6 +519,24 @@ export declare const zeroOptions: {
             desc: string[];
         };
     };
+    shadowSync: {
+        enabled: {
+            type: v.Type<boolean>;
+            desc: string[];
+        };
+        intervalHours: {
+            type: v.Type<number>;
+            desc: string[];
+        };
+        sampleRate: {
+            type: v.Type<number>;
+            desc: string[];
+        };
+        maxRowsPerTable: {
+            type: v.Type<number>;
+            desc: string[];
+        };
+    };
     /** @deprecated */
     targetClientRowCount: {
         type: v.Type<number>;
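Taken together, the new declarations describe a shadowSync option group with four fields. A hypothetical illustration of the parsed shape (field names from the diff; example values are invented, apart from the defaults visible in the zero-config.js hunk below):

type ShadowSyncConfig = {
  enabled: boolean;      // defaults to false per zero-config.js below
  intervalHours: number; // defaults to 24 per zero-config.js below
  sampleRate: number;
  maxRowsPerTable: number;
};

// Invented example values, for illustration only.
const shadowSyncExample: ShadowSyncConfig = {
  enabled: true,
  intervalHours: 12,
  sampleRate: 0.01,
  maxRowsPerTable: 10000,
};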
@@ -1 +1 @@
|
|
|
1
|
-
{"version":3,"file":"zero-config.d.ts","sourceRoot":"","sources":["../../../../../zero-cache/src/config/zero-config.ts"],"names":[],"mappings":"AAAA;;GAEG;AAGH,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,kBAAkB,CAAC;AAEjD,OAAO,EAGL,KAAK,MAAM,EACX,KAAK,YAAY,EAClB,MAAM,gCAAgC,CAAC;AACxC,OAAO,KAAK,CAAC,MAAM,+BAA+B,CAAC;AAUnD,OAAO,EAGL,KAAK,oBAAoB,EAC1B,MAAM,gBAAgB,CAAC;AACxB,YAAY,EAAC,SAAS,EAAC,MAAM,kCAAkC,CAAC;AAEhE,eAAO,MAAM,mBAAmB,UAAU,CAAC;AAE3C,eAAO,MAAM,UAAU;;;;;;;;;CA+CtB,CAAC;AAEF,eAAO,MAAM,YAAY;;;;;;;;;;CAwBxB,CAAC;AAEF,QAAA,MAAM,cAAc;;;;;;;;;CAmBnB,CAAC;AAEF,MAAM,MAAM,cAAc,GAAG,MAAM,CAAC,OAAO,cAAc,CAAC,CAAC;AAE3D,QAAA,MAAM,oBAAoB;;;;;;;;;CAczB,CAAC;AAEF,MAAM,MAAM,SAAS,GAAG,MAAM,CAAC,OAAO,oBAAoB,CAAC,CAAC;AAE5D,QAAA,MAAM,WAAW;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;CA8DhB,CAAC;AAuGF,MAAM,MAAM,UAAU,GAAG,MAAM,CAAC,OAAO,WAAW,CAAC,CAAC;AAEpD,+DAA+D;AAC/D,MAAM,MAAM,mBAAmB,GAAG,IAAI,CACpC,UAAU,EACV,KAAK,GAAG,SAAS,GAAG,QAAQ,GAAG,QAAQ,GAAG,UAAU,CACrD,CAAC;AAKF,eAAO,MAAM,WAAW;;;;;;;;;;;;;;;;;;;;;;;;IAsDtB,kBAAkB;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;IAGlB,kBAAkB;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;QAuLhB,kBAAkB;;;;;;QASlB,kBAAkB
+
{"version":3,"file":"zero-config.d.ts","sourceRoot":"","sources":["../../../../../zero-cache/src/config/zero-config.ts"],"names":[],"mappings":"AAAA;;GAEG;AAGH,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,kBAAkB,CAAC;AAEjD,OAAO,EAGL,KAAK,MAAM,EACX,KAAK,YAAY,EAClB,MAAM,gCAAgC,CAAC;AACxC,OAAO,KAAK,CAAC,MAAM,+BAA+B,CAAC;AAUnD,OAAO,EAGL,KAAK,oBAAoB,EAC1B,MAAM,gBAAgB,CAAC;AACxB,YAAY,EAAC,SAAS,EAAC,MAAM,kCAAkC,CAAC;AAEhE,eAAO,MAAM,mBAAmB,UAAU,CAAC;AAE3C,eAAO,MAAM,UAAU;;;;;;;;;CA+CtB,CAAC;AAEF,eAAO,MAAM,YAAY;;;;;;;;;;CAwBxB,CAAC;AAEF,QAAA,MAAM,cAAc;;;;;;;;;CAmBnB,CAAC;AAEF,MAAM,MAAM,cAAc,GAAG,MAAM,CAAC,OAAO,cAAc,CAAC,CAAC;AAE3D,QAAA,MAAM,oBAAoB;;;;;;;;;CAczB,CAAC;AAEF,MAAM,MAAM,SAAS,GAAG,MAAM,CAAC,OAAO,oBAAoB,CAAC,CAAC;AAE5D,QAAA,MAAM,WAAW;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;CA8DhB,CAAC;AAuGF,MAAM,MAAM,UAAU,GAAG,MAAM,CAAC,OAAO,WAAW,CAAC,CAAC;AAEpD,+DAA+D;AAC/D,MAAM,MAAM,mBAAmB,GAAG,IAAI,CACpC,UAAU,EACV,KAAK,GAAG,SAAS,GAAG,QAAQ,GAAG,QAAQ,GAAG,UAAU,CACrD,CAAC;AAKF,eAAO,MAAM,WAAW;;;;;;;;;;;;;;;;;;;;;;;;IAsDtB,kBAAkB;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;IAGlB,kBAAkB;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;QAuLhB,kBAAkB;;;;;;QASlB,kBAAkB;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;IAkZpB,kBAAkB;;;;;;;;;;;;;;;;;;;;;;;;;;;;CAgEnB,CAAC;AAEF,MAAM,MAAM,UAAU,GAAG,MAAM,CAAC,OAAO,WAAW,CAAC,CAAC;AAIpD,wBAAgB,aAAa,CAC3B,IAAI,GAAE,IAAI,CAAC,YAAY,EAAE,eAAe,CAAM,GAC7C,UAAU,CAaZ;AAED;;;GAGG;AACH,wBAAgB,uBAAuB,CACrC,IAAI,GAAE,IAAI,CAAC,YAAY,EAAE,eAAe,CAAM,GAC7C,oBAAoB,CAItB;AAED;;;GAGG;AACH,wBAAgB,gBAAgB,CAC9B,MAAM,EAAE,IAAI,CAAC,UAAU,EAAE,eAAe,CAAC,GAAG,SAAS,GACpD,MAAM,CAER;AAED,wBAAgB,oBAAoB,CAClC,EAAE,EAAE,UAAU,EACd,MAAM,EAAE,IAAI,CAAC,oBAAoB,EAAE,eAAe,CAAC,EACnD,QAAQ,EAAE,MAAM,GAAG,SAAS,WAwC7B;AAYD,wBAAgB,kBAAkB,SAEjC"}
@@ -725,12 +725,44 @@ var zeroOptions = {
     textCopy: {
       type: valita_exports.boolean().default(false),
       desc: [
-        `Use text-format COPY instead of binary COPY for the initial sync.`,
-        `This is slower but can work around issues with binary encoding of`,
-        `certain data types.`
+        `Use text-format COPY instead of binary COPY for initial sync and`,
+        `backfill streaming. This is slower but can work around issues with`,
+        `binary encoding of certain data types.`
       ]
     }
   },
+  shadowSync: {
+    enabled: {
+      type: valita_exports.boolean().default(false),
+      desc: [
+        `Periodically exercises the initial-sync code path against a sample of`,
+        `rows from every published table, writing to a throwaway SQLite database.`,
+        `This acts as a canary: if the real initial-sync path ever breaks (schema`,
+        `drift, PG version quirks, etc.), the shadow run fails before a customer`,
+        `actually needs a full reset.`
+      ]
+    },
+    intervalHours: {
+      type: valita_exports.number().default(24),
+      desc: [
+        `The interval between shadow initial-sync runs, in hours. The first run`,
+        `is additionally staggered by a random fraction of this interval so that`,
+        `a fleet restart does not cause all tasks to canary simultaneously.`
+      ]
+    },
+    sampleRate: {
+      type: valita_exports.number().default(.1),
+      desc: [
+        `The BERNOULLI sampling rate for each table (0 < rate <= 1). A value of`,
+        `1 disables sampling and copies all rows (still subject to`,
+        `{bold max-rows-per-table}).`
+      ]
+    },
+    maxRowsPerTable: {
+      type: valita_exports.number().default(1e4),
+      desc: [`The hard upper bound on rows copied per table per shadow run. Guards`, `against unexpectedly large tables consuming disk / upstream bandwidth.`]
+    }
+  },
   targetClientRowCount: {
     type: valita_exports.number().default(2e4),
     deprecated: [
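The descriptions added above outline the shadow-sync behavior: a BERNOULLI-sampled copy of each published table, capped per table, on a staggered interval. The sketch below illustrates those semantics only; the function names, the use of Postgres TABLESAMPLE, and the scheduling shape are assumptions rather than the package's actual implementation.

// Builds a sampled read for one table. BERNOULLI takes a percentage (0-100)
// while the configured sample rate is a fraction (0 < rate <= 1); a rate of 1
// copies all rows, still capped by max-rows-per-table. The table name is
// assumed to already be validated and quoted.
function shadowSampleQuery(table: string, sampleRate: number, maxRows: number): string {
  const pct = Math.min(Math.max(sampleRate, 0), 1) * 100;
  return `SELECT * FROM ${table} TABLESAMPLE BERNOULLI (${pct}) LIMIT ${maxRows}`;
}

// The first run is additionally staggered by a random fraction of the interval,
// so a fleet restart does not make every task canary at the same time.
function nextShadowRunDelayMs(intervalHours: number, isFirstRun: boolean): number {
  const intervalMs = intervalHours * 60 * 60 * 1000;
  return intervalMs + (isFirstRun ? Math.random() * intervalMs : 0);
}

With the defaults (sampleRate 0.1, maxRowsPerTable 10000, intervalHours 24), each table contributes roughly a 10% sample of at most 10,000 rows about once a day.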
@@ -1 +1 @@
-
{"version":3,"file":"zero-config.js","names":[],"sources":["../../../../../zero-cache/src/config/zero-config.ts"],"sourcesContent":["/**\n * These types represent the _compiled_ config whereas `define-config` types represent the _source_ config.\n */\n\nimport {timingSafeEqual} from 'node:crypto';\nimport type {LogContext} from '@rocicorp/logger';\nimport {logOptions} from '../../../otel/src/log-options.ts';\nimport {\n flagToEnv,\n parseOptions,\n type Config,\n type ParseOptions,\n} from '../../../shared/src/options.ts';\nimport * as v from '../../../shared/src/valita.ts';\n// @circular-dep-ignore - importing package.json for version info only\nimport packageJson from '../../../zero/package.json' with {type: 'json'};\nimport {runtimeDebugFlags} from '../../../zql/src/builder/debug-delegate.ts';\nimport {singleProcessMode} from '../types/processes.ts';\nimport {\n ALLOWED_APP_ID_CHARACTERS,\n INVALID_APP_ID_MESSAGE,\n} from '../types/shards.ts';\nimport {DEFAULT_PREFERRED_PREFIXES} from './network.ts';\nimport {\n assertNormalized,\n isDevelopmentMode,\n type NormalizedZeroConfig,\n} from './normalize.ts';\nexport type {LogConfig} from '../../../otel/src/log-options.ts';\n\nexport const ZERO_ENV_VAR_PREFIX = 'ZERO_';\n\nexport const appOptions = {\n id: {\n type: v\n .string()\n .default('zero')\n .assert(id => ALLOWED_APP_ID_CHARACTERS.test(id), INVALID_APP_ID_MESSAGE),\n desc: [\n 'Unique identifier for the app.',\n '',\n 'Multiple zero-cache apps can run on a single upstream database, each of which',\n 'is isolated from the others, with its own permissions, sharding (future feature),',\n 'and change/cvr databases.',\n '',\n 'The metadata of an app is stored in an upstream schema with the same name,',\n 'e.g. \"zero\", and the metadata for each app shard, e.g. client and mutation',\n 'ids, is stored in the \"\\\\{app-id\\\\}_\\\\{#\\\\}\" schema. (Currently there is only a single',\n '\"0\" shard, but this will change with sharding).',\n '',\n 'The CVR and Change data are managed in schemas named \"\\\\{app-id\\\\}_\\\\{shard-num\\\\}/cvr\"',\n 'and \"\\\\{app-id\\\\}_\\\\{shard-num\\\\}/cdc\", respectively, allowing multiple apps and shards',\n 'to share the same database instance (e.g. a Postgres \"cluster\") for CVR and Change management.',\n '',\n 'Due to constraints on replication slot names, an App ID may only consist of',\n 'lower-case letters, numbers, and the underscore character.',\n '',\n 'Note that this option is used by both {bold zero-cache} and {bold zero-deploy-permissions}.',\n ],\n },\n\n publications: {\n type: v.array(v.string()).optional(() => []),\n desc: [\n `Postgres {bold PUBLICATION}s that define the tables and columns to`,\n `replicate. Publication names may not begin with an underscore,`,\n `as zero reserves that prefix for internal use.`,\n ``,\n `If unspecified, zero-cache will create and use an internal publication that`,\n `publishes all tables in the {bold public} schema, i.e.:`,\n ``,\n `CREATE PUBLICATION _\\\\{app-id\\\\}_public_0 FOR TABLES IN SCHEMA public;`,\n ``,\n `Note that changing the set of publications will result in resyncing the replica,`,\n `which may involve downtime (replication lag) while the new replica is initializing.`,\n `To change the set of publications without disrupting an existing app, a new app`,\n `should be created.`,\n ],\n },\n};\n\nexport const shardOptions = {\n id: {\n type: v\n .string()\n .assert(() => {\n throw new Error(\n `ZERO_SHARD_ID is no longer an option. 
Please use ZERO_APP_ID instead.`,\n // TODO: Link to release / migration notes?\n );\n })\n .optional(),\n hidden: true,\n },\n\n num: {\n type: v.number().default(0),\n desc: [\n `The shard number (from 0 to NUM_SHARDS) of the App. zero will eventually`,\n `support data sharding as a first-class primitive; until then, deploying`,\n `multiple shard-nums creates functionally identical shards. Until sharding is`,\n `actually meaningful, this flag is hidden but available for testing.`,\n ],\n hidden: true,\n },\n};\n\nconst replicaOptions = {\n file: {\n type: v.string().default('zero.db'),\n desc: [\n `File path to the SQLite replica that zero-cache maintains.`,\n `This can be lost, but if it is, zero-cache will have to re-replicate next`,\n `time it starts up.`,\n ],\n },\n\n vacuumIntervalHours: {\n type: v.number().optional(),\n desc: [\n `Performs a VACUUM at server startup if the specified number of hours has elapsed`,\n `since the last VACUUM (or initial-sync). The VACUUM operation is heavyweight`,\n `and requires double the size of the db in disk space. If unspecified, VACUUM`,\n `operations are not performed.`,\n ],\n },\n};\n\nexport type ReplicaOptions = Config<typeof replicaOptions>;\n\nconst perUserMutationLimit = {\n max: {\n type: v.number().optional(),\n desc: [\n `The maximum mutations per user within the specified {bold windowMs}.`,\n `If unset, no rate limiting is enforced.`,\n ],\n },\n windowMs: {\n type: v.number().default(60_000),\n desc: [\n `The sliding window over which the {bold perUserMutationLimitMax} is enforced.`,\n ],\n },\n};\n\nexport type RateLimit = Config<typeof perUserMutationLimit>;\n\nconst authOptions = {\n jwk: {\n type: v.string().optional(),\n desc: [\n `A public key in JWK format used to verify JWTs. Only one of {bold jwk}, {bold jwksUrl} and {bold secret} may be set.`,\n ],\n deprecated: [\n `Use cookie-based authentication or an auth token instead - see https://zero.rocicorp.dev/docs/auth.`,\n ],\n },\n jwksUrl: {\n type: v.string().optional(),\n desc: [\n `A URL that returns a JWK set used to verify JWTs. Only one of {bold jwk}, {bold jwksUrl} and {bold secret} may be set.`,\n ],\n deprecated: [\n `Use cookie-based authentication or an auth token instead - see https://zero.rocicorp.dev/docs/auth.`,\n ],\n },\n secret: {\n type: v.string().optional(),\n desc: [\n `A symmetric key used to verify JWTs. 
Only one of {bold jwk}, {bold jwksUrl} and {bold secret} may be set.`,\n ],\n deprecated: [\n `Use cookie-based authentication or an auth token instead - see https://zero.rocicorp.dev/docs/auth.`,\n ],\n },\n issuer: {\n type: v.string().optional(),\n desc: [\n `Expected issuer ({bold iss} claim) for JWT validation.`,\n `If set, tokens with a different or missing issuer will be rejected.`,\n ],\n deprecated: [\n `Use cookie-based authentication or an auth token instead - see https://zero.rocicorp.dev/docs/auth.`,\n ],\n },\n audience: {\n type: v.string().optional(),\n desc: [\n `Expected audience ({bold aud} claim) for JWT validation.`,\n `If set, tokens with a different or missing audience will be rejected.`,\n ],\n deprecated: [\n `Use cookie-based authentication or an auth token instead - see https://zero.rocicorp.dev/docs/auth.`,\n ],\n },\n revalidateIntervalSeconds: {\n type: v.number().optional(),\n desc: [\n `The interval in seconds between periodic /query auth revalidation for validated connections.`,\n `If unset, periodic auth revalidation is disabled.`,\n ],\n },\n retransformIntervalSeconds: {\n type: v.number().optional(),\n desc: [\n `The interval in seconds between periodic shared /query retransform work for a client group.`,\n `If unset, periodic shared retransform is disabled.`,\n ],\n },\n};\n\nconst makeDeprecationMessage = (flag: string) =>\n `Use {bold ${flagToEnv(ZERO_ENV_VAR_PREFIX, flag)}} (or {bold --${flag}}) instead.`;\n\nconst makeMutatorQueryOptions = (\n replacement: 'mutate' | 'query' | undefined,\n suffix: string,\n) => ({\n url: {\n type: v.array(v.string()).optional(), // optional until we remove CRUD mutations\n desc: [\n `The URL of the API server to which zero-cache will ${suffix}.`,\n ``,\n `{bold IMPORTANT:} URLs are matched using {bold URLPattern}, a standard Web API.`,\n ``,\n `{bold Pattern Syntax:}`,\n ` URLPattern uses a simple and intuitive syntax similar to Express routes.`,\n ` Wildcards and named parameters make it easy to match multiple URLs.`,\n ``,\n `{bold Basic Examples:}`,\n ` Exact URL match:`,\n ` \"https://api.example.com/mutate\"`,\n ` `,\n ` Any subdomain using wildcard:`,\n ` \"https://*.example.com/mutate\"`,\n ` `,\n ` Multiple subdomain levels:`,\n ` \"https://*.*.example.com/mutate\"`,\n ` `,\n ` Any path under a domain:`,\n ` \"https://api.example.com/*\"`,\n ` `,\n ` Named path parameters:`,\n ` \"https://api.example.com/:version/mutate\"`,\n ` ↳ Matches \"https://api.example.com/v1/mutate\", \"https://api.example.com/v2/mutate\", etc.`,\n ``,\n `{bold Advanced Patterns:}`,\n ` Optional path segments:`,\n ` \"https://api.example.com/:path?\"`,\n ` `,\n ` Regex in segments (for specific patterns):`,\n ` \"https://api.example.com/:version(v\\\\\\\\d+)/mutate\"`,\n ` ↳ Matches only \"v\" followed by digits`,\n ``,\n `{bold Multiple patterns:}`,\n ` [\"https://api1.example.com/mutate\", \"https://api2.example.com/mutate\"]`,\n ``,\n `{bold Note:} Query parameters and URL fragments (#) are automatically ignored during matching.`,\n ``,\n `For full URLPattern syntax, see: https://developer.mozilla.org/en-US/docs/Web/API/URLPattern`,\n ],\n ...(replacement\n ? {deprecated: [makeDeprecationMessage(`${replacement}-url`)]}\n : {}),\n },\n apiKey: {\n type: v.string().optional(),\n desc: [\n `An optional secret used to authorize zero-cache to call the API server handling writes.`,\n ],\n ...(replacement\n ? 
{deprecated: [makeDeprecationMessage(`${replacement}-api-key`)]}\n : {}),\n },\n forwardCookies: {\n type: v.boolean().default(false),\n desc: [\n `If true, zero-cache will forward cookies from the request.`,\n `This is useful for passing authentication cookies to the API server.`,\n `If false, cookies are not forwarded.`,\n ],\n ...(replacement\n ? {deprecated: [makeDeprecationMessage(`${replacement}-forward-cookies`)]}\n : {}),\n },\n allowedClientHeaders: {\n type: v.array(v.string()).optional(),\n desc: [\n `A list of header names that clients are allowed to set via custom headers.`,\n `If specified, only headers in this list will be forwarded to the ${suffix === 'push mutations' ? 'push' : 'query'} URL.`,\n `Header names are case-insensitive.`,\n `If not specified, no client-provided headers are forwarded (secure by default).`,\n `Example: ZERO_${replacement ? replacement.toUpperCase() : suffix === 'push mutations' ? 'MUTATE' : 'QUERY'}_ALLOWED_CLIENT_HEADERS=x-request-id,x-correlation-id`,\n ],\n ...(replacement\n ? {\n deprecated: [\n makeDeprecationMessage(`${replacement}-allowed-client-headers`),\n ],\n }\n : {}),\n },\n});\n\nconst mutateOptions = makeMutatorQueryOptions(undefined, 'push mutations');\nconst pushOptions = makeMutatorQueryOptions('mutate', 'push mutations');\nconst queryOptions = makeMutatorQueryOptions(undefined, 'send synced queries');\nconst getQueriesOptions = makeMutatorQueryOptions(\n 'query',\n 'send synced queries',\n);\n\nexport type AuthConfig = Config<typeof authOptions>;\n\n/** @deprecated used only by legacy JWT verification helpers */\nexport type LegacyJWTAuthConfig = Pick<\n AuthConfig,\n 'jwk' | 'jwksUrl' | 'secret' | 'issuer' | 'audience'\n>;\n\n// Note: --help will list flags in the order in which they are defined here,\n// so order the fields such that the important (e.g. required) ones are first.\n// (Exported for testing)\nexport const zeroOptions = {\n upstream: {\n db: {\n type: v.string(),\n desc: [\n `The \"upstream\" authoritative postgres database.`,\n `In the future we will support other types of upstream besides PG.`,\n ],\n },\n\n type: {\n type: v.literalUnion('pg', 'custom').default('pg'),\n desc: [\n `The meaning of the {bold upstream-db} depends on the upstream type:`,\n `* {bold pg}: The connection database string, e.g. \"postgres://...\"`,\n `* {bold custom}: The base URI of the change source \"endpoint, e.g.`,\n ` \"https://my-change-source.dev/changes/v0/stream?apiKey=...\"`,\n ],\n hidden: true, // TODO: Unhide when ready to officially support.\n },\n\n maxConns: {\n type: v.number().default(20),\n desc: [\n `The maximum number of connections to open to the upstream database`,\n `for committing mutations. This is divided evenly amongst sync workers.`,\n `In addition to this number, zero-cache uses one connection for the`,\n `replication stream.`,\n ``,\n `Note that this number must allow for at least one connection per`,\n `sync worker, or zero-cache will fail to start. See {bold num-sync-workers}`,\n ],\n },\n\n maxConnsPerWorker: {\n type: v.number().optional(),\n hidden: true, // Passed from main thread to sync workers\n },\n\n pgReplicationSlotFailover: {\n type: v.boolean().optional(),\n desc: [\n `For upstream Postgres versions 17+, creates replication slots with the`,\n `{bold failover} parameter set to {bold true} to enable slot synchronization`,\n `and failover. Note that additional Postgres-level configuration is necessary`,\n `when enabling this option. 
For details, see:`,\n ``,\n `https://www.postgresql.org/docs/current/logicaldecoding-explanation.html#LOGICALDECODING-REPLICATION-SLOTS-SYNCHRONIZATION`,\n ``,\n `(Note that this option has no effect for Postgres versions before 17.)`,\n ],\n },\n },\n\n /** @deprecated */\n push: pushOptions,\n mutate: mutateOptions,\n /** @deprecated */\n getQueries: getQueriesOptions,\n query: queryOptions,\n\n enableCrudMutations: {\n type: v.boolean().default(true),\n desc: [\n `Enables support for legacy CRUD mutations. When this is {bold false}, no connections`,\n `are made from view-syncers to the upstream db, and push messages with CRUD mutations`,\n `result in an InvalidPush response.`,\n ],\n },\n\n cvr: {\n db: {\n type: v.string().optional(),\n desc: [\n `The Postgres database used to store CVRs. CVRs (client view records) keep track`,\n `of the data synced to clients in order to determine the diff to send on reconnect.`,\n `If unspecified, the {bold upstream-db} will be used.`,\n ],\n },\n\n maxConns: {\n type: v.number().default(30),\n desc: [\n `The maximum number of connections to open to the CVR database.`,\n `This is divided evenly amongst sync workers.`,\n ``,\n `Note that this number must allow for at least one connection per`,\n `sync worker, or zero-cache will fail to start. See {bold num-sync-workers}`,\n ],\n },\n\n maxConnsPerWorker: {\n type: v.number().optional(),\n hidden: true, // Passed from main thread to sync workers\n },\n\n garbageCollectionInactivityThresholdHours: {\n type: v.number().default(48),\n desc: [\n `The duration after which an inactive CVR is eligible for garbage collection.`,\n `Note that garbage collection is an incremental, periodic process which does not`,\n `necessarily purge all eligible CVRs immediately.`,\n ],\n },\n\n garbageCollectionInitialIntervalSeconds: {\n type: v.number().default(60),\n desc: [\n `The initial interval at which to check and garbage collect inactive CVRs.`,\n `This interval is increased exponentially (up to 16 minutes) when there is`,\n `nothing to purge.`,\n ],\n },\n\n garbageCollectionInitialBatchSize: {\n type: v.number().default(25),\n desc: [\n `The initial number of CVRs to purge per garbage collection interval.`,\n `This number is increased linearly if the rate of new CVRs exceeds the rate of`,\n `purged CVRs, in order to reach a steady state.`,\n ``,\n `Setting this to 0 effectively disables CVR garbage collection.`,\n ],\n },\n },\n\n queryHydrationStats: {\n type: v.boolean().optional(),\n desc: [\n `Track and log the number of rows considered by query hydrations which`,\n `take longer than {bold log-slow-hydrate-threshold} milliseconds.`,\n `This is useful for debugging and performance tuning.`,\n ],\n },\n\n enableQueryPlanner: {\n type: v.boolean().default(true),\n desc: [\n `Enable the query planner for optimizing ZQL queries.`,\n ``,\n `The query planner analyzes and optimizes query execution by determining`,\n `the most efficient join strategies.`,\n ``,\n `You can disable the planner if it is picking bad strategies.`,\n ],\n },\n\n yieldThresholdMs: {\n type: v.number().default(10),\n desc: [\n `The maximum amount of time in milliseconds that a sync worker will`,\n `spend in IVM (processing query hydration and advancement) before yielding`,\n `to the event loop. 
Lower values increase responsiveness and fairness at`,\n `the cost of reduced throughput.`,\n ],\n },\n\n change: {\n db: {\n type: v.string().optional(),\n desc: [\n `The Postgres database used to store recent replication log entries, in order`,\n `to sync multiple view-syncers without requiring multiple replication slots on`,\n `the upstream database. If unspecified, the {bold upstream-db} will be used.`,\n ],\n },\n\n maxConns: {\n type: v.number().default(5),\n desc: [\n `The maximum number of connections to open to the change database.`,\n `This is used by the {bold change-streamer} for catching up`,\n `{bold zero-cache} replication subscriptions.`,\n ],\n },\n\n statementTimeoutMs: {\n type: v.number().default(20_000),\n desc: [\n `Fail change-log transactions if a statement response from postgres is not received within`,\n `the specified timeout. This differs from a postgres {bold statement_timeout} in that`,\n `it is implemented to handle a pathological case in which Postgres does not return a`,\n `response but otherwise believes the transaction to be idle.`,\n ],\n hidden: true, // make visible if proven to be effective/necessary\n },\n },\n\n replica: replicaOptions,\n\n log: logOptions,\n\n app: appOptions,\n\n shard: shardOptions,\n\n auth: authOptions,\n\n port: {\n type: v.number().default(4848),\n desc: [`The port for sync connections.`],\n },\n\n changeStreamer: {\n uri: {\n type: v.string().optional(),\n desc: [\n `When set, connects to the {bold change-streamer} at the given URI.`,\n `In a multi-node setup, this should be specified in {bold view-syncer} options,`,\n `pointing to the {bold replication-manager} URI, which runs a {bold change-streamer}`,\n `on port 4849.`,\n ],\n },\n\n mode: {\n type: v.literalUnion('dedicated', 'discover').default('dedicated'),\n desc: [\n `As an alternative to {bold ZERO_CHANGE_STREAMER_URI}, the {bold ZERO_CHANGE_STREAMER_MODE}`,\n `can be set to \"{bold discover}\" to instruct the {bold view-syncer} to connect to the `,\n `ip address registered by the {bold replication-manager} upon startup.`,\n ``,\n `This may not work in all networking configurations, e.g. certain private `,\n `networking or port forwarding configurations. Using the {bold ZERO_CHANGE_STREAMER_URI}`,\n `with an explicit routable hostname is recommended instead.`,\n ``,\n `Note: This option is ignored if the {bold ZERO_CHANGE_STREAMER_URI} is set.`,\n ],\n },\n\n port: {\n type: v.number().optional(),\n desc: [\n `The port on which the {bold change-streamer} runs. This is an internal`,\n `protocol between the {bold replication-manager} and {bold view-syncers}, which`,\n `runs in the same process tree in local development or a single-node configuration.`,\n ``,\n `If unspecified, defaults to {bold --port} + 1.`,\n ],\n },\n\n /** @deprecated */\n address: {\n type: v.string().optional(),\n deprecated: [\n `Set the {bold ZERO_CHANGE_STREAMER_URI} on view-syncers instead.`,\n ],\n hidden: true,\n },\n\n /** @deprecated */\n protocol: {\n type: v.literalUnion('ws', 'wss').default('ws'),\n deprecated: [\n `Set the {bold ZERO_CHANGE_STREAMER_URI} on view-syncers instead.`,\n ],\n hidden: true,\n },\n\n discoveryInterfacePreferences: {\n type: v.array(v.string()).default([...DEFAULT_PREFERRED_PREFIXES]),\n desc: [\n `The name prefixes to prefer when introspecting the network interfaces to determine`,\n `the externally reachable IP address for change-streamer discovery. 
This defaults`,\n `to commonly used names for standard ethernet interfaces in order to prevent selecting`,\n `special interfaces such as those for VPNs.`,\n ],\n // More confusing than it's worth to advertise this. The default list should be\n // adjusted to make things work for all environments; it is controlled as a\n // hidden flag as an emergency to unblock people with outlier network configs.\n hidden: true,\n },\n\n startupDelayMs: {\n type: v.number().default(15000),\n desc: [\n `The delay to wait before the change-streamer takes over the replication stream`,\n `(i.e. the handoff during replication-manager updates), to allow loadbalancers to register`,\n `the task as healthy based on healthcheck parameters. Note that if a change stream request`,\n `is received during this interval, the delay will be canceled and the takeover will happen`,\n `immediately, since the incoming request indicates that the task is registered as a target.`,\n ],\n },\n\n backPressureLimitHeapProportion: {\n type: v.number().default(0.04),\n desc: [\n `The percentage of {bold --max-old-space-size} to use as a buffer for absorbing replication`,\n `stream spikes. When the estimated amount of queued data exceeds this threshold, back pressure`,\n `is applied to the replication stream, delaying downstream sync as a result.`,\n ``,\n `The threshold was determined empirically with load testing. Higher thresholds have resulted`,\n `in OOMs. Note also that the byte-counting logic in the queue is strictly an underestimate of`,\n `actual memory usage (but importantly, proportionally correct), so the queue is actually`,\n `using more than what this proportion suggests.`,\n ``,\n `This parameter is exported as an emergency knob to reduce the size of the buffer in the`,\n `event that the server OOMs from back pressure. Resist the urge to {italic increase} this`,\n `proportion, as it is mainly useful for absorbing periodic spikes and does not meaningfully`,\n `affect steady-state replication throughput; the latter is determined by other factors such`,\n `as object serialization and PG throughput`,\n ``,\n `In other words, the back pressure limit does not constrain replication throughput;`,\n `rather, it protects the system when the upstream throughput exceeds the downstream`,\n `throughput.`,\n ],\n },\n\n flowControlConsensusPaddingSeconds: {\n type: v.number().default(1),\n desc: [\n `During periodic flow control checks (every 64kb), the amount of time to wait after the`,\n `majority of subscribers have acked, after which replication will continue even if`,\n `some subscribers have yet to ack. (Note that this is not a timeout for the {italic entire} send,`,\n `but a timeout that starts {italic after} the majority of receivers have acked.)`,\n ``,\n `This allows a bounded amount of time for backlogged subscribers to catch up on each flush`,\n `without forcing all subscribers to wait for the entire backlog to be processed. It is also`,\n `useful for mitigating the effect of unresponsive subscribers due to severed websocket`,\n `connections (until liveness checks disconnect them).`,\n ``,\n `Set this to a negative number to disable early flow control releases. 
(Not recommended, but`,\n `available as an emergency measure.)`,\n ],\n },\n },\n\n taskID: {\n type: v.string().optional(),\n desc: [\n `Globally unique identifier for the zero-cache instance.`,\n ``,\n `Setting this to a platform specific task identifier can be useful for debugging.`,\n `If unspecified, zero-cache will attempt to extract the TaskARN if run from within`,\n `an AWS ECS container, and otherwise use a random string.`,\n ],\n },\n\n perUserMutationLimit,\n\n numSyncWorkers: {\n type: v.number().optional(),\n desc: [\n `The number of processes to use for view syncing.`,\n `Leave this unset to use the maximum available parallelism.`,\n `If set to 0, the server runs without sync workers, which is the`,\n `configuration for running the {bold replication-manager}.`,\n ],\n },\n\n autoReset: {\n type: v.boolean().default(true),\n desc: [\n `Automatically wipe and resync the replica when replication is halted.`,\n `This situation can occur for configurations in which the upstream database`,\n `provider prohibits event trigger creation, preventing the zero-cache from`,\n `being able to correctly replicate schema changes. For such configurations,`,\n `an upstream schema change will instead result in halting replication with an`,\n `error indicating that the replica needs to be reset.`,\n ``,\n `When {bold auto-reset} is enabled, zero-cache will respond to such situations`,\n `by shutting down, and when restarted, resetting the replica and all synced `,\n `clients. This is a heavy-weight operation and can result in user-visible`,\n `slowness or downtime if compute resources are scarce.`,\n ],\n },\n\n replicationLag: {\n reportIntervalMs: {\n type: v.number().default(30000),\n desc: [\n `The minimum interval at which replication lag reports are written upstream and`,\n `reported via the {bold zero.replication.total_lag} opentelemetry metric. Because`,\n `replication lag reports are only issued after the previous one was received, the`,\n `actual interval between reports may be longer when there is a backlog in the`,\n `replication stream. A negative or 0 value disables lag reporting.`,\n ``,\n `This monitoring feature is only support on the postgres upstream type.`,\n ],\n },\n },\n\n adminPassword: {\n type: v.string().optional(),\n desc: [\n `A password used to administer zero-cache server, for example to access the`,\n `/statz endpoint.`,\n '',\n 'A password is optional in development mode but {bold required in production} mode.',\n ],\n },\n\n websocketCompression: {\n type: v.boolean().default(false),\n desc: [\n 'Enable WebSocket per-message deflate compression.',\n '',\n 'Compression can reduce bandwidth usage for sync traffic but',\n 'increases CPU usage on both client and server. 
Disabled by default.',\n '',\n 'See: https://github.com/websockets/ws#websocket-compression',\n ],\n },\n\n websocketCompressionOptions: {\n type: v.string().optional(),\n desc: [\n 'JSON string containing WebSocket compression options.',\n '',\n 'Only used if websocketCompression is enabled.',\n '',\n 'Example: \\\\{\"zlibDeflateOptions\":\\\\{\"level\":3\\\\},\"threshold\":1024\\\\}',\n '',\n 'See https://github.com/websockets/ws/blob/master/doc/ws.md#new-websocketserveroptions-callback for available options.',\n ],\n },\n\n websocketMaxPayloadBytes: {\n type: v.number().default(10 * 1024 * 1024),\n desc: [\n 'Maximum size of incoming WebSocket messages in bytes.',\n '',\n 'Messages exceeding this limit are rejected before parsing.',\n 'Default: 10MB (10 * 1024 * 1024 = 10485760)',\n ],\n },\n\n litestream: {\n executable: {\n type: v.string().optional(),\n desc: [\n `Path to the {bold litestream} executable. This must be built from the`,\n `{bold rocicorp/litestream} fork. Support for the official binary at v0.5.x`,\n `is planned.`,\n ],\n },\n\n executableV5: {\n type: v.string().optional(),\n desc: [\n `The v0.5.x litestream executable which is used for restoring the backup`,\n `backup when {bold ZERO_LITESTREAM_RESTORE_USING_V5} is specified.`,\n `litestream v0.5.8+ can restore from both v0.3.x and v0.5.x backup formats,`,\n `affording forwards compatibility with a future zero-cache`,\n `version that will use litestream v0.5.x to backup the replica.`,\n ],\n },\n\n restoreUsingV5: {\n type: v.boolean().default(false),\n desc: [\n `Restores the backup using the {bold ZERO_LITESTREAM_EXECUTABLE_V5} if specified.`,\n ],\n },\n\n configPath: {\n type: v.string().default('./src/services/litestream/config.yml'),\n desc: [\n `Path to the litestream yaml config file. zero-cache will run this with its`,\n `environment variables, which can be referenced in the file via $\\\\{ENV\\\\}`,\n `substitution, for example:`,\n `* {bold ZERO_REPLICA_FILE} for the db path`,\n `* {bold ZERO_LITESTREAM_BACKUP_LOCATION} for the db replica url`,\n `* {bold ZERO_LITESTREAM_LOG_LEVEL} for the log level`,\n `* {bold ZERO_LOG_FORMAT} for the log type`,\n ],\n },\n\n logLevel: {\n type: v.literalUnion('debug', 'info', 'warn', 'error').default('warn'),\n },\n\n backupURL: {\n type: v.string().optional(),\n desc: [\n `The location of the litestream backup, usually an {bold s3://} URL.`,\n `This is only consulted by the {bold replication-manager}.`,\n `{bold view-syncers} receive this information from the {bold replication-manager}.`,\n ],\n },\n\n endpoint: {\n type: v.string().optional(),\n desc: [\n `The S3-compatible endpoint URL to use for the litestream backup. Only required for non-AWS services.`,\n `The {bold replication-manager} and {bold view-syncers} must have the same endpoint.`,\n ],\n },\n\n port: {\n type: v.number().optional(),\n desc: [\n `Port on which litestream exports metrics, used to determine the replication`,\n `watermark up to which it is safe to purge change log records.`,\n ``,\n `If unspecified, defaults to {bold --port} + 2.`,\n ],\n },\n\n checkpointThresholdMB: {\n type: v.number().default(40),\n desc: [\n `The size of the WAL file at which to perform an SQlite checkpoint to apply`,\n `the writes in the WAL to the main database file. Each checkpoint creates`,\n `a new WAL segment file that will be backed up by litestream. 
Smaller thresholds`,\n `may improve read performance, at the expense of creating more files to download`,\n `when restoring the replica from the backup.`,\n ],\n },\n\n minCheckpointPageCount: {\n type: v.number().optional(),\n desc: [\n `The WAL page count at which SQLite attempts a PASSIVE checkpoint, which`,\n `transfers pages to the main database file without blocking writers.`,\n `Defaults to {bold checkpointThresholdMB * 250} (since SQLite page size is 4KB).`,\n ],\n },\n\n maxCheckpointPageCount: {\n type: v.number().optional(),\n desc: [\n `The WAL page count at which SQLite performs a RESTART checkpoint, which`,\n `blocks writers until complete. Defaults to {bold minCheckpointPageCount * 10}.`,\n `Set to {bold 0} to disable RESTART checkpoints entirely.`,\n ],\n },\n\n incrementalBackupIntervalMinutes: {\n type: v.number().default(15),\n desc: [\n `The interval between incremental backups of the replica. Shorter intervals`,\n `reduce the amount of change history that needs to be replayed when catching`,\n `up a new view-syncer, at the expense of increasing the number of files needed`,\n `to download for the initial litestream restore.`,\n ],\n },\n\n snapshotBackupIntervalHours: {\n type: v.number().default(12),\n desc: [\n `The interval between snapshot backups of the replica. Snapshot backups`,\n `make a full copy of the database to a new litestream generation. This`,\n `improves restore time at the expense of bandwidth. Applications with a`,\n `large database and low write rate can increase this interval to reduce`,\n `network usage for backups (litestream defaults to 24 hours).`,\n ],\n },\n\n restoreParallelism: {\n type: v.number().default(48),\n desc: [\n `The number of WAL files to download in parallel when performing the`,\n `initial restore of the replica from the backup.`,\n ],\n },\n\n multipartConcurrency: {\n type: v.number().default(48),\n desc: [\n `The number of parts (of size {bold --litestream-multipart-size} bytes)`,\n `to upload or download in parallel when backing up or restoring the snapshot.`,\n ],\n },\n\n multipartSize: {\n type: v.number().default(16 * 1024 * 1024),\n desc: [\n `The size of each part when uploading or downloading the snapshot with`,\n `{bold --multipart-concurrency}. Note that up to {bold concurrency * size}`,\n `bytes of memory are used when backing up or restoring the snapshot.`,\n ],\n },\n },\n\n storageDBTmpDir: {\n type: v.string().optional(),\n desc: [\n `tmp directory for IVM operator storage. Leave unset to use os.tmpdir()`,\n ],\n },\n\n initialSync: {\n tableCopyWorkers: {\n type: v.number().default(5),\n desc: [\n `The number of parallel workers used to copy tables during initial sync.`,\n `Each worker uses a database connection and will buffer up to (approximately)`,\n `10 MB of table data in memory during initial sync. Increasing the number of`,\n `workers may improve initial sync speed; however, note that local disk throughput`,\n `(i.e. 
IOPS), upstream CPU, and network bandwidth may also be bottlenecks.`,\n ],\n },\n\n profileCopy: {\n type: v.boolean().optional(),\n hidden: true,\n desc: [\n `Takes a cpu profile during the copy phase initial-sync, storing it as a JSON file`,\n `initial-copy.cpuprofile in the tmp directory.`,\n ],\n },\n\n textCopy: {\n type: v.boolean().default(false),\n desc: [\n `Use text-format COPY instead of binary COPY for the initial sync.`,\n `This is slower but can work around issues with binary encoding of`,\n `certain data types.`,\n ],\n },\n },\n\n /** @deprecated */\n targetClientRowCount: {\n type: v.number().default(20_000),\n deprecated: [\n 'This option is no longer used and will be removed in a future version.',\n 'The client-side cache no longer enforces a row limit. Instead, TTL-based expiration',\n 'automatically manages cache size to prevent unbounded growth.',\n ],\n hidden: true,\n },\n\n lazyStartup: {\n type: v.boolean().default(false),\n desc: [\n 'Delay starting the majority of zero-cache until first request.',\n '',\n 'This is mainly intended to avoid connecting to Postgres replication stream',\n 'until the first request is received, which can be useful i.e., for preview instances.',\n '',\n 'Currently only supported in single-node mode.',\n ],\n },\n\n serverVersion: {\n type: v.string().optional(),\n desc: [`The version string outputted to logs when the server starts up.`],\n },\n\n enableTelemetry: {\n type: v.boolean().default(true),\n desc: [\n `Set to false to opt out of telemetry collection.`,\n ``,\n `This helps us improve Zero by collecting anonymous usage data.`,\n `Setting the DO_NOT_TRACK environment variable also disables telemetry.`,\n ],\n },\n\n cloudEvent: {\n sinkEnv: {\n type: v.string().optional(),\n desc: [\n `ENV variable containing a URI to a CloudEvents sink. When set, ZeroEvents`,\n `will be published to the sink as the {bold data} field of CloudEvents.`,\n `The {bold source} field of the CloudEvents will be set to the {bold ZERO_TASK_ID},`,\n `along with any extension attributes specified by the {bold ZERO_CLOUD_EVENT_EXTENSION_OVERRIDES_ENV}.`,\n ``,\n `This configuration is modeled to easily integrate with a knative K_SINK binding,`,\n `(i.e. https://github.com/knative/eventing/blob/main/docs/spec/sources.md#sinkbinding).`,\n `However, any CloudEvents sink can be used.`,\n ],\n },\n\n extensionOverridesEnv: {\n type: v.string().optional(),\n desc: [\n `ENV variable containing a JSON stringified object with an {bold extensions} field`,\n `containing attributes that should be added or overridden on outbound CloudEvents.`,\n ``,\n `This configuration is modeled to easily integrate with a knative K_CE_OVERRIDES binding,`,\n `(i.e. https://github.com/knative/eventing/blob/main/docs/spec/sources.md#sinkbinding).`,\n ],\n },\n },\n};\n\nexport type ZeroConfig = Config<typeof zeroOptions>;\n\nlet loadedConfig: Config<typeof zeroOptions> | undefined;\n\nexport function getZeroConfig(\n opts: Omit<ParseOptions, 'envNamePrefix'> = {},\n): ZeroConfig {\n if (!loadedConfig || singleProcessMode()) {\n loadedConfig = parseOptions(zeroOptions, {\n envNamePrefix: ZERO_ENV_VAR_PREFIX,\n emitDeprecationWarnings: false, // overridden at the top level parse\n ...opts,\n });\n\n if (loadedConfig.queryHydrationStats) {\n runtimeDebugFlags.trackRowCountsVended = true;\n }\n }\n return loadedConfig;\n}\n\n/**\n * Same as {@link getZeroConfig}, with an additional check that the\n * config has already been normalized (i.e. 
by the top level server/runner).\n */\nexport function getNormalizedZeroConfig(\n opts: Omit<ParseOptions, 'envNamePrefix'> = {},\n): NormalizedZeroConfig {\n const config = getZeroConfig(opts);\n assertNormalized(config);\n return config;\n}\n\n/**\n * Gets the server version from the config if provided. Otherwise it gets it\n * from the Zero package.json.\n */\nexport function getServerVersion(\n config: Pick<ZeroConfig, 'serverVersion'> | undefined,\n): string {\n return config?.serverVersion ?? packageJson.version;\n}\n\nexport function isAdminPasswordValid(\n lc: LogContext,\n config: Pick<NormalizedZeroConfig, 'adminPassword'>,\n password: string | undefined,\n) {\n // If development mode, password is optional\n // We use process.env.NODE_ENV === 'development' as a sign that we're in\n // development mode, rather than a custom env var like ZERO_DEVELOPMENT_MODE,\n // because NODE_ENV is more standard and is already used by many tools.\n // Note that if NODE_ENV is not set, we assume production mode.\n\n if (!password && !config.adminPassword && isDevelopmentMode()) {\n warnOnce(\n lc,\n 'No admin password set; allowing access in development mode only',\n );\n return true;\n }\n\n if (!config.adminPassword) {\n lc.warn?.('No admin password set; denying access');\n return false;\n }\n\n // Use constant-time comparison to prevent timing attacks\n const passwordBuffer = Buffer.from(password ?? '');\n const configBuffer = Buffer.from(config.adminPassword);\n\n // Handle length mismatch in constant time\n if (passwordBuffer.length !== configBuffer.length) {\n // Perform dummy comparison to maintain constant timing\n timingSafeEqual(configBuffer, configBuffer);\n lc.warn?.('Invalid admin password');\n return false;\n }\n\n if (!timingSafeEqual(passwordBuffer, configBuffer)) {\n lc.warn?.('Invalid admin password');\n return false;\n }\n\n lc.debug?.('Admin password accepted');\n return true;\n}\n\nlet hasWarned = false;\n\nfunction warnOnce(lc: LogContext, msg: string) {\n if (!hasWarned) {\n lc.warn?.(msg);\n hasWarned = true;\n }\n}\n\n// For testing purposes - reset the warning state\nexport function resetWarnOnceState() {\n hasWarned = 
false;\n}\n"],"mappings":";;;;;;;;;;;;;;AA8BA,IAAa,sBAAsB;AAEnC,IAAa,aAAa;CACxB,IAAI;EACF,MAAM,eACH,QAAQ,CACR,QAAQ,OAAO,CACf,QAAO,OAAM,0BAA0B,KAAK,GAAG,EAAE,uBAAuB;EAC3E,MAAM;GACJ;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACD;EACF;CAED,cAAc;EACZ,MAAM,eAAE,MAAM,eAAE,QAAQ,CAAC,CAAC,eAAe,EAAE,CAAC;EAC5C,MAAM;GACJ;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACD;EACF;CACF;AAED,IAAa,eAAe;CAC1B,IAAI;EACF,MAAM,eACH,QAAQ,CACR,aAAa;AACZ,SAAM,IAAI,MACR,wEAED;IACD,CACD,UAAU;EACb,QAAQ;EACT;CAED,KAAK;EACH,MAAM,eAAE,QAAQ,CAAC,QAAQ,EAAE;EAC3B,MAAM;GACJ;GACA;GACA;GACA;GACD;EACD,QAAQ;EACT;CACF;AAED,IAAM,iBAAiB;CACrB,MAAM;EACJ,MAAM,eAAE,QAAQ,CAAC,QAAQ,UAAU;EACnC,MAAM;GACJ;GACA;GACA;GACD;EACF;CAED,qBAAqB;EACnB,MAAM,eAAE,QAAQ,CAAC,UAAU;EAC3B,MAAM;GACJ;GACA;GACA;GACA;GACD;EACF;CACF;AAID,IAAM,uBAAuB;CAC3B,KAAK;EACH,MAAM,eAAE,QAAQ,CAAC,UAAU;EAC3B,MAAM,CACJ,wEACA,0CACD;EACF;CACD,UAAU;EACR,MAAM,eAAE,QAAQ,CAAC,QAAQ,IAAO;EAChC,MAAM,CACJ,gFACD;EACF;CACF;AAID,IAAM,cAAc;CAClB,KAAK;EACH,MAAM,eAAE,QAAQ,CAAC,UAAU;EAC3B,MAAM,CACJ,uHACD;EACD,YAAY,CACV,sGACD;EACF;CACD,SAAS;EACP,MAAM,eAAE,QAAQ,CAAC,UAAU;EAC3B,MAAM,CACJ,yHACD;EACD,YAAY,CACV,sGACD;EACF;CACD,QAAQ;EACN,MAAM,eAAE,QAAQ,CAAC,UAAU;EAC3B,MAAM,CACJ,4GACD;EACD,YAAY,CACV,sGACD;EACF;CACD,QAAQ;EACN,MAAM,eAAE,QAAQ,CAAC,UAAU;EAC3B,MAAM,CACJ,0DACA,sEACD;EACD,YAAY,CACV,sGACD;EACF;CACD,UAAU;EACR,MAAM,eAAE,QAAQ,CAAC,UAAU;EAC3B,MAAM,CACJ,4DACA,wEACD;EACD,YAAY,CACV,sGACD;EACF;CACD,2BAA2B;EACzB,MAAM,eAAE,QAAQ,CAAC,UAAU;EAC3B,MAAM,CACJ,gGACA,oDACD;EACF;CACD,4BAA4B;EAC1B,MAAM,eAAE,QAAQ,CAAC,UAAU;EAC3B,MAAM,CACJ,+FACA,qDACD;EACF;CACF;AAED,IAAM,0BAA0B,SAC9B,aAAa,UAAU,qBAAqB,KAAK,CAAC,gBAAgB,KAAK;AAEzE,IAAM,2BACJ,aACA,YACI;CACJ,KAAK;EACH,MAAM,eAAE,MAAM,eAAE,QAAQ,CAAC,CAAC,UAAU;EACpC,MAAM;GACJ,sDAAsD,OAAO;GAC7D;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACD;EACD,GAAI,cACA,EAAC,YAAY,CAAC,uBAAuB,GAAG,YAAY,MAAM,CAAC,EAAC,GAC5D,EAAE;EACP;CACD,QAAQ;EACN,MAAM,eAAE,QAAQ,CAAC,UAAU;EAC3B,MAAM,CACJ,0FACD;EACD,GAAI,cACA,EAAC,YAAY,CAAC,uBAAuB,GAAG,YAAY,UAAU,CAAC,EAAC,GAChE,EAAE;EACP;CACD,gBAAgB;EACd,MAAM,eAAE,SAAS,CAAC,QAAQ,MAAM;EAChC,MAAM;GACJ;GACA;GACA;GACD;EACD,GAAI,cACA,EAAC,YAAY,CAAC,uBAAuB,GAAG,YAAY,kBAAkB,CAAC,EAAC,GACxE,EAAE;EACP;CACD,sBAAsB;EACpB,MAAM,eAAE,MAAM,eAAE,QAAQ,CAAC,CAAC,UAAU;EACpC,MAAM;GACJ;GACA,oEAAoE,WAAW,mBAAmB,SAAS,QAAQ;GACnH;GACA;GACA,iBAAiB,cAAc,YAAY,aAAa,GAAG,WAAW,mBAAmB,WAAW,QAAQ;GAC7G;EACD,GAAI,cACA,EACE,YAAY,CACV,uBAAuB,GAAG,YAAY,yBAAyB,CAChE,EACF,GACD,EAAE;EACP;CACF;AAED,IAAM,gBAAgB,wBAAwB,KAAA,GAAW,iBAAiB;AAC1E,IAAM,cAAc,wBAAwB,UAAU,iBAAiB;AACvE,IAAM,eAAe,wBAAwB,KAAA,GAAW,sBAAsB;AAC9E,IAAM,oBAAoB,wBACxB,SACA,sBACD;AAaD,IAAa,cAAc;CACzB,UAAU;EACR,IAAI;GACF,MAAM,eAAE,QAAQ;GAChB,MAAM,CACJ,mDACA,oEACD;GACF;EAED,MAAM;GACJ,MAAM,aAAe,MAAM,SAAS,CAAC,QAAQ,KAAK;GAClD,MAAM;IACJ;IACA;IACA;IACA;IACD;GACD,QAAQ;GACT;EAED,UAAU;GACR,MAAM,eAAE,QAAQ,CAAC,QAAQ,GAAG;GAC5B,MAAM;IACJ;IACA;IACA;IACA;IACA;IACA;IACA;IACD;GACF;EAED,mBAAmB;GACjB,MAAM,eAAE,QAAQ,CAAC,UAAU;GAC3B,QAAQ;GACT;EAED,2BAA2B;GACzB,MAAM,eAAE,SAAS,CAAC,UAAU;GAC5B,MAAM;IACJ;IACA;IACA;IACA;IACA;IACA;IACA;IACA;IACD;GACF;EACF;CAGD,MAAM;CACN,QAAQ;CAER,YAAY;CACZ,OAAO;CAEP,qBAAqB;EACnB,MAAM,eAAE,SAAS,CAAC,QAAQ,KAAK;EAC/B,MAAM;GACJ;GACA;GACA;GACD;EACF;CAED,KAAK;EACH,IAAI;GACF,MAAM,eAAE,QAAQ,CAAC,UAAU;GAC3B,MAAM;IACJ;IACA;IACA;IACD;GACF;EAED,UAAU;GACR,MAAM,eAAE,QAAQ,CAAC,QAAQ,GAAG;GAC5B,MAAM;IACJ;IACA;IACA;IACA;IACA;IACD;GACF;EAED,
mBAAmB;GACjB,MAAM,eAAE,QAAQ,CAAC,UAAU;GAC3B,QAAQ;GACT;EAED,2CAA2C;GACzC,MAAM,eAAE,QAAQ,CAAC,QAAQ,GAAG;GAC5B,MAAM;IACJ;IACA;IACA;IACD;GACF;EAED,yCAAyC;GACvC,MAAM,eAAE,QAAQ,CAAC,QAAQ,GAAG;GAC5B,MAAM;IACJ;IACA;IACA;IACD;GACF;EAED,mCAAmC;GACjC,MAAM,eAAE,QAAQ,CAAC,QAAQ,GAAG;GAC5B,MAAM;IACJ;IACA;IACA;IACA;IACA;IACD;GACF;EACF;CAED,qBAAqB;EACnB,MAAM,eAAE,SAAS,CAAC,UAAU;EAC5B,MAAM;GACJ;GACA;GACA;GACD;EACF;CAED,oBAAoB;EAClB,MAAM,eAAE,SAAS,CAAC,QAAQ,KAAK;EAC/B,MAAM;GACJ;GACA;GACA;GACA;GACA;GACA;GACD;EACF;CAED,kBAAkB;EAChB,MAAM,eAAE,QAAQ,CAAC,QAAQ,GAAG;EAC5B,MAAM;GACJ;GACA;GACA;GACA;GACD;EACF;CAED,QAAQ;EACN,IAAI;GACF,MAAM,eAAE,QAAQ,CAAC,UAAU;GAC3B,MAAM;IACJ;IACA;IACA;IACD;GACF;EAED,UAAU;GACR,MAAM,eAAE,QAAQ,CAAC,QAAQ,EAAE;GAC3B,MAAM;IACJ;IACA;IACA;IACD;GACF;EAED,oBAAoB;GAClB,MAAM,eAAE,QAAQ,CAAC,QAAQ,IAAO;GAChC,MAAM;IACJ;IACA;IACA;IACA;IACD;GACD,QAAQ;GACT;EACF;CAED,SAAS;CAET,KAAK;CAEL,KAAK;CAEL,OAAO;CAEP,MAAM;CAEN,MAAM;EACJ,MAAM,eAAE,QAAQ,CAAC,QAAQ,KAAK;EAC9B,MAAM,CAAC,iCAAiC;EACzC;CAED,gBAAgB;EACd,KAAK;GACH,MAAM,eAAE,QAAQ,CAAC,UAAU;GAC3B,MAAM;IACJ;IACA;IACA;IACA;IACD;GACF;EAED,MAAM;GACJ,MAAM,aAAe,aAAa,WAAW,CAAC,QAAQ,YAAY;GAClE,MAAM;IACJ;IACA;IACA;IACA;IACA;IACA;IACA;IACA;IACA;IACD;GACF;EAED,MAAM;GACJ,MAAM,eAAE,QAAQ,CAAC,UAAU;GAC3B,MAAM;IACJ;IACA;IACA;IACA;IACA;IACD;GACF;EAGD,SAAS;GACP,MAAM,eAAE,QAAQ,CAAC,UAAU;GAC3B,YAAY,CACV,mEACD;GACD,QAAQ;GACT;EAGD,UAAU;GACR,MAAM,aAAe,MAAM,MAAM,CAAC,QAAQ,KAAK;GAC/C,YAAY,CACV,mEACD;GACD,QAAQ;GACT;EAED,+BAA+B;GAC7B,MAAM,eAAE,MAAM,eAAE,QAAQ,CAAC,CAAC,QAAQ,CAAC,GAAG,2BAA2B,CAAC;GAClE,MAAM;IACJ;IACA;IACA;IACA;IACD;GAID,QAAQ;GACT;EAED,gBAAgB;GACd,MAAM,eAAE,QAAQ,CAAC,QAAQ,KAAM;GAC/B,MAAM;IACJ;IACA;IACA;IACA;IACA;IACD;GACF;EAED,iCAAiC;GAC/B,MAAM,eAAE,QAAQ,CAAC,QAAQ,IAAK;GAC9B,MAAM;IACJ;IACA;IACA;IACA;IACA;IACA;IACA;IACA;IACA;IACA;IACA;IACA;IACA;IACA;IACA;IACA;IACA;IACA;IACD;GACF;EAED,oCAAoC;GAClC,MAAM,eAAE,QAAQ,CAAC,QAAQ,EAAE;GAC3B,MAAM;IACJ;IACA;IACA;IACA;IACA;IACA;IACA;IACA;IACA;IACA;IACA;IACA;IACD;GACF;EACF;CAED,QAAQ;EACN,MAAM,eAAE,QAAQ,CAAC,UAAU;EAC3B,MAAM;GACJ;GACA;GACA;GACA;GACA;GACD;EACF;CAED;CAEA,gBAAgB;EACd,MAAM,eAAE,QAAQ,CAAC,UAAU;EAC3B,MAAM;GACJ;GACA;GACA;GACA;GACD;EACF;CAED,WAAW;EACT,MAAM,eAAE,SAAS,CAAC,QAAQ,KAAK;EAC/B,MAAM;GACJ;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACD;EACF;CAED,gBAAgB,EACd,kBAAkB;EAChB,MAAM,eAAE,QAAQ,CAAC,QAAQ,IAAM;EAC/B,MAAM;GACJ;GACA;GACA;GACA;GACA;GACA;GACA;GACD;EACF,EACF;CAED,eAAe;EACb,MAAM,eAAE,QAAQ,CAAC,UAAU;EAC3B,MAAM;GACJ;GACA;GACA;GACA;GACD;EACF;CAED,sBAAsB;EACpB,MAAM,eAAE,SAAS,CAAC,QAAQ,MAAM;EAChC,MAAM;GACJ;GACA;GACA;GACA;GACA;GACA;GACD;EACF;CAED,6BAA6B;EAC3B,MAAM,eAAE,QAAQ,CAAC,UAAU;EAC3B,MAAM;GACJ;GACA;GACA;GACA;GACA;GACA;GACA;GACD;EACF;CAED,0BAA0B;EACxB,MAAM,eAAE,QAAQ,CAAC,QAAQ,KAAK,OAAO,KAAK;EAC1C,MAAM;GACJ;GACA;GACA;GACA;GACD;EACF;CAED,YAAY;EACV,YAAY;GACV,MAAM,eAAE,QAAQ,CAAC,UAAU;GAC3B,MAAM;IACJ;IACA;IACA;IACD;GACF;EAED,cAAc;GACZ,MAAM,eAAE,QAAQ,CAAC,UAAU;GAC3B,MAAM;IACJ;IACA;IACA;IACA;IACA;IACD;GACF;EAED,gBAAgB;GACd,MAAM,eAAE,SAAS,CAAC,QAAQ,MAAM;GAChC,MAAM,CACJ,mFACD;GACF;EAED,YAAY;GACV,MAAM,eAAE,QAAQ,CAAC,QAAQ,uCAAuC;GAChE,MAAM;IACJ;IACA;IACA;IACA;IACA;IACA;IACA;IACD;GACF;EAED,UAAU,EACR,MAAM,aAAe,SAAS,QAAQ,QAAQ,QAAQ,CAAC,QAAQ,OAAO,EACvE;EAED,WAAW;GACT,MAAM,eAAE,QAAQ,CAAC,UAAU;GAC3B,MAAM;IACJ;IACA;IACA;IACD;GACF;EAED,UAAU;GACR,MAAM,eAAE,QAAQ,CAAC,UAAU;GAC3B,MAAM,CACJ,wGACA,sFACD;GACF;EAED,MAAM;GACJ,MAAM,eAAE,QAAQ,CAAC,UAAU;GAC3B,MAAM;IACJ;IACA;IACA;IACA;IACD;GACF;EAED,uBAAuB;GACrB,MAAM,eAAE,QAAQ,CAAC,QAAQ,GAAG;GAC5B,MAAM;IACJ;IACA;IACA;IACA;IACA;IACD;GACF;EAED,wBAAwB;GACtB,MAAM,eAAE,QAAQ,CAAC,UAAU;GAC3B,MAAM;IACJ;IACA
;IACA;IACD;GACF;EAED,wBAAwB;GACtB,MAAM,eAAE,QAAQ,CAAC,UAAU;GAC3B,MAAM;IACJ;IACA;IACA;IACD;GACF;EAED,kCAAkC;GAChC,MAAM,eAAE,QAAQ,CAAC,QAAQ,GAAG;GAC5B,MAAM;IACJ;IACA;IACA;IACA;IACD;GACF;EAED,6BAA6B;GAC3B,MAAM,eAAE,QAAQ,CAAC,QAAQ,GAAG;GAC5B,MAAM;IACJ;IACA;IACA;IACA;IACA;IACD;GACF;EAED,oBAAoB;GAClB,MAAM,eAAE,QAAQ,CAAC,QAAQ,GAAG;GAC5B,MAAM,CACJ,uEACA,kDACD;GACF;EAED,sBAAsB;GACpB,MAAM,eAAE,QAAQ,CAAC,QAAQ,GAAG;GAC5B,MAAM,CACJ,0EACA,+EACD;GACF;EAED,eAAe;GACb,MAAM,eAAE,QAAQ,CAAC,QAAQ,KAAK,OAAO,KAAK;GAC1C,MAAM;IACJ;IACA;IACA;IACD;GACF;EACF;CAED,iBAAiB;EACf,MAAM,eAAE,QAAQ,CAAC,UAAU;EAC3B,MAAM,CACJ,yEACD;EACF;CAED,aAAa;EACX,kBAAkB;GAChB,MAAM,eAAE,QAAQ,CAAC,QAAQ,EAAE;GAC3B,MAAM;IACJ;IACA;IACA;IACA;IACA;IACD;GACF;EAED,aAAa;GACX,MAAM,eAAE,SAAS,CAAC,UAAU;GAC5B,QAAQ;GACR,MAAM,CACJ,qFACA,gDACD;GACF;EAED,UAAU;GACR,MAAM,eAAE,SAAS,CAAC,QAAQ,MAAM;GAChC,MAAM;IACJ;IACA;IACA;IACD;GACF;EACF;CAGD,sBAAsB;EACpB,MAAM,eAAE,QAAQ,CAAC,QAAQ,IAAO;EAChC,YAAY;GACV;GACA;GACA;GACD;EACD,QAAQ;EACT;CAED,aAAa;EACX,MAAM,eAAE,SAAS,CAAC,QAAQ,MAAM;EAChC,MAAM;GACJ;GACA;GACA;GACA;GACA;GACA;GACD;EACF;CAED,eAAe;EACb,MAAM,eAAE,QAAQ,CAAC,UAAU;EAC3B,MAAM,CAAC,kEAAkE;EAC1E;CAED,iBAAiB;EACf,MAAM,eAAE,SAAS,CAAC,QAAQ,KAAK;EAC/B,MAAM;GACJ;GACA;GACA;GACA;GACD;EACF;CAED,YAAY;EACV,SAAS;GACP,MAAM,eAAE,QAAQ,CAAC,UAAU;GAC3B,MAAM;IACJ;IACA;IACA;IACA;IACA;IACA;IACA;IACA;IACD;GACF;EAED,uBAAuB;GACrB,MAAM,eAAE,QAAQ,CAAC,UAAU;GAC3B,MAAM;IACJ;IACA;IACA;IACA;IACA;IACD;GACF;EACF;CACF;AAID,IAAI;AAEJ,SAAgB,cACd,OAA4C,EAAE,EAClC;AACZ,KAAI,CAAC,gBAAgB,mBAAmB,EAAE;AACxC,iBAAe,aAAa,aAAa;GACvC,eAAe;GACf,yBAAyB;GACzB,GAAG;GACJ,CAAC;AAEF,MAAI,aAAa,oBACf,mBAAkB,uBAAuB;;AAG7C,QAAO;;;;;;AAOT,SAAgB,wBACd,OAA4C,EAAE,EACxB;CACtB,MAAM,SAAS,cAAc,KAAK;AAClC,kBAAiB,OAAO;AACxB,QAAO;;;;;;AAOT,SAAgB,iBACd,QACQ;AACR,QAAO,QAAQ,iBAAiB,gBAAY;;AAG9C,SAAgB,qBACd,IACA,QACA,UACA;AAOA,KAAI,CAAC,YAAY,CAAC,OAAO,iBAAiB,mBAAmB,EAAE;AAC7D,WACE,IACA,kEACD;AACD,SAAO;;AAGT,KAAI,CAAC,OAAO,eAAe;AACzB,KAAG,OAAO,wCAAwC;AAClD,SAAO;;CAIT,MAAM,iBAAiB,OAAO,KAAK,YAAY,GAAG;CAClD,MAAM,eAAe,OAAO,KAAK,OAAO,cAAc;AAGtD,KAAI,eAAe,WAAW,aAAa,QAAQ;AAEjD,kBAAgB,cAAc,aAAa;AAC3C,KAAG,OAAO,yBAAyB;AACnC,SAAO;;AAGT,KAAI,CAAC,gBAAgB,gBAAgB,aAAa,EAAE;AAClD,KAAG,OAAO,yBAAyB;AACnC,SAAO;;AAGT,IAAG,QAAQ,0BAA0B;AACrC,QAAO;;AAGT,IAAI,YAAY;AAEhB,SAAS,SAAS,IAAgB,KAAa;AAC7C,KAAI,CAAC,WAAW;AACd,KAAG,OAAO,IAAI;AACd,cAAY"}
+
{"version":3,"file":"zero-config.js","names":[],"sources":["../../../../../zero-cache/src/config/zero-config.ts"],"sourcesContent":["/**\n * These types represent the _compiled_ config whereas `define-config` types represent the _source_ config.\n */\n\nimport {timingSafeEqual} from 'node:crypto';\nimport type {LogContext} from '@rocicorp/logger';\nimport {logOptions} from '../../../otel/src/log-options.ts';\nimport {\n flagToEnv,\n parseOptions,\n type Config,\n type ParseOptions,\n} from '../../../shared/src/options.ts';\nimport * as v from '../../../shared/src/valita.ts';\n// @circular-dep-ignore - importing package.json for version info only\nimport packageJson from '../../../zero/package.json' with {type: 'json'};\nimport {runtimeDebugFlags} from '../../../zql/src/builder/debug-delegate.ts';\nimport {singleProcessMode} from '../types/processes.ts';\nimport {\n ALLOWED_APP_ID_CHARACTERS,\n INVALID_APP_ID_MESSAGE,\n} from '../types/shards.ts';\nimport {DEFAULT_PREFERRED_PREFIXES} from './network.ts';\nimport {\n assertNormalized,\n isDevelopmentMode,\n type NormalizedZeroConfig,\n} from './normalize.ts';\nexport type {LogConfig} from '../../../otel/src/log-options.ts';\n\nexport const ZERO_ENV_VAR_PREFIX = 'ZERO_';\n\nexport const appOptions = {\n id: {\n type: v\n .string()\n .default('zero')\n .assert(id => ALLOWED_APP_ID_CHARACTERS.test(id), INVALID_APP_ID_MESSAGE),\n desc: [\n 'Unique identifier for the app.',\n '',\n 'Multiple zero-cache apps can run on a single upstream database, each of which',\n 'is isolated from the others, with its own permissions, sharding (future feature),',\n 'and change/cvr databases.',\n '',\n 'The metadata of an app is stored in an upstream schema with the same name,',\n 'e.g. \"zero\", and the metadata for each app shard, e.g. client and mutation',\n 'ids, is stored in the \"\\\\{app-id\\\\}_\\\\{#\\\\}\" schema. (Currently there is only a single',\n '\"0\" shard, but this will change with sharding).',\n '',\n 'The CVR and Change data are managed in schemas named \"\\\\{app-id\\\\}_\\\\{shard-num\\\\}/cvr\"',\n 'and \"\\\\{app-id\\\\}_\\\\{shard-num\\\\}/cdc\", respectively, allowing multiple apps and shards',\n 'to share the same database instance (e.g. a Postgres \"cluster\") for CVR and Change management.',\n '',\n 'Due to constraints on replication slot names, an App ID may only consist of',\n 'lower-case letters, numbers, and the underscore character.',\n '',\n 'Note that this option is used by both {bold zero-cache} and {bold zero-deploy-permissions}.',\n ],\n },\n\n publications: {\n type: v.array(v.string()).optional(() => []),\n desc: [\n `Postgres {bold PUBLICATION}s that define the tables and columns to`,\n `replicate. Publication names may not begin with an underscore,`,\n `as zero reserves that prefix for internal use.`,\n ``,\n `If unspecified, zero-cache will create and use an internal publication that`,\n `publishes all tables in the {bold public} schema, i.e.:`,\n ``,\n `CREATE PUBLICATION _\\\\{app-id\\\\}_public_0 FOR TABLES IN SCHEMA public;`,\n ``,\n `Note that changing the set of publications will result in resyncing the replica,`,\n `which may involve downtime (replication lag) while the new replica is initializing.`,\n `To change the set of publications without disrupting an existing app, a new app`,\n `should be created.`,\n ],\n },\n};\n\nexport const shardOptions = {\n id: {\n type: v\n .string()\n .assert(() => {\n throw new Error(\n `ZERO_SHARD_ID is no longer an option. 
Please use ZERO_APP_ID instead.`,\n // TODO: Link to release / migration notes?\n );\n })\n .optional(),\n hidden: true,\n },\n\n num: {\n type: v.number().default(0),\n desc: [\n `The shard number (from 0 to NUM_SHARDS) of the App. zero will eventually`,\n `support data sharding as a first-class primitive; until then, deploying`,\n `multiple shard-nums creates functionally identical shards. Until sharding is`,\n `actually meaningful, this flag is hidden but available for testing.`,\n ],\n hidden: true,\n },\n};\n\nconst replicaOptions = {\n file: {\n type: v.string().default('zero.db'),\n desc: [\n `File path to the SQLite replica that zero-cache maintains.`,\n `This can be lost, but if it is, zero-cache will have to re-replicate next`,\n `time it starts up.`,\n ],\n },\n\n vacuumIntervalHours: {\n type: v.number().optional(),\n desc: [\n `Performs a VACUUM at server startup if the specified number of hours has elapsed`,\n `since the last VACUUM (or initial-sync). The VACUUM operation is heavyweight`,\n `and requires double the size of the db in disk space. If unspecified, VACUUM`,\n `operations are not performed.`,\n ],\n },\n};\n\nexport type ReplicaOptions = Config<typeof replicaOptions>;\n\nconst perUserMutationLimit = {\n max: {\n type: v.number().optional(),\n desc: [\n `The maximum mutations per user within the specified {bold windowMs}.`,\n `If unset, no rate limiting is enforced.`,\n ],\n },\n windowMs: {\n type: v.number().default(60_000),\n desc: [\n `The sliding window over which the {bold perUserMutationLimitMax} is enforced.`,\n ],\n },\n};\n\nexport type RateLimit = Config<typeof perUserMutationLimit>;\n\nconst authOptions = {\n jwk: {\n type: v.string().optional(),\n desc: [\n `A public key in JWK format used to verify JWTs. Only one of {bold jwk}, {bold jwksUrl} and {bold secret} may be set.`,\n ],\n deprecated: [\n `Use cookie-based authentication or an auth token instead - see https://zero.rocicorp.dev/docs/auth.`,\n ],\n },\n jwksUrl: {\n type: v.string().optional(),\n desc: [\n `A URL that returns a JWK set used to verify JWTs. Only one of {bold jwk}, {bold jwksUrl} and {bold secret} may be set.`,\n ],\n deprecated: [\n `Use cookie-based authentication or an auth token instead - see https://zero.rocicorp.dev/docs/auth.`,\n ],\n },\n secret: {\n type: v.string().optional(),\n desc: [\n `A symmetric key used to verify JWTs. 
Only one of {bold jwk}, {bold jwksUrl} and {bold secret} may be set.`,\n ],\n deprecated: [\n `Use cookie-based authentication or an auth token instead - see https://zero.rocicorp.dev/docs/auth.`,\n ],\n },\n issuer: {\n type: v.string().optional(),\n desc: [\n `Expected issuer ({bold iss} claim) for JWT validation.`,\n `If set, tokens with a different or missing issuer will be rejected.`,\n ],\n deprecated: [\n `Use cookie-based authentication or an auth token instead - see https://zero.rocicorp.dev/docs/auth.`,\n ],\n },\n audience: {\n type: v.string().optional(),\n desc: [\n `Expected audience ({bold aud} claim) for JWT validation.`,\n `If set, tokens with a different or missing audience will be rejected.`,\n ],\n deprecated: [\n `Use cookie-based authentication or an auth token instead - see https://zero.rocicorp.dev/docs/auth.`,\n ],\n },\n revalidateIntervalSeconds: {\n type: v.number().optional(),\n desc: [\n `The interval in seconds between periodic /query auth revalidation for validated connections.`,\n `If unset, periodic auth revalidation is disabled.`,\n ],\n },\n retransformIntervalSeconds: {\n type: v.number().optional(),\n desc: [\n `The interval in seconds between periodic shared /query retransform work for a client group.`,\n `If unset, periodic shared retransform is disabled.`,\n ],\n },\n};\n\nconst makeDeprecationMessage = (flag: string) =>\n `Use {bold ${flagToEnv(ZERO_ENV_VAR_PREFIX, flag)}} (or {bold --${flag}}) instead.`;\n\nconst makeMutatorQueryOptions = (\n replacement: 'mutate' | 'query' | undefined,\n suffix: string,\n) => ({\n url: {\n type: v.array(v.string()).optional(), // optional until we remove CRUD mutations\n desc: [\n `The URL of the API server to which zero-cache will ${suffix}.`,\n ``,\n `{bold IMPORTANT:} URLs are matched using {bold URLPattern}, a standard Web API.`,\n ``,\n `{bold Pattern Syntax:}`,\n ` URLPattern uses a simple and intuitive syntax similar to Express routes.`,\n ` Wildcards and named parameters make it easy to match multiple URLs.`,\n ``,\n `{bold Basic Examples:}`,\n ` Exact URL match:`,\n ` \"https://api.example.com/mutate\"`,\n ` `,\n ` Any subdomain using wildcard:`,\n ` \"https://*.example.com/mutate\"`,\n ` `,\n ` Multiple subdomain levels:`,\n ` \"https://*.*.example.com/mutate\"`,\n ` `,\n ` Any path under a domain:`,\n ` \"https://api.example.com/*\"`,\n ` `,\n ` Named path parameters:`,\n ` \"https://api.example.com/:version/mutate\"`,\n ` ↳ Matches \"https://api.example.com/v1/mutate\", \"https://api.example.com/v2/mutate\", etc.`,\n ``,\n `{bold Advanced Patterns:}`,\n ` Optional path segments:`,\n ` \"https://api.example.com/:path?\"`,\n ` `,\n ` Regex in segments (for specific patterns):`,\n ` \"https://api.example.com/:version(v\\\\\\\\d+)/mutate\"`,\n ` ↳ Matches only \"v\" followed by digits`,\n ``,\n `{bold Multiple patterns:}`,\n ` [\"https://api1.example.com/mutate\", \"https://api2.example.com/mutate\"]`,\n ``,\n `{bold Note:} Query parameters and URL fragments (#) are automatically ignored during matching.`,\n ``,\n `For full URLPattern syntax, see: https://developer.mozilla.org/en-US/docs/Web/API/URLPattern`,\n ],\n ...(replacement\n ? {deprecated: [makeDeprecationMessage(`${replacement}-url`)]}\n : {}),\n },\n apiKey: {\n type: v.string().optional(),\n desc: [\n `An optional secret used to authorize zero-cache to call the API server handling writes.`,\n ],\n ...(replacement\n ? 
{deprecated: [makeDeprecationMessage(`${replacement}-api-key`)]}\n : {}),\n },\n forwardCookies: {\n type: v.boolean().default(false),\n desc: [\n `If true, zero-cache will forward cookies from the request.`,\n `This is useful for passing authentication cookies to the API server.`,\n `If false, cookies are not forwarded.`,\n ],\n ...(replacement\n ? {deprecated: [makeDeprecationMessage(`${replacement}-forward-cookies`)]}\n : {}),\n },\n allowedClientHeaders: {\n type: v.array(v.string()).optional(),\n desc: [\n `A list of header names that clients are allowed to set via custom headers.`,\n `If specified, only headers in this list will be forwarded to the ${suffix === 'push mutations' ? 'push' : 'query'} URL.`,\n `Header names are case-insensitive.`,\n `If not specified, no client-provided headers are forwarded (secure by default).`,\n `Example: ZERO_${replacement ? replacement.toUpperCase() : suffix === 'push mutations' ? 'MUTATE' : 'QUERY'}_ALLOWED_CLIENT_HEADERS=x-request-id,x-correlation-id`,\n ],\n ...(replacement\n ? {\n deprecated: [\n makeDeprecationMessage(`${replacement}-allowed-client-headers`),\n ],\n }\n : {}),\n },\n});\n\nconst mutateOptions = makeMutatorQueryOptions(undefined, 'push mutations');\nconst pushOptions = makeMutatorQueryOptions('mutate', 'push mutations');\nconst queryOptions = makeMutatorQueryOptions(undefined, 'send synced queries');\nconst getQueriesOptions = makeMutatorQueryOptions(\n 'query',\n 'send synced queries',\n);\n\nexport type AuthConfig = Config<typeof authOptions>;\n\n/** @deprecated used only by legacy JWT verification helpers */\nexport type LegacyJWTAuthConfig = Pick<\n AuthConfig,\n 'jwk' | 'jwksUrl' | 'secret' | 'issuer' | 'audience'\n>;\n\n// Note: --help will list flags in the order in which they are defined here,\n// so order the fields such that the important (e.g. required) ones are first.\n// (Exported for testing)\nexport const zeroOptions = {\n upstream: {\n db: {\n type: v.string(),\n desc: [\n `The \"upstream\" authoritative postgres database.`,\n `In the future we will support other types of upstream besides PG.`,\n ],\n },\n\n type: {\n type: v.literalUnion('pg', 'custom').default('pg'),\n desc: [\n `The meaning of the {bold upstream-db} depends on the upstream type:`,\n `* {bold pg}: The connection database string, e.g. \"postgres://...\"`,\n `* {bold custom}: The base URI of the change source \"endpoint, e.g.`,\n ` \"https://my-change-source.dev/changes/v0/stream?apiKey=...\"`,\n ],\n hidden: true, // TODO: Unhide when ready to officially support.\n },\n\n maxConns: {\n type: v.number().default(20),\n desc: [\n `The maximum number of connections to open to the upstream database`,\n `for committing mutations. This is divided evenly amongst sync workers.`,\n `In addition to this number, zero-cache uses one connection for the`,\n `replication stream.`,\n ``,\n `Note that this number must allow for at least one connection per`,\n `sync worker, or zero-cache will fail to start. See {bold num-sync-workers}`,\n ],\n },\n\n maxConnsPerWorker: {\n type: v.number().optional(),\n hidden: true, // Passed from main thread to sync workers\n },\n\n pgReplicationSlotFailover: {\n type: v.boolean().optional(),\n desc: [\n `For upstream Postgres versions 17+, creates replication slots with the`,\n `{bold failover} parameter set to {bold true} to enable slot synchronization`,\n `and failover. Note that additional Postgres-level configuration is necessary`,\n `when enabling this option. 
For details, see:`,\n ``,\n `https://www.postgresql.org/docs/current/logicaldecoding-explanation.html#LOGICALDECODING-REPLICATION-SLOTS-SYNCHRONIZATION`,\n ``,\n `(Note that this option has no effect for Postgres versions before 17.)`,\n ],\n },\n },\n\n /** @deprecated */\n push: pushOptions,\n mutate: mutateOptions,\n /** @deprecated */\n getQueries: getQueriesOptions,\n query: queryOptions,\n\n enableCrudMutations: {\n type: v.boolean().default(true),\n desc: [\n `Enables support for legacy CRUD mutations. When this is {bold false}, no connections`,\n `are made from view-syncers to the upstream db, and push messages with CRUD mutations`,\n `result in an InvalidPush response.`,\n ],\n },\n\n cvr: {\n db: {\n type: v.string().optional(),\n desc: [\n `The Postgres database used to store CVRs. CVRs (client view records) keep track`,\n `of the data synced to clients in order to determine the diff to send on reconnect.`,\n `If unspecified, the {bold upstream-db} will be used.`,\n ],\n },\n\n maxConns: {\n type: v.number().default(30),\n desc: [\n `The maximum number of connections to open to the CVR database.`,\n `This is divided evenly amongst sync workers.`,\n ``,\n `Note that this number must allow for at least one connection per`,\n `sync worker, or zero-cache will fail to start. See {bold num-sync-workers}`,\n ],\n },\n\n maxConnsPerWorker: {\n type: v.number().optional(),\n hidden: true, // Passed from main thread to sync workers\n },\n\n garbageCollectionInactivityThresholdHours: {\n type: v.number().default(48),\n desc: [\n `The duration after which an inactive CVR is eligible for garbage collection.`,\n `Note that garbage collection is an incremental, periodic process which does not`,\n `necessarily purge all eligible CVRs immediately.`,\n ],\n },\n\n garbageCollectionInitialIntervalSeconds: {\n type: v.number().default(60),\n desc: [\n `The initial interval at which to check and garbage collect inactive CVRs.`,\n `This interval is increased exponentially (up to 16 minutes) when there is`,\n `nothing to purge.`,\n ],\n },\n\n garbageCollectionInitialBatchSize: {\n type: v.number().default(25),\n desc: [\n `The initial number of CVRs to purge per garbage collection interval.`,\n `This number is increased linearly if the rate of new CVRs exceeds the rate of`,\n `purged CVRs, in order to reach a steady state.`,\n ``,\n `Setting this to 0 effectively disables CVR garbage collection.`,\n ],\n },\n },\n\n queryHydrationStats: {\n type: v.boolean().optional(),\n desc: [\n `Track and log the number of rows considered by query hydrations which`,\n `take longer than {bold log-slow-hydrate-threshold} milliseconds.`,\n `This is useful for debugging and performance tuning.`,\n ],\n },\n\n enableQueryPlanner: {\n type: v.boolean().default(true),\n desc: [\n `Enable the query planner for optimizing ZQL queries.`,\n ``,\n `The query planner analyzes and optimizes query execution by determining`,\n `the most efficient join strategies.`,\n ``,\n `You can disable the planner if it is picking bad strategies.`,\n ],\n },\n\n yieldThresholdMs: {\n type: v.number().default(10),\n desc: [\n `The maximum amount of time in milliseconds that a sync worker will`,\n `spend in IVM (processing query hydration and advancement) before yielding`,\n `to the event loop. 
Lower values increase responsiveness and fairness at`,\n `the cost of reduced throughput.`,\n ],\n },\n\n change: {\n db: {\n type: v.string().optional(),\n desc: [\n `The Postgres database used to store recent replication log entries, in order`,\n `to sync multiple view-syncers without requiring multiple replication slots on`,\n `the upstream database. If unspecified, the {bold upstream-db} will be used.`,\n ],\n },\n\n maxConns: {\n type: v.number().default(5),\n desc: [\n `The maximum number of connections to open to the change database.`,\n `This is used by the {bold change-streamer} for catching up`,\n `{bold zero-cache} replication subscriptions.`,\n ],\n },\n\n statementTimeoutMs: {\n type: v.number().default(20_000),\n desc: [\n `Fail change-log transactions if a statement response from postgres is not received within`,\n `the specified timeout. This differs from a postgres {bold statement_timeout} in that`,\n `it is implemented to handle a pathological case in which Postgres does not return a`,\n `response but otherwise believes the transaction to be idle.`,\n ],\n hidden: true, // make visible if proven to be effective/necessary\n },\n },\n\n replica: replicaOptions,\n\n log: logOptions,\n\n app: appOptions,\n\n shard: shardOptions,\n\n auth: authOptions,\n\n port: {\n type: v.number().default(4848),\n desc: [`The port for sync connections.`],\n },\n\n changeStreamer: {\n uri: {\n type: v.string().optional(),\n desc: [\n `When set, connects to the {bold change-streamer} at the given URI.`,\n `In a multi-node setup, this should be specified in {bold view-syncer} options,`,\n `pointing to the {bold replication-manager} URI, which runs a {bold change-streamer}`,\n `on port 4849.`,\n ],\n },\n\n mode: {\n type: v.literalUnion('dedicated', 'discover').default('dedicated'),\n desc: [\n `As an alternative to {bold ZERO_CHANGE_STREAMER_URI}, the {bold ZERO_CHANGE_STREAMER_MODE}`,\n `can be set to \"{bold discover}\" to instruct the {bold view-syncer} to connect to the `,\n `ip address registered by the {bold replication-manager} upon startup.`,\n ``,\n `This may not work in all networking configurations, e.g. certain private `,\n `networking or port forwarding configurations. Using the {bold ZERO_CHANGE_STREAMER_URI}`,\n `with an explicit routable hostname is recommended instead.`,\n ``,\n `Note: This option is ignored if the {bold ZERO_CHANGE_STREAMER_URI} is set.`,\n ],\n },\n\n port: {\n type: v.number().optional(),\n desc: [\n `The port on which the {bold change-streamer} runs. This is an internal`,\n `protocol between the {bold replication-manager} and {bold view-syncers}, which`,\n `runs in the same process tree in local development or a single-node configuration.`,\n ``,\n `If unspecified, defaults to {bold --port} + 1.`,\n ],\n },\n\n /** @deprecated */\n address: {\n type: v.string().optional(),\n deprecated: [\n `Set the {bold ZERO_CHANGE_STREAMER_URI} on view-syncers instead.`,\n ],\n hidden: true,\n },\n\n /** @deprecated */\n protocol: {\n type: v.literalUnion('ws', 'wss').default('ws'),\n deprecated: [\n `Set the {bold ZERO_CHANGE_STREAMER_URI} on view-syncers instead.`,\n ],\n hidden: true,\n },\n\n discoveryInterfacePreferences: {\n type: v.array(v.string()).default([...DEFAULT_PREFERRED_PREFIXES]),\n desc: [\n `The name prefixes to prefer when introspecting the network interfaces to determine`,\n `the externally reachable IP address for change-streamer discovery. 
This defaults`,\n `to commonly used names for standard ethernet interfaces in order to prevent selecting`,\n `special interfaces such as those for VPNs.`,\n ],\n // More confusing than it's worth to advertise this. The default list should be\n // adjusted to make things work for all environments; it is controlled as a\n // hidden flag as an emergency to unblock people with outlier network configs.\n hidden: true,\n },\n\n startupDelayMs: {\n type: v.number().default(15000),\n desc: [\n `The delay to wait before the change-streamer takes over the replication stream`,\n `(i.e. the handoff during replication-manager updates), to allow loadbalancers to register`,\n `the task as healthy based on healthcheck parameters. Note that if a change stream request`,\n `is received during this interval, the delay will be canceled and the takeover will happen`,\n `immediately, since the incoming request indicates that the task is registered as a target.`,\n ],\n },\n\n backPressureLimitHeapProportion: {\n type: v.number().default(0.04),\n desc: [\n `The percentage of {bold --max-old-space-size} to use as a buffer for absorbing replication`,\n `stream spikes. When the estimated amount of queued data exceeds this threshold, back pressure`,\n `is applied to the replication stream, delaying downstream sync as a result.`,\n ``,\n `The threshold was determined empirically with load testing. Higher thresholds have resulted`,\n `in OOMs. Note also that the byte-counting logic in the queue is strictly an underestimate of`,\n `actual memory usage (but importantly, proportionally correct), so the queue is actually`,\n `using more than what this proportion suggests.`,\n ``,\n `This parameter is exported as an emergency knob to reduce the size of the buffer in the`,\n `event that the server OOMs from back pressure. Resist the urge to {italic increase} this`,\n `proportion, as it is mainly useful for absorbing periodic spikes and does not meaningfully`,\n `affect steady-state replication throughput; the latter is determined by other factors such`,\n `as object serialization and PG throughput`,\n ``,\n `In other words, the back pressure limit does not constrain replication throughput;`,\n `rather, it protects the system when the upstream throughput exceeds the downstream`,\n `throughput.`,\n ],\n },\n\n flowControlConsensusPaddingSeconds: {\n type: v.number().default(1),\n desc: [\n `During periodic flow control checks (every 64kb), the amount of time to wait after the`,\n `majority of subscribers have acked, after which replication will continue even if`,\n `some subscribers have yet to ack. (Note that this is not a timeout for the {italic entire} send,`,\n `but a timeout that starts {italic after} the majority of receivers have acked.)`,\n ``,\n `This allows a bounded amount of time for backlogged subscribers to catch up on each flush`,\n `without forcing all subscribers to wait for the entire backlog to be processed. It is also`,\n `useful for mitigating the effect of unresponsive subscribers due to severed websocket`,\n `connections (until liveness checks disconnect them).`,\n ``,\n `Set this to a negative number to disable early flow control releases. 
(Not recommended, but`,\n `available as an emergency measure.)`,\n ],\n },\n },\n\n taskID: {\n type: v.string().optional(),\n desc: [\n `Globally unique identifier for the zero-cache instance.`,\n ``,\n `Setting this to a platform specific task identifier can be useful for debugging.`,\n `If unspecified, zero-cache will attempt to extract the TaskARN if run from within`,\n `an AWS ECS container, and otherwise use a random string.`,\n ],\n },\n\n perUserMutationLimit,\n\n numSyncWorkers: {\n type: v.number().optional(),\n desc: [\n `The number of processes to use for view syncing.`,\n `Leave this unset to use the maximum available parallelism.`,\n `If set to 0, the server runs without sync workers, which is the`,\n `configuration for running the {bold replication-manager}.`,\n ],\n },\n\n autoReset: {\n type: v.boolean().default(true),\n desc: [\n `Automatically wipe and resync the replica when replication is halted.`,\n `This situation can occur for configurations in which the upstream database`,\n `provider prohibits event trigger creation, preventing the zero-cache from`,\n `being able to correctly replicate schema changes. For such configurations,`,\n `an upstream schema change will instead result in halting replication with an`,\n `error indicating that the replica needs to be reset.`,\n ``,\n `When {bold auto-reset} is enabled, zero-cache will respond to such situations`,\n `by shutting down, and when restarted, resetting the replica and all synced `,\n `clients. This is a heavy-weight operation and can result in user-visible`,\n `slowness or downtime if compute resources are scarce.`,\n ],\n },\n\n replicationLag: {\n reportIntervalMs: {\n type: v.number().default(30000),\n desc: [\n `The minimum interval at which replication lag reports are written upstream and`,\n `reported via the {bold zero.replication.total_lag} opentelemetry metric. Because`,\n `replication lag reports are only issued after the previous one was received, the`,\n `actual interval between reports may be longer when there is a backlog in the`,\n `replication stream. A negative or 0 value disables lag reporting.`,\n ``,\n `This monitoring feature is only support on the postgres upstream type.`,\n ],\n },\n },\n\n adminPassword: {\n type: v.string().optional(),\n desc: [\n `A password used to administer zero-cache server, for example to access the`,\n `/statz endpoint.`,\n '',\n 'A password is optional in development mode but {bold required in production} mode.',\n ],\n },\n\n websocketCompression: {\n type: v.boolean().default(false),\n desc: [\n 'Enable WebSocket per-message deflate compression.',\n '',\n 'Compression can reduce bandwidth usage for sync traffic but',\n 'increases CPU usage on both client and server. 
Disabled by default.',\n '',\n 'See: https://github.com/websockets/ws#websocket-compression',\n ],\n },\n\n websocketCompressionOptions: {\n type: v.string().optional(),\n desc: [\n 'JSON string containing WebSocket compression options.',\n '',\n 'Only used if websocketCompression is enabled.',\n '',\n 'Example: \\\\{\"zlibDeflateOptions\":\\\\{\"level\":3\\\\},\"threshold\":1024\\\\}',\n '',\n 'See https://github.com/websockets/ws/blob/master/doc/ws.md#new-websocketserveroptions-callback for available options.',\n ],\n },\n\n websocketMaxPayloadBytes: {\n type: v.number().default(10 * 1024 * 1024),\n desc: [\n 'Maximum size of incoming WebSocket messages in bytes.',\n '',\n 'Messages exceeding this limit are rejected before parsing.',\n 'Default: 10MB (10 * 1024 * 1024 = 10485760)',\n ],\n },\n\n litestream: {\n executable: {\n type: v.string().optional(),\n desc: [\n `Path to the {bold litestream} executable. This must be built from the`,\n `{bold rocicorp/litestream} fork. Support for the official binary at v0.5.x`,\n `is planned.`,\n ],\n },\n\n executableV5: {\n type: v.string().optional(),\n desc: [\n `The v0.5.x litestream executable which is used for restoring the backup`,\n `backup when {bold ZERO_LITESTREAM_RESTORE_USING_V5} is specified.`,\n `litestream v0.5.8+ can restore from both v0.3.x and v0.5.x backup formats,`,\n `affording forwards compatibility with a future zero-cache`,\n `version that will use litestream v0.5.x to backup the replica.`,\n ],\n },\n\n restoreUsingV5: {\n type: v.boolean().default(false),\n desc: [\n `Restores the backup using the {bold ZERO_LITESTREAM_EXECUTABLE_V5} if specified.`,\n ],\n },\n\n configPath: {\n type: v.string().default('./src/services/litestream/config.yml'),\n desc: [\n `Path to the litestream yaml config file. zero-cache will run this with its`,\n `environment variables, which can be referenced in the file via $\\\\{ENV\\\\}`,\n `substitution, for example:`,\n `* {bold ZERO_REPLICA_FILE} for the db path`,\n `* {bold ZERO_LITESTREAM_BACKUP_LOCATION} for the db replica url`,\n `* {bold ZERO_LITESTREAM_LOG_LEVEL} for the log level`,\n `* {bold ZERO_LOG_FORMAT} for the log type`,\n ],\n },\n\n logLevel: {\n type: v.literalUnion('debug', 'info', 'warn', 'error').default('warn'),\n },\n\n backupURL: {\n type: v.string().optional(),\n desc: [\n `The location of the litestream backup, usually an {bold s3://} URL.`,\n `This is only consulted by the {bold replication-manager}.`,\n `{bold view-syncers} receive this information from the {bold replication-manager}.`,\n ],\n },\n\n endpoint: {\n type: v.string().optional(),\n desc: [\n `The S3-compatible endpoint URL to use for the litestream backup. Only required for non-AWS services.`,\n `The {bold replication-manager} and {bold view-syncers} must have the same endpoint.`,\n ],\n },\n\n port: {\n type: v.number().optional(),\n desc: [\n `Port on which litestream exports metrics, used to determine the replication`,\n `watermark up to which it is safe to purge change log records.`,\n ``,\n `If unspecified, defaults to {bold --port} + 2.`,\n ],\n },\n\n checkpointThresholdMB: {\n type: v.number().default(40),\n desc: [\n `The size of the WAL file at which to perform an SQlite checkpoint to apply`,\n `the writes in the WAL to the main database file. Each checkpoint creates`,\n `a new WAL segment file that will be backed up by litestream. 
Smaller thresholds`,\n `may improve read performance, at the expense of creating more files to download`,\n `when restoring the replica from the backup.`,\n ],\n },\n\n minCheckpointPageCount: {\n type: v.number().optional(),\n desc: [\n `The WAL page count at which SQLite attempts a PASSIVE checkpoint, which`,\n `transfers pages to the main database file without blocking writers.`,\n `Defaults to {bold checkpointThresholdMB * 250} (since SQLite page size is 4KB).`,\n ],\n },\n\n maxCheckpointPageCount: {\n type: v.number().optional(),\n desc: [\n `The WAL page count at which SQLite performs a RESTART checkpoint, which`,\n `blocks writers until complete. Defaults to {bold minCheckpointPageCount * 10}.`,\n `Set to {bold 0} to disable RESTART checkpoints entirely.`,\n ],\n },\n\n incrementalBackupIntervalMinutes: {\n type: v.number().default(15),\n desc: [\n `The interval between incremental backups of the replica. Shorter intervals`,\n `reduce the amount of change history that needs to be replayed when catching`,\n `up a new view-syncer, at the expense of increasing the number of files needed`,\n `to download for the initial litestream restore.`,\n ],\n },\n\n snapshotBackupIntervalHours: {\n type: v.number().default(12),\n desc: [\n `The interval between snapshot backups of the replica. Snapshot backups`,\n `make a full copy of the database to a new litestream generation. This`,\n `improves restore time at the expense of bandwidth. Applications with a`,\n `large database and low write rate can increase this interval to reduce`,\n `network usage for backups (litestream defaults to 24 hours).`,\n ],\n },\n\n restoreParallelism: {\n type: v.number().default(48),\n desc: [\n `The number of WAL files to download in parallel when performing the`,\n `initial restore of the replica from the backup.`,\n ],\n },\n\n multipartConcurrency: {\n type: v.number().default(48),\n desc: [\n `The number of parts (of size {bold --litestream-multipart-size} bytes)`,\n `to upload or download in parallel when backing up or restoring the snapshot.`,\n ],\n },\n\n multipartSize: {\n type: v.number().default(16 * 1024 * 1024),\n desc: [\n `The size of each part when uploading or downloading the snapshot with`,\n `{bold --multipart-concurrency}. Note that up to {bold concurrency * size}`,\n `bytes of memory are used when backing up or restoring the snapshot.`,\n ],\n },\n },\n\n storageDBTmpDir: {\n type: v.string().optional(),\n desc: [\n `tmp directory for IVM operator storage. Leave unset to use os.tmpdir()`,\n ],\n },\n\n initialSync: {\n tableCopyWorkers: {\n type: v.number().default(5),\n desc: [\n `The number of parallel workers used to copy tables during initial sync.`,\n `Each worker uses a database connection and will buffer up to (approximately)`,\n `10 MB of table data in memory during initial sync. Increasing the number of`,\n `workers may improve initial sync speed; however, note that local disk throughput`,\n `(i.e. IOPS), upstream CPU, and network bandwidth may also be bottlenecks.`,\n ],\n },\n\n profileCopy: {\n type: v.boolean().optional(),\n hidden: true,\n desc: [\n `Takes a cpu profile during the copy phase initial-sync, storing it as a JSON file`,\n `initial-copy.cpuprofile in the tmp directory.`,\n ],\n },\n\n textCopy: {\n type: v.boolean().default(false),\n desc: [\n `Use text-format COPY instead of binary COPY for initial sync and`,\n `backfill streaming. 
This is slower but can work around issues with`,\n `binary encoding of certain data types.`,\n ],\n },\n },\n\n shadowSync: {\n enabled: {\n type: v.boolean().default(false),\n desc: [\n `Periodically exercises the initial-sync code path against a sample of`,\n `rows from every published table, writing to a throwaway SQLite database.`,\n `This acts as a canary: if the real initial-sync path ever breaks (schema`,\n `drift, PG version quirks, etc.), the shadow run fails before a customer`,\n `actually needs a full reset.`,\n ],\n },\n\n intervalHours: {\n type: v.number().default(24),\n desc: [\n `The interval between shadow initial-sync runs, in hours. The first run`,\n `is additionally staggered by a random fraction of this interval so that`,\n `a fleet restart does not cause all tasks to canary simultaneously.`,\n ],\n },\n\n sampleRate: {\n type: v.number().default(0.1),\n desc: [\n `The BERNOULLI sampling rate for each table (0 < rate <= 1). A value of`,\n `1 disables sampling and copies all rows (still subject to`,\n `{bold max-rows-per-table}).`,\n ],\n },\n\n maxRowsPerTable: {\n type: v.number().default(10000),\n desc: [\n `The hard upper bound on rows copied per table per shadow run. Guards`,\n `against unexpectedly large tables consuming disk / upstream bandwidth.`,\n ],\n },\n },\n\n /** @deprecated */\n targetClientRowCount: {\n type: v.number().default(20_000),\n deprecated: [\n 'This option is no longer used and will be removed in a future version.',\n 'The client-side cache no longer enforces a row limit. Instead, TTL-based expiration',\n 'automatically manages cache size to prevent unbounded growth.',\n ],\n hidden: true,\n },\n\n lazyStartup: {\n type: v.boolean().default(false),\n desc: [\n 'Delay starting the majority of zero-cache until first request.',\n '',\n 'This is mainly intended to avoid connecting to Postgres replication stream',\n 'until the first request is received, which can be useful i.e., for preview instances.',\n '',\n 'Currently only supported in single-node mode.',\n ],\n },\n\n serverVersion: {\n type: v.string().optional(),\n desc: [`The version string outputted to logs when the server starts up.`],\n },\n\n enableTelemetry: {\n type: v.boolean().default(true),\n desc: [\n `Set to false to opt out of telemetry collection.`,\n ``,\n `This helps us improve Zero by collecting anonymous usage data.`,\n `Setting the DO_NOT_TRACK environment variable also disables telemetry.`,\n ],\n },\n\n cloudEvent: {\n sinkEnv: {\n type: v.string().optional(),\n desc: [\n `ENV variable containing a URI to a CloudEvents sink. When set, ZeroEvents`,\n `will be published to the sink as the {bold data} field of CloudEvents.`,\n `The {bold source} field of the CloudEvents will be set to the {bold ZERO_TASK_ID},`,\n `along with any extension attributes specified by the {bold ZERO_CLOUD_EVENT_EXTENSION_OVERRIDES_ENV}.`,\n ``,\n `This configuration is modeled to easily integrate with a knative K_SINK binding,`,\n `(i.e. https://github.com/knative/eventing/blob/main/docs/spec/sources.md#sinkbinding).`,\n `However, any CloudEvents sink can be used.`,\n ],\n },\n\n extensionOverridesEnv: {\n type: v.string().optional(),\n desc: [\n `ENV variable containing a JSON stringified object with an {bold extensions} field`,\n `containing attributes that should be added or overridden on outbound CloudEvents.`,\n ``,\n `This configuration is modeled to easily integrate with a knative K_CE_OVERRIDES binding,`,\n `(i.e. 
https://github.com/knative/eventing/blob/main/docs/spec/sources.md#sinkbinding).`,\n ],\n },\n },\n};\n\nexport type ZeroConfig = Config<typeof zeroOptions>;\n\nlet loadedConfig: Config<typeof zeroOptions> | undefined;\n\nexport function getZeroConfig(\n opts: Omit<ParseOptions, 'envNamePrefix'> = {},\n): ZeroConfig {\n if (!loadedConfig || singleProcessMode()) {\n loadedConfig = parseOptions(zeroOptions, {\n envNamePrefix: ZERO_ENV_VAR_PREFIX,\n emitDeprecationWarnings: false, // overridden at the top level parse\n ...opts,\n });\n\n if (loadedConfig.queryHydrationStats) {\n runtimeDebugFlags.trackRowCountsVended = true;\n }\n }\n return loadedConfig;\n}\n\n/**\n * Same as {@link getZeroConfig}, with an additional check that the\n * config has already been normalized (i.e. by the top level server/runner).\n */\nexport function getNormalizedZeroConfig(\n opts: Omit<ParseOptions, 'envNamePrefix'> = {},\n): NormalizedZeroConfig {\n const config = getZeroConfig(opts);\n assertNormalized(config);\n return config;\n}\n\n/**\n * Gets the server version from the config if provided. Otherwise it gets it\n * from the Zero package.json.\n */\nexport function getServerVersion(\n config: Pick<ZeroConfig, 'serverVersion'> | undefined,\n): string {\n return config?.serverVersion ?? packageJson.version;\n}\n\nexport function isAdminPasswordValid(\n lc: LogContext,\n config: Pick<NormalizedZeroConfig, 'adminPassword'>,\n password: string | undefined,\n) {\n // If development mode, password is optional\n // We use process.env.NODE_ENV === 'development' as a sign that we're in\n // development mode, rather than a custom env var like ZERO_DEVELOPMENT_MODE,\n // because NODE_ENV is more standard and is already used by many tools.\n // Note that if NODE_ENV is not set, we assume production mode.\n\n if (!password && !config.adminPassword && isDevelopmentMode()) {\n warnOnce(\n lc,\n 'No admin password set; allowing access in development mode only',\n );\n return true;\n }\n\n if (!config.adminPassword) {\n lc.warn?.('No admin password set; denying access');\n return false;\n }\n\n // Use constant-time comparison to prevent timing attacks\n const passwordBuffer = Buffer.from(password ?? 
'');\n const configBuffer = Buffer.from(config.adminPassword);\n\n // Handle length mismatch in constant time\n if (passwordBuffer.length !== configBuffer.length) {\n // Perform dummy comparison to maintain constant timing\n timingSafeEqual(configBuffer, configBuffer);\n lc.warn?.('Invalid admin password');\n return false;\n }\n\n if (!timingSafeEqual(passwordBuffer, configBuffer)) {\n lc.warn?.('Invalid admin password');\n return false;\n }\n\n lc.debug?.('Admin password accepted');\n return true;\n}\n\nlet hasWarned = false;\n\nfunction warnOnce(lc: LogContext, msg: string) {\n if (!hasWarned) {\n lc.warn?.(msg);\n hasWarned = true;\n }\n}\n\n// For testing purposes - reset the warning state\nexport function resetWarnOnceState() {\n hasWarned = false;\n}\n"],"mappings":";;;;;;;;;;;;;;AA8BA,IAAa,sBAAsB;AAEnC,IAAa,aAAa;CACxB,IAAI;EACF,MAAM,eACH,QAAQ,CACR,QAAQ,OAAO,CACf,QAAO,OAAM,0BAA0B,KAAK,GAAG,EAAE,uBAAuB;EAC3E,MAAM;GACJ;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACD;EACF;CAED,cAAc;EACZ,MAAM,eAAE,MAAM,eAAE,QAAQ,CAAC,CAAC,eAAe,EAAE,CAAC;EAC5C,MAAM;GACJ;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACD;EACF;CACF;AAED,IAAa,eAAe;CAC1B,IAAI;EACF,MAAM,eACH,QAAQ,CACR,aAAa;AACZ,SAAM,IAAI,MACR,wEAED;IACD,CACD,UAAU;EACb,QAAQ;EACT;CAED,KAAK;EACH,MAAM,eAAE,QAAQ,CAAC,QAAQ,EAAE;EAC3B,MAAM;GACJ;GACA;GACA;GACA;GACD;EACD,QAAQ;EACT;CACF;AAED,IAAM,iBAAiB;CACrB,MAAM;EACJ,MAAM,eAAE,QAAQ,CAAC,QAAQ,UAAU;EACnC,MAAM;GACJ;GACA;GACA;GACD;EACF;CAED,qBAAqB;EACnB,MAAM,eAAE,QAAQ,CAAC,UAAU;EAC3B,MAAM;GACJ;GACA;GACA;GACA;GACD;EACF;CACF;AAID,IAAM,uBAAuB;CAC3B,KAAK;EACH,MAAM,eAAE,QAAQ,CAAC,UAAU;EAC3B,MAAM,CACJ,wEACA,0CACD;EACF;CACD,UAAU;EACR,MAAM,eAAE,QAAQ,CAAC,QAAQ,IAAO;EAChC,MAAM,CACJ,gFACD;EACF;CACF;AAID,IAAM,cAAc;CAClB,KAAK;EACH,MAAM,eAAE,QAAQ,CAAC,UAAU;EAC3B,MAAM,CACJ,uHACD;EACD,YAAY,CACV,sGACD;EACF;CACD,SAAS;EACP,MAAM,eAAE,QAAQ,CAAC,UAAU;EAC3B,MAAM,CACJ,yHACD;EACD,YAAY,CACV,sGACD;EACF;CACD,QAAQ;EACN,MAAM,eAAE,QAAQ,CAAC,UAAU;EAC3B,MAAM,CACJ,4GACD;EACD,YAAY,CACV,sGACD;EACF;CACD,QAAQ;EACN,MAAM,eAAE,QAAQ,CAAC,UAAU;EAC3B,MAAM,CACJ,0DACA,sEACD;EACD,YAAY,CACV,sGACD;EACF;CACD,UAAU;EACR,MAAM,eAAE,QAAQ,CAAC,UAAU;EAC3B,MAAM,CACJ,4DACA,wEACD;EACD,YAAY,CACV,sGACD;EACF;CACD,2BAA2B;EACzB,MAAM,eAAE,QAAQ,CAAC,UAAU;EAC3B,MAAM,CACJ,gGACA,oDACD;EACF;CACD,4BAA4B;EAC1B,MAAM,eAAE,QAAQ,CAAC,UAAU;EAC3B,MAAM,CACJ,+FACA,qDACD;EACF;CACF;AAED,IAAM,0BAA0B,SAC9B,aAAa,UAAU,qBAAqB,KAAK,CAAC,gBAAgB,KAAK;AAEzE,IAAM,2BACJ,aACA,YACI;CACJ,KAAK;EACH,MAAM,eAAE,MAAM,eAAE,QAAQ,CAAC,CAAC,UAAU;EACpC,MAAM;GACJ,sDAAsD,OAAO;GAC7D;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACD;EACD,GAAI,cACA,EAAC,YAAY,CAAC,uBAAuB,GAAG,YAAY,MAAM,CAAC,EAAC,GAC5D,EAAE;EACP;CACD,QAAQ;EACN,MAAM,eAAE,QAAQ,CAAC,UAAU;EAC3B,MAAM,CACJ,0FACD;EACD,GAAI,cACA,EAAC,YAAY,CAAC,uBAAuB,GAAG,YAAY,UAAU,CAAC,EAAC,GAChE,EAAE;EACP;CACD,gBAAgB;EACd,MAAM,eAAE,SAAS,CAAC,QAAQ,MAAM;EAChC,MAAM;GACJ;GACA;GACA;GACD;EACD,GAAI,cACA,EAAC,YAAY,CAAC,uBAAuB,GAAG,YAAY,kBAAkB,CAAC,EAAC,GACxE,EAAE;EACP;CACD,sBAAsB;EACpB,MAAM,eAAE,MAAM,eAAE,QAAQ,CAAC,CAAC,UAAU;EACpC,MAAM;GACJ;GACA,oEAAoE,WAAW,mBAAmB,SAAS,QAAQ;GACnH;GACA;GACA,iBAAiB,cAAc,YAAY,aAAa,GAAG,WAAW,mBAAmB,WAAW,QAAQ;GAC7G;EACD,GAAI,cACA,EACE,YAAY,CACV,uBAAuB,GAAG,YAAY,yBAAyB,CAChE,EACF,GACD,EAAE;EACP;CACF;AAED,IAAM,gBAAgB,wBAAwB,KAAA,GAAW,iBAAiB;AAC1E,IAAM,cAAc,wBAAwB,UAAU,iBAAiB;AACvE,IAAM,eAAe,wBAAwB,KAAA,GAAW,sBAAsB;AAC9E,IAAM,oBAAoB,wBACxB,SACA,sBACD;AAaD,IAA
a,cAAc;CACzB,UAAU;EACR,IAAI;GACF,MAAM,eAAE,QAAQ;GAChB,MAAM,CACJ,mDACA,oEACD;GACF;EAED,MAAM;GACJ,MAAM,aAAe,MAAM,SAAS,CAAC,QAAQ,KAAK;GAClD,MAAM;IACJ;IACA;IACA;IACA;IACD;GACD,QAAQ;GACT;EAED,UAAU;GACR,MAAM,eAAE,QAAQ,CAAC,QAAQ,GAAG;GAC5B,MAAM;IACJ;IACA;IACA;IACA;IACA;IACA;IACA;IACD;GACF;EAED,mBAAmB;GACjB,MAAM,eAAE,QAAQ,CAAC,UAAU;GAC3B,QAAQ;GACT;EAED,2BAA2B;GACzB,MAAM,eAAE,SAAS,CAAC,UAAU;GAC5B,MAAM;IACJ;IACA;IACA;IACA;IACA;IACA;IACA;IACA;IACD;GACF;EACF;CAGD,MAAM;CACN,QAAQ;CAER,YAAY;CACZ,OAAO;CAEP,qBAAqB;EACnB,MAAM,eAAE,SAAS,CAAC,QAAQ,KAAK;EAC/B,MAAM;GACJ;GACA;GACA;GACD;EACF;CAED,KAAK;EACH,IAAI;GACF,MAAM,eAAE,QAAQ,CAAC,UAAU;GAC3B,MAAM;IACJ;IACA;IACA;IACD;GACF;EAED,UAAU;GACR,MAAM,eAAE,QAAQ,CAAC,QAAQ,GAAG;GAC5B,MAAM;IACJ;IACA;IACA;IACA;IACA;IACD;GACF;EAED,mBAAmB;GACjB,MAAM,eAAE,QAAQ,CAAC,UAAU;GAC3B,QAAQ;GACT;EAED,2CAA2C;GACzC,MAAM,eAAE,QAAQ,CAAC,QAAQ,GAAG;GAC5B,MAAM;IACJ;IACA;IACA;IACD;GACF;EAED,yCAAyC;GACvC,MAAM,eAAE,QAAQ,CAAC,QAAQ,GAAG;GAC5B,MAAM;IACJ;IACA;IACA;IACD;GACF;EAED,mCAAmC;GACjC,MAAM,eAAE,QAAQ,CAAC,QAAQ,GAAG;GAC5B,MAAM;IACJ;IACA;IACA;IACA;IACA;IACD;GACF;EACF;CAED,qBAAqB;EACnB,MAAM,eAAE,SAAS,CAAC,UAAU;EAC5B,MAAM;GACJ;GACA;GACA;GACD;EACF;CAED,oBAAoB;EAClB,MAAM,eAAE,SAAS,CAAC,QAAQ,KAAK;EAC/B,MAAM;GACJ;GACA;GACA;GACA;GACA;GACA;GACD;EACF;CAED,kBAAkB;EAChB,MAAM,eAAE,QAAQ,CAAC,QAAQ,GAAG;EAC5B,MAAM;GACJ;GACA;GACA;GACA;GACD;EACF;CAED,QAAQ;EACN,IAAI;GACF,MAAM,eAAE,QAAQ,CAAC,UAAU;GAC3B,MAAM;IACJ;IACA;IACA;IACD;GACF;EAED,UAAU;GACR,MAAM,eAAE,QAAQ,CAAC,QAAQ,EAAE;GAC3B,MAAM;IACJ;IACA;IACA;IACD;GACF;EAED,oBAAoB;GAClB,MAAM,eAAE,QAAQ,CAAC,QAAQ,IAAO;GAChC,MAAM;IACJ;IACA;IACA;IACA;IACD;GACD,QAAQ;GACT;EACF;CAED,SAAS;CAET,KAAK;CAEL,KAAK;CAEL,OAAO;CAEP,MAAM;CAEN,MAAM;EACJ,MAAM,eAAE,QAAQ,CAAC,QAAQ,KAAK;EAC9B,MAAM,CAAC,iCAAiC;EACzC;CAED,gBAAgB;EACd,KAAK;GACH,MAAM,eAAE,QAAQ,CAAC,UAAU;GAC3B,MAAM;IACJ;IACA;IACA;IACA;IACD;GACF;EAED,MAAM;GACJ,MAAM,aAAe,aAAa,WAAW,CAAC,QAAQ,YAAY;GAClE,MAAM;IACJ;IACA;IACA;IACA;IACA;IACA;IACA;IACA;IACA;IACD;GACF;EAED,MAAM;GACJ,MAAM,eAAE,QAAQ,CAAC,UAAU;GAC3B,MAAM;IACJ;IACA;IACA;IACA;IACA;IACD;GACF;EAGD,SAAS;GACP,MAAM,eAAE,QAAQ,CAAC,UAAU;GAC3B,YAAY,CACV,mEACD;GACD,QAAQ;GACT;EAGD,UAAU;GACR,MAAM,aAAe,MAAM,MAAM,CAAC,QAAQ,KAAK;GAC/C,YAAY,CACV,mEACD;GACD,QAAQ;GACT;EAED,+BAA+B;GAC7B,MAAM,eAAE,MAAM,eAAE,QAAQ,CAAC,CAAC,QAAQ,CAAC,GAAG,2BAA2B,CAAC;GAClE,MAAM;IACJ;IACA;IACA;IACA;IACD;GAID,QAAQ;GACT;EAED,gBAAgB;GACd,MAAM,eAAE,QAAQ,CAAC,QAAQ,KAAM;GAC/B,MAAM;IACJ;IACA;IACA;IACA;IACA;IACD;GACF;EAED,iCAAiC;GAC/B,MAAM,eAAE,QAAQ,CAAC,QAAQ,IAAK;GAC9B,MAAM;IACJ;IACA;IACA;IACA;IACA;IACA;IACA;IACA;IACA;IACA;IACA;IACA;IACA;IACA;IACA;IACA;IACA;IACA;IACD;GACF;EAED,oCAAoC;GAClC,MAAM,eAAE,QAAQ,CAAC,QAAQ,EAAE;GAC3B,MAAM;IACJ;IACA;IACA;IACA;IACA;IACA;IACA;IACA;IACA;IACA;IACA;IACA;IACD;GACF;EACF;CAED,QAAQ;EACN,MAAM,eAAE,QAAQ,CAAC,UAAU;EAC3B,MAAM;GACJ;GACA;GACA;GACA;GACA;GACD;EACF;CAED;CAEA,gBAAgB;EACd,MAAM,eAAE,QAAQ,CAAC,UAAU;EAC3B,MAAM;GACJ;GACA;GACA;GACA;GACD;EACF;CAED,WAAW;EACT,MAAM,eAAE,SAAS,CAAC,QAAQ,KAAK;EAC/B,MAAM;GACJ;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACD;EACF;CAED,gBAAgB,EACd,kBAAkB;EAChB,MAAM,eAAE,QAAQ,CAAC,QAAQ,IAAM;EAC/B,MAAM;GACJ;GACA;GACA;GACA;GACA;GACA;GACA;GACD;EACF,EACF;CAED,eAAe;EACb,MAAM,eAAE,QAAQ,CAAC,UAAU;EAC3B,MAAM;GACJ;GACA;GACA;GACA;GACD;EACF;CAED,sBAAsB;EACpB,MAAM,eAAE,SAAS,CAAC,QAAQ,MAAM;EAChC,MAAM;GACJ;GACA;GACA;GACA;GACA;GACA;GACD;EACF;CAED,6BAA6B;EAC3B,MAAM,eAAE,QAAQ,CAAC,UAAU;EAC3B,MAAM;GACJ;GACA;GACA;GACA;GACA;GACA;GACA;GACD;EACF;CAED,0BAA0B;EACxB,MAAM,eAAE,QAAQ,CAAC,QAAQ,KAAK,OAAO,KAAK;EAC1C,MAAM;GACJ;GACA;GACA;GACA;GACD;EACF;CAED,YAAY;EACV,YAAY;GACV,MAAM,eAAE,QAAQ,CAAC,
UAAU;GAC3B,MAAM;IACJ;IACA;IACA;IACD;GACF;EAED,cAAc;GACZ,MAAM,eAAE,QAAQ,CAAC,UAAU;GAC3B,MAAM;IACJ;IACA;IACA;IACA;IACA;IACD;GACF;EAED,gBAAgB;GACd,MAAM,eAAE,SAAS,CAAC,QAAQ,MAAM;GAChC,MAAM,CACJ,mFACD;GACF;EAED,YAAY;GACV,MAAM,eAAE,QAAQ,CAAC,QAAQ,uCAAuC;GAChE,MAAM;IACJ;IACA;IACA;IACA;IACA;IACA;IACA;IACD;GACF;EAED,UAAU,EACR,MAAM,aAAe,SAAS,QAAQ,QAAQ,QAAQ,CAAC,QAAQ,OAAO,EACvE;EAED,WAAW;GACT,MAAM,eAAE,QAAQ,CAAC,UAAU;GAC3B,MAAM;IACJ;IACA;IACA;IACD;GACF;EAED,UAAU;GACR,MAAM,eAAE,QAAQ,CAAC,UAAU;GAC3B,MAAM,CACJ,wGACA,sFACD;GACF;EAED,MAAM;GACJ,MAAM,eAAE,QAAQ,CAAC,UAAU;GAC3B,MAAM;IACJ;IACA;IACA;IACA;IACD;GACF;EAED,uBAAuB;GACrB,MAAM,eAAE,QAAQ,CAAC,QAAQ,GAAG;GAC5B,MAAM;IACJ;IACA;IACA;IACA;IACA;IACD;GACF;EAED,wBAAwB;GACtB,MAAM,eAAE,QAAQ,CAAC,UAAU;GAC3B,MAAM;IACJ;IACA;IACA;IACD;GACF;EAED,wBAAwB;GACtB,MAAM,eAAE,QAAQ,CAAC,UAAU;GAC3B,MAAM;IACJ;IACA;IACA;IACD;GACF;EAED,kCAAkC;GAChC,MAAM,eAAE,QAAQ,CAAC,QAAQ,GAAG;GAC5B,MAAM;IACJ;IACA;IACA;IACA;IACD;GACF;EAED,6BAA6B;GAC3B,MAAM,eAAE,QAAQ,CAAC,QAAQ,GAAG;GAC5B,MAAM;IACJ;IACA;IACA;IACA;IACA;IACD;GACF;EAED,oBAAoB;GAClB,MAAM,eAAE,QAAQ,CAAC,QAAQ,GAAG;GAC5B,MAAM,CACJ,uEACA,kDACD;GACF;EAED,sBAAsB;GACpB,MAAM,eAAE,QAAQ,CAAC,QAAQ,GAAG;GAC5B,MAAM,CACJ,0EACA,+EACD;GACF;EAED,eAAe;GACb,MAAM,eAAE,QAAQ,CAAC,QAAQ,KAAK,OAAO,KAAK;GAC1C,MAAM;IACJ;IACA;IACA;IACD;GACF;EACF;CAED,iBAAiB;EACf,MAAM,eAAE,QAAQ,CAAC,UAAU;EAC3B,MAAM,CACJ,yEACD;EACF;CAED,aAAa;EACX,kBAAkB;GAChB,MAAM,eAAE,QAAQ,CAAC,QAAQ,EAAE;GAC3B,MAAM;IACJ;IACA;IACA;IACA;IACA;IACD;GACF;EAED,aAAa;GACX,MAAM,eAAE,SAAS,CAAC,UAAU;GAC5B,QAAQ;GACR,MAAM,CACJ,qFACA,gDACD;GACF;EAED,UAAU;GACR,MAAM,eAAE,SAAS,CAAC,QAAQ,MAAM;GAChC,MAAM;IACJ;IACA;IACA;IACD;GACF;EACF;CAED,YAAY;EACV,SAAS;GACP,MAAM,eAAE,SAAS,CAAC,QAAQ,MAAM;GAChC,MAAM;IACJ;IACA;IACA;IACA;IACA;IACD;GACF;EAED,eAAe;GACb,MAAM,eAAE,QAAQ,CAAC,QAAQ,GAAG;GAC5B,MAAM;IACJ;IACA;IACA;IACD;GACF;EAED,YAAY;GACV,MAAM,eAAE,QAAQ,CAAC,QAAQ,GAAI;GAC7B,MAAM;IACJ;IACA;IACA;IACD;GACF;EAED,iBAAiB;GACf,MAAM,eAAE,QAAQ,CAAC,QAAQ,IAAM;GAC/B,MAAM,CACJ,wEACA,yEACD;GACF;EACF;CAGD,sBAAsB;EACpB,MAAM,eAAE,QAAQ,CAAC,QAAQ,IAAO;EAChC,YAAY;GACV;GACA;GACA;GACD;EACD,QAAQ;EACT;CAED,aAAa;EACX,MAAM,eAAE,SAAS,CAAC,QAAQ,MAAM;EAChC,MAAM;GACJ;GACA;GACA;GACA;GACA;GACA;GACD;EACF;CAED,eAAe;EACb,MAAM,eAAE,QAAQ,CAAC,UAAU;EAC3B,MAAM,CAAC,kEAAkE;EAC1E;CAED,iBAAiB;EACf,MAAM,eAAE,SAAS,CAAC,QAAQ,KAAK;EAC/B,MAAM;GACJ;GACA;GACA;GACA;GACD;EACF;CAED,YAAY;EACV,SAAS;GACP,MAAM,eAAE,QAAQ,CAAC,UAAU;GAC3B,MAAM;IACJ;IACA;IACA;IACA;IACA;IACA;IACA;IACA;IACD;GACF;EAED,uBAAuB;GACrB,MAAM,eAAE,QAAQ,CAAC,UAAU;GAC3B,MAAM;IACJ;IACA;IACA;IACA;IACA;IACD;GACF;EACF;CACF;AAID,IAAI;AAEJ,SAAgB,cACd,OAA4C,EAAE,EAClC;AACZ,KAAI,CAAC,gBAAgB,mBAAmB,EAAE;AACxC,iBAAe,aAAa,aAAa;GACvC,eAAe;GACf,yBAAyB;GACzB,GAAG;GACJ,CAAC;AAEF,MAAI,aAAa,oBACf,mBAAkB,uBAAuB;;AAG7C,QAAO;;;;;;AAOT,SAAgB,wBACd,OAA4C,EAAE,EACxB;CACtB,MAAM,SAAS,cAAc,KAAK;AAClC,kBAAiB,OAAO;AACxB,QAAO;;;;;;AAOT,SAAgB,iBACd,QACQ;AACR,QAAO,QAAQ,iBAAiB,gBAAY;;AAG9C,SAAgB,qBACd,IACA,QACA,UACA;AAOA,KAAI,CAAC,YAAY,CAAC,OAAO,iBAAiB,mBAAmB,EAAE;AAC7D,WACE,IACA,kEACD;AACD,SAAO;;AAGT,KAAI,CAAC,OAAO,eAAe;AACzB,KAAG,OAAO,wCAAwC;AAClD,SAAO;;CAIT,MAAM,iBAAiB,OAAO,KAAK,YAAY,GAAG;CAClD,MAAM,eAAe,OAAO,KAAK,OAAO,cAAc;AAGtD,KAAI,eAAe,WAAW,aAAa,QAAQ;AAEjD,kBAAgB,cAAc,aAAa;AAC3C,KAAG,OAAO,yBAAyB;AACnC,SAAO;;AAGT,KAAI,CAAC,gBAAgB,gBAAgB,aAAa,EAAE;AAClD,KAAG,OAAO,yBAAyB;AACnC,SAAO;;AAGT,IAAG,QAAQ,0BAA0B;AACrC,QAAO;;AAGT,IAAI,YAAY;AAEhB,SAAS,SAAS,IAAgB,KAAa;AAC7C,KAAI,CAAC,WAAW;AACd,KAAG,OAAO,IAAI;AACd,cAAY"}
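The mutate/query URL options documented in the config source above state that URLs are matched with URLPattern, the standard Web API, so the documented pattern examples can be checked directly against that API. The following is an illustrative sketch only (it demonstrates standard URLPattern syntax, not zero-cache's internal matching code) and assumes a runtime with URLPattern support, e.g. a recent browser or Node, or the urlpattern-polyfill:

// Wildcard subdomain, as in the "https://*.example.com/mutate" example above.
const subdomains = new URLPattern('https://*.example.com/mutate');
console.log(subdomains.test('https://api.example.com/mutate')); // true
console.log(subdomains.test('https://example.org/mutate'));     // false

// Named parameter constrained by a regex group, as in ":version(v\\d+)".
const versioned = new URLPattern('https://api.example.com/:version(v\\d+)/mutate');
console.log(versioned.test('https://api.example.com/v2/mutate'));   // true
console.log(versioned.test('https://api.example.com/beta/mutate')); // false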
@@ -130,7 +130,14 @@ async function runTransaction(log, db, tx) {
     return result;
   } catch (e) {
     log.error?.("Aborted transaction due to error", e);
-    db.prepare("ROLLBACK").run();
+    try {
+      db.prepare("ROLLBACK").run();
+    } catch (rollbackError) {
+      log.error?.("Unable to rollback transaction", rollbackError);
+      const combinedError = /* @__PURE__ */ new Error(`Transaction failed and rollback also failed: operation error = ${String(e)}; rollback error = ${String(rollbackError)}`);
+      combinedError.cause = e;
+      throw combinedError;
+    }
     throw e;
   }
 }
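The hunk above changes runTransaction so that a failing ROLLBACK no longer masks the original transaction error: the rollback now runs inside its own try/catch, and if it also fails, both failures are reported via a combined Error whose cause is the original error. A minimal TypeScript sketch of the resulting wrapper, reconstructed from the compiled hunk and the previous source in migration-lite.ts (not the verbatim new source; formatting may differ):

import type {LogContext} from '@rocicorp/logger';
import type {Database as Db} from '../../../zqlite/src/db.ts';

// Sketch reconstructed from the compiled diff above.
async function runTransaction<T>(
  log: LogContext,
  db: Db,
  tx: (db: Db) => Promise<T> | T,
): Promise<T> {
  db.prepare('BEGIN EXCLUSIVE').run();
  try {
    const result = await tx(db);
    db.prepare('COMMIT').run();
    return result;
  } catch (e) {
    log.error?.('Aborted transaction due to error', e);
    try {
      db.prepare('ROLLBACK').run();
    } catch (rollbackError) {
      // A failed rollback is logged and surfaced together with the original
      // error instead of silently replacing it.
      log.error?.('Unable to rollback transaction', rollbackError);
      const combinedError = new Error(
        `Transaction failed and rollback also failed: operation error = ${String(
          e,
        )}; rollback error = ${String(rollbackError)}`,
      );
      combinedError.cause = e;
      throw combinedError;
    }
    throw e;
  }
}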
@@ -1 +1 @@
-
{"version":3,"file":"migration-lite.js","names":[],"sources":["../../../../../zero-cache/src/db/migration-lite.ts"],"sourcesContent":["import type {LogContext} from '@rocicorp/logger';\nimport {assert} from '../../../shared/src/asserts.ts';\nimport {randInt} from '../../../shared/src/rand.ts';\nimport * as v from '../../../shared/src/valita.ts';\nimport type {Database as Db} from '../../../zqlite/src/db.ts';\nimport {Database} from '../../../zqlite/src/db.ts';\n\ntype Operations = (log: LogContext, tx: Db) => Promise<void> | void;\n\n/**\n * Encapsulates the logic for setting up or upgrading to a new schema. After the\n * Migration code successfully completes, {@link runSchemaMigrations}\n * will update the schema version and commit the transaction.\n */\nexport type Migration = {\n /**\n * Perform database operations that create or alter table structure. This is\n * called at most once during lifetime of the application. If a `migrateData()`\n * operation is defined, that will be performed after `migrateSchema()` succeeds.\n */\n migrateSchema?: Operations;\n\n /**\n * Perform database operations to migrate data to the new schema. This is\n * called after `migrateSchema()` (if defined), and may be called again\n * to re-migrate data after the server was rolled back to an earlier version,\n * and rolled forward again.\n *\n * Consequently, the logic in `migrateData()` must be idempotent.\n */\n migrateData?: Operations;\n\n /**\n * Sets the `minSafeVersion` to the specified value, prohibiting running\n * any earlier code versions.\n */\n minSafeVersion?: number;\n};\n\n/**\n * Mapping of incremental migrations to move from the previous old code\n * version to next one. Versions must be non-zero.\n *\n * The schema resulting from performing incremental migrations should be\n * equivalent to that of the `setupMigration` on a blank database.\n *\n * The highest destinationVersion of this map denotes the current\n * \"code version\", and is also used as the destination version when\n * running the initial setup migration on a blank database.\n */\nexport type IncrementalMigrationMap = {\n [destinationVersion: number]: Migration;\n};\n\n/**\n * Ensures that the schema is compatible with the current code, updating and\n * migrating the schema if necessary.\n */\nexport async function runSchemaMigrations(\n log: LogContext,\n debugName: string,\n dbPath: string,\n setupMigration: Migration,\n incrementalMigrationMap: IncrementalMigrationMap,\n): Promise<void> {\n const start = Date.now();\n log = log.withContext(\n 'initSchema',\n randInt(0, Number.MAX_SAFE_INTEGER).toString(36),\n );\n const db = new Database(log, dbPath);\n\n try {\n const versionMigrations = sorted(incrementalMigrationMap);\n assert(\n versionMigrations.length,\n `Must specify a at least one version migration`,\n );\n assert(\n versionMigrations[0][0] > 0,\n `Versions must be non-zero positive numbers`,\n );\n // oxlint-disable-next-line typescript/no-non-null-assertion\n const codeVersion = versionMigrations.at(-1)![0];\n log.info?.(\n `Checking schema for compatibility with ${debugName} at schema v${codeVersion}`,\n );\n\n let versions = await runTransaction(log, db, tx => {\n const versions = getVersionHistory(tx);\n if (codeVersion < versions.minSafeVersion) {\n throw new Error(\n `Cannot run ${debugName} at schema v${codeVersion} because rollback limit is v${versions.minSafeVersion}`,\n );\n }\n\n if (versions.dataVersion > codeVersion) {\n log.info?.(\n `Data is at v${versions.dataVersion}. 
Resetting to v${codeVersion}`,\n );\n return updateVersionHistory(log, tx, versions, codeVersion);\n }\n return versions;\n });\n\n if (versions.dataVersion < codeVersion) {\n db.unsafeMode(true); // Enables journal_mode = OFF\n db.pragma('locking_mode = EXCLUSIVE');\n db.pragma('foreign_keys = OFF');\n db.pragma('journal_mode = OFF');\n db.pragma('synchronous = OFF');\n // Unfortunately, AUTO_VACUUM is not compatible with BEGIN CONCURRENT,\n // so it is not an option for the replica file.\n // https://sqlite.org/forum/forumpost/25f183416a\n // db.pragma('auto_vacuum = INCREMENTAL');\n\n const migrations =\n versions.dataVersion === 0\n ? // For the empty database v0, only run the setup migration.\n ([[codeVersion, setupMigration]] as const)\n : versionMigrations;\n\n for (const [dest, migration] of migrations) {\n if (versions.dataVersion < dest) {\n log.info?.(\n `Migrating schema from v${versions.dataVersion} to v${dest}`,\n );\n void log.flush(); // Flush logs before each migration to help debug crash-y migrations.\n\n versions = await runTransaction(log, db, async tx => {\n // Fetch meta from within the transaction to make the migration atomic.\n let versions = getVersionHistory(tx);\n if (versions.dataVersion < dest) {\n versions = await runMigration(log, tx, versions, dest, migration);\n assert(\n versions.dataVersion === dest,\n () =>\n `Migration did not reach target version: expected ${dest}, got ${versions.dataVersion}`,\n );\n }\n return versions;\n });\n }\n }\n\n db.exec('ANALYZE main');\n log.info?.('ANALYZE completed');\n } else {\n // Run optimize whenever opening an sqlite db file as recommended in\n // https://www.sqlite.org/pragma.html#pragma_optimize\n // It is important to run the same initialization steps as is done\n // in the view-syncer (i.e. when preparing database for serving\n // replication) so that any corruption detected in the view-syncer is\n // similarly detected in the change-streamer, facilitating an eventual\n // recovery by resyncing the replica anew.\n db.pragma('optimize = 0x10002');\n\n // TODO: Investigate running `integrity_check` or `quick_check` as well,\n // provided that they are not inordinately expensive on large databases.\n }\n\n db.pragma('synchronous = NORMAL');\n db.unsafeMode(false);\n\n assert(\n versions.dataVersion === codeVersion,\n () =>\n `Final dataVersion (${versions.dataVersion}) does not match codeVersion (${codeVersion})`,\n );\n log.info?.(\n `Running ${debugName} at schema v${codeVersion} (${\n Date.now() - start\n } ms)`,\n );\n } catch (e) {\n log.error?.('Error in ensureSchemaMigrated', e);\n throw e;\n } finally {\n db.close();\n void log.flush(); // Flush the logs but do not block server progress on it.\n }\n}\n\nfunction sorted(\n incrementalMigrationMap: IncrementalMigrationMap,\n): [number, Migration][] {\n const versionMigrations: [number, Migration][] = [];\n for (const [v, m] of Object.entries(incrementalMigrationMap)) {\n versionMigrations.push([Number(v), m]);\n }\n return versionMigrations.sort(([a], [b]) => a - b);\n}\n\n// Exposed for tests.\nexport const versionHistory = v.object({\n /**\n * The `schemaVersion` is highest code version that has ever been run\n * on the database, and is used to delineate the structure of the tables\n * in the database. 
A schemaVersion only moves forward; rolling back to\n * an earlier (safe) code version does not revert schema changes that\n * have already been applied.\n */\n schemaVersion: v.number(),\n\n /**\n * The data version is the code version of the latest server that ran.\n * Note that this may be less than the schemaVersion in the case that\n * a server is rolled back to an earlier version after a schema change.\n * In such a case, data (but not schema), may need to be re-migrated\n * when rolling forward again.\n */\n dataVersion: v.number(),\n\n /**\n * The minimum code version that is safe to run. This is used when\n * a schema migration is not backwards compatible with an older version\n * of the code.\n */\n minSafeVersion: v.number(),\n});\n\n// Exposed for tests.\nexport type VersionHistory = v.Infer<typeof versionHistory>;\n\n// Exposed for tests\nexport function getVersionHistory(db: Db): VersionHistory {\n // Note: The `lock` column transparently ensures that at most one row exists.\n db.prepare(\n `\n CREATE TABLE IF NOT EXISTS \"_zero.versionHistory\" (\n dataVersion INTEGER NOT NULL,\n schemaVersion INTEGER NOT NULL,\n minSafeVersion INTEGER NOT NULL,\n\n lock INTEGER PRIMARY KEY DEFAULT 1 CHECK (lock=1)\n );\n `,\n ).run();\n const result = db\n .prepare(\n 'SELECT dataVersion, schemaVersion, minSafeVersion FROM \"_zero.versionHistory\"',\n )\n .get() as VersionHistory;\n return result ?? {dataVersion: 0, schemaVersion: 0, minSafeVersion: 0};\n}\n\nfunction updateVersionHistory(\n log: LogContext,\n db: Db,\n prev: VersionHistory,\n newVersion: number,\n minSafeVersion?: number,\n): VersionHistory {\n assert(newVersion > 0, 'newVersion must be positive');\n const meta = {\n ...prev,\n dataVersion: newVersion,\n // The schemaVersion never moves backwards.\n schemaVersion: Math.max(newVersion, prev.schemaVersion),\n minSafeVersion: getMinSafeVersion(log, prev, minSafeVersion),\n } satisfies VersionHistory;\n\n db.prepare(\n `\n INSERT INTO \"_zero.versionHistory\" (dataVersion, schemaVersion, minSafeVersion, lock)\n VALUES (@dataVersion, @schemaVersion, @minSafeVersion, 1)\n ON CONFLICT (lock) DO UPDATE\n SET dataVersion=@dataVersion,\n schemaVersion=@schemaVersion,\n minSafeVersion=@minSafeVersion\n `,\n ).run(meta);\n\n return meta;\n}\n\nasync function runMigration(\n log: LogContext,\n tx: Db,\n versions: VersionHistory,\n destinationVersion: number,\n migration: Migration,\n): Promise<VersionHistory> {\n if (versions.schemaVersion < destinationVersion) {\n await migration.migrateSchema?.(log, tx);\n }\n if (versions.dataVersion < destinationVersion) {\n await migration.migrateData?.(log, tx);\n }\n return updateVersionHistory(\n log,\n tx,\n versions,\n destinationVersion,\n migration.minSafeVersion,\n );\n}\n\n/**\n * Bumps the rollback limit [[toAtLeast]] the specified version.\n * Leaves the rollback limit unchanged if it is equal or greater.\n */\nfunction getMinSafeVersion(\n log: LogContext,\n current: VersionHistory,\n proposedSafeVersion?: number,\n): number {\n if (proposedSafeVersion === undefined) {\n return current.minSafeVersion;\n }\n if (current.minSafeVersion >= proposedSafeVersion) {\n // The rollback limit must never move backwards.\n log.debug?.(\n `rollback limit is already at ${current.minSafeVersion}, ` +\n `don't need to bump to ${proposedSafeVersion}`,\n );\n return current.minSafeVersion;\n }\n log.info?.(\n `bumping rollback limit from ${current.minSafeVersion} to ${proposedSafeVersion}`,\n );\n return proposedSafeVersion;\n}\n\n// Note: We use a 
custom transaction wrapper (instead of db.begin(...)) in order\n// to support async operations within the transaction.\nasync function runTransaction<T>(\n log: LogContext,\n db: Db,\n tx: (db: Db) => Promise<T> | T,\n): Promise<T> {\n db.prepare('BEGIN EXCLUSIVE').run();\n try {\n const result = await tx(db);\n db.prepare('COMMIT').run();\n return result;\n } catch (e) {\n log.error?.('Aborted transaction due to error', e);\n db.prepare('ROLLBACK').run();\n throw e;\n }\n}\n"],"mappings":";;;;;;;;;AA0DA,eAAsB,oBACpB,KACA,WACA,QACA,gBACA,yBACe;CACf,MAAM,QAAQ,KAAK,KAAK;AACxB,OAAM,IAAI,YACR,cACA,QAAQ,GAAG,OAAO,iBAAiB,CAAC,SAAS,GAAG,CACjD;CACD,MAAM,KAAK,IAAI,SAAS,KAAK,OAAO;AAEpC,KAAI;EACF,MAAM,oBAAoB,OAAO,wBAAwB;AACzD,SACE,kBAAkB,QAClB,gDACD;AACD,SACE,kBAAkB,GAAG,KAAK,GAC1B,6CACD;EAED,MAAM,cAAc,kBAAkB,GAAG,GAAG,CAAE;AAC9C,MAAI,OACF,0CAA0C,UAAU,cAAc,cACnE;EAED,IAAI,WAAW,MAAM,eAAe,KAAK,KAAI,OAAM;GACjD,MAAM,WAAW,kBAAkB,GAAG;AACtC,OAAI,cAAc,SAAS,eACzB,OAAM,IAAI,MACR,cAAc,UAAU,cAAc,YAAY,8BAA8B,SAAS,iBAC1F;AAGH,OAAI,SAAS,cAAc,aAAa;AACtC,QAAI,OACF,eAAe,SAAS,YAAY,kBAAkB,cACvD;AACD,WAAO,qBAAqB,KAAK,IAAI,UAAU,YAAY;;AAE7D,UAAO;IACP;AAEF,MAAI,SAAS,cAAc,aAAa;AACtC,MAAG,WAAW,KAAK;AACnB,MAAG,OAAO,2BAA2B;AACrC,MAAG,OAAO,qBAAqB;AAC/B,MAAG,OAAO,qBAAqB;AAC/B,MAAG,OAAO,oBAAoB;GAM9B,MAAM,aACJ,SAAS,gBAAgB,IAEpB,CAAC,CAAC,aAAa,eAAe,CAAC,GAChC;AAEN,QAAK,MAAM,CAAC,MAAM,cAAc,WAC9B,KAAI,SAAS,cAAc,MAAM;AAC/B,QAAI,OACF,0BAA0B,SAAS,YAAY,OAAO,OACvD;AACI,QAAI,OAAO;AAEhB,eAAW,MAAM,eAAe,KAAK,IAAI,OAAM,OAAM;KAEnD,IAAI,WAAW,kBAAkB,GAAG;AACpC,SAAI,SAAS,cAAc,MAAM;AAC/B,iBAAW,MAAM,aAAa,KAAK,IAAI,UAAU,MAAM,UAAU;AACjE,aACE,SAAS,gBAAgB,YAEvB,oDAAoD,KAAK,QAAQ,SAAS,cAC7E;;AAEH,YAAO;MACP;;AAIN,MAAG,KAAK,eAAe;AACvB,OAAI,OAAO,oBAAoB;QAS/B,IAAG,OAAO,qBAAqB;AAMjC,KAAG,OAAO,uBAAuB;AACjC,KAAG,WAAW,MAAM;AAEpB,SACE,SAAS,gBAAgB,mBAEvB,sBAAsB,SAAS,YAAY,gCAAgC,YAAY,GAC1F;AACD,MAAI,OACF,WAAW,UAAU,cAAc,YAAY,IAC7C,KAAK,KAAK,GAAG,MACd,MACF;UACM,GAAG;AACV,MAAI,QAAQ,iCAAiC,EAAE;AAC/C,QAAM;WACE;AACR,KAAG,OAAO;AACL,MAAI,OAAO;;;AAIpB,SAAS,OACP,yBACuB;CACvB,MAAM,oBAA2C,EAAE;AACnD,MAAK,MAAM,CAAC,GAAG,MAAM,OAAO,QAAQ,wBAAwB,CAC1D,mBAAkB,KAAK,CAAC,OAAO,EAAE,EAAE,EAAE,CAAC;AAExC,QAAO,kBAAkB,MAAM,CAAC,IAAI,CAAC,OAAO,IAAI,EAAE;;AAItB,eAAE,OAAO;CAQrC,eAAe,eAAE,QAAQ;CASzB,aAAa,eAAE,QAAQ;CAOvB,gBAAgB,eAAE,QAAQ;CAC3B,CAAC;AAMF,SAAgB,kBAAkB,IAAwB;AAExD,IAAG,QACD;;;;;;;;IASD,CAAC,KAAK;AAMP,QALe,GACZ,QACC,kFACD,CACA,KAAK,IACS;EAAC,aAAa;EAAG,eAAe;EAAG,gBAAgB;EAAE;;AAGxE,SAAS,qBACP,KACA,IACA,MACA,YACA,gBACgB;AAChB,QAAO,aAAa,GAAG,8BAA8B;CACrD,MAAM,OAAO;EACX,GAAG;EACH,aAAa;EAEb,eAAe,KAAK,IAAI,YAAY,KAAK,cAAc;EACvD,gBAAgB,kBAAkB,KAAK,MAAM,eAAe;EAC7D;AAED,IAAG,QACD;;;;;;;IAQD,CAAC,IAAI,KAAK;AAEX,QAAO;;AAGT,eAAe,aACb,KACA,IACA,UACA,oBACA,WACyB;AACzB,KAAI,SAAS,gBAAgB,mBAC3B,OAAM,UAAU,gBAAgB,KAAK,GAAG;AAE1C,KAAI,SAAS,cAAc,mBACzB,OAAM,UAAU,cAAc,KAAK,GAAG;AAExC,QAAO,qBACL,KACA,IACA,UACA,oBACA,UAAU,eACX;;;;;;AAOH,SAAS,kBACP,KACA,SACA,qBACQ;AACR,KAAI,wBAAwB,KAAA,EAC1B,QAAO,QAAQ;AAEjB,KAAI,QAAQ,kBAAkB,qBAAqB;AAEjD,MAAI,QACF,gCAAgC,QAAQ,eAAe,0BAC5B,sBAC5B;AACD,SAAO,QAAQ;;AAEjB,KAAI,OACF,+BAA+B,QAAQ,eAAe,MAAM,sBAC7D;AACD,QAAO;;AAKT,eAAe,eACb,KACA,IACA,IACY;AACZ,IAAG,QAAQ,kBAAkB,CAAC,KAAK;AACnC,KAAI;EACF,MAAM,SAAS,MAAM,GAAG,GAAG;AAC3B,KAAG,QAAQ,SAAS,CAAC,KAAK;AAC1B,SAAO;UACA,GAAG;AACV,MAAI,QAAQ,oCAAoC,EAAE;AAClD,KAAG,QAAQ,WAAW,CAAC,KAAK;AAC5B,QAAM"}
+
{"version":3,"file":"migration-lite.js","names":[],"sources":["../../../../../zero-cache/src/db/migration-lite.ts"],"sourcesContent":["import type {LogContext} from '@rocicorp/logger';\nimport {assert} from '../../../shared/src/asserts.ts';\nimport {randInt} from '../../../shared/src/rand.ts';\nimport * as v from '../../../shared/src/valita.ts';\nimport type {Database as Db} from '../../../zqlite/src/db.ts';\nimport {Database} from '../../../zqlite/src/db.ts';\n\ntype Operations = (log: LogContext, tx: Db) => Promise<void> | void;\n\n/**\n * Encapsulates the logic for setting up or upgrading to a new schema. After the\n * Migration code successfully completes, {@link runSchemaMigrations}\n * will update the schema version and commit the transaction.\n */\nexport type Migration = {\n /**\n * Perform database operations that create or alter table structure. This is\n * called at most once during lifetime of the application. If a `migrateData()`\n * operation is defined, that will be performed after `migrateSchema()` succeeds.\n */\n migrateSchema?: Operations;\n\n /**\n * Perform database operations to migrate data to the new schema. This is\n * called after `migrateSchema()` (if defined), and may be called again\n * to re-migrate data after the server was rolled back to an earlier version,\n * and rolled forward again.\n *\n * Consequently, the logic in `migrateData()` must be idempotent.\n */\n migrateData?: Operations;\n\n /**\n * Sets the `minSafeVersion` to the specified value, prohibiting running\n * any earlier code versions.\n */\n minSafeVersion?: number;\n};\n\n/**\n * Mapping of incremental migrations to move from the previous old code\n * version to next one. Versions must be non-zero.\n *\n * The schema resulting from performing incremental migrations should be\n * equivalent to that of the `setupMigration` on a blank database.\n *\n * The highest destinationVersion of this map denotes the current\n * \"code version\", and is also used as the destination version when\n * running the initial setup migration on a blank database.\n */\nexport type IncrementalMigrationMap = {\n [destinationVersion: number]: Migration;\n};\n\n/**\n * Ensures that the schema is compatible with the current code, updating and\n * migrating the schema if necessary.\n */\nexport async function runSchemaMigrations(\n log: LogContext,\n debugName: string,\n dbPath: string,\n setupMigration: Migration,\n incrementalMigrationMap: IncrementalMigrationMap,\n): Promise<void> {\n const start = Date.now();\n log = log.withContext(\n 'initSchema',\n randInt(0, Number.MAX_SAFE_INTEGER).toString(36),\n );\n const db = new Database(log, dbPath);\n\n try {\n const versionMigrations = sorted(incrementalMigrationMap);\n assert(\n versionMigrations.length,\n `Must specify a at least one version migration`,\n );\n assert(\n versionMigrations[0][0] > 0,\n `Versions must be non-zero positive numbers`,\n );\n // oxlint-disable-next-line typescript/no-non-null-assertion\n const codeVersion = versionMigrations.at(-1)![0];\n log.info?.(\n `Checking schema for compatibility with ${debugName} at schema v${codeVersion}`,\n );\n\n let versions = await runTransaction(log, db, tx => {\n const versions = getVersionHistory(tx);\n if (codeVersion < versions.minSafeVersion) {\n throw new Error(\n `Cannot run ${debugName} at schema v${codeVersion} because rollback limit is v${versions.minSafeVersion}`,\n );\n }\n\n if (versions.dataVersion > codeVersion) {\n log.info?.(\n `Data is at v${versions.dataVersion}. 
Resetting to v${codeVersion}`,\n );\n return updateVersionHistory(log, tx, versions, codeVersion);\n }\n return versions;\n });\n\n if (versions.dataVersion < codeVersion) {\n db.unsafeMode(true); // Enables journal_mode = OFF\n db.pragma('locking_mode = EXCLUSIVE');\n db.pragma('foreign_keys = OFF');\n db.pragma('journal_mode = OFF');\n db.pragma('synchronous = OFF');\n // Unfortunately, AUTO_VACUUM is not compatible with BEGIN CONCURRENT,\n // so it is not an option for the replica file.\n // https://sqlite.org/forum/forumpost/25f183416a\n // db.pragma('auto_vacuum = INCREMENTAL');\n\n const migrations =\n versions.dataVersion === 0\n ? // For the empty database v0, only run the setup migration.\n ([[codeVersion, setupMigration]] as const)\n : versionMigrations;\n\n for (const [dest, migration] of migrations) {\n if (versions.dataVersion < dest) {\n log.info?.(\n `Migrating schema from v${versions.dataVersion} to v${dest}`,\n );\n void log.flush(); // Flush logs before each migration to help debug crash-y migrations.\n\n versions = await runTransaction(log, db, async tx => {\n // Fetch meta from within the transaction to make the migration atomic.\n let versions = getVersionHistory(tx);\n if (versions.dataVersion < dest) {\n versions = await runMigration(log, tx, versions, dest, migration);\n assert(\n versions.dataVersion === dest,\n () =>\n `Migration did not reach target version: expected ${dest}, got ${versions.dataVersion}`,\n );\n }\n return versions;\n });\n }\n }\n\n db.exec('ANALYZE main');\n log.info?.('ANALYZE completed');\n } else {\n // Run optimize whenever opening an sqlite db file as recommended in\n // https://www.sqlite.org/pragma.html#pragma_optimize\n // It is important to run the same initialization steps as is done\n // in the view-syncer (i.e. when preparing database for serving\n // replication) so that any corruption detected in the view-syncer is\n // similarly detected in the change-streamer, facilitating an eventual\n // recovery by resyncing the replica anew.\n db.pragma('optimize = 0x10002');\n\n // TODO: Investigate running `integrity_check` or `quick_check` as well,\n // provided that they are not inordinately expensive on large databases.\n }\n\n db.pragma('synchronous = NORMAL');\n db.unsafeMode(false);\n\n assert(\n versions.dataVersion === codeVersion,\n () =>\n `Final dataVersion (${versions.dataVersion}) does not match codeVersion (${codeVersion})`,\n );\n log.info?.(\n `Running ${debugName} at schema v${codeVersion} (${\n Date.now() - start\n } ms)`,\n );\n } catch (e) {\n log.error?.('Error in ensureSchemaMigrated', e);\n throw e;\n } finally {\n db.close();\n void log.flush(); // Flush the logs but do not block server progress on it.\n }\n}\n\nfunction sorted(\n incrementalMigrationMap: IncrementalMigrationMap,\n): [number, Migration][] {\n const versionMigrations: [number, Migration][] = [];\n for (const [v, m] of Object.entries(incrementalMigrationMap)) {\n versionMigrations.push([Number(v), m]);\n }\n return versionMigrations.sort(([a], [b]) => a - b);\n}\n\n// Exposed for tests.\nexport const versionHistory = v.object({\n /**\n * The `schemaVersion` is highest code version that has ever been run\n * on the database, and is used to delineate the structure of the tables\n * in the database. 
A schemaVersion only moves forward; rolling back to\n * an earlier (safe) code version does not revert schema changes that\n * have already been applied.\n */\n schemaVersion: v.number(),\n\n /**\n * The data version is the code version of the latest server that ran.\n * Note that this may be less than the schemaVersion in the case that\n * a server is rolled back to an earlier version after a schema change.\n * In such a case, data (but not schema), may need to be re-migrated\n * when rolling forward again.\n */\n dataVersion: v.number(),\n\n /**\n * The minimum code version that is safe to run. This is used when\n * a schema migration is not backwards compatible with an older version\n * of the code.\n */\n minSafeVersion: v.number(),\n});\n\n// Exposed for tests.\nexport type VersionHistory = v.Infer<typeof versionHistory>;\n\n// Exposed for tests\nexport function getVersionHistory(db: Db): VersionHistory {\n // Note: The `lock` column transparently ensures that at most one row exists.\n db.prepare(\n `\n CREATE TABLE IF NOT EXISTS \"_zero.versionHistory\" (\n dataVersion INTEGER NOT NULL,\n schemaVersion INTEGER NOT NULL,\n minSafeVersion INTEGER NOT NULL,\n\n lock INTEGER PRIMARY KEY DEFAULT 1 CHECK (lock=1)\n );\n `,\n ).run();\n const result = db\n .prepare(\n 'SELECT dataVersion, schemaVersion, minSafeVersion FROM \"_zero.versionHistory\"',\n )\n .get() as VersionHistory;\n return result ?? {dataVersion: 0, schemaVersion: 0, minSafeVersion: 0};\n}\n\nfunction updateVersionHistory(\n log: LogContext,\n db: Db,\n prev: VersionHistory,\n newVersion: number,\n minSafeVersion?: number,\n): VersionHistory {\n assert(newVersion > 0, 'newVersion must be positive');\n const meta = {\n ...prev,\n dataVersion: newVersion,\n // The schemaVersion never moves backwards.\n schemaVersion: Math.max(newVersion, prev.schemaVersion),\n minSafeVersion: getMinSafeVersion(log, prev, minSafeVersion),\n } satisfies VersionHistory;\n\n db.prepare(\n `\n INSERT INTO \"_zero.versionHistory\" (dataVersion, schemaVersion, minSafeVersion, lock)\n VALUES (@dataVersion, @schemaVersion, @minSafeVersion, 1)\n ON CONFLICT (lock) DO UPDATE\n SET dataVersion=@dataVersion,\n schemaVersion=@schemaVersion,\n minSafeVersion=@minSafeVersion\n `,\n ).run(meta);\n\n return meta;\n}\n\nasync function runMigration(\n log: LogContext,\n tx: Db,\n versions: VersionHistory,\n destinationVersion: number,\n migration: Migration,\n): Promise<VersionHistory> {\n if (versions.schemaVersion < destinationVersion) {\n await migration.migrateSchema?.(log, tx);\n }\n if (versions.dataVersion < destinationVersion) {\n await migration.migrateData?.(log, tx);\n }\n return updateVersionHistory(\n log,\n tx,\n versions,\n destinationVersion,\n migration.minSafeVersion,\n );\n}\n\n/**\n * Bumps the rollback limit [[toAtLeast]] the specified version.\n * Leaves the rollback limit unchanged if it is equal or greater.\n */\nfunction getMinSafeVersion(\n log: LogContext,\n current: VersionHistory,\n proposedSafeVersion?: number,\n): number {\n if (proposedSafeVersion === undefined) {\n return current.minSafeVersion;\n }\n if (current.minSafeVersion >= proposedSafeVersion) {\n // The rollback limit must never move backwards.\n log.debug?.(\n `rollback limit is already at ${current.minSafeVersion}, ` +\n `don't need to bump to ${proposedSafeVersion}`,\n );\n return current.minSafeVersion;\n }\n log.info?.(\n `bumping rollback limit from ${current.minSafeVersion} to ${proposedSafeVersion}`,\n );\n return proposedSafeVersion;\n}\n\n// Note: We use a 
custom transaction wrapper (instead of db.begin(...)) in order\n// to support async operations within the transaction.\nasync function runTransaction<T>(\n log: LogContext,\n db: Db,\n tx: (db: Db) => Promise<T> | T,\n): Promise<T> {\n db.prepare('BEGIN EXCLUSIVE').run();\n try {\n const result = await tx(db);\n db.prepare('COMMIT').run();\n return result;\n } catch (e) {\n log.error?.('Aborted transaction due to error', e);\n try {\n db.prepare('ROLLBACK').run();\n } catch (rollbackError) {\n log.error?.('Unable to rollback transaction', rollbackError);\n const combinedError = new Error(\n `Transaction failed and rollback also failed: operation error = ${String(\n e,\n )}; rollback error = ${String(rollbackError)}`,\n );\n combinedError.cause = e;\n throw combinedError;\n }\n throw e;\n }\n}\n"],"mappings":";;;;;;;;;AA0DA,eAAsB,oBACpB,KACA,WACA,QACA,gBACA,yBACe;CACf,MAAM,QAAQ,KAAK,KAAK;AACxB,OAAM,IAAI,YACR,cACA,QAAQ,GAAG,OAAO,iBAAiB,CAAC,SAAS,GAAG,CACjD;CACD,MAAM,KAAK,IAAI,SAAS,KAAK,OAAO;AAEpC,KAAI;EACF,MAAM,oBAAoB,OAAO,wBAAwB;AACzD,SACE,kBAAkB,QAClB,gDACD;AACD,SACE,kBAAkB,GAAG,KAAK,GAC1B,6CACD;EAED,MAAM,cAAc,kBAAkB,GAAG,GAAG,CAAE;AAC9C,MAAI,OACF,0CAA0C,UAAU,cAAc,cACnE;EAED,IAAI,WAAW,MAAM,eAAe,KAAK,KAAI,OAAM;GACjD,MAAM,WAAW,kBAAkB,GAAG;AACtC,OAAI,cAAc,SAAS,eACzB,OAAM,IAAI,MACR,cAAc,UAAU,cAAc,YAAY,8BAA8B,SAAS,iBAC1F;AAGH,OAAI,SAAS,cAAc,aAAa;AACtC,QAAI,OACF,eAAe,SAAS,YAAY,kBAAkB,cACvD;AACD,WAAO,qBAAqB,KAAK,IAAI,UAAU,YAAY;;AAE7D,UAAO;IACP;AAEF,MAAI,SAAS,cAAc,aAAa;AACtC,MAAG,WAAW,KAAK;AACnB,MAAG,OAAO,2BAA2B;AACrC,MAAG,OAAO,qBAAqB;AAC/B,MAAG,OAAO,qBAAqB;AAC/B,MAAG,OAAO,oBAAoB;GAM9B,MAAM,aACJ,SAAS,gBAAgB,IAEpB,CAAC,CAAC,aAAa,eAAe,CAAC,GAChC;AAEN,QAAK,MAAM,CAAC,MAAM,cAAc,WAC9B,KAAI,SAAS,cAAc,MAAM;AAC/B,QAAI,OACF,0BAA0B,SAAS,YAAY,OAAO,OACvD;AACI,QAAI,OAAO;AAEhB,eAAW,MAAM,eAAe,KAAK,IAAI,OAAM,OAAM;KAEnD,IAAI,WAAW,kBAAkB,GAAG;AACpC,SAAI,SAAS,cAAc,MAAM;AAC/B,iBAAW,MAAM,aAAa,KAAK,IAAI,UAAU,MAAM,UAAU;AACjE,aACE,SAAS,gBAAgB,YAEvB,oDAAoD,KAAK,QAAQ,SAAS,cAC7E;;AAEH,YAAO;MACP;;AAIN,MAAG,KAAK,eAAe;AACvB,OAAI,OAAO,oBAAoB;QAS/B,IAAG,OAAO,qBAAqB;AAMjC,KAAG,OAAO,uBAAuB;AACjC,KAAG,WAAW,MAAM;AAEpB,SACE,SAAS,gBAAgB,mBAEvB,sBAAsB,SAAS,YAAY,gCAAgC,YAAY,GAC1F;AACD,MAAI,OACF,WAAW,UAAU,cAAc,YAAY,IAC7C,KAAK,KAAK,GAAG,MACd,MACF;UACM,GAAG;AACV,MAAI,QAAQ,iCAAiC,EAAE;AAC/C,QAAM;WACE;AACR,KAAG,OAAO;AACL,MAAI,OAAO;;;AAIpB,SAAS,OACP,yBACuB;CACvB,MAAM,oBAA2C,EAAE;AACnD,MAAK,MAAM,CAAC,GAAG,MAAM,OAAO,QAAQ,wBAAwB,CAC1D,mBAAkB,KAAK,CAAC,OAAO,EAAE,EAAE,EAAE,CAAC;AAExC,QAAO,kBAAkB,MAAM,CAAC,IAAI,CAAC,OAAO,IAAI,EAAE;;AAItB,eAAE,OAAO;CAQrC,eAAe,eAAE,QAAQ;CASzB,aAAa,eAAE,QAAQ;CAOvB,gBAAgB,eAAE,QAAQ;CAC3B,CAAC;AAMF,SAAgB,kBAAkB,IAAwB;AAExD,IAAG,QACD;;;;;;;;IASD,CAAC,KAAK;AAMP,QALe,GACZ,QACC,kFACD,CACA,KAAK,IACS;EAAC,aAAa;EAAG,eAAe;EAAG,gBAAgB;EAAE;;AAGxE,SAAS,qBACP,KACA,IACA,MACA,YACA,gBACgB;AAChB,QAAO,aAAa,GAAG,8BAA8B;CACrD,MAAM,OAAO;EACX,GAAG;EACH,aAAa;EAEb,eAAe,KAAK,IAAI,YAAY,KAAK,cAAc;EACvD,gBAAgB,kBAAkB,KAAK,MAAM,eAAe;EAC7D;AAED,IAAG,QACD;;;;;;;IAQD,CAAC,IAAI,KAAK;AAEX,QAAO;;AAGT,eAAe,aACb,KACA,IACA,UACA,oBACA,WACyB;AACzB,KAAI,SAAS,gBAAgB,mBAC3B,OAAM,UAAU,gBAAgB,KAAK,GAAG;AAE1C,KAAI,SAAS,cAAc,mBACzB,OAAM,UAAU,cAAc,KAAK,GAAG;AAExC,QAAO,qBACL,KACA,IACA,UACA,oBACA,UAAU,eACX;;;;;;AAOH,SAAS,kBACP,KACA,SACA,qBACQ;AACR,KAAI,wBAAwB,KAAA,EAC1B,QAAO,QAAQ;AAEjB,KAAI,QAAQ,kBAAkB,qBAAqB;AAEjD,MAAI,QACF,gCAAgC,QAAQ,eAAe,0BAC5B,sBAC5B;AACD,SAAO,QAAQ;;AAEjB,KAAI,OACF,+BAA+B,QAAQ,eAAe,MAAM,sBAC7D;AACD,QAAO;;AAKT,eAAe,eACb,KACA,IACA,IACY;AACZ,IAAG,QAAQ,kBAAkB,CAAC,KAAK;AACnC,KAAI;EACF,MAAM,SAAS,MAAM,GAAG,GAAG;AAC3B,KAAG,QAAQ,SAAS,CAAC,KAAK;AAC1B,SAAO;
UACA,GAAG;AACV,MAAI,QAAQ,oCAAoC,EAAE;AAClD,MAAI;AACF,MAAG,QAAQ,WAAW,CAAC,KAAK;WACrB,eAAe;AACtB,OAAI,QAAQ,kCAAkC,cAAc;GAC5D,MAAM,gCAAgB,IAAI,MACxB,kEAAkE,OAChE,EACD,CAAC,qBAAqB,OAAO,cAAc,GAC7C;AACD,iBAAc,QAAQ;AACtB,SAAM;;AAER,QAAM"}
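For readability, the functional change buried in the migration-lite.js.map sourcesContent above is confined to the runTransaction helper: the ROLLBACK statement is now wrapped in its own try/catch, so a rollback failure is logged and surfaced as a combined error (with the original failure attached as its cause) instead of silently replacing it. A minimal self-contained sketch of that pattern follows; the Db and Log shapes are simplified stand-ins for this example, not the package's actual types.

// Simplified stand-ins so the sketch compiles on its own; the real code uses
// the zqlite Database handle and @rocicorp/logger's LogContext.
type Db = {prepare(sql: string): {run(): unknown}};
type Log = {error?: (msg: string, err: unknown) => void};

async function runTransaction<T>(
  log: Log,
  db: Db,
  tx: (db: Db) => Promise<T> | T,
): Promise<T> {
  db.prepare('BEGIN EXCLUSIVE').run();
  try {
    const result = await tx(db);
    db.prepare('COMMIT').run();
    return result;
  } catch (e) {
    log.error?.('Aborted transaction due to error', e);
    try {
      // New in this version: a failing ROLLBACK no longer masks the original error.
      db.prepare('ROLLBACK').run();
    } catch (rollbackError) {
      log.error?.('Unable to rollback transaction', rollbackError);
      const combinedError = new Error(
        `Transaction failed and rollback also failed: operation error = ${String(
          e,
        )}; rollback error = ${String(rollbackError)}`,
      );
      combinedError.cause = e;
      throw combinedError;
    }
    throw e; // rollback succeeded; re-throw the original failure
  }
}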
@@ -12,7 +12,7 @@ export declare function isEnumColumn(spec: Pick<ColumnSpec, 'pgTypeClass' | 'ele
  */
 export declare function isArrayColumn(spec: Pick<ColumnSpec, 'elemPgTypeClass'>): boolean;
 export declare function warnIfDataTypeSupported(lc: LogContext, liteTypeString: LiteTypeString, table: string, column: string): void;
-export declare function mapPostgresToLiteDefault(table: string, column: string,
+export declare function mapPostgresToLiteDefault(table: string, column: string, defaultExpression: string | null | undefined): string | null;
 export declare function mapPostgresToLiteColumn(table: string, column: {
     name: string;
     spec: ColumnSpec;
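The declaration change above gives mapPostgresToLiteDefault an explicit defaultExpression parameter (string | null | undefined) and a string | null return type. Below is a hedged usage sketch against that declared signature; the import path, call sites, and literal values are illustrative assumptions, not code taken from the package.

// Assumed import path for illustration only.
import {mapPostgresToLiteDefault} from './pg-to-lite.ts';

// Translate a Postgres column default into its SQLite ("lite") equivalent.
// The argument values are made up for this example.
const liteDefault: string | null = mapPostgresToLiteDefault(
  'issue', // table name
  'priority', // column name
  "'medium'::text", // Postgres default expression (illustrative)
);

// Per the declared signature, a missing default may be passed as null or
// undefined; presumably the mapping then yields null.
const noDefault: string | null = mapPostgresToLiteDefault('issue', 'description', null);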
@@ -1 +1 @@
-
{"version":3,"file":"pg-to-lite.d.ts","sourceRoot":"","sources":["../../../../../zero-cache/src/db/pg-to-lite.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,kBAAkB,CAAC;AAEjD,OAAO,EAIL,KAAK,cAAc,EACpB,MAAM,kBAAkB,CAAC;AAG1B,OAAO,EACL,KAAK,UAAU,EACf,KAAK,SAAS,EACd,KAAK,aAAa,EAClB,KAAK,aAAa,EAClB,KAAK,SAAS,EACf,MAAM,YAAY,CAAC;AAEpB;;;GAGG;AACH,wBAAgB,YAAY,CAC1B,IAAI,EAAE,IAAI,CAAC,UAAU,EAAE,aAAa,GAAG,iBAAiB,CAAC,GACxD,OAAO,CAET;AAED;;;GAGG;AACH,wBAAgB,aAAa,CAC3B,IAAI,EAAE,IAAI,CAAC,UAAU,EAAE,iBAAiB,CAAC,GACxC,OAAO,CAET;AAaD,wBAAgB,uBAAuB,CACrC,EAAE,EAAE,UAAU,EACd,cAAc,EAAE,cAAc,EAC9B,KAAK,EAAE,MAAM,EACb,MAAM,EAAE,MAAM,QAUf;
+
{"version":3,"file":"pg-to-lite.d.ts","sourceRoot":"","sources":["../../../../../zero-cache/src/db/pg-to-lite.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,kBAAkB,CAAC;AAEjD,OAAO,EAIL,KAAK,cAAc,EACpB,MAAM,kBAAkB,CAAC;AAG1B,OAAO,EACL,KAAK,UAAU,EACf,KAAK,SAAS,EACd,KAAK,aAAa,EAClB,KAAK,aAAa,EAClB,KAAK,SAAS,EACf,MAAM,YAAY,CAAC;AAEpB;;;GAGG;AACH,wBAAgB,YAAY,CAC1B,IAAI,EAAE,IAAI,CAAC,UAAU,EAAE,aAAa,GAAG,iBAAiB,CAAC,GACxD,OAAO,CAET;AAED;;;GAGG;AACH,wBAAgB,aAAa,CAC3B,IAAI,EAAE,IAAI,CAAC,UAAU,EAAE,iBAAiB,CAAC,GACxC,OAAO,CAET;AAaD,wBAAgB,uBAAuB,CACrC,EAAE,EAAE,UAAU,EACd,cAAc,EAAE,cAAc,EAC9B,KAAK,EAAE,MAAM,EACb,MAAM,EAAE,MAAM,QAUf;AA0CD,wBAAgB,wBAAwB,CACtC,KAAK,EAAE,MAAM,EACb,MAAM,EAAE,MAAM,EACd,iBAAiB,EAAE,MAAM,GAAG,IAAI,GAAG,SAAS,GAC3C,MAAM,GAAG,IAAI,CAiCf;AAED,wBAAgB,uBAAuB,CACrC,KAAK,EAAE,MAAM,EACb,MAAM,EAAE;IAAC,IAAI,EAAE,MAAM,CAAC;IAAC,IAAI,EAAE,UAAU,CAAA;CAAC,EACxC,aAAa,CAAC,EAAE,gBAAgB,GAC/B,UAAU,CAiCZ;AAED,wBAAgB,iBAAiB,CAC/B,CAAC,EAAE,SAAS,EACZ,cAAc,CAAC,EAAE,MAAM,GACtB,aAAa,CAoBf;AAED,wBAAgB,sBAAsB,CAAC,KAAK,EAAE,SAAS,GAAG,aAAa,CAOtE;AAED,qBAAa,6BAA8B,SAAQ,KAAK;IACtD,QAAQ,CAAC,IAAI,mCAAmC;CACjD"}