@rocicorp/zero 0.25.7 → 0.26.0-canary.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (113)
  1. package/out/shared/src/custom-key-map.d.ts +4 -4
  2. package/out/shared/src/custom-key-map.d.ts.map +1 -1
  3. package/out/shared/src/custom-key-map.js.map +1 -1
  4. package/out/shared/src/iterables.d.ts +6 -8
  5. package/out/shared/src/iterables.d.ts.map +1 -1
  6. package/out/shared/src/iterables.js +13 -7
  7. package/out/shared/src/iterables.js.map +1 -1
  8. package/out/zero/package.json.js +1 -1
  9. package/out/zero-cache/src/config/zero-config.d.ts +5 -0
  10. package/out/zero-cache/src/config/zero-config.d.ts.map +1 -1
  11. package/out/zero-cache/src/config/zero-config.js +12 -0
  12. package/out/zero-cache/src/config/zero-config.js.map +1 -1
  13. package/out/zero-cache/src/observability/events.d.ts.map +1 -1
  14. package/out/zero-cache/src/observability/events.js +15 -5
  15. package/out/zero-cache/src/observability/events.js.map +1 -1
  16. package/out/zero-cache/src/server/change-streamer.d.ts.map +1 -1
  17. package/out/zero-cache/src/server/change-streamer.js +10 -2
  18. package/out/zero-cache/src/server/change-streamer.js.map +1 -1
  19. package/out/zero-cache/src/server/syncer.d.ts +1 -0
  20. package/out/zero-cache/src/server/syncer.d.ts.map +1 -1
  21. package/out/zero-cache/src/server/syncer.js +22 -4
  22. package/out/zero-cache/src/server/syncer.js.map +1 -1
  23. package/out/zero-cache/src/services/change-source/custom/change-source.js +0 -4
  24. package/out/zero-cache/src/services/change-source/custom/change-source.js.map +1 -1
  25. package/out/zero-cache/src/services/change-source/pg/schema/ddl.d.ts.map +1 -1
  26. package/out/zero-cache/src/services/change-source/pg/schema/ddl.js +1 -10
  27. package/out/zero-cache/src/services/change-source/pg/schema/ddl.js.map +1 -1
  28. package/out/zero-cache/src/services/change-source/pg/schema/init.d.ts.map +1 -1
  29. package/out/zero-cache/src/services/change-source/pg/schema/init.js +8 -2
  30. package/out/zero-cache/src/services/change-source/pg/schema/init.js.map +1 -1
  31. package/out/zero-cache/src/services/change-source/pg/schema/shard.d.ts.map +1 -1
  32. package/out/zero-cache/src/services/change-source/pg/schema/shard.js +1 -14
  33. package/out/zero-cache/src/services/change-source/pg/schema/shard.js.map +1 -1
  34. package/out/zero-cache/src/services/change-source/replica-schema.d.ts.map +1 -1
  35. package/out/zero-cache/src/services/change-source/replica-schema.js +8 -1
  36. package/out/zero-cache/src/services/change-source/replica-schema.js.map +1 -1
  37. package/out/zero-cache/src/services/change-streamer/change-streamer-service.d.ts +1 -1
  38. package/out/zero-cache/src/services/change-streamer/change-streamer-service.d.ts.map +1 -1
  39. package/out/zero-cache/src/services/change-streamer/change-streamer-service.js +5 -3
  40. package/out/zero-cache/src/services/change-streamer/change-streamer-service.js.map +1 -1
  41. package/out/zero-cache/src/services/change-streamer/storer.d.ts +1 -1
  42. package/out/zero-cache/src/services/change-streamer/storer.d.ts.map +1 -1
  43. package/out/zero-cache/src/services/change-streamer/storer.js +16 -5
  44. package/out/zero-cache/src/services/change-streamer/storer.js.map +1 -1
  45. package/out/zero-cache/src/services/life-cycle.d.ts +1 -1
  46. package/out/zero-cache/src/services/life-cycle.d.ts.map +1 -1
  47. package/out/zero-cache/src/services/life-cycle.js.map +1 -1
  48. package/out/zero-cache/src/services/mutagen/mutagen.d.ts +4 -4
  49. package/out/zero-cache/src/services/mutagen/mutagen.d.ts.map +1 -1
  50. package/out/zero-cache/src/services/mutagen/mutagen.js +9 -24
  51. package/out/zero-cache/src/services/mutagen/mutagen.js.map +1 -1
  52. package/out/zero-cache/src/services/mutagen/pusher.d.ts +1 -2
  53. package/out/zero-cache/src/services/mutagen/pusher.d.ts.map +1 -1
  54. package/out/zero-cache/src/services/mutagen/pusher.js +51 -12
  55. package/out/zero-cache/src/services/mutagen/pusher.js.map +1 -1
  56. package/out/zero-cache/src/services/replicator/change-processor.js +4 -3
  57. package/out/zero-cache/src/services/replicator/change-processor.js.map +1 -1
  58. package/out/zero-cache/src/services/replicator/schema/change-log.d.ts +3 -2
  59. package/out/zero-cache/src/services/replicator/schema/change-log.d.ts.map +1 -1
  60. package/out/zero-cache/src/services/replicator/schema/change-log.js +36 -31
  61. package/out/zero-cache/src/services/replicator/schema/change-log.js.map +1 -1
  62. package/out/zero-cache/src/services/view-syncer/client-handler.d.ts +5 -6
  63. package/out/zero-cache/src/services/view-syncer/client-handler.d.ts.map +1 -1
  64. package/out/zero-cache/src/services/view-syncer/client-handler.js +5 -23
  65. package/out/zero-cache/src/services/view-syncer/client-handler.js.map +1 -1
  66. package/out/zero-cache/src/services/view-syncer/cvr-store.d.ts.map +1 -1
  67. package/out/zero-cache/src/services/view-syncer/cvr-store.js +6 -4
  68. package/out/zero-cache/src/services/view-syncer/cvr-store.js.map +1 -1
  69. package/out/zero-cache/src/services/view-syncer/pipeline-driver.d.ts +1 -8
  70. package/out/zero-cache/src/services/view-syncer/pipeline-driver.d.ts.map +1 -1
  71. package/out/zero-cache/src/services/view-syncer/pipeline-driver.js +2 -11
  72. package/out/zero-cache/src/services/view-syncer/pipeline-driver.js.map +1 -1
  73. package/out/zero-cache/src/services/view-syncer/snapshotter.d.ts +0 -2
  74. package/out/zero-cache/src/services/view-syncer/snapshotter.d.ts.map +1 -1
  75. package/out/zero-cache/src/services/view-syncer/snapshotter.js +2 -10
  76. package/out/zero-cache/src/services/view-syncer/snapshotter.js.map +1 -1
  77. package/out/zero-cache/src/services/view-syncer/view-syncer.d.ts +1 -2
  78. package/out/zero-cache/src/services/view-syncer/view-syncer.d.ts.map +1 -1
  79. package/out/zero-cache/src/services/view-syncer/view-syncer.js +40 -42
  80. package/out/zero-cache/src/services/view-syncer/view-syncer.js.map +1 -1
  81. package/out/zero-cache/src/workers/connect-params.d.ts +0 -1
  82. package/out/zero-cache/src/workers/connect-params.d.ts.map +1 -1
  83. package/out/zero-cache/src/workers/connect-params.js +0 -2
  84. package/out/zero-cache/src/workers/connect-params.js.map +1 -1
  85. package/out/zero-cache/src/workers/replicator.d.ts.map +1 -1
  86. package/out/zero-cache/src/workers/replicator.js +2 -5
  87. package/out/zero-cache/src/workers/replicator.js.map +1 -1
  88. package/out/zero-cache/src/workers/syncer-ws-message-handler.d.ts.map +1 -1
  89. package/out/zero-cache/src/workers/syncer-ws-message-handler.js +1 -4
  90. package/out/zero-cache/src/workers/syncer-ws-message-handler.js.map +1 -1
  91. package/out/zero-client/src/client/context.js +1 -0
  92. package/out/zero-client/src/client/context.js.map +1 -1
  93. package/out/zero-client/src/client/version.js +1 -1
  94. package/out/zero-protocol/src/push.d.ts +7 -0
  95. package/out/zero-protocol/src/push.d.ts.map +1 -1
  96. package/out/zero-protocol/src/push.js +9 -1
  97. package/out/zero-protocol/src/push.js.map +1 -1
  98. package/out/zero-server/src/process-mutations.d.ts +1 -0
  99. package/out/zero-server/src/process-mutations.d.ts.map +1 -1
  100. package/out/zero-server/src/process-mutations.js +41 -2
  101. package/out/zero-server/src/process-mutations.js.map +1 -1
  102. package/out/zero-server/src/zql-database.d.ts.map +1 -1
  103. package/out/zero-server/src/zql-database.js +9 -0
  104. package/out/zero-server/src/zql-database.js.map +1 -1
  105. package/out/zero-solid/src/solid-view.js +1 -0
  106. package/out/zero-solid/src/solid-view.js.map +1 -1
  107. package/out/zero-solid/src/use-query.js +1 -0
  108. package/out/zero-solid/src/use-query.js.map +1 -1
  109. package/package.json +3 -3
  110. package/out/zero-cache/src/types/schema-versions.d.ts +0 -12
  111. package/out/zero-cache/src/types/schema-versions.d.ts.map +0 -1
  112. package/out/zero-cache/src/types/schema-versions.js +0 -28
  113. package/out/zero-cache/src/types/schema-versions.js.map +0 -1
@@ -81,6 +81,11 @@ function runWorker(parent, env, ...args) {
   const customQueryConfig = getCustomQueryConfig(config);
   const customQueryTransformer = customQueryConfig && new CustomQueryTransformer(logger, customQueryConfig, shard);
   const inspectorDelegate = new InspectorDelegate(customQueryTransformer);
+  const priorityOpRunningYieldThresholdMs = Math.max(
+    config.yieldThresholdMs / 4,
+    2
+  );
+  const normalYieldThresholdMs = Math.max(config.yieldThresholdMs, 2);
   return new ViewSyncerService(
     config,
     logger,
@@ -102,14 +107,15 @@ function runWorker(parent, env, ...args) {
       operatorStorage.createClientGroupStorage(id),
       id,
       inspectorDelegate,
-      config.yieldThresholdMs,
+      () => isPriorityOpRunning() ? priorityOpRunningYieldThresholdMs : normalYieldThresholdMs,
       config.enableQueryPlanner
     ),
     sub,
     drainCoordinator,
     config.log.slowHydrateThreshold,
     inspectorDelegate,
-    customQueryTransformer
+    customQueryTransformer,
+    runPriorityOp
   );
 };
 const mutagenFactory = (id) => new MutagenService(
@@ -121,7 +127,6 @@ function runWorker(parent, env, ...args) {
     writeAuthzStorage
   );
   const pusherFactory = config.push.url === void 0 && config.mutate.url === void 0 ? void 0 : (id) => new PusherService(
-    upstreamDB,
     config,
     {
       ...config.push,
@@ -151,7 +156,20 @@ if (!singleProcessMode()) {
     () => runWorker(must(parentWorker), process.env, ...process.argv.slice(2))
   );
 }
+let priorityOpCounter = 0;
+async function runPriorityOp(op) {
+  priorityOpCounter++;
+  try {
+    return await op();
+  } finally {
+    priorityOpCounter--;
+  }
+}
+function isPriorityOpRunning() {
+  return priorityOpCounter > 0;
+}
 export {
-  runWorker as default
+  runWorker as default,
+  isPriorityOpRunning
 };
 //# sourceMappingURL=syncer.js.map
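
For orientation, here is a compact sketch of the mechanism these hunks add, reconstructed from the diff and the embedded sourcesContent. runPriorityOp and isPriorityOpRunning are real names from the diff; makeYieldThreshold is a hypothetical helper that merely restates the inline Math.max() computation, not a function in the package.

// Sketch (not package code): the priority-op gate added in 0.26.0-canary.2.
let priorityOpCounter = 0;

// Wraps an operation and marks it "priority" for its duration.
async function runPriorityOp<T>(op: () => Promise<T>): Promise<T> {
  priorityOpCounter++;
  try {
    return await op();
  } finally {
    priorityOpCounter--;
  }
}

function isPriorityOpRunning(): boolean {
  return priorityOpCounter > 0;
}

// PipelineDriver now receives a callback instead of a fixed number, so the
// IVM yield budget drops to a quarter of yieldThresholdMs (floored at 2 ms)
// while any priority op is in flight.
function makeYieldThreshold(yieldThresholdMs: number): () => number {
  const priorityMs = Math.max(yieldThresholdMs / 4, 2);
  const normalMs = Math.max(yieldThresholdMs, 2);
  return () => (isPriorityOpRunning() ? priorityMs : normalMs);
}

The same hunks also thread runPriorityOp into the ViewSyncerService constructor and drop the now-unused upstreamDB argument from PusherService.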
@@ -1 +1 @@
  (sourcemap for syncer.js regenerated: the embedded sourcesContent adds runPriorityOp/isPriorityOpRunning — documented as letting IVM "use smaller time slices to allow this operation to proceed more quickly" — plus the yield-threshold callback, and drops upstreamDB from the PusherService construction.)
@@ -183,10 +183,6 @@ function getRequiredTables({
     [`${appID}.permissions`]: {
       permissions: { type: "json" },
       hash: { type: "string" }
-    },
-    [`${appID}.schemaVersions`]: {
-      minSupportedVersion: { type: "number" },
-      maxSupportedVersion: { type: "number" }
     }
   };
 }
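
To make the removal concrete, this is the full getRequiredTables return value after the change, with shapes copied from the embedded TypeScript source; appID and shardNum come from the ShardID config. The `${appID}.schemaVersions` entry is no longer required, matching the deleted schema-versions.* files in the list above.

// Upstream tables that validateInitiallySyncedData still checks (from the
// embedded source; the schemaVersions entry is gone).
function getRequiredTables({appID, shardNum}: {appID: string; shardNum: number}) {
  return {
    [`${appID}_${shardNum}.clients`]: {
      clientGroupID: {type: 'string'},
      clientID: {type: 'string'},
      lastMutationID: {type: 'number'},
      userID: {type: 'string'},
    },
    [`${appID}_${shardNum}.mutations`]: {
      clientGroupID: {type: 'string'},
      clientID: {type: 'string'},
      mutationID: {type: 'number'},
      mutation: {type: 'json'},
    },
    [`${appID}.permissions`]: {
      permissions: {type: 'json'},
      hash: {type: 'string'},
    },
  };
}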
@@ -1 +1 @@
  (sourcemap for change-source.js regenerated: the embedded sourcesContent drops the `${appID}.schemaVersions` entry from getRequiredTables.)
@@ -1 +1 @@
  (sourcemap for ddl.d.ts regenerated; only the mapping offsets changed.)
@@ -98,7 +98,6 @@ DECLARE
  publications TEXT[];
  cmd RECORD;
  relevant RECORD;
- deprecated RECORD;
  schema_specs TEXT;
  message TEXT;
  event TEXT;
@@ -180,15 +179,7 @@ BEGIN
  END IF;

  -- Construct and emit the DdlUpdateEvent message.
-
- -- TODO: Remove backwards-compatibility fields after a few releases.
- SELECT 'deprecated' as "schema", 'deprecated' as "name" INTO deprecated;
-
- SELECT json_build_object(
- 'tag', tag,
- 'table', deprecated,
- 'index', deprecated
- ) INTO event;
+ SELECT json_build_object('tag', tag) INTO event;

  SELECT ${schema}.schema_specs() INTO schema_specs;

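Net effect of the `ddl.js` hunks above: `emit_ddl_end` no longer declares the `deprecated` record or emits the placeholder `table`/`index` fields, so the `event` object in a `ddlUpdate` message now carries only the command `tag`. This already matches `ddlUpdateEventSchema` in the embedded `ddl.ts` source (`event: v.object({tag: v.string()})`). A minimal validation sketch using the `@badrap/valita` primitives the compiled output imports elsewhere; the sample payload is hypothetical:

```ts
import * as v from '@badrap/valita';

// The pared-down "ddlUpdate" event shape: only the command tag remains.
const ddlUpdateEvent = v.object({
  type: v.literal('ddlUpdate'),
  version: v.literal(1), // PROTOCOL_VERSION in ddl.ts
  event: v.object({tag: v.string()}), // no deprecated 'table'/'index' records
});

// Hypothetical message as emitted by emit_ddl_end for an ALTER TABLE;
// real messages also carry 'schema' and 'context', hence passthrough mode.
const msg = ddlUpdateEvent.parse(
  {type: 'ddlUpdate', version: 1, event: {tag: 'ALTER TABLE'}},
  {mode: 'passthrough'},
);
console.log(msg.event.tag); // "ALTER TABLE"
```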
@@ -1 +1 @@
- {"version":3,"file":"ddl.js","sources":["../../../../../../../../zero-cache/src/services/change-source/pg/schema/ddl.ts"],"sourcesContent":["import {literal as lit} from 'pg-format';\nimport {assert} from '../../../../../../shared/src/asserts.ts';\nimport * as v from '../../../../../../shared/src/valita.ts';\nimport {upstreamSchema, type ShardConfig} from '../../../../types/shards.ts';\nimport {id} from '../../../../types/sql.ts';\nimport {\n indexDefinitionsQuery,\n publishedSchema,\n publishedTableQuery,\n} from './published.ts';\n\n// Sent in the 'version' tag of \"ddlStart\" and \"ddlUpdate\" event messages.\n// This is used to ensure that the message constructed in the upstream\n// Trigger function is compatible with the code processing it in the zero-cache.\n//\n// Increment this when changing the format of the contents of the \"ddl\" events.\n// This will allow old / incompatible code to detect the change and abort.\nexport const PROTOCOL_VERSION = 1;\n\nconst triggerEvent = v.object({\n context: v.object({query: v.string()}).rest(v.string()),\n});\n\n// All DDL events contain a snapshot of the current tables and indexes that\n// are published / relevant to the shard.\nexport const ddlEventSchema = triggerEvent.extend({\n version: v.literal(PROTOCOL_VERSION),\n schema: publishedSchema,\n});\n\n// The `ddlStart` message is computed before every DDL event, regardless of\n// whether the subsequent event affects the shard. Downstream processing should\n// capture the contained schema information in order to determine the schema\n// changes necessary to apply a subsequent `ddlUpdate` message. Note that a\n// `ddlUpdate` message may not follow, as updates determined to be irrelevant\n// to the shard will not result in a message. However, all `ddlUpdate` messages\n// are guaranteed to be preceded by a `ddlStart` message.\nexport const ddlStartEventSchema = ddlEventSchema.extend({\n type: v.literal('ddlStart'),\n});\n\nexport type DdlStartEvent = v.Infer<typeof ddlStartEventSchema>;\n\n/**\n * The {@link DdlUpdateEvent} contains an updated schema resulting from\n * a particular ddl event. The event type provides information\n * (i.e. constraints) on the difference from the schema of the preceding\n * {@link DdlStartEvent}.\n *\n * Note that in almost all cases (the exception being `CREATE` events),\n * it is possible that there is no relevant difference between the\n * ddl-start schema and the ddl-update schema, as many aspects of the\n * schema (e.g. 
column constraints) are not relevant to downstream\n * replication.\n */\nexport const ddlUpdateEventSchema = ddlEventSchema.extend({\n type: v.literal('ddlUpdate'),\n event: v.object({tag: v.string()}),\n});\n\nexport type DdlUpdateEvent = v.Infer<typeof ddlUpdateEventSchema>;\n\nexport const replicationEventSchema = v.union(\n ddlStartEventSchema,\n ddlUpdateEventSchema,\n);\n\nexport type ReplicationEvent = v.Infer<typeof replicationEventSchema>;\n\n// Creates a function that appends `_{shard-num}` to the input and\n// quotes the result to be a valid identifier.\nfunction append(shardNum: number) {\n return (name: string) => id(name + '_' + String(shardNum));\n}\n\n/**\n * Event trigger functions contain the core logic that are invoked by triggers.\n *\n * Note that although many of these functions can theoretically be parameterized and\n * shared across shards, it is advantageous to keep the functions in each shard\n * isolated from each other in order to avoid the complexity of shared-function\n * versioning.\n *\n * In a sense, shards (and their triggers and functions) should be thought of as\n * execution environments that can be updated at different schedules. If per-shard\n * triggers called into shared functions, we would have to consider versioning the\n * functions when changing their behavior, backwards compatibility, removal of\n * unused versions, etc. (not unlike versioning of npm packages).\n *\n * Instead, we opt for the simplicity and isolation of having each shard\n * completely own (and maintain) the entirety of its trigger/function stack.\n */\nfunction createEventFunctionStatements(shard: ShardConfig) {\n const {appID, shardNum, publications} = shard;\n const schema = id(upstreamSchema(shard)); // e.g. \"{APP_ID}_{SHARD_ID}\"\n return /*sql*/ `\nCREATE SCHEMA IF NOT EXISTS ${schema};\n\nCREATE OR REPLACE FUNCTION ${schema}.get_trigger_context()\nRETURNS record AS $$\nDECLARE\n result record;\nBEGIN\n SELECT current_query() AS \"query\" into result;\n RETURN result;\nEND\n$$ LANGUAGE plpgsql;\n\n\nCREATE OR REPLACE FUNCTION ${schema}.notice_ignore(object_id TEXT)\nRETURNS void AS $$\nBEGIN\n RAISE NOTICE 'zero(%) ignoring %', ${lit(shardNum)}, object_id;\nEND\n$$ LANGUAGE plpgsql;\n\n\nCREATE OR REPLACE FUNCTION ${schema}.schema_specs()\nRETURNS TEXT AS $$\nDECLARE\n tables record;\n indexes record;\nBEGIN\n ${publishedTableQuery(publications)} INTO tables;\n ${indexDefinitionsQuery(publications)} INTO indexes;\n RETURN json_build_object(\n 'tables', tables.tables,\n 'indexes', indexes.indexes\n );\nEND\n$$ LANGUAGE plpgsql;\n\n\nCREATE OR REPLACE FUNCTION ${schema}.emit_ddl_start()\nRETURNS event_trigger AS $$\nDECLARE\n schema_specs TEXT;\n message TEXT;\nBEGIN\n SELECT ${schema}.schema_specs() INTO schema_specs;\n\n SELECT json_build_object(\n 'type', 'ddlStart',\n 'version', ${PROTOCOL_VERSION},\n 'schema', schema_specs::json,\n 'context', ${schema}.get_trigger_context()\n ) INTO message;\n\n PERFORM pg_logical_emit_message(true, ${lit(\n `${appID}/${shardNum}`,\n )}, message);\nEND\n$$ LANGUAGE plpgsql;\n\n\nCREATE OR REPLACE FUNCTION ${schema}.emit_ddl_end(tag TEXT)\nRETURNS void AS $$\nDECLARE\n publications TEXT[];\n cmd RECORD;\n relevant RECORD;\n deprecated RECORD;\n schema_specs TEXT;\n message TEXT;\n event TEXT;\nBEGIN\n publications := ARRAY[${lit(publications)}];\n\n SELECT objid, object_type, object_identity \n FROM pg_event_trigger_ddl_commands() \n WHERE object_type IN (\n 'table',\n 'table column',\n 'index',\n 'publication relation',\n 
'publication namespace',\n 'schema')\n LIMIT 1 INTO cmd;\n\n -- Filter DDL updates that are not relevant to the shard (i.e. publications) when possible.\n\n IF cmd.object_type = 'table' OR cmd.object_type = 'table column' THEN\n SELECT ns.nspname AS \"schema\", c.relname AS \"name\" FROM pg_class AS c\n JOIN pg_namespace AS ns ON c.relnamespace = ns.oid\n JOIN pg_publication_tables AS pb ON pb.schemaname = ns.nspname AND pb.tablename = c.relname\n WHERE c.oid = cmd.objid AND pb.pubname = ANY (publications)\n INTO relevant;\n IF relevant IS NULL THEN\n PERFORM ${schema}.notice_ignore(cmd.object_identity);\n RETURN;\n END IF;\n\n cmd.object_type := 'table'; -- normalize the 'table column' target to 'table'\n\n ELSIF cmd.object_type = 'index' THEN\n SELECT ns.nspname AS \"schema\", c.relname AS \"name\" FROM pg_class AS c\n JOIN pg_namespace AS ns ON c.relnamespace = ns.oid\n JOIN pg_indexes as ind ON ind.schemaname = ns.nspname AND ind.indexname = c.relname\n JOIN pg_publication_tables AS pb ON pb.schemaname = ns.nspname AND pb.tablename = ind.tablename\n WHERE c.oid = cmd.objid AND pb.pubname = ANY (publications)\n INTO relevant;\n IF relevant IS NULL THEN\n PERFORM ${schema}.notice_ignore(cmd.object_identity);\n RETURN;\n END IF;\n\n ELSIF cmd.object_type = 'publication relation' THEN\n SELECT pb.pubname FROM pg_publication_rel AS rel\n JOIN pg_publication AS pb ON pb.oid = rel.prpubid\n WHERE rel.oid = cmd.objid AND pb.pubname = ANY (publications) \n INTO relevant;\n IF relevant IS NULL THEN\n PERFORM ${schema}.notice_ignore(cmd.object_identity);\n RETURN;\n END IF;\n\n ELSIF cmd.object_type = 'publication namespace' THEN\n SELECT pb.pubname FROM pg_publication_namespace AS ns\n JOIN pg_publication AS pb ON pb.oid = ns.pnpubid\n WHERE ns.oid = cmd.objid AND pb.pubname = ANY (publications) \n INTO relevant;\n IF relevant IS NULL THEN\n PERFORM ${schema}.notice_ignore(cmd.object_identity);\n RETURN;\n END IF;\n\n ELSIF cmd.object_type = 'schema' THEN\n SELECT ns.nspname AS \"schema\", c.relname AS \"name\" FROM pg_class AS c\n JOIN pg_namespace AS ns ON c.relnamespace = ns.oid\n JOIN pg_publication_tables AS pb ON pb.schemaname = ns.nspname AND pb.tablename = c.relname\n WHERE ns.oid = cmd.objid AND pb.pubname = ANY (publications)\n INTO relevant;\n IF relevant IS NULL THEN\n PERFORM ${schema}.notice_ignore(cmd.object_identity);\n RETURN;\n END IF;\n\n ELSIF tag LIKE 'CREATE %' THEN\n PERFORM ${schema}.notice_ignore('noop ' || tag);\n RETURN;\n END IF;\n\n -- Construct and emit the DdlUpdateEvent message.\n\n -- TODO: Remove backwards-compatibility fields after a few releases.\n SELECT 'deprecated' as \"schema\", 'deprecated' as \"name\" INTO deprecated;\n\n SELECT json_build_object(\n 'tag', tag,\n 'table', deprecated,\n 'index', deprecated\n ) INTO event;\n \n SELECT ${schema}.schema_specs() INTO schema_specs;\n\n SELECT json_build_object(\n 'type', 'ddlUpdate',\n 'version', ${PROTOCOL_VERSION},\n 'schema', schema_specs::json,\n 'event', event::json,\n 'context', ${schema}.get_trigger_context()\n ) INTO message;\n\n PERFORM pg_logical_emit_message(true, ${lit(\n `${appID}/${shardNum}`,\n )}, message);\nEND\n$$ LANGUAGE plpgsql;\n`;\n}\n\n// Exported for testing.\nexport const TAGS = [\n 'CREATE TABLE',\n 'ALTER TABLE',\n 'CREATE INDEX',\n 'DROP TABLE',\n 'DROP INDEX',\n 'ALTER PUBLICATION',\n 'ALTER SCHEMA',\n] as const;\n\nexport function createEventTriggerStatements(shard: ShardConfig) {\n // Better to assert here than get a cryptic syntax error from Postgres.\n 
assert(shard.publications.length, `shard publications must be non-empty`);\n\n // Unlike functions, which are namespaced in shard-specific schemas,\n // EVENT TRIGGER names are in the global namespace and thus must include\n // the appID and shardNum.\n const {appID, shardNum} = shard;\n const sharded = append(shardNum);\n const schema = id(upstreamSchema(shard));\n\n const triggers = [\n dropEventTriggerStatements(shard.appID, shard.shardNum),\n createEventFunctionStatements(shard),\n ];\n\n // A single ddl_command_start trigger covering all relevant tags.\n triggers.push(/*sql*/ `\nCREATE EVENT TRIGGER ${sharded(`${appID}_ddl_start`)}\n ON ddl_command_start\n WHEN TAG IN (${lit(TAGS)})\n EXECUTE PROCEDURE ${schema}.emit_ddl_start();\n`);\n\n // A per-tag ddl_command_end trigger that dispatches to ${schema}.emit_ddl_end(tag)\n for (const tag of TAGS) {\n const tagID = tag.toLowerCase().replace(' ', '_');\n triggers.push(/*sql*/ `\nCREATE OR REPLACE FUNCTION ${schema}.emit_${tagID}() \nRETURNS event_trigger AS $$\nBEGIN\n PERFORM ${schema}.emit_ddl_end(${lit(tag)});\nEND\n$$ LANGUAGE plpgsql;\n\nCREATE EVENT TRIGGER ${sharded(`${appID}_${tagID}`)}\n ON ddl_command_end\n WHEN TAG IN (${lit(tag)})\n EXECUTE PROCEDURE ${schema}.emit_${tagID}();\n`);\n }\n return triggers.join('');\n}\n\n// Exported for testing.\nexport function dropEventTriggerStatements(\n appID: string,\n shardID: string | number,\n) {\n const stmts: string[] = [];\n // A single ddl_command_start trigger covering all relevant tags.\n stmts.push(/*sql*/ `\n DROP EVENT TRIGGER IF EXISTS ${id(`${appID}_ddl_start_${shardID}`)};\n `);\n\n // A per-tag ddl_command_end trigger that dispatches to ${schema}.emit_ddl_end(tag)\n for (const tag of TAGS) {\n const tagID = tag.toLowerCase().replace(' ', '_');\n stmts.push(/*sql*/ `\n DROP EVENT TRIGGER IF EXISTS ${id(`${appID}_${tagID}_${shardID}`)};\n `);\n }\n return 
stmts.join('');\n}\n"],"names":["v.object","v.string","v.literal","v.union","lit"],"mappings":";;;;;;;AAiBO,MAAM,mBAAmB;AAEhC,MAAM,eAAeA,OAAS;AAAA,EAC5B,SAASA,OAAS,EAAC,OAAOC,OAAE,GAAS,EAAE,KAAKA,QAAU;AACxD,CAAC;AAIM,MAAM,iBAAiB,aAAa,OAAO;AAAA,EAChD,SAASC,QAAU,gBAAgB;AAAA,EACnC,QAAQ;AACV,CAAC;AASM,MAAM,sBAAsB,eAAe,OAAO;AAAA,EACvD,MAAMA,QAAU,UAAU;AAC5B,CAAC;AAgBM,MAAM,uBAAuB,eAAe,OAAO;AAAA,EACxD,MAAMA,QAAU,WAAW;AAAA,EAC3B,OAAOF,OAAS,EAAC,KAAKC,OAAE,GAAS;AACnC,CAAC;AAIM,MAAM,yBAAyBE;AAAAA,EACpC;AAAA,EACA;AACF;AAMA,SAAS,OAAO,UAAkB;AAChC,SAAO,CAAC,SAAiB,GAAG,OAAO,MAAM,OAAO,QAAQ,CAAC;AAC3D;AAmBA,SAAS,8BAA8B,OAAoB;AACzD,QAAM,EAAC,OAAO,UAAU,aAAA,IAAgB;AACxC,QAAM,SAAS,GAAG,eAAe,KAAK,CAAC;AACvC;AAAA;AAAA,IAAe;AAAA,8BACa,MAAM;AAAA;AAAA,6BAEP,MAAM;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,6BAWN,MAAM;AAAA;AAAA;AAAA,uCAGIC,UAAI,QAAQ,CAAC;AAAA;AAAA;AAAA;AAAA;AAAA,6BAKvB,MAAM;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,IAM/B,oBAAoB,YAAY,CAAC;AAAA,IACjC,sBAAsB,YAAY,CAAC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,6BASV,MAAM;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,WAMxB,MAAM;AAAA;AAAA;AAAA;AAAA,iBAIA,gBAAgB;AAAA;AAAA,iBAEhB,MAAM;AAAA;AAAA;AAAA,0CAGmBA;AAAAA,MACtC,GAAG,KAAK,IAAI,QAAQ;AAAA,IAAA,CACrB;AAAA;AAAA;AAAA;AAAA;AAAA,6BAK0B,MAAM;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,0BAWTA,UAAI,YAAY,CAAC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,gBAsB3B,MAAM;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,gBAcN,MAAM;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,gBAUN,MAAM;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,gBAUN,MAAM;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,gBAWN,MAAM;AAAA;AAAA;AAAA;AAAA;AAAA,cAKR,MAAM;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,WAeT,MAAM;AAAA;AAAA;AAAA;AAAA,iBAIA,gBAAgB;AAAA;AAAA;AAAA,iBAGhB,MAAM;AAAA;AAAA;AAAA,0CAGmBA;AAAAA,MACtC,GAAG,KAAK,IAAI,QAAQ;AAAA,IAAA,CACrB;AAAA;AAAA;AAAA;AAAA;AAIH;AAGO,MAAM,OAAO;AAAA,EAClB;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AACF;AAEO,SAAS,6BAA6B,OAAoB;AAE/D,SAAO,MAAM,aAAa,QAAQ,sCAAsC;AAKxE,QAAM,EAAC,OAAO,SAAA,IAAY;AAC1B,QAAM,UAAU,OAAO,QAAQ;AAC/B,QAAM,SAAS,GAAG,eAAe,KAAK,CAAC;AAEvC,QAAM,WAAW;AAAA,IACf,2BAA2B,MAAM,OAAO,MAAM,QAAQ;AAAA,IACtD,8BAA8B,KAAK;AAAA,EAAA;AAIrC,WAAS;AAAA;AAAA,IAAa;AAAA,uBACD,QAAQ,GAAG,KAAK,YAAY,CAAC;AAAA;AAAA,iBAEnCA,UAAI,IAAI,CAAC;AAAA,sBACJ,MAAM;AAAA;AAAA,EAAA;AAI1B,aAAW,OAAO,MAAM;AACtB,UAAM,QAAQ,IAAI,YAAA,EAAc,QAAQ,KAAK,GAAG;AAChD,aAAS;AAAA;AAAA,MAAa;AAAA,6BACG,MAAM,SAAS,KAAK;AAAA;AAAA;AAAA,YAGrC,MAAM,iBAAiBA,UAAI,GAAG,CAAC;AAAA;AAAA;AAAA;AAAA,uBAIpB,QAAQ,GAAG,KAAK,IAAI,KAAK,EAAE,CAAC;AAAA;AAAA,iBAElCA,UAAI,GAAG,CAAC;AAAA,sBACH,MAAM,SAAS,KAAK;AAAA;AAAA,IAAA;AAAA,EAExC;AACA,SAAO,SAAS,KAAK,EAAE;AACzB;AAGO,SAAS,2BACd,OACA,SACA;AACA,QAAM,QAAkB,CAAA;AAExB,QAAM;AAAA;AAAA,IAAa;AAAA,mCACc,GAAG,GAAG,KAAK,cAAc,OAAO,EAAE,CAAC;AAAA;AAAA,EAAA;AAIpE,aAAW,OAAO,MAAM;AACtB,UAAM,QAAQ,IAAI,YAAA,EAAc,QAAQ,KAAK,GAAG;AAChD,UAAM;AAAA;AAAA,MAAa;AAAA,qCACc,GAAG,GAAG,KAAK,IAAI,KAAK,IAAI,OAAO,EAAE,CAAC;AAAA;AAAA,IAAA;AAAA,EAErE;AACA,SAAO,MAAM,KAAK,EAAE;AACtB;"}
+ {"version":3,"file":"ddl.js","sources":["../../../../../../../../zero-cache/src/services/change-source/pg/schema/ddl.ts"],"sourcesContent":["import {literal as lit} from 'pg-format';\nimport {assert} from '../../../../../../shared/src/asserts.ts';\nimport * as v from '../../../../../../shared/src/valita.ts';\nimport {upstreamSchema, type ShardConfig} from '../../../../types/shards.ts';\nimport {id} from '../../../../types/sql.ts';\nimport {\n indexDefinitionsQuery,\n publishedSchema,\n publishedTableQuery,\n} from './published.ts';\n\n// Sent in the 'version' tag of \"ddlStart\" and \"ddlUpdate\" event messages.\n// This is used to ensure that the message constructed in the upstream\n// Trigger function is compatible with the code processing it in the zero-cache.\n//\n// Increment this when changing the format of the contents of the \"ddl\" events.\n// This will allow old / incompatible code to detect the change and abort.\nexport const PROTOCOL_VERSION = 1;\n\nconst triggerEvent = v.object({\n context: v.object({query: v.string()}).rest(v.string()),\n});\n\n// All DDL events contain a snapshot of the current tables and indexes that\n// are published / relevant to the shard.\nexport const ddlEventSchema = triggerEvent.extend({\n version: v.literal(PROTOCOL_VERSION),\n schema: publishedSchema,\n});\n\n// The `ddlStart` message is computed before every DDL event, regardless of\n// whether the subsequent event affects the shard. Downstream processing should\n// capture the contained schema information in order to determine the schema\n// changes necessary to apply a subsequent `ddlUpdate` message. Note that a\n// `ddlUpdate` message may not follow, as updates determined to be irrelevant\n// to the shard will not result in a message. However, all `ddlUpdate` messages\n// are guaranteed to be preceded by a `ddlStart` message.\nexport const ddlStartEventSchema = ddlEventSchema.extend({\n type: v.literal('ddlStart'),\n});\n\nexport type DdlStartEvent = v.Infer<typeof ddlStartEventSchema>;\n\n/**\n * The {@link DdlUpdateEvent} contains an updated schema resulting from\n * a particular ddl event. The event type provides information\n * (i.e. constraints) on the difference from the schema of the preceding\n * {@link DdlStartEvent}.\n *\n * Note that in almost all cases (the exception being `CREATE` events),\n * it is possible that there is no relevant difference between the\n * ddl-start schema and the ddl-update schema, as many aspects of the\n * schema (e.g. 
column constraints) are not relevant to downstream\n * replication.\n */\nexport const ddlUpdateEventSchema = ddlEventSchema.extend({\n type: v.literal('ddlUpdate'),\n event: v.object({tag: v.string()}),\n});\n\nexport type DdlUpdateEvent = v.Infer<typeof ddlUpdateEventSchema>;\n\nexport const replicationEventSchema = v.union(\n ddlStartEventSchema,\n ddlUpdateEventSchema,\n);\n\nexport type ReplicationEvent = v.Infer<typeof replicationEventSchema>;\n\n// Creates a function that appends `_{shard-num}` to the input and\n// quotes the result to be a valid identifier.\nfunction append(shardNum: number) {\n return (name: string) => id(name + '_' + String(shardNum));\n}\n\n/**\n * Event trigger functions contain the core logic that are invoked by triggers.\n *\n * Note that although many of these functions can theoretically be parameterized and\n * shared across shards, it is advantageous to keep the functions in each shard\n * isolated from each other in order to avoid the complexity of shared-function\n * versioning.\n *\n * In a sense, shards (and their triggers and functions) should be thought of as\n * execution environments that can be updated at different schedules. If per-shard\n * triggers called into shared functions, we would have to consider versioning the\n * functions when changing their behavior, backwards compatibility, removal of\n * unused versions, etc. (not unlike versioning of npm packages).\n *\n * Instead, we opt for the simplicity and isolation of having each shard\n * completely own (and maintain) the entirety of its trigger/function stack.\n */\nfunction createEventFunctionStatements(shard: ShardConfig) {\n const {appID, shardNum, publications} = shard;\n const schema = id(upstreamSchema(shard)); // e.g. \"{APP_ID}_{SHARD_ID}\"\n return /*sql*/ `\nCREATE SCHEMA IF NOT EXISTS ${schema};\n\nCREATE OR REPLACE FUNCTION ${schema}.get_trigger_context()\nRETURNS record AS $$\nDECLARE\n result record;\nBEGIN\n SELECT current_query() AS \"query\" into result;\n RETURN result;\nEND\n$$ LANGUAGE plpgsql;\n\n\nCREATE OR REPLACE FUNCTION ${schema}.notice_ignore(object_id TEXT)\nRETURNS void AS $$\nBEGIN\n RAISE NOTICE 'zero(%) ignoring %', ${lit(shardNum)}, object_id;\nEND\n$$ LANGUAGE plpgsql;\n\n\nCREATE OR REPLACE FUNCTION ${schema}.schema_specs()\nRETURNS TEXT AS $$\nDECLARE\n tables record;\n indexes record;\nBEGIN\n ${publishedTableQuery(publications)} INTO tables;\n ${indexDefinitionsQuery(publications)} INTO indexes;\n RETURN json_build_object(\n 'tables', tables.tables,\n 'indexes', indexes.indexes\n );\nEND\n$$ LANGUAGE plpgsql;\n\n\nCREATE OR REPLACE FUNCTION ${schema}.emit_ddl_start()\nRETURNS event_trigger AS $$\nDECLARE\n schema_specs TEXT;\n message TEXT;\nBEGIN\n SELECT ${schema}.schema_specs() INTO schema_specs;\n\n SELECT json_build_object(\n 'type', 'ddlStart',\n 'version', ${PROTOCOL_VERSION},\n 'schema', schema_specs::json,\n 'context', ${schema}.get_trigger_context()\n ) INTO message;\n\n PERFORM pg_logical_emit_message(true, ${lit(\n `${appID}/${shardNum}`,\n )}, message);\nEND\n$$ LANGUAGE plpgsql;\n\n\nCREATE OR REPLACE FUNCTION ${schema}.emit_ddl_end(tag TEXT)\nRETURNS void AS $$\nDECLARE\n publications TEXT[];\n cmd RECORD;\n relevant RECORD;\n schema_specs TEXT;\n message TEXT;\n event TEXT;\nBEGIN\n publications := ARRAY[${lit(publications)}];\n\n SELECT objid, object_type, object_identity \n FROM pg_event_trigger_ddl_commands() \n WHERE object_type IN (\n 'table',\n 'table column',\n 'index',\n 'publication relation',\n 'publication namespace',\n 
'schema')\n LIMIT 1 INTO cmd;\n\n -- Filter DDL updates that are not relevant to the shard (i.e. publications) when possible.\n\n IF cmd.object_type = 'table' OR cmd.object_type = 'table column' THEN\n SELECT ns.nspname AS \"schema\", c.relname AS \"name\" FROM pg_class AS c\n JOIN pg_namespace AS ns ON c.relnamespace = ns.oid\n JOIN pg_publication_tables AS pb ON pb.schemaname = ns.nspname AND pb.tablename = c.relname\n WHERE c.oid = cmd.objid AND pb.pubname = ANY (publications)\n INTO relevant;\n IF relevant IS NULL THEN\n PERFORM ${schema}.notice_ignore(cmd.object_identity);\n RETURN;\n END IF;\n\n cmd.object_type := 'table'; -- normalize the 'table column' target to 'table'\n\n ELSIF cmd.object_type = 'index' THEN\n SELECT ns.nspname AS \"schema\", c.relname AS \"name\" FROM pg_class AS c\n JOIN pg_namespace AS ns ON c.relnamespace = ns.oid\n JOIN pg_indexes as ind ON ind.schemaname = ns.nspname AND ind.indexname = c.relname\n JOIN pg_publication_tables AS pb ON pb.schemaname = ns.nspname AND pb.tablename = ind.tablename\n WHERE c.oid = cmd.objid AND pb.pubname = ANY (publications)\n INTO relevant;\n IF relevant IS NULL THEN\n PERFORM ${schema}.notice_ignore(cmd.object_identity);\n RETURN;\n END IF;\n\n ELSIF cmd.object_type = 'publication relation' THEN\n SELECT pb.pubname FROM pg_publication_rel AS rel\n JOIN pg_publication AS pb ON pb.oid = rel.prpubid\n WHERE rel.oid = cmd.objid AND pb.pubname = ANY (publications) \n INTO relevant;\n IF relevant IS NULL THEN\n PERFORM ${schema}.notice_ignore(cmd.object_identity);\n RETURN;\n END IF;\n\n ELSIF cmd.object_type = 'publication namespace' THEN\n SELECT pb.pubname FROM pg_publication_namespace AS ns\n JOIN pg_publication AS pb ON pb.oid = ns.pnpubid\n WHERE ns.oid = cmd.objid AND pb.pubname = ANY (publications) \n INTO relevant;\n IF relevant IS NULL THEN\n PERFORM ${schema}.notice_ignore(cmd.object_identity);\n RETURN;\n END IF;\n\n ELSIF cmd.object_type = 'schema' THEN\n SELECT ns.nspname AS \"schema\", c.relname AS \"name\" FROM pg_class AS c\n JOIN pg_namespace AS ns ON c.relnamespace = ns.oid\n JOIN pg_publication_tables AS pb ON pb.schemaname = ns.nspname AND pb.tablename = c.relname\n WHERE ns.oid = cmd.objid AND pb.pubname = ANY (publications)\n INTO relevant;\n IF relevant IS NULL THEN\n PERFORM ${schema}.notice_ignore(cmd.object_identity);\n RETURN;\n END IF;\n\n ELSIF tag LIKE 'CREATE %' THEN\n PERFORM ${schema}.notice_ignore('noop ' || tag);\n RETURN;\n END IF;\n\n -- Construct and emit the DdlUpdateEvent message.\n SELECT json_build_object('tag', tag) INTO event;\n \n SELECT ${schema}.schema_specs() INTO schema_specs;\n\n SELECT json_build_object(\n 'type', 'ddlUpdate',\n 'version', ${PROTOCOL_VERSION},\n 'schema', schema_specs::json,\n 'event', event::json,\n 'context', ${schema}.get_trigger_context()\n ) INTO message;\n\n PERFORM pg_logical_emit_message(true, ${lit(\n `${appID}/${shardNum}`,\n )}, message);\nEND\n$$ LANGUAGE plpgsql;\n`;\n}\n\n// Exported for testing.\nexport const TAGS = [\n 'CREATE TABLE',\n 'ALTER TABLE',\n 'CREATE INDEX',\n 'DROP TABLE',\n 'DROP INDEX',\n 'ALTER PUBLICATION',\n 'ALTER SCHEMA',\n] as const;\n\nexport function createEventTriggerStatements(shard: ShardConfig) {\n // Better to assert here than get a cryptic syntax error from Postgres.\n assert(shard.publications.length, `shard publications must be non-empty`);\n\n // Unlike functions, which are namespaced in shard-specific schemas,\n // EVENT TRIGGER names are in the global namespace and thus must include\n // the appID and shardNum.\n 
const {appID, shardNum} = shard;\n const sharded = append(shardNum);\n const schema = id(upstreamSchema(shard));\n\n const triggers = [\n dropEventTriggerStatements(shard.appID, shard.shardNum),\n createEventFunctionStatements(shard),\n ];\n\n // A single ddl_command_start trigger covering all relevant tags.\n triggers.push(/*sql*/ `\nCREATE EVENT TRIGGER ${sharded(`${appID}_ddl_start`)}\n ON ddl_command_start\n WHEN TAG IN (${lit(TAGS)})\n EXECUTE PROCEDURE ${schema}.emit_ddl_start();\n`);\n\n // A per-tag ddl_command_end trigger that dispatches to ${schema}.emit_ddl_end(tag)\n for (const tag of TAGS) {\n const tagID = tag.toLowerCase().replace(' ', '_');\n triggers.push(/*sql*/ `\nCREATE OR REPLACE FUNCTION ${schema}.emit_${tagID}() \nRETURNS event_trigger AS $$\nBEGIN\n PERFORM ${schema}.emit_ddl_end(${lit(tag)});\nEND\n$$ LANGUAGE plpgsql;\n\nCREATE EVENT TRIGGER ${sharded(`${appID}_${tagID}`)}\n ON ddl_command_end\n WHEN TAG IN (${lit(tag)})\n EXECUTE PROCEDURE ${schema}.emit_${tagID}();\n`);\n }\n return triggers.join('');\n}\n\n// Exported for testing.\nexport function dropEventTriggerStatements(\n appID: string,\n shardID: string | number,\n) {\n const stmts: string[] = [];\n // A single ddl_command_start trigger covering all relevant tags.\n stmts.push(/*sql*/ `\n DROP EVENT TRIGGER IF EXISTS ${id(`${appID}_ddl_start_${shardID}`)};\n `);\n\n // A per-tag ddl_command_end trigger that dispatches to ${schema}.emit_ddl_end(tag)\n for (const tag of TAGS) {\n const tagID = tag.toLowerCase().replace(' ', '_');\n stmts.push(/*sql*/ `\n DROP EVENT TRIGGER IF EXISTS ${id(`${appID}_${tagID}_${shardID}`)};\n `);\n }\n return stmts.join('');\n}\n"],"names":["v.object","v.string","v.literal","v.union","lit"],"mappings":";;;;;;;AAiBO,MAAM,mBAAmB;AAEhC,MAAM,eAAeA,OAAS;AAAA,EAC5B,SAASA,OAAS,EAAC,OAAOC,OAAE,GAAS,EAAE,KAAKA,QAAU;AACxD,CAAC;AAIM,MAAM,iBAAiB,aAAa,OAAO;AAAA,EAChD,SAASC,QAAU,gBAAgB;AAAA,EACnC,QAAQ;AACV,CAAC;AASM,MAAM,sBAAsB,eAAe,OAAO;AAAA,EACvD,MAAMA,QAAU,UAAU;AAC5B,CAAC;AAgBM,MAAM,uBAAuB,eAAe,OAAO;AAAA,EACxD,MAAMA,QAAU,WAAW;AAAA,EAC3B,OAAOF,OAAS,EAAC,KAAKC,OAAE,GAAS;AACnC,CAAC;AAIM,MAAM,yBAAyBE;AAAAA,EACpC;AAAA,EACA;AACF;AAMA,SAAS,OAAO,UAAkB;AAChC,SAAO,CAAC,SAAiB,GAAG,OAAO,MAAM,OAAO,QAAQ,CAAC;AAC3D;AAmBA,SAAS,8BAA8B,OAAoB;AACzD,QAAM,EAAC,OAAO,UAAU,aAAA,IAAgB;AACxC,QAAM,SAAS,GAAG,eAAe,KAAK,CAAC;AACvC;AAAA;AAAA,IAAe;AAAA,8BACa,MAAM;AAAA;AAAA,6BAEP,MAAM;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,6BAWN,MAAM;AAAA;AAAA;AAAA,uCAGIC,UAAI,QAAQ,CAAC;AAAA;AAAA;AAAA;AAAA;AAAA,6BAKvB,MAAM;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,IAM/B,oBAAoB,YAAY,CAAC;AAAA,IACjC,sBAAsB,YAAY,CAAC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,6BASV,MAAM;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,WAMxB,MAAM;AAAA;AAAA;AAAA;AAAA,iBAIA,gBAAgB;AAAA;AAAA,iBAEhB,MAAM;AAAA;AAAA;AAAA,0CAGmBA;AAAAA,MACtC,GAAG,KAAK,IAAI,QAAQ;AAAA,IAAA,CACrB;AAAA;AAAA;AAAA;AAAA;AAAA,6BAK0B,MAAM;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,0BAUTA,UAAI,YAAY,CAAC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,gBAsB3B,MAAM;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,gBAcN,MAAM;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,gBAUN,MAAM;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,gBAUN,MAAM;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,gBAWN,MAAM;AAAA;AAAA;AAAA;AAAA;AAAA,cAKR,MAAM;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,WAOT,MAAM;AAAA;AAAA;AAAA;AAAA,iBAIA,gBAAgB;AAAA;AAAA;AAAA,iBAGhB,MAAM;AAAA;AAAA;AAAA,0CAGmBA;AAAAA,MACtC,GAAG,KAAK,IAAI,QAAQ;AAAA,IAAA,CACrB;AA
AA;AAAA;AAAA;AAAA;AAIH;AAGO,MAAM,OAAO;AAAA,EAClB;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AACF;AAEO,SAAS,6BAA6B,OAAoB;AAE/D,SAAO,MAAM,aAAa,QAAQ,sCAAsC;AAKxE,QAAM,EAAC,OAAO,SAAA,IAAY;AAC1B,QAAM,UAAU,OAAO,QAAQ;AAC/B,QAAM,SAAS,GAAG,eAAe,KAAK,CAAC;AAEvC,QAAM,WAAW;AAAA,IACf,2BAA2B,MAAM,OAAO,MAAM,QAAQ;AAAA,IACtD,8BAA8B,KAAK;AAAA,EAAA;AAIrC,WAAS;AAAA;AAAA,IAAa;AAAA,uBACD,QAAQ,GAAG,KAAK,YAAY,CAAC;AAAA;AAAA,iBAEnCA,UAAI,IAAI,CAAC;AAAA,sBACJ,MAAM;AAAA;AAAA,EAAA;AAI1B,aAAW,OAAO,MAAM;AACtB,UAAM,QAAQ,IAAI,YAAA,EAAc,QAAQ,KAAK,GAAG;AAChD,aAAS;AAAA;AAAA,MAAa;AAAA,6BACG,MAAM,SAAS,KAAK;AAAA;AAAA;AAAA,YAGrC,MAAM,iBAAiBA,UAAI,GAAG,CAAC;AAAA;AAAA;AAAA;AAAA,uBAIpB,QAAQ,GAAG,KAAK,IAAI,KAAK,EAAE,CAAC;AAAA;AAAA,iBAElCA,UAAI,GAAG,CAAC;AAAA,sBACH,MAAM,SAAS,KAAK;AAAA;AAAA,IAAA;AAAA,EAExC;AACA,SAAO,SAAS,KAAK,EAAE;AACzB;AAGO,SAAS,2BACd,OACA,SACA;AACA,QAAM,QAAkB,CAAA;AAExB,QAAM;AAAA;AAAA,IAAa;AAAA,mCACc,GAAG,GAAG,KAAK,cAAc,OAAO,EAAE,CAAC;AAAA;AAAA,EAAA;AAIpE,aAAW,OAAO,MAAM;AACtB,UAAM,QAAQ,IAAI,YAAA,EAAc,QAAQ,KAAK,GAAG;AAChD,UAAM;AAAA;AAAA,MAAa;AAAA,qCACc,GAAG,GAAG,KAAK,IAAI,KAAK,IAAI,OAAO,EAAE,CAAC;AAAA;AAAA,IAAA;AAAA,EAErE;AACA,SAAO,MAAM,KAAK,EAAE;AACtB;"}
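The updated `ddl.js.map` embeds the full `ddl.ts` source, which also documents the trigger architecture: each shard owns one `ddl_command_start` trigger covering all tags plus one `ddl_command_end` trigger per tag, all named in Postgres's global event-trigger namespace with the appID and shard number. A standalone sketch of that naming scheme, following the `append()` and `tagID` derivations in the source (the appID/shardNum values here are just the documented defaults, not fixed by the code):

```ts
const appID = 'zero'; // default appID (example value)
const shardNum = 0;   // default shard number (example value)

// TAGS as listed in the embedded ddl.ts source.
const TAGS = [
  'CREATE TABLE',
  'ALTER TABLE',
  'CREATE INDEX',
  'DROP TABLE',
  'DROP INDEX',
  'ALTER PUBLICATION',
  'ALTER SCHEMA',
] as const;

// One ddl_command_start trigger covers every tag...
const startTrigger = `${appID}_ddl_start_${shardNum}`; // "zero_ddl_start_0"

// ...and one ddl_command_end trigger per tag dispatches to emit_ddl_end(tag).
const endTriggers = TAGS.map(
  tag => `${appID}_${tag.toLowerCase().replace(' ', '_')}_${shardNum}`,
); // ["zero_create_table_0", "zero_alter_table_0", ...]

console.log(startTrigger, endTriggers);
```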
@@ -1 +1 @@
- {"version":3,"file":"init.d.ts","sourceRoot":"","sources":["../../../../../../../../zero-cache/src/services/change-source/pg/schema/init.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,kBAAkB,CAAC;AASjD,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,yBAAyB,CAAC;AACxD,OAAO,EAAiB,KAAK,WAAW,EAAC,MAAM,6BAA6B,CAAC;AAa7E;;GAEG;AACH,wBAAsB,iBAAiB,CACrC,EAAE,EAAE,UAAU,EACd,EAAE,EAAE,UAAU,EACd,KAAK,EAAE,WAAW,GACjB,OAAO,CAAC,IAAI,CAAC,CAgBf;AAED;;GAEG;AACH,wBAAsB,iBAAiB,CACrC,EAAE,EAAE,UAAU,EACd,EAAE,EAAE,UAAU,EACd,KAAK,EAAE,WAAW,EAClB,cAAc,EAAE,MAAM,GACrB,OAAO,CAAC,IAAI,CAAC,CAqBf;AAuHD,wBAAsB,uBAAuB,CAC3C,EAAE,EAAE,UAAU,EACd,EAAE,EAAE,UAAU,EACd,KAAK,EAAE,WAAW,iBAYnB"}
+ {"version":3,"file":"init.d.ts","sourceRoot":"","sources":["../../../../../../../../zero-cache/src/services/change-source/pg/schema/init.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,kBAAkB,CAAC;AASjD,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,yBAAyB,CAAC;AACxD,OAAO,EAGL,KAAK,WAAW,EACjB,MAAM,6BAA6B,CAAC;AAarC;;GAEG;AACH,wBAAsB,iBAAiB,CACrC,EAAE,EAAE,UAAU,EACd,EAAE,EAAE,UAAU,EACd,KAAK,EAAE,WAAW,GACjB,OAAO,CAAC,IAAI,CAAC,CAgBf;AAED;;GAEG;AACH,wBAAsB,iBAAiB,CACrC,EAAE,EAAE,UAAU,EACd,EAAE,EAAE,UAAU,EACd,KAAK,EAAE,WAAW,EAClB,cAAc,EAAE,MAAM,GACrB,OAAO,CAAC,IAAI,CAAC,CAqBf;AA8HD,wBAAsB,uBAAuB,CAC3C,EAAE,EAAE,UAAU,EACd,EAAE,EAAE,UAAU,EACd,KAAK,EAAE,WAAW,iBAYnB"}
@@ -1,12 +1,12 @@
  import { assert } from "../../../../../../shared/src/asserts.js";
  import { parse } from "../../../../../../shared/src/valita.js";
  import { runSchemaMigrations, getVersionHistory } from "../../../../db/migration.js";
- import { upstreamSchema } from "../../../../types/shards.js";
+ import { upstreamSchema, appSchema } from "../../../../types/shards.js";
+ import { id } from "../../../../types/sql.js";
  import { AutoResetSignal } from "../../../change-streamer/schema/tables.js";
  import { decommissionShard } from "../decommission.js";
  import { publishedSchema } from "./published.js";
  import { getMutationsTableDefinition, metadataPublicationName, setupTriggers, legacyReplicationSlot, setupTablesAndReplication } from "./shard.js";
- import { id } from "../../../../types/sql.js";
  import { object, string } from "@badrap/valita";
  async function ensureShardSchema(lc, db, shard) {
  const initialSetup = {
@@ -149,6 +149,12 @@ function getIncrementalMigrations(shard, replicaVersion) {
  );
  lc.info?.("Upgraded schema with new mutations table");
  }
+ },
+ 11: {
+ migrateSchema: async (lc, sql) => {
+ await sql`DROP TABLE IF EXISTS ${sql(appSchema(shard))}."schemaVersions"`;
+ lc.info?.(`Dropped legacy schemaVersions table`);
+ }
  }
  };
  }
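The `init.js` hunk above adds incremental migration `11`, which drops the legacy `schemaVersions` table from the app schema (its creation is removed from `shard.js` further down). A minimal sketch of the statement it runs, assuming the default appID `zero` so that `appSchema(shard)` resolves to `zero`; the helper name here is hypothetical:

```ts
import postgres from 'postgres';

// Migration v11 issues a single DROP TABLE against the app schema.
// postgres.js's sql(name) helper quotes the dynamic schema identifier.
async function dropLegacySchemaVersions(
  sql: postgres.Sql,
  appSchemaName: string,
) {
  // For appSchemaName === 'zero' this executes:
  //   DROP TABLE IF EXISTS "zero"."schemaVersions"
  await sql`DROP TABLE IF EXISTS ${sql(appSchemaName)}."schemaVersions"`;
}
```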
@@ -1 +1 @@
- {"version":3,"file":"init.js","sources":["../../../../../../../../zero-cache/src/services/change-source/pg/schema/init.ts"],"sourcesContent":["import type {LogContext} from '@rocicorp/logger';\nimport {assert} from '../../../../../../shared/src/asserts.ts';\nimport * as v from '../../../../../../shared/src/valita.ts';\nimport {\n getVersionHistory,\n runSchemaMigrations,\n type IncrementalMigrationMap,\n type Migration,\n} from '../../../../db/migration.ts';\nimport type {PostgresDB} from '../../../../types/pg.ts';\nimport {upstreamSchema, type ShardConfig} from '../../../../types/shards.ts';\nimport {AutoResetSignal} from '../../../change-streamer/schema/tables.ts';\nimport {decommissionShard} from '../decommission.ts';\nimport {publishedSchema} from './published.ts';\nimport {\n getMutationsTableDefinition,\n legacyReplicationSlot,\n metadataPublicationName,\n setupTablesAndReplication,\n setupTriggers,\n} from './shard.ts';\nimport {id} from '../../../../types/sql.ts';\n\n/**\n * Ensures that a shard is set up for initial sync.\n */\nexport async function ensureShardSchema(\n lc: LogContext,\n db: PostgresDB,\n shard: ShardConfig,\n): Promise<void> {\n const initialSetup: Migration = {\n migrateSchema: (lc, tx) => setupTablesAndReplication(lc, tx, shard),\n minSafeVersion: 1,\n };\n await runSchemaMigrations(\n lc,\n `upstream-shard-${shard.appID}`,\n upstreamSchema(shard),\n db,\n initialSetup,\n // The incremental migration of any existing replicas will be replaced by\n // the incoming replica being synced, so the replicaVersion here is\n // unnecessary.\n getIncrementalMigrations(shard, 'obsolete'),\n );\n}\n\n/**\n * Updates the schema for an existing shard.\n */\nexport async function updateShardSchema(\n lc: LogContext,\n db: PostgresDB,\n shard: ShardConfig,\n replicaVersion: string,\n): Promise<void> {\n await runSchemaMigrations(\n lc,\n `upstream-shard-${shard.appID}`,\n upstreamSchema(shard),\n db,\n {\n // If the expected existing shard is absent, throw an\n // AutoResetSignal to backtrack and initial sync.\n migrateSchema: () => {\n throw new AutoResetSignal(\n `upstream shard ${upstreamSchema(shard)} is not initialized`,\n );\n },\n },\n getIncrementalMigrations(shard, replicaVersion),\n );\n\n // The decommission check is run in updateShardSchema so that it happens\n // after initial sync, and not when the shard schema is initially set up.\n await decommissionLegacyShard(lc, db, shard);\n}\n\nfunction getIncrementalMigrations(\n shard: ShardConfig,\n replicaVersion?: string,\n): IncrementalMigrationMap {\n const shardConfigTable = `${upstreamSchema(shard)}.shardConfig`;\n\n return {\n 4: {\n migrateSchema: () => {\n throw new AutoResetSignal('resetting to upgrade shard schema');\n },\n minSafeVersion: 3,\n },\n\n // v5 changes the upstream schema organization from \"zero_{SHARD_ID}\" to\n // the \"{APP_ID}_0\". An incremental migration indicates that the previous\n // SHARD_ID was \"0\" and the new APP_ID is \"zero\" (i.e. the default values\n // for those options). In this case, the upstream format is identical, and\n // no migration is necessary. 
However, the version is bumped to v5 to\n // indicate that it was created with the {APP_ID} configuration and should\n // not be decommissioned as a legacy shard.\n 5: {},\n\n 6: {\n migrateSchema: async (lc, sql) => {\n assert(\n replicaVersion,\n `replicaVersion is always passed for incremental migrations`,\n );\n await Promise.all([\n sql`\n ALTER TABLE ${sql(shardConfigTable)} ADD \"replicaVersion\" TEXT`,\n sql`\n UPDATE ${sql(shardConfigTable)} SET ${sql({replicaVersion})}`,\n ]);\n lc.info?.(\n `Recorded replicaVersion ${replicaVersion} in upstream shardConfig`,\n );\n },\n },\n\n // Updates the DDL event trigger protocol to v2, and adds support for\n // ALTER SCHEMA x RENAME TO y\n 7: {\n migrateSchema: async (lc, sql) => {\n const [{publications}] = await sql<{publications: string[]}[]>`\n SELECT publications FROM ${sql(shardConfigTable)}`;\n await setupTriggers(lc, sql, {...shard, publications});\n lc.info?.(`Upgraded to v2 event triggers`);\n },\n },\n\n // Adds support for non-disruptive resyncs, which tracks multiple\n // replicas with different slot names.\n 8: {\n migrateSchema: async (lc, sql) => {\n const legacyShardConfigSchema = v.object({\n replicaVersion: v.string().nullable(),\n initialSchema: publishedSchema.nullable(),\n });\n const result = await sql`\n SELECT \"replicaVersion\", \"initialSchema\" FROM ${sql(shardConfigTable)}`;\n assert(result.length === 1);\n const {replicaVersion, initialSchema} = v.parse(\n result[0],\n legacyShardConfigSchema,\n 'passthrough',\n );\n\n await Promise.all([\n sql`\n CREATE TABLE ${sql(upstreamSchema(shard))}.replicas (\n \"slot\" TEXT PRIMARY KEY,\n \"version\" TEXT NOT NULL,\n \"initialSchema\" JSON NOT NULL\n );\n `,\n sql`\n INSERT INTO ${sql(upstreamSchema(shard))}.replicas ${sql({\n slot: legacyReplicationSlot(shard),\n version: replicaVersion,\n initialSchema,\n })}\n `,\n sql`\n ALTER TABLE ${sql(shardConfigTable)} DROP \"replicaVersion\", DROP \"initialSchema\"\n `,\n ]);\n lc.info?.(`Upgraded schema to support non-disruptive resyncs`);\n },\n },\n\n // Fixes field ordering of compound indexes. This incremental migration\n // only fixes indexes resulting from new schema changes. A full resync is\n // required to fix existing indexes.\n 9: {\n migrateSchema: async (lc, sql) => {\n const [{publications}] = await sql<{publications: string[]}[]>`\n SELECT publications FROM ${sql(shardConfigTable)}`;\n await setupTriggers(lc, sql, {...shard, publications});\n lc.info?.(`Upgraded DDL event triggers`);\n },\n },\n\n // Adds the `mutations` table used to track mutation results.\n 10: {\n migrateSchema: async (lc, sql) => {\n await sql.unsafe(/*sql*/ `\n ${getMutationsTableDefinition(upstreamSchema(shard))}\n ALTER PUBLICATION ${id(metadataPublicationName(shard.appID, shard.shardNum))} ADD TABLE ${id(upstreamSchema(shard))}.\"mutations\";\n `);\n lc.info?.('Upgraded schema with new mutations table');\n },\n },\n };\n}\n\nexport async function decommissionLegacyShard(\n lc: LogContext,\n db: PostgresDB,\n shard: ShardConfig,\n) {\n if (shard.appID !== 'zero') {\n // When migration from non-default shard ids, e.g. \"zero_prod\" => \"prod_0\",\n // clean up the old \"zero_prod\" shard if it is pre-v5. 
Note that the v5\n // check is important to guard against cleaning up a **new** \"zero_0\" app\n // that coexists with the current App (with app-id === \"0\").\n const versionHistory = await getVersionHistory(db, `zero_${shard.appID}`);\n if (versionHistory !== null && versionHistory.schemaVersion < 5) {\n await decommissionShard(lc, db, 'zero', shard.appID);\n }\n }\n}\n"],"names":["lc","v.object","v.string","replicaVersion","v.parse"],"mappings":";;;;;;;;;;AA0BA,eAAsB,kBACpB,IACA,IACA,OACe;AACf,QAAM,eAA0B;AAAA,IAC9B,eAAe,CAACA,KAAI,OAAO,0BAA0BA,KAAI,IAAI,KAAK;AAAA,IAClE,gBAAgB;AAAA,EAAA;AAElB,QAAM;AAAA,IACJ;AAAA,IACA,kBAAkB,MAAM,KAAK;AAAA,IAC7B,eAAe,KAAK;AAAA,IACpB;AAAA,IACA;AAAA;AAAA;AAAA;AAAA,IAIA,yBAAyB,OAAO,UAAU;AAAA,EAAA;AAE9C;AAKA,eAAsB,kBACpB,IACA,IACA,OACA,gBACe;AACf,QAAM;AAAA,IACJ;AAAA,IACA,kBAAkB,MAAM,KAAK;AAAA,IAC7B,eAAe,KAAK;AAAA,IACpB;AAAA,IACA;AAAA;AAAA;AAAA,MAGE,eAAe,MAAM;AACnB,cAAM,IAAI;AAAA,UACR,kBAAkB,eAAe,KAAK,CAAC;AAAA,QAAA;AAAA,MAE3C;AAAA,IAAA;AAAA,IAEF,yBAAyB,OAAO,cAAc;AAAA,EAAA;AAKhD,QAAM,wBAAwB,IAAI,IAAI,KAAK;AAC7C;AAEA,SAAS,yBACP,OACA,gBACyB;AACzB,QAAM,mBAAmB,GAAG,eAAe,KAAK,CAAC;AAEjD,SAAO;AAAA,IACL,GAAG;AAAA,MACD,eAAe,MAAM;AACnB,cAAM,IAAI,gBAAgB,mCAAmC;AAAA,MAC/D;AAAA,MACA,gBAAgB;AAAA,IAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,IAUlB,GAAG,CAAA;AAAA,IAEH,GAAG;AAAA,MACD,eAAe,OAAO,IAAI,QAAQ;AAChC;AAAA,UACE;AAAA,UACA;AAAA,QAAA;AAEF,cAAM,QAAQ,IAAI;AAAA,UAChB;AAAA,wBACc,IAAI,gBAAgB,CAAC;AAAA,UACnC;AAAA,mBACS,IAAI,gBAAgB,CAAC,QAAQ,IAAI,EAAC,eAAA,CAAe,CAAC;AAAA,QAAA,CAC5D;AACD,WAAG;AAAA,UACD,2BAA2B,cAAc;AAAA,QAAA;AAAA,MAE7C;AAAA,IAAA;AAAA;AAAA;AAAA,IAKF,GAAG;AAAA,MACD,eAAe,OAAO,IAAI,QAAQ;AAChC,cAAM,CAAC,EAAC,cAAa,IAAI,MAAM;AAAA,qCACF,IAAI,gBAAgB,CAAC;AAClD,cAAM,cAAc,IAAI,KAAK,EAAC,GAAG,OAAO,cAAa;AACrD,WAAG,OAAO,+BAA+B;AAAA,MAC3C;AAAA,IAAA;AAAA;AAAA;AAAA,IAKF,GAAG;AAAA,MACD,eAAe,OAAO,IAAI,QAAQ;AAChC,cAAM,0BAA0BC,OAAS;AAAA,UACvC,gBAAgBC,OAAE,EAAS,SAAA;AAAA,UAC3B,eAAe,gBAAgB,SAAA;AAAA,QAAS,CACzC;AACD,cAAM,SAAS,MAAM;AAAA,0DAC6B,IAAI,gBAAgB,CAAC;AACvE,eAAO,OAAO,WAAW,CAAC;AAC1B,cAAM,EAAC,gBAAAC,iBAAgB,cAAA,IAAiBC;AAAAA,UACtC,OAAO,CAAC;AAAA,UACR;AAAA,UACA;AAAA,QAAA;AAGF,cAAM,QAAQ,IAAI;AAAA,UAChB;AAAA,yBACe,IAAI,eAAe,KAAK,CAAC,CAAC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,UAMzC;AAAA,wBACc,IAAI,eAAe,KAAK,CAAC,CAAC,aAAa,IAAI;AAAA,YACvD,MAAM,sBAAsB,KAAK;AAAA,YACjC,SAASD;AAAAA,YACT;AAAA,UAAA,CACD,CAAC;AAAA;AAAA,UAEF;AAAA,wBACc,IAAI,gBAAgB,CAAC;AAAA;AAAA,QAAA,CAEpC;AACD,WAAG,OAAO,mDAAmD;AAAA,MAC/D;AAAA,IAAA;AAAA;AAAA;AAAA;AAAA,IAMF,GAAG;AAAA,MACD,eAAe,OAAO,IAAI,QAAQ;AAChC,cAAM,CAAC,EAAC,cAAa,IAAI,MAAM;AAAA,qCACF,IAAI,gBAAgB,CAAC;AAClD,cAAM,cAAc,IAAI,KAAK,EAAC,GAAG,OAAO,cAAa;AACrD,WAAG,OAAO,6BAA6B;AAAA,MACzC;AAAA,IAAA;AAAA;AAAA,IAIF,IAAI;AAAA,MACF,eAAe,OAAO,IAAI,QAAQ;AAChC,cAAM,IAAI;AAAA;AAAA,UAAe;AAAA,YACrB,4BAA4B,eAAe,KAAK,CAAC,CAAC;AAAA,8BAChC,GAAG,wBAAwB,MAAM,OAAO,MAAM,QAAQ,CAAC,CAAC,cAAc,GAAG,eAAe,KAAK,CAAC,CAAC;AAAA;AAAA,QAAA;AAErH,WAAG,OAAO,0CAA0C;AAAA,MACtD;AAAA,IAAA;AAAA,EACF;AAEJ;AAEA,eAAsB,wBACpB,IACA,IACA,OACA;AACA,MAAI,MAAM,UAAU,QAAQ;AAK1B,UAAM,iBAAiB,MAAM,kBAAkB,IAAI,QAAQ,MAAM,KAAK,EAAE;AACxE,QAAI,mBAAmB,QAAQ,eAAe,gBAAgB,GAAG;AAC/D,YAAM,kBAAkB,IAAI,IAAI,QAAQ,MAAM,KAAK;AAAA,IACrD;AAAA,EACF;AACF;"}
+ {"version":3,"file":"init.js","sources":["../../../../../../../../zero-cache/src/services/change-source/pg/schema/init.ts"],"sourcesContent":["import type {LogContext} from '@rocicorp/logger';\nimport {assert} from '../../../../../../shared/src/asserts.ts';\nimport * as v from '../../../../../../shared/src/valita.ts';\nimport {\n getVersionHistory,\n runSchemaMigrations,\n type IncrementalMigrationMap,\n type Migration,\n} from '../../../../db/migration.ts';\nimport type {PostgresDB} from '../../../../types/pg.ts';\nimport {\n appSchema,\n upstreamSchema,\n type ShardConfig,\n} from '../../../../types/shards.ts';\nimport {id} from '../../../../types/sql.ts';\nimport {AutoResetSignal} from '../../../change-streamer/schema/tables.ts';\nimport {decommissionShard} from '../decommission.ts';\nimport {publishedSchema} from './published.ts';\nimport {\n getMutationsTableDefinition,\n legacyReplicationSlot,\n metadataPublicationName,\n setupTablesAndReplication,\n setupTriggers,\n} from './shard.ts';\n\n/**\n * Ensures that a shard is set up for initial sync.\n */\nexport async function ensureShardSchema(\n lc: LogContext,\n db: PostgresDB,\n shard: ShardConfig,\n): Promise<void> {\n const initialSetup: Migration = {\n migrateSchema: (lc, tx) => setupTablesAndReplication(lc, tx, shard),\n minSafeVersion: 1,\n };\n await runSchemaMigrations(\n lc,\n `upstream-shard-${shard.appID}`,\n upstreamSchema(shard),\n db,\n initialSetup,\n // The incremental migration of any existing replicas will be replaced by\n // the incoming replica being synced, so the replicaVersion here is\n // unnecessary.\n getIncrementalMigrations(shard, 'obsolete'),\n );\n}\n\n/**\n * Updates the schema for an existing shard.\n */\nexport async function updateShardSchema(\n lc: LogContext,\n db: PostgresDB,\n shard: ShardConfig,\n replicaVersion: string,\n): Promise<void> {\n await runSchemaMigrations(\n lc,\n `upstream-shard-${shard.appID}`,\n upstreamSchema(shard),\n db,\n {\n // If the expected existing shard is absent, throw an\n // AutoResetSignal to backtrack and initial sync.\n migrateSchema: () => {\n throw new AutoResetSignal(\n `upstream shard ${upstreamSchema(shard)} is not initialized`,\n );\n },\n },\n getIncrementalMigrations(shard, replicaVersion),\n );\n\n // The decommission check is run in updateShardSchema so that it happens\n // after initial sync, and not when the shard schema is initially set up.\n await decommissionLegacyShard(lc, db, shard);\n}\n\nfunction getIncrementalMigrations(\n shard: ShardConfig,\n replicaVersion?: string,\n): IncrementalMigrationMap {\n const shardConfigTable = `${upstreamSchema(shard)}.shardConfig`;\n\n return {\n 4: {\n migrateSchema: () => {\n throw new AutoResetSignal('resetting to upgrade shard schema');\n },\n minSafeVersion: 3,\n },\n\n // v5 changes the upstream schema organization from \"zero_{SHARD_ID}\" to\n // the \"{APP_ID}_0\". An incremental migration indicates that the previous\n // SHARD_ID was \"0\" and the new APP_ID is \"zero\" (i.e. the default values\n // for those options). In this case, the upstream format is identical, and\n // no migration is necessary. 
However, the version is bumped to v5 to\n // indicate that it was created with the {APP_ID} configuration and should\n // not be decommissioned as a legacy shard.\n 5: {},\n\n 6: {\n migrateSchema: async (lc, sql) => {\n assert(\n replicaVersion,\n `replicaVersion is always passed for incremental migrations`,\n );\n await Promise.all([\n sql`\n ALTER TABLE ${sql(shardConfigTable)} ADD \"replicaVersion\" TEXT`,\n sql`\n UPDATE ${sql(shardConfigTable)} SET ${sql({replicaVersion})}`,\n ]);\n lc.info?.(\n `Recorded replicaVersion ${replicaVersion} in upstream shardConfig`,\n );\n },\n },\n\n // Updates the DDL event trigger protocol to v2, and adds support for\n // ALTER SCHEMA x RENAME TO y\n 7: {\n migrateSchema: async (lc, sql) => {\n const [{publications}] = await sql<{publications: string[]}[]>`\n SELECT publications FROM ${sql(shardConfigTable)}`;\n await setupTriggers(lc, sql, {...shard, publications});\n lc.info?.(`Upgraded to v2 event triggers`);\n },\n },\n\n // Adds support for non-disruptive resyncs, which tracks multiple\n // replicas with different slot names.\n 8: {\n migrateSchema: async (lc, sql) => {\n const legacyShardConfigSchema = v.object({\n replicaVersion: v.string().nullable(),\n initialSchema: publishedSchema.nullable(),\n });\n const result = await sql`\n SELECT \"replicaVersion\", \"initialSchema\" FROM ${sql(shardConfigTable)}`;\n assert(result.length === 1);\n const {replicaVersion, initialSchema} = v.parse(\n result[0],\n legacyShardConfigSchema,\n 'passthrough',\n );\n\n await Promise.all([\n sql`\n CREATE TABLE ${sql(upstreamSchema(shard))}.replicas (\n \"slot\" TEXT PRIMARY KEY,\n \"version\" TEXT NOT NULL,\n \"initialSchema\" JSON NOT NULL\n );\n `,\n sql`\n INSERT INTO ${sql(upstreamSchema(shard))}.replicas ${sql({\n slot: legacyReplicationSlot(shard),\n version: replicaVersion,\n initialSchema,\n })}\n `,\n sql`\n ALTER TABLE ${sql(shardConfigTable)} DROP \"replicaVersion\", DROP \"initialSchema\"\n `,\n ]);\n lc.info?.(`Upgraded schema to support non-disruptive resyncs`);\n },\n },\n\n // Fixes field ordering of compound indexes. This incremental migration\n // only fixes indexes resulting from new schema changes. A full resync is\n // required to fix existing indexes.\n 9: {\n migrateSchema: async (lc, sql) => {\n const [{publications}] = await sql<{publications: string[]}[]>`\n SELECT publications FROM ${sql(shardConfigTable)}`;\n await setupTriggers(lc, sql, {...shard, publications});\n lc.info?.(`Upgraded DDL event triggers`);\n },\n },\n\n // Adds the `mutations` table used to track mutation results.\n 10: {\n migrateSchema: async (lc, sql) => {\n await sql.unsafe(/*sql*/ `\n ${getMutationsTableDefinition(upstreamSchema(shard))}\n ALTER PUBLICATION ${id(metadataPublicationName(shard.appID, shard.shardNum))} ADD TABLE ${id(upstreamSchema(shard))}.\"mutations\";\n `);\n lc.info?.('Upgraded schema with new mutations table');\n },\n },\n\n 11: {\n migrateSchema: async (lc, sql) => {\n await sql`DROP TABLE IF EXISTS ${sql(appSchema(shard))}.\"schemaVersions\"`;\n lc.info?.(`Dropped legacy schemaVersions table`);\n },\n },\n };\n}\n\nexport async function decommissionLegacyShard(\n lc: LogContext,\n db: PostgresDB,\n shard: ShardConfig,\n) {\n if (shard.appID !== 'zero') {\n // When migration from non-default shard ids, e.g. \"zero_prod\" => \"prod_0\",\n // clean up the old \"zero_prod\" shard if it is pre-v5. 
Note that the v5\n // check is important to guard against cleaning up a **new** \"zero_0\" app\n // that coexists with the current App (with app-id === \"0\").\n const versionHistory = await getVersionHistory(db, `zero_${shard.appID}`);\n if (versionHistory !== null && versionHistory.schemaVersion < 5) {\n await decommissionShard(lc, db, 'zero', shard.appID);\n }\n }\n}\n"],"names":["lc","v.object","v.string","replicaVersion","v.parse"],"mappings":";;;;;;;;;;AA8BA,eAAsB,kBACpB,IACA,IACA,OACe;AACf,QAAM,eAA0B;AAAA,IAC9B,eAAe,CAACA,KAAI,OAAO,0BAA0BA,KAAI,IAAI,KAAK;AAAA,IAClE,gBAAgB;AAAA,EAAA;AAElB,QAAM;AAAA,IACJ;AAAA,IACA,kBAAkB,MAAM,KAAK;AAAA,IAC7B,eAAe,KAAK;AAAA,IACpB;AAAA,IACA;AAAA;AAAA;AAAA;AAAA,IAIA,yBAAyB,OAAO,UAAU;AAAA,EAAA;AAE9C;AAKA,eAAsB,kBACpB,IACA,IACA,OACA,gBACe;AACf,QAAM;AAAA,IACJ;AAAA,IACA,kBAAkB,MAAM,KAAK;AAAA,IAC7B,eAAe,KAAK;AAAA,IACpB;AAAA,IACA;AAAA;AAAA;AAAA,MAGE,eAAe,MAAM;AACnB,cAAM,IAAI;AAAA,UACR,kBAAkB,eAAe,KAAK,CAAC;AAAA,QAAA;AAAA,MAE3C;AAAA,IAAA;AAAA,IAEF,yBAAyB,OAAO,cAAc;AAAA,EAAA;AAKhD,QAAM,wBAAwB,IAAI,IAAI,KAAK;AAC7C;AAEA,SAAS,yBACP,OACA,gBACyB;AACzB,QAAM,mBAAmB,GAAG,eAAe,KAAK,CAAC;AAEjD,SAAO;AAAA,IACL,GAAG;AAAA,MACD,eAAe,MAAM;AACnB,cAAM,IAAI,gBAAgB,mCAAmC;AAAA,MAC/D;AAAA,MACA,gBAAgB;AAAA,IAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,IAUlB,GAAG,CAAA;AAAA,IAEH,GAAG;AAAA,MACD,eAAe,OAAO,IAAI,QAAQ;AAChC;AAAA,UACE;AAAA,UACA;AAAA,QAAA;AAEF,cAAM,QAAQ,IAAI;AAAA,UAChB;AAAA,wBACc,IAAI,gBAAgB,CAAC;AAAA,UACnC;AAAA,mBACS,IAAI,gBAAgB,CAAC,QAAQ,IAAI,EAAC,eAAA,CAAe,CAAC;AAAA,QAAA,CAC5D;AACD,WAAG;AAAA,UACD,2BAA2B,cAAc;AAAA,QAAA;AAAA,MAE7C;AAAA,IAAA;AAAA;AAAA;AAAA,IAKF,GAAG;AAAA,MACD,eAAe,OAAO,IAAI,QAAQ;AAChC,cAAM,CAAC,EAAC,cAAa,IAAI,MAAM;AAAA,qCACF,IAAI,gBAAgB,CAAC;AAClD,cAAM,cAAc,IAAI,KAAK,EAAC,GAAG,OAAO,cAAa;AACrD,WAAG,OAAO,+BAA+B;AAAA,MAC3C;AAAA,IAAA;AAAA;AAAA;AAAA,IAKF,GAAG;AAAA,MACD,eAAe,OAAO,IAAI,QAAQ;AAChC,cAAM,0BAA0BC,OAAS;AAAA,UACvC,gBAAgBC,OAAE,EAAS,SAAA;AAAA,UAC3B,eAAe,gBAAgB,SAAA;AAAA,QAAS,CACzC;AACD,cAAM,SAAS,MAAM;AAAA,0DAC6B,IAAI,gBAAgB,CAAC;AACvE,eAAO,OAAO,WAAW,CAAC;AAC1B,cAAM,EAAC,gBAAAC,iBAAgB,cAAA,IAAiBC;AAAAA,UACtC,OAAO,CAAC;AAAA,UACR;AAAA,UACA;AAAA,QAAA;AAGF,cAAM,QAAQ,IAAI;AAAA,UAChB;AAAA,yBACe,IAAI,eAAe,KAAK,CAAC,CAAC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,UAMzC;AAAA,wBACc,IAAI,eAAe,KAAK,CAAC,CAAC,aAAa,IAAI;AAAA,YACvD,MAAM,sBAAsB,KAAK;AAAA,YACjC,SAASD;AAAAA,YACT;AAAA,UAAA,CACD,CAAC;AAAA;AAAA,UAEF;AAAA,wBACc,IAAI,gBAAgB,CAAC;AAAA;AAAA,QAAA,CAEpC;AACD,WAAG,OAAO,mDAAmD;AAAA,MAC/D;AAAA,IAAA;AAAA;AAAA;AAAA;AAAA,IAMF,GAAG;AAAA,MACD,eAAe,OAAO,IAAI,QAAQ;AAChC,cAAM,CAAC,EAAC,cAAa,IAAI,MAAM;AAAA,qCACF,IAAI,gBAAgB,CAAC;AAClD,cAAM,cAAc,IAAI,KAAK,EAAC,GAAG,OAAO,cAAa;AACrD,WAAG,OAAO,6BAA6B;AAAA,MACzC;AAAA,IAAA;AAAA;AAAA,IAIF,IAAI;AAAA,MACF,eAAe,OAAO,IAAI,QAAQ;AAChC,cAAM,IAAI;AAAA;AAAA,UAAe;AAAA,YACrB,4BAA4B,eAAe,KAAK,CAAC,CAAC;AAAA,8BAChC,GAAG,wBAAwB,MAAM,OAAO,MAAM,QAAQ,CAAC,CAAC,cAAc,GAAG,eAAe,KAAK,CAAC,CAAC;AAAA;AAAA,QAAA;AAErH,WAAG,OAAO,0CAA0C;AAAA,MACtD;AAAA,IAAA;AAAA,IAGF,IAAI;AAAA,MACF,eAAe,OAAO,IAAI,QAAQ;AAChC,cAAM,2BAA2B,IAAI,UAAU,KAAK,CAAC,CAAC;AACtD,WAAG,OAAO,qCAAqC;AAAA,MACjD;AAAA,IAAA;AAAA,EACF;AAEJ;AAEA,eAAsB,wBACpB,IACA,IACA,OACA;AACA,MAAI,MAAM,UAAU,QAAQ;AAK1B,UAAM,iBAAiB,MAAM,kBAAkB,IAAI,QAAQ,MAAM,KAAK,EAAE;AACxE,QAAI,mBAAmB,QAAQ,eAAe,gBAAgB,GAAG;AAC/D,YAAM,kBAAkB,IAAI,IAAI,QAAQ,MAAM,KAAK;AAAA,IACrD;AAAA,EACF;AACF;"}
@@ -1 +1 @@
- {"version":3,"file":"shard.d.ts","sourceRoot":"","sources":["../../../../../../../../zero-cache/src/services/change-source/pg/schema/shard.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,kBAAkB,CAAC;AAIjD,OAAO,KAAK,CAAC,MAAM,wCAAwC,CAAC;AAE5D,OAAO,KAAK,EAAC,UAAU,EAAE,mBAAmB,EAAC,MAAM,yBAAyB,CAAC;AAC7E,OAAO,KAAK,EAAC,KAAK,EAAE,WAAW,EAAE,OAAO,EAAC,MAAM,6BAA6B,CAAC;AAI7E,OAAO,EAGL,KAAK,eAAe,EACpB,KAAK,eAAe,EACrB,MAAM,gBAAgB,CAAC;AAGxB,wBAAgB,yBAAyB,CAAC,EAAC,KAAK,EAAC,EAAE,KAAK,UAEvD;AAED,wBAAgB,qBAAqB,CAAC,EAAC,KAAK,EAAE,QAAQ,EAAC,EAAE,OAAO,UAE/D;AAED,wBAAgB,qBAAqB,CAAC,KAAK,EAAE,OAAO,UAGnD;AAED;;;GAGG;AACH,wBAAgB,yBAAyB,CAAC,KAAK,EAAE,OAAO,UAIvD;AAED,wBAAgB,kBAAkB,CAAC,KAAK,EAAE,OAAO,UAEhD;AAMD,wBAAgB,uBAAuB,CACrC,KAAK,EAAE,MAAM,EACb,OAAO,EAAE,MAAM,GAAG,MAAM,UAGzB;AAiDD,wBAAsB,kBAAkB,CAAC,EAAE,EAAE,UAAU,EAAE,KAAK,EAAE,KAAK,iBAEpE;AAED,wBAAgB,yBAAyB,CAAC,MAAM,EAAE,MAAM,UASvD;AAED;;;;;;;GAOG;AACH,wBAAgB,2BAA2B,CAAC,MAAM,EAAE,MAAM,UASzD;AAED,eAAO,MAAM,kBAAkB,gBAAgB,CAAC;AAEhD,wBAAgB,UAAU,CACxB,WAAW,EAAE,WAAW,EACxB,mBAAmB,EAAE,MAAM,GAC1B,MAAM,CAuCR;AAED,wBAAgB,SAAS,CAAC,KAAK,EAAE,MAAM,EAAE,OAAO,EAAE,MAAM,GAAG,MAAM,GAAG,MAAM,CAYzE;AAED,QAAA,MAAM,yBAAyB;;;aAG7B,CAAC;AAEH,MAAM,MAAM,mBAAmB,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,yBAAyB,CAAC,CAAC;AAE5E,QAAA,MAAM,aAAa;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;aAIjB,CAAC;AAEH,MAAM,MAAM,OAAO,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,aAAa,CAAC,CAAC;AAcpD,wBAAsB,UAAU,CAC9B,GAAG,EAAE,UAAU,EACf,KAAK,EAAE,OAAO,EACd,IAAI,EAAE,MAAM,EACZ,cAAc,EAAE,MAAM,EACtB,EAAC,MAAM,EAAE,OAAO,EAAC,EAAE,eAAe,iBAOnC;AAED,wBAAsB,mBAAmB,CACvC,EAAE,EAAE,UAAU,EACd,GAAG,EAAE,UAAU,EACf,KAAK,EAAE,OAAO,EACd,cAAc,EAAE,MAAM,GACrB,OAAO,CAAC,OAAO,GAAG,IAAI,CAAC,CAczB;AAED,wBAAsB,sBAAsB,CAC1C,GAAG,EAAE,UAAU,EACf,KAAK,EAAE,OAAO,GACb,OAAO,CAAC,mBAAmB,CAAC,CAO9B;AAED;;;GAGG;AACH,wBAAsB,yBAAyB,CAC7C,EAAE,EAAE,UAAU,EACd,GAAG,EAAE,mBAAmB,EACxB,SAAS,EAAE,WAAW,iBAwDvB;AAED,wBAAsB,aAAa,CACjC,EAAE,EAAE,UAAU,EACd,EAAE,EAAE,mBAAmB,EACvB,KAAK,EAAE,WAAW,iBAqBnB;AAED,wBAAgB,oBAAoB,CAClC,EAAE,EAAE,UAAU,EACd,SAAS,EAAE,eAAe,QAkB3B;AAED,KAAK,iBAAiB,GAAG;IACvB,KAAK,CAAC,EAAE,EAAE,UAAU,EAAE,EAAE,EAAE,UAAU,GAAG,OAAO,CAAC,IAAI,CAAC,CAAC;CACtD,CAAC;AAEF,wBAAgB,4CAA4C,CAC1D,IAAI,EAAE,eAAe,GACpB,iBAAiB,GAAG,SAAS,CA+C/B"}
+ {"version":3,"file":"shard.d.ts","sourceRoot":"","sources":["../../../../../../../../zero-cache/src/services/change-source/pg/schema/shard.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,kBAAkB,CAAC;AAIjD,OAAO,KAAK,CAAC,MAAM,wCAAwC,CAAC;AAE5D,OAAO,KAAK,EAAC,UAAU,EAAE,mBAAmB,EAAC,MAAM,yBAAyB,CAAC;AAC7E,OAAO,KAAK,EAAC,KAAK,EAAE,WAAW,EAAE,OAAO,EAAC,MAAM,6BAA6B,CAAC;AAI7E,OAAO,EAGL,KAAK,eAAe,EACpB,KAAK,eAAe,EACrB,MAAM,gBAAgB,CAAC;AAGxB,wBAAgB,yBAAyB,CAAC,EAAC,KAAK,EAAC,EAAE,KAAK,UAEvD;AAED,wBAAgB,qBAAqB,CAAC,EAAC,KAAK,EAAE,QAAQ,EAAC,EAAE,OAAO,UAE/D;AAED,wBAAgB,qBAAqB,CAAC,KAAK,EAAE,OAAO,UAGnD;AAED;;;GAGG;AACH,wBAAgB,yBAAyB,CAAC,KAAK,EAAE,OAAO,UAIvD;AAED,wBAAgB,kBAAkB,CAAC,KAAK,EAAE,OAAO,UAEhD;AAMD,wBAAgB,uBAAuB,CACrC,KAAK,EAAE,MAAM,EACb,OAAO,EAAE,MAAM,GAAG,MAAM,UAGzB;AAoCD,wBAAsB,kBAAkB,CAAC,EAAE,EAAE,UAAU,EAAE,KAAK,EAAE,KAAK,iBAEpE;AAED,wBAAgB,yBAAyB,CAAC,MAAM,EAAE,MAAM,UASvD;AAED;;;;;;;GAOG;AACH,wBAAgB,2BAA2B,CAAC,MAAM,EAAE,MAAM,UASzD;AAED,eAAO,MAAM,kBAAkB,gBAAgB,CAAC;AAEhD,wBAAgB,UAAU,CACxB,WAAW,EAAE,WAAW,EACxB,mBAAmB,EAAE,MAAM,GAC1B,MAAM,CAuCR;AAED,wBAAgB,SAAS,CAAC,KAAK,EAAE,MAAM,EAAE,OAAO,EAAE,MAAM,GAAG,MAAM,GAAG,MAAM,CAYzE;AAED,QAAA,MAAM,yBAAyB;;;aAG7B,CAAC;AAEH,MAAM,MAAM,mBAAmB,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,yBAAyB,CAAC,CAAC;AAE5E,QAAA,MAAM,aAAa;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;aAIjB,CAAC;AAEH,MAAM,MAAM,OAAO,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,aAAa,CAAC,CAAC;AAcpD,wBAAsB,UAAU,CAC9B,GAAG,EAAE,UAAU,EACf,KAAK,EAAE,OAAO,EACd,IAAI,EAAE,MAAM,EACZ,cAAc,EAAE,MAAM,EACtB,EAAC,MAAM,EAAE,OAAO,EAAC,EAAE,eAAe,iBAOnC;AAED,wBAAsB,mBAAmB,CACvC,EAAE,EAAE,UAAU,EACd,GAAG,EAAE,UAAU,EACf,KAAK,EAAE,OAAO,EACd,cAAc,EAAE,MAAM,GACrB,OAAO,CAAC,OAAO,GAAG,IAAI,CAAC,CAczB;AAED,wBAAsB,sBAAsB,CAC1C,GAAG,EAAE,UAAU,EACf,KAAK,EAAE,OAAO,GACb,OAAO,CAAC,mBAAmB,CAAC,CAO9B;AAED;;;GAGG;AACH,wBAAsB,yBAAyB,CAC7C,EAAE,EAAE,UAAU,EACd,GAAG,EAAE,mBAAmB,EACxB,SAAS,EAAE,WAAW,iBAwDvB;AAED,wBAAsB,aAAa,CACjC,EAAE,EAAE,UAAU,EACd,EAAE,EAAE,mBAAmB,EACvB,KAAK,EAAE,WAAW,iBAqBnB;AAED,wBAAgB,oBAAoB,CAClC,EAAE,EAAE,UAAU,EACd,SAAS,EAAE,eAAe,QAkB3B;AAED,KAAK,iBAAiB,GAAG;IACvB,KAAK,CAAC,EAAE,EAAE,UAAU,EAAE,EAAE,EAAE,UAAU,GAAG,OAAO,CAAC,IAAI,CAAC,CAAC;CACtD,CAAC;AAEF,wBAAgB,4CAA4C,CAC1D,IAAI,EAAE,eAAe,GACpB,iBAAiB,GAAG,SAAS,CA+C/B"}
@@ -39,19 +39,6 @@ function globalSetup(appID) {
39
39
  `
40
40
  CREATE SCHEMA IF NOT EXISTS ${app};
41
41
 
42
- CREATE TABLE IF NOT EXISTS ${app}."schemaVersions" (
43
- "minSupportedVersion" INT4,
44
- "maxSupportedVersion" INT4,
45
-
46
- -- Ensure that there is only a single row in the table.
47
- -- Application code can be agnostic to this column, and
48
- -- simply invoke UPDATE statements on the version columns.
49
- "lock" BOOL PRIMARY KEY DEFAULT true CHECK (lock)
50
- );
51
-
52
- INSERT INTO ${app}."schemaVersions" ("lock", "minSupportedVersion", "maxSupportedVersion")
- VALUES (true, 1, 1) ON CONFLICT DO NOTHING;
-
  CREATE TABLE IF NOT EXISTS ${app}.permissions (
  "permissions" JSONB,
  "hash" TEXT,
@@ -124,7 +111,7 @@ function shardSetup(shardConfig, metadataPublication) {
 
  DROP PUBLICATION IF EXISTS ${id(metadataPublication)};
  CREATE PUBLICATION ${id(metadataPublication)}
- FOR TABLE ${app}."schemaVersions", ${app}."permissions", TABLE ${shard}."clients", ${shard}."mutations";
+ FOR TABLE ${app}."permissions", TABLE ${shard}."clients", ${shard}."mutations";
 
  CREATE TABLE ${shard}."${SHARD_CONFIG_TABLE}" (
  "publications" TEXT[] NOT NULL,
@@ -1 +1 @@
- {"version":3,"file":"shard.js","sources":["../../../../../../../../zero-cache/src/services/change-source/pg/schema/shard.ts"],"sourcesContent":["import {PG_INSUFFICIENT_PRIVILEGE} from '@drdgvhbh/postgres-error-codes';\nimport type {LogContext} from '@rocicorp/logger';\nimport {literal} from 'pg-format';\nimport postgres from 'postgres';\nimport {assert} from '../../../../../../shared/src/asserts.ts';\nimport * as v from '../../../../../../shared/src/valita.ts';\nimport {Default} from '../../../../db/postgres-replica-identity-enum.ts';\nimport type {PostgresDB, PostgresTransaction} from '../../../../types/pg.ts';\nimport type {AppID, ShardConfig, ShardID} from '../../../../types/shards.ts';\nimport {appSchema, check, upstreamSchema} from '../../../../types/shards.ts';\nimport {id} from '../../../../types/sql.ts';\nimport {createEventTriggerStatements} from './ddl.ts';\nimport {\n getPublicationInfo,\n publishedSchema,\n type PublicationInfo,\n type PublishedSchema,\n} from './published.ts';\nimport {validate} from './validation.ts';\n\nexport function internalPublicationPrefix({appID}: AppID) {\n return `_${appID}_`;\n}\n\nexport function legacyReplicationSlot({appID, shardNum}: ShardID) {\n return `${appID}_${shardNum}`;\n}\n\nexport function replicationSlotPrefix(shard: ShardID) {\n const {appID, shardNum} = check(shard);\n return `${appID}_${shardNum}_`;\n}\n\n/**\n * An expression used to match replication slots in the shard\n * in a Postgres `LIKE` operator.\n */\nexport function replicationSlotExpression(shard: ShardID) {\n // Underscores have a special meaning in LIKE values\n // so they have to be escaped.\n return `${replicationSlotPrefix(shard)}%`.replaceAll('_', '\\\\_');\n}\n\nexport function newReplicationSlot(shard: ShardID) {\n return replicationSlotPrefix(shard) + Date.now();\n}\n\nfunction defaultPublicationName(appID: string, shardID: string | number) {\n return `_${appID}_public_${shardID}`;\n}\n\nexport function metadataPublicationName(\n appID: string,\n shardID: string | number,\n) {\n return `_${appID}_metadata_${shardID}`;\n}\n\n// The GLOBAL_SETUP must be idempotent as it can be run multiple times for different shards.\nfunction globalSetup(appID: AppID): string {\n const app = id(appSchema(appID));\n\n return /*sql*/ `\n CREATE SCHEMA IF NOT EXISTS ${app};\n\n CREATE TABLE IF NOT EXISTS ${app}.\"schemaVersions\" (\n \"minSupportedVersion\" INT4,\n \"maxSupportedVersion\" INT4,\n\n -- Ensure that there is only a single row in the table.\n -- Application code can be agnostic to this column, and\n -- simply invoke UPDATE statements on the version columns.\n \"lock\" BOOL PRIMARY KEY DEFAULT true CHECK (lock)\n );\n\n INSERT INTO ${app}.\"schemaVersions\" (\"lock\", \"minSupportedVersion\", \"maxSupportedVersion\")\n VALUES (true, 1, 1) ON CONFLICT DO NOTHING;\n\n CREATE TABLE IF NOT EXISTS ${app}.permissions (\n \"permissions\" JSONB,\n \"hash\" TEXT,\n\n -- Ensure that there is only a single row in the table.\n -- Application code can be agnostic to this column, and\n -- simply invoke UPDATE statements on the version columns.\n \"lock\" BOOL PRIMARY KEY DEFAULT true CHECK (lock)\n );\n\n CREATE OR REPLACE FUNCTION ${app}.set_permissions_hash()\n RETURNS TRIGGER AS $$\n BEGIN\n NEW.hash = md5(NEW.permissions::text);\n RETURN NEW;\n END;\n $$ LANGUAGE plpgsql;\n\n CREATE OR REPLACE TRIGGER on_set_permissions \n BEFORE INSERT OR UPDATE ON ${app}.permissions\n FOR EACH ROW\n EXECUTE FUNCTION ${app}.set_permissions_hash();\n\n INSERT INTO ${app}.permissions 
(permissions) VALUES (NULL) ON CONFLICT DO NOTHING;\n`;\n}\n\nexport async function ensureGlobalTables(db: PostgresDB, appID: AppID) {\n await db.unsafe(globalSetup(appID));\n}\n\nexport function getClientsTableDefinition(schema: string) {\n return /*sql*/ `\n CREATE TABLE ${schema}.\"clients\" (\n \"clientGroupID\" TEXT NOT NULL,\n \"clientID\" TEXT NOT NULL,\n \"lastMutationID\" BIGINT NOT NULL,\n \"userID\" TEXT,\n PRIMARY KEY(\"clientGroupID\", \"clientID\")\n );`;\n}\n\n/**\n * Tracks the results of mutations.\n * 1. It is an error for the same mutation ID to be used twice.\n * 2. The result is JSONB to allow for arbitrary results.\n *\n * The tables must be cleaned up as the clients\n * receive the mutation responses and as clients are removed.\n */\nexport function getMutationsTableDefinition(schema: string) {\n return /*sql*/ `\n CREATE TABLE ${schema}.\"mutations\" (\n \"clientGroupID\" TEXT NOT NULL,\n \"clientID\" TEXT NOT NULL,\n \"mutationID\" BIGINT NOT NULL,\n \"result\" JSON NOT NULL,\n PRIMARY KEY(\"clientGroupID\", \"clientID\", \"mutationID\")\n );`;\n}\n\nexport const SHARD_CONFIG_TABLE = 'shardConfig';\n\nexport function shardSetup(\n shardConfig: ShardConfig,\n metadataPublication: string,\n): string {\n const app = id(appSchema(shardConfig));\n const shard = id(upstreamSchema(shardConfig));\n\n const pubs = [...shardConfig.publications].sort();\n assert(pubs.includes(metadataPublication));\n\n return /*sql*/ `\n CREATE SCHEMA IF NOT EXISTS ${shard};\n\n ${getClientsTableDefinition(shard)}\n ${getMutationsTableDefinition(shard)}\n\n DROP PUBLICATION IF EXISTS ${id(metadataPublication)};\n CREATE PUBLICATION ${id(metadataPublication)}\n FOR TABLE ${app}.\"schemaVersions\", ${app}.\"permissions\", TABLE ${shard}.\"clients\", ${shard}.\"mutations\";\n\n CREATE TABLE ${shard}.\"${SHARD_CONFIG_TABLE}\" (\n \"publications\" TEXT[] NOT NULL,\n \"ddlDetection\" BOOL NOT NULL,\n\n -- Ensure that there is only a single row in the table.\n \"lock\" BOOL PRIMARY KEY DEFAULT true CHECK (lock)\n );\n\n INSERT INTO ${shard}.\"${SHARD_CONFIG_TABLE}\" (\n \"publications\",\n \"ddlDetection\" \n ) VALUES (\n ARRAY[${literal(pubs)}], \n false -- set in SAVEPOINT with triggerSetup() statements\n );\n\n CREATE TABLE ${shard}.replicas (\n \"slot\" TEXT PRIMARY KEY,\n \"version\" TEXT NOT NULL,\n \"initialSchema\" JSON NOT NULL\n );\n `;\n}\n\nexport function dropShard(appID: string, shardID: string | number): string {\n const schema = `${appID}_${shardID}`;\n const metadataPublication = metadataPublicationName(appID, shardID);\n const defaultPublication = defaultPublicationName(appID, shardID);\n\n // DROP SCHEMA ... CASCADE does not drop dependent PUBLICATIONS,\n // so PUBLICATIONs must be dropped explicitly.\n return /*sql*/ `\n DROP PUBLICATION IF EXISTS ${id(defaultPublication)};\n DROP PUBLICATION IF EXISTS ${id(metadataPublication)};\n DROP SCHEMA IF EXISTS ${id(schema)} CASCADE;\n `;\n}\n\nconst internalShardConfigSchema = v.object({\n publications: v.array(v.string()),\n ddlDetection: v.boolean(),\n});\n\nexport type InternalShardConfig = v.Infer<typeof internalShardConfigSchema>;\n\nconst replicaSchema = internalShardConfigSchema.extend({\n slot: v.string(),\n version: v.string(),\n initialSchema: publishedSchema,\n});\n\nexport type Replica = v.Infer<typeof replicaSchema>;\n\n// triggerSetup is run separately in a sub-transaction (i.e. SAVEPOINT) so\n// that a failure (e.g. 
due to lack of superuser permissions) can be handled\n// by continuing in a degraded mode (ddlDetection = false).\nfunction triggerSetup(shard: ShardConfig): string {\n const schema = id(upstreamSchema(shard));\n return (\n createEventTriggerStatements(shard) +\n /*sql*/ `UPDATE ${schema}.\"shardConfig\" SET \"ddlDetection\" = true;`\n );\n}\n\n// Called in initial-sync to store the exact schema that was initially synced.\nexport async function addReplica(\n sql: PostgresDB,\n shard: ShardID,\n slot: string,\n replicaVersion: string,\n {tables, indexes}: PublishedSchema,\n) {\n const schema = upstreamSchema(shard);\n const synced: PublishedSchema = {tables, indexes};\n await sql`\n INSERT INTO ${sql(schema)}.replicas (\"slot\", \"version\", \"initialSchema\")\n VALUES (${slot}, ${replicaVersion}, ${synced})`;\n}\n\nexport async function getReplicaAtVersion(\n lc: LogContext,\n sql: PostgresDB,\n shard: ShardID,\n replicaVersion: string,\n): Promise<Replica | null> {\n const schema = sql(upstreamSchema(shard));\n const result = await sql`\n SELECT * FROM ${schema}.replicas JOIN ${schema}.\"shardConfig\" ON true\n WHERE version = ${replicaVersion};\n `;\n if (result.length === 0) {\n // log out all the replicas and the joined shardConfig\n const allReplicas = await sql`\n SELECT * FROM ${schema}.replicas JOIN ${schema}.\"shardConfig\" ON true`;\n lc.debug?.(`Replica not found in: ${JSON.stringify(allReplicas)}`);\n return null;\n }\n return v.parse(result[0], replicaSchema, 'passthrough');\n}\n\nexport async function getInternalShardConfig(\n sql: PostgresDB,\n shard: ShardID,\n): Promise<InternalShardConfig> {\n const result = await sql`\n SELECT \"publications\", \"ddlDetection\"\n FROM ${sql(upstreamSchema(shard))}.\"shardConfig\";\n `;\n assert(result.length === 1);\n return v.parse(result[0], internalShardConfigSchema, 'passthrough');\n}\n\n/**\n * Sets up and returns all publications (including internal ones) for\n * the given shard.\n */\nexport async function setupTablesAndReplication(\n lc: LogContext,\n sql: PostgresTransaction,\n requested: ShardConfig,\n) {\n const {publications} = requested;\n // Validate requested publications.\n for (const pub of publications) {\n if (pub.startsWith('_')) {\n throw new Error(\n `Publication names starting with \"_\" are reserved for internal use.\\n` +\n `Please use a different name for publication \"${pub}\".`,\n );\n }\n }\n const allPublications: string[] = [];\n\n // Setup application publications.\n if (publications.length) {\n const results = await sql<{pubname: string}[]>`\n SELECT pubname from pg_publication WHERE pubname IN ${sql(\n publications,\n )}`.values();\n\n if (results.length !== publications.length) {\n throw new Error(\n `Unknown or invalid publications. Specified: [${publications}]. 
Found: [${results.flat()}]`,\n );\n }\n allPublications.push(...publications);\n } else {\n const defaultPublication = defaultPublicationName(\n requested.appID,\n requested.shardNum,\n );\n await sql`\n DROP PUBLICATION IF EXISTS ${sql(defaultPublication)}`;\n await sql`\n CREATE PUBLICATION ${sql(defaultPublication)} \n FOR TABLES IN SCHEMA public\n WITH (publish_via_partition_root = true)`;\n allPublications.push(defaultPublication);\n }\n\n const metadataPublication = metadataPublicationName(\n requested.appID,\n requested.shardNum,\n );\n allPublications.push(metadataPublication);\n\n const shard = {...requested, publications: allPublications};\n\n // Setup the global tables and shard tables / publications.\n await sql.unsafe(globalSetup(shard) + shardSetup(shard, metadataPublication));\n\n const pubs = await getPublicationInfo(sql, allPublications);\n await replicaIdentitiesForTablesWithoutPrimaryKeys(pubs)?.apply(lc, sql);\n\n await setupTriggers(lc, sql, shard);\n}\n\nexport async function setupTriggers(\n lc: LogContext,\n tx: PostgresTransaction,\n shard: ShardConfig,\n) {\n try {\n await tx.savepoint(sub => sub.unsafe(triggerSetup(shard)));\n } catch (e) {\n if (\n !(\n e instanceof postgres.PostgresError &&\n e.code === PG_INSUFFICIENT_PRIVILEGE\n )\n ) {\n throw e;\n }\n // If triggerSetup() fails, replication continues in ddlDetection=false mode.\n lc.warn?.(\n `Unable to create event triggers for schema change detection:\\n\\n` +\n `\"${e.hint ?? e.message}\"\\n\\n` +\n `Proceeding in degraded mode: schema changes will halt replication,\\n` +\n `requiring the replica to be reset (manually or with --auto-reset).`,\n );\n }\n}\n\nexport function validatePublications(\n lc: LogContext,\n published: PublicationInfo,\n) {\n // Verify that all publications export the proper events.\n published.publications.forEach(pub => {\n if (\n !pub.pubinsert ||\n !pub.pubtruncate ||\n !pub.pubdelete ||\n !pub.pubtruncate\n ) {\n // TODO: Make APIError?\n throw new Error(\n `PUBLICATION ${pub.pubname} must publish insert, update, delete, and truncate`,\n );\n }\n });\n\n published.tables.forEach(table => validate(lc, table, published.indexes));\n}\n\ntype ReplicaIdentities = {\n apply(lc: LogContext, db: PostgresDB): Promise<void>;\n};\n\nexport function replicaIdentitiesForTablesWithoutPrimaryKeys(\n pubs: PublishedSchema,\n): ReplicaIdentities | undefined {\n const replicaIdentities: {\n schema: string;\n tableName: string;\n indexName: string;\n }[] = [];\n for (const table of pubs.tables) {\n if (!table.primaryKey?.length && table.replicaIdentity === Default) {\n // Look for an index that can serve as the REPLICA IDENTITY USING INDEX. It must be:\n // - UNIQUE\n // - NOT NULL columns\n // - not deferrable (i.e. 
isImmediate)\n // - not partial (are already filtered out)\n //\n // https://www.postgresql.org/docs/current/sql-altertable.html#SQL-ALTERTABLE-REPLICA-IDENTITY\n const {schema, name: tableName} = table;\n for (const {columns, name: indexName} of pubs.indexes.filter(\n idx =>\n idx.schema === schema &&\n idx.tableName === tableName &&\n idx.unique &&\n idx.isImmediate,\n )) {\n if (Object.keys(columns).some(col => !table.columns[col].notNull)) {\n continue; // Only indexes with all NOT NULL columns are suitable.\n }\n replicaIdentities.push({schema, tableName, indexName});\n break;\n }\n }\n }\n\n if (replicaIdentities.length === 0) {\n return undefined;\n }\n return {\n apply: async (lc: LogContext, sql: PostgresDB) => {\n for (const {schema, tableName, indexName} of replicaIdentities) {\n lc.info?.(\n `setting \"${indexName}\" as the REPLICA IDENTITY for \"${tableName}\"`,\n );\n await sql`\n ALTER TABLE ${sql(schema)}.${sql(tableName)} \n REPLICA IDENTITY USING INDEX ${sql(indexName)}`;\n }\n },\n };\n}\n"],"names":["v.object","v.array","v.string","v.boolean","v.parse"],"mappings":";;;;;;;;;;;;AAoBO,SAAS,0BAA0B,EAAC,SAAe;AACxD,SAAO,IAAI,KAAK;AAClB;AAEO,SAAS,sBAAsB,EAAC,OAAO,YAAoB;AAChE,SAAO,GAAG,KAAK,IAAI,QAAQ;AAC7B;AAEO,SAAS,sBAAsB,OAAgB;AACpD,QAAM,EAAC,OAAO,aAAY,MAAM,KAAK;AACrC,SAAO,GAAG,KAAK,IAAI,QAAQ;AAC7B;AAMO,SAAS,0BAA0B,OAAgB;AAGxD,SAAO,GAAG,sBAAsB,KAAK,CAAC,IAAI,WAAW,KAAK,KAAK;AACjE;AAEO,SAAS,mBAAmB,OAAgB;AACjD,SAAO,sBAAsB,KAAK,IAAI,KAAK,IAAA;AAC7C;AAEA,SAAS,uBAAuB,OAAe,SAA0B;AACvE,SAAO,IAAI,KAAK,WAAW,OAAO;AACpC;AAEO,SAAS,wBACd,OACA,SACA;AACA,SAAO,IAAI,KAAK,aAAa,OAAO;AACtC;AAGA,SAAS,YAAY,OAAsB;AACzC,QAAM,MAAM,GAAG,UAAU,KAAK,CAAC;AAE/B;AAAA;AAAA,IAAe;AAAA,gCACe,GAAG;AAAA;AAAA,+BAEJ,GAAG;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,gBAUlB,GAAG;AAAA;AAAA;AAAA,+BAGY,GAAG;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,+BAUH,GAAG;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,iCASD,GAAG;AAAA;AAAA,uBAEb,GAAG;AAAA;AAAA,gBAEV,GAAG;AAAA;AAAA;AAEnB;AAEA,eAAsB,mBAAmB,IAAgB,OAAc;AACrE,QAAM,GAAG,OAAO,YAAY,KAAK,CAAC;AACpC;AAEO,SAAS,0BAA0B,QAAgB;AACxD;AAAA;AAAA,IAAe;AAAA,iBACA,MAAM;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAOvB;AAUO,SAAS,4BAA4B,QAAgB;AAC1D;AAAA;AAAA,IAAe;AAAA,iBACA,MAAM;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAOvB;AAEO,MAAM,qBAAqB;AAE3B,SAAS,WACd,aACA,qBACQ;AACR,QAAM,MAAM,GAAG,UAAU,WAAW,CAAC;AACrC,QAAM,QAAQ,GAAG,eAAe,WAAW,CAAC;AAE5C,QAAM,OAAO,CAAC,GAAG,YAAY,YAAY,EAAE,KAAA;AAC3C,SAAO,KAAK,SAAS,mBAAmB,CAAC;AAEzC;AAAA;AAAA,IAAe;AAAA,gCACe,KAAK;AAAA;AAAA,IAEjC,0BAA0B,KAAK,CAAC;AAAA,IAChC,4BAA4B,KAAK,CAAC;AAAA;AAAA,+BAEP,GAAG,mBAAmB,CAAC;AAAA,uBAC/B,GAAG,mBAAmB,CAAC;AAAA,gBAC9B,GAAG,sBAAsB,GAAG,yBAAyB,KAAK,eAAe,KAAK;AAAA;AAAA,iBAE7E,KAAK,KAAK,kBAAkB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,gBAQ7B,KAAK,KAAK,kBAAkB;AAAA;AAAA;AAAA;AAAA,cAI9B,QAAQ,IAAI,CAAC;AAAA;AAAA;AAAA;AAAA,iBAIV,KAAK;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAMtB;AAEO,SAAS,UAAU,OAAe,SAAkC;AACzE,QAAM,SAAS,GAAG,KAAK,IAAI,OAAO;AAClC,QAAM,sBAAsB,wBAAwB,OAAO,OAAO;AAClE,QAAM,qBAAqB,uBAAuB,OAAO,OAAO;AAIhE;AAAA;AAAA,IAAe;AAAA,iCACgB,GAAG,kBAAkB,CAAC;AAAA,iCACtB,GAAG,mBAAmB,CAAC;AAAA,4BAC5B,GAAG,MAAM,CAAC;AAAA;AAAA;AAEtC;AAEA,MAAM,4BAA4BA,OAAS;AAAA,EACzC,cAAcC,MAAQC,QAAU;AAAA,EAChC,cAAcC,QAAE;AAClB,CAAC;AAID,MAAM,gBAAgB,0BAA0B,OAAO;AAAA,EACrD,MAAMD,OAAE;AAAA,EACR,SAASA,OAAE;AAAA,EACX,eAAe;AACjB,CAAC;AAOD,SAAS,aAAa,OAA4B;AAChD,QAAM,SAAS,GAAG,eAAe,KAAK,CAAC;AACvC,SACE,6BAA6B,KAAK;AAAA,EAC1B,UAAU,MAAM;AAE5B;AAGA,eAAsB,WACpB,KACA,OACA,MACA,gBACA,EAAC,QAAQ,WACT;AACA,QAAM,SAAS,eAAe,KAAK;AACnC,QAAM,SAA0B,EAAC,QAAQ,QAAA;AACzC,QAAM;AAAA,kBACU,IAAI,MA
AM,CAAC;AAAA,gBACb,IAAI,KAAK,cAAc,KAAK,MAAM;AAClD;AAEA,eAAsB,oBACpB,IACA,KACA,OACA,gBACyB;AACzB,QAAM,SAAS,IAAI,eAAe,KAAK,CAAC;AACxC,QAAM,SAAS,MAAM;AAAA,oBACH,MAAM,kBAAkB,MAAM;AAAA,wBAC1B,cAAc;AAAA;AAEpC,MAAI,OAAO,WAAW,GAAG;AAEvB,UAAM,cAAc,MAAM;AAAA,sBACR,MAAM,kBAAkB,MAAM;AAChD,OAAG,QAAQ,yBAAyB,KAAK,UAAU,WAAW,CAAC,EAAE;AACjE,WAAO;AAAA,EACT;AACA,SAAOE,MAAQ,OAAO,CAAC,GAAG,eAAe,aAAa;AACxD;AAEA,eAAsB,uBACpB,KACA,OAC8B;AAC9B,QAAM,SAAS,MAAM;AAAA;AAAA,aAEV,IAAI,eAAe,KAAK,CAAC,CAAC;AAAA;AAErC,SAAO,OAAO,WAAW,CAAC;AAC1B,SAAOA,MAAQ,OAAO,CAAC,GAAG,2BAA2B,aAAa;AACpE;AAMA,eAAsB,0BACpB,IACA,KACA,WACA;AACA,QAAM,EAAC,iBAAgB;AAEvB,aAAW,OAAO,cAAc;AAC9B,QAAI,IAAI,WAAW,GAAG,GAAG;AACvB,YAAM,IAAI;AAAA,QACR;AAAA,+CACkD,GAAG;AAAA,MAAA;AAAA,IAEzD;AAAA,EACF;AACA,QAAM,kBAA4B,CAAA;AAGlC,MAAI,aAAa,QAAQ;AACvB,UAAM,UAAU,MAAM;AAAA,0DACgC;AAAA,MACpD;AAAA,IAAA,CACD,GAAG,OAAA;AAEJ,QAAI,QAAQ,WAAW,aAAa,QAAQ;AAC1C,YAAM,IAAI;AAAA,QACR,gDAAgD,YAAY,cAAc,QAAQ,MAAM;AAAA,MAAA;AAAA,IAE5F;AACA,oBAAgB,KAAK,GAAG,YAAY;AAAA,EACtC,OAAO;AACL,UAAM,qBAAqB;AAAA,MACzB,UAAU;AAAA,MACV,UAAU;AAAA,IAAA;AAEZ,UAAM;AAAA,mCACyB,IAAI,kBAAkB,CAAC;AACtD,UAAM;AAAA,2BACiB,IAAI,kBAAkB,CAAC;AAAA;AAAA;AAG9C,oBAAgB,KAAK,kBAAkB;AAAA,EACzC;AAEA,QAAM,sBAAsB;AAAA,IAC1B,UAAU;AAAA,IACV,UAAU;AAAA,EAAA;AAEZ,kBAAgB,KAAK,mBAAmB;AAExC,QAAM,QAAQ,EAAC,GAAG,WAAW,cAAc,gBAAA;AAG3C,QAAM,IAAI,OAAO,YAAY,KAAK,IAAI,WAAW,OAAO,mBAAmB,CAAC;AAE5E,QAAM,OAAO,MAAM,mBAAmB,KAAK,eAAe;AAC1D,QAAM,6CAA6C,IAAI,GAAG,MAAM,IAAI,GAAG;AAEvE,QAAM,cAAc,IAAI,KAAK,KAAK;AACpC;AAEA,eAAsB,cACpB,IACA,IACA,OACA;AACA,MAAI;AACF,UAAM,GAAG,UAAU,CAAA,QAAO,IAAI,OAAO,aAAa,KAAK,CAAC,CAAC;AAAA,EAC3D,SAAS,GAAG;AACV,QACE,EACE,aAAa,SAAS,iBACtB,EAAE,SAAS,4BAEb;AACA,YAAM;AAAA,IACR;AAEA,OAAG;AAAA,MACD;AAAA;AAAA,GACM,EAAE,QAAQ,EAAE,OAAO;AAAA;AAAA;AAAA;AAAA,IAAA;AAAA,EAI7B;AACF;AAEO,SAAS,qBACd,IACA,WACA;AAEA,YAAU,aAAa,QAAQ,CAAA,QAAO;AACpC,QACE,CAAC,IAAI,aACL,CAAC,IAAI,eACL,CAAC,IAAI,aACL,CAAC,IAAI,aACL;AAEA,YAAM,IAAI;AAAA,QACR,eAAe,IAAI,OAAO;AAAA,MAAA;AAAA,IAE9B;AAAA,EACF,CAAC;AAED,YAAU,OAAO,QAAQ,CAAA,UAAS,SAAS,IAAI,OAAO,UAAU,OAAO,CAAC;AAC1E;AAMO,SAAS,6CACd,MAC+B;AAC/B,QAAM,oBAIA,CAAA;AACN,aAAW,SAAS,KAAK,QAAQ;AAC/B,QAAI,CAAC,MAAM,YAAY,UAAU,MAAM,oBAAoB,SAAS;AAQlE,YAAM,EAAC,QAAQ,MAAM,UAAA,IAAa;AAClC,iBAAW,EAAC,SAAS,MAAM,UAAA,KAAc,KAAK,QAAQ;AAAA,QACpD,CAAA,QACE,IAAI,WAAW,UACf,IAAI,cAAc,aAClB,IAAI,UACJ,IAAI;AAAA,MAAA,GACL;AACD,YAAI,OAAO,KAAK,OAAO,EAAE,KAAK,CAAA,QAAO,CAAC,MAAM,QAAQ,GAAG,EAAE,OAAO,GAAG;AACjE;AAAA,QACF;AACA,0BAAkB,KAAK,EAAC,QAAQ,WAAW,WAAU;AACrD;AAAA,MACF;AAAA,IACF;AAAA,EACF;AAEA,MAAI,kBAAkB,WAAW,GAAG;AAClC,WAAO;AAAA,EACT;AACA,SAAO;AAAA,IACL,OAAO,OAAO,IAAgB,QAAoB;AAChD,iBAAW,EAAC,QAAQ,WAAW,UAAA,KAAc,mBAAmB;AAC9D,WAAG;AAAA,UACD,YAAY,SAAS,kCAAkC,SAAS;AAAA,QAAA;AAElE,cAAM;AAAA,sBACQ,IAAI,MAAM,CAAC,IAAI,IAAI,SAAS,CAAC;AAAA,yCACV,IAAI,SAAS,CAAC;AAAA,MACjD;AAAA,IACF;AAAA,EAAA;AAEJ;"}
+ {"version":3,"file":"shard.js","sources":["../../../../../../../../zero-cache/src/services/change-source/pg/schema/shard.ts"],"sourcesContent":["import {PG_INSUFFICIENT_PRIVILEGE} from '@drdgvhbh/postgres-error-codes';\nimport type {LogContext} from '@rocicorp/logger';\nimport {literal} from 'pg-format';\nimport postgres from 'postgres';\nimport {assert} from '../../../../../../shared/src/asserts.ts';\nimport * as v from '../../../../../../shared/src/valita.ts';\nimport {Default} from '../../../../db/postgres-replica-identity-enum.ts';\nimport type {PostgresDB, PostgresTransaction} from '../../../../types/pg.ts';\nimport type {AppID, ShardConfig, ShardID} from '../../../../types/shards.ts';\nimport {appSchema, check, upstreamSchema} from '../../../../types/shards.ts';\nimport {id} from '../../../../types/sql.ts';\nimport {createEventTriggerStatements} from './ddl.ts';\nimport {\n getPublicationInfo,\n publishedSchema,\n type PublicationInfo,\n type PublishedSchema,\n} from './published.ts';\nimport {validate} from './validation.ts';\n\nexport function internalPublicationPrefix({appID}: AppID) {\n return `_${appID}_`;\n}\n\nexport function legacyReplicationSlot({appID, shardNum}: ShardID) {\n return `${appID}_${shardNum}`;\n}\n\nexport function replicationSlotPrefix(shard: ShardID) {\n const {appID, shardNum} = check(shard);\n return `${appID}_${shardNum}_`;\n}\n\n/**\n * An expression used to match replication slots in the shard\n * in a Postgres `LIKE` operator.\n */\nexport function replicationSlotExpression(shard: ShardID) {\n // Underscores have a special meaning in LIKE values\n // so they have to be escaped.\n return `${replicationSlotPrefix(shard)}%`.replaceAll('_', '\\\\_');\n}\n\nexport function newReplicationSlot(shard: ShardID) {\n return replicationSlotPrefix(shard) + Date.now();\n}\n\nfunction defaultPublicationName(appID: string, shardID: string | number) {\n return `_${appID}_public_${shardID}`;\n}\n\nexport function metadataPublicationName(\n appID: string,\n shardID: string | number,\n) {\n return `_${appID}_metadata_${shardID}`;\n}\n\n// The GLOBAL_SETUP must be idempotent as it can be run multiple times for different shards.\nfunction globalSetup(appID: AppID): string {\n const app = id(appSchema(appID));\n\n return /*sql*/ `\n CREATE SCHEMA IF NOT EXISTS ${app};\n\n CREATE TABLE IF NOT EXISTS ${app}.permissions (\n \"permissions\" JSONB,\n \"hash\" TEXT,\n\n -- Ensure that there is only a single row in the table.\n -- Application code can be agnostic to this column, and\n -- simply invoke UPDATE statements on the version columns.\n \"lock\" BOOL PRIMARY KEY DEFAULT true CHECK (lock)\n );\n\n CREATE OR REPLACE FUNCTION ${app}.set_permissions_hash()\n RETURNS TRIGGER AS $$\n BEGIN\n NEW.hash = md5(NEW.permissions::text);\n RETURN NEW;\n END;\n $$ LANGUAGE plpgsql;\n\n CREATE OR REPLACE TRIGGER on_set_permissions \n BEFORE INSERT OR UPDATE ON ${app}.permissions\n FOR EACH ROW\n EXECUTE FUNCTION ${app}.set_permissions_hash();\n\n INSERT INTO ${app}.permissions (permissions) VALUES (NULL) ON CONFLICT DO NOTHING;\n`;\n}\n\nexport async function ensureGlobalTables(db: PostgresDB, appID: AppID) {\n await db.unsafe(globalSetup(appID));\n}\n\nexport function getClientsTableDefinition(schema: string) {\n return /*sql*/ `\n CREATE TABLE ${schema}.\"clients\" (\n \"clientGroupID\" TEXT NOT NULL,\n \"clientID\" TEXT NOT NULL,\n \"lastMutationID\" BIGINT NOT NULL,\n \"userID\" TEXT,\n PRIMARY KEY(\"clientGroupID\", \"clientID\")\n );`;\n}\n\n/**\n * Tracks the results of 
mutations.\n * 1. It is an error for the same mutation ID to be used twice.\n * 2. The result is JSONB to allow for arbitrary results.\n *\n * The tables must be cleaned up as the clients\n * receive the mutation responses and as clients are removed.\n */\nexport function getMutationsTableDefinition(schema: string) {\n return /*sql*/ `\n CREATE TABLE ${schema}.\"mutations\" (\n \"clientGroupID\" TEXT NOT NULL,\n \"clientID\" TEXT NOT NULL,\n \"mutationID\" BIGINT NOT NULL,\n \"result\" JSON NOT NULL,\n PRIMARY KEY(\"clientGroupID\", \"clientID\", \"mutationID\")\n );`;\n}\n\nexport const SHARD_CONFIG_TABLE = 'shardConfig';\n\nexport function shardSetup(\n shardConfig: ShardConfig,\n metadataPublication: string,\n): string {\n const app = id(appSchema(shardConfig));\n const shard = id(upstreamSchema(shardConfig));\n\n const pubs = [...shardConfig.publications].sort();\n assert(pubs.includes(metadataPublication));\n\n return /*sql*/ `\n CREATE SCHEMA IF NOT EXISTS ${shard};\n\n ${getClientsTableDefinition(shard)}\n ${getMutationsTableDefinition(shard)}\n\n DROP PUBLICATION IF EXISTS ${id(metadataPublication)};\n CREATE PUBLICATION ${id(metadataPublication)}\n FOR TABLE ${app}.\"permissions\", TABLE ${shard}.\"clients\", ${shard}.\"mutations\";\n\n CREATE TABLE ${shard}.\"${SHARD_CONFIG_TABLE}\" (\n \"publications\" TEXT[] NOT NULL,\n \"ddlDetection\" BOOL NOT NULL,\n\n -- Ensure that there is only a single row in the table.\n \"lock\" BOOL PRIMARY KEY DEFAULT true CHECK (lock)\n );\n\n INSERT INTO ${shard}.\"${SHARD_CONFIG_TABLE}\" (\n \"publications\",\n \"ddlDetection\" \n ) VALUES (\n ARRAY[${literal(pubs)}], \n false -- set in SAVEPOINT with triggerSetup() statements\n );\n\n CREATE TABLE ${shard}.replicas (\n \"slot\" TEXT PRIMARY KEY,\n \"version\" TEXT NOT NULL,\n \"initialSchema\" JSON NOT NULL\n );\n `;\n}\n\nexport function dropShard(appID: string, shardID: string | number): string {\n const schema = `${appID}_${shardID}`;\n const metadataPublication = metadataPublicationName(appID, shardID);\n const defaultPublication = defaultPublicationName(appID, shardID);\n\n // DROP SCHEMA ... CASCADE does not drop dependent PUBLICATIONS,\n // so PUBLICATIONs must be dropped explicitly.\n return /*sql*/ `\n DROP PUBLICATION IF EXISTS ${id(defaultPublication)};\n DROP PUBLICATION IF EXISTS ${id(metadataPublication)};\n DROP SCHEMA IF EXISTS ${id(schema)} CASCADE;\n `;\n}\n\nconst internalShardConfigSchema = v.object({\n publications: v.array(v.string()),\n ddlDetection: v.boolean(),\n});\n\nexport type InternalShardConfig = v.Infer<typeof internalShardConfigSchema>;\n\nconst replicaSchema = internalShardConfigSchema.extend({\n slot: v.string(),\n version: v.string(),\n initialSchema: publishedSchema,\n});\n\nexport type Replica = v.Infer<typeof replicaSchema>;\n\n// triggerSetup is run separately in a sub-transaction (i.e. SAVEPOINT) so\n// that a failure (e.g. 
due to lack of superuser permissions) can be handled\n// by continuing in a degraded mode (ddlDetection = false).\nfunction triggerSetup(shard: ShardConfig): string {\n const schema = id(upstreamSchema(shard));\n return (\n createEventTriggerStatements(shard) +\n /*sql*/ `UPDATE ${schema}.\"shardConfig\" SET \"ddlDetection\" = true;`\n );\n}\n\n// Called in initial-sync to store the exact schema that was initially synced.\nexport async function addReplica(\n sql: PostgresDB,\n shard: ShardID,\n slot: string,\n replicaVersion: string,\n {tables, indexes}: PublishedSchema,\n) {\n const schema = upstreamSchema(shard);\n const synced: PublishedSchema = {tables, indexes};\n await sql`\n INSERT INTO ${sql(schema)}.replicas (\"slot\", \"version\", \"initialSchema\")\n VALUES (${slot}, ${replicaVersion}, ${synced})`;\n}\n\nexport async function getReplicaAtVersion(\n lc: LogContext,\n sql: PostgresDB,\n shard: ShardID,\n replicaVersion: string,\n): Promise<Replica | null> {\n const schema = sql(upstreamSchema(shard));\n const result = await sql`\n SELECT * FROM ${schema}.replicas JOIN ${schema}.\"shardConfig\" ON true\n WHERE version = ${replicaVersion};\n `;\n if (result.length === 0) {\n // log out all the replicas and the joined shardConfig\n const allReplicas = await sql`\n SELECT * FROM ${schema}.replicas JOIN ${schema}.\"shardConfig\" ON true`;\n lc.debug?.(`Replica not found in: ${JSON.stringify(allReplicas)}`);\n return null;\n }\n return v.parse(result[0], replicaSchema, 'passthrough');\n}\n\nexport async function getInternalShardConfig(\n sql: PostgresDB,\n shard: ShardID,\n): Promise<InternalShardConfig> {\n const result = await sql`\n SELECT \"publications\", \"ddlDetection\"\n FROM ${sql(upstreamSchema(shard))}.\"shardConfig\";\n `;\n assert(result.length === 1);\n return v.parse(result[0], internalShardConfigSchema, 'passthrough');\n}\n\n/**\n * Sets up and returns all publications (including internal ones) for\n * the given shard.\n */\nexport async function setupTablesAndReplication(\n lc: LogContext,\n sql: PostgresTransaction,\n requested: ShardConfig,\n) {\n const {publications} = requested;\n // Validate requested publications.\n for (const pub of publications) {\n if (pub.startsWith('_')) {\n throw new Error(\n `Publication names starting with \"_\" are reserved for internal use.\\n` +\n `Please use a different name for publication \"${pub}\".`,\n );\n }\n }\n const allPublications: string[] = [];\n\n // Setup application publications.\n if (publications.length) {\n const results = await sql<{pubname: string}[]>`\n SELECT pubname from pg_publication WHERE pubname IN ${sql(\n publications,\n )}`.values();\n\n if (results.length !== publications.length) {\n throw new Error(\n `Unknown or invalid publications. Specified: [${publications}]. 
Found: [${results.flat()}]`,\n );\n }\n allPublications.push(...publications);\n } else {\n const defaultPublication = defaultPublicationName(\n requested.appID,\n requested.shardNum,\n );\n await sql`\n DROP PUBLICATION IF EXISTS ${sql(defaultPublication)}`;\n await sql`\n CREATE PUBLICATION ${sql(defaultPublication)} \n FOR TABLES IN SCHEMA public\n WITH (publish_via_partition_root = true)`;\n allPublications.push(defaultPublication);\n }\n\n const metadataPublication = metadataPublicationName(\n requested.appID,\n requested.shardNum,\n );\n allPublications.push(metadataPublication);\n\n const shard = {...requested, publications: allPublications};\n\n // Setup the global tables and shard tables / publications.\n await sql.unsafe(globalSetup(shard) + shardSetup(shard, metadataPublication));\n\n const pubs = await getPublicationInfo(sql, allPublications);\n await replicaIdentitiesForTablesWithoutPrimaryKeys(pubs)?.apply(lc, sql);\n\n await setupTriggers(lc, sql, shard);\n}\n\nexport async function setupTriggers(\n lc: LogContext,\n tx: PostgresTransaction,\n shard: ShardConfig,\n) {\n try {\n await tx.savepoint(sub => sub.unsafe(triggerSetup(shard)));\n } catch (e) {\n if (\n !(\n e instanceof postgres.PostgresError &&\n e.code === PG_INSUFFICIENT_PRIVILEGE\n )\n ) {\n throw e;\n }\n // If triggerSetup() fails, replication continues in ddlDetection=false mode.\n lc.warn?.(\n `Unable to create event triggers for schema change detection:\\n\\n` +\n `\"${e.hint ?? e.message}\"\\n\\n` +\n `Proceeding in degraded mode: schema changes will halt replication,\\n` +\n `requiring the replica to be reset (manually or with --auto-reset).`,\n );\n }\n}\n\nexport function validatePublications(\n lc: LogContext,\n published: PublicationInfo,\n) {\n // Verify that all publications export the proper events.\n published.publications.forEach(pub => {\n if (\n !pub.pubinsert ||\n !pub.pubtruncate ||\n !pub.pubdelete ||\n !pub.pubtruncate\n ) {\n // TODO: Make APIError?\n throw new Error(\n `PUBLICATION ${pub.pubname} must publish insert, update, delete, and truncate`,\n );\n }\n });\n\n published.tables.forEach(table => validate(lc, table, published.indexes));\n}\n\ntype ReplicaIdentities = {\n apply(lc: LogContext, db: PostgresDB): Promise<void>;\n};\n\nexport function replicaIdentitiesForTablesWithoutPrimaryKeys(\n pubs: PublishedSchema,\n): ReplicaIdentities | undefined {\n const replicaIdentities: {\n schema: string;\n tableName: string;\n indexName: string;\n }[] = [];\n for (const table of pubs.tables) {\n if (!table.primaryKey?.length && table.replicaIdentity === Default) {\n // Look for an index that can serve as the REPLICA IDENTITY USING INDEX. It must be:\n // - UNIQUE\n // - NOT NULL columns\n // - not deferrable (i.e. 
isImmediate)\n // - not partial (are already filtered out)\n //\n // https://www.postgresql.org/docs/current/sql-altertable.html#SQL-ALTERTABLE-REPLICA-IDENTITY\n const {schema, name: tableName} = table;\n for (const {columns, name: indexName} of pubs.indexes.filter(\n idx =>\n idx.schema === schema &&\n idx.tableName === tableName &&\n idx.unique &&\n idx.isImmediate,\n )) {\n if (Object.keys(columns).some(col => !table.columns[col].notNull)) {\n continue; // Only indexes with all NOT NULL columns are suitable.\n }\n replicaIdentities.push({schema, tableName, indexName});\n break;\n }\n }\n }\n\n if (replicaIdentities.length === 0) {\n return undefined;\n }\n return {\n apply: async (lc: LogContext, sql: PostgresDB) => {\n for (const {schema, tableName, indexName} of replicaIdentities) {\n lc.info?.(\n `setting \"${indexName}\" as the REPLICA IDENTITY for \"${tableName}\"`,\n );\n await sql`\n ALTER TABLE ${sql(schema)}.${sql(tableName)} \n REPLICA IDENTITY USING INDEX ${sql(indexName)}`;\n }\n },\n };\n}\n"],"names":["v.object","v.array","v.string","v.boolean","v.parse"],"mappings":";;;;;;;;;;;;AAoBO,SAAS,0BAA0B,EAAC,SAAe;AACxD,SAAO,IAAI,KAAK;AAClB;AAEO,SAAS,sBAAsB,EAAC,OAAO,YAAoB;AAChE,SAAO,GAAG,KAAK,IAAI,QAAQ;AAC7B;AAEO,SAAS,sBAAsB,OAAgB;AACpD,QAAM,EAAC,OAAO,aAAY,MAAM,KAAK;AACrC,SAAO,GAAG,KAAK,IAAI,QAAQ;AAC7B;AAMO,SAAS,0BAA0B,OAAgB;AAGxD,SAAO,GAAG,sBAAsB,KAAK,CAAC,IAAI,WAAW,KAAK,KAAK;AACjE;AAEO,SAAS,mBAAmB,OAAgB;AACjD,SAAO,sBAAsB,KAAK,IAAI,KAAK,IAAA;AAC7C;AAEA,SAAS,uBAAuB,OAAe,SAA0B;AACvE,SAAO,IAAI,KAAK,WAAW,OAAO;AACpC;AAEO,SAAS,wBACd,OACA,SACA;AACA,SAAO,IAAI,KAAK,aAAa,OAAO;AACtC;AAGA,SAAS,YAAY,OAAsB;AACzC,QAAM,MAAM,GAAG,UAAU,KAAK,CAAC;AAE/B;AAAA;AAAA,IAAe;AAAA,gCACe,GAAG;AAAA;AAAA,+BAEJ,GAAG;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,+BAUH,GAAG;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,iCASD,GAAG;AAAA;AAAA,uBAEb,GAAG;AAAA;AAAA,gBAEV,GAAG;AAAA;AAAA;AAEnB;AAEA,eAAsB,mBAAmB,IAAgB,OAAc;AACrE,QAAM,GAAG,OAAO,YAAY,KAAK,CAAC;AACpC;AAEO,SAAS,0BAA0B,QAAgB;AACxD;AAAA;AAAA,IAAe;AAAA,iBACA,MAAM;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAOvB;AAUO,SAAS,4BAA4B,QAAgB;AAC1D;AAAA;AAAA,IAAe;AAAA,iBACA,MAAM;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAOvB;AAEO,MAAM,qBAAqB;AAE3B,SAAS,WACd,aACA,qBACQ;AACR,QAAM,MAAM,GAAG,UAAU,WAAW,CAAC;AACrC,QAAM,QAAQ,GAAG,eAAe,WAAW,CAAC;AAE5C,QAAM,OAAO,CAAC,GAAG,YAAY,YAAY,EAAE,KAAA;AAC3C,SAAO,KAAK,SAAS,mBAAmB,CAAC;AAEzC;AAAA;AAAA,IAAe;AAAA,gCACe,KAAK;AAAA;AAAA,IAEjC,0BAA0B,KAAK,CAAC;AAAA,IAChC,4BAA4B,KAAK,CAAC;AAAA;AAAA,+BAEP,GAAG,mBAAmB,CAAC;AAAA,uBAC/B,GAAG,mBAAmB,CAAC;AAAA,gBAC9B,GAAG,yBAAyB,KAAK,eAAe,KAAK;AAAA;AAAA,iBAEpD,KAAK,KAAK,kBAAkB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,gBAQ7B,KAAK,KAAK,kBAAkB;AAAA;AAAA;AAAA;AAAA,cAI9B,QAAQ,IAAI,CAAC;AAAA;AAAA;AAAA;AAAA,iBAIV,KAAK;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAMtB;AAEO,SAAS,UAAU,OAAe,SAAkC;AACzE,QAAM,SAAS,GAAG,KAAK,IAAI,OAAO;AAClC,QAAM,sBAAsB,wBAAwB,OAAO,OAAO;AAClE,QAAM,qBAAqB,uBAAuB,OAAO,OAAO;AAIhE;AAAA;AAAA,IAAe;AAAA,iCACgB,GAAG,kBAAkB,CAAC;AAAA,iCACtB,GAAG,mBAAmB,CAAC;AAAA,4BAC5B,GAAG,MAAM,CAAC;AAAA;AAAA;AAEtC;AAEA,MAAM,4BAA4BA,OAAS;AAAA,EACzC,cAAcC,MAAQC,QAAU;AAAA,EAChC,cAAcC,QAAE;AAClB,CAAC;AAID,MAAM,gBAAgB,0BAA0B,OAAO;AAAA,EACrD,MAAMD,OAAE;AAAA,EACR,SAASA,OAAE;AAAA,EACX,eAAe;AACjB,CAAC;AAOD,SAAS,aAAa,OAA4B;AAChD,QAAM,SAAS,GAAG,eAAe,KAAK,CAAC;AACvC,SACE,6BAA6B,KAAK;AAAA,EAC1B,UAAU,MAAM;AAE5B;AAGA,eAAsB,WACpB,KACA,OACA,MACA,gBACA,EAAC,QAAQ,WACT;AACA,QAAM,SAAS,eAAe,KAAK;AACnC,QAAM,SAA0B,EAAC,QAAQ,QAAA;AACzC,QAAM;AAAA,kBACU,IAAI,MAAM,CAAC;AAAA,gBACb,IAAI,KAAK,cAAc,KAAK,MAAM;AAClD;AAEA,eAAsB,oBACpB,IACA,KACA,OACA,gBACyB;AACzB,QAAM
,SAAS,IAAI,eAAe,KAAK,CAAC;AACxC,QAAM,SAAS,MAAM;AAAA,oBACH,MAAM,kBAAkB,MAAM;AAAA,wBAC1B,cAAc;AAAA;AAEpC,MAAI,OAAO,WAAW,GAAG;AAEvB,UAAM,cAAc,MAAM;AAAA,sBACR,MAAM,kBAAkB,MAAM;AAChD,OAAG,QAAQ,yBAAyB,KAAK,UAAU,WAAW,CAAC,EAAE;AACjE,WAAO;AAAA,EACT;AACA,SAAOE,MAAQ,OAAO,CAAC,GAAG,eAAe,aAAa;AACxD;AAEA,eAAsB,uBACpB,KACA,OAC8B;AAC9B,QAAM,SAAS,MAAM;AAAA;AAAA,aAEV,IAAI,eAAe,KAAK,CAAC,CAAC;AAAA;AAErC,SAAO,OAAO,WAAW,CAAC;AAC1B,SAAOA,MAAQ,OAAO,CAAC,GAAG,2BAA2B,aAAa;AACpE;AAMA,eAAsB,0BACpB,IACA,KACA,WACA;AACA,QAAM,EAAC,iBAAgB;AAEvB,aAAW,OAAO,cAAc;AAC9B,QAAI,IAAI,WAAW,GAAG,GAAG;AACvB,YAAM,IAAI;AAAA,QACR;AAAA,+CACkD,GAAG;AAAA,MAAA;AAAA,IAEzD;AAAA,EACF;AACA,QAAM,kBAA4B,CAAA;AAGlC,MAAI,aAAa,QAAQ;AACvB,UAAM,UAAU,MAAM;AAAA,0DACgC;AAAA,MACpD;AAAA,IAAA,CACD,GAAG,OAAA;AAEJ,QAAI,QAAQ,WAAW,aAAa,QAAQ;AAC1C,YAAM,IAAI;AAAA,QACR,gDAAgD,YAAY,cAAc,QAAQ,MAAM;AAAA,MAAA;AAAA,IAE5F;AACA,oBAAgB,KAAK,GAAG,YAAY;AAAA,EACtC,OAAO;AACL,UAAM,qBAAqB;AAAA,MACzB,UAAU;AAAA,MACV,UAAU;AAAA,IAAA;AAEZ,UAAM;AAAA,mCACyB,IAAI,kBAAkB,CAAC;AACtD,UAAM;AAAA,2BACiB,IAAI,kBAAkB,CAAC;AAAA;AAAA;AAG9C,oBAAgB,KAAK,kBAAkB;AAAA,EACzC;AAEA,QAAM,sBAAsB;AAAA,IAC1B,UAAU;AAAA,IACV,UAAU;AAAA,EAAA;AAEZ,kBAAgB,KAAK,mBAAmB;AAExC,QAAM,QAAQ,EAAC,GAAG,WAAW,cAAc,gBAAA;AAG3C,QAAM,IAAI,OAAO,YAAY,KAAK,IAAI,WAAW,OAAO,mBAAmB,CAAC;AAE5E,QAAM,OAAO,MAAM,mBAAmB,KAAK,eAAe;AAC1D,QAAM,6CAA6C,IAAI,GAAG,MAAM,IAAI,GAAG;AAEvE,QAAM,cAAc,IAAI,KAAK,KAAK;AACpC;AAEA,eAAsB,cACpB,IACA,IACA,OACA;AACA,MAAI;AACF,UAAM,GAAG,UAAU,CAAA,QAAO,IAAI,OAAO,aAAa,KAAK,CAAC,CAAC;AAAA,EAC3D,SAAS,GAAG;AACV,QACE,EACE,aAAa,SAAS,iBACtB,EAAE,SAAS,4BAEb;AACA,YAAM;AAAA,IACR;AAEA,OAAG;AAAA,MACD;AAAA;AAAA,GACM,EAAE,QAAQ,EAAE,OAAO;AAAA;AAAA;AAAA;AAAA,IAAA;AAAA,EAI7B;AACF;AAEO,SAAS,qBACd,IACA,WACA;AAEA,YAAU,aAAa,QAAQ,CAAA,QAAO;AACpC,QACE,CAAC,IAAI,aACL,CAAC,IAAI,eACL,CAAC,IAAI,aACL,CAAC,IAAI,aACL;AAEA,YAAM,IAAI;AAAA,QACR,eAAe,IAAI,OAAO;AAAA,MAAA;AAAA,IAE9B;AAAA,EACF,CAAC;AAED,YAAU,OAAO,QAAQ,CAAA,UAAS,SAAS,IAAI,OAAO,UAAU,OAAO,CAAC;AAC1E;AAMO,SAAS,6CACd,MAC+B;AAC/B,QAAM,oBAIA,CAAA;AACN,aAAW,SAAS,KAAK,QAAQ;AAC/B,QAAI,CAAC,MAAM,YAAY,UAAU,MAAM,oBAAoB,SAAS;AAQlE,YAAM,EAAC,QAAQ,MAAM,UAAA,IAAa;AAClC,iBAAW,EAAC,SAAS,MAAM,UAAA,KAAc,KAAK,QAAQ;AAAA,QACpD,CAAA,QACE,IAAI,WAAW,UACf,IAAI,cAAc,aAClB,IAAI,UACJ,IAAI;AAAA,MAAA,GACL;AACD,YAAI,OAAO,KAAK,OAAO,EAAE,KAAK,CAAA,QAAO,CAAC,MAAM,QAAQ,GAAG,EAAE,OAAO,GAAG;AACjE;AAAA,QACF;AACA,0BAAkB,KAAK,EAAC,QAAQ,WAAW,WAAU;AACrD;AAAA,MACF;AAAA,IACF;AAAA,EACF;AAEA,MAAI,kBAAkB,WAAW,GAAG;AAClC,WAAO;AAAA,EACT;AACA,SAAO;AAAA,IACL,OAAO,OAAO,IAAgB,QAAoB;AAChD,iBAAW,EAAC,QAAQ,WAAW,UAAA,KAAc,mBAAmB;AAC9D,WAAG;AAAA,UACD,YAAY,SAAS,kCAAkC,SAAS;AAAA,QAAA;AAElE,cAAM;AAAA,sBACQ,IAAI,MAAM,CAAC,IAAI,IAAI,SAAS,CAAC;AAAA,yCACV,IAAI,SAAS,CAAC;AAAA,MACjD;AAAA,IACF;AAAA,EAAA;AAEJ;"}
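
The shard.ts source embedded in the map above also explains how replicaIdentitiesForTablesWithoutPrimaryKeys chooses a REPLICA IDENTITY for a published table that has no primary key: it scans for a UNIQUE, non-deferrable (isImmediate), non-partial index whose columns are all NOT NULL. The statement it ultimately applies has this shape; a sketch only, with hypothetical table and index names:

  import postgres from 'postgres';

  const sql = postgres('postgres://user:pass@localhost:5432/mydb');

  // In postgres.js, sql('name') emits a safely quoted identifier.
  await sql`
    ALTER TABLE ${sql('public')}.${sql('events')}
    REPLICA IDENTITY USING INDEX ${sql('events_dedupe_key')}`;

  await sql.end();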
@@ -1 +1 @@
- {"version":3,"file":"replica-schema.d.ts","sourceRoot":"","sources":["../../../../../../zero-cache/src/services/change-source/replica-schema.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,kBAAkB,CAAC;AAGjD,OAAO,KAAK,EAAC,QAAQ,EAAC,MAAM,8BAA8B,CAAC;AAC3D,OAAO,EAEL,KAAK,uBAAuB,EAE7B,MAAM,4BAA4B,CAAC;AAYpC,wBAAsB,WAAW,CAC/B,GAAG,EAAE,UAAU,EACf,SAAS,EAAE,MAAM,EACjB,MAAM,EAAE,MAAM,EACd,WAAW,EAAE,CAAC,EAAE,EAAE,UAAU,EAAE,EAAE,EAAE,QAAQ,KAAK,OAAO,CAAC,IAAI,CAAC,GAC3D,OAAO,CAAC,IAAI,CAAC,CAoBf;AAED,wBAAsB,cAAc,CAClC,GAAG,EAAE,UAAU,EACf,SAAS,EAAE,MAAM,EACjB,MAAM,EAAE,MAAM,iBAgBf;AAED,eAAO,MAAM,yBAAyB,EAAE,uBA4BvC,CAAC"}
+ {"version":3,"file":"replica-schema.d.ts","sourceRoot":"","sources":["../../../../../../zero-cache/src/services/change-source/replica-schema.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,kBAAkB,CAAC;AAGjD,OAAO,KAAK,EAAC,QAAQ,EAAC,MAAM,8BAA8B,CAAC;AAE3D,OAAO,EAEL,KAAK,uBAAuB,EAE7B,MAAM,4BAA4B,CAAC;AAYpC,wBAAsB,WAAW,CAC/B,GAAG,EAAE,UAAU,EACf,SAAS,EAAE,MAAM,EACjB,MAAM,EAAE,MAAM,EACd,WAAW,EAAE,CAAC,EAAE,EAAE,UAAU,EAAE,EAAE,EAAE,QAAQ,KAAK,OAAO,CAAC,IAAI,CAAC,GAC3D,OAAO,CAAC,IAAI,CAAC,CAoBf;AAED,wBAAsB,cAAc,CAClC,GAAG,EAAE,UAAU,EACf,SAAS,EAAE,MAAM,EACjB,MAAM,EAAE,MAAM,iBAgBf;AAED,eAAO,MAAM,yBAAyB,EAAE,uBAsCvC,CAAC"}
@@ -1,8 +1,9 @@
  import { SqliteError } from "@rocicorp/zero-sqlite3";
  import { must } from "../../../../shared/src/must.js";
- import { runSchemaMigrations } from "../../db/migration-lite.js";
  import { listTables } from "../../db/lite-tables.js";
+ import { runSchemaMigrations } from "../../db/migration-lite.js";
  import { AutoResetSignal } from "../change-streamer/schema/tables.js";
+ import { initChangeLog } from "../replicator/schema/change-log.js";
  import { recordEvent, CREATE_RUNTIME_EVENTS_TABLE } from "../replicator/schema/replication-state.js";
  import { ColumnMetadataStore, CREATE_COLUMN_METADATA_TABLE } from "./column-metadata.js";
  async function initReplica(log, debugName, dbPath, initialSync) {
@@ -66,6 +67,12 @@ const schemaVersionMigrationMap = {
  const tables = listTables(db);
  must(store).populateFromExistingTables(tables);
  }
+ },
+ 7: {
+ migrateSchema: (_, db) => {
+ db.exec(`DELETE FROM "_zero.changeLog"`);
+ initChangeLog(db);
+ }
  }
  };
  export {
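
The new schema-version-7 entry above deletes all rows from the legacy "_zero.changeLog" table (the table itself is kept so the replica file remains readable by older zero-caches) and calls initChangeLog to create its replacement log. For readers unfamiliar with the pattern, here is a self-contained sketch of how such an incremental migration map can be driven. It uses better-sqlite3 and a hypothetical changeLog2 schema for illustration; it is not the package's migration-lite implementation:

  import Database from 'better-sqlite3';

  type Migration = (db: Database.Database) => void;

  const migrations: Record<number, Migration> = {
    7: db => {
      // Keep the old table for older readers, but drop its history;
      // replicated changes are never read back once applied.
      db.exec(`DELETE FROM "_zero.changeLog"`);
      // Hypothetical replacement schema, for illustration only.
      db.exec(`CREATE TABLE IF NOT EXISTS "_zero.changeLog2" (
        "stateVersion" TEXT NOT NULL,
        "pos" INTEGER NOT NULL,
        "change" JSON NOT NULL,
        PRIMARY KEY ("stateVersion", "pos")
      )`);
    },
  };

  function migrateTo(db: Database.Database, target: number) {
    let version = db.pragma('user_version', {simple: true}) as number;
    while (version < target) {
      const step = migrations[++version]; // versions without an entry are no-ops
      // Each step commits atomically together with its version bump.
      db.transaction(() => {
        step?.(db);
        db.pragma(`user_version = ${version}`);
      })();
    }
  }

  migrateTo(new Database('replica.db'), 7);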
@@ -1 +1 @@
- {"version":3,"file":"replica-schema.js","sources":["../../../../../../zero-cache/src/services/change-source/replica-schema.ts"],"sourcesContent":["import type {LogContext} from '@rocicorp/logger';\nimport {SqliteError} from '@rocicorp/zero-sqlite3';\nimport {must} from '../../../../shared/src/must.ts';\nimport type {Database} from '../../../../zqlite/src/db.ts';\nimport {\n runSchemaMigrations,\n type IncrementalMigrationMap,\n type Migration,\n} from '../../db/migration-lite.ts';\nimport {listTables} from '../../db/lite-tables.ts';\nimport {AutoResetSignal} from '../change-streamer/schema/tables.ts';\nimport {\n CREATE_RUNTIME_EVENTS_TABLE,\n recordEvent,\n} from '../replicator/schema/replication-state.ts';\nimport {\n ColumnMetadataStore,\n CREATE_COLUMN_METADATA_TABLE,\n} from './column-metadata.ts';\n\nexport async function initReplica(\n log: LogContext,\n debugName: string,\n dbPath: string,\n initialSync: (lc: LogContext, tx: Database) => Promise<void>,\n): Promise<void> {\n const setupMigration: Migration = {\n migrateSchema: (log, tx) => initialSync(log, tx),\n minSafeVersion: 1,\n };\n\n try {\n await runSchemaMigrations(\n log,\n debugName,\n dbPath,\n setupMigration,\n schemaVersionMigrationMap,\n );\n } catch (e) {\n if (e instanceof SqliteError && e.code === 'SQLITE_CORRUPT') {\n throw new AutoResetSignal(e.message);\n }\n throw e;\n }\n}\n\nexport async function upgradeReplica(\n log: LogContext,\n debugName: string,\n dbPath: string,\n) {\n await runSchemaMigrations(\n log,\n debugName,\n dbPath,\n // setupMigration should never be invoked\n {\n migrateSchema: () => {\n throw new Error(\n 'This should only be called for already synced replicas',\n );\n },\n },\n schemaVersionMigrationMap,\n );\n}\n\nexport const schemaVersionMigrationMap: IncrementalMigrationMap = {\n // There's no incremental migration from v1. Just reset the replica.\n 4: {\n migrateSchema: () => {\n throw new AutoResetSignal('upgrading replica to new schema');\n },\n minSafeVersion: 3,\n },\n\n 5: {\n migrateSchema: (_, db) => {\n db.exec(CREATE_RUNTIME_EVENTS_TABLE);\n },\n migrateData: (_, db) => {\n recordEvent(db, 'upgrade');\n },\n },\n\n 6: {\n migrateSchema: (_, db) => {\n db.exec(CREATE_COLUMN_METADATA_TABLE);\n },\n migrateData: (_, db) => {\n const store = ColumnMetadataStore.getInstance(db);\n const tables = listTables(db);\n must(store).populateFromExistingTables(tables);\n },\n },\n};\n"],"names":["log"],"mappings":";;;;;;;AAoBA,eAAsB,YACpB,KACA,WACA,QACA,aACe;AACf,QAAM,iBAA4B;AAAA,IAChC,eAAe,CAACA,MAAK,OAAO,YAAYA,MAAK,EAAE;AAAA,IAC/C,gBAAgB;AAAA,EAAA;AAGlB,MAAI;AACF,UAAM;AAAA,MACJ;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IAAA;AAAA,EAEJ,SAAS,GAAG;AACV,QAAI,aAAa,eAAe,EAAE,SAAS,kBAAkB;AAC3D,YAAM,IAAI,gBAAgB,EAAE,OAAO;AAAA,IACrC;AACA,UAAM;AAAA,EACR;AACF;AAEA,eAAsB,eACpB,KACA,WACA,QACA;AACA,QAAM;AAAA,IACJ;AAAA,IACA;AAAA,IACA;AAAA;AAAA,IAEA;AAAA,MACE,eAAe,MAAM;AACnB,cAAM,IAAI;AAAA,UACR;AAAA,QAAA;AAAA,MAEJ;AAAA,IAAA;AAAA,IAEF;AAAA,EAAA;AAEJ;AAEO,MAAM,4BAAqD;AAAA;AAAA,EAEhE,GAAG;AAAA,IACD,eAAe,MAAM;AACnB,YAAM,IAAI,gBAAgB,iCAAiC;AAAA,IAC7D;AAAA,IACA,gBAAgB;AAAA,EAAA;AAAA,EAGlB,GAAG;AAAA,IACD,eAAe,CAAC,GAAG,OAAO;AACxB,SAAG,KAAK,2BAA2B;AAAA,IACrC;AAAA,IACA,aAAa,CAAC,GAAG,OAAO;AACtB,kBAAY,IAAI,SAAS;AAAA,IAC3B;AAAA,EAAA;AAAA,EAGF,GAAG;AAAA,IACD,eAAe,CAAC,GAAG,OAAO;AACxB,SAAG,KAAK,4BAA4B;AAAA,IACtC;AAAA,IACA,aAAa,CAAC,GAAG,OAAO;AACtB,YAAM,QAAQ,oBAAoB,YAAY,EAAE;AAChD,YAAM,SAAS,WAAW,EAAE;AAC5B,WAAK,KAAK,EAAE,2BAA2B,MAAM;AAAA,IAC/C;AAAA,EAAA;AAEJ;"}
+ {"version":3,"file":"replica-schema.js","sources":["../../../../../../zero-cache/src/services/change-source/replica-schema.ts"],"sourcesContent":["import type {LogContext} from '@rocicorp/logger';\nimport {SqliteError} from '@rocicorp/zero-sqlite3';\nimport {must} from '../../../../shared/src/must.ts';\nimport type {Database} from '../../../../zqlite/src/db.ts';\nimport {listTables} from '../../db/lite-tables.ts';\nimport {\n runSchemaMigrations,\n type IncrementalMigrationMap,\n type Migration,\n} from '../../db/migration-lite.ts';\nimport {AutoResetSignal} from '../change-streamer/schema/tables.ts';\nimport {initChangeLog} from '../replicator/schema/change-log.ts';\nimport {\n CREATE_RUNTIME_EVENTS_TABLE,\n recordEvent,\n} from '../replicator/schema/replication-state.ts';\nimport {\n ColumnMetadataStore,\n CREATE_COLUMN_METADATA_TABLE,\n} from './column-metadata.ts';\n\nexport async function initReplica(\n log: LogContext,\n debugName: string,\n dbPath: string,\n initialSync: (lc: LogContext, tx: Database) => Promise<void>,\n): Promise<void> {\n const setupMigration: Migration = {\n migrateSchema: (log, tx) => initialSync(log, tx),\n minSafeVersion: 1,\n };\n\n try {\n await runSchemaMigrations(\n log,\n debugName,\n dbPath,\n setupMigration,\n schemaVersionMigrationMap,\n );\n } catch (e) {\n if (e instanceof SqliteError && e.code === 'SQLITE_CORRUPT') {\n throw new AutoResetSignal(e.message);\n }\n throw e;\n }\n}\n\nexport async function upgradeReplica(\n log: LogContext,\n debugName: string,\n dbPath: string,\n) {\n await runSchemaMigrations(\n log,\n debugName,\n dbPath,\n // setupMigration should never be invoked\n {\n migrateSchema: () => {\n throw new Error(\n 'This should only be called for already synced replicas',\n );\n },\n },\n schemaVersionMigrationMap,\n );\n}\n\nexport const schemaVersionMigrationMap: IncrementalMigrationMap = {\n // There's no incremental migration from v1. Just reset the replica.\n 4: {\n migrateSchema: () => {\n throw new AutoResetSignal('upgrading replica to new schema');\n },\n minSafeVersion: 3,\n },\n\n 5: {\n migrateSchema: (_, db) => {\n db.exec(CREATE_RUNTIME_EVENTS_TABLE);\n },\n migrateData: (_, db) => {\n recordEvent(db, 'upgrade');\n },\n },\n\n 6: {\n migrateSchema: (_, db) => {\n db.exec(CREATE_COLUMN_METADATA_TABLE);\n },\n migrateData: (_, db) => {\n const store = ColumnMetadataStore.getInstance(db);\n const tables = listTables(db);\n must(store).populateFromExistingTables(tables);\n },\n },\n\n 7: {\n migrateSchema: (_, db) => {\n // Note: The original \"changeLog\" table is kept so that the replica file\n // is compatible with older zero-caches. 
However, it is truncated for\n // space savings (since historic changes were never read).\n db.exec(`DELETE FROM \"_zero.changeLog\"`);\n initChangeLog(db); // Creates _zero.changeLog2\n },\n },\n};\n"],"names":["log"],"mappings":";;;;;;;;AAqBA,eAAsB,YACpB,KACA,WACA,QACA,aACe;AACf,QAAM,iBAA4B;AAAA,IAChC,eAAe,CAACA,MAAK,OAAO,YAAYA,MAAK,EAAE;AAAA,IAC/C,gBAAgB;AAAA,EAAA;AAGlB,MAAI;AACF,UAAM;AAAA,MACJ;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IAAA;AAAA,EAEJ,SAAS,GAAG;AACV,QAAI,aAAa,eAAe,EAAE,SAAS,kBAAkB;AAC3D,YAAM,IAAI,gBAAgB,EAAE,OAAO;AAAA,IACrC;AACA,UAAM;AAAA,EACR;AACF;AAEA,eAAsB,eACpB,KACA,WACA,QACA;AACA,QAAM;AAAA,IACJ;AAAA,IACA;AAAA,IACA;AAAA;AAAA,IAEA;AAAA,MACE,eAAe,MAAM;AACnB,cAAM,IAAI;AAAA,UACR;AAAA,QAAA;AAAA,MAEJ;AAAA,IAAA;AAAA,IAEF;AAAA,EAAA;AAEJ;AAEO,MAAM,4BAAqD;AAAA;AAAA,EAEhE,GAAG;AAAA,IACD,eAAe,MAAM;AACnB,YAAM,IAAI,gBAAgB,iCAAiC;AAAA,IAC7D;AAAA,IACA,gBAAgB;AAAA,EAAA;AAAA,EAGlB,GAAG;AAAA,IACD,eAAe,CAAC,GAAG,OAAO;AACxB,SAAG,KAAK,2BAA2B;AAAA,IACrC;AAAA,IACA,aAAa,CAAC,GAAG,OAAO;AACtB,kBAAY,IAAI,SAAS;AAAA,IAC3B;AAAA,EAAA;AAAA,EAGF,GAAG;AAAA,IACD,eAAe,CAAC,GAAG,OAAO;AACxB,SAAG,KAAK,4BAA4B;AAAA,IACtC;AAAA,IACA,aAAa,CAAC,GAAG,OAAO;AACtB,YAAM,QAAQ,oBAAoB,YAAY,EAAE;AAChD,YAAM,SAAS,WAAW,EAAE;AAC5B,WAAK,KAAK,EAAE,2BAA2B,MAAM;AAAA,IAC/C;AAAA,EAAA;AAAA,EAGF,GAAG;AAAA,IACD,eAAe,CAAC,GAAG,OAAO;AAIxB,SAAG,KAAK,+BAA+B;AACvC,oBAAc,EAAE;AAAA,IAClB;AAAA,EAAA;AAEJ;"}