@rocicorp/zero 1.4.0 → 1.5.0-canary.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (189)
  1. package/out/analyze-query/src/analyze-cli.js +2 -2
  2. package/out/analyze-query/src/analyze-cli.js.map +1 -1
  3. package/out/zero/package.js +1 -1
  4. package/out/zero/package.js.map +1 -1
  5. package/out/zero-cache/src/auth/auth.d.ts +1 -1
  6. package/out/zero-cache/src/auth/auth.d.ts.map +1 -1
  7. package/out/zero-cache/src/auth/auth.js +1 -1
  8. package/out/zero-cache/src/auth/auth.js.map +1 -1
  9. package/out/zero-cache/src/auth/write-authorizer.d.ts +1 -1
  10. package/out/zero-cache/src/auth/write-authorizer.d.ts.map +1 -1
  11. package/out/zero-cache/src/auth/write-authorizer.js.map +1 -1
  12. package/out/zero-cache/src/config/normalize.d.ts.map +1 -1
  13. package/out/zero-cache/src/config/normalize.js +8 -0
  14. package/out/zero-cache/src/config/normalize.js.map +1 -1
  15. package/out/zero-cache/src/config/zero-config.d.ts +8 -4
  16. package/out/zero-cache/src/config/zero-config.d.ts.map +1 -1
  17. package/out/zero-cache/src/config/zero-config.js +28 -6
  18. package/out/zero-cache/src/config/zero-config.js.map +1 -1
  19. package/out/zero-cache/src/custom/fetch.d.ts +1 -1
  20. package/out/zero-cache/src/custom/fetch.d.ts.map +1 -1
  21. package/out/zero-cache/src/custom/fetch.js +2 -2
  22. package/out/zero-cache/src/custom/fetch.js.map +1 -1
  23. package/out/zero-cache/src/custom-queries/transform-query.d.ts +21 -7
  24. package/out/zero-cache/src/custom-queries/transform-query.d.ts.map +1 -1
  25. package/out/zero-cache/src/custom-queries/transform-query.js +26 -9
  26. package/out/zero-cache/src/custom-queries/transform-query.js.map +1 -1
  27. package/out/zero-cache/src/server/change-streamer.d.ts.map +1 -1
  28. package/out/zero-cache/src/server/change-streamer.js +2 -1
  29. package/out/zero-cache/src/server/change-streamer.js.map +1 -1
  30. package/out/zero-cache/src/server/runner/run-worker.d.ts.map +1 -1
  31. package/out/zero-cache/src/server/runner/run-worker.js +5 -2
  32. package/out/zero-cache/src/server/runner/run-worker.js.map +1 -1
  33. package/out/zero-cache/src/server/syncer.js +3 -3
  34. package/out/zero-cache/src/server/syncer.js.map +1 -1
  35. package/out/zero-cache/src/services/change-source/custom/change-source.js +2 -2
  36. package/out/zero-cache/src/services/change-source/custom/change-source.js.map +1 -1
  37. package/out/zero-cache/src/services/change-source/pg/change-source.d.ts.map +1 -1
  38. package/out/zero-cache/src/services/change-source/pg/change-source.js +24 -20
  39. package/out/zero-cache/src/services/change-source/pg/change-source.js.map +1 -1
  40. package/out/zero-cache/src/services/change-source/pg/schema/ddl.d.ts +258 -45
  41. package/out/zero-cache/src/services/change-source/pg/schema/ddl.d.ts.map +1 -1
  42. package/out/zero-cache/src/services/change-source/pg/schema/ddl.js +119 -83
  43. package/out/zero-cache/src/services/change-source/pg/schema/ddl.js.map +1 -1
  44. package/out/zero-cache/src/services/change-source/pg/schema/init.d.ts.map +1 -1
  45. package/out/zero-cache/src/services/change-source/pg/schema/init.js +1 -1
  46. package/out/zero-cache/src/services/change-source/pg/schema/init.js.map +1 -1
  47. package/out/zero-cache/src/services/change-source/pg/schema/shard.d.ts.map +1 -1
  48. package/out/zero-cache/src/services/change-source/pg/schema/shard.js +2 -1
  49. package/out/zero-cache/src/services/change-source/pg/schema/shard.js.map +1 -1
  50. package/out/zero-cache/src/services/change-streamer/change-streamer-http.d.ts +1 -0
  51. package/out/zero-cache/src/services/change-streamer/change-streamer-http.d.ts.map +1 -1
  52. package/out/zero-cache/src/services/change-streamer/change-streamer-http.js +3 -3
  53. package/out/zero-cache/src/services/change-streamer/change-streamer-http.js.map +1 -1
  54. package/out/zero-cache/src/services/http-service.d.ts +1 -0
  55. package/out/zero-cache/src/services/http-service.d.ts.map +1 -1
  56. package/out/zero-cache/src/services/http-service.js +5 -4
  57. package/out/zero-cache/src/services/http-service.js.map +1 -1
  58. package/out/zero-cache/src/services/life-cycle.d.ts +1 -1
  59. package/out/zero-cache/src/services/life-cycle.d.ts.map +1 -1
  60. package/out/zero-cache/src/services/life-cycle.js +1 -2
  61. package/out/zero-cache/src/services/life-cycle.js.map +1 -1
  62. package/out/zero-cache/src/services/mutagen/mutagen.d.ts +1 -1
  63. package/out/zero-cache/src/services/mutagen/mutagen.d.ts.map +1 -1
  64. package/out/zero-cache/src/services/mutagen/mutagen.js +1 -1
  65. package/out/zero-cache/src/services/mutagen/mutagen.js.map +1 -1
  66. package/out/zero-cache/src/services/mutagen/pusher.d.ts +4 -3
  67. package/out/zero-cache/src/services/mutagen/pusher.d.ts.map +1 -1
  68. package/out/zero-cache/src/services/mutagen/pusher.js +57 -38
  69. package/out/zero-cache/src/services/mutagen/pusher.js.map +1 -1
  70. package/out/zero-cache/src/services/shadow-sync/shadow-sync-service.js +2 -1
  71. package/out/zero-cache/src/services/shadow-sync/shadow-sync-service.js.map +1 -1
  72. package/out/zero-cache/src/services/view-syncer/client-handler.js +1 -1
  73. package/out/zero-cache/src/services/view-syncer/client-handler.js.map +1 -1
  74. package/out/zero-cache/src/services/view-syncer/connection-context-manager.d.ts +41 -27
  75. package/out/zero-cache/src/services/view-syncer/connection-context-manager.d.ts.map +1 -1
  76. package/out/zero-cache/src/services/view-syncer/connection-context-manager.js +147 -104
  77. package/out/zero-cache/src/services/view-syncer/connection-context-manager.js.map +1 -1
  78. package/out/zero-cache/src/services/view-syncer/cvr.d.ts +6 -0
  79. package/out/zero-cache/src/services/view-syncer/cvr.d.ts.map +1 -1
  80. package/out/zero-cache/src/services/view-syncer/cvr.js +8 -0
  81. package/out/zero-cache/src/services/view-syncer/cvr.js.map +1 -1
  82. package/out/zero-cache/src/services/view-syncer/view-syncer.d.ts +3 -3
  83. package/out/zero-cache/src/services/view-syncer/view-syncer.d.ts.map +1 -1
  84. package/out/zero-cache/src/services/view-syncer/view-syncer.js +119 -86
  85. package/out/zero-cache/src/services/view-syncer/view-syncer.js.map +1 -1
  86. package/out/zero-cache/src/workers/connection.js +2 -2
  87. package/out/zero-cache/src/workers/connection.js.map +1 -1
  88. package/out/zero-cache/src/workers/syncer-ws-message-handler.d.ts +1 -1
  89. package/out/zero-cache/src/workers/syncer-ws-message-handler.d.ts.map +1 -1
  90. package/out/zero-cache/src/workers/syncer-ws-message-handler.js +7 -7
  91. package/out/zero-cache/src/workers/syncer-ws-message-handler.js.map +1 -1
  92. package/out/zero-cache/src/workers/syncer.d.ts +1 -1
  93. package/out/zero-cache/src/workers/syncer.d.ts.map +1 -1
  94. package/out/zero-cache/src/workers/syncer.js +11 -10
  95. package/out/zero-cache/src/workers/syncer.js.map +1 -1
  96. package/out/zero-client/src/client/connection.d.ts +15 -7
  97. package/out/zero-client/src/client/connection.d.ts.map +1 -1
  98. package/out/zero-client/src/client/connection.js.map +1 -1
  99. package/out/zero-client/src/client/crud-impl.d.ts +1 -1
  100. package/out/zero-client/src/client/crud-impl.d.ts.map +1 -1
  101. package/out/zero-client/src/client/crud-impl.js +1 -1
  102. package/out/zero-client/src/client/crud-impl.js.map +1 -1
  103. package/out/zero-client/src/client/crud.d.ts +1 -1
  104. package/out/zero-client/src/client/crud.d.ts.map +1 -1
  105. package/out/zero-client/src/client/crud.js +1 -1
  106. package/out/zero-client/src/client/crud.js.map +1 -1
  107. package/out/zero-client/src/client/keys.d.ts +1 -1
  108. package/out/zero-client/src/client/keys.d.ts.map +1 -1
  109. package/out/zero-client/src/client/keys.js.map +1 -1
  110. package/out/zero-client/src/client/make-replicache-mutators.js +1 -1
  111. package/out/zero-client/src/client/make-replicache-mutators.js.map +1 -1
  112. package/out/zero-client/src/client/mutation-tracker.d.ts +2 -1
  113. package/out/zero-client/src/client/mutation-tracker.d.ts.map +1 -1
  114. package/out/zero-client/src/client/mutation-tracker.js +3 -3
  115. package/out/zero-client/src/client/mutation-tracker.js.map +1 -1
  116. package/out/zero-client/src/client/version.js +1 -1
  117. package/out/zero-client/src/client/zero.d.ts.map +1 -1
  118. package/out/zero-client/src/client/zero.js +2 -2
  119. package/out/zero-client/src/client/zero.js.map +1 -1
  120. package/out/zero-client/src/types/client-state.d.ts +1 -1
  121. package/out/zero-client/src/types/client-state.d.ts.map +1 -1
  122. package/out/zero-protocol/src/custom-queries.js +1 -1
  123. package/out/zero-protocol/src/down.js +1 -1
  124. package/out/zero-protocol/src/error-kind-enum.d.ts +1 -2
  125. package/out/zero-protocol/src/error-kind-enum.d.ts.map +1 -1
  126. package/out/zero-protocol/src/error-kind-enum.js.map +1 -1
  127. package/out/zero-protocol/src/mutate-server.d.ts +165 -0
  128. package/out/zero-protocol/src/mutate-server.d.ts.map +1 -0
  129. package/out/zero-protocol/src/mutate-server.js +24 -0
  130. package/out/zero-protocol/src/mutate-server.js.map +1 -0
  131. package/out/zero-protocol/src/mutation.d.ts +229 -0
  132. package/out/zero-protocol/src/mutation.d.ts.map +1 -0
  133. package/out/zero-protocol/src/mutation.js +112 -0
  134. package/out/zero-protocol/src/mutation.js.map +1 -0
  135. package/out/zero-protocol/src/mutations-patch.js +1 -1
  136. package/out/zero-protocol/src/mutations-patch.js.map +1 -1
  137. package/out/zero-protocol/src/push.d.ts +3 -234
  138. package/out/zero-protocol/src/push.d.ts.map +1 -1
  139. package/out/zero-protocol/src/push.js +3 -114
  140. package/out/zero-protocol/src/push.js.map +1 -1
  141. package/out/zero-protocol/src/query-server.d.ts +150 -0
  142. package/out/zero-protocol/src/query-server.d.ts.map +1 -0
  143. package/out/zero-protocol/src/query-server.js +16 -0
  144. package/out/zero-protocol/src/query-server.js.map +1 -0
  145. package/out/zero-protocol/src/up.js +1 -1
  146. package/out/zero-server/src/mod.d.ts +4 -2
  147. package/out/zero-server/src/mod.d.ts.map +1 -1
  148. package/out/zero-server/src/process-mutations.d.ts +50 -4
  149. package/out/zero-server/src/process-mutations.d.ts.map +1 -1
  150. package/out/zero-server/src/process-mutations.js +73 -36
  151. package/out/zero-server/src/process-mutations.js.map +1 -1
  152. package/out/zero-server/src/push-processor.d.ts +3 -3
  153. package/out/zero-server/src/push-processor.d.ts.map +1 -1
  154. package/out/zero-server/src/push-processor.js.map +1 -1
  155. package/out/zero-server/src/queries/process-queries.d.ts +45 -53
  156. package/out/zero-server/src/queries/process-queries.d.ts.map +1 -1
  157. package/out/zero-server/src/queries/process-queries.js +72 -53
  158. package/out/zero-server/src/queries/process-queries.js.map +1 -1
  159. package/out/zero-server/src/zql-database.js.map +1 -1
  160. package/out/zero-types/src/default-types.d.ts +1 -0
  161. package/out/zero-types/src/default-types.d.ts.map +1 -1
  162. package/out/zql/src/builder/builder.d.ts.map +1 -1
  163. package/out/zql/src/builder/builder.js +17 -7
  164. package/out/zql/src/builder/builder.js.map +1 -1
  165. package/out/zql/src/ivm/cap.d.ts +32 -0
  166. package/out/zql/src/ivm/cap.d.ts.map +1 -0
  167. package/out/zql/src/ivm/cap.js +205 -0
  168. package/out/zql/src/ivm/cap.js.map +1 -0
  169. package/out/zql/src/ivm/constraint.js +1 -1
  170. package/out/zql/src/ivm/flipped-join.d.ts.map +1 -1
  171. package/out/zql/src/ivm/flipped-join.js +61 -15
  172. package/out/zql/src/ivm/flipped-join.js.map +1 -1
  173. package/out/zql/src/ivm/memory-source.d.ts.map +1 -1
  174. package/out/zql/src/ivm/memory-source.js +3 -4
  175. package/out/zql/src/ivm/memory-source.js.map +1 -1
  176. package/out/zql/src/ivm/schema.d.ts +8 -0
  177. package/out/zql/src/ivm/schema.d.ts.map +1 -1
  178. package/out/zql/src/ivm/take.js +2 -2
  179. package/out/zql/src/mutate/mutator-registry.js.map +1 -1
  180. package/out/zql/src/mutate/mutator.d.ts +11 -2
  181. package/out/zql/src/mutate/mutator.d.ts.map +1 -1
  182. package/out/zql/src/mutate/mutator.js.map +1 -1
  183. package/out/zql/src/query/query-registry.d.ts +9 -2
  184. package/out/zql/src/query/query-registry.d.ts.map +1 -1
  185. package/out/zql/src/query/query-registry.js.map +1 -1
  186. package/out/zqlite/src/table-source.d.ts.map +1 -1
  187. package/out/zqlite/src/table-source.js +4 -1
  188. package/out/zqlite/src/table-source.js.map +1 -1
  189. package/package.json +1 -1
@@ -1 +1 @@
- {"version":3,"file":"change-streamer.js","names":[],"sources":["../../../../../zero-cache/src/server/change-streamer.ts"],"sourcesContent":["import {assert} from '../../../shared/src/asserts.ts';\nimport {must} from '../../../shared/src/must.ts';\nimport {DatabaseInitError} from '../../../zqlite/src/db.ts';\nimport {getServerContext} from '../config/server-context.ts';\nimport {getNormalizedZeroConfig} from '../config/zero-config.ts';\nimport {deleteLiteDB} from '../db/delete-lite-db.ts';\nimport {warmupConnections} from '../db/warmup.ts';\nimport {initEventSink, publishCriticalEvent} from '../observability/events.ts';\nimport {upgradeReplica} from '../services/change-source/common/replica-schema.ts';\nimport {initializeCustomChangeSource} from '../services/change-source/custom/change-source.ts';\nimport {initializePostgresChangeSource} from '../services/change-source/pg/change-source.ts';\nimport {BackupMonitor} from '../services/change-streamer/backup-monitor.ts';\nimport {ChangeStreamerHttpServer} from '../services/change-streamer/change-streamer-http.ts';\nimport {initializeStreamer} from '../services/change-streamer/change-streamer-service.ts';\nimport type {ChangeStreamerService} from '../services/change-streamer/change-streamer.ts';\nimport {ReplicaMonitor} from '../services/change-streamer/replica-monitor.ts';\nimport {initChangeStreamerSchema} from '../services/change-streamer/schema/init.ts';\nimport {AutoResetSignal} from '../services/change-streamer/schema/tables.ts';\nimport {PurgeLocker} from '../services/change-streamer/storer.ts';\nimport {exitAfter, runUntilKilled} from '../services/life-cycle.ts';\nimport {\n BackupNotFoundException,\n restoreReplica,\n} from '../services/litestream/commands.ts';\nimport {\n replicationStatusError,\n ReplicationStatusPublisher,\n} from '../services/replicator/replication-status.ts';\nimport {pgClient} from '../types/pg.ts';\nimport {\n parentWorker,\n singleProcessMode,\n type Worker,\n} from '../types/processes.ts';\nimport {getShardConfig} from '../types/shards.ts';\nimport {createLogContext} from './logging.ts';\nimport {startOtelAuto} from './otel-start.ts';\n\nexport default async function runWorker(\n parent: Worker,\n env: NodeJS.ProcessEnv,\n ...argv: string[]\n): Promise<void> {\n const workerStartTime = Date.now();\n const config = getNormalizedZeroConfig({env, argv});\n const {\n taskID,\n changeStreamer: {\n port,\n address,\n protocol,\n startupDelayMs,\n backPressureLimitHeapProportion,\n flowControlConsensusPaddingSeconds,\n },\n upstream,\n change,\n replica,\n initialSync,\n litestream,\n } = config;\n\n startOtelAuto(\n createLogContext(config, 'change-streamer', 0, false),\n 'change-streamer',\n 0,\n );\n const lc = createLogContext(config, 'change-streamer');\n initEventSink(lc, config);\n\n // Kick off DB connection warmup in the background.\n const changeDB = pgClient(\n lc,\n change.db,\n 'change-streamer',\n {\n max: change.maxConns,\n },\n {sendStringAsJson: true},\n );\n void warmupConnections(lc, changeDB, 'change').catch(() => {});\n\n const {autoReset, replicationLag} = config;\n const shard = getShardConfig(config);\n\n // Ensure the change DB schema is initialized/up-to-date, then acquire\n // a lock to prevent change-lock purges. 
This ensures that (this)\n // change-streamer will be able to resume from the backup.\n await initChangeStreamerSchema(lc, changeDB, shard);\n let purgeLock = await new PurgeLocker(lc, shard, changeDB).acquire();\n\n // Restore from litestream if the change-log has entries.\n if (purgeLock) {\n try {\n await restoreReplica(lc, config, purgeLock);\n } catch (e) {\n // If the restore failed, e.g. due to a corrupt or missing backup, the\n // replication-manager recovers by re-syncing.\n const log = e instanceof BackupNotFoundException ? 'warn' : 'error';\n lc[log]?.(\n `error restoring backup. resyncing the replica: ${String(e)}`,\n e,\n );\n\n // The purgeLock must be released if the backup could not be restored,\n // or it will otherwise prevent the change-db update after the resync\n // completes.\n await purgeLock.release();\n purgeLock = null;\n }\n }\n\n let changeStreamer: ChangeStreamerService | undefined;\n\n const context = getServerContext(config);\n\n for (const first of [true, false]) {\n try {\n // Note: This performs initial sync of the replica if necessary.\n const {changeSource, subscriptionState} =\n upstream.type === 'pg'\n ? await initializePostgresChangeSource(\n lc,\n upstream.db,\n shard,\n replica.file,\n {\n ...initialSync,\n replicationSlotFailover: upstream.pgReplicationSlotFailover,\n },\n context,\n replicationLag.reportIntervalMs,\n )\n : await initializeCustomChangeSource(\n lc,\n upstream.db,\n shard,\n replica.file,\n context,\n );\n\n const replicationStatusPublisher =\n ReplicationStatusPublisher.forReplicaFile(replica.file);\n\n changeStreamer = await initializeStreamer(\n lc,\n shard,\n taskID,\n address,\n protocol,\n changeDB,\n changeSource,\n replicationStatusPublisher,\n subscriptionState,\n purgeLock,\n autoReset ?? false,\n {\n backPressureLimitHeapProportion,\n flowControlConsensusPaddingSeconds,\n statementTimeoutMs: change.statementTimeoutMs,\n },\n setTimeout,\n );\n break;\n } catch (e) {\n if (first && e instanceof AutoResetSignal) {\n lc.warn?.(`resetting replica ${replica.file}`, e);\n // TODO: Make deleteLiteDB work with litestream. It will probably have to be\n // a semantic wipe instead of a file delete.\n deleteLiteDB(replica.file);\n // Release the purge lock before retrying. This is safe because the\n // purge lock exists to preserve change-log entries so the new\n // change-streamer can resume from the backup replica's watermark.\n // An AutoResetSignal means we cant resume from the backup replica\n // (e.g. its replication slot is gone), so the change-log entries the lock\n // was protecting are no longer needed. The retry performs a fresh\n // initial sync with a new replication slot, independent of the old\n // change-log. Releasing is also necessary to avoid a\n // self-deadlock when CHANGE_DB == UPSTREAM_DB:\n // CREATE_REPLICATION_SLOT waits for all older transactions to\n // finish, including this lock's open transaction.\n await purgeLock?.release();\n purgeLock = null;\n continue; // execute again with a fresh initial-sync\n }\n await publishCriticalEvent(\n lc,\n replicationStatusError(lc, 'Initializing', e),\n );\n if (e instanceof DatabaseInitError) {\n throw new Error(\n `Cannot open ZERO_REPLICA_FILE at \"${replica.file}\". 
Please check that the path is valid.`,\n {cause: e},\n );\n }\n throw e;\n }\n }\n // impossible: upstream must have advanced in order for replication to be stuck.\n assert(changeStreamer, `resetting replica did not advance replicaVersion`);\n\n // Perform any upgrades to the replica in case it was restored from an\n // earlier version. Note that this upgrade is done by the replicator worker\n // as well (in both the replication-manager and the view-syncer), but the\n // change-streamer independently reads the replica, and it is fine run the\n // upgrade logic redundantly since it is idempotent.\n await upgradeReplica(lc, 'change-streamer-init', replica.file);\n\n const {backupURL, port: metricsPort} = litestream;\n const monitor = backupURL\n ? new BackupMonitor(\n lc,\n replica.file,\n backupURL,\n `http://localhost:${metricsPort}/metrics`,\n changeStreamer,\n // The time between when the zero-cache was started to when the\n // change-streamer is ready to start serves as the initial delay for\n // watermark cleanup (as it either includes a similar replica\n // restoration/preparation step, or an initial-sync, which\n // generally takes longer).\n //\n // Consider: Also account for permanent volumes?\n Date.now() - workerStartTime,\n )\n : new ReplicaMonitor(lc, replica.file, changeStreamer);\n\n const changeStreamerWebServer = new ChangeStreamerHttpServer(\n lc,\n config,\n {port, startupDelayMs},\n parent,\n changeStreamer,\n monitor instanceof BackupMonitor ? monitor : null,\n );\n\n parent.send(['ready', {ready: true}]);\n\n // Note: The changeStreamer itself is not started here; it is started by the\n // changeStreamerWebServer.\n return runUntilKilled(lc, parent, changeStreamerWebServer, monitor);\n}\n\n// fork()\nif (!singleProcessMode()) {\n void exitAfter(() =>\n runWorker(must(parentWorker), process.env, ...process.argv.slice(2)),\n 
);\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;;;;;;;;;;AAsCA,eAA8B,UAC5B,QACA,KACA,GAAG,MACY;CACf,MAAM,kBAAkB,KAAK,KAAK;CAClC,MAAM,SAAS,wBAAwB;EAAC;EAAK;EAAK,CAAC;CACnD,MAAM,EACJ,QACA,gBAAgB,EACd,MACA,SACA,UACA,gBACA,iCACA,sCAEF,UACA,QACA,SACA,aACA,eACE;AAEJ,eACE,iBAAiB,QAAQ,mBAAmB,GAAG,MAAM,EACrD,mBACA,EACD;CACD,MAAM,KAAK,iBAAiB,QAAQ,kBAAkB;AACtD,eAAc,IAAI,OAAO;CAGzB,MAAM,WAAW,SACf,IACA,OAAO,IACP,mBACA,EACE,KAAK,OAAO,UACb,EACD,EAAC,kBAAkB,MAAK,CACzB;AACI,mBAAkB,IAAI,UAAU,SAAS,CAAC,YAAY,GAAG;CAE9D,MAAM,EAAC,WAAW,mBAAkB;CACpC,MAAM,QAAQ,eAAe,OAAO;AAKpC,OAAM,yBAAyB,IAAI,UAAU,MAAM;CACnD,IAAI,YAAY,MAAM,IAAI,YAAY,IAAI,OAAO,SAAS,CAAC,SAAS;AAGpE,KAAI,UACF,KAAI;AACF,QAAM,eAAe,IAAI,QAAQ,UAAU;UACpC,GAAG;AAIV,KADY,aAAa,0BAA0B,SAAS,WAE1D,kDAAkD,OAAO,EAAE,IAC3D,EACD;AAKD,QAAM,UAAU,SAAS;AACzB,cAAY;;CAIhB,IAAI;CAEJ,MAAM,UAAU,iBAAiB,OAAO;AAExC,MAAK,MAAM,SAAS,CAAC,MAAM,MAAM,CAC/B,KAAI;EAEF,MAAM,EAAC,cAAc,sBACnB,SAAS,SAAS,OACd,MAAM,+BACJ,IACA,SAAS,IACT,OACA,QAAQ,MACR;GACE,GAAG;GACH,yBAAyB,SAAS;GACnC,EACD,SACA,eAAe,iBAChB,GACD,MAAM,6BACJ,IACA,SAAS,IACT,OACA,QAAQ,MACR,QACD;AAKP,mBAAiB,MAAM,mBACrB,IACA,OACA,QACA,SACA,UACA,UACA,cATA,2BAA2B,eAAe,QAAQ,KAAK,EAWvD,mBACA,WACA,aAAa,OACb;GACE;GACA;GACA,oBAAoB,OAAO;GAC5B,EACD,WACD;AACD;UACO,GAAG;AACV,MAAI,SAAS,aAAa,iBAAiB;AACzC,MAAG,OAAO,qBAAqB,QAAQ,QAAQ,EAAE;AAGjD,gBAAa,QAAQ,KAAK;AAY1B,SAAM,WAAW,SAAS;AAC1B,eAAY;AACZ;;AAEF,QAAM,qBACJ,IACA,uBAAuB,IAAI,gBAAgB,EAAE,CAC9C;AACD,MAAI,aAAa,kBACf,OAAM,IAAI,MACR,qCAAqC,QAAQ,KAAK,0CAClD,EAAC,OAAO,GAAE,CACX;AAEH,QAAM;;AAIV,QAAO,gBAAgB,mDAAmD;AAO1E,OAAM,eAAe,IAAI,wBAAwB,QAAQ,KAAK;CAE9D,MAAM,EAAC,WAAW,MAAM,gBAAe;CACvC,MAAM,UAAU,YACZ,IAAI,cACF,IACA,QAAQ,MACR,WACA,oBAAoB,YAAY,WAChC,gBAQA,KAAK,KAAK,GAAG,gBACd,GACD,IAAI,eAAe,IAAI,QAAQ,MAAM,eAAe;CAExD,MAAM,0BAA0B,IAAI,yBAClC,IACA,QACA;EAAC;EAAM;EAAe,EACtB,QACA,gBACA,mBAAmB,gBAAgB,UAAU,KAC9C;AAED,QAAO,KAAK,CAAC,SAAS,EAAC,OAAO,MAAK,CAAC,CAAC;AAIrC,QAAO,eAAe,IAAI,QAAQ,yBAAyB,QAAQ;;AAIrE,IAAI,CAAC,mBAAmB,CACjB,iBACH,UAAU,KAAK,aAAa,EAAE,QAAQ,KAAK,GAAG,QAAQ,KAAK,MAAM,EAAE,CAAC,CACrE"}
+ {"version":3,"file":"change-streamer.js","names":[],"sources":["../../../../../zero-cache/src/server/change-streamer.ts"],"sourcesContent":["import {assert} from '../../../shared/src/asserts.ts';\nimport {must} from '../../../shared/src/must.ts';\nimport {DatabaseInitError} from '../../../zqlite/src/db.ts';\nimport {getServerContext} from '../config/server-context.ts';\nimport {getNormalizedZeroConfig} from '../config/zero-config.ts';\nimport {deleteLiteDB} from '../db/delete-lite-db.ts';\nimport {warmupConnections} from '../db/warmup.ts';\nimport {initEventSink, publishCriticalEvent} from '../observability/events.ts';\nimport {upgradeReplica} from '../services/change-source/common/replica-schema.ts';\nimport {initializeCustomChangeSource} from '../services/change-source/custom/change-source.ts';\nimport {initializePostgresChangeSource} from '../services/change-source/pg/change-source.ts';\nimport {BackupMonitor} from '../services/change-streamer/backup-monitor.ts';\nimport {ChangeStreamerHttpServer} from '../services/change-streamer/change-streamer-http.ts';\nimport {initializeStreamer} from '../services/change-streamer/change-streamer-service.ts';\nimport type {ChangeStreamerService} from '../services/change-streamer/change-streamer.ts';\nimport {ReplicaMonitor} from '../services/change-streamer/replica-monitor.ts';\nimport {initChangeStreamerSchema} from '../services/change-streamer/schema/init.ts';\nimport {AutoResetSignal} from '../services/change-streamer/schema/tables.ts';\nimport {PurgeLocker} from '../services/change-streamer/storer.ts';\nimport {exitAfter, runUntilKilled} from '../services/life-cycle.ts';\nimport {\n BackupNotFoundException,\n restoreReplica,\n} from '../services/litestream/commands.ts';\nimport {\n replicationStatusError,\n ReplicationStatusPublisher,\n} from '../services/replicator/replication-status.ts';\nimport {pgClient} from '../types/pg.ts';\nimport {\n parentWorker,\n singleProcessMode,\n type Worker,\n} from '../types/processes.ts';\nimport {getShardConfig} from '../types/shards.ts';\nimport {createLogContext} from './logging.ts';\nimport {startOtelAuto} from './otel-start.ts';\n\nexport default async function runWorker(\n parent: Worker,\n env: NodeJS.ProcessEnv,\n ...argv: string[]\n): Promise<void> {\n const workerStartTime = Date.now();\n const config = getNormalizedZeroConfig({env, argv});\n const {\n taskID,\n changeStreamer: {\n port,\n address,\n protocol,\n startupDelayMs,\n backPressureLimitHeapProportion,\n flowControlConsensusPaddingSeconds,\n },\n upstream,\n change,\n replica,\n initialSync,\n litestream,\n keepaliveTimeoutMs,\n } = config;\n\n startOtelAuto(\n createLogContext(config, 'change-streamer', 0, false),\n 'change-streamer',\n 0,\n );\n const lc = createLogContext(config, 'change-streamer');\n initEventSink(lc, config);\n\n // Kick off DB connection warmup in the background.\n const changeDB = pgClient(\n lc,\n change.db,\n 'change-streamer',\n {\n max: change.maxConns,\n },\n {sendStringAsJson: true},\n );\n void warmupConnections(lc, changeDB, 'change').catch(() => {});\n\n const {autoReset, replicationLag} = config;\n const shard = getShardConfig(config);\n\n // Ensure the change DB schema is initialized/up-to-date, then acquire\n // a lock to prevent change-lock purges. 
This ensures that (this)\n // change-streamer will be able to resume from the backup.\n await initChangeStreamerSchema(lc, changeDB, shard);\n let purgeLock = await new PurgeLocker(lc, shard, changeDB).acquire();\n\n // Restore from litestream if the change-log has entries.\n if (purgeLock) {\n try {\n await restoreReplica(lc, config, purgeLock);\n } catch (e) {\n // If the restore failed, e.g. due to a corrupt or missing backup, the\n // replication-manager recovers by re-syncing.\n const log = e instanceof BackupNotFoundException ? 'warn' : 'error';\n lc[log]?.(\n `error restoring backup. resyncing the replica: ${String(e)}`,\n e,\n );\n\n // The purgeLock must be released if the backup could not be restored,\n // or it will otherwise prevent the change-db update after the resync\n // completes.\n await purgeLock.release();\n purgeLock = null;\n }\n }\n\n let changeStreamer: ChangeStreamerService | undefined;\n\n const context = getServerContext(config);\n\n for (const first of [true, false]) {\n try {\n // Note: This performs initial sync of the replica if necessary.\n const {changeSource, subscriptionState} =\n upstream.type === 'pg'\n ? await initializePostgresChangeSource(\n lc,\n upstream.db,\n shard,\n replica.file,\n {\n ...initialSync,\n replicationSlotFailover: upstream.pgReplicationSlotFailover,\n },\n context,\n replicationLag.reportIntervalMs,\n )\n : await initializeCustomChangeSource(\n lc,\n upstream.db,\n shard,\n replica.file,\n context,\n );\n\n const replicationStatusPublisher =\n ReplicationStatusPublisher.forReplicaFile(replica.file);\n\n changeStreamer = await initializeStreamer(\n lc,\n shard,\n taskID,\n address,\n protocol,\n changeDB,\n changeSource,\n replicationStatusPublisher,\n subscriptionState,\n purgeLock,\n autoReset ?? false,\n {\n backPressureLimitHeapProportion,\n flowControlConsensusPaddingSeconds,\n statementTimeoutMs: change.statementTimeoutMs,\n },\n setTimeout,\n );\n break;\n } catch (e) {\n if (first && e instanceof AutoResetSignal) {\n lc.warn?.(`resetting replica ${replica.file}`, e);\n // TODO: Make deleteLiteDB work with litestream. It will probably have to be\n // a semantic wipe instead of a file delete.\n deleteLiteDB(replica.file);\n // Release the purge lock before retrying. This is safe because the\n // purge lock exists to preserve change-log entries so the new\n // change-streamer can resume from the backup replica's watermark.\n // An AutoResetSignal means we cant resume from the backup replica\n // (e.g. its replication slot is gone), so the change-log entries the lock\n // was protecting are no longer needed. The retry performs a fresh\n // initial sync with a new replication slot, independent of the old\n // change-log. Releasing is also necessary to avoid a\n // self-deadlock when CHANGE_DB == UPSTREAM_DB:\n // CREATE_REPLICATION_SLOT waits for all older transactions to\n // finish, including this lock's open transaction.\n await purgeLock?.release();\n purgeLock = null;\n continue; // execute again with a fresh initial-sync\n }\n await publishCriticalEvent(\n lc,\n replicationStatusError(lc, 'Initializing', e),\n );\n if (e instanceof DatabaseInitError) {\n throw new Error(\n `Cannot open ZERO_REPLICA_FILE at \"${replica.file}\". 
Please check that the path is valid.`,\n {cause: e},\n );\n }\n throw e;\n }\n }\n // impossible: upstream must have advanced in order for replication to be stuck.\n assert(changeStreamer, `resetting replica did not advance replicaVersion`);\n\n // Perform any upgrades to the replica in case it was restored from an\n // earlier version. Note that this upgrade is done by the replicator worker\n // as well (in both the replication-manager and the view-syncer), but the\n // change-streamer independently reads the replica, and it is fine run the\n // upgrade logic redundantly since it is idempotent.\n await upgradeReplica(lc, 'change-streamer-init', replica.file);\n\n const {backupURL, port: metricsPort} = litestream;\n const monitor = backupURL\n ? new BackupMonitor(\n lc,\n replica.file,\n backupURL,\n `http://localhost:${metricsPort}/metrics`,\n changeStreamer,\n // The time between when the zero-cache was started to when the\n // change-streamer is ready to start serves as the initial delay for\n // watermark cleanup (as it either includes a similar replica\n // restoration/preparation step, or an initial-sync, which\n // generally takes longer).\n //\n // Consider: Also account for permanent volumes?\n Date.now() - workerStartTime,\n )\n : new ReplicaMonitor(lc, replica.file, changeStreamer);\n\n const changeStreamerWebServer = new ChangeStreamerHttpServer(\n lc,\n config,\n {port, keepaliveTimeoutMs, startupDelayMs},\n parent,\n changeStreamer,\n monitor instanceof BackupMonitor ? monitor : null,\n );\n\n parent.send(['ready', {ready: true}]);\n\n // Note: The changeStreamer itself is not started here; it is started by the\n // changeStreamerWebServer.\n return runUntilKilled(lc, parent, changeStreamerWebServer, monitor);\n}\n\n// fork()\nif (!singleProcessMode()) {\n void exitAfter(() =>\n runWorker(must(parentWorker), process.env, ...process.argv.slice(2)),\n 
);\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;;;;;;;;;;AAsCA,eAA8B,UAC5B,QACA,KACA,GAAG,MACY;CACf,MAAM,kBAAkB,KAAK,KAAK;CAClC,MAAM,SAAS,wBAAwB;EAAC;EAAK;EAAK,CAAC;CACnD,MAAM,EACJ,QACA,gBAAgB,EACd,MACA,SACA,UACA,gBACA,iCACA,sCAEF,UACA,QACA,SACA,aACA,YACA,uBACE;AAEJ,eACE,iBAAiB,QAAQ,mBAAmB,GAAG,MAAM,EACrD,mBACA,EACD;CACD,MAAM,KAAK,iBAAiB,QAAQ,kBAAkB;AACtD,eAAc,IAAI,OAAO;CAGzB,MAAM,WAAW,SACf,IACA,OAAO,IACP,mBACA,EACE,KAAK,OAAO,UACb,EACD,EAAC,kBAAkB,MAAK,CACzB;AACI,mBAAkB,IAAI,UAAU,SAAS,CAAC,YAAY,GAAG;CAE9D,MAAM,EAAC,WAAW,mBAAkB;CACpC,MAAM,QAAQ,eAAe,OAAO;AAKpC,OAAM,yBAAyB,IAAI,UAAU,MAAM;CACnD,IAAI,YAAY,MAAM,IAAI,YAAY,IAAI,OAAO,SAAS,CAAC,SAAS;AAGpE,KAAI,UACF,KAAI;AACF,QAAM,eAAe,IAAI,QAAQ,UAAU;UACpC,GAAG;AAIV,KADY,aAAa,0BAA0B,SAAS,WAE1D,kDAAkD,OAAO,EAAE,IAC3D,EACD;AAKD,QAAM,UAAU,SAAS;AACzB,cAAY;;CAIhB,IAAI;CAEJ,MAAM,UAAU,iBAAiB,OAAO;AAExC,MAAK,MAAM,SAAS,CAAC,MAAM,MAAM,CAC/B,KAAI;EAEF,MAAM,EAAC,cAAc,sBACnB,SAAS,SAAS,OACd,MAAM,+BACJ,IACA,SAAS,IACT,OACA,QAAQ,MACR;GACE,GAAG;GACH,yBAAyB,SAAS;GACnC,EACD,SACA,eAAe,iBAChB,GACD,MAAM,6BACJ,IACA,SAAS,IACT,OACA,QAAQ,MACR,QACD;AAKP,mBAAiB,MAAM,mBACrB,IACA,OACA,QACA,SACA,UACA,UACA,cATA,2BAA2B,eAAe,QAAQ,KAAK,EAWvD,mBACA,WACA,aAAa,OACb;GACE;GACA;GACA,oBAAoB,OAAO;GAC5B,EACD,WACD;AACD;UACO,GAAG;AACV,MAAI,SAAS,aAAa,iBAAiB;AACzC,MAAG,OAAO,qBAAqB,QAAQ,QAAQ,EAAE;AAGjD,gBAAa,QAAQ,KAAK;AAY1B,SAAM,WAAW,SAAS;AAC1B,eAAY;AACZ;;AAEF,QAAM,qBACJ,IACA,uBAAuB,IAAI,gBAAgB,EAAE,CAC9C;AACD,MAAI,aAAa,kBACf,OAAM,IAAI,MACR,qCAAqC,QAAQ,KAAK,0CAClD,EAAC,OAAO,GAAE,CACX;AAEH,QAAM;;AAIV,QAAO,gBAAgB,mDAAmD;AAO1E,OAAM,eAAe,IAAI,wBAAwB,QAAQ,KAAK;CAE9D,MAAM,EAAC,WAAW,MAAM,gBAAe;CACvC,MAAM,UAAU,YACZ,IAAI,cACF,IACA,QAAQ,MACR,WACA,oBAAoB,YAAY,WAChC,gBAQA,KAAK,KAAK,GAAG,gBACd,GACD,IAAI,eAAe,IAAI,QAAQ,MAAM,eAAe;CAExD,MAAM,0BAA0B,IAAI,yBAClC,IACA,QACA;EAAC;EAAM;EAAoB;EAAe,EAC1C,QACA,gBACA,mBAAmB,gBAAgB,UAAU,KAC9C;AAED,QAAO,KAAK,CAAC,SAAS,EAAC,OAAO,MAAK,CAAC,CAAC;AAIrC,QAAO,eAAe,IAAI,QAAQ,yBAAyB,QAAQ;;AAIrE,IAAI,CAAC,mBAAmB,CACjB,iBACH,UAAU,KAAK,aAAa,EAAE,QAAQ,KAAK,GAAG,QAAQ,KAAK,MAAM,EAAE,CAAC,CACrE"}
@@ -1 +1 @@
- {"version":3,"file":"run-worker.d.ts","sourceRoot":"","sources":["../../../../../../zero-cache/src/server/runner/run-worker.ts"],"names":[],"mappings":"AAAA,OAAO,kCAAkC,CAAC;AAS1C,OAAO,EAAc,KAAK,MAAM,EAAC,MAAM,0BAA0B,CAAC;AAsBlE;;;;;;GAMG;AACH,wBAAsB,SAAS,CAC7B,MAAM,EAAE,MAAM,GAAG,IAAI,EACrB,GAAG,EAAE,MAAM,CAAC,UAAU,GACrB,OAAO,CAAC,IAAI,CAAC,CA4Df"}
+ {"version":3,"file":"run-worker.d.ts","sourceRoot":"","sources":["../../../../../../zero-cache/src/server/runner/run-worker.ts"],"names":[],"mappings":"AAAA,OAAO,kCAAkC,CAAC;AAS1C,OAAO,EAAc,KAAK,MAAM,EAAC,MAAM,0BAA0B,CAAC;AAsBlE;;;;;;GAMG;AACH,wBAAsB,SAAS,CAC7B,MAAM,EAAE,MAAM,GAAG,IAAI,EACrB,GAAG,EAAE,MAAM,CAAC,UAAU,GACrB,OAAO,CAAC,IAAI,CAAC,CAgEf"}
@@ -33,7 +33,7 @@ async function runWorker(parent, env) {
  const lc = createLogContext(cfg, "runner");
  const config = normalizeZeroConfig(lc, cfg, env, await getTaskID(lc));
  const processes = new ProcessManager(lc, parent ?? process);
- const { port, lazyStartup } = config;
+ const { port, keepaliveTimeoutMs, lazyStartup } = config;
  const serverVersion = getServerVersion(config);
  lc.info?.(`starting server${!serverVersion ? "" : `@${serverVersion}`} `, {
  protocolVersion: 50,
@@ -60,7 +60,10 @@ async function runWorker(parent, env) {
  await processes.allWorkersReady();
  parent?.send(["ready", { ready: true }]);
  try {
- await runUntilKilled(lc, parent ?? process, new ZeroDispatcher(config, lc, { port }, startZeroCache, () => printStartupMessage(env)));
+ await runUntilKilled(lc, parent ?? process, new ZeroDispatcher(config, lc, {
+ port,
+ keepaliveTimeoutMs
+ }, startZeroCache, () => printStartupMessage(env)));
  } catch (err) {
  processes.logErrorAndExit(err, "main");
  }
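run-worker.js now forwards the same keepaliveTimeoutMs setting into ZeroDispatcher alongside port. The diff does not show how the HTTP layer consumes the value, so the following is a generic, hedged illustration only: on a plain Node HTTP server, a keep-alive timeout option typically ends up applied like this (standard node:http API, not zero-cache internals; the values are placeholders).

import {createServer} from 'node:http';

// Generic Node illustration, not zero-cache code; value and port are placeholders.
const keepaliveTimeoutMs = 65_000;

const server = createServer((_req, res) => {
  res.end('ok');
});

// keepAliveTimeout bounds how long an idle keep-alive socket stays open;
// headersTimeout is conventionally kept slightly above it so in-flight
// requests are not cut off by the headers timer first.
server.keepAliveTimeout = keepaliveTimeoutMs;
server.headersTimeout = keepaliveTimeoutMs + 1_000;

server.listen(3000);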
@@ -1 +1 @@
- {"version":3,"file":"run-worker.js","names":[],"sources":["../../../../../../zero-cache/src/server/runner/run-worker.ts"],"sourcesContent":["import '../../../../shared/src/dotenv.ts';\n\nimport {styleText} from 'node:util';\nimport {resolver, type Resolver} from '@rocicorp/resolver';\nimport {colorConsole} from '../../../../shared/src/logging.ts';\nimport {PROTOCOL_VERSION} from '../../../../zero-protocol/src/protocol-version.ts';\nimport {normalizeZeroConfig} from '../../config/normalize.ts';\nimport {getServerVersion, getZeroConfig} from '../../config/zero-config.ts';\nimport {ProcessManager, runUntilKilled} from '../../services/life-cycle.ts';\nimport {childWorker, type Worker} from '../../types/processes.ts';\nimport {createLogContext} from '../logging.ts';\nimport {MAIN_URL} from '../worker-urls.ts';\nimport {getTaskID} from './runtime.ts';\nimport {ZeroDispatcher} from './zero-dispatcher.ts';\n\nconst startupMessageEnv = 'ZERO_ENABLE_STARTUP_MESSAGE';\n\nfunction printStartupMessage(env: NodeJS.ProcessEnv) {\n if (env[startupMessageEnv] !== '1') {\n return;\n }\n\n colorConsole.log(\n `\\nBTW, ${styleText(['bold', 'cyan'], 'Cloud Zero')} ` +\n 'is now available - professional Zero hosting from the team that built it.\\n' +\n `Get started now: ${styleText(['blue', 'underline'], 'https://zero.rocicorp.dev/cloud')}\\n\\n` +\n styleText('dim', `Disable this message with ${startupMessageEnv}=0`) +\n '\\n',\n );\n}\n\n/**\n * Top-level `runner` entry point to the zero-cache. This layer is responsible for:\n * * runtime-based config normalization\n * * lazy startup\n * * serving /statsz\n * * auto-reset restarts (TODO)\n */\nexport async function runWorker(\n parent: Worker | null,\n env: NodeJS.ProcessEnv,\n): Promise<void> {\n // Note: Deprecation warnings are only emitted at this top-level parse;\n // they are suppressed when parsed in subprocesses.\n const cfg = getZeroConfig({env, emitDeprecationWarnings: true});\n const lc = createLogContext(cfg, 'runner');\n\n const defaultTaskID = await getTaskID(lc);\n const config = normalizeZeroConfig(lc, cfg, env, defaultTaskID);\n const processes = new ProcessManager(lc, parent ?? process);\n\n const {port, lazyStartup} = config;\n const serverVersion = getServerVersion(config);\n lc.info?.(`starting server${!serverVersion ? '' : `@${serverVersion}`} `, {\n protocolVersion: PROTOCOL_VERSION,\n taskID: config.taskID,\n app: config.app,\n shard: config.shard,\n port: config.port,\n });\n\n let zeroCache: Resolver<Worker> | undefined;\n function startZeroCache(): Promise<Worker> {\n if (zeroCache === undefined) {\n const startMs = performance.now();\n lc.info?.('starting zero-cache');\n\n const r = (zeroCache = resolver<Worker>());\n const w = childWorker(MAIN_URL, env)\n .once('message', () => {\n r.resolve(w);\n lc.info?.(`zero-cache ready (${performance.now() - startMs} ms)`);\n })\n .once('error', r.reject);\n\n processes.addWorker(w, 'user-facing', 'zero-cache');\n }\n return zeroCache.promise;\n }\n\n // Eagerly start the zero-cache if it was not configured with --lazy-startup.\n if (!lazyStartup) {\n void startZeroCache();\n }\n\n await processes.allWorkersReady();\n parent?.send(['ready', {ready: true}]);\n\n try {\n await runUntilKilled(\n lc,\n parent ?? 
process,\n new ZeroDispatcher(config, lc, {port}, startZeroCache, () =>\n printStartupMessage(env),\n ),\n );\n } catch (err) {\n processes.logErrorAndExit(err, 'main');\n }\n\n await processes.done();\n}\n"],"mappings":";;;;;;;;;;;;;;AAeA,IAAM,oBAAoB;AAE1B,SAAS,oBAAoB,KAAwB;AACnD,KAAI,IAAI,uBAAuB,IAC7B;AAGF,cAAa,IACX,UAAU,UAAU,CAAC,QAAQ,OAAO,EAAE,aAAa,CAAC;mBAE9B,UAAU,CAAC,QAAQ,YAAY,EAAE,kCAAkC,CAAC,QACxF,UAAU,OAAO,6BAA6B,kBAAkB,IAAI,GACpE,KACH;;;;;;;;;AAUH,eAAsB,UACpB,QACA,KACe;CAGf,MAAM,MAAM,cAAc;EAAC;EAAK,yBAAyB;EAAK,CAAC;CAC/D,MAAM,KAAK,iBAAiB,KAAK,SAAS;CAG1C,MAAM,SAAS,oBAAoB,IAAI,KAAK,KADtB,MAAM,UAAU,GAAG,CACsB;CAC/D,MAAM,YAAY,IAAI,eAAe,IAAI,UAAU,QAAQ;CAE3D,MAAM,EAAC,MAAM,gBAAe;CAC5B,MAAM,gBAAgB,iBAAiB,OAAO;AAC9C,IAAG,OAAO,kBAAkB,CAAC,gBAAgB,KAAK,IAAI,gBAAgB,IAAI;EACxE,iBAAA;EACA,QAAQ,OAAO;EACf,KAAK,OAAO;EACZ,OAAO,OAAO;EACd,MAAM,OAAO;EACd,CAAC;CAEF,IAAI;CACJ,SAAS,iBAAkC;AACzC,MAAI,cAAc,KAAA,GAAW;GAC3B,MAAM,UAAU,YAAY,KAAK;AACjC,MAAG,OAAO,sBAAsB;GAEhC,MAAM,IAAK,YAAY,UAAkB;GACzC,MAAM,IAAI,YAAY,UAAU,IAAI,CACjC,KAAK,iBAAiB;AACrB,MAAE,QAAQ,EAAE;AACZ,OAAG,OAAO,qBAAqB,YAAY,KAAK,GAAG,QAAQ,MAAM;KACjE,CACD,KAAK,SAAS,EAAE,OAAO;AAE1B,aAAU,UAAU,GAAG,eAAe,aAAa;;AAErD,SAAO,UAAU;;AAInB,KAAI,CAAC,YACE,iBAAgB;AAGvB,OAAM,UAAU,iBAAiB;AACjC,SAAQ,KAAK,CAAC,SAAS,EAAC,OAAO,MAAK,CAAC,CAAC;AAEtC,KAAI;AACF,QAAM,eACJ,IACA,UAAU,SACV,IAAI,eAAe,QAAQ,IAAI,EAAC,MAAK,EAAE,sBACrC,oBAAoB,IAAI,CACzB,CACF;UACM,KAAK;AACZ,YAAU,gBAAgB,KAAK,OAAO;;AAGxC,OAAM,UAAU,MAAM"}
+ {"version":3,"file":"run-worker.js","names":[],"sources":["../../../../../../zero-cache/src/server/runner/run-worker.ts"],"sourcesContent":["import '../../../../shared/src/dotenv.ts';\n\nimport {styleText} from 'node:util';\nimport {resolver, type Resolver} from '@rocicorp/resolver';\nimport {colorConsole} from '../../../../shared/src/logging.ts';\nimport {PROTOCOL_VERSION} from '../../../../zero-protocol/src/protocol-version.ts';\nimport {normalizeZeroConfig} from '../../config/normalize.ts';\nimport {getServerVersion, getZeroConfig} from '../../config/zero-config.ts';\nimport {ProcessManager, runUntilKilled} from '../../services/life-cycle.ts';\nimport {childWorker, type Worker} from '../../types/processes.ts';\nimport {createLogContext} from '../logging.ts';\nimport {MAIN_URL} from '../worker-urls.ts';\nimport {getTaskID} from './runtime.ts';\nimport {ZeroDispatcher} from './zero-dispatcher.ts';\n\nconst startupMessageEnv = 'ZERO_ENABLE_STARTUP_MESSAGE';\n\nfunction printStartupMessage(env: NodeJS.ProcessEnv) {\n if (env[startupMessageEnv] !== '1') {\n return;\n }\n\n colorConsole.log(\n `\\nBTW, ${styleText(['bold', 'cyan'], 'Cloud Zero')} ` +\n 'is now available - professional Zero hosting from the team that built it.\\n' +\n `Get started now: ${styleText(['blue', 'underline'], 'https://zero.rocicorp.dev/cloud')}\\n\\n` +\n styleText('dim', `Disable this message with ${startupMessageEnv}=0`) +\n '\\n',\n );\n}\n\n/**\n * Top-level `runner` entry point to the zero-cache. This layer is responsible for:\n * * runtime-based config normalization\n * * lazy startup\n * * serving /statsz\n * * auto-reset restarts (TODO)\n */\nexport async function runWorker(\n parent: Worker | null,\n env: NodeJS.ProcessEnv,\n): Promise<void> {\n // Note: Deprecation warnings are only emitted at this top-level parse;\n // they are suppressed when parsed in subprocesses.\n const cfg = getZeroConfig({env, emitDeprecationWarnings: true});\n const lc = createLogContext(cfg, 'runner');\n\n const defaultTaskID = await getTaskID(lc);\n const config = normalizeZeroConfig(lc, cfg, env, defaultTaskID);\n const processes = new ProcessManager(lc, parent ?? process);\n\n const {port, keepaliveTimeoutMs, lazyStartup} = config;\n const serverVersion = getServerVersion(config);\n lc.info?.(`starting server${!serverVersion ? '' : `@${serverVersion}`} `, {\n protocolVersion: PROTOCOL_VERSION,\n taskID: config.taskID,\n app: config.app,\n shard: config.shard,\n port: config.port,\n });\n\n let zeroCache: Resolver<Worker> | undefined;\n function startZeroCache(): Promise<Worker> {\n if (zeroCache === undefined) {\n const startMs = performance.now();\n lc.info?.('starting zero-cache');\n\n const r = (zeroCache = resolver<Worker>());\n const w = childWorker(MAIN_URL, env)\n .once('message', () => {\n r.resolve(w);\n lc.info?.(`zero-cache ready (${performance.now() - startMs} ms)`);\n })\n .once('error', r.reject);\n\n processes.addWorker(w, 'user-facing', 'zero-cache');\n }\n return zeroCache.promise;\n }\n\n // Eagerly start the zero-cache if it was not configured with --lazy-startup.\n if (!lazyStartup) {\n void startZeroCache();\n }\n\n await processes.allWorkersReady();\n parent?.send(['ready', {ready: true}]);\n\n try {\n await runUntilKilled(\n lc,\n parent ?? 
process,\n new ZeroDispatcher(\n config,\n lc,\n {port, keepaliveTimeoutMs},\n startZeroCache,\n () => printStartupMessage(env),\n ),\n );\n } catch (err) {\n processes.logErrorAndExit(err, 'main');\n }\n\n await processes.done();\n}\n"],"mappings":";;;;;;;;;;;;;;AAeA,IAAM,oBAAoB;AAE1B,SAAS,oBAAoB,KAAwB;AACnD,KAAI,IAAI,uBAAuB,IAC7B;AAGF,cAAa,IACX,UAAU,UAAU,CAAC,QAAQ,OAAO,EAAE,aAAa,CAAC;mBAE9B,UAAU,CAAC,QAAQ,YAAY,EAAE,kCAAkC,CAAC,QACxF,UAAU,OAAO,6BAA6B,kBAAkB,IAAI,GACpE,KACH;;;;;;;;;AAUH,eAAsB,UACpB,QACA,KACe;CAGf,MAAM,MAAM,cAAc;EAAC;EAAK,yBAAyB;EAAK,CAAC;CAC/D,MAAM,KAAK,iBAAiB,KAAK,SAAS;CAG1C,MAAM,SAAS,oBAAoB,IAAI,KAAK,KADtB,MAAM,UAAU,GAAG,CACsB;CAC/D,MAAM,YAAY,IAAI,eAAe,IAAI,UAAU,QAAQ;CAE3D,MAAM,EAAC,MAAM,oBAAoB,gBAAe;CAChD,MAAM,gBAAgB,iBAAiB,OAAO;AAC9C,IAAG,OAAO,kBAAkB,CAAC,gBAAgB,KAAK,IAAI,gBAAgB,IAAI;EACxE,iBAAA;EACA,QAAQ,OAAO;EACf,KAAK,OAAO;EACZ,OAAO,OAAO;EACd,MAAM,OAAO;EACd,CAAC;CAEF,IAAI;CACJ,SAAS,iBAAkC;AACzC,MAAI,cAAc,KAAA,GAAW;GAC3B,MAAM,UAAU,YAAY,KAAK;AACjC,MAAG,OAAO,sBAAsB;GAEhC,MAAM,IAAK,YAAY,UAAkB;GACzC,MAAM,IAAI,YAAY,UAAU,IAAI,CACjC,KAAK,iBAAiB;AACrB,MAAE,QAAQ,EAAE;AACZ,OAAG,OAAO,qBAAqB,YAAY,KAAK,GAAG,QAAQ,MAAM;KACjE,CACD,KAAK,SAAS,EAAE,OAAO;AAE1B,aAAU,UAAU,GAAG,eAAe,aAAa;;AAErD,SAAO,UAAU;;AAInB,KAAI,CAAC,YACE,iBAAgB;AAGvB,OAAM,UAAU,iBAAiB;AACjC,SAAQ,KAAK,CAAC,SAAS,EAAC,OAAO,MAAK,CAAC,CAAC;AAEtC,KAAI;AACF,QAAM,eACJ,IACA,UAAU,SACV,IAAI,eACF,QACA,IACA;GAAC;GAAM;GAAmB,EAC1B,sBACM,oBAAoB,IAAI,CAC/B,CACF;UACM,KAAK;AACZ,YAAU,gBAAgB,KAAK,OAAO;;AAGxC,OAAM,UAAU,MAAM"}
@@ -93,14 +93,14 @@ function runWorker(parent, env, ...args) {
  const viewSyncerFactory = (id, sub, drainCoordinator) => {
  const logger = lc.withContext("component", "view-syncer").withContext("clientGroupID", id).withContext("instance", randomID());
  const customQueryTransformer = customQueryConfig && new CustomQueryTransformer(logger, shard);
- const contextManager = new ConnectionContextManagerImpl(logger, config.auth.revalidateIntervalSeconds, config.auth.retransformIntervalSeconds, customQueryConfig, pushConfig, validateLegacyJWT);
+ const connContextManager = new ConnectionContextManagerImpl(logger, config.auth.revalidateIntervalSeconds, config.auth.retransformIntervalSeconds, customQueryConfig, pushConfig, validateLegacyJWT);
  lc.debug?.(`creating view syncer. Query Planner Enabled: ${config.enableQueryPlanner}`);
  const inspectorDelegate = new InspectorDelegate(customQueryTransformer);
  const priorityOpRunningYieldThresholdMs = Math.max(config.yieldThresholdMs / 4, 2);
  const normalYieldThresholdMs = Math.max(config.yieldThresholdMs, 2);
- return new ViewSyncerService(config, logger, shard, config.taskID, id, cvrDB, new PipelineDriver(logger, config.log, new Snapshotter(logger, replicaFile, shard), shard, operatorStorage.createClientGroupStorage(id), id, inspectorDelegate, () => isPriorityOpRunning() ? priorityOpRunningYieldThresholdMs : normalYieldThresholdMs, config.enableQueryPlanner, config), sub, drainCoordinator, config.log.slowHydrateThreshold, inspectorDelegate, contextManager, customQueryTransformer, runPriorityOp);
+ return new ViewSyncerService(config, logger, shard, config.taskID, id, cvrDB, new PipelineDriver(logger, config.log, new Snapshotter(logger, replicaFile, shard), shard, operatorStorage.createClientGroupStorage(id), id, inspectorDelegate, () => isPriorityOpRunning() ? priorityOpRunningYieldThresholdMs : normalYieldThresholdMs, config.enableQueryPlanner, config), sub, drainCoordinator, config.log.slowHydrateThreshold, inspectorDelegate, connContextManager, customQueryTransformer, runPriorityOp);
  };
- const syncer = new Syncer(lc, config, viewSyncerFactory, upstreamDB ? (id) => new MutagenService(lc.withContext("component", "mutagen").withContext("clientGroupID", id), shard, id, upstreamDB, config, writeAuthzStorage) : void 0, pushConfig === void 0 ? void 0 : (id, contextManager) => new PusherService(config, lc.withContext("clientGroupID", id), id, contextManager), parent, validateLegacyJWT);
+ const syncer = new Syncer(lc, config, viewSyncerFactory, upstreamDB ? (id) => new MutagenService(lc.withContext("component", "mutagen").withContext("clientGroupID", id), shard, id, upstreamDB, config, writeAuthzStorage) : void 0, pushConfig === void 0 ? void 0 : (id, connContextManager) => new PusherService(config, lc.withContext("clientGroupID", id), id, connContextManager), parent, validateLegacyJWT);
  startAnonymousTelemetry(lc, config);
  dbWarmup.then(() => parent.send(["ready", { ready: true }]));
  return runUntilKilled(lc, parent, syncer);
@@ -1 +1 @@
- {"version":3,"file":"syncer.js","names":[],"sources":["../../../../../zero-cache/src/server/syncer.ts"],"sourcesContent":["import {randomUUID} from 'node:crypto';\nimport {tmpdir} from 'node:os';\nimport path from 'node:path';\nimport {pid} from 'node:process';\nimport {assert} from '../../../shared/src/asserts.ts';\nimport {must} from '../../../shared/src/must.ts';\nimport {randInt} from '../../../shared/src/rand.ts';\nimport {promiseVoid} from '../../../shared/src/resolved-promises.ts';\nimport * as v from '../../../shared/src/valita.ts';\nimport {DatabaseStorage} from '../../../zqlite/src/database-storage.ts';\nimport type {ValidateLegacyJWT} from '../auth/auth.ts';\nimport {tokenConfigOptions, verifyToken} from '../auth/jwt.ts';\nimport type {NormalizedZeroConfig} from '../config/normalize.ts';\nimport {getNormalizedZeroConfig} from '../config/zero-config.ts';\nimport {CustomQueryTransformer} from '../custom-queries/transform-query.ts';\nimport {warmupConnections} from '../db/warmup.ts';\nimport {initEventSink} from '../observability/events.ts';\nimport {exitAfter, runUntilKilled} from '../services/life-cycle.ts';\nimport {MutagenService} from '../services/mutagen/mutagen.ts';\nimport {PusherService} from '../services/mutagen/pusher.ts';\nimport type {ReplicaState} from '../services/replicator/replicator.ts';\nimport {\n type ConnectionContextManager,\n ConnectionContextManagerImpl,\n} from '../services/view-syncer/connection-context-manager.ts';\nimport type {DrainCoordinator} from '../services/view-syncer/drain-coordinator.ts';\nimport {PipelineDriver} from '../services/view-syncer/pipeline-driver.ts';\nimport {Snapshotter} from '../services/view-syncer/snapshotter.ts';\nimport {ViewSyncerService} from '../services/view-syncer/view-syncer.ts';\nimport {ProtocolErrorWithLevel} from '../types/error-with-level.ts';\nimport {pgClient} from '../types/pg.ts';\nimport {\n parentWorker,\n singleProcessMode,\n type Worker,\n} from '../types/processes.ts';\nimport {getShardID} from '../types/shards.ts';\nimport type {Subscription} from '../types/subscription.ts';\nimport {replicaFileModeSchema, replicaFileName} from '../workers/replicator.ts';\nimport {Syncer} from '../workers/syncer.ts';\nimport {startAnonymousTelemetry} from './anonymous-otel-start.ts';\nimport {InspectorDelegate} from './inspector-delegate.ts';\nimport {createLogContext} from './logging.ts';\nimport {startOtelAuto} from './otel-start.ts';\nimport {isPriorityOpRunning, runPriorityOp} from './priority-op.ts';\n\nfunction randomID() {\n return randInt(1, Number.MAX_SAFE_INTEGER).toString(36);\n}\n\nfunction getCustomQueryConfig(\n config: Pick<NormalizedZeroConfig, 'query' | 'getQueries'>,\n) {\n const queryConfig = config.query?.url ? config.query : config.getQueries;\n\n if (!queryConfig?.url) {\n return undefined;\n }\n\n return {\n url: queryConfig.url,\n apiKey: queryConfig.apiKey,\n allowedClientHeaders: queryConfig.allowedClientHeaders,\n forwardCookies: queryConfig.forwardCookies ?? 
false,\n };\n}\n\nexport default function runWorker(\n parent: Worker,\n env: NodeJS.ProcessEnv,\n ...args: string[]\n): Promise<void> {\n assert(args.length >= 2, `expected [fileMode, workerIndex, ...flags]`);\n const fileMode = v.parse(args[0], replicaFileModeSchema);\n const workerIndex = Number(args[1]);\n const config = getNormalizedZeroConfig({env, argv: args.slice(2)});\n\n startOtelAuto(\n createLogContext(config, 'syncer', workerIndex, false),\n 'syncer',\n workerIndex,\n );\n const lc = createLogContext(config, 'syncer', workerIndex);\n initEventSink(lc, config);\n\n const {cvr, upstream, enableCrudMutations} = config;\n\n const replicaFile = replicaFileName(config.replica.file, fileMode);\n lc.debug?.(`running view-syncer on ${replicaFile}`);\n\n const cvrDB = pgClient(lc, cvr.db, `sync-worker-${pid}-cvr`, {\n max: must(cvr.maxConnsPerWorker, 'cvr.maxConnsPerWorker must be set'),\n });\n\n const upstreamDB = enableCrudMutations\n ? pgClient(lc, upstream.db, `sync-worker-${pid}-upstream`, {\n max: must(\n upstream.maxConnsPerWorker,\n 'upstream.maxConnsPerWorker must be set',\n ),\n })\n : undefined;\n\n const dbWarmup = Promise.allSettled([\n warmupConnections(lc, cvrDB, 'cvr'),\n upstreamDB ? warmupConnections(lc, upstreamDB, 'upstream') : promiseVoid,\n ]);\n\n const tmpDir = config.storageDBTmpDir ?? tmpdir();\n const operatorStorage = DatabaseStorage.create(\n lc,\n path.join(tmpDir, `sync-worker-${randomUUID()}`),\n );\n const writeAuthzStorage = DatabaseStorage.create(\n lc,\n path.join(tmpDir, `mutagen-${randomUUID()}`),\n );\n\n const shard = getShardID(config);\n const customQueryConfig = getCustomQueryConfig(config);\n const pushConfig =\n config.push.url === undefined && config.mutate.url === undefined\n ? undefined\n : {\n ...config.push,\n ...config.mutate,\n url: must(\n config.push.url ?? config.mutate.url,\n 'No push or mutate URL configured',\n ),\n };\n\n /** @deprecated used in JWT validation */\n let validateLegacyJWT: ValidateLegacyJWT | undefined = undefined;\n\n const tokenOptions = tokenConfigOptions(config.auth ?? {});\n if (tokenOptions.length === 1) {\n validateLegacyJWT = async (token, {userID}) => {\n if (!userID) {\n throw new ProtocolErrorWithLevel(\n {\n kind: 'Unauthorized',\n message: 'UserID is required for JWT validation.',\n origin: 'zeroCache',\n },\n 'warn',\n );\n }\n\n const decoded = await verifyToken(config.auth, token, {\n subject: userID,\n ...(config.auth?.issuer && {issuer: config.auth.issuer}),\n ...(config.auth?.audience && {\n audience: config.auth.audience,\n }),\n });\n return {\n type: 'jwt',\n raw: token,\n decoded,\n };\n };\n }\n\n const viewSyncerFactory = (\n id: string,\n sub: Subscription<ReplicaState>,\n drainCoordinator: DrainCoordinator,\n ) => {\n const logger = lc\n .withContext('component', 'view-syncer')\n .withContext('clientGroupID', id)\n .withContext('instance', randomID());\n\n const customQueryTransformer =\n customQueryConfig && new CustomQueryTransformer(logger, shard);\n const contextManager = new ConnectionContextManagerImpl(\n logger,\n config.auth.revalidateIntervalSeconds,\n config.auth.retransformIntervalSeconds,\n customQueryConfig,\n pushConfig,\n validateLegacyJWT,\n );\n\n lc.debug?.(\n `creating view syncer. 
Query Planner Enabled: ${config.enableQueryPlanner}`,\n );\n\n const inspectorDelegate = new InspectorDelegate(customQueryTransformer);\n\n const priorityOpRunningYieldThresholdMs = Math.max(\n config.yieldThresholdMs / 4,\n 2,\n );\n const normalYieldThresholdMs = Math.max(config.yieldThresholdMs, 2);\n\n return new ViewSyncerService(\n config,\n logger,\n shard,\n config.taskID,\n id,\n cvrDB,\n new PipelineDriver(\n logger,\n config.log,\n new Snapshotter(logger, replicaFile, shard),\n shard,\n operatorStorage.createClientGroupStorage(id),\n id,\n inspectorDelegate,\n () =>\n isPriorityOpRunning()\n ? priorityOpRunningYieldThresholdMs\n : normalYieldThresholdMs,\n config.enableQueryPlanner,\n config,\n ),\n sub,\n drainCoordinator,\n config.log.slowHydrateThreshold,\n inspectorDelegate,\n contextManager,\n customQueryTransformer,\n runPriorityOp,\n );\n };\n\n const mutagenFactory = upstreamDB\n ? (id: string) =>\n new MutagenService(\n lc\n .withContext('component', 'mutagen')\n .withContext('clientGroupID', id),\n shard,\n id,\n upstreamDB,\n config,\n writeAuthzStorage,\n )\n : undefined;\n\n const pusherFactory =\n pushConfig === undefined\n ? undefined\n : (id: string, contextManager: ConnectionContextManager) =>\n new PusherService(\n config,\n lc.withContext('clientGroupID', id),\n id,\n contextManager,\n );\n\n const syncer = new Syncer(\n lc,\n config,\n viewSyncerFactory,\n mutagenFactory,\n pusherFactory,\n parent,\n validateLegacyJWT,\n );\n\n startAnonymousTelemetry(lc, config);\n\n void dbWarmup.then(() => parent.send(['ready', {ready: true}]));\n\n return runUntilKilled(lc, parent, syncer);\n}\n\n// fork()\nif (!singleProcessMode()) {\n void exitAfter(() =>\n runWorker(must(parentWorker), process.env, ...process.argv.slice(2)),\n );\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AA8CA,SAAS,WAAW;AAClB,QAAO,QAAQ,GAAG,OAAO,iBAAiB,CAAC,SAAS,GAAG;;AAGzD,SAAS,qBACP,QACA;CACA,MAAM,cAAc,OAAO,OAAO,MAAM,OAAO,QAAQ,OAAO;AAE9D,KAAI,CAAC,aAAa,IAChB;AAGF,QAAO;EACL,KAAK,YAAY;EACjB,QAAQ,YAAY;EACpB,sBAAsB,YAAY;EAClC,gBAAgB,YAAY,kBAAkB;EAC/C;;AAGH,SAAwB,UACtB,QACA,KACA,GAAG,MACY;AACf,QAAO,KAAK,UAAU,GAAG,6CAA6C;CACtE,MAAM,WAAW,MAAQ,KAAK,IAAI,sBAAsB;CACxD,MAAM,cAAc,OAAO,KAAK,GAAG;CACnC,MAAM,SAAS,wBAAwB;EAAC;EAAK,MAAM,KAAK,MAAM,EAAE;EAAC,CAAC;AAElE,eACE,iBAAiB,QAAQ,UAAU,aAAa,MAAM,EACtD,UACA,YACD;CACD,MAAM,KAAK,iBAAiB,QAAQ,UAAU,YAAY;AAC1D,eAAc,IAAI,OAAO;CAEzB,MAAM,EAAC,KAAK,UAAU,wBAAuB;CAE7C,MAAM,cAAc,gBAAgB,OAAO,QAAQ,MAAM,SAAS;AAClE,IAAG,QAAQ,0BAA0B,cAAc;CAEnD,MAAM,QAAQ,SAAS,IAAI,IAAI,IAAI,eAAe,IAAI,OAAO,EAC3D,KAAK,KAAK,IAAI,mBAAmB,oCAAoC,EACtE,CAAC;CAEF,MAAM,aAAa,sBACf,SAAS,IAAI,SAAS,IAAI,eAAe,IAAI,YAAY,EACvD,KAAK,KACH,SAAS,mBACT,yCACD,EACF,CAAC,GACF,KAAA;CAEJ,MAAM,WAAW,QAAQ,WAAW,CAClC,kBAAkB,IAAI,OAAO,MAAM,EACnC,aAAa,kBAAkB,IAAI,YAAY,WAAW,GAAG,YAC9D,CAAC;CAEF,MAAM,SAAS,OAAO,mBAAmB,QAAQ;CACjD,MAAM,kBAAkB,gBAAgB,OACtC,IACA,KAAK,KAAK,QAAQ,eAAe,YAAY,GAAG,CACjD;CACD,MAAM,oBAAoB,gBAAgB,OACxC,IACA,KAAK,KAAK,QAAQ,WAAW,YAAY,GAAG,CAC7C;CAED,MAAM,QAAQ,WAAW,OAAO;CAChC,MAAM,oBAAoB,qBAAqB,OAAO;CACtD,MAAM,aACJ,OAAO,KAAK,QAAQ,KAAA,KAAa,OAAO,OAAO,QAAQ,KAAA,IACnD,KAAA,IACA;EACE,GAAG,OAAO;EACV,GAAG,OAAO;EACV,KAAK,KACH,OAAO,KAAK,OAAO,OAAO,OAAO,KACjC,mCACD;EACF;;CAGP,IAAI,oBAAmD,KAAA;AAGvD,KADqB,mBAAmB,OAAO,QAAQ,EAAE,CAAC,CACzC,WAAW,EAC1B,qBAAoB,OAAO,OAAO,EAAC,aAAY;AAC7C,MAAI,CAAC,OACH,OAAM,IAAI,uBACR;GACE,MAAM;GACN,SAAS;GACT,QAAQ;GACT,EACD,OACD;AAUH,SAAO;GACL,MAAM;GACN,KAAK;GACL,SAVc,MAAM,YAAY,OAAO,MAAM,OAAO;IACpD,SAAS;IACT,GAAI,OAAO,MAAM,UAAU,EAAC,QAAQ,OAAO,KAAK,QAAO;IACvD,GAAI,OAAO,MAAM,YAAY,EAC3B,UAAU,O
AAO,KAAK,UACvB;IACF,CAAC;GAKD;;CAIL,MAAM,qBACJ,IACA,KACA,qBACG;EACH,MAAM,SAAS,GACZ,YAAY,aAAa,cAAc,CACvC,YAAY,iBAAiB,GAAG,CAChC,YAAY,YAAY,UAAU,CAAC;EAEtC,MAAM,yBACJ,qBAAqB,IAAI,uBAAuB,QAAQ,MAAM;EAChE,MAAM,iBAAiB,IAAI,6BACzB,QACA,OAAO,KAAK,2BACZ,OAAO,KAAK,4BACZ,mBACA,YACA,kBACD;AAED,KAAG,QACD,gDAAgD,OAAO,qBACxD;EAED,MAAM,oBAAoB,IAAI,kBAAkB,uBAAuB;EAEvE,MAAM,oCAAoC,KAAK,IAC7C,OAAO,mBAAmB,GAC1B,EACD;EACD,MAAM,yBAAyB,KAAK,IAAI,OAAO,kBAAkB,EAAE;AAEnE,SAAO,IAAI,kBACT,QACA,QACA,OACA,OAAO,QACP,IACA,OACA,IAAI,eACF,QACA,OAAO,KACP,IAAI,YAAY,QAAQ,aAAa,MAAM,EAC3C,OACA,gBAAgB,yBAAyB,GAAG,EAC5C,IACA,yBAEE,qBAAqB,GACjB,oCACA,wBACN,OAAO,oBACP,OACD,EACD,KACA,kBACA,OAAO,IAAI,sBACX,mBACA,gBACA,wBACA,cACD;;CA4BH,MAAM,SAAS,IAAI,OACjB,IACA,QACA,mBA5BqB,cAClB,OACC,IAAI,eACF,GACG,YAAY,aAAa,UAAU,CACnC,YAAY,iBAAiB,GAAG,EACnC,OACA,IACA,YACA,QACA,kBACD,GACH,KAAA,GAGF,eAAe,KAAA,IACX,KAAA,KACC,IAAY,mBACX,IAAI,cACF,QACA,GAAG,YAAY,iBAAiB,GAAG,EACnC,IACA,eACD,EAQP,QACA,kBACD;AAED,yBAAwB,IAAI,OAAO;AAE9B,UAAS,WAAW,OAAO,KAAK,CAAC,SAAS,EAAC,OAAO,MAAK,CAAC,CAAC,CAAC;AAE/D,QAAO,eAAe,IAAI,QAAQ,OAAO;;AAI3C,IAAI,CAAC,mBAAmB,CACjB,iBACH,UAAU,KAAK,aAAa,EAAE,QAAQ,KAAK,GAAG,QAAQ,KAAK,MAAM,EAAE,CAAC,CACrE"}
+ {"version":3,"file":"syncer.js","names":[],"sources":["../../../../../zero-cache/src/server/syncer.ts"],"sourcesContent":["import {randomUUID} from 'node:crypto';\nimport {tmpdir} from 'node:os';\nimport path from 'node:path';\nimport {pid} from 'node:process';\nimport {assert} from '../../../shared/src/asserts.ts';\nimport {must} from '../../../shared/src/must.ts';\nimport {randInt} from '../../../shared/src/rand.ts';\nimport {promiseVoid} from '../../../shared/src/resolved-promises.ts';\nimport * as v from '../../../shared/src/valita.ts';\nimport {DatabaseStorage} from '../../../zqlite/src/database-storage.ts';\nimport type {ValidateLegacyJWT} from '../auth/auth.ts';\nimport {tokenConfigOptions, verifyToken} from '../auth/jwt.ts';\nimport type {NormalizedZeroConfig} from '../config/normalize.ts';\nimport {getNormalizedZeroConfig} from '../config/zero-config.ts';\nimport {CustomQueryTransformer} from '../custom-queries/transform-query.ts';\nimport {warmupConnections} from '../db/warmup.ts';\nimport {initEventSink} from '../observability/events.ts';\nimport {exitAfter, runUntilKilled} from '../services/life-cycle.ts';\nimport {MutagenService} from '../services/mutagen/mutagen.ts';\nimport {PusherService} from '../services/mutagen/pusher.ts';\nimport type {ReplicaState} from '../services/replicator/replicator.ts';\nimport {\n type ConnectionContextManager,\n ConnectionContextManagerImpl,\n} from '../services/view-syncer/connection-context-manager.ts';\nimport type {DrainCoordinator} from '../services/view-syncer/drain-coordinator.ts';\nimport {PipelineDriver} from '../services/view-syncer/pipeline-driver.ts';\nimport {Snapshotter} from '../services/view-syncer/snapshotter.ts';\nimport {ViewSyncerService} from '../services/view-syncer/view-syncer.ts';\nimport {ProtocolErrorWithLevel} from '../types/error-with-level.ts';\nimport {pgClient} from '../types/pg.ts';\nimport {\n parentWorker,\n singleProcessMode,\n type Worker,\n} from '../types/processes.ts';\nimport {getShardID} from '../types/shards.ts';\nimport type {Subscription} from '../types/subscription.ts';\nimport {replicaFileModeSchema, replicaFileName} from '../workers/replicator.ts';\nimport {Syncer} from '../workers/syncer.ts';\nimport {startAnonymousTelemetry} from './anonymous-otel-start.ts';\nimport {InspectorDelegate} from './inspector-delegate.ts';\nimport {createLogContext} from './logging.ts';\nimport {startOtelAuto} from './otel-start.ts';\nimport {isPriorityOpRunning, runPriorityOp} from './priority-op.ts';\n\nfunction randomID() {\n return randInt(1, Number.MAX_SAFE_INTEGER).toString(36);\n}\n\nfunction getCustomQueryConfig(\n config: Pick<NormalizedZeroConfig, 'query' | 'getQueries'>,\n) {\n const queryConfig = config.query?.url ? config.query : config.getQueries;\n\n if (!queryConfig?.url) {\n return undefined;\n }\n\n return {\n url: queryConfig.url,\n apiKey: queryConfig.apiKey,\n allowedClientHeaders: queryConfig.allowedClientHeaders,\n forwardCookies: queryConfig.forwardCookies ?? 
false,\n };\n}\n\nexport default function runWorker(\n parent: Worker,\n env: NodeJS.ProcessEnv,\n ...args: string[]\n): Promise<void> {\n assert(args.length >= 2, `expected [fileMode, workerIndex, ...flags]`);\n const fileMode = v.parse(args[0], replicaFileModeSchema);\n const workerIndex = Number(args[1]);\n const config = getNormalizedZeroConfig({env, argv: args.slice(2)});\n\n startOtelAuto(\n createLogContext(config, 'syncer', workerIndex, false),\n 'syncer',\n workerIndex,\n );\n const lc = createLogContext(config, 'syncer', workerIndex);\n initEventSink(lc, config);\n\n const {cvr, upstream, enableCrudMutations} = config;\n\n const replicaFile = replicaFileName(config.replica.file, fileMode);\n lc.debug?.(`running view-syncer on ${replicaFile}`);\n\n const cvrDB = pgClient(lc, cvr.db, `sync-worker-${pid}-cvr`, {\n max: must(cvr.maxConnsPerWorker, 'cvr.maxConnsPerWorker must be set'),\n });\n\n const upstreamDB = enableCrudMutations\n ? pgClient(lc, upstream.db, `sync-worker-${pid}-upstream`, {\n max: must(\n upstream.maxConnsPerWorker,\n 'upstream.maxConnsPerWorker must be set',\n ),\n })\n : undefined;\n\n const dbWarmup = Promise.allSettled([\n warmupConnections(lc, cvrDB, 'cvr'),\n upstreamDB ? warmupConnections(lc, upstreamDB, 'upstream') : promiseVoid,\n ]);\n\n const tmpDir = config.storageDBTmpDir ?? tmpdir();\n const operatorStorage = DatabaseStorage.create(\n lc,\n path.join(tmpDir, `sync-worker-${randomUUID()}`),\n );\n const writeAuthzStorage = DatabaseStorage.create(\n lc,\n path.join(tmpDir, `mutagen-${randomUUID()}`),\n );\n\n const shard = getShardID(config);\n const customQueryConfig = getCustomQueryConfig(config);\n const pushConfig =\n config.push.url === undefined && config.mutate.url === undefined\n ? undefined\n : {\n ...config.push,\n ...config.mutate,\n url: must(\n config.push.url ?? config.mutate.url,\n 'No push or mutate URL configured',\n ),\n };\n\n /** @deprecated used in JWT validation */\n let validateLegacyJWT: ValidateLegacyJWT | undefined = undefined;\n\n const tokenOptions = tokenConfigOptions(config.auth ?? {});\n if (tokenOptions.length === 1) {\n validateLegacyJWT = async (token, {userID}) => {\n if (!userID) {\n throw new ProtocolErrorWithLevel(\n {\n kind: 'Unauthorized',\n message: 'UserID is required for JWT validation.',\n origin: 'zeroCache',\n },\n 'warn',\n );\n }\n\n const decoded = await verifyToken(config.auth, token, {\n subject: userID,\n ...(config.auth?.issuer && {issuer: config.auth.issuer}),\n ...(config.auth?.audience && {\n audience: config.auth.audience,\n }),\n });\n return {\n type: 'jwt',\n raw: token,\n decoded,\n };\n };\n }\n\n const viewSyncerFactory = (\n id: string,\n sub: Subscription<ReplicaState>,\n drainCoordinator: DrainCoordinator,\n ) => {\n const logger = lc\n .withContext('component', 'view-syncer')\n .withContext('clientGroupID', id)\n .withContext('instance', randomID());\n\n const customQueryTransformer =\n customQueryConfig && new CustomQueryTransformer(logger, shard);\n const connContextManager = new ConnectionContextManagerImpl(\n logger,\n config.auth.revalidateIntervalSeconds,\n config.auth.retransformIntervalSeconds,\n customQueryConfig,\n pushConfig,\n validateLegacyJWT,\n );\n\n lc.debug?.(\n `creating view syncer. 
Query Planner Enabled: ${config.enableQueryPlanner}`,\n );\n\n const inspectorDelegate = new InspectorDelegate(customQueryTransformer);\n\n const priorityOpRunningYieldThresholdMs = Math.max(\n config.yieldThresholdMs / 4,\n 2,\n );\n const normalYieldThresholdMs = Math.max(config.yieldThresholdMs, 2);\n\n return new ViewSyncerService(\n config,\n logger,\n shard,\n config.taskID,\n id,\n cvrDB,\n new PipelineDriver(\n logger,\n config.log,\n new Snapshotter(logger, replicaFile, shard),\n shard,\n operatorStorage.createClientGroupStorage(id),\n id,\n inspectorDelegate,\n () =>\n isPriorityOpRunning()\n ? priorityOpRunningYieldThresholdMs\n : normalYieldThresholdMs,\n config.enableQueryPlanner,\n config,\n ),\n sub,\n drainCoordinator,\n config.log.slowHydrateThreshold,\n inspectorDelegate,\n connContextManager,\n customQueryTransformer,\n runPriorityOp,\n );\n };\n\n const mutagenFactory = upstreamDB\n ? (id: string) =>\n new MutagenService(\n lc\n .withContext('component', 'mutagen')\n .withContext('clientGroupID', id),\n shard,\n id,\n upstreamDB,\n config,\n writeAuthzStorage,\n )\n : undefined;\n\n const pusherFactory =\n pushConfig === undefined\n ? undefined\n : (id: string, connContextManager: ConnectionContextManager) =>\n new PusherService(\n config,\n lc.withContext('clientGroupID', id),\n id,\n connContextManager,\n );\n\n const syncer = new Syncer(\n lc,\n config,\n viewSyncerFactory,\n mutagenFactory,\n pusherFactory,\n parent,\n validateLegacyJWT,\n );\n\n startAnonymousTelemetry(lc, config);\n\n void dbWarmup.then(() => parent.send(['ready', {ready: true}]));\n\n return runUntilKilled(lc, parent, syncer);\n}\n\n// fork()\nif (!singleProcessMode()) {\n void exitAfter(() =>\n runWorker(must(parentWorker), process.env, ...process.argv.slice(2)),\n 
);\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AA8CA,SAAS,WAAW;AAClB,QAAO,QAAQ,GAAG,OAAO,iBAAiB,CAAC,SAAS,GAAG;;AAGzD,SAAS,qBACP,QACA;CACA,MAAM,cAAc,OAAO,OAAO,MAAM,OAAO,QAAQ,OAAO;AAE9D,KAAI,CAAC,aAAa,IAChB;AAGF,QAAO;EACL,KAAK,YAAY;EACjB,QAAQ,YAAY;EACpB,sBAAsB,YAAY;EAClC,gBAAgB,YAAY,kBAAkB;EAC/C;;AAGH,SAAwB,UACtB,QACA,KACA,GAAG,MACY;AACf,QAAO,KAAK,UAAU,GAAG,6CAA6C;CACtE,MAAM,WAAW,MAAQ,KAAK,IAAI,sBAAsB;CACxD,MAAM,cAAc,OAAO,KAAK,GAAG;CACnC,MAAM,SAAS,wBAAwB;EAAC;EAAK,MAAM,KAAK,MAAM,EAAE;EAAC,CAAC;AAElE,eACE,iBAAiB,QAAQ,UAAU,aAAa,MAAM,EACtD,UACA,YACD;CACD,MAAM,KAAK,iBAAiB,QAAQ,UAAU,YAAY;AAC1D,eAAc,IAAI,OAAO;CAEzB,MAAM,EAAC,KAAK,UAAU,wBAAuB;CAE7C,MAAM,cAAc,gBAAgB,OAAO,QAAQ,MAAM,SAAS;AAClE,IAAG,QAAQ,0BAA0B,cAAc;CAEnD,MAAM,QAAQ,SAAS,IAAI,IAAI,IAAI,eAAe,IAAI,OAAO,EAC3D,KAAK,KAAK,IAAI,mBAAmB,oCAAoC,EACtE,CAAC;CAEF,MAAM,aAAa,sBACf,SAAS,IAAI,SAAS,IAAI,eAAe,IAAI,YAAY,EACvD,KAAK,KACH,SAAS,mBACT,yCACD,EACF,CAAC,GACF,KAAA;CAEJ,MAAM,WAAW,QAAQ,WAAW,CAClC,kBAAkB,IAAI,OAAO,MAAM,EACnC,aAAa,kBAAkB,IAAI,YAAY,WAAW,GAAG,YAC9D,CAAC;CAEF,MAAM,SAAS,OAAO,mBAAmB,QAAQ;CACjD,MAAM,kBAAkB,gBAAgB,OACtC,IACA,KAAK,KAAK,QAAQ,eAAe,YAAY,GAAG,CACjD;CACD,MAAM,oBAAoB,gBAAgB,OACxC,IACA,KAAK,KAAK,QAAQ,WAAW,YAAY,GAAG,CAC7C;CAED,MAAM,QAAQ,WAAW,OAAO;CAChC,MAAM,oBAAoB,qBAAqB,OAAO;CACtD,MAAM,aACJ,OAAO,KAAK,QAAQ,KAAA,KAAa,OAAO,OAAO,QAAQ,KAAA,IACnD,KAAA,IACA;EACE,GAAG,OAAO;EACV,GAAG,OAAO;EACV,KAAK,KACH,OAAO,KAAK,OAAO,OAAO,OAAO,KACjC,mCACD;EACF;;CAGP,IAAI,oBAAmD,KAAA;AAGvD,KADqB,mBAAmB,OAAO,QAAQ,EAAE,CAAC,CACzC,WAAW,EAC1B,qBAAoB,OAAO,OAAO,EAAC,aAAY;AAC7C,MAAI,CAAC,OACH,OAAM,IAAI,uBACR;GACE,MAAM;GACN,SAAS;GACT,QAAQ;GACT,EACD,OACD;AAUH,SAAO;GACL,MAAM;GACN,KAAK;GACL,SAVc,MAAM,YAAY,OAAO,MAAM,OAAO;IACpD,SAAS;IACT,GAAI,OAAO,MAAM,UAAU,EAAC,QAAQ,OAAO,KAAK,QAAO;IACvD,GAAI,OAAO,MAAM,YAAY,EAC3B,UAAU,OAAO,KAAK,UACvB;IACF,CAAC;GAKD;;CAIL,MAAM,qBACJ,IACA,KACA,qBACG;EACH,MAAM,SAAS,GACZ,YAAY,aAAa,cAAc,CACvC,YAAY,iBAAiB,GAAG,CAChC,YAAY,YAAY,UAAU,CAAC;EAEtC,MAAM,yBACJ,qBAAqB,IAAI,uBAAuB,QAAQ,MAAM;EAChE,MAAM,qBAAqB,IAAI,6BAC7B,QACA,OAAO,KAAK,2BACZ,OAAO,KAAK,4BACZ,mBACA,YACA,kBACD;AAED,KAAG,QACD,gDAAgD,OAAO,qBACxD;EAED,MAAM,oBAAoB,IAAI,kBAAkB,uBAAuB;EAEvE,MAAM,oCAAoC,KAAK,IAC7C,OAAO,mBAAmB,GAC1B,EACD;EACD,MAAM,yBAAyB,KAAK,IAAI,OAAO,kBAAkB,EAAE;AAEnE,SAAO,IAAI,kBACT,QACA,QACA,OACA,OAAO,QACP,IACA,OACA,IAAI,eACF,QACA,OAAO,KACP,IAAI,YAAY,QAAQ,aAAa,MAAM,EAC3C,OACA,gBAAgB,yBAAyB,GAAG,EAC5C,IACA,yBAEE,qBAAqB,GACjB,oCACA,wBACN,OAAO,oBACP,OACD,EACD,KACA,kBACA,OAAO,IAAI,sBACX,mBACA,oBACA,wBACA,cACD;;CA4BH,MAAM,SAAS,IAAI,OACjB,IACA,QACA,mBA5BqB,cAClB,OACC,IAAI,eACF,GACG,YAAY,aAAa,UAAU,CACnC,YAAY,iBAAiB,GAAG,EACnC,OACA,IACA,YACA,QACA,kBACD,GACH,KAAA,GAGF,eAAe,KAAA,IACX,KAAA,KACC,IAAY,uBACX,IAAI,cACF,QACA,GAAG,YAAY,iBAAiB,GAAG,EACnC,IACA,mBACD,EAQP,QACA,kBACD;AAED,yBAAwB,IAAI,OAAO;AAE9B,UAAS,WAAW,OAAO,KAAK,CAAC,SAAS,EAAC,OAAO,MAAK,CAAC,CAAC,CAAC;AAE/D,QAAO,eAAe,IAAI,QAAQ,OAAO;;AAI3C,IAAI,CAAC,mBAAmB,CACjB,iBACH,UAAU,KAAK,aAAa,EAAE,QAAQ,KAAK,GAAG,QAAQ,KAAK,MAAM,EAAE,CAAC,CACrE"}
@@ -12,7 +12,7 @@ import { initReplica } from "../common/replica-schema.js";
  import { stream } from "../../../types/streams.js";
  import { ChangeProcessor } from "../../replicator/change-processor.js";
  import { ReplicationStatusPublisher } from "../../replicator/replication-status.js";
- import { WebSocket as WebSocket$1 } from "ws";
+ import { WebSocket } from "ws";
  //#region ../zero-cache/src/services/change-source/custom/change-source.ts
  /**
  * Initializes a Custom change source before streaming changes from the
@@ -69,7 +69,7 @@ var CustomChangeSource = class {
  url.searchParams.set("lastWatermark", clientWatermark);
  url.searchParams.set("replicaVersion", replicaVersion);
  }
- const ws = new WebSocket$1(url);
+ const ws = new WebSocket(url);
  const { instream, outstream } = stream(this.#lc, ws, changeStreamMessageSchema, { coalesce: (curr) => curr });
  return {
  changes: instream,
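Editor's note: the two hunks above are one mechanical change. The bundled change-source.js now imports WebSocket from `ws` under its own name instead of the `WebSocket$1` alias the 1.4.0 bundle used, presumably because the bundler no longer needs to disambiguate the identifier. For orientation, the sketch below condenses the `#startStream` URL construction from the embedded change-source.ts source further down; the standalone function and its parameter list are illustrative, not the class's real shape.

import {WebSocket} from 'ws';

// Condensed sketch of how the custom change source builds its upstream URL and
// opens the socket; simplified from the embedded change-source.ts source.
function openChangeStream(
  upstreamUri: string,
  appID: string,
  shardNum: number,
  publications: string[],
  clientWatermark?: string,
  replicaVersion?: string,
): WebSocket {
  const url = new URL(upstreamUri);
  url.searchParams.set('appID', appID);
  url.searchParams.set('shardNum', String(shardNum));
  for (const pub of publications) {
    url.searchParams.append('publications', pub);
  }
  if (clientWatermark) {
    // The real code asserts that replicaVersion is non-empty in this branch.
    url.searchParams.set('lastWatermark', clientWatermark);
    url.searchParams.set('replicaVersion', replicaVersion ?? '');
  }
  // 1.4.0 emitted `new WebSocket$1(url)` here; 1.5.0-canary.1 uses the
  // un-aliased import.
  return new WebSocket(url);
}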
@@ -1 +1 @@
- {"version":3,"file":"change-source.js","names":["#lc","#upstreamUri","#shard","#replicationConfig","#startStream"],"sources":["../../../../../../../zero-cache/src/services/change-source/custom/change-source.ts"],"sourcesContent":["import type {LogContext} from '@rocicorp/logger';\nimport {WebSocket} from 'ws';\nimport {assert, unreachable} from '../../../../../shared/src/asserts.ts';\nimport {\n stringify,\n type JSONObject,\n} from '../../../../../shared/src/bigint-json.ts';\nimport {deepEqual} from '../../../../../shared/src/json.ts';\nimport type {SchemaValue} from '../../../../../zero-schema/src/table-schema.ts';\nimport {Database} from '../../../../../zqlite/src/db.ts';\nimport {computeZqlSpecs} from '../../../db/lite-tables.ts';\nimport {StatementRunner} from '../../../db/statements.ts';\nimport type {ShardConfig, ShardID} from '../../../types/shards.ts';\nimport {stream} from '../../../types/streams.ts';\nimport {\n AutoResetSignal,\n type ReplicationConfig,\n} from '../../change-streamer/schema/tables.ts';\nimport {ChangeProcessor} from '../../replicator/change-processor.ts';\nimport {ReplicationStatusPublisher} from '../../replicator/replication-status.ts';\nimport {\n createReplicationStateTables,\n getSubscriptionState,\n initReplicationState,\n type SubscriptionState,\n} from '../../replicator/schema/replication-state.ts';\nimport type {ChangeSource, ChangeStream} from '../change-source.ts';\nimport {initReplica} from '../common/replica-schema.ts';\nimport {changeStreamMessageSchema} from '../protocol/current/downstream.ts';\nimport {\n type BackfillRequest,\n type ChangeSourceUpstream,\n} from '../protocol/current/upstream.ts';\n\n/** Server context to store with the initial sync metadata for debugging. */\nexport type ServerContext = JSONObject;\n\n/**\n * Initializes a Custom change source before streaming changes from the\n * corresponding logical replication stream.\n */\nexport async function initializeCustomChangeSource(\n lc: LogContext,\n upstreamURI: string,\n shard: ShardConfig,\n replicaDbFile: string,\n context: ServerContext,\n): Promise<{subscriptionState: SubscriptionState; changeSource: ChangeSource}> {\n await initReplica(\n lc,\n `replica-${shard.appID}-${shard.shardNum}`,\n replicaDbFile,\n (log, tx) => initialSync(log, shard, tx, upstreamURI, context),\n );\n\n const replica = new Database(lc, replicaDbFile);\n const subscriptionState = getSubscriptionState(new StatementRunner(replica));\n replica.close();\n\n if (shard.publications.length) {\n // Verify that the publications match what has been synced.\n const requested = shard.publications.toSorted();\n const replicated = subscriptionState.publications.sort();\n if (!deepEqual(requested, replicated)) {\n throw new Error(\n `Invalid ShardConfig. 
Requested publications [${requested}] do not match synced publications: [${replicated}]`,\n );\n }\n }\n\n const changeSource = new CustomChangeSource(\n lc,\n upstreamURI,\n shard,\n subscriptionState,\n );\n\n return {subscriptionState, changeSource};\n}\n\nclass CustomChangeSource implements ChangeSource {\n readonly #lc: LogContext;\n readonly #upstreamUri: string;\n readonly #shard: ShardID;\n readonly #replicationConfig: ReplicationConfig;\n\n constructor(\n lc: LogContext,\n upstreamUri: string,\n shard: ShardID,\n replicationConfig: ReplicationConfig,\n ) {\n this.#lc = lc.withContext('component', 'change-source');\n this.#upstreamUri = upstreamUri;\n this.#shard = shard;\n this.#replicationConfig = replicationConfig;\n }\n\n initialSync(): ChangeStream {\n return this.#startStream();\n }\n\n startLagReporter() {\n return null; // Not supported for custom sources\n }\n\n stop(): Promise<void> {\n return Promise.resolve();\n }\n\n startStream(\n clientWatermark: string,\n backfillRequests: BackfillRequest[] = [],\n ): Promise<ChangeStream> {\n if (backfillRequests?.length) {\n throw new Error(\n 'backfill is yet not supported for custom change sources',\n );\n }\n return Promise.resolve(this.#startStream(clientWatermark));\n }\n\n #startStream(clientWatermark?: string): ChangeStream {\n const {publications, replicaVersion} = this.#replicationConfig;\n const {appID, shardNum} = this.#shard;\n const url = new URL(this.#upstreamUri);\n url.searchParams.set('appID', appID);\n url.searchParams.set('shardNum', String(shardNum));\n for (const pub of publications) {\n url.searchParams.append('publications', pub);\n }\n if (clientWatermark) {\n assert(\n replicaVersion.length,\n 'replicaVersion is required when clientWatermark is set',\n );\n url.searchParams.set('lastWatermark', clientWatermark);\n url.searchParams.set('replicaVersion', replicaVersion);\n }\n\n const ws = new WebSocket(url);\n const {instream, outstream} = stream(\n this.#lc,\n ws,\n changeStreamMessageSchema,\n // Upstream acks coalesce. 
If upstream exhibits back-pressure,\n // only the last ACK is kept / buffered.\n {coalesce: (curr: ChangeSourceUpstream) => curr},\n );\n return {changes: instream, acks: outstream};\n }\n}\n\n/**\n * Initial sync for a custom change source makes a request to the\n * change source endpoint with no `replicaVersion` or `lastWatermark`.\n * The initial transaction returned by the endpoint is treated as\n * the initial sync, and the commit watermark of that transaction\n * becomes the `replicaVersion` of the initialized replica.\n *\n * Note that this is equivalent to how the LSN of the Postgres WAL\n * at initial sync time is the `replicaVersion` (and starting\n * version for all initially-synced rows).\n */\nexport async function initialSync(\n lc: LogContext,\n shard: ShardConfig,\n tx: Database,\n upstreamURI: string,\n context: ServerContext,\n) {\n const {appID: id, publications} = shard;\n const changeSource = new CustomChangeSource(lc, upstreamURI, shard, {\n replicaVersion: '', // ignored for initialSync()\n publications,\n });\n const {changes} = changeSource.initialSync();\n\n createReplicationStateTables(tx);\n const processor = new ChangeProcessor(\n new StatementRunner(tx),\n 'initial-sync',\n (_, err) => {\n throw err;\n },\n );\n\n const statusPublisher = ReplicationStatusPublisher.forRunningTransaction(tx);\n try {\n let num = 0;\n for await (const change of changes) {\n const [tag] = change;\n switch (tag) {\n case 'begin': {\n const {commitWatermark} = change[2];\n lc.info?.(\n `initial sync of shard ${id} at replicaVersion ${commitWatermark}`,\n );\n statusPublisher.publish(\n lc,\n 'Initializing',\n `Copying upstream tables at version ${commitWatermark}`,\n 5000,\n );\n initReplicationState(\n tx,\n publications.toSorted(),\n commitWatermark,\n context,\n false,\n );\n processor.processMessage(lc, change);\n break;\n }\n case 'data':\n processor.processMessage(lc, change);\n if (++num % 1000 === 0) {\n lc.debug?.(`processed ${num} changes`);\n }\n break;\n case 'commit':\n processor.processMessage(lc, change);\n validateInitiallySyncedData(lc, tx, shard);\n lc.info?.(`finished initial-sync of ${num} changes`);\n return;\n\n case 'status':\n break; // Ignored\n // @ts-expect-error: falls through if the tag is not 'reset-required\n case 'control': {\n const {tag, message} = change[1];\n if (tag === 'reset-required') {\n throw new AutoResetSignal(\n message ?? 
'auto-reset signaled by change source',\n );\n }\n }\n // falls through\n case 'rollback':\n throw new Error(\n `unexpected message during initial-sync: ${stringify(change)}`,\n );\n default:\n unreachable(change);\n }\n }\n throw new Error(\n `change source ${upstreamURI} closed before initial-sync completed`,\n );\n } catch (e) {\n await statusPublisher.publishAndThrowError(lc, 'Initializing', e);\n } finally {\n statusPublisher.stop();\n }\n}\n\n// Verify that the upstream tables expected by the sync logic\n// have been properly initialized.\nfunction getRequiredTables({\n appID,\n shardNum,\n}: ShardID): Record<string, Record<string, SchemaValue>> {\n return {\n [`${appID}_${shardNum}.clients`]: {\n clientGroupID: {type: 'string'},\n clientID: {type: 'string'},\n lastMutationID: {type: 'number'},\n userID: {type: 'string'},\n },\n [`${appID}_${shardNum}.mutations`]: {\n clientGroupID: {type: 'string'},\n clientID: {type: 'string'},\n mutationID: {type: 'number'},\n mutation: {type: 'json'},\n },\n [`${appID}.permissions`]: {\n permissions: {type: 'json'},\n hash: {type: 'string'},\n },\n };\n}\n\nfunction validateInitiallySyncedData(\n lc: LogContext,\n db: Database,\n shard: ShardID,\n) {\n const tables = computeZqlSpecs(lc, db, {includeBackfillingColumns: true});\n const required = getRequiredTables(shard);\n for (const [name, columns] of Object.entries(required)) {\n const table = tables.get(name)?.zqlSpec;\n if (!table) {\n throw new Error(\n `Upstream is missing the \"${name}\" table. (Found ${[\n ...tables.keys(),\n ]})` +\n `Please ensure that each table has a unique index over one ` +\n `or more non-null columns.`,\n );\n }\n for (const [col, {type}] of Object.entries(columns)) {\n const found = table[col];\n if (!found) {\n throw new Error(\n `Upstream \"${table}\" table is missing the \"${col}\" column`,\n );\n }\n if (found.type !== type) {\n throw new Error(\n `Upstream \"${table}.${col}\" column is a ${found.type} type but must be a ${type} type.`,\n );\n }\n }\n 
}\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;;;AAyCA,eAAsB,6BACpB,IACA,aACA,OACA,eACA,SAC6E;AAC7E,OAAM,YACJ,IACA,WAAW,MAAM,MAAM,GAAG,MAAM,YAChC,gBACC,KAAK,OAAO,YAAY,KAAK,OAAO,IAAI,aAAa,QAAQ,CAC/D;CAED,MAAM,UAAU,IAAI,SAAS,IAAI,cAAc;CAC/C,MAAM,oBAAoB,qBAAqB,IAAI,gBAAgB,QAAQ,CAAC;AAC5E,SAAQ,OAAO;AAEf,KAAI,MAAM,aAAa,QAAQ;EAE7B,MAAM,YAAY,MAAM,aAAa,UAAU;EAC/C,MAAM,aAAa,kBAAkB,aAAa,MAAM;AACxD,MAAI,CAAC,UAAU,WAAW,WAAW,CACnC,OAAM,IAAI,MACR,gDAAgD,UAAU,uCAAuC,WAAW,GAC7G;;AAWL,QAAO;EAAC;EAAmB,cAPN,IAAI,mBACvB,IACA,aACA,OACA,kBACD;EAEuC;;AAG1C,IAAM,qBAAN,MAAiD;CAC/C;CACA;CACA;CACA;CAEA,YACE,IACA,aACA,OACA,mBACA;AACA,QAAA,KAAW,GAAG,YAAY,aAAa,gBAAgB;AACvD,QAAA,cAAoB;AACpB,QAAA,QAAc;AACd,QAAA,oBAA0B;;CAG5B,cAA4B;AAC1B,SAAO,MAAA,aAAmB;;CAG5B,mBAAmB;AACjB,SAAO;;CAGT,OAAsB;AACpB,SAAO,QAAQ,SAAS;;CAG1B,YACE,iBACA,mBAAsC,EAAE,EACjB;AACvB,MAAI,kBAAkB,OACpB,OAAM,IAAI,MACR,0DACD;AAEH,SAAO,QAAQ,QAAQ,MAAA,YAAkB,gBAAgB,CAAC;;CAG5D,aAAa,iBAAwC;EACnD,MAAM,EAAC,cAAc,mBAAkB,MAAA;EACvC,MAAM,EAAC,OAAO,aAAY,MAAA;EAC1B,MAAM,MAAM,IAAI,IAAI,MAAA,YAAkB;AACtC,MAAI,aAAa,IAAI,SAAS,MAAM;AACpC,MAAI,aAAa,IAAI,YAAY,OAAO,SAAS,CAAC;AAClD,OAAK,MAAM,OAAO,aAChB,KAAI,aAAa,OAAO,gBAAgB,IAAI;AAE9C,MAAI,iBAAiB;AACnB,UACE,eAAe,QACf,yDACD;AACD,OAAI,aAAa,IAAI,iBAAiB,gBAAgB;AACtD,OAAI,aAAa,IAAI,kBAAkB,eAAe;;EAGxD,MAAM,KAAK,IAAI,YAAU,IAAI;EAC7B,MAAM,EAAC,UAAU,cAAa,OAC5B,MAAA,IACA,IACA,2BAGA,EAAC,WAAW,SAA+B,MAAK,CACjD;AACD,SAAO;GAAC,SAAS;GAAU,MAAM;GAAU;;;;;;;;;;;;;;AAe/C,eAAsB,YACpB,IACA,OACA,IACA,aACA,SACA;CACA,MAAM,EAAC,OAAO,IAAI,iBAAgB;CAKlC,MAAM,EAAC,YAJc,IAAI,mBAAmB,IAAI,aAAa,OAAO;EAClE,gBAAgB;EAChB;EACD,CAAC,CAC6B,aAAa;AAE5C,8BAA6B,GAAG;CAChC,MAAM,YAAY,IAAI,gBACpB,IAAI,gBAAgB,GAAG,EACvB,iBACC,GAAG,QAAQ;AACV,QAAM;GAET;CAED,MAAM,kBAAkB,2BAA2B,sBAAsB,GAAG;AAC5E,KAAI;EACF,IAAI,MAAM;AACV,aAAW,MAAM,UAAU,SAAS;GAClC,MAAM,CAAC,OAAO;AACd,WAAQ,KAAR;IACE,KAAK,SAAS;KACZ,MAAM,EAAC,oBAAmB,OAAO;AACjC,QAAG,OACD,yBAAyB,GAAG,qBAAqB,kBAClD;AACD,qBAAgB,QACd,IACA,gBACA,sCAAsC,mBACtC,IACD;AACD,0BACE,IACA,aAAa,UAAU,EACvB,iBACA,SACA,MACD;AACD,eAAU,eAAe,IAAI,OAAO;AACpC;;IAEF,KAAK;AACH,eAAU,eAAe,IAAI,OAAO;AACpC,SAAI,EAAE,MAAM,QAAS,EACnB,IAAG,QAAQ,aAAa,IAAI,UAAU;AAExC;IACF,KAAK;AACH,eAAU,eAAe,IAAI,OAAO;AACpC,iCAA4B,IAAI,IAAI,MAAM;AAC1C,QAAG,OAAO,4BAA4B,IAAI,UAAU;AACpD;IAEF,KAAK,SACH;IAEF,KAAK,WAAW;KACd,MAAM,EAAC,KAAK,YAAW,OAAO;AAC9B,SAAI,QAAQ,iBACV,OAAM,IAAI,gBACR,WAAW,uCACZ;;IAIL,KAAK,WACH,OAAM,IAAI,MACR,2CAA2C,UAAU,OAAO,GAC7D;IACH,QACE,aAAY,OAAO;;;AAGzB,QAAM,IAAI,MACR,iBAAiB,YAAY,uCAC9B;UACM,GAAG;AACV,QAAM,gBAAgB,qBAAqB,IAAI,gBAAgB,EAAE;WACzD;AACR,kBAAgB,MAAM;;;AAM1B,SAAS,kBAAkB,EACzB,OACA,YACuD;AACvD,QAAO;GACJ,GAAG,MAAM,GAAG,SAAS,YAAY;GAChC,eAAe,EAAC,MAAM,UAAS;GAC/B,UAAU,EAAC,MAAM,UAAS;GAC1B,gBAAgB,EAAC,MAAM,UAAS;GAChC,QAAQ,EAAC,MAAM,UAAS;GACzB;GACA,GAAG,MAAM,GAAG,SAAS,cAAc;GAClC,eAAe,EAAC,MAAM,UAAS;GAC/B,UAAU,EAAC,MAAM,UAAS;GAC1B,YAAY,EAAC,MAAM,UAAS;GAC5B,UAAU,EAAC,MAAM,QAAO;GACzB;GACA,GAAG,MAAM,gBAAgB;GACxB,aAAa,EAAC,MAAM,QAAO;GAC3B,MAAM,EAAC,MAAM,UAAS;GACvB;EACF;;AAGH,SAAS,4BACP,IACA,IACA,OACA;CACA,MAAM,SAAS,gBAAgB,IAAI,IAAI,EAAC,2BAA2B,MAAK,CAAC;CACzE,MAAM,WAAW,kBAAkB,MAAM;AACzC,MAAK,MAAM,CAAC,MAAM,YAAY,OAAO,QAAQ,SAAS,EAAE;EACtD,MAAM,QAAQ,OAAO,IAAI,KAAK,EAAE;AAChC,MAAI,CAAC,MACH,OAAM,IAAI,MACR,4BAA4B,KAAK,kBAAkB,CACjD,GAAG,OAAO,MAAM,CACjB,CAAC,sFAGH;AAEH,OAAK,MAAM,CAAC,KAAK,EAAC,WAAU,OAAO,QAAQ,QAAQ,EAAE;GACnD,MAAM,QAAQ,MAAM;AACpB,OAAI,CAAC,MACH,OAAM,IAAI,MACR,aAAa,MAAM,0BAA0B,IAAI,UAClD;AAEH,OAAI,MAAM,SAAS,KACjB,OAAM,IAAI,MACR,aAAa,MAAM,GAAG,IAAI,gBAAgB,MAAM,KAAK,sBAAsB,KAAK,QACjF"}
+ {"version":3,"file":"change-source.js","names":["#lc","#upstreamUri","#shard","#replicationConfig","#startStream"],"sources":["../../../../../../../zero-cache/src/services/change-source/custom/change-source.ts"],"sourcesContent":["import type {LogContext} from '@rocicorp/logger';\nimport {WebSocket} from 'ws';\nimport {assert, unreachable} from '../../../../../shared/src/asserts.ts';\nimport {\n stringify,\n type JSONObject,\n} from '../../../../../shared/src/bigint-json.ts';\nimport {deepEqual} from '../../../../../shared/src/json.ts';\nimport type {SchemaValue} from '../../../../../zero-schema/src/table-schema.ts';\nimport {Database} from '../../../../../zqlite/src/db.ts';\nimport {computeZqlSpecs} from '../../../db/lite-tables.ts';\nimport {StatementRunner} from '../../../db/statements.ts';\nimport type {ShardConfig, ShardID} from '../../../types/shards.ts';\nimport {stream} from '../../../types/streams.ts';\nimport {\n AutoResetSignal,\n type ReplicationConfig,\n} from '../../change-streamer/schema/tables.ts';\nimport {ChangeProcessor} from '../../replicator/change-processor.ts';\nimport {ReplicationStatusPublisher} from '../../replicator/replication-status.ts';\nimport {\n createReplicationStateTables,\n getSubscriptionState,\n initReplicationState,\n type SubscriptionState,\n} from '../../replicator/schema/replication-state.ts';\nimport type {ChangeSource, ChangeStream} from '../change-source.ts';\nimport {initReplica} from '../common/replica-schema.ts';\nimport {changeStreamMessageSchema} from '../protocol/current/downstream.ts';\nimport {\n type BackfillRequest,\n type ChangeSourceUpstream,\n} from '../protocol/current/upstream.ts';\n\n/** Server context to store with the initial sync metadata for debugging. */\nexport type ServerContext = JSONObject;\n\n/**\n * Initializes a Custom change source before streaming changes from the\n * corresponding logical replication stream.\n */\nexport async function initializeCustomChangeSource(\n lc: LogContext,\n upstreamURI: string,\n shard: ShardConfig,\n replicaDbFile: string,\n context: ServerContext,\n): Promise<{subscriptionState: SubscriptionState; changeSource: ChangeSource}> {\n await initReplica(\n lc,\n `replica-${shard.appID}-${shard.shardNum}`,\n replicaDbFile,\n (log, tx) => initialSync(log, shard, tx, upstreamURI, context),\n );\n\n const replica = new Database(lc, replicaDbFile);\n const subscriptionState = getSubscriptionState(new StatementRunner(replica));\n replica.close();\n\n if (shard.publications.length) {\n // Verify that the publications match what has been synced.\n const requested = shard.publications.toSorted();\n const replicated = subscriptionState.publications.sort();\n if (!deepEqual(requested, replicated)) {\n throw new Error(\n `Invalid ShardConfig. 
Requested publications [${requested}] do not match synced publications: [${replicated}]`,\n );\n }\n }\n\n const changeSource = new CustomChangeSource(\n lc,\n upstreamURI,\n shard,\n subscriptionState,\n );\n\n return {subscriptionState, changeSource};\n}\n\nclass CustomChangeSource implements ChangeSource {\n readonly #lc: LogContext;\n readonly #upstreamUri: string;\n readonly #shard: ShardID;\n readonly #replicationConfig: ReplicationConfig;\n\n constructor(\n lc: LogContext,\n upstreamUri: string,\n shard: ShardID,\n replicationConfig: ReplicationConfig,\n ) {\n this.#lc = lc.withContext('component', 'change-source');\n this.#upstreamUri = upstreamUri;\n this.#shard = shard;\n this.#replicationConfig = replicationConfig;\n }\n\n initialSync(): ChangeStream {\n return this.#startStream();\n }\n\n startLagReporter() {\n return null; // Not supported for custom sources\n }\n\n stop(): Promise<void> {\n return Promise.resolve();\n }\n\n startStream(\n clientWatermark: string,\n backfillRequests: BackfillRequest[] = [],\n ): Promise<ChangeStream> {\n if (backfillRequests?.length) {\n throw new Error(\n 'backfill is yet not supported for custom change sources',\n );\n }\n return Promise.resolve(this.#startStream(clientWatermark));\n }\n\n #startStream(clientWatermark?: string): ChangeStream {\n const {publications, replicaVersion} = this.#replicationConfig;\n const {appID, shardNum} = this.#shard;\n const url = new URL(this.#upstreamUri);\n url.searchParams.set('appID', appID);\n url.searchParams.set('shardNum', String(shardNum));\n for (const pub of publications) {\n url.searchParams.append('publications', pub);\n }\n if (clientWatermark) {\n assert(\n replicaVersion.length,\n 'replicaVersion is required when clientWatermark is set',\n );\n url.searchParams.set('lastWatermark', clientWatermark);\n url.searchParams.set('replicaVersion', replicaVersion);\n }\n\n const ws = new WebSocket(url);\n const {instream, outstream} = stream(\n this.#lc,\n ws,\n changeStreamMessageSchema,\n // Upstream acks coalesce. 
If upstream exhibits back-pressure,\n // only the last ACK is kept / buffered.\n {coalesce: (curr: ChangeSourceUpstream) => curr},\n );\n return {changes: instream, acks: outstream};\n }\n}\n\n/**\n * Initial sync for a custom change source makes a request to the\n * change source endpoint with no `replicaVersion` or `lastWatermark`.\n * The initial transaction returned by the endpoint is treated as\n * the initial sync, and the commit watermark of that transaction\n * becomes the `replicaVersion` of the initialized replica.\n *\n * Note that this is equivalent to how the LSN of the Postgres WAL\n * at initial sync time is the `replicaVersion` (and starting\n * version for all initially-synced rows).\n */\nexport async function initialSync(\n lc: LogContext,\n shard: ShardConfig,\n tx: Database,\n upstreamURI: string,\n context: ServerContext,\n) {\n const {appID: id, publications} = shard;\n const changeSource = new CustomChangeSource(lc, upstreamURI, shard, {\n replicaVersion: '', // ignored for initialSync()\n publications,\n });\n const {changes} = changeSource.initialSync();\n\n createReplicationStateTables(tx);\n const processor = new ChangeProcessor(\n new StatementRunner(tx),\n 'initial-sync',\n (_, err) => {\n throw err;\n },\n );\n\n const statusPublisher = ReplicationStatusPublisher.forRunningTransaction(tx);\n try {\n let num = 0;\n for await (const change of changes) {\n const [tag] = change;\n switch (tag) {\n case 'begin': {\n const {commitWatermark} = change[2];\n lc.info?.(\n `initial sync of shard ${id} at replicaVersion ${commitWatermark}`,\n );\n statusPublisher.publish(\n lc,\n 'Initializing',\n `Copying upstream tables at version ${commitWatermark}`,\n 5000,\n );\n initReplicationState(\n tx,\n publications.toSorted(),\n commitWatermark,\n context,\n false,\n );\n processor.processMessage(lc, change);\n break;\n }\n case 'data':\n processor.processMessage(lc, change);\n if (++num % 1000 === 0) {\n lc.debug?.(`processed ${num} changes`);\n }\n break;\n case 'commit':\n processor.processMessage(lc, change);\n validateInitiallySyncedData(lc, tx, shard);\n lc.info?.(`finished initial-sync of ${num} changes`);\n return;\n\n case 'status':\n break; // Ignored\n // @ts-expect-error: falls through if the tag is not 'reset-required\n case 'control': {\n const {tag, message} = change[1];\n if (tag === 'reset-required') {\n throw new AutoResetSignal(\n message ?? 
'auto-reset signaled by change source',\n );\n }\n }\n // falls through\n case 'rollback':\n throw new Error(\n `unexpected message during initial-sync: ${stringify(change)}`,\n );\n default:\n unreachable(change);\n }\n }\n throw new Error(\n `change source ${upstreamURI} closed before initial-sync completed`,\n );\n } catch (e) {\n await statusPublisher.publishAndThrowError(lc, 'Initializing', e);\n } finally {\n statusPublisher.stop();\n }\n}\n\n// Verify that the upstream tables expected by the sync logic\n// have been properly initialized.\nfunction getRequiredTables({\n appID,\n shardNum,\n}: ShardID): Record<string, Record<string, SchemaValue>> {\n return {\n [`${appID}_${shardNum}.clients`]: {\n clientGroupID: {type: 'string'},\n clientID: {type: 'string'},\n lastMutationID: {type: 'number'},\n userID: {type: 'string'},\n },\n [`${appID}_${shardNum}.mutations`]: {\n clientGroupID: {type: 'string'},\n clientID: {type: 'string'},\n mutationID: {type: 'number'},\n mutation: {type: 'json'},\n },\n [`${appID}.permissions`]: {\n permissions: {type: 'json'},\n hash: {type: 'string'},\n },\n };\n}\n\nfunction validateInitiallySyncedData(\n lc: LogContext,\n db: Database,\n shard: ShardID,\n) {\n const tables = computeZqlSpecs(lc, db, {includeBackfillingColumns: true});\n const required = getRequiredTables(shard);\n for (const [name, columns] of Object.entries(required)) {\n const table = tables.get(name)?.zqlSpec;\n if (!table) {\n throw new Error(\n `Upstream is missing the \"${name}\" table. (Found ${[\n ...tables.keys(),\n ]})` +\n `Please ensure that each table has a unique index over one ` +\n `or more non-null columns.`,\n );\n }\n for (const [col, {type}] of Object.entries(columns)) {\n const found = table[col];\n if (!found) {\n throw new Error(\n `Upstream \"${table}\" table is missing the \"${col}\" column`,\n );\n }\n if (found.type !== type) {\n throw new Error(\n `Upstream \"${table}.${col}\" column is a ${found.type} type but must be a ${type} type.`,\n );\n }\n }\n 
}\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;;;AAyCA,eAAsB,6BACpB,IACA,aACA,OACA,eACA,SAC6E;AAC7E,OAAM,YACJ,IACA,WAAW,MAAM,MAAM,GAAG,MAAM,YAChC,gBACC,KAAK,OAAO,YAAY,KAAK,OAAO,IAAI,aAAa,QAAQ,CAC/D;CAED,MAAM,UAAU,IAAI,SAAS,IAAI,cAAc;CAC/C,MAAM,oBAAoB,qBAAqB,IAAI,gBAAgB,QAAQ,CAAC;AAC5E,SAAQ,OAAO;AAEf,KAAI,MAAM,aAAa,QAAQ;EAE7B,MAAM,YAAY,MAAM,aAAa,UAAU;EAC/C,MAAM,aAAa,kBAAkB,aAAa,MAAM;AACxD,MAAI,CAAC,UAAU,WAAW,WAAW,CACnC,OAAM,IAAI,MACR,gDAAgD,UAAU,uCAAuC,WAAW,GAC7G;;AAWL,QAAO;EAAC;EAAmB,cAPN,IAAI,mBACvB,IACA,aACA,OACA,kBACD;EAEuC;;AAG1C,IAAM,qBAAN,MAAiD;CAC/C;CACA;CACA;CACA;CAEA,YACE,IACA,aACA,OACA,mBACA;AACA,QAAA,KAAW,GAAG,YAAY,aAAa,gBAAgB;AACvD,QAAA,cAAoB;AACpB,QAAA,QAAc;AACd,QAAA,oBAA0B;;CAG5B,cAA4B;AAC1B,SAAO,MAAA,aAAmB;;CAG5B,mBAAmB;AACjB,SAAO;;CAGT,OAAsB;AACpB,SAAO,QAAQ,SAAS;;CAG1B,YACE,iBACA,mBAAsC,EAAE,EACjB;AACvB,MAAI,kBAAkB,OACpB,OAAM,IAAI,MACR,0DACD;AAEH,SAAO,QAAQ,QAAQ,MAAA,YAAkB,gBAAgB,CAAC;;CAG5D,aAAa,iBAAwC;EACnD,MAAM,EAAC,cAAc,mBAAkB,MAAA;EACvC,MAAM,EAAC,OAAO,aAAY,MAAA;EAC1B,MAAM,MAAM,IAAI,IAAI,MAAA,YAAkB;AACtC,MAAI,aAAa,IAAI,SAAS,MAAM;AACpC,MAAI,aAAa,IAAI,YAAY,OAAO,SAAS,CAAC;AAClD,OAAK,MAAM,OAAO,aAChB,KAAI,aAAa,OAAO,gBAAgB,IAAI;AAE9C,MAAI,iBAAiB;AACnB,UACE,eAAe,QACf,yDACD;AACD,OAAI,aAAa,IAAI,iBAAiB,gBAAgB;AACtD,OAAI,aAAa,IAAI,kBAAkB,eAAe;;EAGxD,MAAM,KAAK,IAAI,UAAU,IAAI;EAC7B,MAAM,EAAC,UAAU,cAAa,OAC5B,MAAA,IACA,IACA,2BAGA,EAAC,WAAW,SAA+B,MAAK,CACjD;AACD,SAAO;GAAC,SAAS;GAAU,MAAM;GAAU;;;;;;;;;;;;;;AAe/C,eAAsB,YACpB,IACA,OACA,IACA,aACA,SACA;CACA,MAAM,EAAC,OAAO,IAAI,iBAAgB;CAKlC,MAAM,EAAC,YAJc,IAAI,mBAAmB,IAAI,aAAa,OAAO;EAClE,gBAAgB;EAChB;EACD,CAAC,CAC6B,aAAa;AAE5C,8BAA6B,GAAG;CAChC,MAAM,YAAY,IAAI,gBACpB,IAAI,gBAAgB,GAAG,EACvB,iBACC,GAAG,QAAQ;AACV,QAAM;GAET;CAED,MAAM,kBAAkB,2BAA2B,sBAAsB,GAAG;AAC5E,KAAI;EACF,IAAI,MAAM;AACV,aAAW,MAAM,UAAU,SAAS;GAClC,MAAM,CAAC,OAAO;AACd,WAAQ,KAAR;IACE,KAAK,SAAS;KACZ,MAAM,EAAC,oBAAmB,OAAO;AACjC,QAAG,OACD,yBAAyB,GAAG,qBAAqB,kBAClD;AACD,qBAAgB,QACd,IACA,gBACA,sCAAsC,mBACtC,IACD;AACD,0BACE,IACA,aAAa,UAAU,EACvB,iBACA,SACA,MACD;AACD,eAAU,eAAe,IAAI,OAAO;AACpC;;IAEF,KAAK;AACH,eAAU,eAAe,IAAI,OAAO;AACpC,SAAI,EAAE,MAAM,QAAS,EACnB,IAAG,QAAQ,aAAa,IAAI,UAAU;AAExC;IACF,KAAK;AACH,eAAU,eAAe,IAAI,OAAO;AACpC,iCAA4B,IAAI,IAAI,MAAM;AAC1C,QAAG,OAAO,4BAA4B,IAAI,UAAU;AACpD;IAEF,KAAK,SACH;IAEF,KAAK,WAAW;KACd,MAAM,EAAC,KAAK,YAAW,OAAO;AAC9B,SAAI,QAAQ,iBACV,OAAM,IAAI,gBACR,WAAW,uCACZ;;IAIL,KAAK,WACH,OAAM,IAAI,MACR,2CAA2C,UAAU,OAAO,GAC7D;IACH,QACE,aAAY,OAAO;;;AAGzB,QAAM,IAAI,MACR,iBAAiB,YAAY,uCAC9B;UACM,GAAG;AACV,QAAM,gBAAgB,qBAAqB,IAAI,gBAAgB,EAAE;WACzD;AACR,kBAAgB,MAAM;;;AAM1B,SAAS,kBAAkB,EACzB,OACA,YACuD;AACvD,QAAO;GACJ,GAAG,MAAM,GAAG,SAAS,YAAY;GAChC,eAAe,EAAC,MAAM,UAAS;GAC/B,UAAU,EAAC,MAAM,UAAS;GAC1B,gBAAgB,EAAC,MAAM,UAAS;GAChC,QAAQ,EAAC,MAAM,UAAS;GACzB;GACA,GAAG,MAAM,GAAG,SAAS,cAAc;GAClC,eAAe,EAAC,MAAM,UAAS;GAC/B,UAAU,EAAC,MAAM,UAAS;GAC1B,YAAY,EAAC,MAAM,UAAS;GAC5B,UAAU,EAAC,MAAM,QAAO;GACzB;GACA,GAAG,MAAM,gBAAgB;GACxB,aAAa,EAAC,MAAM,QAAO;GAC3B,MAAM,EAAC,MAAM,UAAS;GACvB;EACF;;AAGH,SAAS,4BACP,IACA,IACA,OACA;CACA,MAAM,SAAS,gBAAgB,IAAI,IAAI,EAAC,2BAA2B,MAAK,CAAC;CACzE,MAAM,WAAW,kBAAkB,MAAM;AACzC,MAAK,MAAM,CAAC,MAAM,YAAY,OAAO,QAAQ,SAAS,EAAE;EACtD,MAAM,QAAQ,OAAO,IAAI,KAAK,EAAE;AAChC,MAAI,CAAC,MACH,OAAM,IAAI,MACR,4BAA4B,KAAK,kBAAkB,CACjD,GAAG,OAAO,MAAM,CACjB,CAAC,sFAGH;AAEH,OAAK,MAAM,CAAC,KAAK,EAAC,WAAU,OAAO,QAAQ,QAAQ,EAAE;GACnD,MAAM,QAAQ,MAAM;AACpB,OAAI,CAAC,MACH,OAAM,IAAI,MACR,aAAa,MAAM,0BAA0B,IAAI,UAClD;AAEH,OAAI,MAAM,SAAS,KACjB,OAAM,IAAI,MACR,aAAa,MAAM,GAAG,IAAI,gBAAgB,MAAM,KAAK,sBAAsB,KAAK,QACjF"}
@@ -1 +1 @@
- {"version":3,"file":"change-source.d.ts","sourceRoot":"","sources":["../../../../../../../zero-cache/src/services/change-source/pg/change-source.ts"],"names":[],"mappings":"AAKA,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,kBAAkB,CAAC;AAejD,OAAO,KAAK,CAAC,MAAM,qCAAqC,CAAC;AAOzD,OAAO,KAAK,EAGV,kBAAkB,EACnB,MAAM,sBAAsB,CAAC;AAE9B,OAAO,EAAC,KAAK,WAAW,EAAC,MAAM,gCAAgC,CAAC;AAGhE,OAAO,EAEL,KAAK,WAAW,EAEjB,MAAM,0BAA0B,CAAC;AAKlC,OAAO,KAAK,EAAC,IAAI,EAAC,MAAM,2BAA2B,CAAC;AAEpD,OAAO,EAEL,KAAK,iBAAiB,EAEvB,MAAM,8CAA8C,CAAC;AACtD,OAAO,KAAK,EAAC,YAAY,EAAe,MAAM,qBAAqB,CAAC;AAEpE,OAAO,EAEL,KAAK,QAAQ,EACd,MAAM,wCAAwC,CAAC;AAchD,OAAO,KAAK,EAEV,mBAAmB,EAEpB,MAAM,mCAAmC,CAAC;AAG3C,OAAO,EAEL,KAAK,kBAAkB,EACvB,KAAK,aAAa,EACnB,MAAM,mBAAmB,CAAC;AAC3B,OAAO,KAAK,EAGV,eAAe,IAAI,gBAAgB,EACpC,MAAM,yCAAyC,CAAC;AAuBjD;;;;GAIG;AACH,wBAAsB,8BAA8B,CAClD,EAAE,EAAE,UAAU,EACd,WAAW,EAAE,MAAM,EACnB,KAAK,EAAE,WAAW,EAClB,aAAa,EAAE,MAAM,EACrB,WAAW,EAAE,kBAAkB,EAC/B,OAAO,EAAE,aAAa,EACtB,mBAAmB,SAAI,GACtB,OAAO,CAAC;IAAC,iBAAiB,EAAE,iBAAiB,CAAC;IAAC,YAAY,EAAE,YAAY,CAAA;CAAC,CAAC,CAuC7E;AA4cD,qBAAa,KAAM,YAAW,QAAQ;;gBAIxB,IAAI,EAAE,IAAI,CAAC,MAAM,CAAC;IAI9B,QAAQ,CAAC,MAAM,EAAE,mBAAmB,GAAG,IAAI;IAgC3C,GAAG,CAAC,SAAS,EAAE,WAAW;CAoB3B;AAED,QAAA,MAAM,eAAe;;;;aAInB,CAAC;AAEH,MAAM,MAAM,SAAS,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,eAAe,CAAC,CAAC;AAqxBxD,wBAAgB,iBAAiB,CAAC,CAAC,EAAE,kBAAkB,EAAE,CAAC,EAAE,gBAAgB,WAwB3E"}
+ {"version":3,"file":"change-source.d.ts","sourceRoot":"","sources":["../../../../../../../zero-cache/src/services/change-source/pg/change-source.ts"],"names":[],"mappings":"AAKA,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,kBAAkB,CAAC;AAejD,OAAO,KAAK,CAAC,MAAM,qCAAqC,CAAC;AAOzD,OAAO,KAAK,EAGV,kBAAkB,EACnB,MAAM,sBAAsB,CAAC;AAE9B,OAAO,EAAC,KAAK,WAAW,EAAC,MAAM,gCAAgC,CAAC;AAGhE,OAAO,EAEL,KAAK,WAAW,EAEjB,MAAM,0BAA0B,CAAC;AAKlC,OAAO,KAAK,EAAC,IAAI,EAAC,MAAM,2BAA2B,CAAC;AAEpD,OAAO,EAEL,KAAK,iBAAiB,EAEvB,MAAM,8CAA8C,CAAC;AACtD,OAAO,KAAK,EAAC,YAAY,EAAe,MAAM,qBAAqB,CAAC;AAEpE,OAAO,EAEL,KAAK,QAAQ,EACd,MAAM,wCAAwC,CAAC;AAchD,OAAO,KAAK,EAEV,mBAAmB,EAEpB,MAAM,mCAAmC,CAAC;AAG3C,OAAO,EAEL,KAAK,kBAAkB,EACvB,KAAK,aAAa,EACnB,MAAM,mBAAmB,CAAC;AAC3B,OAAO,KAAK,EAGV,eAAe,IAAI,gBAAgB,EACpC,MAAM,yCAAyC,CAAC;AAuBjD;;;;GAIG;AACH,wBAAsB,8BAA8B,CAClD,EAAE,EAAE,UAAU,EACd,WAAW,EAAE,MAAM,EACnB,KAAK,EAAE,WAAW,EAClB,aAAa,EAAE,MAAM,EACrB,WAAW,EAAE,kBAAkB,EAC/B,OAAO,EAAE,aAAa,EACtB,mBAAmB,SAAI,GACtB,OAAO,CAAC;IAAC,iBAAiB,EAAE,iBAAiB,CAAC;IAAC,YAAY,EAAE,YAAY,CAAA;CAAC,CAAC,CAuC7E;AA4cD,qBAAa,KAAM,YAAW,QAAQ;;gBAIxB,IAAI,EAAE,IAAI,CAAC,MAAM,CAAC;IAI9B,QAAQ,CAAC,MAAM,EAAE,mBAAmB,GAAG,IAAI;IAgC3C,GAAG,CAAC,SAAS,EAAE,WAAW;CAoB3B;AAED,QAAA,MAAM,eAAe;;;;aAInB,CAAC;AAEH,MAAM,MAAM,SAAS,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,eAAe,CAAC,CAAC;AAqzBxD,wBAAgB,iBAAiB,CAAC,CAAC,EAAE,kBAAkB,EAAE,CAAC,EAAE,gBAAgB,WAwB3E"}
@@ -484,7 +484,7 @@ var ChangeMaker = class {
  return [];
  }
  try {
- return await this.#makeChanges(lc, msg);
+ return await this.#makeChanges(lc, lsn, msg);
  } catch (err) {
  this.#error = {
  lsn,
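Editor's note: this hunk threads the commit LSN into #makeChanges so the DDL handling further down can attach it to its log context and error reports. The guard that short-circuits after a stored error sits just above the visible context, so the sketch below only approximates the surrounding state machine; the class and member names are stand-ins, not the package's real API.

// Illustrative sketch of the error-capture state visible around this hunk and
// the next: when a message fails, it is stored together with its lsn, and the
// stored error is re-logged at most once per minute.
type PgMessage = {tag: string; content?: unknown};

class ReplicationErrorState {
  #error:
    | {lsn: bigint; msg: PgMessage; err: unknown; lastLogTime: number}
    | undefined;

  record(lsn: bigint, msg: PgMessage, err: unknown): void {
    this.#error = {lsn, msg, err, lastLogTime: Date.now()};
  }

  // Mirrors the rate limit in the next hunk: at most one error line per 60s.
  dueForLog(now = Date.now()):
    | {lsn: bigint; msg: PgMessage; err: unknown}
    | undefined {
    if (this.#error && now - this.#error.lastLogTime > 6e4) {
      this.#error.lastLogTime = now;
      return this.#error;
    }
    return undefined;
  }
}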
@@ -510,14 +510,22 @@ var ChangeMaker = class {
  const { lsn, msg, err, lastLogTime } = error;
  const now = Date.now();
  if (now - lastLogTime > 6e4) {
- lc.error?.(`Unable to continue replication from LSN ${fromBigInt(lsn)}: ${String(err)}`, err instanceof UnsupportedSchemaChangeError ? err.event.context : {
- ...msg,
- content: void 0
+ lc.error?.(`Unable to continue replication from LSN ${fromBigInt(lsn)}: ${String(err)}`, {
+ change: {
+ ...msg,
+ content: void 0
+ },
+ ...err instanceof UnsupportedSchemaChangeError && { context: err.event.context },
+ ...err instanceof Error && {
+ errorMsg: err.message,
+ name: err.name,
+ stack: err.stack
+ }
  });
  error.lastLogTime = now;
  }
  }
- async #makeChanges(lc, msg) {
+ async #makeChanges(lc, lsn, msg) {
  switch (msg.tag) {
  case "begin": return [[
  "begin",
@@ -554,7 +562,7 @@ var ChangeMaker = class {
  }
  switch (msg.prefix.substring(this.#shardPrefix.length)) {
  case "":
- case "/ddl": return this.#handleDdlMessage(lc, msg);
+ case "/ddl": return this.#handleDdlMessage(lc, lsn, msg);
  default:
  lc.debug?.("ignoring unknown message type", msg.prefix);
  return [];
@@ -571,33 +579,29 @@ var ChangeMaker = class {
  }
  }
  #lastReplicationEvent;
- #handleDdlMessage(lc, msg) {
+ #handleDdlMessage(lc, lsn, msg) {
  const event = parseLogicalMessageContent(msg, replicationEventSchema);
- lc = lc.withContext("tag", event.event.tag).withContext("query", event.context.query);
+ lc = lc.withContext("lsn", fromBigInt(lsn)).withContext("tag", event.event.tag).withContext("query", event.context.query);
  clearTimeout(this.#replicaIdentityTimer);
- let prevEvent = this.#lastReplicationEvent;
  const { type } = event;
  switch (type) {
  case "ddlStart":
- case "schemaSnapshot": break;
- case "ddlUpdate":
- if (!prevEvent) {
- lc.error?.(`ddlUpdate received without a ddlStart`, { event });
- assert(prevEvent, `ddlUpdate received without a ddlStart`);
- }
- break;
+ case "schemaSnapshot":
+ case "ddlUpdate": break;
  default:
  lc.info?.(`ignoring unknown ddl message type: ${type}`);
  return [];
  }
+ const prevEvent = this.#lastReplicationEvent;
  this.#lastReplicationEvent = event;
- if (!prevEvent) {
+ const prevSchema = event.previousSchema === void 0 ? prevEvent?.schema : event.previousSchema;
+ if (!prevSchema) {
  lc.info?.(`received ${msg.prefix}/${type} event`, { event });
  return [];
  }
- lc.info?.(`processing ${msg.prefix}/${type} event`, { event });
- const effectiveTag = prevEvent.type === "ddlStart" ? prevEvent.event.tag : event.event.tag;
- const changes = this.#makeSchemaChanges(lc, prevEvent.schema, event, effectiveTag).map((change) => ["data", change]);
+ const effectiveTag = prevEvent?.type === "ddlStart" ? prevEvent.event.tag : event.type === "ddlUpdate" ? event.event.tag : "UNKNOWN";
+ lc.info?.(`processing ${effectiveTag} command from ${msg.prefix}/${type}`, { event });
+ const changes = this.#makeSchemaChanges(lc, prevSchema, event, effectiveTag).map((change) => ["data", change]);
  lc.info?.(`${changes.length} schema change(s)`, { changes });
  const replicaIdentities = replicaIdentitiesForTablesWithoutPrimaryKeys(event.schema);
  if (replicaIdentities) this.#replicaIdentityTimer = setTimeout(async () => {
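Editor's note: the hunk above reworks #handleDdlMessage. The commit LSN is added to the log context, a `ddlUpdate` no longer asserts that a `ddlStart` was seen first, the previous schema is preferably read from the event's own `previousSchema` field (falling back to the remembered previous event), and the effective tag falls back to "UNKNOWN" when neither source names the DDL command. The sketch below restates the new selection logic; the event type is a simplified stand-in for the real replication-event schema.

// Sketch of the prevSchema / effectiveTag selection introduced in this hunk.
type DdlEvent = {
  type: 'ddlStart' | 'ddlUpdate' | 'schemaSnapshot';
  event: {tag: string};
  schema: unknown;
  previousSchema?: unknown;
};

function resolveSchemaChange(
  event: DdlEvent,
  prevEvent: DdlEvent | undefined,
): {prevSchema: unknown; effectiveTag: string} | undefined {
  // 1.4.0 asserted that a ddlUpdate was preceded by a ddlStart; 1.5.0-canary.1
  // instead prefers the schema embedded in the event itself.
  const prevSchema =
    event.previousSchema === undefined
      ? prevEvent?.schema
      : event.previousSchema;
  if (!prevSchema) {
    return undefined; // nothing to diff against; the event is only logged
  }
  const effectiveTag =
    prevEvent?.type === 'ddlStart'
      ? prevEvent.event.tag
      : event.type === 'ddlUpdate'
        ? event.event.tag
        : 'UNKNOWN';
  return {prevSchema, effectiveTag};
}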