@rocicorp/zero 1.3.0 → 1.4.0-canary.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (332)
  1. package/out/analyze-query/src/analyze-cli.d.ts +24 -0
  2. package/out/analyze-query/src/analyze-cli.d.ts.map +1 -0
  3. package/out/analyze-query/src/analyze-cli.js +289 -0
  4. package/out/analyze-query/src/analyze-cli.js.map +1 -0
  5. package/out/analyze-query/src/bin-analyze.js +6 -6
  6. package/out/analyze-query/src/bin-transform.js +2 -2
  7. package/out/ast-to-zql/src/bin.js +1 -1
  8. package/out/shared/src/logging.d.ts.map +1 -1
  9. package/out/shared/src/logging.js +1 -1
  10. package/out/shared/src/logging.js.map +1 -1
  11. package/out/shared/src/options.d.ts.map +1 -1
  12. package/out/shared/src/options.js +1 -1
  13. package/out/shared/src/options.js.map +1 -1
  14. package/out/z2s/src/compiler.d.ts.map +1 -1
  15. package/out/z2s/src/compiler.js +4 -1
  16. package/out/z2s/src/compiler.js.map +1 -1
  17. package/out/z2s/src/sql.d.ts.map +1 -1
  18. package/out/z2s/src/sql.js +1 -0
  19. package/out/z2s/src/sql.js.map +1 -1
  20. package/out/zero/package.js +95 -89
  21. package/out/zero/package.js.map +1 -1
  22. package/out/zero/src/analyze.d.ts +2 -0
  23. package/out/zero/src/analyze.d.ts.map +1 -0
  24. package/out/zero/src/analyze.js +2 -0
  25. package/out/zero/src/bindings.js +1 -1
  26. package/out/zero/src/zero-cache-dev.js +1 -1
  27. package/out/zero/src/zero-cache-dev.js.map +1 -1
  28. package/out/zero/src/zero-out.js +1 -1
  29. package/out/zero-cache/src/auth/auth.d.ts.map +1 -1
  30. package/out/zero-cache/src/auth/auth.js.map +1 -1
  31. package/out/zero-cache/src/auth/load-permissions.js +2 -2
  32. package/out/zero-cache/src/auth/write-authorizer.d.ts.map +1 -1
  33. package/out/zero-cache/src/auth/write-authorizer.js +5 -14
  34. package/out/zero-cache/src/auth/write-authorizer.js.map +1 -1
  35. package/out/zero-cache/src/config/network.d.ts +1 -1
  36. package/out/zero-cache/src/config/network.d.ts.map +1 -1
  37. package/out/zero-cache/src/config/network.js +1 -1
  38. package/out/zero-cache/src/config/network.js.map +1 -1
  39. package/out/zero-cache/src/config/normalize.d.ts.map +1 -1
  40. package/out/zero-cache/src/config/normalize.js.map +1 -1
  41. package/out/zero-cache/src/config/zero-config.d.ts +5 -0
  42. package/out/zero-cache/src/config/zero-config.d.ts.map +1 -1
  43. package/out/zero-cache/src/config/zero-config.js +16 -3
  44. package/out/zero-cache/src/config/zero-config.js.map +1 -1
  45. package/out/zero-cache/src/db/lite-tables.d.ts.map +1 -1
  46. package/out/zero-cache/src/db/lite-tables.js +3 -3
  47. package/out/zero-cache/src/db/lite-tables.js.map +1 -1
  48. package/out/zero-cache/src/db/transaction-pool.d.ts +43 -40
  49. package/out/zero-cache/src/db/transaction-pool.d.ts.map +1 -1
  50. package/out/zero-cache/src/db/transaction-pool.js +76 -56
  51. package/out/zero-cache/src/db/transaction-pool.js.map +1 -1
  52. package/out/zero-cache/src/observability/events.d.ts.map +1 -1
  53. package/out/zero-cache/src/observability/events.js +1 -1
  54. package/out/zero-cache/src/observability/events.js.map +1 -1
  55. package/out/zero-cache/src/scripts/decommission.js +1 -1
  56. package/out/zero-cache/src/scripts/deploy-permissions.js +2 -2
  57. package/out/zero-cache/src/scripts/permissions.js +1 -1
  58. package/out/zero-cache/src/server/anonymous-otel-start.d.ts.map +1 -1
  59. package/out/zero-cache/src/server/anonymous-otel-start.js +4 -4
  60. package/out/zero-cache/src/server/anonymous-otel-start.js.map +1 -1
  61. package/out/zero-cache/src/server/change-streamer.d.ts +1 -1
  62. package/out/zero-cache/src/server/change-streamer.d.ts.map +1 -1
  63. package/out/zero-cache/src/server/change-streamer.js +27 -12
  64. package/out/zero-cache/src/server/change-streamer.js.map +1 -1
  65. package/out/zero-cache/src/server/logging.d.ts +1 -3
  66. package/out/zero-cache/src/server/logging.d.ts.map +1 -1
  67. package/out/zero-cache/src/server/logging.js +6 -3
  68. package/out/zero-cache/src/server/logging.js.map +1 -1
  69. package/out/zero-cache/src/server/main.d.ts.map +1 -1
  70. package/out/zero-cache/src/server/main.js +26 -26
  71. package/out/zero-cache/src/server/main.js.map +1 -1
  72. package/out/zero-cache/src/server/mutator.js +4 -2
  73. package/out/zero-cache/src/server/mutator.js.map +1 -1
  74. package/out/zero-cache/src/server/otel-log-sink.d.ts.map +1 -1
  75. package/out/zero-cache/src/server/otel-log-sink.js +0 -2
  76. package/out/zero-cache/src/server/otel-log-sink.js.map +1 -1
  77. package/out/zero-cache/src/server/otel-start.d.ts +1 -1
  78. package/out/zero-cache/src/server/otel-start.d.ts.map +1 -1
  79. package/out/zero-cache/src/server/otel-start.js +7 -3
  80. package/out/zero-cache/src/server/otel-start.js.map +1 -1
  81. package/out/zero-cache/src/server/reaper.js +6 -6
  82. package/out/zero-cache/src/server/reaper.js.map +1 -1
  83. package/out/zero-cache/src/server/replicator.d.ts.map +1 -1
  84. package/out/zero-cache/src/server/replicator.js +5 -3
  85. package/out/zero-cache/src/server/replicator.js.map +1 -1
  86. package/out/zero-cache/src/server/runner/run-worker.js +2 -2
  87. package/out/zero-cache/src/server/runner/run-worker.js.map +1 -1
  88. package/out/zero-cache/src/server/syncer.d.ts.map +1 -1
  89. package/out/zero-cache/src/server/syncer.js +13 -12
  90. package/out/zero-cache/src/server/syncer.js.map +1 -1
  91. package/out/zero-cache/src/server/worker-dispatcher.js +1 -1
  92. package/out/zero-cache/src/services/analyze.js +1 -1
  93. package/out/zero-cache/src/services/change-source/common/backfill-manager.js +1 -1
  94. package/out/zero-cache/src/services/change-source/common/replica-schema.js +1 -1
  95. package/out/zero-cache/src/services/change-source/custom/change-source.js +2 -2
  96. package/out/zero-cache/src/services/change-source/pg/backfill-stream.js +4 -1
  97. package/out/zero-cache/src/services/change-source/pg/backfill-stream.js.map +1 -1
  98. package/out/zero-cache/src/services/change-source/pg/change-source.d.ts.map +1 -1
  99. package/out/zero-cache/src/services/change-source/pg/change-source.js +19 -23
  100. package/out/zero-cache/src/services/change-source/pg/change-source.js.map +1 -1
  101. package/out/zero-cache/src/services/change-source/pg/initial-sync.d.ts +58 -3
  102. package/out/zero-cache/src/services/change-source/pg/initial-sync.d.ts.map +1 -1
  103. package/out/zero-cache/src/services/change-source/pg/initial-sync.js +209 -52
  104. package/out/zero-cache/src/services/change-source/pg/initial-sync.js.map +1 -1
  105. package/out/zero-cache/src/services/change-source/pg/logical-replication/stream.js +2 -2
  106. package/out/zero-cache/src/services/change-source/pg/schema/ddl.d.ts +24 -15
  107. package/out/zero-cache/src/services/change-source/pg/schema/ddl.d.ts.map +1 -1
  108. package/out/zero-cache/src/services/change-source/pg/schema/ddl.js +35 -58
  109. package/out/zero-cache/src/services/change-source/pg/schema/ddl.js.map +1 -1
  110. package/out/zero-cache/src/services/change-source/pg/schema/init.d.ts.map +1 -1
  111. package/out/zero-cache/src/services/change-source/pg/schema/init.js +2 -2
  112. package/out/zero-cache/src/services/change-source/pg/schema/init.js.map +1 -1
  113. package/out/zero-cache/src/services/change-source/pg/schema/published.d.ts +1 -2
  114. package/out/zero-cache/src/services/change-source/pg/schema/published.d.ts.map +1 -1
  115. package/out/zero-cache/src/services/change-source/pg/schema/published.js +15 -18
  116. package/out/zero-cache/src/services/change-source/pg/schema/published.js.map +1 -1
  117. package/out/zero-cache/src/services/change-source/pg/schema/shard.js +1 -1
  118. package/out/zero-cache/src/services/change-source/protocol/current/data.js +1 -1
  119. package/out/zero-cache/src/services/change-streamer/backup-monitor.js +1 -1
  120. package/out/zero-cache/src/services/change-streamer/change-streamer-http.d.ts +1 -1
  121. package/out/zero-cache/src/services/change-streamer/change-streamer-http.d.ts.map +1 -1
  122. package/out/zero-cache/src/services/change-streamer/change-streamer-http.js +1 -1
  123. package/out/zero-cache/src/services/change-streamer/change-streamer-http.js.map +1 -1
  124. package/out/zero-cache/src/services/change-streamer/change-streamer-service.d.ts +5 -1
  125. package/out/zero-cache/src/services/change-streamer/change-streamer-service.d.ts.map +1 -1
  126. package/out/zero-cache/src/services/change-streamer/change-streamer-service.js +10 -7
  127. package/out/zero-cache/src/services/change-streamer/change-streamer-service.js.map +1 -1
  128. package/out/zero-cache/src/services/change-streamer/replica-monitor.js +2 -2
  129. package/out/zero-cache/src/services/change-streamer/storer.d.ts +19 -2
  130. package/out/zero-cache/src/services/change-streamer/storer.d.ts.map +1 -1
  131. package/out/zero-cache/src/services/change-streamer/storer.js +70 -6
  132. package/out/zero-cache/src/services/change-streamer/storer.js.map +1 -1
  133. package/out/zero-cache/src/services/heapz.d.ts.map +1 -1
  134. package/out/zero-cache/src/services/heapz.js +1 -1
  135. package/out/zero-cache/src/services/heapz.js.map +1 -1
  136. package/out/zero-cache/src/services/life-cycle.d.ts +2 -1
  137. package/out/zero-cache/src/services/life-cycle.d.ts.map +1 -1
  138. package/out/zero-cache/src/services/life-cycle.js +10 -7
  139. package/out/zero-cache/src/services/life-cycle.js.map +1 -1
  140. package/out/zero-cache/src/services/litestream/commands.d.ts +15 -4
  141. package/out/zero-cache/src/services/litestream/commands.d.ts.map +1 -1
  142. package/out/zero-cache/src/services/litestream/commands.js +40 -34
  143. package/out/zero-cache/src/services/litestream/commands.js.map +1 -1
  144. package/out/zero-cache/src/services/mutagen/mutagen.js +3 -3
  145. package/out/zero-cache/src/services/mutagen/pusher.d.ts +28 -28
  146. package/out/zero-cache/src/services/replicator/change-processor.js +2 -2
  147. package/out/zero-cache/src/services/replicator/incremental-sync.js +1 -1
  148. package/out/zero-cache/src/services/replicator/schema/replication-state.js +1 -1
  149. package/out/zero-cache/src/services/replicator/write-worker-client.js.map +1 -1
  150. package/out/zero-cache/src/services/replicator/write-worker.js +3 -3
  151. package/out/zero-cache/src/services/replicator/write-worker.js.map +1 -1
  152. package/out/zero-cache/src/services/run-ast.d.ts.map +1 -1
  153. package/out/zero-cache/src/services/run-ast.js +3 -3
  154. package/out/zero-cache/src/services/run-ast.js.map +1 -1
  155. package/out/zero-cache/src/services/statz.d.ts.map +1 -1
  156. package/out/zero-cache/src/services/statz.js +3 -3
  157. package/out/zero-cache/src/services/statz.js.map +1 -1
  158. package/out/zero-cache/src/services/view-syncer/active-users-gauge.js +1 -1
  159. package/out/zero-cache/src/services/view-syncer/connection-context-manager.d.ts +2 -2
  160. package/out/zero-cache/src/services/view-syncer/connection-context-manager.d.ts.map +1 -1
  161. package/out/zero-cache/src/services/view-syncer/connection-context-manager.js.map +1 -1
  162. package/out/zero-cache/src/services/view-syncer/cvr-purger.js +1 -1
  163. package/out/zero-cache/src/services/view-syncer/cvr-store.js +3 -3
  164. package/out/zero-cache/src/services/view-syncer/cvr-store.js.map +1 -1
  165. package/out/zero-cache/src/services/view-syncer/cvr.js +1 -1
  166. package/out/zero-cache/src/services/view-syncer/inspect-handler.js +2 -2
  167. package/out/zero-cache/src/services/view-syncer/pipeline-driver.d.ts +6 -16
  168. package/out/zero-cache/src/services/view-syncer/pipeline-driver.d.ts.map +1 -1
  169. package/out/zero-cache/src/services/view-syncer/pipeline-driver.js +31 -39
  170. package/out/zero-cache/src/services/view-syncer/pipeline-driver.js.map +1 -1
  171. package/out/zero-cache/src/services/view-syncer/row-record-cache.d.ts.map +1 -1
  172. package/out/zero-cache/src/services/view-syncer/row-record-cache.js +4 -4
  173. package/out/zero-cache/src/services/view-syncer/row-record-cache.js.map +1 -1
  174. package/out/zero-cache/src/services/view-syncer/snapshotter.js +2 -2
  175. package/out/zero-cache/src/services/view-syncer/view-syncer.d.ts.map +1 -1
  176. package/out/zero-cache/src/services/view-syncer/view-syncer.js +6 -6
  177. package/out/zero-cache/src/services/view-syncer/view-syncer.js.map +1 -1
  178. package/out/zero-cache/src/types/profiler.d.ts.map +1 -1
  179. package/out/zero-cache/src/types/profiler.js.map +1 -1
  180. package/out/zero-cache/src/types/row-key.d.ts.map +1 -1
  181. package/out/zero-cache/src/types/row-key.js.map +1 -1
  182. package/out/zero-cache/src/types/streams.d.ts +1 -1
  183. package/out/zero-cache/src/types/streams.d.ts.map +1 -1
  184. package/out/zero-cache/src/types/streams.js.map +1 -1
  185. package/out/zero-cache/src/types/websocket-handoff.d.ts +1 -1
  186. package/out/zero-cache/src/types/websocket-handoff.d.ts.map +1 -1
  187. package/out/zero-cache/src/types/websocket-handoff.js +1 -1
  188. package/out/zero-cache/src/types/websocket-handoff.js.map +1 -1
  189. package/out/zero-cache/src/workers/connection.d.ts +1 -1
  190. package/out/zero-cache/src/workers/connection.d.ts.map +1 -1
  191. package/out/zero-cache/src/workers/connection.js.map +1 -1
  192. package/out/zero-cache/src/workers/mutator.js.map +1 -1
  193. package/out/zero-cache/src/workers/syncer.d.ts +1 -1
  194. package/out/zero-cache/src/workers/syncer.d.ts.map +1 -1
  195. package/out/zero-cache/src/workers/syncer.js +3 -3
  196. package/out/zero-cache/src/workers/syncer.js.map +1 -1
  197. package/out/zero-client/src/client/bindings.js +1 -1
  198. package/out/zero-client/src/client/crud-impl.d.ts.map +1 -1
  199. package/out/zero-client/src/client/crud-impl.js +4 -13
  200. package/out/zero-client/src/client/crud-impl.js.map +1 -1
  201. package/out/zero-client/src/client/inspector/inspector.d.ts +24 -0
  202. package/out/zero-client/src/client/inspector/inspector.d.ts.map +1 -1
  203. package/out/zero-client/src/client/inspector/inspector.js +28 -0
  204. package/out/zero-client/src/client/inspector/inspector.js.map +1 -1
  205. package/out/zero-client/src/client/inspector/lazy-inspector.d.ts +9 -0
  206. package/out/zero-client/src/client/inspector/lazy-inspector.d.ts.map +1 -1
  207. package/out/zero-client/src/client/inspector/lazy-inspector.js +28 -1
  208. package/out/zero-client/src/client/inspector/lazy-inspector.js.map +1 -1
  209. package/out/zero-client/src/client/ivm-branch.d.ts.map +1 -1
  210. package/out/zero-client/src/client/ivm-branch.js +4 -13
  211. package/out/zero-client/src/client/ivm-branch.js.map +1 -1
  212. package/out/zero-client/src/client/log-options.d.ts +1 -0
  213. package/out/zero-client/src/client/log-options.d.ts.map +1 -1
  214. package/out/zero-client/src/client/log-options.js +3 -2
  215. package/out/zero-client/src/client/log-options.js.map +1 -1
  216. package/out/zero-client/src/client/options.d.ts +13 -1
  217. package/out/zero-client/src/client/options.d.ts.map +1 -1
  218. package/out/zero-client/src/client/options.js.map +1 -1
  219. package/out/zero-client/src/client/version.js +1 -1
  220. package/out/zero-client/src/client/zero.d.ts.map +1 -1
  221. package/out/zero-client/src/client/zero.js +2 -1
  222. package/out/zero-client/src/client/zero.js.map +1 -1
  223. package/out/zero-protocol/src/error.d.ts.map +1 -1
  224. package/out/zero-protocol/src/error.js +1 -1
  225. package/out/zero-protocol/src/error.js.map +1 -1
  226. package/out/zero-react/src/bindings.js +1 -1
  227. package/out/zero-solid/src/bindings.js +1 -1
  228. package/out/zero-solid/src/solid-view.d.ts.map +1 -1
  229. package/out/zero-solid/src/solid-view.js +14 -14
  230. package/out/zero-solid/src/solid-view.js.map +1 -1
  231. package/out/zql/src/builder/builder.d.ts.map +1 -1
  232. package/out/zql/src/builder/builder.js.map +1 -1
  233. package/out/zql/src/ivm/array-view.d.ts.map +1 -1
  234. package/out/zql/src/ivm/array-view.js +27 -2
  235. package/out/zql/src/ivm/array-view.js.map +1 -1
  236. package/out/zql/src/ivm/change-index-enum.d.ts +9 -0
  237. package/out/zql/src/ivm/change-index-enum.d.ts.map +1 -0
  238. package/out/zql/src/ivm/change-index.d.ts +5 -0
  239. package/out/zql/src/ivm/change-index.d.ts.map +1 -0
  240. package/out/zql/src/ivm/change-type-enum.d.ts +9 -0
  241. package/out/zql/src/ivm/change-type-enum.d.ts.map +1 -0
  242. package/out/zql/src/ivm/change-type.d.ts +5 -0
  243. package/out/zql/src/ivm/change-type.d.ts.map +1 -0
  244. package/out/zql/src/ivm/change.d.ts +20 -22
  245. package/out/zql/src/ivm/change.d.ts.map +1 -1
  246. package/out/zql/src/ivm/change.js +33 -0
  247. package/out/zql/src/ivm/change.js.map +1 -0
  248. package/out/zql/src/ivm/exists.d.ts.map +1 -1
  249. package/out/zql/src/ivm/exists.js +27 -38
  250. package/out/zql/src/ivm/exists.js.map +1 -1
  251. package/out/zql/src/ivm/fan-in.d.ts +3 -2
  252. package/out/zql/src/ivm/fan-in.d.ts.map +1 -1
  253. package/out/zql/src/ivm/fan-in.js.map +1 -1
  254. package/out/zql/src/ivm/fan-out.d.ts +1 -1
  255. package/out/zql/src/ivm/fan-out.d.ts.map +1 -1
  256. package/out/zql/src/ivm/fan-out.js +1 -1
  257. package/out/zql/src/ivm/fan-out.js.map +1 -1
  258. package/out/zql/src/ivm/filter-operators.d.ts +3 -3
  259. package/out/zql/src/ivm/filter-operators.d.ts.map +1 -1
  260. package/out/zql/src/ivm/filter-operators.js.map +1 -1
  261. package/out/zql/src/ivm/filter-push.d.ts.map +1 -1
  262. package/out/zql/src/ivm/filter-push.js +7 -7
  263. package/out/zql/src/ivm/filter-push.js.map +1 -1
  264. package/out/zql/src/ivm/filter.d.ts +1 -1
  265. package/out/zql/src/ivm/filter.d.ts.map +1 -1
  266. package/out/zql/src/ivm/filter.js.map +1 -1
  267. package/out/zql/src/ivm/flipped-join.d.ts.map +1 -1
  268. package/out/zql/src/ivm/flipped-join.js +49 -58
  269. package/out/zql/src/ivm/flipped-join.js.map +1 -1
  270. package/out/zql/src/ivm/join-utils.d.ts +2 -6
  271. package/out/zql/src/ivm/join-utils.d.ts.map +1 -1
  272. package/out/zql/src/ivm/join-utils.js +25 -25
  273. package/out/zql/src/ivm/join-utils.js.map +1 -1
  274. package/out/zql/src/ivm/join.d.ts.map +1 -1
  275. package/out/zql/src/ivm/join.js +32 -51
  276. package/out/zql/src/ivm/join.js.map +1 -1
  277. package/out/zql/src/ivm/maybe-split-and-push-edit-change.d.ts +1 -1
  278. package/out/zql/src/ivm/maybe-split-and-push-edit-change.d.ts.map +1 -1
  279. package/out/zql/src/ivm/maybe-split-and-push-edit-change.js +5 -10
  280. package/out/zql/src/ivm/maybe-split-and-push-edit-change.js.map +1 -1
  281. package/out/zql/src/ivm/memory-source.d.ts.map +1 -1
  282. package/out/zql/src/ivm/memory-source.js +52 -60
  283. package/out/zql/src/ivm/memory-source.js.map +1 -1
  284. package/out/zql/src/ivm/operator.d.ts +1 -1
  285. package/out/zql/src/ivm/operator.d.ts.map +1 -1
  286. package/out/zql/src/ivm/operator.js +2 -4
  287. package/out/zql/src/ivm/operator.js.map +1 -1
  288. package/out/zql/src/ivm/push-accumulated.d.ts +3 -2
  289. package/out/zql/src/ivm/push-accumulated.d.ts.map +1 -1
  290. package/out/zql/src/ivm/push-accumulated.js +98 -122
  291. package/out/zql/src/ivm/push-accumulated.js.map +1 -1
  292. package/out/zql/src/ivm/skip-yields.d.ts +4 -0
  293. package/out/zql/src/ivm/skip-yields.d.ts.map +1 -0
  294. package/out/zql/src/ivm/skip-yields.js +33 -0
  295. package/out/zql/src/ivm/skip-yields.js.map +1 -0
  296. package/out/zql/src/ivm/skip.d.ts +1 -1
  297. package/out/zql/src/ivm/skip.d.ts.map +1 -1
  298. package/out/zql/src/ivm/skip.js +2 -2
  299. package/out/zql/src/ivm/skip.js.map +1 -1
  300. package/out/zql/src/ivm/source-change-index-enum.d.ts +7 -0
  301. package/out/zql/src/ivm/source-change-index-enum.d.ts.map +1 -0
  302. package/out/zql/src/ivm/source-change-index.d.ts +5 -0
  303. package/out/zql/src/ivm/source-change-index.d.ts.map +1 -0
  304. package/out/zql/src/ivm/source.d.ts +11 -13
  305. package/out/zql/src/ivm/source.d.ts.map +1 -1
  306. package/out/zql/src/ivm/source.js +26 -0
  307. package/out/zql/src/ivm/source.js.map +1 -0
  308. package/out/zql/src/ivm/take.d.ts.map +1 -1
  309. package/out/zql/src/ivm/take.js +27 -50
  310. package/out/zql/src/ivm/take.js.map +1 -1
  311. package/out/zql/src/ivm/union-fan-in.d.ts +2 -1
  312. package/out/zql/src/ivm/union-fan-in.d.ts.map +1 -1
  313. package/out/zql/src/ivm/union-fan-in.js +3 -3
  314. package/out/zql/src/ivm/union-fan-in.js.map +1 -1
  315. package/out/zql/src/ivm/union-fan-out.d.ts.map +1 -1
  316. package/out/zql/src/ivm/union-fan-out.js +1 -1
  317. package/out/zql/src/ivm/union-fan-out.js.map +1 -1
  318. package/out/zql/src/ivm/view-apply-change.js +1 -1
  319. package/out/zql/src/planner/planner-debug.d.ts +2 -2
  320. package/out/zql/src/planner/planner-debug.d.ts.map +1 -1
  321. package/out/zql/src/planner/planner-debug.js.map +1 -1
  322. package/out/zql/src/planner/planner-graph.d.ts +1 -1
  323. package/out/zql/src/planner/planner-graph.d.ts.map +1 -1
  324. package/out/zql/src/planner/planner-graph.js.map +1 -1
  325. package/out/zqlite/src/internal/sql-inline.d.ts.map +1 -1
  326. package/out/zqlite/src/internal/sql-inline.js.map +1 -1
  327. package/out/zqlite/src/query-builder.d.ts.map +1 -1
  328. package/out/zqlite/src/query-builder.js.map +1 -1
  329. package/out/zqlite/src/table-source.d.ts.map +1 -1
  330. package/out/zqlite/src/table-source.js +11 -11
  331. package/out/zqlite/src/table-source.js.map +1 -1
  332. package/package.json +99 -93
package/out/zero-cache/src/server/otel-start.js.map
@@ -1 +1 @@
- {"version":3,"file":"otel-start.js","names":["#instance","#started"],"sources":["../../../../../zero-cache/src/server/otel-start.ts"],"sourcesContent":["import {logs} from '@opentelemetry/api-logs';\nimport {getNodeAutoInstrumentations} from '@opentelemetry/auto-instrumentations-node';\nimport {resourceFromAttributes} from '@opentelemetry/resources';\nimport {NodeSDK} from '@opentelemetry/sdk-node';\nimport {ATTR_SERVICE_VERSION} from '@opentelemetry/semantic-conventions';\nimport type {LogContext} from '@rocicorp/logger';\nimport {\n otelEnabled,\n otelLogsEnabled,\n otelMetricsEnabled,\n otelTracesEnabled,\n} from '../../../otel/src/enabled.ts';\nimport {setupOtelDiagnosticLogger} from './otel-diag-logger.ts';\n\nclass OtelManager {\n static #instance: OtelManager;\n #started = false;\n\n private constructor() {}\n\n static getInstance(): OtelManager {\n if (!OtelManager.#instance) {\n OtelManager.#instance = new OtelManager();\n }\n return OtelManager.#instance;\n }\n\n startOtelAuto(lc?: LogContext) {\n if (this.#started || !otelEnabled()) {\n return;\n }\n this.#started = true;\n\n // Store and temporarily remove OTEL_LOG_LEVEL to prevent NodeSDK from setting its own logger\n const otelLogLevel = process.env.OTEL_LOG_LEVEL;\n delete process.env.OTEL_LOG_LEVEL;\n\n // Use exponential histograms by default to reduce cardinality from auto-instrumentation\n // This affects HTTP server/client and other auto-instrumented histogram metrics\n // Exponential histograms automatically adjust bucket boundaries and use fewer buckets\n process.env.OTEL_EXPORTER_OTLP_METRICS_DEFAULT_HISTOGRAM_AGGREGATION ??=\n 'base2_exponential_bucket_histogram';\n\n const resource = resourceFromAttributes({\n [ATTR_SERVICE_VERSION]: process.env.ZERO_SERVER_VERSION ?? 'unknown',\n });\n\n // Set defaults to be backwards compatible with the previously\n // hard-coded exporters\n process.env.OTEL_EXPORTER_OTLP_PROTOCOL ??= 'http/json';\n process.env.OTEL_METRICS_EXPORTER ??= otelMetricsEnabled()\n ? 'otlp'\n : 'none';\n process.env.OTEL_TRACES_EXPORTER ??= otelTracesEnabled() ? 'otlp' : 'none';\n process.env.OTEL_LOGS_EXPORTER ??= otelLogsEnabled() ? 'otlp' : 'none';\n\n const sdk = new NodeSDK({\n resource,\n autoDetectResources: true,\n instrumentations:\n process.env.OTEL_NODE_ENABLED_INSTRUMENTATIONS ||\n process.env.OTEL_NODE_DISABLED_INSTRUMENTATIONS\n ? 
[getNodeAutoInstrumentations()]\n : [],\n });\n\n try {\n sdk.start();\n } finally {\n if (otelLogLevel) {\n process.env.OTEL_LOG_LEVEL = otelLogLevel;\n }\n }\n setupOtelDiagnosticLogger(lc, true);\n\n logs.getLogger('zero-cache').emit({\n severityText: 'INFO',\n body: 'OpenTelemetry SDK started successfully',\n });\n }\n}\n\nexport const startOtelAuto = (lc?: LogContext) =>\n OtelManager.getInstance().startOtelAuto(lc);\n"],"mappings":";;;;;;;;AAcA,IAAM,cAAN,MAAM,YAAY;CAChB,QAAA;CACA,WAAW;CAEX,cAAsB;CAEtB,OAAO,cAA2B;AAChC,MAAI,CAAC,aAAA,SACH,cAAA,WAAwB,IAAI,aAAa;AAE3C,SAAO,aAAA;;CAGT,cAAc,IAAiB;AAC7B,MAAI,MAAA,WAAiB,CAAC,aAAa,CACjC;AAEF,QAAA,UAAgB;EAGhB,MAAM,eAAe,QAAQ,IAAI;AACjC,SAAO,QAAQ,IAAI;AAKnB,UAAQ,IAAI,6DACV;EAEF,MAAM,WAAW,uBAAuB,GACrC,uBAAuB,QAAQ,IAAI,uBAAuB,WAC5D,CAAC;AAIF,UAAQ,IAAI,gCAAgC;AAC5C,UAAQ,IAAI,0BAA0B,oBAAoB,GACtD,SACA;AACJ,UAAQ,IAAI,yBAAyB,mBAAmB,GAAG,SAAS;AACpE,UAAQ,IAAI,uBAAuB,iBAAiB,GAAG,SAAS;EAEhE,MAAM,MAAM,IAAI,QAAQ;GACtB;GACA,qBAAqB;GACrB,kBACE,QAAQ,IAAI,sCACZ,QAAQ,IAAI,sCACR,CAAC,6BAA6B,CAAC,GAC/B,EAAE;GACT,CAAC;AAEF,MAAI;AACF,OAAI,OAAO;YACH;AACR,OAAI,aACF,SAAQ,IAAI,iBAAiB;;AAGjC,4BAA0B,IAAI,KAAK;AAEnC,OAAK,UAAU,aAAa,CAAC,KAAK;GAChC,cAAc;GACd,MAAM;GACP,CAAC;;;AAIN,IAAa,iBAAiB,OAC5B,YAAY,aAAa,CAAC,cAAc,GAAG"}
+ {"version":3,"file":"otel-start.js","names":["#instance","#started"],"sources":["../../../../../zero-cache/src/server/otel-start.ts"],"sourcesContent":["import {logs} from '@opentelemetry/api-logs';\nimport {getNodeAutoInstrumentations} from '@opentelemetry/auto-instrumentations-node';\nimport {resourceFromAttributes} from '@opentelemetry/resources';\nimport {NodeSDK} from '@opentelemetry/sdk-node';\nimport {ATTR_SERVICE_VERSION} from '@opentelemetry/semantic-conventions';\nimport type {LogContext} from '@rocicorp/logger';\nimport {\n otelEnabled,\n otelLogsEnabled,\n otelMetricsEnabled,\n otelTracesEnabled,\n} from '../../../otel/src/enabled.ts';\nimport {setupOtelDiagnosticLogger} from './otel-diag-logger.ts';\n\nclass OtelManager {\n static #instance: OtelManager;\n #started = false;\n\n private constructor() {}\n\n static getInstance(): OtelManager {\n if (!OtelManager.#instance) {\n OtelManager.#instance = new OtelManager();\n }\n return OtelManager.#instance;\n }\n\n startOtelAuto(\n lc: LogContext | undefined,\n workerName: string,\n workerIndex: number,\n ) {\n if (this.#started || !otelEnabled()) {\n return;\n }\n this.#started = true;\n\n // Store and temporarily remove OTEL_LOG_LEVEL to prevent NodeSDK from setting its own logger\n const otelLogLevel = process.env.OTEL_LOG_LEVEL;\n delete process.env.OTEL_LOG_LEVEL;\n\n // Use exponential histograms by default to reduce cardinality from auto-instrumentation\n // This affects HTTP server/client and other auto-instrumented histogram metrics\n // Exponential histograms automatically adjust bucket boundaries and use fewer buckets\n process.env.OTEL_EXPORTER_OTLP_METRICS_DEFAULT_HISTOGRAM_AGGREGATION ??=\n 'base2_exponential_bucket_histogram';\n\n const resource = resourceFromAttributes({\n [ATTR_SERVICE_VERSION]: process.env.ZERO_SERVER_VERSION ?? 'unknown',\n // Tag every metric/trace/log with the worker name and index so each\n // worker process in a multi-worker pod is distinguishable. Without\n // this, N syncer workers sharing the same pod labels clobber each\n // other in the OTel collector on every scrape interval.\n // These mirror the 'worker' and 'workerIndex' keys in every log\n // context so logs and metrics can be correlated on the same fields.\n // Using a stable index instead of PID avoids label churn in Prometheus.\n 'process.worker': workerName,\n 'process.worker_index': workerIndex,\n });\n\n // Set defaults to be backwards compatible with the previously\n // hard-coded exporters\n process.env.OTEL_EXPORTER_OTLP_PROTOCOL ??= 'http/json';\n process.env.OTEL_METRICS_EXPORTER ??= otelMetricsEnabled()\n ? 'otlp'\n : 'none';\n process.env.OTEL_TRACES_EXPORTER ??= otelTracesEnabled() ? 'otlp' : 'none';\n process.env.OTEL_LOGS_EXPORTER ??= otelLogsEnabled() ? 'otlp' : 'none';\n\n const sdk = new NodeSDK({\n resource,\n autoDetectResources: true,\n instrumentations:\n process.env.OTEL_NODE_ENABLED_INSTRUMENTATIONS ||\n process.env.OTEL_NODE_DISABLED_INSTRUMENTATIONS\n ? 
[getNodeAutoInstrumentations()]\n : [],\n });\n\n try {\n sdk.start();\n } finally {\n if (otelLogLevel) {\n process.env.OTEL_LOG_LEVEL = otelLogLevel;\n }\n }\n setupOtelDiagnosticLogger(lc, true);\n\n logs.getLogger('zero-cache').emit({\n severityText: 'INFO',\n body: 'OpenTelemetry SDK started successfully',\n });\n }\n}\n\nexport const startOtelAuto = (\n lc: LogContext | undefined,\n workerName: string,\n workerIndex: number,\n) => OtelManager.getInstance().startOtelAuto(lc, workerName, workerIndex);\n"],"mappings":";;;;;;;;AAcA,IAAM,cAAN,MAAM,YAAY;CAChB,QAAA;CACA,WAAW;CAEX,cAAsB;CAEtB,OAAO,cAA2B;AAChC,MAAI,CAAC,aAAA,SACH,cAAA,WAAwB,IAAI,aAAa;AAE3C,SAAO,aAAA;;CAGT,cACE,IACA,YACA,aACA;AACA,MAAI,MAAA,WAAiB,CAAC,aAAa,CACjC;AAEF,QAAA,UAAgB;EAGhB,MAAM,eAAe,QAAQ,IAAI;AACjC,SAAO,QAAQ,IAAI;AAKnB,UAAQ,IAAI,6DACV;EAEF,MAAM,WAAW,uBAAuB;IACrC,uBAAuB,QAAQ,IAAI,uBAAuB;GAQ3D,kBAAkB;GAClB,wBAAwB;GACzB,CAAC;AAIF,UAAQ,IAAI,gCAAgC;AAC5C,UAAQ,IAAI,0BAA0B,oBAAoB,GACtD,SACA;AACJ,UAAQ,IAAI,yBAAyB,mBAAmB,GAAG,SAAS;AACpE,UAAQ,IAAI,uBAAuB,iBAAiB,GAAG,SAAS;EAEhE,MAAM,MAAM,IAAI,QAAQ;GACtB;GACA,qBAAqB;GACrB,kBACE,QAAQ,IAAI,sCACZ,QAAQ,IAAI,sCACR,CAAC,6BAA6B,CAAC,GAC/B,EAAE;GACT,CAAC;AAEF,MAAI;AACF,OAAI,OAAO;YACH;AACR,OAAI,aACF,SAAQ,IAAI,iBAAiB;;AAGjC,4BAA0B,IAAI,KAAK;AAEnC,OAAK,UAAU,aAAa,CAAC,KAAK;GAChC,cAAc;GACd,MAAM;GACP,CAAC;;;AAIN,IAAa,iBACX,IACA,YACA,gBACG,YAAY,aAAa,CAAC,cAAc,IAAI,YAAY,YAAY"}
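The sourcesContent embedded in this map carries the substance of the change: startOtelAuto no longer takes just an optional LogContext, it now requires a worker name and a stable worker index, and it stamps both onto the OTel resource so that multiple workers sharing one pod stop clobbering each other's metrics in the collector. A minimal sketch of the resource construction, distilled from the source above (the workerResource wrapper is illustrative and not a function in the package; resourceFromAttributes and ATTR_SERVICE_VERSION are the actual @opentelemetry exports imported there):

    import {resourceFromAttributes} from '@opentelemetry/resources';
    import {ATTR_SERVICE_VERSION} from '@opentelemetry/semantic-conventions';

    // Illustrative wrapper; in the package this logic runs inline in
    // OtelManager.startOtelAuto(lc, workerName, workerIndex).
    function workerResource(workerName: string, workerIndex: number) {
      return resourceFromAttributes({
        [ATTR_SERVICE_VERSION]: process.env.ZERO_SERVER_VERSION ?? 'unknown',
        // Mirrors the 'worker'/'workerIndex' keys in every log context so
        // logs and metrics can be correlated; a stable index rather than
        // the PID avoids label churn in Prometheus.
        'process.worker': workerName,
        'process.worker_index': workerIndex,
      });
    }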
package/out/zero-cache/src/server/reaper.js
@@ -1,15 +1,15 @@
  import { must } from "../../../shared/src/must.js";
  import { parentWorker, singleProcessMode } from "../types/processes.js";
- import { exitAfter, runUntilKilled } from "../services/life-cycle.js";
  import { getShardID } from "../types/shards.js";
  import { getNormalizedZeroConfig } from "../config/zero-config.js";
- import { startOtelAuto } from "./otel-start.js";
- import { createLogContext } from "./logging.js";
  import { pgClient } from "../types/pg.js";
+ import { exitAfter, runUntilKilled } from "../services/life-cycle.js";
+ import { createLogContext } from "./logging.js";
  import { initEventSink } from "../observability/events.js";
- import { CVRPurger } from "../services/view-syncer/cvr-purger.js";
+ import { startOtelAuto } from "./otel-start.js";
  import { startAnonymousTelemetry } from "./anonymous-otel-start.js";
  import { ActiveUsersGauge } from "../services/view-syncer/active-users-gauge.js";
+ import { CVRPurger } from "../services/view-syncer/cvr-purger.js";
  import { initViewSyncerSchema } from "../services/view-syncer/schema/init.js";
  //#region ../zero-cache/src/server/reaper.ts
  var MS_PER_HOUR = 1e3 * 60 * 60;
@@ -18,8 +18,8 @@ async function runWorker(parent, env, ...argv) {
  env,
  argv
  });
- startOtelAuto(createLogContext(config, { worker: "reaper" }, false));
- const lc = createLogContext(config, { worker: "reaper" }, true);
+ startOtelAuto(createLogContext(config, "reaper", 0, false), "reaper", 0);
+ const lc = createLogContext(config, "reaper");
  initEventSink(lc, config);
  startAnonymousTelemetry(lc, config);
  const { cvr } = config;
package/out/zero-cache/src/server/reaper.js.map
@@ -1 +1 @@
- {"version":3,"file":"reaper.js","names":[],"sources":["../../../../../zero-cache/src/server/reaper.ts"],"sourcesContent":["import {must} from '../../../shared/src/must.ts';\nimport {getNormalizedZeroConfig} from '../config/zero-config.ts';\nimport {initEventSink} from '../observability/events.ts';\nimport {exitAfter, runUntilKilled} from '../services/life-cycle.ts';\nimport {CVRPurger} from '../services/view-syncer/cvr-purger.ts';\nimport {ActiveUsersGauge} from '../services/view-syncer/active-users-gauge.ts';\nimport {initViewSyncerSchema} from '../services/view-syncer/schema/init.ts';\nimport {pgClient} from '../types/pg.ts';\nimport {\n parentWorker,\n singleProcessMode,\n type Worker,\n} from '../types/processes.ts';\nimport {getShardID} from '../types/shards.ts';\nimport {createLogContext} from './logging.ts';\nimport {startOtelAuto} from './otel-start.ts';\nimport {startAnonymousTelemetry} from './anonymous-otel-start.ts';\n\nconst MS_PER_HOUR = 1000 * 60 * 60;\n\nexport default async function runWorker(\n parent: Worker,\n env: NodeJS.ProcessEnv,\n ...argv: string[]\n): Promise<void> {\n const config = getNormalizedZeroConfig({env, argv});\n\n startOtelAuto(createLogContext(config, {worker: 'reaper'}, false));\n const lc = createLogContext(config, {worker: 'reaper'}, true);\n initEventSink(lc, config);\n startAnonymousTelemetry(lc, config);\n\n const {cvr} = config;\n const shard = getShardID(config);\n const cvrDB = pgClient(lc, cvr.db, {\n max: 1,\n connection: {['application_name']: `zero-sync-cvr-purger`},\n });\n await initViewSyncerSchema(lc, cvrDB, shard);\n parent.send(['ready', {ready: true}]);\n\n return runUntilKilled(\n lc,\n parent,\n new CVRPurger(lc, cvrDB, shard, {\n inactivityThresholdMs:\n cvr.garbageCollectionInactivityThresholdHours * MS_PER_HOUR,\n initialBatchSize: cvr.garbageCollectionInitialBatchSize,\n initialIntervalMs: cvr.garbageCollectionInitialIntervalSeconds * 1000,\n }),\n // Periodically computes and exports active users gauge to anonymous telemetry\n new ActiveUsersGauge(lc, cvrDB, shard, {\n // Default 10minutes refresh; can be made configurable later if needed\n updateIntervalMs: 10 * 60 * 1000,\n }),\n );\n}\n\n// fork()\nif (!singleProcessMode()) {\n void exitAfter(() =>\n runWorker(must(parentWorker), process.env, ...process.argv.slice(2)),\n );\n}\n"],"mappings":";;;;;;;;;;;;;;AAkBA,IAAM,cAAc,MAAO,KAAK;AAEhC,eAA8B,UAC5B,QACA,KACA,GAAG,MACY;CACf,MAAM,SAAS,wBAAwB;EAAC;EAAK;EAAK,CAAC;AAEnD,eAAc,iBAAiB,QAAQ,EAAC,QAAQ,UAAS,EAAE,MAAM,CAAC;CAClE,MAAM,KAAK,iBAAiB,QAAQ,EAAC,QAAQ,UAAS,EAAE,KAAK;AAC7D,eAAc,IAAI,OAAO;AACzB,yBAAwB,IAAI,OAAO;CAEnC,MAAM,EAAC,QAAO;CACd,MAAM,QAAQ,WAAW,OAAO;CAChC,MAAM,QAAQ,SAAS,IAAI,IAAI,IAAI;EACjC,KAAK;EACL,YAAY,GAAE,qBAAqB,wBAAuB;EAC3D,CAAC;AACF,OAAM,qBAAqB,IAAI,OAAO,MAAM;AAC5C,QAAO,KAAK,CAAC,SAAS,EAAC,OAAO,MAAK,CAAC,CAAC;AAErC,QAAO,eACL,IACA,QACA,IAAI,UAAU,IAAI,OAAO,OAAO;EAC9B,uBACE,IAAI,4CAA4C;EAClD,kBAAkB,IAAI;EACtB,mBAAmB,IAAI,0CAA0C;EAClE,CAAC,EAEF,IAAI,iBAAiB,IAAI,OAAO,OAAO,EAErC,kBAAkB,MAAU,KAC7B,CAAC,CACH;;AAIH,IAAI,CAAC,mBAAmB,CACjB,iBACH,UAAU,KAAK,aAAa,EAAE,QAAQ,KAAK,GAAG,QAAQ,KAAK,MAAM,EAAE,CAAC,CACrE"}
+ {"version":3,"file":"reaper.js","names":[],"sources":["../../../../../zero-cache/src/server/reaper.ts"],"sourcesContent":["import {must} from '../../../shared/src/must.ts';\nimport {getNormalizedZeroConfig} from '../config/zero-config.ts';\nimport {initEventSink} from '../observability/events.ts';\nimport {exitAfter, runUntilKilled} from '../services/life-cycle.ts';\nimport {ActiveUsersGauge} from '../services/view-syncer/active-users-gauge.ts';\nimport {CVRPurger} from '../services/view-syncer/cvr-purger.ts';\nimport {initViewSyncerSchema} from '../services/view-syncer/schema/init.ts';\nimport {pgClient} from '../types/pg.ts';\nimport {\n parentWorker,\n singleProcessMode,\n type Worker,\n} from '../types/processes.ts';\nimport {getShardID} from '../types/shards.ts';\nimport {startAnonymousTelemetry} from './anonymous-otel-start.ts';\nimport {createLogContext} from './logging.ts';\nimport {startOtelAuto} from './otel-start.ts';\n\nconst MS_PER_HOUR = 1000 * 60 * 60;\n\nexport default async function runWorker(\n parent: Worker,\n env: NodeJS.ProcessEnv,\n ...argv: string[]\n): Promise<void> {\n const config = getNormalizedZeroConfig({env, argv});\n\n startOtelAuto(createLogContext(config, 'reaper', 0, false), 'reaper', 0);\n const lc = createLogContext(config, 'reaper');\n initEventSink(lc, config);\n startAnonymousTelemetry(lc, config);\n\n const {cvr} = config;\n const shard = getShardID(config);\n const cvrDB = pgClient(lc, cvr.db, {\n max: 1,\n connection: {['application_name']: `zero-sync-cvr-purger`},\n });\n await initViewSyncerSchema(lc, cvrDB, shard);\n parent.send(['ready', {ready: true}]);\n\n return runUntilKilled(\n lc,\n parent,\n new CVRPurger(lc, cvrDB, shard, {\n inactivityThresholdMs:\n cvr.garbageCollectionInactivityThresholdHours * MS_PER_HOUR,\n initialBatchSize: cvr.garbageCollectionInitialBatchSize,\n initialIntervalMs: cvr.garbageCollectionInitialIntervalSeconds * 1000,\n }),\n // Periodically computes and exports active users gauge to anonymous telemetry\n new ActiveUsersGauge(lc, cvrDB, shard, {\n // Default 10minutes refresh; can be made configurable later if needed\n updateIntervalMs: 10 * 60 * 1000,\n }),\n );\n}\n\n// fork()\nif (!singleProcessMode()) {\n void exitAfter(() =>\n runWorker(must(parentWorker), process.env, ...process.argv.slice(2)),\n );\n}\n"],"mappings":";;;;;;;;;;;;;;AAkBA,IAAM,cAAc,MAAO,KAAK;AAEhC,eAA8B,UAC5B,QACA,KACA,GAAG,MACY;CACf,MAAM,SAAS,wBAAwB;EAAC;EAAK;EAAK,CAAC;AAEnD,eAAc,iBAAiB,QAAQ,UAAU,GAAG,MAAM,EAAE,UAAU,EAAE;CACxE,MAAM,KAAK,iBAAiB,QAAQ,SAAS;AAC7C,eAAc,IAAI,OAAO;AACzB,yBAAwB,IAAI,OAAO;CAEnC,MAAM,EAAC,QAAO;CACd,MAAM,QAAQ,WAAW,OAAO;CAChC,MAAM,QAAQ,SAAS,IAAI,IAAI,IAAI;EACjC,KAAK;EACL,YAAY,GAAE,qBAAqB,wBAAuB;EAC3D,CAAC;AACF,OAAM,qBAAqB,IAAI,OAAO,MAAM;AAC5C,QAAO,KAAK,CAAC,SAAS,EAAC,OAAO,MAAK,CAAC,CAAC;AAErC,QAAO,eACL,IACA,QACA,IAAI,UAAU,IAAI,OAAO,OAAO;EAC9B,uBACE,IAAI,4CAA4C;EAClD,kBAAkB,IAAI;EACtB,mBAAmB,IAAI,0CAA0C;EAClE,CAAC,EAEF,IAAI,iBAAiB,IAAI,OAAO,OAAO,EAErC,kBAAkB,MAAU,KAC7B,CAAC,CACH;;AAIH,IAAI,CAAC,mBAAmB,CACjB,iBACH,UAAU,KAAK,aAAa,EAAE,QAAQ,KAAK,GAAG,QAAQ,KAAK,MAAM,EAAE,CAAC,CACrE"}
package/out/zero-cache/src/server/replicator.d.ts.map
@@ -1 +1 @@
- {"version":3,"file":"replicator.d.ts","sourceRoot":"","sources":["../../../../../zero-cache/src/server/replicator.ts"],"names":[],"mappings":"AAkBA,OAAO,EAGL,KAAK,MAAM,EACZ,MAAM,uBAAuB,CAAC;AAW/B,wBAA8B,SAAS,CACrC,MAAM,EAAE,MAAM,EACd,GAAG,EAAE,MAAM,CAAC,UAAU,EACtB,GAAG,IAAI,EAAE,MAAM,EAAE,GAChB,OAAO,CAAC,IAAI,CAAC,CAmEf"}
+ {"version":3,"file":"replicator.d.ts","sourceRoot":"","sources":["../../../../../zero-cache/src/server/replicator.ts"],"names":[],"mappings":"AAkBA,OAAO,EAGL,KAAK,MAAM,EACZ,MAAM,uBAAuB,CAAC;AAY/B,wBAA8B,SAAS,CACrC,MAAM,EAAE,MAAM,EACd,GAAG,EAAE,MAAM,CAAC,UAAU,EACtB,GAAG,IAAI,EAAE,MAAM,EAAE,GAChB,OAAO,CAAC,IAAI,CAAC,CAoEf"}
package/out/zero-cache/src/server/replicator.js
@@ -2,14 +2,15 @@ import { assert } from "../../../shared/src/asserts.js";
  import { parse } from "../../../shared/src/valita.js";
  import { must } from "../../../shared/src/must.js";
  import { parentWorker, singleProcessMode } from "../types/processes.js";
- import { exitAfter, runUntilKilled } from "../services/life-cycle.js";
  import { getShardConfig } from "../types/shards.js";
  import { getNormalizedZeroConfig } from "../config/zero-config.js";
- import { createLogContext } from "./logging.js";
  import { getOrCreateGauge } from "../observability/metrics.js";
+ import { exitAfter, runUntilKilled } from "../services/life-cycle.js";
+ import { createLogContext } from "./logging.js";
  import { initEventSink } from "../observability/events.js";
  import { ReplicationStatusPublisher } from "../services/replicator/replication-status.js";
  import { ChangeStreamerHttpClient } from "../services/change-streamer/change-streamer-http.js";
+ import { startOtelAuto } from "./otel-start.js";
  import { ThreadWriteWorkerClient } from "../services/replicator/write-worker-client.js";
  import { getPragmaConfig, replicaFileModeSchema, setUpMessageHandlers, setupReplica } from "../workers/replicator.js";
  import { ReplicatorService } from "../services/replicator/replicator.js";
@@ -25,7 +26,8 @@ async function runWorker(parent, env, ...args) {
  });
  const mode = fileMode === "backup" ? "backup" : "serving";
  const workerName = `${mode}-replicator`;
- const lc = createLogContext(config, { worker: workerName });
+ startOtelAuto(createLogContext(config, workerName, 0, false), workerName, 0);
+ const lc = createLogContext(config, workerName);
  initEventSink(lc, config);
  const { file: dbPath, walMode } = await setupReplica(lc, fileMode, config.replica);
  setupMetrics(lc, dbPath, walMode);
package/out/zero-cache/src/server/replicator.js.map
@@ -1 +1 @@
- {"version":3,"file":"replicator.js","names":[],"sources":["../../../../../zero-cache/src/server/replicator.ts"],"sourcesContent":["import type {ObservableCallback} from '@opentelemetry/api';\nimport type {LogContext} from '@rocicorp/logger';\nimport {stat} from 'node:fs/promises';\nimport {pid} from 'node:process';\nimport {assert} from '../../../shared/src/asserts.ts';\nimport {must} from '../../../shared/src/must.ts';\nimport * as v from '../../../shared/src/valita.ts';\nimport {getNormalizedZeroConfig} from '../config/zero-config.ts';\nimport {initEventSink} from '../observability/events.ts';\nimport {getOrCreateGauge} from '../observability/metrics.ts';\nimport {ChangeStreamerHttpClient} from '../services/change-streamer/change-streamer-http.ts';\nimport {exitAfter, runUntilKilled} from '../services/life-cycle.ts';\nimport {ReplicationStatusPublisher} from '../services/replicator/replication-status.ts';\nimport {\n ReplicatorService,\n type ReplicatorMode,\n} from '../services/replicator/replicator.ts';\nimport {ThreadWriteWorkerClient} from '../services/replicator/write-worker-client.ts';\nimport {\n parentWorker,\n singleProcessMode,\n type Worker,\n} from '../types/processes.ts';\nimport {getShardConfig} from '../types/shards.ts';\nimport {\n getPragmaConfig,\n replicaFileModeSchema,\n setUpMessageHandlers,\n setupReplica,\n type WalMode,\n} from '../workers/replicator.ts';\nimport {createLogContext} from './logging.ts';\n\nexport default async function runWorker(\n parent: Worker,\n env: NodeJS.ProcessEnv,\n ...args: string[]\n): Promise<void> {\n assert(args.length > 0, `replicator mode not specified`);\n const fileMode = v.parse(args[0], replicaFileModeSchema);\n\n const config = getNormalizedZeroConfig({env, argv: args.slice(1)});\n const mode: ReplicatorMode = fileMode === 'backup' ? 'backup' : 'serving';\n const workerName = `${mode}-replicator`;\n const lc = createLogContext(config, {worker: workerName});\n initEventSink(lc, config);\n\n const {file: dbPath, walMode} = await setupReplica(\n lc,\n fileMode,\n config.replica,\n );\n\n setupMetrics(lc, dbPath, walMode);\n\n // Create the write worker for async SQLite writes.\n const pragmas = getPragmaConfig(fileMode);\n const workerClient = new ThreadWriteWorkerClient();\n await workerClient.init(dbPath, mode, pragmas, config.log);\n\n const runningLocalChangeStreamer =\n config.changeStreamer.mode === 'dedicated' && !config.changeStreamer.uri;\n const shard = getShardConfig(config);\n const {\n taskID,\n change,\n changeStreamer: {\n port,\n uri: changeStreamerURI = runningLocalChangeStreamer\n ? `http://localhost:${port}/`\n : undefined,\n },\n } = config;\n const changeStreamer = new ChangeStreamerHttpClient(\n lc,\n shard,\n change.db,\n changeStreamerURI,\n );\n\n const replicator = new ReplicatorService(\n lc,\n taskID,\n `${workerName}-${pid}`,\n mode,\n changeStreamer,\n workerClient,\n runningLocalChangeStreamer\n ? 
// publish ReplicationStatusEvents from backup-replicator only\n ReplicationStatusPublisher.forReplicaFile(dbPath)\n : null,\n );\n\n setUpMessageHandlers(lc, replicator, parent);\n\n const running = runUntilKilled(lc, parent, replicator);\n\n // Signal readiness once the first ReplicaVersionReady notification is received.\n for await (const _ of replicator.subscribe()) {\n parent.send(['ready', {ready: true}]);\n break;\n }\n\n return running;\n}\n\nfunction setupMetrics(lc: LogContext, file: string, walMode: WalMode) {\n getOrCreateGauge('replica', 'db_size', {\n description:\n `The size of the replica's main db file, ` +\n `which does not include the wal file(s)`,\n unit: 'bytes',\n }).addCallback(observeFileSize(lc, file));\n\n getOrCreateGauge('replica', 'wal_size', {\n description: `The size of the replica's wal file`,\n unit: 'bytes',\n }).addCallback(observeFileSize(lc, `${file}-wal`));\n\n if (walMode === 'wal2') {\n getOrCreateGauge('replica', 'wal2_size', {\n description: `The size of the replica's wal2 file`,\n unit: 'bytes',\n }).addCallback(observeFileSize(lc, `${file}-wal2`));\n }\n}\n\nfunction observeFileSize(lc: LogContext, file: string): ObservableCallback {\n return async o => {\n try {\n const stats = await stat(file);\n o.observe(stats.size);\n } catch (e) {\n lc.warn?.(`unable to stat ${file} for size metrics`, e);\n }\n };\n}\n\n// fork()\nif (!singleProcessMode()) {\n void exitAfter(() =>\n runWorker(must(parentWorker), process.env, ...process.argv.slice(2)),\n );\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;AAiCA,eAA8B,UAC5B,QACA,KACA,GAAG,MACY;AACf,QAAO,KAAK,SAAS,GAAG,gCAAgC;CACxD,MAAM,WAAW,MAAQ,KAAK,IAAI,sBAAsB;CAExD,MAAM,SAAS,wBAAwB;EAAC;EAAK,MAAM,KAAK,MAAM,EAAE;EAAC,CAAC;CAClE,MAAM,OAAuB,aAAa,WAAW,WAAW;CAChE,MAAM,aAAa,GAAG,KAAK;CAC3B,MAAM,KAAK,iBAAiB,QAAQ,EAAC,QAAQ,YAAW,CAAC;AACzD,eAAc,IAAI,OAAO;CAEzB,MAAM,EAAC,MAAM,QAAQ,YAAW,MAAM,aACpC,IACA,UACA,OAAO,QACR;AAED,cAAa,IAAI,QAAQ,QAAQ;CAGjC,MAAM,UAAU,gBAAgB,SAAS;CACzC,MAAM,eAAe,IAAI,yBAAyB;AAClD,OAAM,aAAa,KAAK,QAAQ,MAAM,SAAS,OAAO,IAAI;CAE1D,MAAM,6BACJ,OAAO,eAAe,SAAS,eAAe,CAAC,OAAO,eAAe;CACvE,MAAM,QAAQ,eAAe,OAAO;CACpC,MAAM,EACJ,QACA,QACA,gBAAgB,EACd,MACA,KAAK,oBAAoB,6BACrB,oBAAoB,KAAK,KACzB,KAAA,QAEJ;CACJ,MAAM,iBAAiB,IAAI,yBACzB,IACA,OACA,OAAO,IACP,kBACD;CAED,MAAM,aAAa,IAAI,kBACrB,IACA,QACA,GAAG,WAAW,GAAG,OACjB,MACA,gBACA,cACA,6BAEI,2BAA2B,eAAe,OAAO,GACjD,KACL;AAED,sBAAqB,IAAI,YAAY,OAAO;CAE5C,MAAM,UAAU,eAAe,IAAI,QAAQ,WAAW;AAGtD,YAAW,MAAM,KAAK,WAAW,WAAW,EAAE;AAC5C,SAAO,KAAK,CAAC,SAAS,EAAC,OAAO,MAAK,CAAC,CAAC;AACrC;;AAGF,QAAO;;AAGT,SAAS,aAAa,IAAgB,MAAc,SAAkB;AACpE,kBAAiB,WAAW,WAAW;EACrC,aACE;EAEF,MAAM;EACP,CAAC,CAAC,YAAY,gBAAgB,IAAI,KAAK,CAAC;AAEzC,kBAAiB,WAAW,YAAY;EACtC,aAAa;EACb,MAAM;EACP,CAAC,CAAC,YAAY,gBAAgB,IAAI,GAAG,KAAK,MAAM,CAAC;AAElD,KAAI,YAAY,OACd,kBAAiB,WAAW,aAAa;EACvC,aAAa;EACb,MAAM;EACP,CAAC,CAAC,YAAY,gBAAgB,IAAI,GAAG,KAAK,OAAO,CAAC;;AAIvD,SAAS,gBAAgB,IAAgB,MAAkC;AACzE,QAAO,OAAM,MAAK;AAChB,MAAI;GACF,MAAM,QAAQ,MAAM,KAAK,KAAK;AAC9B,KAAE,QAAQ,MAAM,KAAK;WACd,GAAG;AACV,MAAG,OAAO,kBAAkB,KAAK,oBAAoB,EAAE;;;;AAM7D,IAAI,CAAC,mBAAmB,CACjB,iBACH,UAAU,KAAK,aAAa,EAAE,QAAQ,KAAK,GAAG,QAAQ,KAAK,MAAM,EAAE,CAAC,CACrE"}
+ {"version":3,"file":"replicator.js","names":[],"sources":["../../../../../zero-cache/src/server/replicator.ts"],"sourcesContent":["import {stat} from 'node:fs/promises';\nimport {pid} from 'node:process';\nimport type {ObservableCallback} from '@opentelemetry/api';\nimport type {LogContext} from '@rocicorp/logger';\nimport {assert} from '../../../shared/src/asserts.ts';\nimport {must} from '../../../shared/src/must.ts';\nimport * as v from '../../../shared/src/valita.ts';\nimport {getNormalizedZeroConfig} from '../config/zero-config.ts';\nimport {initEventSink} from '../observability/events.ts';\nimport {getOrCreateGauge} from '../observability/metrics.ts';\nimport {ChangeStreamerHttpClient} from '../services/change-streamer/change-streamer-http.ts';\nimport {exitAfter, runUntilKilled} from '../services/life-cycle.ts';\nimport {ReplicationStatusPublisher} from '../services/replicator/replication-status.ts';\nimport {\n ReplicatorService,\n type ReplicatorMode,\n} from '../services/replicator/replicator.ts';\nimport {ThreadWriteWorkerClient} from '../services/replicator/write-worker-client.ts';\nimport {\n parentWorker,\n singleProcessMode,\n type Worker,\n} from '../types/processes.ts';\nimport {getShardConfig} from '../types/shards.ts';\nimport {\n getPragmaConfig,\n replicaFileModeSchema,\n setUpMessageHandlers,\n setupReplica,\n type WalMode,\n} from '../workers/replicator.ts';\nimport {createLogContext} from './logging.ts';\nimport {startOtelAuto} from './otel-start.ts';\n\nexport default async function runWorker(\n parent: Worker,\n env: NodeJS.ProcessEnv,\n ...args: string[]\n): Promise<void> {\n assert(args.length > 0, `replicator mode not specified`);\n const fileMode = v.parse(args[0], replicaFileModeSchema);\n\n const config = getNormalizedZeroConfig({env, argv: args.slice(1)});\n const mode: ReplicatorMode = fileMode === 'backup' ? 'backup' : 'serving';\n const workerName = `${mode}-replicator`;\n startOtelAuto(createLogContext(config, workerName, 0, false), workerName, 0);\n const lc = createLogContext(config, workerName);\n initEventSink(lc, config);\n\n const {file: dbPath, walMode} = await setupReplica(\n lc,\n fileMode,\n config.replica,\n );\n\n setupMetrics(lc, dbPath, walMode);\n\n // Create the write worker for async SQLite writes.\n const pragmas = getPragmaConfig(fileMode);\n const workerClient = new ThreadWriteWorkerClient();\n await workerClient.init(dbPath, mode, pragmas, config.log);\n\n const runningLocalChangeStreamer =\n config.changeStreamer.mode === 'dedicated' && !config.changeStreamer.uri;\n const shard = getShardConfig(config);\n const {\n taskID,\n change,\n changeStreamer: {\n port,\n uri: changeStreamerURI = runningLocalChangeStreamer\n ? `http://localhost:${port}/`\n : undefined,\n },\n } = config;\n const changeStreamer = new ChangeStreamerHttpClient(\n lc,\n shard,\n change.db,\n changeStreamerURI,\n );\n\n const replicator = new ReplicatorService(\n lc,\n taskID,\n `${workerName}-${pid}`,\n mode,\n changeStreamer,\n workerClient,\n runningLocalChangeStreamer\n ? 
// publish ReplicationStatusEvents from backup-replicator only\n ReplicationStatusPublisher.forReplicaFile(dbPath)\n : null,\n );\n\n setUpMessageHandlers(lc, replicator, parent);\n\n const running = runUntilKilled(lc, parent, replicator);\n\n // Signal readiness once the first ReplicaVersionReady notification is received.\n for await (const _ of replicator.subscribe()) {\n parent.send(['ready', {ready: true}]);\n break;\n }\n\n return running;\n}\n\nfunction setupMetrics(lc: LogContext, file: string, walMode: WalMode) {\n getOrCreateGauge('replica', 'db_size', {\n description:\n `The size of the replica's main db file, ` +\n `which does not include the wal file(s)`,\n unit: 'bytes',\n }).addCallback(observeFileSize(lc, file));\n\n getOrCreateGauge('replica', 'wal_size', {\n description: `The size of the replica's wal file`,\n unit: 'bytes',\n }).addCallback(observeFileSize(lc, `${file}-wal`));\n\n if (walMode === 'wal2') {\n getOrCreateGauge('replica', 'wal2_size', {\n description: `The size of the replica's wal2 file`,\n unit: 'bytes',\n }).addCallback(observeFileSize(lc, `${file}-wal2`));\n }\n}\n\nfunction observeFileSize(lc: LogContext, file: string): ObservableCallback {\n return async o => {\n try {\n const stats = await stat(file);\n o.observe(stats.size);\n } catch (e) {\n lc.warn?.(`unable to stat ${file} for size metrics`, e);\n }\n };\n}\n\n// fork()\nif (!singleProcessMode()) {\n void exitAfter(() =>\n runWorker(must(parentWorker), process.env, ...process.argv.slice(2)),\n );\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;;AAkCA,eAA8B,UAC5B,QACA,KACA,GAAG,MACY;AACf,QAAO,KAAK,SAAS,GAAG,gCAAgC;CACxD,MAAM,WAAW,MAAQ,KAAK,IAAI,sBAAsB;CAExD,MAAM,SAAS,wBAAwB;EAAC;EAAK,MAAM,KAAK,MAAM,EAAE;EAAC,CAAC;CAClE,MAAM,OAAuB,aAAa,WAAW,WAAW;CAChE,MAAM,aAAa,GAAG,KAAK;AAC3B,eAAc,iBAAiB,QAAQ,YAAY,GAAG,MAAM,EAAE,YAAY,EAAE;CAC5E,MAAM,KAAK,iBAAiB,QAAQ,WAAW;AAC/C,eAAc,IAAI,OAAO;CAEzB,MAAM,EAAC,MAAM,QAAQ,YAAW,MAAM,aACpC,IACA,UACA,OAAO,QACR;AAED,cAAa,IAAI,QAAQ,QAAQ;CAGjC,MAAM,UAAU,gBAAgB,SAAS;CACzC,MAAM,eAAe,IAAI,yBAAyB;AAClD,OAAM,aAAa,KAAK,QAAQ,MAAM,SAAS,OAAO,IAAI;CAE1D,MAAM,6BACJ,OAAO,eAAe,SAAS,eAAe,CAAC,OAAO,eAAe;CACvE,MAAM,QAAQ,eAAe,OAAO;CACpC,MAAM,EACJ,QACA,QACA,gBAAgB,EACd,MACA,KAAK,oBAAoB,6BACrB,oBAAoB,KAAK,KACzB,KAAA,QAEJ;CACJ,MAAM,iBAAiB,IAAI,yBACzB,IACA,OACA,OAAO,IACP,kBACD;CAED,MAAM,aAAa,IAAI,kBACrB,IACA,QACA,GAAG,WAAW,GAAG,OACjB,MACA,gBACA,cACA,6BAEI,2BAA2B,eAAe,OAAO,GACjD,KACL;AAED,sBAAqB,IAAI,YAAY,OAAO;CAE5C,MAAM,UAAU,eAAe,IAAI,QAAQ,WAAW;AAGtD,YAAW,MAAM,KAAK,WAAW,WAAW,EAAE;AAC5C,SAAO,KAAK,CAAC,SAAS,EAAC,OAAO,MAAK,CAAC,CAAC;AACrC;;AAGF,QAAO;;AAGT,SAAS,aAAa,IAAgB,MAAc,SAAkB;AACpE,kBAAiB,WAAW,WAAW;EACrC,aACE;EAEF,MAAM;EACP,CAAC,CAAC,YAAY,gBAAgB,IAAI,KAAK,CAAC;AAEzC,kBAAiB,WAAW,YAAY;EACtC,aAAa;EACb,MAAM;EACP,CAAC,CAAC,YAAY,gBAAgB,IAAI,GAAG,KAAK,MAAM,CAAC;AAElD,KAAI,YAAY,OACd,kBAAiB,WAAW,aAAa;EACvC,aAAa;EACb,MAAM;EACP,CAAC,CAAC,YAAY,gBAAgB,IAAI,GAAG,KAAK,OAAO,CAAC;;AAIvD,SAAS,gBAAgB,IAAgB,MAAkC;AACzE,QAAO,OAAM,MAAK;AAChB,MAAI;GACF,MAAM,QAAQ,MAAM,KAAK,KAAK;AAC9B,KAAE,QAAQ,MAAM,KAAK;WACd,GAAG;AACV,MAAG,OAAO,kBAAkB,KAAK,oBAAoB,EAAE;;;;AAM7D,IAAI,CAAC,mBAAmB,CACjB,iBACH,UAAU,KAAK,aAAa,EAAE,QAAQ,KAAK,GAAG,QAAQ,KAAK,MAAM,EAAE,CAAC,CACrE"}
package/out/zero-cache/src/server/runner/run-worker.js
@@ -1,9 +1,9 @@
  import "../../../../zero-protocol/src/protocol-version.js";
  import "../../../../shared/src/dotenv.js";
  import { childWorker } from "../../types/processes.js";
- import { ProcessManager, runUntilKilled } from "../../services/life-cycle.js";
  import { normalizeZeroConfig } from "../../config/normalize.js";
  import { getServerVersion, getZeroConfig } from "../../config/zero-config.js";
+ import { ProcessManager, runUntilKilled } from "../../services/life-cycle.js";
  import { createLogContext } from "../logging.js";
  import { MAIN_URL } from "../worker-urls.js";
  import { getTaskID } from "./runtime.js";
@@ -22,7 +22,7 @@ async function runWorker(parent, env) {
  env,
  emitDeprecationWarnings: true
  });
- const lc = createLogContext(cfg, { worker: "runner" });
+ const lc = createLogContext(cfg, "runner");
  const config = normalizeZeroConfig(lc, cfg, env, await getTaskID(lc));
  const processes = new ProcessManager(lc, parent ?? process);
  const { port, lazyStartup } = config;
package/out/zero-cache/src/server/runner/run-worker.js.map
@@ -1 +1 @@
- {"version":3,"file":"run-worker.js","names":[],"sources":["../../../../../../zero-cache/src/server/runner/run-worker.ts"],"sourcesContent":["import '../../../../shared/src/dotenv.ts';\n\nimport {resolver, type Resolver} from '@rocicorp/resolver';\nimport {PROTOCOL_VERSION} from '../../../../zero-protocol/src/protocol-version.ts';\nimport {normalizeZeroConfig} from '../../config/normalize.ts';\nimport {getServerVersion, getZeroConfig} from '../../config/zero-config.ts';\nimport {ProcessManager, runUntilKilled} from '../../services/life-cycle.ts';\nimport {childWorker, type Worker} from '../../types/processes.ts';\nimport {createLogContext} from '../logging.ts';\nimport {MAIN_URL} from '../worker-urls.ts';\nimport {getTaskID} from './runtime.ts';\nimport {ZeroDispatcher} from './zero-dispatcher.ts';\n\n/**\n * Top-level `runner` entry point to the zero-cache. This layer is responsible for:\n * * runtime-based config normalization\n * * lazy startup\n * * serving /statsz\n * * auto-reset restarts (TODO)\n */\nexport async function runWorker(\n parent: Worker | null,\n env: NodeJS.ProcessEnv,\n): Promise<void> {\n // Note: Deprecation warnings are only emitted at this top-level parse;\n // they are suppressed when parsed in subprocesses.\n const cfg = getZeroConfig({env, emitDeprecationWarnings: true});\n const lc = createLogContext(cfg, {worker: 'runner'});\n\n const defaultTaskID = await getTaskID(lc);\n const config = normalizeZeroConfig(lc, cfg, env, defaultTaskID);\n const processes = new ProcessManager(lc, parent ?? process);\n\n const {port, lazyStartup} = config;\n const serverVersion = getServerVersion(config);\n lc.info?.(`starting server${!serverVersion ? '' : `@${serverVersion}`} `, {\n protocolVersion: PROTOCOL_VERSION,\n taskID: config.taskID,\n app: config.app,\n shard: config.shard,\n port: config.port,\n });\n\n let zeroCache: Resolver<Worker> | undefined;\n function startZeroCache(): Promise<Worker> {\n if (zeroCache === undefined) {\n const startMs = performance.now();\n lc.info?.('starting zero-cache');\n\n const r = (zeroCache = resolver<Worker>());\n const w = childWorker(MAIN_URL, env)\n .once('message', () => {\n r.resolve(w);\n lc.info?.(`zero-cache ready (${performance.now() - startMs} ms)`);\n })\n .once('error', r.reject);\n\n processes.addWorker(w, 'user-facing', 'zero-cache');\n }\n return zeroCache.promise;\n }\n\n // Eagerly start the zero-cache if it was not configured with --lazy-startup.\n if (!lazyStartup) {\n void startZeroCache();\n }\n\n await processes.allWorkersReady();\n parent?.send(['ready', {ready: true}]);\n\n try {\n await runUntilKilled(\n lc,\n parent ?? 
process,\n new ZeroDispatcher(config, lc, {port}, startZeroCache),\n );\n } catch (err) {\n processes.logErrorAndExit(err, 'main');\n }\n\n await processes.done();\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;;AAoBA,eAAsB,UACpB,QACA,KACe;CAGf,MAAM,MAAM,cAAc;EAAC;EAAK,yBAAyB;EAAK,CAAC;CAC/D,MAAM,KAAK,iBAAiB,KAAK,EAAC,QAAQ,UAAS,CAAC;CAGpD,MAAM,SAAS,oBAAoB,IAAI,KAAK,KADtB,MAAM,UAAU,GAAG,CACsB;CAC/D,MAAM,YAAY,IAAI,eAAe,IAAI,UAAU,QAAQ;CAE3D,MAAM,EAAC,MAAM,gBAAe;CAC5B,MAAM,gBAAgB,iBAAiB,OAAO;AAC9C,IAAG,OAAO,kBAAkB,CAAC,gBAAgB,KAAK,IAAI,gBAAgB,IAAI;EACxE,iBAAA;EACA,QAAQ,OAAO;EACf,KAAK,OAAO;EACZ,OAAO,OAAO;EACd,MAAM,OAAO;EACd,CAAC;CAEF,IAAI;CACJ,SAAS,iBAAkC;AACzC,MAAI,cAAc,KAAA,GAAW;GAC3B,MAAM,UAAU,YAAY,KAAK;AACjC,MAAG,OAAO,sBAAsB;GAEhC,MAAM,IAAK,YAAY,UAAkB;GACzC,MAAM,IAAI,YAAY,UAAU,IAAI,CACjC,KAAK,iBAAiB;AACrB,MAAE,QAAQ,EAAE;AACZ,OAAG,OAAO,qBAAqB,YAAY,KAAK,GAAG,QAAQ,MAAM;KACjE,CACD,KAAK,SAAS,EAAE,OAAO;AAE1B,aAAU,UAAU,GAAG,eAAe,aAAa;;AAErD,SAAO,UAAU;;AAInB,KAAI,CAAC,YACE,iBAAgB;AAGvB,OAAM,UAAU,iBAAiB;AACjC,SAAQ,KAAK,CAAC,SAAS,EAAC,OAAO,MAAK,CAAC,CAAC;AAEtC,KAAI;AACF,QAAM,eACJ,IACA,UAAU,SACV,IAAI,eAAe,QAAQ,IAAI,EAAC,MAAK,EAAE,eAAe,CACvD;UACM,KAAK;AACZ,YAAU,gBAAgB,KAAK,OAAO;;AAGxC,OAAM,UAAU,MAAM"}
1
+ {"version":3,"file":"run-worker.js","names":[],"sources":["../../../../../../zero-cache/src/server/runner/run-worker.ts"],"sourcesContent":["import '../../../../shared/src/dotenv.ts';\n\nimport {resolver, type Resolver} from '@rocicorp/resolver';\nimport {PROTOCOL_VERSION} from '../../../../zero-protocol/src/protocol-version.ts';\nimport {normalizeZeroConfig} from '../../config/normalize.ts';\nimport {getServerVersion, getZeroConfig} from '../../config/zero-config.ts';\nimport {ProcessManager, runUntilKilled} from '../../services/life-cycle.ts';\nimport {childWorker, type Worker} from '../../types/processes.ts';\nimport {createLogContext} from '../logging.ts';\nimport {MAIN_URL} from '../worker-urls.ts';\nimport {getTaskID} from './runtime.ts';\nimport {ZeroDispatcher} from './zero-dispatcher.ts';\n\n/**\n * Top-level `runner` entry point to the zero-cache. This layer is responsible for:\n * * runtime-based config normalization\n * * lazy startup\n * * serving /statsz\n * * auto-reset restarts (TODO)\n */\nexport async function runWorker(\n parent: Worker | null,\n env: NodeJS.ProcessEnv,\n): Promise<void> {\n // Note: Deprecation warnings are only emitted at this top-level parse;\n // they are suppressed when parsed in subprocesses.\n const cfg = getZeroConfig({env, emitDeprecationWarnings: true});\n const lc = createLogContext(cfg, 'runner');\n\n const defaultTaskID = await getTaskID(lc);\n const config = normalizeZeroConfig(lc, cfg, env, defaultTaskID);\n const processes = new ProcessManager(lc, parent ?? process);\n\n const {port, lazyStartup} = config;\n const serverVersion = getServerVersion(config);\n lc.info?.(`starting server${!serverVersion ? '' : `@${serverVersion}`} `, {\n protocolVersion: PROTOCOL_VERSION,\n taskID: config.taskID,\n app: config.app,\n shard: config.shard,\n port: config.port,\n });\n\n let zeroCache: Resolver<Worker> | undefined;\n function startZeroCache(): Promise<Worker> {\n if (zeroCache === undefined) {\n const startMs = performance.now();\n lc.info?.('starting zero-cache');\n\n const r = (zeroCache = resolver<Worker>());\n const w = childWorker(MAIN_URL, env)\n .once('message', () => {\n r.resolve(w);\n lc.info?.(`zero-cache ready (${performance.now() - startMs} ms)`);\n })\n .once('error', r.reject);\n\n processes.addWorker(w, 'user-facing', 'zero-cache');\n }\n return zeroCache.promise;\n }\n\n // Eagerly start the zero-cache if it was not configured with --lazy-startup.\n if (!lazyStartup) {\n void startZeroCache();\n }\n\n await processes.allWorkersReady();\n parent?.send(['ready', {ready: true}]);\n\n try {\n await runUntilKilled(\n lc,\n parent ?? 
process,\n new ZeroDispatcher(config, lc, {port}, startZeroCache),\n );\n } catch (err) {\n processes.logErrorAndExit(err, 'main');\n }\n\n await processes.done();\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;;AAoBA,eAAsB,UACpB,QACA,KACe;CAGf,MAAM,MAAM,cAAc;EAAC;EAAK,yBAAyB;EAAK,CAAC;CAC/D,MAAM,KAAK,iBAAiB,KAAK,SAAS;CAG1C,MAAM,SAAS,oBAAoB,IAAI,KAAK,KADtB,MAAM,UAAU,GAAG,CACsB;CAC/D,MAAM,YAAY,IAAI,eAAe,IAAI,UAAU,QAAQ;CAE3D,MAAM,EAAC,MAAM,gBAAe;CAC5B,MAAM,gBAAgB,iBAAiB,OAAO;AAC9C,IAAG,OAAO,kBAAkB,CAAC,gBAAgB,KAAK,IAAI,gBAAgB,IAAI;EACxE,iBAAA;EACA,QAAQ,OAAO;EACf,KAAK,OAAO;EACZ,OAAO,OAAO;EACd,MAAM,OAAO;EACd,CAAC;CAEF,IAAI;CACJ,SAAS,iBAAkC;AACzC,MAAI,cAAc,KAAA,GAAW;GAC3B,MAAM,UAAU,YAAY,KAAK;AACjC,MAAG,OAAO,sBAAsB;GAEhC,MAAM,IAAK,YAAY,UAAkB;GACzC,MAAM,IAAI,YAAY,UAAU,IAAI,CACjC,KAAK,iBAAiB;AACrB,MAAE,QAAQ,EAAE;AACZ,OAAG,OAAO,qBAAqB,YAAY,KAAK,GAAG,QAAQ,MAAM;KACjE,CACD,KAAK,SAAS,EAAE,OAAO;AAE1B,aAAU,UAAU,GAAG,eAAe,aAAa;;AAErD,SAAO,UAAU;;AAInB,KAAI,CAAC,YACE,iBAAgB;AAGvB,OAAM,UAAU,iBAAiB;AACjC,SAAQ,KAAK,CAAC,SAAS,EAAC,OAAO,MAAK,CAAC,CAAC;AAEtC,KAAI;AACF,QAAM,eACJ,IACA,UAAU,SACV,IAAI,eAAe,QAAQ,IAAI,EAAC,MAAK,EAAE,eAAe,CACvD;UACM,KAAK;AACZ,YAAU,gBAAgB,KAAK,OAAO;;AAGxC,OAAM,UAAU,MAAM"}
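The embedded run-worker.ts source also documents the runner's lazy startup: the first request memoizes a Resolver and spawns the zero-cache child worker exactly once, while later callers await the same promise. A minimal sketch of that pattern, using only @rocicorp/resolver (which the source itself imports); Child, spawn, and startChildOnce are illustrative names, not package API:

import {resolver, type Resolver} from '@rocicorp/resolver';

type Child = {pid: number}; // stand-in for the real Worker type

let child: Resolver<Child> | undefined;

function startChildOnce(spawn: () => Promise<Child>): Promise<Child> {
  if (child === undefined) {
    const r = (child = resolver<Child>());
    spawn().then(r.resolve, r.reject); // only the first caller spawns
  }
  return child.promise; // every caller shares the same promise
}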
@@ -1 +1 @@
- {"version":3,"file":"syncer.d.ts","sourceRoot":"","sources":["../../../../../zero-cache/src/server/syncer.ts"],"names":[],"mappings":"AA4BA,OAAO,EAGL,KAAK,MAAM,EACZ,MAAM,uBAAuB,CAAC;AAmC/B,MAAM,CAAC,OAAO,UAAU,SAAS,CAC/B,MAAM,EAAE,MAAM,EACd,GAAG,EAAE,MAAM,CAAC,UAAU,EACtB,GAAG,IAAI,EAAE,MAAM,EAAE,GAChB,OAAO,CAAC,IAAI,CAAC,CAoMf"}
+ {"version":3,"file":"syncer.d.ts","sourceRoot":"","sources":["../../../../../zero-cache/src/server/syncer.ts"],"names":[],"mappings":"AA+BA,OAAO,EAGL,KAAK,MAAM,EACZ,MAAM,uBAAuB,CAAC;AAgC/B,MAAM,CAAC,OAAO,UAAU,SAAS,CAC/B,MAAM,EAAE,MAAM,EACd,GAAG,EAAE,MAAM,CAAC,UAAU,EACtB,GAAG,IAAI,EAAE,MAAM,EAAE,GAChB,OAAO,CAAC,IAAI,CAAC,CAwMf"}
@@ -3,33 +3,33 @@ import { promiseVoid } from "../../../shared/src/resolved-promises.js";
  import { parse } from "../../../shared/src/valita.js";
  import { must } from "../../../shared/src/must.js";
  import { parentWorker, singleProcessMode } from "../types/processes.js";
- import { exitAfter, runUntilKilled } from "../services/life-cycle.js";
  import { getShardID } from "../types/shards.js";
  import { getNormalizedZeroConfig } from "../config/zero-config.js";
- import { startOtelAuto } from "./otel-start.js";
- import { createLogContext } from "./logging.js";
- import { randInt } from "../../../shared/src/rand.js";
- import { pgClient } from "../types/pg.js";
  import { Snapshotter } from "../services/view-syncer/snapshotter.js";
  import { PipelineDriver } from "../services/view-syncer/pipeline-driver.js";
+ import { randInt } from "../../../shared/src/rand.js";
+ import { pgClient } from "../types/pg.js";
+ import { exitAfter, runUntilKilled } from "../services/life-cycle.js";
+ import { createLogContext } from "./logging.js";
  import { warmupConnections } from "../db/warmup.js";
  import { initEventSink } from "../observability/events.js";
+ import { startOtelAuto } from "./otel-start.js";
  import { replicaFileModeSchema, replicaFileName } from "../workers/replicator.js";
  import { startAnonymousTelemetry } from "./anonymous-otel-start.js";
  import { DatabaseStorage } from "../../../zqlite/src/database-storage.js";
+ import { tokenConfigOptions, verifyToken } from "../auth/jwt.js";
  import { ProtocolErrorWithLevel } from "../types/error-with-level.js";
  import { CustomQueryTransformer } from "../custom-queries/transform-query.js";
  import { MutagenService } from "../services/mutagen/mutagen.js";
  import { PusherService } from "../services/mutagen/pusher.js";
  import { ConnectionContextManagerImpl } from "../services/view-syncer/connection-context-manager.js";
  import { ViewSyncerService } from "../services/view-syncer/view-syncer.js";
- import { tokenConfigOptions, verifyToken } from "../auth/jwt.js";
  import { Syncer } from "../workers/syncer.js";
  import { InspectorDelegate } from "./inspector-delegate.js";
  import { isPriorityOpRunning, runPriorityOp } from "./priority-op.js";
  import { pid } from "node:process";
- import { tmpdir } from "node:os";
  import { randomUUID } from "node:crypto";
+ import { tmpdir } from "node:os";
  import path from "node:path";
  //#region ../zero-cache/src/server/syncer.ts
  function randomID() {
@@ -46,15 +46,16 @@ function getCustomQueryConfig(config) {
  };
  }
  function runWorker(parent, env, ...args) {
+ assert(args.length >= 2, `expected [fileMode, workerIndex, ...flags]`);
+ const fileMode = parse(args[0], replicaFileModeSchema);
+ const workerIndex = Number(args[1]);
  const config = getNormalizedZeroConfig({
  env,
- argv: args.slice(1)
+ argv: args.slice(2)
  });
- startOtelAuto(createLogContext(config, { worker: "syncer" }, false));
- const lc = createLogContext(config, { worker: "syncer" }, true);
+ startOtelAuto(createLogContext(config, "syncer", workerIndex, false), "syncer", workerIndex);
+ const lc = createLogContext(config, "syncer", workerIndex);
  initEventSink(lc, config);
- assert(args.length > 0, `replicator mode not specified`);
- const fileMode = parse(args[0], replicaFileModeSchema);
  const { cvr, upstream, enableCrudMutations } = config;
  const replicaFile = replicaFileName(config.replica.file, fileMode);
  lc.debug?.(`running view-syncer on ${replicaFile}`);
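Net effect of this hunk: the syncer worker's argv contract changes from [fileMode, ...flags] to [fileMode, workerIndex, ...flags], with config flags now read from args.slice(2). A minimal sketch of parsing under the new contract; parseSyncerArgs and the sample values are illustrative, not the package's API:

function parseSyncerArgs(args: string[]): {
  fileMode: string;
  workerIndex: number;
  flags: string[];
} {
  if (args.length < 2) {
    throw new Error('expected [fileMode, workerIndex, ...flags]');
  }
  const [fileMode, workerIndexStr, ...flags] = args;
  return {fileMode, workerIndex: Number(workerIndexStr), flags};
}

// e.g. parseSyncerArgs(['<mode>', '3', '--some-flag'])
//   → {fileMode: '<mode>', workerIndex: 3, flags: ['--some-flag']}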
@@ -1 +1 @@
- [syncer.js.map: sourcemap blob for the prior build]
+ [syncer.js.map: regenerated sourcemap; the embedded syncer.ts source reflects the re-sorted imports and the runWorker argument-parsing and createLogContext changes shown in the hunks above]
@@ -1,8 +1,8 @@
  import { assert } from "../../../shared/src/asserts.js";
  import { h32 } from "../../../shared/src/hash.js";
+ import { getOrCreateGauge } from "../observability/metrics.js";
  import { RunningState } from "../services/running-state.js";
  import { installWebSocketHandoff } from "../types/websocket-handoff.js";
- import { getOrCreateGauge } from "../observability/metrics.js";
  import { getConnectParams } from "../workers/connect-params.js";
  import UrlPattern from "url-pattern";
  //#region ../zero-cache/src/server/worker-dispatcher.ts
@@ -1,8 +1,8 @@
  import { MemoryStorage } from "../../../zql/src/ivm/memory-storage.js";
  import { AccumulatorDebugger, serializePlanDebugEvents } from "../../../zql/src/planner/planner-debug.js";
+ import { Debug } from "../../../zql/src/builder/debug-delegate.js";
  import { Database } from "../../../zqlite/src/db.js";
  import { TableSource } from "../../../zqlite/src/table-source.js";
- import { Debug } from "../../../zql/src/builder/debug-delegate.js";
  import { computeZqlSpecs, mustGetTableSpec } from "../db/lite-tables.js";
  import { createSQLiteCostModel } from "../../../zqlite/src/sqlite-cost-model.js";
  import { runAst } from "./run-ast.js";
@@ -1,9 +1,9 @@
  import { assert } from "../../../../../shared/src/asserts.js";
  import { must } from "../../../../../shared/src/must.js";
  import { stringify } from "../../../../../shared/src/bigint-json.js";
+ import "../../../types/lite.js";
  import { randInt } from "../../../../../shared/src/rand.js";
  import { stateVersionFromString, stateVersionToString } from "../../../types/state-version.js";
- import "../../../types/lite.js";
  import { CustomKeyMap } from "../../../../../shared/src/custom-key-map.js";
  import { resolver } from "@rocicorp/resolver";
  //#region ../zero-cache/src/services/change-source/common/backfill-manager.ts
@@ -1,6 +1,6 @@
  import { populateFromExistingTables } from "../../replicator/schema/column-metadata.js";
- import { CREATE_RUNTIME_EVENTS_TABLE, recordEvent } from "../../replicator/schema/replication-state.js";
  import { listTables } from "../../../db/lite-tables.js";
+ import { CREATE_RUNTIME_EVENTS_TABLE, recordEvent } from "../../replicator/schema/replication-state.js";
  import { runSchemaMigrations } from "../../../db/migration-lite.js";
  import { AutoResetSignal } from "../../change-streamer/schema/tables.js";
  import { SqliteError } from "@rocicorp/zero-sqlite3";
@@ -4,9 +4,9 @@ import { stringify } from "../../../../../shared/src/bigint-json.js";
  import { changeStreamMessageSchema } from "../protocol/current/downstream.js";
  import "../protocol/current/upstream.js";
  import { Database } from "../../../../../zqlite/src/db.js";
- import { StatementRunner } from "../../../db/statements.js";
- import { createReplicationStateTables, getSubscriptionState, initReplicationState } from "../../replicator/schema/replication-state.js";
  import { computeZqlSpecs } from "../../../db/lite-tables.js";
+ import { createReplicationStateTables, getSubscriptionState, initReplicationState } from "../../replicator/schema/replication-state.js";
+ import { StatementRunner } from "../../../db/statements.js";
  import { AutoResetSignal } from "../../change-streamer/schema/tables.js";
  import { initReplica } from "../common/replica-schema.js";
  import { stream } from "../../../types/streams.js";
@@ -127,7 +127,10 @@ async function createSnapshotTransaction(lc, upstreamURI, db, slotNamePrefix) {
  try {
  const { snapshot_name: snapshot, consistent_point: lsn } = await createReplicationSlot(lc, replicationSession, tempSlot);
  const { init, imported } = importSnapshot(snapshot);
- const tx = new TransactionPool(lc, READONLY, init).run(db);
+ const tx = new TransactionPool(lc, {
+ mode: READONLY,
+ init
+ }).run(db);
  await imported;
  await replicationSession.unsafe(`DROP_REPLICATION_SLOT "${tempSlot}"`);
  const watermark = toStateVersionString(lsn);
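The same constructor change appears throughout this release: TransactionPool's positional (mode, init) parameters collapse into one options object. A compilable sketch of the call-site shape only; the stub class below stands in for the real TransactionPool in zero-cache/src/db/transaction-pool.ts, which carries more options:

// Stub with just enough surface to show the new call shape;
// not the package's implementation.
const READONLY = 'readonly' as const; // stand-in for the mode-enum value

class TransactionPool {
  constructor(
    readonly lc: unknown,
    readonly opts: {mode: string; init?: unknown},
  ) {}
  run(_db: unknown): this {
    return this; // the real pool runs tasks against the db here
  }
}

const lc: unknown = undefined; // stand-in LogContext
const init: unknown = undefined; // stand-in snapshot-import task
const db: unknown = undefined; // stand-in PostgresDB

// before (1.3.0): new TransactionPool(lc, READONLY, init).run(db);
const tx = new TransactionPool(lc, {mode: READONLY, init}).run(db);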
@@ -1 +1 @@
1
- {"version":3,"file":"backfill-stream.js","names":[],"sources":["../../../../../../../zero-cache/src/services/change-source/pg/backfill-stream.ts"],"sourcesContent":["import {\n PG_UNDEFINED_COLUMN,\n PG_UNDEFINED_TABLE,\n} from '@drdgvhbh/postgres-error-codes';\nimport type {LogContext} from '@rocicorp/logger';\nimport postgres from 'postgres';\nimport {equals} from '../../../../../shared/src/set-utils.ts';\nimport * as v from '../../../../../shared/src/valita.ts';\nimport {READONLY} from '../../../db/mode-enum.ts';\nimport {TsvParser} from '../../../db/pg-copy.ts';\nimport {getTypeParsers, type TypeParser} from '../../../db/pg-type-parser.ts';\nimport type {PublishedTableSpec} from '../../../db/specs.ts';\nimport {importSnapshot, TransactionPool} from '../../../db/transaction-pool.ts';\nimport {pgClient, type PostgresDB} from '../../../types/pg.ts';\nimport {SchemaIncompatibilityError} from '../common/backfill-manager.ts';\nimport type {\n BackfillCompleted,\n BackfillRequest,\n DownloadStatus,\n JSONValue,\n MessageBackfill,\n} from '../protocol/current.ts';\nimport {\n columnMetadataSchema,\n tableMetadataSchema,\n} from './backfill-metadata.ts';\nimport {\n createReplicationSlot,\n makeDownloadStatements,\n type DownloadStatements,\n} from './initial-sync.ts';\nimport {toStateVersionString} from './lsn.ts';\nimport {getPublicationInfo} from './schema/published.ts';\nimport type {Replica} from './schema/shard.ts';\n\ntype BackfillParams = Omit<BackfillCompleted, 'tag'>;\n\ntype StreamOptions = {\n /**\n * The number of bytes at which to flush a batch of rows in a\n * backfill message. Defaults to Node's getDefaultHighWatermark().\n */\n flushThresholdBytes?: number;\n};\n\n// The size of chunks that Postgres sends on COPY stream.\n// This happens to match NodeJS's getDefaultHighWatermark()\n// (for Node v20+).\nconst POSTGRES_COPY_CHUNK_SIZE = 64 * 1024;\n\n/**\n * Streams a series of `backfill` messages (ending with `backfill-complete`)\n * at a set watermark (i.e. LSN). 
The data is retrieved via a COPY stream\n * made at a transaction snapshot corresponding to specific LSN, obtained by\n * creating a short-lived replication slot.\n */\nexport async function* streamBackfill(\n lc: LogContext,\n upstreamURI: string,\n {slot, publications}: Pick<Replica, 'slot' | 'publications'>,\n bf: BackfillRequest,\n opts: StreamOptions = {},\n): AsyncGenerator<MessageBackfill | BackfillCompleted> {\n lc = lc\n .withContext('component', 'backfill')\n .withContext('table', bf.table.name);\n\n const {flushThresholdBytes = POSTGRES_COPY_CHUNK_SIZE} = opts;\n const db = pgClient(lc, upstreamURI, {\n connection: {['application_name']: 'backfill-stream'},\n ['max_lifetime']: 120 * 60, // set a long (2h) limit for COPY streaming\n });\n let tx: TransactionPool | undefined;\n let watermark: string;\n try {\n ({tx, watermark} = await createSnapshotTransaction(\n lc,\n upstreamURI,\n db,\n slot,\n ));\n const {tableSpec, backfill} = await validateSchema(\n tx,\n publications,\n bf,\n watermark,\n );\n const types = await getTypeParsers(db, {returnJsonAsString: true});\n\n // Note: validateSchema ensures that the rowKey and columns are disjoint\n const {relation, columns} = backfill;\n const cols = [...relation.rowKey.columns, ...columns];\n\n yield* stream(\n lc,\n tx,\n backfill,\n makeDownloadStatements(tableSpec, cols),\n cols.map(col => types.getTypeParser(tableSpec.columns[col].typeOID)),\n flushThresholdBytes,\n );\n } catch (e) {\n // Although we make the best effort to validate the schema at the\n // transaction snapshot, certain forms of `ALTER TABLE` are not\n // MVCC safe and not \"frozen\" in the snapshot:\n //\n // https://www.postgresql.org/docs/current/mvcc-caveats.html\n //\n // Handle these errors as schema incompatibility errors rather than\n // unknown runtime errors.\n if (\n e instanceof postgres.PostgresError &&\n (e.code === PG_UNDEFINED_TABLE || e.code === PG_UNDEFINED_COLUMN)\n ) {\n throw new SchemaIncompatibilityError(bf, String(e), {cause: e});\n }\n throw e;\n } finally {\n tx?.setDone();\n // Workaround postgres.js hanging at the end of some COPY commands:\n // https://github.com/porsager/postgres/issues/499\n void db.end().catch(e => lc.warn?.(`error closing backfill connection`, e));\n }\n}\n\nasync function* stream(\n lc: LogContext,\n tx: TransactionPool,\n backfill: BackfillParams,\n {select, getTotalRows, getTotalBytes}: DownloadStatements,\n colParsers: TypeParser[],\n flushThresholdBytes: number,\n): AsyncGenerator<MessageBackfill | BackfillCompleted> {\n const start = performance.now();\n const [rows, bytes] = await tx.processReadTask(sql =>\n Promise.all([\n sql.unsafe<{totalRows: bigint}[]>(getTotalRows),\n sql.unsafe<{totalBytes: bigint}[]>(getTotalBytes),\n ]),\n );\n const status: DownloadStatus = {\n rows: 0,\n totalRows: Number(rows[0].totalRows),\n totalBytes: Number(bytes[0].totalBytes),\n };\n\n let elapsed = (performance.now() - start).toFixed(3);\n lc.info?.(`Computed total rows and bytes for: ${select} (${elapsed} ms)`, {\n status,\n });\n const copyStream = await tx.processReadTask(sql =>\n sql.unsafe(`COPY (${select}) TO STDOUT`).readable(),\n );\n\n const tsvParser = new TsvParser();\n let totalBytes = 0;\n let totalMsgs = 0;\n let rowValues: JSONValue[][] = [];\n let bufferedBytes = 0;\n\n const logFlushed = () => {\n lc.debug?.(\n `Flushed ${rowValues.length} rows, ${bufferedBytes} bytes ` +\n `(total: rows=${status.rows}, msgs=${totalMsgs}, bytes=${totalBytes})`,\n );\n };\n\n // Tracks the row being parsed.\n let row: 
JSONValue[] = Array.from({length: colParsers.length});\n let col = 0;\n\n for await (const data of copyStream) {\n const chunk = data as Buffer;\n for (const text of tsvParser.parse(chunk)) {\n row[col] = text === null ? null : (colParsers[col](text) as JSONValue);\n\n if (++col === colParsers.length) {\n rowValues.push(row);\n status.rows++;\n row = Array.from({length: colParsers.length});\n col = 0;\n }\n }\n bufferedBytes += chunk.byteLength;\n totalBytes += chunk.byteLength;\n\n if (bufferedBytes >= flushThresholdBytes) {\n yield {tag: 'backfill', ...backfill, rowValues, status};\n totalMsgs++;\n logFlushed();\n rowValues = [];\n bufferedBytes = 0;\n }\n }\n\n // Flush the last batch of rows.\n if (rowValues.length > 0) {\n yield {tag: 'backfill', ...backfill, rowValues, status};\n totalMsgs++;\n logFlushed();\n }\n\n yield {tag: 'backfill-completed', ...backfill, status};\n elapsed = (performance.now() - start).toFixed(3);\n lc.info?.(\n `Finished streaming ${status.rows} rows, ${totalMsgs} msgs, ${totalBytes} bytes ` +\n `(${elapsed} ms)`,\n );\n}\n\n/**\n * Creates (and drops) a replication slot in order to obtain a snapshot\n * that corresponds with a specific LSN. Sets the snapshot on the\n * TransactionPool and returns the watermark corresponding to the LSN.\n *\n * (Note that PG's other LSN-related functions are not scoped to a\n * transaction; this is the only way to get set a transaction at a specific\n * LSN.)\n */\nasync function createSnapshotTransaction(\n lc: LogContext,\n upstreamURI: string,\n db: PostgresDB,\n slotNamePrefix: string,\n) {\n const replicationSession = pgClient(lc, upstreamURI, {\n ['fetch_types']: false, // Necessary for the streaming protocol\n connection: {replication: 'database'}, // https://www.postgresql.org/docs/current/protocol-replication.html\n });\n const tempSlot = `${slotNamePrefix}_bf_${Date.now()}`;\n try {\n const {snapshot_name: snapshot, consistent_point: lsn} =\n await createReplicationSlot(lc, replicationSession, tempSlot);\n\n const {init, imported} = importSnapshot(snapshot);\n const tx = new TransactionPool(lc, READONLY, init).run(db);\n await imported;\n await replicationSession.unsafe(`DROP_REPLICATION_SLOT \"${tempSlot}\"`);\n\n const watermark = toStateVersionString(lsn);\n lc.info?.(`Opened snapshot transaction at LSN ${lsn} (${watermark})`);\n return {tx, watermark};\n } catch (e) {\n // In the event of a failure, clean up the replication slot if created.\n await replicationSession.unsafe(\n /*sql*/\n `SELECT pg_drop_replication_slot(slot_name) FROM pg_replication_slots\n WHERE slot_name = '${tempSlot}'`,\n );\n lc.error?.(`Failed to create backfill snapshot`, e);\n throw e;\n } finally {\n await replicationSession.end();\n }\n}\n\nfunction validateSchema(\n tx: TransactionPool,\n publications: string[],\n bf: BackfillRequest,\n watermark: string,\n): Promise<{\n tableSpec: PublishedTableSpec;\n backfill: BackfillParams;\n}> {\n return tx.processReadTask(async sql => {\n const {tables} = await getPublicationInfo(sql, publications);\n const spec = tables.find(\n spec => spec.schema === bf.table.schema && spec.name === bf.table.name,\n );\n if (!spec) {\n throw new SchemaIncompatibilityError(\n bf,\n `Table has been renamed or dropped`,\n );\n }\n const tableMeta = v.parse(bf.table.metadata, tableMetadataSchema);\n if (spec.schemaOID !== tableMeta.schemaOID) {\n throw new SchemaIncompatibilityError(\n bf,\n `Schema no longer corresponds to the original schema`,\n );\n }\n if (spec.oid !== tableMeta.relationOID) {\n throw new 
SchemaIncompatibilityError(\n bf,\n `Table no longer corresponds to the original table`,\n );\n }\n if (\n !equals(\n new Set(Object.keys(tableMeta.rowKey)),\n new Set(spec.replicaIdentityColumns),\n )\n ) {\n throw new SchemaIncompatibilityError(\n bf,\n 'Row key (e.g. PRIMARY KEY or INDEX) has changed',\n );\n }\n const allCols = [\n ...Object.entries(tableMeta.rowKey),\n ...Object.entries(bf.columns),\n ];\n for (const [col, val] of allCols) {\n const colSpec = spec.columns[col];\n if (!colSpec) {\n throw new SchemaIncompatibilityError(\n bf,\n `Column ${col} has been renamed or dropped`,\n );\n }\n const colMeta = v.parse(val, columnMetadataSchema);\n if (colMeta.attNum !== colSpec.pos) {\n throw new SchemaIncompatibilityError(\n bf,\n `Column ${col} no longer corresponds to the original column`,\n );\n }\n }\n const backfill: BackfillParams = {\n relation: {\n schema: bf.table.schema,\n name: bf.table.name,\n rowKey: {columns: Object.keys(tableMeta.rowKey)},\n },\n columns: Object.keys(bf.columns).filter(\n col => !(col in tableMeta.rowKey),\n ),\n watermark,\n };\n return {tableSpec: spec, backfill};\n });\n}\n"],"mappings":";;;;;;;;;;;;;;;AAgDA,IAAM,2BAA2B,KAAK;;;;;;;AAQtC,gBAAuB,eACrB,IACA,aACA,EAAC,MAAM,gBACP,IACA,OAAsB,EAAE,EAC6B;AACrD,MAAK,GACF,YAAY,aAAa,WAAW,CACpC,YAAY,SAAS,GAAG,MAAM,KAAK;CAEtC,MAAM,EAAC,sBAAsB,6BAA4B;CACzD,MAAM,KAAK,SAAS,IAAI,aAAa;EACnC,YAAY,GAAE,qBAAqB,mBAAkB;GACpD,iBAAiB;EACnB,CAAC;CACF,IAAI;CACJ,IAAI;AACJ,KAAI;AACF,GAAC,CAAC,IAAI,aAAa,MAAM,0BACvB,IACA,aACA,IACA,KACD;EACD,MAAM,EAAC,WAAW,aAAY,MAAM,eAClC,IACA,cACA,IACA,UACD;EACD,MAAM,QAAQ,MAAM,eAAe,IAAI,EAAC,oBAAoB,MAAK,CAAC;EAGlE,MAAM,EAAC,UAAU,YAAW;EAC5B,MAAM,OAAO,CAAC,GAAG,SAAS,OAAO,SAAS,GAAG,QAAQ;AAErD,SAAO,OACL,IACA,IACA,UACA,uBAAuB,WAAW,KAAK,EACvC,KAAK,KAAI,QAAO,MAAM,cAAc,UAAU,QAAQ,KAAK,QAAQ,CAAC,EACpE,oBACD;UACM,GAAG;AASV,MACE,aAAa,SAAS,kBACrB,EAAE,SAAS,sBAAsB,EAAE,SAAS,qBAE7C,OAAM,IAAI,2BAA2B,IAAI,OAAO,EAAE,EAAE,EAAC,OAAO,GAAE,CAAC;AAEjE,QAAM;WACE;AACR,MAAI,SAAS;AAGR,KAAG,KAAK,CAAC,OAAM,MAAK,GAAG,OAAO,qCAAqC,EAAE,CAAC;;;AAI/E,gBAAgB,OACd,IACA,IACA,UACA,EAAC,QAAQ,cAAc,iBACvB,YACA,qBACqD;CACrD,MAAM,QAAQ,YAAY,KAAK;CAC/B,MAAM,CAAC,MAAM,SAAS,MAAM,GAAG,iBAAgB,QAC7C,QAAQ,IAAI,CACV,IAAI,OAA8B,aAAa,EAC/C,IAAI,OAA+B,cAAc,CAClD,CAAC,CACH;CACD,MAAM,SAAyB;EAC7B,MAAM;EACN,WAAW,OAAO,KAAK,GAAG,UAAU;EACpC,YAAY,OAAO,MAAM,GAAG,WAAW;EACxC;CAED,IAAI,WAAW,YAAY,KAAK,GAAG,OAAO,QAAQ,EAAE;AACpD,IAAG,OAAO,sCAAsC,OAAO,IAAI,QAAQ,OAAO,EACxE,QACD,CAAC;CACF,MAAM,aAAa,MAAM,GAAG,iBAAgB,QAC1C,IAAI,OAAO,SAAS,OAAO,aAAa,CAAC,UAAU,CACpD;CAED,MAAM,YAAY,IAAI,WAAW;CACjC,IAAI,aAAa;CACjB,IAAI,YAAY;CAChB,IAAI,YAA2B,EAAE;CACjC,IAAI,gBAAgB;CAEpB,MAAM,mBAAmB;AACvB,KAAG,QACD,WAAW,UAAU,OAAO,SAAS,cAAc,sBACjC,OAAO,KAAK,SAAS,UAAU,UAAU,WAAW,GACvE;;CAIH,IAAI,MAAmB,MAAM,KAAK,EAAC,QAAQ,WAAW,QAAO,CAAC;CAC9D,IAAI,MAAM;AAEV,YAAW,MAAM,QAAQ,YAAY;EACnC,MAAM,QAAQ;AACd,OAAK,MAAM,QAAQ,UAAU,MAAM,MAAM,EAAE;AACzC,OAAI,OAAO,SAAS,OAAO,OAAQ,WAAW,KAAK,KAAK;AAExD,OAAI,EAAE,QAAQ,WAAW,QAAQ;AAC/B,cAAU,KAAK,IAAI;AACnB,WAAO;AACP,UAAM,MAAM,KAAK,EAAC,QAAQ,WAAW,QAAO,CAAC;AAC7C,UAAM;;;AAGV,mBAAiB,MAAM;AACvB,gBAAc,MAAM;AAEpB,MAAI,iBAAiB,qBAAqB;AACxC,SAAM;IAAC,KAAK;IAAY,GAAG;IAAU;IAAW;IAAO;AACvD;AACA,eAAY;AACZ,eAAY,EAAE;AACd,mBAAgB;;;AAKpB,KAAI,UAAU,SAAS,GAAG;AACxB,QAAM;GAAC,KAAK;GAAY,GAAG;GAAU;GAAW;GAAO;AACvD;AACA,cAAY;;AAGd,OAAM;EAAC,KAAK;EAAsB,GAAG;EAAU;EAAO;AACtD,YAAW,YAAY,KAAK,GAAG,OAAO,QAAQ,EAAE;AAChD,IAAG,OACD,sBAAsB,OAAO,KAAK,SAAS,UAAU,SAAS,WAAW,UACnE,QAAQ,MACf;;;;;;;;;;;AAYH,eAAe,0BACb,IACA,aACA,IACA,gBACA;CACA,MAAM,qBAAqB,SAAS,IAAI,aAAa;GAClD,gBAAgB;EACjB,YAAY,EAAC,aAAa,YAAW;E
ACtC,CAAC;CACF,MAAM,WAAW,GAAG,eAAe,MAAM,KAAK,KAAK;AACnD,KAAI;EACF,MAAM,EAAC,eAAe,UAAU,kBAAkB,QAChD,MAAM,sBAAsB,IAAI,oBAAoB,SAAS;EAE/D,MAAM,EAAC,MAAM,aAAY,eAAe,SAAS;EACjD,MAAM,KAAK,IAAI,gBAAgB,IAAI,UAAU,KAAK,CAAC,IAAI,GAAG;AAC1D,QAAM;AACN,QAAM,mBAAmB,OAAO,0BAA0B,SAAS,GAAG;EAEtE,MAAM,YAAY,qBAAqB,IAAI;AAC3C,KAAG,OAAO,sCAAsC,IAAI,IAAI,UAAU,GAAG;AACrE,SAAO;GAAC;GAAI;GAAU;UACf,GAAG;AAEV,QAAM,mBAAmB,OAEvB;8BACwB,SAAS,GAClC;AACD,KAAG,QAAQ,sCAAsC,EAAE;AACnD,QAAM;WACE;AACR,QAAM,mBAAmB,KAAK;;;AAIlC,SAAS,eACP,IACA,cACA,IACA,WAIC;AACD,QAAO,GAAG,gBAAgB,OAAM,QAAO;EACrC,MAAM,EAAC,WAAU,MAAM,mBAAmB,KAAK,aAAa;EAC5D,MAAM,OAAO,OAAO,MAClB,SAAQ,KAAK,WAAW,GAAG,MAAM,UAAU,KAAK,SAAS,GAAG,MAAM,KACnE;AACD,MAAI,CAAC,KACH,OAAM,IAAI,2BACR,IACA,oCACD;EAEH,MAAM,YAAY,MAAQ,GAAG,MAAM,UAAU,oBAAoB;AACjE,MAAI,KAAK,cAAc,UAAU,UAC/B,OAAM,IAAI,2BACR,IACA,sDACD;AAEH,MAAI,KAAK,QAAQ,UAAU,YACzB,OAAM,IAAI,2BACR,IACA,oDACD;AAEH,MACE,CAAC,OACC,IAAI,IAAI,OAAO,KAAK,UAAU,OAAO,CAAC,EACtC,IAAI,IAAI,KAAK,uBAAuB,CACrC,CAED,OAAM,IAAI,2BACR,IACA,kDACD;EAEH,MAAM,UAAU,CACd,GAAG,OAAO,QAAQ,UAAU,OAAO,EACnC,GAAG,OAAO,QAAQ,GAAG,QAAQ,CAC9B;AACD,OAAK,MAAM,CAAC,KAAK,QAAQ,SAAS;GAChC,MAAM,UAAU,KAAK,QAAQ;AAC7B,OAAI,CAAC,QACH,OAAM,IAAI,2BACR,IACA,UAAU,IAAI,8BACf;AAGH,OADgB,MAAQ,KAAK,qBAAqB,CACtC,WAAW,QAAQ,IAC7B,OAAM,IAAI,2BACR,IACA,UAAU,IAAI,+CACf;;AAcL,SAAO;GAAC,WAAW;GAAM,UAXQ;IAC/B,UAAU;KACR,QAAQ,GAAG,MAAM;KACjB,MAAM,GAAG,MAAM;KACf,QAAQ,EAAC,SAAS,OAAO,KAAK,UAAU,OAAO,EAAC;KACjD;IACD,SAAS,OAAO,KAAK,GAAG,QAAQ,CAAC,QAC/B,QAAO,EAAE,OAAO,UAAU,QAC3B;IACD;IACD;GACiC;GAClC"}
1
+ {"version":3,"file":"backfill-stream.js","names":[],"sources":["../../../../../../../zero-cache/src/services/change-source/pg/backfill-stream.ts"],"sourcesContent":["import {\n PG_UNDEFINED_COLUMN,\n PG_UNDEFINED_TABLE,\n} from '@drdgvhbh/postgres-error-codes';\nimport type {LogContext} from '@rocicorp/logger';\nimport postgres from 'postgres';\nimport {equals} from '../../../../../shared/src/set-utils.ts';\nimport * as v from '../../../../../shared/src/valita.ts';\nimport {READONLY} from '../../../db/mode-enum.ts';\nimport {TsvParser} from '../../../db/pg-copy.ts';\nimport {getTypeParsers, type TypeParser} from '../../../db/pg-type-parser.ts';\nimport type {PublishedTableSpec} from '../../../db/specs.ts';\nimport {importSnapshot, TransactionPool} from '../../../db/transaction-pool.ts';\nimport {pgClient, type PostgresDB} from '../../../types/pg.ts';\nimport {SchemaIncompatibilityError} from '../common/backfill-manager.ts';\nimport type {\n BackfillCompleted,\n BackfillRequest,\n DownloadStatus,\n JSONValue,\n MessageBackfill,\n} from '../protocol/current.ts';\nimport {\n columnMetadataSchema,\n tableMetadataSchema,\n} from './backfill-metadata.ts';\nimport {\n createReplicationSlot,\n makeDownloadStatements,\n type DownloadStatements,\n} from './initial-sync.ts';\nimport {toStateVersionString} from './lsn.ts';\nimport {getPublicationInfo} from './schema/published.ts';\nimport type {Replica} from './schema/shard.ts';\n\ntype BackfillParams = Omit<BackfillCompleted, 'tag'>;\n\ntype StreamOptions = {\n /**\n * The number of bytes at which to flush a batch of rows in a\n * backfill message. Defaults to Node's getDefaultHighWatermark().\n */\n flushThresholdBytes?: number;\n};\n\n// The size of chunks that Postgres sends on COPY stream.\n// This happens to match NodeJS's getDefaultHighWatermark()\n// (for Node v20+).\nconst POSTGRES_COPY_CHUNK_SIZE = 64 * 1024;\n\n/**\n * Streams a series of `backfill` messages (ending with `backfill-complete`)\n * at a set watermark (i.e. LSN). 
The data is retrieved via a COPY stream
 * made at a transaction snapshot corresponding to a specific LSN, obtained by
 * creating a short-lived replication slot.
 */
export async function* streamBackfill(
  lc: LogContext,
  upstreamURI: string,
  {slot, publications}: Pick<Replica, 'slot' | 'publications'>,
  bf: BackfillRequest,
  opts: StreamOptions = {},
): AsyncGenerator<MessageBackfill | BackfillCompleted> {
  lc = lc
    .withContext('component', 'backfill')
    .withContext('table', bf.table.name);

  const {flushThresholdBytes = POSTGRES_COPY_CHUNK_SIZE} = opts;
  const db = pgClient(lc, upstreamURI, {
    connection: {['application_name']: 'backfill-stream'},
    ['max_lifetime']: 120 * 60, // set a long (2h) limit for COPY streaming
  });
  let tx: TransactionPool | undefined;
  let watermark: string;
  try {
    ({tx, watermark} = await createSnapshotTransaction(
      lc,
      upstreamURI,
      db,
      slot,
    ));
    const {tableSpec, backfill} = await validateSchema(
      tx,
      publications,
      bf,
      watermark,
    );
    const types = await getTypeParsers(db, {returnJsonAsString: true});

    // Note: validateSchema ensures that the rowKey and columns are disjoint.
    const {relation, columns} = backfill;
    const cols = [...relation.rowKey.columns, ...columns];

    yield* stream(
      lc,
      tx,
      backfill,
      makeDownloadStatements(tableSpec, cols),
      cols.map(col => types.getTypeParser(tableSpec.columns[col].typeOID)),
      flushThresholdBytes,
    );
  } catch (e) {
    // Although we make a best effort to validate the schema at the
    // transaction snapshot, certain forms of `ALTER TABLE` are not
    // MVCC safe and not "frozen" in the snapshot:
    //
    // https://www.postgresql.org/docs/current/mvcc-caveats.html
    //
    // Handle these errors as schema incompatibility errors rather than
    // unknown runtime errors.
    if (
      e instanceof postgres.PostgresError &&
      (e.code === PG_UNDEFINED_TABLE || e.code === PG_UNDEFINED_COLUMN)
    ) {
      throw new SchemaIncompatibilityError(bf, String(e), {cause: e});
    }
    throw e;
  } finally {
    tx?.setDone();
    // Work around postgres.js hanging at the end of some COPY commands:
    // https://github.com/porsager/postgres/issues/499
    void db.end().catch(e => lc.warn?.(`error closing backfill connection`, e));
  }
}

async function* stream(
  lc: LogContext,
  tx: TransactionPool,
  backfill: BackfillParams,
  {select, getTotalRows, getTotalBytes}: DownloadStatements,
  colParsers: TypeParser[],
  flushThresholdBytes: number,
): AsyncGenerator<MessageBackfill | BackfillCompleted> {
  const start = performance.now();
  const [rows, bytes] = await tx.processReadTask(sql =>
    Promise.all([
      sql.unsafe<{totalRows: bigint}[]>(getTotalRows),
      sql.unsafe<{totalBytes: bigint}[]>(getTotalBytes),
    ]),
  );
  const status: DownloadStatus = {
    rows: 0,
    totalRows: Number(rows[0].totalRows),
    totalBytes: Number(bytes[0].totalBytes),
  };

  let elapsed = (performance.now() - start).toFixed(3);
  lc.info?.(`Computed total rows and bytes for: ${select} (${elapsed} ms)`, {
    status,
  });
  const copyStream = await tx.processReadTask(sql =>
    sql.unsafe(`COPY (${select}) TO STDOUT`).readable(),
  );

  const tsvParser = new TsvParser();
  let totalBytes = 0;
  let totalMsgs = 0;
  let rowValues: JSONValue[][] = [];
  let bufferedBytes = 0;

  const logFlushed = () => {
    lc.debug?.(
      `Flushed ${rowValues.length} rows, ${bufferedBytes} bytes ` +
        `(total: rows=${status.rows}, msgs=${totalMsgs}, bytes=${totalBytes})`,
    );
  };

  // Tracks the row being parsed.
  let row: JSONValue[] = Array.from({length: colParsers.length});
  let col = 0;

  for await (const data of copyStream) {
    const chunk = data as Buffer;
    for (const text of tsvParser.parse(chunk)) {
      row[col] = text === null ? null : (colParsers[col](text) as JSONValue);

      if (++col === colParsers.length) {
        rowValues.push(row);
        status.rows++;
        row = Array.from({length: colParsers.length});
        col = 0;
      }
    }
    bufferedBytes += chunk.byteLength;
    totalBytes += chunk.byteLength;

    if (bufferedBytes >= flushThresholdBytes) {
      yield {tag: 'backfill', ...backfill, rowValues, status};
      totalMsgs++;
      logFlushed();
      rowValues = [];
      bufferedBytes = 0;
    }
  }

  // Flush the last batch of rows.
  if (rowValues.length > 0) {
    yield {tag: 'backfill', ...backfill, rowValues, status};
    totalMsgs++;
    logFlushed();
  }

  yield {tag: 'backfill-completed', ...backfill, status};
  elapsed = (performance.now() - start).toFixed(3);
  lc.info?.(
    `Finished streaming ${status.rows} rows, ${totalMsgs} msgs, ${totalBytes} bytes ` +
      `(${elapsed} ms)`,
  );
}

/**
 * Creates (and drops) a replication slot in order to obtain a snapshot
 * that corresponds with a specific LSN. Sets the snapshot on the
 * TransactionPool and returns the watermark corresponding to the LSN.
 *
 * (Note that PG's other LSN-related functions are not scoped to a
 * transaction; this is the only way to set a transaction at a specific
 * LSN.)
 */
async function createSnapshotTransaction(
  lc: LogContext,
  upstreamURI: string,
  db: PostgresDB,
  slotNamePrefix: string,
) {
  const replicationSession = pgClient(lc, upstreamURI, {
    ['fetch_types']: false, // Necessary for the streaming protocol
    connection: {replication: 'database'}, // https://www.postgresql.org/docs/current/protocol-replication.html
  });
  const tempSlot = `${slotNamePrefix}_bf_${Date.now()}`;
  try {
    const {snapshot_name: snapshot, consistent_point: lsn} =
      await createReplicationSlot(lc, replicationSession, tempSlot);

    const {init, imported} = importSnapshot(snapshot);
    const tx = new TransactionPool(lc, {mode: READONLY, init}).run(db);
    await imported;
    await replicationSession.unsafe(`DROP_REPLICATION_SLOT "${tempSlot}"`);

    const watermark = toStateVersionString(lsn);
    lc.info?.(`Opened snapshot transaction at LSN ${lsn} (${watermark})`);
    return {tx, watermark};
  } catch (e) {
    // In the event of a failure, clean up the replication slot if created.
    await replicationSession.unsafe(
      /*sql*/
      `SELECT pg_drop_replication_slot(slot_name) FROM pg_replication_slots
         WHERE slot_name = '${tempSlot}'`,
    );
    lc.error?.(`Failed to create backfill snapshot`, e);
    throw e;
  } finally {
    await replicationSession.end();
  }
}

function validateSchema(
  tx: TransactionPool,
  publications: string[],
  bf: BackfillRequest,
  watermark: string,
): Promise<{
  tableSpec: PublishedTableSpec;
  backfill: BackfillParams;
}> {
  return tx.processReadTask(async sql => {
    const {tables} = await getPublicationInfo(sql, publications);
    const spec = tables.find(
      spec => spec.schema === bf.table.schema && spec.name === bf.table.name,
    );
    if (!spec) {
      throw new SchemaIncompatibilityError(
        bf,
        `Table has been renamed or dropped`,
      );
    }
    const tableMeta = v.parse(bf.table.metadata, tableMetadataSchema);
    if (spec.schemaOID !== tableMeta.schemaOID) {
      throw new SchemaIncompatibilityError(
        bf,
        `Schema no longer corresponds to the original schema`,
      );
    }
    if (spec.oid !== tableMeta.relationOID) {
      throw new SchemaIncompatibilityError(
        bf,
        `Table no longer corresponds to the original table`,
      );
    }
    if (
      !equals(
        new Set(Object.keys(tableMeta.rowKey)),
        new Set(spec.replicaIdentityColumns),
      )
    ) {
      throw new SchemaIncompatibilityError(
        bf,
        'Row key (e.g. PRIMARY KEY or INDEX) has changed',
      );
    }
    const allCols = [
      ...Object.entries(tableMeta.rowKey),
      ...Object.entries(bf.columns),
    ];
    for (const [col, val] of allCols) {
      const colSpec = spec.columns[col];
      if (!colSpec) {
        throw new SchemaIncompatibilityError(
          bf,
          `Column ${col} has been renamed or dropped`,
        );
      }
      const colMeta = v.parse(val, columnMetadataSchema);
      if (colMeta.attNum !== colSpec.pos) {
        throw new SchemaIncompatibilityError(
          bf,
          `Column ${col} no longer corresponds to the original column`,
        );
      }
    }
    const backfill: BackfillParams = {
      relation: {
        schema: bf.table.schema,
        name: bf.table.name,
        rowKey: {columns: Object.keys(tableMeta.rowKey)},
      },
      columns: Object.keys(bf.columns).filter(
        col => !(col in tableMeta.rowKey),
      ),
      watermark,
    };
    return {tableSpec: spec, backfill};
  });
}
"],"mappings":"…"}
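For orientation, the snapshot-import trick that createSnapshotTransaction builds on can be shown in isolation. Below is a minimal sketch using postgres.js, assuming a hypothetical demoSnapshotRead() and 'demo_slot' name; the real code goes through the createReplicationSlot() and importSnapshot() helpers and runs the transaction on a TransactionPool rather than a single sql.begin().

import postgres from 'postgres';

// Sketch only: CREATE_REPLICATION_SLOT ... EXPORT_SNAPSHOT returns a
// snapshot_name tied to the slot's consistent_point LSN; importing that
// snapshot pins an ordinary transaction to the same LSN.
async function demoSnapshotRead(upstreamURI: string) {
  // Replication-protocol session, required for CREATE_REPLICATION_SLOT.
  const repl = postgres(upstreamURI, {
    fetch_types: false,
    connection: {replication: 'database'},
  });
  try {
    const [slot] = await repl.unsafe(
      `CREATE_REPLICATION_SLOT "demo_slot" TEMPORARY LOGICAL pgoutput EXPORT_SNAPSHOT`,
    );
    const {snapshot_name, consistent_point} = slot as {
      snapshot_name: string;
      consistent_point: string;
    };

    const db = postgres(upstreamURI);
    try {
      // The snapshot can only be imported into a REPEATABLE READ (or
      // SERIALIZABLE) transaction, before any other statement runs, and
      // only while the exporting replication session stays idle.
      const rows = await db.begin(
        'isolation level repeatable read',
        async sql => {
          await sql.unsafe(`SET TRANSACTION SNAPSHOT '${snapshot_name}'`);
          return sql`SELECT count(*)::int AS n FROM pg_class`;
        },
      );
      return {rows, lsn: consistent_point};
    } finally {
      await db.end();
    }
  } finally {
    await repl.end(); // the TEMPORARY slot is dropped with the session
  }
}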
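And a hedged sketch (not from the package) of how a caller might drive the streamBackfill() generator above. applyRows() and markDone() are hypothetical sinks standing in for the replicator's real change pipeline; the argument types come from the surrounding module.

// Hypothetical sinks for illustration.
declare function applyRows(relation: unknown, rows: unknown[][]): void;
declare function markDone(relation: unknown, watermark: string): void;

async function consumeBackfill(
  lc: LogContext,
  upstreamURI: string,
  replica: Pick<Replica, 'slot' | 'publications'>,
  request: BackfillRequest,
): Promise<void> {
  for await (const msg of streamBackfill(lc, upstreamURI, replica, request)) {
    if (msg.tag === 'backfill') {
      // One flushed batch: row-key columns first, then the requested
      // columns, in the order fixed by validateSchema().
      applyRows(msg.relation, msg.rowValues);
      lc.debug?.(`progress: ${msg.status.rows}/${msg.status.totalRows} rows`);
    } else {
      // 'backfill-completed': the COPY stream is exhausted and the data
      // is consistent as of msg.watermark (the snapshot LSN).
      markDone(msg.relation, msg.watermark);
    }
  }
}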
@@ -1 +1 @@
- {"version":3,"file":"change-source.d.ts","sourceRoot":"","sources":["../../../../../../../zero-cache/src/services/change-source/pg/change-source.ts"],"names":[],"mappings":"…"}
+ {"version":3,"file":"change-source.d.ts","sourceRoot":"","sources":["../../../../../../../zero-cache/src/services/change-source/pg/change-source.ts"],"names":[],"mappings":"…"}
@@ -7,14 +7,14 @@ import { must } from "../../../../../shared/src/must.js";
  import { mapValues } from "../../../../../shared/src/objects.js";
  import { equals, intersection, symmetricDifferences } from "../../../../../shared/src/set-utils.js";
  import { stringify } from "../../../../../shared/src/bigint-json.js";
- import { Database } from "../../../../../zqlite/src/db.js";
  import { upstreamSchema } from "../../../types/shards.js";
+ import { Database } from "../../../../../zqlite/src/db.js";
+ import { UnsupportedColumnDefaultError, mapPostgresToLiteColumn } from "../../../db/pg-to-lite.js";
+ import { getSubscriptionStateAndContext } from "../../replicator/schema/replication-state.js";
  import { StatementRunner } from "../../../db/statements.js";
  import { isPostgresError, pgClient } from "../../../types/pg.js";
  import { majorVersionFromString, majorVersionToString } from "../../../types/state-version.js";
  import { fromBigInt, toBigInt, toStateVersionString } from "./lsn.js";
- import { UnsupportedColumnDefaultError, mapPostgresToLiteColumn } from "../../../db/pg-to-lite.js";
- import { getSubscriptionStateAndContext } from "../../replicator/schema/replication-state.js";
  import { runTx } from "../../../db/run-transaction.js";
  import { getPublicationInfo } from "./schema/published.js";
  import { replicationEventSchema } from "./schema/ddl.js";
@@ -29,8 +29,8 @@ import { updateShardSchema } from "./schema/init.js";
  import { initialSync } from "./initial-sync.js";
  import { streamBackfill } from "./backfill-stream.js";
  import { subscribe } from "./logical-replication/stream.js";
- import postgres from "postgres";
  import { nanoid } from "nanoid";
+ import postgres from "postgres";
  import { PG_ADMIN_SHUTDOWN, PG_INSUFFICIENT_PRIVILEGE, PG_OBJECT_IN_USE } from "@drdgvhbh/postgres-error-codes";
  //#region ../zero-cache/src/services/change-source/pg/change-source.ts
  /**
@@ -561,7 +561,7 @@ var ChangeMaker = class {
  return [];
  }
  case "commit":
- this.#lastSnapshotInTx = void 0;
+ this.#lastReplicationEventInTx = void 0;
  return [[
  "commit",
  msg,
@@ -573,34 +573,30 @@
  default: throw new Error(`Unexpected message type ${stringify(msg)}`);
  }
  }
- #preSchema;
- #lastSnapshotInTx;
+ #lastReplicationEventInTx;
  #handleDdlMessage(msg) {
  const event = parseLogicalMessageContent(msg, replicationEventSchema);
  clearTimeout(this.#replicaIdentityTimer);
- let previousSchema;
+ let prevEvent = this.#lastReplicationEventInTx;
  const { type } = event;
  switch (type) {
  case "ddlStart":
- this.#preSchema = event.schema;
- return [];
+ case "schemaSnapshot": break;
  case "ddlUpdate":
- previousSchema = must(this.#preSchema, `ddlUpdate received without a ddlStart`);
- break;
- case "schemaSnapshot":
- previousSchema = this.#lastSnapshotInTx ?? null;
+ assert(prevEvent, `ddlUpdate received without a ddlStart`);
  break;
  default:
  this.#lc.info?.(`ignoring unknown ddl message type: ${type}`);
  return [];
  }
- this.#lastSnapshotInTx = event.schema;
- if (!previousSchema) {
- this.#lc.info?.(`received ${msg.prefix}/${type} event`);
+ this.#lastReplicationEventInTx = event;
+ if (!prevEvent) {
+ this.#lc.info?.(`received ${msg.prefix}/${type} event`, event);
  return [];
  }
  this.#lc.info?.(`processing ${msg.prefix}/${type} event`, event);
- const changes = this.#makeSchemaChanges(previousSchema, event).map((change) => ["data", change]);
+ const effectiveTag = prevEvent.type === "ddlStart" ? prevEvent.event.tag : event.event.tag;
+ const changes = this.#makeSchemaChanges(prevEvent.schema, event, effectiveTag).map((change) => ["data", change]);
  this.#lc.withContext("tag", event.event.tag).withContext("query", event.context.query).info?.(`${changes.length} schema change(s)`, { changes });
  const replicaIdentities = replicaIdentitiesForTablesWithoutPrimaryKeys(event.schema);
  if (replicaIdentities) this.#replicaIdentityTimer = setTimeout(async () => {
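This hunk collapses the two fields #preSchema and #lastSnapshotInTx into a single #lastReplicationEventInTx that lives for the duration of the upstream transaction (and is cleared on "commit" in the previous hunk). A condensed sketch of the resulting bookkeeping, with simplified hypothetical types (DdlTracker, ReplicationEvent, diffSchemas), illustrates the flow:

// Simplified, hypothetical types for illustration only.
type ReplicationEvent = {
  type: 'ddlStart' | 'ddlUpdate' | 'schemaSnapshot';
  schema: unknown; // published table/index specs at event time
  event: {tag: string}; // e.g. 'ALTER TABLE', 'CREATE INDEX'
};

declare function diffSchemas(prev: unknown, next: unknown, tag: string): void;

class DdlTracker {
  #lastEventInTx: ReplicationEvent | undefined;

  onCommit(): void {
    // One field to reset at the end of the upstream transaction.
    this.#lastEventInTx = undefined;
  }

  onDdlMessage(event: ReplicationEvent): void {
    const prev = this.#lastEventInTx;
    if (event.type === 'ddlUpdate' && !prev) {
      throw new Error('ddlUpdate received without a ddlStart');
    }
    this.#lastEventInTx = event;
    if (!prev) return; // first event in the tx: just record the baseline
    // Subsequent events diff against the schema captured by the previous
    // event; the DDL tag comes from the ddlStart when there was one.
    const tag = prev.type === 'ddlStart' ? prev.event.tag : event.event.tag;
    diffSchemas(prev.schema, event.schema, tag);
  }
}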
@@ -639,10 +635,10 @@
  * columns. This, for example, would be needed to properly support changing
  * the type of a column that's indexed.
  */
- #makeSchemaChanges(preSchema, update) {
+ #makeSchemaChanges(preSchema, event, tag) {
  try {
  const [prevTbl, prevIdx] = specsByID(preSchema);
- const [nextTbl, nextIdx] = specsByID(update.schema);
+ const [nextTbl, nextIdx] = specsByID(event.schema);
  const changes = [];
  for (const table of nextTbl.values()) validate(this.#lc, table);
  const [droppedIdx, createdIdx] = symmetricDifferences(prevIdx, nextIdx);
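The dropped/created bookkeeping relies on symmetricDifferences from set-utils.js. Its exact signature is not visible in this diff; assuming specsByID keys specs by OID in Maps, a plausible reading is sketched below (an assumption for illustration, not the library's implementation):

// Guess at the helper's shape: given two keyed collections, return the
// keys present only in the first and the keys present only in the second.
function symmetricDifferences<K>(
  a: ReadonlyMap<K, unknown>,
  b: ReadonlyMap<K, unknown>,
): [onlyInA: Set<K>, onlyInB: Set<K>] {
  const onlyInA = new Set<K>();
  const onlyInB = new Set<K>();
  for (const k of a.keys()) if (!b.has(k)) onlyInA.add(k);
  for (const k of b.keys()) if (!a.has(k)) onlyInB.add(k);
  return [onlyInA, onlyInB];
}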
@@ -673,7 +669,7 @@
  });
  }
  const tables = intersection(prevTbl, nextTbl);
- for (const id of tables) changes.push(...this.#getTableChanges(must(prevTbl.get(id)), must(nextTbl.get(id)), update.event.tag));
+ for (const id of tables) changes.push(...this.#getTableChanges(must(prevTbl.get(id)), must(nextTbl.get(id)), tag));
  for (const id of createdTbl) {
  const spec = must(nextTbl.get(id));
  const createTable = {
@@ -681,7 +677,7 @@
  spec,
  metadata: getMetadata(spec)
  };
- if (!update.event.tag.startsWith("CREATE")) createTable.backfill = mapValues(spec.columns, ({ pos: attNum }) => ({ attNum }));
+ if (!tag.startsWith("CREATE")) createTable.backfill = mapValues(spec.columns, ({ pos: attNum }) => ({ attNum }));
  changes.push(createTable);
  }
  for (const id of createdIdx) {
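The backfill annotation added on the "+" line is a column-name to attNum map derived from the published spec; it is what validateSchema() in backfill-stream.js later checks against colSpec.pos. A small illustrative sketch, with hypothetical column specs and a stand-in for the shared mapValues helper:

// Stand-in for the shared/objects.js mapValues helper (assumed behavior:
// map each value of a record, keeping keys).
const mapValues = <T, U>(
  obj: Record<string, T>,
  f: (v: T) => U,
): Record<string, U> =>
  Object.fromEntries(
    Object.entries(obj).map(([k, v]) => [k, f(v)] as [string, U]),
  );

// Hypothetical published column specs.
const columns = {
  id: {pos: 1, typeOID: 23},
  title: {pos: 2, typeOID: 25},
};

// Tables that enter the publication through something other than a CREATE
// (e.g. ALTER PUBLICATION ... ADD TABLE) may already contain rows upstream,
// so the createTable change is annotated with the column positions to
// validate the eventual backfill against.
const backfill = mapValues(columns, ({pos: attNum}) => ({attNum}));
// => {id: {attNum: 1}, title: {attNum: 2}}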
@@ -693,7 +689,7 @@
  }
  return changes;
  } catch (e) {
- throw new UnsupportedSchemaChangeError(String(e), update, { cause: e });
+ throw new UnsupportedSchemaChangeError(String(e), event, { cause: e });
  }
  }
  #getTableChanges(oldTable, newTable, ddlTag) {