@rocicorp/zero 1.3.0 → 1.4.0-canary.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (311)
  1. package/out/analyze-query/src/analyze-cli.d.ts +24 -0
  2. package/out/analyze-query/src/analyze-cli.d.ts.map +1 -0
  3. package/out/analyze-query/src/analyze-cli.js +279 -0
  4. package/out/analyze-query/src/analyze-cli.js.map +1 -0
  5. package/out/analyze-query/src/bin-analyze.js +6 -6
  6. package/out/analyze-query/src/bin-transform.js +2 -2
  7. package/out/ast-to-zql/src/bin.js +1 -1
  8. package/out/shared/src/logging.d.ts.map +1 -1
  9. package/out/shared/src/logging.js +1 -1
  10. package/out/shared/src/logging.js.map +1 -1
  11. package/out/shared/src/options.d.ts.map +1 -1
  12. package/out/shared/src/options.js +1 -1
  13. package/out/shared/src/options.js.map +1 -1
  14. package/out/z2s/src/compiler.d.ts.map +1 -1
  15. package/out/z2s/src/compiler.js +4 -1
  16. package/out/z2s/src/compiler.js.map +1 -1
  17. package/out/z2s/src/sql.d.ts.map +1 -1
  18. package/out/z2s/src/sql.js +1 -0
  19. package/out/z2s/src/sql.js.map +1 -1
  20. package/out/zero/package.js +95 -89
  21. package/out/zero/package.js.map +1 -1
  22. package/out/zero/src/analyze.d.ts +2 -0
  23. package/out/zero/src/analyze.d.ts.map +1 -0
  24. package/out/zero/src/analyze.js +2 -0
  25. package/out/zero/src/zero-cache-dev.js +1 -1
  26. package/out/zero/src/zero-cache-dev.js.map +1 -1
  27. package/out/zero/src/zero-out.js +1 -1
  28. package/out/zero-cache/src/auth/auth.d.ts.map +1 -1
  29. package/out/zero-cache/src/auth/auth.js.map +1 -1
  30. package/out/zero-cache/src/auth/load-permissions.js +2 -2
  31. package/out/zero-cache/src/auth/write-authorizer.d.ts.map +1 -1
  32. package/out/zero-cache/src/auth/write-authorizer.js +5 -14
  33. package/out/zero-cache/src/auth/write-authorizer.js.map +1 -1
  34. package/out/zero-cache/src/config/network.d.ts +1 -1
  35. package/out/zero-cache/src/config/network.d.ts.map +1 -1
  36. package/out/zero-cache/src/config/network.js +1 -1
  37. package/out/zero-cache/src/config/network.js.map +1 -1
  38. package/out/zero-cache/src/config/normalize.d.ts.map +1 -1
  39. package/out/zero-cache/src/config/normalize.js.map +1 -1
  40. package/out/zero-cache/src/config/zero-config.d.ts +5 -0
  41. package/out/zero-cache/src/config/zero-config.d.ts.map +1 -1
  42. package/out/zero-cache/src/config/zero-config.js +16 -3
  43. package/out/zero-cache/src/config/zero-config.js.map +1 -1
  44. package/out/zero-cache/src/db/lite-tables.d.ts.map +1 -1
  45. package/out/zero-cache/src/db/lite-tables.js +3 -3
  46. package/out/zero-cache/src/db/lite-tables.js.map +1 -1
  47. package/out/zero-cache/src/db/transaction-pool.d.ts +43 -40
  48. package/out/zero-cache/src/db/transaction-pool.d.ts.map +1 -1
  49. package/out/zero-cache/src/db/transaction-pool.js +76 -56
  50. package/out/zero-cache/src/db/transaction-pool.js.map +1 -1
  51. package/out/zero-cache/src/observability/events.d.ts.map +1 -1
  52. package/out/zero-cache/src/observability/events.js +1 -1
  53. package/out/zero-cache/src/observability/events.js.map +1 -1
  54. package/out/zero-cache/src/scripts/decommission.js +1 -1
  55. package/out/zero-cache/src/scripts/deploy-permissions.js +2 -2
  56. package/out/zero-cache/src/scripts/permissions.js +1 -1
  57. package/out/zero-cache/src/server/anonymous-otel-start.d.ts.map +1 -1
  58. package/out/zero-cache/src/server/anonymous-otel-start.js +4 -4
  59. package/out/zero-cache/src/server/anonymous-otel-start.js.map +1 -1
  60. package/out/zero-cache/src/server/change-streamer.d.ts +1 -1
  61. package/out/zero-cache/src/server/change-streamer.d.ts.map +1 -1
  62. package/out/zero-cache/src/server/change-streamer.js +27 -12
  63. package/out/zero-cache/src/server/change-streamer.js.map +1 -1
  64. package/out/zero-cache/src/server/logging.d.ts +1 -3
  65. package/out/zero-cache/src/server/logging.d.ts.map +1 -1
  66. package/out/zero-cache/src/server/logging.js +6 -3
  67. package/out/zero-cache/src/server/logging.js.map +1 -1
  68. package/out/zero-cache/src/server/main.d.ts.map +1 -1
  69. package/out/zero-cache/src/server/main.js +26 -26
  70. package/out/zero-cache/src/server/main.js.map +1 -1
  71. package/out/zero-cache/src/server/mutator.js +4 -2
  72. package/out/zero-cache/src/server/mutator.js.map +1 -1
  73. package/out/zero-cache/src/server/otel-log-sink.d.ts.map +1 -1
  74. package/out/zero-cache/src/server/otel-log-sink.js +0 -2
  75. package/out/zero-cache/src/server/otel-log-sink.js.map +1 -1
  76. package/out/zero-cache/src/server/otel-start.d.ts +1 -1
  77. package/out/zero-cache/src/server/otel-start.d.ts.map +1 -1
  78. package/out/zero-cache/src/server/otel-start.js +7 -3
  79. package/out/zero-cache/src/server/otel-start.js.map +1 -1
  80. package/out/zero-cache/src/server/reaper.js +6 -6
  81. package/out/zero-cache/src/server/reaper.js.map +1 -1
  82. package/out/zero-cache/src/server/replicator.d.ts.map +1 -1
  83. package/out/zero-cache/src/server/replicator.js +5 -3
  84. package/out/zero-cache/src/server/replicator.js.map +1 -1
  85. package/out/zero-cache/src/server/runner/run-worker.js +2 -2
  86. package/out/zero-cache/src/server/runner/run-worker.js.map +1 -1
  87. package/out/zero-cache/src/server/syncer.d.ts.map +1 -1
  88. package/out/zero-cache/src/server/syncer.js +13 -12
  89. package/out/zero-cache/src/server/syncer.js.map +1 -1
  90. package/out/zero-cache/src/server/worker-dispatcher.js +1 -1
  91. package/out/zero-cache/src/services/analyze.js +1 -1
  92. package/out/zero-cache/src/services/change-source/common/backfill-manager.js +1 -1
  93. package/out/zero-cache/src/services/change-source/common/replica-schema.js +1 -1
  94. package/out/zero-cache/src/services/change-source/custom/change-source.js +4 -4
  95. package/out/zero-cache/src/services/change-source/custom/change-source.js.map +1 -1
  96. package/out/zero-cache/src/services/change-source/pg/backfill-stream.js +4 -1
  97. package/out/zero-cache/src/services/change-source/pg/backfill-stream.js.map +1 -1
  98. package/out/zero-cache/src/services/change-source/pg/change-source.d.ts.map +1 -1
  99. package/out/zero-cache/src/services/change-source/pg/change-source.js +19 -23
  100. package/out/zero-cache/src/services/change-source/pg/change-source.js.map +1 -1
  101. package/out/zero-cache/src/services/change-source/pg/initial-sync.d.ts +58 -3
  102. package/out/zero-cache/src/services/change-source/pg/initial-sync.d.ts.map +1 -1
  103. package/out/zero-cache/src/services/change-source/pg/initial-sync.js +209 -52
  104. package/out/zero-cache/src/services/change-source/pg/initial-sync.js.map +1 -1
  105. package/out/zero-cache/src/services/change-source/pg/logical-replication/stream.js +2 -2
  106. package/out/zero-cache/src/services/change-source/pg/schema/ddl.d.ts +24 -15
  107. package/out/zero-cache/src/services/change-source/pg/schema/ddl.d.ts.map +1 -1
  108. package/out/zero-cache/src/services/change-source/pg/schema/ddl.js +35 -58
  109. package/out/zero-cache/src/services/change-source/pg/schema/ddl.js.map +1 -1
  110. package/out/zero-cache/src/services/change-source/pg/schema/init.d.ts.map +1 -1
  111. package/out/zero-cache/src/services/change-source/pg/schema/init.js +2 -2
  112. package/out/zero-cache/src/services/change-source/pg/schema/init.js.map +1 -1
  113. package/out/zero-cache/src/services/change-source/pg/schema/published.d.ts +1 -2
  114. package/out/zero-cache/src/services/change-source/pg/schema/published.d.ts.map +1 -1
  115. package/out/zero-cache/src/services/change-source/pg/schema/published.js +15 -18
  116. package/out/zero-cache/src/services/change-source/pg/schema/published.js.map +1 -1
  117. package/out/zero-cache/src/services/change-source/pg/schema/shard.js +1 -1
  118. package/out/zero-cache/src/services/change-source/protocol/current/data.js +1 -1
  119. package/out/zero-cache/src/services/change-streamer/backup-monitor.js +1 -1
  120. package/out/zero-cache/src/services/change-streamer/change-streamer-http.d.ts +1 -1
  121. package/out/zero-cache/src/services/change-streamer/change-streamer-http.d.ts.map +1 -1
  122. package/out/zero-cache/src/services/change-streamer/change-streamer-http.js +4 -4
  123. package/out/zero-cache/src/services/change-streamer/change-streamer-http.js.map +1 -1
  124. package/out/zero-cache/src/services/change-streamer/change-streamer-service.d.ts +5 -1
  125. package/out/zero-cache/src/services/change-streamer/change-streamer-service.d.ts.map +1 -1
  126. package/out/zero-cache/src/services/change-streamer/change-streamer-service.js +10 -7
  127. package/out/zero-cache/src/services/change-streamer/change-streamer-service.js.map +1 -1
  128. package/out/zero-cache/src/services/change-streamer/replica-monitor.js +2 -2
  129. package/out/zero-cache/src/services/change-streamer/storer.d.ts +19 -2
  130. package/out/zero-cache/src/services/change-streamer/storer.d.ts.map +1 -1
  131. package/out/zero-cache/src/services/change-streamer/storer.js +70 -6
  132. package/out/zero-cache/src/services/change-streamer/storer.js.map +1 -1
  133. package/out/zero-cache/src/services/heapz.d.ts.map +1 -1
  134. package/out/zero-cache/src/services/heapz.js +1 -1
  135. package/out/zero-cache/src/services/heapz.js.map +1 -1
  136. package/out/zero-cache/src/services/life-cycle.d.ts +2 -1
  137. package/out/zero-cache/src/services/life-cycle.d.ts.map +1 -1
  138. package/out/zero-cache/src/services/life-cycle.js +10 -7
  139. package/out/zero-cache/src/services/life-cycle.js.map +1 -1
  140. package/out/zero-cache/src/services/litestream/commands.d.ts +15 -4
  141. package/out/zero-cache/src/services/litestream/commands.d.ts.map +1 -1
  142. package/out/zero-cache/src/services/litestream/commands.js +40 -34
  143. package/out/zero-cache/src/services/litestream/commands.js.map +1 -1
  144. package/out/zero-cache/src/services/mutagen/mutagen.js +3 -3
  145. package/out/zero-cache/src/services/mutagen/pusher.d.ts +28 -28
  146. package/out/zero-cache/src/services/replicator/change-processor.js +2 -2
  147. package/out/zero-cache/src/services/replicator/incremental-sync.js +1 -1
  148. package/out/zero-cache/src/services/replicator/schema/replication-state.js +1 -1
  149. package/out/zero-cache/src/services/replicator/write-worker-client.js.map +1 -1
  150. package/out/zero-cache/src/services/replicator/write-worker.js +3 -3
  151. package/out/zero-cache/src/services/replicator/write-worker.js.map +1 -1
  152. package/out/zero-cache/src/services/run-ast.d.ts.map +1 -1
  153. package/out/zero-cache/src/services/run-ast.js +2 -2
  154. package/out/zero-cache/src/services/run-ast.js.map +1 -1
  155. package/out/zero-cache/src/services/statz.d.ts.map +1 -1
  156. package/out/zero-cache/src/services/statz.js +3 -3
  157. package/out/zero-cache/src/services/statz.js.map +1 -1
  158. package/out/zero-cache/src/services/view-syncer/active-users-gauge.js +1 -1
  159. package/out/zero-cache/src/services/view-syncer/connection-context-manager.d.ts +2 -2
  160. package/out/zero-cache/src/services/view-syncer/connection-context-manager.d.ts.map +1 -1
  161. package/out/zero-cache/src/services/view-syncer/connection-context-manager.js.map +1 -1
  162. package/out/zero-cache/src/services/view-syncer/cvr-purger.js +1 -1
  163. package/out/zero-cache/src/services/view-syncer/cvr-store.js +3 -3
  164. package/out/zero-cache/src/services/view-syncer/cvr-store.js.map +1 -1
  165. package/out/zero-cache/src/services/view-syncer/cvr.js +1 -1
  166. package/out/zero-cache/src/services/view-syncer/inspect-handler.js +2 -2
  167. package/out/zero-cache/src/services/view-syncer/pipeline-driver.d.ts +6 -16
  168. package/out/zero-cache/src/services/view-syncer/pipeline-driver.d.ts.map +1 -1
  169. package/out/zero-cache/src/services/view-syncer/pipeline-driver.js +30 -38
  170. package/out/zero-cache/src/services/view-syncer/pipeline-driver.js.map +1 -1
  171. package/out/zero-cache/src/services/view-syncer/row-record-cache.d.ts.map +1 -1
  172. package/out/zero-cache/src/services/view-syncer/row-record-cache.js +4 -4
  173. package/out/zero-cache/src/services/view-syncer/row-record-cache.js.map +1 -1
  174. package/out/zero-cache/src/services/view-syncer/snapshotter.js +2 -2
  175. package/out/zero-cache/src/services/view-syncer/view-syncer.d.ts.map +1 -1
  176. package/out/zero-cache/src/services/view-syncer/view-syncer.js +6 -6
  177. package/out/zero-cache/src/services/view-syncer/view-syncer.js.map +1 -1
  178. package/out/zero-cache/src/types/profiler.d.ts.map +1 -1
  179. package/out/zero-cache/src/types/profiler.js.map +1 -1
  180. package/out/zero-cache/src/types/row-key.d.ts.map +1 -1
  181. package/out/zero-cache/src/types/row-key.js.map +1 -1
  182. package/out/zero-cache/src/types/streams.d.ts +1 -1
  183. package/out/zero-cache/src/types/streams.d.ts.map +1 -1
  184. package/out/zero-cache/src/types/streams.js.map +1 -1
  185. package/out/zero-cache/src/types/websocket-handoff.d.ts +1 -1
  186. package/out/zero-cache/src/types/websocket-handoff.d.ts.map +1 -1
  187. package/out/zero-cache/src/types/websocket-handoff.js +1 -1
  188. package/out/zero-cache/src/types/websocket-handoff.js.map +1 -1
  189. package/out/zero-cache/src/workers/connection.d.ts +1 -1
  190. package/out/zero-cache/src/workers/connection.d.ts.map +1 -1
  191. package/out/zero-cache/src/workers/connection.js +2 -2
  192. package/out/zero-cache/src/workers/connection.js.map +1 -1
  193. package/out/zero-cache/src/workers/mutator.js.map +1 -1
  194. package/out/zero-cache/src/workers/syncer.d.ts +1 -1
  195. package/out/zero-cache/src/workers/syncer.d.ts.map +1 -1
  196. package/out/zero-cache/src/workers/syncer.js +3 -3
  197. package/out/zero-cache/src/workers/syncer.js.map +1 -1
  198. package/out/zero-client/src/client/crud-impl.d.ts.map +1 -1
  199. package/out/zero-client/src/client/crud-impl.js +4 -13
  200. package/out/zero-client/src/client/crud-impl.js.map +1 -1
  201. package/out/zero-client/src/client/inspector/inspector.d.ts +24 -0
  202. package/out/zero-client/src/client/inspector/inspector.d.ts.map +1 -1
  203. package/out/zero-client/src/client/inspector/inspector.js +28 -0
  204. package/out/zero-client/src/client/inspector/inspector.js.map +1 -1
  205. package/out/zero-client/src/client/inspector/lazy-inspector.d.ts +9 -0
  206. package/out/zero-client/src/client/inspector/lazy-inspector.d.ts.map +1 -1
  207. package/out/zero-client/src/client/inspector/lazy-inspector.js +28 -1
  208. package/out/zero-client/src/client/inspector/lazy-inspector.js.map +1 -1
  209. package/out/zero-client/src/client/ivm-branch.d.ts.map +1 -1
  210. package/out/zero-client/src/client/ivm-branch.js +4 -13
  211. package/out/zero-client/src/client/ivm-branch.js.map +1 -1
  212. package/out/zero-client/src/client/version.js +1 -1
  213. package/out/zero-protocol/src/error.d.ts.map +1 -1
  214. package/out/zero-protocol/src/error.js +1 -1
  215. package/out/zero-protocol/src/error.js.map +1 -1
  216. package/out/zero-solid/src/solid-view.d.ts.map +1 -1
  217. package/out/zero-solid/src/solid-view.js +13 -13
  218. package/out/zero-solid/src/solid-view.js.map +1 -1
  219. package/out/zql/src/builder/builder.d.ts.map +1 -1
  220. package/out/zql/src/builder/builder.js.map +1 -1
  221. package/out/zql/src/ivm/array-view.d.ts.map +1 -1
  222. package/out/zql/src/ivm/array-view.js +26 -1
  223. package/out/zql/src/ivm/array-view.js.map +1 -1
  224. package/out/zql/src/ivm/change-index-enum.d.ts +9 -0
  225. package/out/zql/src/ivm/change-index-enum.d.ts.map +1 -0
  226. package/out/zql/src/ivm/change-index.d.ts +5 -0
  227. package/out/zql/src/ivm/change-index.d.ts.map +1 -0
  228. package/out/zql/src/ivm/change-type-enum.d.ts +9 -0
  229. package/out/zql/src/ivm/change-type-enum.d.ts.map +1 -0
  230. package/out/zql/src/ivm/change-type.d.ts +5 -0
  231. package/out/zql/src/ivm/change-type.d.ts.map +1 -0
  232. package/out/zql/src/ivm/change.d.ts +20 -22
  233. package/out/zql/src/ivm/change.d.ts.map +1 -1
  234. package/out/zql/src/ivm/change.js +33 -0
  235. package/out/zql/src/ivm/change.js.map +1 -0
  236. package/out/zql/src/ivm/exists.d.ts.map +1 -1
  237. package/out/zql/src/ivm/exists.js +27 -38
  238. package/out/zql/src/ivm/exists.js.map +1 -1
  239. package/out/zql/src/ivm/fan-in.d.ts +3 -2
  240. package/out/zql/src/ivm/fan-in.d.ts.map +1 -1
  241. package/out/zql/src/ivm/fan-in.js.map +1 -1
  242. package/out/zql/src/ivm/fan-out.d.ts +1 -1
  243. package/out/zql/src/ivm/fan-out.d.ts.map +1 -1
  244. package/out/zql/src/ivm/fan-out.js +1 -1
  245. package/out/zql/src/ivm/fan-out.js.map +1 -1
  246. package/out/zql/src/ivm/filter-operators.d.ts +3 -3
  247. package/out/zql/src/ivm/filter-operators.d.ts.map +1 -1
  248. package/out/zql/src/ivm/filter-operators.js.map +1 -1
  249. package/out/zql/src/ivm/filter-push.d.ts.map +1 -1
  250. package/out/zql/src/ivm/filter-push.js +7 -7
  251. package/out/zql/src/ivm/filter-push.js.map +1 -1
  252. package/out/zql/src/ivm/filter.d.ts +1 -1
  253. package/out/zql/src/ivm/filter.d.ts.map +1 -1
  254. package/out/zql/src/ivm/filter.js.map +1 -1
  255. package/out/zql/src/ivm/flipped-join.d.ts.map +1 -1
  256. package/out/zql/src/ivm/flipped-join.js +49 -58
  257. package/out/zql/src/ivm/flipped-join.js.map +1 -1
  258. package/out/zql/src/ivm/join-utils.d.ts +2 -6
  259. package/out/zql/src/ivm/join-utils.d.ts.map +1 -1
  260. package/out/zql/src/ivm/join-utils.js +25 -25
  261. package/out/zql/src/ivm/join-utils.js.map +1 -1
  262. package/out/zql/src/ivm/join.d.ts.map +1 -1
  263. package/out/zql/src/ivm/join.js +32 -51
  264. package/out/zql/src/ivm/join.js.map +1 -1
  265. package/out/zql/src/ivm/maybe-split-and-push-edit-change.d.ts +1 -1
  266. package/out/zql/src/ivm/maybe-split-and-push-edit-change.d.ts.map +1 -1
  267. package/out/zql/src/ivm/maybe-split-and-push-edit-change.js +5 -10
  268. package/out/zql/src/ivm/maybe-split-and-push-edit-change.js.map +1 -1
  269. package/out/zql/src/ivm/memory-source.d.ts.map +1 -1
  270. package/out/zql/src/ivm/memory-source.js +51 -59
  271. package/out/zql/src/ivm/memory-source.js.map +1 -1
  272. package/out/zql/src/ivm/push-accumulated.d.ts +3 -2
  273. package/out/zql/src/ivm/push-accumulated.d.ts.map +1 -1
  274. package/out/zql/src/ivm/push-accumulated.js +98 -122
  275. package/out/zql/src/ivm/push-accumulated.js.map +1 -1
  276. package/out/zql/src/ivm/skip.d.ts +1 -1
  277. package/out/zql/src/ivm/skip.d.ts.map +1 -1
  278. package/out/zql/src/ivm/skip.js +2 -2
  279. package/out/zql/src/ivm/skip.js.map +1 -1
  280. package/out/zql/src/ivm/source-change-index-enum.d.ts +7 -0
  281. package/out/zql/src/ivm/source-change-index-enum.d.ts.map +1 -0
  282. package/out/zql/src/ivm/source-change-index.d.ts +5 -0
  283. package/out/zql/src/ivm/source-change-index.d.ts.map +1 -0
  284. package/out/zql/src/ivm/source.d.ts +11 -13
  285. package/out/zql/src/ivm/source.d.ts.map +1 -1
  286. package/out/zql/src/ivm/source.js +26 -0
  287. package/out/zql/src/ivm/source.js.map +1 -0
  288. package/out/zql/src/ivm/take.d.ts.map +1 -1
  289. package/out/zql/src/ivm/take.js +27 -50
  290. package/out/zql/src/ivm/take.js.map +1 -1
  291. package/out/zql/src/ivm/union-fan-in.d.ts +2 -1
  292. package/out/zql/src/ivm/union-fan-in.d.ts.map +1 -1
  293. package/out/zql/src/ivm/union-fan-in.js +3 -3
  294. package/out/zql/src/ivm/union-fan-in.js.map +1 -1
  295. package/out/zql/src/ivm/union-fan-out.d.ts.map +1 -1
  296. package/out/zql/src/ivm/union-fan-out.js +1 -1
  297. package/out/zql/src/ivm/union-fan-out.js.map +1 -1
  298. package/out/zql/src/planner/planner-debug.d.ts +2 -2
  299. package/out/zql/src/planner/planner-debug.d.ts.map +1 -1
  300. package/out/zql/src/planner/planner-debug.js.map +1 -1
  301. package/out/zql/src/planner/planner-graph.d.ts +1 -1
  302. package/out/zql/src/planner/planner-graph.d.ts.map +1 -1
  303. package/out/zql/src/planner/planner-graph.js.map +1 -1
  304. package/out/zqlite/src/internal/sql-inline.d.ts.map +1 -1
  305. package/out/zqlite/src/internal/sql-inline.js.map +1 -1
  306. package/out/zqlite/src/query-builder.d.ts.map +1 -1
  307. package/out/zqlite/src/query-builder.js.map +1 -1
  308. package/out/zqlite/src/table-source.d.ts.map +1 -1
  309. package/out/zqlite/src/table-source.js +11 -11
  310. package/out/zqlite/src/table-source.js.map +1 -1
  311. package/package.json +99 -93
@@ -1,10 +1,10 @@
1
1
  import { sleep } from "../../../../shared/src/sleep.js";
2
2
  import { must } from "../../../../shared/src/must.js";
3
- import { Database } from "../../../../zqlite/src/db.js";
4
- import { assertNormalized } from "../../config/normalize.js";
5
3
  import { getShardConfig } from "../../types/shards.js";
6
- import { StatementRunner } from "../../db/statements.js";
4
+ import { assertNormalized } from "../../config/normalize.js";
5
+ import { Database } from "../../../../zqlite/src/db.js";
7
6
  import { getSubscriptionState } from "../replicator/schema/replication-state.js";
7
+ import { StatementRunner } from "../../db/statements.js";
8
8
  import { deleteLiteDB } from "../../db/delete-lite-db.js";
9
9
  import { ChangeStreamerHttpClient } from "../change-streamer/change-streamer-http.js";
10
10
  import { resolver } from "@rocicorp/resolver";
@@ -13,23 +13,32 @@ import { existsSync } from "node:fs";
13
13
  //#region ../zero-cache/src/services/litestream/commands.ts
14
14
  var MAX_RETRIES = 60;
15
15
  var RETRY_INTERVAL_MS = 3e3;
16
+ var BackupNotFoundException = class extends Error {
17
+ static name = "BackupNotFoundException";
18
+ constructor(backupURL) {
19
+ super(`backup not found at ${backupURL}`);
20
+ }
21
+ };
16
22
  /**
17
- * @returns The time at which the last restore started
18
- * (i.e. not counting failed attempts).
23
+ * @param replicaConstraints The constraints of the restored backup when
24
+ * restoring for the change-streamer (replication-manager). For the
25
+ * view-syncer, this should be unspecified so that the constraints are
26
+ * retrieved from the replication-manager via the snapshot protocol.
19
27
  */
20
- async function restoreReplica(lc, config) {
21
- const { changeStreamer } = config;
28
+ async function restoreReplica(lc, config, replicaConstraints) {
22
29
  for (let i = 0; i < MAX_RETRIES; i++) {
23
- if (i > 0) {
24
- lc.info?.(`replica not found. retrying in ${RETRY_INTERVAL_MS / 1e3} seconds`);
25
- await sleep(RETRY_INTERVAL_MS);
26
- }
27
- const start = /* @__PURE__ */ new Date();
28
- if (await tryRestore(lc, config)) return start;
29
- if (changeStreamer.mode === "dedicated" && changeStreamer.uri === void 0) {
30
- lc.info?.("no litestream backup found");
31
- return start;
30
+ try {
31
+ if (await tryRestore(lc, config, replicaConstraints)) return;
32
+ } catch (e) {
33
+ if (i === 0) {
34
+ lc.warn?.(`initial restore attempt failed. retrying once`, e);
35
+ continue;
36
+ }
37
+ throw e;
32
38
  }
39
+ if (replicaConstraints) throw new BackupNotFoundException(config.litestream.backupURL);
40
+ lc.info?.(`replica not found. retrying in ${RETRY_INTERVAL_MS / 1e3} seconds`);
41
+ await sleep(RETRY_INTERVAL_MS);
33
42
  }
34
43
  throw new Error(`max attempts exceeded restoring replica`);
35
44
  }
@@ -56,15 +65,13 @@ function getLitestream(mode, config, logLevelOverride, backupURLOverride) {
56
65
  }
57
66
  };
58
67
  }
59
- async function tryRestore(lc, config) {
60
- const { changeStreamer } = config;
61
- const isViewSyncer = changeStreamer.mode === "discover" || changeStreamer.uri !== void 0;
62
- const firstMessage = reserveAndGetSnapshotStatus(lc, config, isViewSyncer);
68
+ async function tryRestore(lc, config, replicaConstraints) {
63
69
  let snapshotStatus;
64
- if (isViewSyncer) {
65
- snapshotStatus = await firstMessage;
70
+ if (!replicaConstraints) {
71
+ snapshotStatus = await reserveAndGetSnapshotStatus(lc, config);
66
72
  lc.info?.(`restoring backup from ${snapshotStatus.backupURL}`);
67
- } else firstMessage.catch((e) => lc.debug?.(e));
73
+ replicaConstraints = snapshotStatus;
74
+ }
68
75
  const { litestream, env } = getLitestream("restore", config, "debug", snapshotStatus?.backupURL);
69
76
  const { restoreParallelism: parallelism } = config.litestream;
70
77
  const proc = spawn(litestream, [
@@ -88,26 +95,26 @@ async function tryRestore(lc, config) {
88
95
  });
89
96
  await promise;
90
97
  if (!existsSync(config.replica.file)) return false;
91
- if (snapshotStatus && !replicaIsValid(lc, config.replica.file, snapshotStatus)) {
98
+ if (!replicaIsValid(lc, config.replica.file, replicaConstraints)) {
92
99
  lc.info?.(`Deleting local replica and retrying restore`);
93
100
  deleteLiteDB(config.replica.file);
94
101
  return false;
95
102
  }
96
103
  return true;
97
104
  }
98
- function replicaIsValid(lc, replica, snapshot) {
105
+ function replicaIsValid(lc, replica, constraints) {
99
106
  const db = new Database(lc, replica);
100
107
  try {
101
108
  const { replicaVersion, watermark } = getSubscriptionState(new StatementRunner(db));
102
- if (replicaVersion !== snapshot.replicaVersion) {
103
- lc.warn?.(`Local replica version ${replicaVersion} does not match change-streamer replicaVersion ${snapshot.replicaVersion}`, snapshot);
109
+ if (replicaVersion !== constraints.replicaVersion) {
110
+ lc.warn?.(`Local replica version ${replicaVersion} does not match expected replicaVersion ${constraints.replicaVersion}`, constraints);
104
111
  return false;
105
112
  }
106
- if (watermark < snapshot.minWatermark) {
107
- lc.warn?.(`Local replica watermark ${watermark} is earlier than change-streamer minWatermark ${snapshot.minWatermark}`);
113
+ if (watermark < constraints.minWatermark) {
114
+ lc.warn?.(`Local replica watermark ${watermark} is earlier than minWatermark ${constraints.minWatermark}`);
108
115
  return false;
109
116
  }
110
- lc.info?.(`Local replica at version ${replicaVersion} and watermark ${watermark} is compatible with change-streamer`, snapshot);
117
+ lc.info?.(`Local replica at version ${replicaVersion} and watermark ${watermark} is compatible`, constraints);
111
118
  return true;
112
119
  } catch (e) {
113
120
  lc.error?.("Error while validating restored replica", e);
@@ -125,14 +132,14 @@ function startReplicaBackupProcess(lc, config) {
125
132
  windowsHide: true
126
133
  });
127
134
  }
128
- function reserveAndGetSnapshotStatus(lc, config, isViewSyncer) {
135
+ function reserveAndGetSnapshotStatus(lc, config) {
129
136
  const { promise: status, resolve, reject } = resolver();
130
137
  (async function() {
131
138
  const abort = new AbortController();
132
139
  process.on("SIGINT", () => abort.abort());
133
140
  process.on("SIGTERM", () => abort.abort());
134
141
  for (let i = 0;; i++) {
135
- let err = "";
142
+ let err;
136
143
  try {
137
144
  let resolved = false;
138
145
  const stream = await reserveSnapshot(lc, config);
@@ -144,7 +151,6 @@ function reserveAndGetSnapshotStatus(lc, config, isViewSyncer) {
144
151
  } catch (e) {
145
152
  err = e;
146
153
  }
147
- if (!isViewSyncer) return reject(err);
148
154
  lc.warn?.(`Unable to reserve snapshot (attempt ${i + 1}). Retrying in 5 seconds.`, String(err));
149
155
  try {
150
156
  await sleep(5e3, abort.signal);
@@ -161,6 +167,6 @@ function reserveSnapshot(lc, config) {
161
167
  return new ChangeStreamerHttpClient(lc, getShardConfig(config), change.db, changeStreamer.uri).reserveSnapshot(taskID);
162
168
  }
163
169
  //#endregion
164
- export { restoreReplica, startReplicaBackupProcess };
170
+ export { BackupNotFoundException, restoreReplica, startReplicaBackupProcess };
165
171
 
166
172
  //# sourceMappingURL=commands.js.map
@@ -1 +1 @@
1
- {"version":3,"file":"commands.js","names":[],"sources":["../../../../../../zero-cache/src/services/litestream/commands.ts"],"sourcesContent":["import type {LogContext, LogLevel} from '@rocicorp/logger';\nimport {resolver} from '@rocicorp/resolver';\nimport type {ChildProcess} from 'node:child_process';\nimport {spawn} from 'node:child_process';\nimport {existsSync} from 'node:fs';\nimport {must} from '../../../../shared/src/must.ts';\nimport {sleep} from '../../../../shared/src/sleep.ts';\nimport {Database} from '../../../../zqlite/src/db.ts';\nimport {assertNormalized} from '../../config/normalize.ts';\nimport type {ZeroConfig} from '../../config/zero-config.ts';\nimport {deleteLiteDB} from '../../db/delete-lite-db.ts';\nimport {StatementRunner} from '../../db/statements.ts';\nimport {getShardConfig} from '../../types/shards.ts';\nimport type {Source} from '../../types/streams.ts';\nimport {ChangeStreamerHttpClient} from '../change-streamer/change-streamer-http.ts';\nimport type {\n SnapshotMessage,\n SnapshotStatus,\n} from '../change-streamer/snapshot.ts';\nimport {getSubscriptionState} from '../replicator/schema/replication-state.ts';\n\n// Retry for up to 3 minutes (60 times with 3 second delay).\n// Beyond that, let the container runner restart the task.\nconst MAX_RETRIES = 60;\nconst RETRY_INTERVAL_MS = 3000;\n\n/**\n * @returns The time at which the last restore started\n * (i.e. not counting failed attempts).\n */\nexport async function restoreReplica(\n lc: LogContext,\n config: ZeroConfig,\n): Promise<Date> {\n const {changeStreamer} = config;\n\n for (let i = 0; i < MAX_RETRIES; i++) {\n if (i > 0) {\n lc.info?.(\n `replica not found. 
retrying in ${RETRY_INTERVAL_MS / 1000} seconds`,\n );\n await sleep(RETRY_INTERVAL_MS);\n }\n const start = new Date();\n const restored = await tryRestore(lc, config);\n if (restored) {\n return start;\n }\n if (\n changeStreamer.mode === 'dedicated' &&\n changeStreamer.uri === undefined\n ) {\n lc.info?.('no litestream backup found');\n return start;\n }\n }\n throw new Error(`max attempts exceeded restoring replica`);\n}\n\nfunction getLitestream(\n mode: 'restore' | 'replicate',\n config: ZeroConfig,\n logLevelOverride?: LogLevel,\n backupURLOverride?: string,\n): {\n litestream: string;\n env: NodeJS.ProcessEnv;\n} {\n const {\n executable,\n executableV5,\n restoreUsingV5,\n backupURL,\n logLevel,\n configPath,\n endpoint,\n port = config.port + 2,\n checkpointThresholdMB,\n minCheckpointPageCount = checkpointThresholdMB * 250, // SQLite page size is 4KB\n maxCheckpointPageCount = minCheckpointPageCount * 10,\n incrementalBackupIntervalMinutes,\n snapshotBackupIntervalHours,\n multipartConcurrency,\n multipartSize,\n } = config.litestream;\n\n // Set the snapshot interval to something smaller than x hours so that\n // the hourly check triggers on the hour, rather than the hour after.\n const snapshotBackupIntervalMinutes = snapshotBackupIntervalHours * 60 - 5;\n\n const litestream =\n // The v0.5.8+ litestream executable can restore from either the new LTX\n // format or the legacy WAL format, allowing forwards-compatibility /\n // rollback safety with zero-cache versions that backup to LTX.\n (mode === 'restore' && restoreUsingV5 ? executableV5 : executable) ??\n must(executable, `Missing --litestream-executable`);\n return {\n litestream,\n env: {\n ...process.env,\n ['ZERO_REPLICA_FILE']: config.replica.file,\n ['ZERO_LITESTREAM_BACKUP_URL']: must(backupURLOverride ?? 
backupURL),\n ['ZERO_LITESTREAM_MIN_CHECKPOINT_PAGE_COUNT']: String(\n minCheckpointPageCount,\n ),\n ['ZERO_LITESTREAM_MAX_CHECKPOINT_PAGE_COUNT']: String(\n maxCheckpointPageCount,\n ),\n ['ZERO_LITESTREAM_INCREMENTAL_BACKUP_INTERVAL_MINUTES']: String(\n incrementalBackupIntervalMinutes,\n ),\n ['ZERO_LITESTREAM_LOG_LEVEL']: logLevelOverride ?? logLevel,\n ['ZERO_LITESTREAM_SNAPSHOT_BACKUP_INTERVAL_MINUTES']: String(\n snapshotBackupIntervalMinutes,\n ),\n ['ZERO_LITESTREAM_MULTIPART_CONCURRENCY']: String(multipartConcurrency),\n ['ZERO_LITESTREAM_MULTIPART_SIZE']: String(multipartSize),\n ['ZERO_LOG_FORMAT']: config.log.format,\n ['LITESTREAM_CONFIG']: configPath,\n ['LITESTREAM_PORT']: String(port),\n ...(endpoint ? {['ZERO_LITESTREAM_ENDPOINT']: endpoint} : {}),\n },\n };\n}\n\nasync function tryRestore(lc: LogContext, config: ZeroConfig) {\n const {changeStreamer} = config;\n\n const isViewSyncer =\n changeStreamer.mode === 'discover' || changeStreamer.uri !== undefined;\n\n // Fire off a snapshot reservation to the current replication-manager\n // (if there is one).\n const firstMessage = reserveAndGetSnapshotStatus(lc, config, isViewSyncer);\n let snapshotStatus: SnapshotStatus | undefined;\n if (isViewSyncer) {\n // The return value is required by view-syncers ...\n snapshotStatus = await firstMessage;\n lc.info?.(`restoring backup from ${snapshotStatus.backupURL}`);\n } else {\n // but it is also useful to pause change-log cleanup when a new\n // replication-manager is starting up. In this case, the request is\n // best-effort. 
In particular, there may not be a previous\n // replication-manager running at all.\n void firstMessage.catch(e => lc.debug?.(e));\n }\n\n const {litestream, env} = getLitestream(\n 'restore',\n config,\n 'debug', // Include all output from `litestream restore`, as it's minimal.\n snapshotStatus?.backupURL,\n );\n const {restoreParallelism: parallelism} = config.litestream;\n const proc = spawn(\n litestream,\n [\n 'restore',\n '-if-db-not-exists',\n '-if-replica-exists',\n '-parallelism',\n String(parallelism),\n config.replica.file,\n ],\n {env, stdio: 'inherit', windowsHide: true},\n );\n const {promise, resolve, reject} = resolver();\n proc.on('error', reject);\n proc.on('close', (code, signal) => {\n if (signal) {\n reject(`litestream killed with ${signal}`);\n } else if (code !== 0) {\n reject(`litestream exited with code ${code}`);\n } else {\n resolve();\n }\n });\n await promise;\n if (!existsSync(config.replica.file)) {\n return false;\n }\n if (\n snapshotStatus &&\n !replicaIsValid(lc, config.replica.file, snapshotStatus)\n ) {\n lc.info?.(`Deleting local replica and retrying restore`);\n deleteLiteDB(config.replica.file);\n return false;\n }\n return true;\n}\n\nfunction replicaIsValid(\n lc: LogContext,\n replica: string,\n snapshot: SnapshotStatus,\n) {\n const db = new Database(lc, replica);\n try {\n const {replicaVersion, watermark} = getSubscriptionState(\n new StatementRunner(db),\n );\n if (replicaVersion !== snapshot.replicaVersion) {\n lc.warn?.(\n `Local replica version ${replicaVersion} does not match change-streamer replicaVersion ${snapshot.replicaVersion}`,\n snapshot,\n );\n return false;\n }\n if (watermark < snapshot.minWatermark) {\n lc.warn?.(\n `Local replica watermark ${watermark} is earlier than change-streamer minWatermark ${snapshot.minWatermark}`,\n );\n return false;\n }\n lc.info?.(\n `Local replica at version ${replicaVersion} and watermark ${watermark} is compatible with change-streamer`,\n snapshot,\n );\n return true;\n 
} catch (e) {\n lc.error?.('Error while validating restored replica', e);\n return false;\n } finally {\n db.close();\n }\n}\n\nexport function startReplicaBackupProcess(\n lc: LogContext,\n config: ZeroConfig,\n): ChildProcess {\n const {litestream, env} = getLitestream('replicate', config);\n lc.info?.(`starting litestream backup to ${config.litestream.backupURL}`);\n return spawn(litestream, ['replicate'], {\n env,\n stdio: 'inherit',\n windowsHide: true,\n });\n}\n\nfunction reserveAndGetSnapshotStatus(\n lc: LogContext,\n config: ZeroConfig,\n isViewSyncer: boolean,\n): Promise<SnapshotStatus> {\n const {promise: status, resolve, reject} = resolver<SnapshotStatus>();\n\n void (async function () {\n const abort = new AbortController();\n process.on('SIGINT', () => abort.abort());\n process.on('SIGTERM', () => abort.abort());\n\n for (let i = 0; ; i++) {\n let err: unknown | string = '';\n try {\n let resolved = false;\n const stream = await reserveSnapshot(lc, config);\n for await (const msg of stream) {\n // Capture the value of the status message that the change-streamer\n // (i.e. BackupMonitor) returns, and hold the connection open to\n // \"reserve\" the snapshot and prevent change log cleanup.\n resolve(msg[1]);\n resolved = true;\n }\n // The change-streamer itself closes the connection when the\n // subscription is started (or the reservation retried).\n if (resolved) {\n break;\n }\n } catch (e) {\n err = e;\n }\n if (!isViewSyncer) {\n return reject(err);\n }\n // Retry in the view-syncer since it cannot proceed until it connects\n // to a (compatible) replication-manager. 
In particular, a\n // replication-manager that does not support the view-syncer's\n // change-streamer protocol will close the stream with an error; this\n // retry logic essentially delays the startup of a view-syncer until\n // a compatible replication-manager has been rolled out, allowing\n // replication-manager and view-syncer services to be updated in\n // parallel.\n lc.warn?.(\n `Unable to reserve snapshot (attempt ${i + 1}). Retrying in 5 seconds.`,\n String(err),\n );\n try {\n await sleep(5000, abort.signal);\n } catch (e) {\n return reject(e);\n }\n }\n })();\n\n return status;\n}\n\nfunction reserveSnapshot(\n lc: LogContext,\n config: ZeroConfig,\n): Promise<Source<SnapshotMessage>> {\n assertNormalized(config);\n const {taskID, change, changeStreamer} = config;\n const shardID = getShardConfig(config);\n\n const changeStreamerClient = new ChangeStreamerHttpClient(\n lc,\n shardID,\n change.db,\n changeStreamer.uri,\n );\n\n return changeStreamerClient.reserveSnapshot(taskID);\n}\n"],"mappings":";;;;;;;;;;;;;AAuBA,IAAM,cAAc;AACpB,IAAM,oBAAoB;;;;;AAM1B,eAAsB,eACpB,IACA,QACe;CACf,MAAM,EAAC,mBAAkB;AAEzB,MAAK,IAAI,IAAI,GAAG,IAAI,aAAa,KAAK;AACpC,MAAI,IAAI,GAAG;AACT,MAAG,OACD,kCAAkC,oBAAoB,IAAK,UAC5D;AACD,SAAM,MAAM,kBAAkB;;EAEhC,MAAM,wBAAQ,IAAI,MAAM;AAExB,MADiB,MAAM,WAAW,IAAI,OAAO,CAE3C,QAAO;AAET,MACE,eAAe,SAAS,eACxB,eAAe,QAAQ,KAAA,GACvB;AACA,MAAG,OAAO,6BAA6B;AACvC,UAAO;;;AAGX,OAAM,IAAI,MAAM,0CAA0C;;AAG5D,SAAS,cACP,MACA,QACA,kBACA,mBAIA;CACA,MAAM,EACJ,YACA,cACA,gBACA,WACA,UACA,YACA,UACA,OAAO,OAAO,OAAO,GACrB,uBACA,yBAAyB,wBAAwB,KACjD,yBAAyB,yBAAyB,IAClD,kCACA,6BACA,sBACA,kBACE,OAAO;CAIX,MAAM,gCAAgC,8BAA8B,KAAK;AAQzE,QAAO;EACL,aAHC,SAAS,aAAa,iBAAiB,eAAe,eACvD,KAAK,YAAY,kCAAkC;EAGnD,KAAK;GACH,GAAG,QAAQ;IACV,sBAAsB,OAAO,QAAQ;IACrC,+BAA+B,KAAK,qBAAqB,UAAU;IACnE,8CAA8C,OAC7C,uBACD;IACA,8CAA8C,OAC7C,uBACD;IACA,wDAAwD,OACvD,iCACD;IACA,8BAA8B,oBAAoB;IAClD,qDAAqD,OACpD,8BACD;IACA,0CAA0C,OAAO,qBAAqB;IACtE,mCAAmC,OAAO,cAAc;IACxD,oBAAoB,OAAO,IAAI;IAC/B,sBAAsB;IACtB,oBAAo
B,OAAO,KAAK;GACjC,GAAI,WAAW,GAAE,6BAA6B,UAAS,GAAG,EAAE;GAC7D;EACF;;AAGH,eAAe,WAAW,IAAgB,QAAoB;CAC5D,MAAM,EAAC,mBAAkB;CAEzB,MAAM,eACJ,eAAe,SAAS,cAAc,eAAe,QAAQ,KAAA;CAI/D,MAAM,eAAe,4BAA4B,IAAI,QAAQ,aAAa;CAC1E,IAAI;AACJ,KAAI,cAAc;AAEhB,mBAAiB,MAAM;AACvB,KAAG,OAAO,yBAAyB,eAAe,YAAY;OAMzD,cAAa,OAAM,MAAK,GAAG,QAAQ,EAAE,CAAC;CAG7C,MAAM,EAAC,YAAY,QAAO,cACxB,WACA,QACA,SACA,gBAAgB,UACjB;CACD,MAAM,EAAC,oBAAoB,gBAAe,OAAO;CACjD,MAAM,OAAO,MACX,YACA;EACE;EACA;EACA;EACA;EACA,OAAO,YAAY;EACnB,OAAO,QAAQ;EAChB,EACD;EAAC;EAAK,OAAO;EAAW,aAAa;EAAK,CAC3C;CACD,MAAM,EAAC,SAAS,SAAS,WAAU,UAAU;AAC7C,MAAK,GAAG,SAAS,OAAO;AACxB,MAAK,GAAG,UAAU,MAAM,WAAW;AACjC,MAAI,OACF,QAAO,0BAA0B,SAAS;WACjC,SAAS,EAClB,QAAO,+BAA+B,OAAO;MAE7C,UAAS;GAEX;AACF,OAAM;AACN,KAAI,CAAC,WAAW,OAAO,QAAQ,KAAK,CAClC,QAAO;AAET,KACE,kBACA,CAAC,eAAe,IAAI,OAAO,QAAQ,MAAM,eAAe,EACxD;AACA,KAAG,OAAO,8CAA8C;AACxD,eAAa,OAAO,QAAQ,KAAK;AACjC,SAAO;;AAET,QAAO;;AAGT,SAAS,eACP,IACA,SACA,UACA;CACA,MAAM,KAAK,IAAI,SAAS,IAAI,QAAQ;AACpC,KAAI;EACF,MAAM,EAAC,gBAAgB,cAAa,qBAClC,IAAI,gBAAgB,GAAG,CACxB;AACD,MAAI,mBAAmB,SAAS,gBAAgB;AAC9C,MAAG,OACD,yBAAyB,eAAe,iDAAiD,SAAS,kBAClG,SACD;AACD,UAAO;;AAET,MAAI,YAAY,SAAS,cAAc;AACrC,MAAG,OACD,2BAA2B,UAAU,gDAAgD,SAAS,eAC/F;AACD,UAAO;;AAET,KAAG,OACD,4BAA4B,eAAe,iBAAiB,UAAU,sCACtE,SACD;AACD,SAAO;UACA,GAAG;AACV,KAAG,QAAQ,2CAA2C,EAAE;AACxD,SAAO;WACC;AACR,KAAG,OAAO;;;AAId,SAAgB,0BACd,IACA,QACc;CACd,MAAM,EAAC,YAAY,QAAO,cAAc,aAAa,OAAO;AAC5D,IAAG,OAAO,iCAAiC,OAAO,WAAW,YAAY;AACzE,QAAO,MAAM,YAAY,CAAC,YAAY,EAAE;EACtC;EACA,OAAO;EACP,aAAa;EACd,CAAC;;AAGJ,SAAS,4BACP,IACA,QACA,cACyB;CACzB,MAAM,EAAC,SAAS,QAAQ,SAAS,WAAU,UAA0B;AAErE,EAAM,iBAAkB;EACtB,MAAM,QAAQ,IAAI,iBAAiB;AACnC,UAAQ,GAAG,gBAAgB,MAAM,OAAO,CAAC;AACzC,UAAQ,GAAG,iBAAiB,MAAM,OAAO,CAAC;AAE1C,OAAK,IAAI,IAAI,IAAK,KAAK;GACrB,IAAI,MAAwB;AAC5B,OAAI;IACF,IAAI,WAAW;IACf,MAAM,SAAS,MAAM,gBAAgB,IAAI,OAAO;AAChD,eAAW,MAAM,OAAO,QAAQ;AAI9B,aAAQ,IAAI,GAAG;AACf,gBAAW;;AAIb,QAAI,SACF;YAEK,GAAG;AACV,UAAM;;AAER,OAAI,CAAC,aACH,QAAO,OAAO,IAAI;AAUpB,MAAG,OACD,uCAAuC,IAAI,EAAE,4BAC7C,OAAO,IAAI,CA
CZ;AACD,OAAI;AACF,UAAM,MAAM,KAAM,MAAM,OAAO;YACxB,GAAG;AACV,WAAO,OAAO,EAAE;;;KAGlB;AAEJ,QAAO;;AAGT,SAAS,gBACP,IACA,QACkC;AAClC,kBAAiB,OAAO;CACxB,MAAM,EAAC,QAAQ,QAAQ,mBAAkB;AAUzC,QAP6B,IAAI,yBAC/B,IAHc,eAAe,OAAO,EAKpC,OAAO,IACP,eAAe,IAChB,CAE2B,gBAAgB,OAAO"}
1
+ {"version":3,"file":"commands.js","names":[],"sources":["../../../../../../zero-cache/src/services/litestream/commands.ts"],"sourcesContent":["import type {ChildProcess} from 'node:child_process';\nimport {spawn} from 'node:child_process';\nimport {existsSync} from 'node:fs';\nimport type {LogContext, LogLevel} from '@rocicorp/logger';\nimport {resolver} from '@rocicorp/resolver';\nimport {must} from '../../../../shared/src/must.ts';\nimport {sleep} from '../../../../shared/src/sleep.ts';\nimport {Database} from '../../../../zqlite/src/db.ts';\nimport {assertNormalized} from '../../config/normalize.ts';\nimport type {ZeroConfig} from '../../config/zero-config.ts';\nimport {deleteLiteDB} from '../../db/delete-lite-db.ts';\nimport {StatementRunner} from '../../db/statements.ts';\nimport {getShardConfig} from '../../types/shards.ts';\nimport type {Source} from '../../types/streams.ts';\nimport {ChangeStreamerHttpClient} from '../change-streamer/change-streamer-http.ts';\nimport type {\n SnapshotMessage,\n SnapshotStatus,\n} from '../change-streamer/snapshot.ts';\nimport {getSubscriptionState} from '../replicator/schema/replication-state.ts';\n\n// Retry for up to 3 minutes (60 times with 3 second delay).\n// Beyond that, let the container runner restart the task.\nconst MAX_RETRIES = 60;\nconst RETRY_INTERVAL_MS = 3000;\n\ntype ReplicaConstraints = {\n replicaVersion: string;\n minWatermark: string;\n};\n\nexport class BackupNotFoundException extends Error {\n static readonly name = 'BackupNotFoundException';\n\n constructor(backupURL: string | undefined) {\n super(`backup not found at ${backupURL}`);\n }\n}\n\n/**\n * @param replicaConstraints The constraints of the restored backup when\n * restoring for the change-streamer (replication-manager). 
For the\n * view-syncer, this should be unspecified so that the constraints are\n * retrieved from the replication-manager via the snapshot protocol.\n */\nexport async function restoreReplica(\n lc: LogContext,\n config: ZeroConfig,\n replicaConstraints: ReplicaConstraints | null,\n) {\n for (let i = 0; i < MAX_RETRIES; i++) {\n try {\n if (await tryRestore(lc, config, replicaConstraints)) {\n return;\n }\n } catch (e) {\n if (i === 0) {\n // A restore will fail if the `replicate` process creates a new\n // snapshot (and compacts old files) at the same time. Snapshots are\n // infrequent (e.g. once every 12 hours), and the scenario is\n // recoverable with a retry.\n lc.warn?.(`initial restore attempt failed. retrying once`, e);\n continue;\n }\n // If it fails again on the retry, though, bail.\n throw e;\n }\n if (replicaConstraints) {\n // This can happen if the litestream URL is purposefully changed to\n // force a resync.\n throw new BackupNotFoundException(config.litestream.backupURL);\n }\n lc.info?.(\n `replica not found. 
retrying in ${RETRY_INTERVAL_MS / 1000} seconds`,\n );\n await sleep(RETRY_INTERVAL_MS);\n }\n throw new Error(`max attempts exceeded restoring replica`);\n}\n\nfunction getLitestream(\n mode: 'restore' | 'replicate',\n config: ZeroConfig,\n logLevelOverride?: LogLevel,\n backupURLOverride?: string,\n): {\n litestream: string;\n env: NodeJS.ProcessEnv;\n} {\n const {\n executable,\n executableV5,\n restoreUsingV5,\n backupURL,\n logLevel,\n configPath,\n endpoint,\n port = config.port + 2,\n checkpointThresholdMB,\n minCheckpointPageCount = checkpointThresholdMB * 250, // SQLite page size is 4KB\n maxCheckpointPageCount = minCheckpointPageCount * 10,\n incrementalBackupIntervalMinutes,\n snapshotBackupIntervalHours,\n multipartConcurrency,\n multipartSize,\n } = config.litestream;\n\n // Set the snapshot interval to something smaller than x hours so that\n // the hourly check triggers on the hour, rather than the hour after.\n const snapshotBackupIntervalMinutes = snapshotBackupIntervalHours * 60 - 5;\n\n const litestream =\n // The v0.5.8+ litestream executable can restore from either the new LTX\n // format or the legacy WAL format, allowing forwards-compatibility /\n // rollback safety with zero-cache versions that backup to LTX.\n (mode === 'restore' && restoreUsingV5 ? executableV5 : executable) ??\n must(executable, `Missing --litestream-executable`);\n return {\n litestream,\n env: {\n ...process.env,\n ['ZERO_REPLICA_FILE']: config.replica.file,\n ['ZERO_LITESTREAM_BACKUP_URL']: must(backupURLOverride ?? backupURL),\n ['ZERO_LITESTREAM_MIN_CHECKPOINT_PAGE_COUNT']: String(\n minCheckpointPageCount,\n ),\n ['ZERO_LITESTREAM_MAX_CHECKPOINT_PAGE_COUNT']: String(\n maxCheckpointPageCount,\n ),\n ['ZERO_LITESTREAM_INCREMENTAL_BACKUP_INTERVAL_MINUTES']: String(\n incrementalBackupIntervalMinutes,\n ),\n ['ZERO_LITESTREAM_LOG_LEVEL']: logLevelOverride ?? 
logLevel,\n ['ZERO_LITESTREAM_SNAPSHOT_BACKUP_INTERVAL_MINUTES']: String(\n snapshotBackupIntervalMinutes,\n ),\n ['ZERO_LITESTREAM_MULTIPART_CONCURRENCY']: String(multipartConcurrency),\n ['ZERO_LITESTREAM_MULTIPART_SIZE']: String(multipartSize),\n ['ZERO_LOG_FORMAT']: config.log.format,\n ['LITESTREAM_CONFIG']: configPath,\n ['LITESTREAM_PORT']: String(port),\n ...(endpoint ? {['ZERO_LITESTREAM_ENDPOINT']: endpoint} : {}),\n },\n };\n}\n\nasync function tryRestore(\n lc: LogContext,\n config: ZeroConfig,\n replicaConstraints: ReplicaConstraints | null,\n) {\n let snapshotStatus: SnapshotStatus | undefined;\n if (!replicaConstraints) {\n // view-syncers fetch replica constraints from the replication-manager\n // via the snapshot protocol.\n snapshotStatus = await reserveAndGetSnapshotStatus(lc, config);\n lc.info?.(`restoring backup from ${snapshotStatus.backupURL}`);\n replicaConstraints = snapshotStatus;\n }\n\n const {litestream, env} = getLitestream(\n 'restore',\n config,\n 'debug', // Include all output from `litestream restore`, as it's minimal.\n snapshotStatus?.backupURL,\n );\n const {restoreParallelism: parallelism} = config.litestream;\n const proc = spawn(\n litestream,\n [\n 'restore',\n '-if-db-not-exists',\n '-if-replica-exists',\n '-parallelism',\n String(parallelism),\n config.replica.file,\n ],\n {env, stdio: 'inherit', windowsHide: true},\n );\n const {promise, resolve, reject} = resolver();\n proc.on('error', reject);\n proc.on('close', (code, signal) => {\n if (signal) {\n reject(`litestream killed with ${signal}`);\n } else if (code !== 0) {\n reject(`litestream exited with code ${code}`);\n } else {\n resolve();\n }\n });\n await promise;\n if (!existsSync(config.replica.file)) {\n return false;\n }\n if (!replicaIsValid(lc, config.replica.file, replicaConstraints)) {\n lc.info?.(`Deleting local replica and retrying restore`);\n deleteLiteDB(config.replica.file);\n return false;\n }\n return true;\n}\n\nfunction replicaIsValid(\n lc: 
LogContext,\n replica: string,\n constraints: ReplicaConstraints,\n) {\n const db = new Database(lc, replica);\n try {\n const {replicaVersion, watermark} = getSubscriptionState(\n new StatementRunner(db),\n );\n if (replicaVersion !== constraints.replicaVersion) {\n lc.warn?.(\n `Local replica version ${replicaVersion} does not match expected replicaVersion ${constraints.replicaVersion}`,\n constraints,\n );\n return false;\n }\n if (watermark < constraints.minWatermark) {\n lc.warn?.(\n `Local replica watermark ${watermark} is earlier than minWatermark ${constraints.minWatermark}`,\n );\n return false;\n }\n lc.info?.(\n `Local replica at version ${replicaVersion} and watermark ${watermark} is compatible`,\n constraints,\n );\n return true;\n } catch (e) {\n lc.error?.('Error while validating restored replica', e);\n return false;\n } finally {\n db.close();\n }\n}\n\nexport function startReplicaBackupProcess(\n lc: LogContext,\n config: ZeroConfig,\n): ChildProcess {\n const {litestream, env} = getLitestream('replicate', config);\n lc.info?.(`starting litestream backup to ${config.litestream.backupURL}`);\n return spawn(litestream, ['replicate'], {\n env,\n stdio: 'inherit',\n windowsHide: true,\n });\n}\n\nfunction reserveAndGetSnapshotStatus(\n lc: LogContext,\n config: ZeroConfig,\n): Promise<SnapshotStatus> {\n const {promise: status, resolve, reject} = resolver<SnapshotStatus>();\n\n void (async function () {\n const abort = new AbortController();\n process.on('SIGINT', () => abort.abort());\n process.on('SIGTERM', () => abort.abort());\n\n for (let i = 0; ; i++) {\n let err: unknown;\n try {\n let resolved = false;\n const stream = await reserveSnapshot(lc, config);\n for await (const msg of stream) {\n // Capture the value of the status message that the change-streamer\n // (i.e. 
BackupMonitor) returns, and hold the connection open to\n // \"reserve\" the snapshot and prevent change log cleanup.\n resolve(msg[1]);\n resolved = true;\n }\n // The change-streamer itself closes the connection when the\n // subscription is started (or the reservation retried).\n if (resolved) {\n break;\n }\n } catch (e) {\n err = e;\n }\n // Retry in the view-syncer since it cannot proceed until it connects\n // to a (compatible) replication-manager. In particular, a\n // replication-manager that does not support the view-syncer's\n // change-streamer protocol will close the stream with an error; this\n // retry logic essentially delays the startup of a view-syncer until\n // a compatible replication-manager has been rolled out, allowing\n // replication-manager and view-syncer services to be updated in\n // parallel.\n lc.warn?.(\n `Unable to reserve snapshot (attempt ${i + 1}). Retrying in 5 seconds.`,\n String(err),\n );\n try {\n await sleep(5000, abort.signal);\n } catch (e) {\n return reject(e);\n }\n }\n })();\n\n return status;\n}\n\nfunction reserveSnapshot(\n lc: LogContext,\n config: ZeroConfig,\n): Promise<Source<SnapshotMessage>> {\n assertNormalized(config);\n const {taskID, change, changeStreamer} = config;\n const shardID = getShardConfig(config);\n\n const changeStreamerClient = new ChangeStreamerHttpClient(\n lc,\n shardID,\n change.db,\n changeStreamer.uri,\n );\n\n return 
changeStreamerClient.reserveSnapshot(taskID);\n}\n"],"mappings":";;;;;;;;;;;;;AAuBA,IAAM,cAAc;AACpB,IAAM,oBAAoB;AAO1B,IAAa,0BAAb,cAA6C,MAAM;CACjD,OAAgB,OAAO;CAEvB,YAAY,WAA+B;AACzC,QAAM,uBAAuB,YAAY;;;;;;;;;AAU7C,eAAsB,eACpB,IACA,QACA,oBACA;AACA,MAAK,IAAI,IAAI,GAAG,IAAI,aAAa,KAAK;AACpC,MAAI;AACF,OAAI,MAAM,WAAW,IAAI,QAAQ,mBAAmB,CAClD;WAEK,GAAG;AACV,OAAI,MAAM,GAAG;AAKX,OAAG,OAAO,iDAAiD,EAAE;AAC7D;;AAGF,SAAM;;AAER,MAAI,mBAGF,OAAM,IAAI,wBAAwB,OAAO,WAAW,UAAU;AAEhE,KAAG,OACD,kCAAkC,oBAAoB,IAAK,UAC5D;AACD,QAAM,MAAM,kBAAkB;;AAEhC,OAAM,IAAI,MAAM,0CAA0C;;AAG5D,SAAS,cACP,MACA,QACA,kBACA,mBAIA;CACA,MAAM,EACJ,YACA,cACA,gBACA,WACA,UACA,YACA,UACA,OAAO,OAAO,OAAO,GACrB,uBACA,yBAAyB,wBAAwB,KACjD,yBAAyB,yBAAyB,IAClD,kCACA,6BACA,sBACA,kBACE,OAAO;CAIX,MAAM,gCAAgC,8BAA8B,KAAK;AAQzE,QAAO;EACL,aAHC,SAAS,aAAa,iBAAiB,eAAe,eACvD,KAAK,YAAY,kCAAkC;EAGnD,KAAK;GACH,GAAG,QAAQ;IACV,sBAAsB,OAAO,QAAQ;IACrC,+BAA+B,KAAK,qBAAqB,UAAU;IACnE,8CAA8C,OAC7C,uBACD;IACA,8CAA8C,OAC7C,uBACD;IACA,wDAAwD,OACvD,iCACD;IACA,8BAA8B,oBAAoB;IAClD,qDAAqD,OACpD,8BACD;IACA,0CAA0C,OAAO,qBAAqB;IACtE,mCAAmC,OAAO,cAAc;IACxD,oBAAoB,OAAO,IAAI;IAC/B,sBAAsB;IACtB,oBAAoB,OAAO,KAAK;GACjC,GAAI,WAAW,GAAE,6BAA6B,UAAS,GAAG,EAAE;GAC7D;EACF;;AAGH,eAAe,WACb,IACA,QACA,oBACA;CACA,IAAI;AACJ,KAAI,CAAC,oBAAoB;AAGvB,mBAAiB,MAAM,4BAA4B,IAAI,OAAO;AAC9D,KAAG,OAAO,yBAAyB,eAAe,YAAY;AAC9D,uBAAqB;;CAGvB,MAAM,EAAC,YAAY,QAAO,cACxB,WACA,QACA,SACA,gBAAgB,UACjB;CACD,MAAM,EAAC,oBAAoB,gBAAe,OAAO;CACjD,MAAM,OAAO,MACX,YACA;EACE;EACA;EACA;EACA;EACA,OAAO,YAAY;EACnB,OAAO,QAAQ;EAChB,EACD;EAAC;EAAK,OAAO;EAAW,aAAa;EAAK,CAC3C;CACD,MAAM,EAAC,SAAS,SAAS,WAAU,UAAU;AAC7C,MAAK,GAAG,SAAS,OAAO;AACxB,MAAK,GAAG,UAAU,MAAM,WAAW;AACjC,MAAI,OACF,QAAO,0BAA0B,SAAS;WACjC,SAAS,EAClB,QAAO,+BAA+B,OAAO;MAE7C,UAAS;GAEX;AACF,OAAM;AACN,KAAI,CAAC,WAAW,OAAO,QAAQ,KAAK,CAClC,QAAO;AAET,KAAI,CAAC,eAAe,IAAI,OAAO,QAAQ,MAAM,mBAAmB,EAAE;AAChE,KAAG,OAAO,8CAA8C;AACxD,eAAa,OAAO,QAAQ,KAAK;AACjC,SAAO;;AAET,QAAO;;AAGT,SAAS,eACP,IACA,SACA,aACA;CACA,MAAM,KAAK,IAAI,SAAS,IAAI,QAAQ;AACpC,KAAI;EACF,MAAM,EAAC,gBAAgB,
cAAa,qBAClC,IAAI,gBAAgB,GAAG,CACxB;AACD,MAAI,mBAAmB,YAAY,gBAAgB;AACjD,MAAG,OACD,yBAAyB,eAAe,0CAA0C,YAAY,kBAC9F,YACD;AACD,UAAO;;AAET,MAAI,YAAY,YAAY,cAAc;AACxC,MAAG,OACD,2BAA2B,UAAU,gCAAgC,YAAY,eAClF;AACD,UAAO;;AAET,KAAG,OACD,4BAA4B,eAAe,iBAAiB,UAAU,iBACtE,YACD;AACD,SAAO;UACA,GAAG;AACV,KAAG,QAAQ,2CAA2C,EAAE;AACxD,SAAO;WACC;AACR,KAAG,OAAO;;;AAId,SAAgB,0BACd,IACA,QACc;CACd,MAAM,EAAC,YAAY,QAAO,cAAc,aAAa,OAAO;AAC5D,IAAG,OAAO,iCAAiC,OAAO,WAAW,YAAY;AACzE,QAAO,MAAM,YAAY,CAAC,YAAY,EAAE;EACtC;EACA,OAAO;EACP,aAAa;EACd,CAAC;;AAGJ,SAAS,4BACP,IACA,QACyB;CACzB,MAAM,EAAC,SAAS,QAAQ,SAAS,WAAU,UAA0B;AAErE,EAAM,iBAAkB;EACtB,MAAM,QAAQ,IAAI,iBAAiB;AACnC,UAAQ,GAAG,gBAAgB,MAAM,OAAO,CAAC;AACzC,UAAQ,GAAG,iBAAiB,MAAM,OAAO,CAAC;AAE1C,OAAK,IAAI,IAAI,IAAK,KAAK;GACrB,IAAI;AACJ,OAAI;IACF,IAAI,WAAW;IACf,MAAM,SAAS,MAAM,gBAAgB,IAAI,OAAO;AAChD,eAAW,MAAM,OAAO,QAAQ;AAI9B,aAAQ,IAAI,GAAG;AACf,gBAAW;;AAIb,QAAI,SACF;YAEK,GAAG;AACV,UAAM;;AAUR,MAAG,OACD,uCAAuC,IAAI,EAAE,4BAC7C,OAAO,IAAI,CACZ;AACD,OAAI;AACF,UAAM,MAAM,KAAM,MAAM,OAAO;YACxB,GAAG;AACV,WAAO,OAAO,EAAE;;;KAGlB;AAEJ,QAAO;;AAGT,SAAS,gBACP,IACA,QACkC;AAClC,kBAAiB,OAAO;CACxB,MAAM,EAAC,QAAQ,QAAQ,mBAAkB;AAUzC,QAP6B,IAAI,yBAC/B,IAHc,eAAe,OAAO,EAKpC,OAAO,IACP,eAAe,IAChB,CAE2B,gBAAgB,OAAO"}
@@ -6,13 +6,13 @@ import { ProtocolError, isProtocolError } from "../../../../zero-protocol/src/er
6
6
  import { CRUD } from "../../../../zero-protocol/src/mutation-type-enum.js";
7
7
  import { primaryKeyValueSchema } from "../../../../zero-protocol/src/primary-key.js";
8
8
  import "../../../../zero-protocol/src/push.js";
9
- import { MutationAlreadyProcessedError } from "./error.js";
10
- import { Database } from "../../../../zqlite/src/db.js";
11
9
  import { upstreamSchema } from "../../types/shards.js";
12
10
  import "../../config/zero-config.js";
11
+ import { MutationAlreadyProcessedError } from "./error.js";
12
+ import { Database } from "../../../../zqlite/src/db.js";
13
+ import { getOrCreateCounter } from "../../observability/metrics.js";
13
14
  import { SERIALIZABLE } from "../../db/mode-enum.js";
14
15
  import { runTx } from "../../db/run-transaction.js";
15
- import { getOrCreateCounter } from "../../observability/metrics.js";
16
16
  import { recordMutation } from "../../server/anonymous-otel-start.js";
17
17
  import { WriteAuthorizerImpl } from "../../auth/write-authorizer.js";
18
18
  import { SlidingWindowLimiter } from "../limiter/sliding-window-limiter.js";
@@ -37,19 +37,7 @@ export declare class PusherService implements Service, Pusher {
37
37
  }] | ["connected", {
38
38
  timestamp?: number | undefined;
39
39
  wsid: string;
40
- }] | import("../../../../zero-protocol/src/error.ts").ErrorMessage | ["transformError", ({
41
- message?: string | undefined;
42
- details?: import("../../../../shared/src/json.ts").ReadonlyJSONValue | undefined;
43
- error: "app";
44
- id: string;
45
- name: string;
46
- } | {
47
- details?: import("../../../../shared/src/json.ts").ReadonlyJSONValue | undefined;
48
- error: "parse";
49
- id: string;
50
- name: string;
51
- message: string;
52
- })[]] | ["pushResponse", {
40
+ }] | import("../../../../zero-protocol/src/error.ts").ErrorMessage | ["pushResponse", {
53
41
  mutationIDs?: {
54
42
  id: number;
55
43
  clientID: string;
@@ -93,7 +81,19 @@ export declare class PusherService implements Service, Pusher {
93
81
  data?: import("../../../../shared/src/json.ts").ReadonlyJSONValue | undefined;
94
82
  };
95
83
  }[];
96
- }] | ["inspect", {
84
+ }] | ["transformError", ({
85
+ message?: string | undefined;
86
+ details?: import("../../../../shared/src/json.ts").ReadonlyJSONValue | undefined;
87
+ error: "app";
88
+ id: string;
89
+ name: string;
90
+ } | {
91
+ details?: import("../../../../shared/src/json.ts").ReadonlyJSONValue | undefined;
92
+ error: "parse";
93
+ id: string;
94
+ name: string;
95
+ message: string;
96
+ })[]] | ["inspect", {
97
97
  id: string;
98
98
  op: "queries";
99
99
  value: {
@@ -336,19 +336,7 @@ export declare class PusherService implements Service, Pusher {
336
336
  }] | ["connected", {
337
337
  timestamp?: number | undefined;
338
338
  wsid: string;
339
- }] | import("../../../../zero-protocol/src/error.ts").ErrorMessage | ["transformError", ({
340
- message?: string | undefined;
341
- details?: import("../../../../shared/src/json.ts").ReadonlyJSONValue | undefined;
342
- error: "app";
343
- id: string;
344
- name: string;
345
- } | {
346
- details?: import("../../../../shared/src/json.ts").ReadonlyJSONValue | undefined;
347
- error: "parse";
348
- id: string;
349
- name: string;
350
- message: string;
351
- })[]] | ["pushResponse", {
339
+ }] | import("../../../../zero-protocol/src/error.ts").ErrorMessage | ["pushResponse", {
352
340
  mutationIDs?: {
353
341
  id: number;
354
342
  clientID: string;
@@ -392,7 +380,19 @@ export declare class PusherService implements Service, Pusher {
392
380
  data?: import("../../../../shared/src/json.ts").ReadonlyJSONValue | undefined;
393
381
  };
394
382
  }[];
395
- }] | ["inspect", {
383
+ }] | ["transformError", ({
384
+ message?: string | undefined;
385
+ details?: import("../../../../shared/src/json.ts").ReadonlyJSONValue | undefined;
386
+ error: "app";
387
+ id: string;
388
+ name: string;
389
+ } | {
390
+ details?: import("../../../../shared/src/json.ts").ReadonlyJSONValue | undefined;
391
+ error: "parse";
392
+ id: string;
393
+ name: string;
394
+ message: string;
395
+ })[]] | ["inspect", {
396
396
  id: string;
397
397
  op: "queries";
398
398
  value: {
@@ -2,15 +2,15 @@ import { assert, unreachable } from "../../../../shared/src/asserts.js";
2
2
  import { AbortError } from "../../../../shared/src/abort-error.js";
3
3
  import { must } from "../../../../shared/src/must.js";
4
4
  import { stringify } from "../../../../shared/src/bigint-json.js";
5
- import { ChangeLog } from "./schema/change-log.js";
6
5
  import { ZERO_VERSION_COLUMN_NAME } from "./schema/constants.js";
7
6
  import { liteRow } from "../../types/lite.js";
8
7
  import { liteTableName } from "../../types/names.js";
9
8
  import { mapPostgresToLite, mapPostgresToLiteColumn, mapPostgresToLiteIndex } from "../../db/pg-to-lite.js";
10
9
  import { ColumnMetadataStore } from "./schema/column-metadata.js";
11
10
  import { TableMetadataTracker } from "./schema/table-metadata.js";
12
- import { updateReplicationWatermark } from "./schema/replication-state.js";
13
11
  import { computeZqlSpecs, listIndexes, listTables } from "../../db/lite-tables.js";
12
+ import { ChangeLog } from "./schema/change-log.js";
13
+ import { updateReplicationWatermark } from "./schema/replication-state.js";
14
14
  import { id } from "../../types/sql.js";
15
15
  import { createLiteIndexStatement, createLiteTableStatement, liteColumnDef } from "../../db/create.js";
16
16
  import { SqliteError } from "@rocicorp/zero-sqlite3";
@@ -1,6 +1,6 @@
1
1
  import { AbortError } from "../../../../shared/src/abort-error.js";
2
- import { RunningState } from "../running-state.js";
3
2
  import { getOrCreateCounter } from "../../observability/metrics.js";
3
+ import { RunningState } from "../running-state.js";
4
4
  import { errorTypeToReadableName } from "../change-streamer/change-streamer.js";
5
5
  import { Notifier } from "./notifier.js";
6
6
  import { ReplicationReportRecorder } from "./reporter/recorder.js";
@@ -1,9 +1,9 @@
1
1
  import { parse, valita_exports } from "../../../../../shared/src/valita.js";
2
2
  import { jsonObjectSchema, stringify } from "../../../../../shared/src/bigint-json.js";
3
- import { CREATE_CHANGELOG_SCHEMA } from "./change-log.js";
4
3
  import "./constants.js";
5
4
  import { CREATE_COLUMN_METADATA_TABLE } from "./column-metadata.js";
6
5
  import { CREATE_TABLE_METADATA_TABLE } from "./table-metadata.js";
6
+ import { CREATE_CHANGELOG_SCHEMA } from "./change-log.js";
7
7
  //#region ../zero-cache/src/services/replicator/schema/replication-state.ts
8
8
  /**
9
9
  * Replication metadata, used for incremental view maintenance and catchup.
@@ -1 +1 @@
1
- {"version":3,"file":"write-worker-client.js","names":["#worker","#rejectAll","#errorHandler","#pending","#terminated","#call"],"sources":["../../../../../../zero-cache/src/services/replicator/write-worker-client.ts"],"sourcesContent":["import {resolver, type Resolver} from '@rocicorp/resolver';\nimport {Worker} from 'node:worker_threads';\nimport {assert} from '../../../../shared/src/asserts.ts';\nimport type {LogConfig} from '../../../../shared/src/logging.ts';\nimport type {Database} from '../../../../zqlite/src/db.ts';\nimport {WRITE_WORKER_URL} from '../../server/worker-urls.ts';\nimport type {ChangeStreamData} from '../change-source/protocol/current/downstream.ts';\nimport type {ChangeProcessorMode, CommitResult} from './change-processor.ts';\nimport type {SubscriptionState} from './schema/replication-state.ts';\n\nexport type PragmaConfig = {\n busyTimeout: number;\n analysisLimit: number;\n walAutocheckpoint?: number | undefined;\n};\n\ntype ErrorHandler = (err: Error) => void;\n\n/**\n * Interface for a write worker that processes replication messages.\n */\nexport interface WriteWorkerClient {\n getSubscriptionState(): Promise<SubscriptionState>;\n processMessage(downstream: ChangeStreamData): Promise<CommitResult | null>;\n abort(): void;\n stop(): Promise<void>;\n onError(handler: ErrorHandler): void;\n}\n\n// Wire protocol types — errors are passed directly via structured clone\nexport type ArgsMap = {\n init: [string, ChangeProcessorMode, PragmaConfig, LogConfig];\n getSubscriptionState: [];\n processMessage: [ChangeStreamData];\n abort: [];\n stop: [];\n};\n\nexport type Method = keyof ArgsMap;\n\nexport type Request<M extends Method = Method> = {method: M; args: ArgsMap[M]};\n\nexport type ResultMap = {\n init: void;\n getSubscriptionState: SubscriptionState;\n processMessage: CommitResult | null;\n abort: void;\n stop: void;\n};\n\nexport type Response<M extends Method = Method> =\n | {method: M; result: ResultMap[M]; error?: undefined}\n | 
{method: M; error: unknown; result?: undefined};\n\nexport type WriteError = {writeError: Error};\n\nexport function applyPragmas(db: Database, pragmas: PragmaConfig) {\n db.pragma(`busy_timeout = ${pragmas.busyTimeout}`);\n db.pragma(`analysis_limit = ${pragmas.analysisLimit}`);\n if (pragmas.walAutocheckpoint !== undefined) {\n db.pragma(`wal_autocheckpoint = ${pragmas.walAutocheckpoint}`);\n }\n}\n\n/**\n * Delegates SQLite writes to a worker_thread,\n * keeping the main event loop free for WebSocket heartbeats and IPC.\n */\nexport class ThreadWriteWorkerClient implements WriteWorkerClient {\n readonly #worker: Worker;\n #pending: Resolver<unknown, Error> | null = null;\n #errorHandler: ErrorHandler = () => {};\n #terminated = false;\n\n constructor() {\n this.#worker = new Worker(WRITE_WORKER_URL);\n\n this.#worker.on('message', (msg: Response | WriteError) => {\n if ('writeError' in msg) {\n const error =\n msg.writeError instanceof Error\n ? msg.writeError\n : new Error(String(msg.writeError));\n this.#rejectAll(error);\n this.#errorHandler(error);\n return;\n }\n const r = this.#pending;\n if (!r) return; // stale abort response\n this.#pending = null;\n if (msg.error !== undefined) {\n r.reject(\n msg.error instanceof Error ? 
msg.error : new Error(String(msg.error)),\n );\n } else {\n r.resolve(msg.result);\n }\n });\n\n this.#worker.on('error', (err: Error) => {\n this.#rejectAll(err);\n this.#errorHandler(err);\n });\n\n this.#worker.on('exit', (code: number) => {\n this.#terminated = true;\n if (code !== 0) {\n const err = new Error(`Worker exited with code ${code}`);\n this.#rejectAll(err);\n this.#errorHandler(err);\n }\n });\n }\n\n #rejectAll(err: Error) {\n const r = this.#pending;\n if (r) {\n this.#pending = null;\n r.reject(err);\n }\n }\n\n #call<M extends Method>(method: M, args: ArgsMap[M]): Promise<ResultMap[M]> {\n assert(this.#pending === null, `concurrent call: ${method}`);\n const r = resolver<ResultMap[M]>();\n this.#pending = r as Resolver<unknown, Error>;\n this.#worker.postMessage({method, args} satisfies Request);\n return r.promise;\n }\n\n init(\n dbPath: string,\n mode: ChangeProcessorMode,\n pragmas: PragmaConfig,\n logConfig: LogConfig,\n ): Promise<void> {\n return this.#call('init', [dbPath, mode, pragmas, logConfig]);\n }\n\n getSubscriptionState(): Promise<SubscriptionState> {\n return this.#call('getSubscriptionState', []);\n }\n\n processMessage(downstream: ChangeStreamData): Promise<CommitResult | null> {\n return this.#call('processMessage', [downstream]);\n }\n\n abort(): void {\n if (!this.#terminated) {\n this.#worker.postMessage({method: 'abort', args: []} satisfies Request);\n }\n }\n\n async stop(): Promise<void> {\n await this.#call('stop', []);\n if (!this.#terminated) {\n await this.#worker.terminate();\n }\n }\n\n onError(handler: ErrorHandler): void {\n this.#errorHandler = handler;\n 
}\n}\n"],"mappings":";;;;;AAwDA,SAAgB,aAAa,IAAc,SAAuB;AAChE,IAAG,OAAO,kBAAkB,QAAQ,cAAc;AAClD,IAAG,OAAO,oBAAoB,QAAQ,gBAAgB;AACtD,KAAI,QAAQ,sBAAsB,KAAA,EAChC,IAAG,OAAO,wBAAwB,QAAQ,oBAAoB;;;;;;AAQlE,IAAa,0BAAb,MAAkE;CAChE;CACA,WAA4C;CAC5C,sBAAoC;CACpC,cAAc;CAEd,cAAc;AACZ,QAAA,SAAe,IAAI,OAAO,iBAAiB;AAE3C,QAAA,OAAa,GAAG,YAAY,QAA+B;AACzD,OAAI,gBAAgB,KAAK;IACvB,MAAM,QACJ,IAAI,sBAAsB,QACtB,IAAI,aACJ,IAAI,MAAM,OAAO,IAAI,WAAW,CAAC;AACvC,UAAA,UAAgB,MAAM;AACtB,UAAA,aAAmB,MAAM;AACzB;;GAEF,MAAM,IAAI,MAAA;AACV,OAAI,CAAC,EAAG;AACR,SAAA,UAAgB;AAChB,OAAI,IAAI,UAAU,KAAA,EAChB,GAAE,OACA,IAAI,iBAAiB,QAAQ,IAAI,QAAQ,IAAI,MAAM,OAAO,IAAI,MAAM,CAAC,CACtE;OAED,GAAE,QAAQ,IAAI,OAAO;IAEvB;AAEF,QAAA,OAAa,GAAG,UAAU,QAAe;AACvC,SAAA,UAAgB,IAAI;AACpB,SAAA,aAAmB,IAAI;IACvB;AAEF,QAAA,OAAa,GAAG,SAAS,SAAiB;AACxC,SAAA,aAAmB;AACnB,OAAI,SAAS,GAAG;IACd,MAAM,sBAAM,IAAI,MAAM,2BAA2B,OAAO;AACxD,UAAA,UAAgB,IAAI;AACpB,UAAA,aAAmB,IAAI;;IAEzB;;CAGJ,WAAW,KAAY;EACrB,MAAM,IAAI,MAAA;AACV,MAAI,GAAG;AACL,SAAA,UAAgB;AAChB,KAAE,OAAO,IAAI;;;CAIjB,MAAwB,QAAW,MAAyC;AAC1E,SAAO,MAAA,YAAkB,MAAM,oBAAoB,SAAS;EAC5D,MAAM,IAAI,UAAwB;AAClC,QAAA,UAAgB;AAChB,QAAA,OAAa,YAAY;GAAC;GAAQ;GAAK,CAAmB;AAC1D,SAAO,EAAE;;CAGX,KACE,QACA,MACA,SACA,WACe;AACf,SAAO,MAAA,KAAW,QAAQ;GAAC;GAAQ;GAAM;GAAS;GAAU,CAAC;;CAG/D,uBAAmD;AACjD,SAAO,MAAA,KAAW,wBAAwB,EAAE,CAAC;;CAG/C,eAAe,YAA4D;AACzE,SAAO,MAAA,KAAW,kBAAkB,CAAC,WAAW,CAAC;;CAGnD,QAAc;AACZ,MAAI,CAAC,MAAA,WACH,OAAA,OAAa,YAAY;GAAC,QAAQ;GAAS,MAAM,EAAE;GAAC,CAAmB;;CAI3E,MAAM,OAAsB;AAC1B,QAAM,MAAA,KAAW,QAAQ,EAAE,CAAC;AAC5B,MAAI,CAAC,MAAA,WACH,OAAM,MAAA,OAAa,WAAW;;CAIlC,QAAQ,SAA6B;AACnC,QAAA,eAAqB"}
1
+ {"version":3,"file":"write-worker-client.js","names":["#worker","#rejectAll","#errorHandler","#pending","#terminated","#call"],"sources":["../../../../../../zero-cache/src/services/replicator/write-worker-client.ts"],"sourcesContent":["import {Worker} from 'node:worker_threads';\nimport {resolver, type Resolver} from '@rocicorp/resolver';\nimport {assert} from '../../../../shared/src/asserts.ts';\nimport type {LogConfig} from '../../../../shared/src/logging.ts';\nimport type {Database} from '../../../../zqlite/src/db.ts';\nimport {WRITE_WORKER_URL} from '../../server/worker-urls.ts';\nimport type {ChangeStreamData} from '../change-source/protocol/current/downstream.ts';\nimport type {ChangeProcessorMode, CommitResult} from './change-processor.ts';\nimport type {SubscriptionState} from './schema/replication-state.ts';\n\nexport type PragmaConfig = {\n busyTimeout: number;\n analysisLimit: number;\n walAutocheckpoint?: number | undefined;\n};\n\ntype ErrorHandler = (err: Error) => void;\n\n/**\n * Interface for a write worker that processes replication messages.\n */\nexport interface WriteWorkerClient {\n getSubscriptionState(): Promise<SubscriptionState>;\n processMessage(downstream: ChangeStreamData): Promise<CommitResult | null>;\n abort(): void;\n stop(): Promise<void>;\n onError(handler: ErrorHandler): void;\n}\n\n// Wire protocol types — errors are passed directly via structured clone\nexport type ArgsMap = {\n init: [string, ChangeProcessorMode, PragmaConfig, LogConfig];\n getSubscriptionState: [];\n processMessage: [ChangeStreamData];\n abort: [];\n stop: [];\n};\n\nexport type Method = keyof ArgsMap;\n\nexport type Request<M extends Method = Method> = {method: M; args: ArgsMap[M]};\n\nexport type ResultMap = {\n init: void;\n getSubscriptionState: SubscriptionState;\n processMessage: CommitResult | null;\n abort: void;\n stop: void;\n};\n\nexport type Response<M extends Method = Method> =\n | {method: M; result: ResultMap[M]; error?: undefined}\n | 
{method: M; error: unknown; result?: undefined};\n\nexport type WriteError = {writeError: Error};\n\nexport function applyPragmas(db: Database, pragmas: PragmaConfig) {\n db.pragma(`busy_timeout = ${pragmas.busyTimeout}`);\n db.pragma(`analysis_limit = ${pragmas.analysisLimit}`);\n if (pragmas.walAutocheckpoint !== undefined) {\n db.pragma(`wal_autocheckpoint = ${pragmas.walAutocheckpoint}`);\n }\n}\n\n/**\n * Delegates SQLite writes to a worker_thread,\n * keeping the main event loop free for WebSocket heartbeats and IPC.\n */\nexport class ThreadWriteWorkerClient implements WriteWorkerClient {\n readonly #worker: Worker;\n #pending: Resolver<unknown, Error> | null = null;\n #errorHandler: ErrorHandler = () => {};\n #terminated = false;\n\n constructor() {\n this.#worker = new Worker(WRITE_WORKER_URL);\n\n this.#worker.on('message', (msg: Response | WriteError) => {\n if ('writeError' in msg) {\n const error =\n msg.writeError instanceof Error\n ? msg.writeError\n : new Error(String(msg.writeError));\n this.#rejectAll(error);\n this.#errorHandler(error);\n return;\n }\n const r = this.#pending;\n if (!r) return; // stale abort response\n this.#pending = null;\n if (msg.error !== undefined) {\n r.reject(\n msg.error instanceof Error ? 
msg.error : new Error(String(msg.error)),\n );\n } else {\n r.resolve(msg.result);\n }\n });\n\n this.#worker.on('error', (err: Error) => {\n this.#rejectAll(err);\n this.#errorHandler(err);\n });\n\n this.#worker.on('exit', (code: number) => {\n this.#terminated = true;\n if (code !== 0) {\n const err = new Error(`Worker exited with code ${code}`);\n this.#rejectAll(err);\n this.#errorHandler(err);\n }\n });\n }\n\n #rejectAll(err: Error) {\n const r = this.#pending;\n if (r) {\n this.#pending = null;\n r.reject(err);\n }\n }\n\n #call<M extends Method>(method: M, args: ArgsMap[M]): Promise<ResultMap[M]> {\n assert(this.#pending === null, `concurrent call: ${method}`);\n const r = resolver<ResultMap[M]>();\n this.#pending = r as Resolver<unknown, Error>;\n this.#worker.postMessage({method, args} satisfies Request);\n return r.promise;\n }\n\n init(\n dbPath: string,\n mode: ChangeProcessorMode,\n pragmas: PragmaConfig,\n logConfig: LogConfig,\n ): Promise<void> {\n return this.#call('init', [dbPath, mode, pragmas, logConfig]);\n }\n\n getSubscriptionState(): Promise<SubscriptionState> {\n return this.#call('getSubscriptionState', []);\n }\n\n processMessage(downstream: ChangeStreamData): Promise<CommitResult | null> {\n return this.#call('processMessage', [downstream]);\n }\n\n abort(): void {\n if (!this.#terminated) {\n this.#worker.postMessage({method: 'abort', args: []} satisfies Request);\n }\n }\n\n async stop(): Promise<void> {\n await this.#call('stop', []);\n if (!this.#terminated) {\n await this.#worker.terminate();\n }\n }\n\n onError(handler: ErrorHandler): void {\n this.#errorHandler = handler;\n 
}\n}\n"],"mappings":";;;;;AAwDA,SAAgB,aAAa,IAAc,SAAuB;AAChE,IAAG,OAAO,kBAAkB,QAAQ,cAAc;AAClD,IAAG,OAAO,oBAAoB,QAAQ,gBAAgB;AACtD,KAAI,QAAQ,sBAAsB,KAAA,EAChC,IAAG,OAAO,wBAAwB,QAAQ,oBAAoB;;;;;;AAQlE,IAAa,0BAAb,MAAkE;CAChE;CACA,WAA4C;CAC5C,sBAAoC;CACpC,cAAc;CAEd,cAAc;AACZ,QAAA,SAAe,IAAI,OAAO,iBAAiB;AAE3C,QAAA,OAAa,GAAG,YAAY,QAA+B;AACzD,OAAI,gBAAgB,KAAK;IACvB,MAAM,QACJ,IAAI,sBAAsB,QACtB,IAAI,aACJ,IAAI,MAAM,OAAO,IAAI,WAAW,CAAC;AACvC,UAAA,UAAgB,MAAM;AACtB,UAAA,aAAmB,MAAM;AACzB;;GAEF,MAAM,IAAI,MAAA;AACV,OAAI,CAAC,EAAG;AACR,SAAA,UAAgB;AAChB,OAAI,IAAI,UAAU,KAAA,EAChB,GAAE,OACA,IAAI,iBAAiB,QAAQ,IAAI,QAAQ,IAAI,MAAM,OAAO,IAAI,MAAM,CAAC,CACtE;OAED,GAAE,QAAQ,IAAI,OAAO;IAEvB;AAEF,QAAA,OAAa,GAAG,UAAU,QAAe;AACvC,SAAA,UAAgB,IAAI;AACpB,SAAA,aAAmB,IAAI;IACvB;AAEF,QAAA,OAAa,GAAG,SAAS,SAAiB;AACxC,SAAA,aAAmB;AACnB,OAAI,SAAS,GAAG;IACd,MAAM,sBAAM,IAAI,MAAM,2BAA2B,OAAO;AACxD,UAAA,UAAgB,IAAI;AACpB,UAAA,aAAmB,IAAI;;IAEzB;;CAGJ,WAAW,KAAY;EACrB,MAAM,IAAI,MAAA;AACV,MAAI,GAAG;AACL,SAAA,UAAgB;AAChB,KAAE,OAAO,IAAI;;;CAIjB,MAAwB,QAAW,MAAyC;AAC1E,SAAO,MAAA,YAAkB,MAAM,oBAAoB,SAAS;EAC5D,MAAM,IAAI,UAAwB;AAClC,QAAA,UAAgB;AAChB,QAAA,OAAa,YAAY;GAAC;GAAQ;GAAK,CAAmB;AAC1D,SAAO,EAAE;;CAGX,KACE,QACA,MACA,SACA,WACe;AACf,SAAO,MAAA,KAAW,QAAQ;GAAC;GAAQ;GAAM;GAAS;GAAU,CAAC;;CAG/D,uBAAmD;AACjD,SAAO,MAAA,KAAW,wBAAwB,EAAE,CAAC;;CAG/C,eAAe,YAA4D;AACzE,SAAO,MAAA,KAAW,kBAAkB,CAAC,WAAW,CAAC;;CAGnD,QAAc;AACZ,MAAI,CAAC,MAAA,WACH,OAAA,OAAa,YAAY;GAAC,QAAQ;GAAS,MAAM,EAAE;GAAC,CAAmB;;CAI3E,MAAM,OAAsB;AAC1B,QAAM,MAAA,KAAW,QAAQ,EAAE,CAAC;AAC5B,MAAI,CAAC,MAAA,WACH,OAAM,MAAA,OAAa,WAAW;;CAIlC,QAAQ,SAA6B;AACnC,QAAA,eAAqB"}
@@ -1,8 +1,8 @@
1
1
  import { must } from "../../../../shared/src/must.js";
2
2
  import { Database } from "../../../../zqlite/src/db.js";
3
- import { createLogContext } from "../../server/logging.js";
4
- import { StatementRunner } from "../../db/statements.js";
5
3
  import { getSubscriptionState } from "./schema/replication-state.js";
4
+ import { StatementRunner } from "../../db/statements.js";
5
+ import { createLogContext } from "../../server/logging.js";
6
6
  import { ChangeProcessor } from "./change-processor.js";
7
7
  import { applyPragmas } from "./write-worker-client.js";
8
8
  import { parentPort } from "node:worker_threads";
@@ -22,7 +22,7 @@ function createAPI() {
22
22
  }
23
23
  return {
24
24
  init(dbPath, cpMode, pragmas, logConfig) {
25
- lc = createLogContext({ log: logConfig }, { worker: "write-worker" });
25
+ lc = createLogContext({ log: logConfig }, "write-worker");
26
26
  db = new Database(lc, dbPath);
27
27
  applyPragmas(db, pragmas);
28
28
  runner = new StatementRunner(db);
@@ -1 +1 @@
1
- {"version":3,"file":"write-worker.js","names":[],"sources":["../../../../../../zero-cache/src/services/replicator/write-worker.ts"],"sourcesContent":["import type {LogContext} from '@rocicorp/logger';\nimport {parentPort} from 'node:worker_threads';\nimport type {LogConfig} from '../../../../shared/src/logging.ts';\nimport {must} from '../../../../shared/src/must.ts';\nimport {Database} from '../../../../zqlite/src/db.ts';\nimport {StatementRunner} from '../../db/statements.ts';\nimport {createLogContext} from '../../server/logging.ts';\nimport type {ChangeStreamData} from '../change-source/protocol/current/downstream.ts';\nimport {ChangeProcessor, type ChangeProcessorMode} from './change-processor.ts';\nimport {getSubscriptionState} from './schema/replication-state.ts';\nimport {\n applyPragmas,\n type ArgsMap,\n type Method,\n type PragmaConfig,\n type Request,\n type Response,\n type ResultMap,\n type WriteError,\n} from './write-worker-client.ts';\n\nif (!parentPort) {\n throw new Error('write-worker must be run as a worker thread');\n}\n\nconst port = parentPort;\n\ntype API = {[M in Method]: (...args: ArgsMap[M]) => ResultMap[M]};\n\nfunction createAPI(): API {\n let db: Database | undefined;\n let runner: StatementRunner | undefined;\n let processor: ChangeProcessor | undefined;\n let mode: ChangeProcessorMode | undefined;\n let lc: LogContext | undefined;\n\n function createProcessor() {\n processor = new ChangeProcessor(must(runner), must(mode), (_lc, err) => {\n port.postMessage({\n writeError: err instanceof Error ? 
err : new Error(String(err)),\n } satisfies WriteError);\n });\n }\n\n return {\n init(\n dbPath: string,\n cpMode: ChangeProcessorMode,\n pragmas: PragmaConfig,\n logConfig: LogConfig,\n ) {\n lc = createLogContext({log: logConfig}, {worker: 'write-worker'});\n db = new Database(lc, dbPath);\n applyPragmas(db, pragmas);\n runner = new StatementRunner(db);\n mode = cpMode;\n createProcessor();\n },\n\n getSubscriptionState() {\n return getSubscriptionState(must(runner));\n },\n\n processMessage(downstream: ChangeStreamData) {\n return must(processor).processMessage(must(lc), downstream);\n },\n\n abort() {\n must(processor).abort(must(lc));\n createProcessor();\n },\n\n stop() {\n db?.close();\n db = undefined;\n runner = undefined;\n processor = undefined;\n },\n };\n}\n\nconst api = createAPI();\n\nport.on('message', (msg: Request) => {\n try {\n // eslint-disable-next-line @typescript-eslint/no-explicit-any -- TS can't narrow msg.method + msg.args together\n const result = (api[msg.method] as (...args: any[]) => unknown)(\n ...msg.args,\n );\n // abort is fire-and-forget — no pending slot on the client side.\n if (msg.method !== 'abort') {\n port.postMessage({method: msg.method, result} as Response);\n }\n } catch (e) {\n if (msg.method !== 'abort') {\n port.postMessage({method: msg.method, error: e} as Response);\n }\n 
}\n});\n"],"mappings":";;;;;;;;;AAqBA,IAAI,CAAC,WACH,OAAM,IAAI,MAAM,8CAA8C;AAGhE,IAAM,OAAO;AAIb,SAAS,YAAiB;CACxB,IAAI;CACJ,IAAI;CACJ,IAAI;CACJ,IAAI;CACJ,IAAI;CAEJ,SAAS,kBAAkB;AACzB,cAAY,IAAI,gBAAgB,KAAK,OAAO,EAAE,KAAK,KAAK,GAAG,KAAK,QAAQ;AACtE,QAAK,YAAY,EACf,YAAY,eAAe,QAAQ,MAAM,IAAI,MAAM,OAAO,IAAI,CAAC,EAChE,CAAsB;IACvB;;AAGJ,QAAO;EACL,KACE,QACA,QACA,SACA,WACA;AACA,QAAK,iBAAiB,EAAC,KAAK,WAAU,EAAE,EAAC,QAAQ,gBAAe,CAAC;AACjE,QAAK,IAAI,SAAS,IAAI,OAAO;AAC7B,gBAAa,IAAI,QAAQ;AACzB,YAAS,IAAI,gBAAgB,GAAG;AAChC,UAAO;AACP,oBAAiB;;EAGnB,uBAAuB;AACrB,UAAO,qBAAqB,KAAK,OAAO,CAAC;;EAG3C,eAAe,YAA8B;AAC3C,UAAO,KAAK,UAAU,CAAC,eAAe,KAAK,GAAG,EAAE,WAAW;;EAG7D,QAAQ;AACN,QAAK,UAAU,CAAC,MAAM,KAAK,GAAG,CAAC;AAC/B,oBAAiB;;EAGnB,OAAO;AACL,OAAI,OAAO;AACX,QAAK,KAAA;AACL,YAAS,KAAA;AACT,eAAY,KAAA;;EAEf;;AAGH,IAAM,MAAM,WAAW;AAEvB,KAAK,GAAG,YAAY,QAAiB;AACnC,KAAI;EAEF,MAAM,SAAU,IAAI,IAAI,QACtB,GAAG,IAAI,KACR;AAED,MAAI,IAAI,WAAW,QACjB,MAAK,YAAY;GAAC,QAAQ,IAAI;GAAQ;GAAO,CAAa;UAErD,GAAG;AACV,MAAI,IAAI,WAAW,QACjB,MAAK,YAAY;GAAC,QAAQ,IAAI;GAAQ,OAAO;GAAE,CAAa;;EAGhE"}
1
+ {"version":3,"file":"write-worker.js","names":[],"sources":["../../../../../../zero-cache/src/services/replicator/write-worker.ts"],"sourcesContent":["import {parentPort} from 'node:worker_threads';\nimport type {LogContext} from '@rocicorp/logger';\nimport type {LogConfig} from '../../../../shared/src/logging.ts';\nimport {must} from '../../../../shared/src/must.ts';\nimport {Database} from '../../../../zqlite/src/db.ts';\nimport {StatementRunner} from '../../db/statements.ts';\nimport {createLogContext} from '../../server/logging.ts';\nimport type {ChangeStreamData} from '../change-source/protocol/current/downstream.ts';\nimport {ChangeProcessor, type ChangeProcessorMode} from './change-processor.ts';\nimport {getSubscriptionState} from './schema/replication-state.ts';\nimport {\n applyPragmas,\n type ArgsMap,\n type Method,\n type PragmaConfig,\n type Request,\n type Response,\n type ResultMap,\n type WriteError,\n} from './write-worker-client.ts';\n\nif (!parentPort) {\n throw new Error('write-worker must be run as a worker thread');\n}\n\nconst port = parentPort;\n\ntype API = {[M in Method]: (...args: ArgsMap[M]) => ResultMap[M]};\n\nfunction createAPI(): API {\n let db: Database | undefined;\n let runner: StatementRunner | undefined;\n let processor: ChangeProcessor | undefined;\n let mode: ChangeProcessorMode | undefined;\n let lc: LogContext | undefined;\n\n function createProcessor() {\n processor = new ChangeProcessor(must(runner), must(mode), (_lc, err) => {\n port.postMessage({\n writeError: err instanceof Error ? 
err : new Error(String(err)),\n } satisfies WriteError);\n });\n }\n\n return {\n init(\n dbPath: string,\n cpMode: ChangeProcessorMode,\n pragmas: PragmaConfig,\n logConfig: LogConfig,\n ) {\n lc = createLogContext({log: logConfig}, 'write-worker');\n db = new Database(lc, dbPath);\n applyPragmas(db, pragmas);\n runner = new StatementRunner(db);\n mode = cpMode;\n createProcessor();\n },\n\n getSubscriptionState() {\n return getSubscriptionState(must(runner));\n },\n\n processMessage(downstream: ChangeStreamData) {\n return must(processor).processMessage(must(lc), downstream);\n },\n\n abort() {\n must(processor).abort(must(lc));\n createProcessor();\n },\n\n stop() {\n db?.close();\n db = undefined;\n runner = undefined;\n processor = undefined;\n },\n };\n}\n\nconst api = createAPI();\n\nport.on('message', (msg: Request) => {\n try {\n // eslint-disable-next-line @typescript-eslint/no-explicit-any -- TS can't narrow msg.method + msg.args together\n const result = (api[msg.method] as (...args: any[]) => unknown)(\n ...msg.args,\n );\n // abort is fire-and-forget — no pending slot on the client side.\n if (msg.method !== 'abort') {\n port.postMessage({method: msg.method, result} as Response);\n }\n } catch (e) {\n if (msg.method !== 'abort') {\n port.postMessage({method: msg.method, error: e} as Response);\n }\n 
}\n});\n"],"mappings":";;;;;;;;;AAqBA,IAAI,CAAC,WACH,OAAM,IAAI,MAAM,8CAA8C;AAGhE,IAAM,OAAO;AAIb,SAAS,YAAiB;CACxB,IAAI;CACJ,IAAI;CACJ,IAAI;CACJ,IAAI;CACJ,IAAI;CAEJ,SAAS,kBAAkB;AACzB,cAAY,IAAI,gBAAgB,KAAK,OAAO,EAAE,KAAK,KAAK,GAAG,KAAK,QAAQ;AACtE,QAAK,YAAY,EACf,YAAY,eAAe,QAAQ,MAAM,IAAI,MAAM,OAAO,IAAI,CAAC,EAChE,CAAsB;IACvB;;AAGJ,QAAO;EACL,KACE,QACA,QACA,SACA,WACA;AACA,QAAK,iBAAiB,EAAC,KAAK,WAAU,EAAE,eAAe;AACvD,QAAK,IAAI,SAAS,IAAI,OAAO;AAC7B,gBAAa,IAAI,QAAQ;AACzB,YAAS,IAAI,gBAAgB,GAAG;AAChC,UAAO;AACP,oBAAiB;;EAGnB,uBAAuB;AACrB,UAAO,qBAAqB,KAAK,OAAO,CAAC;;EAG3C,eAAe,YAA8B;AAC3C,UAAO,KAAK,UAAU,CAAC,eAAe,KAAK,GAAG,EAAE,WAAW;;EAG7D,QAAQ;AACN,QAAK,UAAU,CAAC,MAAM,KAAK,GAAG,CAAC;AAC/B,oBAAiB;;EAGnB,OAAO;AACL,OAAI,OAAO;AACX,QAAK,KAAA;AACL,YAAS,KAAA;AACT,eAAY,KAAA;;EAEf;;AAGH,IAAM,MAAM,WAAW;AAEvB,KAAK,GAAG,YAAY,QAAiB;AACnC,KAAI;EAEF,MAAM,SAAU,IAAI,IAAI,QACtB,GAAG,IAAI,KACR;AAED,MAAI,IAAI,WAAW,QACjB,MAAK,YAAY;GAAC,QAAQ,IAAI;GAAQ;GAAO,CAAa;UAErD,GAAG;AACV,MAAI,IAAI,WAAW,QACjB,MAAK,YAAY;GAAC,QAAQ,IAAI;GAAQ,OAAO;GAAE,CAAa;;EAGhE"}
@@ -1 +1 @@
1
- {"version":3,"file":"run-ast.d.ts","sourceRoot":"","sources":["../../../../../zero-cache/src/services/run-ast.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,kBAAkB,CAAC;AAQjD,OAAO,KAAK,EAAC,kBAAkB,EAAC,MAAM,oDAAoD,CAAC;AAC3F,OAAO,KAAK,EAAC,GAAG,EAAe,MAAM,mCAAmC,CAAC;AAEzE,OAAO,KAAK,EAAC,YAAY,EAAC,MAAM,6CAA6C,CAAC;AAG9E,OAAO,KAAK,EAAC,iBAAiB,EAAC,MAAM,kDAAkD,CAAC;AACxF,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,yCAAyC,CAAC;AACxE,OAAO,EAEL,KAAK,eAAe,EACrB,MAAM,qCAAqC,CAAC;AAG7C,OAAO,KAAK,EAAC,mBAAmB,EAAC,MAAM,gDAAgD,CAAC;AACxF,OAAO,KAAK,EAAC,YAAY,EAAC,MAAM,2CAA2C,CAAC;AAC5E,OAAO,KAAK,EAAC,QAAQ,EAAC,MAAM,2BAA2B,CAAC;AAExD,OAAO,KAAK,EAAC,OAAO,EAAC,MAAM,iBAAiB,CAAC;AAG7C,OAAO,KAAK,EAAC,cAAc,EAAC,MAAM,gBAAgB,CAAC;AAGnD,MAAM,MAAM,aAAa,GAAG;IAC1B,gBAAgB,CAAC,EAAE,OAAO,GAAG,SAAS,CAAC;IACvC,IAAI,CAAC,EAAE,OAAO,GAAG,SAAS,CAAC;IAC3B,oBAAoB,CAAC,EAAE,UAAU,GAAG,SAAS,CAAC;IAC9C,SAAS,CAAC,EAAE,mBAAmB,GAAG,SAAS,CAAC;IAC5C,EAAE,EAAE,QAAQ,CAAC;IACb,IAAI,EAAE,eAAe,CAAC;IACtB,WAAW,CAAC,EAAE,iBAAiB,GAAG,SAAS,CAAC;IAC5C,YAAY,CAAC,EAAE,YAAY,GAAG,SAAS,CAAC;IACxC,UAAU,CAAC,EAAE,OAAO,GAAG,SAAS,CAAC;IACjC,UAAU,EAAE,GAAG,CAAC,MAAM,EAAE,cAAc,CAAC,CAAC;IACxC,UAAU,CAAC,EAAE,OAAO,GAAG,SAAS,CAAC;CAClC,CAAC;AAEF,wBAAsB,MAAM,CAC1B,EAAE,EAAE,UAAU,EACd,YAAY,EAAE,YAAY,EAC1B,GAAG,EAAE,GAAG,EACR,aAAa,EAAE,OAAO,EACtB,OAAO,EAAE,aAAa,EACtB,YAAY,EAAE,MAAM,OAAO,CAAC,IAAI,CAAC,GAChC,OAAO,CAAC,kBAAkB,CAAC,CAyI7B"}
1
+ {"version":3,"file":"run-ast.d.ts","sourceRoot":"","sources":["../../../../../zero-cache/src/services/run-ast.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,kBAAkB,CAAC;AAQjD,OAAO,KAAK,EAAC,kBAAkB,EAAC,MAAM,oDAAoD,CAAC;AAC3F,OAAO,KAAK,EAAC,GAAG,EAAe,MAAM,mCAAmC,CAAC;AAEzE,OAAO,KAAK,EAAC,YAAY,EAAC,MAAM,6CAA6C,CAAC;AAG9E,OAAO,KAAK,EAAC,iBAAiB,EAAC,MAAM,kDAAkD,CAAC;AACxF,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,yCAAyC,CAAC;AACxE,OAAO,EAEL,KAAK,eAAe,EACrB,MAAM,qCAAqC,CAAC;AAI7C,OAAO,KAAK,EAAC,mBAAmB,EAAC,MAAM,gDAAgD,CAAC;AACxF,OAAO,KAAK,EAAC,YAAY,EAAC,MAAM,2CAA2C,CAAC;AAC5E,OAAO,KAAK,EAAC,QAAQ,EAAC,MAAM,2BAA2B,CAAC;AAExD,OAAO,KAAK,EAAC,OAAO,EAAC,MAAM,iBAAiB,CAAC;AAG7C,OAAO,KAAK,EAAC,cAAc,EAAC,MAAM,gBAAgB,CAAC;AAGnD,MAAM,MAAM,aAAa,GAAG;IAC1B,gBAAgB,CAAC,EAAE,OAAO,GAAG,SAAS,CAAC;IACvC,IAAI,CAAC,EAAE,OAAO,GAAG,SAAS,CAAC;IAC3B,oBAAoB,CAAC,EAAE,UAAU,GAAG,SAAS,CAAC;IAC9C,SAAS,CAAC,EAAE,mBAAmB,GAAG,SAAS,CAAC;IAC5C,EAAE,EAAE,QAAQ,CAAC;IACb,IAAI,EAAE,eAAe,CAAC;IACtB,WAAW,CAAC,EAAE,iBAAiB,GAAG,SAAS,CAAC;IAC5C,YAAY,CAAC,EAAE,YAAY,GAAG,SAAS,CAAC;IACxC,UAAU,CAAC,EAAE,OAAO,GAAG,SAAS,CAAC;IACjC,UAAU,EAAE,GAAG,CAAC,MAAM,EAAE,cAAc,CAAC,CAAC;IACxC,UAAU,CAAC,EAAE,OAAO,GAAG,SAAS,CAAC;CAClC,CAAC;AAEF,wBAAsB,MAAM,CAC1B,EAAE,EAAE,UAAU,EACd,YAAY,EAAE,YAAY,EAC1B,GAAG,EAAE,GAAG,EACR,aAAa,EAAE,OAAO,EACtB,OAAO,EAAE,aAAa,EACtB,YAAY,EAAE,MAAM,OAAO,CAAC,IAAI,CAAC,GAChC,OAAO,CAAC,kBAAkB,CAAC,CA4I7B"}
@@ -5,9 +5,9 @@ import { mapAST } from "../../../zero-protocol/src/ast.js";
5
5
  import { hashOfAST } from "../../../zero-protocol/src/query-hash.js";
6
6
  import { skipYields } from "../../../zql/src/ivm/operator.js";
7
7
  import { buildPipeline } from "../../../zql/src/builder/builder.js";
8
- import { computeZqlSpecs } from "../db/lite-tables.js";
9
8
  import { astToZQL } from "../../../ast-to-zql/src/ast-to-zql.js";
10
9
  import { formatOutput } from "../../../ast-to-zql/src/format.js";
10
+ import { computeZqlSpecs } from "../db/lite-tables.js";
11
11
  import { resolveSimpleScalarSubqueries } from "../../../zqlite/src/resolve-scalar-subqueries.js";
12
12
  import { transformAndHashQuery } from "../auth/read-authorizer.js";
13
13
  import { hydrate } from "./view-syncer/pipeline-driver.js";
@@ -51,7 +51,7 @@ async function runAst(lc, clientSchema, ast, isTransformed, options, yieldProces
51
51
  await yieldProcess();
52
52
  continue;
53
53
  }
54
- assert(rowChange.type === "add", "Hydration only handles add row changes");
54
+ assert(rowChange.type === 0, "Hydration only handles add row changes");
55
55
  if (syncedRowCount % 10 === 0) await Promise.resolve();
56
56
  if (syncedRowCount % 100 === 0) await sleep(1);
57
57
  let rows = rowsByTable[rowChange.table];
@@ -1 +1 @@
1
- {"version":3,"file":"run-ast.js","names":[],"sources":["../../../../../zero-cache/src/services/run-ast.ts"],"sourcesContent":["import type {LogContext} from '@rocicorp/logger';\n// @circular-dep-ignore\nimport {astToZQL} from '../../../ast-to-zql/src/ast-to-zql.ts';\n// @circular-dep-ignore\nimport {formatOutput} from '../../../ast-to-zql/src/format.ts';\nimport {assert} from '../../../shared/src/asserts.ts';\nimport {must} from '../../../shared/src/must.ts';\nimport {sleep} from '../../../shared/src/sleep.ts';\nimport type {AnalyzeQueryResult} from '../../../zero-protocol/src/analyze-query-result.ts';\nimport type {AST, LiteralValue} from '../../../zero-protocol/src/ast.ts';\nimport {mapAST} from '../../../zero-protocol/src/ast.ts';\nimport type {ClientSchema} from '../../../zero-protocol/src/client-schema.ts';\nimport type {Row} from '../../../zero-protocol/src/data.ts';\nimport {hashOfAST} from '../../../zero-protocol/src/query-hash.ts';\nimport type {PermissionsConfig} from '../../../zero-schema/src/compiled-permissions.ts';\nimport type {NameMapper} from '../../../zero-schema/src/name-mapper.ts';\nimport {\n buildPipeline,\n type BuilderDelegate,\n} from '../../../zql/src/builder/builder.ts';\nimport type {Node} from '../../../zql/src/ivm/data.ts';\nimport {skipYields} from '../../../zql/src/ivm/operator.ts';\nimport type {ConnectionCostModel} from '../../../zql/src/planner/planner-connection.ts';\nimport type {PlanDebugger} from '../../../zql/src/planner/planner-debug.ts';\nimport type {Database} from '../../../zqlite/src/db.ts';\nimport {resolveSimpleScalarSubqueries} from '../../../zqlite/src/resolve-scalar-subqueries.ts';\nimport type {JWTAuth} from '../auth/auth.ts';\nimport {transformAndHashQuery} from '../auth/read-authorizer.ts';\nimport {computeZqlSpecs} from '../db/lite-tables.ts';\nimport type {LiteAndZqlSpec} from '../db/specs.ts';\nimport {hydrate} from './view-syncer/pipeline-driver.ts';\n\nexport type RunAstOptions = {\n applyPermissions?: 
boolean | undefined;\n auth?: JWTAuth | undefined;\n clientToServerMapper?: NameMapper | undefined;\n costModel?: ConnectionCostModel | undefined;\n db: Database;\n host: BuilderDelegate;\n permissions?: PermissionsConfig | undefined;\n planDebugger?: PlanDebugger | undefined;\n syncedRows?: boolean | undefined;\n tableSpecs: Map<string, LiteAndZqlSpec>;\n vendedRows?: boolean | undefined;\n};\n\nexport async function runAst(\n lc: LogContext,\n clientSchema: ClientSchema,\n ast: AST,\n isTransformed: boolean,\n options: RunAstOptions,\n yieldProcess: () => Promise<void>,\n): Promise<AnalyzeQueryResult> {\n const {clientToServerMapper, permissions, host, db} = options;\n const result: AnalyzeQueryResult = {\n warnings: [],\n syncedRows: undefined,\n syncedRowCount: 0,\n start: 0,\n end: 0,\n elapsed: 0,\n afterPermissions: undefined,\n readRows: undefined,\n readRowCountsByQuery: {},\n readRowCount: undefined,\n };\n\n if (!isTransformed) {\n // map the AST to server names if not already transformed\n ast = mapAST(ast, must(clientToServerMapper));\n }\n if (options.applyPermissions) {\n const auth = options.auth;\n if (!auth) {\n result.warnings.push(\n 'No auth data provided. Permission rules will compare to `NULL` wherever an auth data field is referenced.',\n );\n }\n ast = transformAndHashQuery(\n lc,\n 'clientGroupIDForAnalyze',\n ast,\n must(permissions),\n auth,\n false,\n ).transformedAst;\n result.afterPermissions = await formatOutput(ast.table + astToZQL(ast));\n }\n\n // Resolve scalar subqueries (e.g. 
whereExists with {scalar: true}) to\n // literal equality conditions so that SQLite can use indexes effectively.\n // Without this, correlated subqueries get stripped from SQL filters and\n // queries on large tables fall back to full table scans.\n const executor = (\n subqueryAST: AST,\n childField: string,\n ): LiteralValue | null | undefined => {\n const input = buildPipeline(subqueryAST, host, 'scalar-subquery');\n // Consume the full stream rather than using first() to avoid\n // triggering early return on Take's #initialFetch assertion.\n // The subquery AST already has limit: 1, so at most one row is produced.\n let node: Node | undefined;\n for (const n of skipYields(input.fetch({}))) {\n node ??= n;\n }\n input.destroy();\n return node ? ((node.row[childField] as LiteralValue) ?? null) : undefined;\n };\n\n const {ast: resolvedAst} = resolveSimpleScalarSubqueries(\n ast,\n options.tableSpecs,\n executor,\n );\n\n const pipeline = buildPipeline(\n resolvedAst,\n host,\n 'query-id',\n options.costModel,\n lc,\n options.planDebugger,\n );\n\n const start = performance.now();\n\n let syncedRowCount = 0;\n const rowsByTable: Record<string, Row[]> = {};\n const seenByTable: Set<string> = new Set();\n for (const rowChange of hydrate(\n pipeline,\n hashOfAST(resolvedAst),\n clientSchema,\n computeZqlSpecs(lc, db, {includeBackfillingColumns: false}),\n )) {\n if (rowChange === 'yield') {\n await yieldProcess();\n continue;\n }\n assert(rowChange.type === 'add', 'Hydration only handles add row changes');\n\n // yield to other tasks to avoid blocking for too long\n if (syncedRowCount % 10 === 0) {\n await Promise.resolve();\n }\n if (syncedRowCount % 100 === 0) {\n await sleep(1);\n }\n\n let rows: Row[] = rowsByTable[rowChange.table];\n const s = rowChange.table + '.' 
+ JSON.stringify(rowChange.row);\n if (seenByTable.has(s)) {\n continue; // skip duplicates\n }\n syncedRowCount++;\n seenByTable.add(s);\n if (options.syncedRows) {\n if (!rows) {\n rows = [];\n rowsByTable[rowChange.table] = rows;\n }\n rows.push(rowChange.row);\n }\n }\n\n const end = performance.now();\n if (options.syncedRows) {\n result.syncedRows = rowsByTable;\n }\n result.start = start;\n result.end = end;\n result.elapsed = end - start;\n\n // Always include the count of synced and vended rows.\n result.syncedRowCount = syncedRowCount;\n result.readRowCountsByQuery = host.debug?.getVendedRowCounts() ?? {};\n let readRowCount = 0;\n for (const c of Object.values(result.readRowCountsByQuery)) {\n for (const v of Object.values(c)) {\n readRowCount += v;\n }\n }\n result.readRowCount = readRowCount;\n result.dbScansByQuery = host.debug?.getNVisitCounts() ?? {};\n\n if (options.vendedRows) {\n result.readRows = host.debug?.getVendedRows();\n }\n return result;\n}\n"],"mappings":";;;;;;;;;;;;;;AA8CA,eAAsB,OACpB,IACA,cACA,KACA,eACA,SACA,cAC6B;CAC7B,MAAM,EAAC,sBAAsB,aAAa,MAAM,OAAM;CACtD,MAAM,SAA6B;EACjC,UAAU,EAAE;EACZ,YAAY,KAAA;EACZ,gBAAgB;EAChB,OAAO;EACP,KAAK;EACL,SAAS;EACT,kBAAkB,KAAA;EAClB,UAAU,KAAA;EACV,sBAAsB,EAAE;EACxB,cAAc,KAAA;EACf;AAED,KAAI,CAAC,cAEH,OAAM,OAAO,KAAK,KAAK,qBAAqB,CAAC;AAE/C,KAAI,QAAQ,kBAAkB;EAC5B,MAAM,OAAO,QAAQ;AACrB,MAAI,CAAC,KACH,QAAO,SAAS,KACd,4GACD;AAEH,QAAM,sBACJ,IACA,2BACA,KACA,KAAK,YAAY,EACjB,MACA,MACD,CAAC;AACF,SAAO,mBAAmB,MAAM,aAAa,IAAI,QAAQ,SAAS,IAAI,CAAC;;CAOzE,MAAM,YACJ,aACA,eACoC;EACpC,MAAM,QAAQ,cAAc,aAAa,MAAM,kBAAkB;EAIjE,IAAI;AACJ,OAAK,MAAM,KAAK,WAAW,MAAM,MAAM,EAAE,CAAC,CAAC,CACzC,UAAS;AAEX,QAAM,SAAS;AACf,SAAO,OAAS,KAAK,IAAI,eAAgC,OAAQ,KAAA;;CAGnE,MAAM,EAAC,KAAK,gBAAe,8BACzB,KACA,QAAQ,YACR,SACD;CAED,MAAM,WAAW,cACf,aACA,MACA,YACA,QAAQ,WACR,IACA,QAAQ,aACT;CAED,MAAM,QAAQ,YAAY,KAAK;CAE/B,IAAI,iBAAiB;CACrB,MAAM,cAAqC,EAAE;CAC7C,MAAM,8BAA2B,IAAI,KAAK;AAC1C,MAAK,MAAM,aAAa,QACtB,UACA,UAAU,YAAY,EACtB,cACA,gBAAgB,IAAI,IAAI,EAAC,2BAA2B,O
AAM,CAAC,CAC5D,EAAE;AACD,MAAI,cAAc,SAAS;AACzB,SAAM,cAAc;AACpB;;AAEF,SAAO,UAAU,SAAS,OAAO,yCAAyC;AAG1E,MAAI,iBAAiB,OAAO,EAC1B,OAAM,QAAQ,SAAS;AAEzB,MAAI,iBAAiB,QAAQ,EAC3B,OAAM,MAAM,EAAE;EAGhB,IAAI,OAAc,YAAY,UAAU;EACxC,MAAM,IAAI,UAAU,QAAQ,MAAM,KAAK,UAAU,UAAU,IAAI;AAC/D,MAAI,YAAY,IAAI,EAAE,CACpB;AAEF;AACA,cAAY,IAAI,EAAE;AAClB,MAAI,QAAQ,YAAY;AACtB,OAAI,CAAC,MAAM;AACT,WAAO,EAAE;AACT,gBAAY,UAAU,SAAS;;AAEjC,QAAK,KAAK,UAAU,IAAI;;;CAI5B,MAAM,MAAM,YAAY,KAAK;AAC7B,KAAI,QAAQ,WACV,QAAO,aAAa;AAEtB,QAAO,QAAQ;AACf,QAAO,MAAM;AACb,QAAO,UAAU,MAAM;AAGvB,QAAO,iBAAiB;AACxB,QAAO,uBAAuB,KAAK,OAAO,oBAAoB,IAAI,EAAE;CACpE,IAAI,eAAe;AACnB,MAAK,MAAM,KAAK,OAAO,OAAO,OAAO,qBAAqB,CACxD,MAAK,MAAM,KAAK,OAAO,OAAO,EAAE,CAC9B,iBAAgB;AAGpB,QAAO,eAAe;AACtB,QAAO,iBAAiB,KAAK,OAAO,iBAAiB,IAAI,EAAE;AAE3D,KAAI,QAAQ,WACV,QAAO,WAAW,KAAK,OAAO,eAAe;AAE/C,QAAO"}
1
+ {"version":3,"file":"run-ast.js","names":[],"sources":["../../../../../zero-cache/src/services/run-ast.ts"],"sourcesContent":["import type {LogContext} from '@rocicorp/logger';\n// @circular-dep-ignore\nimport {astToZQL} from '../../../ast-to-zql/src/ast-to-zql.ts';\n// @circular-dep-ignore\nimport {formatOutput} from '../../../ast-to-zql/src/format.ts';\nimport {assert} from '../../../shared/src/asserts.ts';\nimport {must} from '../../../shared/src/must.ts';\nimport {sleep} from '../../../shared/src/sleep.ts';\nimport type {AnalyzeQueryResult} from '../../../zero-protocol/src/analyze-query-result.ts';\nimport type {AST, LiteralValue} from '../../../zero-protocol/src/ast.ts';\nimport {mapAST} from '../../../zero-protocol/src/ast.ts';\nimport type {ClientSchema} from '../../../zero-protocol/src/client-schema.ts';\nimport type {Row} from '../../../zero-protocol/src/data.ts';\nimport {hashOfAST} from '../../../zero-protocol/src/query-hash.ts';\nimport type {PermissionsConfig} from '../../../zero-schema/src/compiled-permissions.ts';\nimport type {NameMapper} from '../../../zero-schema/src/name-mapper.ts';\nimport {\n buildPipeline,\n type BuilderDelegate,\n} from '../../../zql/src/builder/builder.ts';\nimport {ChangeType} from '../../../zql/src/ivm/change-type.ts';\nimport type {Node} from '../../../zql/src/ivm/data.ts';\nimport {skipYields} from '../../../zql/src/ivm/operator.ts';\nimport type {ConnectionCostModel} from '../../../zql/src/planner/planner-connection.ts';\nimport type {PlanDebugger} from '../../../zql/src/planner/planner-debug.ts';\nimport type {Database} from '../../../zqlite/src/db.ts';\nimport {resolveSimpleScalarSubqueries} from '../../../zqlite/src/resolve-scalar-subqueries.ts';\nimport type {JWTAuth} from '../auth/auth.ts';\nimport {transformAndHashQuery} from '../auth/read-authorizer.ts';\nimport {computeZqlSpecs} from '../db/lite-tables.ts';\nimport type {LiteAndZqlSpec} from '../db/specs.ts';\nimport {hydrate} from 
'./view-syncer/pipeline-driver.ts';\n\nexport type RunAstOptions = {\n applyPermissions?: boolean | undefined;\n auth?: JWTAuth | undefined;\n clientToServerMapper?: NameMapper | undefined;\n costModel?: ConnectionCostModel | undefined;\n db: Database;\n host: BuilderDelegate;\n permissions?: PermissionsConfig | undefined;\n planDebugger?: PlanDebugger | undefined;\n syncedRows?: boolean | undefined;\n tableSpecs: Map<string, LiteAndZqlSpec>;\n vendedRows?: boolean | undefined;\n};\n\nexport async function runAst(\n lc: LogContext,\n clientSchema: ClientSchema,\n ast: AST,\n isTransformed: boolean,\n options: RunAstOptions,\n yieldProcess: () => Promise<void>,\n): Promise<AnalyzeQueryResult> {\n const {clientToServerMapper, permissions, host, db} = options;\n const result: AnalyzeQueryResult = {\n warnings: [],\n syncedRows: undefined,\n syncedRowCount: 0,\n start: 0,\n end: 0,\n elapsed: 0,\n afterPermissions: undefined,\n readRows: undefined,\n readRowCountsByQuery: {},\n readRowCount: undefined,\n };\n\n if (!isTransformed) {\n // map the AST to server names if not already transformed\n ast = mapAST(ast, must(clientToServerMapper));\n }\n if (options.applyPermissions) {\n const auth = options.auth;\n if (!auth) {\n result.warnings.push(\n 'No auth data provided. Permission rules will compare to `NULL` wherever an auth data field is referenced.',\n );\n }\n ast = transformAndHashQuery(\n lc,\n 'clientGroupIDForAnalyze',\n ast,\n must(permissions),\n auth,\n false,\n ).transformedAst;\n result.afterPermissions = await formatOutput(ast.table + astToZQL(ast));\n }\n\n // Resolve scalar subqueries (e.g. 
whereExists with {scalar: true}) to\n // literal equality conditions so that SQLite can use indexes effectively.\n // Without this, correlated subqueries get stripped from SQL filters and\n // queries on large tables fall back to full table scans.\n const executor = (\n subqueryAST: AST,\n childField: string,\n ): LiteralValue | null | undefined => {\n const input = buildPipeline(subqueryAST, host, 'scalar-subquery');\n // Consume the full stream rather than using first() to avoid\n // triggering early return on Take's #initialFetch assertion.\n // The subquery AST already has limit: 1, so at most one row is produced.\n let node: Node | undefined;\n for (const n of skipYields(input.fetch({}))) {\n node ??= n;\n }\n input.destroy();\n return node ? ((node.row[childField] as LiteralValue) ?? null) : undefined;\n };\n\n const {ast: resolvedAst} = resolveSimpleScalarSubqueries(\n ast,\n options.tableSpecs,\n executor,\n );\n\n const pipeline = buildPipeline(\n resolvedAst,\n host,\n 'query-id',\n options.costModel,\n lc,\n options.planDebugger,\n );\n\n const start = performance.now();\n\n let syncedRowCount = 0;\n const rowsByTable: Record<string, Row[]> = {};\n const seenByTable: Set<string> = new Set();\n for (const rowChange of hydrate(\n pipeline,\n hashOfAST(resolvedAst),\n clientSchema,\n computeZqlSpecs(lc, db, {includeBackfillingColumns: false}),\n )) {\n if (rowChange === 'yield') {\n await yieldProcess();\n continue;\n }\n assert(\n rowChange.type === ChangeType.ADD,\n 'Hydration only handles add row changes',\n );\n\n // yield to other tasks to avoid blocking for too long\n if (syncedRowCount % 10 === 0) {\n await Promise.resolve();\n }\n if (syncedRowCount % 100 === 0) {\n await sleep(1);\n }\n\n let rows: Row[] = rowsByTable[rowChange.table];\n const s = rowChange.table + '.' 
+ JSON.stringify(rowChange.row);\n if (seenByTable.has(s)) {\n continue; // skip duplicates\n }\n syncedRowCount++;\n seenByTable.add(s);\n if (options.syncedRows) {\n if (!rows) {\n rows = [];\n rowsByTable[rowChange.table] = rows;\n }\n rows.push(rowChange.row);\n }\n }\n\n const end = performance.now();\n if (options.syncedRows) {\n result.syncedRows = rowsByTable;\n }\n result.start = start;\n result.end = end;\n result.elapsed = end - start;\n\n // Always include the count of synced and vended rows.\n result.syncedRowCount = syncedRowCount;\n result.readRowCountsByQuery = host.debug?.getVendedRowCounts() ?? {};\n let readRowCount = 0;\n for (const c of Object.values(result.readRowCountsByQuery)) {\n for (const v of Object.values(c)) {\n readRowCount += v;\n }\n }\n result.readRowCount = readRowCount;\n result.dbScansByQuery = host.debug?.getNVisitCounts() ?? {};\n\n if (options.vendedRows) {\n result.readRows = host.debug?.getVendedRows();\n }\n return result;\n}\n"],"mappings":";;;;;;;;;;;;;;AA+CA,eAAsB,OACpB,IACA,cACA,KACA,eACA,SACA,cAC6B;CAC7B,MAAM,EAAC,sBAAsB,aAAa,MAAM,OAAM;CACtD,MAAM,SAA6B;EACjC,UAAU,EAAE;EACZ,YAAY,KAAA;EACZ,gBAAgB;EAChB,OAAO;EACP,KAAK;EACL,SAAS;EACT,kBAAkB,KAAA;EAClB,UAAU,KAAA;EACV,sBAAsB,EAAE;EACxB,cAAc,KAAA;EACf;AAED,KAAI,CAAC,cAEH,OAAM,OAAO,KAAK,KAAK,qBAAqB,CAAC;AAE/C,KAAI,QAAQ,kBAAkB;EAC5B,MAAM,OAAO,QAAQ;AACrB,MAAI,CAAC,KACH,QAAO,SAAS,KACd,4GACD;AAEH,QAAM,sBACJ,IACA,2BACA,KACA,KAAK,YAAY,EACjB,MACA,MACD,CAAC;AACF,SAAO,mBAAmB,MAAM,aAAa,IAAI,QAAQ,SAAS,IAAI,CAAC;;CAOzE,MAAM,YACJ,aACA,eACoC;EACpC,MAAM,QAAQ,cAAc,aAAa,MAAM,kBAAkB;EAIjE,IAAI;AACJ,OAAK,MAAM,KAAK,WAAW,MAAM,MAAM,EAAE,CAAC,CAAC,CACzC,UAAS;AAEX,QAAM,SAAS;AACf,SAAO,OAAS,KAAK,IAAI,eAAgC,OAAQ,KAAA;;CAGnE,MAAM,EAAC,KAAK,gBAAe,8BACzB,KACA,QAAQ,YACR,SACD;CAED,MAAM,WAAW,cACf,aACA,MACA,YACA,QAAQ,WACR,IACA,QAAQ,aACT;CAED,MAAM,QAAQ,YAAY,KAAK;CAE/B,IAAI,iBAAiB;CACrB,MAAM,cAAqC,EAAE;CAC7C,MAAM,8BAA2B,IAAI,KAAK;AAC1C,MAAK,MAAM,aAAa,QACtB,UACA,UAAU,YAAY,EACtB,cACA,gBAAgB,IAAI,IAAI,EAAC,2BAA2B,O
AAM,CAAC,CAC5D,EAAE;AACD,MAAI,cAAc,SAAS;AACzB,SAAM,cAAc;AACpB;;AAEF,SACE,UAAU,SAAS,GACnB,yCACD;AAGD,MAAI,iBAAiB,OAAO,EAC1B,OAAM,QAAQ,SAAS;AAEzB,MAAI,iBAAiB,QAAQ,EAC3B,OAAM,MAAM,EAAE;EAGhB,IAAI,OAAc,YAAY,UAAU;EACxC,MAAM,IAAI,UAAU,QAAQ,MAAM,KAAK,UAAU,UAAU,IAAI;AAC/D,MAAI,YAAY,IAAI,EAAE,CACpB;AAEF;AACA,cAAY,IAAI,EAAE;AAClB,MAAI,QAAQ,YAAY;AACtB,OAAI,CAAC,MAAM;AACT,WAAO,EAAE;AACT,gBAAY,UAAU,SAAS;;AAEjC,QAAK,KAAK,UAAU,IAAI;;;CAI5B,MAAM,MAAM,YAAY,KAAK;AAC7B,KAAI,QAAQ,WACV,QAAO,aAAa;AAEtB,QAAO,QAAQ;AACf,QAAO,MAAM;AACb,QAAO,UAAU,MAAM;AAGvB,QAAO,iBAAiB;AACxB,QAAO,uBAAuB,KAAK,OAAO,oBAAoB,IAAI,EAAE;CACpE,IAAI,eAAe;AACnB,MAAK,MAAM,KAAK,OAAO,OAAO,OAAO,qBAAqB,CACxD,MAAK,MAAM,KAAK,OAAO,OAAO,EAAE,CAC9B,iBAAgB;AAGpB,QAAO,eAAe;AACtB,QAAO,iBAAiB,KAAK,OAAO,iBAAiB,IAAI,EAAE;AAE3D,KAAI,QAAQ,WACV,QAAO,WAAW,KAAK,OAAO,eAAe;AAE/C,QAAO"}
@@ -1 +1 @@
1
- {"version":3,"file":"statz.d.ts","sourceRoot":"","sources":["../../../../../zero-cache/src/services/statz.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,kBAAkB,CAAC;AAEjD,OAAO,KAAK,EAAC,YAAY,EAAE,cAAc,EAAC,MAAM,SAAS,CAAC;AAK1D,OAAO,KAAK,EAAC,oBAAoB,IAAI,UAAU,EAAC,MAAM,wBAAwB,CAAC;AA6R/E;;;;;GAKG;AACH,wBAAsB,kBAAkB,CACtC,EAAE,EAAE,UAAU,EACd,MAAM,EAAE,UAAU,EAClB,GAAG,EAAE,cAAc,EACnB,GAAG,EAAE,YAAY,iBAuDlB"}
1
+ {"version":3,"file":"statz.d.ts","sourceRoot":"","sources":["../../../../../zero-cache/src/services/statz.ts"],"names":[],"mappings":"AAEA,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,kBAAkB,CAAC;AAEjD,OAAO,KAAK,EAAC,YAAY,EAAE,cAAc,EAAC,MAAM,SAAS,CAAC;AAG1D,OAAO,KAAK,EAAC,oBAAoB,IAAI,UAAU,EAAC,MAAM,wBAAwB,CAAC;AA6R/E;;;;;GAKG;AACH,wBAAsB,kBAAkB,CACtC,EAAE,EAAE,UAAU,EACd,MAAM,EAAE,UAAU,EAClB,GAAG,EAAE,cAAc,EACnB,GAAG,EAAE,YAAY,iBAuDlB"}
@@ -1,14 +1,14 @@
1
1
  import { BigIntJSON } from "../../../shared/src/bigint-json.js";
2
- import { Database } from "../../../zqlite/src/db.js";
3
2
  import { getShardID, upstreamSchema } from "../types/shards.js";
4
3
  import { isAdminPasswordValid } from "../config/zero-config.js";
4
+ import { Database } from "../../../zqlite/src/db.js";
5
+ import { getReplicationState } from "./replicator/schema/replication-state.js";
5
6
  import { StatementRunner } from "../db/statements.js";
6
7
  import { pgClient } from "../types/pg.js";
7
8
  import { fromStateVersionString } from "./change-source/pg/lsn.js";
8
- import { getReplicationState } from "./replicator/schema/replication-state.js";
9
9
  import os from "os";
10
- import auth from "basic-auth";
11
10
  import fs from "fs";
11
+ import auth from "basic-auth";
12
12
  //#region ../zero-cache/src/services/statz.ts
13
13
  async function upstreamStats(lc, config) {
14
14
  const schema = upstreamSchema(getShardID(config));