@rocicorp/zero 0.25.0-canary.9 → 0.25.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (517)
  1. package/out/analyze-query/src/bin-analyze.js.map +1 -1
  2. package/out/analyze-query/src/run-ast.d.ts +1 -1
  3. package/out/analyze-query/src/run-ast.d.ts.map +1 -1
  4. package/out/analyze-query/src/run-ast.js +10 -8
  5. package/out/analyze-query/src/run-ast.js.map +1 -1
  6. package/out/otel/src/log-options.d.ts +1 -1
  7. package/out/otel/src/log-options.d.ts.map +1 -1
  8. package/out/otel/src/log-options.js +0 -1
  9. package/out/otel/src/log-options.js.map +1 -1
  10. package/out/replicache/src/persist/idb-databases-store.d.ts +1 -0
  11. package/out/replicache/src/persist/idb-databases-store.d.ts.map +1 -1
  12. package/out/replicache/src/persist/idb-databases-store.js +13 -2
  13. package/out/replicache/src/persist/idb-databases-store.js.map +1 -1
  14. package/out/shared/src/deep-merge.d.ts +6 -4
  15. package/out/shared/src/deep-merge.d.ts.map +1 -1
  16. package/out/shared/src/deep-merge.js +2 -1
  17. package/out/shared/src/deep-merge.js.map +1 -1
  18. package/out/shared/src/iterables.d.ts +0 -1
  19. package/out/shared/src/iterables.d.ts.map +1 -1
  20. package/out/shared/src/iterables.js +0 -34
  21. package/out/shared/src/iterables.js.map +1 -1
  22. package/out/shared/src/options-types.d.ts +113 -0
  23. package/out/shared/src/options-types.d.ts.map +1 -0
  24. package/out/shared/src/options.d.ts +2 -111
  25. package/out/shared/src/options.d.ts.map +1 -1
  26. package/out/shared/src/options.js.map +1 -1
  27. package/out/shared/src/record-proxy.d.ts +13 -0
  28. package/out/shared/src/record-proxy.d.ts.map +1 -0
  29. package/out/shared/src/record-proxy.js +59 -0
  30. package/out/shared/src/record-proxy.js.map +1 -0
  31. package/out/z2s/src/compiler.d.ts.map +1 -1
  32. package/out/z2s/src/compiler.js +4 -2
  33. package/out/z2s/src/compiler.js.map +1 -1
  34. package/out/zero/package.json.js +1 -1
  35. package/out/zero/src/adapters/drizzle.d.ts +1 -1
  36. package/out/zero/src/adapters/drizzle.d.ts.map +1 -1
  37. package/out/zero/src/adapters/drizzle.js +4 -1
  38. package/out/zero/src/bindings.d.ts +2 -0
  39. package/out/zero/src/bindings.d.ts.map +1 -0
  40. package/out/zero/src/bindings.js +27 -0
  41. package/out/zero/src/bindings.js.map +1 -0
  42. package/out/zero/src/pg.js +7 -5
  43. package/out/zero/src/react.js +2 -4
  44. package/out/zero/src/react.js.map +1 -1
  45. package/out/zero/src/server.js +7 -5
  46. package/out/zero/src/solid.js +2 -2
  47. package/out/zero/src/zero-cache-dev.js +11 -5
  48. package/out/zero/src/zero-cache-dev.js.map +1 -1
  49. package/out/zero/src/zero.js +6 -6
  50. package/out/zero-cache/src/auth/read-authorizer.d.ts +1 -1
  51. package/out/zero-cache/src/auth/read-authorizer.d.ts.map +1 -1
  52. package/out/zero-cache/src/auth/read-authorizer.js +1 -1
  53. package/out/zero-cache/src/auth/read-authorizer.js.map +1 -1
  54. package/out/zero-cache/src/auth/write-authorizer.d.ts.map +1 -1
  55. package/out/zero-cache/src/auth/write-authorizer.js +25 -17
  56. package/out/zero-cache/src/auth/write-authorizer.js.map +1 -1
  57. package/out/zero-cache/src/config/zero-config.d.ts +40 -4
  58. package/out/zero-cache/src/config/zero-config.d.ts.map +1 -1
  59. package/out/zero-cache/src/config/zero-config.js +58 -19
  60. package/out/zero-cache/src/config/zero-config.js.map +1 -1
  61. package/out/zero-cache/src/db/transaction-pool.d.ts.map +1 -1
  62. package/out/zero-cache/src/db/transaction-pool.js +3 -6
  63. package/out/zero-cache/src/db/transaction-pool.js.map +1 -1
  64. package/out/zero-cache/src/scripts/deploy-permissions.js +6 -3
  65. package/out/zero-cache/src/scripts/deploy-permissions.js.map +1 -1
  66. package/out/zero-cache/src/scripts/permissions.d.ts.map +1 -1
  67. package/out/zero-cache/src/scripts/permissions.js +11 -13
  68. package/out/zero-cache/src/scripts/permissions.js.map +1 -1
  69. package/out/zero-cache/src/server/anonymous-otel-start.d.ts +10 -1
  70. package/out/zero-cache/src/server/anonymous-otel-start.d.ts.map +1 -1
  71. package/out/zero-cache/src/server/anonymous-otel-start.js +34 -18
  72. package/out/zero-cache/src/server/anonymous-otel-start.js.map +1 -1
  73. package/out/zero-cache/src/server/change-streamer.d.ts.map +1 -1
  74. package/out/zero-cache/src/server/change-streamer.js +2 -8
  75. package/out/zero-cache/src/server/change-streamer.js.map +1 -1
  76. package/out/zero-cache/src/server/otel-diag-logger.d.ts.map +1 -1
  77. package/out/zero-cache/src/server/otel-diag-logger.js +1 -21
  78. package/out/zero-cache/src/server/otel-diag-logger.js.map +1 -1
  79. package/out/zero-cache/src/server/otel-start.d.ts.map +1 -1
  80. package/out/zero-cache/src/server/otel-start.js +1 -5
  81. package/out/zero-cache/src/server/otel-start.js.map +1 -1
  82. package/out/zero-cache/src/server/syncer.d.ts.map +1 -1
  83. package/out/zero-cache/src/server/syncer.js +7 -1
  84. package/out/zero-cache/src/server/syncer.js.map +1 -1
  85. package/out/zero-cache/src/services/analyze.d.ts +2 -2
  86. package/out/zero-cache/src/services/analyze.d.ts.map +1 -1
  87. package/out/zero-cache/src/services/analyze.js +55 -42
  88. package/out/zero-cache/src/services/analyze.js.map +1 -1
  89. package/out/zero-cache/src/services/change-source/pg/change-source.d.ts.map +1 -1
  90. package/out/zero-cache/src/services/change-source/pg/change-source.js +62 -42
  91. package/out/zero-cache/src/services/change-source/pg/change-source.js.map +1 -1
  92. package/out/zero-cache/src/services/change-source/pg/schema/published.d.ts.map +1 -1
  93. package/out/zero-cache/src/services/change-source/pg/schema/published.js +3 -2
  94. package/out/zero-cache/src/services/change-source/pg/schema/published.js.map +1 -1
  95. package/out/zero-cache/src/services/change-source/protocol/current/control.d.ts +1 -0
  96. package/out/zero-cache/src/services/change-source/protocol/current/control.d.ts.map +1 -1
  97. package/out/zero-cache/src/services/change-source/protocol/current/control.js +5 -1
  98. package/out/zero-cache/src/services/change-source/protocol/current/control.js.map +1 -1
  99. package/out/zero-cache/src/services/change-source/protocol/current/downstream.d.ts +2 -0
  100. package/out/zero-cache/src/services/change-source/protocol/current/downstream.d.ts.map +1 -1
  101. package/out/zero-cache/src/services/change-source/protocol/current/json.d.ts +8 -0
  102. package/out/zero-cache/src/services/change-source/protocol/current/json.d.ts.map +1 -0
  103. package/out/zero-cache/src/services/change-source/protocol/current/json.js +19 -0
  104. package/out/zero-cache/src/services/change-source/protocol/current/json.js.map +1 -0
  105. package/out/zero-cache/src/services/change-source/protocol/current.d.ts +1 -0
  106. package/out/zero-cache/src/services/change-source/protocol/current.d.ts.map +1 -1
  107. package/out/zero-cache/src/services/change-source/protocol/current.js +3 -0
  108. package/out/zero-cache/src/services/change-source/protocol/current.js.map +1 -1
  109. package/out/zero-cache/src/services/change-streamer/change-streamer-http.d.ts +0 -2
  110. package/out/zero-cache/src/services/change-streamer/change-streamer-http.d.ts.map +1 -1
  111. package/out/zero-cache/src/services/change-streamer/change-streamer-http.js +0 -5
  112. package/out/zero-cache/src/services/change-streamer/change-streamer-http.js.map +1 -1
  113. package/out/zero-cache/src/services/change-streamer/change-streamer-service.d.ts.map +1 -1
  114. package/out/zero-cache/src/services/change-streamer/change-streamer-service.js +8 -1
  115. package/out/zero-cache/src/services/change-streamer/change-streamer-service.js.map +1 -1
  116. package/out/zero-cache/src/services/change-streamer/storer.d.ts.map +1 -1
  117. package/out/zero-cache/src/services/change-streamer/storer.js +2 -3
  118. package/out/zero-cache/src/services/change-streamer/storer.js.map +1 -1
  119. package/out/zero-cache/src/services/http-service.d.ts +0 -1
  120. package/out/zero-cache/src/services/http-service.d.ts.map +1 -1
  121. package/out/zero-cache/src/services/http-service.js +0 -4
  122. package/out/zero-cache/src/services/http-service.js.map +1 -1
  123. package/out/zero-cache/src/services/litestream/commands.js +3 -2
  124. package/out/zero-cache/src/services/litestream/commands.js.map +1 -1
  125. package/out/zero-cache/src/services/mutagen/pusher.d.ts +4 -4
  126. package/out/zero-cache/src/services/replicator/replication-status.d.ts +2 -0
  127. package/out/zero-cache/src/services/replicator/replication-status.d.ts.map +1 -1
  128. package/out/zero-cache/src/services/replicator/replication-status.js +14 -1
  129. package/out/zero-cache/src/services/replicator/replication-status.js.map +1 -1
  130. package/out/zero-cache/src/services/run-ast.d.ts +1 -1
  131. package/out/zero-cache/src/services/run-ast.d.ts.map +1 -1
  132. package/out/zero-cache/src/services/run-ast.js +5 -1
  133. package/out/zero-cache/src/services/run-ast.js.map +1 -1
  134. package/out/zero-cache/src/services/view-syncer/active-users-gauge.d.ts +2 -1
  135. package/out/zero-cache/src/services/view-syncer/active-users-gauge.d.ts.map +1 -1
  136. package/out/zero-cache/src/services/view-syncer/active-users-gauge.js +26 -13
  137. package/out/zero-cache/src/services/view-syncer/active-users-gauge.js.map +1 -1
  138. package/out/zero-cache/src/services/view-syncer/cvr-purger.d.ts +1 -1
  139. package/out/zero-cache/src/services/view-syncer/cvr-purger.d.ts.map +1 -1
  140. package/out/zero-cache/src/services/view-syncer/cvr-purger.js +39 -15
  141. package/out/zero-cache/src/services/view-syncer/cvr-purger.js.map +1 -1
  142. package/out/zero-cache/src/services/view-syncer/cvr-store.d.ts +4 -1
  143. package/out/zero-cache/src/services/view-syncer/cvr-store.d.ts.map +1 -1
  144. package/out/zero-cache/src/services/view-syncer/cvr-store.js +31 -9
  145. package/out/zero-cache/src/services/view-syncer/cvr-store.js.map +1 -1
  146. package/out/zero-cache/src/services/view-syncer/cvr.d.ts +3 -0
  147. package/out/zero-cache/src/services/view-syncer/cvr.d.ts.map +1 -1
  148. package/out/zero-cache/src/services/view-syncer/cvr.js +11 -0
  149. package/out/zero-cache/src/services/view-syncer/cvr.js.map +1 -1
  150. package/out/zero-cache/src/services/view-syncer/inspect-handler.js +1 -1
  151. package/out/zero-cache/src/services/view-syncer/inspect-handler.js.map +1 -1
  152. package/out/zero-cache/src/services/view-syncer/pipeline-driver.d.ts +11 -11
  153. package/out/zero-cache/src/services/view-syncer/pipeline-driver.d.ts.map +1 -1
  154. package/out/zero-cache/src/services/view-syncer/pipeline-driver.js +81 -27
  155. package/out/zero-cache/src/services/view-syncer/pipeline-driver.js.map +1 -1
  156. package/out/zero-cache/src/services/view-syncer/schema/cvr.d.ts +1 -0
  157. package/out/zero-cache/src/services/view-syncer/schema/cvr.d.ts.map +1 -1
  158. package/out/zero-cache/src/services/view-syncer/schema/cvr.js +23 -10
  159. package/out/zero-cache/src/services/view-syncer/schema/cvr.js.map +1 -1
  160. package/out/zero-cache/src/services/view-syncer/schema/init.d.ts.map +1 -1
  161. package/out/zero-cache/src/services/view-syncer/schema/init.js +31 -1
  162. package/out/zero-cache/src/services/view-syncer/schema/init.js.map +1 -1
  163. package/out/zero-cache/src/services/view-syncer/snapshotter.d.ts +2 -2
  164. package/out/zero-cache/src/services/view-syncer/snapshotter.d.ts.map +1 -1
  165. package/out/zero-cache/src/services/view-syncer/snapshotter.js +19 -4
  166. package/out/zero-cache/src/services/view-syncer/snapshotter.js.map +1 -1
  167. package/out/zero-cache/src/services/view-syncer/view-syncer.d.ts +2 -1
  168. package/out/zero-cache/src/services/view-syncer/view-syncer.d.ts.map +1 -1
  169. package/out/zero-cache/src/services/view-syncer/view-syncer.js +31 -29
  170. package/out/zero-cache/src/services/view-syncer/view-syncer.js.map +1 -1
  171. package/out/zero-cache/src/workers/connect-params.d.ts +1 -0
  172. package/out/zero-cache/src/workers/connect-params.d.ts.map +1 -1
  173. package/out/zero-cache/src/workers/connect-params.js +2 -0
  174. package/out/zero-cache/src/workers/connect-params.js.map +1 -1
  175. package/out/zero-cache/src/workers/syncer-ws-message-handler.d.ts.map +1 -1
  176. package/out/zero-cache/src/workers/syncer-ws-message-handler.js +2 -0
  177. package/out/zero-cache/src/workers/syncer-ws-message-handler.js.map +1 -1
  178. package/out/zero-client/src/client/bindings.d.ts +12 -42
  179. package/out/zero-client/src/client/bindings.d.ts.map +1 -1
  180. package/out/zero-client/src/client/connection-manager.d.ts +3 -3
  181. package/out/zero-client/src/client/connection-manager.d.ts.map +1 -1
  182. package/out/zero-client/src/client/connection-manager.js.map +1 -1
  183. package/out/zero-client/src/client/connection.d.ts.map +1 -1
  184. package/out/zero-client/src/client/connection.js +8 -1
  185. package/out/zero-client/src/client/connection.js.map +1 -1
  186. package/out/zero-client/src/client/crud-impl.d.ts +11 -0
  187. package/out/zero-client/src/client/crud-impl.d.ts.map +1 -0
  188. package/out/zero-client/src/client/crud-impl.js +102 -0
  189. package/out/zero-client/src/client/crud-impl.js.map +1 -0
  190. package/out/zero-client/src/client/crud.d.ts +10 -42
  191. package/out/zero-client/src/client/crud.d.ts.map +1 -1
  192. package/out/zero-client/src/client/crud.js +28 -110
  193. package/out/zero-client/src/client/crud.js.map +1 -1
  194. package/out/zero-client/src/client/custom.d.ts +11 -6
  195. package/out/zero-client/src/client/custom.d.ts.map +1 -1
  196. package/out/zero-client/src/client/custom.js +12 -53
  197. package/out/zero-client/src/client/custom.js.map +1 -1
  198. package/out/zero-client/src/client/delete-clients-manager.d.ts +1 -1
  199. package/out/zero-client/src/client/delete-clients-manager.d.ts.map +1 -1
  200. package/out/zero-client/src/client/delete-clients-manager.js +30 -3
  201. package/out/zero-client/src/client/delete-clients-manager.js.map +1 -1
  202. package/out/zero-client/src/client/error.d.ts +6 -1
  203. package/out/zero-client/src/client/error.d.ts.map +1 -1
  204. package/out/zero-client/src/client/error.js +2 -2
  205. package/out/zero-client/src/client/error.js.map +1 -1
  206. package/out/zero-client/src/client/ivm-branch.d.ts.map +1 -1
  207. package/out/zero-client/src/client/ivm-branch.js +20 -13
  208. package/out/zero-client/src/client/ivm-branch.js.map +1 -1
  209. package/out/zero-client/src/client/make-mutate-property.d.ts +6 -9
  210. package/out/zero-client/src/client/make-mutate-property.d.ts.map +1 -1
  211. package/out/zero-client/src/client/make-mutate-property.js +5 -10
  212. package/out/zero-client/src/client/make-mutate-property.js.map +1 -1
  213. package/out/zero-client/src/client/make-replicache-mutators.d.ts +2 -2
  214. package/out/zero-client/src/client/make-replicache-mutators.d.ts.map +1 -1
  215. package/out/zero-client/src/client/make-replicache-mutators.js +16 -11
  216. package/out/zero-client/src/client/make-replicache-mutators.js.map +1 -1
  217. package/out/zero-client/src/client/mutator-proxy.d.ts +3 -2
  218. package/out/zero-client/src/client/mutator-proxy.d.ts.map +1 -1
  219. package/out/zero-client/src/client/mutator-proxy.js +16 -5
  220. package/out/zero-client/src/client/mutator-proxy.js.map +1 -1
  221. package/out/zero-client/src/client/options.d.ts +5 -4
  222. package/out/zero-client/src/client/options.d.ts.map +1 -1
  223. package/out/zero-client/src/client/options.js.map +1 -1
  224. package/out/zero-client/src/client/version.js +1 -1
  225. package/out/zero-client/src/client/zero.d.ts +27 -13
  226. package/out/zero-client/src/client/zero.d.ts.map +1 -1
  227. package/out/zero-client/src/client/zero.js +81 -40
  228. package/out/zero-client/src/client/zero.js.map +1 -1
  229. package/out/zero-client/src/mod.d.ts +17 -16
  230. package/out/zero-client/src/mod.d.ts.map +1 -1
  231. package/out/zero-events/src/status.d.ts +1 -1
  232. package/out/zero-events/src/status.d.ts.map +1 -1
  233. package/out/zero-protocol/src/analyze-query-result.d.ts +2 -2
  234. package/out/zero-protocol/src/analyze-query-result.js +2 -2
  235. package/out/zero-protocol/src/analyze-query-result.js.map +1 -1
  236. package/out/zero-protocol/src/down.d.ts +2 -2
  237. package/out/zero-protocol/src/inspect-down.d.ts +6 -6
  238. package/out/zero-protocol/src/inspect-up.d.ts +4 -4
  239. package/out/zero-protocol/src/inspect-up.js +1 -1
  240. package/out/zero-protocol/src/inspect-up.js.map +1 -1
  241. package/out/zero-protocol/src/protocol-version.d.ts +1 -1
  242. package/out/zero-protocol/src/protocol-version.d.ts.map +1 -1
  243. package/out/zero-protocol/src/protocol-version.js +1 -1
  244. package/out/zero-protocol/src/protocol-version.js.map +1 -1
  245. package/out/zero-protocol/src/up.d.ts +1 -1
  246. package/out/zero-react/src/bindings.d.ts +2 -0
  247. package/out/zero-react/src/bindings.d.ts.map +1 -0
  248. package/out/zero-react/src/mod.d.ts +1 -10
  249. package/out/zero-react/src/mod.d.ts.map +1 -1
  250. package/out/zero-react/src/{use-zero-connection-state.d.ts → use-connection-state.d.ts} +3 -3
  251. package/out/zero-react/src/use-connection-state.d.ts.map +1 -0
  252. package/out/zero-react/src/{use-zero-connection-state.js → use-connection-state.js} +3 -3
  253. package/out/zero-react/src/use-connection-state.js.map +1 -0
  254. package/out/zero-react/src/use-query.d.ts +4 -10
  255. package/out/zero-react/src/use-query.d.ts.map +1 -1
  256. package/out/zero-react/src/use-query.js +26 -21
  257. package/out/zero-react/src/use-query.js.map +1 -1
  258. package/out/zero-react/src/use-zero-online.d.ts +1 -1
  259. package/out/zero-react/src/use-zero-online.js.map +1 -1
  260. package/out/zero-react/src/zero-provider.d.ts +17 -10
  261. package/out/zero-react/src/zero-provider.d.ts.map +1 -1
  262. package/out/zero-react/src/zero-provider.js +19 -1
  263. package/out/zero-react/src/zero-provider.js.map +1 -1
  264. package/out/zero-react/src/zero.d.ts +2 -0
  265. package/out/zero-react/src/zero.d.ts.map +1 -0
  266. package/out/zero-schema/src/compiled-permissions.d.ts +22 -2
  267. package/out/zero-schema/src/compiled-permissions.d.ts.map +1 -1
  268. package/out/zero-schema/src/compiled-permissions.js +7 -6
  269. package/out/zero-schema/src/compiled-permissions.js.map +1 -1
  270. package/out/zero-schema/src/permissions.d.ts +11 -8
  271. package/out/zero-schema/src/permissions.d.ts.map +1 -1
  272. package/out/zero-schema/src/permissions.js +2 -8
  273. package/out/zero-schema/src/permissions.js.map +1 -1
  274. package/out/zero-schema/src/schema-config.d.ts +0 -5
  275. package/out/zero-schema/src/schema-config.d.ts.map +1 -1
  276. package/out/zero-schema/src/schema-config.js +1 -1
  277. package/out/zero-schema/src/schema-config.js.map +1 -1
  278. package/out/zero-server/src/custom.d.ts +41 -14
  279. package/out/zero-server/src/custom.d.ts.map +1 -1
  280. package/out/zero-server/src/custom.js +129 -37
  281. package/out/zero-server/src/custom.js.map +1 -1
  282. package/out/zero-server/src/mod.d.ts +1 -1
  283. package/out/zero-server/src/mod.d.ts.map +1 -1
  284. package/out/zero-server/src/process-mutations.d.ts +10 -6
  285. package/out/zero-server/src/process-mutations.d.ts.map +1 -1
  286. package/out/zero-server/src/process-mutations.js +9 -18
  287. package/out/zero-server/src/process-mutations.js.map +1 -1
  288. package/out/zero-server/src/push-processor.d.ts.map +1 -1
  289. package/out/zero-server/src/push-processor.js +10 -8
  290. package/out/zero-server/src/push-processor.js.map +1 -1
  291. package/out/zero-server/src/queries/process-queries.d.ts +14 -2
  292. package/out/zero-server/src/queries/process-queries.d.ts.map +1 -1
  293. package/out/zero-server/src/queries/process-queries.js +18 -15
  294. package/out/zero-server/src/queries/process-queries.js.map +1 -1
  295. package/out/zero-server/src/zql-database.d.ts +6 -6
  296. package/out/zero-server/src/zql-database.d.ts.map +1 -1
  297. package/out/zero-server/src/zql-database.js +5 -17
  298. package/out/zero-server/src/zql-database.js.map +1 -1
  299. package/out/zero-solid/src/bindings.d.ts +2 -0
  300. package/out/zero-solid/src/bindings.d.ts.map +1 -0
  301. package/out/zero-solid/src/mod.d.ts +1 -8
  302. package/out/zero-solid/src/mod.d.ts.map +1 -1
  303. package/out/zero-solid/src/solid-view.d.ts +3 -5
  304. package/out/zero-solid/src/solid-view.d.ts.map +1 -1
  305. package/out/zero-solid/src/solid-view.js +9 -6
  306. package/out/zero-solid/src/solid-view.js.map +1 -1
  307. package/out/zero-solid/src/{use-zero-connection-state.d.ts → use-connection-state.d.ts} +3 -3
  308. package/out/zero-solid/src/use-connection-state.d.ts.map +1 -0
  309. package/out/zero-solid/src/{use-zero-connection-state.js → use-connection-state.js} +3 -3
  310. package/out/zero-solid/src/use-connection-state.js.map +1 -0
  311. package/out/zero-solid/src/use-query.d.ts +3 -6
  312. package/out/zero-solid/src/use-query.d.ts.map +1 -1
  313. package/out/zero-solid/src/use-query.js +44 -11
  314. package/out/zero-solid/src/use-query.js.map +1 -1
  315. package/out/zero-solid/src/use-zero-online.d.ts +1 -1
  316. package/out/zero-solid/src/use-zero-online.js.map +1 -1
  317. package/out/zero-solid/src/use-zero.d.ts +19 -9
  318. package/out/zero-solid/src/use-zero.d.ts.map +1 -1
  319. package/out/zero-solid/src/use-zero.js +17 -1
  320. package/out/zero-solid/src/use-zero.js.map +1 -1
  321. package/out/zero-solid/src/zero.d.ts +2 -0
  322. package/out/zero-solid/src/zero.d.ts.map +1 -0
  323. package/out/zero-types/src/default-types.d.ts +38 -0
  324. package/out/zero-types/src/default-types.d.ts.map +1 -0
  325. package/out/zero-types/src/schema.d.ts +4 -4
  326. package/out/zql/src/builder/builder.d.ts.map +1 -1
  327. package/out/zql/src/builder/builder.js +1 -13
  328. package/out/zql/src/builder/builder.js.map +1 -1
  329. package/out/zql/src/error.js +1 -10
  330. package/out/zql/src/error.js.map +1 -1
  331. package/out/zql/src/ivm/array-view.d.ts +2 -2
  332. package/out/zql/src/ivm/array-view.d.ts.map +1 -1
  333. package/out/zql/src/ivm/array-view.js +4 -1
  334. package/out/zql/src/ivm/array-view.js.map +1 -1
  335. package/out/zql/src/ivm/data.d.ts +7 -2
  336. package/out/zql/src/ivm/data.d.ts.map +1 -1
  337. package/out/zql/src/ivm/data.js +0 -8
  338. package/out/zql/src/ivm/data.js.map +1 -1
  339. package/out/zql/src/ivm/exists.d.ts +6 -4
  340. package/out/zql/src/ivm/exists.d.ts.map +1 -1
  341. package/out/zql/src/ivm/exists.js +60 -91
  342. package/out/zql/src/ivm/exists.js.map +1 -1
  343. package/out/zql/src/ivm/fan-in.d.ts +5 -3
  344. package/out/zql/src/ivm/fan-in.d.ts.map +1 -1
  345. package/out/zql/src/ivm/fan-in.js +12 -5
  346. package/out/zql/src/ivm/fan-in.js.map +1 -1
  347. package/out/zql/src/ivm/fan-out.d.ts +4 -2
  348. package/out/zql/src/ivm/fan-out.d.ts.map +1 -1
  349. package/out/zql/src/ivm/fan-out.js +16 -6
  350. package/out/zql/src/ivm/fan-out.js.map +1 -1
  351. package/out/zql/src/ivm/filter-operators.d.ts +13 -11
  352. package/out/zql/src/ivm/filter-operators.d.ts.map +1 -1
  353. package/out/zql/src/ivm/filter-operators.js +27 -24
  354. package/out/zql/src/ivm/filter-operators.js.map +1 -1
  355. package/out/zql/src/ivm/filter-push.d.ts +2 -1
  356. package/out/zql/src/ivm/filter-push.d.ts.map +1 -1
  357. package/out/zql/src/ivm/filter-push.js +5 -5
  358. package/out/zql/src/ivm/filter-push.js.map +1 -1
  359. package/out/zql/src/ivm/filter.d.ts +4 -2
  360. package/out/zql/src/ivm/filter.d.ts.map +1 -1
  361. package/out/zql/src/ivm/filter.js +10 -4
  362. package/out/zql/src/ivm/filter.js.map +1 -1
  363. package/out/zql/src/ivm/flipped-join.d.ts +1 -2
  364. package/out/zql/src/ivm/flipped-join.d.ts.map +1 -1
  365. package/out/zql/src/ivm/flipped-join.js +133 -103
  366. package/out/zql/src/ivm/flipped-join.js.map +1 -1
  367. package/out/zql/src/ivm/join-utils.d.ts +9 -2
  368. package/out/zql/src/ivm/join-utils.d.ts.map +1 -1
  369. package/out/zql/src/ivm/join-utils.js +20 -0
  370. package/out/zql/src/ivm/join-utils.js.map +1 -1
  371. package/out/zql/src/ivm/join.d.ts +3 -16
  372. package/out/zql/src/ivm/join.d.ts.map +1 -1
  373. package/out/zql/src/ivm/join.js +62 -128
  374. package/out/zql/src/ivm/join.js.map +1 -1
  375. package/out/zql/src/ivm/maybe-split-and-push-edit-change.d.ts +1 -1
  376. package/out/zql/src/ivm/maybe-split-and-push-edit-change.d.ts.map +1 -1
  377. package/out/zql/src/ivm/maybe-split-and-push-edit-change.js +4 -4
  378. package/out/zql/src/ivm/maybe-split-and-push-edit-change.js.map +1 -1
  379. package/out/zql/src/ivm/memory-source.d.ts +7 -6
  380. package/out/zql/src/ivm/memory-source.d.ts.map +1 -1
  381. package/out/zql/src/ivm/memory-source.js +39 -28
  382. package/out/zql/src/ivm/memory-source.js.map +1 -1
  383. package/out/zql/src/ivm/operator.d.ts +15 -12
  384. package/out/zql/src/ivm/operator.d.ts.map +1 -1
  385. package/out/zql/src/ivm/operator.js +8 -0
  386. package/out/zql/src/ivm/operator.js.map +1 -1
  387. package/out/zql/src/ivm/push-accumulated.d.ts +2 -2
  388. package/out/zql/src/ivm/push-accumulated.d.ts.map +1 -1
  389. package/out/zql/src/ivm/push-accumulated.js +8 -8
  390. package/out/zql/src/ivm/push-accumulated.js.map +1 -1
  391. package/out/zql/src/ivm/skip.d.ts +2 -3
  392. package/out/zql/src/ivm/skip.d.ts.map +1 -1
  393. package/out/zql/src/ivm/skip.js +14 -11
  394. package/out/zql/src/ivm/skip.js.map +1 -1
  395. package/out/zql/src/ivm/source.d.ts +15 -7
  396. package/out/zql/src/ivm/source.d.ts.map +1 -1
  397. package/out/zql/src/ivm/stream.d.ts +2 -0
  398. package/out/zql/src/ivm/stream.d.ts.map +1 -1
  399. package/out/zql/src/ivm/stream.js +5 -14
  400. package/out/zql/src/ivm/stream.js.map +1 -1
  401. package/out/zql/src/ivm/take.d.ts +2 -3
  402. package/out/zql/src/ivm/take.d.ts.map +1 -1
  403. package/out/zql/src/ivm/take.js +168 -140
  404. package/out/zql/src/ivm/take.js.map +1 -1
  405. package/out/zql/src/ivm/union-fan-in.d.ts +4 -4
  406. package/out/zql/src/ivm/union-fan-in.d.ts.map +1 -1
  407. package/out/zql/src/ivm/union-fan-in.js +74 -15
  408. package/out/zql/src/ivm/union-fan-in.js.map +1 -1
  409. package/out/zql/src/ivm/union-fan-out.d.ts +2 -3
  410. package/out/zql/src/ivm/union-fan-out.d.ts.map +1 -1
  411. package/out/zql/src/ivm/union-fan-out.js +3 -6
  412. package/out/zql/src/ivm/union-fan-out.js.map +1 -1
  413. package/out/zql/src/ivm/view-apply-change.d.ts.map +1 -1
  414. package/out/zql/src/ivm/view-apply-change.js +4 -4
  415. package/out/zql/src/ivm/view-apply-change.js.map +1 -1
  416. package/out/zql/src/ivm/view.d.ts +2 -2
  417. package/out/zql/src/ivm/view.d.ts.map +1 -1
  418. package/out/zql/src/mutate/crud.d.ts +116 -0
  419. package/out/zql/src/mutate/crud.d.ts.map +1 -0
  420. package/out/zql/src/mutate/crud.js +41 -0
  421. package/out/zql/src/mutate/crud.js.map +1 -0
  422. package/out/zql/src/mutate/custom.d.ts +24 -62
  423. package/out/zql/src/mutate/custom.d.ts.map +1 -1
  424. package/out/zql/src/mutate/custom.js +1 -5
  425. package/out/zql/src/mutate/custom.js.map +1 -1
  426. package/out/zql/src/mutate/mutator-registry.d.ts +43 -73
  427. package/out/zql/src/mutate/mutator-registry.d.ts.map +1 -1
  428. package/out/zql/src/mutate/mutator-registry.js +25 -34
  429. package/out/zql/src/mutate/mutator-registry.js.map +1 -1
  430. package/out/zql/src/mutate/mutator.d.ts +60 -64
  431. package/out/zql/src/mutate/mutator.d.ts.map +1 -1
  432. package/out/zql/src/mutate/mutator.js +8 -9
  433. package/out/zql/src/mutate/mutator.js.map +1 -1
  434. package/out/zql/src/planner/planner-builder.d.ts +2 -1
  435. package/out/zql/src/planner/planner-builder.d.ts.map +1 -1
  436. package/out/zql/src/planner/planner-builder.js +5 -5
  437. package/out/zql/src/planner/planner-builder.js.map +1 -1
  438. package/out/zql/src/planner/planner-debug.d.ts +3 -3
  439. package/out/zql/src/planner/planner-debug.js.map +1 -1
  440. package/out/zql/src/planner/planner-graph.d.ts +3 -1
  441. package/out/zql/src/planner/planner-graph.d.ts.map +1 -1
  442. package/out/zql/src/planner/planner-graph.js +5 -5
  443. package/out/zql/src/planner/planner-graph.js.map +1 -1
  444. package/out/zql/src/planner/planner-join.d.ts.map +1 -1
  445. package/out/zql/src/planner/planner-join.js +3 -1
  446. package/out/zql/src/planner/planner-join.js.map +1 -1
  447. package/out/zql/src/query/create-builder.d.ts +4 -1
  448. package/out/zql/src/query/create-builder.d.ts.map +1 -1
  449. package/out/zql/src/query/create-builder.js +24 -36
  450. package/out/zql/src/query/create-builder.js.map +1 -1
  451. package/out/zql/src/query/expression.d.ts +5 -5
  452. package/out/zql/src/query/expression.d.ts.map +1 -1
  453. package/out/zql/src/query/expression.js.map +1 -1
  454. package/out/zql/src/query/measure-push-operator.d.ts +2 -3
  455. package/out/zql/src/query/measure-push-operator.d.ts.map +1 -1
  456. package/out/zql/src/query/measure-push-operator.js +2 -5
  457. package/out/zql/src/query/measure-push-operator.js.map +1 -1
  458. package/out/zql/src/query/query-delegate-base.d.ts +12 -6
  459. package/out/zql/src/query/query-delegate-base.d.ts.map +1 -1
  460. package/out/zql/src/query/query-delegate-base.js +132 -2
  461. package/out/zql/src/query/query-delegate-base.js.map +1 -1
  462. package/out/zql/src/query/query-delegate.d.ts +6 -6
  463. package/out/zql/src/query/query-delegate.d.ts.map +1 -1
  464. package/out/zql/src/query/query-impl.d.ts +27 -28
  465. package/out/zql/src/query/query-impl.d.ts.map +1 -1
  466. package/out/zql/src/query/query-impl.js +41 -168
  467. package/out/zql/src/query/query-impl.js.map +1 -1
  468. package/out/zql/src/query/query-internals.d.ts +6 -6
  469. package/out/zql/src/query/query-internals.d.ts.map +1 -1
  470. package/out/zql/src/query/query-internals.js +2 -2
  471. package/out/zql/src/query/query-internals.js.map +1 -1
  472. package/out/zql/src/query/query-registry.d.ts +108 -122
  473. package/out/zql/src/query/query-registry.d.ts.map +1 -1
  474. package/out/zql/src/query/query-registry.js +43 -53
  475. package/out/zql/src/query/query-registry.js.map +1 -1
  476. package/out/zql/src/query/query.d.ts +63 -37
  477. package/out/zql/src/query/query.d.ts.map +1 -1
  478. package/out/zql/src/query/runnable-query-impl.d.ts +22 -0
  479. package/out/zql/src/query/runnable-query-impl.d.ts.map +1 -0
  480. package/out/zql/src/query/runnable-query-impl.js +60 -0
  481. package/out/zql/src/query/runnable-query-impl.js.map +1 -0
  482. package/out/zql/src/query/schema-query.d.ts +2 -1
  483. package/out/zql/src/query/schema-query.d.ts.map +1 -1
  484. package/out/zql/src/query/static-query.d.ts +2 -15
  485. package/out/zql/src/query/static-query.d.ts.map +1 -1
  486. package/out/zql/src/query/static-query.js +10 -37
  487. package/out/zql/src/query/static-query.js.map +1 -1
  488. package/out/zqlite/src/internal/sql-inline.d.ts +13 -0
  489. package/out/zqlite/src/internal/sql-inline.d.ts.map +1 -0
  490. package/out/zqlite/src/internal/sql-inline.js +45 -0
  491. package/out/zqlite/src/internal/sql-inline.js.map +1 -0
  492. package/out/zqlite/src/sqlite-cost-model.d.ts.map +1 -1
  493. package/out/zqlite/src/sqlite-cost-model.js +2 -2
  494. package/out/zqlite/src/sqlite-cost-model.js.map +1 -1
  495. package/out/zqlite/src/table-source.d.ts +10 -3
  496. package/out/zqlite/src/table-source.d.ts.map +1 -1
  497. package/out/zqlite/src/table-source.js +42 -23
  498. package/out/zqlite/src/table-source.js.map +1 -1
  499. package/package.json +9 -5
  500. package/out/zero-client/src/client/bindings.js +0 -33
  501. package/out/zero-client/src/client/bindings.js.map +0 -1
  502. package/out/zero-react/src/components/inspector.d.ts +0 -9
  503. package/out/zero-react/src/components/inspector.d.ts.map +0 -1
  504. package/out/zero-react/src/components/inspector.js +0 -38
  505. package/out/zero-react/src/components/inspector.js.map +0 -1
  506. package/out/zero-react/src/components/mark-icon.d.ts +0 -3
  507. package/out/zero-react/src/components/mark-icon.d.ts.map +0 -1
  508. package/out/zero-react/src/components/mark-icon.js +0 -28
  509. package/out/zero-react/src/components/mark-icon.js.map +0 -1
  510. package/out/zero-react/src/components/zero-inspector.d.ts +0 -8
  511. package/out/zero-react/src/components/zero-inspector.d.ts.map +0 -1
  512. package/out/zero-react/src/components/zero-inspector.js +0 -44
  513. package/out/zero-react/src/components/zero-inspector.js.map +0 -1
  514. package/out/zero-react/src/use-zero-connection-state.d.ts.map +0 -1
  515. package/out/zero-react/src/use-zero-connection-state.js.map +0 -1
  516. package/out/zero-solid/src/use-zero-connection-state.d.ts.map +0 -1
  517. package/out/zero-solid/src/use-zero-connection-state.js.map +0 -1
@@ -4,6 +4,7 @@ import { getOrCreateCounter } from "../../observability/metrics.js";
  import { min } from "../../types/lexi-version.js";
  import { Subscription } from "../../types/subscription.js";
  import "../change-source/protocol/current/downstream.js";
+ import { publishReplicationError } from "../replicator/replication-status.js";
  import { RunningState, UnrecoverableError, DEFAULT_MAX_RETRY_DELAY_MS } from "../running-state.js";
  import "./change-streamer.js";
  import { WrongReplicaVersion } from "./error-type-enum.js";
@@ -87,7 +88,7 @@ class ChangeStreamerImpl {
    async run() {
      this.#lc.info?.("starting change stream");
      await this.#storer.assumeOwnership();
-     this.#storer.run().catch((e) => this.stop(e));
+     this.#storer.run().then(() => this.stop()).catch((e) => this.stop(e));
      while (this.#state.shouldRun()) {
        let err;
        let watermark = null;
@@ -157,6 +158,12 @@ class ChangeStreamerImpl {
      switch (tag) {
        case "reset-required":
          await markResetRequired(this.#changeDB, this.#shard);
+         await publishReplicationError(
+           this.#lc,
+           "Replicating",
+           msg.message ?? "Resync required",
+           msg.errorDetails
+         );
          if (this.#autoReset) {
            this.#lc.warn?.("shutting down for auto-reset");
            await this.stop(new AutoResetSignal());
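Two behavioral changes are visible in these hunks: the storer's run() promise now stops the change streamer when it resolves as well as when it rejects (presumably so that a storer that exits cleanly, e.g. after losing ownership of the change DB, also shuts the streamer down), and a "reset-required" control message now publishes a replication error before the optional auto-reset shutdown. The real publishReplicationError implementation lives in ../replicator/replication-status.ts (changed in this release, +14/-1) and is not shown in this diff; the sketch below only illustrates a signature compatible with the call site above, with assumed parameter names and an assumed body.

import type {LogContext} from '@rocicorp/logger';

// Sketch only: inferred from the call site
// publishReplicationError(lc, "Replicating", message, errorDetails).
// Parameter names and the body are assumptions; the actual helper in
// replication-status.ts may differ.
export async function publishReplicationError(
  lc: LogContext,
  stage: string,          // 'Replicating' at this call site
  message: string,        // e.g. msg.message ?? 'Resync required'
  errorDetails?: unknown, // msg.errorDetails, if the change source provided any
): Promise<void> {
  // At minimum, surface the error in the logs; presumably the real helper also
  // records it where replication-status consumers can observe it.
  lc.error?.(`replication error (${stage}): ${message}`, errorDetails);
}

With a helper of this shape, the call in #handleControlMessage simply reports why a resync is required before the streamer marks the reset and, when auto-reset is enabled, stops itself with an AutoResetSignal.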
@@ -1 +1 @@
1
- {"version":3,"file":"change-streamer-service.js","sources":["../../../../../../zero-cache/src/services/change-streamer/change-streamer-service.ts"],"sourcesContent":["import type {LogContext} from '@rocicorp/logger';\nimport {resolver} from '@rocicorp/resolver';\nimport {unreachable} from '../../../../shared/src/asserts.ts';\nimport {getOrCreateCounter} from '../../observability/metrics.ts';\nimport {\n min,\n type AtLeastOne,\n type LexiVersion,\n} from '../../types/lexi-version.ts';\nimport type {PostgresDB} from '../../types/pg.ts';\nimport type {ShardID} from '../../types/shards.ts';\nimport type {Sink, Source} from '../../types/streams.ts';\nimport {Subscription} from '../../types/subscription.ts';\nimport {\n type ChangeStreamControl,\n type ChangeStreamData,\n type ChangeStreamMessage,\n} from '../change-source/protocol/current/downstream.ts';\nimport type {ChangeSourceUpstream} from '../change-source/protocol/current/upstream.ts';\nimport type {SubscriptionState} from '../replicator/schema/replication-state.ts';\nimport {\n DEFAULT_MAX_RETRY_DELAY_MS,\n RunningState,\n UnrecoverableError,\n} from '../running-state.ts';\nimport {\n type ChangeStreamerService,\n type Downstream,\n type SubscriberContext,\n} from './change-streamer.ts';\nimport * as ErrorType from './error-type-enum.ts';\nimport {Forwarder} from './forwarder.ts';\nimport {initChangeStreamerSchema} from './schema/init.ts';\nimport {\n AutoResetSignal,\n ensureReplicationConfig,\n markResetRequired,\n} from './schema/tables.ts';\nimport {Storer} from './storer.ts';\nimport {Subscriber} from './subscriber.ts';\n\n/**\n * Performs initialization and schema migrations to initialize a ChangeStreamerImpl.\n */\nexport async function initializeStreamer(\n lc: LogContext,\n shard: ShardID,\n taskID: string,\n discoveryAddress: string,\n discoveryProtocol: string,\n changeDB: PostgresDB,\n changeSource: ChangeSource,\n subscriptionState: SubscriptionState,\n autoReset: boolean,\n setTimeoutFn = setTimeout,\n): Promise<ChangeStreamerService> {\n // Make sure the ChangeLog DB is set up.\n await initChangeStreamerSchema(lc, changeDB, shard);\n await ensureReplicationConfig(\n lc,\n changeDB,\n subscriptionState,\n shard,\n autoReset,\n );\n\n const {replicaVersion} = subscriptionState;\n return new ChangeStreamerImpl(\n lc,\n shard,\n taskID,\n discoveryAddress,\n discoveryProtocol,\n changeDB,\n replicaVersion,\n changeSource,\n autoReset,\n setTimeoutFn,\n );\n}\n\n/**\n * Internally all Downstream messages (not just commits) are given a watermark.\n * These are used for internal ordering for:\n * 1. Replaying new changes in the Storer\n * 2. Filtering old changes in the Subscriber\n *\n * However, only the watermark for `Commit` messages are exposed to\n * subscribers, as that is the only semantically correct watermark to\n * use for tracking a position in a replication stream.\n */\nexport type WatermarkedChange = [watermark: string, ChangeStreamData];\n\nexport type ChangeStream = {\n changes: Source<ChangeStreamMessage>;\n\n /**\n * A Sink to push the {@link StatusMessage}s that reflect Commits\n * that have been successfully stored by the {@link Storer}, or\n * downstream {@link StatusMessage}s henceforth.\n */\n acks: Sink<ChangeSourceUpstream>;\n};\n\n/** Encapsulates an upstream-specific implementation of a stream of Changes. 
*/\nexport interface ChangeSource {\n /**\n * Starts a stream of changes starting after the specific watermark,\n * with a corresponding sink for upstream acknowledgements.\n */\n startStream(afterWatermark: string): Promise<ChangeStream>;\n}\n\n/**\n * Upstream-agnostic dispatch of messages in a {@link ChangeStreamMessage} to a\n * {@link Forwarder} and {@link Storer} to execute the forward-store-ack\n * procedure described in {@link ChangeStreamer}.\n *\n * ### Subscriber Catchup\n *\n * Connecting clients first need to be \"caught up\" to the current watermark\n * (from stored change log entries) before new entries are forwarded to\n * them. This is non-trivial because the replication stream may be in the\n * middle of a pending streamed Transaction for which some entries have\n * already been forwarded but are not yet committed to the store.\n *\n *\n * ```\n * ------------------------------- - - - - - - - - - - - - - - - - - - -\n * | Historic changes in storage | Pending (streamed) tx | Next tx\n * ------------------------------- - - - - - - - - - - - - - - - - - - -\n * Replication stream\n * > > > > > > > > >\n * ^ ---> required catchup ---> ^\n * Subscriber watermark Subscription begins\n * ```\n *\n * Preemptively buffering the changes of every pending transaction\n * would be wasteful and consume too much memory for large transactions.\n *\n * Instead, the streamer synchronously dispatches changes and subscriptions\n * to the {@link Forwarder} and the {@link Storer} such that the two\n * components are aligned as to where in the stream the subscription started.\n * The two components then coordinate catchup and handoff via the\n * {@link Subscriber} object with the following algorithm:\n *\n * * If the streamer is in the middle of a pending Transaction, the\n * Subscriber is \"queued\" on both the Forwarder and the Storer. In this\n * state, new changes are *not* forwarded to the Subscriber, and catchup\n * is not yet executed.\n * * Once the commit message for the pending Transaction is processed\n * by the Storer, it begins catchup on the Subscriber (with a READONLY\n * snapshot so that it does not block subsequent storage operations).\n * This catchup is thus guaranteed to load the change log entries of\n * that last Transaction.\n * * When the Forwarder processes that same commit message, it moves the\n * Subscriber from the \"queued\" to the \"active\" set of clients such that\n * the Subscriber begins receiving new changes, starting from the next\n * Transaction.\n * * The Subscriber does not forward those changes, however, if its catchup\n * is not complete. Until then, it buffers the changes in memory.\n * * Once catchup is complete, the buffered changes are immediately sent\n * and the Subscriber henceforth forwards changes as they are received.\n *\n * In the (common) case where the streamer is not in the middle of a pending\n * transaction when a subscription begins, the Storer begins catchup\n * immediately and the Forwarder directly adds the Subscriber to its active\n * set. However, the Subscriber still buffers any forwarded messages until\n * its catchup is complete.\n *\n * ### Watermarks and ordering\n *\n * The ChangeStreamerService depends on its {@link ChangeSource} to send\n * changes in contiguous [`begin`, `data` ..., `data`, `commit`] sequences\n * in commit order. 
This follows Postgres's Logical Replication Protocol\n * Message Flow:\n *\n * https://www.postgresql.org/docs/16/protocol-logical-replication.html#PROTOCOL-LOGICAL-MESSAGES-FLOW\n *\n * > The logical replication protocol sends individual transactions one by one.\n * > This means that all messages between a pair of Begin and Commit messages belong to the same transaction.\n *\n * In order to correctly replay (new) and filter (old) messages to subscribers\n * at different points in the replication stream, these changes must be assigned\n * watermarks such that they preserve the order in which they were received\n * from the ChangeSource.\n *\n * A previous implementation incorrectly derived these watermarks from the Postgres\n * Log Sequence Numbers (LSN) of each message. However, LSNs from concurrent,\n * non-conflicting transactions can overlap, which can result in a `begin` message\n * with an earlier LSN arriving after a `commit` message. For example, the\n * changes for these transactions:\n *\n * ```\n * LSN: 1 2 3 4 5 6 7 8 9 10\n * tx1: begin data data data commit\n * tx2: begin data data data commit\n * ```\n *\n * will arrive as:\n *\n * ```\n * begin1, data2, data4, data6, commit8, begin3, data5, data7, data9, commit10\n * ```\n *\n * Thus, LSN of non-commit messages are not suitable for tracking the sorting\n * order of the replication stream.\n *\n * Instead, the ChangeStreamer uses the following algorithm for deterministic\n * catchup and filtering of changes:\n *\n * * A `commit` message is assigned to a watermark corresponding to its LSN.\n * These are guaranteed to be in commit order by definition.\n *\n * * `begin` and `data` messages are assigned to the watermark of the\n * preceding `commit` (the previous transaction, or the replication\n * slot's starting LSN) plus 1. This guarantees that they will be sorted\n * after the previously commit transaction even if their LSNs came before it.\n * This is referred to as the `preCommitWatermark`.\n *\n * * In the ChangeLog DB, messages have a secondary sort column `pos`, which is\n * the position of the message within its transaction, with the `begin` message\n * starting at `0`. This guarantees that `begin` and `data` messages will be\n * fetched in the original ChangeSource order during catchup.\n *\n * `begin` and `data` messages share the same watermark, but this is sufficient for\n * Subscriber filtering because subscribers only know about the `commit` watermarks\n * exposed in the `Downstream` `Commit` message. The Subscriber object thus compares\n * the internal watermarks of the incoming messages against the commit watermark of\n * the caller, updating the watermark at every `Commit` message that is forwarded.\n *\n * ### Cleanup\n *\n * As mentioned in the {@link ChangeStreamer} documentation: \"the ChangeStreamer\n * uses a combination of [the \"initial\", i.e. 
backup-derived watermark and] ACK\n * responses from connected subscribers to determine the watermark up\n * to which it is safe to purge old change log entries.\"\n *\n * More concretely:\n *\n * * The `initial`, backup-derived watermark is the earliest to which cleanup\n * should ever happen.\n *\n * * However, it is possible for the replica backup to be *ahead* of a connected\n * subscriber; and if a network error causes that subscriber to retry from its\n * last watermark, the change streamer must support it.\n *\n * Thus, before cleaning up to an `initial` backup-derived watermark, the change\n * streamer first confirms that all connected subscribers have also passed\n * that watermark.\n */\nclass ChangeStreamerImpl implements ChangeStreamerService {\n readonly id: string;\n readonly #lc: LogContext;\n readonly #shard: ShardID;\n readonly #changeDB: PostgresDB;\n readonly #replicaVersion: string;\n readonly #source: ChangeSource;\n readonly #storer: Storer;\n readonly #forwarder: Forwarder;\n\n readonly #autoReset: boolean;\n readonly #state: RunningState;\n readonly #initialWatermarks = new Set<string>();\n\n // Starting the (Postgres) ChangeStream results in killing the previous\n // Postgres subscriber, potentially creating a gap in which the old\n // change-streamer has shut down and the new change-streamer has not yet\n // been recognized as \"healthy\" (and thus does not get any requests).\n //\n // To minimize this gap, delay starting the ChangeStream until the first\n // request from a `serving` replicator, indicating that higher level\n // load-balancing / routing logic has begun routing requests to this task.\n readonly #serving = resolver();\n\n readonly #txCounter = getOrCreateCounter(\n 'replication',\n 'transactions',\n 'Count of replicated transactions',\n );\n\n #stream: ChangeStream | undefined;\n\n constructor(\n lc: LogContext,\n shard: ShardID,\n taskID: string,\n discoveryAddress: string,\n discoveryProtocol: string,\n changeDB: PostgresDB,\n replicaVersion: string,\n source: ChangeSource,\n autoReset: boolean,\n setTimeoutFn = setTimeout,\n ) {\n this.id = `change-streamer`;\n this.#lc = lc.withContext('component', 'change-streamer');\n this.#shard = shard;\n this.#changeDB = changeDB;\n this.#replicaVersion = replicaVersion;\n this.#source = source;\n this.#storer = new Storer(\n lc,\n shard,\n taskID,\n discoveryAddress,\n discoveryProtocol,\n changeDB,\n replicaVersion,\n consumed => this.#stream?.acks.push(['status', consumed[1], consumed[2]]),\n err => this.stop(err),\n );\n this.#forwarder = new Forwarder();\n this.#autoReset = autoReset;\n this.#state = new RunningState(this.id, undefined, setTimeoutFn);\n }\n\n async run() {\n this.#lc.info?.('starting change stream');\n\n // Once this change-streamer acquires \"ownership\" of the change DB,\n // it is safe to start the storer.\n await this.#storer.assumeOwnership();\n // The storer will, in turn, detect changes to ownership and stop\n // the change-streamer appropriately.\n this.#storer.run().catch(e => this.stop(e));\n\n while (this.#state.shouldRun()) {\n let err: unknown;\n let watermark: string | null = null;\n try {\n const startAfter = await this.#storer.getLastWatermarkToStartStream();\n const stream = await this.#source.startStream(startAfter);\n this.#stream = stream;\n this.#state.resetBackoff();\n watermark = null;\n\n for await (const change of stream.changes) {\n const [type, msg] = change;\n switch (type) {\n case 'status':\n this.#storer.status(change); // storer acks once it gets through 
its queue\n continue;\n case 'control':\n await this.#handleControlMessage(msg);\n continue; // control messages are not stored/forwarded\n case 'begin':\n watermark = change[2].commitWatermark;\n break;\n case 'commit':\n if (watermark !== change[2].watermark) {\n throw new UnrecoverableError(\n `commit watermark ${change[2].watermark} does not match 'begin' watermark ${watermark}`,\n );\n }\n this.#txCounter.add(1);\n break;\n default:\n if (watermark === null) {\n throw new UnrecoverableError(\n `${type} change (${msg.tag}) received before 'begin' message`,\n );\n }\n break;\n }\n\n this.#storer.store([watermark, change]);\n this.#forwarder.forward([watermark, change]);\n\n if (type === 'commit') {\n watermark = null;\n }\n\n // Allow the storer to exert back pressure.\n const readyForMore = this.#storer.readyForMore();\n if (readyForMore) {\n await readyForMore;\n }\n }\n } catch (e) {\n err = e;\n } finally {\n this.#stream?.changes.cancel();\n this.#stream = undefined;\n }\n\n // When the change stream is interrupted, abort any pending transaction.\n if (watermark) {\n this.#lc.warn?.(`aborting interrupted transaction ${watermark}`);\n this.#storer.abort();\n this.#forwarder.forward([watermark, ['rollback', {tag: 'rollback'}]]);\n }\n\n await this.#state.backoff(this.#lc, err);\n }\n this.#lc.info?.('ChangeStreamer stopped');\n }\n\n async #handleControlMessage(msg: ChangeStreamControl[1]) {\n this.#lc.info?.('received control message', msg);\n const {tag} = msg;\n\n switch (tag) {\n case 'reset-required':\n await markResetRequired(this.#changeDB, this.#shard);\n if (this.#autoReset) {\n this.#lc.warn?.('shutting down for auto-reset');\n await this.stop(new AutoResetSignal());\n }\n break;\n default:\n unreachable(tag);\n }\n }\n\n subscribe(ctx: SubscriberContext): Promise<Source<Downstream>> {\n const {protocolVersion, id, mode, replicaVersion, watermark, initial} = ctx;\n if (mode === 'serving') {\n this.#serving.resolve();\n }\n const downstream = Subscription.create<Downstream>({\n cleanup: () => this.#forwarder.remove(subscriber),\n });\n const subscriber = new Subscriber(\n protocolVersion,\n id,\n watermark,\n downstream,\n );\n if (replicaVersion !== this.#replicaVersion) {\n this.#lc.warn?.(\n `rejecting subscriber at replica version ${replicaVersion}`,\n );\n subscriber.close(\n ErrorType.WrongReplicaVersion,\n `current replica version is ${\n this.#replicaVersion\n } (requested ${replicaVersion})`,\n );\n } else {\n this.#lc.debug?.(`adding subscriber ${subscriber.id}`);\n\n this.#forwarder.add(subscriber);\n this.#storer.catchup(subscriber, mode);\n\n if (initial) {\n this.scheduleCleanup(watermark);\n }\n }\n return Promise.resolve(downstream);\n }\n\n scheduleCleanup(watermark: string) {\n const origSize = this.#initialWatermarks.size;\n this.#initialWatermarks.add(watermark);\n\n if (origSize === 0) {\n this.#state.setTimeout(() => this.#purgeOldChanges(), CLEANUP_DELAY_MS);\n }\n }\n\n async getChangeLogState(): Promise<{\n replicaVersion: string;\n minWatermark: string;\n }> {\n const minWatermark = await this.#storer.getMinWatermarkForCatchup();\n return {\n replicaVersion: this.#replicaVersion,\n minWatermark: minWatermark ?? 
this.#replicaVersion,\n };\n }\n\n async #purgeOldChanges(): Promise<void> {\n const initial = [...this.#initialWatermarks];\n if (initial.length === 0) {\n this.#lc.warn?.('No initial watermarks to check for cleanup'); // Not expected.\n return;\n }\n const current = [...this.#forwarder.getAcks()];\n if (current.length === 0) {\n // Also not expected, but possible (e.g. subscriber connects, then disconnects).\n // Bail to be safe.\n this.#lc.warn?.('No subscribers to confirm cleanup');\n return;\n }\n try {\n const earliestInitial = min(...(initial as AtLeastOne<LexiVersion>));\n const earliestCurrent = min(...(current as AtLeastOne<LexiVersion>));\n if (earliestCurrent < earliestInitial) {\n this.#lc.info?.(\n `At least one client is behind backup (${earliestCurrent} < ${earliestInitial})`,\n );\n } else {\n const deleted = await this.#storer.purgeRecordsBefore(earliestInitial);\n this.#lc.info?.(`Purged ${deleted} changes before ${earliestInitial}`);\n this.#initialWatermarks.delete(earliestInitial);\n }\n } finally {\n if (this.#initialWatermarks.size) {\n // If there are unpurged watermarks to check, schedule the next purge.\n this.#state.setTimeout(() => this.#purgeOldChanges(), CLEANUP_DELAY_MS);\n }\n }\n }\n\n async stop(err?: unknown) {\n this.#state.stop(this.#lc, err);\n this.#stream?.changes.cancel();\n await this.#storer.stop();\n }\n}\n\n// The delay between receiving an initial, backup-based watermark\n// and performing a check of whether to purge records before it.\n// This delay should be long enough to handle situations like the following:\n//\n// 1. `litestream restore` downloads a backup for the `replication-manager`\n// 2. `replication-manager` starts up and runs this `change-streamer`\n// 3. `zero-cache`s that are running on a different replica connect to this\n// `change-streamer` after exponential backoff retries.\n//\n// It is possible for a `zero-cache`[3] to be behind the backup restored [1].\n// This cleanup delay (30 seconds) is thus set to be a value comfortably\n// longer than the max delay for exponential backoff (10 seconds) in\n// `services/running-state.ts`. 
This allows the `zero-cache` [3] to reconnect\n// so that the `change-streamer` can track its progress and know when it has\n// surpassed the initial watermark of the backup [1].\nconst CLEANUP_DELAY_MS = DEFAULT_MAX_RETRY_DELAY_MS * 3;\n"],"names":["ErrorType.WrongReplicaVersion"],"mappings":";;;;;;;;;;;;;;AA4CA,eAAsB,mBACpB,IACA,OACA,QACA,kBACA,mBACA,UACA,cACA,mBACA,WACA,eAAe,YACiB;AAEhC,QAAM,yBAAyB,IAAI,UAAU,KAAK;AAClD,QAAM;AAAA,IACJ;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EAAA;AAGF,QAAM,EAAC,mBAAkB;AACzB,SAAO,IAAI;AAAA,IACT;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EAAA;AAEJ;AA4KA,MAAM,mBAAoD;AAAA,EAC/C;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EAEA;AAAA,EACA;AAAA,EACA,yCAAyB,IAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAUzB,WAAW,SAAA;AAAA,EAEX,aAAa;AAAA,IACpB;AAAA,IACA;AAAA,IACA;AAAA,EAAA;AAAA,EAGF;AAAA,EAEA,YACE,IACA,OACA,QACA,kBACA,mBACA,UACA,gBACA,QACA,WACA,eAAe,YACf;AACA,SAAK,KAAK;AACV,SAAK,MAAM,GAAG,YAAY,aAAa,iBAAiB;AACxD,SAAK,SAAS;AACd,SAAK,YAAY;AACjB,SAAK,kBAAkB;AACvB,SAAK,UAAU;AACf,SAAK,UAAU,IAAI;AAAA,MACjB;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA,CAAA,aAAY,KAAK,SAAS,KAAK,KAAK,CAAC,UAAU,SAAS,CAAC,GAAG,SAAS,CAAC,CAAC,CAAC;AAAA,MACxE,CAAA,QAAO,KAAK,KAAK,GAAG;AAAA,IAAA;AAEtB,SAAK,aAAa,IAAI,UAAA;AACtB,SAAK,aAAa;AAClB,SAAK,SAAS,IAAI,aAAa,KAAK,IAAI,QAAW,YAAY;AAAA,EACjE;AAAA,EAEA,MAAM,MAAM;AACV,SAAK,IAAI,OAAO,wBAAwB;AAIxC,UAAM,KAAK,QAAQ,gBAAA;AAGnB,SAAK,QAAQ,MAAM,MAAM,OAAK,KAAK,KAAK,CAAC,CAAC;AAE1C,WAAO,KAAK,OAAO,aAAa;AAC9B,UAAI;AACJ,UAAI,YAA2B;AAC/B,UAAI;AACF,cAAM,aAAa,MAAM,KAAK,QAAQ,8BAAA;AACtC,cAAM,SAAS,MAAM,KAAK,QAAQ,YAAY,UAAU;AACxD,aAAK,UAAU;AACf,aAAK,OAAO,aAAA;AACZ,oBAAY;AAEZ,yBAAiB,UAAU,OAAO,SAAS;AACzC,gBAAM,CAAC,MAAM,GAAG,IAAI;AACpB,kBAAQ,MAAA;AAAA,YACN,KAAK;AACH,mBAAK,QAAQ,OAAO,MAAM;AAC1B;AAAA,YACF,KAAK;AACH,oBAAM,KAAK,sBAAsB,GAAG;AACpC;AAAA;AAAA,YACF,KAAK;AACH,0BAAY,OAAO,CAAC,EAAE;AACtB;AAAA,YACF,KAAK;AACH,kBAAI,cAAc,OAAO,CAAC,EAAE,WAAW;AACrC,sBAAM,IAAI;AAAA,kBACR,oBAAoB,OAAO,CAAC,EAAE,SAAS,qCAAqC,SAAS;AAAA,gBAAA;AAAA,cAEzF;AACA,mBAAK,WAAW,IAAI,CAAC;AACrB;AAAA,YACF;AACE,kBAAI,cAAc,MAAM;AACtB,sBAAM,IAAI;AAAA,kBACR,GAAG,IAAI,YAAY,IAAI,GAAG;AAAA,gBAAA;AAAA,cAE9B;AACA;AAAA,UAAA;AAGJ,eAAK,QAAQ,MAAM,CAAC,WAAW,MAAM,CAAC;AACtC,eAAK,WAAW,QAAQ,CAAC,WAAW,MAAM,CAAC;AAE3C,cAAI,SAAS,UAAU;AACrB,wBAAY;AAAA,UACd;AAGA,gBAAM,eAAe,KAAK,QAAQ,aAAA;AAClC,cAAI,cAAc;AAChB,kBAAM;AAAA,UACR;AAAA,QACF;AAAA,MACF,SAAS,GAAG;AACV,cAAM;AAAA,MACR,UAAA;AACE,aAAK,SAAS,QAAQ,OAAA;AACtB,aAAK,UAAU;AAAA,MACjB;AAGA,UAAI,WAAW;AACb,aAAK,IAAI,OAAO,oCAAoC,SAAS,EAAE;AAC/D,aAAK,QAAQ,MAAA;AACb,aAAK,WAAW,QAAQ,CAAC,WAAW,CAAC,YAAY,EAAC,KAAK,WAAA,CAAW,CAAC,CAAC;AAAA,MACtE;AAEA,YAAM,KAAK,OAAO,QAAQ,KAAK,KAAK,GAAG;AAAA,IACzC;AACA,SAAK,IAAI,OAAO,wBAAwB;AAAA,EAC1C;AAAA,EAEA,MAAM,sBAAsB,KAA6B;AACvD,SAAK,IAAI,OAAO,4BAA4B,GAAG;AAC/C,UAAM,EAAC,QAAO;AAEd,YAAQ,KAAA;AAAA,MACN,KAAK;AACH,cAAM,kBAAkB,KAAK,WAAW,KAAK,MAAM;AACnD,YAAI,KAAK,YAAY;AACnB,eAAK,IAAI,OAAO,8BAA8B;AAC9C,gBAAM,KAAK,KAAK,IAAI,iBAAiB;AAAA,QACvC;AACA;AAAA,MACF;AACE,oBAAe;AAAA,IAAA;AAAA,EAErB;AAAA,EAEA,UAAU,KAAqD;AAC7D,UAAM,EAAC,iBAAiB,IAAI,MAAM,gBAAgB,WAAW,YAAW;AACxE,QAAI,SAAS,WAAW;AACtB,WAAK,SAAS,QAAA;AAAA,IAChB;AACA,UAAM,aAAa,aAAa,OAAmB;AAAA,MACjD,SAAS,MAAM,KAAK,WAAW,OAAO,UAAU;AAAA,IAAA,CACjD;AACD,UAAM,aAAa,IAAI;AAAA,MACrB;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IAAA;AAEF,QAAI,mBAAmB,KAAK,iBAAiB;AAC3C,WAAK,IAAI;AAAA,QACP,2CAA2C,cAAc;AAAA,MAAA;AAE3D,iBAAW;AAAA,QACTA;AAAAA,QACA,8BACE,KAAK,eACP,eAAe,cAAc;AAAA,MAAA;AAAA,IAEjC,OAAO;AACL,WAAK,IAAI,QAAQ,qBAAqB,W
AAW,EAAE,EAAE;AAErD,WAAK,WAAW,IAAI,UAAU;AAC9B,WAAK,QAAQ,QAAQ,YAAY,IAAI;AAErC,UAAI,SAAS;AACX,aAAK,gBAAgB,SAAS;AAAA,MAChC;AAAA,IACF;AACA,WAAO,QAAQ,QAAQ,UAAU;AAAA,EACnC;AAAA,EAEA,gBAAgB,WAAmB;AACjC,UAAM,WAAW,KAAK,mBAAmB;AACzC,SAAK,mBAAmB,IAAI,SAAS;AAErC,QAAI,aAAa,GAAG;AAClB,WAAK,OAAO,WAAW,MAAM,KAAK,iBAAA,GAAoB,gBAAgB;AAAA,IACxE;AAAA,EACF;AAAA,EAEA,MAAM,oBAGH;AACD,UAAM,eAAe,MAAM,KAAK,QAAQ,0BAAA;AACxC,WAAO;AAAA,MACL,gBAAgB,KAAK;AAAA,MACrB,cAAc,gBAAgB,KAAK;AAAA,IAAA;AAAA,EAEvC;AAAA,EAEA,MAAM,mBAAkC;AACtC,UAAM,UAAU,CAAC,GAAG,KAAK,kBAAkB;AAC3C,QAAI,QAAQ,WAAW,GAAG;AACxB,WAAK,IAAI,OAAO,4CAA4C;AAC5D;AAAA,IACF;AACA,UAAM,UAAU,CAAC,GAAG,KAAK,WAAW,SAAS;AAC7C,QAAI,QAAQ,WAAW,GAAG;AAGxB,WAAK,IAAI,OAAO,mCAAmC;AACnD;AAAA,IACF;AACA,QAAI;AACF,YAAM,kBAAkB,IAAI,GAAI,OAAmC;AACnE,YAAM,kBAAkB,IAAI,GAAI,OAAmC;AACnE,UAAI,kBAAkB,iBAAiB;AACrC,aAAK,IAAI;AAAA,UACP,yCAAyC,eAAe,MAAM,eAAe;AAAA,QAAA;AAAA,MAEjF,OAAO;AACL,cAAM,UAAU,MAAM,KAAK,QAAQ,mBAAmB,eAAe;AACrE,aAAK,IAAI,OAAO,UAAU,OAAO,mBAAmB,eAAe,EAAE;AACrE,aAAK,mBAAmB,OAAO,eAAe;AAAA,MAChD;AAAA,IACF,UAAA;AACE,UAAI,KAAK,mBAAmB,MAAM;AAEhC,aAAK,OAAO,WAAW,MAAM,KAAK,iBAAA,GAAoB,gBAAgB;AAAA,MACxE;AAAA,IACF;AAAA,EACF;AAAA,EAEA,MAAM,KAAK,KAAe;AACxB,SAAK,OAAO,KAAK,KAAK,KAAK,GAAG;AAC9B,SAAK,SAAS,QAAQ,OAAA;AACtB,UAAM,KAAK,QAAQ,KAAA;AAAA,EACrB;AACF;AAiBA,MAAM,mBAAmB,6BAA6B;"}
+ {"version":3,"file":"change-streamer-service.js","sources":["../../../../../../zero-cache/src/services/change-streamer/change-streamer-service.ts"],"sourcesContent":["import type {LogContext} from '@rocicorp/logger';\nimport {resolver} from '@rocicorp/resolver';\nimport {unreachable} from '../../../../shared/src/asserts.ts';\nimport {getOrCreateCounter} from '../../observability/metrics.ts';\nimport {\n min,\n type AtLeastOne,\n type LexiVersion,\n} from '../../types/lexi-version.ts';\nimport type {PostgresDB} from '../../types/pg.ts';\nimport type {ShardID} from '../../types/shards.ts';\nimport type {Sink, Source} from '../../types/streams.ts';\nimport {Subscription} from '../../types/subscription.ts';\nimport {\n type ChangeStreamControl,\n type ChangeStreamData,\n type ChangeStreamMessage,\n} from '../change-source/protocol/current/downstream.ts';\nimport type {ChangeSourceUpstream} from '../change-source/protocol/current/upstream.ts';\nimport {publishReplicationError} from '../replicator/replication-status.ts';\nimport type {SubscriptionState} from '../replicator/schema/replication-state.ts';\nimport {\n DEFAULT_MAX_RETRY_DELAY_MS,\n RunningState,\n UnrecoverableError,\n} from '../running-state.ts';\nimport {\n type ChangeStreamerService,\n type Downstream,\n type SubscriberContext,\n} from './change-streamer.ts';\nimport * as ErrorType from './error-type-enum.ts';\nimport {Forwarder} from './forwarder.ts';\nimport {initChangeStreamerSchema} from './schema/init.ts';\nimport {\n AutoResetSignal,\n ensureReplicationConfig,\n markResetRequired,\n} from './schema/tables.ts';\nimport {Storer} from './storer.ts';\nimport {Subscriber} from './subscriber.ts';\n\n/**\n * Performs initialization and schema migrations to initialize a ChangeStreamerImpl.\n */\nexport async function initializeStreamer(\n lc: LogContext,\n shard: ShardID,\n taskID: string,\n discoveryAddress: string,\n discoveryProtocol: string,\n changeDB: PostgresDB,\n changeSource: ChangeSource,\n subscriptionState: SubscriptionState,\n autoReset: boolean,\n setTimeoutFn = setTimeout,\n): Promise<ChangeStreamerService> {\n // Make sure the ChangeLog DB is set up.\n await initChangeStreamerSchema(lc, changeDB, shard);\n await ensureReplicationConfig(\n lc,\n changeDB,\n subscriptionState,\n shard,\n autoReset,\n );\n\n const {replicaVersion} = subscriptionState;\n return new ChangeStreamerImpl(\n lc,\n shard,\n taskID,\n discoveryAddress,\n discoveryProtocol,\n changeDB,\n replicaVersion,\n changeSource,\n autoReset,\n setTimeoutFn,\n );\n}\n\n/**\n * Internally all Downstream messages (not just commits) are given a watermark.\n * These are used for internal ordering for:\n * 1. Replaying new changes in the Storer\n * 2. Filtering old changes in the Subscriber\n *\n * However, only the watermark for `Commit` messages are exposed to\n * subscribers, as that is the only semantically correct watermark to\n * use for tracking a position in a replication stream.\n */\nexport type WatermarkedChange = [watermark: string, ChangeStreamData];\n\nexport type ChangeStream = {\n changes: Source<ChangeStreamMessage>;\n\n /**\n * A Sink to push the {@link StatusMessage}s that reflect Commits\n * that have been successfully stored by the {@link Storer}, or\n * downstream {@link StatusMessage}s henceforth.\n */\n acks: Sink<ChangeSourceUpstream>;\n};\n\n/** Encapsulates an upstream-specific implementation of a stream of Changes. 
*/\nexport interface ChangeSource {\n /**\n * Starts a stream of changes starting after the specific watermark,\n * with a corresponding sink for upstream acknowledgements.\n */\n startStream(afterWatermark: string): Promise<ChangeStream>;\n}\n\n/**\n * Upstream-agnostic dispatch of messages in a {@link ChangeStreamMessage} to a\n * {@link Forwarder} and {@link Storer} to execute the forward-store-ack\n * procedure described in {@link ChangeStreamer}.\n *\n * ### Subscriber Catchup\n *\n * Connecting clients first need to be \"caught up\" to the current watermark\n * (from stored change log entries) before new entries are forwarded to\n * them. This is non-trivial because the replication stream may be in the\n * middle of a pending streamed Transaction for which some entries have\n * already been forwarded but are not yet committed to the store.\n *\n *\n * ```\n * ------------------------------- - - - - - - - - - - - - - - - - - - -\n * | Historic changes in storage | Pending (streamed) tx | Next tx\n * ------------------------------- - - - - - - - - - - - - - - - - - - -\n * Replication stream\n * > > > > > > > > >\n * ^ ---> required catchup ---> ^\n * Subscriber watermark Subscription begins\n * ```\n *\n * Preemptively buffering the changes of every pending transaction\n * would be wasteful and consume too much memory for large transactions.\n *\n * Instead, the streamer synchronously dispatches changes and subscriptions\n * to the {@link Forwarder} and the {@link Storer} such that the two\n * components are aligned as to where in the stream the subscription started.\n * The two components then coordinate catchup and handoff via the\n * {@link Subscriber} object with the following algorithm:\n *\n * * If the streamer is in the middle of a pending Transaction, the\n * Subscriber is \"queued\" on both the Forwarder and the Storer. In this\n * state, new changes are *not* forwarded to the Subscriber, and catchup\n * is not yet executed.\n * * Once the commit message for the pending Transaction is processed\n * by the Storer, it begins catchup on the Subscriber (with a READONLY\n * snapshot so that it does not block subsequent storage operations).\n * This catchup is thus guaranteed to load the change log entries of\n * that last Transaction.\n * * When the Forwarder processes that same commit message, it moves the\n * Subscriber from the \"queued\" to the \"active\" set of clients such that\n * the Subscriber begins receiving new changes, starting from the next\n * Transaction.\n * * The Subscriber does not forward those changes, however, if its catchup\n * is not complete. Until then, it buffers the changes in memory.\n * * Once catchup is complete, the buffered changes are immediately sent\n * and the Subscriber henceforth forwards changes as they are received.\n *\n * In the (common) case where the streamer is not in the middle of a pending\n * transaction when a subscription begins, the Storer begins catchup\n * immediately and the Forwarder directly adds the Subscriber to its active\n * set. However, the Subscriber still buffers any forwarded messages until\n * its catchup is complete.\n *\n * ### Watermarks and ordering\n *\n * The ChangeStreamerService depends on its {@link ChangeSource} to send\n * changes in contiguous [`begin`, `data` ..., `data`, `commit`] sequences\n * in commit order. 
This follows Postgres's Logical Replication Protocol\n * Message Flow:\n *\n * https://www.postgresql.org/docs/16/protocol-logical-replication.html#PROTOCOL-LOGICAL-MESSAGES-FLOW\n *\n * > The logical replication protocol sends individual transactions one by one.\n * > This means that all messages between a pair of Begin and Commit messages belong to the same transaction.\n *\n * In order to correctly replay (new) and filter (old) messages to subscribers\n * at different points in the replication stream, these changes must be assigned\n * watermarks such that they preserve the order in which they were received\n * from the ChangeSource.\n *\n * A previous implementation incorrectly derived these watermarks from the Postgres\n * Log Sequence Numbers (LSN) of each message. However, LSNs from concurrent,\n * non-conflicting transactions can overlap, which can result in a `begin` message\n * with an earlier LSN arriving after a `commit` message. For example, the\n * changes for these transactions:\n *\n * ```\n * LSN: 1 2 3 4 5 6 7 8 9 10\n * tx1: begin data data data commit\n * tx2: begin data data data commit\n * ```\n *\n * will arrive as:\n *\n * ```\n * begin1, data2, data4, data6, commit8, begin3, data5, data7, data9, commit10\n * ```\n *\n * Thus, LSN of non-commit messages are not suitable for tracking the sorting\n * order of the replication stream.\n *\n * Instead, the ChangeStreamer uses the following algorithm for deterministic\n * catchup and filtering of changes:\n *\n * * A `commit` message is assigned to a watermark corresponding to its LSN.\n * These are guaranteed to be in commit order by definition.\n *\n * * `begin` and `data` messages are assigned to the watermark of the\n * preceding `commit` (the previous transaction, or the replication\n * slot's starting LSN) plus 1. This guarantees that they will be sorted\n * after the previously commit transaction even if their LSNs came before it.\n * This is referred to as the `preCommitWatermark`.\n *\n * * In the ChangeLog DB, messages have a secondary sort column `pos`, which is\n * the position of the message within its transaction, with the `begin` message\n * starting at `0`. This guarantees that `begin` and `data` messages will be\n * fetched in the original ChangeSource order during catchup.\n *\n * `begin` and `data` messages share the same watermark, but this is sufficient for\n * Subscriber filtering because subscribers only know about the `commit` watermarks\n * exposed in the `Downstream` `Commit` message. The Subscriber object thus compares\n * the internal watermarks of the incoming messages against the commit watermark of\n * the caller, updating the watermark at every `Commit` message that is forwarded.\n *\n * ### Cleanup\n *\n * As mentioned in the {@link ChangeStreamer} documentation: \"the ChangeStreamer\n * uses a combination of [the \"initial\", i.e. 
backup-derived watermark and] ACK\n * responses from connected subscribers to determine the watermark up\n * to which it is safe to purge old change log entries.\"\n *\n * More concretely:\n *\n * * The `initial`, backup-derived watermark is the earliest to which cleanup\n * should ever happen.\n *\n * * However, it is possible for the replica backup to be *ahead* of a connected\n * subscriber; and if a network error causes that subscriber to retry from its\n * last watermark, the change streamer must support it.\n *\n * Thus, before cleaning up to an `initial` backup-derived watermark, the change\n * streamer first confirms that all connected subscribers have also passed\n * that watermark.\n */\nclass ChangeStreamerImpl implements ChangeStreamerService {\n readonly id: string;\n readonly #lc: LogContext;\n readonly #shard: ShardID;\n readonly #changeDB: PostgresDB;\n readonly #replicaVersion: string;\n readonly #source: ChangeSource;\n readonly #storer: Storer;\n readonly #forwarder: Forwarder;\n\n readonly #autoReset: boolean;\n readonly #state: RunningState;\n readonly #initialWatermarks = new Set<string>();\n\n // Starting the (Postgres) ChangeStream results in killing the previous\n // Postgres subscriber, potentially creating a gap in which the old\n // change-streamer has shut down and the new change-streamer has not yet\n // been recognized as \"healthy\" (and thus does not get any requests).\n //\n // To minimize this gap, delay starting the ChangeStream until the first\n // request from a `serving` replicator, indicating that higher level\n // load-balancing / routing logic has begun routing requests to this task.\n readonly #serving = resolver();\n\n readonly #txCounter = getOrCreateCounter(\n 'replication',\n 'transactions',\n 'Count of replicated transactions',\n );\n\n #stream: ChangeStream | undefined;\n\n constructor(\n lc: LogContext,\n shard: ShardID,\n taskID: string,\n discoveryAddress: string,\n discoveryProtocol: string,\n changeDB: PostgresDB,\n replicaVersion: string,\n source: ChangeSource,\n autoReset: boolean,\n setTimeoutFn = setTimeout,\n ) {\n this.id = `change-streamer`;\n this.#lc = lc.withContext('component', 'change-streamer');\n this.#shard = shard;\n this.#changeDB = changeDB;\n this.#replicaVersion = replicaVersion;\n this.#source = source;\n this.#storer = new Storer(\n lc,\n shard,\n taskID,\n discoveryAddress,\n discoveryProtocol,\n changeDB,\n replicaVersion,\n consumed => this.#stream?.acks.push(['status', consumed[1], consumed[2]]),\n err => this.stop(err),\n );\n this.#forwarder = new Forwarder();\n this.#autoReset = autoReset;\n this.#state = new RunningState(this.id, undefined, setTimeoutFn);\n }\n\n async run() {\n this.#lc.info?.('starting change stream');\n\n // Once this change-streamer acquires \"ownership\" of the change DB,\n // it is safe to start the storer.\n await this.#storer.assumeOwnership();\n // The storer will, in turn, detect changes to ownership and stop\n // the change-streamer appropriately.\n this.#storer\n .run()\n .then(() => this.stop())\n .catch(e => this.stop(e));\n\n while (this.#state.shouldRun()) {\n let err: unknown;\n let watermark: string | null = null;\n try {\n const startAfter = await this.#storer.getLastWatermarkToStartStream();\n const stream = await this.#source.startStream(startAfter);\n this.#stream = stream;\n this.#state.resetBackoff();\n watermark = null;\n\n for await (const change of stream.changes) {\n const [type, msg] = change;\n switch (type) {\n case 'status':\n this.#storer.status(change); // 
storer acks once it gets through its queue\n continue;\n case 'control':\n await this.#handleControlMessage(msg);\n continue; // control messages are not stored/forwarded\n case 'begin':\n watermark = change[2].commitWatermark;\n break;\n case 'commit':\n if (watermark !== change[2].watermark) {\n throw new UnrecoverableError(\n `commit watermark ${change[2].watermark} does not match 'begin' watermark ${watermark}`,\n );\n }\n this.#txCounter.add(1);\n break;\n default:\n if (watermark === null) {\n throw new UnrecoverableError(\n `${type} change (${msg.tag}) received before 'begin' message`,\n );\n }\n break;\n }\n\n this.#storer.store([watermark, change]);\n this.#forwarder.forward([watermark, change]);\n\n if (type === 'commit') {\n watermark = null;\n }\n\n // Allow the storer to exert back pressure.\n const readyForMore = this.#storer.readyForMore();\n if (readyForMore) {\n await readyForMore;\n }\n }\n } catch (e) {\n err = e;\n } finally {\n this.#stream?.changes.cancel();\n this.#stream = undefined;\n }\n\n // When the change stream is interrupted, abort any pending transaction.\n if (watermark) {\n this.#lc.warn?.(`aborting interrupted transaction ${watermark}`);\n this.#storer.abort();\n this.#forwarder.forward([watermark, ['rollback', {tag: 'rollback'}]]);\n }\n\n await this.#state.backoff(this.#lc, err);\n }\n this.#lc.info?.('ChangeStreamer stopped');\n }\n\n async #handleControlMessage(msg: ChangeStreamControl[1]) {\n this.#lc.info?.('received control message', msg);\n const {tag} = msg;\n\n switch (tag) {\n case 'reset-required':\n await markResetRequired(this.#changeDB, this.#shard);\n await publishReplicationError(\n this.#lc,\n 'Replicating',\n msg.message ?? 'Resync required',\n msg.errorDetails,\n );\n if (this.#autoReset) {\n this.#lc.warn?.('shutting down for auto-reset');\n await this.stop(new AutoResetSignal());\n }\n break;\n default:\n unreachable(tag);\n }\n }\n\n subscribe(ctx: SubscriberContext): Promise<Source<Downstream>> {\n const {protocolVersion, id, mode, replicaVersion, watermark, initial} = ctx;\n if (mode === 'serving') {\n this.#serving.resolve();\n }\n const downstream = Subscription.create<Downstream>({\n cleanup: () => this.#forwarder.remove(subscriber),\n });\n const subscriber = new Subscriber(\n protocolVersion,\n id,\n watermark,\n downstream,\n );\n if (replicaVersion !== this.#replicaVersion) {\n this.#lc.warn?.(\n `rejecting subscriber at replica version ${replicaVersion}`,\n );\n subscriber.close(\n ErrorType.WrongReplicaVersion,\n `current replica version is ${\n this.#replicaVersion\n } (requested ${replicaVersion})`,\n );\n } else {\n this.#lc.debug?.(`adding subscriber ${subscriber.id}`);\n\n this.#forwarder.add(subscriber);\n this.#storer.catchup(subscriber, mode);\n\n if (initial) {\n this.scheduleCleanup(watermark);\n }\n }\n return Promise.resolve(downstream);\n }\n\n scheduleCleanup(watermark: string) {\n const origSize = this.#initialWatermarks.size;\n this.#initialWatermarks.add(watermark);\n\n if (origSize === 0) {\n this.#state.setTimeout(() => this.#purgeOldChanges(), CLEANUP_DELAY_MS);\n }\n }\n\n async getChangeLogState(): Promise<{\n replicaVersion: string;\n minWatermark: string;\n }> {\n const minWatermark = await this.#storer.getMinWatermarkForCatchup();\n return {\n replicaVersion: this.#replicaVersion,\n minWatermark: minWatermark ?? 
this.#replicaVersion,\n };\n }\n\n async #purgeOldChanges(): Promise<void> {\n const initial = [...this.#initialWatermarks];\n if (initial.length === 0) {\n this.#lc.warn?.('No initial watermarks to check for cleanup'); // Not expected.\n return;\n }\n const current = [...this.#forwarder.getAcks()];\n if (current.length === 0) {\n // Also not expected, but possible (e.g. subscriber connects, then disconnects).\n // Bail to be safe.\n this.#lc.warn?.('No subscribers to confirm cleanup');\n return;\n }\n try {\n const earliestInitial = min(...(initial as AtLeastOne<LexiVersion>));\n const earliestCurrent = min(...(current as AtLeastOne<LexiVersion>));\n if (earliestCurrent < earliestInitial) {\n this.#lc.info?.(\n `At least one client is behind backup (${earliestCurrent} < ${earliestInitial})`,\n );\n } else {\n const deleted = await this.#storer.purgeRecordsBefore(earliestInitial);\n this.#lc.info?.(`Purged ${deleted} changes before ${earliestInitial}`);\n this.#initialWatermarks.delete(earliestInitial);\n }\n } finally {\n if (this.#initialWatermarks.size) {\n // If there are unpurged watermarks to check, schedule the next purge.\n this.#state.setTimeout(() => this.#purgeOldChanges(), CLEANUP_DELAY_MS);\n }\n }\n }\n\n async stop(err?: unknown) {\n this.#state.stop(this.#lc, err);\n this.#stream?.changes.cancel();\n await this.#storer.stop();\n }\n}\n\n// The delay between receiving an initial, backup-based watermark\n// and performing a check of whether to purge records before it.\n// This delay should be long enough to handle situations like the following:\n//\n// 1. `litestream restore` downloads a backup for the `replication-manager`\n// 2. `replication-manager` starts up and runs this `change-streamer`\n// 3. `zero-cache`s that are running on a different replica connect to this\n// `change-streamer` after exponential backoff retries.\n//\n// It is possible for a `zero-cache`[3] to be behind the backup restored [1].\n// This cleanup delay (30 seconds) is thus set to be a value comfortably\n// longer than the max delay for exponential backoff (10 seconds) in\n// `services/running-state.ts`. 
This allows the `zero-cache` [3] to reconnect\n// so that the `change-streamer` can track its progress and know when it has\n// surpassed the initial watermark of the backup [1].\nconst CLEANUP_DELAY_MS = DEFAULT_MAX_RETRY_DELAY_MS * 3;\n"],"names":["ErrorType.WrongReplicaVersion"],"mappings":";;;;;;;;;;;;;;;AA6CA,eAAsB,mBACpB,IACA,OACA,QACA,kBACA,mBACA,UACA,cACA,mBACA,WACA,eAAe,YACiB;AAEhC,QAAM,yBAAyB,IAAI,UAAU,KAAK;AAClD,QAAM;AAAA,IACJ;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EAAA;AAGF,QAAM,EAAC,mBAAkB;AACzB,SAAO,IAAI;AAAA,IACT;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EAAA;AAEJ;AA4KA,MAAM,mBAAoD;AAAA,EAC/C;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EAEA;AAAA,EACA;AAAA,EACA,yCAAyB,IAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAUzB,WAAW,SAAA;AAAA,EAEX,aAAa;AAAA,IACpB;AAAA,IACA;AAAA,IACA;AAAA,EAAA;AAAA,EAGF;AAAA,EAEA,YACE,IACA,OACA,QACA,kBACA,mBACA,UACA,gBACA,QACA,WACA,eAAe,YACf;AACA,SAAK,KAAK;AACV,SAAK,MAAM,GAAG,YAAY,aAAa,iBAAiB;AACxD,SAAK,SAAS;AACd,SAAK,YAAY;AACjB,SAAK,kBAAkB;AACvB,SAAK,UAAU;AACf,SAAK,UAAU,IAAI;AAAA,MACjB;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA,CAAA,aAAY,KAAK,SAAS,KAAK,KAAK,CAAC,UAAU,SAAS,CAAC,GAAG,SAAS,CAAC,CAAC,CAAC;AAAA,MACxE,CAAA,QAAO,KAAK,KAAK,GAAG;AAAA,IAAA;AAEtB,SAAK,aAAa,IAAI,UAAA;AACtB,SAAK,aAAa;AAClB,SAAK,SAAS,IAAI,aAAa,KAAK,IAAI,QAAW,YAAY;AAAA,EACjE;AAAA,EAEA,MAAM,MAAM;AACV,SAAK,IAAI,OAAO,wBAAwB;AAIxC,UAAM,KAAK,QAAQ,gBAAA;AAGnB,SAAK,QACF,IAAA,EACA,KAAK,MAAM,KAAK,KAAA,CAAM,EACtB,MAAM,CAAA,MAAK,KAAK,KAAK,CAAC,CAAC;AAE1B,WAAO,KAAK,OAAO,aAAa;AAC9B,UAAI;AACJ,UAAI,YAA2B;AAC/B,UAAI;AACF,cAAM,aAAa,MAAM,KAAK,QAAQ,8BAAA;AACtC,cAAM,SAAS,MAAM,KAAK,QAAQ,YAAY,UAAU;AACxD,aAAK,UAAU;AACf,aAAK,OAAO,aAAA;AACZ,oBAAY;AAEZ,yBAAiB,UAAU,OAAO,SAAS;AACzC,gBAAM,CAAC,MAAM,GAAG,IAAI;AACpB,kBAAQ,MAAA;AAAA,YACN,KAAK;AACH,mBAAK,QAAQ,OAAO,MAAM;AAC1B;AAAA,YACF,KAAK;AACH,oBAAM,KAAK,sBAAsB,GAAG;AACpC;AAAA;AAAA,YACF,KAAK;AACH,0BAAY,OAAO,CAAC,EAAE;AACtB;AAAA,YACF,KAAK;AACH,kBAAI,cAAc,OAAO,CAAC,EAAE,WAAW;AACrC,sBAAM,IAAI;AAAA,kBACR,oBAAoB,OAAO,CAAC,EAAE,SAAS,qCAAqC,SAAS;AAAA,gBAAA;AAAA,cAEzF;AACA,mBAAK,WAAW,IAAI,CAAC;AACrB;AAAA,YACF;AACE,kBAAI,cAAc,MAAM;AACtB,sBAAM,IAAI;AAAA,kBACR,GAAG,IAAI,YAAY,IAAI,GAAG;AAAA,gBAAA;AAAA,cAE9B;AACA;AAAA,UAAA;AAGJ,eAAK,QAAQ,MAAM,CAAC,WAAW,MAAM,CAAC;AACtC,eAAK,WAAW,QAAQ,CAAC,WAAW,MAAM,CAAC;AAE3C,cAAI,SAAS,UAAU;AACrB,wBAAY;AAAA,UACd;AAGA,gBAAM,eAAe,KAAK,QAAQ,aAAA;AAClC,cAAI,cAAc;AAChB,kBAAM;AAAA,UACR;AAAA,QACF;AAAA,MACF,SAAS,GAAG;AACV,cAAM;AAAA,MACR,UAAA;AACE,aAAK,SAAS,QAAQ,OAAA;AACtB,aAAK,UAAU;AAAA,MACjB;AAGA,UAAI,WAAW;AACb,aAAK,IAAI,OAAO,oCAAoC,SAAS,EAAE;AAC/D,aAAK,QAAQ,MAAA;AACb,aAAK,WAAW,QAAQ,CAAC,WAAW,CAAC,YAAY,EAAC,KAAK,WAAA,CAAW,CAAC,CAAC;AAAA,MACtE;AAEA,YAAM,KAAK,OAAO,QAAQ,KAAK,KAAK,GAAG;AAAA,IACzC;AACA,SAAK,IAAI,OAAO,wBAAwB;AAAA,EAC1C;AAAA,EAEA,MAAM,sBAAsB,KAA6B;AACvD,SAAK,IAAI,OAAO,4BAA4B,GAAG;AAC/C,UAAM,EAAC,QAAO;AAEd,YAAQ,KAAA;AAAA,MACN,KAAK;AACH,cAAM,kBAAkB,KAAK,WAAW,KAAK,MAAM;AACnD,cAAM;AAAA,UACJ,KAAK;AAAA,UACL;AAAA,UACA,IAAI,WAAW;AAAA,UACf,IAAI;AAAA,QAAA;AAEN,YAAI,KAAK,YAAY;AACnB,eAAK,IAAI,OAAO,8BAA8B;AAC9C,gBAAM,KAAK,KAAK,IAAI,iBAAiB;AAAA,QACvC;AACA;AAAA,MACF;AACE,oBAAe;AAAA,IAAA;AAAA,EAErB;AAAA,EAEA,UAAU,KAAqD;AAC7D,UAAM,EAAC,iBAAiB,IAAI,MAAM,gBAAgB,WAAW,YAAW;AACxE,QAAI,SAAS,WAAW;AACtB,WAAK,SAAS,QAAA;AAAA,IAChB;AACA,UAAM,aAAa,aAAa,OAAmB;AAAA,MACjD,SAAS,MAAM,KAAK,WAAW,OAAO,UAAU;AAAA,IAAA,CACjD;AACD,UAAM,aAAa,IAAI;AAAA,MACrB;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IAAA;AAEF,QAAI,mBAAmB,KAAK,iBAAiB;AAC3C,WAAK,IAAI;AAAA,QACP,2CAA2C,cAAc;AA
AA,MAAA;AAE3D,iBAAW;AAAA,QACTA;AAAAA,QACA,8BACE,KAAK,eACP,eAAe,cAAc;AAAA,MAAA;AAAA,IAEjC,OAAO;AACL,WAAK,IAAI,QAAQ,qBAAqB,WAAW,EAAE,EAAE;AAErD,WAAK,WAAW,IAAI,UAAU;AAC9B,WAAK,QAAQ,QAAQ,YAAY,IAAI;AAErC,UAAI,SAAS;AACX,aAAK,gBAAgB,SAAS;AAAA,MAChC;AAAA,IACF;AACA,WAAO,QAAQ,QAAQ,UAAU;AAAA,EACnC;AAAA,EAEA,gBAAgB,WAAmB;AACjC,UAAM,WAAW,KAAK,mBAAmB;AACzC,SAAK,mBAAmB,IAAI,SAAS;AAErC,QAAI,aAAa,GAAG;AAClB,WAAK,OAAO,WAAW,MAAM,KAAK,iBAAA,GAAoB,gBAAgB;AAAA,IACxE;AAAA,EACF;AAAA,EAEA,MAAM,oBAGH;AACD,UAAM,eAAe,MAAM,KAAK,QAAQ,0BAAA;AACxC,WAAO;AAAA,MACL,gBAAgB,KAAK;AAAA,MACrB,cAAc,gBAAgB,KAAK;AAAA,IAAA;AAAA,EAEvC;AAAA,EAEA,MAAM,mBAAkC;AACtC,UAAM,UAAU,CAAC,GAAG,KAAK,kBAAkB;AAC3C,QAAI,QAAQ,WAAW,GAAG;AACxB,WAAK,IAAI,OAAO,4CAA4C;AAC5D;AAAA,IACF;AACA,UAAM,UAAU,CAAC,GAAG,KAAK,WAAW,SAAS;AAC7C,QAAI,QAAQ,WAAW,GAAG;AAGxB,WAAK,IAAI,OAAO,mCAAmC;AACnD;AAAA,IACF;AACA,QAAI;AACF,YAAM,kBAAkB,IAAI,GAAI,OAAmC;AACnE,YAAM,kBAAkB,IAAI,GAAI,OAAmC;AACnE,UAAI,kBAAkB,iBAAiB;AACrC,aAAK,IAAI;AAAA,UACP,yCAAyC,eAAe,MAAM,eAAe;AAAA,QAAA;AAAA,MAEjF,OAAO;AACL,cAAM,UAAU,MAAM,KAAK,QAAQ,mBAAmB,eAAe;AACrE,aAAK,IAAI,OAAO,UAAU,OAAO,mBAAmB,eAAe,EAAE;AACrE,aAAK,mBAAmB,OAAO,eAAe;AAAA,MAChD;AAAA,IACF,UAAA;AACE,UAAI,KAAK,mBAAmB,MAAM;AAEhC,aAAK,OAAO,WAAW,MAAM,KAAK,iBAAA,GAAoB,gBAAgB;AAAA,MACxE;AAAA,IACF;AAAA,EACF;AAAA,EAEA,MAAM,KAAK,KAAe;AACxB,SAAK,OAAO,KAAK,KAAK,KAAK,GAAG;AAC9B,SAAK,SAAS,QAAQ,OAAA;AACtB,UAAM,KAAK,QAAQ,KAAA;AAAA,EACrB;AACF;AAiBA,MAAM,mBAAmB,6BAA6B;"}
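The main source-level change visible between the removed and added change-streamer-service.js.map entries above is in the `reset-required` control-message handler: the added sourcesContent calls `publishReplicationError(this.#lc, 'Replicating', msg.message ?? 'Resync required', msg.errorDetails)` before the optional auto-reset shutdown. The sketch below is a minimal, hypothetical rendering of that flow for readability only; the flattened parameter signatures are assumptions and do not match the package's actual class method.

type ResetRequiredMessage = {
  tag: 'reset-required';
  message?: string;
  errorDetails?: unknown;
};

// Hypothetical, flattened dependencies standing in for the class fields used
// by the real #handleControlMessage implementation.
type Deps = {
  markResetRequired: () => Promise<void>;
  publishReplicationError: (message: string, details?: unknown) => Promise<void>;
  autoReset: boolean;
  stop: (reason: Error) => Promise<void>;
};

async function handleResetRequired(
  msg: ResetRequiredMessage,
  deps: Deps,
): Promise<void> {
  // Mark the shard as requiring a reset (unchanged in this diff).
  await deps.markResetRequired();
  // Added in this diff: surface the error via replication-status before any shutdown.
  await deps.publishReplicationError(
    msg.message ?? 'Resync required',
    msg.errorDetails,
  );
  if (deps.autoReset) {
    // Unchanged: shut down so the task can be replaced with a fresh replica copy.
    await deps.stop(new Error('auto-reset'));
  }
}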
@@ -1 +1 @@
- {"version":3,"file":"storer.d.ts","sourceRoot":"","sources":["../../../../../../zero-cache/src/services/change-streamer/storer.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,kBAAkB,CAAC;AAUjD,OAAO,EAA0B,KAAK,UAAU,EAAC,MAAM,mBAAmB,CAAC;AAC3E,OAAO,EAAY,KAAK,OAAO,EAAC,MAAM,uBAAuB,CAAC;AAC9D,OAAO,EAAC,KAAK,MAAM,EAAC,MAAM,iDAAiD,CAAC;AAC5E,OAAO,KAAK,EAAC,aAAa,EAAC,MAAM,6CAA6C,CAAC;AAC/E,OAAO,KAAK,EAAC,cAAc,EAAC,MAAM,6BAA6B,CAAC;AAChE,OAAO,KAAK,EAAC,OAAO,EAAC,MAAM,eAAe,CAAC;AAC3C,OAAO,KAAK,EAAC,iBAAiB,EAAC,MAAM,8BAA8B,CAAC;AAQpE,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,iBAAiB,CAAC;AAoChD;;;;;;;;;;;;;;;;;;;;;;;;;;;;;GA6BG;AACH,qBAAa,MAAO,YAAW,OAAO;;IACpC,QAAQ,CAAC,EAAE,YAAY;gBAerB,EAAE,EAAE,UAAU,EACd,KAAK,EAAE,OAAO,EACd,MAAM,EAAE,MAAM,EACd,gBAAgB,EAAE,MAAM,EACxB,iBAAiB,EAAE,MAAM,EACzB,EAAE,EAAE,UAAU,EACd,cAAc,EAAE,MAAM,EACtB,UAAU,EAAE,CAAC,CAAC,EAAE,MAAM,GAAG,aAAa,KAAK,IAAI,EAC/C,OAAO,EAAE,CAAC,GAAG,EAAE,KAAK,KAAK,IAAI;IAkBzB,eAAe;IAcf,6BAA6B,IAAI,OAAO,CAAC,MAAM,CAAC;IAahD,yBAAyB,IAAI,OAAO,CAAC,MAAM,GAAG,IAAI,CAAC;IAQzD,kBAAkB,CAAC,SAAS,EAAE,MAAM,GAAG,OAAO,CAAC,MAAM,CAAC;IA0BtD,KAAK,CAAC,KAAK,EAAE,iBAAiB;IAI9B,KAAK;IAIL,MAAM,CAAC,CAAC,EAAE,aAAa;IAIvB,OAAO,CAAC,UAAU,EAAE,UAAU,EAAE,IAAI,EAAE,cAAc;IAMpD,YAAY,IAAI,OAAO,CAAC,IAAI,CAAC,GAAG,SAAS;IA8BnC,GAAG;IA6PT,IAAI;CAIL"}
+ {"version":3,"file":"storer.d.ts","sourceRoot":"","sources":["../../../../../../zero-cache/src/services/change-streamer/storer.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,kBAAkB,CAAC;AAUjD,OAAO,EAA0B,KAAK,UAAU,EAAC,MAAM,mBAAmB,CAAC;AAC3E,OAAO,EAAY,KAAK,OAAO,EAAC,MAAM,uBAAuB,CAAC;AAC9D,OAAO,EAAC,KAAK,MAAM,EAAC,MAAM,iDAAiD,CAAC;AAC5E,OAAO,KAAK,EAAC,aAAa,EAAC,MAAM,6CAA6C,CAAC;AAC/E,OAAO,KAAK,EAAC,cAAc,EAAC,MAAM,6BAA6B,CAAC;AAChE,OAAO,KAAK,EAAC,OAAO,EAAC,MAAM,eAAe,CAAC;AAC3C,OAAO,KAAK,EAAC,iBAAiB,EAAC,MAAM,8BAA8B,CAAC;AAQpE,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,iBAAiB,CAAC;AAoChD;;;;;;;;;;;;;;;;;;;;;;;;;;;;;GA6BG;AACH,qBAAa,MAAO,YAAW,OAAO;;IACpC,QAAQ,CAAC,EAAE,YAAY;gBAerB,EAAE,EAAE,UAAU,EACd,KAAK,EAAE,OAAO,EACd,MAAM,EAAE,MAAM,EACd,gBAAgB,EAAE,MAAM,EACxB,iBAAiB,EAAE,MAAM,EACzB,EAAE,EAAE,UAAU,EACd,cAAc,EAAE,MAAM,EACtB,UAAU,EAAE,CAAC,CAAC,EAAE,MAAM,GAAG,aAAa,KAAK,IAAI,EAC/C,OAAO,EAAE,CAAC,GAAG,EAAE,KAAK,KAAK,IAAI;IAkBzB,eAAe;IAcf,6BAA6B,IAAI,OAAO,CAAC,MAAM,CAAC;IAahD,yBAAyB,IAAI,OAAO,CAAC,MAAM,GAAG,IAAI,CAAC;IAQzD,kBAAkB,CAAC,SAAS,EAAE,MAAM,GAAG,OAAO,CAAC,MAAM,CAAC;IAyBtD,KAAK,CAAC,KAAK,EAAE,iBAAiB;IAI9B,KAAK;IAIL,MAAM,CAAC,CAAC,EAAE,aAAa;IAIvB,OAAO,CAAC,UAAU,EAAE,UAAU,EAAE,IAAI,EAAE,cAAc;IAMpD,YAAY,IAAI,OAAO,CAAC,IAAI,CAAC,GAAG,SAAS;IA8BnC,GAAG;IA6PT,IAAI;CAIL"}
@@ -71,10 +71,9 @@ class Storer
       const [{ owner }] = await sql`
         SELECT * FROM ${this.#cdc("replicationState")}`;
       if (owner !== this.#taskID) {
-        this.#lc.error?.(
-          `Change log purge requested (${watermark}) while no longer owner`
+        this.#lc.warn?.(
+          `Ignoring change log purge request (${watermark}) while not owner`
         );
-        void this.stop();
         return 0;
       }
       const [{ deleted }] = await sql`
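Net effect of the storer.js hunk above: when the ownership check in purgeRecordsBefore() finds that another task now owns the change DB, the purge request is ignored with a warning and a result of 0, instead of logging an error and stopping the storer. Below is a minimal sketch of the new behavior, using flattened, hypothetical parameters in place of the real class fields and SQL.

type ReplicationState = {owner: string};

async function purgeRecordsBefore(
  taskID: string,
  watermark: string,
  readReplicationState: () => Promise<ReplicationState>,
  purgeOlderThan: (watermark: string) => Promise<number>,
  warn: (msg: string) => void,
): Promise<number> {
  const {owner} = await readReplicationState();
  if (owner !== taskID) {
    // 0.25.1 behavior: warn and report zero purged rows; the service keeps
    // running (previously this logged an error and called stop()).
    warn(`Ignoring change log purge request (${watermark}) while not owner`);
    return 0;
  }
  return purgeOlderThan(watermark);
}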
@@ -1 +1 @@
- {"version":3,"file":"storer.js","sources":["../../../../../../zero-cache/src/services/change-streamer/storer.ts"],"sourcesContent":["import {PG_SERIALIZATION_FAILURE} from '@drdgvhbh/postgres-error-codes';\nimport type {LogContext} from '@rocicorp/logger';\nimport {resolver, type Resolver} from '@rocicorp/resolver';\nimport postgres from 'postgres';\nimport {AbortError} from '../../../../shared/src/abort-error.ts';\nimport {assert} from '../../../../shared/src/asserts.ts';\nimport {type JSONValue} from '../../../../shared/src/bigint-json.ts';\nimport {Queue} from '../../../../shared/src/queue.ts';\nimport {promiseVoid} from '../../../../shared/src/resolved-promises.ts';\nimport * as Mode from '../../db/mode-enum.ts';\nimport {TransactionPool} from '../../db/transaction-pool.ts';\nimport {disableStatementTimeout, type PostgresDB} from '../../types/pg.ts';\nimport {cdcSchema, type ShardID} from '../../types/shards.ts';\nimport {type Commit} from '../change-source/protocol/current/downstream.ts';\nimport type {StatusMessage} from '../change-source/protocol/current/status.ts';\nimport type {ReplicatorMode} from '../replicator/replicator.ts';\nimport type {Service} from '../service.ts';\nimport type {WatermarkedChange} from './change-streamer-service.ts';\nimport {type ChangeEntry} from './change-streamer.ts';\nimport * as ErrorType from './error-type-enum.ts';\nimport {\n AutoResetSignal,\n markResetRequired,\n type ReplicationState,\n} from './schema/tables.ts';\nimport type {Subscriber} from './subscriber.ts';\n\ntype SubscriberAndMode = {\n subscriber: Subscriber;\n mode: ReplicatorMode;\n};\n\ntype QueueEntry =\n | ['change', WatermarkedChange]\n | ['ready', callback: () => void]\n | ['subscriber', SubscriberAndMode]\n | StatusMessage\n | ['abort']\n | 'stop';\n\ntype PendingTransaction = {\n pool: TransactionPool;\n preCommitWatermark: string;\n pos: number;\n startingReplicationState: Promise<ReplicationState>;\n};\n\n// Technically, any threshold is fine because the point of back pressure\n// is to adjust the rate of incoming messages, and the size of the pending\n// work queue does not affect that mechanism.\n//\n// However, it is theoretically possible to exceed the available memory if\n// the size of changes is very large. This threshold can be improved by\n// roughly measuring the size of the enqueued contents and setting the\n// threshold based on available memory.\n//\n// TODO: switch to a message size-based thresholding when migrating over\n// to stringified JSON messages, which will bound the computation involved\n// in measuring the size of row messages.\nconst QUEUE_SIZE_BACK_PRESSURE_THRESHOLD = 100_000;\n\n/**\n * Handles the storage of changes and the catchup of subscribers\n * that are behind.\n *\n * In the context of catchup and cleanup, it is the responsibility of the\n * Storer to decide whether a client can be caught up, or whether the\n * changes needed to catch a client up have been purged.\n *\n * **Maintained invariant**: The Change DB is only empty for a\n * completely new replica (i.e. initial-sync with no changes from the\n * replication stream).\n * * In this case, all new subscribers are expected start from the\n * `replicaVersion`, which is the version at which initial sync\n * was performed, and any attempts to catchup from a different\n * point fail.\n *\n * Conversely, if non-initial changes have flowed through the system\n * (i.e. 
via the replication stream), the ChangeDB must *not* be empty,\n * and the earliest change in the `changeLog` represents the earliest\n * \"commit\" from (after) which a subscriber can be caught up.\n * * Any attempts to catchup from an earlier point must fail with\n * a `WatermarkTooOld` error.\n * * Failure to do so could result in streaming changes to the\n * subscriber such that there is a gap in its replication history.\n *\n * Note: Subscribers (i.e. `incremental-syncer`) consider an \"error\" signal\n * an unrecoverable error and shut down in response. This allows the\n * production system to replace it with a new task and fresh copy of the\n * replica backup.\n */\nexport class Storer implements Service {\n readonly id = 'storer';\n readonly #lc: LogContext;\n readonly #shard: ShardID;\n readonly #taskID: string;\n readonly #discoveryAddress: string;\n readonly #discoveryProtocol: string;\n readonly #db: PostgresDB;\n readonly #replicaVersion: string;\n readonly #onConsumed: (c: Commit | StatusMessage) => void;\n readonly #onFatal: (err: Error) => void;\n readonly #queue = new Queue<QueueEntry>();\n\n #running = false;\n\n constructor(\n lc: LogContext,\n shard: ShardID,\n taskID: string,\n discoveryAddress: string,\n discoveryProtocol: string,\n db: PostgresDB,\n replicaVersion: string,\n onConsumed: (c: Commit | StatusMessage) => void,\n onFatal: (err: Error) => void,\n ) {\n this.#lc = lc;\n this.#shard = shard;\n this.#taskID = taskID;\n this.#discoveryAddress = discoveryAddress;\n this.#discoveryProtocol = discoveryProtocol;\n this.#db = db;\n this.#replicaVersion = replicaVersion;\n this.#onConsumed = onConsumed;\n this.#onFatal = onFatal;\n }\n\n // For readability in SQL statements.\n #cdc(table: string) {\n return this.#db(`${cdcSchema(this.#shard)}.${table}`);\n }\n\n async assumeOwnership() {\n const db = this.#db;\n const owner = this.#taskID;\n const ownerAddress = this.#discoveryAddress;\n const ownerProtocol = this.#discoveryProtocol;\n // we omit `ws://` so that old view syncer versions that are not expecting the protocol continue to not get it\n const addressWithProtocol =\n ownerProtocol === 'ws'\n ? ownerAddress\n : `${ownerProtocol}://${ownerAddress}`;\n await db`UPDATE ${this.#cdc('replicationState')} SET ${db({owner, ownerAddress: addressWithProtocol})}`;\n this.#lc.info?.(`assumed ownership at ${addressWithProtocol}`);\n }\n\n async getLastWatermarkToStartStream(): Promise<string> {\n // Before starting or restarting a stream from the change source,\n // wait for all queued changes to be processed so that we pick up\n // from the right spot.\n const {promise: ready, resolve} = resolver();\n this.#queue.enqueue(['ready', resolve]);\n await ready;\n\n const [{lastWatermark}] = await this.#db<{lastWatermark: string}[]>`\n SELECT \"lastWatermark\" FROM ${this.#cdc('replicationState')}`;\n return lastWatermark;\n }\n\n async getMinWatermarkForCatchup(): Promise<string | null> {\n const [{minWatermark}] = await this.#db<\n {minWatermark: string | null}[]\n > /*sql*/ `\n SELECT min(watermark) as \"minWatermark\" FROM ${this.#cdc('changeLog')}`;\n return minWatermark;\n }\n\n purgeRecordsBefore(watermark: string): Promise<number> {\n return this.#db.begin(Mode.SERIALIZABLE, async sql => {\n disableStatementTimeout(sql);\n\n // Check ownership before performing the purge. 
The server is expected to\n // exit immediately when an ownership change is detected, but checking\n // explicitly guards against race conditions.\n const [{owner}] = await sql<ReplicationState[]>`\n SELECT * FROM ${this.#cdc('replicationState')}`;\n if (owner !== this.#taskID) {\n this.#lc.error?.(\n `Change log purge requested (${watermark}) while no longer owner`,\n );\n void this.stop();\n return 0;\n }\n\n const [{deleted}] = await sql<{deleted: bigint}[]>`\n WITH purged AS (\n DELETE FROM ${this.#cdc('changeLog')} WHERE watermark < ${watermark} \n RETURNING watermark, pos\n ) SELECT COUNT(*) as deleted FROM purged;`;\n return Number(deleted);\n });\n }\n\n store(entry: WatermarkedChange) {\n this.#queue.enqueue(['change', entry]);\n }\n\n abort() {\n this.#queue.enqueue(['abort']);\n }\n\n status(s: StatusMessage) {\n this.#queue.enqueue(s);\n }\n\n catchup(subscriber: Subscriber, mode: ReplicatorMode) {\n this.#queue.enqueue(['subscriber', {subscriber, mode}]);\n }\n\n #readyForMore: Resolver<void> | null = null;\n\n readyForMore(): Promise<void> | undefined {\n if (!this.#running) {\n return undefined;\n }\n if (\n this.#readyForMore === null &&\n this.#queue.size() > QUEUE_SIZE_BACK_PRESSURE_THRESHOLD\n ) {\n this.#lc.warn?.(\n `applying back pressure with ${this.#queue.size()} queued changes`,\n );\n this.#readyForMore = resolver();\n }\n return this.#readyForMore?.promise;\n }\n\n #maybeReleaseBackPressure() {\n if (\n this.#readyForMore !== null &&\n // Wait for at least 10% of the threshold to free up.\n this.#queue.size() < QUEUE_SIZE_BACK_PRESSURE_THRESHOLD * 0.9\n ) {\n this.#lc.info?.(\n `releasing back pressure with ${this.#queue.size()} queued changes`,\n );\n this.#readyForMore.resolve();\n this.#readyForMore = null;\n }\n }\n\n async run() {\n this.#running = true;\n try {\n await this.#processQueue();\n } finally {\n this.#running = false;\n this.#lc.info?.('storer stopped');\n }\n }\n\n async #processQueue() {\n let tx: PendingTransaction | null = null;\n let msg: QueueEntry | false;\n\n const catchupQueue: SubscriberAndMode[] = [];\n while ((msg = await this.#queue.dequeue()) !== 'stop') {\n this.#maybeReleaseBackPressure();\n\n const [msgType] = msg;\n switch (msgType) {\n case 'ready': {\n const signalReady = msg[1];\n signalReady();\n continue;\n }\n case 'subscriber': {\n const subscriber = msg[1];\n if (tx) {\n catchupQueue.push(subscriber); // Wait for the current tx to complete.\n } else {\n await this.#startCatchup([subscriber]); // Catch up immediately.\n }\n continue;\n }\n case 'status':\n this.#onConsumed(msg);\n continue;\n case 'abort': {\n if (tx) {\n tx.pool.abort();\n await tx.pool.done();\n tx = null;\n }\n continue;\n }\n }\n // msgType === 'change'\n const [watermark, downstream] = msg[1];\n const [tag, change] = downstream;\n if (tag === 'begin') {\n assert(!tx, 'received BEGIN in the middle of a transaction');\n const {promise, resolve, reject} = resolver<ReplicationState>();\n tx = {\n pool: new TransactionPool(\n this.#lc.withContext('watermark', watermark),\n Mode.SERIALIZABLE,\n ),\n preCommitWatermark: watermark,\n pos: 0,\n startingReplicationState: promise,\n };\n tx.pool.run(this.#db);\n // Pipeline a read of the current ReplicationState,\n // which will be checked before committing.\n void tx.pool.process(tx => {\n tx<ReplicationState[]>`\n SELECT * FROM ${this.#cdc('replicationState')}`.then(\n ([result]) => resolve(result),\n reject,\n );\n return [];\n });\n } else {\n assert(tx, `received ${tag} outside of transaction`);\n 
tx.pos++;\n }\n\n const entry = {\n watermark: tag === 'commit' ? watermark : tx.preCommitWatermark,\n precommit: tag === 'commit' ? tx.preCommitWatermark : null,\n pos: tx.pos,\n change: change as unknown as JSONValue,\n };\n\n const processed = tx.pool.process(tx => [\n tx`\n INSERT INTO ${this.#cdc('changeLog')} ${tx(entry)}`,\n ]);\n\n if (tag === 'data' && tx.pos % 10_000 === 0) {\n // Backpressure is exerted on commit when awaiting tx.pool.done().\n // However, backpressure checks need to be regularly done for\n // very large transactions in order to avoid memory blowup.\n await processed;\n }\n\n if (tag === 'commit') {\n const {owner} = await tx.startingReplicationState;\n if (owner !== this.#taskID) {\n // Ownership change reflected in the replicationState read in 'begin'.\n tx.pool.fail(\n new AbortError(`changeLog ownership has been assumed by ${owner}`),\n );\n } else {\n // Update the replication state.\n const lastWatermark = watermark;\n void tx.pool.process(tx => [\n tx`\n UPDATE ${this.#cdc('replicationState')} SET ${tx({lastWatermark})}`,\n ]);\n tx.pool.setDone();\n }\n\n try {\n await tx.pool.done();\n } catch (e) {\n if (\n e instanceof postgres.PostgresError &&\n e.code === PG_SERIALIZATION_FAILURE\n ) {\n // Ownership change happened after the replicationState was read in 'begin'.\n throw new AbortError(`changeLog ownership has changed`, {cause: e});\n }\n throw e;\n }\n\n tx = null;\n\n // ACK the LSN to the upstream Postgres.\n this.#onConsumed(downstream);\n\n // Before beginning the next transaction, open a READONLY snapshot to\n // concurrently catchup any queued subscribers.\n await this.#startCatchup(catchupQueue.splice(0));\n } else if (tag === 'rollback') {\n // Aborted transactions are not stored in the changeLog. Abort the current tx\n // and process catchup of subscribers that were waiting for it to end.\n tx.pool.abort();\n await tx.pool.done();\n tx = null;\n\n await this.#startCatchup(catchupQueue.splice(0));\n }\n }\n }\n\n async #startCatchup(subs: SubscriberAndMode[]) {\n if (subs.length === 0) {\n return;\n }\n\n const reader = new TransactionPool(\n this.#lc.withContext('pool', 'catchup'),\n Mode.READONLY,\n );\n reader.run(this.#db);\n\n // Ensure that the transaction has started (and is thus holding a snapshot\n // of the database) before continuing on to commit more changes. This is\n // done by waiting for a no-op task to be processed by the pool, which\n // indicates that the BEGIN statement has been sent to the database.\n await reader.processReadTask(() => {});\n\n // Run the actual catchup queries in the background. Errors are handled in\n // #catchup() by disconnecting the associated subscriber.\n void Promise.all(subs.map(sub => this.#catchup(sub, reader))).finally(() =>\n reader.setDone(),\n );\n }\n\n async #catchup(\n {subscriber: sub, mode}: SubscriberAndMode,\n reader: TransactionPool,\n ) {\n try {\n await reader.processReadTask(async tx => {\n const start = Date.now();\n\n // When starting from initial-sync, there won't be a change with a watermark\n // equal to the replica version. 
This is the empty changeLog scenario.\n let watermarkFound = sub.watermark === this.#replicaVersion;\n let count = 0;\n let lastBatchConsumed: Promise<unknown> | undefined;\n\n for await (const entries of tx<ChangeEntry[]>`\n SELECT watermark, change FROM ${this.#cdc('changeLog')}\n WHERE watermark >= ${sub.watermark}\n ORDER BY watermark, pos`.cursor(2000)) {\n // Wait for the last batch of entries to be consumed by the\n // subscriber before sending down the current batch. This pipelining\n // allows one batch of changes to be received from the change-db\n // while the previous batch of changes are sent to the subscriber,\n // resulting in flow control that caps the number of changes\n // referenced in memory to 2 * batch-size.\n const start = performance.now();\n await lastBatchConsumed;\n const elapsed = performance.now() - start;\n if (lastBatchConsumed) {\n (elapsed > 100 ? this.#lc.info : this.#lc.debug)?.(\n `waited ${elapsed.toFixed(3)} ms for ${sub.id} to consume last batch of catchup entries`,\n );\n }\n\n for (const entry of entries) {\n if (entry.watermark === sub.watermark) {\n // This should be the first entry.\n // Catchup starts from *after* the watermark.\n watermarkFound = true;\n } else if (watermarkFound) {\n lastBatchConsumed = sub.catchup(toDownstream(entry)).result;\n count++;\n } else if (mode === 'backup') {\n throw new AutoResetSignal(\n `backup replica at watermark ${sub.watermark} is behind change db: ${entry.watermark})`,\n );\n } else {\n this.#lc.warn?.(\n `rejecting subscriber at watermark ${sub.watermark} (earliest watermark: ${entry.watermark})`,\n );\n sub.close(\n ErrorType.WatermarkTooOld,\n `earliest supported watermark is ${entry.watermark} (requested ${sub.watermark})`,\n );\n return;\n }\n }\n }\n if (watermarkFound) {\n await lastBatchConsumed;\n this.#lc.info?.(\n `caught up ${sub.id} with ${count} changes (${\n Date.now() - start\n } ms)`,\n );\n } else {\n this.#lc.warn?.(\n `subscriber at watermark ${sub.watermark} is ahead of latest watermark`,\n );\n }\n // Flushes the backlog of messages buffered during catchup and\n // allows the subscription to forward subsequent messages immediately.\n sub.setCaughtUp();\n });\n } catch (err) {\n this.#lc.error?.(`error while catching up subscriber ${sub.id}`, err);\n if (err instanceof AutoResetSignal) {\n await markResetRequired(this.#db, this.#shard);\n this.#onFatal(err);\n }\n sub.fail(err);\n }\n }\n\n stop() {\n this.#queue.enqueue('stop');\n return promiseVoid;\n }\n}\n\nfunction toDownstream(entry: ChangeEntry): WatermarkedChange {\n const {watermark, change} = entry;\n switch (change.tag) {\n case 'begin':\n return [watermark, ['begin', change, {commitWatermark: watermark}]];\n case 'commit':\n return [watermark, ['commit', change, {watermark}]];\n case 'rollback':\n return [watermark, ['rollback', change]];\n default:\n return [watermark, ['data', change]];\n 
}\n}\n"],"names":["Mode.SERIALIZABLE","tx","Mode.READONLY","start","ErrorType.WatermarkTooOld"],"mappings":";;;;;;;;;;;;;;;;AA2DA,MAAM,qCAAqC;AAgCpC,MAAM,OAA0B;AAAA,EAC5B,KAAK;AAAA,EACL;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA,SAAS,IAAI,MAAA;AAAA,EAEtB,WAAW;AAAA,EAEX,YACE,IACA,OACA,QACA,kBACA,mBACA,IACA,gBACA,YACA,SACA;AACA,SAAK,MAAM;AACX,SAAK,SAAS;AACd,SAAK,UAAU;AACf,SAAK,oBAAoB;AACzB,SAAK,qBAAqB;AAC1B,SAAK,MAAM;AACX,SAAK,kBAAkB;AACvB,SAAK,cAAc;AACnB,SAAK,WAAW;AAAA,EAClB;AAAA;AAAA,EAGA,KAAK,OAAe;AAClB,WAAO,KAAK,IAAI,GAAG,UAAU,KAAK,MAAM,CAAC,IAAI,KAAK,EAAE;AAAA,EACtD;AAAA,EAEA,MAAM,kBAAkB;AACtB,UAAM,KAAK,KAAK;AAChB,UAAM,QAAQ,KAAK;AACnB,UAAM,eAAe,KAAK;AAC1B,UAAM,gBAAgB,KAAK;AAE3B,UAAM,sBACJ,kBAAkB,OACd,eACA,GAAG,aAAa,MAAM,YAAY;AACxC,UAAM,YAAY,KAAK,KAAK,kBAAkB,CAAC,QAAQ,GAAG,EAAC,OAAO,cAAc,oBAAA,CAAoB,CAAC;AACrG,SAAK,IAAI,OAAO,wBAAwB,mBAAmB,EAAE;AAAA,EAC/D;AAAA,EAEA,MAAM,gCAAiD;AAIrD,UAAM,EAAC,SAAS,OAAO,QAAA,IAAW,SAAA;AAClC,SAAK,OAAO,QAAQ,CAAC,SAAS,OAAO,CAAC;AACtC,UAAM;AAEN,UAAM,CAAC,EAAC,cAAA,CAAc,IAAI,MAAM,KAAK;AAAA,oCACL,KAAK,KAAK,kBAAkB,CAAC;AAC7D,WAAO;AAAA,EACT;AAAA,EAEA,MAAM,4BAAoD;AACxD,UAAM,CAAC,EAAC,aAAA,CAAa,IAAI,MAAM,KAAK;AAAA,qDAGa,KAAK,KAAK,WAAW,CAAC;AACvE,WAAO;AAAA,EACT;AAAA,EAEA,mBAAmB,WAAoC;AACrD,WAAO,KAAK,IAAI,MAAMA,cAAmB,OAAM,QAAO;AACpD,8BAAwB,GAAG;AAK3B,YAAM,CAAC,EAAC,OAAM,IAAI,MAAM;AAAA,wBACN,KAAK,KAAK,kBAAkB,CAAC;AAC/C,UAAI,UAAU,KAAK,SAAS;AAC1B,aAAK,IAAI;AAAA,UACP,+BAA+B,SAAS;AAAA,QAAA;AAE1C,aAAK,KAAK,KAAA;AACV,eAAO;AAAA,MACT;AAEA,YAAM,CAAC,EAAC,SAAQ,IAAI,MAAM;AAAA;AAAA,wBAER,KAAK,KAAK,WAAW,CAAC,sBAAsB,SAAS;AAAA;AAAA;AAGvE,aAAO,OAAO,OAAO;AAAA,IACvB,CAAC;AAAA,EACH;AAAA,EAEA,MAAM,OAA0B;AAC9B,SAAK,OAAO,QAAQ,CAAC,UAAU,KAAK,CAAC;AAAA,EACvC;AAAA,EAEA,QAAQ;AACN,SAAK,OAAO,QAAQ,CAAC,OAAO,CAAC;AAAA,EAC/B;AAAA,EAEA,OAAO,GAAkB;AACvB,SAAK,OAAO,QAAQ,CAAC;AAAA,EACvB;AAAA,EAEA,QAAQ,YAAwB,MAAsB;AACpD,SAAK,OAAO,QAAQ,CAAC,cAAc,EAAC,YAAY,KAAA,CAAK,CAAC;AAAA,EACxD;AAAA,EAEA,gBAAuC;AAAA,EAEvC,eAA0C;AACxC,QAAI,CAAC,KAAK,UAAU;AAClB,aAAO;AAAA,IACT;AACA,QACE,KAAK,kBAAkB,QACvB,KAAK,OAAO,KAAA,IAAS,oCACrB;AACA,WAAK,IAAI;AAAA,QACP,+BAA+B,KAAK,OAAO,KAAA,CAAM;AAAA,MAAA;AAEnD,WAAK,gBAAgB,SAAA;AAAA,IACvB;AACA,WAAO,KAAK,eAAe;AAAA,EAC7B;AAAA,EAEA,4BAA4B;AAC1B,QACE,KAAK,kBAAkB;AAAA,IAEvB,KAAK,OAAO,SAAS,qCAAqC,KAC1D;AACA,WAAK,IAAI;AAAA,QACP,gCAAgC,KAAK,OAAO,KAAA,CAAM;AAAA,MAAA;AAEpD,WAAK,cAAc,QAAA;AACnB,WAAK,gBAAgB;AAAA,IACvB;AAAA,EACF;AAAA,EAEA,MAAM,MAAM;AACV,SAAK,WAAW;AAChB,QAAI;AACF,YAAM,KAAK,cAAA;AAAA,IACb,UAAA;AACE,WAAK,WAAW;AAChB,WAAK,IAAI,OAAO,gBAAgB;AAAA,IAClC;AAAA,EACF;AAAA,EAEA,MAAM,gBAAgB;AACpB,QAAI,KAAgC;AACpC,QAAI;AAEJ,UAAM,eAAoC,CAAA;AAC1C,YAAQ,MAAM,MAAM,KAAK,OAAO,QAAA,OAAe,QAAQ;AACrD,WAAK,0BAAA;AAEL,YAAM,CAAC,OAAO,IAAI;AAClB,cAAQ,SAAA;AAAA,QACN,KAAK,SAAS;AACZ,gBAAM,cAAc,IAAI,CAAC;AACzB,sBAAA;AACA;AAAA,QACF;AAAA,QACA,KAAK,cAAc;AACjB,gBAAM,aAAa,IAAI,CAAC;AACxB,cAAI,IAAI;AACN,yBAAa,KAAK,UAAU;AAAA,UAC9B,OAAO;AACL,kBAAM,KAAK,cAAc,CAAC,UAAU,CAAC;AAAA,UACvC;AACA;AAAA,QACF;AAAA,QACA,KAAK;AACH,eAAK,YAAY,GAAG;AACpB;AAAA,QACF,KAAK,SAAS;AACZ,cAAI,IAAI;AACN,eAAG,KAAK,MAAA;AACR,kBAAM,GAAG,KAAK,KAAA;AACd,iBAAK;AAAA,UACP;AACA;AAAA,QACF;AAAA,MAAA;AAGF,YAAM,CAAC,WAAW,UAAU,IAAI,IAAI,CAAC;AACrC,YAAM,CAAC,KAAK,MAAM,IAAI;AACtB,UAAI,QAAQ,SAAS;AACnB,eAAO,CAAC,IAAI,+CAA+C;AAC3D,cAAM,EAAC,SAAS,SAAS,OAAA,IAAU,SAAA;AACnC,aAAK;AAAA,UACH,MAAM,IAAI;AAAA,YACR,KAAK,IAAI,YAAY,aAAa,SAAS;AAAA,YAC3CA;AAAAA,UAAK;AAAA,UAEP,oBAAoB;AAAA,UACpB,KAAK;AAAA,UACL,0BAA0B;AAAA,QAAA;AAE5B,WAAG,KAAK,IAAI,KAAK,GAAG;AAGpB,aAAK,GAAG,KAAK,QAAQ,CAAAC,QAAM;AACzBA;AAAAA,0BACgB,KAAK,KAAK,kBAAkB,CAAC,GAAG
;AAAA,YAC9C,CAAC,CAAC,MAAM,MAAM,QAAQ,MAAM;AAAA,YAC5B;AAAA,UAAA;AAEF,iBAAO,CAAA;AAAA,QACT,CAAC;AAAA,MACH,OAAO;AACL,eAAO,IAAI,YAAY,GAAG,yBAAyB;AACnD,WAAG;AAAA,MACL;AAEA,YAAM,QAAQ;AAAA,QACZ,WAAW,QAAQ,WAAW,YAAY,GAAG;AAAA,QAC7C,WAAW,QAAQ,WAAW,GAAG,qBAAqB;AAAA,QACtD,KAAK,GAAG;AAAA,QACR;AAAA,MAAA;AAGF,YAAM,YAAY,GAAG,KAAK,QAAQ,CAAAA,QAAM;AAAA,QACtCA;AAAAA,sBACc,KAAK,KAAK,WAAW,CAAC,IAAIA,IAAG,KAAK,CAAC;AAAA,MAAA,CAClD;AAED,UAAI,QAAQ,UAAU,GAAG,MAAM,QAAW,GAAG;AAI3C,cAAM;AAAA,MACR;AAEA,UAAI,QAAQ,UAAU;AACpB,cAAM,EAAC,MAAA,IAAS,MAAM,GAAG;AACzB,YAAI,UAAU,KAAK,SAAS;AAE1B,aAAG,KAAK;AAAA,YACN,IAAI,WAAW,2CAA2C,KAAK,EAAE;AAAA,UAAA;AAAA,QAErE,OAAO;AAEL,gBAAM,gBAAgB;AACtB,eAAK,GAAG,KAAK,QAAQ,CAAAA,QAAM;AAAA,YACzBA;AAAAA,qBACS,KAAK,KAAK,kBAAkB,CAAC,QAAQA,IAAG,EAAC,cAAA,CAAc,CAAC;AAAA,UAAA,CAClE;AACD,aAAG,KAAK,QAAA;AAAA,QACV;AAEA,YAAI;AACF,gBAAM,GAAG,KAAK,KAAA;AAAA,QAChB,SAAS,GAAG;AACV,cACE,aAAa,SAAS,iBACtB,EAAE,SAAS,0BACX;AAEA,kBAAM,IAAI,WAAW,mCAAmC,EAAC,OAAO,GAAE;AAAA,UACpE;AACA,gBAAM;AAAA,QACR;AAEA,aAAK;AAGL,aAAK,YAAY,UAAU;AAI3B,cAAM,KAAK,cAAc,aAAa,OAAO,CAAC,CAAC;AAAA,MACjD,WAAW,QAAQ,YAAY;AAG7B,WAAG,KAAK,MAAA;AACR,cAAM,GAAG,KAAK,KAAA;AACd,aAAK;AAEL,cAAM,KAAK,cAAc,aAAa,OAAO,CAAC,CAAC;AAAA,MACjD;AAAA,IACF;AAAA,EACF;AAAA,EAEA,MAAM,cAAc,MAA2B;AAC7C,QAAI,KAAK,WAAW,GAAG;AACrB;AAAA,IACF;AAEA,UAAM,SAAS,IAAI;AAAA,MACjB,KAAK,IAAI,YAAY,QAAQ,SAAS;AAAA,MACtCC;AAAAA,IAAK;AAEP,WAAO,IAAI,KAAK,GAAG;AAMnB,UAAM,OAAO,gBAAgB,MAAM;AAAA,IAAC,CAAC;AAIrC,SAAK,QAAQ,IAAI,KAAK,IAAI,CAAA,QAAO,KAAK,SAAS,KAAK,MAAM,CAAC,CAAC,EAAE;AAAA,MAAQ,MACpE,OAAO,QAAA;AAAA,IAAQ;AAAA,EAEnB;AAAA,EAEA,MAAM,SACJ,EAAC,YAAY,KAAK,KAAA,GAClB,QACA;AACA,QAAI;AACF,YAAM,OAAO,gBAAgB,OAAM,OAAM;AACvC,cAAM,QAAQ,KAAK,IAAA;AAInB,YAAI,iBAAiB,IAAI,cAAc,KAAK;AAC5C,YAAI,QAAQ;AACZ,YAAI;AAEJ,yBAAiB,WAAW;AAAA,0CACM,KAAK,KAAK,WAAW,CAAC;AAAA,gCAChC,IAAI,SAAS;AAAA,oCACT,OAAO,GAAI,GAAG;AAOxC,gBAAMC,SAAQ,YAAY,IAAA;AAC1B,gBAAM;AACN,gBAAM,UAAU,YAAY,IAAA,IAAQA;AACpC,cAAI,mBAAmB;AACrB,aAAC,UAAU,MAAM,KAAK,IAAI,OAAO,KAAK,IAAI;AAAA,cACxC,UAAU,QAAQ,QAAQ,CAAC,CAAC,WAAW,IAAI,EAAE;AAAA,YAAA;AAAA,UAEjD;AAEA,qBAAW,SAAS,SAAS;AAC3B,gBAAI,MAAM,cAAc,IAAI,WAAW;AAGrC,+BAAiB;AAAA,YACnB,WAAW,gBAAgB;AACzB,kCAAoB,IAAI,QAAQ,aAAa,KAAK,CAAC,EAAE;AACrD;AAAA,YACF,WAAW,SAAS,UAAU;AAC5B,oBAAM,IAAI;AAAA,gBACR,+BAA+B,IAAI,SAAS,yBAAyB,MAAM,SAAS;AAAA,cAAA;AAAA,YAExF,OAAO;AACL,mBAAK,IAAI;AAAA,gBACP,qCAAqC,IAAI,SAAS,yBAAyB,MAAM,SAAS;AAAA,cAAA;AAE5F,kBAAI;AAAA,gBACFC;AAAAA,gBACA,mCAAmC,MAAM,SAAS,eAAe,IAAI,SAAS;AAAA,cAAA;AAEhF;AAAA,YACF;AAAA,UACF;AAAA,QACF;AACA,YAAI,gBAAgB;AAClB,gBAAM;AACN,eAAK,IAAI;AAAA,YACP,aAAa,IAAI,EAAE,SAAS,KAAK,aAC/B,KAAK,QAAQ,KACf;AAAA,UAAA;AAAA,QAEJ,OAAO;AACL,eAAK,IAAI;AAAA,YACP,2BAA2B,IAAI,SAAS;AAAA,UAAA;AAAA,QAE5C;AAGA,YAAI,YAAA;AAAA,MACN,CAAC;AAAA,IACH,SAAS,KAAK;AACZ,WAAK,IAAI,QAAQ,sCAAsC,IAAI,EAAE,IAAI,GAAG;AACpE,UAAI,eAAe,iBAAiB;AAClC,cAAM,kBAAkB,KAAK,KAAK,KAAK,MAAM;AAC7C,aAAK,SAAS,GAAG;AAAA,MACnB;AACA,UAAI,KAAK,GAAG;AAAA,IACd;AAAA,EACF;AAAA,EAEA,OAAO;AACL,SAAK,OAAO,QAAQ,MAAM;AAC1B,WAAO;AAAA,EACT;AACF;AAEA,SAAS,aAAa,OAAuC;AAC3D,QAAM,EAAC,WAAW,OAAA,IAAU;AAC5B,UAAQ,OAAO,KAAA;AAAA,IACb,KAAK;AACH,aAAO,CAAC,WAAW,CAAC,SAAS,QAAQ,EAAC,iBAAiB,UAAA,CAAU,CAAC;AAAA,IACpE,KAAK;AACH,aAAO,CAAC,WAAW,CAAC,UAAU,QAAQ,EAAC,UAAA,CAAU,CAAC;AAAA,IACpD,KAAK;AACH,aAAO,CAAC,WAAW,CAAC,YAAY,MAAM,CAAC;AAAA,IACzC;AACE,aAAO,CAAC,WAAW,CAAC,QAAQ,MAAM,CAAC;AAAA,EAAA;AAEzC;"}
+ {"version":3,"file":"storer.js","sources":["../../../../../../zero-cache/src/services/change-streamer/storer.ts"],"sourcesContent":["import {PG_SERIALIZATION_FAILURE} from '@drdgvhbh/postgres-error-codes';\nimport type {LogContext} from '@rocicorp/logger';\nimport {resolver, type Resolver} from '@rocicorp/resolver';\nimport postgres from 'postgres';\nimport {AbortError} from '../../../../shared/src/abort-error.ts';\nimport {assert} from '../../../../shared/src/asserts.ts';\nimport {type JSONValue} from '../../../../shared/src/bigint-json.ts';\nimport {Queue} from '../../../../shared/src/queue.ts';\nimport {promiseVoid} from '../../../../shared/src/resolved-promises.ts';\nimport * as Mode from '../../db/mode-enum.ts';\nimport {TransactionPool} from '../../db/transaction-pool.ts';\nimport {disableStatementTimeout, type PostgresDB} from '../../types/pg.ts';\nimport {cdcSchema, type ShardID} from '../../types/shards.ts';\nimport {type Commit} from '../change-source/protocol/current/downstream.ts';\nimport type {StatusMessage} from '../change-source/protocol/current/status.ts';\nimport type {ReplicatorMode} from '../replicator/replicator.ts';\nimport type {Service} from '../service.ts';\nimport type {WatermarkedChange} from './change-streamer-service.ts';\nimport {type ChangeEntry} from './change-streamer.ts';\nimport * as ErrorType from './error-type-enum.ts';\nimport {\n AutoResetSignal,\n markResetRequired,\n type ReplicationState,\n} from './schema/tables.ts';\nimport type {Subscriber} from './subscriber.ts';\n\ntype SubscriberAndMode = {\n subscriber: Subscriber;\n mode: ReplicatorMode;\n};\n\ntype QueueEntry =\n | ['change', WatermarkedChange]\n | ['ready', callback: () => void]\n | ['subscriber', SubscriberAndMode]\n | StatusMessage\n | ['abort']\n | 'stop';\n\ntype PendingTransaction = {\n pool: TransactionPool;\n preCommitWatermark: string;\n pos: number;\n startingReplicationState: Promise<ReplicationState>;\n};\n\n// Technically, any threshold is fine because the point of back pressure\n// is to adjust the rate of incoming messages, and the size of the pending\n// work queue does not affect that mechanism.\n//\n// However, it is theoretically possible to exceed the available memory if\n// the size of changes is very large. This threshold can be improved by\n// roughly measuring the size of the enqueued contents and setting the\n// threshold based on available memory.\n//\n// TODO: switch to a message size-based thresholding when migrating over\n// to stringified JSON messages, which will bound the computation involved\n// in measuring the size of row messages.\nconst QUEUE_SIZE_BACK_PRESSURE_THRESHOLD = 100_000;\n\n/**\n * Handles the storage of changes and the catchup of subscribers\n * that are behind.\n *\n * In the context of catchup and cleanup, it is the responsibility of the\n * Storer to decide whether a client can be caught up, or whether the\n * changes needed to catch a client up have been purged.\n *\n * **Maintained invariant**: The Change DB is only empty for a\n * completely new replica (i.e. initial-sync with no changes from the\n * replication stream).\n * * In this case, all new subscribers are expected start from the\n * `replicaVersion`, which is the version at which initial sync\n * was performed, and any attempts to catchup from a different\n * point fail.\n *\n * Conversely, if non-initial changes have flowed through the system\n * (i.e. 
via the replication stream), the ChangeDB must *not* be empty,\n * and the earliest change in the `changeLog` represents the earliest\n * \"commit\" from (after) which a subscriber can be caught up.\n * * Any attempts to catchup from an earlier point must fail with\n * a `WatermarkTooOld` error.\n * * Failure to do so could result in streaming changes to the\n * subscriber such that there is a gap in its replication history.\n *\n * Note: Subscribers (i.e. `incremental-syncer`) consider an \"error\" signal\n * an unrecoverable error and shut down in response. This allows the\n * production system to replace it with a new task and fresh copy of the\n * replica backup.\n */\nexport class Storer implements Service {\n readonly id = 'storer';\n readonly #lc: LogContext;\n readonly #shard: ShardID;\n readonly #taskID: string;\n readonly #discoveryAddress: string;\n readonly #discoveryProtocol: string;\n readonly #db: PostgresDB;\n readonly #replicaVersion: string;\n readonly #onConsumed: (c: Commit | StatusMessage) => void;\n readonly #onFatal: (err: Error) => void;\n readonly #queue = new Queue<QueueEntry>();\n\n #running = false;\n\n constructor(\n lc: LogContext,\n shard: ShardID,\n taskID: string,\n discoveryAddress: string,\n discoveryProtocol: string,\n db: PostgresDB,\n replicaVersion: string,\n onConsumed: (c: Commit | StatusMessage) => void,\n onFatal: (err: Error) => void,\n ) {\n this.#lc = lc;\n this.#shard = shard;\n this.#taskID = taskID;\n this.#discoveryAddress = discoveryAddress;\n this.#discoveryProtocol = discoveryProtocol;\n this.#db = db;\n this.#replicaVersion = replicaVersion;\n this.#onConsumed = onConsumed;\n this.#onFatal = onFatal;\n }\n\n // For readability in SQL statements.\n #cdc(table: string) {\n return this.#db(`${cdcSchema(this.#shard)}.${table}`);\n }\n\n async assumeOwnership() {\n const db = this.#db;\n const owner = this.#taskID;\n const ownerAddress = this.#discoveryAddress;\n const ownerProtocol = this.#discoveryProtocol;\n // we omit `ws://` so that old view syncer versions that are not expecting the protocol continue to not get it\n const addressWithProtocol =\n ownerProtocol === 'ws'\n ? ownerAddress\n : `${ownerProtocol}://${ownerAddress}`;\n await db`UPDATE ${this.#cdc('replicationState')} SET ${db({owner, ownerAddress: addressWithProtocol})}`;\n this.#lc.info?.(`assumed ownership at ${addressWithProtocol}`);\n }\n\n async getLastWatermarkToStartStream(): Promise<string> {\n // Before starting or restarting a stream from the change source,\n // wait for all queued changes to be processed so that we pick up\n // from the right spot.\n const {promise: ready, resolve} = resolver();\n this.#queue.enqueue(['ready', resolve]);\n await ready;\n\n const [{lastWatermark}] = await this.#db<{lastWatermark: string}[]>`\n SELECT \"lastWatermark\" FROM ${this.#cdc('replicationState')}`;\n return lastWatermark;\n }\n\n async getMinWatermarkForCatchup(): Promise<string | null> {\n const [{minWatermark}] = await this.#db<\n {minWatermark: string | null}[]\n > /*sql*/ `\n SELECT min(watermark) as \"minWatermark\" FROM ${this.#cdc('changeLog')}`;\n return minWatermark;\n }\n\n purgeRecordsBefore(watermark: string): Promise<number> {\n return this.#db.begin(Mode.SERIALIZABLE, async sql => {\n disableStatementTimeout(sql);\n\n // Check ownership before performing the purge. 
The server is expected to\n // exit immediately when an ownership change is detected, but checking\n // explicitly guards against race conditions.\n const [{owner}] = await sql<ReplicationState[]>`\n SELECT * FROM ${this.#cdc('replicationState')}`;\n if (owner !== this.#taskID) {\n this.#lc.warn?.(\n `Ignoring change log purge request (${watermark}) while not owner`,\n );\n return 0;\n }\n\n const [{deleted}] = await sql<{deleted: bigint}[]>`\n WITH purged AS (\n DELETE FROM ${this.#cdc('changeLog')} WHERE watermark < ${watermark} \n RETURNING watermark, pos\n ) SELECT COUNT(*) as deleted FROM purged;`;\n return Number(deleted);\n });\n }\n\n store(entry: WatermarkedChange) {\n this.#queue.enqueue(['change', entry]);\n }\n\n abort() {\n this.#queue.enqueue(['abort']);\n }\n\n status(s: StatusMessage) {\n this.#queue.enqueue(s);\n }\n\n catchup(subscriber: Subscriber, mode: ReplicatorMode) {\n this.#queue.enqueue(['subscriber', {subscriber, mode}]);\n }\n\n #readyForMore: Resolver<void> | null = null;\n\n readyForMore(): Promise<void> | undefined {\n if (!this.#running) {\n return undefined;\n }\n if (\n this.#readyForMore === null &&\n this.#queue.size() > QUEUE_SIZE_BACK_PRESSURE_THRESHOLD\n ) {\n this.#lc.warn?.(\n `applying back pressure with ${this.#queue.size()} queued changes`,\n );\n this.#readyForMore = resolver();\n }\n return this.#readyForMore?.promise;\n }\n\n #maybeReleaseBackPressure() {\n if (\n this.#readyForMore !== null &&\n // Wait for at least 10% of the threshold to free up.\n this.#queue.size() < QUEUE_SIZE_BACK_PRESSURE_THRESHOLD * 0.9\n ) {\n this.#lc.info?.(\n `releasing back pressure with ${this.#queue.size()} queued changes`,\n );\n this.#readyForMore.resolve();\n this.#readyForMore = null;\n }\n }\n\n async run() {\n this.#running = true;\n try {\n await this.#processQueue();\n } finally {\n this.#running = false;\n this.#lc.info?.('storer stopped');\n }\n }\n\n async #processQueue() {\n let tx: PendingTransaction | null = null;\n let msg: QueueEntry | false;\n\n const catchupQueue: SubscriberAndMode[] = [];\n while ((msg = await this.#queue.dequeue()) !== 'stop') {\n this.#maybeReleaseBackPressure();\n\n const [msgType] = msg;\n switch (msgType) {\n case 'ready': {\n const signalReady = msg[1];\n signalReady();\n continue;\n }\n case 'subscriber': {\n const subscriber = msg[1];\n if (tx) {\n catchupQueue.push(subscriber); // Wait for the current tx to complete.\n } else {\n await this.#startCatchup([subscriber]); // Catch up immediately.\n }\n continue;\n }\n case 'status':\n this.#onConsumed(msg);\n continue;\n case 'abort': {\n if (tx) {\n tx.pool.abort();\n await tx.pool.done();\n tx = null;\n }\n continue;\n }\n }\n // msgType === 'change'\n const [watermark, downstream] = msg[1];\n const [tag, change] = downstream;\n if (tag === 'begin') {\n assert(!tx, 'received BEGIN in the middle of a transaction');\n const {promise, resolve, reject} = resolver<ReplicationState>();\n tx = {\n pool: new TransactionPool(\n this.#lc.withContext('watermark', watermark),\n Mode.SERIALIZABLE,\n ),\n preCommitWatermark: watermark,\n pos: 0,\n startingReplicationState: promise,\n };\n tx.pool.run(this.#db);\n // Pipeline a read of the current ReplicationState,\n // which will be checked before committing.\n void tx.pool.process(tx => {\n tx<ReplicationState[]>`\n SELECT * FROM ${this.#cdc('replicationState')}`.then(\n ([result]) => resolve(result),\n reject,\n );\n return [];\n });\n } else {\n assert(tx, `received ${tag} outside of transaction`);\n tx.pos++;\n }\n\n const 
entry = {\n watermark: tag === 'commit' ? watermark : tx.preCommitWatermark,\n precommit: tag === 'commit' ? tx.preCommitWatermark : null,\n pos: tx.pos,\n change: change as unknown as JSONValue,\n };\n\n const processed = tx.pool.process(tx => [\n tx`\n INSERT INTO ${this.#cdc('changeLog')} ${tx(entry)}`,\n ]);\n\n if (tag === 'data' && tx.pos % 10_000 === 0) {\n // Backpressure is exerted on commit when awaiting tx.pool.done().\n // However, backpressure checks need to be regularly done for\n // very large transactions in order to avoid memory blowup.\n await processed;\n }\n\n if (tag === 'commit') {\n const {owner} = await tx.startingReplicationState;\n if (owner !== this.#taskID) {\n // Ownership change reflected in the replicationState read in 'begin'.\n tx.pool.fail(\n new AbortError(`changeLog ownership has been assumed by ${owner}`),\n );\n } else {\n // Update the replication state.\n const lastWatermark = watermark;\n void tx.pool.process(tx => [\n tx`\n UPDATE ${this.#cdc('replicationState')} SET ${tx({lastWatermark})}`,\n ]);\n tx.pool.setDone();\n }\n\n try {\n await tx.pool.done();\n } catch (e) {\n if (\n e instanceof postgres.PostgresError &&\n e.code === PG_SERIALIZATION_FAILURE\n ) {\n // Ownership change happened after the replicationState was read in 'begin'.\n throw new AbortError(`changeLog ownership has changed`, {cause: e});\n }\n throw e;\n }\n\n tx = null;\n\n // ACK the LSN to the upstream Postgres.\n this.#onConsumed(downstream);\n\n // Before beginning the next transaction, open a READONLY snapshot to\n // concurrently catchup any queued subscribers.\n await this.#startCatchup(catchupQueue.splice(0));\n } else if (tag === 'rollback') {\n // Aborted transactions are not stored in the changeLog. Abort the current tx\n // and process catchup of subscribers that were waiting for it to end.\n tx.pool.abort();\n await tx.pool.done();\n tx = null;\n\n await this.#startCatchup(catchupQueue.splice(0));\n }\n }\n }\n\n async #startCatchup(subs: SubscriberAndMode[]) {\n if (subs.length === 0) {\n return;\n }\n\n const reader = new TransactionPool(\n this.#lc.withContext('pool', 'catchup'),\n Mode.READONLY,\n );\n reader.run(this.#db);\n\n // Ensure that the transaction has started (and is thus holding a snapshot\n // of the database) before continuing on to commit more changes. This is\n // done by waiting for a no-op task to be processed by the pool, which\n // indicates that the BEGIN statement has been sent to the database.\n await reader.processReadTask(() => {});\n\n // Run the actual catchup queries in the background. Errors are handled in\n // #catchup() by disconnecting the associated subscriber.\n void Promise.all(subs.map(sub => this.#catchup(sub, reader))).finally(() =>\n reader.setDone(),\n );\n }\n\n async #catchup(\n {subscriber: sub, mode}: SubscriberAndMode,\n reader: TransactionPool,\n ) {\n try {\n await reader.processReadTask(async tx => {\n const start = Date.now();\n\n // When starting from initial-sync, there won't be a change with a watermark\n // equal to the replica version. This is the empty changeLog scenario.\n let watermarkFound = sub.watermark === this.#replicaVersion;\n let count = 0;\n let lastBatchConsumed: Promise<unknown> | undefined;\n\n for await (const entries of tx<ChangeEntry[]>`\n SELECT watermark, change FROM ${this.#cdc('changeLog')}\n WHERE watermark >= ${sub.watermark}\n ORDER BY watermark, pos`.cursor(2000)) {\n // Wait for the last batch of entries to be consumed by the\n // subscriber before sending down the current batch. 
This pipelining\n // allows one batch of changes to be received from the change-db\n // while the previous batch of changes are sent to the subscriber,\n // resulting in flow control that caps the number of changes\n // referenced in memory to 2 * batch-size.\n const start = performance.now();\n await lastBatchConsumed;\n const elapsed = performance.now() - start;\n if (lastBatchConsumed) {\n (elapsed > 100 ? this.#lc.info : this.#lc.debug)?.(\n `waited ${elapsed.toFixed(3)} ms for ${sub.id} to consume last batch of catchup entries`,\n );\n }\n\n for (const entry of entries) {\n if (entry.watermark === sub.watermark) {\n // This should be the first entry.\n // Catchup starts from *after* the watermark.\n watermarkFound = true;\n } else if (watermarkFound) {\n lastBatchConsumed = sub.catchup(toDownstream(entry)).result;\n count++;\n } else if (mode === 'backup') {\n throw new AutoResetSignal(\n `backup replica at watermark ${sub.watermark} is behind change db: ${entry.watermark})`,\n );\n } else {\n this.#lc.warn?.(\n `rejecting subscriber at watermark ${sub.watermark} (earliest watermark: ${entry.watermark})`,\n );\n sub.close(\n ErrorType.WatermarkTooOld,\n `earliest supported watermark is ${entry.watermark} (requested ${sub.watermark})`,\n );\n return;\n }\n }\n }\n if (watermarkFound) {\n await lastBatchConsumed;\n this.#lc.info?.(\n `caught up ${sub.id} with ${count} changes (${\n Date.now() - start\n } ms)`,\n );\n } else {\n this.#lc.warn?.(\n `subscriber at watermark ${sub.watermark} is ahead of latest watermark`,\n );\n }\n // Flushes the backlog of messages buffered during catchup and\n // allows the subscription to forward subsequent messages immediately.\n sub.setCaughtUp();\n });\n } catch (err) {\n this.#lc.error?.(`error while catching up subscriber ${sub.id}`, err);\n if (err instanceof AutoResetSignal) {\n await markResetRequired(this.#db, this.#shard);\n this.#onFatal(err);\n }\n sub.fail(err);\n }\n }\n\n stop() {\n this.#queue.enqueue('stop');\n return promiseVoid;\n }\n}\n\nfunction toDownstream(entry: ChangeEntry): WatermarkedChange {\n const {watermark, change} = entry;\n switch (change.tag) {\n case 'begin':\n return [watermark, ['begin', change, {commitWatermark: watermark}]];\n case 'commit':\n return [watermark, ['commit', change, {watermark}]];\n case 'rollback':\n return [watermark, ['rollback', change]];\n default:\n return [watermark, ['data', change]];\n 
}\n}\n"],"names":["Mode.SERIALIZABLE","tx","Mode.READONLY","start","ErrorType.WatermarkTooOld"],"mappings":";;;;;;;;;;;;;;;;AA2DA,MAAM,qCAAqC;AAgCpC,MAAM,OAA0B;AAAA,EAC5B,KAAK;AAAA,EACL;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA,SAAS,IAAI,MAAA;AAAA,EAEtB,WAAW;AAAA,EAEX,YACE,IACA,OACA,QACA,kBACA,mBACA,IACA,gBACA,YACA,SACA;AACA,SAAK,MAAM;AACX,SAAK,SAAS;AACd,SAAK,UAAU;AACf,SAAK,oBAAoB;AACzB,SAAK,qBAAqB;AAC1B,SAAK,MAAM;AACX,SAAK,kBAAkB;AACvB,SAAK,cAAc;AACnB,SAAK,WAAW;AAAA,EAClB;AAAA;AAAA,EAGA,KAAK,OAAe;AAClB,WAAO,KAAK,IAAI,GAAG,UAAU,KAAK,MAAM,CAAC,IAAI,KAAK,EAAE;AAAA,EACtD;AAAA,EAEA,MAAM,kBAAkB;AACtB,UAAM,KAAK,KAAK;AAChB,UAAM,QAAQ,KAAK;AACnB,UAAM,eAAe,KAAK;AAC1B,UAAM,gBAAgB,KAAK;AAE3B,UAAM,sBACJ,kBAAkB,OACd,eACA,GAAG,aAAa,MAAM,YAAY;AACxC,UAAM,YAAY,KAAK,KAAK,kBAAkB,CAAC,QAAQ,GAAG,EAAC,OAAO,cAAc,oBAAA,CAAoB,CAAC;AACrG,SAAK,IAAI,OAAO,wBAAwB,mBAAmB,EAAE;AAAA,EAC/D;AAAA,EAEA,MAAM,gCAAiD;AAIrD,UAAM,EAAC,SAAS,OAAO,QAAA,IAAW,SAAA;AAClC,SAAK,OAAO,QAAQ,CAAC,SAAS,OAAO,CAAC;AACtC,UAAM;AAEN,UAAM,CAAC,EAAC,cAAA,CAAc,IAAI,MAAM,KAAK;AAAA,oCACL,KAAK,KAAK,kBAAkB,CAAC;AAC7D,WAAO;AAAA,EACT;AAAA,EAEA,MAAM,4BAAoD;AACxD,UAAM,CAAC,EAAC,aAAA,CAAa,IAAI,MAAM,KAAK;AAAA,qDAGa,KAAK,KAAK,WAAW,CAAC;AACvE,WAAO;AAAA,EACT;AAAA,EAEA,mBAAmB,WAAoC;AACrD,WAAO,KAAK,IAAI,MAAMA,cAAmB,OAAM,QAAO;AACpD,8BAAwB,GAAG;AAK3B,YAAM,CAAC,EAAC,OAAM,IAAI,MAAM;AAAA,wBACN,KAAK,KAAK,kBAAkB,CAAC;AAC/C,UAAI,UAAU,KAAK,SAAS;AAC1B,aAAK,IAAI;AAAA,UACP,sCAAsC,SAAS;AAAA,QAAA;AAEjD,eAAO;AAAA,MACT;AAEA,YAAM,CAAC,EAAC,SAAQ,IAAI,MAAM;AAAA;AAAA,wBAER,KAAK,KAAK,WAAW,CAAC,sBAAsB,SAAS;AAAA;AAAA;AAGvE,aAAO,OAAO,OAAO;AAAA,IACvB,CAAC;AAAA,EACH;AAAA,EAEA,MAAM,OAA0B;AAC9B,SAAK,OAAO,QAAQ,CAAC,UAAU,KAAK,CAAC;AAAA,EACvC;AAAA,EAEA,QAAQ;AACN,SAAK,OAAO,QAAQ,CAAC,OAAO,CAAC;AAAA,EAC/B;AAAA,EAEA,OAAO,GAAkB;AACvB,SAAK,OAAO,QAAQ,CAAC;AAAA,EACvB;AAAA,EAEA,QAAQ,YAAwB,MAAsB;AACpD,SAAK,OAAO,QAAQ,CAAC,cAAc,EAAC,YAAY,KAAA,CAAK,CAAC;AAAA,EACxD;AAAA,EAEA,gBAAuC;AAAA,EAEvC,eAA0C;AACxC,QAAI,CAAC,KAAK,UAAU;AAClB,aAAO;AAAA,IACT;AACA,QACE,KAAK,kBAAkB,QACvB,KAAK,OAAO,KAAA,IAAS,oCACrB;AACA,WAAK,IAAI;AAAA,QACP,+BAA+B,KAAK,OAAO,KAAA,CAAM;AAAA,MAAA;AAEnD,WAAK,gBAAgB,SAAA;AAAA,IACvB;AACA,WAAO,KAAK,eAAe;AAAA,EAC7B;AAAA,EAEA,4BAA4B;AAC1B,QACE,KAAK,kBAAkB;AAAA,IAEvB,KAAK,OAAO,SAAS,qCAAqC,KAC1D;AACA,WAAK,IAAI;AAAA,QACP,gCAAgC,KAAK,OAAO,KAAA,CAAM;AAAA,MAAA;AAEpD,WAAK,cAAc,QAAA;AACnB,WAAK,gBAAgB;AAAA,IACvB;AAAA,EACF;AAAA,EAEA,MAAM,MAAM;AACV,SAAK,WAAW;AAChB,QAAI;AACF,YAAM,KAAK,cAAA;AAAA,IACb,UAAA;AACE,WAAK,WAAW;AAChB,WAAK,IAAI,OAAO,gBAAgB;AAAA,IAClC;AAAA,EACF;AAAA,EAEA,MAAM,gBAAgB;AACpB,QAAI,KAAgC;AACpC,QAAI;AAEJ,UAAM,eAAoC,CAAA;AAC1C,YAAQ,MAAM,MAAM,KAAK,OAAO,QAAA,OAAe,QAAQ;AACrD,WAAK,0BAAA;AAEL,YAAM,CAAC,OAAO,IAAI;AAClB,cAAQ,SAAA;AAAA,QACN,KAAK,SAAS;AACZ,gBAAM,cAAc,IAAI,CAAC;AACzB,sBAAA;AACA;AAAA,QACF;AAAA,QACA,KAAK,cAAc;AACjB,gBAAM,aAAa,IAAI,CAAC;AACxB,cAAI,IAAI;AACN,yBAAa,KAAK,UAAU;AAAA,UAC9B,OAAO;AACL,kBAAM,KAAK,cAAc,CAAC,UAAU,CAAC;AAAA,UACvC;AACA;AAAA,QACF;AAAA,QACA,KAAK;AACH,eAAK,YAAY,GAAG;AACpB;AAAA,QACF,KAAK,SAAS;AACZ,cAAI,IAAI;AACN,eAAG,KAAK,MAAA;AACR,kBAAM,GAAG,KAAK,KAAA;AACd,iBAAK;AAAA,UACP;AACA;AAAA,QACF;AAAA,MAAA;AAGF,YAAM,CAAC,WAAW,UAAU,IAAI,IAAI,CAAC;AACrC,YAAM,CAAC,KAAK,MAAM,IAAI;AACtB,UAAI,QAAQ,SAAS;AACnB,eAAO,CAAC,IAAI,+CAA+C;AAC3D,cAAM,EAAC,SAAS,SAAS,OAAA,IAAU,SAAA;AACnC,aAAK;AAAA,UACH,MAAM,IAAI;AAAA,YACR,KAAK,IAAI,YAAY,aAAa,SAAS;AAAA,YAC3CA;AAAAA,UAAK;AAAA,UAEP,oBAAoB;AAAA,UACpB,KAAK;AAAA,UACL,0BAA0B;AAAA,QAAA;AAE5B,WAAG,KAAK,IAAI,KAAK,GAAG;AAGpB,aAAK,GAAG,KAAK,QAAQ,CAAAC,QAAM;AACzBA;AAAAA,0BACgB,KAAK,KAAK,kBAAkB,CAAC,GAAG;AAAA,YAC9C,CAAC,CAA
C,MAAM,MAAM,QAAQ,MAAM;AAAA,YAC5B;AAAA,UAAA;AAEF,iBAAO,CAAA;AAAA,QACT,CAAC;AAAA,MACH,OAAO;AACL,eAAO,IAAI,YAAY,GAAG,yBAAyB;AACnD,WAAG;AAAA,MACL;AAEA,YAAM,QAAQ;AAAA,QACZ,WAAW,QAAQ,WAAW,YAAY,GAAG;AAAA,QAC7C,WAAW,QAAQ,WAAW,GAAG,qBAAqB;AAAA,QACtD,KAAK,GAAG;AAAA,QACR;AAAA,MAAA;AAGF,YAAM,YAAY,GAAG,KAAK,QAAQ,CAAAA,QAAM;AAAA,QACtCA;AAAAA,sBACc,KAAK,KAAK,WAAW,CAAC,IAAIA,IAAG,KAAK,CAAC;AAAA,MAAA,CAClD;AAED,UAAI,QAAQ,UAAU,GAAG,MAAM,QAAW,GAAG;AAI3C,cAAM;AAAA,MACR;AAEA,UAAI,QAAQ,UAAU;AACpB,cAAM,EAAC,MAAA,IAAS,MAAM,GAAG;AACzB,YAAI,UAAU,KAAK,SAAS;AAE1B,aAAG,KAAK;AAAA,YACN,IAAI,WAAW,2CAA2C,KAAK,EAAE;AAAA,UAAA;AAAA,QAErE,OAAO;AAEL,gBAAM,gBAAgB;AACtB,eAAK,GAAG,KAAK,QAAQ,CAAAA,QAAM;AAAA,YACzBA;AAAAA,qBACS,KAAK,KAAK,kBAAkB,CAAC,QAAQA,IAAG,EAAC,cAAA,CAAc,CAAC;AAAA,UAAA,CAClE;AACD,aAAG,KAAK,QAAA;AAAA,QACV;AAEA,YAAI;AACF,gBAAM,GAAG,KAAK,KAAA;AAAA,QAChB,SAAS,GAAG;AACV,cACE,aAAa,SAAS,iBACtB,EAAE,SAAS,0BACX;AAEA,kBAAM,IAAI,WAAW,mCAAmC,EAAC,OAAO,GAAE;AAAA,UACpE;AACA,gBAAM;AAAA,QACR;AAEA,aAAK;AAGL,aAAK,YAAY,UAAU;AAI3B,cAAM,KAAK,cAAc,aAAa,OAAO,CAAC,CAAC;AAAA,MACjD,WAAW,QAAQ,YAAY;AAG7B,WAAG,KAAK,MAAA;AACR,cAAM,GAAG,KAAK,KAAA;AACd,aAAK;AAEL,cAAM,KAAK,cAAc,aAAa,OAAO,CAAC,CAAC;AAAA,MACjD;AAAA,IACF;AAAA,EACF;AAAA,EAEA,MAAM,cAAc,MAA2B;AAC7C,QAAI,KAAK,WAAW,GAAG;AACrB;AAAA,IACF;AAEA,UAAM,SAAS,IAAI;AAAA,MACjB,KAAK,IAAI,YAAY,QAAQ,SAAS;AAAA,MACtCC;AAAAA,IAAK;AAEP,WAAO,IAAI,KAAK,GAAG;AAMnB,UAAM,OAAO,gBAAgB,MAAM;AAAA,IAAC,CAAC;AAIrC,SAAK,QAAQ,IAAI,KAAK,IAAI,CAAA,QAAO,KAAK,SAAS,KAAK,MAAM,CAAC,CAAC,EAAE;AAAA,MAAQ,MACpE,OAAO,QAAA;AAAA,IAAQ;AAAA,EAEnB;AAAA,EAEA,MAAM,SACJ,EAAC,YAAY,KAAK,KAAA,GAClB,QACA;AACA,QAAI;AACF,YAAM,OAAO,gBAAgB,OAAM,OAAM;AACvC,cAAM,QAAQ,KAAK,IAAA;AAInB,YAAI,iBAAiB,IAAI,cAAc,KAAK;AAC5C,YAAI,QAAQ;AACZ,YAAI;AAEJ,yBAAiB,WAAW;AAAA,0CACM,KAAK,KAAK,WAAW,CAAC;AAAA,gCAChC,IAAI,SAAS;AAAA,oCACT,OAAO,GAAI,GAAG;AAOxC,gBAAMC,SAAQ,YAAY,IAAA;AAC1B,gBAAM;AACN,gBAAM,UAAU,YAAY,IAAA,IAAQA;AACpC,cAAI,mBAAmB;AACrB,aAAC,UAAU,MAAM,KAAK,IAAI,OAAO,KAAK,IAAI;AAAA,cACxC,UAAU,QAAQ,QAAQ,CAAC,CAAC,WAAW,IAAI,EAAE;AAAA,YAAA;AAAA,UAEjD;AAEA,qBAAW,SAAS,SAAS;AAC3B,gBAAI,MAAM,cAAc,IAAI,WAAW;AAGrC,+BAAiB;AAAA,YACnB,WAAW,gBAAgB;AACzB,kCAAoB,IAAI,QAAQ,aAAa,KAAK,CAAC,EAAE;AACrD;AAAA,YACF,WAAW,SAAS,UAAU;AAC5B,oBAAM,IAAI;AAAA,gBACR,+BAA+B,IAAI,SAAS,yBAAyB,MAAM,SAAS;AAAA,cAAA;AAAA,YAExF,OAAO;AACL,mBAAK,IAAI;AAAA,gBACP,qCAAqC,IAAI,SAAS,yBAAyB,MAAM,SAAS;AAAA,cAAA;AAE5F,kBAAI;AAAA,gBACFC;AAAAA,gBACA,mCAAmC,MAAM,SAAS,eAAe,IAAI,SAAS;AAAA,cAAA;AAEhF;AAAA,YACF;AAAA,UACF;AAAA,QACF;AACA,YAAI,gBAAgB;AAClB,gBAAM;AACN,eAAK,IAAI;AAAA,YACP,aAAa,IAAI,EAAE,SAAS,KAAK,aAC/B,KAAK,QAAQ,KACf;AAAA,UAAA;AAAA,QAEJ,OAAO;AACL,eAAK,IAAI;AAAA,YACP,2BAA2B,IAAI,SAAS;AAAA,UAAA;AAAA,QAE5C;AAGA,YAAI,YAAA;AAAA,MACN,CAAC;AAAA,IACH,SAAS,KAAK;AACZ,WAAK,IAAI,QAAQ,sCAAsC,IAAI,EAAE,IAAI,GAAG;AACpE,UAAI,eAAe,iBAAiB;AAClC,cAAM,kBAAkB,KAAK,KAAK,KAAK,MAAM;AAC7C,aAAK,SAAS,GAAG;AAAA,MACnB;AACA,UAAI,KAAK,GAAG;AAAA,IACd;AAAA,EACF;AAAA,EAEA,OAAO;AACL,SAAK,OAAO,QAAQ,MAAM;AAC1B,WAAO;AAAA,EACT;AACF;AAEA,SAAS,aAAa,OAAuC;AAC3D,QAAM,EAAC,WAAW,OAAA,IAAU;AAC5B,UAAQ,OAAO,KAAA;AAAA,IACb,KAAK;AACH,aAAO,CAAC,WAAW,CAAC,SAAS,QAAQ,EAAC,iBAAiB,UAAA,CAAU,CAAC;AAAA,IACpE,KAAK;AACH,aAAO,CAAC,WAAW,CAAC,UAAU,QAAQ,EAAC,UAAA,CAAU,CAAC;AAAA,IACpD,KAAK;AACH,aAAO,CAAC,WAAW,CAAC,YAAY,MAAM,CAAC;AAAA,IACzC;AACE,aAAO,CAAC,WAAW,CAAC,QAAQ,MAAM,CAAC;AAAA,EAAA;AAEzC;"}
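The source map that ends above embeds the updated `Storer` implementation (the change-streamer's change-log writer). One behavioral detail visible in that embedded source: when the storer assumes ownership it now records its discovery address with an explicit protocol prefix, unless the protocol is plain `ws`. A minimal sketch of that formatting, lifted from the embedded source; the standalone function name is ours, not the package's:

    // Hypothetical helper mirroring Storer.assumeOwnership()'s address handling.
    // `ws://` is deliberately omitted so older view-syncer versions, which do not
    // expect a protocol prefix, keep receiving the bare address.
    function formatOwnerAddress(ownerProtocol: string, ownerAddress: string): string {
      return ownerProtocol === 'ws'
        ? ownerAddress
        : `${ownerProtocol}://${ownerAddress}`;
    }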
@@ -18,7 +18,6 @@ export declare class HttpService implements Service {
  constructor(id: string, lc: LogContext, opts: Options, init: (fastify: FastifyInstance) => void | Promise<void>);
  protected _onStart(): void;
  protected _onStop(): Promise<void>;
- protected _onHeartbeat(_count: number): void;
  start(): Promise<string>;
  run(): Promise<void>;
  stop(): Promise<void>;
@@ -1 +1 @@
- {"version":3,"file":"http-service.d.ts","sourceRoot":"","sources":["../../../../../zero-cache/src/services/http-service.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,kBAAkB,CAAC;AACjD,OAAgB,EAAC,KAAK,eAAe,EAAC,MAAM,SAAS,CAAC;AAGtD,OAAO,EAAC,YAAY,EAAC,MAAM,oBAAoB,CAAC;AAChD,OAAO,KAAK,EAAC,OAAO,EAAC,MAAM,cAAc,CAAC;AAE1C,MAAM,MAAM,OAAO,GAAG;IACpB,IAAI,EAAE,MAAM,CAAC;CACd,CAAC;AAEF;;;;GAIG;AACH,qBAAa,WAAY,YAAW,OAAO;;IACzC,QAAQ,CAAC,EAAE,EAAE,MAAM,CAAC;IACpB,SAAS,CAAC,QAAQ,CAAC,GAAG,EAAE,UAAU,CAAC;IAGnC,SAAS,CAAC,QAAQ,CAAC,MAAM,EAAE,YAAY,CAAC;gBAKtC,EAAE,EAAE,MAAM,EACV,EAAE,EAAE,UAAU,EACd,IAAI,EAAE,OAAO,EACb,IAAI,EAAE,CAAC,OAAO,EAAE,eAAe,KAAK,IAAI,GAAG,OAAO,CAAC,IAAI,CAAC;IAY1D,SAAS,CAAC,QAAQ;IAClB,SAAS,CAAC,OAAO,IAAI,OAAO,CAAC,IAAI,CAAC;IAGlC,SAAS,CAAC,YAAY,CAAC,MAAM,EAAE,MAAM;IAI/B,KAAK,IAAI,OAAO,CAAC,MAAM,CAAC;IAmBxB,GAAG,IAAI,OAAO,CAAC,IAAI,CAAC;IAKpB,IAAI,IAAI,OAAO,CAAC,IAAI,CAAC;CAO5B"}
+ {"version":3,"file":"http-service.d.ts","sourceRoot":"","sources":["../../../../../zero-cache/src/services/http-service.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,kBAAkB,CAAC;AACjD,OAAgB,EAAC,KAAK,eAAe,EAAC,MAAM,SAAS,CAAC;AAGtD,OAAO,EAAC,YAAY,EAAC,MAAM,oBAAoB,CAAC;AAChD,OAAO,KAAK,EAAC,OAAO,EAAC,MAAM,cAAc,CAAC;AAE1C,MAAM,MAAM,OAAO,GAAG;IACpB,IAAI,EAAE,MAAM,CAAC;CACd,CAAC;AAEF;;;;GAIG;AACH,qBAAa,WAAY,YAAW,OAAO;;IACzC,QAAQ,CAAC,EAAE,EAAE,MAAM,CAAC;IACpB,SAAS,CAAC,QAAQ,CAAC,GAAG,EAAE,UAAU,CAAC;IAGnC,SAAS,CAAC,QAAQ,CAAC,MAAM,EAAE,YAAY,CAAC;gBAKtC,EAAE,EAAE,MAAM,EACV,EAAE,EAAE,UAAU,EACd,IAAI,EAAE,OAAO,EACb,IAAI,EAAE,CAAC,OAAO,EAAE,eAAe,KAAK,IAAI,GAAG,OAAO,CAAC,IAAI,CAAC;IAY1D,SAAS,CAAC,QAAQ;IAClB,SAAS,CAAC,OAAO,IAAI,OAAO,CAAC,IAAI,CAAC;IAK5B,KAAK,IAAI,OAAO,CAAC,MAAM,CAAC;IAgBxB,GAAG,IAAI,OAAO,CAAC,IAAI,CAAC;IAKpB,IAAI,IAAI,OAAO,CAAC,IAAI,CAAC;CAO5B"}
@@ -25,16 +25,12 @@ class HttpService {
  _onStop() {
  return promiseVoid;
  }
- _onHeartbeat(_count) {
- }
  // start() is used in unit tests.
  // run() is the lifecycle method called by the ServiceRunner.
  async start() {
- let heartbeats = 0;
  this.#fastify.get("/", (_req, res) => res.send("OK"));
  this.#fastify.get("/keepalive", ({ headers }, res) => {
  this.#heartbeatMonitor.onHeartbeat(headers);
- this._onHeartbeat(++heartbeats);
  return res.send("OK");
  });
  await this.#init(this.#fastify);
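Taken together, the http-service hunks above remove the protected `_onHeartbeat(count)` hook and its per-request counter, so `GET /keepalive` now only feeds the shared `HeartbeatMonitor`. A hypothetical subclass illustrating the impact; `HttpService` is an internal zero-cache class and this sketch is ours, not from the package:

    import {HttpService} from './http-service.ts'; // internal zero-cache module

    class KeepaliveLoggingService extends HttpService {
      // 0.25.0-canary.9: called once per GET /keepalive with a running count.
      // 0.25.1: the base class no longer declares or invokes this hook, so the
      // method below is dead code (and an `override` modifier would not type-check).
      protected _onHeartbeat(count: number): void {
        this._lc.debug?.(`keepalive #${count}`);
      }
    }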
@@ -1 +1 @@
- {"version":3,"file":"http-service.js","sources":["../../../../../zero-cache/src/services/http-service.ts"],"sourcesContent":["import type {LogContext} from '@rocicorp/logger';\nimport Fastify, {type FastifyInstance} from 'fastify';\nimport {promiseVoid} from '../../../shared/src/resolved-promises.ts';\nimport {HeartbeatMonitor} from './life-cycle.ts';\nimport {RunningState} from './running-state.ts';\nimport type {Service} from './service.ts';\n\nexport type Options = {\n port: number;\n};\n\n/**\n * Common functionality for all HttpServices. These include:\n * * Responding to health checks at \"/\"\n * * Tracking optional heartbeats at \"/keepalive\" and draining when they stop.\n */\nexport class HttpService implements Service {\n readonly id: string;\n protected readonly _lc: LogContext;\n readonly #fastify: FastifyInstance;\n readonly #port: number;\n protected readonly _state: RunningState;\n readonly #heartbeatMonitor: HeartbeatMonitor;\n readonly #init: (fastify: FastifyInstance) => void | Promise<void>;\n\n constructor(\n id: string,\n lc: LogContext,\n opts: Options,\n init: (fastify: FastifyInstance) => void | Promise<void>,\n ) {\n this.id = id;\n this._lc = lc.withContext('component', this.id);\n this.#fastify = Fastify();\n this.#port = opts.port;\n this.#init = init;\n this._state = new RunningState(id);\n this.#heartbeatMonitor = new HeartbeatMonitor(this._lc);\n }\n\n // Life-cycle hooks for subclass implementations\n protected _onStart() {}\n protected _onStop(): Promise<void> {\n return promiseVoid;\n }\n protected _onHeartbeat(_count: number) {}\n\n // start() is used in unit tests.\n // run() is the lifecycle method called by the ServiceRunner.\n async start(): Promise<string> {\n let heartbeats = 0;\n\n this.#fastify.get('/', (_req, res) => res.send('OK'));\n this.#fastify.get('/keepalive', ({headers}, res) => {\n this.#heartbeatMonitor.onHeartbeat(headers);\n this._onHeartbeat(++heartbeats);\n return res.send('OK');\n });\n await this.#init(this.#fastify);\n const address = await this.#fastify.listen({\n host: '::',\n port: this.#port,\n });\n this._lc.info?.(`${this.id} listening at ${address}`);\n this._onStart();\n return address;\n }\n\n async run(): Promise<void> {\n await this.start();\n await this._state.stopped();\n }\n\n async stop(): Promise<void> {\n this._lc.info?.(`${this.id}: no longer accepting connections`);\n this.#heartbeatMonitor.stop();\n this._state.stop(this._lc);\n await this.#fastify.close();\n await this._onStop();\n 
}\n}\n"],"names":[],"mappings":";;;;AAgBO,MAAM,YAA+B;AAAA,EACjC;AAAA,EACU;AAAA,EACV;AAAA,EACA;AAAA,EACU;AAAA,EACV;AAAA,EACA;AAAA,EAET,YACE,IACA,IACA,MACA,MACA;AACA,SAAK,KAAK;AACV,SAAK,MAAM,GAAG,YAAY,aAAa,KAAK,EAAE;AAC9C,SAAK,WAAW,QAAA;AAChB,SAAK,QAAQ,KAAK;AAClB,SAAK,QAAQ;AACb,SAAK,SAAS,IAAI,aAAa,EAAE;AACjC,SAAK,oBAAoB,IAAI,iBAAiB,KAAK,GAAG;AAAA,EACxD;AAAA;AAAA,EAGU,WAAW;AAAA,EAAC;AAAA,EACZ,UAAyB;AACjC,WAAO;AAAA,EACT;AAAA,EACU,aAAa,QAAgB;AAAA,EAAC;AAAA;AAAA;AAAA,EAIxC,MAAM,QAAyB;AAC7B,QAAI,aAAa;AAEjB,SAAK,SAAS,IAAI,KAAK,CAAC,MAAM,QAAQ,IAAI,KAAK,IAAI,CAAC;AACpD,SAAK,SAAS,IAAI,cAAc,CAAC,EAAC,QAAA,GAAU,QAAQ;AAClD,WAAK,kBAAkB,YAAY,OAAO;AAC1C,WAAK,aAAa,EAAE,UAAU;AAC9B,aAAO,IAAI,KAAK,IAAI;AAAA,IACtB,CAAC;AACD,UAAM,KAAK,MAAM,KAAK,QAAQ;AAC9B,UAAM,UAAU,MAAM,KAAK,SAAS,OAAO;AAAA,MACzC,MAAM;AAAA,MACN,MAAM,KAAK;AAAA,IAAA,CACZ;AACD,SAAK,IAAI,OAAO,GAAG,KAAK,EAAE,iBAAiB,OAAO,EAAE;AACpD,SAAK,SAAA;AACL,WAAO;AAAA,EACT;AAAA,EAEA,MAAM,MAAqB;AACzB,UAAM,KAAK,MAAA;AACX,UAAM,KAAK,OAAO,QAAA;AAAA,EACpB;AAAA,EAEA,MAAM,OAAsB;AAC1B,SAAK,IAAI,OAAO,GAAG,KAAK,EAAE,mCAAmC;AAC7D,SAAK,kBAAkB,KAAA;AACvB,SAAK,OAAO,KAAK,KAAK,GAAG;AACzB,UAAM,KAAK,SAAS,MAAA;AACpB,UAAM,KAAK,QAAA;AAAA,EACb;AACF;"}
+ {"version":3,"file":"http-service.js","sources":["../../../../../zero-cache/src/services/http-service.ts"],"sourcesContent":["import type {LogContext} from '@rocicorp/logger';\nimport Fastify, {type FastifyInstance} from 'fastify';\nimport {promiseVoid} from '../../../shared/src/resolved-promises.ts';\nimport {HeartbeatMonitor} from './life-cycle.ts';\nimport {RunningState} from './running-state.ts';\nimport type {Service} from './service.ts';\n\nexport type Options = {\n port: number;\n};\n\n/**\n * Common functionality for all HttpServices. These include:\n * * Responding to health checks at \"/\"\n * * Tracking optional heartbeats at \"/keepalive\" and draining when they stop.\n */\nexport class HttpService implements Service {\n readonly id: string;\n protected readonly _lc: LogContext;\n readonly #fastify: FastifyInstance;\n readonly #port: number;\n protected readonly _state: RunningState;\n readonly #heartbeatMonitor: HeartbeatMonitor;\n readonly #init: (fastify: FastifyInstance) => void | Promise<void>;\n\n constructor(\n id: string,\n lc: LogContext,\n opts: Options,\n init: (fastify: FastifyInstance) => void | Promise<void>,\n ) {\n this.id = id;\n this._lc = lc.withContext('component', this.id);\n this.#fastify = Fastify();\n this.#port = opts.port;\n this.#init = init;\n this._state = new RunningState(id);\n this.#heartbeatMonitor = new HeartbeatMonitor(this._lc);\n }\n\n // Life-cycle hooks for subclass implementations\n protected _onStart() {}\n protected _onStop(): Promise<void> {\n return promiseVoid;\n }\n // start() is used in unit tests.\n // run() is the lifecycle method called by the ServiceRunner.\n async start(): Promise<string> {\n this.#fastify.get('/', (_req, res) => res.send('OK'));\n this.#fastify.get('/keepalive', ({headers}, res) => {\n this.#heartbeatMonitor.onHeartbeat(headers);\n return res.send('OK');\n });\n await this.#init(this.#fastify);\n const address = await this.#fastify.listen({\n host: '::',\n port: this.#port,\n });\n this._lc.info?.(`${this.id} listening at ${address}`);\n this._onStart();\n return address;\n }\n\n async run(): Promise<void> {\n await this.start();\n await this._state.stopped();\n }\n\n async stop(): Promise<void> {\n this._lc.info?.(`${this.id}: no longer accepting connections`);\n this.#heartbeatMonitor.stop();\n this._state.stop(this._lc);\n await this.#fastify.close();\n await this._onStop();\n }\n}\n"],"names":[],"mappings":";;;;AAgBO,MAAM,YAA+B;AAAA,EACjC;AAAA,EACU;AAAA,EACV;AAAA,EACA;AAAA,EACU;AAAA,EACV;AAAA,EACA;AAAA,EAET,YACE,IACA,IACA,MACA,MACA;AACA,SAAK,KAAK;AACV,SAAK,MAAM,GAAG,YAAY,aAAa,KAAK,EAAE;AAC9C,SAAK,WAAW,QAAA;AAChB,SAAK,QAAQ,KAAK;AAClB,SAAK,QAAQ;AACb,SAAK,SAAS,IAAI,aAAa,EAAE;AACjC,SAAK,oBAAoB,IAAI,iBAAiB,KAAK,GAAG;AAAA,EACxD;AAAA;AAAA,EAGU,WAAW;AAAA,EAAC;AAAA,EACZ,UAAyB;AACjC,WAAO;AAAA,EACT;AAAA;AAAA;AAAA,EAGA,MAAM,QAAyB;AAC7B,SAAK,SAAS,IAAI,KAAK,CAAC,MAAM,QAAQ,IAAI,KAAK,IAAI,CAAC;AACpD,SAAK,SAAS,IAAI,cAAc,CAAC,EAAC,QAAA,GAAU,QAAQ;AAClD,WAAK,kBAAkB,YAAY,OAAO;AAC1C,aAAO,IAAI,KAAK,IAAI;AAAA,IACtB,CAAC;AACD,UAAM,KAAK,MAAM,KAAK,QAAQ;AAC9B,UAAM,UAAU,MAAM,KAAK,SAAS,OAAO;AAAA,MACzC,MAAM;AAAA,MACN,MAAM,KAAK;AAAA,IAAA,CACZ;AACD,SAAK,IAAI,OAAO,GAAG,KAAK,EAAE,iBAAiB,OAAO,EAAE;AACpD,SAAK,SAAA;AACL,WAAO;AAAA,EACT;AAAA,EAEA,MAAM,MAAqB;AACzB,UAAM,KAAK,MAAA;AACX,UAAM,KAAK,OAAO,QAAA;AAAA,EACpB;AAAA,EAEA,MAAM,OAAsB;AAC1B,SAAK,IAAI,OAAO,GAAG,KAAK,EAAE,mCAAmC;AAC7D,SAAK,kBAAkB,KAAA;AACvB,SAAK,OAAO,KAAK,KAAK,GAAG;AACzB,UAAM,KAAK,SAAS,MAAA;AACpB,UAAM,KAAK,QAAA;AAAA,EACb;AACF;"}
@@ -41,14 +41,15 @@ function getLitestream(config, logLevelOverride, backupURLOverride) {
  configPath,
  port = config.port + 2,
  checkpointThresholdMB,
+ minCheckpointPageCount = checkpointThresholdMB * 250,
+ // SQLite page size is 4KB
+ maxCheckpointPageCount = minCheckpointPageCount * 10,
  incrementalBackupIntervalMinutes,
  snapshotBackupIntervalHours,
  multipartConcurrency,
  multipartSize
  } = config.litestream;
  const snapshotBackupIntervalMinutes = snapshotBackupIntervalHours * 60 - 5;
- const minCheckpointPageCount = checkpointThresholdMB * 250;
- const maxCheckpointPageCount = minCheckpointPageCount * 10;
  return {
  litestream: must(executable, `Missing --litestream-executable`),
  env: {
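The commands.js hunk above moves the checkpoint page-count math out of local constants and into the destructuring of `config.litestream`, where the old formulas remain only as defaults; the pattern suggests both counts can now be supplied explicitly in the litestream config. The arithmetic, worked through with a hypothetical threshold (SQLite pages are 4 KB, so roughly 250 pages per MB):

    const checkpointThresholdMB = 40;                            // example value only
    const minCheckpointPageCount = checkpointThresholdMB * 250;  // 10_000 pages ≈ 40 MB
    const maxCheckpointPageCount = minCheckpointPageCount * 10;  // 100_000 pages ≈ 400 MB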
@@ -1 +1 @@
- {"version":3,"file":"commands.js","sources":["../../../../../../zero-cache/src/services/litestream/commands.ts"],"sourcesContent":["import type {LogContext, LogLevel} from '@rocicorp/logger';\nimport {resolver} from '@rocicorp/resolver';\nimport type {ChildProcess} from 'node:child_process';\nimport {spawn} from 'node:child_process';\nimport {existsSync} from 'node:fs';\nimport {must} from '../../../../shared/src/must.ts';\nimport {sleep} from '../../../../shared/src/sleep.ts';\nimport {Database} from '../../../../zqlite/src/db.ts';\nimport {assertNormalized} from '../../config/normalize.ts';\nimport type {ZeroConfig} from '../../config/zero-config.ts';\nimport {deleteLiteDB} from '../../db/delete-lite-db.ts';\nimport {StatementRunner} from '../../db/statements.ts';\nimport {getShardConfig} from '../../types/shards.ts';\nimport type {Source} from '../../types/streams.ts';\nimport {ChangeStreamerHttpClient} from '../change-streamer/change-streamer-http.ts';\nimport type {\n SnapshotMessage,\n SnapshotStatus,\n} from '../change-streamer/snapshot.ts';\nimport {getSubscriptionState} from '../replicator/schema/replication-state.ts';\n\n// Retry for up to 3 minutes (60 times with 3 second delay).\n// Beyond that, let the container runner restart the task.\nconst MAX_RETRIES = 60;\nconst RETRY_INTERVAL_MS = 3000;\n\n/**\n * @returns The time at which the last restore started\n * (i.e. not counting failed attempts).\n */\nexport async function restoreReplica(\n lc: LogContext,\n config: ZeroConfig,\n): Promise<Date> {\n const {changeStreamer} = config;\n\n for (let i = 0; i < MAX_RETRIES; i++) {\n if (i > 0) {\n lc.info?.(\n `replica not found. retrying in ${RETRY_INTERVAL_MS / 1000} seconds`,\n );\n await sleep(RETRY_INTERVAL_MS);\n }\n const start = new Date();\n const restored = await tryRestore(lc, config);\n if (restored) {\n return start;\n }\n if (\n changeStreamer.mode === 'dedicated' &&\n changeStreamer.uri === undefined\n ) {\n lc.info?.('no litestream backup found');\n return start;\n }\n }\n throw new Error(`max attempts exceeded restoring replica`);\n}\n\nfunction getLitestream(\n config: ZeroConfig,\n logLevelOverride?: LogLevel,\n backupURLOverride?: string,\n): {\n litestream: string;\n env: NodeJS.ProcessEnv;\n} {\n const {\n executable,\n backupURL,\n logLevel,\n configPath,\n port = config.port + 2,\n checkpointThresholdMB,\n incrementalBackupIntervalMinutes,\n snapshotBackupIntervalHours,\n multipartConcurrency,\n multipartSize,\n } = config.litestream;\n\n // Set the snapshot interval to something smaller than x hours so that\n // the hourly check triggers on the hour, rather than the hour after.\n const snapshotBackupIntervalMinutes = snapshotBackupIntervalHours * 60 - 5;\n const minCheckpointPageCount = checkpointThresholdMB * 250; // SQLite page size is 4k\n const maxCheckpointPageCount = minCheckpointPageCount * 10;\n\n return {\n litestream: must(executable, `Missing --litestream-executable`),\n env: {\n ...process.env,\n ['ZERO_REPLICA_FILE']: config.replica.file,\n ['ZERO_LITESTREAM_BACKUP_URL']: must(backupURLOverride ?? backupURL),\n ['ZERO_LITESTREAM_MIN_CHECKPOINT_PAGE_COUNT']: String(\n minCheckpointPageCount,\n ),\n ['ZERO_LITESTREAM_MAX_CHECKPOINT_PAGE_COUNT']: String(\n maxCheckpointPageCount,\n ),\n ['ZERO_LITESTREAM_INCREMENTAL_BACKUP_INTERVAL_MINUTES']: String(\n incrementalBackupIntervalMinutes,\n ),\n ['ZERO_LITESTREAM_LOG_LEVEL']: logLevelOverride ?? 
logLevel,\n ['ZERO_LITESTREAM_SNAPSHOT_BACKUP_INTERVAL_MINUTES']: String(\n snapshotBackupIntervalMinutes,\n ),\n ['ZERO_LITESTREAM_MULTIPART_CONCURRENCY']: String(multipartConcurrency),\n ['ZERO_LITESTREAM_MULTIPART_SIZE']: String(multipartSize),\n ['ZERO_LOG_FORMAT']: config.log.format,\n ['LITESTREAM_CONFIG']: configPath,\n ['LITESTREAM_PORT']: String(port),\n },\n };\n}\n\nasync function tryRestore(lc: LogContext, config: ZeroConfig) {\n const {changeStreamer} = config;\n\n const isViewSyncer =\n changeStreamer.mode === 'discover' || changeStreamer.uri !== undefined;\n\n // Fire off a snapshot reservation to the current replication-manager\n // (if there is one).\n const firstMessage = reserveAndGetSnapshotStatus(lc, config, isViewSyncer);\n let snapshotStatus: SnapshotStatus | undefined;\n if (isViewSyncer) {\n // The return value is required by view-syncers ...\n snapshotStatus = await firstMessage;\n lc.info?.(`restoring backup from ${snapshotStatus.backupURL}`);\n } else {\n // but it is also useful to pause change-log cleanup when a new\n // replication-manager is starting up. In this case, the request is\n // best-effort. In particular, there may not be a previous\n // replication-manager running at all.\n void firstMessage.catch(e => lc.debug?.(e));\n }\n\n const {litestream, env} = getLitestream(\n config,\n 'debug', // Include all output from `litestream restore`, as it's minimal.\n snapshotStatus?.backupURL,\n );\n const {restoreParallelism: parallelism} = config.litestream;\n const proc = spawn(\n litestream,\n [\n 'restore',\n '-if-db-not-exists',\n '-if-replica-exists',\n '-parallelism',\n String(parallelism),\n config.replica.file,\n ],\n {env, stdio: 'inherit', windowsHide: true},\n );\n const {promise, resolve, reject} = resolver();\n proc.on('error', reject);\n proc.on('close', (code, signal) => {\n if (signal) {\n reject(`litestream killed with ${signal}`);\n } else if (code !== 0) {\n reject(`litestream exited with code ${code}`);\n } else {\n resolve();\n }\n });\n await promise;\n if (!existsSync(config.replica.file)) {\n return false;\n }\n if (\n snapshotStatus &&\n !replicaIsValid(lc, config.replica.file, snapshotStatus)\n ) {\n lc.info?.(`Deleting local replica and retrying restore`);\n deleteLiteDB(config.replica.file);\n return false;\n }\n return true;\n}\n\nfunction replicaIsValid(\n lc: LogContext,\n replica: string,\n snapshot: SnapshotStatus,\n) {\n const db = new Database(lc, replica);\n try {\n const {replicaVersion, watermark} = getSubscriptionState(\n new StatementRunner(db),\n );\n if (replicaVersion !== snapshot.replicaVersion) {\n lc.warn?.(\n `Local replica version ${replicaVersion} does not match change-streamer replicaVersion ${snapshot.replicaVersion}`,\n snapshot,\n );\n return false;\n }\n if (watermark < snapshot.minWatermark) {\n lc.warn?.(\n `Local replica watermark ${watermark} is earlier than change-streamer minWatermark ${snapshot.minWatermark}`,\n );\n return false;\n }\n lc.info?.(\n `Local replica at version ${replicaVersion} and watermark ${watermark} is compatible with change-streamer`,\n snapshot,\n );\n return true;\n } finally {\n db.close();\n }\n}\n\nexport function startReplicaBackupProcess(config: ZeroConfig): ChildProcess {\n const {litestream, env} = getLitestream(config);\n return spawn(litestream, ['replicate'], {\n env,\n stdio: 'inherit',\n windowsHide: true,\n });\n}\n\nfunction reserveAndGetSnapshotStatus(\n lc: LogContext,\n config: ZeroConfig,\n isViewSyncer: boolean,\n): Promise<SnapshotStatus> {\n const 
{promise: status, resolve, reject} = resolver<SnapshotStatus>();\n\n void (async function () {\n const abort = new AbortController();\n process.on('SIGINT', () => abort.abort());\n process.on('SIGTERM', () => abort.abort());\n\n for (let i = 0; ; i++) {\n let err: unknown | string = '';\n try {\n let resolved = false;\n const stream = await reserveSnapshot(lc, config);\n for await (const msg of stream) {\n // Capture the value of the status message that the change-streamer\n // (i.e. BackupMonitor) returns, and hold the connection open to\n // \"reserve\" the snapshot and prevent change log cleanup.\n resolve(msg[1]);\n resolved = true;\n }\n // The change-streamer itself closes the connection when the\n // subscription is started (or the reservation retried).\n if (resolved) {\n break;\n }\n } catch (e) {\n err = e;\n }\n if (!isViewSyncer) {\n return reject(err);\n }\n // Retry in the view-syncer since it cannot proceed until it connects\n // to a (compatible) replication-manager. In particular, a\n // replication-manager that does not support the view-syncer's\n // change-streamer protocol will close the stream with an error; this\n // retry logic essentially delays the startup of a view-syncer until\n // a compatible replication-manager has been rolled out, allowing\n // replication-manager and view-syncer services to be updated in\n // parallel.\n lc.warn?.(\n `Unable to reserve snapshot (attempt ${i + 1}). Retrying in 5 seconds.`,\n String(err),\n );\n try {\n await sleep(5000, abort.signal);\n } catch (e) {\n return reject(e);\n }\n }\n })();\n\n return status;\n}\n\nfunction reserveSnapshot(\n lc: LogContext,\n config: ZeroConfig,\n): Promise<Source<SnapshotMessage>> {\n assertNormalized(config);\n const {taskID, change, changeStreamer} = config;\n const shardID = getShardConfig(config);\n\n const changeStreamerClient = new ChangeStreamerHttpClient(\n lc,\n shardID,\n change.db,\n changeStreamer.uri,\n );\n\n return 
changeStreamerClient.reserveSnapshot(taskID);\n}\n"],"names":[],"mappings":";;;;;;;;;;;;AAuBA,MAAM,cAAc;AACpB,MAAM,oBAAoB;AAM1B,eAAsB,eACpB,IACA,QACe;AACf,QAAM,EAAC,mBAAkB;AAEzB,WAAS,IAAI,GAAG,IAAI,aAAa,KAAK;AACpC,QAAI,IAAI,GAAG;AACT,SAAG;AAAA,QACD,kCAAkC,oBAAoB,GAAI;AAAA,MAAA;AAE5D,YAAM,MAAM,iBAAiB;AAAA,IAC/B;AACA,UAAM,4BAAY,KAAA;AAClB,UAAM,WAAW,MAAM,WAAW,IAAI,MAAM;AAC5C,QAAI,UAAU;AACZ,aAAO;AAAA,IACT;AACA,QACE,eAAe,SAAS,eACxB,eAAe,QAAQ,QACvB;AACA,SAAG,OAAO,4BAA4B;AACtC,aAAO;AAAA,IACT;AAAA,EACF;AACA,QAAM,IAAI,MAAM,yCAAyC;AAC3D;AAEA,SAAS,cACP,QACA,kBACA,mBAIA;AACA,QAAM;AAAA,IACJ;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA,OAAO,OAAO,OAAO;AAAA,IACrB;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EAAA,IACE,OAAO;AAIX,QAAM,gCAAgC,8BAA8B,KAAK;AACzE,QAAM,yBAAyB,wBAAwB;AACvD,QAAM,yBAAyB,yBAAyB;AAExD,SAAO;AAAA,IACL,YAAY,KAAK,YAAY,iCAAiC;AAAA,IAC9D,KAAK;AAAA,MACH,GAAG,QAAQ;AAAA,MACX,CAAC,mBAAmB,GAAG,OAAO,QAAQ;AAAA,MACtC,CAAC,4BAA4B,GAAG,KAAK,qBAAqB,SAAS;AAAA,MACnE,CAAC,2CAA2C,GAAG;AAAA,QAC7C;AAAA,MAAA;AAAA,MAEF,CAAC,2CAA2C,GAAG;AAAA,QAC7C;AAAA,MAAA;AAAA,MAEF,CAAC,qDAAqD,GAAG;AAAA,QACvD;AAAA,MAAA;AAAA,MAEF,CAAC,2BAA2B,GAAG,oBAAoB;AAAA,MACnD,CAAC,kDAAkD,GAAG;AAAA,QACpD;AAAA,MAAA;AAAA,MAEF,CAAC,uCAAuC,GAAG,OAAO,oBAAoB;AAAA,MACtE,CAAC,gCAAgC,GAAG,OAAO,aAAa;AAAA,MACxD,CAAC,iBAAiB,GAAG,OAAO,IAAI;AAAA,MAChC,CAAC,mBAAmB,GAAG;AAAA,MACvB,CAAC,iBAAiB,GAAG,OAAO,IAAI;AAAA,IAAA;AAAA,EAClC;AAEJ;AAEA,eAAe,WAAW,IAAgB,QAAoB;AAC5D,QAAM,EAAC,mBAAkB;AAEzB,QAAM,eACJ,eAAe,SAAS,cAAc,eAAe,QAAQ;AAI/D,QAAM,eAAe,4BAA4B,IAAI,QAAQ,YAAY;AACzE,MAAI;AACJ,MAAI,cAAc;AAEhB,qBAAiB,MAAM;AACvB,OAAG,OAAO,yBAAyB,eAAe,SAAS,EAAE;AAAA,EAC/D,OAAO;AAKL,SAAK,aAAa,MAAM,CAAA,MAAK,GAAG,QAAQ,CAAC,CAAC;AAAA,EAC5C;AAEA,QAAM,EAAC,YAAY,IAAA,IAAO;AAAA,IACxB;AAAA,IACA;AAAA;AAAA,IACA,gBAAgB;AAAA,EAAA;AAElB,QAAM,EAAC,oBAAoB,YAAA,IAAe,OAAO;AACjD,QAAM,OAAO;AAAA,IACX;AAAA,IACA;AAAA,MACE;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA,OAAO,WAAW;AAAA,MAClB,OAAO,QAAQ;AAAA,IAAA;AAAA,IAEjB,EAAC,KAAK,OAAO,WAAW,aAAa,KAAA;AAAA,EAAI;AAE3C,QAAM,EAAC,SAAS,SAAS,OAAA,IAAU,SAAA;AACnC,OAAK,GAAG,SAAS,MAAM;AACvB,OAAK,GAAG,SAAS,CAAC,MAAM,WAAW;AACjC,QAAI,QAAQ;AACV,aAAO,0BAA0B,MAAM,EAAE;AAAA,IAC3C,WAAW,SAAS,GAAG;AACrB,aAAO,+BAA+B,IAAI,EAAE;AAAA,IAC9C,OAAO;AACL,cAAA;AAAA,IACF;AAAA,EACF,CAAC;AACD,QAAM;AACN,MAAI,CAAC,WAAW,OAAO,QAAQ,IAAI,GAAG;AACpC,WAAO;AAAA,EACT;AACA,MACE,kBACA,CAAC,eAAe,IAAI,OAAO,QAAQ,MAAM,cAAc,GACvD;AACA,OAAG,OAAO,6CAA6C;AACvD,iBAAa,OAAO,QAAQ,IAAI;AAChC,WAAO;AAAA,EACT;AACA,SAAO;AACT;AAEA,SAAS,eACP,IACA,SACA,UACA;AACA,QAAM,KAAK,IAAI,SAAS,IAAI,OAAO;AACnC,MAAI;AACF,UAAM,EAAC,gBAAgB,UAAA,IAAa;AAAA,MAClC,IAAI,gBAAgB,EAAE;AAAA,IAAA;AAExB,QAAI,mBAAmB,SAAS,gBAAgB;AAC9C,SAAG;AAAA,QACD,yBAAyB,cAAc,kDAAkD,SAAS,cAAc;AAAA,QAChH;AAAA,MAAA;AAEF,aAAO;AAAA,IACT;AACA,QAAI,YAAY,SAAS,cAAc;AACrC,SAAG;AAAA,QACD,2BAA2B,SAAS,iDAAiD,SAAS,YAAY;AAAA,MAAA;AAE5G,aAAO;AAAA,IACT;AACA,OAAG;AAAA,MACD,4BAA4B,cAAc,kBAAkB,SAAS;AAAA,MACrE;AAAA,IAAA;AAEF,WAAO;AAAA,EACT,UAAA;AACE,OAAG,MAAA;AAAA,EACL;AACF;AAEO,SAAS,0BAA0B,QAAkC;AAC1E,QAAM,EAAC,YAAY,QAAO,cAAc,MAAM;AAC9C,SAAO,MAAM,YAAY,CAAC,WAAW,GAAG;AAAA,IACtC;AAAA,IACA,OAAO;AAAA,IACP,aAAa;AAAA,EAAA,CACd;AACH;AAEA,SAAS,4BACP,IACA,QACA,cACyB;AACzB,QAAM,EAAC,SAAS,QAAQ,SAAS,OAAA,IAAU,SAAA;AAE3C,QAAM,iBAAkB;AACtB,UAAM,QAAQ,IAAI,gBAAA;AAClB,YAAQ,GAAG,UAAU,MAAM,MAAM,OAAO;AACxC,YAAQ,GAAG,WAAW,MAAM,MAAM,OAAO;AAEzC,aAAS,IAAI,KAAK,KAAK;AACrB,UAAI,MAAwB;AAC5B,UAAI;AACF,YAAI,WAAW;AACf,cAAM,SAAS,MAAM,gBAAgB,IAAI,MAAM;AAC/C,yBAAiB,OAAO,QAAQ;AAI9B,kBAAQ,IAAI,CAAC,CAAC;AACd,qBAAW;AAAA,QACb;AAGA,YAAI,UAAU;AACZ;AAAA,QACF;AAAA,MACF,SAAS,GAAG;AACV,cAAM;AAAA,MACR;AACA,UAAI,CAAC,cAAc;AACjB,e
AAO,OAAO,GAAG;AAAA,MACnB;AASA,SAAG;AAAA,QACD,uCAAuC,IAAI,CAAC;AAAA,QAC5C,OAAO,GAAG;AAAA,MAAA;AAEZ,UAAI;AACF,cAAM,MAAM,KAAM,MAAM,MAAM;AAAA,MAChC,SAAS,GAAG;AACV,eAAO,OAAO,CAAC;AAAA,MACjB;AAAA,IACF;AAAA,EACF,GAAA;AAEA,SAAO;AACT;AAEA,SAAS,gBACP,IACA,QACkC;AAClC,mBAAiB,MAAM;AACvB,QAAM,EAAC,QAAQ,QAAQ,eAAA,IAAkB;AACzC,QAAM,UAAU,eAAe,MAAM;AAErC,QAAM,uBAAuB,IAAI;AAAA,IAC/B;AAAA,IACA;AAAA,IACA,OAAO;AAAA,IACP,eAAe;AAAA,EAAA;AAGjB,SAAO,qBAAqB,gBAAgB,MAAM;AACpD;"}
+ {"version":3,"file":"commands.js","sources":["../../../../../../zero-cache/src/services/litestream/commands.ts"],"sourcesContent":["import type {LogContext, LogLevel} from '@rocicorp/logger';\nimport {resolver} from '@rocicorp/resolver';\nimport type {ChildProcess} from 'node:child_process';\nimport {spawn} from 'node:child_process';\nimport {existsSync} from 'node:fs';\nimport {must} from '../../../../shared/src/must.ts';\nimport {sleep} from '../../../../shared/src/sleep.ts';\nimport {Database} from '../../../../zqlite/src/db.ts';\nimport {assertNormalized} from '../../config/normalize.ts';\nimport type {ZeroConfig} from '../../config/zero-config.ts';\nimport {deleteLiteDB} from '../../db/delete-lite-db.ts';\nimport {StatementRunner} from '../../db/statements.ts';\nimport {getShardConfig} from '../../types/shards.ts';\nimport type {Source} from '../../types/streams.ts';\nimport {ChangeStreamerHttpClient} from '../change-streamer/change-streamer-http.ts';\nimport type {\n SnapshotMessage,\n SnapshotStatus,\n} from '../change-streamer/snapshot.ts';\nimport {getSubscriptionState} from '../replicator/schema/replication-state.ts';\n\n// Retry for up to 3 minutes (60 times with 3 second delay).\n// Beyond that, let the container runner restart the task.\nconst MAX_RETRIES = 60;\nconst RETRY_INTERVAL_MS = 3000;\n\n/**\n * @returns The time at which the last restore started\n * (i.e. not counting failed attempts).\n */\nexport async function restoreReplica(\n lc: LogContext,\n config: ZeroConfig,\n): Promise<Date> {\n const {changeStreamer} = config;\n\n for (let i = 0; i < MAX_RETRIES; i++) {\n if (i > 0) {\n lc.info?.(\n `replica not found. retrying in ${RETRY_INTERVAL_MS / 1000} seconds`,\n );\n await sleep(RETRY_INTERVAL_MS);\n }\n const start = new Date();\n const restored = await tryRestore(lc, config);\n if (restored) {\n return start;\n }\n if (\n changeStreamer.mode === 'dedicated' &&\n changeStreamer.uri === undefined\n ) {\n lc.info?.('no litestream backup found');\n return start;\n }\n }\n throw new Error(`max attempts exceeded restoring replica`);\n}\n\nfunction getLitestream(\n config: ZeroConfig,\n logLevelOverride?: LogLevel,\n backupURLOverride?: string,\n): {\n litestream: string;\n env: NodeJS.ProcessEnv;\n} {\n const {\n executable,\n backupURL,\n logLevel,\n configPath,\n port = config.port + 2,\n checkpointThresholdMB,\n minCheckpointPageCount = checkpointThresholdMB * 250, // SQLite page size is 4KB\n maxCheckpointPageCount = minCheckpointPageCount * 10,\n incrementalBackupIntervalMinutes,\n snapshotBackupIntervalHours,\n multipartConcurrency,\n multipartSize,\n } = config.litestream;\n\n // Set the snapshot interval to something smaller than x hours so that\n // the hourly check triggers on the hour, rather than the hour after.\n const snapshotBackupIntervalMinutes = snapshotBackupIntervalHours * 60 - 5;\n\n return {\n litestream: must(executable, `Missing --litestream-executable`),\n env: {\n ...process.env,\n ['ZERO_REPLICA_FILE']: config.replica.file,\n ['ZERO_LITESTREAM_BACKUP_URL']: must(backupURLOverride ?? backupURL),\n ['ZERO_LITESTREAM_MIN_CHECKPOINT_PAGE_COUNT']: String(\n minCheckpointPageCount,\n ),\n ['ZERO_LITESTREAM_MAX_CHECKPOINT_PAGE_COUNT']: String(\n maxCheckpointPageCount,\n ),\n ['ZERO_LITESTREAM_INCREMENTAL_BACKUP_INTERVAL_MINUTES']: String(\n incrementalBackupIntervalMinutes,\n ),\n ['ZERO_LITESTREAM_LOG_LEVEL']: logLevelOverride ?? 
logLevel,\n ['ZERO_LITESTREAM_SNAPSHOT_BACKUP_INTERVAL_MINUTES']: String(\n snapshotBackupIntervalMinutes,\n ),\n ['ZERO_LITESTREAM_MULTIPART_CONCURRENCY']: String(multipartConcurrency),\n ['ZERO_LITESTREAM_MULTIPART_SIZE']: String(multipartSize),\n ['ZERO_LOG_FORMAT']: config.log.format,\n ['LITESTREAM_CONFIG']: configPath,\n ['LITESTREAM_PORT']: String(port),\n },\n };\n}\n\nasync function tryRestore(lc: LogContext, config: ZeroConfig) {\n const {changeStreamer} = config;\n\n const isViewSyncer =\n changeStreamer.mode === 'discover' || changeStreamer.uri !== undefined;\n\n // Fire off a snapshot reservation to the current replication-manager\n // (if there is one).\n const firstMessage = reserveAndGetSnapshotStatus(lc, config, isViewSyncer);\n let snapshotStatus: SnapshotStatus | undefined;\n if (isViewSyncer) {\n // The return value is required by view-syncers ...\n snapshotStatus = await firstMessage;\n lc.info?.(`restoring backup from ${snapshotStatus.backupURL}`);\n } else {\n // but it is also useful to pause change-log cleanup when a new\n // replication-manager is starting up. In this case, the request is\n // best-effort. In particular, there may not be a previous\n // replication-manager running at all.\n void firstMessage.catch(e => lc.debug?.(e));\n }\n\n const {litestream, env} = getLitestream(\n config,\n 'debug', // Include all output from `litestream restore`, as it's minimal.\n snapshotStatus?.backupURL,\n );\n const {restoreParallelism: parallelism} = config.litestream;\n const proc = spawn(\n litestream,\n [\n 'restore',\n '-if-db-not-exists',\n '-if-replica-exists',\n '-parallelism',\n String(parallelism),\n config.replica.file,\n ],\n {env, stdio: 'inherit', windowsHide: true},\n );\n const {promise, resolve, reject} = resolver();\n proc.on('error', reject);\n proc.on('close', (code, signal) => {\n if (signal) {\n reject(`litestream killed with ${signal}`);\n } else if (code !== 0) {\n reject(`litestream exited with code ${code}`);\n } else {\n resolve();\n }\n });\n await promise;\n if (!existsSync(config.replica.file)) {\n return false;\n }\n if (\n snapshotStatus &&\n !replicaIsValid(lc, config.replica.file, snapshotStatus)\n ) {\n lc.info?.(`Deleting local replica and retrying restore`);\n deleteLiteDB(config.replica.file);\n return false;\n }\n return true;\n}\n\nfunction replicaIsValid(\n lc: LogContext,\n replica: string,\n snapshot: SnapshotStatus,\n) {\n const db = new Database(lc, replica);\n try {\n const {replicaVersion, watermark} = getSubscriptionState(\n new StatementRunner(db),\n );\n if (replicaVersion !== snapshot.replicaVersion) {\n lc.warn?.(\n `Local replica version ${replicaVersion} does not match change-streamer replicaVersion ${snapshot.replicaVersion}`,\n snapshot,\n );\n return false;\n }\n if (watermark < snapshot.minWatermark) {\n lc.warn?.(\n `Local replica watermark ${watermark} is earlier than change-streamer minWatermark ${snapshot.minWatermark}`,\n );\n return false;\n }\n lc.info?.(\n `Local replica at version ${replicaVersion} and watermark ${watermark} is compatible with change-streamer`,\n snapshot,\n );\n return true;\n } finally {\n db.close();\n }\n}\n\nexport function startReplicaBackupProcess(config: ZeroConfig): ChildProcess {\n const {litestream, env} = getLitestream(config);\n return spawn(litestream, ['replicate'], {\n env,\n stdio: 'inherit',\n windowsHide: true,\n });\n}\n\nfunction reserveAndGetSnapshotStatus(\n lc: LogContext,\n config: ZeroConfig,\n isViewSyncer: boolean,\n): Promise<SnapshotStatus> {\n const 
{promise: status, resolve, reject} = resolver<SnapshotStatus>();\n\n void (async function () {\n const abort = new AbortController();\n process.on('SIGINT', () => abort.abort());\n process.on('SIGTERM', () => abort.abort());\n\n for (let i = 0; ; i++) {\n let err: unknown | string = '';\n try {\n let resolved = false;\n const stream = await reserveSnapshot(lc, config);\n for await (const msg of stream) {\n // Capture the value of the status message that the change-streamer\n // (i.e. BackupMonitor) returns, and hold the connection open to\n // \"reserve\" the snapshot and prevent change log cleanup.\n resolve(msg[1]);\n resolved = true;\n }\n // The change-streamer itself closes the connection when the\n // subscription is started (or the reservation retried).\n if (resolved) {\n break;\n }\n } catch (e) {\n err = e;\n }\n if (!isViewSyncer) {\n return reject(err);\n }\n // Retry in the view-syncer since it cannot proceed until it connects\n // to a (compatible) replication-manager. In particular, a\n // replication-manager that does not support the view-syncer's\n // change-streamer protocol will close the stream with an error; this\n // retry logic essentially delays the startup of a view-syncer until\n // a compatible replication-manager has been rolled out, allowing\n // replication-manager and view-syncer services to be updated in\n // parallel.\n lc.warn?.(\n `Unable to reserve snapshot (attempt ${i + 1}). Retrying in 5 seconds.`,\n String(err),\n );\n try {\n await sleep(5000, abort.signal);\n } catch (e) {\n return reject(e);\n }\n }\n })();\n\n return status;\n}\n\nfunction reserveSnapshot(\n lc: LogContext,\n config: ZeroConfig,\n): Promise<Source<SnapshotMessage>> {\n assertNormalized(config);\n const {taskID, change, changeStreamer} = config;\n const shardID = getShardConfig(config);\n\n const changeStreamerClient = new ChangeStreamerHttpClient(\n lc,\n shardID,\n change.db,\n changeStreamer.uri,\n );\n\n return 
changeStreamerClient.reserveSnapshot(taskID);\n}\n"],"names":[],"mappings":";;;;;;;;;;;;AAuBA,MAAM,cAAc;AACpB,MAAM,oBAAoB;AAM1B,eAAsB,eACpB,IACA,QACe;AACf,QAAM,EAAC,mBAAkB;AAEzB,WAAS,IAAI,GAAG,IAAI,aAAa,KAAK;AACpC,QAAI,IAAI,GAAG;AACT,SAAG;AAAA,QACD,kCAAkC,oBAAoB,GAAI;AAAA,MAAA;AAE5D,YAAM,MAAM,iBAAiB;AAAA,IAC/B;AACA,UAAM,4BAAY,KAAA;AAClB,UAAM,WAAW,MAAM,WAAW,IAAI,MAAM;AAC5C,QAAI,UAAU;AACZ,aAAO;AAAA,IACT;AACA,QACE,eAAe,SAAS,eACxB,eAAe,QAAQ,QACvB;AACA,SAAG,OAAO,4BAA4B;AACtC,aAAO;AAAA,IACT;AAAA,EACF;AACA,QAAM,IAAI,MAAM,yCAAyC;AAC3D;AAEA,SAAS,cACP,QACA,kBACA,mBAIA;AACA,QAAM;AAAA,IACJ;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA,OAAO,OAAO,OAAO;AAAA,IACrB;AAAA,IACA,yBAAyB,wBAAwB;AAAA;AAAA,IACjD,yBAAyB,yBAAyB;AAAA,IAClD;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EAAA,IACE,OAAO;AAIX,QAAM,gCAAgC,8BAA8B,KAAK;AAEzE,SAAO;AAAA,IACL,YAAY,KAAK,YAAY,iCAAiC;AAAA,IAC9D,KAAK;AAAA,MACH,GAAG,QAAQ;AAAA,MACX,CAAC,mBAAmB,GAAG,OAAO,QAAQ;AAAA,MACtC,CAAC,4BAA4B,GAAG,KAAK,qBAAqB,SAAS;AAAA,MACnE,CAAC,2CAA2C,GAAG;AAAA,QAC7C;AAAA,MAAA;AAAA,MAEF,CAAC,2CAA2C,GAAG;AAAA,QAC7C;AAAA,MAAA;AAAA,MAEF,CAAC,qDAAqD,GAAG;AAAA,QACvD;AAAA,MAAA;AAAA,MAEF,CAAC,2BAA2B,GAAG,oBAAoB;AAAA,MACnD,CAAC,kDAAkD,GAAG;AAAA,QACpD;AAAA,MAAA;AAAA,MAEF,CAAC,uCAAuC,GAAG,OAAO,oBAAoB;AAAA,MACtE,CAAC,gCAAgC,GAAG,OAAO,aAAa;AAAA,MACxD,CAAC,iBAAiB,GAAG,OAAO,IAAI;AAAA,MAChC,CAAC,mBAAmB,GAAG;AAAA,MACvB,CAAC,iBAAiB,GAAG,OAAO,IAAI;AAAA,IAAA;AAAA,EAClC;AAEJ;AAEA,eAAe,WAAW,IAAgB,QAAoB;AAC5D,QAAM,EAAC,mBAAkB;AAEzB,QAAM,eACJ,eAAe,SAAS,cAAc,eAAe,QAAQ;AAI/D,QAAM,eAAe,4BAA4B,IAAI,QAAQ,YAAY;AACzE,MAAI;AACJ,MAAI,cAAc;AAEhB,qBAAiB,MAAM;AACvB,OAAG,OAAO,yBAAyB,eAAe,SAAS,EAAE;AAAA,EAC/D,OAAO;AAKL,SAAK,aAAa,MAAM,CAAA,MAAK,GAAG,QAAQ,CAAC,CAAC;AAAA,EAC5C;AAEA,QAAM,EAAC,YAAY,IAAA,IAAO;AAAA,IACxB;AAAA,IACA;AAAA;AAAA,IACA,gBAAgB;AAAA,EAAA;AAElB,QAAM,EAAC,oBAAoB,YAAA,IAAe,OAAO;AACjD,QAAM,OAAO;AAAA,IACX;AAAA,IACA;AAAA,MACE;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA,OAAO,WAAW;AAAA,MAClB,OAAO,QAAQ;AAAA,IAAA;AAAA,IAEjB,EAAC,KAAK,OAAO,WAAW,aAAa,KAAA;AAAA,EAAI;AAE3C,QAAM,EAAC,SAAS,SAAS,OAAA,IAAU,SAAA;AACnC,OAAK,GAAG,SAAS,MAAM;AACvB,OAAK,GAAG,SAAS,CAAC,MAAM,WAAW;AACjC,QAAI,QAAQ;AACV,aAAO,0BAA0B,MAAM,EAAE;AAAA,IAC3C,WAAW,SAAS,GAAG;AACrB,aAAO,+BAA+B,IAAI,EAAE;AAAA,IAC9C,OAAO;AACL,cAAA;AAAA,IACF;AAAA,EACF,CAAC;AACD,QAAM;AACN,MAAI,CAAC,WAAW,OAAO,QAAQ,IAAI,GAAG;AACpC,WAAO;AAAA,EACT;AACA,MACE,kBACA,CAAC,eAAe,IAAI,OAAO,QAAQ,MAAM,cAAc,GACvD;AACA,OAAG,OAAO,6CAA6C;AACvD,iBAAa,OAAO,QAAQ,IAAI;AAChC,WAAO;AAAA,EACT;AACA,SAAO;AACT;AAEA,SAAS,eACP,IACA,SACA,UACA;AACA,QAAM,KAAK,IAAI,SAAS,IAAI,OAAO;AACnC,MAAI;AACF,UAAM,EAAC,gBAAgB,UAAA,IAAa;AAAA,MAClC,IAAI,gBAAgB,EAAE;AAAA,IAAA;AAExB,QAAI,mBAAmB,SAAS,gBAAgB;AAC9C,SAAG;AAAA,QACD,yBAAyB,cAAc,kDAAkD,SAAS,cAAc;AAAA,QAChH;AAAA,MAAA;AAEF,aAAO;AAAA,IACT;AACA,QAAI,YAAY,SAAS,cAAc;AACrC,SAAG;AAAA,QACD,2BAA2B,SAAS,iDAAiD,SAAS,YAAY;AAAA,MAAA;AAE5G,aAAO;AAAA,IACT;AACA,OAAG;AAAA,MACD,4BAA4B,cAAc,kBAAkB,SAAS;AAAA,MACrE;AAAA,IAAA;AAEF,WAAO;AAAA,EACT,UAAA;AACE,OAAG,MAAA;AAAA,EACL;AACF;AAEO,SAAS,0BAA0B,QAAkC;AAC1E,QAAM,EAAC,YAAY,QAAO,cAAc,MAAM;AAC9C,SAAO,MAAM,YAAY,CAAC,WAAW,GAAG;AAAA,IACtC;AAAA,IACA,OAAO;AAAA,IACP,aAAa;AAAA,EAAA,CACd;AACH;AAEA,SAAS,4BACP,IACA,QACA,cACyB;AACzB,QAAM,EAAC,SAAS,QAAQ,SAAS,OAAA,IAAU,SAAA;AAE3C,QAAM,iBAAkB;AACtB,UAAM,QAAQ,IAAI,gBAAA;AAClB,YAAQ,GAAG,UAAU,MAAM,MAAM,OAAO;AACxC,YAAQ,GAAG,WAAW,MAAM,MAAM,OAAO;AAEzC,aAAS,IAAI,KAAK,KAAK;AACrB,UAAI,MAAwB;AAC5B,UAAI;AACF,YAAI,WAAW;AACf,cAAM,SAAS,MAAM,gBAAgB,IAAI,MAAM;AAC/C,yBAAiB,OAAO,QAAQ;AAI9B,kBAAQ,IAAI,CAAC,CAAC;AACd,qBAAW;AAAA,QACb;AAGA,YAAI,UAAU;AACZ;AAAA,QACF;AAAA,MACF,SAAS,GAAG;AACV,cAAM;AAAA,MACR;AACA,UAAI,CAAC,cAAc;AA
CjB,eAAO,OAAO,GAAG;AAAA,MACnB;AASA,SAAG;AAAA,QACD,uCAAuC,IAAI,CAAC;AAAA,QAC5C,OAAO,GAAG;AAAA,MAAA;AAEZ,UAAI;AACF,cAAM,MAAM,KAAM,MAAM,MAAM;AAAA,MAChC,SAAS,GAAG;AACV,eAAO,OAAO,CAAC;AAAA,MACjB;AAAA,IACF;AAAA,EACF,GAAA;AAEA,SAAO;AACT;AAEA,SAAS,gBACP,IACA,QACkC;AAClC,mBAAiB,MAAM;AACvB,QAAM,EAAC,QAAQ,QAAQ,eAAA,IAAkB;AACzC,QAAM,UAAU,eAAe,MAAM;AAErC,QAAM,uBAAuB,IAAI;AAAA,IAC/B;AAAA,IACA;AAAA,IACA,OAAO;AAAA,IACP,eAAe;AAAA,EAAA;AAGjB,SAAO,qBAAqB,gBAAgB,MAAM;AACpD;"}
@@ -135,12 +135,12 @@ export declare class PusherService implements Service, Pusher {
  afterPermissions?: string | undefined;
  vendedRowCounts?: Record<string, Record<string, number>> | undefined;
  vendedRows?: Record<string, Record<string, Readonly<Record<string, import("../../../../shared/src/json.ts").ReadonlyJSONValue | undefined>>[]>> | undefined;
- plans?: Record<string, string[]> | undefined;
+ sqlitePlans?: Record<string, string[]> | undefined;
  readRows?: Record<string, Record<string, Readonly<Record<string, import("../../../../shared/src/json.ts").ReadonlyJSONValue | undefined>>[]>> | undefined;
  readRowCountsByQuery?: Record<string, Record<string, number>> | undefined;
  readRowCount?: number | undefined;
  dbScansByQuery?: Record<string, Record<string, number>> | undefined;
- plannerEvents?: ({
+ joinPlans?: ({
  type: "attempt-start";
  attemptNumber: number;
  totalAttempts: number;
@@ -434,12 +434,12 @@ export declare class PusherService implements Service, Pusher {
  afterPermissions?: string | undefined;
  vendedRowCounts?: Record<string, Record<string, number>> | undefined;
  vendedRows?: Record<string, Record<string, Readonly<Record<string, import("../../../../shared/src/json.ts").ReadonlyJSONValue | undefined>>[]>> | undefined;
- plans?: Record<string, string[]> | undefined;
+ sqlitePlans?: Record<string, string[]> | undefined;
  readRows?: Record<string, Record<string, Readonly<Record<string, import("../../../../shared/src/json.ts").ReadonlyJSONValue | undefined>>[]>> | undefined;
  readRowCountsByQuery?: Record<string, Record<string, number>> | undefined;
  readRowCount?: number | undefined;
  dbScansByQuery?: Record<string, Record<string, number>> | undefined;
- plannerEvents?: ({
+ joinPlans?: ({
  type: "attempt-start";
  attemptNumber: number;
  totalAttempts: number;
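Both PusherService declaration hunks show the same rename in the query-metrics shape: the optional `plans` field becomes `sqlitePlans`, and `plannerEvents` becomes `joinPlans` (only the first union member, `attempt-start`, is visible in the diff context). An abbreviated excerpt reconstructed from the lines above; the containing type name is hypothetical:

    type QueryMetricsExcerpt = {
      // formerly `plans`
      sqlitePlans?: Record<string, string[]>;
      // formerly `plannerEvents`; only the first event shape appears in the diff
      joinPlans?: ({
        type: 'attempt-start';
        attemptNumber: number;
        totalAttempts: number;
      } /* | ...remaining event shapes truncated in the diff context */)[];
    };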
@@ -1,4 +1,5 @@
  import type { LogContext } from '@rocicorp/logger';
+ import type { JSONObject } from '../../../../zero-events/src/json.ts';
  import type { ReplicationStage, ReplicationStatusEvent, Status } from '../../../../zero-events/src/status.ts';
  import type { Database } from '../../../../zqlite/src/db.ts';
  export declare class ReplicationStatusPublisher {
@@ -8,5 +9,6 @@ export declare class ReplicationStatusPublisher {
  publishAndThrowError(lc: LogContext, stage: ReplicationStage, e: unknown): Promise<never>;
  stop(): this;
  }
+ export declare function publishReplicationError(lc: LogContext, stage: ReplicationStage, description: string, errorDetails?: JSONObject, now?: Date): Promise<void>;
  export declare function replicationStatusEvent(lc: LogContext, db: Database, stage: ReplicationStage, status: Status, description?: string, now?: Date): ReplicationStatusEvent;
  //# sourceMappingURL=replication-status.d.ts.map
@@ -1 +1 @@
- {"version":3,"file":"replication-status.d.ts","sourceRoot":"","sources":["../../../../../../zero-cache/src/services/replicator/replication-status.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,kBAAkB,CAAC;AAEjD,OAAO,KAAK,EAGV,gBAAgB,EAChB,sBAAsB,EACtB,MAAM,EACP,MAAM,uCAAuC,CAAC;AAC/C,OAAO,KAAK,EAAC,QAAQ,EAAC,MAAM,8BAA8B,CAAC;AAY3D,qBAAa,0BAA0B;;gBAIzB,EAAE,EAAE,QAAQ;IAIxB,OAAO,CACL,EAAE,EAAE,UAAU,EACd,KAAK,EAAE,gBAAgB,EACvB,WAAW,CAAC,EAAE,MAAM,EACpB,QAAQ,SAAI,GACX,IAAI;IAgBD,oBAAoB,CACxB,EAAE,EAAE,UAAU,EACd,KAAK,EAAE,gBAAgB,EACvB,CAAC,EAAE,OAAO,GACT,OAAO,CAAC,KAAK,CAAC;IAcjB,IAAI,IAAI,IAAI;CAIb;AAGD,wBAAgB,sBAAsB,CACpC,EAAE,EAAE,UAAU,EACd,EAAE,EAAE,QAAQ,EACZ,KAAK,EAAE,gBAAgB,EACvB,MAAM,EAAE,MAAM,EACd,WAAW,CAAC,EAAE,MAAM,EACpB,GAAG,OAAa,GACf,sBAAsB,CA+BxB"}
+ {"version":3,"file":"replication-status.d.ts","sourceRoot":"","sources":["../../../../../../zero-cache/src/services/replicator/replication-status.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,kBAAkB,CAAC;AAEjD,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,qCAAqC,CAAC;AACpE,OAAO,KAAK,EAGV,gBAAgB,EAChB,sBAAsB,EACtB,MAAM,EACP,MAAM,uCAAuC,CAAC;AAC/C,OAAO,KAAK,EAAC,QAAQ,EAAC,MAAM,8BAA8B,CAAC;AAY3D,qBAAa,0BAA0B;;gBAIzB,EAAE,EAAE,QAAQ;IAIxB,OAAO,CACL,EAAE,EAAE,UAAU,EACd,KAAK,EAAE,gBAAgB,EACvB,WAAW,CAAC,EAAE,MAAM,EACpB,QAAQ,SAAI,GACX,IAAI;IAgBD,oBAAoB,CACxB,EAAE,EAAE,UAAU,EACd,KAAK,EAAE,gBAAgB,EACvB,CAAC,EAAE,OAAO,GACT,OAAO,CAAC,KAAK,CAAC;IAcjB,IAAI,IAAI,IAAI;CAIb;AAED,wBAAsB,uBAAuB,CAC3C,EAAE,EAAE,UAAU,EACd,KAAK,EAAE,gBAAgB,EACvB,WAAW,EAAE,MAAM,EACnB,YAAY,CAAC,EAAE,UAAU,EACzB,GAAG,OAAa,iBAYjB;AAGD,wBAAgB,sBAAsB,CACpC,EAAE,EAAE,UAAU,EACd,EAAE,EAAE,QAAQ,EACZ,KAAK,EAAE,gBAAgB,EACvB,MAAM,EAAE,MAAM,EACd,WAAW,CAAC,EAAE,MAAM,EACpB,GAAG,OAAa,GACf,sBAAsB,CA+BxB"}
@@ -1,6 +1,6 @@
  import { createSilentLogContext } from "../../../../shared/src/logging-test-utils.js";
  import { listIndexes, computeZqlSpecs } from "../../db/lite-tables.js";
- import { publishEvent, makeErrorDetails, publishCriticalEvent } from "../../observability/events.js";
+ import { publishCriticalEvent, publishEvent, makeErrorDetails } from "../../observability/events.js";
  const byKeys = (a, b) => a[0] < b[0] ? -1 : a[0] > b[0] ? 1 : 0;
  class ReplicationStatusPublisher {
  #db;
@@ -40,6 +40,18 @@ class ReplicationStatusPublisher {
  return this;
  }
  }
+ async function publishReplicationError(lc, stage, description, errorDetails, now = /* @__PURE__ */ new Date()) {
+ const event = {
+ type: "zero/events/status/replication/v1",
+ component: "replication",
+ status: "ERROR",
+ stage,
+ description,
+ errorDetails,
+ time: now.toISOString()
+ };
+ await publishCriticalEvent(lc, event);
+ }
  function replicationStatusEvent(lc, db, stage, status, description, now = /* @__PURE__ */ new Date()) {
  try {
  return {
@@ -106,6 +118,7 @@ function getReplicaSize(db) {
  }
  export {
  ReplicationStatusPublisher,
+ publishReplicationError,
  replicationStatusEvent
  };
  //# sourceMappingURL=replication-status.js.map
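The new standalone `publishReplicationError` helper, declared and implemented above, publishes an ERROR replication-status event without needing a `Database` handle (it omits the `state` snapshot) and without rethrowing, unlike `ReplicationStatusPublisher.publishAndThrowError`. A sketch of assumed usage; the wrapper function is hypothetical, only the imported signature comes from the declarations above:

    import type {LogContext} from '@rocicorp/logger';
    import type {ReplicationStage} from '../../../../zero-events/src/status.ts';
    import {publishReplicationError} from './replication-status.ts';

    async function reportReplicationFailure(
      lc: LogContext,
      stage: ReplicationStage,
      err: unknown,
    ): Promise<void> {
      await publishReplicationError(
        lc,
        stage,
        `replication failed: ${String(err)}`, // description
        {error: String(err)},                 // optional JSONObject errorDetails
      );
    }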
@@ -1 +1 @@
- {"version":3,"file":"replication-status.js","sources":["../../../../../../zero-cache/src/services/replicator/replication-status.ts"],"sourcesContent":["import type {LogContext} from '@rocicorp/logger';\nimport {createSilentLogContext} from '../../../../shared/src/logging-test-utils.ts';\nimport type {\n ReplicatedIndex,\n ReplicatedTable,\n ReplicationStage,\n ReplicationStatusEvent,\n Status,\n} from '../../../../zero-events/src/status.ts';\nimport type {Database} from '../../../../zqlite/src/db.ts';\nimport {computeZqlSpecs, listIndexes} from '../../db/lite-tables.ts';\nimport type {LiteTableSpec} from '../../db/specs.ts';\nimport {\n makeErrorDetails,\n publishCriticalEvent,\n publishEvent,\n} from '../../observability/events.ts';\n\nconst byKeys = (a: [string, unknown], b: [string, unknown]) =>\n a[0] < b[0] ? -1 : a[0] > b[0] ? 1 : 0;\n\nexport class ReplicationStatusPublisher {\n readonly #db: Database;\n #timer: NodeJS.Timeout | undefined;\n\n constructor(db: Database) {\n this.#db = db;\n }\n\n publish(\n lc: LogContext,\n stage: ReplicationStage,\n description?: string,\n interval = 0,\n ): this {\n this.stop();\n publishEvent(\n lc,\n replicationStatusEvent(lc, this.#db, stage, 'OK', description),\n );\n\n if (interval) {\n this.#timer = setInterval(\n () => this.publish(lc, stage, description, interval),\n interval,\n );\n }\n return this;\n }\n\n async publishAndThrowError(\n lc: LogContext,\n stage: ReplicationStage,\n e: unknown,\n ): Promise<never> {\n this.stop();\n const event = replicationStatusEvent(\n lc,\n this.#db,\n stage,\n 'ERROR',\n String(e),\n );\n event.errorDetails = makeErrorDetails(e);\n await publishCriticalEvent(lc, event);\n throw e;\n }\n\n stop(): this {\n clearInterval(this.#timer);\n return this;\n }\n}\n\n// Exported for testing.\nexport function replicationStatusEvent(\n lc: LogContext,\n db: Database,\n stage: ReplicationStage,\n status: Status,\n description?: string,\n now = new Date(),\n): ReplicationStatusEvent {\n try {\n return {\n type: 'zero/events/status/replication/v1',\n component: 'replication',\n status,\n stage,\n description,\n time: now.toISOString(),\n state: {\n tables: getReplicatedTables(db),\n indexes: getReplicatedIndexes(db),\n replicaSize: getReplicaSize(db),\n },\n };\n } catch (e) {\n lc.warn?.(`Unable to create full ReplicationStatusEvent`, e);\n return {\n type: 'zero/events/status/replication/v1',\n component: 'replication',\n status,\n stage,\n description,\n time: now.toISOString(),\n state: {\n tables: [],\n indexes: [],\n replicaSize: 0,\n },\n };\n }\n}\n\nfunction getReplicatedTables(db: Database): ReplicatedTable[] {\n const fullTables = new Map<string, LiteTableSpec>();\n const clientSchema = computeZqlSpecs(\n createSilentLogContext(), // avoid logging warnings about indexes\n db,\n new Map(),\n fullTables,\n );\n\n return [...fullTables.entries()].sort(byKeys).map(([table, spec]) => ({\n table,\n columns: Object.entries(spec.columns)\n .sort(byKeys)\n .map(([column, spec]) => ({\n column,\n upstreamType: spec.dataType.split('|')[0],\n clientType: clientSchema.get(table)?.zqlSpec[column]?.type ?? 
null,\n })),\n }));\n}\n\nfunction getReplicatedIndexes(db: Database): ReplicatedIndex[] {\n return listIndexes(db).map(({tableName: table, columns, unique}) => ({\n table,\n unique,\n columns: Object.entries(columns)\n .sort(byKeys)\n .map(([column, dir]) => ({column, dir})),\n }));\n}\n\nfunction getReplicaSize(db: Database) {\n const [{page_count: pageCount}] = db.pragma<{page_count: number}>(\n 'page_count',\n );\n const [{page_size: pageSize}] = db.pragma<{page_size: number}>('page_size');\n return pageCount * pageSize;\n}\n"],"names":["spec"],"mappings":";;;AAkBA,MAAM,SAAS,CAAC,GAAsB,MACpC,EAAE,CAAC,IAAI,EAAE,CAAC,IAAI,KAAK,EAAE,CAAC,IAAI,EAAE,CAAC,IAAI,IAAI;AAEhC,MAAM,2BAA2B;AAAA,EAC7B;AAAA,EACT;AAAA,EAEA,YAAY,IAAc;AACxB,SAAK,MAAM;AAAA,EACb;AAAA,EAEA,QACE,IACA,OACA,aACA,WAAW,GACL;AACN,SAAK,KAAA;AACL;AAAA,MACE;AAAA,MACA,uBAAuB,IAAI,KAAK,KAAK,OAAO,MAAM,WAAW;AAAA,IAAA;AAG/D,QAAI,UAAU;AACZ,WAAK,SAAS;AAAA,QACZ,MAAM,KAAK,QAAQ,IAAI,OAAO,aAAa,QAAQ;AAAA,QACnD;AAAA,MAAA;AAAA,IAEJ;AACA,WAAO;AAAA,EACT;AAAA,EAEA,MAAM,qBACJ,IACA,OACA,GACgB;AAChB,SAAK,KAAA;AACL,UAAM,QAAQ;AAAA,MACZ;AAAA,MACA,KAAK;AAAA,MACL;AAAA,MACA;AAAA,MACA,OAAO,CAAC;AAAA,IAAA;AAEV,UAAM,eAAe,iBAAiB,CAAC;AACvC,UAAM,qBAAqB,IAAI,KAAK;AACpC,UAAM;AAAA,EACR;AAAA,EAEA,OAAa;AACX,kBAAc,KAAK,MAAM;AACzB,WAAO;AAAA,EACT;AACF;AAGO,SAAS,uBACd,IACA,IACA,OACA,QACA,aACA,MAAM,oBAAI,QACc;AACxB,MAAI;AACF,WAAO;AAAA,MACL,MAAM;AAAA,MACN,WAAW;AAAA,MACX;AAAA,MACA;AAAA,MACA;AAAA,MACA,MAAM,IAAI,YAAA;AAAA,MACV,OAAO;AAAA,QACL,QAAQ,oBAAoB,EAAE;AAAA,QAC9B,SAAS,qBAAqB,EAAE;AAAA,QAChC,aAAa,eAAe,EAAE;AAAA,MAAA;AAAA,IAChC;AAAA,EAEJ,SAAS,GAAG;AACV,OAAG,OAAO,gDAAgD,CAAC;AAC3D,WAAO;AAAA,MACL,MAAM;AAAA,MACN,WAAW;AAAA,MACX;AAAA,MACA;AAAA,MACA;AAAA,MACA,MAAM,IAAI,YAAA;AAAA,MACV,OAAO;AAAA,QACL,QAAQ,CAAA;AAAA,QACR,SAAS,CAAA;AAAA,QACT,aAAa;AAAA,MAAA;AAAA,IACf;AAAA,EAEJ;AACF;AAEA,SAAS,oBAAoB,IAAiC;AAC5D,QAAM,iCAAiB,IAAA;AACvB,QAAM,eAAe;AAAA,IACnB,uBAAA;AAAA;AAAA,IACA;AAAA,wBACI,IAAA;AAAA,IACJ;AAAA,EAAA;AAGF,SAAO,CAAC,GAAG,WAAW,QAAA,CAAS,EAAE,KAAK,MAAM,EAAE,IAAI,CAAC,CAAC,OAAO,IAAI,OAAO;AAAA,IACpE;AAAA,IACA,SAAS,OAAO,QAAQ,KAAK,OAAO,EACjC,KAAK,MAAM,EACX,IAAI,CAAC,CAAC,QAAQA,KAAI,OAAO;AAAA,MACxB;AAAA,MACA,cAAcA,MAAK,SAAS,MAAM,GAAG,EAAE,CAAC;AAAA,MACxC,YAAY,aAAa,IAAI,KAAK,GAAG,QAAQ,MAAM,GAAG,QAAQ;AAAA,IAAA,EAC9D;AAAA,EAAA,EACJ;AACJ;AAEA,SAAS,qBAAqB,IAAiC;AAC7D,SAAO,YAAY,EAAE,EAAE,IAAI,CAAC,EAAC,WAAW,OAAO,SAAS,cAAa;AAAA,IACnE;AAAA,IACA;AAAA,IACA,SAAS,OAAO,QAAQ,OAAO,EAC5B,KAAK,MAAM,EACX,IAAI,CAAC,CAAC,QAAQ,GAAG,OAAO,EAAC,QAAQ,MAAK;AAAA,EAAA,EACzC;AACJ;AAEA,SAAS,eAAe,IAAc;AACpC,QAAM,CAAC,EAAC,YAAY,UAAA,CAAU,IAAI,GAAG;AAAA,IACnC;AAAA,EAAA;AAEF,QAAM,CAAC,EAAC,WAAW,SAAA,CAAS,IAAI,GAAG,OAA4B,WAAW;AAC1E,SAAO,YAAY;AACrB;"}
+ {"version":3,"file":"replication-status.js","sources":["../../../../../../zero-cache/src/services/replicator/replication-status.ts"],"sourcesContent":["import type {LogContext} from '@rocicorp/logger';\nimport {createSilentLogContext} from '../../../../shared/src/logging-test-utils.ts';\nimport type {JSONObject} from '../../../../zero-events/src/json.ts';\nimport type {\n ReplicatedIndex,\n ReplicatedTable,\n ReplicationStage,\n ReplicationStatusEvent,\n Status,\n} from '../../../../zero-events/src/status.ts';\nimport type {Database} from '../../../../zqlite/src/db.ts';\nimport {computeZqlSpecs, listIndexes} from '../../db/lite-tables.ts';\nimport type {LiteTableSpec} from '../../db/specs.ts';\nimport {\n makeErrorDetails,\n publishCriticalEvent,\n publishEvent,\n} from '../../observability/events.ts';\n\nconst byKeys = (a: [string, unknown], b: [string, unknown]) =>\n a[0] < b[0] ? -1 : a[0] > b[0] ? 1 : 0;\n\nexport class ReplicationStatusPublisher {\n readonly #db: Database;\n #timer: NodeJS.Timeout | undefined;\n\n constructor(db: Database) {\n this.#db = db;\n }\n\n publish(\n lc: LogContext,\n stage: ReplicationStage,\n description?: string,\n interval = 0,\n ): this {\n this.stop();\n publishEvent(\n lc,\n replicationStatusEvent(lc, this.#db, stage, 'OK', description),\n );\n\n if (interval) {\n this.#timer = setInterval(\n () => this.publish(lc, stage, description, interval),\n interval,\n );\n }\n return this;\n }\n\n async publishAndThrowError(\n lc: LogContext,\n stage: ReplicationStage,\n e: unknown,\n ): Promise<never> {\n this.stop();\n const event = replicationStatusEvent(\n lc,\n this.#db,\n stage,\n 'ERROR',\n String(e),\n );\n event.errorDetails = makeErrorDetails(e);\n await publishCriticalEvent(lc, event);\n throw e;\n }\n\n stop(): this {\n clearInterval(this.#timer);\n return this;\n }\n}\n\nexport async function publishReplicationError(\n lc: LogContext,\n stage: ReplicationStage,\n description: string,\n errorDetails?: JSONObject,\n now = new Date(),\n) {\n const event: ReplicationStatusEvent = {\n type: 'zero/events/status/replication/v1',\n component: 'replication',\n status: 'ERROR',\n stage,\n description,\n errorDetails,\n time: now.toISOString(),\n };\n await publishCriticalEvent(lc, event);\n}\n\n// Exported for testing.\nexport function replicationStatusEvent(\n lc: LogContext,\n db: Database,\n stage: ReplicationStage,\n status: Status,\n description?: string,\n now = new Date(),\n): ReplicationStatusEvent {\n try {\n return {\n type: 'zero/events/status/replication/v1',\n component: 'replication',\n status,\n stage,\n description,\n time: now.toISOString(),\n state: {\n tables: getReplicatedTables(db),\n indexes: getReplicatedIndexes(db),\n replicaSize: getReplicaSize(db),\n },\n };\n } catch (e) {\n lc.warn?.(`Unable to create full ReplicationStatusEvent`, e);\n return {\n type: 'zero/events/status/replication/v1',\n component: 'replication',\n status,\n stage,\n description,\n time: now.toISOString(),\n state: {\n tables: [],\n indexes: [],\n replicaSize: 0,\n },\n };\n }\n}\n\nfunction getReplicatedTables(db: Database): ReplicatedTable[] {\n const fullTables = new Map<string, LiteTableSpec>();\n const clientSchema = computeZqlSpecs(\n createSilentLogContext(), // avoid logging warnings about indexes\n db,\n new Map(),\n fullTables,\n );\n\n return [...fullTables.entries()].sort(byKeys).map(([table, spec]) => ({\n table,\n columns: Object.entries(spec.columns)\n .sort(byKeys)\n .map(([column, spec]) => ({\n column,\n upstreamType: 
spec.dataType.split('|')[0],\n clientType: clientSchema.get(table)?.zqlSpec[column]?.type ?? null,\n })),\n }));\n}\n\nfunction getReplicatedIndexes(db: Database): ReplicatedIndex[] {\n return listIndexes(db).map(({tableName: table, columns, unique}) => ({\n table,\n unique,\n columns: Object.entries(columns)\n .sort(byKeys)\n .map(([column, dir]) => ({column, dir})),\n }));\n}\n\nfunction getReplicaSize(db: Database) {\n const [{page_count: pageCount}] = db.pragma<{page_count: number}>(\n 'page_count',\n );\n const [{page_size: pageSize}] = db.pragma<{page_size: number}>('page_size');\n return pageCount * pageSize;\n}\n"],"names":["spec"],"mappings":";;;AAmBA,MAAM,SAAS,CAAC,GAAsB,MACpC,EAAE,CAAC,IAAI,EAAE,CAAC,IAAI,KAAK,EAAE,CAAC,IAAI,EAAE,CAAC,IAAI,IAAI;AAEhC,MAAM,2BAA2B;AAAA,EAC7B;AAAA,EACT;AAAA,EAEA,YAAY,IAAc;AACxB,SAAK,MAAM;AAAA,EACb;AAAA,EAEA,QACE,IACA,OACA,aACA,WAAW,GACL;AACN,SAAK,KAAA;AACL;AAAA,MACE;AAAA,MACA,uBAAuB,IAAI,KAAK,KAAK,OAAO,MAAM,WAAW;AAAA,IAAA;AAG/D,QAAI,UAAU;AACZ,WAAK,SAAS;AAAA,QACZ,MAAM,KAAK,QAAQ,IAAI,OAAO,aAAa,QAAQ;AAAA,QACnD;AAAA,MAAA;AAAA,IAEJ;AACA,WAAO;AAAA,EACT;AAAA,EAEA,MAAM,qBACJ,IACA,OACA,GACgB;AAChB,SAAK,KAAA;AACL,UAAM,QAAQ;AAAA,MACZ;AAAA,MACA,KAAK;AAAA,MACL;AAAA,MACA;AAAA,MACA,OAAO,CAAC;AAAA,IAAA;AAEV,UAAM,eAAe,iBAAiB,CAAC;AACvC,UAAM,qBAAqB,IAAI,KAAK;AACpC,UAAM;AAAA,EACR;AAAA,EAEA,OAAa;AACX,kBAAc,KAAK,MAAM;AACzB,WAAO;AAAA,EACT;AACF;AAEA,eAAsB,wBACpB,IACA,OACA,aACA,cACA,MAAM,oBAAI,QACV;AACA,QAAM,QAAgC;AAAA,IACpC,MAAM;AAAA,IACN,WAAW;AAAA,IACX,QAAQ;AAAA,IACR;AAAA,IACA;AAAA,IACA;AAAA,IACA,MAAM,IAAI,YAAA;AAAA,EAAY;AAExB,QAAM,qBAAqB,IAAI,KAAK;AACtC;AAGO,SAAS,uBACd,IACA,IACA,OACA,QACA,aACA,MAAM,oBAAI,QACc;AACxB,MAAI;AACF,WAAO;AAAA,MACL,MAAM;AAAA,MACN,WAAW;AAAA,MACX;AAAA,MACA;AAAA,MACA;AAAA,MACA,MAAM,IAAI,YAAA;AAAA,MACV,OAAO;AAAA,QACL,QAAQ,oBAAoB,EAAE;AAAA,QAC9B,SAAS,qBAAqB,EAAE;AAAA,QAChC,aAAa,eAAe,EAAE;AAAA,MAAA;AAAA,IAChC;AAAA,EAEJ,SAAS,GAAG;AACV,OAAG,OAAO,gDAAgD,CAAC;AAC3D,WAAO;AAAA,MACL,MAAM;AAAA,MACN,WAAW;AAAA,MACX;AAAA,MACA;AAAA,MACA;AAAA,MACA,MAAM,IAAI,YAAA;AAAA,MACV,OAAO;AAAA,QACL,QAAQ,CAAA;AAAA,QACR,SAAS,CAAA;AAAA,QACT,aAAa;AAAA,MAAA;AAAA,IACf;AAAA,EAEJ;AACF;AAEA,SAAS,oBAAoB,IAAiC;AAC5D,QAAM,iCAAiB,IAAA;AACvB,QAAM,eAAe;AAAA,IACnB,uBAAA;AAAA;AAAA,IACA;AAAA,wBACI,IAAA;AAAA,IACJ;AAAA,EAAA;AAGF,SAAO,CAAC,GAAG,WAAW,QAAA,CAAS,EAAE,KAAK,MAAM,EAAE,IAAI,CAAC,CAAC,OAAO,IAAI,OAAO;AAAA,IACpE;AAAA,IACA,SAAS,OAAO,QAAQ,KAAK,OAAO,EACjC,KAAK,MAAM,EACX,IAAI,CAAC,CAAC,QAAQA,KAAI,OAAO;AAAA,MACxB;AAAA,MACA,cAAcA,MAAK,SAAS,MAAM,GAAG,EAAE,CAAC;AAAA,MACxC,YAAY,aAAa,IAAI,KAAK,GAAG,QAAQ,MAAM,GAAG,QAAQ;AAAA,IAAA,EAC9D;AAAA,EAAA,EACJ;AACJ;AAEA,SAAS,qBAAqB,IAAiC;AAC7D,SAAO,YAAY,EAAE,EAAE,IAAI,CAAC,EAAC,WAAW,OAAO,SAAS,cAAa;AAAA,IACnE;AAAA,IACA;AAAA,IACA,SAAS,OAAO,QAAQ,OAAO,EAC5B,KAAK,MAAM,EACX,IAAI,CAAC,CAAC,QAAQ,GAAG,OAAO,EAAC,QAAQ,MAAK;AAAA,EAAA,EACzC;AACJ;AAEA,SAAS,eAAe,IAAc;AACpC,QAAM,CAAC,EAAC,YAAY,UAAA,CAAU,IAAI,GAAG;AAAA,IACnC;AAAA,EAAA;AAEF,QAAM,CAAC,EAAC,WAAW,SAAA,CAAS,IAAI,GAAG,OAA4B,WAAW;AAC1E,SAAO,YAAY;AACrB;"}
@@ -23,5 +23,5 @@ export type RunAstOptions = {
  tableSpecs: Map<string, LiteAndZqlSpec>;
  vendedRows?: boolean | undefined;
  };
- export declare function runAst(lc: LogContext, clientSchema: ClientSchema, ast: AST, isTransformed: boolean, options: RunAstOptions): Promise<AnalyzeQueryResult>;
+ export declare function runAst(lc: LogContext, clientSchema: ClientSchema, ast: AST, isTransformed: boolean, options: RunAstOptions, yieldProcess: () => Promise<void>): Promise<AnalyzeQueryResult>;
  //# sourceMappingURL=run-ast.d.ts.map
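
The `run-ast.d.ts` change above adds a required `yieldProcess: () => Promise<void>` parameter to `runAst`. The declaration does not say how the callback should be implemented; below is a minimal sketch, assuming the intent is cooperative yielding during long analyses, of something a caller could pass. The `setImmediate`-based body is an assumption, not code from the package; any `() => Promise<void>` satisfies the declared type.

```ts
// Possible yieldProcess callback: defer to the Node.js event loop so a
// long-running query analysis does not block other work. Illustrative only.
const yieldProcess = (): Promise<void> =>
  new Promise<void>(resolve => setImmediate(resolve));

// It would then be supplied as the new sixth argument:
//   runAst(lc, clientSchema, ast, isTransformed, options, yieldProcess)
```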
@@ -1 +1 @@
- {"version":3,"file":"run-ast.d.ts","sourceRoot":"","sources":["../../../../../zero-cache/src/services/run-ast.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,kBAAkB,CAAC;AAQjD,OAAO,KAAK,EAAC,kBAAkB,EAAC,MAAM,oDAAoD,CAAC;AAC3F,OAAO,KAAK,EAAC,GAAG,EAAC,MAAM,mCAAmC,CAAC;AAI3D,OAAO,KAAK,EAAC,iBAAiB,EAAC,MAAM,kDAAkD,CAAC;AACxF,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,yCAAyC,CAAC;AACxE,OAAO,EAEL,KAAK,eAAe,EACrB,MAAM,qCAAqC,CAAC;AAC7C,OAAO,KAAK,EAAC,YAAY,EAAC,MAAM,2CAA2C,CAAC;AAC5E,OAAO,KAAK,EAAC,mBAAmB,EAAC,MAAM,gDAAgD,CAAC;AACxF,OAAO,KAAK,EAAC,QAAQ,EAAC,MAAM,2BAA2B,CAAC;AAExD,OAAO,KAAK,EAAC,cAAc,EAAC,MAAM,gBAAgB,CAAC;AAEnD,OAAO,KAAK,EAAC,SAAS,EAAC,MAAM,8BAA8B,CAAC;AAC5D,OAAO,KAAK,EAAC,YAAY,EAAC,MAAM,6CAA6C,CAAC;AAE9E,MAAM,MAAM,aAAa,GAAG;IAC1B,gBAAgB,CAAC,EAAE,OAAO,GAAG,SAAS,CAAC;IACvC,QAAQ,CAAC,EAAE,SAAS,GAAG,SAAS,CAAC;IACjC,oBAAoB,CAAC,EAAE,UAAU,GAAG,SAAS,CAAC;IAC9C,SAAS,CAAC,EAAE,mBAAmB,GAAG,SAAS,CAAC;IAC5C,EAAE,EAAE,QAAQ,CAAC;IACb,IAAI,EAAE,eAAe,CAAC;IACtB,WAAW,CAAC,EAAE,iBAAiB,GAAG,SAAS,CAAC;IAC5C,YAAY,CAAC,EAAE,YAAY,GAAG,SAAS,CAAC;IACxC,UAAU,CAAC,EAAE,OAAO,GAAG,SAAS,CAAC;IACjC,UAAU,EAAE,GAAG,CAAC,MAAM,EAAE,cAAc,CAAC,CAAC;IACxC,UAAU,CAAC,EAAE,OAAO,GAAG,SAAS,CAAC;CAClC,CAAC;AAEF,wBAAsB,MAAM,CAC1B,EAAE,EAAE,UAAU,EACd,YAAY,EAAE,YAAY,EAC1B,GAAG,EAAE,GAAG,EACR,aAAa,EAAE,OAAO,EACtB,OAAO,EAAE,aAAa,GACrB,OAAO,CAAC,kBAAkB,CAAC,CAsG7B"}
+ {"version":3,"file":"run-ast.d.ts","sourceRoot":"","sources":["../../../../../zero-cache/src/services/run-ast.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,kBAAkB,CAAC;AAQjD,OAAO,KAAK,EAAC,kBAAkB,EAAC,MAAM,oDAAoD,CAAC;AAC3F,OAAO,KAAK,EAAC,GAAG,EAAC,MAAM,mCAAmC,CAAC;AAI3D,OAAO,KAAK,EAAC,iBAAiB,EAAC,MAAM,kDAAkD,CAAC;AACxF,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,yCAAyC,CAAC;AACxE,OAAO,EAEL,KAAK,eAAe,EACrB,MAAM,qCAAqC,CAAC;AAC7C,OAAO,KAAK,EAAC,YAAY,EAAC,MAAM,2CAA2C,CAAC;AAC5E,OAAO,KAAK,EAAC,mBAAmB,EAAC,MAAM,gDAAgD,CAAC;AACxF,OAAO,KAAK,EAAC,QAAQ,EAAC,MAAM,2BAA2B,CAAC;AAExD,OAAO,KAAK,EAAC,cAAc,EAAC,MAAM,gBAAgB,CAAC;AAEnD,OAAO,KAAK,EAAC,SAAS,EAAC,MAAM,8BAA8B,CAAC;AAC5D,OAAO,KAAK,EAAC,YAAY,EAAC,MAAM,6CAA6C,CAAC;AAE9E,MAAM,MAAM,aAAa,GAAG;IAC1B,gBAAgB,CAAC,EAAE,OAAO,GAAG,SAAS,CAAC;IACvC,QAAQ,CAAC,EAAE,SAAS,GAAG,SAAS,CAAC;IACjC,oBAAoB,CAAC,EAAE,UAAU,GAAG,SAAS,CAAC;IAC9C,SAAS,CAAC,EAAE,mBAAmB,GAAG,SAAS,CAAC;IAC5C,EAAE,EAAE,QAAQ,CAAC;IACb,IAAI,EAAE,eAAe,CAAC;IACtB,WAAW,CAAC,EAAE,iBAAiB,GAAG,SAAS,CAAC;IAC5C,YAAY,CAAC,EAAE,YAAY,GAAG,SAAS,CAAC;IACxC,UAAU,CAAC,EAAE,OAAO,GAAG,SAAS,CAAC;IACjC,UAAU,EAAE,GAAG,CAAC,MAAM,EAAE,cAAc,CAAC,CAAC;IACxC,UAAU,CAAC,EAAE,OAAO,GAAG,SAAS,CAAC;CAClC,CAAC;AAEF,wBAAsB,MAAM,CAC1B,EAAE,EAAE,UAAU,EACd,YAAY,EAAE,YAAY,EAC1B,GAAG,EAAE,GAAG,EACR,aAAa,EAAE,OAAO,EACtB,OAAO,EAAE,aAAa,EACtB,YAAY,EAAE,MAAM,OAAO,CAAC,IAAI,CAAC,GAChC,OAAO,CAAC,kBAAkB,CAAC,CA0G7B"}