@rocicorp/zero 0.26.0-canary.2 → 0.26.0-canary.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (333)
  1. package/README.md +1 -1
  2. package/out/replicache/src/persist/collect-idb-databases.d.ts +4 -4
  3. package/out/replicache/src/persist/collect-idb-databases.d.ts.map +1 -1
  4. package/out/replicache/src/persist/collect-idb-databases.js +22 -19
  5. package/out/replicache/src/persist/collect-idb-databases.js.map +1 -1
  6. package/out/replicache/src/persist/refresh.d.ts.map +1 -1
  7. package/out/replicache/src/persist/refresh.js +0 -8
  8. package/out/replicache/src/persist/refresh.js.map +1 -1
  9. package/out/replicache/src/process-scheduler.d.ts +23 -0
  10. package/out/replicache/src/process-scheduler.d.ts.map +1 -1
  11. package/out/replicache/src/process-scheduler.js +50 -1
  12. package/out/replicache/src/process-scheduler.js.map +1 -1
  13. package/out/replicache/src/replicache-impl.d.ts +8 -0
  14. package/out/replicache/src/replicache-impl.d.ts.map +1 -1
  15. package/out/replicache/src/replicache-impl.js +11 -2
  16. package/out/replicache/src/replicache-impl.js.map +1 -1
  17. package/out/shared/src/falsy.d.ts +3 -0
  18. package/out/shared/src/falsy.d.ts.map +1 -0
  19. package/out/zero/package.json.js +1 -1
  20. package/out/zero/src/adapters/drizzle.js +1 -2
  21. package/out/zero/src/adapters/prisma.d.ts +2 -0
  22. package/out/zero/src/adapters/prisma.d.ts.map +1 -0
  23. package/out/zero/src/adapters/prisma.js +6 -0
  24. package/out/zero/src/adapters/prisma.js.map +1 -0
  25. package/out/zero/src/pg.js +4 -7
  26. package/out/zero/src/react.js +3 -1
  27. package/out/zero/src/react.js.map +1 -1
  28. package/out/zero/src/server.js +5 -8
  29. package/out/zero-cache/src/auth/load-permissions.d.ts +3 -2
  30. package/out/zero-cache/src/auth/load-permissions.d.ts.map +1 -1
  31. package/out/zero-cache/src/auth/load-permissions.js +14 -8
  32. package/out/zero-cache/src/auth/load-permissions.js.map +1 -1
  33. package/out/zero-cache/src/auth/write-authorizer.d.ts +6 -0
  34. package/out/zero-cache/src/auth/write-authorizer.d.ts.map +1 -1
  35. package/out/zero-cache/src/auth/write-authorizer.js +16 -3
  36. package/out/zero-cache/src/auth/write-authorizer.js.map +1 -1
  37. package/out/zero-cache/src/config/zero-config.d.ts +44 -8
  38. package/out/zero-cache/src/config/zero-config.d.ts.map +1 -1
  39. package/out/zero-cache/src/config/zero-config.js +53 -13
  40. package/out/zero-cache/src/config/zero-config.js.map +1 -1
  41. package/out/zero-cache/src/custom/fetch.d.ts +3 -0
  42. package/out/zero-cache/src/custom/fetch.d.ts.map +1 -1
  43. package/out/zero-cache/src/custom/fetch.js +26 -0
  44. package/out/zero-cache/src/custom/fetch.js.map +1 -1
  45. package/out/zero-cache/src/db/lite-tables.js +1 -1
  46. package/out/zero-cache/src/db/lite-tables.js.map +1 -1
  47. package/out/zero-cache/src/db/migration-lite.d.ts.map +1 -1
  48. package/out/zero-cache/src/db/migration-lite.js +9 -3
  49. package/out/zero-cache/src/db/migration-lite.js.map +1 -1
  50. package/out/zero-cache/src/db/migration.d.ts.map +1 -1
  51. package/out/zero-cache/src/db/migration.js +9 -3
  52. package/out/zero-cache/src/db/migration.js.map +1 -1
  53. package/out/zero-cache/src/db/specs.d.ts +4 -3
  54. package/out/zero-cache/src/db/specs.d.ts.map +1 -1
  55. package/out/zero-cache/src/db/specs.js +4 -1
  56. package/out/zero-cache/src/db/specs.js.map +1 -1
  57. package/out/zero-cache/src/db/transaction-pool.d.ts.map +1 -1
  58. package/out/zero-cache/src/db/transaction-pool.js +9 -3
  59. package/out/zero-cache/src/db/transaction-pool.js.map +1 -1
  60. package/out/zero-cache/src/server/inspector-delegate.d.ts +1 -1
  61. package/out/zero-cache/src/server/inspector-delegate.d.ts.map +1 -1
  62. package/out/zero-cache/src/server/inspector-delegate.js +11 -30
  63. package/out/zero-cache/src/server/inspector-delegate.js.map +1 -1
  64. package/out/zero-cache/src/server/main.js +1 -1
  65. package/out/zero-cache/src/server/main.js.map +1 -1
  66. package/out/zero-cache/src/server/priority-op.d.ts +8 -0
  67. package/out/zero-cache/src/server/priority-op.d.ts.map +1 -0
  68. package/out/zero-cache/src/server/priority-op.js +29 -0
  69. package/out/zero-cache/src/server/priority-op.js.map +1 -0
  70. package/out/zero-cache/src/server/syncer.d.ts +0 -1
  71. package/out/zero-cache/src/server/syncer.d.ts.map +1 -1
  72. package/out/zero-cache/src/server/syncer.js +3 -21
  73. package/out/zero-cache/src/server/syncer.js.map +1 -1
  74. package/out/zero-cache/src/services/analyze.js +1 -1
  75. package/out/zero-cache/src/services/analyze.js.map +1 -1
  76. package/out/zero-cache/src/services/change-source/custom/change-source.d.ts.map +1 -1
  77. package/out/zero-cache/src/services/change-source/custom/change-source.js +4 -3
  78. package/out/zero-cache/src/services/change-source/custom/change-source.js.map +1 -1
  79. package/out/zero-cache/src/services/change-source/pg/change-source.d.ts.map +1 -1
  80. package/out/zero-cache/src/services/change-source/pg/change-source.js +68 -13
  81. package/out/zero-cache/src/services/change-source/pg/change-source.js.map +1 -1
  82. package/out/zero-cache/src/services/change-source/pg/initial-sync.d.ts.map +1 -1
  83. package/out/zero-cache/src/services/change-source/pg/initial-sync.js +7 -2
  84. package/out/zero-cache/src/services/change-source/pg/initial-sync.js.map +1 -1
  85. package/out/zero-cache/src/services/change-source/pg/logical-replication/stream.d.ts.map +1 -1
  86. package/out/zero-cache/src/services/change-source/pg/logical-replication/stream.js +7 -4
  87. package/out/zero-cache/src/services/change-source/pg/logical-replication/stream.js.map +1 -1
  88. package/out/zero-cache/src/services/change-source/pg/schema/ddl.d.ts +125 -180
  89. package/out/zero-cache/src/services/change-source/pg/schema/ddl.d.ts.map +1 -1
  90. package/out/zero-cache/src/services/change-source/pg/schema/init.d.ts.map +1 -1
  91. package/out/zero-cache/src/services/change-source/pg/schema/init.js +18 -10
  92. package/out/zero-cache/src/services/change-source/pg/schema/init.js.map +1 -1
  93. package/out/zero-cache/src/services/change-source/pg/schema/published.d.ts +36 -90
  94. package/out/zero-cache/src/services/change-source/pg/schema/published.d.ts.map +1 -1
  95. package/out/zero-cache/src/services/change-source/pg/schema/published.js +51 -14
  96. package/out/zero-cache/src/services/change-source/pg/schema/published.js.map +1 -1
  97. package/out/zero-cache/src/services/change-source/pg/schema/shard.d.ts +31 -36
  98. package/out/zero-cache/src/services/change-source/pg/schema/shard.d.ts.map +1 -1
  99. package/out/zero-cache/src/services/change-source/pg/schema/shard.js +24 -3
  100. package/out/zero-cache/src/services/change-source/pg/schema/shard.js.map +1 -1
  101. package/out/zero-cache/src/services/change-source/pg/schema/validation.d.ts +2 -2
  102. package/out/zero-cache/src/services/change-source/pg/schema/validation.d.ts.map +1 -1
  103. package/out/zero-cache/src/services/change-source/pg/schema/validation.js +2 -4
  104. package/out/zero-cache/src/services/change-source/pg/schema/validation.js.map +1 -1
  105. package/out/zero-cache/src/services/change-source/protocol/current/data.d.ts +158 -53
  106. package/out/zero-cache/src/services/change-source/protocol/current/data.d.ts.map +1 -1
  107. package/out/zero-cache/src/services/change-source/protocol/current/data.js +55 -10
  108. package/out/zero-cache/src/services/change-source/protocol/current/data.js.map +1 -1
  109. package/out/zero-cache/src/services/change-source/protocol/current/downstream.d.ts +210 -72
  110. package/out/zero-cache/src/services/change-source/protocol/current/downstream.d.ts.map +1 -1
  111. package/out/zero-cache/src/services/change-source/protocol/current.js +4 -2
  112. package/out/zero-cache/src/services/change-source/replica-schema.d.ts.map +1 -1
  113. package/out/zero-cache/src/services/change-source/replica-schema.js +19 -10
  114. package/out/zero-cache/src/services/change-source/replica-schema.js.map +1 -1
  115. package/out/zero-cache/src/services/change-streamer/change-streamer-service.js +1 -1
  116. package/out/zero-cache/src/services/change-streamer/change-streamer-service.js.map +1 -1
  117. package/out/zero-cache/src/services/change-streamer/change-streamer.d.ts +71 -25
  118. package/out/zero-cache/src/services/change-streamer/change-streamer.d.ts.map +1 -1
  119. package/out/zero-cache/src/services/change-streamer/change-streamer.js +1 -1
  120. package/out/zero-cache/src/services/change-streamer/change-streamer.js.map +1 -1
  121. package/out/zero-cache/src/services/change-streamer/schema/tables.d.ts +1 -0
  122. package/out/zero-cache/src/services/change-streamer/schema/tables.d.ts.map +1 -1
  123. package/out/zero-cache/src/services/change-streamer/schema/tables.js +6 -5
  124. package/out/zero-cache/src/services/change-streamer/schema/tables.js.map +1 -1
  125. package/out/zero-cache/src/services/change-streamer/storer.js +1 -1
  126. package/out/zero-cache/src/services/change-streamer/storer.js.map +1 -1
  127. package/out/zero-cache/src/services/change-streamer/subscriber.d.ts +2 -0
  128. package/out/zero-cache/src/services/change-streamer/subscriber.d.ts.map +1 -1
  129. package/out/zero-cache/src/services/change-streamer/subscriber.js +14 -1
  130. package/out/zero-cache/src/services/change-streamer/subscriber.js.map +1 -1
  131. package/out/zero-cache/src/services/heapz.d.ts.map +1 -1
  132. package/out/zero-cache/src/services/heapz.js +1 -0
  133. package/out/zero-cache/src/services/heapz.js.map +1 -1
  134. package/out/zero-cache/src/services/mutagen/error.d.ts.map +1 -1
  135. package/out/zero-cache/src/services/mutagen/error.js +4 -1
  136. package/out/zero-cache/src/services/mutagen/error.js.map +1 -1
  137. package/out/zero-cache/src/services/mutagen/mutagen.d.ts.map +1 -1
  138. package/out/zero-cache/src/services/mutagen/mutagen.js +1 -0
  139. package/out/zero-cache/src/services/mutagen/mutagen.js.map +1 -1
  140. package/out/zero-cache/src/services/mutagen/pusher.d.ts +7 -4
  141. package/out/zero-cache/src/services/mutagen/pusher.d.ts.map +1 -1
  142. package/out/zero-cache/src/services/mutagen/pusher.js +80 -8
  143. package/out/zero-cache/src/services/mutagen/pusher.js.map +1 -1
  144. package/out/zero-cache/src/services/replicator/change-processor.d.ts.map +1 -1
  145. package/out/zero-cache/src/services/replicator/change-processor.js +21 -29
  146. package/out/zero-cache/src/services/replicator/change-processor.js.map +1 -1
  147. package/out/zero-cache/src/services/replicator/schema/change-log.d.ts +1 -2
  148. package/out/zero-cache/src/services/replicator/schema/change-log.d.ts.map +1 -1
  149. package/out/zero-cache/src/services/replicator/schema/change-log.js +2 -5
  150. package/out/zero-cache/src/services/replicator/schema/change-log.js.map +1 -1
  151. package/out/zero-cache/src/services/{change-source → replicator/schema}/column-metadata.d.ts +3 -3
  152. package/out/zero-cache/src/services/replicator/schema/column-metadata.d.ts.map +1 -0
  153. package/out/zero-cache/src/services/{change-source → replicator/schema}/column-metadata.js +3 -3
  154. package/out/zero-cache/src/services/replicator/schema/column-metadata.js.map +1 -0
  155. package/out/zero-cache/src/services/replicator/schema/replication-state.d.ts.map +1 -1
  156. package/out/zero-cache/src/services/replicator/schema/replication-state.js +3 -1
  157. package/out/zero-cache/src/services/replicator/schema/replication-state.js.map +1 -1
  158. package/out/zero-cache/src/services/run-ast.js +1 -1
  159. package/out/zero-cache/src/services/run-ast.js.map +1 -1
  160. package/out/zero-cache/src/services/statz.d.ts.map +1 -1
  161. package/out/zero-cache/src/services/statz.js +1 -0
  162. package/out/zero-cache/src/services/statz.js.map +1 -1
  163. package/out/zero-cache/src/services/view-syncer/cvr-store.d.ts +1 -1
  164. package/out/zero-cache/src/services/view-syncer/cvr-store.d.ts.map +1 -1
  165. package/out/zero-cache/src/services/view-syncer/cvr-store.js +59 -40
  166. package/out/zero-cache/src/services/view-syncer/cvr-store.js.map +1 -1
  167. package/out/zero-cache/src/services/view-syncer/cvr.d.ts +0 -1
  168. package/out/zero-cache/src/services/view-syncer/cvr.d.ts.map +1 -1
  169. package/out/zero-cache/src/services/view-syncer/cvr.js +23 -6
  170. package/out/zero-cache/src/services/view-syncer/cvr.js.map +1 -1
  171. package/out/zero-cache/src/services/view-syncer/pipeline-driver.d.ts +13 -14
  172. package/out/zero-cache/src/services/view-syncer/pipeline-driver.d.ts.map +1 -1
  173. package/out/zero-cache/src/services/view-syncer/pipeline-driver.js +44 -56
  174. package/out/zero-cache/src/services/view-syncer/pipeline-driver.js.map +1 -1
  175. package/out/zero-cache/src/services/view-syncer/row-record-cache.d.ts +1 -1
  176. package/out/zero-cache/src/services/view-syncer/row-record-cache.d.ts.map +1 -1
  177. package/out/zero-cache/src/services/view-syncer/row-record-cache.js +22 -11
  178. package/out/zero-cache/src/services/view-syncer/row-record-cache.js.map +1 -1
  179. package/out/zero-cache/src/services/view-syncer/snapshotter.js +1 -1
  180. package/out/zero-cache/src/services/view-syncer/snapshotter.js.map +1 -1
  181. package/out/zero-cache/src/services/view-syncer/view-syncer.d.ts +6 -3
  182. package/out/zero-cache/src/services/view-syncer/view-syncer.d.ts.map +1 -1
  183. package/out/zero-cache/src/services/view-syncer/view-syncer.js +192 -217
  184. package/out/zero-cache/src/services/view-syncer/view-syncer.js.map +1 -1
  185. package/out/zero-cache/src/types/lexi-version.d.ts.map +1 -1
  186. package/out/zero-cache/src/types/lexi-version.js +4 -1
  187. package/out/zero-cache/src/types/lexi-version.js.map +1 -1
  188. package/out/zero-cache/src/types/lite.d.ts.map +1 -1
  189. package/out/zero-cache/src/types/lite.js +8 -2
  190. package/out/zero-cache/src/types/lite.js.map +1 -1
  191. package/out/zero-cache/src/types/shards.js +1 -1
  192. package/out/zero-cache/src/types/shards.js.map +1 -1
  193. package/out/zero-cache/src/types/sql.d.ts +5 -0
  194. package/out/zero-cache/src/types/sql.d.ts.map +1 -1
  195. package/out/zero-cache/src/types/sql.js +5 -1
  196. package/out/zero-cache/src/types/sql.js.map +1 -1
  197. package/out/zero-cache/src/types/subscription.js +1 -1
  198. package/out/zero-cache/src/types/subscription.js.map +1 -1
  199. package/out/zero-cache/src/workers/connect-params.d.ts +1 -0
  200. package/out/zero-cache/src/workers/connect-params.d.ts.map +1 -1
  201. package/out/zero-cache/src/workers/connect-params.js +2 -1
  202. package/out/zero-cache/src/workers/connect-params.js.map +1 -1
  203. package/out/zero-cache/src/workers/syncer-ws-message-handler.d.ts.map +1 -1
  204. package/out/zero-cache/src/workers/syncer-ws-message-handler.js +14 -6
  205. package/out/zero-cache/src/workers/syncer-ws-message-handler.js.map +1 -1
  206. package/out/zero-cache/src/workers/syncer.d.ts.map +1 -1
  207. package/out/zero-cache/src/workers/syncer.js +17 -10
  208. package/out/zero-cache/src/workers/syncer.js.map +1 -1
  209. package/out/zero-client/src/client/connection-manager.d.ts +8 -0
  210. package/out/zero-client/src/client/connection-manager.d.ts.map +1 -1
  211. package/out/zero-client/src/client/connection-manager.js +33 -0
  212. package/out/zero-client/src/client/connection-manager.js.map +1 -1
  213. package/out/zero-client/src/client/connection.d.ts.map +1 -1
  214. package/out/zero-client/src/client/connection.js +6 -3
  215. package/out/zero-client/src/client/connection.js.map +1 -1
  216. package/out/zero-client/src/client/error.js +1 -1
  217. package/out/zero-client/src/client/error.js.map +1 -1
  218. package/out/zero-client/src/client/mutator-proxy.d.ts.map +1 -1
  219. package/out/zero-client/src/client/mutator-proxy.js +15 -1
  220. package/out/zero-client/src/client/mutator-proxy.js.map +1 -1
  221. package/out/zero-client/src/client/options.d.ts +10 -0
  222. package/out/zero-client/src/client/options.d.ts.map +1 -1
  223. package/out/zero-client/src/client/options.js.map +1 -1
  224. package/out/zero-client/src/client/query-manager.d.ts +4 -0
  225. package/out/zero-client/src/client/query-manager.d.ts.map +1 -1
  226. package/out/zero-client/src/client/query-manager.js +7 -0
  227. package/out/zero-client/src/client/query-manager.js.map +1 -1
  228. package/out/zero-client/src/client/version.js +1 -1
  229. package/out/zero-client/src/client/zero.d.ts +3 -1
  230. package/out/zero-client/src/client/zero.d.ts.map +1 -1
  231. package/out/zero-client/src/client/zero.js +52 -7
  232. package/out/zero-client/src/client/zero.js.map +1 -1
  233. package/out/zero-client/src/mod.d.ts +1 -0
  234. package/out/zero-client/src/mod.d.ts.map +1 -1
  235. package/out/zero-protocol/src/connect.d.ts +4 -0
  236. package/out/zero-protocol/src/connect.d.ts.map +1 -1
  237. package/out/zero-protocol/src/connect.js +3 -1
  238. package/out/zero-protocol/src/connect.js.map +1 -1
  239. package/out/zero-protocol/src/protocol-version.d.ts +1 -1
  240. package/out/zero-protocol/src/protocol-version.d.ts.map +1 -1
  241. package/out/zero-protocol/src/protocol-version.js +1 -1
  242. package/out/zero-protocol/src/protocol-version.js.map +1 -1
  243. package/out/zero-protocol/src/push.d.ts +11 -2
  244. package/out/zero-protocol/src/push.d.ts.map +1 -1
  245. package/out/zero-protocol/src/push.js +22 -6
  246. package/out/zero-protocol/src/push.js.map +1 -1
  247. package/out/zero-protocol/src/up.d.ts +2 -0
  248. package/out/zero-protocol/src/up.d.ts.map +1 -1
  249. package/out/zero-react/src/mod.d.ts +3 -1
  250. package/out/zero-react/src/mod.d.ts.map +1 -1
  251. package/out/zero-react/src/paging-reducer.d.ts +61 -0
  252. package/out/zero-react/src/paging-reducer.d.ts.map +1 -0
  253. package/out/zero-react/src/paging-reducer.js +77 -0
  254. package/out/zero-react/src/paging-reducer.js.map +1 -0
  255. package/out/zero-react/src/use-query.d.ts +11 -1
  256. package/out/zero-react/src/use-query.d.ts.map +1 -1
  257. package/out/zero-react/src/use-query.js +13 -11
  258. package/out/zero-react/src/use-query.js.map +1 -1
  259. package/out/zero-react/src/use-rows.d.ts +39 -0
  260. package/out/zero-react/src/use-rows.d.ts.map +1 -0
  261. package/out/zero-react/src/use-rows.js +130 -0
  262. package/out/zero-react/src/use-rows.js.map +1 -0
  263. package/out/zero-react/src/use-zero-virtualizer.d.ts +122 -0
  264. package/out/zero-react/src/use-zero-virtualizer.d.ts.map +1 -0
  265. package/out/zero-react/src/use-zero-virtualizer.js +342 -0
  266. package/out/zero-react/src/use-zero-virtualizer.js.map +1 -0
  267. package/out/zero-react/src/zero-provider.js +1 -1
  268. package/out/zero-react/src/zero-provider.js.map +1 -1
  269. package/out/zero-server/src/adapters/drizzle.d.ts +18 -18
  270. package/out/zero-server/src/adapters/drizzle.d.ts.map +1 -1
  271. package/out/zero-server/src/adapters/drizzle.js +8 -22
  272. package/out/zero-server/src/adapters/drizzle.js.map +1 -1
  273. package/out/zero-server/src/adapters/pg.d.ts +19 -13
  274. package/out/zero-server/src/adapters/pg.d.ts.map +1 -1
  275. package/out/zero-server/src/adapters/pg.js.map +1 -1
  276. package/out/zero-server/src/adapters/postgresjs.d.ts +19 -13
  277. package/out/zero-server/src/adapters/postgresjs.d.ts.map +1 -1
  278. package/out/zero-server/src/adapters/postgresjs.js.map +1 -1
  279. package/out/zero-server/src/adapters/prisma.d.ts +66 -0
  280. package/out/zero-server/src/adapters/prisma.d.ts.map +1 -0
  281. package/out/zero-server/src/adapters/prisma.js +63 -0
  282. package/out/zero-server/src/adapters/prisma.js.map +1 -0
  283. package/out/zero-server/src/custom.js +1 -15
  284. package/out/zero-server/src/custom.js.map +1 -1
  285. package/out/zero-server/src/mod.d.ts +9 -8
  286. package/out/zero-server/src/mod.d.ts.map +1 -1
  287. package/out/zero-server/src/process-mutations.d.ts +2 -2
  288. package/out/zero-server/src/process-mutations.d.ts.map +1 -1
  289. package/out/zero-server/src/process-mutations.js +4 -8
  290. package/out/zero-server/src/process-mutations.js.map +1 -1
  291. package/out/zero-server/src/push-processor.js +1 -1
  292. package/out/zero-server/src/push-processor.js.map +1 -1
  293. package/out/zero-server/src/schema.d.ts.map +1 -1
  294. package/out/zero-server/src/schema.js +4 -1
  295. package/out/zero-server/src/schema.js.map +1 -1
  296. package/out/zero-server/src/zql-database.d.ts.map +1 -1
  297. package/out/zero-server/src/zql-database.js +17 -8
  298. package/out/zero-server/src/zql-database.js.map +1 -1
  299. package/out/zero-solid/src/mod.d.ts +1 -1
  300. package/out/zero-solid/src/mod.d.ts.map +1 -1
  301. package/out/zero-solid/src/use-query.d.ts +10 -1
  302. package/out/zero-solid/src/use-query.d.ts.map +1 -1
  303. package/out/zero-solid/src/use-query.js +21 -5
  304. package/out/zero-solid/src/use-query.js.map +1 -1
  305. package/out/zero-solid/src/use-zero.js +1 -1
  306. package/out/zero-solid/src/use-zero.js.map +1 -1
  307. package/out/zql/src/ivm/constraint.d.ts.map +1 -1
  308. package/out/zql/src/ivm/constraint.js +4 -1
  309. package/out/zql/src/ivm/constraint.js.map +1 -1
  310. package/out/zql/src/ivm/exists.d.ts.map +1 -1
  311. package/out/zql/src/ivm/exists.js +4 -1
  312. package/out/zql/src/ivm/exists.js.map +1 -1
  313. package/out/zql/src/ivm/join-utils.d.ts.map +1 -1
  314. package/out/zql/src/ivm/join-utils.js +8 -2
  315. package/out/zql/src/ivm/join-utils.js.map +1 -1
  316. package/out/zql/src/ivm/memory-source.d.ts.map +1 -1
  317. package/out/zql/src/ivm/memory-source.js +12 -3
  318. package/out/zql/src/ivm/memory-source.js.map +1 -1
  319. package/out/zql/src/ivm/push-accumulated.d.ts.map +1 -1
  320. package/out/zql/src/ivm/push-accumulated.js +25 -2
  321. package/out/zql/src/ivm/push-accumulated.js.map +1 -1
  322. package/out/zql/src/ivm/take.d.ts.map +1 -1
  323. package/out/zql/src/ivm/take.js +24 -6
  324. package/out/zql/src/ivm/take.js.map +1 -1
  325. package/out/zql/src/ivm/union-fan-in.d.ts.map +1 -1
  326. package/out/zql/src/ivm/union-fan-in.js +12 -3
  327. package/out/zql/src/ivm/union-fan-in.js.map +1 -1
  328. package/out/zqlite/src/table-source.d.ts.map +1 -1
  329. package/out/zqlite/src/table-source.js +1 -2
  330. package/out/zqlite/src/table-source.js.map +1 -1
  331. package/package.json +6 -2
  332. package/out/zero-cache/src/services/change-source/column-metadata.d.ts.map +0 -1
  333. package/out/zero-cache/src/services/change-source/column-metadata.js.map +0 -1
@@ -1 +1 @@
1
- {"version":3,"file":"change-source.js","sources":["../../../../../../../zero-cache/src/services/change-source/pg/change-source.ts"],"sourcesContent":["import {\n PG_ADMIN_SHUTDOWN,\n PG_OBJECT_IN_USE,\n} from '@drdgvhbh/postgres-error-codes';\nimport type {LogContext} from '@rocicorp/logger';\nimport postgres from 'postgres';\nimport {AbortError} from '../../../../../shared/src/abort-error.ts';\nimport {stringify} from '../../../../../shared/src/bigint-json.ts';\nimport {deepEqual} from '../../../../../shared/src/json.ts';\nimport {must} from '../../../../../shared/src/must.ts';\nimport {promiseVoid} from '../../../../../shared/src/resolved-promises.ts';\nimport {\n equals,\n intersection,\n symmetricDifferences,\n} from '../../../../../shared/src/set-utils.ts';\nimport {sleep} from '../../../../../shared/src/sleep.ts';\nimport * as v from '../../../../../shared/src/valita.ts';\nimport {Database} from '../../../../../zqlite/src/db.ts';\nimport {mapPostgresToLiteColumn} from '../../../db/pg-to-lite.ts';\nimport type {\n ColumnSpec,\n PublishedTableSpec,\n TableSpec,\n} from '../../../db/specs.ts';\nimport {StatementRunner} from '../../../db/statements.ts';\nimport {\n oneAfter,\n versionFromLexi,\n versionToLexi,\n type LexiVersion,\n} from '../../../types/lexi-version.ts';\nimport {pgClient, type PostgresDB} from '../../../types/pg.ts';\nimport {\n upstreamSchema,\n type ShardConfig,\n type ShardID,\n} from '../../../types/shards.ts';\nimport type {Sink} from '../../../types/streams.ts';\nimport {Subscription, type PendingResult} from '../../../types/subscription.ts';\nimport type {\n ChangeSource,\n ChangeStream,\n} from '../../change-streamer/change-streamer-service.ts';\nimport {AutoResetSignal} from '../../change-streamer/schema/tables.ts';\nimport {\n getSubscriptionState,\n type SubscriptionState,\n} from '../../replicator/schema/replication-state.ts';\nimport type {JSONObject} from '../protocol/current.ts';\nimport type {\n DataChange,\n Identifier,\n 
MessageRelation,\n} from '../protocol/current/data.ts';\nimport type {\n ChangeStreamData,\n ChangeStreamMessage,\n Data,\n} from '../protocol/current/downstream.ts';\nimport {type InitialSyncOptions} from './initial-sync.ts';\nimport type {\n Message,\n MessageMessage,\n MessageRelation as PostgresRelation,\n} from './logical-replication/pgoutput.types.ts';\nimport {subscribe} from './logical-replication/stream.ts';\nimport {fromBigInt, toLexiVersion, type LSN} from './lsn.ts';\nimport {replicationEventSchema, type DdlUpdateEvent} from './schema/ddl.ts';\nimport {updateShardSchema} from './schema/init.ts';\nimport {getPublicationInfo, type PublishedSchema} from './schema/published.ts';\nimport {\n dropShard,\n getInternalShardConfig,\n getReplicaAtVersion,\n internalPublicationPrefix,\n legacyReplicationSlot,\n replicaIdentitiesForTablesWithoutPrimaryKeys,\n replicationSlotExpression,\n type InternalShardConfig,\n type Replica,\n} from './schema/shard.ts';\nimport {validate} from './schema/validation.ts';\nimport {initSyncSchema} from './sync-schema.ts';\n\n/**\n * Initializes a Postgres change source, including the initial sync of the\n * replica, before streaming changes from the corresponding logical replication\n * stream.\n */\nexport async function initializePostgresChangeSource(\n lc: LogContext,\n upstreamURI: string,\n shard: ShardConfig,\n replicaDbFile: string,\n syncOptions: InitialSyncOptions,\n): Promise<{subscriptionState: SubscriptionState; changeSource: ChangeSource}> {\n await initSyncSchema(\n lc,\n `replica-${shard.appID}-${shard.shardNum}`,\n shard,\n replicaDbFile,\n upstreamURI,\n syncOptions,\n );\n\n const replica = new Database(lc, replicaDbFile);\n const subscriptionState = getSubscriptionState(new StatementRunner(replica));\n replica.close();\n\n // Check that upstream is properly setup, and throw an AutoReset to re-run\n // initial sync if not.\n const db = pgClient(lc, upstreamURI);\n try {\n const upstreamReplica = await 
checkAndUpdateUpstream(\n lc,\n db,\n shard,\n subscriptionState,\n );\n\n const changeSource = new PostgresChangeSource(\n lc,\n upstreamURI,\n shard,\n upstreamReplica,\n );\n\n return {subscriptionState, changeSource};\n } finally {\n await db.end();\n }\n}\n\nasync function checkAndUpdateUpstream(\n lc: LogContext,\n sql: PostgresDB,\n shard: ShardConfig,\n {replicaVersion, publications: subscribed}: SubscriptionState,\n) {\n // Perform any shard schema updates\n await updateShardSchema(lc, sql, shard, replicaVersion);\n\n const upstreamReplica = await getReplicaAtVersion(\n lc,\n sql,\n shard,\n replicaVersion,\n );\n if (!upstreamReplica) {\n throw new AutoResetSignal(\n `No replication slot for replica at version ${replicaVersion}`,\n );\n }\n\n // Verify that the publications match what is being replicated.\n const requested = [...shard.publications].sort();\n const replicated = upstreamReplica.publications\n .filter(p => !p.startsWith(internalPublicationPrefix(shard)))\n .sort();\n if (!deepEqual(requested, replicated)) {\n lc.warn?.(`Dropping shard to change publications to: [${requested}]`);\n await sql.unsafe(dropShard(shard.appID, shard.shardNum));\n throw new AutoResetSignal(\n `Requested publications [${requested}] do not match configured ` +\n `publications: [${replicated}]`,\n );\n }\n\n // Sanity check: The subscription state on the replica should have the\n // same publications. 
This should be guaranteed by the equivalence of the\n // replicaVersion, but it doesn't hurt to verify.\n if (!deepEqual(upstreamReplica.publications, subscribed)) {\n throw new AutoResetSignal(\n `Upstream publications [${upstreamReplica.publications}] do not ` +\n `match subscribed publications [${subscribed}]`,\n );\n }\n\n // Verify that the publications exist.\n const exists = await sql`\n SELECT pubname FROM pg_publication WHERE pubname IN ${sql(subscribed)};\n `.values();\n if (exists.length !== subscribed.length) {\n throw new AutoResetSignal(\n `Upstream publications [${exists.flat()}] do not contain ` +\n `all subscribed publications [${subscribed}]`,\n );\n }\n\n const {slot} = upstreamReplica;\n const result = await sql<\n {restartLSN: LSN | null; walStatus: string | null}[]\n > /*sql*/ `\n SELECT restart_lsn as \"restartLSN\", wal_status as \"walStatus\" FROM pg_replication_slots\n WHERE slot_name = ${slot}`;\n if (result.length === 0) {\n throw new AutoResetSignal(`replication slot ${slot} is missing`);\n }\n const [{restartLSN, walStatus}] = result;\n if (restartLSN === null || walStatus === 'lost') {\n throw new AutoResetSignal(\n `replication slot ${slot} has been invalidated for exceeding the max_slot_wal_keep_size`,\n );\n }\n return upstreamReplica;\n}\n\n/**\n * Postgres implementation of a {@link ChangeSource} backed by a logical\n * replication stream.\n */\nclass PostgresChangeSource implements ChangeSource {\n readonly #lc: LogContext;\n readonly #upstreamUri: string;\n readonly #shard: ShardID;\n readonly #replica: Replica;\n\n constructor(\n lc: LogContext,\n upstreamUri: string,\n shard: ShardID,\n replica: Replica,\n ) {\n this.#lc = lc.withContext('component', 'change-source');\n this.#upstreamUri = upstreamUri;\n this.#shard = shard;\n this.#replica = replica;\n }\n\n async startStream(clientWatermark: string): Promise<ChangeStream> {\n const db = pgClient(this.#lc, this.#upstreamUri, {}, 'json-as-string');\n const {slot} = 
this.#replica;\n\n let cleanup = promiseVoid;\n try {\n ({cleanup} = await this.#stopExistingReplicationSlotSubscribers(\n db,\n slot,\n ));\n const config = await getInternalShardConfig(db, this.#shard);\n this.#lc.info?.(`starting replication stream@${slot}`);\n return await this.#startStream(db, slot, clientWatermark, config);\n } finally {\n void cleanup.then(() => db.end());\n }\n }\n\n async #startStream(\n db: PostgresDB,\n slot: string,\n clientWatermark: string,\n shardConfig: InternalShardConfig,\n ): Promise<ChangeStream> {\n const clientStart = oneAfter(clientWatermark);\n const {messages, acks} = await subscribe(\n this.#lc,\n db,\n slot,\n [...shardConfig.publications],\n versionFromLexi(clientStart),\n );\n\n const changes = Subscription.create<ChangeStreamMessage>({\n cleanup: () => messages.cancel(),\n });\n const acker = new Acker(acks);\n\n const changeMaker = new ChangeMaker(\n this.#lc,\n this.#shard,\n shardConfig,\n this.#replica.initialSchema,\n this.#upstreamUri,\n );\n\n void (async function () {\n try {\n for await (const [lsn, msg] of messages) {\n if (msg.tag === 'keepalive') {\n changes.push(['status', msg, {watermark: versionToLexi(lsn)}]);\n continue;\n }\n let last: PendingResult | undefined;\n for (const change of await changeMaker.makeChanges(lsn, msg)) {\n last = changes.push(change);\n }\n await last?.result; // Allow the change-streamer to push back.\n }\n } catch (e) {\n changes.fail(translateError(e));\n }\n })();\n\n this.#lc.info?.(\n `started replication stream@${slot} from ${clientWatermark} (replicaVersion: ${\n this.#replica.version\n })`,\n );\n\n return {\n changes,\n acks: {push: status => acker.ack(status[2].watermark)},\n };\n }\n\n /**\n * Stops replication slots associated with this shard, and returns\n * a `cleanup` task that drops any slot other than the specified\n * `slotToKeep`.\n *\n * Note that replication slots created after `slotToKeep` (as indicated by\n * the timestamp suffix) are preserved, as those 
are newly syncing replicas\n * that will soon take over the slot.\n */\n async #stopExistingReplicationSlotSubscribers(\n sql: PostgresDB,\n slotToKeep: string,\n ): Promise<{cleanup: Promise<void>}> {\n const slotExpression = replicationSlotExpression(this.#shard);\n const legacySlotName = legacyReplicationSlot(this.#shard);\n\n // Note: `slot_name <= slotToKeep` uses a string compare of the millisecond\n // timestamp, which works until it exceeds 13 digits (sometime in 2286).\n const result = await sql<{slot: string; pid: string | null}[]>`\n SELECT slot_name as slot, pg_terminate_backend(active_pid), active_pid as pid\n FROM pg_replication_slots \n WHERE (slot_name LIKE ${slotExpression} OR slot_name = ${legacySlotName})\n AND slot_name <= ${slotToKeep}`;\n if (result.length === 0) {\n throw new AbortError(\n `replication slot ${slotToKeep} is missing. A different ` +\n `replication-manager should now be running on a new ` +\n `replication slot.`,\n );\n }\n // Clean up the replicas table.\n const replicasTable = `${upstreamSchema(this.#shard)}.replicas`;\n await sql`DELETE FROM ${sql(replicasTable)} WHERE slot != ${slotToKeep}`;\n\n const pids = result.filter(({pid}) => pid !== null).map(({pid}) => pid);\n if (pids.length) {\n this.#lc.info?.(`signaled subscriber ${pids} to shut down`);\n }\n const otherSlots = result\n .filter(({slot}) => slot !== slotToKeep)\n .map(({slot}) => slot);\n return {\n cleanup: otherSlots.length\n ? 
this.#dropReplicationSlots(sql, otherSlots)\n : promiseVoid,\n };\n }\n\n async #dropReplicationSlots(sql: PostgresDB, slots: string[]) {\n this.#lc.info?.(`dropping other replication slot(s) ${slots}`);\n for (let i = 0; i < 5; i++) {\n try {\n await sql`\n SELECT pg_drop_replication_slot(slot_name) FROM pg_replication_slots\n WHERE slot_name IN ${sql(slots)}\n `;\n this.#lc.info?.(`successfully dropped ${slots}`);\n return;\n } catch (e) {\n // error: replication slot \"zero_slot_change_source_test_id\" is active for PID 268\n if (\n e instanceof postgres.PostgresError &&\n e.code === PG_OBJECT_IN_USE\n ) {\n // The freeing up of the replication slot is not transactional;\n // sometimes it takes time for Postgres to consider the slot\n // inactive.\n this.#lc.debug?.(`attempt ${i + 1}: ${String(e)}`, e);\n } else {\n this.#lc.warn?.(`error dropping ${slots}`, e);\n }\n await sleep(1000);\n }\n }\n this.#lc.warn?.(`maximum attempts exceeded dropping ${slots}`);\n }\n}\n\n// Exported for testing.\nexport class Acker {\n #acks: Sink<bigint>;\n #keepaliveTimer: NodeJS.Timeout | undefined;\n\n constructor(acks: Sink<bigint>) {\n this.#acks = acks;\n }\n\n keepalive() {\n // Sets a timeout to send a standby status update in response to\n // a primary keepalive message.\n //\n // https://www.postgresql.org/docs/current/protocol-replication.html#PROTOCOL-REPLICATION-PRIMARY-KEEPALIVE-MESSAGE\n //\n // A primary keepalive message is streamed to the change-streamer as a\n // 'status' message, which in turn responds with an ack. However, in the\n // event that the change-streamer is backed up processing preceding\n // changes, this timeout will fire to send a status update that does not\n // change the confirmed flush position. 
This timeout must be shorter than\n // the `wal_sender_timeout`, which defaults to 60 seconds.\n //\n // https://www.postgresql.org/docs/current/runtime-config-replication.html#GUC-WAL-SENDER-TIMEOUT\n this.#keepaliveTimer ??= setTimeout(() => this.#sendAck(), 1000);\n }\n\n ack(watermark: LexiVersion) {\n this.#sendAck(watermark);\n }\n\n #sendAck(watermark?: LexiVersion) {\n clearTimeout(this.#keepaliveTimer);\n this.#keepaliveTimer = undefined;\n\n // Note: Sending '0/0' means \"keep alive but do not update confirmed_flush_lsn\"\n // https://github.com/postgres/postgres/blob/3edc67d337c2e498dad1cd200e460f7c63e512e6/src/backend/replication/walsender.c#L2457\n const lsn = watermark ? versionFromLexi(watermark) : 0n;\n this.#acks.push(lsn);\n }\n}\n\ntype ReplicationError = {\n lsn: bigint;\n msg: Message;\n err: unknown;\n lastLogTime: number;\n};\n\nconst SET_REPLICA_IDENTITY_DELAY_MS = 500;\n\nclass ChangeMaker {\n readonly #lc: LogContext;\n readonly #shardPrefix: string;\n readonly #shardConfig: InternalShardConfig;\n readonly #initialSchema: PublishedSchema;\n readonly #upstreamDB: PostgresDB;\n\n #replicaIdentityTimer: NodeJS.Timeout | undefined;\n #error: ReplicationError | undefined;\n\n constructor(\n lc: LogContext,\n {appID, shardNum}: ShardID,\n shardConfig: InternalShardConfig,\n initialSchema: PublishedSchema,\n upstreamURI: string,\n ) {\n this.#lc = lc;\n // Note: This matches the prefix used in pg_logical_emit_message() in pg/schema/ddl.ts.\n this.#shardPrefix = `${appID}/${shardNum}`;\n this.#shardConfig = shardConfig;\n this.#initialSchema = initialSchema;\n this.#upstreamDB = pgClient(lc, upstreamURI, {\n ['idle_timeout']: 10, // only used occasionally\n connection: {['application_name']: 'zero-schema-change-detector'},\n });\n }\n\n async makeChanges(lsn: bigint, msg: Message): Promise<ChangeStreamMessage[]> {\n if (this.#error) {\n this.#logError(this.#error);\n return [];\n }\n try {\n return await this.#makeChanges(msg);\n } catch (err) {\n 
this.#error = {lsn, msg, err, lastLogTime: 0};\n this.#logError(this.#error);\n\n const message = `Unable to continue replication from LSN ${fromBigInt(lsn)}`;\n const errorDetails: JSONObject = {error: message};\n if (err instanceof UnsupportedSchemaChangeError) {\n errorDetails.reason = err.description;\n errorDetails.context = err.ddlUpdate.context;\n } else {\n errorDetails.reason = String(err);\n }\n\n // Rollback the current transaction to avoid dangling transactions in\n // downstream processors (i.e. changeLog, replicator).\n return [\n ['rollback', {tag: 'rollback'}],\n ['control', {tag: 'reset-required', message, errorDetails}],\n ];\n }\n }\n\n #logError(error: ReplicationError) {\n const {lsn, msg, err, lastLogTime} = error;\n const now = Date.now();\n\n // Output an error to logs as replication messages continue to be dropped,\n // at most once a minute.\n if (now - lastLogTime > 60_000) {\n this.#lc.error?.(\n `Unable to continue replication from LSN ${fromBigInt(lsn)}: ${String(\n err,\n )}`,\n err instanceof UnsupportedSchemaChangeError\n ? err.ddlUpdate.context\n : // 'content' can be a large byte Buffer. Exclude it from logging output.\n {...msg, content: undefined},\n );\n error.lastLogTime = now;\n }\n }\n\n // oxlint-disable-next-line require-await\n async #makeChanges(msg: Message): Promise<ChangeStreamData[]> {\n switch (msg.tag) {\n case 'begin':\n return [\n [\n 'begin',\n {...msg, json: 's'},\n {commitWatermark: toLexiVersion(must(msg.commitLsn))},\n ],\n ];\n\n case 'delete': {\n if (!(msg.key ?? msg.old)) {\n throw new Error(\n `Invalid DELETE msg (missing key): ${stringify(msg)}`,\n );\n }\n return [\n [\n 'data',\n {\n ...msg,\n relation: withoutColumns(msg.relation),\n // https://www.postgresql.org/docs/current/protocol-logicalrep-message-formats.html#PROTOCOL-LOGICALREP-MESSAGE-FORMATS-DELETE\n key: must(msg.old ?? 
msg.key),\n },\n ],\n ];\n }\n\n case 'update': {\n return [\n [\n 'data',\n {\n ...msg,\n relation: withoutColumns(msg.relation),\n // https://www.postgresql.org/docs/current/protocol-logicalrep-message-formats.html#PROTOCOL-LOGICALREP-MESSAGE-FORMATS-UPDATE\n key: msg.old ?? msg.key,\n },\n ],\n ];\n }\n\n case 'insert':\n return [['data', {...msg, relation: withoutColumns(msg.relation)}]];\n case 'truncate':\n return [\n ['data', {...msg, relations: msg.relations.map(withoutColumns)}],\n ];\n\n case 'message':\n if (msg.prefix !== this.#shardPrefix) {\n this.#lc.debug?.('ignoring message for different shard', msg.prefix);\n return [];\n }\n return this.#handleCustomMessage(msg);\n\n case 'commit':\n return [\n ['commit', msg, {watermark: toLexiVersion(must(msg.commitLsn))}],\n ];\n\n case 'relation':\n return this.#handleRelation(msg);\n case 'type':\n return []; // Nothing need be done for custom types.\n case 'origin':\n // No need to detect replication loops since we are not a\n // PG replication source.\n return [];\n default:\n msg satisfies never;\n throw new Error(`Unexpected message type ${stringify(msg)}`);\n }\n }\n\n #preSchema: PublishedSchema | undefined;\n\n #handleCustomMessage(msg: MessageMessage) {\n const event = this.#parseReplicationEvent(msg.content);\n // Cancel manual schema adjustment timeouts when an upstream schema change\n // is about to happen, so as to avoid interfering / redundant work.\n clearTimeout(this.#replicaIdentityTimer);\n\n if (event.type === 'ddlStart') {\n // Store the schema in order to diff it with a potential ddlUpdate.\n this.#preSchema = event.schema;\n return [];\n }\n // ddlUpdate\n const changes = this.#makeSchemaChanges(\n must(this.#preSchema, `ddlUpdate received without a ddlStart`),\n event,\n ).map(change => ['data', change] satisfies Data);\n\n this.#lc\n .withContext('query', event.context.query)\n .info?.(`${changes.length} schema change(s)`, changes);\n\n const replicaIdentities = 
replicaIdentitiesForTablesWithoutPrimaryKeys(\n event.schema,\n );\n if (replicaIdentities) {\n this.#replicaIdentityTimer = setTimeout(async () => {\n try {\n await replicaIdentities.apply(this.#lc, this.#upstreamDB);\n } catch (err) {\n this.#lc.warn?.(`error setting replica identities`, err);\n }\n }, SET_REPLICA_IDENTITY_DELAY_MS);\n }\n\n return changes;\n }\n\n /**\n * A note on operation order:\n *\n * Postgres will drop related indexes when columns are dropped,\n * but SQLite will error instead (https://sqlite.org/forum/forumpost/2e62dba69f?t=c&hist).\n * The current workaround is to drop indexes first.\n *\n * Also note that although it should not be possible to both rename and\n * add/drop tables/columns in a single statement, the operations are\n * ordered to handle that possibility, by always dropping old entities,\n * then modifying kept entities, and then adding new entities.\n *\n * Thus, the order of replicating DDL updates is:\n * - drop indexes\n * - drop tables\n * - alter tables\n * - drop columns\n * - alter columns\n * - add columns\n * - create tables\n * - create indexes\n *\n * In the future the replication logic should be improved to handle this\n * behavior in SQLite by dropping dependent indexes manually before dropping\n * columns. 
This, for example, would be needed to properly support changing\n * the type of a column that's indexed.\n */\n #makeSchemaChanges(\n preSchema: PublishedSchema,\n update: DdlUpdateEvent,\n ): DataChange[] {\n try {\n const [prevTbl, prevIdx] = specsByID(preSchema);\n const [nextTbl, nextIdx] = specsByID(update.schema);\n const changes: DataChange[] = [];\n\n // Validate the new table schemas\n for (const table of nextTbl.values()) {\n validate(this.#lc, table, update.schema.indexes);\n }\n\n const [droppedIdx, createdIdx] = symmetricDifferences(prevIdx, nextIdx);\n for (const id of droppedIdx) {\n const {schema, name} = must(prevIdx.get(id));\n changes.push({tag: 'drop-index', id: {schema, name}});\n }\n\n // DROP\n const [droppedTbl, createdTbl] = symmetricDifferences(prevTbl, nextTbl);\n for (const id of droppedTbl) {\n const {schema, name} = must(prevTbl.get(id));\n changes.push({tag: 'drop-table', id: {schema, name}});\n }\n // ALTER\n const tables = intersection(prevTbl, nextTbl);\n for (const id of tables) {\n changes.push(\n ...this.#getTableChanges(\n must(prevTbl.get(id)),\n must(nextTbl.get(id)),\n ),\n );\n }\n // CREATE\n for (const id of createdTbl) {\n const spec = must(nextTbl.get(id));\n changes.push({tag: 'create-table', spec});\n }\n\n // Add indexes last since they may reference tables / columns that need\n // to be created first.\n for (const id of createdIdx) {\n const spec = must(nextIdx.get(id));\n changes.push({tag: 'create-index', spec});\n }\n return changes;\n } catch (e) {\n throw new UnsupportedSchemaChangeError(String(e), update, {cause: e});\n }\n }\n\n #getTableChanges(oldTable: TableSpec, newTable: TableSpec): DataChange[] {\n const changes: DataChange[] = [];\n if (\n oldTable.schema !== newTable.schema ||\n oldTable.name !== newTable.name\n ) {\n changes.push({\n tag: 'rename-table',\n old: {schema: oldTable.schema, name: oldTable.name},\n new: {schema: newTable.schema, name: newTable.name},\n });\n }\n const table = {schema: 
newTable.schema, name: newTable.name};\n const oldColumns = columnsByID(oldTable.columns);\n const newColumns = columnsByID(newTable.columns);\n\n // DROP\n const [dropped, added] = symmetricDifferences(oldColumns, newColumns);\n for (const id of dropped) {\n const {name: column} = must(oldColumns.get(id));\n changes.push({tag: 'drop-column', table, column});\n }\n\n // ALTER\n const both = intersection(oldColumns, newColumns);\n for (const id of both) {\n const {name: oldName, ...oldSpec} = must(oldColumns.get(id));\n const {name: newName, ...newSpec} = must(newColumns.get(id));\n // The three things that we care about are:\n // 1. name\n // 2. type\n // 3. not-null\n if (\n oldName !== newName ||\n oldSpec.dataType !== newSpec.dataType ||\n oldSpec.notNull !== newSpec.notNull\n ) {\n changes.push({\n tag: 'update-column',\n table,\n old: {name: oldName, spec: oldSpec},\n new: {name: newName, spec: newSpec},\n });\n }\n }\n\n // ADD\n for (const id of added) {\n const {name, ...spec} = must(newColumns.get(id));\n const column = {name, spec};\n // Validate that the ChangeProcessor will accept the column change.\n mapPostgresToLiteColumn(table.name, column);\n changes.push({tag: 'add-column', table, column});\n }\n return changes;\n }\n\n #parseReplicationEvent(content: Uint8Array) {\n const str =\n content instanceof Buffer\n ? content.toString('utf-8')\n : new TextDecoder().decode(content);\n const json = JSON.parse(str);\n return v.parse(json, replicationEventSchema, 'passthrough');\n }\n\n /**\n * If `ddlDetection === true`, relation messages are irrelevant,\n * as schema changes are detected by event triggers that\n * emit custom messages.\n *\n * For degraded-mode replication (`ddlDetection === false`):\n * 1. query the current published schemas on upstream\n * 2. compare that with the InternalShardConfig.initialSchema\n * 3. compare that with the incoming MessageRelation\n * 4. 
On any discrepancy, throw an UnsupportedSchemaChangeError\n * to halt replication.\n *\n * Note that schemas queried in step [1] will be *post-transaction*\n * schemas, which are not necessarily suitable for actually processing\n * the statements in the transaction being replicated. In other words,\n * this mechanism cannot be used to reliably *replicate* schema changes.\n * However, they serve the purpose determining if schemas have changed.\n */\n async #handleRelation(rel: PostgresRelation): Promise<ChangeStreamData[]> {\n const {publications, ddlDetection} = this.#shardConfig;\n if (ddlDetection) {\n return [];\n }\n const currentSchema = await getPublicationInfo(\n this.#upstreamDB,\n publications,\n );\n const difference = getSchemaDifference(this.#initialSchema, currentSchema);\n if (difference !== null) {\n throw new MissingEventTriggerSupport(difference);\n }\n // Even if the currentSchema is equal to the initialSchema, the\n // MessageRelation itself must be checked to detect transient\n // schema changes within the transaction (e.g. 
adding and dropping\n // a table, or renaming a column and then renaming it back).\n const orel = this.#initialSchema.tables.find(\n t => t.oid === rel.relationOid,\n );\n if (!orel) {\n // Can happen if a table is created and then dropped in the same transaction.\n throw new MissingEventTriggerSupport(\n `relation not in initialSchema: ${stringify(rel)}`,\n );\n }\n if (relationDifferent(orel, rel)) {\n throw new MissingEventTriggerSupport(\n `relation has changed within the transaction: ${stringify(orel)} vs ${stringify(rel)}`,\n );\n }\n return [];\n }\n}\n\nfunction getSchemaDifference(\n a: PublishedSchema,\n b: PublishedSchema,\n): string | null {\n // Note: ignore indexes since changes need not to halt replication\n if (a.tables.length !== b.tables.length) {\n return `tables created or dropped`;\n }\n for (let i = 0; i < a.tables.length; i++) {\n const at = a.tables[i];\n const bt = b.tables[i];\n const difference = getTableDifference(at, bt);\n if (difference) {\n return difference;\n }\n }\n return null;\n}\n\n// ColumnSpec comparator\nconst byColumnPos = (a: [string, ColumnSpec], b: [string, ColumnSpec]) =>\n a[1].pos < b[1].pos ? -1 : a[1].pos > b[1].pos ? 
1 : 0;\n\nfunction getTableDifference(\n a: PublishedTableSpec,\n b: PublishedTableSpec,\n): string | null {\n if (a.oid !== b.oid || a.schema !== b.schema || a.name !== b.name) {\n return `Table \"${a.name}\" differs from table \"${b.name}\"`;\n }\n if (!deepEqual(a.primaryKey, b.primaryKey)) {\n return `Primary key of table \"${a.name}\" has changed`;\n }\n const acols = Object.entries(a.columns).sort(byColumnPos);\n const bcols = Object.entries(b.columns).sort(byColumnPos);\n if (\n acols.length !== bcols.length ||\n acols.some(([aname, acol], i) => {\n const [bname, bcol] = bcols[i];\n return (\n aname !== bname ||\n acol.pos !== bcol.pos ||\n acol.typeOID !== bcol.typeOID ||\n acol.notNull !== bcol.notNull\n );\n })\n ) {\n return `Columns of table \"${a.name}\" have changed`;\n }\n return null;\n}\n\nexport function relationDifferent(a: PublishedTableSpec, b: PostgresRelation) {\n if (a.oid !== b.relationOid || a.schema !== b.schema || a.name !== b.name) {\n return true;\n }\n if (\n // The MessageRelation's `keyColumns` field contains the columns in column\n // declaration order, whereas the PublishedTableSpec's `primaryKey`\n // contains the columns in primary key (i.e. index) order. 
Do an\n // order-agnostic compare here since it is not possible to detect\n // key-order changes from the MessageRelation message alone.\n b.replicaIdentity === 'default' &&\n !equals(new Set(a.primaryKey), new Set(b.keyColumns))\n ) {\n return true;\n }\n const acols = Object.entries(a.columns).sort(byColumnPos);\n const bcols = b.columns;\n return (\n acols.length !== bcols.length ||\n acols.some(([aname, acol], i) => {\n const bcol = bcols[i];\n return aname !== bcol.name || acol.typeOID !== bcol.typeOid;\n })\n );\n}\n\nfunction translateError(e: unknown): Error {\n if (!(e instanceof Error)) {\n return new Error(String(e));\n }\n if (e instanceof postgres.PostgresError && e.code === PG_ADMIN_SHUTDOWN) {\n return new ShutdownSignal(e);\n }\n return e;\n}\nconst idString = (id: Identifier) => `${id.schema}.${id.name}`;\n\nfunction specsByID(published: PublishedSchema) {\n return [\n // It would have been nice to use a CustomKeyMap here, but we rely on set-utils\n // operations which use plain Sets.\n new Map(published.tables.map(t => [t.oid, t])),\n new Map(published.indexes.map(i => [idString(i), i])),\n ] as const;\n}\n\nfunction columnsByID(\n columns: Record<string, ColumnSpec>,\n): Map<number, ColumnSpec & {name: string}> {\n const colsByID = new Map<number, ColumnSpec & {name: string}>();\n for (const [name, spec] of Object.entries(columns)) {\n // The `pos` field is the `attnum` in `pg_attribute`, which is a stable\n // identifier for the column in this table (i.e. 
never reused).\n colsByID.set(spec.pos, {...spec, name});\n }\n return colsByID;\n}\n\n// Avoid sending the `columns` from the Postgres MessageRelation message.\n// They are not used downstream and the message can be large.\nfunction withoutColumns(relation: PostgresRelation): MessageRelation {\n const {columns: _, ...rest} = relation;\n return rest;\n}\n\nclass UnsupportedSchemaChangeError extends Error {\n readonly name = 'UnsupportedSchemaChangeError';\n readonly description: string;\n readonly ddlUpdate: DdlUpdateEvent;\n\n constructor(\n description: string,\n ddlUpdate: DdlUpdateEvent,\n options?: ErrorOptions,\n ) {\n super(\n `Replication halted. Resync the replica to recover: ${description}`,\n options,\n );\n this.description = description;\n this.ddlUpdate = ddlUpdate;\n }\n}\n\nclass MissingEventTriggerSupport extends Error {\n readonly name = 'MissingEventTriggerSupport';\n\n constructor(msg: string) {\n super(\n `${msg}. Schema changes cannot be reliably replicated without event trigger support.`,\n );\n }\n}\n\n// TODO(0xcadams): should this be a ProtocolError?\nclass ShutdownSignal extends AbortError {\n readonly name = 'ShutdownSignal';\n\n constructor(cause: unknown) {\n super(\n 'shutdown signal received (e.g. 
another zero-cache taking over the replication stream)',\n {\n cause,\n },\n );\n }\n}\n"],"names":["v.parse"],"mappings":";;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AA0FA,eAAsB,+BACpB,IACA,aACA,OACA,eACA,aAC6E;AAC7E,QAAM;AAAA,IACJ;AAAA,IACA,WAAW,MAAM,KAAK,IAAI,MAAM,QAAQ;AAAA,IACxC;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EAAA;AAGF,QAAM,UAAU,IAAI,SAAS,IAAI,aAAa;AAC9C,QAAM,oBAAoB,qBAAqB,IAAI,gBAAgB,OAAO,CAAC;AAC3E,UAAQ,MAAA;AAIR,QAAM,KAAK,SAAS,IAAI,WAAW;AACnC,MAAI;AACF,UAAM,kBAAkB,MAAM;AAAA,MAC5B;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IAAA;AAGF,UAAM,eAAe,IAAI;AAAA,MACvB;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IAAA;AAGF,WAAO,EAAC,mBAAmB,aAAA;AAAA,EAC7B,UAAA;AACE,UAAM,GAAG,IAAA;AAAA,EACX;AACF;AAEA,eAAe,uBACb,IACA,KACA,OACA,EAAC,gBAAgB,cAAc,cAC/B;AAEA,QAAM,kBAAkB,IAAI,KAAK,OAAO,cAAc;AAEtD,QAAM,kBAAkB,MAAM;AAAA,IAC5B;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EAAA;AAEF,MAAI,CAAC,iBAAiB;AACpB,UAAM,IAAI;AAAA,MACR,8CAA8C,cAAc;AAAA,IAAA;AAAA,EAEhE;AAGA,QAAM,YAAY,CAAC,GAAG,MAAM,YAAY,EAAE,KAAA;AAC1C,QAAM,aAAa,gBAAgB,aAChC,OAAO,CAAA,MAAK,CAAC,EAAE,WAAW,0BAA0B,KAAK,CAAC,CAAC,EAC3D,KAAA;AACH,MAAI,CAAC,UAAU,WAAW,UAAU,GAAG;AACrC,OAAG,OAAO,8CAA8C,SAAS,GAAG;AACpE,UAAM,IAAI,OAAO,UAAU,MAAM,OAAO,MAAM,QAAQ,CAAC;AACvD,UAAM,IAAI;AAAA,MACR,2BAA2B,SAAS,4CAChB,UAAU;AAAA,IAAA;AAAA,EAElC;AAKA,MAAI,CAAC,UAAU,gBAAgB,cAAc,UAAU,GAAG;AACxD,UAAM,IAAI;AAAA,MACR,0BAA0B,gBAAgB,YAAY,2CAClB,UAAU;AAAA,IAAA;AAAA,EAElD;AAGA,QAAM,SAAS,MAAM;AAAA,0DACmC,IAAI,UAAU,CAAC;AAAA,IACrE,OAAA;AACF,MAAI,OAAO,WAAW,WAAW,QAAQ;AACvC,UAAM,IAAI;AAAA,MACR,0BAA0B,OAAO,KAAA,CAAM,iDACL,UAAU;AAAA,IAAA;AAAA,EAEhD;AAEA,QAAM,EAAC,SAAQ;AACf,QAAM,SAAS,MAAM;AAAA;AAAA,0BAIG,IAAI;AAC5B,MAAI,OAAO,WAAW,GAAG;AACvB,UAAM,IAAI,gBAAgB,oBAAoB,IAAI,aAAa;AAAA,EACjE;AACA,QAAM,CAAC,EAAC,YAAY,UAAA,CAAU,IAAI;AAClC,MAAI,eAAe,QAAQ,cAAc,QAAQ;AAC/C,UAAM,IAAI;AAAA,MACR,oBAAoB,IAAI;AAAA,IAAA;AAAA,EAE5B;AACA,SAAO;AACT;AAMA,MAAM,qBAA6C;AAAA,EACxC;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EAET,YACE,IACA,aACA,OACA,SACA;AACA,SAAK,MAAM,GAAG,YAAY,aAAa,eAAe;AACtD,SAAK,eAAe;AACpB,SAAK,SAAS;AACd,SAAK,WAAW;AA
AA,EAClB;AAAA,EAEA,MAAM,YAAY,iBAAgD;AAChE,UAAM,KAAK,SAAS,KAAK,KAAK,KAAK,cAAc,CAAA,GAAI,gBAAgB;AACrE,UAAM,EAAC,SAAQ,KAAK;AAEpB,QAAI,UAAU;AACd,QAAI;AACF,OAAC,EAAC,QAAA,IAAW,MAAM,KAAK;AAAA,QACtB;AAAA,QACA;AAAA,MAAA;AAEF,YAAM,SAAS,MAAM,uBAAuB,IAAI,KAAK,MAAM;AAC3D,WAAK,IAAI,OAAO,+BAA+B,IAAI,EAAE;AACrD,aAAO,MAAM,KAAK,aAAa,IAAI,MAAM,iBAAiB,MAAM;AAAA,IAClE,UAAA;AACE,WAAK,QAAQ,KAAK,MAAM,GAAG,KAAK;AAAA,IAClC;AAAA,EACF;AAAA,EAEA,MAAM,aACJ,IACA,MACA,iBACA,aACuB;AACvB,UAAM,cAAc,SAAS,eAAe;AAC5C,UAAM,EAAC,UAAU,KAAA,IAAQ,MAAM;AAAA,MAC7B,KAAK;AAAA,MACL;AAAA,MACA;AAAA,MACA,CAAC,GAAG,YAAY,YAAY;AAAA,MAC5B,gBAAgB,WAAW;AAAA,IAAA;AAG7B,UAAM,UAAU,aAAa,OAA4B;AAAA,MACvD,SAAS,MAAM,SAAS,OAAA;AAAA,IAAO,CAChC;AACD,UAAM,QAAQ,IAAI,MAAM,IAAI;AAE5B,UAAM,cAAc,IAAI;AAAA,MACtB,KAAK;AAAA,MACL,KAAK;AAAA,MACL;AAAA,MACA,KAAK,SAAS;AAAA,MACd,KAAK;AAAA,IAAA;AAGP,UAAM,iBAAkB;AACtB,UAAI;AACF,yBAAiB,CAAC,KAAK,GAAG,KAAK,UAAU;AACvC,cAAI,IAAI,QAAQ,aAAa;AAC3B,oBAAQ,KAAK,CAAC,UAAU,KAAK,EAAC,WAAW,cAAc,GAAG,EAAA,CAAE,CAAC;AAC7D;AAAA,UACF;AACA,cAAI;AACJ,qBAAW,UAAU,MAAM,YAAY,YAAY,KAAK,GAAG,GAAG;AAC5D,mBAAO,QAAQ,KAAK,MAAM;AAAA,UAC5B;AACA,gBAAM,MAAM;AAAA,QACd;AAAA,MACF,SAAS,GAAG;AACV,gBAAQ,KAAK,eAAe,CAAC,CAAC;AAAA,MAChC;AAAA,IACF,GAAA;AAEA,SAAK,IAAI;AAAA,MACP,8BAA8B,IAAI,SAAS,eAAe,qBACxD,KAAK,SAAS,OAChB;AAAA,IAAA;AAGF,WAAO;AAAA,MACL;AAAA,MACA,MAAM,EAAC,MAAM,CAAA,WAAU,MAAM,IAAI,OAAO,CAAC,EAAE,SAAS,EAAA;AAAA,IAAC;AAAA,EAEzD;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAWA,MAAM,wCACJ,KACA,YACmC;AACnC,UAAM,iBAAiB,0BAA0B,KAAK,MAAM;AAC5D,UAAM,iBAAiB,sBAAsB,KAAK,MAAM;AAIxD,UAAM,SAAS,MAAM;AAAA;AAAA;AAAA,8BAGK,cAAc,mBAAmB,cAAc;AAAA,+BAC9C,UAAU;AACrC,QAAI,OAAO,WAAW,GAAG;AACvB,YAAM,IAAI;AAAA,QACR,oBAAoB,UAAU;AAAA,MAAA;AAAA,IAIlC;AAEA,UAAM,gBAAgB,GAAG,eAAe,KAAK,MAAM,CAAC;AACpD,UAAM,kBAAkB,IAAI,aAAa,CAAC,kBAAkB,UAAU;AAEtE,UAAM,OAAO,OAAO,OAAO,CAAC,EAAC,IAAA,MAAS,QAAQ,IAAI,EAAE,IAAI,CAAC,EAAC,IAAA,MAAS,GAAG;AACtE,QAAI,KAAK,QAAQ;AACf,WAAK,IAAI,OAAO,uBAAuB,IAAI,eAAe;AAAA,IAC5D;AACA,UAAM,aAAa,OAChB,OAAO,CAAC,EAAC,KAAA,MAAU,SAAS,UAAU,E
ACtC,IAAI,CAAC,EAAC,KAAA,MAAU,IAAI;AACvB,WAAO;AAAA,MACL,SAAS,WAAW,SAChB,KAAK,sBAAsB,KAAK,UAAU,IAC1C;AAAA,IAAA;AAAA,EAER;AAAA,EAEA,MAAM,sBAAsB,KAAiB,OAAiB;AAC5D,SAAK,IAAI,OAAO,sCAAsC,KAAK,EAAE;AAC7D,aAAS,IAAI,GAAG,IAAI,GAAG,KAAK;AAC1B,UAAI;AACF,cAAM;AAAA;AAAA,iCAEmB,IAAI,KAAK,CAAC;AAAA;AAEnC,aAAK,IAAI,OAAO,wBAAwB,KAAK,EAAE;AAC/C;AAAA,MACF,SAAS,GAAG;AAEV,YACE,aAAa,SAAS,iBACtB,EAAE,SAAS,kBACX;AAIA,eAAK,IAAI,QAAQ,WAAW,IAAI,CAAC,KAAK,OAAO,CAAC,CAAC,IAAI,CAAC;AAAA,QACtD,OAAO;AACL,eAAK,IAAI,OAAO,kBAAkB,KAAK,IAAI,CAAC;AAAA,QAC9C;AACA,cAAM,MAAM,GAAI;AAAA,MAClB;AAAA,IACF;AACA,SAAK,IAAI,OAAO,sCAAsC,KAAK,EAAE;AAAA,EAC/D;AACF;AAGO,MAAM,MAAM;AAAA,EACjB;AAAA,EACA;AAAA,EAEA,YAAY,MAAoB;AAC9B,SAAK,QAAQ;AAAA,EACf;AAAA,EAEA,YAAY;AAcV,SAAK,oBAAoB,WAAW,MAAM,KAAK,SAAA,GAAY,GAAI;AAAA,EACjE;AAAA,EAEA,IAAI,WAAwB;AAC1B,SAAK,SAAS,SAAS;AAAA,EACzB;AAAA,EAEA,SAAS,WAAyB;AAChC,iBAAa,KAAK,eAAe;AACjC,SAAK,kBAAkB;AAIvB,UAAM,MAAM,YAAY,gBAAgB,SAAS,IAAI;AACrD,SAAK,MAAM,KAAK,GAAG;AAAA,EACrB;AACF;AASA,MAAM,gCAAgC;AAEtC,MAAM,YAAY;AAAA,EACP;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EAET;AAAA,EACA;AAAA,EAEA,YACE,IACA,EAAC,OAAO,YACR,aACA,eACA,aACA;AACA,SAAK,MAAM;AAEX,SAAK,eAAe,GAAG,KAAK,IAAI,QAAQ;AACxC,SAAK,eAAe;AACpB,SAAK,iBAAiB;AACtB,SAAK,cAAc,SAAS,IAAI,aAAa;AAAA,MAC3C,CAAC,cAAc,GAAG;AAAA;AAAA,MAClB,YAAY,EAAC,CAAC,kBAAkB,GAAG,8BAAA;AAAA,IAA6B,CACjE;AAAA,EACH;AAAA,EAEA,MAAM,YAAY,KAAa,KAA8C;AAC3E,QAAI,KAAK,QAAQ;AACf,WAAK,UAAU,KAAK,MAAM;AAC1B,aAAO,CAAA;AAAA,IACT;AACA,QAAI;AACF,aAAO,MAAM,KAAK,aAAa,GAAG;AAAA,IACpC,SAAS,KAAK;AACZ,WAAK,SAAS,EAAC,KAAK,KAAK,KAAK,aAAa,EAAA;AAC3C,WAAK,UAAU,KAAK,MAAM;AAE1B,YAAM,UAAU,2CAA2C,WAAW,GAAG,CAAC;AAC1E,YAAM,eAA2B,EAAC,OAAO,QAAA;AACzC,UAAI,eAAe,8BAA8B;AAC/C,qBAAa,SAAS,IAAI;AAC1B,qBAAa,UAAU,IAAI,UAAU;AAAA,MACvC,OAAO;AACL,qBAAa,SAAS,OAAO,GAAG;AAAA,MAClC;AAIA,aAAO;AAAA,QACL,CAAC,YAAY,EAAC,KAAK,YAAW;AAAA,QAC9B,CAAC,WAAW,EAAC,KAAK,kBAAkB,SAAS,cAAa;AAAA,MAAA;AAAA,IAE9D;AAAA,EACF;AAAA,EAEA,UAAU,OAAyB;AACjC,UAAM,EAAC,KAAK,KAAK,KAAK,gBAAe;AACrC,UAAM,MAAM,KAAK,IAAA;AAIjB,QAAI,MAAM,cAAc,
KAAQ;AAC9B,WAAK,IAAI;AAAA,QACP,2CAA2C,WAAW,GAAG,CAAC,KAAK;AAAA,UAC7D;AAAA,QAAA,CACD;AAAA,QACD,eAAe,+BACX,IAAI,UAAU;AAAA;AAAA,UAEd,EAAC,GAAG,KAAK,SAAS,OAAA;AAAA;AAAA,MAAS;AAEjC,YAAM,cAAc;AAAA,IACtB;AAAA,EACF;AAAA;AAAA,EAGA,MAAM,aAAa,KAA2C;AAC5D,YAAQ,IAAI,KAAA;AAAA,MACV,KAAK;AACH,eAAO;AAAA,UACL;AAAA,YACE;AAAA,YACA,EAAC,GAAG,KAAK,MAAM,IAAA;AAAA,YACf,EAAC,iBAAiB,cAAc,KAAK,IAAI,SAAS,CAAC,EAAA;AAAA,UAAC;AAAA,QACtD;AAAA,MAGJ,KAAK,UAAU;AACb,YAAI,EAAE,IAAI,OAAO,IAAI,MAAM;AACzB,gBAAM,IAAI;AAAA,YACR,qCAAqC,UAAU,GAAG,CAAC;AAAA,UAAA;AAAA,QAEvD;AACA,eAAO;AAAA,UACL;AAAA,YACE;AAAA,YACA;AAAA,cACE,GAAG;AAAA,cACH,UAAU,eAAe,IAAI,QAAQ;AAAA;AAAA,cAErC,KAAK,KAAK,IAAI,OAAO,IAAI,GAAG;AAAA,YAAA;AAAA,UAC9B;AAAA,QACF;AAAA,MAEJ;AAAA,MAEA,KAAK,UAAU;AACb,eAAO;AAAA,UACL;AAAA,YACE;AAAA,YACA;AAAA,cACE,GAAG;AAAA,cACH,UAAU,eAAe,IAAI,QAAQ;AAAA;AAAA,cAErC,KAAK,IAAI,OAAO,IAAI;AAAA,YAAA;AAAA,UACtB;AAAA,QACF;AAAA,MAEJ;AAAA,MAEA,KAAK;AACH,eAAO,CAAC,CAAC,QAAQ,EAAC,GAAG,KAAK,UAAU,eAAe,IAAI,QAAQ,EAAA,CAAE,CAAC;AAAA,MACpE,KAAK;AACH,eAAO;AAAA,UACL,CAAC,QAAQ,EAAC,GAAG,KAAK,WAAW,IAAI,UAAU,IAAI,cAAc,EAAA,CAAE;AAAA,QAAA;AAAA,MAGnE,KAAK;AACH,YAAI,IAAI,WAAW,KAAK,cAAc;AACpC,eAAK,IAAI,QAAQ,wCAAwC,IAAI,MAAM;AACnE,iBAAO,CAAA;AAAA,QACT;AACA,eAAO,KAAK,qBAAqB,GAAG;AAAA,MAEtC,KAAK;AACH,eAAO;AAAA,UACL,CAAC,UAAU,KAAK,EAAC,WAAW,cAAc,KAAK,IAAI,SAAS,CAAC,EAAA,CAAE;AAAA,QAAA;AAAA,MAGnE,KAAK;AACH,eAAO,KAAK,gBAAgB,GAAG;AAAA,MACjC,KAAK;AACH,eAAO,CAAA;AAAA;AAAA,MACT,KAAK;AAGH,eAAO,CAAA;AAAA,MACT;AAEE,cAAM,IAAI,MAAM,2BAA2B,UAAU,GAAG,CAAC,EAAE;AAAA,IAAA;AAAA,EAEjE;AAAA,EAEA;AAAA,EAEA,qBAAqB,KAAqB;AACxC,UAAM,QAAQ,KAAK,uBAAuB,IAAI,OAAO;AAGrD,iBAAa,KAAK,qBAAqB;AAEvC,QAAI,MAAM,SAAS,YAAY;AAE7B,WAAK,aAAa,MAAM;AACxB,aAAO,CAAA;AAAA,IACT;AAEA,UAAM,UAAU,KAAK;AAAA,MACnB,KAAK,KAAK,YAAY,uCAAuC;AAAA,MAC7D;AAAA,IAAA,EACA,IAAI,CAAA,WAAU,CAAC,QAAQ,MAAM,CAAgB;AAE/C,SAAK,IACF,YAAY,SAAS,MAAM,QAAQ,KAAK,EACxC,OAAO,GAAG,QAAQ,MAAM,qBAAqB,OAAO;AAEvD,UAAM,oBAAoB;AAAA,MACxB,MAAM;AAAA,IAAA;AAER,QAAI,mBAAmB;AACrB,WAAK,wBAAwB,WAAW,YAAY;AAClD,YAAI;AACF,gBAAM,kBAAkB,MA
AM,KAAK,KAAK,KAAK,WAAW;AAAA,QAC1D,SAAS,KAAK;AACZ,eAAK,IAAI,OAAO,oCAAoC,GAAG;AAAA,QACzD;AAAA,MACF,GAAG,6BAA6B;AAAA,IAClC;AAEA,WAAO;AAAA,EACT;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EA6BA,mBACE,WACA,QACc;AACd,QAAI;AACF,YAAM,CAAC,SAAS,OAAO,IAAI,UAAU,SAAS;AAC9C,YAAM,CAAC,SAAS,OAAO,IAAI,UAAU,OAAO,MAAM;AAClD,YAAM,UAAwB,CAAA;AAG9B,iBAAW,SAAS,QAAQ,UAAU;AACpC,iBAAS,KAAK,KAAK,OAAO,OAAO,OAAO,OAAO;AAAA,MACjD;AAEA,YAAM,CAAC,YAAY,UAAU,IAAI,qBAAqB,SAAS,OAAO;AACtE,iBAAW,MAAM,YAAY;AAC3B,cAAM,EAAC,QAAQ,KAAA,IAAQ,KAAK,QAAQ,IAAI,EAAE,CAAC;AAC3C,gBAAQ,KAAK,EAAC,KAAK,cAAc,IAAI,EAAC,QAAQ,KAAA,GAAM;AAAA,MACtD;AAGA,YAAM,CAAC,YAAY,UAAU,IAAI,qBAAqB,SAAS,OAAO;AACtE,iBAAW,MAAM,YAAY;AAC3B,cAAM,EAAC,QAAQ,KAAA,IAAQ,KAAK,QAAQ,IAAI,EAAE,CAAC;AAC3C,gBAAQ,KAAK,EAAC,KAAK,cAAc,IAAI,EAAC,QAAQ,KAAA,GAAM;AAAA,MACtD;AAEA,YAAM,SAAS,aAAa,SAAS,OAAO;AAC5C,iBAAW,MAAM,QAAQ;AACvB,gBAAQ;AAAA,UACN,GAAG,KAAK;AAAA,YACN,KAAK,QAAQ,IAAI,EAAE,CAAC;AAAA,YACpB,KAAK,QAAQ,IAAI,EAAE,CAAC;AAAA,UAAA;AAAA,QACtB;AAAA,MAEJ;AAEA,iBAAW,MAAM,YAAY;AAC3B,cAAM,OAAO,KAAK,QAAQ,IAAI,EAAE,CAAC;AACjC,gBAAQ,KAAK,EAAC,KAAK,gBAAgB,MAAK;AAAA,MAC1C;AAIA,iBAAW,MAAM,YAAY;AAC3B,cAAM,OAAO,KAAK,QAAQ,IAAI,EAAE,CAAC;AACjC,gBAAQ,KAAK,EAAC,KAAK,gBAAgB,MAAK;AAAA,MAC1C;AACA,aAAO;AAAA,IACT,SAAS,GAAG;AACV,YAAM,IAAI,6BAA6B,OAAO,CAAC,GAAG,QAAQ,EAAC,OAAO,GAAE;AAAA,IACtE;AAAA,EACF;AAAA,EAEA,iBAAiB,UAAqB,UAAmC;AACvE,UAAM,UAAwB,CAAA;AAC9B,QACE,SAAS,WAAW,SAAS,UAC7B,SAAS,SAAS,SAAS,MAC3B;AACA,cAAQ,KAAK;AAAA,QACX,KAAK;AAAA,QACL,KAAK,EAAC,QAAQ,SAAS,QAAQ,MAAM,SAAS,KAAA;AAAA,QAC9C,KAAK,EAAC,QAAQ,SAAS,QAAQ,MAAM,SAAS,KAAA;AAAA,MAAI,CACnD;AAAA,IACH;AACA,UAAM,QAAQ,EAAC,QAAQ,SAAS,QAAQ,MAAM,SAAS,KAAA;AACvD,UAAM,aAAa,YAAY,SAAS,OAAO;AAC/C,UAAM,aAAa,YAAY,SAAS,OAAO;AAG/C,UAAM,CAAC,SAAS,KAAK,IAAI,qBAAqB,YAAY,UAAU;AACpE,eAAW,MAAM,SAAS;AACxB,YAAM,EAAC,MAAM,OAAA,IAAU,KAAK,WAAW,IAAI,EAAE,CAAC;AAC9C,cAAQ,KAAK,EAAC,KAAK,eAAe,OAAO,QAAO;AAAA,IAClD;AAGA,UAAM,OAAO,aAAa,YAAY,UAAU;AAChD,eAAW,MAAM,
MAAM;AACrB,YAAM,EAAC,MAAM,SAAS,GAAG,QAAA,IAAW,KAAK,WAAW,IAAI,EAAE,CAAC;AAC3D,YAAM,EAAC,MAAM,SAAS,GAAG,QAAA,IAAW,KAAK,WAAW,IAAI,EAAE,CAAC;AAK3D,UACE,YAAY,WACZ,QAAQ,aAAa,QAAQ,YAC7B,QAAQ,YAAY,QAAQ,SAC5B;AACA,gBAAQ,KAAK;AAAA,UACX,KAAK;AAAA,UACL;AAAA,UACA,KAAK,EAAC,MAAM,SAAS,MAAM,QAAA;AAAA,UAC3B,KAAK,EAAC,MAAM,SAAS,MAAM,QAAA;AAAA,QAAO,CACnC;AAAA,MACH;AAAA,IACF;AAGA,eAAW,MAAM,OAAO;AACtB,YAAM,EAAC,MAAM,GAAG,KAAA,IAAQ,KAAK,WAAW,IAAI,EAAE,CAAC;AAC/C,YAAM,SAAS,EAAC,MAAM,KAAA;AAEtB,8BAAwB,MAAM,MAAM,MAAM;AAC1C,cAAQ,KAAK,EAAC,KAAK,cAAc,OAAO,QAAO;AAAA,IACjD;AACA,WAAO;AAAA,EACT;AAAA,EAEA,uBAAuB,SAAqB;AAC1C,UAAM,MACJ,mBAAmB,SACf,QAAQ,SAAS,OAAO,IACxB,IAAI,cAAc,OAAO,OAAO;AACtC,UAAM,OAAO,KAAK,MAAM,GAAG;AAC3B,WAAOA,MAAQ,MAAM,wBAAwB,aAAa;AAAA,EAC5D;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAoBA,MAAM,gBAAgB,KAAoD;AACxE,UAAM,EAAC,cAAc,aAAA,IAAgB,KAAK;AAC1C,QAAI,cAAc;AAChB,aAAO,CAAA;AAAA,IACT;AACA,UAAM,gBAAgB,MAAM;AAAA,MAC1B,KAAK;AAAA,MACL;AAAA,IAAA;AAEF,UAAM,aAAa,oBAAoB,KAAK,gBAAgB,aAAa;AACzE,QAAI,eAAe,MAAM;AACvB,YAAM,IAAI,2BAA2B,UAAU;AAAA,IACjD;AAKA,UAAM,OAAO,KAAK,eAAe,OAAO;AAAA,MACtC,CAAA,MAAK,EAAE,QAAQ,IAAI;AAAA,IAAA;AAErB,QAAI,CAAC,MAAM;AAET,YAAM,IAAI;AAAA,QACR,kCAAkC,UAAU,GAAG,CAAC;AAAA,MAAA;AAAA,IAEpD;AACA,QAAI,kBAAkB,MAAM,GAAG,GAAG;AAChC,YAAM,IAAI;AAAA,QACR,gDAAgD,UAAU,IAAI,CAAC,OAAO,UAAU,GAAG,CAAC;AAAA,MAAA;AAAA,IAExF;AACA,WAAO,CAAA;AAAA,EACT;AACF;AAEA,SAAS,oBACP,GACA,GACe;AAEf,MAAI,EAAE,OAAO,WAAW,EAAE,OAAO,QAAQ;AACvC,WAAO;AAAA,EACT;AACA,WAAS,IAAI,GAAG,IAAI,EAAE,OAAO,QAAQ,KAAK;AACxC,UAAM,KAAK,EAAE,OAAO,CAAC;AACrB,UAAM,KAAK,EAAE,OAAO,CAAC;AACrB,UAAM,aAAa,mBAAmB,IAAI,EAAE;AAC5C,QAAI,YAAY;AACd,aAAO;AAAA,IACT;AAAA,EACF;AACA,SAAO;AACT;AAGA,MAAM,cAAc,CAAC,GAAyB,MAC5C,EAAE,CAAC,EAAE,MAAM,EAAE,CAAC,EAAE,MAAM,KAAK,EAAE,CAAC,EAAE,MAAM,EAAE,CAAC,EAAE,MAAM,IAAI;AAEvD,SAAS,mBACP,GACA,GACe;AACf,MAAI,EAAE,QAAQ,EAAE,OAAO,EAAE,WAAW,EAAE,UAAU,EAAE,SAAS,EAAE,MAAM;AACjE,WAAO,UAAU,EAAE,IAAI,yBAAyB,EAAE,IAAI;AAAA,EACxD;AACA,MAAI,CAAC,UAAU,EAAE,YAAY,
EAAE,UAAU,GAAG;AAC1C,WAAO,yBAAyB,EAAE,IAAI;AAAA,EACxC;AACA,QAAM,QAAQ,OAAO,QAAQ,EAAE,OAAO,EAAE,KAAK,WAAW;AACxD,QAAM,QAAQ,OAAO,QAAQ,EAAE,OAAO,EAAE,KAAK,WAAW;AACxD,MACE,MAAM,WAAW,MAAM,UACvB,MAAM,KAAK,CAAC,CAAC,OAAO,IAAI,GAAG,MAAM;AAC/B,UAAM,CAAC,OAAO,IAAI,IAAI,MAAM,CAAC;AAC7B,WACE,UAAU,SACV,KAAK,QAAQ,KAAK,OAClB,KAAK,YAAY,KAAK,WACtB,KAAK,YAAY,KAAK;AAAA,EAE1B,CAAC,GACD;AACA,WAAO,qBAAqB,EAAE,IAAI;AAAA,EACpC;AACA,SAAO;AACT;AAEO,SAAS,kBAAkB,GAAuB,GAAqB;AAC5E,MAAI,EAAE,QAAQ,EAAE,eAAe,EAAE,WAAW,EAAE,UAAU,EAAE,SAAS,EAAE,MAAM;AACzE,WAAO;AAAA,EACT;AACA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,IAME,EAAE,oBAAoB,aACtB,CAAC,OAAO,IAAI,IAAI,EAAE,UAAU,GAAG,IAAI,IAAI,EAAE,UAAU,CAAC;AAAA,IACpD;AACA,WAAO;AAAA,EACT;AACA,QAAM,QAAQ,OAAO,QAAQ,EAAE,OAAO,EAAE,KAAK,WAAW;AACxD,QAAM,QAAQ,EAAE;AAChB,SACE,MAAM,WAAW,MAAM,UACvB,MAAM,KAAK,CAAC,CAAC,OAAO,IAAI,GAAG,MAAM;AAC/B,UAAM,OAAO,MAAM,CAAC;AACpB,WAAO,UAAU,KAAK,QAAQ,KAAK,YAAY,KAAK;AAAA,EACtD,CAAC;AAEL;AAEA,SAAS,eAAe,GAAmB;AACzC,MAAI,EAAE,aAAa,QAAQ;AACzB,WAAO,IAAI,MAAM,OAAO,CAAC,CAAC;AAAA,EAC5B;AACA,MAAI,aAAa,SAAS,iBAAiB,EAAE,SAAS,mBAAmB;AACvE,WAAO,IAAI,eAAe,CAAC;AAAA,EAC7B;AACA,SAAO;AACT;AACA,MAAM,WAAW,CAAC,OAAmB,GAAG,GAAG,MAAM,IAAI,GAAG,IAAI;AAE5D,SAAS,UAAU,WAA4B;AAC7C,SAAO;AAAA;AAAA;AAAA,IAGL,IAAI,IAAI,UAAU,OAAO,IAAI,CAAA,MAAK,CAAC,EAAE,KAAK,CAAC,CAAC,CAAC;AAAA,IAC7C,IAAI,IAAI,UAAU,QAAQ,IAAI,CAAA,MAAK,CAAC,SAAS,CAAC,GAAG,CAAC,CAAC,CAAC;AAAA,EAAA;AAExD;AAEA,SAAS,YACP,SAC0C;AAC1C,QAAM,+BAAe,IAAA;AACrB,aAAW,CAAC,MAAM,IAAI,KAAK,OAAO,QAAQ,OAAO,GAAG;AAGlD,aAAS,IAAI,KAAK,KAAK,EAAC,GAAG,MAAM,MAAK;AAAA,EACxC;AACA,SAAO;AACT;AAIA,SAAS,eAAe,UAA6C;AACnE,QAAM,EAAC,SAAS,GAAG,GAAG,SAAQ;AAC9B,SAAO;AACT;AAEA,MAAM,qCAAqC,MAAM;AAAA,EACtC,OAAO;AAAA,EACP;AAAA,EACA;AAAA,EAET,YACE,aACA,WACA,SACA;AACA;AAAA,MACE,sDAAsD,WAAW;AAAA,MACjE;AAAA,IAAA;AAEF,SAAK,cAAc;AACnB,SAAK,YAAY;AAAA,EACnB;AACF;AAEA,MAAM,mCAAmC,MAAM;AAAA,EACpC,OAAO;AAAA,EAEhB,YAAY,KAAa;AACvB;AAAA,MACE,GAAG,GAAG;AAAA,IAAA;AAAA,EAEV;AACF;AAGA,MAAM,uBAAuB,WAAW;AAAA,EAC7B,OAAO;AAAA,EAEhB,YAAY,OAAgB;AAC1B;AAAA,MACE;AAAA,MACA;
AAAA,QACE;AAAA,MAAA;AAAA,IACF;AAAA,EAEJ;AACF;"}
1
+ {"version":3,"file":"change-source.js","sources":["../../../../../../../zero-cache/src/services/change-source/pg/change-source.ts"],"sourcesContent":["import {\n PG_ADMIN_SHUTDOWN,\n PG_OBJECT_IN_USE,\n} from '@drdgvhbh/postgres-error-codes';\nimport type {LogContext} from '@rocicorp/logger';\nimport postgres from 'postgres';\nimport {AbortError} from '../../../../../shared/src/abort-error.ts';\nimport {areEqual} from '../../../../../shared/src/arrays.ts';\nimport {unreachable} from '../../../../../shared/src/asserts.ts';\nimport {stringify} from '../../../../../shared/src/bigint-json.ts';\nimport {deepEqual} from '../../../../../shared/src/json.ts';\nimport {must} from '../../../../../shared/src/must.ts';\nimport {promiseVoid} from '../../../../../shared/src/resolved-promises.ts';\nimport {\n equals,\n intersection,\n symmetricDifferences,\n} from '../../../../../shared/src/set-utils.ts';\nimport {sleep} from '../../../../../shared/src/sleep.ts';\nimport * as v from '../../../../../shared/src/valita.ts';\nimport {Database} from '../../../../../zqlite/src/db.ts';\nimport {mapPostgresToLiteColumn} from '../../../db/pg-to-lite.ts';\nimport type {ColumnSpec, PublishedTableSpec} from '../../../db/specs.ts';\nimport {StatementRunner} from '../../../db/statements.ts';\nimport {\n oneAfter,\n versionFromLexi,\n versionToLexi,\n type LexiVersion,\n} from '../../../types/lexi-version.ts';\nimport {pgClient, type PostgresDB} from '../../../types/pg.ts';\nimport {\n upstreamSchema,\n type ShardConfig,\n type ShardID,\n} from '../../../types/shards.ts';\nimport type {Sink} from '../../../types/streams.ts';\nimport {Subscription, type PendingResult} from '../../../types/subscription.ts';\nimport type {\n ChangeSource,\n ChangeStream,\n} from '../../change-streamer/change-streamer-service.ts';\nimport {AutoResetSignal} from '../../change-streamer/schema/tables.ts';\nimport {\n getSubscriptionState,\n type SubscriptionState,\n} from 
'../../replicator/schema/replication-state.ts';\nimport type {JSONObject} from '../protocol/current.ts';\nimport type {\n DataChange,\n Identifier,\n MessageRelation,\n TableMetadata,\n} from '../protocol/current/data.ts';\nimport type {\n ChangeStreamData,\n ChangeStreamMessage,\n Data,\n} from '../protocol/current/downstream.ts';\nimport {type InitialSyncOptions} from './initial-sync.ts';\nimport type {\n Message,\n MessageMessage,\n MessageRelation as PostgresRelation,\n} from './logical-replication/pgoutput.types.ts';\nimport {subscribe} from './logical-replication/stream.ts';\nimport {fromBigInt, toLexiVersion, type LSN} from './lsn.ts';\nimport {replicationEventSchema, type DdlUpdateEvent} from './schema/ddl.ts';\nimport {updateShardSchema} from './schema/init.ts';\nimport {\n getPublicationInfo,\n type PublishedSchema,\n type PublishedTableWithReplicaIdentity,\n} from './schema/published.ts';\nimport {\n dropShard,\n getInternalShardConfig,\n getReplicaAtVersion,\n internalPublicationPrefix,\n legacyReplicationSlot,\n replicaIdentitiesForTablesWithoutPrimaryKeys,\n replicationSlotExpression,\n type InternalShardConfig,\n type Replica,\n} from './schema/shard.ts';\nimport {validate} from './schema/validation.ts';\nimport {initSyncSchema} from './sync-schema.ts';\n\n/**\n * Initializes a Postgres change source, including the initial sync of the\n * replica, before streaming changes from the corresponding logical replication\n * stream.\n */\nexport async function initializePostgresChangeSource(\n lc: LogContext,\n upstreamURI: string,\n shard: ShardConfig,\n replicaDbFile: string,\n syncOptions: InitialSyncOptions,\n): Promise<{subscriptionState: SubscriptionState; changeSource: ChangeSource}> {\n await initSyncSchema(\n lc,\n `replica-${shard.appID}-${shard.shardNum}`,\n shard,\n replicaDbFile,\n upstreamURI,\n syncOptions,\n );\n\n const replica = new Database(lc, replicaDbFile);\n const subscriptionState = getSubscriptionState(new 
StatementRunner(replica));\n replica.close();\n\n // Check that upstream is properly setup, and throw an AutoReset to re-run\n // initial sync if not.\n const db = pgClient(lc, upstreamURI);\n try {\n const upstreamReplica = await checkAndUpdateUpstream(\n lc,\n db,\n shard,\n subscriptionState,\n );\n\n const changeSource = new PostgresChangeSource(\n lc,\n upstreamURI,\n shard,\n upstreamReplica,\n );\n\n return {subscriptionState, changeSource};\n } finally {\n await db.end();\n }\n}\n\nasync function checkAndUpdateUpstream(\n lc: LogContext,\n sql: PostgresDB,\n shard: ShardConfig,\n {replicaVersion, publications: subscribed}: SubscriptionState,\n) {\n // Perform any shard schema updates\n await updateShardSchema(lc, sql, shard, replicaVersion);\n\n const upstreamReplica = await getReplicaAtVersion(\n lc,\n sql,\n shard,\n replicaVersion,\n );\n if (!upstreamReplica) {\n throw new AutoResetSignal(\n `No replication slot for replica at version ${replicaVersion}`,\n );\n }\n\n // Verify that the publications match what is being replicated.\n const requested = [...shard.publications].sort();\n const replicated = upstreamReplica.publications\n .filter(p => !p.startsWith(internalPublicationPrefix(shard)))\n .sort();\n if (!deepEqual(requested, replicated)) {\n lc.warn?.(`Dropping shard to change publications to: [${requested}]`);\n await sql.unsafe(dropShard(shard.appID, shard.shardNum));\n throw new AutoResetSignal(\n `Requested publications [${requested}] do not match configured ` +\n `publications: [${replicated}]`,\n );\n }\n\n // Sanity check: The subscription state on the replica should have the\n // same publications. 
This should be guaranteed by the equivalence of the\n // replicaVersion, but it doesn't hurt to verify.\n if (!deepEqual(upstreamReplica.publications, subscribed)) {\n throw new AutoResetSignal(\n `Upstream publications [${upstreamReplica.publications}] do not ` +\n `match subscribed publications [${subscribed}]`,\n );\n }\n\n // Verify that the publications exist.\n const exists = await sql`\n SELECT pubname FROM pg_publication WHERE pubname IN ${sql(subscribed)};\n `.values();\n if (exists.length !== subscribed.length) {\n throw new AutoResetSignal(\n `Upstream publications [${exists.flat()}] do not contain ` +\n `all subscribed publications [${subscribed}]`,\n );\n }\n\n const {slot} = upstreamReplica;\n const result = await sql<\n {restartLSN: LSN | null; walStatus: string | null}[]\n > /*sql*/ `\n SELECT restart_lsn as \"restartLSN\", wal_status as \"walStatus\" FROM pg_replication_slots\n WHERE slot_name = ${slot}`;\n if (result.length === 0) {\n throw new AutoResetSignal(`replication slot ${slot} is missing`);\n }\n const [{restartLSN, walStatus}] = result;\n if (restartLSN === null || walStatus === 'lost') {\n throw new AutoResetSignal(\n `replication slot ${slot} has been invalidated for exceeding the max_slot_wal_keep_size`,\n );\n }\n return upstreamReplica;\n}\n\n/**\n * Postgres implementation of a {@link ChangeSource} backed by a logical\n * replication stream.\n */\nclass PostgresChangeSource implements ChangeSource {\n readonly #lc: LogContext;\n readonly #upstreamUri: string;\n readonly #shard: ShardID;\n readonly #replica: Replica;\n\n constructor(\n lc: LogContext,\n upstreamUri: string,\n shard: ShardID,\n replica: Replica,\n ) {\n this.#lc = lc.withContext('component', 'change-source');\n this.#upstreamUri = upstreamUri;\n this.#shard = shard;\n this.#replica = replica;\n }\n\n async startStream(clientWatermark: string): Promise<ChangeStream> {\n const db = pgClient(this.#lc, this.#upstreamUri, {}, 'json-as-string');\n const {slot} = 
this.#replica;\n\n let cleanup = promiseVoid;\n try {\n ({cleanup} = await this.#stopExistingReplicationSlotSubscribers(\n db,\n slot,\n ));\n const config = await getInternalShardConfig(db, this.#shard);\n this.#lc.info?.(`starting replication stream@${slot}`);\n return await this.#startStream(db, slot, clientWatermark, config);\n } finally {\n void cleanup.then(() => db.end());\n }\n }\n\n async #startStream(\n db: PostgresDB,\n slot: string,\n clientWatermark: string,\n shardConfig: InternalShardConfig,\n ): Promise<ChangeStream> {\n const clientStart = oneAfter(clientWatermark);\n const {messages, acks} = await subscribe(\n this.#lc,\n db,\n slot,\n [...shardConfig.publications],\n versionFromLexi(clientStart),\n );\n\n const changes = Subscription.create<ChangeStreamMessage>({\n cleanup: () => messages.cancel(),\n });\n const acker = new Acker(acks);\n\n const changeMaker = new ChangeMaker(\n this.#lc,\n this.#shard,\n shardConfig,\n this.#replica.initialSchema,\n this.#upstreamUri,\n );\n\n void (async function () {\n try {\n for await (const [lsn, msg] of messages) {\n if (msg.tag === 'keepalive') {\n changes.push(['status', msg, {watermark: versionToLexi(lsn)}]);\n continue;\n }\n let last: PendingResult | undefined;\n for (const change of await changeMaker.makeChanges(lsn, msg)) {\n last = changes.push(change);\n }\n await last?.result; // Allow the change-streamer to push back.\n }\n } catch (e) {\n changes.fail(translateError(e));\n }\n })();\n\n this.#lc.info?.(\n `started replication stream@${slot} from ${clientWatermark} (replicaVersion: ${\n this.#replica.version\n })`,\n );\n\n return {\n changes,\n acks: {push: status => acker.ack(status[2].watermark)},\n };\n }\n\n /**\n * Stops replication slots associated with this shard, and returns\n * a `cleanup` task that drops any slot other than the specified\n * `slotToKeep`.\n *\n * Note that replication slots created after `slotToKeep` (as indicated by\n * the timestamp suffix) are preserved, as those 
are newly syncing replicas\n * that will soon take over the slot.\n */\n async #stopExistingReplicationSlotSubscribers(\n sql: PostgresDB,\n slotToKeep: string,\n ): Promise<{cleanup: Promise<void>}> {\n const slotExpression = replicationSlotExpression(this.#shard);\n const legacySlotName = legacyReplicationSlot(this.#shard);\n\n // Note: `slot_name <= slotToKeep` uses a string compare of the millisecond\n // timestamp, which works until it exceeds 13 digits (sometime in 2286).\n const result = await sql<{slot: string; pid: string | null}[]>`\n SELECT slot_name as slot, pg_terminate_backend(active_pid), active_pid as pid\n FROM pg_replication_slots \n WHERE (slot_name LIKE ${slotExpression} OR slot_name = ${legacySlotName})\n AND slot_name <= ${slotToKeep}`;\n if (result.length === 0) {\n throw new AbortError(\n `replication slot ${slotToKeep} is missing. A different ` +\n `replication-manager should now be running on a new ` +\n `replication slot.`,\n );\n }\n // Clean up the replicas table.\n const replicasTable = `${upstreamSchema(this.#shard)}.replicas`;\n await sql`DELETE FROM ${sql(replicasTable)} WHERE slot != ${slotToKeep}`;\n\n const pids = result.filter(({pid}) => pid !== null).map(({pid}) => pid);\n if (pids.length) {\n this.#lc.info?.(`signaled subscriber ${pids} to shut down`);\n }\n const otherSlots = result\n .filter(({slot}) => slot !== slotToKeep)\n .map(({slot}) => slot);\n return {\n cleanup: otherSlots.length\n ? 
this.#dropReplicationSlots(sql, otherSlots)\n : promiseVoid,\n };\n }\n\n async #dropReplicationSlots(sql: PostgresDB, slots: string[]) {\n this.#lc.info?.(`dropping other replication slot(s) ${slots}`);\n for (let i = 0; i < 5; i++) {\n try {\n await sql`\n SELECT pg_drop_replication_slot(slot_name) FROM pg_replication_slots\n WHERE slot_name IN ${sql(slots)}\n `;\n this.#lc.info?.(`successfully dropped ${slots}`);\n return;\n } catch (e) {\n // error: replication slot \"zero_slot_change_source_test_id\" is active for PID 268\n if (\n e instanceof postgres.PostgresError &&\n e.code === PG_OBJECT_IN_USE\n ) {\n // The freeing up of the replication slot is not transactional;\n // sometimes it takes time for Postgres to consider the slot\n // inactive.\n this.#lc.debug?.(`attempt ${i + 1}: ${String(e)}`, e);\n } else {\n this.#lc.warn?.(`error dropping ${slots}`, e);\n }\n await sleep(1000);\n }\n }\n this.#lc.warn?.(`maximum attempts exceeded dropping ${slots}`);\n }\n}\n\n// Exported for testing.\nexport class Acker {\n #acks: Sink<bigint>;\n #keepaliveTimer: NodeJS.Timeout | undefined;\n\n constructor(acks: Sink<bigint>) {\n this.#acks = acks;\n }\n\n keepalive() {\n // Sets a timeout to send a standby status update in response to\n // a primary keepalive message.\n //\n // https://www.postgresql.org/docs/current/protocol-replication.html#PROTOCOL-REPLICATION-PRIMARY-KEEPALIVE-MESSAGE\n //\n // A primary keepalive message is streamed to the change-streamer as a\n // 'status' message, which in turn responds with an ack. However, in the\n // event that the change-streamer is backed up processing preceding\n // changes, this timeout will fire to send a status update that does not\n // change the confirmed flush position. 
This timeout must be shorter than\n // the `wal_sender_timeout`, which defaults to 60 seconds.\n //\n // https://www.postgresql.org/docs/current/runtime-config-replication.html#GUC-WAL-SENDER-TIMEOUT\n this.#keepaliveTimer ??= setTimeout(() => this.#sendAck(), 1000);\n }\n\n ack(watermark: LexiVersion) {\n this.#sendAck(watermark);\n }\n\n #sendAck(watermark?: LexiVersion) {\n clearTimeout(this.#keepaliveTimer);\n this.#keepaliveTimer = undefined;\n\n // Note: Sending '0/0' means \"keep alive but do not update confirmed_flush_lsn\"\n // https://github.com/postgres/postgres/blob/3edc67d337c2e498dad1cd200e460f7c63e512e6/src/backend/replication/walsender.c#L2457\n const lsn = watermark ? versionFromLexi(watermark) : 0n;\n this.#acks.push(lsn);\n }\n}\n\ntype ReplicationError = {\n lsn: bigint;\n msg: Message;\n err: unknown;\n lastLogTime: number;\n};\n\nconst SET_REPLICA_IDENTITY_DELAY_MS = 500;\n\nclass ChangeMaker {\n readonly #lc: LogContext;\n readonly #shardPrefix: string;\n readonly #shardConfig: InternalShardConfig;\n readonly #initialSchema: PublishedSchema;\n readonly #upstreamDB: PostgresDB;\n\n #replicaIdentityTimer: NodeJS.Timeout | undefined;\n #error: ReplicationError | undefined;\n\n constructor(\n lc: LogContext,\n {appID, shardNum}: ShardID,\n shardConfig: InternalShardConfig,\n initialSchema: PublishedSchema,\n upstreamURI: string,\n ) {\n this.#lc = lc;\n // Note: This matches the prefix used in pg_logical_emit_message() in pg/schema/ddl.ts.\n this.#shardPrefix = `${appID}/${shardNum}`;\n this.#shardConfig = shardConfig;\n this.#initialSchema = initialSchema;\n this.#upstreamDB = pgClient(lc, upstreamURI, {\n ['idle_timeout']: 10, // only used occasionally\n connection: {['application_name']: 'zero-schema-change-detector'},\n });\n }\n\n async makeChanges(lsn: bigint, msg: Message): Promise<ChangeStreamMessage[]> {\n if (this.#error) {\n this.#logError(this.#error);\n return [];\n }\n try {\n return await this.#makeChanges(msg);\n } catch (err) {\n 
this.#error = {lsn, msg, err, lastLogTime: 0};\n this.#logError(this.#error);\n\n const message = `Unable to continue replication from LSN ${fromBigInt(lsn)}`;\n const errorDetails: JSONObject = {error: message};\n if (err instanceof UnsupportedSchemaChangeError) {\n errorDetails.reason = err.description;\n errorDetails.context = err.ddlUpdate.context;\n } else {\n errorDetails.reason = String(err);\n }\n\n // Rollback the current transaction to avoid dangling transactions in\n // downstream processors (i.e. changeLog, replicator).\n return [\n ['rollback', {tag: 'rollback'}],\n ['control', {tag: 'reset-required', message, errorDetails}],\n ];\n }\n }\n\n #logError(error: ReplicationError) {\n const {lsn, msg, err, lastLogTime} = error;\n const now = Date.now();\n\n // Output an error to logs as replication messages continue to be dropped,\n // at most once a minute.\n if (now - lastLogTime > 60_000) {\n this.#lc.error?.(\n `Unable to continue replication from LSN ${fromBigInt(lsn)}: ${String(\n err,\n )}`,\n err instanceof UnsupportedSchemaChangeError\n ? err.ddlUpdate.context\n : // 'content' can be a large byte Buffer. Exclude it from logging output.\n {...msg, content: undefined},\n );\n error.lastLogTime = now;\n }\n }\n\n // oxlint-disable-next-line require-await\n async #makeChanges(msg: Message): Promise<ChangeStreamData[]> {\n switch (msg.tag) {\n case 'begin':\n return [\n [\n 'begin',\n {...msg, json: 's'},\n {commitWatermark: toLexiVersion(must(msg.commitLsn))},\n ],\n ];\n\n case 'delete': {\n if (!(msg.key ?? msg.old)) {\n throw new Error(\n `Invalid DELETE msg (missing key): ${stringify(msg)}`,\n );\n }\n return [\n [\n 'data',\n {\n ...msg,\n relation: makeRelation(msg.relation),\n // https://www.postgresql.org/docs/current/protocol-logicalrep-message-formats.html#PROTOCOL-LOGICALREP-MESSAGE-FORMATS-DELETE\n key: must(msg.old ?? 
msg.key),\n },\n ],\n ];\n }\n\n case 'update': {\n return [\n [\n 'data',\n {\n ...msg,\n relation: makeRelation(msg.relation),\n // https://www.postgresql.org/docs/current/protocol-logicalrep-message-formats.html#PROTOCOL-LOGICALREP-MESSAGE-FORMATS-UPDATE\n key: msg.old ?? msg.key,\n },\n ],\n ];\n }\n\n case 'insert':\n return [['data', {...msg, relation: makeRelation(msg.relation)}]];\n case 'truncate':\n return [['data', {...msg, relations: msg.relations.map(makeRelation)}]];\n\n case 'message':\n if (msg.prefix !== this.#shardPrefix) {\n this.#lc.debug?.('ignoring message for different shard', msg.prefix);\n return [];\n }\n return this.#handleCustomMessage(msg);\n\n case 'commit':\n return [\n ['commit', msg, {watermark: toLexiVersion(must(msg.commitLsn))}],\n ];\n\n case 'relation':\n return this.#handleRelation(msg);\n case 'type':\n return []; // Nothing need be done for custom types.\n case 'origin':\n // No need to detect replication loops since we are not a\n // PG replication source.\n return [];\n default:\n msg satisfies never;\n throw new Error(`Unexpected message type ${stringify(msg)}`);\n }\n }\n\n #preSchema: PublishedSchema | undefined;\n\n #handleCustomMessage(msg: MessageMessage) {\n const event = this.#parseReplicationEvent(msg.content);\n // Cancel manual schema adjustment timeouts when an upstream schema change\n // is about to happen, so as to avoid interfering / redundant work.\n clearTimeout(this.#replicaIdentityTimer);\n\n if (event.type === 'ddlStart') {\n // Store the schema in order to diff it with a potential ddlUpdate.\n this.#preSchema = event.schema;\n return [];\n }\n // ddlUpdate\n const changes = this.#makeSchemaChanges(\n must(this.#preSchema, `ddlUpdate received without a ddlStart`),\n event,\n ).map(change => ['data', change] satisfies Data);\n\n this.#lc\n .withContext('query', event.context.query)\n .info?.(`${changes.length} schema change(s)`, changes);\n\n const replicaIdentities = 
replicaIdentitiesForTablesWithoutPrimaryKeys(\n event.schema,\n );\n if (replicaIdentities) {\n this.#replicaIdentityTimer = setTimeout(async () => {\n try {\n await replicaIdentities.apply(this.#lc, this.#upstreamDB);\n } catch (err) {\n this.#lc.warn?.(`error setting replica identities`, err);\n }\n }, SET_REPLICA_IDENTITY_DELAY_MS);\n }\n\n return changes;\n }\n\n /**\n * A note on operation order:\n *\n * Postgres will drop related indexes when columns are dropped,\n * but SQLite will error instead (https://sqlite.org/forum/forumpost/2e62dba69f?t=c&hist).\n * The current workaround is to drop indexes first.\n *\n * Also note that although it should not be possible to both rename and\n * add/drop tables/columns in a single statement, the operations are\n * ordered to handle that possibility, by always dropping old entities,\n * then modifying kept entities, and then adding new entities.\n *\n * Thus, the order of replicating DDL updates is:\n * - drop indexes\n * - drop tables\n * - alter tables\n * - drop columns\n * - alter columns\n * - add columns\n * - create tables\n * - create indexes\n *\n * In the future the replication logic should be improved to handle this\n * behavior in SQLite by dropping dependent indexes manually before dropping\n * columns. 
This, for example, would be needed to properly support changing\n * the type of a column that's indexed.\n */\n #makeSchemaChanges(\n preSchema: PublishedSchema,\n update: DdlUpdateEvent,\n ): DataChange[] {\n try {\n const [prevTbl, prevIdx] = specsByID(preSchema);\n const [nextTbl, nextIdx] = specsByID(update.schema);\n const changes: DataChange[] = [];\n\n // Validate the new table schemas\n for (const table of nextTbl.values()) {\n validate(this.#lc, table);\n }\n\n const [droppedIdx, createdIdx] = symmetricDifferences(prevIdx, nextIdx);\n for (const id of droppedIdx) {\n const {schema, name} = must(prevIdx.get(id));\n changes.push({tag: 'drop-index', id: {schema, name}});\n }\n\n // DROP\n const [droppedTbl, createdTbl] = symmetricDifferences(prevTbl, nextTbl);\n for (const id of droppedTbl) {\n const {schema, name} = must(prevTbl.get(id));\n changes.push({tag: 'drop-table', id: {schema, name}});\n }\n // ALTER\n const tables = intersection(prevTbl, nextTbl);\n for (const id of tables) {\n changes.push(\n ...this.#getTableChanges(\n must(prevTbl.get(id)),\n must(nextTbl.get(id)),\n ),\n );\n }\n // CREATE\n for (const id of createdTbl) {\n const spec = must(nextTbl.get(id));\n changes.push({\n tag: 'create-table',\n spec,\n metadata: getMetadata(spec),\n });\n }\n\n // Add indexes last since they may reference tables / columns that need\n // to be created first.\n for (const id of createdIdx) {\n const spec = must(nextIdx.get(id));\n changes.push({tag: 'create-index', spec});\n }\n return changes;\n } catch (e) {\n throw new UnsupportedSchemaChangeError(String(e), update, {cause: e});\n }\n }\n\n #getTableChanges(\n oldTable: PublishedTableWithReplicaIdentity,\n newTable: PublishedTableWithReplicaIdentity,\n ): DataChange[] {\n const changes: DataChange[] = [];\n if (\n oldTable.schema !== newTable.schema ||\n oldTable.name !== newTable.name\n ) {\n changes.push({\n tag: 'rename-table',\n old: {schema: oldTable.schema, name: oldTable.name},\n new: {schema: 
newTable.schema, name: newTable.name},\n });\n }\n if (\n oldTable.replicaIdentity !== newTable.replicaIdentity ||\n !areEqual(\n oldTable.replicaIdentityColumns,\n newTable.replicaIdentityColumns,\n )\n ) {\n changes.push({\n tag: 'update-table-metadata',\n table: {schema: newTable.schema, name: newTable.name},\n old: getMetadata(oldTable),\n new: getMetadata(newTable),\n });\n }\n const table = {schema: newTable.schema, name: newTable.name};\n const oldColumns = columnsByID(oldTable.columns);\n const newColumns = columnsByID(newTable.columns);\n\n // DROP\n const [dropped, added] = symmetricDifferences(oldColumns, newColumns);\n for (const id of dropped) {\n const {name: column} = must(oldColumns.get(id));\n changes.push({tag: 'drop-column', table, column});\n }\n\n // ALTER\n const both = intersection(oldColumns, newColumns);\n for (const id of both) {\n const {name: oldName, ...oldSpec} = must(oldColumns.get(id));\n const {name: newName, ...newSpec} = must(newColumns.get(id));\n // The three things that we care about are:\n // 1. name\n // 2. type\n // 3. not-null\n if (\n oldName !== newName ||\n oldSpec.dataType !== newSpec.dataType ||\n oldSpec.notNull !== newSpec.notNull\n ) {\n changes.push({\n tag: 'update-column',\n table,\n old: {name: oldName, spec: oldSpec},\n new: {name: newName, spec: newSpec},\n });\n }\n }\n\n // ADD\n for (const id of added) {\n const {name, ...spec} = must(newColumns.get(id));\n const column = {name, spec};\n // Validate that the ChangeProcessor will accept the column change.\n mapPostgresToLiteColumn(table.name, column);\n changes.push({\n tag: 'add-column',\n table,\n column,\n tableMetadata: getMetadata(newTable),\n });\n }\n return changes;\n }\n\n #parseReplicationEvent(content: Uint8Array) {\n const str =\n content instanceof Buffer\n ? 
content.toString('utf-8')\n : new TextDecoder().decode(content);\n const json = JSON.parse(str);\n return v.parse(json, replicationEventSchema, 'passthrough');\n }\n\n /**\n * If `ddlDetection === true`, relation messages are irrelevant,\n * as schema changes are detected by event triggers that\n * emit custom messages.\n *\n * For degraded-mode replication (`ddlDetection === false`):\n * 1. query the current published schemas on upstream\n * 2. compare that with the InternalShardConfig.initialSchema\n * 3. compare that with the incoming MessageRelation\n * 4. On any discrepancy, throw an UnsupportedSchemaChangeError\n * to halt replication.\n *\n * Note that schemas queried in step [1] will be *post-transaction*\n * schemas, which are not necessarily suitable for actually processing\n * the statements in the transaction being replicated. In other words,\n * this mechanism cannot be used to reliably *replicate* schema changes.\n * However, they serve the purpose determining if schemas have changed.\n */\n async #handleRelation(rel: PostgresRelation): Promise<ChangeStreamData[]> {\n const {publications, ddlDetection} = this.#shardConfig;\n if (ddlDetection) {\n return [];\n }\n const currentSchema = await getPublicationInfo(\n this.#upstreamDB,\n publications,\n );\n const difference = getSchemaDifference(this.#initialSchema, currentSchema);\n if (difference !== null) {\n throw new MissingEventTriggerSupport(difference);\n }\n // Even if the currentSchema is equal to the initialSchema, the\n // MessageRelation itself must be checked to detect transient\n // schema changes within the transaction (e.g. 
adding and dropping\n // a table, or renaming a column and then renaming it back).\n const orel = this.#initialSchema.tables.find(\n t => t.oid === rel.relationOid,\n );\n if (!orel) {\n // Can happen if a table is created and then dropped in the same transaction.\n throw new MissingEventTriggerSupport(\n `relation not in initialSchema: ${stringify(rel)}`,\n );\n }\n if (relationDifferent(orel, rel)) {\n throw new MissingEventTriggerSupport(\n `relation has changed within the transaction: ${stringify(orel)} vs ${stringify(rel)}`,\n );\n }\n return [];\n }\n}\n\nfunction getSchemaDifference(\n a: PublishedSchema,\n b: PublishedSchema,\n): string | null {\n // Note: ignore indexes since changes need not to halt replication\n if (a.tables.length !== b.tables.length) {\n return `tables created or dropped`;\n }\n for (let i = 0; i < a.tables.length; i++) {\n const at = a.tables[i];\n const bt = b.tables[i];\n const difference = getTableDifference(at, bt);\n if (difference) {\n return difference;\n }\n }\n return null;\n}\n\n// ColumnSpec comparator\nconst byColumnPos = (a: [string, ColumnSpec], b: [string, ColumnSpec]) =>\n a[1].pos < b[1].pos ? -1 : a[1].pos > b[1].pos ? 
1 : 0;\n\nfunction getTableDifference(\n a: PublishedTableSpec,\n b: PublishedTableSpec,\n): string | null {\n if (a.oid !== b.oid || a.schema !== b.schema || a.name !== b.name) {\n return `Table \"${a.name}\" differs from table \"${b.name}\"`;\n }\n if (!deepEqual(a.primaryKey, b.primaryKey)) {\n return `Primary key of table \"${a.name}\" has changed`;\n }\n const acols = Object.entries(a.columns).sort(byColumnPos);\n const bcols = Object.entries(b.columns).sort(byColumnPos);\n if (\n acols.length !== bcols.length ||\n acols.some(([aname, acol], i) => {\n const [bname, bcol] = bcols[i];\n return (\n aname !== bname ||\n acol.pos !== bcol.pos ||\n acol.typeOID !== bcol.typeOID ||\n acol.notNull !== bcol.notNull\n );\n })\n ) {\n return `Columns of table \"${a.name}\" have changed`;\n }\n return null;\n}\n\nexport function relationDifferent(a: PublishedTableSpec, b: PostgresRelation) {\n if (a.oid !== b.relationOid || a.schema !== b.schema || a.name !== b.name) {\n return true;\n }\n if (\n // The MessageRelation's `keyColumns` field contains the columns in column\n // declaration order, whereas the PublishedTableSpec's `primaryKey`\n // contains the columns in primary key (i.e. index) order. 
Do an\n // order-agnostic compare here since it is not possible to detect\n // key-order changes from the MessageRelation message alone.\n b.replicaIdentity === 'default' &&\n !equals(new Set(a.primaryKey), new Set(b.keyColumns))\n ) {\n return true;\n }\n const acols = Object.entries(a.columns).sort(byColumnPos);\n const bcols = b.columns;\n return (\n acols.length !== bcols.length ||\n acols.some(([aname, acol], i) => {\n const bcol = bcols[i];\n return aname !== bcol.name || acol.typeOID !== bcol.typeOid;\n })\n );\n}\n\nfunction translateError(e: unknown): Error {\n if (!(e instanceof Error)) {\n return new Error(String(e));\n }\n if (e instanceof postgres.PostgresError && e.code === PG_ADMIN_SHUTDOWN) {\n return new ShutdownSignal(e);\n }\n return e;\n}\nconst idString = (id: Identifier) => `${id.schema}.${id.name}`;\n\nfunction specsByID(published: PublishedSchema) {\n return [\n // It would have been nice to use a CustomKeyMap here, but we rely on set-utils\n // operations which use plain Sets.\n new Map(published.tables.map(t => [t.oid, t])),\n new Map(published.indexes.map(i => [idString(i), i])),\n ] as const;\n}\n\nfunction columnsByID(\n columns: Record<string, ColumnSpec>,\n): Map<number, ColumnSpec & {name: string}> {\n const colsByID = new Map<number, ColumnSpec & {name: string}>();\n for (const [name, spec] of Object.entries(columns)) {\n // The `pos` field is the `attnum` in `pg_attribute`, which is a stable\n // identifier for the column in this table (i.e. 
never reused).\n colsByID.set(spec.pos, {...spec, name});\n }\n return colsByID;\n}\n\nfunction getMetadata(table: PublishedTableWithReplicaIdentity): TableMetadata {\n const metadata: TableMetadata = {\n rowKey: {\n columns: table.replicaIdentityColumns,\n },\n };\n switch (table.replicaIdentity) {\n case 'd':\n metadata.rowKey.type = 'default';\n break;\n case 'i':\n metadata.rowKey.type = 'index';\n break;\n case 'f':\n metadata.rowKey.type = 'full';\n break;\n case 'n':\n metadata.rowKey.type = 'nothing';\n break;\n case undefined:\n break;\n default:\n unreachable(table.replicaIdentity);\n }\n return metadata;\n}\n\n// Avoid sending the `columns` from the Postgres MessageRelation message.\n// They are not used downstream and the message can be large.\nfunction makeRelation(relation: PostgresRelation): MessageRelation {\n // Avoid sending the `columns` from the Postgres MessageRelation message.\n // They are not used downstream and the message can be large.\n const {columns: _, keyColumns, replicaIdentity, ...rest} = relation;\n return {\n ...rest,\n rowKey: {\n columns: keyColumns,\n type: replicaIdentity,\n },\n // For now, deprecated columns are sent for backwards compatibility.\n // These can be removed when bumping the MIN_PROTOCOL_VERSION to 5.\n keyColumns,\n replicaIdentity,\n };\n}\n\nclass UnsupportedSchemaChangeError extends Error {\n readonly name = 'UnsupportedSchemaChangeError';\n readonly description: string;\n readonly ddlUpdate: DdlUpdateEvent;\n\n constructor(\n description: string,\n ddlUpdate: DdlUpdateEvent,\n options?: ErrorOptions,\n ) {\n super(\n `Replication halted. Resync the replica to recover: ${description}`,\n options,\n );\n this.description = description;\n this.ddlUpdate = ddlUpdate;\n }\n}\n\nclass MissingEventTriggerSupport extends Error {\n readonly name = 'MissingEventTriggerSupport';\n\n constructor(msg: string) {\n super(\n `${msg}. 
Schema changes cannot be reliably replicated without event trigger support.`,\n );\n }\n}\n\n// TODO(0xcadams): should this be a ProtocolError?\nclass ShutdownSignal extends AbortError {\n readonly name = 'ShutdownSignal';\n\n constructor(cause: unknown) {\n super(\n 'shutdown signal received (e.g. another zero-cache taking over the replication stream)',\n {\n cause,\n },\n );\n }\n}\n"],"names":["v.parse"],"mappings":";;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AA6FA,eAAsB,+BACpB,IACA,aACA,OACA,eACA,aAC6E;AAC7E,QAAM;AAAA,IACJ;AAAA,IACA,WAAW,MAAM,KAAK,IAAI,MAAM,QAAQ;AAAA,IACxC;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EAAA;AAGF,QAAM,UAAU,IAAI,SAAS,IAAI,aAAa;AAC9C,QAAM,oBAAoB,qBAAqB,IAAI,gBAAgB,OAAO,CAAC;AAC3E,UAAQ,MAAA;AAIR,QAAM,KAAK,SAAS,IAAI,WAAW;AACnC,MAAI;AACF,UAAM,kBAAkB,MAAM;AAAA,MAC5B;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IAAA;AAGF,UAAM,eAAe,IAAI;AAAA,MACvB;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IAAA;AAGF,WAAO,EAAC,mBAAmB,aAAA;AAAA,EAC7B,UAAA;AACE,UAAM,GAAG,IAAA;AAAA,EACX;AACF;AAEA,eAAe,uBACb,IACA,KACA,OACA,EAAC,gBAAgB,cAAc,cAC/B;AAEA,QAAM,kBAAkB,IAAI,KAAK,OAAO,cAAc;AAEtD,QAAM,kBAAkB,MAAM;AAAA,IAC5B;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EAAA;AAEF,MAAI,CAAC,iBAAiB;AACpB,UAAM,IAAI;AAAA,MACR,8CAA8C,cAAc;AAAA,IAAA;AAAA,EAEhE;AAGA,QAAM,YAAY,CAAC,GAAG,MAAM,YAAY,EAAE,KAAA;AAC1C,QAAM,aAAa,gBAAgB,aAChC,OAAO,CAAA,MAAK,CAAC,EAAE,WAAW,0BAA0B,KAAK,CAAC,CAAC,EAC3D,KAAA;AACH,MAAI,CAAC,UAAU,WAAW,UAAU,GAAG;AACrC,OAAG,OAAO,8CAA8C,SAAS,GAAG;AACpE,UAAM,IAAI,OAAO,UAAU,MAAM,OAAO,MAAM,QAAQ,CAAC;AACvD,UAAM,IAAI;AAAA,MACR,2BAA2B,SAAS,4CAChB,UAAU;AAAA,IAAA;AAAA,EAElC;AAKA,MAAI,CAAC,UAAU,gBAAgB,cAAc,UAAU,GAAG;AACxD,UAAM,IAAI;AAAA,MACR,0BAA0B,gBAAgB,YAAY,2CAClB,UAAU;AAAA,IAAA;AAAA,EAElD;AAGA,QAAM,SAAS,MAAM;AAAA,0DACmC,IAAI,UAAU,CAAC;AAAA,IACrE,OAAA;AACF,MAAI,OAAO,WAAW,WAAW,QAAQ;AACvC,UAAM,IAAI;AAAA,MACR,0BAA0B,OAAO,KAAA,CAAM,iDACL,UAAU;AAAA,IAAA;AAAA,EAEhD;AAEA,QAAM,EAAC,SAAQ;AACf,QAAM,SAAS,MAAM;AAAA;AAAA,0BAIG,IAAI;AAC5B,MAAI,OAAO,WAAW,GAAG;AACvB,UAAM,IAAI,gBAAgB,oBAAoB,IAAI,aAAa;AAAA,EACjE;AACA,QAAM,CAAC,EAAC,Y
AAY,UAAA,CAAU,IAAI;AAClC,MAAI,eAAe,QAAQ,cAAc,QAAQ;AAC/C,UAAM,IAAI;AAAA,MACR,oBAAoB,IAAI;AAAA,IAAA;AAAA,EAE5B;AACA,SAAO;AACT;AAMA,MAAM,qBAA6C;AAAA,EACxC;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EAET,YACE,IACA,aACA,OACA,SACA;AACA,SAAK,MAAM,GAAG,YAAY,aAAa,eAAe;AACtD,SAAK,eAAe;AACpB,SAAK,SAAS;AACd,SAAK,WAAW;AAAA,EAClB;AAAA,EAEA,MAAM,YAAY,iBAAgD;AAChE,UAAM,KAAK,SAAS,KAAK,KAAK,KAAK,cAAc,CAAA,GAAI,gBAAgB;AACrE,UAAM,EAAC,SAAQ,KAAK;AAEpB,QAAI,UAAU;AACd,QAAI;AACF,OAAC,EAAC,QAAA,IAAW,MAAM,KAAK;AAAA,QACtB;AAAA,QACA;AAAA,MAAA;AAEF,YAAM,SAAS,MAAM,uBAAuB,IAAI,KAAK,MAAM;AAC3D,WAAK,IAAI,OAAO,+BAA+B,IAAI,EAAE;AACrD,aAAO,MAAM,KAAK,aAAa,IAAI,MAAM,iBAAiB,MAAM;AAAA,IAClE,UAAA;AACE,WAAK,QAAQ,KAAK,MAAM,GAAG,KAAK;AAAA,IAClC;AAAA,EACF;AAAA,EAEA,MAAM,aACJ,IACA,MACA,iBACA,aACuB;AACvB,UAAM,cAAc,SAAS,eAAe;AAC5C,UAAM,EAAC,UAAU,KAAA,IAAQ,MAAM;AAAA,MAC7B,KAAK;AAAA,MACL;AAAA,MACA;AAAA,MACA,CAAC,GAAG,YAAY,YAAY;AAAA,MAC5B,gBAAgB,WAAW;AAAA,IAAA;AAG7B,UAAM,UAAU,aAAa,OAA4B;AAAA,MACvD,SAAS,MAAM,SAAS,OAAA;AAAA,IAAO,CAChC;AACD,UAAM,QAAQ,IAAI,MAAM,IAAI;AAE5B,UAAM,cAAc,IAAI;AAAA,MACtB,KAAK;AAAA,MACL,KAAK;AAAA,MACL;AAAA,MACA,KAAK,SAAS;AAAA,MACd,KAAK;AAAA,IAAA;AAGP,UAAM,iBAAkB;AACtB,UAAI;AACF,yBAAiB,CAAC,KAAK,GAAG,KAAK,UAAU;AACvC,cAAI,IAAI,QAAQ,aAAa;AAC3B,oBAAQ,KAAK,CAAC,UAAU,KAAK,EAAC,WAAW,cAAc,GAAG,EAAA,CAAE,CAAC;AAC7D;AAAA,UACF;AACA,cAAI;AACJ,qBAAW,UAAU,MAAM,YAAY,YAAY,KAAK,GAAG,GAAG;AAC5D,mBAAO,QAAQ,KAAK,MAAM;AAAA,UAC5B;AACA,gBAAM,MAAM;AAAA,QACd;AAAA,MACF,SAAS,GAAG;AACV,gBAAQ,KAAK,eAAe,CAAC,CAAC;AAAA,MAChC;AAAA,IACF,GAAA;AAEA,SAAK,IAAI;AAAA,MACP,8BAA8B,IAAI,SAAS,eAAe,qBACxD,KAAK,SAAS,OAChB;AAAA,IAAA;AAGF,WAAO;AAAA,MACL;AAAA,MACA,MAAM,EAAC,MAAM,CAAA,WAAU,MAAM,IAAI,OAAO,CAAC,EAAE,SAAS,EAAA;AAAA,IAAC;AAAA,EAEzD;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAWA,MAAM,wCACJ,KACA,YACmC;AACnC,UAAM,iBAAiB,0BAA0B,KAAK,MAAM;AAC5D,UAAM,iBAAiB,sBAAsB,KAAK,MAAM;AAIxD,UAAM,SAAS,MAAM;AAAA;AAAA;AAAA,8BAGK,cAAc,mBAAmB,cAAc;AAAA,+BAC9C,UAAU;AACrC,QAAI,OAAO,WAAW,GAAG;AACvB,YAAM,IAAI;AAAA,QACR,oBAAoB,UAAU;AAAA,MAAA;AAAA,IAIlC;AAE
A,UAAM,gBAAgB,GAAG,eAAe,KAAK,MAAM,CAAC;AACpD,UAAM,kBAAkB,IAAI,aAAa,CAAC,kBAAkB,UAAU;AAEtE,UAAM,OAAO,OAAO,OAAO,CAAC,EAAC,IAAA,MAAS,QAAQ,IAAI,EAAE,IAAI,CAAC,EAAC,IAAA,MAAS,GAAG;AACtE,QAAI,KAAK,QAAQ;AACf,WAAK,IAAI,OAAO,uBAAuB,IAAI,eAAe;AAAA,IAC5D;AACA,UAAM,aAAa,OAChB,OAAO,CAAC,EAAC,KAAA,MAAU,SAAS,UAAU,EACtC,IAAI,CAAC,EAAC,KAAA,MAAU,IAAI;AACvB,WAAO;AAAA,MACL,SAAS,WAAW,SAChB,KAAK,sBAAsB,KAAK,UAAU,IAC1C;AAAA,IAAA;AAAA,EAER;AAAA,EAEA,MAAM,sBAAsB,KAAiB,OAAiB;AAC5D,SAAK,IAAI,OAAO,sCAAsC,KAAK,EAAE;AAC7D,aAAS,IAAI,GAAG,IAAI,GAAG,KAAK;AAC1B,UAAI;AACF,cAAM;AAAA;AAAA,iCAEmB,IAAI,KAAK,CAAC;AAAA;AAEnC,aAAK,IAAI,OAAO,wBAAwB,KAAK,EAAE;AAC/C;AAAA,MACF,SAAS,GAAG;AAEV,YACE,aAAa,SAAS,iBACtB,EAAE,SAAS,kBACX;AAIA,eAAK,IAAI,QAAQ,WAAW,IAAI,CAAC,KAAK,OAAO,CAAC,CAAC,IAAI,CAAC;AAAA,QACtD,OAAO;AACL,eAAK,IAAI,OAAO,kBAAkB,KAAK,IAAI,CAAC;AAAA,QAC9C;AACA,cAAM,MAAM,GAAI;AAAA,MAClB;AAAA,IACF;AACA,SAAK,IAAI,OAAO,sCAAsC,KAAK,EAAE;AAAA,EAC/D;AACF;AAGO,MAAM,MAAM;AAAA,EACjB;AAAA,EACA;AAAA,EAEA,YAAY,MAAoB;AAC9B,SAAK,QAAQ;AAAA,EACf;AAAA,EAEA,YAAY;AAcV,SAAK,oBAAoB,WAAW,MAAM,KAAK,SAAA,GAAY,GAAI;AAAA,EACjE;AAAA,EAEA,IAAI,WAAwB;AAC1B,SAAK,SAAS,SAAS;AAAA,EACzB;AAAA,EAEA,SAAS,WAAyB;AAChC,iBAAa,KAAK,eAAe;AACjC,SAAK,kBAAkB;AAIvB,UAAM,MAAM,YAAY,gBAAgB,SAAS,IAAI;AACrD,SAAK,MAAM,KAAK,GAAG;AAAA,EACrB;AACF;AASA,MAAM,gCAAgC;AAEtC,MAAM,YAAY;AAAA,EACP;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EAET;AAAA,EACA;AAAA,EAEA,YACE,IACA,EAAC,OAAO,YACR,aACA,eACA,aACA;AACA,SAAK,MAAM;AAEX,SAAK,eAAe,GAAG,KAAK,IAAI,QAAQ;AACxC,SAAK,eAAe;AACpB,SAAK,iBAAiB;AACtB,SAAK,cAAc,SAAS,IAAI,aAAa;AAAA,MAC3C,CAAC,cAAc,GAAG;AAAA;AAAA,MAClB,YAAY,EAAC,CAAC,kBAAkB,GAAG,8BAAA;AAAA,IAA6B,CACjE;AAAA,EACH;AAAA,EAEA,MAAM,YAAY,KAAa,KAA8C;AAC3E,QAAI,KAAK,QAAQ;AACf,WAAK,UAAU,KAAK,MAAM;AAC1B,aAAO,CAAA;AAAA,IACT;AACA,QAAI;AACF,aAAO,MAAM,KAAK,aAAa,GAAG;AAAA,IACpC,SAAS,KAAK;AACZ,WAAK,SAAS,EAAC,KAAK,KAAK,KAAK,aAAa,EAAA;AAC3C,WAAK,UAAU,KAAK,MAAM;AAE1B,YAAM,UAAU,2CAA2C,WAAW,GAAG,CAAC;AAC1E,YAAM,eAA2B,EAAC,OAAO,QAAA;AACzC,UAAI,eAAe,8BAA8B;AAC/C,qBAAa,SAAS,IAAI;AAC1B,qB
AAa,UAAU,IAAI,UAAU;AAAA,MACvC,OAAO;AACL,qBAAa,SAAS,OAAO,GAAG;AAAA,MAClC;AAIA,aAAO;AAAA,QACL,CAAC,YAAY,EAAC,KAAK,YAAW;AAAA,QAC9B,CAAC,WAAW,EAAC,KAAK,kBAAkB,SAAS,cAAa;AAAA,MAAA;AAAA,IAE9D;AAAA,EACF;AAAA,EAEA,UAAU,OAAyB;AACjC,UAAM,EAAC,KAAK,KAAK,KAAK,gBAAe;AACrC,UAAM,MAAM,KAAK,IAAA;AAIjB,QAAI,MAAM,cAAc,KAAQ;AAC9B,WAAK,IAAI;AAAA,QACP,2CAA2C,WAAW,GAAG,CAAC,KAAK;AAAA,UAC7D;AAAA,QAAA,CACD;AAAA,QACD,eAAe,+BACX,IAAI,UAAU;AAAA;AAAA,UAEd,EAAC,GAAG,KAAK,SAAS,OAAA;AAAA;AAAA,MAAS;AAEjC,YAAM,cAAc;AAAA,IACtB;AAAA,EACF;AAAA;AAAA,EAGA,MAAM,aAAa,KAA2C;AAC5D,YAAQ,IAAI,KAAA;AAAA,MACV,KAAK;AACH,eAAO;AAAA,UACL;AAAA,YACE;AAAA,YACA,EAAC,GAAG,KAAK,MAAM,IAAA;AAAA,YACf,EAAC,iBAAiB,cAAc,KAAK,IAAI,SAAS,CAAC,EAAA;AAAA,UAAC;AAAA,QACtD;AAAA,MAGJ,KAAK,UAAU;AACb,YAAI,EAAE,IAAI,OAAO,IAAI,MAAM;AACzB,gBAAM,IAAI;AAAA,YACR,qCAAqC,UAAU,GAAG,CAAC;AAAA,UAAA;AAAA,QAEvD;AACA,eAAO;AAAA,UACL;AAAA,YACE;AAAA,YACA;AAAA,cACE,GAAG;AAAA,cACH,UAAU,aAAa,IAAI,QAAQ;AAAA;AAAA,cAEnC,KAAK,KAAK,IAAI,OAAO,IAAI,GAAG;AAAA,YAAA;AAAA,UAC9B;AAAA,QACF;AAAA,MAEJ;AAAA,MAEA,KAAK,UAAU;AACb,eAAO;AAAA,UACL;AAAA,YACE;AAAA,YACA;AAAA,cACE,GAAG;AAAA,cACH,UAAU,aAAa,IAAI,QAAQ;AAAA;AAAA,cAEnC,KAAK,IAAI,OAAO,IAAI;AAAA,YAAA;AAAA,UACtB;AAAA,QACF;AAAA,MAEJ;AAAA,MAEA,KAAK;AACH,eAAO,CAAC,CAAC,QAAQ,EAAC,GAAG,KAAK,UAAU,aAAa,IAAI,QAAQ,EAAA,CAAE,CAAC;AAAA,MAClE,KAAK;AACH,eAAO,CAAC,CAAC,QAAQ,EAAC,GAAG,KAAK,WAAW,IAAI,UAAU,IAAI,YAAY,EAAA,CAAE,CAAC;AAAA,MAExE,KAAK;AACH,YAAI,IAAI,WAAW,KAAK,cAAc;AACpC,eAAK,IAAI,QAAQ,wCAAwC,IAAI,MAAM;AACnE,iBAAO,CAAA;AAAA,QACT;AACA,eAAO,KAAK,qBAAqB,GAAG;AAAA,MAEtC,KAAK;AACH,eAAO;AAAA,UACL,CAAC,UAAU,KAAK,EAAC,WAAW,cAAc,KAAK,IAAI,SAAS,CAAC,EAAA,CAAE;AAAA,QAAA;AAAA,MAGnE,KAAK;AACH,eAAO,KAAK,gBAAgB,GAAG;AAAA,MACjC,KAAK;AACH,eAAO,CAAA;AAAA;AAAA,MACT,KAAK;AAGH,eAAO,CAAA;AAAA,MACT;AAEE,cAAM,IAAI,MAAM,2BAA2B,UAAU,GAAG,CAAC,EAAE;AAAA,IAAA;AAAA,EAEjE;AAAA,EAEA;AAAA,EAEA,qBAAqB,KAAqB;AACxC,UAAM,QAAQ,KAAK,uBAAuB,IAAI,OAAO;AAGrD,iBAAa,KAAK,qBAAqB;AAEvC,QAAI,MAAM,SAAS,YAAY;AAE7B,WAAK,aAAa,MAAM;AACxB,aAAO,CAAA;AAAA,IACT;AAEA,UAAM,UAAU,KAAK;AAA
A,MACnB,KAAK,KAAK,YAAY,uCAAuC;AAAA,MAC7D;AAAA,IAAA,EACA,IAAI,CAAA,WAAU,CAAC,QAAQ,MAAM,CAAgB;AAE/C,SAAK,IACF,YAAY,SAAS,MAAM,QAAQ,KAAK,EACxC,OAAO,GAAG,QAAQ,MAAM,qBAAqB,OAAO;AAEvD,UAAM,oBAAoB;AAAA,MACxB,MAAM;AAAA,IAAA;AAER,QAAI,mBAAmB;AACrB,WAAK,wBAAwB,WAAW,YAAY;AAClD,YAAI;AACF,gBAAM,kBAAkB,MAAM,KAAK,KAAK,KAAK,WAAW;AAAA,QAC1D,SAAS,KAAK;AACZ,eAAK,IAAI,OAAO,oCAAoC,GAAG;AAAA,QACzD;AAAA,MACF,GAAG,6BAA6B;AAAA,IAClC;AAEA,WAAO;AAAA,EACT;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EA6BA,mBACE,WACA,QACc;AACd,QAAI;AACF,YAAM,CAAC,SAAS,OAAO,IAAI,UAAU,SAAS;AAC9C,YAAM,CAAC,SAAS,OAAO,IAAI,UAAU,OAAO,MAAM;AAClD,YAAM,UAAwB,CAAA;AAG9B,iBAAW,SAAS,QAAQ,UAAU;AACpC,iBAAS,KAAK,KAAK,KAAK;AAAA,MAC1B;AAEA,YAAM,CAAC,YAAY,UAAU,IAAI,qBAAqB,SAAS,OAAO;AACtE,iBAAW,MAAM,YAAY;AAC3B,cAAM,EAAC,QAAQ,KAAA,IAAQ,KAAK,QAAQ,IAAI,EAAE,CAAC;AAC3C,gBAAQ,KAAK,EAAC,KAAK,cAAc,IAAI,EAAC,QAAQ,KAAA,GAAM;AAAA,MACtD;AAGA,YAAM,CAAC,YAAY,UAAU,IAAI,qBAAqB,SAAS,OAAO;AACtE,iBAAW,MAAM,YAAY;AAC3B,cAAM,EAAC,QAAQ,KAAA,IAAQ,KAAK,QAAQ,IAAI,EAAE,CAAC;AAC3C,gBAAQ,KAAK,EAAC,KAAK,cAAc,IAAI,EAAC,QAAQ,KAAA,GAAM;AAAA,MACtD;AAEA,YAAM,SAAS,aAAa,SAAS,OAAO;AAC5C,iBAAW,MAAM,QAAQ;AACvB,gBAAQ;AAAA,UACN,GAAG,KAAK;AAAA,YACN,KAAK,QAAQ,IAAI,EAAE,CAAC;AAAA,YACpB,KAAK,QAAQ,IAAI,EAAE,CAAC;AAAA,UAAA;AAAA,QACtB;AAAA,MAEJ;AAEA,iBAAW,MAAM,YAAY;AAC3B,cAAM,OAAO,KAAK,QAAQ,IAAI,EAAE,CAAC;AACjC,gBAAQ,KAAK;AAAA,UACX,KAAK;AAAA,UACL;AAAA,UACA,UAAU,YAAY,IAAI;AAAA,QAAA,CAC3B;AAAA,MACH;AAIA,iBAAW,MAAM,YAAY;AAC3B,cAAM,OAAO,KAAK,QAAQ,IAAI,EAAE,CAAC;AACjC,gBAAQ,KAAK,EAAC,KAAK,gBAAgB,MAAK;AAAA,MAC1C;AACA,aAAO;AAAA,IACT,SAAS,GAAG;AACV,YAAM,IAAI,6BAA6B,OAAO,CAAC,GAAG,QAAQ,EAAC,OAAO,GAAE;AAAA,IACtE;AAAA,EACF;AAAA,EAEA,iBACE,UACA,UACc;AACd,UAAM,UAAwB,CAAA;AAC9B,QACE,SAAS,WAAW,SAAS,UAC7B,SAAS,SAAS,SAAS,MAC3B;AACA,cAAQ,KAAK;AAAA,QACX,KAAK;AAAA,QACL,KAAK,EAAC,QAAQ,SAAS,QAAQ,MAAM,SAAS,KAAA;AAAA,QAC9C,KAAK,EAAC,QAAQ,SAAS,QAAQ,MAAM,SAAS,KAAA;AAAA,MAAI,CACnD;AAAA,IACH;AACA,QACE,SAAS,oBAAo
B,SAAS,mBACtC,CAAC;AAAA,MACC,SAAS;AAAA,MACT,SAAS;AAAA,IAAA,GAEX;AACA,cAAQ,KAAK;AAAA,QACX,KAAK;AAAA,QACL,OAAO,EAAC,QAAQ,SAAS,QAAQ,MAAM,SAAS,KAAA;AAAA,QAChD,KAAK,YAAY,QAAQ;AAAA,QACzB,KAAK,YAAY,QAAQ;AAAA,MAAA,CAC1B;AAAA,IACH;AACA,UAAM,QAAQ,EAAC,QAAQ,SAAS,QAAQ,MAAM,SAAS,KAAA;AACvD,UAAM,aAAa,YAAY,SAAS,OAAO;AAC/C,UAAM,aAAa,YAAY,SAAS,OAAO;AAG/C,UAAM,CAAC,SAAS,KAAK,IAAI,qBAAqB,YAAY,UAAU;AACpE,eAAW,MAAM,SAAS;AACxB,YAAM,EAAC,MAAM,OAAA,IAAU,KAAK,WAAW,IAAI,EAAE,CAAC;AAC9C,cAAQ,KAAK,EAAC,KAAK,eAAe,OAAO,QAAO;AAAA,IAClD;AAGA,UAAM,OAAO,aAAa,YAAY,UAAU;AAChD,eAAW,MAAM,MAAM;AACrB,YAAM,EAAC,MAAM,SAAS,GAAG,QAAA,IAAW,KAAK,WAAW,IAAI,EAAE,CAAC;AAC3D,YAAM,EAAC,MAAM,SAAS,GAAG,QAAA,IAAW,KAAK,WAAW,IAAI,EAAE,CAAC;AAK3D,UACE,YAAY,WACZ,QAAQ,aAAa,QAAQ,YAC7B,QAAQ,YAAY,QAAQ,SAC5B;AACA,gBAAQ,KAAK;AAAA,UACX,KAAK;AAAA,UACL;AAAA,UACA,KAAK,EAAC,MAAM,SAAS,MAAM,QAAA;AAAA,UAC3B,KAAK,EAAC,MAAM,SAAS,MAAM,QAAA;AAAA,QAAO,CACnC;AAAA,MACH;AAAA,IACF;AAGA,eAAW,MAAM,OAAO;AACtB,YAAM,EAAC,MAAM,GAAG,KAAA,IAAQ,KAAK,WAAW,IAAI,EAAE,CAAC;AAC/C,YAAM,SAAS,EAAC,MAAM,KAAA;AAEtB,8BAAwB,MAAM,MAAM,MAAM;AAC1C,cAAQ,KAAK;AAAA,QACX,KAAK;AAAA,QACL;AAAA,QACA;AAAA,QACA,eAAe,YAAY,QAAQ;AAAA,MAAA,CACpC;AAAA,IACH;AACA,WAAO;AAAA,EACT;AAAA,EAEA,uBAAuB,SAAqB;AAC1C,UAAM,MACJ,mBAAmB,SACf,QAAQ,SAAS,OAAO,IACxB,IAAI,cAAc,OAAO,OAAO;AACtC,UAAM,OAAO,KAAK,MAAM,GAAG;AAC3B,WAAOA,MAAQ,MAAM,wBAAwB,aAAa;AAAA,EAC5D;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAoBA,MAAM,gBAAgB,KAAoD;AACxE,UAAM,EAAC,cAAc,aAAA,IAAgB,KAAK;AAC1C,QAAI,cAAc;AAChB,aAAO,CAAA;AAAA,IACT;AACA,UAAM,gBAAgB,MAAM;AAAA,MAC1B,KAAK;AAAA,MACL;AAAA,IAAA;AAEF,UAAM,aAAa,oBAAoB,KAAK,gBAAgB,aAAa;AACzE,QAAI,eAAe,MAAM;AACvB,YAAM,IAAI,2BAA2B,UAAU;AAAA,IACjD;AAKA,UAAM,OAAO,KAAK,eAAe,OAAO;AAAA,MACtC,CAAA,MAAK,EAAE,QAAQ,IAAI;AAAA,IAAA;AAErB,QAAI,CAAC,MAAM;AAET,YAAM,IAAI;AAAA,QACR,kCAAkC,UAAU,GAAG,CAAC;AAAA,MAAA;AAAA,IAEpD;AACA,QAAI,kBAAkB,MAAM,GAAG,GAAG;AAChC,YAAM,IAAI;AAAA,QACR,gDAAgD,UAAU,IAAI,CAAC,OAAO,UAAU,GAAG,CAAC;AAAA,MAAA;AAAA,IAExF;AACA,WAAO,CAAA
;AAAA,EACT;AACF;AAEA,SAAS,oBACP,GACA,GACe;AAEf,MAAI,EAAE,OAAO,WAAW,EAAE,OAAO,QAAQ;AACvC,WAAO;AAAA,EACT;AACA,WAAS,IAAI,GAAG,IAAI,EAAE,OAAO,QAAQ,KAAK;AACxC,UAAM,KAAK,EAAE,OAAO,CAAC;AACrB,UAAM,KAAK,EAAE,OAAO,CAAC;AACrB,UAAM,aAAa,mBAAmB,IAAI,EAAE;AAC5C,QAAI,YAAY;AACd,aAAO;AAAA,IACT;AAAA,EACF;AACA,SAAO;AACT;AAGA,MAAM,cAAc,CAAC,GAAyB,MAC5C,EAAE,CAAC,EAAE,MAAM,EAAE,CAAC,EAAE,MAAM,KAAK,EAAE,CAAC,EAAE,MAAM,EAAE,CAAC,EAAE,MAAM,IAAI;AAEvD,SAAS,mBACP,GACA,GACe;AACf,MAAI,EAAE,QAAQ,EAAE,OAAO,EAAE,WAAW,EAAE,UAAU,EAAE,SAAS,EAAE,MAAM;AACjE,WAAO,UAAU,EAAE,IAAI,yBAAyB,EAAE,IAAI;AAAA,EACxD;AACA,MAAI,CAAC,UAAU,EAAE,YAAY,EAAE,UAAU,GAAG;AAC1C,WAAO,yBAAyB,EAAE,IAAI;AAAA,EACxC;AACA,QAAM,QAAQ,OAAO,QAAQ,EAAE,OAAO,EAAE,KAAK,WAAW;AACxD,QAAM,QAAQ,OAAO,QAAQ,EAAE,OAAO,EAAE,KAAK,WAAW;AACxD,MACE,MAAM,WAAW,MAAM,UACvB,MAAM,KAAK,CAAC,CAAC,OAAO,IAAI,GAAG,MAAM;AAC/B,UAAM,CAAC,OAAO,IAAI,IAAI,MAAM,CAAC;AAC7B,WACE,UAAU,SACV,KAAK,QAAQ,KAAK,OAClB,KAAK,YAAY,KAAK,WACtB,KAAK,YAAY,KAAK;AAAA,EAE1B,CAAC,GACD;AACA,WAAO,qBAAqB,EAAE,IAAI;AAAA,EACpC;AACA,SAAO;AACT;AAEO,SAAS,kBAAkB,GAAuB,GAAqB;AAC5E,MAAI,EAAE,QAAQ,EAAE,eAAe,EAAE,WAAW,EAAE,UAAU,EAAE,SAAS,EAAE,MAAM;AACzE,WAAO;AAAA,EACT;AACA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,IAME,EAAE,oBAAoB,aACtB,CAAC,OAAO,IAAI,IAAI,EAAE,UAAU,GAAG,IAAI,IAAI,EAAE,UAAU,CAAC;AAAA,IACpD;AACA,WAAO;AAAA,EACT;AACA,QAAM,QAAQ,OAAO,QAAQ,EAAE,OAAO,EAAE,KAAK,WAAW;AACxD,QAAM,QAAQ,EAAE;AAChB,SACE,MAAM,WAAW,MAAM,UACvB,MAAM,KAAK,CAAC,CAAC,OAAO,IAAI,GAAG,MAAM;AAC/B,UAAM,OAAO,MAAM,CAAC;AACpB,WAAO,UAAU,KAAK,QAAQ,KAAK,YAAY,KAAK;AAAA,EACtD,CAAC;AAEL;AAEA,SAAS,eAAe,GAAmB;AACzC,MAAI,EAAE,aAAa,QAAQ;AACzB,WAAO,IAAI,MAAM,OAAO,CAAC,CAAC;AAAA,EAC5B;AACA,MAAI,aAAa,SAAS,iBAAiB,EAAE,SAAS,mBAAmB;AACvE,WAAO,IAAI,eAAe,CAAC;AAAA,EAC7B;AACA,SAAO;AACT;AACA,MAAM,WAAW,CAAC,OAAmB,GAAG,GAAG,MAAM,IAAI,GAAG,IAAI;AAE5D,SAAS,UAAU,WAA4B;AAC7C,SAAO;AAAA;AAAA;AAAA,IAGL,IAAI,IAAI,UAAU,OAAO,IAAI,CAAA,MAAK,CAAC,EAAE,KAAK,CAAC,CAAC,CAAC;AAAA,IAC7C,IAAI,IAAI,UAAU,QAAQ,IAAI,CAAA,MAAK,CAAC,SAAS,CAAC,GAAG,CAAC,CAAC,CAAC;AAAA,EAAA;AAExD;AAEA,SAAS,Y
ACP,SAC0C;AAC1C,QAAM,+BAAe,IAAA;AACrB,aAAW,CAAC,MAAM,IAAI,KAAK,OAAO,QAAQ,OAAO,GAAG;AAGlD,aAAS,IAAI,KAAK,KAAK,EAAC,GAAG,MAAM,MAAK;AAAA,EACxC;AACA,SAAO;AACT;AAEA,SAAS,YAAY,OAAyD;AAC5E,QAAM,WAA0B;AAAA,IAC9B,QAAQ;AAAA,MACN,SAAS,MAAM;AAAA,IAAA;AAAA,EACjB;AAEF,UAAQ,MAAM,iBAAA;AAAA,IACZ,KAAK;AACH,eAAS,OAAO,OAAO;AACvB;AAAA,IACF,KAAK;AACH,eAAS,OAAO,OAAO;AACvB;AAAA,IACF,KAAK;AACH,eAAS,OAAO,OAAO;AACvB;AAAA,IACF,KAAK;AACH,eAAS,OAAO,OAAO;AACvB;AAAA,IACF,KAAK;AACH;AAAA,IACF;AACE,kBAAY,MAAM,eAAe;AAAA,EAAA;AAErC,SAAO;AACT;AAIA,SAAS,aAAa,UAA6C;AAGjE,QAAM,EAAC,SAAS,GAAG,YAAY,iBAAiB,GAAG,SAAQ;AAC3D,SAAO;AAAA,IACL,GAAG;AAAA,IACH,QAAQ;AAAA,MACN,SAAS;AAAA,MACT,MAAM;AAAA,IAAA;AAAA;AAAA;AAAA,IAIR;AAAA,IACA;AAAA,EAAA;AAEJ;AAEA,MAAM,qCAAqC,MAAM;AAAA,EACtC,OAAO;AAAA,EACP;AAAA,EACA;AAAA,EAET,YACE,aACA,WACA,SACA;AACA;AAAA,MACE,sDAAsD,WAAW;AAAA,MACjE;AAAA,IAAA;AAEF,SAAK,cAAc;AACnB,SAAK,YAAY;AAAA,EACnB;AACF;AAEA,MAAM,mCAAmC,MAAM;AAAA,EACpC,OAAO;AAAA,EAEhB,YAAY,KAAa;AACvB;AAAA,MACE,GAAG,GAAG;AAAA,IAAA;AAAA,EAEV;AACF;AAGA,MAAM,uBAAuB,WAAW;AAAA,EAC7B,OAAO;AAAA,EAEhB,YAAY,OAAgB;AAC1B;AAAA,MACE;AAAA,MACA;AAAA,QACE;AAAA,MAAA;AAAA,IACF;AAAA,EAEJ;AACF;"}
@@ -1 +1 @@
1
- {"version":3,"file":"initial-sync.d.ts","sourceRoot":"","sources":["../../../../../../../zero-cache/src/services/change-source/pg/initial-sync.ts"],"names":[],"mappings":"AAIA,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,kBAAkB,CAAC;AAKjD,OAAO,KAAK,EAAC,QAAQ,EAAC,MAAM,iCAAiC,CAAC;AA2B9D,OAAO,KAAK,EAAC,WAAW,EAAC,MAAM,0BAA0B,CAAC;AAkB1D,MAAM,MAAM,kBAAkB,GAAG;IAC/B,gBAAgB,EAAE,MAAM,CAAC;IACzB,WAAW,CAAC,EAAE,OAAO,GAAG,SAAS,CAAC;CACnC,CAAC;AAEF,wBAAsB,WAAW,CAC/B,EAAE,EAAE,UAAU,EACd,KAAK,EAAE,WAAW,EAClB,EAAE,EAAE,QAAQ,EACZ,WAAW,EAAE,MAAM,EACnB,WAAW,EAAE,kBAAkB,iBAoLhC;AA+HD,eAAO,MAAM,iBAAiB,KAAK,CAAC"}
1
+ {"version":3,"file":"initial-sync.d.ts","sourceRoot":"","sources":["../../../../../../../zero-cache/src/services/change-source/pg/initial-sync.ts"],"names":[],"mappings":"AAIA,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,kBAAkB,CAAC;AAMjD,OAAO,KAAK,EAAC,QAAQ,EAAC,MAAM,iCAAiC,CAAC;AA2B9D,OAAO,KAAK,EAAC,WAAW,EAAC,MAAM,0BAA0B,CAAC;AAkB1D,MAAM,MAAM,kBAAkB,GAAG;IAC/B,gBAAgB,EAAE,MAAM,CAAC;IACzB,WAAW,CAAC,EAAE,OAAO,GAAG,SAAS,CAAC;CACnC,CAAC;AAEF,wBAAsB,WAAW,CAC/B,EAAE,EAAE,UAAU,EACd,KAAK,EAAE,WAAW,EAClB,EAAE,EAAE,QAAQ,EACZ,WAAW,EAAE,MAAM,EACnB,WAAW,EAAE,kBAAkB,iBAmLhC;AAsID,eAAO,MAAM,iBAAiB,KAAK,CAAC"}
@@ -3,6 +3,7 @@ import { platform } from "node:os";
3
3
  import { Writable } from "node:stream";
4
4
  import { pipeline } from "node:stream/promises";
5
5
  import postgres from "postgres";
6
+ import { must } from "../../../../../shared/src/must.js";
6
7
  import { createLiteTableStatement, createLiteIndexStatement } from "../../../db/create.js";
7
8
  import { READONLY } from "../../../db/mode-enum.js";
8
9
  import { TsvParser } from "../../../db/pg-copy.js";
@@ -16,7 +17,7 @@ import { CpuProfiler } from "../../../types/profiler.js";
16
17
  import { ALLOWED_APP_ID_CHARACTERS } from "../../../types/shards.js";
17
18
  import { id } from "../../../types/sql.js";
18
19
  import { ReplicationStatusPublisher } from "../../replicator/replication-status.js";
19
- import { initChangeLog } from "../../replicator/schema/change-log.js";
20
+ import { ColumnMetadataStore } from "../../replicator/schema/column-metadata.js";
20
21
  import { initReplicationState } from "../../replicator/schema/replication-state.js";
21
22
  import { toLexiVersion } from "./lsn.js";
22
23
  import { ensureShardSchema } from "./schema/init.js";
@@ -82,7 +83,6 @@ async function initialSync(lc, shard, tx, upstreamURI, syncOptions) {
82
83
  const { snapshot_name: snapshot, consistent_point: lsn } = slot;
83
84
  const initialVersion = toLexiVersion(lsn);
84
85
  initReplicationState(tx, publications, initialVersion);
85
- initChangeLog(tx);
86
86
  const start = performance.now();
87
87
  const published = await sql.begin(READONLY, async (tx2) => {
88
88
  await tx2.unsafe(
@@ -247,8 +247,13 @@ async function createReplicationSlot(lc, session, slotName) {
247
247
  return slot;
248
248
  }
249
249
  function createLiteTables(tx, tables, initialVersion) {
250
+ const columnMetadata = must(ColumnMetadataStore.getInstance(tx));
250
251
  for (const t of tables) {
251
252
  tx.exec(createLiteTableStatement(mapPostgresToLite(t, initialVersion)));
253
+ const tableName = liteTableName(t);
254
+ for (const [colName, colSpec] of Object.entries(t.columns)) {
255
+ columnMetadata.insert(tableName, colName, colSpec);
256
+ }
252
257
  }
253
258
  }
254
259
  function createLiteIndices(tx, indices) {
@@ -1 +1 @@
1
- {"version":3,"file":"initial-sync.js","sources":["../../../../../../../zero-cache/src/services/change-source/pg/initial-sync.ts"],"sourcesContent":["import {\n PG_CONFIGURATION_LIMIT_EXCEEDED,\n PG_INSUFFICIENT_PRIVILEGE,\n} from '@drdgvhbh/postgres-error-codes';\nimport type {LogContext} from '@rocicorp/logger';\nimport {platform} from 'node:os';\nimport {Writable} from 'node:stream';\nimport {pipeline} from 'node:stream/promises';\nimport postgres from 'postgres';\nimport type {Database} from '../../../../../zqlite/src/db.ts';\nimport {\n createLiteIndexStatement,\n createLiteTableStatement,\n} from '../../../db/create.ts';\nimport * as Mode from '../../../db/mode-enum.ts';\nimport {TsvParser} from '../../../db/pg-copy.ts';\nimport {\n mapPostgresToLite,\n mapPostgresToLiteIndex,\n} from '../../../db/pg-to-lite.ts';\nimport {getTypeParsers} from '../../../db/pg-type-parser.ts';\nimport type {IndexSpec, PublishedTableSpec} from '../../../db/specs.ts';\nimport {importSnapshot, TransactionPool} from '../../../db/transaction-pool.ts';\nimport {\n JSON_STRINGIFIED,\n liteValue,\n type LiteValueType,\n} from '../../../types/lite.ts';\nimport {liteTableName} from '../../../types/names.ts';\nimport {\n pgClient,\n type PostgresDB,\n type PostgresTransaction,\n type PostgresValueType,\n} from '../../../types/pg.ts';\nimport {CpuProfiler} from '../../../types/profiler.ts';\nimport type {ShardConfig} from '../../../types/shards.ts';\nimport {ALLOWED_APP_ID_CHARACTERS} from '../../../types/shards.ts';\nimport {id} from '../../../types/sql.ts';\nimport {ReplicationStatusPublisher} from '../../replicator/replication-status.ts';\nimport {initChangeLog} from '../../replicator/schema/change-log.ts';\nimport {initReplicationState} from '../../replicator/schema/replication-state.ts';\nimport {toLexiVersion} from './lsn.ts';\nimport {ensureShardSchema} from './schema/init.ts';\nimport {getPublicationInfo} from './schema/published.ts';\nimport {\n addReplica,\n dropShard,\n 
getInternalShardConfig,\n newReplicationSlot,\n replicationSlotExpression,\n validatePublications,\n} from './schema/shard.ts';\n\nexport type InitialSyncOptions = {\n tableCopyWorkers: number;\n profileCopy?: boolean | undefined;\n};\n\nexport async function initialSync(\n lc: LogContext,\n shard: ShardConfig,\n tx: Database,\n upstreamURI: string,\n syncOptions: InitialSyncOptions,\n) {\n if (!ALLOWED_APP_ID_CHARACTERS.test(shard.appID)) {\n throw new Error(\n 'The App ID may only consist of lower-case letters, numbers, and the underscore character',\n );\n }\n const {tableCopyWorkers, profileCopy} = syncOptions;\n const copyProfiler = profileCopy ? await CpuProfiler.connect() : null;\n const sql = pgClient(lc, upstreamURI);\n const replicationSession = pgClient(lc, upstreamURI, {\n ['fetch_types']: false, // Necessary for the streaming protocol\n connection: {replication: 'database'}, // https://www.postgresql.org/docs/current/protocol-replication.html\n });\n const slotName = newReplicationSlot(shard);\n const statusPublisher = new ReplicationStatusPublisher(tx).publish(\n lc,\n 'Initializing',\n );\n try {\n await checkUpstreamConfig(sql);\n\n const {publications} = await ensurePublishedTables(lc, sql, shard);\n lc.info?.(`Upstream is setup with publications [${publications}]`);\n\n const {database, host} = sql.options;\n lc.info?.(`opening replication session to ${database}@${host}`);\n\n let slot: ReplicationSlot;\n for (let first = true; ; first = false) {\n try {\n slot = await createReplicationSlot(lc, replicationSession, slotName);\n break;\n } catch (e) {\n if (first && e instanceof postgres.PostgresError) {\n if (e.code === PG_INSUFFICIENT_PRIVILEGE) {\n // Some Postgres variants (e.g. 
Google Cloud SQL) require that\n // the user have the REPLICATION role in order to create a slot.\n // Note that this must be done by the upstreamDB connection, and\n // does not work in the replicationSession itself.\n await sql`ALTER ROLE current_user WITH REPLICATION`;\n lc.info?.(`Added the REPLICATION role to database user`);\n continue;\n }\n if (e.code === PG_CONFIGURATION_LIMIT_EXCEEDED) {\n const slotExpression = replicationSlotExpression(shard);\n\n const dropped = await sql<{slot: string}[]>`\n SELECT slot_name as slot, pg_drop_replication_slot(slot_name) \n FROM pg_replication_slots\n WHERE slot_name LIKE ${slotExpression} AND NOT active`;\n if (dropped.length) {\n lc.warn?.(\n `Dropped inactive replication slots: ${dropped.map(({slot}) => slot)}`,\n e,\n );\n continue;\n }\n lc.error?.(`Unable to drop replication slots`, e);\n }\n }\n throw e;\n }\n }\n const {snapshot_name: snapshot, consistent_point: lsn} = slot;\n const initialVersion = toLexiVersion(lsn);\n\n initReplicationState(tx, publications, initialVersion);\n initChangeLog(tx);\n\n // Run up to MAX_WORKERS to copy of tables at the replication slot's snapshot.\n const start = performance.now();\n // Retrieve the published schema at the consistent_point.\n const published = await sql.begin(Mode.READONLY, async tx => {\n await tx.unsafe(/* sql*/ `SET TRANSACTION SNAPSHOT '${snapshot}'`);\n return getPublicationInfo(tx, publications);\n });\n // Note: If this throws, initial-sync is aborted.\n validatePublications(lc, published);\n\n // Now that tables have been validated, kick off the copiers.\n const {tables, indexes} = published;\n const numTables = tables.length;\n if (platform() === 'win32' && tableCopyWorkers < numTables) {\n lc.warn?.(\n `Increasing the number of copy workers from ${tableCopyWorkers} to ` +\n `${numTables} to work around a Node/Postgres connection bug`,\n );\n }\n const numWorkers =\n platform() === 'win32'\n ? 
numTables\n : Math.min(tableCopyWorkers, numTables);\n\n const copyPool = pgClient(\n lc,\n upstreamURI,\n {\n max: numWorkers,\n connection: {['application_name']: 'initial-sync-copy-worker'},\n },\n 'json-as-string',\n );\n const copiers = startTableCopyWorkers(\n lc,\n copyPool,\n snapshot,\n numWorkers,\n numTables,\n );\n try {\n createLiteTables(tx, tables, initialVersion);\n statusPublisher.publish(\n lc,\n 'Initializing',\n `Copying ${numTables} upstream tables at version ${initialVersion}`,\n 5000,\n );\n\n void copyProfiler?.start();\n const rowCounts = await Promise.all(\n tables.map(table =>\n copiers.processReadTask((db, lc) =>\n copy(lc, table, copyPool, db, tx),\n ),\n ),\n );\n void copyProfiler?.stopAndDispose(lc, 'initial-copy');\n\n const total = rowCounts.reduce(\n (acc, curr) => ({\n rows: acc.rows + curr.rows,\n flushTime: acc.flushTime + curr.flushTime,\n }),\n {rows: 0, flushTime: 0},\n );\n\n statusPublisher.publish(\n lc,\n 'Indexing',\n `Creating ${indexes.length} indexes`,\n 5000,\n );\n const indexStart = performance.now();\n createLiteIndices(tx, indexes);\n const index = performance.now() - indexStart;\n lc.info?.(`Created indexes (${index.toFixed(3)} ms)`);\n\n await addReplica(sql, shard, slotName, initialVersion, published);\n\n const elapsed = performance.now() - start;\n lc.info?.(\n `Synced ${total.rows.toLocaleString()} rows of ${numTables} tables in ${publications} up to ${lsn} ` +\n `(flush: ${total.flushTime.toFixed(3)}, index: ${index.toFixed(3)}, total: ${elapsed.toFixed(3)} ms)`,\n );\n } finally {\n copiers.setDone();\n if (platform() === 'win32') {\n // Workaround a Node bug in Windows in which certain COPY streams result\n // in hanging the connection, which causes this await to never resolve.\n void copyPool.end().catch(e => lc.warn?.(`Error closing copyPool`, e));\n } else {\n await copyPool.end();\n }\n }\n } catch (e) {\n // If initial-sync did not succeed, make a best effort to drop the\n // orphaned replication 
slot to avoid running out of slots in\n // pathological cases that result in repeated failures.\n lc.warn?.(`dropping replication slot ${slotName}`, e);\n await sql`\n SELECT pg_drop_replication_slot(slot_name) FROM pg_replication_slots\n WHERE slot_name = ${slotName};\n `.catch(e => lc.warn?.(`Unable to drop replication slot ${slotName}`, e));\n await statusPublisher.publishAndThrowError(lc, 'Initializing', e);\n } finally {\n statusPublisher.stop();\n await replicationSession.end();\n await sql.end();\n }\n}\n\nasync function checkUpstreamConfig(sql: PostgresDB) {\n const {walLevel, version} = (\n await sql<{walLevel: string; version: number}[]>`\n SELECT current_setting('wal_level') as \"walLevel\", \n current_setting('server_version_num') as \"version\";\n `\n )[0];\n\n if (walLevel !== 'logical') {\n throw new Error(\n `Postgres must be configured with \"wal_level = logical\" (currently: \"${walLevel})`,\n );\n }\n if (version < 150000) {\n throw new Error(\n `Must be running Postgres 15 or higher (currently: \"${version}\")`,\n );\n }\n}\n\nasync function ensurePublishedTables(\n lc: LogContext,\n sql: PostgresDB,\n shard: ShardConfig,\n validate = true,\n): Promise<{publications: string[]}> {\n const {database, host} = sql.options;\n lc.info?.(`Ensuring upstream PUBLICATION on ${database}@${host}`);\n\n await ensureShardSchema(lc, sql, shard);\n const {publications} = await getInternalShardConfig(sql, shard);\n\n if (validate) {\n const exists = await sql`\n SELECT pubname FROM pg_publication WHERE pubname IN ${sql(publications)}\n `.values();\n if (exists.length !== publications.length) {\n lc.warn?.(\n `some configured publications [${publications}] are missing: ` +\n `[${exists.flat()}]. 
resyncing`,\n );\n await sql.unsafe(dropShard(shard.appID, shard.shardNum));\n return ensurePublishedTables(lc, sql, shard, false);\n }\n }\n return {publications};\n}\n\nfunction startTableCopyWorkers(\n lc: LogContext,\n db: PostgresDB,\n snapshot: string,\n numWorkers: number,\n numTables: number,\n): TransactionPool {\n const {init} = importSnapshot(snapshot);\n const tableCopiers = new TransactionPool(\n lc,\n Mode.READONLY,\n init,\n undefined,\n numWorkers,\n );\n tableCopiers.run(db);\n\n lc.info?.(`Started ${numWorkers} workers to copy ${numTables} tables`);\n\n if (parseInt(process.versions.node) < 22) {\n lc.warn?.(\n `\\n\\n\\n` +\n `Older versions of Node have a bug that results in an unresponsive\\n` +\n `Postgres connection after running certain combinations of COPY commands.\\n` +\n `If initial sync hangs, run zero-cache with Node v22+. This has the additional\\n` +\n `benefit of being consistent with the Node version run in the production container image.` +\n `\\n\\n\\n`,\n );\n }\n return tableCopiers;\n}\n\n// Row returned by `CREATE_REPLICATION_SLOT`\ntype ReplicationSlot = {\n slot_name: string;\n consistent_point: string;\n snapshot_name: string;\n output_plugin: string;\n};\n\n// Note: The replication connection does not support the extended query protocol,\n// so all commands must be sent using sql.unsafe(). This is technically safe\n// because all placeholder values are under our control (i.e. 
\"slotName\").\nasync function createReplicationSlot(\n lc: LogContext,\n session: postgres.Sql,\n slotName: string,\n): Promise<ReplicationSlot> {\n const slot = (\n await session.unsafe<ReplicationSlot[]>(\n /*sql*/ `CREATE_REPLICATION_SLOT \"${slotName}\" LOGICAL pgoutput`,\n )\n )[0];\n lc.info?.(`Created replication slot ${slotName}`, slot);\n return slot;\n}\n\nfunction createLiteTables(\n tx: Database,\n tables: PublishedTableSpec[],\n initialVersion: string,\n) {\n for (const t of tables) {\n tx.exec(createLiteTableStatement(mapPostgresToLite(t, initialVersion)));\n }\n}\n\nfunction createLiteIndices(tx: Database, indices: IndexSpec[]) {\n for (const index of indices) {\n tx.exec(createLiteIndexStatement(mapPostgresToLiteIndex(index)));\n }\n}\n\n// Verified empirically that batches of 50 seem to be the sweet spot,\n// similar to the report in https://sqlite.org/forum/forumpost/8878a512d3652655\n//\n// Exported for testing.\nexport const INSERT_BATCH_SIZE = 50;\n\nconst MB = 1024 * 1024;\nconst MAX_BUFFERED_ROWS = 10_000;\nconst BUFFERED_SIZE_THRESHOLD = 8 * MB;\n\nasync function copy(\n lc: LogContext,\n table: PublishedTableSpec,\n dbClient: PostgresDB,\n from: PostgresTransaction,\n to: Database,\n) {\n const start = performance.now();\n let rows = 0;\n let flushTime = 0;\n\n const tableName = liteTableName(table);\n const orderedColumns = Object.entries(table.columns);\n\n const columnSpecs = orderedColumns.map(([_name, spec]) => spec);\n const selectColumns = orderedColumns.map(([c]) => id(c)).join(',');\n const insertColumns = orderedColumns.map(([c]) => c);\n const insertColumnList = insertColumns.map(c => id(c)).join(',');\n\n // (?,?,?,?,?)\n const valuesSql =\n insertColumns.length > 0\n ? `(${'?,'.repeat(insertColumns.length - 1)}?)`\n : '()';\n const insertSql = /*sql*/ `\n INSERT INTO \"${tableName}\" (${insertColumnList}) VALUES ${valuesSql}`;\n const insertStmt = to.prepare(insertSql);\n // INSERT VALUES (?,?,?,?,?),... 
x INSERT_BATCH_SIZE\n const insertBatchStmt = to.prepare(\n insertSql + `,${valuesSql}`.repeat(INSERT_BATCH_SIZE - 1),\n );\n\n const filterConditions = Object.values(table.publications)\n .map(({rowFilter}) => rowFilter)\n .filter(f => !!f); // remove nulls\n const selectStmt =\n /*sql*/ `\n SELECT ${selectColumns} FROM ${id(table.schema)}.${id(table.name)}` +\n (filterConditions.length === 0\n ? ''\n : /*sql*/ ` WHERE ${filterConditions.join(' OR ')}`);\n\n const valuesPerRow = columnSpecs.length;\n const valuesPerBatch = valuesPerRow * INSERT_BATCH_SIZE;\n\n // Preallocate the buffer of values to reduce memory allocation churn.\n const pendingValues: LiteValueType[] = Array.from({\n length: MAX_BUFFERED_ROWS * valuesPerRow,\n });\n let pendingRows = 0;\n let pendingSize = 0;\n\n function flush() {\n const start = performance.now();\n const flushedRows = pendingRows;\n const flushedSize = pendingSize;\n\n let l = 0;\n for (; pendingRows > INSERT_BATCH_SIZE; pendingRows -= INSERT_BATCH_SIZE) {\n insertBatchStmt.run(pendingValues.slice(l, (l += valuesPerBatch)));\n }\n // Insert the remaining rows individually.\n for (; pendingRows > 0; pendingRows--) {\n insertStmt.run(pendingValues.slice(l, (l += valuesPerRow)));\n }\n for (let i = 0; i < flushedRows; i++) {\n // Reuse the array and unreference the values to allow GC.\n // This is faster than allocating a new array every time.\n pendingValues[i] = undefined as unknown as LiteValueType;\n }\n pendingSize = 0;\n rows += flushedRows;\n\n const elapsed = performance.now() - start;\n flushTime += elapsed;\n lc.debug?.(\n `flushed ${flushedRows} ${tableName} rows (${flushedSize} bytes) in ${elapsed.toFixed(3)} ms`,\n );\n }\n\n lc.info?.(`Starting copy stream of ${tableName}:`, selectStmt);\n const pgParsers = await getTypeParsers(dbClient);\n const parsers = columnSpecs.map(c => {\n const pgParse = pgParsers.getTypeParser(c.typeOID);\n return (val: string) =>\n liteValue(\n pgParse(val) as PostgresValueType,\n 
c.dataType,\n JSON_STRINGIFIED,\n );\n });\n\n const tsvParser = new TsvParser();\n let col = 0;\n\n await pipeline(\n await from.unsafe(`COPY (${selectStmt}) TO STDOUT`).readable(),\n new Writable({\n highWaterMark: BUFFERED_SIZE_THRESHOLD,\n\n write(\n chunk: Buffer,\n _encoding: string,\n callback: (error?: Error) => void,\n ) {\n try {\n for (const text of tsvParser.parse(chunk)) {\n pendingSize += text === null ? 4 : text.length;\n pendingValues[pendingRows * valuesPerRow + col] =\n text === null ? null : parsers[col](text);\n\n if (++col === parsers.length) {\n col = 0;\n if (\n ++pendingRows >= MAX_BUFFERED_ROWS - valuesPerRow ||\n pendingSize >= BUFFERED_SIZE_THRESHOLD\n ) {\n flush();\n }\n }\n }\n callback();\n } catch (e) {\n callback(e instanceof Error ? e : new Error(String(e)));\n }\n },\n\n final: (callback: (error?: Error) => void) => {\n try {\n flush();\n callback();\n } catch (e) {\n callback(e instanceof Error ? e : new Error(String(e)));\n }\n },\n }),\n );\n\n const elapsed = performance.now() - start;\n lc.info?.(\n `Finished copying ${rows} rows into ${tableName} ` +\n `(flush: ${flushTime.toFixed(3)} ms) (total: ${elapsed.toFixed(3)} ms) `,\n );\n return {rows, 
flushTime};\n}\n"],"names":["slot","Mode.READONLY","tx","lc","e","start","elapsed"],"mappings":";;;;;;;;;;;;;;;;;;;;;;;;AA2DA,eAAsB,YACpB,IACA,OACA,IACA,aACA,aACA;AACA,MAAI,CAAC,0BAA0B,KAAK,MAAM,KAAK,GAAG;AAChD,UAAM,IAAI;AAAA,MACR;AAAA,IAAA;AAAA,EAEJ;AACA,QAAM,EAAC,kBAAkB,YAAA,IAAe;AACxC,QAAM,eAAe,cAAc,MAAM,YAAY,YAAY;AACjE,QAAM,MAAM,SAAS,IAAI,WAAW;AACpC,QAAM,qBAAqB,SAAS,IAAI,aAAa;AAAA,IACnD,CAAC,aAAa,GAAG;AAAA;AAAA,IACjB,YAAY,EAAC,aAAa,WAAA;AAAA;AAAA,EAAU,CACrC;AACD,QAAM,WAAW,mBAAmB,KAAK;AACzC,QAAM,kBAAkB,IAAI,2BAA2B,EAAE,EAAE;AAAA,IACzD;AAAA,IACA;AAAA,EAAA;AAEF,MAAI;AACF,UAAM,oBAAoB,GAAG;AAE7B,UAAM,EAAC,aAAA,IAAgB,MAAM,sBAAsB,IAAI,KAAK,KAAK;AACjE,OAAG,OAAO,wCAAwC,YAAY,GAAG;AAEjE,UAAM,EAAC,UAAU,KAAA,IAAQ,IAAI;AAC7B,OAAG,OAAO,kCAAkC,QAAQ,IAAI,IAAI,EAAE;AAE9D,QAAI;AACJ,aAAS,QAAQ,QAAQ,QAAQ,OAAO;AACtC,UAAI;AACF,eAAO,MAAM,sBAAsB,IAAI,oBAAoB,QAAQ;AACnE;AAAA,MACF,SAAS,GAAG;AACV,YAAI,SAAS,aAAa,SAAS,eAAe;AAChD,cAAI,EAAE,SAAS,2BAA2B;AAKxC,kBAAM;AACN,eAAG,OAAO,6CAA6C;AACvD;AAAA,UACF;AACA,cAAI,EAAE,SAAS,iCAAiC;AAC9C,kBAAM,iBAAiB,0BAA0B,KAAK;AAEtD,kBAAM,UAAU,MAAM;AAAA;AAAA;AAAA,uCAGK,cAAc;AACzC,gBAAI,QAAQ,QAAQ;AAClB,iBAAG;AAAA,gBACD,uCAAuC,QAAQ,IAAI,CAAC,EAAC,MAAAA,MAAAA,MAAUA,KAAI,CAAC;AAAA,gBACpE;AAAA,cAAA;AAEF;AAAA,YACF;AACA,eAAG,QAAQ,oCAAoC,CAAC;AAAA,UAClD;AAAA,QACF;AACA,cAAM;AAAA,MACR;AAAA,IACF;AACA,UAAM,EAAC,eAAe,UAAU,kBAAkB,QAAO;AACzD,UAAM,iBAAiB,cAAc,GAAG;AAExC,yBAAqB,IAAI,cAAc,cAAc;AACrD,kBAAc,EAAE;AAGhB,UAAM,QAAQ,YAAY,IAAA;AAE1B,UAAM,YAAY,MAAM,IAAI,MAAMC,UAAe,OAAMC,QAAM;AAC3D,YAAMA,IAAG;AAAA;AAAA,QAAgB,6BAA6B,QAAQ;AAAA,MAAA;AAC9D,aAAO,mBAAmBA,KAAI,YAAY;AAAA,IAC5C,CAAC;AAED,yBAAqB,IAAI,SAAS;AAGlC,UAAM,EAAC,QAAQ,QAAA,IAAW;AAC1B,UAAM,YAAY,OAAO;AACzB,QAAI,SAAA,MAAe,WAAW,mBAAmB,WAAW;AAC1D,SAAG;AAAA,QACD,8CAA8C,gBAAgB,OACzD,SAAS;AAAA,MAAA;AAAA,IAElB;AACA,UAAM,aACJ,eAAe,UACX,YACA,KAAK,IAAI,kBAAkB,SAAS;AAE1C,UAAM,WAAW;AAAA,MACf;AAAA,MACA;AAAA,MACA;AAAA,QACE,KAAK;AAAA,QACL,YAAY,EAAC,CAAC,kBAAkB,GAAG,2BAAA;AAAA,MAA0B;AAAA,MAE/D;AAAA,IAAA;AAEF,UAAM,UAAU;AAAA,MACd;AAAA,MACA;AAAA,M
ACA;AAAA,MACA;AAAA,MACA;AAAA,IAAA;AAEF,QAAI;AACF,uBAAiB,IAAI,QAAQ,cAAc;AAC3C,sBAAgB;AAAA,QACd;AAAA,QACA;AAAA,QACA,WAAW,SAAS,+BAA+B,cAAc;AAAA,QACjE;AAAA,MAAA;AAGF,WAAK,cAAc,MAAA;AACnB,YAAM,YAAY,MAAM,QAAQ;AAAA,QAC9B,OAAO;AAAA,UAAI,WACT,QAAQ;AAAA,YAAgB,CAAC,IAAIC,QAC3B,KAAKA,KAAI,OAAO,UAAU,IAAI,EAAE;AAAA,UAAA;AAAA,QAClC;AAAA,MACF;AAEF,WAAK,cAAc,eAAe,IAAI,cAAc;AAEpD,YAAM,QAAQ,UAAU;AAAA,QACtB,CAAC,KAAK,UAAU;AAAA,UACd,MAAM,IAAI,OAAO,KAAK;AAAA,UACtB,WAAW,IAAI,YAAY,KAAK;AAAA,QAAA;AAAA,QAElC,EAAC,MAAM,GAAG,WAAW,EAAA;AAAA,MAAC;AAGxB,sBAAgB;AAAA,QACd;AAAA,QACA;AAAA,QACA,YAAY,QAAQ,MAAM;AAAA,QAC1B;AAAA,MAAA;AAEF,YAAM,aAAa,YAAY,IAAA;AAC/B,wBAAkB,IAAI,OAAO;AAC7B,YAAM,QAAQ,YAAY,IAAA,IAAQ;AAClC,SAAG,OAAO,oBAAoB,MAAM,QAAQ,CAAC,CAAC,MAAM;AAEpD,YAAM,WAAW,KAAK,OAAO,UAAU,gBAAgB,SAAS;AAEhE,YAAM,UAAU,YAAY,IAAA,IAAQ;AACpC,SAAG;AAAA,QACD,UAAU,MAAM,KAAK,eAAA,CAAgB,YAAY,SAAS,cAAc,YAAY,UAAU,GAAG,YACpF,MAAM,UAAU,QAAQ,CAAC,CAAC,YAAY,MAAM,QAAQ,CAAC,CAAC,YAAY,QAAQ,QAAQ,CAAC,CAAC;AAAA,MAAA;AAAA,IAErG,UAAA;AACE,cAAQ,QAAA;AACR,UAAI,SAAA,MAAe,SAAS;AAG1B,aAAK,SAAS,MAAM,MAAM,OAAK,GAAG,OAAO,0BAA0B,CAAC,CAAC;AAAA,MACvE,OAAO;AACL,cAAM,SAAS,IAAA;AAAA,MACjB;AAAA,IACF;AAAA,EACF,SAAS,GAAG;AAIV,OAAG,OAAO,6BAA6B,QAAQ,IAAI,CAAC;AACpD,UAAM;AAAA;AAAA,4BAEkB,QAAQ;AAAA,MAC9B,MAAM,CAAAC,OAAK,GAAG,OAAO,mCAAmC,QAAQ,IAAIA,EAAC,CAAC;AACxE,UAAM,gBAAgB,qBAAqB,IAAI,gBAAgB,CAAC;AAAA,EAClE,UAAA;AACE,oBAAgB,KAAA;AAChB,UAAM,mBAAmB,IAAA;AACzB,UAAM,IAAI,IAAA;AAAA,EACZ;AACF;AAEA,eAAe,oBAAoB,KAAiB;AAClD,QAAM,EAAC,UAAU,QAAA,KACf,MAAM;AAAA;AAAA;AAAA,KAIN,CAAC;AAEH,MAAI,aAAa,WAAW;AAC1B,UAAM,IAAI;AAAA,MACR,uEAAuE,QAAQ;AAAA,IAAA;AAAA,EAEnF;AACA,MAAI,UAAU,MAAQ;AACpB,UAAM,IAAI;AAAA,MACR,sDAAsD,OAAO;AAAA,IAAA;AAAA,EAEjE;AACF;AAEA,eAAe,sBACb,IACA,KACA,OACA,WAAW,MACwB;AACnC,QAAM,EAAC,UAAU,KAAA,IAAQ,IAAI;AAC7B,KAAG,OAAO,oCAAoC,QAAQ,IAAI,IAAI,EAAE;AAEhE,QAAM,kBAAkB,IAAI,KAAK,KAAK;AACtC,QAAM,EAAC,aAAA,IAAgB,MAAM,uBAAuB,KAAK,KAAK;AAE9D,MAAI,UAAU;AACZ,UAAM,SAAS,MAAM;AAAA,4DACmC,IAAI,YAAY,CAAC;AAAA,QACrE,OAAA;AACJ,QAAI,OAAO,WAAW,aAAa,QAAQ;AACzC,SAAG
;AAAA,QACD,iCAAiC,YAAY,mBACvC,OAAO,MAAM;AAAA,MAAA;AAErB,YAAM,IAAI,OAAO,UAAU,MAAM,OAAO,MAAM,QAAQ,CAAC;AACvD,aAAO,sBAAsB,IAAI,KAAK,OAAO,KAAK;AAAA,IACpD;AAAA,EACF;AACA,SAAO,EAAC,aAAA;AACV;AAEA,SAAS,sBACP,IACA,IACA,UACA,YACA,WACiB;AACjB,QAAM,EAAC,KAAA,IAAQ,eAAe,QAAQ;AACtC,QAAM,eAAe,IAAI;AAAA,IACvB;AAAA,IACAH;AAAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EAAA;AAEF,eAAa,IAAI,EAAE;AAEnB,KAAG,OAAO,WAAW,UAAU,oBAAoB,SAAS,SAAS;AAErE,MAAI,SAAS,QAAQ,SAAS,IAAI,IAAI,IAAI;AACxC,OAAG;AAAA,MACD;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,IAAA;AAAA,EAOJ;AACA,SAAO;AACT;AAaA,eAAe,sBACb,IACA,SACA,UAC0B;AAC1B,QAAM,QACJ,MAAM,QAAQ;AAAA;AAAA,IACJ,4BAA4B,QAAQ;AAAA,EAAA,GAE9C,CAAC;AACH,KAAG,OAAO,4BAA4B,QAAQ,IAAI,IAAI;AACtD,SAAO;AACT;AAEA,SAAS,iBACP,IACA,QACA,gBACA;AACA,aAAW,KAAK,QAAQ;AACtB,OAAG,KAAK,yBAAyB,kBAAkB,GAAG,cAAc,CAAC,CAAC;AAAA,EACxE;AACF;AAEA,SAAS,kBAAkB,IAAc,SAAsB;AAC7D,aAAW,SAAS,SAAS;AAC3B,OAAG,KAAK,yBAAyB,uBAAuB,KAAK,CAAC,CAAC;AAAA,EACjE;AACF;AAMO,MAAM,oBAAoB;AAEjC,MAAM,KAAK,OAAO;AAClB,MAAM,oBAAoB;AAC1B,MAAM,0BAA0B,IAAI;AAEpC,eAAe,KACb,IACA,OACA,UACA,MACA,IACA;AACA,QAAM,QAAQ,YAAY,IAAA;AAC1B,MAAI,OAAO;AACX,MAAI,YAAY;AAEhB,QAAM,YAAY,cAAc,KAAK;AACrC,QAAM,iBAAiB,OAAO,QAAQ,MAAM,OAAO;AAEnD,QAAM,cAAc,eAAe,IAAI,CAAC,CAAC,OAAO,IAAI,MAAM,IAAI;AAC9D,QAAM,gBAAgB,eAAe,IAAI,CAAC,CAAC,CAAC,MAAM,GAAG,CAAC,CAAC,EAAE,KAAK,GAAG;AACjE,QAAM,gBAAgB,eAAe,IAAI,CAAC,CAAC,CAAC,MAAM,CAAC;AACnD,QAAM,mBAAmB,cAAc,IAAI,CAAA,MAAK,GAAG,CAAC,CAAC,EAAE,KAAK,GAAG;AAG/D,QAAM,YACJ,cAAc,SAAS,IACnB,IAAI,KAAK,OAAO,cAAc,SAAS,CAAC,CAAC,OACzC;AACN,QAAM;AAAA;AAAA,IAAoB;AAAA,mBACT,SAAS,MAAM,gBAAgB,YAAY,SAAS;AAAA;AACrE,QAAM,aAAa,GAAG,QAAQ,SAAS;AAEvC,QAAM,kBAAkB,GAAG;AAAA,IACzB,YAAY,IAAI,SAAS,GAAG,OAAO,oBAAoB,CAAC;AAAA,EAAA;AAG1D,QAAM,mBAAmB,OAAO,OAAO,MAAM,YAAY,EACtD,IAAI,CAAC,EAAC,UAAA,MAAe,SAAS,EAC9B,OAAO,CAAA,MAAK,CAAC,CAAC,CAAC;AAClB,QAAM;AAAA;AAAA,IACI;AAAA,aACC,aAAa,SAAS,GAAG,MAAM,MAAM,CAAC,IAAI,GAAG,MAAM,IAAI,CAAC,MAChE,iBAAiB,WAAW,IACzB;AAAA;AAAA,MACQ,UAAU,iBAAiB,KAAK,MAAM,CAAC;AAAA;AAAA;AAErD,QAAM,eAAe,YAAY;AACjC,QAAM,iBA
AiB,eAAe;AAGtC,QAAM,gBAAiC,MAAM,KAAK;AAAA,IAChD,QAAQ,oBAAoB;AAAA,EAAA,CAC7B;AACD,MAAI,cAAc;AAClB,MAAI,cAAc;AAElB,WAAS,QAAQ;AACf,UAAMI,SAAQ,YAAY,IAAA;AAC1B,UAAM,cAAc;AACpB,UAAM,cAAc;AAEpB,QAAI,IAAI;AACR,WAAO,cAAc,mBAAmB,eAAe,mBAAmB;AACxE,sBAAgB,IAAI,cAAc,MAAM,GAAI,KAAK,cAAe,CAAC;AAAA,IACnE;AAEA,WAAO,cAAc,GAAG,eAAe;AACrC,iBAAW,IAAI,cAAc,MAAM,GAAI,KAAK,YAAa,CAAC;AAAA,IAC5D;AACA,aAAS,IAAI,GAAG,IAAI,aAAa,KAAK;AAGpC,oBAAc,CAAC,IAAI;AAAA,IACrB;AACA,kBAAc;AACd,YAAQ;AAER,UAAMC,WAAU,YAAY,IAAA,IAAQD;AACpC,iBAAaC;AACb,OAAG;AAAA,MACD,WAAW,WAAW,IAAI,SAAS,UAAU,WAAW,cAAcA,SAAQ,QAAQ,CAAC,CAAC;AAAA,IAAA;AAAA,EAE5F;AAEA,KAAG,OAAO,2BAA2B,SAAS,KAAK,UAAU;AAC7D,QAAM,YAAY,MAAM,eAAe,QAAQ;AAC/C,QAAM,UAAU,YAAY,IAAI,CAAA,MAAK;AACnC,UAAM,UAAU,UAAU,cAAc,EAAE,OAAO;AACjD,WAAO,CAAC,QACN;AAAA,MACE,QAAQ,GAAG;AAAA,MACX,EAAE;AAAA,MACF;AAAA,IAAA;AAAA,EAEN,CAAC;AAED,QAAM,YAAY,IAAI,UAAA;AACtB,MAAI,MAAM;AAEV,QAAM;AAAA,IACJ,MAAM,KAAK,OAAO,SAAS,UAAU,aAAa,EAAE,SAAA;AAAA,IACpD,IAAI,SAAS;AAAA,MACX,eAAe;AAAA,MAEf,MACE,OACA,WACA,UACA;AACA,YAAI;AACF,qBAAW,QAAQ,UAAU,MAAM,KAAK,GAAG;AACzC,2BAAe,SAAS,OAAO,IAAI,KAAK;AACxC,0BAAc,cAAc,eAAe,GAAG,IAC5C,SAAS,OAAO,OAAO,QAAQ,GAAG,EAAE,IAAI;AAE1C,gBAAI,EAAE,QAAQ,QAAQ,QAAQ;AAC5B,oBAAM;AACN,kBACE,EAAE,eAAe,oBAAoB,gBACrC,eAAe,yBACf;AACA,sBAAA;AAAA,cACF;AAAA,YACF;AAAA,UACF;AACA,mBAAA;AAAA,QACF,SAAS,GAAG;AACV,mBAAS,aAAa,QAAQ,IAAI,IAAI,MAAM,OAAO,CAAC,CAAC,CAAC;AAAA,QACxD;AAAA,MACF;AAAA,MAEA,OAAO,CAAC,aAAsC;AAC5C,YAAI;AACF,gBAAA;AACA,mBAAA;AAAA,QACF,SAAS,GAAG;AACV,mBAAS,aAAa,QAAQ,IAAI,IAAI,MAAM,OAAO,CAAC,CAAC,CAAC;AAAA,QACxD;AAAA,MACF;AAAA,IAAA,CACD;AAAA,EAAA;AAGH,QAAM,UAAU,YAAY,IAAA,IAAQ;AACpC,KAAG;AAAA,IACD,oBAAoB,IAAI,cAAc,SAAS,YAClC,UAAU,QAAQ,CAAC,CAAC,gBAAgB,QAAQ,QAAQ,CAAC,CAAC;AAAA,EAAA;AAErE,SAAO,EAAC,MAAM,UAAA;AAChB;"}
1
+ {"version":3,"file":"initial-sync.js","sources":["../../../../../../../zero-cache/src/services/change-source/pg/initial-sync.ts"],"sourcesContent":["import {\n PG_CONFIGURATION_LIMIT_EXCEEDED,\n PG_INSUFFICIENT_PRIVILEGE,\n} from '@drdgvhbh/postgres-error-codes';\nimport type {LogContext} from '@rocicorp/logger';\nimport {platform} from 'node:os';\nimport {Writable} from 'node:stream';\nimport {pipeline} from 'node:stream/promises';\nimport postgres from 'postgres';\nimport {must} from '../../../../../shared/src/must.ts';\nimport type {Database} from '../../../../../zqlite/src/db.ts';\nimport {\n createLiteIndexStatement,\n createLiteTableStatement,\n} from '../../../db/create.ts';\nimport * as Mode from '../../../db/mode-enum.ts';\nimport {TsvParser} from '../../../db/pg-copy.ts';\nimport {\n mapPostgresToLite,\n mapPostgresToLiteIndex,\n} from '../../../db/pg-to-lite.ts';\nimport {getTypeParsers} from '../../../db/pg-type-parser.ts';\nimport type {IndexSpec, PublishedTableSpec} from '../../../db/specs.ts';\nimport {importSnapshot, TransactionPool} from '../../../db/transaction-pool.ts';\nimport {\n JSON_STRINGIFIED,\n liteValue,\n type LiteValueType,\n} from '../../../types/lite.ts';\nimport {liteTableName} from '../../../types/names.ts';\nimport {\n pgClient,\n type PostgresDB,\n type PostgresTransaction,\n type PostgresValueType,\n} from '../../../types/pg.ts';\nimport {CpuProfiler} from '../../../types/profiler.ts';\nimport type {ShardConfig} from '../../../types/shards.ts';\nimport {ALLOWED_APP_ID_CHARACTERS} from '../../../types/shards.ts';\nimport {id} from '../../../types/sql.ts';\nimport {ReplicationStatusPublisher} from '../../replicator/replication-status.ts';\nimport {ColumnMetadataStore} from '../../replicator/schema/column-metadata.ts';\nimport {initReplicationState} from '../../replicator/schema/replication-state.ts';\nimport {toLexiVersion} from './lsn.ts';\nimport {ensureShardSchema} from './schema/init.ts';\nimport {getPublicationInfo} from 
'./schema/published.ts';\nimport {\n addReplica,\n dropShard,\n getInternalShardConfig,\n newReplicationSlot,\n replicationSlotExpression,\n validatePublications,\n} from './schema/shard.ts';\n\nexport type InitialSyncOptions = {\n tableCopyWorkers: number;\n profileCopy?: boolean | undefined;\n};\n\nexport async function initialSync(\n lc: LogContext,\n shard: ShardConfig,\n tx: Database,\n upstreamURI: string,\n syncOptions: InitialSyncOptions,\n) {\n if (!ALLOWED_APP_ID_CHARACTERS.test(shard.appID)) {\n throw new Error(\n 'The App ID may only consist of lower-case letters, numbers, and the underscore character',\n );\n }\n const {tableCopyWorkers, profileCopy} = syncOptions;\n const copyProfiler = profileCopy ? await CpuProfiler.connect() : null;\n const sql = pgClient(lc, upstreamURI);\n const replicationSession = pgClient(lc, upstreamURI, {\n ['fetch_types']: false, // Necessary for the streaming protocol\n connection: {replication: 'database'}, // https://www.postgresql.org/docs/current/protocol-replication.html\n });\n const slotName = newReplicationSlot(shard);\n const statusPublisher = new ReplicationStatusPublisher(tx).publish(\n lc,\n 'Initializing',\n );\n try {\n await checkUpstreamConfig(sql);\n\n const {publications} = await ensurePublishedTables(lc, sql, shard);\n lc.info?.(`Upstream is setup with publications [${publications}]`);\n\n const {database, host} = sql.options;\n lc.info?.(`opening replication session to ${database}@${host}`);\n\n let slot: ReplicationSlot;\n for (let first = true; ; first = false) {\n try {\n slot = await createReplicationSlot(lc, replicationSession, slotName);\n break;\n } catch (e) {\n if (first && e instanceof postgres.PostgresError) {\n if (e.code === PG_INSUFFICIENT_PRIVILEGE) {\n // Some Postgres variants (e.g. 
Google Cloud SQL) require that\n // the user have the REPLICATION role in order to create a slot.\n // Note that this must be done by the upstreamDB connection, and\n // does not work in the replicationSession itself.\n await sql`ALTER ROLE current_user WITH REPLICATION`;\n lc.info?.(`Added the REPLICATION role to database user`);\n continue;\n }\n if (e.code === PG_CONFIGURATION_LIMIT_EXCEEDED) {\n const slotExpression = replicationSlotExpression(shard);\n\n const dropped = await sql<{slot: string}[]>`\n SELECT slot_name as slot, pg_drop_replication_slot(slot_name) \n FROM pg_replication_slots\n WHERE slot_name LIKE ${slotExpression} AND NOT active`;\n if (dropped.length) {\n lc.warn?.(\n `Dropped inactive replication slots: ${dropped.map(({slot}) => slot)}`,\n e,\n );\n continue;\n }\n lc.error?.(`Unable to drop replication slots`, e);\n }\n }\n throw e;\n }\n }\n const {snapshot_name: snapshot, consistent_point: lsn} = slot;\n const initialVersion = toLexiVersion(lsn);\n\n initReplicationState(tx, publications, initialVersion);\n\n // Run up to MAX_WORKERS to copy of tables at the replication slot's snapshot.\n const start = performance.now();\n // Retrieve the published schema at the consistent_point.\n const published = await sql.begin(Mode.READONLY, async tx => {\n await tx.unsafe(/* sql*/ `SET TRANSACTION SNAPSHOT '${snapshot}'`);\n return getPublicationInfo(tx, publications);\n });\n // Note: If this throws, initial-sync is aborted.\n validatePublications(lc, published);\n\n // Now that tables have been validated, kick off the copiers.\n const {tables, indexes} = published;\n const numTables = tables.length;\n if (platform() === 'win32' && tableCopyWorkers < numTables) {\n lc.warn?.(\n `Increasing the number of copy workers from ${tableCopyWorkers} to ` +\n `${numTables} to work around a Node/Postgres connection bug`,\n );\n }\n const numWorkers =\n platform() === 'win32'\n ? 
numTables\n : Math.min(tableCopyWorkers, numTables);\n\n const copyPool = pgClient(\n lc,\n upstreamURI,\n {\n max: numWorkers,\n connection: {['application_name']: 'initial-sync-copy-worker'},\n },\n 'json-as-string',\n );\n const copiers = startTableCopyWorkers(\n lc,\n copyPool,\n snapshot,\n numWorkers,\n numTables,\n );\n try {\n createLiteTables(tx, tables, initialVersion);\n statusPublisher.publish(\n lc,\n 'Initializing',\n `Copying ${numTables} upstream tables at version ${initialVersion}`,\n 5000,\n );\n\n void copyProfiler?.start();\n const rowCounts = await Promise.all(\n tables.map(table =>\n copiers.processReadTask((db, lc) =>\n copy(lc, table, copyPool, db, tx),\n ),\n ),\n );\n void copyProfiler?.stopAndDispose(lc, 'initial-copy');\n\n const total = rowCounts.reduce(\n (acc, curr) => ({\n rows: acc.rows + curr.rows,\n flushTime: acc.flushTime + curr.flushTime,\n }),\n {rows: 0, flushTime: 0},\n );\n\n statusPublisher.publish(\n lc,\n 'Indexing',\n `Creating ${indexes.length} indexes`,\n 5000,\n );\n const indexStart = performance.now();\n createLiteIndices(tx, indexes);\n const index = performance.now() - indexStart;\n lc.info?.(`Created indexes (${index.toFixed(3)} ms)`);\n\n await addReplica(sql, shard, slotName, initialVersion, published);\n\n const elapsed = performance.now() - start;\n lc.info?.(\n `Synced ${total.rows.toLocaleString()} rows of ${numTables} tables in ${publications} up to ${lsn} ` +\n `(flush: ${total.flushTime.toFixed(3)}, index: ${index.toFixed(3)}, total: ${elapsed.toFixed(3)} ms)`,\n );\n } finally {\n copiers.setDone();\n if (platform() === 'win32') {\n // Workaround a Node bug in Windows in which certain COPY streams result\n // in hanging the connection, which causes this await to never resolve.\n void copyPool.end().catch(e => lc.warn?.(`Error closing copyPool`, e));\n } else {\n await copyPool.end();\n }\n }\n } catch (e) {\n // If initial-sync did not succeed, make a best effort to drop the\n // orphaned replication 
slot to avoid running out of slots in\n // pathological cases that result in repeated failures.\n lc.warn?.(`dropping replication slot ${slotName}`, e);\n await sql`\n SELECT pg_drop_replication_slot(slot_name) FROM pg_replication_slots\n WHERE slot_name = ${slotName};\n `.catch(e => lc.warn?.(`Unable to drop replication slot ${slotName}`, e));\n await statusPublisher.publishAndThrowError(lc, 'Initializing', e);\n } finally {\n statusPublisher.stop();\n await replicationSession.end();\n await sql.end();\n }\n}\n\nasync function checkUpstreamConfig(sql: PostgresDB) {\n const {walLevel, version} = (\n await sql<{walLevel: string; version: number}[]>`\n SELECT current_setting('wal_level') as \"walLevel\", \n current_setting('server_version_num') as \"version\";\n `\n )[0];\n\n if (walLevel !== 'logical') {\n throw new Error(\n `Postgres must be configured with \"wal_level = logical\" (currently: \"${walLevel})`,\n );\n }\n if (version < 150000) {\n throw new Error(\n `Must be running Postgres 15 or higher (currently: \"${version}\")`,\n );\n }\n}\n\nasync function ensurePublishedTables(\n lc: LogContext,\n sql: PostgresDB,\n shard: ShardConfig,\n validate = true,\n): Promise<{publications: string[]}> {\n const {database, host} = sql.options;\n lc.info?.(`Ensuring upstream PUBLICATION on ${database}@${host}`);\n\n await ensureShardSchema(lc, sql, shard);\n const {publications} = await getInternalShardConfig(sql, shard);\n\n if (validate) {\n const exists = await sql`\n SELECT pubname FROM pg_publication WHERE pubname IN ${sql(publications)}\n `.values();\n if (exists.length !== publications.length) {\n lc.warn?.(\n `some configured publications [${publications}] are missing: ` +\n `[${exists.flat()}]. 
resyncing`,\n );\n await sql.unsafe(dropShard(shard.appID, shard.shardNum));\n return ensurePublishedTables(lc, sql, shard, false);\n }\n }\n return {publications};\n}\n\nfunction startTableCopyWorkers(\n lc: LogContext,\n db: PostgresDB,\n snapshot: string,\n numWorkers: number,\n numTables: number,\n): TransactionPool {\n const {init} = importSnapshot(snapshot);\n const tableCopiers = new TransactionPool(\n lc,\n Mode.READONLY,\n init,\n undefined,\n numWorkers,\n );\n tableCopiers.run(db);\n\n lc.info?.(`Started ${numWorkers} workers to copy ${numTables} tables`);\n\n if (parseInt(process.versions.node) < 22) {\n lc.warn?.(\n `\\n\\n\\n` +\n `Older versions of Node have a bug that results in an unresponsive\\n` +\n `Postgres connection after running certain combinations of COPY commands.\\n` +\n `If initial sync hangs, run zero-cache with Node v22+. This has the additional\\n` +\n `benefit of being consistent with the Node version run in the production container image.` +\n `\\n\\n\\n`,\n );\n }\n return tableCopiers;\n}\n\n// Row returned by `CREATE_REPLICATION_SLOT`\ntype ReplicationSlot = {\n slot_name: string;\n consistent_point: string;\n snapshot_name: string;\n output_plugin: string;\n};\n\n// Note: The replication connection does not support the extended query protocol,\n// so all commands must be sent using sql.unsafe(). This is technically safe\n// because all placeholder values are under our control (i.e. 
\"slotName\").\nasync function createReplicationSlot(\n lc: LogContext,\n session: postgres.Sql,\n slotName: string,\n): Promise<ReplicationSlot> {\n const slot = (\n await session.unsafe<ReplicationSlot[]>(\n /*sql*/ `CREATE_REPLICATION_SLOT \"${slotName}\" LOGICAL pgoutput`,\n )\n )[0];\n lc.info?.(`Created replication slot ${slotName}`, slot);\n return slot;\n}\n\nfunction createLiteTables(\n tx: Database,\n tables: PublishedTableSpec[],\n initialVersion: string,\n) {\n // TODO: Figure out how to reuse the ChangeProcessor here to avoid\n // duplicating the ColumnMetadata logic.\n const columnMetadata = must(ColumnMetadataStore.getInstance(tx));\n for (const t of tables) {\n tx.exec(createLiteTableStatement(mapPostgresToLite(t, initialVersion)));\n const tableName = liteTableName(t);\n for (const [colName, colSpec] of Object.entries(t.columns)) {\n columnMetadata.insert(tableName, colName, colSpec);\n }\n }\n}\n\nfunction createLiteIndices(tx: Database, indices: IndexSpec[]) {\n for (const index of indices) {\n tx.exec(createLiteIndexStatement(mapPostgresToLiteIndex(index)));\n }\n}\n\n// Verified empirically that batches of 50 seem to be the sweet spot,\n// similar to the report in https://sqlite.org/forum/forumpost/8878a512d3652655\n//\n// Exported for testing.\nexport const INSERT_BATCH_SIZE = 50;\n\nconst MB = 1024 * 1024;\nconst MAX_BUFFERED_ROWS = 10_000;\nconst BUFFERED_SIZE_THRESHOLD = 8 * MB;\n\nasync function copy(\n lc: LogContext,\n table: PublishedTableSpec,\n dbClient: PostgresDB,\n from: PostgresTransaction,\n to: Database,\n) {\n const start = performance.now();\n let rows = 0;\n let flushTime = 0;\n\n const tableName = liteTableName(table);\n const orderedColumns = Object.entries(table.columns);\n\n const columnSpecs = orderedColumns.map(([_name, spec]) => spec);\n const selectColumns = orderedColumns.map(([c]) => id(c)).join(',');\n const insertColumns = orderedColumns.map(([c]) => c);\n const insertColumnList = insertColumns.map(c => 
id(c)).join(',');\n\n // (?,?,?,?,?)\n const valuesSql =\n insertColumns.length > 0\n ? `(${'?,'.repeat(insertColumns.length - 1)}?)`\n : '()';\n const insertSql = /*sql*/ `\n INSERT INTO \"${tableName}\" (${insertColumnList}) VALUES ${valuesSql}`;\n const insertStmt = to.prepare(insertSql);\n // INSERT VALUES (?,?,?,?,?),... x INSERT_BATCH_SIZE\n const insertBatchStmt = to.prepare(\n insertSql + `,${valuesSql}`.repeat(INSERT_BATCH_SIZE - 1),\n );\n\n const filterConditions = Object.values(table.publications)\n .map(({rowFilter}) => rowFilter)\n .filter(f => !!f); // remove nulls\n const selectStmt =\n /*sql*/ `\n SELECT ${selectColumns} FROM ${id(table.schema)}.${id(table.name)}` +\n (filterConditions.length === 0\n ? ''\n : /*sql*/ ` WHERE ${filterConditions.join(' OR ')}`);\n\n const valuesPerRow = columnSpecs.length;\n const valuesPerBatch = valuesPerRow * INSERT_BATCH_SIZE;\n\n // Preallocate the buffer of values to reduce memory allocation churn.\n const pendingValues: LiteValueType[] = Array.from({\n length: MAX_BUFFERED_ROWS * valuesPerRow,\n });\n let pendingRows = 0;\n let pendingSize = 0;\n\n function flush() {\n const start = performance.now();\n const flushedRows = pendingRows;\n const flushedSize = pendingSize;\n\n let l = 0;\n for (; pendingRows > INSERT_BATCH_SIZE; pendingRows -= INSERT_BATCH_SIZE) {\n insertBatchStmt.run(pendingValues.slice(l, (l += valuesPerBatch)));\n }\n // Insert the remaining rows individually.\n for (; pendingRows > 0; pendingRows--) {\n insertStmt.run(pendingValues.slice(l, (l += valuesPerRow)));\n }\n for (let i = 0; i < flushedRows; i++) {\n // Reuse the array and unreference the values to allow GC.\n // This is faster than allocating a new array every time.\n pendingValues[i] = undefined as unknown as LiteValueType;\n }\n pendingSize = 0;\n rows += flushedRows;\n\n const elapsed = performance.now() - start;\n flushTime += elapsed;\n lc.debug?.(\n `flushed ${flushedRows} ${tableName} rows (${flushedSize} bytes) in 
${elapsed.toFixed(3)} ms`,\n );\n }\n\n lc.info?.(`Starting copy stream of ${tableName}:`, selectStmt);\n const pgParsers = await getTypeParsers(dbClient);\n const parsers = columnSpecs.map(c => {\n const pgParse = pgParsers.getTypeParser(c.typeOID);\n return (val: string) =>\n liteValue(\n pgParse(val) as PostgresValueType,\n c.dataType,\n JSON_STRINGIFIED,\n );\n });\n\n const tsvParser = new TsvParser();\n let col = 0;\n\n await pipeline(\n await from.unsafe(`COPY (${selectStmt}) TO STDOUT`).readable(),\n new Writable({\n highWaterMark: BUFFERED_SIZE_THRESHOLD,\n\n write(\n chunk: Buffer,\n _encoding: string,\n callback: (error?: Error) => void,\n ) {\n try {\n for (const text of tsvParser.parse(chunk)) {\n pendingSize += text === null ? 4 : text.length;\n pendingValues[pendingRows * valuesPerRow + col] =\n text === null ? null : parsers[col](text);\n\n if (++col === parsers.length) {\n col = 0;\n if (\n ++pendingRows >= MAX_BUFFERED_ROWS - valuesPerRow ||\n pendingSize >= BUFFERED_SIZE_THRESHOLD\n ) {\n flush();\n }\n }\n }\n callback();\n } catch (e) {\n callback(e instanceof Error ? e : new Error(String(e)));\n }\n },\n\n final: (callback: (error?: Error) => void) => {\n try {\n flush();\n callback();\n } catch (e) {\n callback(e instanceof Error ? 
e : new Error(String(e)));\n }\n },\n }),\n );\n\n const elapsed = performance.now() - start;\n lc.info?.(\n `Finished copying ${rows} rows into ${tableName} ` +\n `(flush: ${flushTime.toFixed(3)} ms) (total: ${elapsed.toFixed(3)} ms) `,\n );\n return {rows, flushTime};\n}\n"],"names":["slot","Mode.READONLY","tx","lc","e","start","elapsed"],"mappings":";;;;;;;;;;;;;;;;;;;;;;;;;AA4DA,eAAsB,YACpB,IACA,OACA,IACA,aACA,aACA;AACA,MAAI,CAAC,0BAA0B,KAAK,MAAM,KAAK,GAAG;AAChD,UAAM,IAAI;AAAA,MACR;AAAA,IAAA;AAAA,EAEJ;AACA,QAAM,EAAC,kBAAkB,YAAA,IAAe;AACxC,QAAM,eAAe,cAAc,MAAM,YAAY,YAAY;AACjE,QAAM,MAAM,SAAS,IAAI,WAAW;AACpC,QAAM,qBAAqB,SAAS,IAAI,aAAa;AAAA,IACnD,CAAC,aAAa,GAAG;AAAA;AAAA,IACjB,YAAY,EAAC,aAAa,WAAA;AAAA;AAAA,EAAU,CACrC;AACD,QAAM,WAAW,mBAAmB,KAAK;AACzC,QAAM,kBAAkB,IAAI,2BAA2B,EAAE,EAAE;AAAA,IACzD;AAAA,IACA;AAAA,EAAA;AAEF,MAAI;AACF,UAAM,oBAAoB,GAAG;AAE7B,UAAM,EAAC,aAAA,IAAgB,MAAM,sBAAsB,IAAI,KAAK,KAAK;AACjE,OAAG,OAAO,wCAAwC,YAAY,GAAG;AAEjE,UAAM,EAAC,UAAU,KAAA,IAAQ,IAAI;AAC7B,OAAG,OAAO,kCAAkC,QAAQ,IAAI,IAAI,EAAE;AAE9D,QAAI;AACJ,aAAS,QAAQ,QAAQ,QAAQ,OAAO;AACtC,UAAI;AACF,eAAO,MAAM,sBAAsB,IAAI,oBAAoB,QAAQ;AACnE;AAAA,MACF,SAAS,GAAG;AACV,YAAI,SAAS,aAAa,SAAS,eAAe;AAChD,cAAI,EAAE,SAAS,2BAA2B;AAKxC,kBAAM;AACN,eAAG,OAAO,6CAA6C;AACvD;AAAA,UACF;AACA,cAAI,EAAE,SAAS,iCAAiC;AAC9C,kBAAM,iBAAiB,0BAA0B,KAAK;AAEtD,kBAAM,UAAU,MAAM;AAAA;AAAA;AAAA,uCAGK,cAAc;AACzC,gBAAI,QAAQ,QAAQ;AAClB,iBAAG;AAAA,gBACD,uCAAuC,QAAQ,IAAI,CAAC,EAAC,MAAAA,MAAAA,MAAUA,KAAI,CAAC;AAAA,gBACpE;AAAA,cAAA;AAEF;AAAA,YACF;AACA,eAAG,QAAQ,oCAAoC,CAAC;AAAA,UAClD;AAAA,QACF;AACA,cAAM;AAAA,MACR;AAAA,IACF;AACA,UAAM,EAAC,eAAe,UAAU,kBAAkB,QAAO;AACzD,UAAM,iBAAiB,cAAc,GAAG;AAExC,yBAAqB,IAAI,cAAc,cAAc;AAGrD,UAAM,QAAQ,YAAY,IAAA;AAE1B,UAAM,YAAY,MAAM,IAAI,MAAMC,UAAe,OAAMC,QAAM;AAC3D,YAAMA,IAAG;AAAA;AAAA,QAAgB,6BAA6B,QAAQ;AAAA,MAAA;AAC9D,aAAO,mBAAmBA,KAAI,YAAY;AAAA,IAC5C,CAAC;AAED,yBAAqB,IAAI,SAAS;AAGlC,UAAM,EAAC,QAAQ,QAAA,IAAW;AAC1B,UAAM,YAAY,OAAO;AACzB,QAAI,SAAA,MAAe,WAAW,mBAAmB,WAAW;AAC1D,SAAG;AAAA,QACD,8CAA8C,gBAAgB,OACzD,SAAS;AAAA,MA
AA;AAAA,IAElB;AACA,UAAM,aACJ,eAAe,UACX,YACA,KAAK,IAAI,kBAAkB,SAAS;AAE1C,UAAM,WAAW;AAAA,MACf;AAAA,MACA;AAAA,MACA;AAAA,QACE,KAAK;AAAA,QACL,YAAY,EAAC,CAAC,kBAAkB,GAAG,2BAAA;AAAA,MAA0B;AAAA,MAE/D;AAAA,IAAA;AAEF,UAAM,UAAU;AAAA,MACd;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IAAA;AAEF,QAAI;AACF,uBAAiB,IAAI,QAAQ,cAAc;AAC3C,sBAAgB;AAAA,QACd;AAAA,QACA;AAAA,QACA,WAAW,SAAS,+BAA+B,cAAc;AAAA,QACjE;AAAA,MAAA;AAGF,WAAK,cAAc,MAAA;AACnB,YAAM,YAAY,MAAM,QAAQ;AAAA,QAC9B,OAAO;AAAA,UAAI,WACT,QAAQ;AAAA,YAAgB,CAAC,IAAIC,QAC3B,KAAKA,KAAI,OAAO,UAAU,IAAI,EAAE;AAAA,UAAA;AAAA,QAClC;AAAA,MACF;AAEF,WAAK,cAAc,eAAe,IAAI,cAAc;AAEpD,YAAM,QAAQ,UAAU;AAAA,QACtB,CAAC,KAAK,UAAU;AAAA,UACd,MAAM,IAAI,OAAO,KAAK;AAAA,UACtB,WAAW,IAAI,YAAY,KAAK;AAAA,QAAA;AAAA,QAElC,EAAC,MAAM,GAAG,WAAW,EAAA;AAAA,MAAC;AAGxB,sBAAgB;AAAA,QACd;AAAA,QACA;AAAA,QACA,YAAY,QAAQ,MAAM;AAAA,QAC1B;AAAA,MAAA;AAEF,YAAM,aAAa,YAAY,IAAA;AAC/B,wBAAkB,IAAI,OAAO;AAC7B,YAAM,QAAQ,YAAY,IAAA,IAAQ;AAClC,SAAG,OAAO,oBAAoB,MAAM,QAAQ,CAAC,CAAC,MAAM;AAEpD,YAAM,WAAW,KAAK,OAAO,UAAU,gBAAgB,SAAS;AAEhE,YAAM,UAAU,YAAY,IAAA,IAAQ;AACpC,SAAG;AAAA,QACD,UAAU,MAAM,KAAK,eAAA,CAAgB,YAAY,SAAS,cAAc,YAAY,UAAU,GAAG,YACpF,MAAM,UAAU,QAAQ,CAAC,CAAC,YAAY,MAAM,QAAQ,CAAC,CAAC,YAAY,QAAQ,QAAQ,CAAC,CAAC;AAAA,MAAA;AAAA,IAErG,UAAA;AACE,cAAQ,QAAA;AACR,UAAI,SAAA,MAAe,SAAS;AAG1B,aAAK,SAAS,MAAM,MAAM,OAAK,GAAG,OAAO,0BAA0B,CAAC,CAAC;AAAA,MACvE,OAAO;AACL,cAAM,SAAS,IAAA;AAAA,MACjB;AAAA,IACF;AAAA,EACF,SAAS,GAAG;AAIV,OAAG,OAAO,6BAA6B,QAAQ,IAAI,CAAC;AACpD,UAAM;AAAA;AAAA,4BAEkB,QAAQ;AAAA,MAC9B,MAAM,CAAAC,OAAK,GAAG,OAAO,mCAAmC,QAAQ,IAAIA,EAAC,CAAC;AACxE,UAAM,gBAAgB,qBAAqB,IAAI,gBAAgB,CAAC;AAAA,EAClE,UAAA;AACE,oBAAgB,KAAA;AAChB,UAAM,mBAAmB,IAAA;AACzB,UAAM,IAAI,IAAA;AAAA,EACZ;AACF;AAEA,eAAe,oBAAoB,KAAiB;AAClD,QAAM,EAAC,UAAU,QAAA,KACf,MAAM;AAAA;AAAA;AAAA,KAIN,CAAC;AAEH,MAAI,aAAa,WAAW;AAC1B,UAAM,IAAI;AAAA,MACR,uEAAuE,QAAQ;AAAA,IAAA;AAAA,EAEnF;AACA,MAAI,UAAU,MAAQ;AACpB,UAAM,IAAI;AAAA,MACR,sDAAsD,OAAO;AAAA,IAAA;AAAA,EAEjE;AACF;AAEA,eAAe,sBACb,IACA,KACA,OACA,WAAW,MACwB;AACnC,QAAM,EAAC,UAAU,KAAA,IAAQ,IAAI;A
AC7B,KAAG,OAAO,oCAAoC,QAAQ,IAAI,IAAI,EAAE;AAEhE,QAAM,kBAAkB,IAAI,KAAK,KAAK;AACtC,QAAM,EAAC,aAAA,IAAgB,MAAM,uBAAuB,KAAK,KAAK;AAE9D,MAAI,UAAU;AACZ,UAAM,SAAS,MAAM;AAAA,4DACmC,IAAI,YAAY,CAAC;AAAA,QACrE,OAAA;AACJ,QAAI,OAAO,WAAW,aAAa,QAAQ;AACzC,SAAG;AAAA,QACD,iCAAiC,YAAY,mBACvC,OAAO,MAAM;AAAA,MAAA;AAErB,YAAM,IAAI,OAAO,UAAU,MAAM,OAAO,MAAM,QAAQ,CAAC;AACvD,aAAO,sBAAsB,IAAI,KAAK,OAAO,KAAK;AAAA,IACpD;AAAA,EACF;AACA,SAAO,EAAC,aAAA;AACV;AAEA,SAAS,sBACP,IACA,IACA,UACA,YACA,WACiB;AACjB,QAAM,EAAC,KAAA,IAAQ,eAAe,QAAQ;AACtC,QAAM,eAAe,IAAI;AAAA,IACvB;AAAA,IACAH;AAAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EAAA;AAEF,eAAa,IAAI,EAAE;AAEnB,KAAG,OAAO,WAAW,UAAU,oBAAoB,SAAS,SAAS;AAErE,MAAI,SAAS,QAAQ,SAAS,IAAI,IAAI,IAAI;AACxC,OAAG;AAAA,MACD;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,IAAA;AAAA,EAOJ;AACA,SAAO;AACT;AAaA,eAAe,sBACb,IACA,SACA,UAC0B;AAC1B,QAAM,QACJ,MAAM,QAAQ;AAAA;AAAA,IACJ,4BAA4B,QAAQ;AAAA,EAAA,GAE9C,CAAC;AACH,KAAG,OAAO,4BAA4B,QAAQ,IAAI,IAAI;AACtD,SAAO;AACT;AAEA,SAAS,iBACP,IACA,QACA,gBACA;AAGA,QAAM,iBAAiB,KAAK,oBAAoB,YAAY,EAAE,CAAC;AAC/D,aAAW,KAAK,QAAQ;AACtB,OAAG,KAAK,yBAAyB,kBAAkB,GAAG,cAAc,CAAC,CAAC;AACtE,UAAM,YAAY,cAAc,CAAC;AACjC,eAAW,CAAC,SAAS,OAAO,KAAK,OAAO,QAAQ,EAAE,OAAO,GAAG;AAC1D,qBAAe,OAAO,WAAW,SAAS,OAAO;AAAA,IACnD;AAAA,EACF;AACF;AAEA,SAAS,kBAAkB,IAAc,SAAsB;AAC7D,aAAW,SAAS,SAAS;AAC3B,OAAG,KAAK,yBAAyB,uBAAuB,KAAK,CAAC,CAAC;AAAA,EACjE;AACF;AAMO,MAAM,oBAAoB;AAEjC,MAAM,KAAK,OAAO;AAClB,MAAM,oBAAoB;AAC1B,MAAM,0BAA0B,IAAI;AAEpC,eAAe,KACb,IACA,OACA,UACA,MACA,IACA;AACA,QAAM,QAAQ,YAAY,IAAA;AAC1B,MAAI,OAAO;AACX,MAAI,YAAY;AAEhB,QAAM,YAAY,cAAc,KAAK;AACrC,QAAM,iBAAiB,OAAO,QAAQ,MAAM,OAAO;AAEnD,QAAM,cAAc,eAAe,IAAI,CAAC,CAAC,OAAO,IAAI,MAAM,IAAI;AAC9D,QAAM,gBAAgB,eAAe,IAAI,CAAC,CAAC,CAAC,MAAM,GAAG,CAAC,CAAC,EAAE,KAAK,GAAG;AACjE,QAAM,gBAAgB,eAAe,IAAI,CAAC,CAAC,CAAC,MAAM,CAAC;AACnD,QAAM,mBAAmB,cAAc,IAAI,CAAA,MAAK,GAAG,CAAC,CAAC,EAAE,KAAK,GAAG;AAG/D,QAAM,YACJ,cAAc,SAAS,IACnB,IAAI,KAAK,OAAO,cAAc,SAAS,CAAC,CAAC,OACzC;AACN,QAAM;AAAA;AAAA,IAAoB;AAAA,mBACT,SAAS,MAAM,gBAAgB,YAAY,SAAS;AAAA;AACrE,QAAM,a
AAa,GAAG,QAAQ,SAAS;AAEvC,QAAM,kBAAkB,GAAG;AAAA,IACzB,YAAY,IAAI,SAAS,GAAG,OAAO,oBAAoB,CAAC;AAAA,EAAA;AAG1D,QAAM,mBAAmB,OAAO,OAAO,MAAM,YAAY,EACtD,IAAI,CAAC,EAAC,UAAA,MAAe,SAAS,EAC9B,OAAO,CAAA,MAAK,CAAC,CAAC,CAAC;AAClB,QAAM;AAAA;AAAA,IACI;AAAA,aACC,aAAa,SAAS,GAAG,MAAM,MAAM,CAAC,IAAI,GAAG,MAAM,IAAI,CAAC,MAChE,iBAAiB,WAAW,IACzB;AAAA;AAAA,MACQ,UAAU,iBAAiB,KAAK,MAAM,CAAC;AAAA;AAAA;AAErD,QAAM,eAAe,YAAY;AACjC,QAAM,iBAAiB,eAAe;AAGtC,QAAM,gBAAiC,MAAM,KAAK;AAAA,IAChD,QAAQ,oBAAoB;AAAA,EAAA,CAC7B;AACD,MAAI,cAAc;AAClB,MAAI,cAAc;AAElB,WAAS,QAAQ;AACf,UAAMI,SAAQ,YAAY,IAAA;AAC1B,UAAM,cAAc;AACpB,UAAM,cAAc;AAEpB,QAAI,IAAI;AACR,WAAO,cAAc,mBAAmB,eAAe,mBAAmB;AACxE,sBAAgB,IAAI,cAAc,MAAM,GAAI,KAAK,cAAe,CAAC;AAAA,IACnE;AAEA,WAAO,cAAc,GAAG,eAAe;AACrC,iBAAW,IAAI,cAAc,MAAM,GAAI,KAAK,YAAa,CAAC;AAAA,IAC5D;AACA,aAAS,IAAI,GAAG,IAAI,aAAa,KAAK;AAGpC,oBAAc,CAAC,IAAI;AAAA,IACrB;AACA,kBAAc;AACd,YAAQ;AAER,UAAMC,WAAU,YAAY,IAAA,IAAQD;AACpC,iBAAaC;AACb,OAAG;AAAA,MACD,WAAW,WAAW,IAAI,SAAS,UAAU,WAAW,cAAcA,SAAQ,QAAQ,CAAC,CAAC;AAAA,IAAA;AAAA,EAE5F;AAEA,KAAG,OAAO,2BAA2B,SAAS,KAAK,UAAU;AAC7D,QAAM,YAAY,MAAM,eAAe,QAAQ;AAC/C,QAAM,UAAU,YAAY,IAAI,CAAA,MAAK;AACnC,UAAM,UAAU,UAAU,cAAc,EAAE,OAAO;AACjD,WAAO,CAAC,QACN;AAAA,MACE,QAAQ,GAAG;AAAA,MACX,EAAE;AAAA,MACF;AAAA,IAAA;AAAA,EAEN,CAAC;AAED,QAAM,YAAY,IAAI,UAAA;AACtB,MAAI,MAAM;AAEV,QAAM;AAAA,IACJ,MAAM,KAAK,OAAO,SAAS,UAAU,aAAa,EAAE,SAAA;AAAA,IACpD,IAAI,SAAS;AAAA,MACX,eAAe;AAAA,MAEf,MACE,OACA,WACA,UACA;AACA,YAAI;AACF,qBAAW,QAAQ,UAAU,MAAM,KAAK,GAAG;AACzC,2BAAe,SAAS,OAAO,IAAI,KAAK;AACxC,0BAAc,cAAc,eAAe,GAAG,IAC5C,SAAS,OAAO,OAAO,QAAQ,GAAG,EAAE,IAAI;AAE1C,gBAAI,EAAE,QAAQ,QAAQ,QAAQ;AAC5B,oBAAM;AACN,kBACE,EAAE,eAAe,oBAAoB,gBACrC,eAAe,yBACf;AACA,sBAAA;AAAA,cACF;AAAA,YACF;AAAA,UACF;AACA,mBAAA;AAAA,QACF,SAAS,GAAG;AACV,mBAAS,aAAa,QAAQ,IAAI,IAAI,MAAM,OAAO,CAAC,CAAC,CAAC;AAAA,QACxD;AAAA,MACF;AAAA,MAEA,OAAO,CAAC,aAAsC;AAC5C,YAAI;AACF,gBAAA;AACA,mBAAA;AAAA,QACF,SAAS,GAAG;AACV,mBAAS,aAAa,QAAQ,IAAI,IAAI,MAAM,OAAO,CAAC,CAAC,CAAC;AAAA,QACxD;AAAA,MACF;AAAA,IAAA,CACD;AAAA,EAAA;AAGH,QAAM,UAAU,YAAY,
IAAA,IAAQ;AACpC,KAAG;AAAA,IACD,oBAAoB,IAAI,cAAc,SAAS,YAClC,UAAU,QAAQ,CAAC,CAAC,gBAAgB,QAAQ,QAAQ,CAAC,CAAC;AAAA,EAAA;AAErE,SAAO,EAAC,MAAM,UAAA;AAChB;"}
@@ -1 +1 @@
1
- {"version":3,"file":"stream.d.ts","sourceRoot":"","sources":["../../../../../../../../zero-cache/src/services/change-source/pg/logical-replication/stream.ts"],"names":[],"mappings":"AAKA,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,kBAAkB,CAAC;AAKjD,OAAO,EAAC,KAAK,UAAU,EAAC,MAAM,yBAAyB,CAAC;AACxD,OAAO,EAAO,KAAK,IAAI,EAAE,KAAK,MAAM,EAAC,MAAM,8BAA8B,CAAC;AAK1E,OAAO,KAAK,EAAC,OAAO,EAAC,MAAM,qBAAqB,CAAC;AAIjD,MAAM,MAAM,aAAa,GAAG,CAAC,GAAG,EAAE,MAAM,EAAE,OAAO,GAAG;IAAC,GAAG,EAAE,WAAW,CAAA;CAAC,CAAC,CAAC;AAExE,wBAAsB,SAAS,CAC7B,EAAE,EAAE,UAAU,EACd,EAAE,EAAE,UAAU,EACd,IAAI,EAAE,MAAM,EACZ,YAAY,EAAE,MAAM,EAAE,EACtB,GAAG,EAAE,MAAM,EACX,8BAA8B,SAA6C,EAC3E,eAAe,SAAoB,GAClC,OAAO,CAAC;IAAC,QAAQ,EAAE,MAAM,CAAC,aAAa,CAAC,CAAC;IAAC,IAAI,EAAE,IAAI,CAAC,MAAM,CAAC,CAAA;CAAC,CAAC,CA8FhE"}
1
+ {"version":3,"file":"stream.d.ts","sourceRoot":"","sources":["../../../../../../../../zero-cache/src/services/change-source/pg/logical-replication/stream.ts"],"names":[],"mappings":"AAKA,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,kBAAkB,CAAC;AAKjD,OAAO,EAAC,KAAK,UAAU,EAAC,MAAM,yBAAyB,CAAC;AAExD,OAAO,EAAO,KAAK,IAAI,EAAE,KAAK,MAAM,EAAC,MAAM,8BAA8B,CAAC;AAK1E,OAAO,KAAK,EAAC,OAAO,EAAC,MAAM,qBAAqB,CAAC;AAIjD,MAAM,MAAM,aAAa,GAAG,CAAC,GAAG,EAAE,MAAM,EAAE,OAAO,GAAG;IAAC,GAAG,EAAE,WAAW,CAAA;CAAC,CAAC,CAAC;AAExE,wBAAsB,SAAS,CAC7B,EAAE,EAAE,UAAU,EACd,EAAE,EAAE,UAAU,EACd,IAAI,EAAE,MAAM,EACZ,YAAY,EAAE,MAAM,EAAE,EACtB,GAAG,EAAE,MAAM,EACX,8BAA8B,SAA6C,EAC3E,eAAe,SAAoB,GAClC,OAAO,CAAC;IAAC,QAAQ,EAAE,MAAM,CAAC,aAAa,CAAC,CAAC;IAAC,IAAI,EAAE,IAAI,CAAC,MAAM,CAAC,CAAA;CAAC,CAAC,CA8FhE"}
@@ -4,6 +4,7 @@ import postgres from "postgres";
4
4
  import { sleep } from "../../../../../../shared/src/sleep.js";
5
5
  import { getTypeParsers } from "../../../../db/pg-type-parser.js";
6
6
  import "../../../../types/pg.js";
7
+ import { id, lit } from "../../../../types/sql.js";
7
8
  import { pipe } from "../../../../types/streams.js";
8
9
  import { Subscription } from "../../../../types/subscription.js";
9
10
  import { AutoResetSignal } from "../../../change-streamer/schema/tables.js";
@@ -91,13 +92,16 @@ async function subscribe(lc, db, slot, publications, lsn, retriesIfReplicationSl
91
92
  acks: { push: sendAck }
92
93
  };
93
94
  }
95
+ function formatPublicationNames(publications) {
96
+ return publications.map((p) => lit(p).slice(1, -1)).join(",");
97
+ }
94
98
  async function startReplicationStream(lc, session, slot, publications, lsn, maxAttempts) {
95
99
  for (let i = 0; i < maxAttempts; i++) {
96
100
  try {
97
101
  const stream = session.unsafe(
98
- `START_REPLICATION SLOT "${slot}" LOGICAL ${fromBigInt(lsn)} (
99
- proto_version '1',
100
- publication_names '${publications}',
102
+ `START_REPLICATION SLOT ${id(slot)} LOGICAL ${fromBigInt(lsn)} (
103
+ proto_version '1',
104
+ publication_names '${formatPublicationNames(publications)}',
101
105
  messages 'true'
102
106
  )`
103
107
  ).execute();
@@ -133,7 +137,6 @@ function parseStreamMessage(lc, buffer, parser) {
133
137
  return [lsn, parser.parse(buffer.subarray(25))];
134
138
  }
135
139
  if (buffer.readInt8(17)) {
136
- lc.debug?.(`pg keepalive (shouldRespond: true) ${lsn}`);
137
140
  return [lsn, { tag: "keepalive" }];
138
141
  }
139
142
  return null;
@@ -1 +1 @@
1
- {"version":3,"file":"stream.js","sources":["../../../../../../../../zero-cache/src/services/change-source/pg/logical-replication/stream.ts"],"sourcesContent":["import {\n PG_ADMIN_SHUTDOWN,\n PG_OBJECT_IN_USE,\n PG_OBJECT_NOT_IN_PREREQUISITE_STATE,\n} from '@drdgvhbh/postgres-error-codes';\nimport type {LogContext} from '@rocicorp/logger';\nimport {defu} from 'defu';\nimport postgres, {type Options, type PostgresType} from 'postgres';\nimport {sleep} from '../../../../../../shared/src/sleep.ts';\nimport {getTypeParsers} from '../../../../db/pg-type-parser.ts';\nimport {type PostgresDB} from '../../../../types/pg.ts';\nimport {pipe, type Sink, type Source} from '../../../../types/streams.ts';\nimport {Subscription} from '../../../../types/subscription.ts';\nimport {AutoResetSignal} from '../../../change-streamer/schema/tables.ts';\nimport {fromBigInt} from '../lsn.ts';\nimport {PgoutputParser} from './pgoutput-parser.ts';\nimport type {Message} from './pgoutput.types.ts';\n\nconst DEFAULT_RETRIES_IF_REPLICATION_SLOT_ACTIVE = 5;\n\nexport type StreamMessage = [lsn: bigint, Message | {tag: 'keepalive'}];\n\nexport async function subscribe(\n lc: LogContext,\n db: PostgresDB,\n slot: string,\n publications: string[],\n lsn: bigint,\n retriesIfReplicationSlotActive = DEFAULT_RETRIES_IF_REPLICATION_SLOT_ACTIVE,\n applicationName = 'zero-replicator',\n): Promise<{messages: Source<StreamMessage>; acks: Sink<bigint>}> {\n const session = postgres(\n defu(\n {\n max: 1,\n ['fetch_types']: false, // Necessary for the streaming protocol\n ['idle_timeout']: null,\n ['max_lifetime']: null as unknown as number,\n connection: {\n ['application_name']: applicationName,\n replication: 'database', // https://www.postgresql.org/docs/current/protocol-replication.html\n },\n },\n // ParsedOptions are technically compatible with Options, but happen\n // to not be typed that way. 
The postgres.js author does an equivalent\n // merge of ParsedOptions and Options here:\n // https://github.com/porsager/postgres/blob/089214e85c23c90cf142d47fb30bd03f42874984/src/subscribe.js#L13\n db.options as unknown as Options<Record<string, PostgresType>>,\n ),\n );\n\n // Postgres will send keepalives before timing out a wal_sender. It is possible that\n // these keepalives are not received if there is back-pressure in the replication\n // stream. To keep the connection alive, explicitly send keepalives if none have been\n // sent within the last 75% of the wal_sender_timeout.\n //\n // https://www.postgresql.org/docs/current/runtime-config-replication.html#GUC-WAL-SENDER-TIMEOUT\n const [{walSenderTimeoutMs}] = await session<\n {walSenderTimeoutMs: number}[]\n >`SELECT EXTRACT(EPOCH FROM (setting || unit)::interval) * 1000 \n AS \"walSenderTimeoutMs\" FROM pg_settings\n WHERE name = 'wal_sender_timeout'`.simple();\n const manualKeepaliveTimeout = Math.floor(walSenderTimeoutMs * 0.75);\n lc.info?.(\n `wal_sender_timeout: ${walSenderTimeoutMs}ms. 
` +\n `Ensuring manual keepalives at least every ${manualKeepaliveTimeout}ms`,\n );\n\n const [readable, writable] = await startReplicationStream(\n lc,\n session,\n slot,\n publications,\n lsn,\n retriesIfReplicationSlotActive + 1,\n );\n\n let lastAckTime = Date.now();\n function sendAck(lsn: bigint) {\n writable.write(makeAck(lsn));\n lastAckTime = Date.now();\n }\n\n const livenessTimer = setInterval(() => {\n const now = Date.now();\n if (now - lastAckTime > manualKeepaliveTimeout) {\n sendAck(0n);\n lc.debug?.(`sent manual keepalive`);\n }\n }, manualKeepaliveTimeout / 5);\n\n let destroyed = false;\n const typeParsers = await getTypeParsers(db);\n const parser = new PgoutputParser(typeParsers);\n const messages = Subscription.create<StreamMessage>({\n cleanup: () => {\n destroyed = true;\n readable.destroyed || readable.destroy();\n clearInterval(livenessTimer);\n return session.end();\n },\n });\n\n readable.once(\n 'close',\n () =>\n // Only log a warning if the stream was not manually closed.\n destroyed || lc.warn?.(`replication stream closed by ${db.options.host}`),\n );\n readable.once(\n 'error',\n e =>\n // Don't log the shutdown signal. 
This is the expected way for upstream\n // to close the connection (and will be logged downstream).\n (e instanceof postgres.PostgresError && e.code === PG_ADMIN_SHUTDOWN) ||\n lc.error?.(`error from ${db.options.host}`, e),\n );\n\n pipe(readable, messages, buffer => parseStreamMessage(lc, buffer, parser));\n\n return {\n messages,\n acks: {push: sendAck},\n };\n}\n\nasync function startReplicationStream(\n lc: LogContext,\n session: postgres.Sql,\n slot: string,\n publications: string[],\n lsn: bigint,\n maxAttempts: number,\n) {\n for (let i = 0; i < maxAttempts; i++) {\n try {\n const stream = session\n .unsafe(\n `START_REPLICATION SLOT \"${slot}\" LOGICAL ${fromBigInt(lsn)} (\n proto_version '1', \n publication_names '${publications}',\n messages 'true'\n )`,\n )\n .execute();\n return await Promise.all([stream.readable(), stream.writable()]);\n } catch (e) {\n if (e instanceof postgres.PostgresError) {\n // error: replication slot \"zero_slot_change_source_test_id\" is active for PID 268\n if (e.code === PG_OBJECT_IN_USE) {\n // The freeing up of the replication slot is not transactional;\n // sometimes it takes time for Postgres to consider the slot\n // inactive.\n lc.warn?.(`attempt ${i + 1}: ${String(e)}`, e);\n await sleep(10);\n continue;\n }\n // error: This slot has been invalidated because it exceeded the maximum reserved size.\n // (This is a different manifestation of a slot being invalidated when\n // the wal exceeds the max_slot_wal_keep_size)\n if (e.code === PG_OBJECT_NOT_IN_PREREQUISITE_STATE) {\n lc.error?.(`error starting replication stream`, e);\n throw new AutoResetSignal(`unable to start replication stream`, {\n cause: e,\n });\n }\n }\n throw e;\n }\n }\n throw new Error(\n `exceeded max attempts (${maxAttempts}) to start the Postgres stream`,\n );\n}\n\nfunction parseStreamMessage(\n lc: LogContext,\n buffer: Buffer,\n parser: PgoutputParser,\n): StreamMessage | null {\n // 
https://www.postgresql.org/docs/current/protocol-replication.html#PROTOCOL-REPLICATION-XLOGDATA\n if (buffer[0] !== 0x77 && buffer[0] !== 0x6b) {\n lc.warn?.('Unknown message', buffer[0]);\n return null;\n }\n const lsn = buffer.readBigUInt64BE(1);\n if (buffer[0] === 0x77) {\n // XLogData\n return [lsn, parser.parse(buffer.subarray(25))];\n }\n if (buffer.readInt8(17)) {\n // Primary keepalive message: shouldRespond\n lc.debug?.(`pg keepalive (shouldRespond: true) ${lsn}`);\n return [lsn, {tag: 'keepalive'}];\n }\n return null;\n}\n\n// https://www.postgresql.org/docs/current/protocol-replication.html#PROTOCOL-REPLICATION-STANDBY-STATUS-UPDATE\nfunction makeAck(lsn: bigint): Buffer {\n const microNow = BigInt(Date.now() - Date.UTC(2000, 0, 1)) * BigInt(1000);\n\n const x = Buffer.alloc(34);\n x[0] = 'r'.charCodeAt(0);\n x.writeBigInt64BE(lsn, 1);\n x.writeBigInt64BE(lsn, 9);\n x.writeBigInt64BE(lsn, 17);\n x.writeBigInt64BE(microNow, 25);\n return x;\n}\n"],"names":["lsn"],"mappings":";;;;;;;;;;;AAkBA,MAAM,6CAA6C;AAInD,eAAsB,UACpB,IACA,IACA,MACA,cACA,KACA,iCAAiC,4CACjC,kBAAkB,mBAC8C;AAChE,QAAM,UAAU;AAAA,IACd;AAAA,MACE;AAAA,QACE,KAAK;AAAA,QACL,CAAC,aAAa,GAAG;AAAA;AAAA,QACjB,CAAC,cAAc,GAAG;AAAA,QAClB,CAAC,cAAc,GAAG;AAAA,QAClB,YAAY;AAAA,UACV,CAAC,kBAAkB,GAAG;AAAA,UACtB,aAAa;AAAA;AAAA,QAAA;AAAA,MACf;AAAA;AAAA;AAAA;AAAA;AAAA,MAMF,GAAG;AAAA,IAAA;AAAA,EACL;AASF,QAAM,CAAC,EAAC,oBAAmB,IAAI,MAAM;AAAA;AAAA,2CAII,OAAA;AACzC,QAAM,yBAAyB,KAAK,MAAM,qBAAqB,IAAI;AACnE,KAAG;AAAA,IACD,uBAAuB,kBAAkB,iDACM,sBAAsB;AAAA,EAAA;AAGvE,QAAM,CAAC,UAAU,QAAQ,IAAI,MAAM;AAAA,IACjC;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA,iCAAiC;AAAA,EAAA;AAGnC,MAAI,cAAc,KAAK,IAAA;AACvB,WAAS,QAAQA,MAAa;AAC5B,aAAS,MAAM,QAAQA,IAAG,CAAC;AAC3B,kBAAc,KAAK,IAAA;AAAA,EACrB;AAEA,QAAM,gBAAgB,YAAY,MAAM;AACtC,UAAM,MAAM,KAAK,IAAA;AACjB,QAAI,MAAM,cAAc,wBAAwB;AAC9C,cAAQ,EAAE;AACV,SAAG,QAAQ,uBAAuB;AAAA,IACpC;AAAA,EACF,GAAG,yBAAyB,CAAC;AAE7B,MAAI,YAAY;AAChB,QAAM,cAAc,MAAM,eAAe,EAAE;AAC3C,QAAM,SAAS,IAAI,eAAe,WAAW;AAC7C,QAA
M,WAAW,aAAa,OAAsB;AAAA,IAClD,SAAS,MAAM;AACb,kBAAY;AACZ,eAAS,aAAa,SAAS,QAAA;AAC/B,oBAAc,aAAa;AAC3B,aAAO,QAAQ,IAAA;AAAA,IACjB;AAAA,EAAA,CACD;AAED,WAAS;AAAA,IACP;AAAA,IACA;AAAA;AAAA,MAEE,aAAa,GAAG,OAAO,gCAAgC,GAAG,QAAQ,IAAI,EAAE;AAAA;AAAA,EAAA;AAE5E,WAAS;AAAA,IACP;AAAA,IACA,CAAA;AAAA;AAAA;AAAA,MAGG,aAAa,SAAS,iBAAiB,EAAE,SAAS,qBACnD,GAAG,QAAQ,cAAc,GAAG,QAAQ,IAAI,IAAI,CAAC;AAAA;AAAA,EAAA;AAGjD,OAAK,UAAU,UAAU,CAAA,WAAU,mBAAmB,IAAI,QAAQ,MAAM,CAAC;AAEzE,SAAO;AAAA,IACL;AAAA,IACA,MAAM,EAAC,MAAM,QAAA;AAAA,EAAO;AAExB;AAEA,eAAe,uBACb,IACA,SACA,MACA,cACA,KACA,aACA;AACA,WAAS,IAAI,GAAG,IAAI,aAAa,KAAK;AACpC,QAAI;AACF,YAAM,SAAS,QACZ;AAAA,QACC,2BAA2B,IAAI,aAAa,WAAW,GAAG,CAAC;AAAA;AAAA,6BAExC,YAAY;AAAA;AAAA;AAAA,MAAA,EAIhC,QAAA;AACH,aAAO,MAAM,QAAQ,IAAI,CAAC,OAAO,YAAY,OAAO,SAAA,CAAU,CAAC;AAAA,IACjE,SAAS,GAAG;AACV,UAAI,aAAa,SAAS,eAAe;AAEvC,YAAI,EAAE,SAAS,kBAAkB;AAI/B,aAAG,OAAO,WAAW,IAAI,CAAC,KAAK,OAAO,CAAC,CAAC,IAAI,CAAC;AAC7C,gBAAM,MAAM,EAAE;AACd;AAAA,QACF;AAIA,YAAI,EAAE,SAAS,qCAAqC;AAClD,aAAG,QAAQ,qCAAqC,CAAC;AACjD,gBAAM,IAAI,gBAAgB,sCAAsC;AAAA,YAC9D,OAAO;AAAA,UAAA,CACR;AAAA,QACH;AAAA,MACF;AACA,YAAM;AAAA,IACR;AAAA,EACF;AACA,QAAM,IAAI;AAAA,IACR,0BAA0B,WAAW;AAAA,EAAA;AAEzC;AAEA,SAAS,mBACP,IACA,QACA,QACsB;AAEtB,MAAI,OAAO,CAAC,MAAM,OAAQ,OAAO,CAAC,MAAM,KAAM;AAC5C,OAAG,OAAO,mBAAmB,OAAO,CAAC,CAAC;AACtC,WAAO;AAAA,EACT;AACA,QAAM,MAAM,OAAO,gBAAgB,CAAC;AACpC,MAAI,OAAO,CAAC,MAAM,KAAM;AAEtB,WAAO,CAAC,KAAK,OAAO,MAAM,OAAO,SAAS,EAAE,CAAC,CAAC;AAAA,EAChD;AACA,MAAI,OAAO,SAAS,EAAE,GAAG;AAEvB,OAAG,QAAQ,sCAAsC,GAAG,EAAE;AACtD,WAAO,CAAC,KAAK,EAAC,KAAK,aAAY;AAAA,EACjC;AACA,SAAO;AACT;AAGA,SAAS,QAAQ,KAAqB;AACpC,QAAM,WAAW,OAAO,KAAK,IAAA,IAAQ,KAAK,IAAI,KAAM,GAAG,CAAC,CAAC,IAAI,OAAO,GAAI;AAExE,QAAM,IAAI,OAAO,MAAM,EAAE;AACzB,IAAE,CAAC,IAAI,IAAI,WAAW,CAAC;AACvB,IAAE,gBAAgB,KAAK,CAAC;AACxB,IAAE,gBAAgB,KAAK,CAAC;AACxB,IAAE,gBAAgB,KAAK,EAAE;AACzB,IAAE,gBAAgB,UAAU,EAAE;AAC9B,SAAO;AACT;"}
1
+ {"version":3,"file":"stream.js","sources":["../../../../../../../../zero-cache/src/services/change-source/pg/logical-replication/stream.ts"],"sourcesContent":["import {\n PG_ADMIN_SHUTDOWN,\n PG_OBJECT_IN_USE,\n PG_OBJECT_NOT_IN_PREREQUISITE_STATE,\n} from '@drdgvhbh/postgres-error-codes';\nimport type {LogContext} from '@rocicorp/logger';\nimport {defu} from 'defu';\nimport postgres, {type Options, type PostgresType} from 'postgres';\nimport {sleep} from '../../../../../../shared/src/sleep.ts';\nimport {getTypeParsers} from '../../../../db/pg-type-parser.ts';\nimport {type PostgresDB} from '../../../../types/pg.ts';\nimport {id, lit} from '../../../../types/sql.ts';\nimport {pipe, type Sink, type Source} from '../../../../types/streams.ts';\nimport {Subscription} from '../../../../types/subscription.ts';\nimport {AutoResetSignal} from '../../../change-streamer/schema/tables.ts';\nimport {fromBigInt} from '../lsn.ts';\nimport {PgoutputParser} from './pgoutput-parser.ts';\nimport type {Message} from './pgoutput.types.ts';\n\nconst DEFAULT_RETRIES_IF_REPLICATION_SLOT_ACTIVE = 5;\n\nexport type StreamMessage = [lsn: bigint, Message | {tag: 'keepalive'}];\n\nexport async function subscribe(\n lc: LogContext,\n db: PostgresDB,\n slot: string,\n publications: string[],\n lsn: bigint,\n retriesIfReplicationSlotActive = DEFAULT_RETRIES_IF_REPLICATION_SLOT_ACTIVE,\n applicationName = 'zero-replicator',\n): Promise<{messages: Source<StreamMessage>; acks: Sink<bigint>}> {\n const session = postgres(\n defu(\n {\n max: 1,\n ['fetch_types']: false, // Necessary for the streaming protocol\n ['idle_timeout']: null,\n ['max_lifetime']: null as unknown as number,\n connection: {\n ['application_name']: applicationName,\n replication: 'database', // https://www.postgresql.org/docs/current/protocol-replication.html\n },\n },\n // ParsedOptions are technically compatible with Options, but happen\n // to not be typed that way. 
The postgres.js author does an equivalent\n // merge of ParsedOptions and Options here:\n // https://github.com/porsager/postgres/blob/089214e85c23c90cf142d47fb30bd03f42874984/src/subscribe.js#L13\n db.options as unknown as Options<Record<string, PostgresType>>,\n ),\n );\n\n // Postgres will send keepalives before timing out a wal_sender. It is possible that\n // these keepalives are not received if there is back-pressure in the replication\n // stream. To keep the connection alive, explicitly send keepalives if none have been\n // sent within the last 75% of the wal_sender_timeout.\n //\n // https://www.postgresql.org/docs/current/runtime-config-replication.html#GUC-WAL-SENDER-TIMEOUT\n const [{walSenderTimeoutMs}] = await session<\n {walSenderTimeoutMs: number}[]\n >`SELECT EXTRACT(EPOCH FROM (setting || unit)::interval) * 1000 \n AS \"walSenderTimeoutMs\" FROM pg_settings\n WHERE name = 'wal_sender_timeout'`.simple();\n const manualKeepaliveTimeout = Math.floor(walSenderTimeoutMs * 0.75);\n lc.info?.(\n `wal_sender_timeout: ${walSenderTimeoutMs}ms. 
` +\n `Ensuring manual keepalives at least every ${manualKeepaliveTimeout}ms`,\n );\n\n const [readable, writable] = await startReplicationStream(\n lc,\n session,\n slot,\n publications,\n lsn,\n retriesIfReplicationSlotActive + 1,\n );\n\n let lastAckTime = Date.now();\n function sendAck(lsn: bigint) {\n writable.write(makeAck(lsn));\n lastAckTime = Date.now();\n }\n\n const livenessTimer = setInterval(() => {\n const now = Date.now();\n if (now - lastAckTime > manualKeepaliveTimeout) {\n sendAck(0n);\n lc.debug?.(`sent manual keepalive`);\n }\n }, manualKeepaliveTimeout / 5);\n\n let destroyed = false;\n const typeParsers = await getTypeParsers(db);\n const parser = new PgoutputParser(typeParsers);\n const messages = Subscription.create<StreamMessage>({\n cleanup: () => {\n destroyed = true;\n readable.destroyed || readable.destroy();\n clearInterval(livenessTimer);\n return session.end();\n },\n });\n\n readable.once(\n 'close',\n () =>\n // Only log a warning if the stream was not manually closed.\n destroyed || lc.warn?.(`replication stream closed by ${db.options.host}`),\n );\n readable.once(\n 'error',\n e =>\n // Don't log the shutdown signal. 
This is the expected way for upstream\n // to close the connection (and will be logged downstream).\n (e instanceof postgres.PostgresError && e.code === PG_ADMIN_SHUTDOWN) ||\n lc.error?.(`error from ${db.options.host}`, e),\n );\n\n pipe(readable, messages, buffer => parseStreamMessage(lc, buffer, parser));\n\n return {\n messages,\n acks: {push: sendAck},\n };\n}\n\n/**\n * Formats publication names for the START_REPLICATION command.\n * The replication protocol expects format: publication_names 'pub1,pub2'\n * Each name is escaped to handle any quotes that may have passed validation.\n */\nfunction formatPublicationNames(publications: string[]): string {\n // lit() returns 'escaped_name' with surrounding quotes\n // We strip the quotes since the outer quotes are in the template\n return publications.map(p => lit(p).slice(1, -1)).join(',');\n}\n\nasync function startReplicationStream(\n lc: LogContext,\n session: postgres.Sql,\n slot: string,\n publications: string[],\n lsn: bigint,\n maxAttempts: number,\n) {\n for (let i = 0; i < maxAttempts; i++) {\n try {\n const stream = session\n .unsafe(\n `START_REPLICATION SLOT ${id(slot)} LOGICAL ${fromBigInt(lsn)} (\n proto_version '1',\n publication_names '${formatPublicationNames(publications)}',\n messages 'true'\n )`,\n )\n .execute();\n return await Promise.all([stream.readable(), stream.writable()]);\n } catch (e) {\n if (e instanceof postgres.PostgresError) {\n // error: replication slot \"zero_slot_change_source_test_id\" is active for PID 268\n if (e.code === PG_OBJECT_IN_USE) {\n // The freeing up of the replication slot is not transactional;\n // sometimes it takes time for Postgres to consider the slot\n // inactive.\n lc.warn?.(`attempt ${i + 1}: ${String(e)}`, e);\n await sleep(10);\n continue;\n }\n // error: This slot has been invalidated because it exceeded the maximum reserved size.\n // (This is a different manifestation of a slot being invalidated when\n // the wal exceeds the 
max_slot_wal_keep_size)\n if (e.code === PG_OBJECT_NOT_IN_PREREQUISITE_STATE) {\n lc.error?.(`error starting replication stream`, e);\n throw new AutoResetSignal(`unable to start replication stream`, {\n cause: e,\n });\n }\n }\n throw e;\n }\n }\n throw new Error(\n `exceeded max attempts (${maxAttempts}) to start the Postgres stream`,\n );\n}\n\nfunction parseStreamMessage(\n lc: LogContext,\n buffer: Buffer,\n parser: PgoutputParser,\n): StreamMessage | null {\n // https://www.postgresql.org/docs/current/protocol-replication.html#PROTOCOL-REPLICATION-XLOGDATA\n if (buffer[0] !== 0x77 && buffer[0] !== 0x6b) {\n lc.warn?.('Unknown message', buffer[0]);\n return null;\n }\n const lsn = buffer.readBigUInt64BE(1);\n if (buffer[0] === 0x77) {\n // XLogData\n return [lsn, parser.parse(buffer.subarray(25))];\n }\n if (buffer.readInt8(17)) {\n // Primary keepalive message: shouldRespond\n return [lsn, {tag: 'keepalive'}];\n }\n return null;\n}\n\n// https://www.postgresql.org/docs/current/protocol-replication.html#PROTOCOL-REPLICATION-STANDBY-STATUS-UPDATE\nfunction makeAck(lsn: bigint): Buffer {\n const microNow = BigInt(Date.now() - Date.UTC(2000, 0, 1)) * BigInt(1000);\n\n const x = Buffer.alloc(34);\n x[0] = 'r'.charCodeAt(0);\n x.writeBigInt64BE(lsn, 1);\n x.writeBigInt64BE(lsn, 9);\n x.writeBigInt64BE(lsn, 17);\n x.writeBigInt64BE(microNow, 25);\n return 
x;\n}\n"],"names":["lsn"],"mappings":";;;;;;;;;;;;AAmBA,MAAM,6CAA6C;AAInD,eAAsB,UACpB,IACA,IACA,MACA,cACA,KACA,iCAAiC,4CACjC,kBAAkB,mBAC8C;AAChE,QAAM,UAAU;AAAA,IACd;AAAA,MACE;AAAA,QACE,KAAK;AAAA,QACL,CAAC,aAAa,GAAG;AAAA;AAAA,QACjB,CAAC,cAAc,GAAG;AAAA,QAClB,CAAC,cAAc,GAAG;AAAA,QAClB,YAAY;AAAA,UACV,CAAC,kBAAkB,GAAG;AAAA,UACtB,aAAa;AAAA;AAAA,QAAA;AAAA,MACf;AAAA;AAAA;AAAA;AAAA;AAAA,MAMF,GAAG;AAAA,IAAA;AAAA,EACL;AASF,QAAM,CAAC,EAAC,oBAAmB,IAAI,MAAM;AAAA;AAAA,2CAII,OAAA;AACzC,QAAM,yBAAyB,KAAK,MAAM,qBAAqB,IAAI;AACnE,KAAG;AAAA,IACD,uBAAuB,kBAAkB,iDACM,sBAAsB;AAAA,EAAA;AAGvE,QAAM,CAAC,UAAU,QAAQ,IAAI,MAAM;AAAA,IACjC;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA,iCAAiC;AAAA,EAAA;AAGnC,MAAI,cAAc,KAAK,IAAA;AACvB,WAAS,QAAQA,MAAa;AAC5B,aAAS,MAAM,QAAQA,IAAG,CAAC;AAC3B,kBAAc,KAAK,IAAA;AAAA,EACrB;AAEA,QAAM,gBAAgB,YAAY,MAAM;AACtC,UAAM,MAAM,KAAK,IAAA;AACjB,QAAI,MAAM,cAAc,wBAAwB;AAC9C,cAAQ,EAAE;AACV,SAAG,QAAQ,uBAAuB;AAAA,IACpC;AAAA,EACF,GAAG,yBAAyB,CAAC;AAE7B,MAAI,YAAY;AAChB,QAAM,cAAc,MAAM,eAAe,EAAE;AAC3C,QAAM,SAAS,IAAI,eAAe,WAAW;AAC7C,QAAM,WAAW,aAAa,OAAsB;AAAA,IAClD,SAAS,MAAM;AACb,kBAAY;AACZ,eAAS,aAAa,SAAS,QAAA;AAC/B,oBAAc,aAAa;AAC3B,aAAO,QAAQ,IAAA;AAAA,IACjB;AAAA,EAAA,CACD;AAED,WAAS;AAAA,IACP;AAAA,IACA;AAAA;AAAA,MAEE,aAAa,GAAG,OAAO,gCAAgC,GAAG,QAAQ,IAAI,EAAE;AAAA;AAAA,EAAA;AAE5E,WAAS;AAAA,IACP;AAAA,IACA,CAAA;AAAA;AAAA;AAAA,MAGG,aAAa,SAAS,iBAAiB,EAAE,SAAS,qBACnD,GAAG,QAAQ,cAAc,GAAG,QAAQ,IAAI,IAAI,CAAC;AAAA;AAAA,EAAA;AAGjD,OAAK,UAAU,UAAU,CAAA,WAAU,mBAAmB,IAAI,QAAQ,MAAM,CAAC;AAEzE,SAAO;AAAA,IACL;AAAA,IACA,MAAM,EAAC,MAAM,QAAA;AAAA,EAAO;AAExB;AAOA,SAAS,uBAAuB,cAAgC;AAG9D,SAAO,aAAa,IAAI,CAAA,MAAK,IAAI,CAAC,EAAE,MAAM,GAAG,EAAE,CAAC,EAAE,KAAK,GAAG;AAC5D;AAEA,eAAe,uBACb,IACA,SACA,MACA,cACA,KACA,aACA;AACA,WAAS,IAAI,GAAG,IAAI,aAAa,KAAK;AACpC,QAAI;AACF,YAAM,SAAS,QACZ;AAAA,QACC,0BAA0B,GAAG,IAAI,CAAC,YAAY,WAAW,GAAG,CAAC;AAAA;AAAA,6BAE1C,uBAAuB,YAAY,CAAC;AAAA;AAAA;AAAA,MAAA,EAIxD,QAAA;AACH,aAAO,MAAM,QAAQ,IAAI,CAAC,OAAO,YAAY,OAAO,SAAA,CAAU,CAAC;AAAA,IACjE,SAAS,GAAG;AACV,UAAI,aAAa,SAAS,eAAe;AAEvC,
YAAI,EAAE,SAAS,kBAAkB;AAI/B,aAAG,OAAO,WAAW,IAAI,CAAC,KAAK,OAAO,CAAC,CAAC,IAAI,CAAC;AAC7C,gBAAM,MAAM,EAAE;AACd;AAAA,QACF;AAIA,YAAI,EAAE,SAAS,qCAAqC;AAClD,aAAG,QAAQ,qCAAqC,CAAC;AACjD,gBAAM,IAAI,gBAAgB,sCAAsC;AAAA,YAC9D,OAAO;AAAA,UAAA,CACR;AAAA,QACH;AAAA,MACF;AACA,YAAM;AAAA,IACR;AAAA,EACF;AACA,QAAM,IAAI;AAAA,IACR,0BAA0B,WAAW;AAAA,EAAA;AAEzC;AAEA,SAAS,mBACP,IACA,QACA,QACsB;AAEtB,MAAI,OAAO,CAAC,MAAM,OAAQ,OAAO,CAAC,MAAM,KAAM;AAC5C,OAAG,OAAO,mBAAmB,OAAO,CAAC,CAAC;AACtC,WAAO;AAAA,EACT;AACA,QAAM,MAAM,OAAO,gBAAgB,CAAC;AACpC,MAAI,OAAO,CAAC,MAAM,KAAM;AAEtB,WAAO,CAAC,KAAK,OAAO,MAAM,OAAO,SAAS,EAAE,CAAC,CAAC;AAAA,EAChD;AACA,MAAI,OAAO,SAAS,EAAE,GAAG;AAEvB,WAAO,CAAC,KAAK,EAAC,KAAK,aAAY;AAAA,EACjC;AACA,SAAO;AACT;AAGA,SAAS,QAAQ,KAAqB;AACpC,QAAM,WAAW,OAAO,KAAK,IAAA,IAAQ,KAAK,IAAI,KAAM,GAAG,CAAC,CAAC,IAAI,OAAO,GAAI;AAExE,QAAM,IAAI,OAAO,MAAM,EAAE;AACzB,IAAE,CAAC,IAAI,IAAI,WAAW,CAAC;AACvB,IAAE,gBAAgB,KAAK,CAAC;AACxB,IAAE,gBAAgB,KAAK,CAAC;AACxB,IAAE,gBAAgB,KAAK,EAAE;AACzB,IAAE,gBAAgB,UAAU,EAAE;AAC9B,SAAO;AACT;"}