@rocicorp/zero 0.26.0-canary.0 → 0.26.0-canary.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (391)
  1. package/README.md +1 -1
  2. package/out/replicache/src/persist/collect-idb-databases.d.ts +4 -4
  3. package/out/replicache/src/persist/collect-idb-databases.d.ts.map +1 -1
  4. package/out/replicache/src/persist/collect-idb-databases.js +22 -19
  5. package/out/replicache/src/persist/collect-idb-databases.js.map +1 -1
  6. package/out/replicache/src/persist/refresh.d.ts.map +1 -1
  7. package/out/replicache/src/persist/refresh.js +0 -8
  8. package/out/replicache/src/persist/refresh.js.map +1 -1
  9. package/out/replicache/src/process-scheduler.d.ts +23 -0
  10. package/out/replicache/src/process-scheduler.d.ts.map +1 -1
  11. package/out/replicache/src/process-scheduler.js +50 -1
  12. package/out/replicache/src/process-scheduler.js.map +1 -1
  13. package/out/replicache/src/replicache-impl.d.ts +8 -0
  14. package/out/replicache/src/replicache-impl.d.ts.map +1 -1
  15. package/out/replicache/src/replicache-impl.js +11 -2
  16. package/out/replicache/src/replicache-impl.js.map +1 -1
  17. package/out/shared/src/custom-key-map.d.ts +4 -4
  18. package/out/shared/src/custom-key-map.d.ts.map +1 -1
  19. package/out/shared/src/custom-key-map.js.map +1 -1
  20. package/out/shared/src/falsy.d.ts +3 -0
  21. package/out/shared/src/falsy.d.ts.map +1 -0
  22. package/out/shared/src/iterables.d.ts +6 -8
  23. package/out/shared/src/iterables.d.ts.map +1 -1
  24. package/out/shared/src/iterables.js +13 -7
  25. package/out/shared/src/iterables.js.map +1 -1
  26. package/out/shared/src/options.d.ts +1 -0
  27. package/out/shared/src/options.d.ts.map +1 -1
  28. package/out/shared/src/options.js +5 -1
  29. package/out/shared/src/options.js.map +1 -1
  30. package/out/zero/package.json.js +1 -1
  31. package/out/zero/src/adapters/drizzle.js +1 -2
  32. package/out/zero/src/adapters/prisma.d.ts +2 -0
  33. package/out/zero/src/adapters/prisma.d.ts.map +1 -0
  34. package/out/zero/src/adapters/prisma.js +6 -0
  35. package/out/zero/src/adapters/prisma.js.map +1 -0
  36. package/out/zero/src/pg.js +4 -7
  37. package/out/zero/src/react.js +3 -1
  38. package/out/zero/src/react.js.map +1 -1
  39. package/out/zero/src/server.js +5 -8
  40. package/out/zero/src/zero-cache-dev.js +7 -3
  41. package/out/zero/src/zero-cache-dev.js.map +1 -1
  42. package/out/zero-cache/src/auth/load-permissions.d.ts +3 -2
  43. package/out/zero-cache/src/auth/load-permissions.d.ts.map +1 -1
  44. package/out/zero-cache/src/auth/load-permissions.js +14 -8
  45. package/out/zero-cache/src/auth/load-permissions.js.map +1 -1
  46. package/out/zero-cache/src/auth/write-authorizer.d.ts +6 -0
  47. package/out/zero-cache/src/auth/write-authorizer.d.ts.map +1 -1
  48. package/out/zero-cache/src/auth/write-authorizer.js +16 -3
  49. package/out/zero-cache/src/auth/write-authorizer.js.map +1 -1
  50. package/out/zero-cache/src/config/zero-config.d.ts +54 -9
  51. package/out/zero-cache/src/config/zero-config.d.ts.map +1 -1
  52. package/out/zero-cache/src/config/zero-config.js +80 -20
  53. package/out/zero-cache/src/config/zero-config.js.map +1 -1
  54. package/out/zero-cache/src/custom/fetch.d.ts +3 -0
  55. package/out/zero-cache/src/custom/fetch.d.ts.map +1 -1
  56. package/out/zero-cache/src/custom/fetch.js +26 -0
  57. package/out/zero-cache/src/custom/fetch.js.map +1 -1
  58. package/out/zero-cache/src/db/lite-tables.js +1 -1
  59. package/out/zero-cache/src/db/lite-tables.js.map +1 -1
  60. package/out/zero-cache/src/db/migration-lite.d.ts.map +1 -1
  61. package/out/zero-cache/src/db/migration-lite.js +9 -3
  62. package/out/zero-cache/src/db/migration-lite.js.map +1 -1
  63. package/out/zero-cache/src/db/migration.d.ts.map +1 -1
  64. package/out/zero-cache/src/db/migration.js +9 -3
  65. package/out/zero-cache/src/db/migration.js.map +1 -1
  66. package/out/zero-cache/src/db/specs.d.ts +4 -3
  67. package/out/zero-cache/src/db/specs.d.ts.map +1 -1
  68. package/out/zero-cache/src/db/specs.js +4 -1
  69. package/out/zero-cache/src/db/specs.js.map +1 -1
  70. package/out/zero-cache/src/db/transaction-pool.d.ts.map +1 -1
  71. package/out/zero-cache/src/db/transaction-pool.js +9 -3
  72. package/out/zero-cache/src/db/transaction-pool.js.map +1 -1
  73. package/out/zero-cache/src/observability/events.d.ts.map +1 -1
  74. package/out/zero-cache/src/observability/events.js +15 -5
  75. package/out/zero-cache/src/observability/events.js.map +1 -1
  76. package/out/zero-cache/src/server/change-streamer.d.ts.map +1 -1
  77. package/out/zero-cache/src/server/change-streamer.js +10 -2
  78. package/out/zero-cache/src/server/change-streamer.js.map +1 -1
  79. package/out/zero-cache/src/server/inspector-delegate.d.ts +1 -1
  80. package/out/zero-cache/src/server/inspector-delegate.d.ts.map +1 -1
  81. package/out/zero-cache/src/server/inspector-delegate.js +11 -30
  82. package/out/zero-cache/src/server/inspector-delegate.js.map +1 -1
  83. package/out/zero-cache/src/server/main.js +1 -1
  84. package/out/zero-cache/src/server/main.js.map +1 -1
  85. package/out/zero-cache/src/server/priority-op.d.ts +8 -0
  86. package/out/zero-cache/src/server/priority-op.d.ts.map +1 -0
  87. package/out/zero-cache/src/server/priority-op.js +29 -0
  88. package/out/zero-cache/src/server/priority-op.js.map +1 -0
  89. package/out/zero-cache/src/server/syncer.d.ts.map +1 -1
  90. package/out/zero-cache/src/server/syncer.js +10 -10
  91. package/out/zero-cache/src/server/syncer.js.map +1 -1
  92. package/out/zero-cache/src/services/analyze.js +1 -1
  93. package/out/zero-cache/src/services/analyze.js.map +1 -1
  94. package/out/zero-cache/src/services/change-source/custom/change-source.d.ts.map +1 -1
  95. package/out/zero-cache/src/services/change-source/custom/change-source.js +4 -7
  96. package/out/zero-cache/src/services/change-source/custom/change-source.js.map +1 -1
  97. package/out/zero-cache/src/services/change-source/pg/change-source.d.ts.map +1 -1
  98. package/out/zero-cache/src/services/change-source/pg/change-source.js +68 -13
  99. package/out/zero-cache/src/services/change-source/pg/change-source.js.map +1 -1
  100. package/out/zero-cache/src/services/change-source/pg/initial-sync.d.ts.map +1 -1
  101. package/out/zero-cache/src/services/change-source/pg/initial-sync.js +7 -2
  102. package/out/zero-cache/src/services/change-source/pg/initial-sync.js.map +1 -1
  103. package/out/zero-cache/src/services/change-source/pg/logical-replication/stream.d.ts.map +1 -1
  104. package/out/zero-cache/src/services/change-source/pg/logical-replication/stream.js +7 -4
  105. package/out/zero-cache/src/services/change-source/pg/logical-replication/stream.js.map +1 -1
  106. package/out/zero-cache/src/services/change-source/pg/schema/ddl.d.ts +125 -180
  107. package/out/zero-cache/src/services/change-source/pg/schema/ddl.d.ts.map +1 -1
  108. package/out/zero-cache/src/services/change-source/pg/schema/ddl.js +1 -10
  109. package/out/zero-cache/src/services/change-source/pg/schema/ddl.js.map +1 -1
  110. package/out/zero-cache/src/services/change-source/pg/schema/init.d.ts.map +1 -1
  111. package/out/zero-cache/src/services/change-source/pg/schema/init.js +26 -12
  112. package/out/zero-cache/src/services/change-source/pg/schema/init.js.map +1 -1
  113. package/out/zero-cache/src/services/change-source/pg/schema/published.d.ts +36 -90
  114. package/out/zero-cache/src/services/change-source/pg/schema/published.d.ts.map +1 -1
  115. package/out/zero-cache/src/services/change-source/pg/schema/published.js +51 -14
  116. package/out/zero-cache/src/services/change-source/pg/schema/published.js.map +1 -1
  117. package/out/zero-cache/src/services/change-source/pg/schema/shard.d.ts +31 -36
  118. package/out/zero-cache/src/services/change-source/pg/schema/shard.d.ts.map +1 -1
  119. package/out/zero-cache/src/services/change-source/pg/schema/shard.js +25 -17
  120. package/out/zero-cache/src/services/change-source/pg/schema/shard.js.map +1 -1
  121. package/out/zero-cache/src/services/change-source/pg/schema/validation.d.ts +2 -2
  122. package/out/zero-cache/src/services/change-source/pg/schema/validation.d.ts.map +1 -1
  123. package/out/zero-cache/src/services/change-source/pg/schema/validation.js +2 -4
  124. package/out/zero-cache/src/services/change-source/pg/schema/validation.js.map +1 -1
  125. package/out/zero-cache/src/services/change-source/protocol/current/data.d.ts +158 -53
  126. package/out/zero-cache/src/services/change-source/protocol/current/data.d.ts.map +1 -1
  127. package/out/zero-cache/src/services/change-source/protocol/current/data.js +55 -10
  128. package/out/zero-cache/src/services/change-source/protocol/current/data.js.map +1 -1
  129. package/out/zero-cache/src/services/change-source/protocol/current/downstream.d.ts +210 -72
  130. package/out/zero-cache/src/services/change-source/protocol/current/downstream.d.ts.map +1 -1
  131. package/out/zero-cache/src/services/change-source/protocol/current.js +4 -2
  132. package/out/zero-cache/src/services/change-source/replica-schema.d.ts.map +1 -1
  133. package/out/zero-cache/src/services/change-source/replica-schema.js +20 -4
  134. package/out/zero-cache/src/services/change-source/replica-schema.js.map +1 -1
  135. package/out/zero-cache/src/services/change-streamer/change-streamer-service.d.ts +1 -1
  136. package/out/zero-cache/src/services/change-streamer/change-streamer-service.d.ts.map +1 -1
  137. package/out/zero-cache/src/services/change-streamer/change-streamer-service.js +6 -4
  138. package/out/zero-cache/src/services/change-streamer/change-streamer-service.js.map +1 -1
  139. package/out/zero-cache/src/services/change-streamer/change-streamer.d.ts +71 -25
  140. package/out/zero-cache/src/services/change-streamer/change-streamer.d.ts.map +1 -1
  141. package/out/zero-cache/src/services/change-streamer/change-streamer.js +1 -1
  142. package/out/zero-cache/src/services/change-streamer/change-streamer.js.map +1 -1
  143. package/out/zero-cache/src/services/change-streamer/schema/tables.d.ts +1 -0
  144. package/out/zero-cache/src/services/change-streamer/schema/tables.d.ts.map +1 -1
  145. package/out/zero-cache/src/services/change-streamer/schema/tables.js +6 -5
  146. package/out/zero-cache/src/services/change-streamer/schema/tables.js.map +1 -1
  147. package/out/zero-cache/src/services/change-streamer/storer.d.ts +1 -1
  148. package/out/zero-cache/src/services/change-streamer/storer.d.ts.map +1 -1
  149. package/out/zero-cache/src/services/change-streamer/storer.js +17 -6
  150. package/out/zero-cache/src/services/change-streamer/storer.js.map +1 -1
  151. package/out/zero-cache/src/services/change-streamer/subscriber.d.ts +2 -0
  152. package/out/zero-cache/src/services/change-streamer/subscriber.d.ts.map +1 -1
  153. package/out/zero-cache/src/services/change-streamer/subscriber.js +14 -1
  154. package/out/zero-cache/src/services/change-streamer/subscriber.js.map +1 -1
  155. package/out/zero-cache/src/services/heapz.d.ts.map +1 -1
  156. package/out/zero-cache/src/services/heapz.js +1 -0
  157. package/out/zero-cache/src/services/heapz.js.map +1 -1
  158. package/out/zero-cache/src/services/life-cycle.d.ts +1 -1
  159. package/out/zero-cache/src/services/life-cycle.d.ts.map +1 -1
  160. package/out/zero-cache/src/services/life-cycle.js.map +1 -1
  161. package/out/zero-cache/src/services/litestream/commands.d.ts.map +1 -1
  162. package/out/zero-cache/src/services/litestream/commands.js +3 -1
  163. package/out/zero-cache/src/services/litestream/commands.js.map +1 -1
  164. package/out/zero-cache/src/services/litestream/config.yml +1 -0
  165. package/out/zero-cache/src/services/mutagen/error.d.ts.map +1 -1
  166. package/out/zero-cache/src/services/mutagen/error.js +4 -1
  167. package/out/zero-cache/src/services/mutagen/error.js.map +1 -1
  168. package/out/zero-cache/src/services/mutagen/mutagen.d.ts +4 -4
  169. package/out/zero-cache/src/services/mutagen/mutagen.d.ts.map +1 -1
  170. package/out/zero-cache/src/services/mutagen/mutagen.js +10 -24
  171. package/out/zero-cache/src/services/mutagen/mutagen.js.map +1 -1
  172. package/out/zero-cache/src/services/mutagen/pusher.d.ts +8 -6
  173. package/out/zero-cache/src/services/mutagen/pusher.d.ts.map +1 -1
  174. package/out/zero-cache/src/services/mutagen/pusher.js +130 -19
  175. package/out/zero-cache/src/services/mutagen/pusher.js.map +1 -1
  176. package/out/zero-cache/src/services/replicator/change-processor.d.ts.map +1 -1
  177. package/out/zero-cache/src/services/replicator/change-processor.js +24 -31
  178. package/out/zero-cache/src/services/replicator/change-processor.js.map +1 -1
  179. package/out/zero-cache/src/services/replicator/schema/change-log.d.ts +4 -4
  180. package/out/zero-cache/src/services/replicator/schema/change-log.d.ts.map +1 -1
  181. package/out/zero-cache/src/services/replicator/schema/change-log.js +38 -36
  182. package/out/zero-cache/src/services/replicator/schema/change-log.js.map +1 -1
  183. package/out/zero-cache/src/services/{change-source → replicator/schema}/column-metadata.d.ts +3 -3
  184. package/out/zero-cache/src/services/replicator/schema/column-metadata.d.ts.map +1 -0
  185. package/out/zero-cache/src/services/{change-source → replicator/schema}/column-metadata.js +3 -3
  186. package/out/zero-cache/src/services/replicator/schema/column-metadata.js.map +1 -0
  187. package/out/zero-cache/src/services/replicator/schema/replication-state.d.ts.map +1 -1
  188. package/out/zero-cache/src/services/replicator/schema/replication-state.js +3 -1
  189. package/out/zero-cache/src/services/replicator/schema/replication-state.js.map +1 -1
  190. package/out/zero-cache/src/services/run-ast.js +1 -1
  191. package/out/zero-cache/src/services/run-ast.js.map +1 -1
  192. package/out/zero-cache/src/services/statz.d.ts.map +1 -1
  193. package/out/zero-cache/src/services/statz.js +1 -0
  194. package/out/zero-cache/src/services/statz.js.map +1 -1
  195. package/out/zero-cache/src/services/view-syncer/client-handler.d.ts +5 -6
  196. package/out/zero-cache/src/services/view-syncer/client-handler.d.ts.map +1 -1
  197. package/out/zero-cache/src/services/view-syncer/client-handler.js +5 -23
  198. package/out/zero-cache/src/services/view-syncer/client-handler.js.map +1 -1
  199. package/out/zero-cache/src/services/view-syncer/cvr-store.d.ts +1 -1
  200. package/out/zero-cache/src/services/view-syncer/cvr-store.d.ts.map +1 -1
  201. package/out/zero-cache/src/services/view-syncer/cvr-store.js +65 -44
  202. package/out/zero-cache/src/services/view-syncer/cvr-store.js.map +1 -1
  203. package/out/zero-cache/src/services/view-syncer/cvr.d.ts +0 -1
  204. package/out/zero-cache/src/services/view-syncer/cvr.d.ts.map +1 -1
  205. package/out/zero-cache/src/services/view-syncer/cvr.js +23 -6
  206. package/out/zero-cache/src/services/view-syncer/cvr.js.map +1 -1
  207. package/out/zero-cache/src/services/view-syncer/pipeline-driver.d.ts +14 -22
  208. package/out/zero-cache/src/services/view-syncer/pipeline-driver.d.ts.map +1 -1
  209. package/out/zero-cache/src/services/view-syncer/pipeline-driver.js +46 -67
  210. package/out/zero-cache/src/services/view-syncer/pipeline-driver.js.map +1 -1
  211. package/out/zero-cache/src/services/view-syncer/row-record-cache.d.ts +1 -1
  212. package/out/zero-cache/src/services/view-syncer/row-record-cache.d.ts.map +1 -1
  213. package/out/zero-cache/src/services/view-syncer/row-record-cache.js +22 -11
  214. package/out/zero-cache/src/services/view-syncer/row-record-cache.js.map +1 -1
  215. package/out/zero-cache/src/services/view-syncer/snapshotter.d.ts +0 -2
  216. package/out/zero-cache/src/services/view-syncer/snapshotter.d.ts.map +1 -1
  217. package/out/zero-cache/src/services/view-syncer/snapshotter.js +3 -11
  218. package/out/zero-cache/src/services/view-syncer/snapshotter.js.map +1 -1
  219. package/out/zero-cache/src/services/view-syncer/view-syncer.d.ts +6 -4
  220. package/out/zero-cache/src/services/view-syncer/view-syncer.d.ts.map +1 -1
  221. package/out/zero-cache/src/services/view-syncer/view-syncer.js +216 -243
  222. package/out/zero-cache/src/services/view-syncer/view-syncer.js.map +1 -1
  223. package/out/zero-cache/src/types/lexi-version.d.ts.map +1 -1
  224. package/out/zero-cache/src/types/lexi-version.js +4 -1
  225. package/out/zero-cache/src/types/lexi-version.js.map +1 -1
  226. package/out/zero-cache/src/types/lite.d.ts.map +1 -1
  227. package/out/zero-cache/src/types/lite.js +8 -2
  228. package/out/zero-cache/src/types/lite.js.map +1 -1
  229. package/out/zero-cache/src/types/shards.js +1 -1
  230. package/out/zero-cache/src/types/shards.js.map +1 -1
  231. package/out/zero-cache/src/types/sql.d.ts +5 -0
  232. package/out/zero-cache/src/types/sql.d.ts.map +1 -1
  233. package/out/zero-cache/src/types/sql.js +5 -1
  234. package/out/zero-cache/src/types/sql.js.map +1 -1
  235. package/out/zero-cache/src/types/subscription.js +1 -1
  236. package/out/zero-cache/src/types/subscription.js.map +1 -1
  237. package/out/zero-cache/src/workers/connect-params.d.ts +1 -1
  238. package/out/zero-cache/src/workers/connect-params.d.ts.map +1 -1
  239. package/out/zero-cache/src/workers/connect-params.js +2 -3
  240. package/out/zero-cache/src/workers/connect-params.js.map +1 -1
  241. package/out/zero-cache/src/workers/replicator.d.ts.map +1 -1
  242. package/out/zero-cache/src/workers/replicator.js +2 -5
  243. package/out/zero-cache/src/workers/replicator.js.map +1 -1
  244. package/out/zero-cache/src/workers/syncer-ws-message-handler.d.ts.map +1 -1
  245. package/out/zero-cache/src/workers/syncer-ws-message-handler.js +15 -10
  246. package/out/zero-cache/src/workers/syncer-ws-message-handler.js.map +1 -1
  247. package/out/zero-cache/src/workers/syncer.d.ts.map +1 -1
  248. package/out/zero-cache/src/workers/syncer.js +17 -10
  249. package/out/zero-cache/src/workers/syncer.js.map +1 -1
  250. package/out/zero-client/src/client/connection-manager.d.ts +8 -0
  251. package/out/zero-client/src/client/connection-manager.d.ts.map +1 -1
  252. package/out/zero-client/src/client/connection-manager.js +33 -0
  253. package/out/zero-client/src/client/connection-manager.js.map +1 -1
  254. package/out/zero-client/src/client/connection.d.ts.map +1 -1
  255. package/out/zero-client/src/client/connection.js +6 -3
  256. package/out/zero-client/src/client/connection.js.map +1 -1
  257. package/out/zero-client/src/client/context.js +1 -0
  258. package/out/zero-client/src/client/context.js.map +1 -1
  259. package/out/zero-client/src/client/error.js +1 -1
  260. package/out/zero-client/src/client/error.js.map +1 -1
  261. package/out/zero-client/src/client/mutator-proxy.d.ts.map +1 -1
  262. package/out/zero-client/src/client/mutator-proxy.js +15 -1
  263. package/out/zero-client/src/client/mutator-proxy.js.map +1 -1
  264. package/out/zero-client/src/client/options.d.ts +11 -1
  265. package/out/zero-client/src/client/options.d.ts.map +1 -1
  266. package/out/zero-client/src/client/options.js.map +1 -1
  267. package/out/zero-client/src/client/query-manager.d.ts +4 -0
  268. package/out/zero-client/src/client/query-manager.d.ts.map +1 -1
  269. package/out/zero-client/src/client/query-manager.js +7 -0
  270. package/out/zero-client/src/client/query-manager.js.map +1 -1
  271. package/out/zero-client/src/client/version.js +1 -1
  272. package/out/zero-client/src/client/zero.d.ts +5 -5
  273. package/out/zero-client/src/client/zero.d.ts.map +1 -1
  274. package/out/zero-client/src/client/zero.js +53 -8
  275. package/out/zero-client/src/client/zero.js.map +1 -1
  276. package/out/zero-client/src/mod.d.ts +1 -0
  277. package/out/zero-client/src/mod.d.ts.map +1 -1
  278. package/out/zero-protocol/src/connect.d.ts +4 -0
  279. package/out/zero-protocol/src/connect.d.ts.map +1 -1
  280. package/out/zero-protocol/src/connect.js +3 -1
  281. package/out/zero-protocol/src/connect.js.map +1 -1
  282. package/out/zero-protocol/src/protocol-version.d.ts +1 -1
  283. package/out/zero-protocol/src/protocol-version.d.ts.map +1 -1
  284. package/out/zero-protocol/src/protocol-version.js +1 -1
  285. package/out/zero-protocol/src/protocol-version.js.map +1 -1
  286. package/out/zero-protocol/src/push.d.ts +16 -0
  287. package/out/zero-protocol/src/push.d.ts.map +1 -1
  288. package/out/zero-protocol/src/push.js +25 -1
  289. package/out/zero-protocol/src/push.js.map +1 -1
  290. package/out/zero-protocol/src/up.d.ts +2 -0
  291. package/out/zero-protocol/src/up.d.ts.map +1 -1
  292. package/out/zero-react/src/mod.d.ts +3 -1
  293. package/out/zero-react/src/mod.d.ts.map +1 -1
  294. package/out/zero-react/src/paging-reducer.d.ts +61 -0
  295. package/out/zero-react/src/paging-reducer.d.ts.map +1 -0
  296. package/out/zero-react/src/paging-reducer.js +77 -0
  297. package/out/zero-react/src/paging-reducer.js.map +1 -0
  298. package/out/zero-react/src/use-query.d.ts +11 -1
  299. package/out/zero-react/src/use-query.d.ts.map +1 -1
  300. package/out/zero-react/src/use-query.js +13 -11
  301. package/out/zero-react/src/use-query.js.map +1 -1
  302. package/out/zero-react/src/use-rows.d.ts +39 -0
  303. package/out/zero-react/src/use-rows.d.ts.map +1 -0
  304. package/out/zero-react/src/use-rows.js +130 -0
  305. package/out/zero-react/src/use-rows.js.map +1 -0
  306. package/out/zero-react/src/use-zero-virtualizer.d.ts +122 -0
  307. package/out/zero-react/src/use-zero-virtualizer.d.ts.map +1 -0
  308. package/out/zero-react/src/use-zero-virtualizer.js +342 -0
  309. package/out/zero-react/src/use-zero-virtualizer.js.map +1 -0
  310. package/out/zero-react/src/zero-provider.js +1 -1
  311. package/out/zero-react/src/zero-provider.js.map +1 -1
  312. package/out/zero-server/src/adapters/drizzle.d.ts +18 -18
  313. package/out/zero-server/src/adapters/drizzle.d.ts.map +1 -1
  314. package/out/zero-server/src/adapters/drizzle.js +8 -22
  315. package/out/zero-server/src/adapters/drizzle.js.map +1 -1
  316. package/out/zero-server/src/adapters/pg.d.ts +19 -13
  317. package/out/zero-server/src/adapters/pg.d.ts.map +1 -1
  318. package/out/zero-server/src/adapters/pg.js.map +1 -1
  319. package/out/zero-server/src/adapters/postgresjs.d.ts +19 -13
  320. package/out/zero-server/src/adapters/postgresjs.d.ts.map +1 -1
  321. package/out/zero-server/src/adapters/postgresjs.js.map +1 -1
  322. package/out/zero-server/src/adapters/prisma.d.ts +66 -0
  323. package/out/zero-server/src/adapters/prisma.d.ts.map +1 -0
  324. package/out/zero-server/src/adapters/prisma.js +63 -0
  325. package/out/zero-server/src/adapters/prisma.js.map +1 -0
  326. package/out/zero-server/src/custom.js +1 -15
  327. package/out/zero-server/src/custom.js.map +1 -1
  328. package/out/zero-server/src/mod.d.ts +9 -8
  329. package/out/zero-server/src/mod.d.ts.map +1 -1
  330. package/out/zero-server/src/process-mutations.d.ts +2 -1
  331. package/out/zero-server/src/process-mutations.d.ts.map +1 -1
  332. package/out/zero-server/src/process-mutations.js +39 -4
  333. package/out/zero-server/src/process-mutations.js.map +1 -1
  334. package/out/zero-server/src/push-processor.js +1 -1
  335. package/out/zero-server/src/push-processor.js.map +1 -1
  336. package/out/zero-server/src/schema.d.ts.map +1 -1
  337. package/out/zero-server/src/schema.js +4 -1
  338. package/out/zero-server/src/schema.js.map +1 -1
  339. package/out/zero-server/src/zql-database.d.ts.map +1 -1
  340. package/out/zero-server/src/zql-database.js +18 -0
  341. package/out/zero-server/src/zql-database.js.map +1 -1
  342. package/out/zero-solid/src/mod.d.ts +1 -1
  343. package/out/zero-solid/src/mod.d.ts.map +1 -1
  344. package/out/zero-solid/src/solid-view.js +1 -0
  345. package/out/zero-solid/src/solid-view.js.map +1 -1
  346. package/out/zero-solid/src/use-query.d.ts +10 -1
  347. package/out/zero-solid/src/use-query.d.ts.map +1 -1
  348. package/out/zero-solid/src/use-query.js +22 -5
  349. package/out/zero-solid/src/use-query.js.map +1 -1
  350. package/out/zero-solid/src/use-zero.js +1 -1
  351. package/out/zero-solid/src/use-zero.js.map +1 -1
  352. package/out/zql/src/ivm/constraint.d.ts.map +1 -1
  353. package/out/zql/src/ivm/constraint.js +4 -1
  354. package/out/zql/src/ivm/constraint.js.map +1 -1
  355. package/out/zql/src/ivm/exists.d.ts.map +1 -1
  356. package/out/zql/src/ivm/exists.js +4 -1
  357. package/out/zql/src/ivm/exists.js.map +1 -1
  358. package/out/zql/src/ivm/join-utils.d.ts.map +1 -1
  359. package/out/zql/src/ivm/join-utils.js +8 -2
  360. package/out/zql/src/ivm/join-utils.js.map +1 -1
  361. package/out/zql/src/ivm/memory-source.d.ts.map +1 -1
  362. package/out/zql/src/ivm/memory-source.js +12 -3
  363. package/out/zql/src/ivm/memory-source.js.map +1 -1
  364. package/out/zql/src/ivm/push-accumulated.d.ts.map +1 -1
  365. package/out/zql/src/ivm/push-accumulated.js +25 -2
  366. package/out/zql/src/ivm/push-accumulated.js.map +1 -1
  367. package/out/zql/src/ivm/stream.d.ts.map +1 -1
  368. package/out/zql/src/ivm/stream.js +1 -1
  369. package/out/zql/src/ivm/stream.js.map +1 -1
  370. package/out/zql/src/ivm/take.d.ts.map +1 -1
  371. package/out/zql/src/ivm/take.js +24 -6
  372. package/out/zql/src/ivm/take.js.map +1 -1
  373. package/out/zql/src/ivm/union-fan-in.d.ts.map +1 -1
  374. package/out/zql/src/ivm/union-fan-in.js +12 -3
  375. package/out/zql/src/ivm/union-fan-in.js.map +1 -1
  376. package/out/zql/src/mutate/mutator.js +4 -4
  377. package/out/zql/src/mutate/mutator.js.map +1 -1
  378. package/out/zql/src/query/create-builder.js +3 -5
  379. package/out/zql/src/query/create-builder.js.map +1 -1
  380. package/out/zql/src/query/query-registry.js +4 -4
  381. package/out/zql/src/query/query-registry.js.map +1 -1
  382. package/out/zqlite/src/table-source.d.ts.map +1 -1
  383. package/out/zqlite/src/table-source.js +1 -2
  384. package/out/zqlite/src/table-source.js.map +1 -1
  385. package/package.json +8 -4
  386. package/out/zero-cache/src/services/change-source/column-metadata.d.ts.map +0 -1
  387. package/out/zero-cache/src/services/change-source/column-metadata.js.map +0 -1
  388. package/out/zero-cache/src/types/schema-versions.d.ts +0 -12
  389. package/out/zero-cache/src/types/schema-versions.d.ts.map +0 -1
  390. package/out/zero-cache/src/types/schema-versions.js +0 -28
  391. package/out/zero-cache/src/types/schema-versions.js.map +0 -1
package/out/zero-cache/src/services/replicator/schema/change-log.js
@@ -1,64 +1,67 @@
  import { jsonObjectSchema, parse as parse$1, stringify } from "../../../../../shared/src/bigint-json.js";
  import { literalUnion, parse } from "../../../../../shared/src/valita.js";
  import { normalizedKeyOrder } from "../../../types/row-key.js";
- import { object, string } from "@badrap/valita";
+ import { object, string, number } from "@badrap/valita";
  const SET_OP = "s";
  const DEL_OP = "d";
  const TRUNCATE_OP = "t";
  const RESET_OP = "r";
  const CREATE_CHANGELOG_SCHEMA = (
  // stateVersion : a.k.a. row version
+ // pos : order in which to process the change (within the version)
  // table : The table associated with the change
- // rowKey : JSON row key for a row change. For table-wide changes,
- // this is set to '', which guarantees that they sort before
- // any (subsequent) row-level changes. Note that because
- // RESET and TRUNCATE use the same rowKey, only the last
- // one will persist. This is fine because they are both
- // handled in the same way, i.e. by resetting the pipelines,
- // as they cannot be processed via change log entries.
- // op : 't' for table truncation
+ // rowKey : JSON row key for a row change. For table-wide changes RESET
+ // and TRUNCATE, there is no associated row; instead, `pos` is
+ // set to -1 and the rowKey is set to the stateVersion,
+ // guaranteeing when attempting to process the transaction,
+ // the pipeline is reset (and the change log traversal
+ // aborted).
+ // op : 's' for set (insert/update)
+ // : 'd' for delete
  // : 'r' for table reset (schema change)
- // 's' for set (insert/update)
- // 'd' for delete
+ // : 't' for table truncation (which also resets the pipeline)
+ //
+ // Naming note: To maintain compatibility between a new replication-manager
+ // and old view-syncers, the previous _zero.changeLog table is preserved
+ // and its replacement given a new name "changeLog2".
  `
- CREATE TABLE "_zero.changeLog" (
+ CREATE TABLE "_zero.changeLog2" (
  "stateVersion" TEXT NOT NULL,
+ "pos" INT NOT NULL,
  "table" TEXT NOT NULL,
- "rowKey" TEXT,
+ "rowKey" TEXT NOT NULL,
  "op" TEXT NOT NULL,
- PRIMARY KEY("stateVersion", "table", "rowKey"),
+ PRIMARY KEY("stateVersion", "pos"),
  UNIQUE("table", "rowKey")
- )
+ );
  `
  );
  const changeLogEntrySchema = object({
  stateVersion: string(),
+ pos: number(),
  table: string(),
- rowKey: string().nullable(),
+ rowKey: string(),
  op: literalUnion(SET_OP, DEL_OP, TRUNCATE_OP, RESET_OP)
  }).map((val) => ({
  ...val,
- // Note: the empty string "" (for table-wide ops) will result in `null`
- rowKey: val.rowKey ? parse(parse$1(val.rowKey), jsonObjectSchema) : null
+ // Note: sets the rowKey to `null` for table-wide ops / resets
+ rowKey: val.op === "t" || val.op === "r" ? null : parse(parse$1(val.rowKey), jsonObjectSchema)
  }));
- function initChangeLog(db) {
- db.exec(CREATE_CHANGELOG_SCHEMA);
- }
- function logSetOp(db, version, table, row) {
- return logRowOp(db, version, table, row, SET_OP);
+ function logSetOp(db, version, pos, table, row) {
+ return logRowOp(db, version, pos, table, row, SET_OP);
  }
- function logDeleteOp(db, version, table, row) {
- return logRowOp(db, version, table, row, DEL_OP);
+ function logDeleteOp(db, version, pos, table, row) {
+ return logRowOp(db, version, pos, table, row, DEL_OP);
  }
- function logRowOp(db, version, table, row, op) {
+ function logRowOp(db, version, pos, table, row, op) {
  const rowKey = stringify(normalizedKeyOrder(row));
  db.run(
  `
- INSERT OR REPLACE INTO "_zero.changeLog"
- (stateVersion, "table", rowKey, op)
- VALUES (@version, @table, JSON(@rowKey), @op)
+ INSERT OR REPLACE INTO "_zero.changeLog2"
+ (stateVersion, pos, "table", rowKey, op)
+ VALUES (@version, @pos, @table, JSON(@rowKey), @op)
  `,
- { version, table, rowKey, op }
+ { version, pos, table, rowKey, op }
  );
  return rowKey;
  }
@@ -71,27 +74,26 @@ function logResetOp(db, version, table) {
  function logTableWideOp(db, version, table, op) {
  db.run(
  `
- DELETE FROM "_zero.changeLog" WHERE stateVersion = ? AND "table" = ?
+ DELETE FROM "_zero.changeLog2" WHERE stateVersion = ? AND "table" = ?
  `,
  version,
  table
  );
  db.run(
  `
- INSERT OR REPLACE INTO "_zero.changeLog" (stateVersion, "table", rowKey, op)
- VALUES (@version, @table, @rowKey, @op)
+ INSERT OR REPLACE INTO "_zero.changeLog2" (stateVersion, pos, "table", rowKey, op)
+ VALUES (@version, -1, @table, @version, @op)
  `,
- // See file JSDoc for explanation of the rowKey w.r.t. ordering of table-wide ops.
- { version, table, rowKey: "", op }
+ { version, table, op }
  );
  }
  export {
+ CREATE_CHANGELOG_SCHEMA,
  DEL_OP,
  RESET_OP,
  SET_OP,
  TRUNCATE_OP,
  changeLogEntrySchema,
- initChangeLog,
  logDeleteOp,
  logResetOp,
  logSetOp,
package/out/zero-cache/src/services/replicator/schema/change-log.js.map
@@ -1 +1 @@
[source map regenerated to track the change-log.js changes above; minified mapping JSON with embedded TypeScript source omitted]
package/out/zero-cache/src/services/{change-source → replicator/schema}/column-metadata.d.ts
@@ -8,8 +8,8 @@
   * This table stores that metadata separately, allowing SQLite columns to use
   * plain type names while preserving all necessary upstream type information.
   */
- import type { Database } from '../../../../zqlite/src/db.ts';
- import type { ColumnSpec, LiteTableSpec } from '../../db/specs.ts';
+ import type { Database } from '../../../../../zqlite/src/db.ts';
+ import type { ColumnSpec, LiteTableSpec } from '../../../db/specs.ts';
  /**
   * Structured column metadata, replacing the old pipe-delimited string format.
   */
@@ -48,7 +48,7 @@ export declare class ColumnMetadataStore {
  hasTable(): boolean;
  /**
   * Populates metadata table from existing tables that use pipe notation.
- * This is used during migration v6 to backfill the metadata table.
+ * This is used during migration v8 to backfill the metadata table.
   */
  populateFromExistingTables(tables: LiteTableSpec[]): void;
  }
package/out/zero-cache/src/services/replicator/schema/column-metadata.d.ts.map
@@ -0,0 +1 @@
[new source map for column-metadata.d.ts; minified mapping JSON omitted]
package/out/zero-cache/src/services/{change-source → replicator/schema}/column-metadata.js
@@ -1,5 +1,5 @@
- import { isArrayColumn, isEnumColumn } from "../../db/pg-to-lite.js";
- import { liteTypeString, upstreamDataType, isArray, isEnum, nullableUpstream } from "../../types/lite.js";
+ import { isArrayColumn, isEnumColumn } from "../../../db/pg-to-lite.js";
+ import { liteTypeString, upstreamDataType, isArray, isEnum, nullableUpstream } from "../../../types/lite.js";
  const CREATE_COLUMN_METADATA_TABLE = `
  CREATE TABLE "_zero.column_metadata" (
  table_name TEXT NOT NULL,
@@ -155,7 +155,7 @@ class ColumnMetadataStore {
  }
  /**
   * Populates metadata table from existing tables that use pipe notation.
- * This is used during migration v6 to backfill the metadata table.
+ * This is used during migration v8 to backfill the metadata table.
   */
  populateFromExistingTables(tables) {
  for (const table of tables) {
package/out/zero-cache/src/services/replicator/schema/column-metadata.js.map
@@ -0,0 +1 @@
[new source map for column-metadata.js; minified mapping JSON with embedded TypeScript source omitted]
AAAA,EACF;AACF;AAMO,SAAS,yBACdA,iBACA,oBACgB;AAChB,QAAM,WAAW,iBAAiBA,eAAc;AAChD,QAAM,cAAcC,QAAaD,eAAc;AAK/C,QAAM,mBACJ,eAAe,CAAC,SAAS,SAAS,IAAI,IAAI,GAAG,QAAQ,OAAO;AAE9D,SAAO;AAAA,IACL,cAAc;AAAA,IACd,WAAW,CAAC,iBAAiBA,eAAc;AAAA,IAC3C,QAAQE,OAAYF,eAAc;AAAA,IAClC,SAAS;AAAA,IACT,oBAAoB,sBAAsB;AAAA,EAAA;AAE9C;AAMO,SAAS,yBAAyB,UAAkC;AACzE,SAAO;AAAA,IACL,SAAS;AAAA,IACT,SAAS;AAAA,IACT,SAAS;AAAA,IACT,SAAS;AAAA,EAAA;AAEb;AAQO,SAAS,uBAAuB,MAAkC;AACvE,SAAO;AAAA,IACL,cAAc,KAAK;AAAA,IACnB,WAAW,KAAK,WAAW;AAAA,IAC3B,QAAQ,aAAa,IAAI;AAAA,IACzB,SAAS,cAAc,IAAI;AAAA,IAC3B,oBAAoB,KAAK,0BAA0B;AAAA,EAAA;AAEvD;"}
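The `column-metadata.js.map` source above introduces `ColumnMetadataStore`, which moves column facts (upstream type, NOT NULL, enum, array, max length) out of pipe-delimited `LiteTypeString` encodings and into a structured `_zero.column_metadata` table, with `liteTypeStringToMetadata` as the migration-period bridge. For orientation, here is a minimal sketch of what decoding the old notation involves; the helper and the `'ENUM'` attribute tag are assumptions for illustration, not the package's actual shared helpers (`upstreamDataType`, `nullableUpstream`, etc.).

```ts
// Illustrative sketch only: decodes the pipe-delimited "LiteTypeString"
// notation that the new metadata table replaces, e.g. 'int4|NOT_NULL[]',
// into the same shape as a ColumnMetadata row. The 'ENUM' attribute name
// is assumed for illustration.
type ColumnMetadataSketch = {
  upstreamType: string;
  isNotNull: boolean;
  isEnum: boolean;
  isArray: boolean;
};

function decodeLiteTypeString(liteType: string): ColumnMetadataSketch {
  // A trailing '[]' marks an array. In old-style strings it follows the
  // attribute list ('int4|NOT_NULL[]'); in new-style strings it is part
  // of the base type ('text[]').
  const isArray = liteType.endsWith('[]');
  const withoutArray = isArray ? liteType.slice(0, -2) : liteType;
  const [baseType, ...attrs] = withoutArray.split('|');
  return {
    // Reconstruct the full upstream type including array notation,
    // mirroring liteTypeStringToMetadata in the source above.
    upstreamType: isArray ? `${baseType}[]` : baseType,
    isNotNull: attrs.includes('NOT_NULL'),
    isEnum: attrs.includes('ENUM'),
    isArray,
  };
}

// decodeLiteTypeString('int4|NOT_NULL[]') →
// {upstreamType: 'int4[]', isNotNull: true, isEnum: false, isArray: true}
```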
@@ -1 +1 @@
- {"version":3,"file":"replication-state.d.ts","sourceRoot":"","sources":["../../../../../../../zero-cache/src/services/replicator/schema/replication-state.ts"],"names":[],"mappings":"AAAA;;;;;GAKG;AAEH,OAAO,KAAK,CAAC,MAAM,qCAAqC,CAAC;AACzD,OAAO,KAAK,EAAC,QAAQ,EAAC,MAAM,iCAAiC,CAAC;AAC9D,OAAO,KAAK,EAAC,eAAe,EAAC,MAAM,2BAA2B,CAAC;AAC/D,OAAO,EAAC,wBAAwB,EAAC,MAAM,gBAAgB,CAAC;AAExD,OAAO,EAAC,wBAAwB,EAAC,CAAC;AAElC,MAAM,MAAM,YAAY,GAAG,MAAM,GAAG,SAAS,GAAG,QAAQ,CAAC;AAMzD,eAAO,MAAM,2BAA2B,iKAKvC,CAAC;AA+BF,QAAA,MAAM,uBAAuB;;;;EASxB,CAAC;AAEN,MAAM,MAAM,iBAAiB,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,uBAAuB,CAAC,CAAC;AAExE,QAAA,MAAM,sBAAsB;;aAE1B,CAAC;AAEH,MAAM,MAAM,gBAAgB,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,sBAAsB,CAAC,CAAC;AAEtE,wBAAgB,oBAAoB,CAClC,EAAE,EAAE,QAAQ,EACZ,YAAY,EAAE,MAAM,EAAE,EACtB,SAAS,EAAE,MAAM,QAelB;AAED,wBAAgB,WAAW,CAAC,EAAE,EAAE,QAAQ,EAAE,KAAK,EAAE,YAAY,QAM5D;AAED,wBAAgB,kBAAkB,CAAC,EAAE,EAAE,QAAQ;;;IAY9C;AAED,wBAAgB,oBAAoB,CAAC,EAAE,EAAE,eAAe,GAAG,iBAAiB,CAU3E;AAED,wBAAgB,0BAA0B,CACxC,EAAE,EAAE,eAAe,EACnB,SAAS,EAAE,MAAM,QAGlB;AAED,wBAAgB,mBAAmB,CAAC,EAAE,EAAE,eAAe,GAAG,gBAAgB,CAGzE"}
+ {"version":3,"file":"replication-state.d.ts","sourceRoot":"","sources":["../../../../../../../zero-cache/src/services/replicator/schema/replication-state.ts"],"names":[],"mappings":"AAAA;;;;;GAKG;AAEH,OAAO,KAAK,CAAC,MAAM,qCAAqC,CAAC;AACzD,OAAO,KAAK,EAAC,QAAQ,EAAC,MAAM,iCAAiC,CAAC;AAC9D,OAAO,KAAK,EAAC,eAAe,EAAC,MAAM,2BAA2B,CAAC;AAG/D,OAAO,EAAC,wBAAwB,EAAC,MAAM,gBAAgB,CAAC;AAExD,OAAO,EAAC,wBAAwB,EAAC,CAAC;AAElC,MAAM,MAAM,YAAY,GAAG,MAAM,GAAG,SAAS,GAAG,QAAQ,CAAC;AAMzD,eAAO,MAAM,2BAA2B,iKAKvC,CAAC;AAiCF,QAAA,MAAM,uBAAuB;;;;EASxB,CAAC;AAEN,MAAM,MAAM,iBAAiB,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,uBAAuB,CAAC,CAAC;AAExE,QAAA,MAAM,sBAAsB;;aAE1B,CAAC;AAEH,MAAM,MAAM,gBAAgB,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,sBAAsB,CAAC,CAAC;AAEtE,wBAAgB,oBAAoB,CAClC,EAAE,EAAE,QAAQ,EACZ,YAAY,EAAE,MAAM,EAAE,EACtB,SAAS,EAAE,MAAM,QAelB;AAED,wBAAgB,WAAW,CAAC,EAAE,EAAE,QAAQ,EAAE,KAAK,EAAE,YAAY,QAM5D;AAED,wBAAgB,kBAAkB,CAAC,EAAE,EAAE,QAAQ;;;IAY9C;AAED,wBAAgB,oBAAoB,CAAC,EAAE,EAAE,eAAe,GAAG,iBAAiB,CAU3E;AAED,wBAAgB,0BAA0B,CACxC,EAAE,EAAE,eAAe,EACnB,SAAS,EAAE,MAAM,QAGlB;AAED,wBAAgB,mBAAmB,CAAC,EAAE,EAAE,eAAe,GAAG,gBAAgB,CAGzE"}
@@ -1,4 +1,6 @@
  import { parse } from "../../../../../shared/src/valita.js";
+ import { CREATE_CHANGELOG_SCHEMA } from "./change-log.js";
+ import { CREATE_COLUMN_METADATA_TABLE } from "./column-metadata.js";
  import { array, string, object } from "@badrap/valita";
  const CREATE_RUNTIME_EVENTS_TABLE = `
  CREATE TABLE "_zero.runtimeEvents" (
@@ -24,7 +26,7 @@ const CREATE_REPLICATION_STATE_SCHEMA = (
  stateVersion TEXT NOT NULL,
  lock INTEGER PRIMARY KEY DEFAULT 1 CHECK (lock=1)
  );
- ` + CREATE_RUNTIME_EVENTS_TABLE
+ ` + CREATE_CHANGELOG_SCHEMA + CREATE_RUNTIME_EVENTS_TABLE + CREATE_COLUMN_METADATA_TABLE
  );
  const stringArray = array(string());
  const subscriptionStateSchema = object({
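The `replication-state.js` hunk above widens `CREATE_REPLICATION_STATE_SCHEMA` from one appended DDL fragment to three, so the single `db.exec(...)` in `initReplicationState` now also creates the changelog and column-metadata tables. A minimal sketch of the pattern, assuming `better-sqlite3` stands in for the package's zqlite `Database` wrapper:

```ts
// Minimal sketch of composing DDL fragments into one multi-statement
// string and creating all tables with a single exec(), as the updated
// CREATE_REPLICATION_STATE_SCHEMA does. better-sqlite3 is assumed here
// in place of the package's zqlite Database wrapper.
import Database from 'better-sqlite3';

const CREATE_CONFIG = `
  CREATE TABLE "replicationConfig" (
    replicaVersion TEXT NOT NULL,
    lock INTEGER PRIMARY KEY DEFAULT 1 CHECK (lock=1)
  );
`;
const CREATE_STATE = `
  CREATE TABLE "replicationState" (
    stateVersion TEXT NOT NULL,
    lock INTEGER PRIMARY KEY DEFAULT 1 CHECK (lock=1)
  );
`;

// Concatenation yields one DDL script; exec() runs every statement in it.
const SCHEMA = CREATE_CONFIG + CREATE_STATE;

const db = new Database(':memory:');
db.exec(SCHEMA);
```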
@@ -1 +1 @@
- {"version":3,"file":"replication-state.js","sources":["../../../../../../../zero-cache/src/services/replicator/schema/replication-state.ts"],"sourcesContent":["/**\n * Replication metadata, used for incremental view maintenance and catchup.\n *\n * These tables are created atomically in {@link setupReplicationTables}\n * after the logical replication handoff when initial data synchronization has completed.\n */\n\nimport * as v from '../../../../../shared/src/valita.ts';\nimport type {Database} from '../../../../../zqlite/src/db.ts';\nimport type {StatementRunner} from '../../../db/statements.ts';\nimport {ZERO_VERSION_COLUMN_NAME} from './constants.ts';\n\nexport {ZERO_VERSION_COLUMN_NAME};\n\nexport type RuntimeEvent = 'sync' | 'upgrade' | 'vacuum';\n\n// event : The RuntimeEvent. Only one row per event is tracked.\n// Inserting an event will REPLACE any row for the same event.\n// timestamp : SQLite timestamp string, e.g. \"2024-04-12 11:37:46\".\n// Append a `Z` when parsing with `new Date(...)`;\nexport const CREATE_RUNTIME_EVENTS_TABLE = `\n CREATE TABLE \"_zero.runtimeEvents\" (\n event TEXT PRIMARY KEY ON CONFLICT REPLACE,\n timestamp TEXT NOT NULL DEFAULT (current_timestamp)\n );\n`;\n\nconst CREATE_REPLICATION_STATE_SCHEMA =\n // replicaVersion : A value identifying the version at which the initial sync happened, i.e.\n // the version at which all rows were copied, and to `_0_version` was set.\n // This value is used to distinguish data from other replicas (e.g. if a\n // replica is reset or if there are ever multiple replicas).\n // publications : JSON stringified array of publication names\n // lock : Auto-magic column for enforcing single-row semantics.\n `\n CREATE TABLE \"_zero.replicationConfig\" (\n replicaVersion TEXT NOT NULL,\n publications TEXT NOT NULL,\n lock INTEGER PRIMARY KEY DEFAULT 1 CHECK (lock=1)\n );\n ` +\n // stateVersion : The latest version replicated from upstream, starting with the initial\n // `replicaVersion` and moving forward to each subsequent commit watermark\n // (e.g. corresponding to a Postgres LSN). Versions are represented as\n // lexicographically sortable watermarks (e.g. LexiVersions).\n //\n `\n CREATE TABLE \"_zero.replicationState\" (\n stateVersion TEXT NOT NULL,\n lock INTEGER PRIMARY KEY DEFAULT 1 CHECK (lock=1)\n );\n ` +\n CREATE_RUNTIME_EVENTS_TABLE;\n\nconst stringArray = v.array(v.string());\n\nconst subscriptionStateSchema = v\n .object({\n replicaVersion: v.string(),\n publications: v.string(),\n watermark: v.string(),\n })\n .map(s => ({\n ...s,\n publications: v.parse(JSON.parse(s.publications), stringArray),\n }));\n\nexport type SubscriptionState = v.Infer<typeof subscriptionStateSchema>;\n\nconst replicationStateSchema = v.object({\n stateVersion: v.string(),\n});\n\nexport type ReplicationState = v.Infer<typeof replicationStateSchema>;\n\nexport function initReplicationState(\n db: Database,\n publications: string[],\n watermark: string,\n) {\n db.exec(CREATE_REPLICATION_STATE_SCHEMA);\n db.prepare(\n `\n INSERT INTO \"_zero.replicationConfig\" \n (replicaVersion, publications) VALUES (?, ?)\n `,\n ).run(watermark, JSON.stringify(publications.sort()));\n db.prepare(\n `\n INSERT INTO \"_zero.replicationState\" (stateVersion) VALUES (?)\n `,\n ).run(watermark);\n recordEvent(db, 'sync');\n}\n\nexport function recordEvent(db: Database, event: RuntimeEvent) {\n db.prepare(\n `\n INSERT INTO \"_zero.runtimeEvents\" (event) VALUES (?) 
\n `,\n ).run(event);\n}\n\nexport function getAscendingEvents(db: Database) {\n const result = db\n .prepare(\n `SELECT event, timestamp FROM \"_zero.runtimeEvents\" \n ORDER BY timestamp ASC\n `,\n )\n .all<{event: string; timestamp: string}>();\n return result.map(({event, timestamp}) => ({\n event,\n timestamp: new Date(timestamp + 'Z'),\n }));\n}\n\nexport function getSubscriptionState(db: StatementRunner): SubscriptionState {\n const result = db.get(\n `\n SELECT c.replicaVersion, c.publications, s.stateVersion as watermark\n FROM \"_zero.replicationConfig\" as c\n JOIN \"_zero.replicationState\" as s\n ON c.lock = s.lock\n `,\n );\n return v.parse(result, subscriptionStateSchema);\n}\n\nexport function updateReplicationWatermark(\n db: StatementRunner,\n watermark: string,\n) {\n db.run(`UPDATE \"_zero.replicationState\" SET stateVersion=?`, watermark);\n}\n\nexport function getReplicationState(db: StatementRunner): ReplicationState {\n const result = db.get(`SELECT stateVersion FROM \"_zero.replicationState\"`);\n return v.parse(result, replicationStateSchema);\n}\n"],"names":["v.array","v.string","v.object","v.parse"],"mappings":";;AAoBO,MAAM,8BAA8B;AAAA;AAAA;AAAA;AAAA;AAAA;AAO3C,MAAM;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOJ;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,MAkBA;AAAA;AAEF,MAAM,cAAcA,MAAQC,QAAU;AAEtC,MAAM,0BAA0BC,OACtB;AAAA,EACN,gBAAgBD,OAAE;AAAA,EAClB,cAAcA,OAAE;AAAA,EAChB,WAAWA,OAAE;AACf,CAAC,EACA,IAAI,CAAA,OAAM;AAAA,EACT,GAAG;AAAA,EACH,cAAcE,MAAQ,KAAK,MAAM,EAAE,YAAY,GAAG,WAAW;AAC/D,EAAE;AAIJ,MAAM,yBAAyBD,OAAS;AAAA,EACtC,cAAcD,OAAE;AAClB,CAAC;AAIM,SAAS,qBACd,IACA,cACA,WACA;AACA,KAAG,KAAK,+BAA+B;AACvC,KAAG;AAAA,IACD;AAAA;AAAA;AAAA;AAAA,EAAA,EAIA,IAAI,WAAW,KAAK,UAAU,aAAa,KAAA,CAAM,CAAC;AACpD,KAAG;AAAA,IACD;AAAA;AAAA;AAAA,EAAA,EAGA,IAAI,SAAS;AACf,cAAY,IAAI,MAAM;AACxB;AAEO,SAAS,YAAY,IAAc,OAAqB;AAC7D,KAAG;AAAA,IACD;AAAA;AAAA;AAAA,EAAA,EAGA,IAAI,KAAK;AACb;AAEO,SAAS,mBAAmB,IAAc;AAC/C,QAAM,SAAS,GACZ;AAAA,IACC;AAAA;AAAA;AAAA,EAAA,EAID,IAAA;AACH,SAAO,OAAO,IAAI,CAAC,EAAC,OAAO,iBAAgB;AAAA,IACzC;AAAA,IACA,WAAW,oBAAI,KAAK,YAAY,GAAG;AAAA,EAAA,EACnC;AACJ;AAEO,SAAS,qBAAqB,IAAwC;AAC3E,QAAM,SAAS,GAAG;AAAA,IAChB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAAA;AAOF,SAAOE,MAAQ,QAAQ,uBAAuB;AAChD;AAEO,SAAS,2BACd,IACA,WACA;AACA,KAAG,IAAI,sDAAsD,SAAS;AACxE;AAEO,SAAS,oBAAoB,IAAuC;AACzE,QAAM,SAAS,GAAG,IAAI,mDAAmD;AACzE,SAAOA,MAAQ,QAAQ,sBAAsB;AAC/C;"}
+ {"version":3,"file":"replication-state.js","sources":["../../../../../../../zero-cache/src/services/replicator/schema/replication-state.ts"],"sourcesContent":["/**\n * Replication metadata, used for incremental view maintenance and catchup.\n *\n * These tables are created atomically in {@link setupReplicationTables}\n * after the logical replication handoff when initial data synchronization has completed.\n */\n\nimport * as v from '../../../../../shared/src/valita.ts';\nimport type {Database} from '../../../../../zqlite/src/db.ts';\nimport type {StatementRunner} from '../../../db/statements.ts';\nimport {CREATE_CHANGELOG_SCHEMA} from './change-log.ts';\nimport {CREATE_COLUMN_METADATA_TABLE} from './column-metadata.ts';\nimport {ZERO_VERSION_COLUMN_NAME} from './constants.ts';\n\nexport {ZERO_VERSION_COLUMN_NAME};\n\nexport type RuntimeEvent = 'sync' | 'upgrade' | 'vacuum';\n\n// event : The RuntimeEvent. Only one row per event is tracked.\n// Inserting an event will REPLACE any row for the same event.\n// timestamp : SQLite timestamp string, e.g. \"2024-04-12 11:37:46\".\n// Append a `Z` when parsing with `new Date(...)`;\nexport const CREATE_RUNTIME_EVENTS_TABLE = `\n CREATE TABLE \"_zero.runtimeEvents\" (\n event TEXT PRIMARY KEY ON CONFLICT REPLACE,\n timestamp TEXT NOT NULL DEFAULT (current_timestamp)\n );\n`;\n\nconst CREATE_REPLICATION_STATE_SCHEMA =\n // replicaVersion : A value identifying the version at which the initial sync happened, i.e.\n // the version at which all rows were copied, and to `_0_version` was set.\n // This value is used to distinguish data from other replicas (e.g. if a\n // replica is reset or if there are ever multiple replicas).\n // publications : JSON stringified array of publication names\n // lock : Auto-magic column for enforcing single-row semantics.\n `\n CREATE TABLE \"_zero.replicationConfig\" (\n replicaVersion TEXT NOT NULL,\n publications TEXT NOT NULL,\n lock INTEGER PRIMARY KEY DEFAULT 1 CHECK (lock=1)\n );\n ` +\n // stateVersion : The latest version replicated from upstream, starting with the initial\n // `replicaVersion` and moving forward to each subsequent commit watermark\n // (e.g. corresponding to a Postgres LSN). Versions are represented as\n // lexicographically sortable watermarks (e.g. 
LexiVersions).\n //\n `\n CREATE TABLE \"_zero.replicationState\" (\n stateVersion TEXT NOT NULL,\n lock INTEGER PRIMARY KEY DEFAULT 1 CHECK (lock=1)\n );\n ` +\n CREATE_CHANGELOG_SCHEMA +\n CREATE_RUNTIME_EVENTS_TABLE +\n CREATE_COLUMN_METADATA_TABLE;\n\nconst stringArray = v.array(v.string());\n\nconst subscriptionStateSchema = v\n .object({\n replicaVersion: v.string(),\n publications: v.string(),\n watermark: v.string(),\n })\n .map(s => ({\n ...s,\n publications: v.parse(JSON.parse(s.publications), stringArray),\n }));\n\nexport type SubscriptionState = v.Infer<typeof subscriptionStateSchema>;\n\nconst replicationStateSchema = v.object({\n stateVersion: v.string(),\n});\n\nexport type ReplicationState = v.Infer<typeof replicationStateSchema>;\n\nexport function initReplicationState(\n db: Database,\n publications: string[],\n watermark: string,\n) {\n db.exec(CREATE_REPLICATION_STATE_SCHEMA);\n db.prepare(\n `\n INSERT INTO \"_zero.replicationConfig\" \n (replicaVersion, publications) VALUES (?, ?)\n `,\n ).run(watermark, JSON.stringify(publications.sort()));\n db.prepare(\n `\n INSERT INTO \"_zero.replicationState\" (stateVersion) VALUES (?)\n `,\n ).run(watermark);\n recordEvent(db, 'sync');\n}\n\nexport function recordEvent(db: Database, event: RuntimeEvent) {\n db.prepare(\n `\n INSERT INTO \"_zero.runtimeEvents\" (event) VALUES (?) \n `,\n ).run(event);\n}\n\nexport function getAscendingEvents(db: Database) {\n const result = db\n .prepare(\n `SELECT event, timestamp FROM \"_zero.runtimeEvents\" \n ORDER BY timestamp ASC\n `,\n )\n .all<{event: string; timestamp: string}>();\n return result.map(({event, timestamp}) => ({\n event,\n timestamp: new Date(timestamp + 'Z'),\n }));\n}\n\nexport function getSubscriptionState(db: StatementRunner): SubscriptionState {\n const result = db.get(\n `\n SELECT c.replicaVersion, c.publications, s.stateVersion as watermark\n FROM \"_zero.replicationConfig\" as c\n JOIN \"_zero.replicationState\" as s\n ON c.lock = s.lock\n `,\n );\n return v.parse(result, subscriptionStateSchema);\n}\n\nexport function updateReplicationWatermark(\n db: StatementRunner,\n watermark: string,\n) {\n db.run(`UPDATE \"_zero.replicationState\" SET stateVersion=?`, watermark);\n}\n\nexport function getReplicationState(db: StatementRunner): ReplicationState {\n const result = db.get(`SELECT stateVersion FROM \"_zero.replicationState\"`);\n return v.parse(result, 
replicationStateSchema);\n}\n"],"names":["v.array","v.string","v.object","v.parse"],"mappings":";;;;AAsBO,MAAM,8BAA8B;AAAA;AAAA;AAAA;AAAA;AAAA;AAO3C,MAAM;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOJ;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,MAkBA,0BACA,8BACA;AAAA;AAEF,MAAM,cAAcA,MAAQC,QAAU;AAEtC,MAAM,0BAA0BC,OACtB;AAAA,EACN,gBAAgBD,OAAE;AAAA,EAClB,cAAcA,OAAE;AAAA,EAChB,WAAWA,OAAE;AACf,CAAC,EACA,IAAI,CAAA,OAAM;AAAA,EACT,GAAG;AAAA,EACH,cAAcE,MAAQ,KAAK,MAAM,EAAE,YAAY,GAAG,WAAW;AAC/D,EAAE;AAIJ,MAAM,yBAAyBD,OAAS;AAAA,EACtC,cAAcD,OAAE;AAClB,CAAC;AAIM,SAAS,qBACd,IACA,cACA,WACA;AACA,KAAG,KAAK,+BAA+B;AACvC,KAAG;AAAA,IACD;AAAA;AAAA;AAAA;AAAA,EAAA,EAIA,IAAI,WAAW,KAAK,UAAU,aAAa,KAAA,CAAM,CAAC;AACpD,KAAG;AAAA,IACD;AAAA;AAAA;AAAA,EAAA,EAGA,IAAI,SAAS;AACf,cAAY,IAAI,MAAM;AACxB;AAEO,SAAS,YAAY,IAAc,OAAqB;AAC7D,KAAG;AAAA,IACD;AAAA;AAAA;AAAA,EAAA,EAGA,IAAI,KAAK;AACb;AAEO,SAAS,mBAAmB,IAAc;AAC/C,QAAM,SAAS,GACZ;AAAA,IACC;AAAA;AAAA;AAAA,EAAA,EAID,IAAA;AACH,SAAO,OAAO,IAAI,CAAC,EAAC,OAAO,iBAAgB;AAAA,IACzC;AAAA,IACA,WAAW,oBAAI,KAAK,YAAY,GAAG;AAAA,EAAA,EACnC;AACJ;AAEO,SAAS,qBAAqB,IAAwC;AAC3E,QAAM,SAAS,GAAG;AAAA,IAChB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAAA;AAOF,SAAOE,MAAQ,QAAQ,uBAAuB;AAChD;AAEO,SAAS,2BACd,IACA,WACA;AACA,KAAG,IAAI,sDAAsD,SAAS;AACxE;AAEO,SAAS,oBAAoB,IAAuC;AACzE,QAAM,SAAS,GAAG,IAAI,mDAAmD;AACzE,SAAOA,MAAQ,QAAQ,sBAAsB;AAC/C;"}
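Two details documented in the embedded `replication-state.ts` source are worth calling out: `_zero.runtimeEvents` keeps exactly one row per event via `TEXT PRIMARY KEY ON CONFLICT REPLACE`, and SQLite's `current_timestamp` strings carry no timezone marker, so a `Z` is appended before `new Date(...)` to parse them as UTC. A small sketch of both behaviors, again assuming `better-sqlite3`:

```ts
// Sketch of the _zero.runtimeEvents patterns: ON CONFLICT REPLACE keeps
// one row per event, and appending 'Z' makes current_timestamp strings
// parse as UTC. Assumes better-sqlite3 in place of the zqlite wrapper.
import Database from 'better-sqlite3';

const db = new Database(':memory:');
db.exec(`
  CREATE TABLE "events" (
    event TEXT PRIMARY KEY ON CONFLICT REPLACE,
    timestamp TEXT NOT NULL DEFAULT (current_timestamp)
  );
`);

const record = db.prepare(`INSERT INTO "events" (event) VALUES (?)`);
record.run('sync');
record.run('sync'); // REPLACEs the earlier 'sync' row: still one row total

const rows = db
  .prepare(`SELECT event, timestamp FROM "events"`)
  .all() as {event: string; timestamp: string}[];

// "2024-04-12 11:37:46" has no zone marker; append 'Z' to parse as UTC.
const events = rows.map(({event, timestamp}) => ({
  event,
  at: new Date(timestamp + 'Z'),
}));
```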
@@ -56,7 +56,7 @@ async function runAst(lc, clientSchema, ast, isTransformed, options, yieldProces
  await yieldProcess();
  continue;
  }
- assert(rowChange.type === "add");
+ assert(rowChange.type === "add", "Hydration only handles add row changes");
  if (syncedRowCount % 10 === 0) {
  await Promise.resolve();
  }
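The `run-ast.js` hunk itself only attaches a message to the hydration assertion, but the surrounding loop shows a deliberate two-tier yield cadence: a cheap microtask yield every 10 rows, and a timer-based yield every 100 rows (via the shared `sleep` helper) so long hydrations do not starve other work. A self-contained sketch of that cadence, with a local stand-in for `sleep`:

```ts
// Sketch of runAst's two-tier cooperative-yield cadence: drain pending
// microtasks every 10 rows, and yield to the event loop (timers, I/O)
// every 100 rows. sleep() here is a local stand-in for the package's
// shared helper.
const sleep = (ms: number) => new Promise<void>(r => setTimeout(r, ms));

async function hydrateAll(rows: Iterable<unknown>): Promise<number> {
  let count = 0;
  for (const _row of rows) {
    if (count % 10 === 0) {
      await Promise.resolve(); // microtask yield: cheap, frequent
    }
    if (count % 100 === 0) {
      await sleep(1); // macrotask yield: lets timers and I/O run
    }
    count++;
  }
  return count;
}
```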
@@ -1 +1 @@
- {"version":3,"file":"run-ast.js","sources":["../../../../../zero-cache/src/services/run-ast.ts"],"sourcesContent":["import type {LogContext} from '@rocicorp/logger';\n// @circular-dep-ignore\nimport {astToZQL} from '../../../ast-to-zql/src/ast-to-zql.ts';\n// @circular-dep-ignore\nimport {formatOutput} from '../../../ast-to-zql/src/format.ts';\nimport {assert} from '../../../shared/src/asserts.ts';\nimport {must} from '../../../shared/src/must.ts';\nimport {sleep} from '../../../shared/src/sleep.ts';\nimport type {AnalyzeQueryResult} from '../../../zero-protocol/src/analyze-query-result.ts';\nimport type {AST} from '../../../zero-protocol/src/ast.ts';\nimport {mapAST} from '../../../zero-protocol/src/ast.ts';\nimport type {Row} from '../../../zero-protocol/src/data.ts';\nimport {hashOfAST} from '../../../zero-protocol/src/query-hash.ts';\nimport type {PermissionsConfig} from '../../../zero-schema/src/compiled-permissions.ts';\nimport type {NameMapper} from '../../../zero-schema/src/name-mapper.ts';\nimport {\n buildPipeline,\n type BuilderDelegate,\n} from '../../../zql/src/builder/builder.ts';\nimport type {PlanDebugger} from '../../../zql/src/planner/planner-debug.ts';\nimport type {ConnectionCostModel} from '../../../zql/src/planner/planner-connection.ts';\nimport type {Database} from '../../../zqlite/src/db.ts';\nimport {transformAndHashQuery} from '../auth/read-authorizer.ts';\nimport type {LiteAndZqlSpec} from '../db/specs.ts';\nimport {hydrate} from './view-syncer/pipeline-driver.ts';\nimport type {TokenData} from './view-syncer/view-syncer.ts';\nimport type {ClientSchema} from '../../../zero-protocol/src/client-schema.ts';\n\nexport type RunAstOptions = {\n applyPermissions?: boolean | undefined;\n authData?: TokenData | undefined;\n clientToServerMapper?: NameMapper | undefined;\n costModel?: ConnectionCostModel | undefined;\n db: Database;\n host: BuilderDelegate;\n permissions?: PermissionsConfig | undefined;\n planDebugger?: PlanDebugger | undefined;\n syncedRows?: boolean | undefined;\n tableSpecs: Map<string, LiteAndZqlSpec>;\n vendedRows?: boolean | undefined;\n};\n\nexport async function runAst(\n lc: LogContext,\n clientSchema: ClientSchema,\n ast: AST,\n isTransformed: boolean,\n options: RunAstOptions,\n yieldProcess: () => Promise<void>,\n): Promise<AnalyzeQueryResult> {\n const {clientToServerMapper, permissions, host} = options;\n const result: AnalyzeQueryResult = {\n warnings: [],\n syncedRows: undefined,\n syncedRowCount: 0,\n start: 0,\n end: 0,\n elapsed: 0,\n afterPermissions: undefined,\n readRows: undefined,\n readRowCountsByQuery: {},\n readRowCount: undefined,\n };\n\n if (!isTransformed) {\n // map the AST to server names if not already transformed\n ast = mapAST(ast, must(clientToServerMapper));\n }\n if (options.applyPermissions) {\n const authData = options.authData?.decoded;\n if (!authData) {\n result.warnings.push(\n 'No auth data provided. 
Permission rules will compare to `NULL` wherever an auth data field is referenced.',\n );\n }\n ast = transformAndHashQuery(\n lc,\n 'clientGroupIDForAnalyze',\n ast,\n must(permissions),\n authData,\n false,\n ).transformedAst;\n result.afterPermissions = await formatOutput(ast.table + astToZQL(ast));\n }\n\n const pipeline = buildPipeline(\n ast,\n host,\n 'query-id',\n options.costModel,\n lc,\n options.planDebugger,\n );\n\n const start = performance.now();\n\n let syncedRowCount = 0;\n const rowsByTable: Record<string, Row[]> = {};\n const seenByTable: Set<string> = new Set();\n for (const rowChange of hydrate(pipeline, hashOfAST(ast), clientSchema)) {\n if (rowChange === 'yield') {\n await yieldProcess();\n continue;\n }\n assert(rowChange.type === 'add');\n\n // yield to other tasks to avoid blocking for too long\n if (syncedRowCount % 10 === 0) {\n await Promise.resolve();\n }\n if (syncedRowCount % 100 === 0) {\n await sleep(1);\n }\n\n let rows: Row[] = rowsByTable[rowChange.table];\n const s = rowChange.table + '.' + JSON.stringify(rowChange.row);\n if (seenByTable.has(s)) {\n continue; // skip duplicates\n }\n syncedRowCount++;\n seenByTable.add(s);\n if (options.syncedRows) {\n if (!rows) {\n rows = [];\n rowsByTable[rowChange.table] = rows;\n }\n rows.push(rowChange.row);\n }\n }\n\n const end = performance.now();\n if (options.syncedRows) {\n result.syncedRows = rowsByTable;\n }\n result.start = start;\n result.end = end;\n result.elapsed = end - start;\n\n // Always include the count of synced and vended rows.\n result.syncedRowCount = syncedRowCount;\n result.readRowCountsByQuery = host.debug?.getVendedRowCounts() ?? {};\n let readRowCount = 0;\n for (const c of Object.values(result.readRowCountsByQuery)) {\n for (const v of Object.values(c)) {\n readRowCount += v;\n }\n }\n result.readRowCount = readRowCount;\n result.dbScansByQuery = host.debug?.getNVisitCounts() ?? 
{};\n\n if (options.vendedRows) {\n result.readRows = host.debug?.getVendedRows();\n }\n return result;\n}\n"],"names":[],"mappings":";;;;;;;;;;AA0CA,eAAsB,OACpB,IACA,cACA,KACA,eACA,SACA,cAC6B;AAC7B,QAAM,EAAuB,aAAa,KAAA,IAAQ;AAClD,QAAM,SAA6B;AAAA,IACjC,UAAU,CAAA;AAAA,IACV,YAAY;AAAA,IACZ,gBAAgB;AAAA,IAChB,OAAO;AAAA,IACP,KAAK;AAAA,IACL,SAAS;AAAA,IACT,kBAAkB;AAAA,IAClB,UAAU;AAAA,IACV,sBAAsB,CAAA;AAAA,IACtB,cAAc;AAAA,EAAA;AAOhB,MAAI,QAAQ,kBAAkB;AAC5B,UAAM,WAAW,QAAQ,UAAU;AACnC,QAAI,CAAC,UAAU;AACb,aAAO,SAAS;AAAA,QACd;AAAA,MAAA;AAAA,IAEJ;AACA,UAAM;AAAA,MACJ;AAAA,MACA;AAAA,MACA;AAAA,MACA,KAAK,WAAW;AAAA,MAChB;AAAA,MACA;AAAA,IAAA,EACA;AACF,WAAO,mBAAmB,MAAM,aAAa,IAAI,QAAQ,SAAS,GAAG,CAAC;AAAA,EACxE;AAEA,QAAM,WAAW;AAAA,IACf;AAAA,IACA;AAAA,IACA;AAAA,IACA,QAAQ;AAAA,IACR;AAAA,IACA,QAAQ;AAAA,EAAA;AAGV,QAAM,QAAQ,YAAY,IAAA;AAE1B,MAAI,iBAAiB;AACrB,QAAM,cAAqC,CAAA;AAC3C,QAAM,kCAA+B,IAAA;AACrC,aAAW,aAAa,QAAQ,UAAU,UAAU,GAAG,GAAG,YAAY,GAAG;AACvE,QAAI,cAAc,SAAS;AACzB,YAAM,aAAA;AACN;AAAA,IACF;AACA,WAAO,UAAU,SAAS,KAAK;AAG/B,QAAI,iBAAiB,OAAO,GAAG;AAC7B,YAAM,QAAQ,QAAA;AAAA,IAChB;AACA,QAAI,iBAAiB,QAAQ,GAAG;AAC9B,YAAM,MAAM,CAAC;AAAA,IACf;AAEA,QAAI,OAAc,YAAY,UAAU,KAAK;AAC7C,UAAM,IAAI,UAAU,QAAQ,MAAM,KAAK,UAAU,UAAU,GAAG;AAC9D,QAAI,YAAY,IAAI,CAAC,GAAG;AACtB;AAAA,IACF;AACA;AACA,gBAAY,IAAI,CAAC;AACjB,QAAI,QAAQ,YAAY;AACtB,UAAI,CAAC,MAAM;AACT,eAAO,CAAA;AACP,oBAAY,UAAU,KAAK,IAAI;AAAA,MACjC;AACA,WAAK,KAAK,UAAU,GAAG;AAAA,IACzB;AAAA,EACF;AAEA,QAAM,MAAM,YAAY,IAAA;AACxB,MAAI,QAAQ,YAAY;AACtB,WAAO,aAAa;AAAA,EACtB;AACA,SAAO,QAAQ;AACf,SAAO,MAAM;AACb,SAAO,UAAU,MAAM;AAGvB,SAAO,iBAAiB;AACxB,SAAO,uBAAuB,KAAK,OAAO,mBAAA,KAAwB,CAAA;AAClE,MAAI,eAAe;AACnB,aAAW,KAAK,OAAO,OAAO,OAAO,oBAAoB,GAAG;AAC1D,eAAW,KAAK,OAAO,OAAO,CAAC,GAAG;AAChC,sBAAgB;AAAA,IAClB;AAAA,EACF;AACA,SAAO,eAAe;AACtB,SAAO,iBAAiB,KAAK,OAAO,gBAAA,KAAqB,CAAA;AAEzD,MAAI,QAAQ,YAAY;AACtB,WAAO,WAAW,KAAK,OAAO,cAAA;AAAA,EAChC;AACA,SAAO;AACT;"}
+ {"version":3,"file":"run-ast.js","sources":["../../../../../zero-cache/src/services/run-ast.ts"],"sourcesContent":["import type {LogContext} from '@rocicorp/logger';\n// @circular-dep-ignore\nimport {astToZQL} from '../../../ast-to-zql/src/ast-to-zql.ts';\n// @circular-dep-ignore\nimport {formatOutput} from '../../../ast-to-zql/src/format.ts';\nimport {assert} from '../../../shared/src/asserts.ts';\nimport {must} from '../../../shared/src/must.ts';\nimport {sleep} from '../../../shared/src/sleep.ts';\nimport type {AnalyzeQueryResult} from '../../../zero-protocol/src/analyze-query-result.ts';\nimport type {AST} from '../../../zero-protocol/src/ast.ts';\nimport {mapAST} from '../../../zero-protocol/src/ast.ts';\nimport type {Row} from '../../../zero-protocol/src/data.ts';\nimport {hashOfAST} from '../../../zero-protocol/src/query-hash.ts';\nimport type {PermissionsConfig} from '../../../zero-schema/src/compiled-permissions.ts';\nimport type {NameMapper} from '../../../zero-schema/src/name-mapper.ts';\nimport {\n buildPipeline,\n type BuilderDelegate,\n} from '../../../zql/src/builder/builder.ts';\nimport type {PlanDebugger} from '../../../zql/src/planner/planner-debug.ts';\nimport type {ConnectionCostModel} from '../../../zql/src/planner/planner-connection.ts';\nimport type {Database} from '../../../zqlite/src/db.ts';\nimport {transformAndHashQuery} from '../auth/read-authorizer.ts';\nimport type {LiteAndZqlSpec} from '../db/specs.ts';\nimport {hydrate} from './view-syncer/pipeline-driver.ts';\nimport type {TokenData} from './view-syncer/view-syncer.ts';\nimport type {ClientSchema} from '../../../zero-protocol/src/client-schema.ts';\n\nexport type RunAstOptions = {\n applyPermissions?: boolean | undefined;\n authData?: TokenData | undefined;\n clientToServerMapper?: NameMapper | undefined;\n costModel?: ConnectionCostModel | undefined;\n db: Database;\n host: BuilderDelegate;\n permissions?: PermissionsConfig | undefined;\n planDebugger?: PlanDebugger | undefined;\n syncedRows?: boolean | undefined;\n tableSpecs: Map<string, LiteAndZqlSpec>;\n vendedRows?: boolean | undefined;\n};\n\nexport async function runAst(\n lc: LogContext,\n clientSchema: ClientSchema,\n ast: AST,\n isTransformed: boolean,\n options: RunAstOptions,\n yieldProcess: () => Promise<void>,\n): Promise<AnalyzeQueryResult> {\n const {clientToServerMapper, permissions, host} = options;\n const result: AnalyzeQueryResult = {\n warnings: [],\n syncedRows: undefined,\n syncedRowCount: 0,\n start: 0,\n end: 0,\n elapsed: 0,\n afterPermissions: undefined,\n readRows: undefined,\n readRowCountsByQuery: {},\n readRowCount: undefined,\n };\n\n if (!isTransformed) {\n // map the AST to server names if not already transformed\n ast = mapAST(ast, must(clientToServerMapper));\n }\n if (options.applyPermissions) {\n const authData = options.authData?.decoded;\n if (!authData) {\n result.warnings.push(\n 'No auth data provided. 
Permission rules will compare to `NULL` wherever an auth data field is referenced.',\n );\n }\n ast = transformAndHashQuery(\n lc,\n 'clientGroupIDForAnalyze',\n ast,\n must(permissions),\n authData,\n false,\n ).transformedAst;\n result.afterPermissions = await formatOutput(ast.table + astToZQL(ast));\n }\n\n const pipeline = buildPipeline(\n ast,\n host,\n 'query-id',\n options.costModel,\n lc,\n options.planDebugger,\n );\n\n const start = performance.now();\n\n let syncedRowCount = 0;\n const rowsByTable: Record<string, Row[]> = {};\n const seenByTable: Set<string> = new Set();\n for (const rowChange of hydrate(pipeline, hashOfAST(ast), clientSchema)) {\n if (rowChange === 'yield') {\n await yieldProcess();\n continue;\n }\n assert(rowChange.type === 'add', 'Hydration only handles add row changes');\n\n // yield to other tasks to avoid blocking for too long\n if (syncedRowCount % 10 === 0) {\n await Promise.resolve();\n }\n if (syncedRowCount % 100 === 0) {\n await sleep(1);\n }\n\n let rows: Row[] = rowsByTable[rowChange.table];\n const s = rowChange.table + '.' + JSON.stringify(rowChange.row);\n if (seenByTable.has(s)) {\n continue; // skip duplicates\n }\n syncedRowCount++;\n seenByTable.add(s);\n if (options.syncedRows) {\n if (!rows) {\n rows = [];\n rowsByTable[rowChange.table] = rows;\n }\n rows.push(rowChange.row);\n }\n }\n\n const end = performance.now();\n if (options.syncedRows) {\n result.syncedRows = rowsByTable;\n }\n result.start = start;\n result.end = end;\n result.elapsed = end - start;\n\n // Always include the count of synced and vended rows.\n result.syncedRowCount = syncedRowCount;\n result.readRowCountsByQuery = host.debug?.getVendedRowCounts() ?? {};\n let readRowCount = 0;\n for (const c of Object.values(result.readRowCountsByQuery)) {\n for (const v of Object.values(c)) {\n readRowCount += v;\n }\n }\n result.readRowCount = readRowCount;\n result.dbScansByQuery = host.debug?.getNVisitCounts() ?? 
{};\n\n if (options.vendedRows) {\n result.readRows = host.debug?.getVendedRows();\n }\n return result;\n}\n"],"names":[],"mappings":";;;;;;;;;;AA0CA,eAAsB,OACpB,IACA,cACA,KACA,eACA,SACA,cAC6B;AAC7B,QAAM,EAAuB,aAAa,KAAA,IAAQ;AAClD,QAAM,SAA6B;AAAA,IACjC,UAAU,CAAA;AAAA,IACV,YAAY;AAAA,IACZ,gBAAgB;AAAA,IAChB,OAAO;AAAA,IACP,KAAK;AAAA,IACL,SAAS;AAAA,IACT,kBAAkB;AAAA,IAClB,UAAU;AAAA,IACV,sBAAsB,CAAA;AAAA,IACtB,cAAc;AAAA,EAAA;AAOhB,MAAI,QAAQ,kBAAkB;AAC5B,UAAM,WAAW,QAAQ,UAAU;AACnC,QAAI,CAAC,UAAU;AACb,aAAO,SAAS;AAAA,QACd;AAAA,MAAA;AAAA,IAEJ;AACA,UAAM;AAAA,MACJ;AAAA,MACA;AAAA,MACA;AAAA,MACA,KAAK,WAAW;AAAA,MAChB;AAAA,MACA;AAAA,IAAA,EACA;AACF,WAAO,mBAAmB,MAAM,aAAa,IAAI,QAAQ,SAAS,GAAG,CAAC;AAAA,EACxE;AAEA,QAAM,WAAW;AAAA,IACf;AAAA,IACA;AAAA,IACA;AAAA,IACA,QAAQ;AAAA,IACR;AAAA,IACA,QAAQ;AAAA,EAAA;AAGV,QAAM,QAAQ,YAAY,IAAA;AAE1B,MAAI,iBAAiB;AACrB,QAAM,cAAqC,CAAA;AAC3C,QAAM,kCAA+B,IAAA;AACrC,aAAW,aAAa,QAAQ,UAAU,UAAU,GAAG,GAAG,YAAY,GAAG;AACvE,QAAI,cAAc,SAAS;AACzB,YAAM,aAAA;AACN;AAAA,IACF;AACA,WAAO,UAAU,SAAS,OAAO,wCAAwC;AAGzE,QAAI,iBAAiB,OAAO,GAAG;AAC7B,YAAM,QAAQ,QAAA;AAAA,IAChB;AACA,QAAI,iBAAiB,QAAQ,GAAG;AAC9B,YAAM,MAAM,CAAC;AAAA,IACf;AAEA,QAAI,OAAc,YAAY,UAAU,KAAK;AAC7C,UAAM,IAAI,UAAU,QAAQ,MAAM,KAAK,UAAU,UAAU,GAAG;AAC9D,QAAI,YAAY,IAAI,CAAC,GAAG;AACtB;AAAA,IACF;AACA;AACA,gBAAY,IAAI,CAAC;AACjB,QAAI,QAAQ,YAAY;AACtB,UAAI,CAAC,MAAM;AACT,eAAO,CAAA;AACP,oBAAY,UAAU,KAAK,IAAI;AAAA,MACjC;AACA,WAAK,KAAK,UAAU,GAAG;AAAA,IACzB;AAAA,EACF;AAEA,QAAM,MAAM,YAAY,IAAA;AACxB,MAAI,QAAQ,YAAY;AACtB,WAAO,aAAa;AAAA,EACtB;AACA,SAAO,QAAQ;AACf,SAAO,MAAM;AACb,SAAO,UAAU,MAAM;AAGvB,SAAO,iBAAiB;AACxB,SAAO,uBAAuB,KAAK,OAAO,mBAAA,KAAwB,CAAA;AAClE,MAAI,eAAe;AACnB,aAAW,KAAK,OAAO,OAAO,OAAO,oBAAoB,GAAG;AAC1D,eAAW,KAAK,OAAO,OAAO,CAAC,GAAG;AAChC,sBAAgB;AAAA,IAClB;AAAA,EACF;AACA,SAAO,eAAe;AACtB,SAAO,iBAAiB,KAAK,OAAO,gBAAA,KAAqB,CAAA;AAEzD,MAAI,QAAQ,YAAY;AACtB,WAAO,WAAW,KAAK,OAAO,cAAA;AAAA,EAChC;AACA,SAAO;AACT;"}
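Elsewhere in the embedded `run-ast.ts` source, hydrated rows are deduplicated with a `Set` keyed by `table + '.' + JSON.stringify(row)`. A minimal sketch of that keying; note that `JSON.stringify` is property-order sensitive, which is acceptable here because rows emitted by a single pipeline have a stable shape:

```ts
// Sketch of the duplicate-row filter in runAst: each row is keyed by its
// table name plus its JSON encoding, and repeats are skipped.
type Row = Record<string, unknown>;

function dedupeRows(changes: Iterable<{table: string; row: Row}>): Row[] {
  const seen = new Set<string>();
  const out: Row[] = [];
  for (const {table, row} of changes) {
    const key = table + '.' + JSON.stringify(row);
    if (seen.has(key)) {
      continue; // skip duplicates, as the hydration loop does
    }
    seen.add(key);
    out.push(row);
  }
  return out;
}
```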
@@ -1 +1 @@
- {"version":3,"file":"statz.d.ts","sourceRoot":"","sources":["../../../../../zero-cache/src/services/statz.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,kBAAkB,CAAC;AAEjD,OAAO,KAAK,EAAC,YAAY,EAAE,cAAc,EAAC,MAAM,SAAS,CAAC;AAM1D,OAAO,KAAK,EAAC,oBAAoB,IAAI,UAAU,EAAC,MAAM,wBAAwB,CAAC;AAsS/E,wBAAsB,kBAAkB,CACtC,EAAE,EAAE,UAAU,EACd,MAAM,EAAE,UAAU,EAClB,GAAG,EAAE,cAAc,EACnB,GAAG,EAAE,YAAY,iBAqBlB"}
+ {"version":3,"file":"statz.d.ts","sourceRoot":"","sources":["../../../../../zero-cache/src/services/statz.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,kBAAkB,CAAC;AAEjD,OAAO,KAAK,EAAC,YAAY,EAAE,cAAc,EAAC,MAAM,SAAS,CAAC;AAM1D,OAAO,KAAK,EAAC,oBAAoB,IAAI,UAAU,EAAC,MAAM,wBAAwB,CAAC;AAuS/E,wBAAsB,kBAAkB,CACtC,EAAE,EAAE,UAAU,EACd,MAAM,EAAE,UAAU,EAClB,GAAG,EAAE,cAAc,EACnB,GAAG,EAAE,YAAY,iBAqBlB"}
@@ -230,6 +230,7 @@ function osStats(out) {
  ["total mem", os.totalmem()],
  ["free mem", os.freemem()],
  ["cpus", os.cpus().length],
+ ["available parallelism", os.availableParallelism()],
  ["platform", os.platform()],
  ["arch", os.arch()],
  ["release", os.release()],
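`osStats` now reports `os.availableParallelism()` alongside `os.cpus().length`. The distinction matters for diagnostics: `cpus().length` is the raw logical-core count, while `availableParallelism()` (Node ≥ 18.14) estimates how many threads can actually run concurrently, accounting for scheduling constraints such as CPU affinity. A trivial sketch:

```ts
// Sketch contrasting the two CPU figures statz now reports. On an
// unconstrained host they usually match; under a CPU affinity mask or
// container limit, availableParallelism() can be lower.
import os from 'os';

console.log('cpus:', os.cpus().length);
console.log('available parallelism:', os.availableParallelism());
```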
@@ -1 +1 @@
- {"version":3,"file":"statz.js","sources":["../../../../../zero-cache/src/services/statz.ts"],"sourcesContent":["import type {LogContext} from '@rocicorp/logger';\nimport auth from 'basic-auth';\nimport type {FastifyReply, FastifyRequest} from 'fastify';\nimport fs from 'fs';\nimport os from 'os';\nimport type {Writable} from 'stream';\nimport {BigIntJSON} from '../../../shared/src/bigint-json.ts';\nimport {Database} from '../../../zqlite/src/db.ts';\nimport type {NormalizedZeroConfig as ZeroConfig} from '../config/normalize.ts';\nimport {isAdminPasswordValid} from '../config/zero-config.ts';\nimport {pgClient} from '../types/pg.ts';\nimport {getShardID, upstreamSchema} from '../types/shards.ts';\n\nasync function upstreamStats(\n lc: LogContext,\n config: ZeroConfig,\n out: Writable,\n) {\n const schema = upstreamSchema(getShardID(config));\n const sql = pgClient(lc, config.upstream.db);\n\n out.write(header('Upstream'));\n\n await printPgStats(\n [\n [\n 'num replicas',\n sql`SELECT COUNT(*) as \"c\" FROM ${sql(schema)}.\"replicas\"`,\n ],\n [\n 'num clients with mutations',\n sql`SELECT COUNT(*) as \"c\" FROM ${sql(schema)}.\"clients\"`,\n ],\n [\n 'num mutations processed',\n sql`SELECT SUM(\"lastMutationID\") as \"c\" FROM ${sql(schema)}.\"clients\"`,\n ],\n ],\n out,\n );\n\n await sql.end();\n}\n\nasync function cvrStats(lc: LogContext, config: ZeroConfig, out: Writable) {\n out.write(header('CVR'));\n\n const schema = upstreamSchema(getShardID(config)) + '/cvr';\n const sql = pgClient(lc, config.cvr.db);\n\n function numQueriesPerClientGroup(\n active: boolean,\n ): ReturnType<ReturnType<typeof pgClient>> {\n const filter = active\n ? sql`WHERE \"inactivatedAt\" IS NULL AND deleted = false`\n : sql`WHERE \"inactivatedAt\" IS NOT NULL AND (\"inactivatedAt\" + \"ttl\") > NOW()`;\n return sql`WITH\n group_counts AS (\n SELECT\n \"clientGroupID\",\n COUNT(*) AS num_queries\n FROM ${sql(schema)}.\"desires\"\n ${filter}\n GROUP BY \"clientGroupID\"\n ),\n -- Count distinct clientIDs per clientGroupID\n client_per_group_counts AS (\n SELECT\n \"clientGroupID\",\n COUNT(DISTINCT \"clientID\") AS num_clients\n FROM ${sql(schema)}.\"desires\"\n ${filter}\n GROUP BY \"clientGroupID\"\n )\n -- Combine all the information\n SELECT\n g.\"clientGroupID\",\n cpg.num_clients,\n g.num_queries\n FROM group_counts g\n JOIN client_per_group_counts cpg ON g.\"clientGroupID\" = cpg.\"clientGroupID\"\n ORDER BY g.num_queries DESC;`;\n }\n\n await printPgStats(\n [\n [\n 'total num queries',\n sql`SELECT COUNT(*) as \"c\" FROM ${sql(schema)}.\"desires\"`,\n ],\n [\n 'num unique query hashes',\n sql`SELECT COUNT(DISTINCT \"queryHash\") as \"c\" FROM ${sql(\n schema,\n )}.\"desires\"`,\n ],\n [\n 'num active queries',\n sql`SELECT COUNT(*) as \"c\" FROM ${sql(schema)}.\"desires\" WHERE \"inactivatedAt\" IS NULL AND \"deleted\" = false`,\n ],\n [\n 'num inactive queries',\n sql`SELECT COUNT(*) as \"c\" FROM ${sql(schema)}.\"desires\" WHERE \"inactivatedAt\" IS NOT NULL AND (\"inactivatedAt\" + \"ttl\") > NOW()`,\n ],\n [\n 'num deleted queries',\n sql`SELECT COUNT(*) as \"c\" FROM ${sql(schema)}.\"desires\" WHERE \"deleted\" = true`,\n ],\n [\n 'fresh queries percentiles',\n sql`WITH client_group_counts AS (\n -- Count inactive desires per clientGroupID\n SELECT\n \"clientGroupID\",\n COUNT(*) AS fresh_count\n FROM ${sql(schema)}.\"desires\"\n WHERE\n (\"inactivatedAt\" IS NOT NULL\n AND (\"inactivatedAt\" + \"ttl\") > NOW()) OR (\"inactivatedAt\" IS NULL\n AND deleted = false)\n GROUP BY \"clientGroupID\"\n 
)\n\n SELECT\n percentile_cont(0.50) WITHIN GROUP (ORDER BY fresh_count) AS \"p50\",\n percentile_cont(0.75) WITHIN GROUP (ORDER BY fresh_count) AS \"p75\",\n percentile_cont(0.90) WITHIN GROUP (ORDER BY fresh_count) AS \"p90\",\n percentile_cont(0.95) WITHIN GROUP (ORDER BY fresh_count) AS \"p95\",\n percentile_cont(0.99) WITHIN GROUP (ORDER BY fresh_count) AS \"p99\",\n MIN(fresh_count) AS \"min\",\n MAX(fresh_count) AS \"max\",\n AVG(fresh_count) AS \"avg\"\n FROM client_group_counts;`,\n ],\n [\n 'rows per client group percentiles',\n sql`WITH client_group_counts AS (\n -- Count inactive desires per clientGroupID\n SELECT\n \"clientGroupID\",\n COUNT(*) AS row_count\n FROM ${sql(schema)}.\"rows\"\n GROUP BY \"clientGroupID\"\n )\n SELECT\n percentile_cont(0.50) WITHIN GROUP (ORDER BY row_count) AS \"p50\",\n percentile_cont(0.75) WITHIN GROUP (ORDER BY row_count) AS \"p75\",\n percentile_cont(0.90) WITHIN GROUP (ORDER BY row_count) AS \"p90\",\n percentile_cont(0.95) WITHIN GROUP (ORDER BY row_count) AS \"p95\",\n percentile_cont(0.99) WITHIN GROUP (ORDER BY row_count) AS \"p99\",\n MIN(row_count) AS \"min\",\n MAX(row_count) AS \"max\",\n AVG(row_count) AS \"avg\"\n FROM client_group_counts;`,\n ],\n [\n // check for AST blowup due to DNF conversion.\n 'ast sizes',\n sql`SELECT\n percentile_cont(0.25) WITHIN GROUP (ORDER BY length(\"clientAST\"::text)) AS \"25th_percentile\",\n percentile_cont(0.5) WITHIN GROUP (ORDER BY length(\"clientAST\"::text)) AS \"50th_percentile\",\n percentile_cont(0.75) WITHIN GROUP (ORDER BY length(\"clientAST\"::text)) AS \"75th_percentile\",\n percentile_cont(0.9) WITHIN GROUP (ORDER BY length(\"clientAST\"::text)) AS \"90th_percentile\",\n percentile_cont(0.95) WITHIN GROUP (ORDER BY length(\"clientAST\"::text)) AS \"95th_percentile\",\n percentile_cont(0.99) WITHIN GROUP (ORDER BY length(\"clientAST\"::text)) AS \"99th_percentile\",\n MIN(length(\"clientAST\"::text)) AS \"minimum_length\",\n MAX(length(\"clientAST\"::text)) AS \"maximum_length\",\n AVG(length(\"clientAST\"::text))::integer AS \"average_length\",\n COUNT(*) AS \"total_records\"\n FROM ${sql(schema)}.\"queries\";`,\n ],\n [\n // output the hash of the largest AST\n 'biggest ast hash',\n sql`SELECT \"queryHash\", length(\"clientAST\"::text) AS \"ast_length\"\n FROM ${sql(schema)}.\"queries\"\n ORDER BY length(\"clientAST\"::text) DESC\n LIMIT 1;`,\n ],\n [\n 'total active queries per client and client group',\n numQueriesPerClientGroup(true),\n ],\n [\n 'total inactive queries per client and client group',\n numQueriesPerClientGroup(false),\n ],\n [\n 'total rows per client group',\n sql`SELECT \"clientGroupID\", COUNT(*) as \"c\" FROM ${sql(\n schema,\n )}.\"rows\" GROUP BY \"clientGroupID\" ORDER BY \"c\" DESC`,\n ],\n [\n 'num rows per query',\n sql`SELECT\n k.key AS \"queryHash\",\n COUNT(*) AS row_count\n FROM ${sql(schema)}.\"rows\" r,\n LATERAL jsonb_each(r.\"refCounts\") k\n GROUP BY k.key\n ORDER BY row_count DESC;`,\n ],\n ] satisfies [\n name: string,\n query: ReturnType<ReturnType<typeof pgClient>>,\n ][],\n out,\n );\n\n await sql.end();\n}\n\nasync function changelogStats(\n lc: LogContext,\n config: ZeroConfig,\n out: Writable,\n) {\n out.write(header('Change DB'));\n const schema = upstreamSchema(getShardID(config)) + '/cdc';\n const sql = pgClient(lc, config.change.db);\n\n await printPgStats(\n [\n [\n 'change log size',\n sql`SELECT COUNT(*) as \"change_log_size\" FROM ${sql(schema)}.\"changeLog\"`,\n ],\n ],\n out,\n );\n await sql.end();\n}\n\nfunction replicaStats(lc: 
LogContext, config: ZeroConfig, out: Writable) {\n out.write(header('Replica'));\n const db = new Database(lc, config.replica.file);\n printStats(\n 'replica',\n [\n ['wal checkpoint', pick(first(db.pragma('WAL_CHECKPOINT')))],\n ['page count', pick(first(db.pragma('PAGE_COUNT')))],\n ['page size', pick(first(db.pragma('PAGE_SIZE')))],\n ['journal mode', pick(first(db.pragma('JOURNAL_MODE')))],\n ['synchronous', pick(first(db.pragma('SYNCHRONOUS')))],\n ['cache size', pick(first(db.pragma('CACHE_SIZE')))],\n ['auto vacuum', pick(first(db.pragma('AUTO_VACUUM')))],\n ['freelist count', pick(first(db.pragma('FREELIST_COUNT')))],\n ['wal autocheckpoint', pick(first(db.pragma('WAL_AUTOCHECKPOINT')))],\n ['db file stats', fs.statSync(config.replica.file)],\n ] as const,\n out,\n );\n}\n\nfunction osStats(out: Writable) {\n printStats(\n 'os',\n [\n ['load avg', os.loadavg()],\n ['uptime', os.uptime()],\n ['total mem', os.totalmem()],\n ['free mem', os.freemem()],\n ['cpus', os.cpus().length],\n ['platform', os.platform()],\n ['arch', os.arch()],\n ['release', os.release()],\n ['uptime', os.uptime()],\n ] as const,\n out,\n );\n}\n\nasync function printPgStats(\n pendingQueries: [\n name: string,\n query: ReturnType<ReturnType<typeof pgClient>>,\n ][],\n out: Writable,\n) {\n const results = await Promise.all(\n pendingQueries.map(async ([name, query]) => [name, await query]),\n );\n for (const [name, data] of results) {\n out.write('\\n');\n out.write(name);\n out.write('\\n');\n out.write(BigIntJSON.stringify(data, null, 2));\n }\n}\n\nfunction printStats(\n group: string,\n queries: readonly [name: string, result: unknown][],\n out: Writable,\n) {\n out.write('\\n' + header(group));\n for (const [name, result] of queries) {\n out.write('\\n' + name + BigIntJSON.stringify(result, null, 2));\n }\n}\n\nexport async function handleStatzRequest(\n lc: LogContext,\n config: ZeroConfig,\n req: FastifyRequest,\n res: FastifyReply,\n) {\n const credentials = auth(req);\n if (!isAdminPasswordValid(lc, config, credentials?.pass)) {\n void res\n .code(401)\n .header('WWW-Authenticate', 'Basic realm=\"Statz Protected Area\"')\n .send('Unauthorized');\n return;\n }\n\n await upstreamStats(lc, config, res.raw);\n res.raw.write('\\n\\n');\n await cvrStats(lc, config, res.raw);\n res.raw.write('\\n\\n');\n await changelogStats(lc, config, res.raw);\n res.raw.write('\\n\\n');\n replicaStats(lc, config, res.raw);\n res.raw.write('\\n\\n');\n osStats(res.raw);\n res.raw.end();\n}\n\nfunction first(x: object[]): object {\n return x[0];\n}\n\nfunction pick(x: object): unknown {\n return Object.values(x)[0];\n}\n\nfunction header(name: string): string {\n return `=== ${name} 
===\\n`;\n}\n"],"names":[],"mappings":";;;;;;;;AAaA,eAAe,cACb,IACA,QACA,KACA;AACA,QAAM,SAAS,eAAe,WAAW,MAAM,CAAC;AAChD,QAAM,MAAM,SAAS,IAAI,OAAO,SAAS,EAAE;AAE3C,MAAI,MAAM,OAAO,UAAU,CAAC;AAE5B,QAAM;AAAA,IACJ;AAAA,MACE;AAAA,QACE;AAAA,QACA,kCAAkC,IAAI,MAAM,CAAC;AAAA,MAAA;AAAA,MAE/C;AAAA,QACE;AAAA,QACA,kCAAkC,IAAI,MAAM,CAAC;AAAA,MAAA;AAAA,MAE/C;AAAA,QACE;AAAA,QACA,+CAA+C,IAAI,MAAM,CAAC;AAAA,MAAA;AAAA,IAC5D;AAAA,IAEF;AAAA,EAAA;AAGF,QAAM,IAAI,IAAA;AACZ;AAEA,eAAe,SAAS,IAAgB,QAAoB,KAAe;AACzE,MAAI,MAAM,OAAO,KAAK,CAAC;AAEvB,QAAM,SAAS,eAAe,WAAW,MAAM,CAAC,IAAI;AACpD,QAAM,MAAM,SAAS,IAAI,OAAO,IAAI,EAAE;AAEtC,WAAS,yBACP,QACyC;AACzC,UAAM,SAAS,SACX,yDACA;AACJ,WAAO;AAAA;AAAA;AAAA;AAAA;AAAA,aAKE,IAAI,MAAM,CAAC;AAAA,QAChB,MAAM;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,aAQD,IAAI,MAAM,CAAC;AAAA,QAChB,MAAM;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAWZ;AAEA,QAAM;AAAA,IACJ;AAAA,MACE;AAAA,QACE;AAAA,QACA,kCAAkC,IAAI,MAAM,CAAC;AAAA,MAAA;AAAA,MAE/C;AAAA,QACE;AAAA,QACA,qDAAqD;AAAA,UACnD;AAAA,QAAA,CACD;AAAA,MAAA;AAAA,MAEH;AAAA,QACE;AAAA,QACA,kCAAkC,IAAI,MAAM,CAAC;AAAA,MAAA;AAAA,MAE/C;AAAA,QACE;AAAA,QACA,kCAAkC,IAAI,MAAM,CAAC;AAAA,MAAA;AAAA,MAE/C;AAAA,QACE;AAAA,QACA,kCAAkC,IAAI,MAAM,CAAC;AAAA,MAAA;AAAA,MAE/C;AAAA,QACE;AAAA,QACA;AAAA;AAAA;AAAA;AAAA;AAAA,eAKO,IAAI,MAAM,CAAC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,MAAA;AAAA,MAmBpB;AAAA,QACE;AAAA,QACA;AAAA;AAAA;AAAA;AAAA;AAAA,eAKO,IAAI,MAAM,CAAC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,MAAA;AAAA,MAcpB;AAAA;AAAA,QAEE;AAAA,QACA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,aAWK,IAAI,MAAM,CAAC;AAAA,MAAA;AAAA,MAElB;AAAA;AAAA,QAEE;AAAA,QACA;AAAA,aACK,IAAI,MAAM,CAAC;AAAA;AAAA;AAAA,MAAA;AAAA,MAIlB;AAAA,QACE;AAAA,QACA,yBAAyB,IAAI;AAAA,MAAA;AAAA,MAE/B;AAAA,QACE;AAAA,QACA,yBAAyB,KAAK;AAAA,MAAA;AAAA,MAEhC;AAAA,QACE;AAAA,QACA,mDAAmD;AAAA,UACjD;AAAA,QAAA,CACD;AAAA,MAAA;AAAA,MAEH;AAAA,QACE;AAAA,QACA;AAAA;AAAA;AAAA,aAGK,IAAI,MAAM,CAAC;AAAA;AAAA;AAAA;AAAA,MAAA;AAAA,IAIlB;AAAA,IAKF;AAAA,EAAA;AAGF,QAAM,IAAI,IAAA;AACZ;AAEA,eAAe,eACb,IACA,QACA,KACA;AACA,MAAI,MAAM,OAAO,WAAW,CAAC;AAC7B,QAAM,SAAS,eAAe,WAAW,MAAM,CAAC,IAAI;AACpD,QAAM,MAAM,SAAS,IAAI,OAAO,OAAO,EAAE;AAEzC,QAAM;AAAA,IACJ;AAAA,MACE;AAAA,QACE;AAAA,QACA,gDAAgD,IAAI,MAAM,CAAC;AAAA,MAAA;AAAA,IAC7D;AAAA,IAEF;AAAA,EAAA;AAEF,QAAM,IAAI,IAAA;AACZ;AAEA,SAAS,aAAa,IAAgB,QAAoB,KAAe;AACvE,MAAI,MAAM,OAAO,SAAS,CAAC;AAC3B,QAAM,KAAK,IAAI,SAAS,IAAI,OAAO,QAAQ,IAAI;AAC/C;AAAA,IACE;AAAA,IACA;AAAA,MACE,CAAC,kBAAkB,KAAK,MAAM,GAAG,OAAO,gBAAgB,CAAC,CAAC,CAAC;AAAA,MAC3D,CAAC,cAAc,KAAK,MAAM,GAAG,OAAO,YAAY,CAAC,CAAC,CAAC;AAAA,MACnD,CAAC,aAAa,KAAK,MAAM,GAAG,OAAO,WAAW,CAAC,CAAC,CAAC;AAAA,MACjD,CAAC,gBAAgB,KAAK,MAAM,GAAG,OAAO,cAAc,CAAC,CAAC,CAAC;AAAA,MACvD,CAAC,eAAe,KAAK,MAAM,GAAG,OAAO,aAAa,CAAC,CAAC,CAAC;AAAA,MACrD,CAAC,cAAc,KAAK,MAAM,GAAG,OAAO,YAAY,CAAC,CAAC,CAAC;AAAA,MACnD,CAAC,eAAe,KAAK,MAAM,GAAG,OAAO,aAAa,CAAC,CAAC,CAAC;AAAA,MACrD,CAAC,kBAAkB,KAAK,MAAM,GAAG,OAAO,gBAAgB,CAAC,CAAC,CAAC;AAAA,MAC3D,CAAC,sBAAsB,KAAK,MAAM,GAAG,OAAO,oBAAoB,CAAC,CAAC,CAAC;AAAA,MACnE,CAAC,iBAAiB,GAAG,SAAS,OAAO,QAAQ,IAAI,CAAC;AAAA,IAAA;AAAA,IAEpD;AAAA,EAAA;AAEJ;AAEA,SAAS,QAAQ,KAAe;AAC9B;AAAA,IACE;AAAA,IACA;AAAA,MACE,CAAC,YAAY,GAAG,SAAS;AAAA,MACzB,CAAC,UAAU,GAAG,QAAQ;AAAA,MACtB,CAAC,aAAa,GAAG,UAAU;AAAA,MAC3B,CAAC,YAAY,GAAG,SAAS;AAAA,MACzB,CAAC,QAAQ,GAAG,KAAA,EAAO,MAAM;AAAA,MACzB,CAAC,YAAY,GAAG,UAAU;AAAA,MAC1B,CAAC,QAAQ,GAAG,MAAM;AAAA,MAClB,CAAC,WAAW,GAAG,SAAS;AAAA,MACxB,CAAC,UAAU,GAAG,OAAA,CAAQ;AAAA,IAAA;AAAA,IAExB;AAAA,EAAA;AAEJ;AAEA,eAAe,aACb,gBAIA,KACA;AACA,QAAM,UAAU,MAAM,QAAQ;AAAA,IAC5B,eAAe,IAAI,
OAAO,CAAC,MAAM,KAAK,MAAM,CAAC,MAAM,MAAM,KAAK,CAAC;AAAA,EAAA;AAEjE,aAAW,CAAC,MAAM,IAAI,KAAK,SAAS;AAClC,QAAI,MAAM,IAAI;AACd,QAAI,MAAM,IAAI;AACd,QAAI,MAAM,IAAI;AACd,QAAI,MAAM,WAAW,UAAU,MAAM,MAAM,CAAC,CAAC;AAAA,EAC/C;AACF;AAEA,SAAS,WACP,OACA,SACA,KACA;AACA,MAAI,MAAM,OAAO,OAAO,KAAK,CAAC;AAC9B,aAAW,CAAC,MAAM,MAAM,KAAK,SAAS;AACpC,QAAI,MAAM,OAAO,OAAO,WAAW,UAAU,QAAQ,MAAM,CAAC,CAAC;AAAA,EAC/D;AACF;AAEA,eAAsB,mBACpB,IACA,QACA,KACA,KACA;AACA,QAAM,cAAc,KAAK,GAAG;AAC5B,MAAI,CAAC,qBAAqB,IAAI,QAAQ,aAAa,IAAI,GAAG;AACxD,SAAK,IACF,KAAK,GAAG,EACR,OAAO,oBAAoB,oCAAoC,EAC/D,KAAK,cAAc;AACtB;AAAA,EACF;AAEA,QAAM,cAAc,IAAI,QAAQ,IAAI,GAAG;AACvC,MAAI,IAAI,MAAM,MAAM;AACpB,QAAM,SAAS,IAAI,QAAQ,IAAI,GAAG;AAClC,MAAI,IAAI,MAAM,MAAM;AACpB,QAAM,eAAe,IAAI,QAAQ,IAAI,GAAG;AACxC,MAAI,IAAI,MAAM,MAAM;AACpB,eAAa,IAAI,QAAQ,IAAI,GAAG;AAChC,MAAI,IAAI,MAAM,MAAM;AACpB,UAAQ,IAAI,GAAG;AACf,MAAI,IAAI,IAAA;AACV;AAEA,SAAS,MAAM,GAAqB;AAClC,SAAO,EAAE,CAAC;AACZ;AAEA,SAAS,KAAK,GAAoB;AAChC,SAAO,OAAO,OAAO,CAAC,EAAE,CAAC;AAC3B;AAEA,SAAS,OAAO,MAAsB;AACpC,SAAO,OAAO,IAAI;AAAA;AACpB;"}
+ {"version":3,"file":"statz.js","sources":["../../../../../zero-cache/src/services/statz.ts"],"sourcesContent":["import type {LogContext} from '@rocicorp/logger';\nimport auth from 'basic-auth';\nimport type {FastifyReply, FastifyRequest} from 'fastify';\nimport fs from 'fs';\nimport os from 'os';\nimport type {Writable} from 'stream';\nimport {BigIntJSON} from '../../../shared/src/bigint-json.ts';\nimport {Database} from '../../../zqlite/src/db.ts';\nimport type {NormalizedZeroConfig as ZeroConfig} from '../config/normalize.ts';\nimport {isAdminPasswordValid} from '../config/zero-config.ts';\nimport {pgClient} from '../types/pg.ts';\nimport {getShardID, upstreamSchema} from '../types/shards.ts';\n\nasync function upstreamStats(\n lc: LogContext,\n config: ZeroConfig,\n out: Writable,\n) {\n const schema = upstreamSchema(getShardID(config));\n const sql = pgClient(lc, config.upstream.db);\n\n out.write(header('Upstream'));\n\n await printPgStats(\n [\n [\n 'num replicas',\n sql`SELECT COUNT(*) as \"c\" FROM ${sql(schema)}.\"replicas\"`,\n ],\n [\n 'num clients with mutations',\n sql`SELECT COUNT(*) as \"c\" FROM ${sql(schema)}.\"clients\"`,\n ],\n [\n 'num mutations processed',\n sql`SELECT SUM(\"lastMutationID\") as \"c\" FROM ${sql(schema)}.\"clients\"`,\n ],\n ],\n out,\n );\n\n await sql.end();\n}\n\nasync function cvrStats(lc: LogContext, config: ZeroConfig, out: Writable) {\n out.write(header('CVR'));\n\n const schema = upstreamSchema(getShardID(config)) + '/cvr';\n const sql = pgClient(lc, config.cvr.db);\n\n function numQueriesPerClientGroup(\n active: boolean,\n ): ReturnType<ReturnType<typeof pgClient>> {\n const filter = active\n ? sql`WHERE \"inactivatedAt\" IS NULL AND deleted = false`\n : sql`WHERE \"inactivatedAt\" IS NOT NULL AND (\"inactivatedAt\" + \"ttl\") > NOW()`;\n return sql`WITH\n group_counts AS (\n SELECT\n \"clientGroupID\",\n COUNT(*) AS num_queries\n FROM ${sql(schema)}.\"desires\"\n ${filter}\n GROUP BY \"clientGroupID\"\n ),\n -- Count distinct clientIDs per clientGroupID\n client_per_group_counts AS (\n SELECT\n \"clientGroupID\",\n COUNT(DISTINCT \"clientID\") AS num_clients\n FROM ${sql(schema)}.\"desires\"\n ${filter}\n GROUP BY \"clientGroupID\"\n )\n -- Combine all the information\n SELECT\n g.\"clientGroupID\",\n cpg.num_clients,\n g.num_queries\n FROM group_counts g\n JOIN client_per_group_counts cpg ON g.\"clientGroupID\" = cpg.\"clientGroupID\"\n ORDER BY g.num_queries DESC;`;\n }\n\n await printPgStats(\n [\n [\n 'total num queries',\n sql`SELECT COUNT(*) as \"c\" FROM ${sql(schema)}.\"desires\"`,\n ],\n [\n 'num unique query hashes',\n sql`SELECT COUNT(DISTINCT \"queryHash\") as \"c\" FROM ${sql(\n schema,\n )}.\"desires\"`,\n ],\n [\n 'num active queries',\n sql`SELECT COUNT(*) as \"c\" FROM ${sql(schema)}.\"desires\" WHERE \"inactivatedAt\" IS NULL AND \"deleted\" = false`,\n ],\n [\n 'num inactive queries',\n sql`SELECT COUNT(*) as \"c\" FROM ${sql(schema)}.\"desires\" WHERE \"inactivatedAt\" IS NOT NULL AND (\"inactivatedAt\" + \"ttl\") > NOW()`,\n ],\n [\n 'num deleted queries',\n sql`SELECT COUNT(*) as \"c\" FROM ${sql(schema)}.\"desires\" WHERE \"deleted\" = true`,\n ],\n [\n 'fresh queries percentiles',\n sql`WITH client_group_counts AS (\n -- Count inactive desires per clientGroupID\n SELECT\n \"clientGroupID\",\n COUNT(*) AS fresh_count\n FROM ${sql(schema)}.\"desires\"\n WHERE\n (\"inactivatedAt\" IS NOT NULL\n AND (\"inactivatedAt\" + \"ttl\") > NOW()) OR (\"inactivatedAt\" IS NULL\n AND deleted = false)\n GROUP BY \"clientGroupID\"\n 
)\n\n SELECT\n percentile_cont(0.50) WITHIN GROUP (ORDER BY fresh_count) AS \"p50\",\n percentile_cont(0.75) WITHIN GROUP (ORDER BY fresh_count) AS \"p75\",\n percentile_cont(0.90) WITHIN GROUP (ORDER BY fresh_count) AS \"p90\",\n percentile_cont(0.95) WITHIN GROUP (ORDER BY fresh_count) AS \"p95\",\n percentile_cont(0.99) WITHIN GROUP (ORDER BY fresh_count) AS \"p99\",\n MIN(fresh_count) AS \"min\",\n MAX(fresh_count) AS \"max\",\n AVG(fresh_count) AS \"avg\"\n FROM client_group_counts;`,\n ],\n [\n 'rows per client group percentiles',\n sql`WITH client_group_counts AS (\n -- Count inactive desires per clientGroupID\n SELECT\n \"clientGroupID\",\n COUNT(*) AS row_count\n FROM ${sql(schema)}.\"rows\"\n GROUP BY \"clientGroupID\"\n )\n SELECT\n percentile_cont(0.50) WITHIN GROUP (ORDER BY row_count) AS \"p50\",\n percentile_cont(0.75) WITHIN GROUP (ORDER BY row_count) AS \"p75\",\n percentile_cont(0.90) WITHIN GROUP (ORDER BY row_count) AS \"p90\",\n percentile_cont(0.95) WITHIN GROUP (ORDER BY row_count) AS \"p95\",\n percentile_cont(0.99) WITHIN GROUP (ORDER BY row_count) AS \"p99\",\n MIN(row_count) AS \"min\",\n MAX(row_count) AS \"max\",\n AVG(row_count) AS \"avg\"\n FROM client_group_counts;`,\n ],\n [\n // check for AST blowup due to DNF conversion.\n 'ast sizes',\n sql`SELECT\n percentile_cont(0.25) WITHIN GROUP (ORDER BY length(\"clientAST\"::text)) AS \"25th_percentile\",\n percentile_cont(0.5) WITHIN GROUP (ORDER BY length(\"clientAST\"::text)) AS \"50th_percentile\",\n percentile_cont(0.75) WITHIN GROUP (ORDER BY length(\"clientAST\"::text)) AS \"75th_percentile\",\n percentile_cont(0.9) WITHIN GROUP (ORDER BY length(\"clientAST\"::text)) AS \"90th_percentile\",\n percentile_cont(0.95) WITHIN GROUP (ORDER BY length(\"clientAST\"::text)) AS \"95th_percentile\",\n percentile_cont(0.99) WITHIN GROUP (ORDER BY length(\"clientAST\"::text)) AS \"99th_percentile\",\n MIN(length(\"clientAST\"::text)) AS \"minimum_length\",\n MAX(length(\"clientAST\"::text)) AS \"maximum_length\",\n AVG(length(\"clientAST\"::text))::integer AS \"average_length\",\n COUNT(*) AS \"total_records\"\n FROM ${sql(schema)}.\"queries\";`,\n ],\n [\n // output the hash of the largest AST\n 'biggest ast hash',\n sql`SELECT \"queryHash\", length(\"clientAST\"::text) AS \"ast_length\"\n FROM ${sql(schema)}.\"queries\"\n ORDER BY length(\"clientAST\"::text) DESC\n LIMIT 1;`,\n ],\n [\n 'total active queries per client and client group',\n numQueriesPerClientGroup(true),\n ],\n [\n 'total inactive queries per client and client group',\n numQueriesPerClientGroup(false),\n ],\n [\n 'total rows per client group',\n sql`SELECT \"clientGroupID\", COUNT(*) as \"c\" FROM ${sql(\n schema,\n )}.\"rows\" GROUP BY \"clientGroupID\" ORDER BY \"c\" DESC`,\n ],\n [\n 'num rows per query',\n sql`SELECT\n k.key AS \"queryHash\",\n COUNT(*) AS row_count\n FROM ${sql(schema)}.\"rows\" r,\n LATERAL jsonb_each(r.\"refCounts\") k\n GROUP BY k.key\n ORDER BY row_count DESC;`,\n ],\n ] satisfies [\n name: string,\n query: ReturnType<ReturnType<typeof pgClient>>,\n ][],\n out,\n );\n\n await sql.end();\n}\n\nasync function changelogStats(\n lc: LogContext,\n config: ZeroConfig,\n out: Writable,\n) {\n out.write(header('Change DB'));\n const schema = upstreamSchema(getShardID(config)) + '/cdc';\n const sql = pgClient(lc, config.change.db);\n\n await printPgStats(\n [\n [\n 'change log size',\n sql`SELECT COUNT(*) as \"change_log_size\" FROM ${sql(schema)}.\"changeLog\"`,\n ],\n ],\n out,\n );\n await sql.end();\n}\n\nfunction replicaStats(lc: 
LogContext, config: ZeroConfig, out: Writable) {\n out.write(header('Replica'));\n const db = new Database(lc, config.replica.file);\n printStats(\n 'replica',\n [\n ['wal checkpoint', pick(first(db.pragma('WAL_CHECKPOINT')))],\n ['page count', pick(first(db.pragma('PAGE_COUNT')))],\n ['page size', pick(first(db.pragma('PAGE_SIZE')))],\n ['journal mode', pick(first(db.pragma('JOURNAL_MODE')))],\n ['synchronous', pick(first(db.pragma('SYNCHRONOUS')))],\n ['cache size', pick(first(db.pragma('CACHE_SIZE')))],\n ['auto vacuum', pick(first(db.pragma('AUTO_VACUUM')))],\n ['freelist count', pick(first(db.pragma('FREELIST_COUNT')))],\n ['wal autocheckpoint', pick(first(db.pragma('WAL_AUTOCHECKPOINT')))],\n ['db file stats', fs.statSync(config.replica.file)],\n ] as const,\n out,\n );\n}\n\nfunction osStats(out: Writable) {\n printStats(\n 'os',\n [\n ['load avg', os.loadavg()],\n ['uptime', os.uptime()],\n ['total mem', os.totalmem()],\n ['free mem', os.freemem()],\n ['cpus', os.cpus().length],\n ['available parallelism', os.availableParallelism()],\n ['platform', os.platform()],\n ['arch', os.arch()],\n ['release', os.release()],\n ['uptime', os.uptime()],\n ] as const,\n out,\n );\n}\n\nasync function printPgStats(\n pendingQueries: [\n name: string,\n query: ReturnType<ReturnType<typeof pgClient>>,\n ][],\n out: Writable,\n) {\n const results = await Promise.all(\n pendingQueries.map(async ([name, query]) => [name, await query]),\n );\n for (const [name, data] of results) {\n out.write('\\n');\n out.write(name);\n out.write('\\n');\n out.write(BigIntJSON.stringify(data, null, 2));\n }\n}\n\nfunction printStats(\n group: string,\n queries: readonly [name: string, result: unknown][],\n out: Writable,\n) {\n out.write('\\n' + header(group));\n for (const [name, result] of queries) {\n out.write('\\n' + name + BigIntJSON.stringify(result, null, 2));\n }\n}\n\nexport async function handleStatzRequest(\n lc: LogContext,\n config: ZeroConfig,\n req: FastifyRequest,\n res: FastifyReply,\n) {\n const credentials = auth(req);\n if (!isAdminPasswordValid(lc, config, credentials?.pass)) {\n void res\n .code(401)\n .header('WWW-Authenticate', 'Basic realm=\"Statz Protected Area\"')\n .send('Unauthorized');\n return;\n }\n\n await upstreamStats(lc, config, res.raw);\n res.raw.write('\\n\\n');\n await cvrStats(lc, config, res.raw);\n res.raw.write('\\n\\n');\n await changelogStats(lc, config, res.raw);\n res.raw.write('\\n\\n');\n replicaStats(lc, config, res.raw);\n res.raw.write('\\n\\n');\n osStats(res.raw);\n res.raw.end();\n}\n\nfunction first(x: object[]): object {\n return x[0];\n}\n\nfunction pick(x: object): unknown {\n return Object.values(x)[0];\n}\n\nfunction header(name: string): string {\n return `=== ${name} 
===\\n`;\n}\n"],"names":[],"mappings":";;;;;;;;AAaA,eAAe,cACb,IACA,QACA,KACA;AACA,QAAM,SAAS,eAAe,WAAW,MAAM,CAAC;AAChD,QAAM,MAAM,SAAS,IAAI,OAAO,SAAS,EAAE;AAE3C,MAAI,MAAM,OAAO,UAAU,CAAC;AAE5B,QAAM;AAAA,IACJ;AAAA,MACE;AAAA,QACE;AAAA,QACA,kCAAkC,IAAI,MAAM,CAAC;AAAA,MAAA;AAAA,MAE/C;AAAA,QACE;AAAA,QACA,kCAAkC,IAAI,MAAM,CAAC;AAAA,MAAA;AAAA,MAE/C;AAAA,QACE;AAAA,QACA,+CAA+C,IAAI,MAAM,CAAC;AAAA,MAAA;AAAA,IAC5D;AAAA,IAEF;AAAA,EAAA;AAGF,QAAM,IAAI,IAAA;AACZ;AAEA,eAAe,SAAS,IAAgB,QAAoB,KAAe;AACzE,MAAI,MAAM,OAAO,KAAK,CAAC;AAEvB,QAAM,SAAS,eAAe,WAAW,MAAM,CAAC,IAAI;AACpD,QAAM,MAAM,SAAS,IAAI,OAAO,IAAI,EAAE;AAEtC,WAAS,yBACP,QACyC;AACzC,UAAM,SAAS,SACX,yDACA;AACJ,WAAO;AAAA;AAAA;AAAA;AAAA;AAAA,aAKE,IAAI,MAAM,CAAC;AAAA,QAChB,MAAM;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,aAQD,IAAI,MAAM,CAAC;AAAA,QAChB,MAAM;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAWZ;AAEA,QAAM;AAAA,IACJ;AAAA,MACE;AAAA,QACE;AAAA,QACA,kCAAkC,IAAI,MAAM,CAAC;AAAA,MAAA;AAAA,MAE/C;AAAA,QACE;AAAA,QACA,qDAAqD;AAAA,UACnD;AAAA,QAAA,CACD;AAAA,MAAA;AAAA,MAEH;AAAA,QACE;AAAA,QACA,kCAAkC,IAAI,MAAM,CAAC;AAAA,MAAA;AAAA,MAE/C;AAAA,QACE;AAAA,QACA,kCAAkC,IAAI,MAAM,CAAC;AAAA,MAAA;AAAA,MAE/C;AAAA,QACE;AAAA,QACA,kCAAkC,IAAI,MAAM,CAAC;AAAA,MAAA;AAAA,MAE/C;AAAA,QACE;AAAA,QACA;AAAA;AAAA;AAAA;AAAA;AAAA,eAKO,IAAI,MAAM,CAAC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,MAAA;AAAA,MAmBpB;AAAA,QACE;AAAA,QACA;AAAA;AAAA;AAAA;AAAA;AAAA,eAKO,IAAI,MAAM,CAAC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,MAAA;AAAA,MAcpB;AAAA;AAAA,QAEE;AAAA,QACA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,aAWK,IAAI,MAAM,CAAC;AAAA,MAAA;AAAA,MAElB;AAAA;AAAA,QAEE;AAAA,QACA;AAAA,aACK,IAAI,MAAM,CAAC;AAAA;AAAA;AAAA,MAAA;AAAA,MAIlB;AAAA,QACE;AAAA,QACA,yBAAyB,IAAI;AAAA,MAAA;AAAA,MAE/B;AAAA,QACE;AAAA,QACA,yBAAyB,KAAK;AAAA,MAAA;AAAA,MAEhC;AAAA,QACE;AAAA,QACA,mDAAmD;AAAA,UACjD;AAAA,QAAA,CACD;AAAA,MAAA;AAAA,MAEH;AAAA,QACE;AAAA,QACA;AAAA;AAAA;AAAA,aAGK,IAAI,MAAM,CAAC;AAAA;AAAA;AAAA;AAAA,MAAA;AAAA,IAIlB;AAAA,IAKF;AAAA,EAAA;AAGF,QAAM,IAAI,IAAA;AACZ;AAEA,eAAe,eACb,IACA,QACA,KACA;AACA,MAAI,MAAM,OAAO,WAAW,CAAC;AAC7B,QAAM,SAAS,eAAe,WAAW,MAAM,CAAC,IAAI;AACpD,QAAM,MAAM,SAAS,IAAI,OAAO,OAAO,EAAE;AAEzC,QAAM;AAAA,IACJ;AAAA,MACE;AAAA,QACE;AAAA,QACA,gDAAgD,IAAI,MAAM,CAAC;AAAA,MAAA;AAAA,IAC7D;AAAA,IAEF;AAAA,EAAA;AAEF,QAAM,IAAI,IAAA;AACZ;AAEA,SAAS,aAAa,IAAgB,QAAoB,KAAe;AACvE,MAAI,MAAM,OAAO,SAAS,CAAC;AAC3B,QAAM,KAAK,IAAI,SAAS,IAAI,OAAO,QAAQ,IAAI;AAC/C;AAAA,IACE;AAAA,IACA;AAAA,MACE,CAAC,kBAAkB,KAAK,MAAM,GAAG,OAAO,gBAAgB,CAAC,CAAC,CAAC;AAAA,MAC3D,CAAC,cAAc,KAAK,MAAM,GAAG,OAAO,YAAY,CAAC,CAAC,CAAC;AAAA,MACnD,CAAC,aAAa,KAAK,MAAM,GAAG,OAAO,WAAW,CAAC,CAAC,CAAC;AAAA,MACjD,CAAC,gBAAgB,KAAK,MAAM,GAAG,OAAO,cAAc,CAAC,CAAC,CAAC;AAAA,MACvD,CAAC,eAAe,KAAK,MAAM,GAAG,OAAO,aAAa,CAAC,CAAC,CAAC;AAAA,MACrD,CAAC,cAAc,KAAK,MAAM,GAAG,OAAO,YAAY,CAAC,CAAC,CAAC;AAAA,MACnD,CAAC,eAAe,KAAK,MAAM,GAAG,OAAO,aAAa,CAAC,CAAC,CAAC;AAAA,MACrD,CAAC,kBAAkB,KAAK,MAAM,GAAG,OAAO,gBAAgB,CAAC,CAAC,CAAC;AAAA,MAC3D,CAAC,sBAAsB,KAAK,MAAM,GAAG,OAAO,oBAAoB,CAAC,CAAC,CAAC;AAAA,MACnE,CAAC,iBAAiB,GAAG,SAAS,OAAO,QAAQ,IAAI,CAAC;AAAA,IAAA;AAAA,IAEpD;AAAA,EAAA;AAEJ;AAEA,SAAS,QAAQ,KAAe;AAC9B;AAAA,IACE;AAAA,IACA;AAAA,MACE,CAAC,YAAY,GAAG,SAAS;AAAA,MACzB,CAAC,UAAU,GAAG,QAAQ;AAAA,MACtB,CAAC,aAAa,GAAG,UAAU;AAAA,MAC3B,CAAC,YAAY,GAAG,SAAS;AAAA,MACzB,CAAC,QAAQ,GAAG,KAAA,EAAO,MAAM;AAAA,MACzB,CAAC,yBAAyB,GAAG,sBAAsB;AAAA,MACnD,CAAC,YAAY,GAAG,UAAU;AAAA,MAC1B,CAAC,QAAQ,GAAG,MAAM;AAAA,MAClB,CAAC,WAAW,GAAG,SAAS;AAAA,MACxB,CAAC,UAAU,GAAG,OAAA,CAAQ;AAAA,IAAA;AAAA,IAExB;AAAA,EAAA;AAEJ;AAEA,eAAe,aACb,gBAIA,KACA;AACA,QAAM,U
AAU,MAAM,QAAQ;AAAA,IAC5B,eAAe,IAAI,OAAO,CAAC,MAAM,KAAK,MAAM,CAAC,MAAM,MAAM,KAAK,CAAC;AAAA,EAAA;AAEjE,aAAW,CAAC,MAAM,IAAI,KAAK,SAAS;AAClC,QAAI,MAAM,IAAI;AACd,QAAI,MAAM,IAAI;AACd,QAAI,MAAM,IAAI;AACd,QAAI,MAAM,WAAW,UAAU,MAAM,MAAM,CAAC,CAAC;AAAA,EAC/C;AACF;AAEA,SAAS,WACP,OACA,SACA,KACA;AACA,MAAI,MAAM,OAAO,OAAO,KAAK,CAAC;AAC9B,aAAW,CAAC,MAAM,MAAM,KAAK,SAAS;AACpC,QAAI,MAAM,OAAO,OAAO,WAAW,UAAU,QAAQ,MAAM,CAAC,CAAC;AAAA,EAC/D;AACF;AAEA,eAAsB,mBACpB,IACA,QACA,KACA,KACA;AACA,QAAM,cAAc,KAAK,GAAG;AAC5B,MAAI,CAAC,qBAAqB,IAAI,QAAQ,aAAa,IAAI,GAAG;AACxD,SAAK,IACF,KAAK,GAAG,EACR,OAAO,oBAAoB,oCAAoC,EAC/D,KAAK,cAAc;AACtB;AAAA,EACF;AAEA,QAAM,cAAc,IAAI,QAAQ,IAAI,GAAG;AACvC,MAAI,IAAI,MAAM,MAAM;AACpB,QAAM,SAAS,IAAI,QAAQ,IAAI,GAAG;AAClC,MAAI,IAAI,MAAM,MAAM;AACpB,QAAM,eAAe,IAAI,QAAQ,IAAI,GAAG;AACxC,MAAI,IAAI,MAAM,MAAM;AACpB,eAAa,IAAI,QAAQ,IAAI,GAAG;AAChC,MAAI,IAAI,MAAM,MAAM;AACpB,UAAQ,IAAI,GAAG;AACf,MAAI,IAAI,IAAA;AACV;AAEA,SAAS,MAAM,GAAqB;AAClC,SAAO,EAAE,CAAC;AACZ;AAEA,SAAS,KAAK,GAAoB;AAChC,SAAO,OAAO,OAAO,CAAC,EAAE,CAAC;AAC3B;AAEA,SAAS,OAAO,MAAsB;AACpC,SAAO,OAAO,IAAI;AAAA;AACpB;"}
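The statz implementation embedded in the source map above summarizes per-client-group counts with PostgreSQL's ordered-set aggregate percentile_cont(...) WITHIN GROUP. A minimal standalone sketch of that pattern, assuming a hypothetical events table and using the postgres client library that the pgClient wrapper appears to build on:

import postgres from 'postgres';

// Count rows per group in a CTE, then reduce the counts to percentile
// statistics, mirroring the shape of the embedded statz queries.
// The connection string and the events table are placeholders.
const sql = postgres('postgres://localhost:5432/dev');

const [stats] = await sql`
  WITH counts AS (
    SELECT "clientGroupID", COUNT(*) AS n
    FROM events
    GROUP BY "clientGroupID"
  )
  SELECT
    percentile_cont(0.50) WITHIN GROUP (ORDER BY n) AS "p50",
    percentile_cont(0.95) WITHIN GROUP (ORDER BY n) AS "p95",
    MIN(n) AS "min",
    MAX(n) AS "max",
    AVG(n) AS "avg"
  FROM counts;`;

console.log(stats);
await sql.end();

percentile_cont interpolates between neighboring values, so p50/p95 can be fractional even over integer counts; that is consistent with the embedded queries applying an AVG(...)::integer cast only where whole numbers are wanted.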
@@ -1,14 +1,13 @@
  import type { LogContext } from '@rocicorp/logger';
  import type { JSONObject } from '../../../../shared/src/bigint-json.ts';
  import { type JSONObject as SafeJSONObject } from '../../../../shared/src/json.ts';
+ import type { ErroredQuery } from '../../../../zero-protocol/src/custom-queries.ts';
  import type { Downstream } from '../../../../zero-protocol/src/down.ts';
+ import { type TransformFailedBody } from '../../../../zero-protocol/src/error.ts';
  import type { InspectDownBody } from '../../../../zero-protocol/src/inspect-down.ts';
- import { type SchemaVersions } from '../../types/schema-versions.ts';
  import { type ShardID } from '../../types/shards.ts';
  import type { Subscription } from '../../types/subscription.ts';
  import { type CVRVersion, type DelQueryPatch, type NullableCVRVersion, type PutQueryPatch, type RowID } from './schema/types.ts';
- import type { ErroredQuery } from '../../../../zero-protocol/src/custom-queries.ts';
- import { type TransformFailedBody } from '../../../../zero-protocol/src/error.ts';
  export type PutRowPatch = {
  type: 'row';
  op: 'put';
@@ -33,7 +32,7 @@ export interface PokeHandler {
  end(finalVersion: CVRVersion): Promise<void>;
  }
  /** Wraps PokeHandlers for multiple clients in a single PokeHandler. */
- export declare function startPoke(clients: ClientHandler[], tentativeVersion: CVRVersion, schemaVersions?: SchemaVersions): PokeHandler;
+ export declare function startPoke(clients: ClientHandler[], tentativeVersion: CVRVersion): PokeHandler;
  /**
  * Handles a single `ViewSyncer` connection.
  */
@@ -41,11 +40,11 @@ export declare class ClientHandler {
  #private;
  readonly clientID: string;
  readonly wsID: string;
- constructor(lc: LogContext, clientGroupID: string, clientID: string, wsID: string, shard: ShardID, baseCookie: string | null, schemaVersion: number | null, downstream: Subscription<Downstream>);
+ constructor(lc: LogContext, clientGroupID: string, clientID: string, wsID: string, shard: ShardID, baseCookie: string | null, downstream: Subscription<Downstream>);
  version(): NullableCVRVersion;
  fail(e: unknown): void;
  close(reason: string): void;
- startPoke(tentativeVersion: CVRVersion, schemaVersions?: SchemaVersions): PokeHandler;
+ startPoke(tentativeVersion: CVRVersion): PokeHandler;
  sendDeleteClients(lc: LogContext, deletedClientIDs: string[], deletedClientGroupIDs: string[]): Promise<void>;
  sendQueryTransformApplicationErrors(errors: ErroredQuery[]): void;
  sendQueryTransformFailedError(error: TransformFailedBody): void;
@@ -1 +1 @@
- {"version":3,"file":"client-handler.d.ts","sourceRoot":"","sources":["../../../../../../zero-cache/src/services/view-syncer/client-handler.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,kBAAkB,CAAC;AAEjD,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,uCAAuC,CAAC;AACtE,OAAO,EAEL,KAAK,UAAU,IAAI,cAAc,EAClC,MAAM,gCAAgC,CAAC;AAMxC,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,uCAAuC,CAAC;AACtE,OAAO,KAAK,EAAC,eAAe,EAAC,MAAM,+CAA+C,CAAC;AAgBnF,OAAO,EAEL,KAAK,cAAc,EACpB,MAAM,gCAAgC,CAAC;AACxC,OAAO,EAAiB,KAAK,OAAO,EAAC,MAAM,uBAAuB,CAAC;AACnE,OAAO,KAAK,EAAC,YAAY,EAAC,MAAM,6BAA6B,CAAC;AAC9D,OAAO,EAKL,KAAK,UAAU,EACf,KAAK,aAAa,EAClB,KAAK,kBAAkB,EACvB,KAAK,aAAa,EAClB,KAAK,KAAK,EACX,MAAM,mBAAmB,CAAC;AAC3B,OAAO,KAAK,EAAC,YAAY,EAAC,MAAM,iDAAiD,CAAC;AAClF,OAAO,EAEL,KAAK,mBAAmB,EACzB,MAAM,wCAAwC,CAAC;AAEhD,MAAM,MAAM,WAAW,GAAG;IACxB,IAAI,EAAE,KAAK,CAAC;IACZ,EAAE,EAAE,KAAK,CAAC;IACV,EAAE,EAAE,KAAK,CAAC;IACV,QAAQ,EAAE,UAAU,CAAC;CACtB,CAAC;AAEF,MAAM,MAAM,cAAc,GAAG;IAC3B,IAAI,EAAE,KAAK,CAAC;IACZ,EAAE,EAAE,KAAK,CAAC;IACV,EAAE,EAAE,KAAK,CAAC;CACX,CAAC;AAEF,MAAM,MAAM,QAAQ,GAAG,WAAW,GAAG,cAAc,CAAC;AACpD,MAAM,MAAM,WAAW,GAAG,aAAa,GAAG,aAAa,CAAC;AAExD,MAAM,MAAM,KAAK,GAAG,WAAW,GAAG,QAAQ,CAAC;AAE3C,MAAM,MAAM,cAAc,GAAG;IAC3B,KAAK,EAAE,KAAK,CAAC;IACb,SAAS,EAAE,UAAU,CAAC;CACvB,CAAC;AAEF,MAAM,WAAW,WAAW;IAC1B,QAAQ,CAAC,KAAK,EAAE,cAAc,GAAG,OAAO,CAAC,IAAI,CAAC,CAAC;IAC/C,MAAM,IAAI,OAAO,CAAC,IAAI,CAAC,CAAC;IACxB,GAAG,CAAC,YAAY,EAAE,UAAU,GAAG,OAAO,CAAC,IAAI,CAAC,CAAC;CAC9C;AAQD,uEAAuE;AACvE,wBAAgB,SAAS,CACvB,OAAO,EAAE,aAAa,EAAE,EACxB,gBAAgB,EAAE,UAAU,EAC5B,cAAc,CAAC,EAAE,cAAc,GAC9B,WAAW,CAmBb;AAMD;;GAEG;AACH,qBAAa,aAAa;;IAExB,QAAQ,CAAC,QAAQ,EAAE,MAAM,CAAC;IAC1B,QAAQ,CAAC,IAAI,EAAE,MAAM,CAAC;gBA2BpB,EAAE,EAAE,UAAU,EACd,aAAa,EAAE,MAAM,EACrB,QAAQ,EAAE,MAAM,EAChB,IAAI,EAAE,MAAM,EACZ,KAAK,EAAE,OAAO,EACd,UAAU,EAAE,MAAM,GAAG,IAAI,EACzB,aAAa,EAAE,MAAM,GAAG,IAAI,EAC5B,UAAU,EAAE,YAAY,CAAC,UAAU,CAAC;IActC,OAAO,IAAI,kBAAkB;IAS7B,IAAI,CAAC,CAAC,EAAE,OAAO;IAQf,KAAK,CAAC,MAAM,EAAE,MAAM;IAKpB,SAAS,CACP,gBAAgB,EAAE,UAAU,EAC5B,cAAc,CAAC,EAAE,cAAc,GAC9B,WAAW;IAuKR,iBAAiB,CACrB,EAAE,EAAE,UAAU,EACd,gBAAgB,EAAE,MAAM,EAAE,EAC1B,qBAAqB,EAAE,MAAM,EAAE;IAajC,mCAAmC,CAAC,MAAM,EAAE,YAAY,EAAE;IAI1D,6BAA6B,CAAC,KAAK,EAAE,mBAAmB;IAIxD,mBAAmB,CAAC,EAAE,EAAE,UAAU,EAAE,QAAQ,EAAE,eAAe,GAAG,IAAI;CA0BrE;AA0CD;;;;;;GAMG;AACH,wBAAgB,cAAc,CAAC,GAAG,EAAE,UAAU,GAAG,cAAc,CAkB9D"}
+ {"version":3,"file":"client-handler.d.ts","sourceRoot":"","sources":["../../../../../../zero-cache/src/services/view-syncer/client-handler.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,kBAAkB,CAAC;AAEjD,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,uCAAuC,CAAC;AACtE,OAAO,EAEL,KAAK,UAAU,IAAI,cAAc,EAClC,MAAM,gCAAgC,CAAC;AAIxC,OAAO,KAAK,EAAC,YAAY,EAAC,MAAM,iDAAiD,CAAC;AAGlF,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,uCAAuC,CAAC;AACtE,OAAO,EAEL,KAAK,mBAAmB,EACzB,MAAM,wCAAwC,CAAC;AAChD,OAAO,KAAK,EAAC,eAAe,EAAC,MAAM,+CAA+C,CAAC;AAgBnF,OAAO,EAAiB,KAAK,OAAO,EAAC,MAAM,uBAAuB,CAAC;AACnE,OAAO,KAAK,EAAC,YAAY,EAAC,MAAM,6BAA6B,CAAC;AAC9D,OAAO,EAKL,KAAK,UAAU,EACf,KAAK,aAAa,EAClB,KAAK,kBAAkB,EACvB,KAAK,aAAa,EAClB,KAAK,KAAK,EACX,MAAM,mBAAmB,CAAC;AAE3B,MAAM,MAAM,WAAW,GAAG;IACxB,IAAI,EAAE,KAAK,CAAC;IACZ,EAAE,EAAE,KAAK,CAAC;IACV,EAAE,EAAE,KAAK,CAAC;IACV,QAAQ,EAAE,UAAU,CAAC;CACtB,CAAC;AAEF,MAAM,MAAM,cAAc,GAAG;IAC3B,IAAI,EAAE,KAAK,CAAC;IACZ,EAAE,EAAE,KAAK,CAAC;IACV,EAAE,EAAE,KAAK,CAAC;CACX,CAAC;AAEF,MAAM,MAAM,QAAQ,GAAG,WAAW,GAAG,cAAc,CAAC;AACpD,MAAM,MAAM,WAAW,GAAG,aAAa,GAAG,aAAa,CAAC;AAExD,MAAM,MAAM,KAAK,GAAG,WAAW,GAAG,QAAQ,CAAC;AAE3C,MAAM,MAAM,cAAc,GAAG;IAC3B,KAAK,EAAE,KAAK,CAAC;IACb,SAAS,EAAE,UAAU,CAAC;CACvB,CAAC;AAEF,MAAM,WAAW,WAAW;IAC1B,QAAQ,CAAC,KAAK,EAAE,cAAc,GAAG,OAAO,CAAC,IAAI,CAAC,CAAC;IAC/C,MAAM,IAAI,OAAO,CAAC,IAAI,CAAC,CAAC;IACxB,GAAG,CAAC,YAAY,EAAE,UAAU,GAAG,OAAO,CAAC,IAAI,CAAC,CAAC;CAC9C;AAQD,uEAAuE;AACvE,wBAAgB,SAAS,CACvB,OAAO,EAAE,aAAa,EAAE,EACxB,gBAAgB,EAAE,UAAU,GAC3B,WAAW,CAiBb;AAMD;;GAEG;AACH,qBAAa,aAAa;;IAExB,QAAQ,CAAC,QAAQ,EAAE,MAAM,CAAC;IAC1B,QAAQ,CAAC,IAAI,EAAE,MAAM,CAAC;gBA0BpB,EAAE,EAAE,UAAU,EACd,aAAa,EAAE,MAAM,EACrB,QAAQ,EAAE,MAAM,EAChB,IAAI,EAAE,MAAM,EACZ,KAAK,EAAE,OAAO,EACd,UAAU,EAAE,MAAM,GAAG,IAAI,EACzB,UAAU,EAAE,YAAY,CAAC,UAAU,CAAC;IAatC,OAAO,IAAI,kBAAkB;IAS7B,IAAI,CAAC,CAAC,EAAE,OAAO;IAQf,KAAK,CAAC,MAAM,EAAE,MAAM;IAKpB,SAAS,CAAC,gBAAgB,EAAE,UAAU,GAAG,WAAW;IAwJ9C,iBAAiB,CACrB,EAAE,EAAE,UAAU,EACd,gBAAgB,EAAE,MAAM,EAAE,EAC1B,qBAAqB,EAAE,MAAM,EAAE;IAajC,mCAAmC,CAAC,MAAM,EAAE,YAAY,EAAE;IAI1D,6BAA6B,CAAC,KAAK,EAAE,mBAAmB;IAIxD,mBAAmB,CAAC,EAAE,EAAE,UAAU,EAAE,QAAQ,EAAE,eAAe,GAAG,IAAI;CA0BrE;AA0CD;;;;;;GAMG;AACH,wBAAgB,cAAc,CAAC,GAAG,EAAE,UAAU,GAAG,cAAc,CAkB9D"}
@@ -3,24 +3,21 @@ import { assertJSONValue } from "../../../../shared/src/json.js";
  import { promiseVoid } from "../../../../shared/src/resolved-promises.js";
  import { parse } from "../../../../shared/src/valita.js";
  import { rowSchema } from "../../../../zero-protocol/src/data.js";
+ import { ProtocolError } from "../../../../zero-protocol/src/error.js";
  import { primaryKeyValueRecordSchema } from "../../../../zero-protocol/src/primary-key.js";
  import { mutationResultSchema } from "../../../../zero-protocol/src/push.js";
  import { getOrCreateHistogram, getOrCreateCounter } from "../../observability/metrics.js";
  import { getLogLevel, wrapWithProtocolError } from "../../types/error-with-level.js";
- import { getProtocolErrorIfSchemaVersionNotSupported } from "../../types/schema-versions.js";
  import { upstreamSchema } from "../../types/shards.js";
  import { cookieToVersion, versionToCookie, cmpVersions, versionToNullableCookie } from "./schema/types.js";
- import { ProtocolError } from "../../../../zero-protocol/src/error.js";
  import { object, number, string } from "@badrap/valita";
  const NOOP = {
  addPatch: () => promiseVoid,
  cancel: () => promiseVoid,
  end: () => promiseVoid
  };
- function startPoke(clients, tentativeVersion, schemaVersions) {
- const pokers = clients.map(
- (c) => c.startPoke(tentativeVersion, schemaVersions)
- );
+ function startPoke(clients, tentativeVersion) {
+ const pokers = clients.map((c) => c.startPoke(tentativeVersion));
  return {
  addPatch: async (patch) => {
  await Promise.allSettled(pokers.map((poker) => poker.addPatch(patch)));
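The rewritten startPoke above (matching the narrowed declaration earlier in this diff) is a fan-out wrapper: one PokeHandler that broadcasts each call to every client's poker through Promise.allSettled, so a single failing client cannot reject the whole batch. A self-contained TypeScript sketch of the same pattern, with a placeholder Patch type and simplified signatures standing in for the package's real ones:

// One PokeHandler wrapping many: every method fans out to all wrapped
// handlers, and allSettled keeps one rejection from aborting the rest.
type Patch = unknown;

interface PokeHandler {
  addPatch(patch: Patch): Promise<void>;
  cancel(): Promise<void>;
  end(finalVersion: string): Promise<void>;
}

function fanOutPoke(pokers: PokeHandler[]): PokeHandler {
  return {
    addPatch: async patch => {
      await Promise.allSettled(pokers.map(p => p.addPatch(patch)));
    },
    cancel: async () => {
      await Promise.allSettled(pokers.map(p => p.cancel()));
    },
    end: async finalVersion => {
      await Promise.allSettled(pokers.map(p => p.end(finalVersion)));
    },
  };
}

Because Promise.allSettled never rejects, per-client errors must be handled inside each wrapped handler rather than surfacing through the wrapper.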
@@ -43,7 +40,6 @@ class ClientHandler {
  #lc;
  #downstream;
  #baseVersion;
- #schemaVersion;
  #pokeTime = getOrCreateHistogram("sync", "poke.time", {
  description: "Time elapsed for each poke transaction. Canceled / noop pokes are excluded.",
  unit: "s"
@@ -58,7 +54,7 @@ class ClientHandler {
  "poke.rows",
  "Count of poked rows."
  );
- constructor(lc, clientGroupID, clientID, wsID, shard, baseCookie, schemaVersion, downstream) {
+ constructor(lc, clientGroupID, clientID, wsID, shard, baseCookie, downstream) {
  lc.debug?.("new client handler");
  this.#clientGroupID = clientGroupID;
  this.clientID = clientID;
@@ -68,7 +64,6 @@ class ClientHandler {
  this.#lc = lc;
  this.#downstream = downstream;
  this.#baseVersion = cookieToVersion(baseCookie);
- this.#schemaVersion = schemaVersion;
  }
  version() {
  return this.#baseVersion;
@@ -88,19 +83,9 @@ class ClientHandler {
  this.#lc.debug?.(`view-syncer closing connection: ${reason}`);
  this.#downstream.cancel();
  }
- startPoke(tentativeVersion, schemaVersions) {
+ startPoke(tentativeVersion) {
  const pokeID = versionToCookie(tentativeVersion);
  const lc = this.#lc.withContext("pokeID", pokeID);
- if (schemaVersions && this.#schemaVersion) {
- const schemaVersionError = getProtocolErrorIfSchemaVersionNotSupported(
- this.#schemaVersion,
- schemaVersions
- );
- if (schemaVersionError) {
- this.fail(schemaVersionError);
- return NOOP;
- }
- }
  if (cmpVersions(this.#baseVersion, tentativeVersion) >= 0) {
  lc.info?.(`already caught up, not sending poke.`);
  return NOOP;
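With the schema-version gate removed, the only remaining short-circuit in startPoke is the version comparison above: when the client's base version is already at or past the tentative version, the NOOP poker is returned and nothing is streamed. A simplified sketch of that guard, with a two-part version type standing in for the package's CVRVersion (an assumption about its shape, not taken from this diff):

// Simplified stand-in for CVRVersion: a state version plus an optional
// minor version, compared lexicographically then numerically.
type Version = {stateVersion: string; minorVersion?: number};

function cmpVersions(a: Version, b: Version): number {
  if (a.stateVersion !== b.stateVersion) {
    return a.stateVersion < b.stateVersion ? -1 : 1;
  }
  return (a.minorVersion ?? 0) - (b.minorVersion ?? 0);
}

// Guard shape from the diff: skip the poke when already caught up.
function shouldPoke(base: Version, tentative: Version): boolean {
  return cmpVersions(base, tentative) < 0;
}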
@@ -110,9 +95,6 @@
  lc.info?.(`starting poke from ${baseCookie} to ${cookie}`);
  const start = performance.now();
  const pokeStart = { pokeID, baseCookie };
- if (schemaVersions) {
- pokeStart.schemaVersions = schemaVersions;
- }
  let pokeStarted = false;
  let body;
  let partCount = 0;