@rocicorp/zero 1.2.0 → 1.3.0-canary.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (303)
  1. package/out/analyze-query/src/bin-analyze.js +25 -25
  2. package/out/analyze-query/src/bin-analyze.js.map +1 -1
  3. package/out/ast-to-zql/src/ast-to-zql.d.ts.map +1 -1
  4. package/out/ast-to-zql/src/ast-to-zql.js +2 -1
  5. package/out/ast-to-zql/src/ast-to-zql.js.map +1 -1
  6. package/out/replicache/src/btree/node.d.ts.map +1 -1
  7. package/out/replicache/src/btree/node.js +2 -2
  8. package/out/replicache/src/btree/node.js.map +1 -1
  9. package/out/replicache/src/connection-loop.js +3 -3
  10. package/out/replicache/src/connection-loop.js.map +1 -1
  11. package/out/replicache/src/deleted-clients.d.ts +0 -4
  12. package/out/replicache/src/deleted-clients.d.ts.map +1 -1
  13. package/out/replicache/src/deleted-clients.js +1 -1
  14. package/out/replicache/src/deleted-clients.js.map +1 -1
  15. package/out/replicache/src/hash.d.ts.map +1 -1
  16. package/out/replicache/src/hash.js.map +1 -1
  17. package/out/replicache/src/process-scheduler.d.ts.map +1 -1
  18. package/out/replicache/src/process-scheduler.js.map +1 -1
  19. package/out/replicache/src/request-idle.js +1 -1
  20. package/out/replicache/src/request-idle.js.map +1 -1
  21. package/out/replicache/src/sync/patch.d.ts +1 -1
  22. package/out/replicache/src/sync/patch.d.ts.map +1 -1
  23. package/out/replicache/src/sync/patch.js +1 -1
  24. package/out/replicache/src/sync/patch.js.map +1 -1
  25. package/out/shared/src/arrays.d.ts.map +1 -1
  26. package/out/shared/src/arrays.js +1 -2
  27. package/out/shared/src/arrays.js.map +1 -1
  28. package/out/shared/src/bigint-json.js +1 -1
  29. package/out/shared/src/bigint-json.js.map +1 -1
  30. package/out/shared/src/btree-set.js +1 -1
  31. package/out/shared/src/btree-set.js.map +1 -1
  32. package/out/shared/src/iterables.d.ts +7 -0
  33. package/out/shared/src/iterables.d.ts.map +1 -1
  34. package/out/shared/src/iterables.js +10 -1
  35. package/out/shared/src/iterables.js.map +1 -1
  36. package/out/shared/src/logging.d.ts.map +1 -1
  37. package/out/shared/src/logging.js +10 -9
  38. package/out/shared/src/logging.js.map +1 -1
  39. package/out/shared/src/options.js +1 -1
  40. package/out/shared/src/options.js.map +1 -1
  41. package/out/shared/src/sorted-entries.d.ts +2 -0
  42. package/out/shared/src/sorted-entries.d.ts.map +1 -0
  43. package/out/shared/src/sorted-entries.js +9 -0
  44. package/out/shared/src/sorted-entries.js.map +1 -0
  45. package/out/shared/src/tdigest-schema.d.ts.map +1 -1
  46. package/out/shared/src/tdigest-schema.js.map +1 -1
  47. package/out/shared/src/tdigest.d.ts.map +1 -1
  48. package/out/shared/src/tdigest.js +7 -7
  49. package/out/shared/src/tdigest.js.map +1 -1
  50. package/out/shared/src/valita.d.ts.map +1 -1
  51. package/out/shared/src/valita.js +1 -1
  52. package/out/shared/src/valita.js.map +1 -1
  53. package/out/z2s/src/sql.d.ts +2 -2
  54. package/out/z2s/src/sql.d.ts.map +1 -1
  55. package/out/z2s/src/sql.js +3 -3
  56. package/out/z2s/src/sql.js.map +1 -1
  57. package/out/zero/package.js +6 -7
  58. package/out/zero/package.js.map +1 -1
  59. package/out/zero/src/pg.js +1 -1
  60. package/out/zero/src/server.js +1 -1
  61. package/out/zero-cache/src/auth/auth.d.ts +8 -26
  62. package/out/zero-cache/src/auth/auth.d.ts.map +1 -1
  63. package/out/zero-cache/src/auth/auth.js +57 -82
  64. package/out/zero-cache/src/auth/auth.js.map +1 -1
  65. package/out/zero-cache/src/auth/jwt.d.ts +3 -3
  66. package/out/zero-cache/src/auth/jwt.d.ts.map +1 -1
  67. package/out/zero-cache/src/auth/jwt.js.map +1 -1
  68. package/out/zero-cache/src/auth/load-permissions.js +1 -1
  69. package/out/zero-cache/src/auth/load-permissions.js.map +1 -1
  70. package/out/zero-cache/src/config/zero-config.d.ts +38 -2
  71. package/out/zero-cache/src/config/zero-config.d.ts.map +1 -1
  72. package/out/zero-cache/src/config/zero-config.js +56 -1
  73. package/out/zero-cache/src/config/zero-config.js.map +1 -1
  74. package/out/zero-cache/src/custom/fetch.d.ts +2 -9
  75. package/out/zero-cache/src/custom/fetch.d.ts.map +1 -1
  76. package/out/zero-cache/src/custom/fetch.js +11 -4
  77. package/out/zero-cache/src/custom/fetch.js.map +1 -1
  78. package/out/zero-cache/src/custom-queries/transform-query.d.ts +20 -9
  79. package/out/zero-cache/src/custom-queries/transform-query.d.ts.map +1 -1
  80. package/out/zero-cache/src/custom-queries/transform-query.js +74 -37
  81. package/out/zero-cache/src/custom-queries/transform-query.js.map +1 -1
  82. package/out/zero-cache/src/db/migration-lite.d.ts.map +1 -1
  83. package/out/zero-cache/src/db/migration-lite.js +1 -1
  84. package/out/zero-cache/src/db/migration-lite.js.map +1 -1
  85. package/out/zero-cache/src/db/migration.d.ts.map +1 -1
  86. package/out/zero-cache/src/db/migration.js +1 -1
  87. package/out/zero-cache/src/db/migration.js.map +1 -1
  88. package/out/zero-cache/src/db/pg-copy-binary.d.ts +101 -0
  89. package/out/zero-cache/src/db/pg-copy-binary.d.ts.map +1 -0
  90. package/out/zero-cache/src/db/pg-copy-binary.js +381 -0
  91. package/out/zero-cache/src/db/pg-copy-binary.js.map +1 -0
  92. package/out/zero-cache/src/db/transaction-pool.d.ts.map +1 -1
  93. package/out/zero-cache/src/db/transaction-pool.js +3 -0
  94. package/out/zero-cache/src/db/transaction-pool.js.map +1 -1
  95. package/out/zero-cache/src/db/warmup.d.ts.map +1 -1
  96. package/out/zero-cache/src/db/warmup.js +3 -1
  97. package/out/zero-cache/src/db/warmup.js.map +1 -1
  98. package/out/zero-cache/src/server/anonymous-otel-start.d.ts.map +1 -1
  99. package/out/zero-cache/src/server/anonymous-otel-start.js +2 -1
  100. package/out/zero-cache/src/server/anonymous-otel-start.js.map +1 -1
  101. package/out/zero-cache/src/server/change-streamer.d.ts.map +1 -1
  102. package/out/zero-cache/src/server/change-streamer.js +5 -2
  103. package/out/zero-cache/src/server/change-streamer.js.map +1 -1
  104. package/out/zero-cache/src/server/inspector-delegate.d.ts +2 -2
  105. package/out/zero-cache/src/server/inspector-delegate.d.ts.map +1 -1
  106. package/out/zero-cache/src/server/inspector-delegate.js +4 -4
  107. package/out/zero-cache/src/server/inspector-delegate.js.map +1 -1
  108. package/out/zero-cache/src/server/main.js +1 -1
  109. package/out/zero-cache/src/server/main.js.map +1 -1
  110. package/out/zero-cache/src/server/reaper.d.ts.map +1 -1
  111. package/out/zero-cache/src/server/reaper.js +4 -1
  112. package/out/zero-cache/src/server/reaper.js.map +1 -1
  113. package/out/zero-cache/src/server/runner/run-worker.js +1 -1
  114. package/out/zero-cache/src/server/syncer.d.ts.map +1 -1
  115. package/out/zero-cache/src/server/syncer.js +41 -20
  116. package/out/zero-cache/src/server/syncer.js.map +1 -1
  117. package/out/zero-cache/src/server/worker-urls.d.ts.map +1 -1
  118. package/out/zero-cache/src/server/worker-urls.js +2 -1
  119. package/out/zero-cache/src/server/worker-urls.js.map +1 -1
  120. package/out/zero-cache/src/services/change-source/change-source.d.ts +4 -0
  121. package/out/zero-cache/src/services/change-source/change-source.d.ts.map +1 -1
  122. package/out/zero-cache/src/services/change-source/common/backfill-manager.d.ts.map +1 -1
  123. package/out/zero-cache/src/services/change-source/common/backfill-manager.js +3 -2
  124. package/out/zero-cache/src/services/change-source/common/backfill-manager.js.map +1 -1
  125. package/out/zero-cache/src/services/change-source/custom/change-source.d.ts.map +1 -1
  126. package/out/zero-cache/src/services/change-source/custom/change-source.js +5 -2
  127. package/out/zero-cache/src/services/change-source/custom/change-source.js.map +1 -1
  128. package/out/zero-cache/src/services/change-source/pg/change-source.d.ts.map +1 -1
  129. package/out/zero-cache/src/services/change-source/pg/change-source.js +13 -4
  130. package/out/zero-cache/src/services/change-source/pg/change-source.js.map +1 -1
  131. package/out/zero-cache/src/services/change-source/pg/initial-sync.d.ts +3 -1
  132. package/out/zero-cache/src/services/change-source/pg/initial-sync.d.ts.map +1 -1
  133. package/out/zero-cache/src/services/change-source/pg/initial-sync.js +91 -9
  134. package/out/zero-cache/src/services/change-source/pg/initial-sync.js.map +1 -1
  135. package/out/zero-cache/src/services/change-source/pg/schema/shard.js +2 -2
  136. package/out/zero-cache/src/services/change-source/pg/schema/shard.js.map +1 -1
  137. package/out/zero-cache/src/services/change-streamer/broadcast.js +1 -1
  138. package/out/zero-cache/src/services/change-streamer/broadcast.js.map +1 -1
  139. package/out/zero-cache/src/services/change-streamer/change-streamer-service.js +3 -0
  140. package/out/zero-cache/src/services/change-streamer/change-streamer-service.js.map +1 -1
  141. package/out/zero-cache/src/services/life-cycle.d.ts +5 -4
  142. package/out/zero-cache/src/services/life-cycle.d.ts.map +1 -1
  143. package/out/zero-cache/src/services/life-cycle.js +11 -11
  144. package/out/zero-cache/src/services/life-cycle.js.map +1 -1
  145. package/out/zero-cache/src/services/litestream/commands.d.ts.map +1 -1
  146. package/out/zero-cache/src/services/litestream/commands.js +5 -5
  147. package/out/zero-cache/src/services/litestream/commands.js.map +1 -1
  148. package/out/zero-cache/src/services/mutagen/pusher.d.ts +20 -20
  149. package/out/zero-cache/src/services/mutagen/pusher.d.ts.map +1 -1
  150. package/out/zero-cache/src/services/mutagen/pusher.js +91 -104
  151. package/out/zero-cache/src/services/mutagen/pusher.js.map +1 -1
  152. package/out/zero-cache/src/services/replicator/change-processor.js +1 -1
  153. package/out/zero-cache/src/services/replicator/change-processor.js.map +1 -1
  154. package/out/zero-cache/src/services/replicator/replication-status.js.map +1 -1
  155. package/out/zero-cache/src/services/view-syncer/client-schema.d.ts.map +1 -1
  156. package/out/zero-cache/src/services/view-syncer/client-schema.js +4 -3
  157. package/out/zero-cache/src/services/view-syncer/client-schema.js.map +1 -1
  158. package/out/zero-cache/src/services/view-syncer/connection-context-manager.d.ts +168 -0
  159. package/out/zero-cache/src/services/view-syncer/connection-context-manager.d.ts.map +1 -0
  160. package/out/zero-cache/src/services/view-syncer/connection-context-manager.js +385 -0
  161. package/out/zero-cache/src/services/view-syncer/connection-context-manager.js.map +1 -0
  162. package/out/zero-cache/src/services/view-syncer/cvr-store.js +2 -2
  163. package/out/zero-cache/src/services/view-syncer/cvr-store.js.map +1 -1
  164. package/out/zero-cache/src/services/view-syncer/cvr.d.ts.map +1 -1
  165. package/out/zero-cache/src/services/view-syncer/cvr.js +5 -4
  166. package/out/zero-cache/src/services/view-syncer/cvr.js.map +1 -1
  167. package/out/zero-cache/src/services/view-syncer/inspect-handler.d.ts +2 -3
  168. package/out/zero-cache/src/services/view-syncer/inspect-handler.d.ts.map +1 -1
  169. package/out/zero-cache/src/services/view-syncer/inspect-handler.js +3 -3
  170. package/out/zero-cache/src/services/view-syncer/inspect-handler.js.map +1 -1
  171. package/out/zero-cache/src/services/view-syncer/pipeline-driver.d.ts.map +1 -1
  172. package/out/zero-cache/src/services/view-syncer/pipeline-driver.js +5 -3
  173. package/out/zero-cache/src/services/view-syncer/pipeline-driver.js.map +1 -1
  174. package/out/zero-cache/src/services/view-syncer/row-record-cache.d.ts.map +1 -1
  175. package/out/zero-cache/src/services/view-syncer/row-record-cache.js +13 -7
  176. package/out/zero-cache/src/services/view-syncer/row-record-cache.js.map +1 -1
  177. package/out/zero-cache/src/services/view-syncer/snapshotter.d.ts +3 -1
  178. package/out/zero-cache/src/services/view-syncer/snapshotter.d.ts.map +1 -1
  179. package/out/zero-cache/src/services/view-syncer/snapshotter.js +6 -9
  180. package/out/zero-cache/src/services/view-syncer/snapshotter.js.map +1 -1
  181. package/out/zero-cache/src/services/view-syncer/view-syncer.d.ts +24 -26
  182. package/out/zero-cache/src/services/view-syncer/view-syncer.d.ts.map +1 -1
  183. package/out/zero-cache/src/services/view-syncer/view-syncer.js +236 -124
  184. package/out/zero-cache/src/services/view-syncer/view-syncer.js.map +1 -1
  185. package/out/zero-cache/src/types/lite.d.ts.map +1 -1
  186. package/out/zero-cache/src/types/lite.js +3 -2
  187. package/out/zero-cache/src/types/lite.js.map +1 -1
  188. package/out/zero-cache/src/types/pg-types.js +4 -1
  189. package/out/zero-cache/src/types/pg-types.js.map +1 -1
  190. package/out/zero-cache/src/types/pg-versions.d.ts +3 -0
  191. package/out/zero-cache/src/types/pg-versions.d.ts.map +1 -0
  192. package/out/zero-cache/src/types/pg-versions.js +7 -0
  193. package/out/zero-cache/src/types/pg-versions.js.map +1 -0
  194. package/out/zero-cache/src/types/pg.d.ts.map +1 -1
  195. package/out/zero-cache/src/types/pg.js +6 -1
  196. package/out/zero-cache/src/types/pg.js.map +1 -1
  197. package/out/zero-cache/src/types/subscription.d.ts.map +1 -1
  198. package/out/zero-cache/src/types/subscription.js +2 -2
  199. package/out/zero-cache/src/types/subscription.js.map +1 -1
  200. package/out/zero-cache/src/workers/connect-params.d.ts +1 -1
  201. package/out/zero-cache/src/workers/connect-params.d.ts.map +1 -1
  202. package/out/zero-cache/src/workers/connect-params.js +1 -1
  203. package/out/zero-cache/src/workers/connect-params.js.map +1 -1
  204. package/out/zero-cache/src/workers/connection.js +2 -2
  205. package/out/zero-cache/src/workers/syncer-ws-message-handler.d.ts +2 -1
  206. package/out/zero-cache/src/workers/syncer-ws-message-handler.d.ts.map +1 -1
  207. package/out/zero-cache/src/workers/syncer-ws-message-handler.js +64 -38
  208. package/out/zero-cache/src/workers/syncer-ws-message-handler.js.map +1 -1
  209. package/out/zero-cache/src/workers/syncer.d.ts +2 -1
  210. package/out/zero-cache/src/workers/syncer.d.ts.map +1 -1
  211. package/out/zero-cache/src/workers/syncer.js +70 -31
  212. package/out/zero-cache/src/workers/syncer.js.map +1 -1
  213. package/out/zero-client/src/client/connection.d.ts +4 -4
  214. package/out/zero-client/src/client/connection.d.ts.map +1 -1
  215. package/out/zero-client/src/client/connection.js.map +1 -1
  216. package/out/zero-client/src/client/http-string.d.ts.map +1 -1
  217. package/out/zero-client/src/client/http-string.js.map +1 -1
  218. package/out/zero-client/src/client/metrics.d.ts.map +1 -1
  219. package/out/zero-client/src/client/metrics.js +2 -1
  220. package/out/zero-client/src/client/metrics.js.map +1 -1
  221. package/out/zero-client/src/client/options.d.ts +34 -5
  222. package/out/zero-client/src/client/options.d.ts.map +1 -1
  223. package/out/zero-client/src/client/options.js.map +1 -1
  224. package/out/zero-client/src/client/server-option.js +1 -1
  225. package/out/zero-client/src/client/server-option.js.map +1 -1
  226. package/out/zero-client/src/client/version.js +1 -1
  227. package/out/zero-client/src/client/zero-poke-handler.d.ts.map +1 -1
  228. package/out/zero-client/src/client/zero-poke-handler.js +1 -1
  229. package/out/zero-client/src/client/zero-poke-handler.js.map +1 -1
  230. package/out/zero-client/src/client/zero.d.ts +4 -3
  231. package/out/zero-client/src/client/zero.d.ts.map +1 -1
  232. package/out/zero-client/src/client/zero.js +33 -11
  233. package/out/zero-client/src/client/zero.js.map +1 -1
  234. package/out/zero-pg/src/mod.js +1 -1
  235. package/out/zero-protocol/src/ast.d.ts.map +1 -1
  236. package/out/zero-protocol/src/ast.js.map +1 -1
  237. package/out/zero-protocol/src/change-desired-queries.d.ts +4 -0
  238. package/out/zero-protocol/src/change-desired-queries.d.ts.map +1 -1
  239. package/out/zero-protocol/src/change-desired-queries.js +4 -1
  240. package/out/zero-protocol/src/change-desired-queries.js.map +1 -1
  241. package/out/zero-protocol/src/connect.d.ts +4 -0
  242. package/out/zero-protocol/src/connect.d.ts.map +1 -1
  243. package/out/zero-protocol/src/connect.js +2 -1
  244. package/out/zero-protocol/src/connect.js.map +1 -1
  245. package/out/zero-protocol/src/primary-key.d.ts.map +1 -1
  246. package/out/zero-protocol/src/primary-key.js.map +1 -1
  247. package/out/zero-protocol/src/protocol-version.d.ts +1 -1
  248. package/out/zero-protocol/src/protocol-version.d.ts.map +1 -1
  249. package/out/zero-protocol/src/protocol-version.js.map +1 -1
  250. package/out/zero-protocol/src/push.d.ts +4 -0
  251. package/out/zero-protocol/src/push.d.ts.map +1 -1
  252. package/out/zero-protocol/src/push.js +2 -1
  253. package/out/zero-protocol/src/push.js.map +1 -1
  254. package/out/zero-protocol/src/up.d.ts +3 -0
  255. package/out/zero-protocol/src/up.d.ts.map +1 -1
  256. package/out/zero-react/src/zero-provider.d.ts.map +1 -1
  257. package/out/zero-react/src/zero-provider.js +11 -5
  258. package/out/zero-react/src/zero-provider.js.map +1 -1
  259. package/out/zero-schema/src/name-mapper.js +1 -1
  260. package/out/zero-schema/src/name-mapper.js.map +1 -1
  261. package/out/zero-server/src/mod.js +1 -1
  262. package/out/zero-server/src/process-mutations.d.ts.map +1 -1
  263. package/out/zero-server/src/process-mutations.js +2 -1
  264. package/out/zero-server/src/process-mutations.js.map +1 -1
  265. package/out/zero-server/src/push-processor.d.ts +1 -0
  266. package/out/zero-server/src/push-processor.d.ts.map +1 -1
  267. package/out/zero-server/src/push-processor.js +3 -2
  268. package/out/zero-server/src/push-processor.js.map +1 -1
  269. package/out/zero-solid/src/use-zero.d.ts.map +1 -1
  270. package/out/zero-solid/src/use-zero.js +8 -9
  271. package/out/zero-solid/src/use-zero.js.map +1 -1
  272. package/out/zql/src/builder/like.js +2 -1
  273. package/out/zql/src/builder/like.js.map +1 -1
  274. package/out/zql/src/ivm/data.d.ts.map +1 -1
  275. package/out/zql/src/ivm/data.js +6 -15
  276. package/out/zql/src/ivm/data.js.map +1 -1
  277. package/out/zql/src/ivm/memory-source.d.ts.map +1 -1
  278. package/out/zql/src/ivm/memory-source.js +14 -8
  279. package/out/zql/src/ivm/memory-source.js.map +1 -1
  280. package/out/zql/src/query/complete-ordering.js +1 -1
  281. package/out/zql/src/query/complete-ordering.js.map +1 -1
  282. package/out/zql/src/query/query-impl.d.ts.map +1 -1
  283. package/out/zql/src/query/query-impl.js +2 -2
  284. package/out/zql/src/query/query-impl.js.map +1 -1
  285. package/out/zql/src/query/query-registry.d.ts.map +1 -1
  286. package/out/zql/src/query/query-registry.js +2 -1
  287. package/out/zql/src/query/query-registry.js.map +1 -1
  288. package/out/zql/src/query/ttl.js +1 -1
  289. package/out/zql/src/query/ttl.js.map +1 -1
  290. package/out/zqlite/src/internal/sql.d.ts +2 -2
  291. package/out/zqlite/src/internal/sql.d.ts.map +1 -1
  292. package/out/zqlite/src/internal/sql.js +1 -2
  293. package/out/zqlite/src/internal/sql.js.map +1 -1
  294. package/out/zqlite/src/sqlite-cost-model.d.ts +1 -1
  295. package/out/zqlite/src/sqlite-cost-model.d.ts.map +1 -1
  296. package/out/zqlite/src/sqlite-cost-model.js +1 -1
  297. package/out/zqlite/src/sqlite-cost-model.js.map +1 -1
  298. package/out/zqlite/src/sqlite-stat-fanout.js +1 -1
  299. package/out/zqlite/src/sqlite-stat-fanout.js.map +1 -1
  300. package/out/zqlite/src/table-source.d.ts.map +1 -1
  301. package/out/zqlite/src/table-source.js +8 -12
  302. package/out/zqlite/src/table-source.js.map +1 -1
  303. package/package.json +6 -7
@@ -0,0 +1,381 @@
1
+ import { stringify } from "../../../shared/src/bigint-json.js";
2
+ import { BPCHAR, DATE, JSONB, NUMERIC, TIME, TIMESTAMP, TIMESTAMPTZ, TIMETZ, UUID, VARCHAR } from "../types/pg-types.js";
3
//#region ../zero-cache/src/db/pg-copy-binary.ts
// COPY binary header signature: "PGCOPY\n\xff\r\n\0".
const PGCOPY_SIGNATURE = Buffer.from([
  0x50, 0x47, 0x43, 0x4f, 0x50, 0x59, 0x0a, 0xff, 0x0d, 0x0a, 0x00,
]);
// signature (11 bytes) + flags (int32) + header-extension length (int32).
const HEADER_MIN_SIZE = 19;
// Unix-epoch milliseconds at the PG epoch (2000-01-01T00:00:00Z).
const PG_EPOCH_UNIX_MILLIS = 946684800000;
// Days from the Unix epoch (1970-01-01) to the PG epoch (2000-01-01).
const PG_EPOCH_UNIX_DAYS = 10957;
const MS_PER_DAY = 86400000;
// int64 hi/lo sentinel words for timestamp +/- infinity.
const PG_TIMESTAMP_INF_HI = 0x7fffffff;
const PG_TIMESTAMP_INF_LO = 0xffffffff;
const PG_TIMESTAMP_NEG_INF_HI = -0x80000000;
const PG_TIMESTAMP_NEG_INF_LO = 0;
// int32 sentinels for date +/- infinity.
const PG_DATE_INFINITY = 0x7fffffff;
const PG_DATE_NEG_INFINITY = -0x80000000;
/**
 * Streaming parser for PostgreSQL `COPY ... TO STDOUT WITH (FORMAT binary)`.
 *
 * Analogous to {@link import('./pg-copy.ts').TsvParser} but for binary format.
 * Yields `Buffer | null` per field (null = SQL NULL). Partial frames are
 * buffered across `parse()` calls; the caller tracks column position the
 * same way as with TsvParser.
 */
class BinaryCopyParser {
  #buf = Buffer.alloc(0);
  #pos = 0;
  #sawHeader = false;
  #pendingFields = 0;

  /**
   * Feeds one chunk of the COPY stream and yields every complete field
   * now available. A field is a `Buffer` view into the internal buffer
   * (valid until the next `parse()` call) or `null` for SQL NULL.
   */
  *parse(chunk) {
    this.#append(chunk);
    if (!this.#sawHeader && !this.#tryParseHeader()) {
      return; // header incomplete; wait for more bytes
    }
    for (;;) {
      if (this.#pendingFields === 0) {
        // Need the int16 per-tuple field count.
        if (this.#available() < 2) break;
        const nFields = this.#buf.readInt16BE(this.#pos);
        if (nFields === -1) break; // stream trailer reached
        this.#pos += 2;
        this.#pendingFields = nFields;
      }
      while (this.#pendingFields > 0) {
        if (this.#available() < 4) {
          this.#compact();
          return;
        }
        const len = this.#buf.readInt32BE(this.#pos);
        this.#pos += 4;
        if (len === -1) {
          yield null; // SQL NULL
        } else if (this.#available() < len) {
          // Field data incomplete: rewind past the length prefix and wait.
          this.#pos -= 4;
          this.#compact();
          return;
        } else {
          yield this.#buf.subarray(this.#pos, this.#pos + len);
          this.#pos += len;
        }
        this.#pendingFields--;
      }
    }
    this.#compact();
  }

  // Bytes buffered but not yet consumed.
  #available() {
    return this.#buf.length - this.#pos;
  }

  // Adds a chunk, adopting it outright when the old buffer is fully consumed.
  #append(chunk) {
    if (this.#buf.length === this.#pos) {
      this.#buf = chunk;
    } else {
      this.#buf = Buffer.concat([this.#buf.subarray(this.#pos), chunk]);
    }
    this.#pos = 0;
  }

  // Drops consumed bytes so retained subarrays stay small.
  #compact() {
    if (this.#pos === 0) return;
    this.#buf = this.#buf.subarray(this.#pos);
    this.#pos = 0;
  }

  // Consumes the fixed header (and any extension area). Returns false and
  // leaves the buffer untouched when more bytes are needed.
  #tryParseHeader() {
    if (this.#available() < HEADER_MIN_SIZE) return false;
    for (let i = 0; i < PGCOPY_SIGNATURE.length; i++) {
      if (this.#buf[this.#pos + i] !== PGCOPY_SIGNATURE[i]) {
        throw new Error("Invalid PGCOPY binary signature");
      }
    }
    this.#pos += 11;
    const flags = this.#buf.readInt32BE(this.#pos);
    this.#pos += 4;
    if (flags !== 0) throw new Error(`Unsupported PGCOPY flags: ${flags}`);
    const extLen = this.#buf.readInt32BE(this.#pos);
    this.#pos += 4;
    if (extLen > 0) {
      if (this.#available() < extLen) {
        this.#pos -= HEADER_MIN_SIZE; // rewind; retry when more arrives
        return false;
      }
      this.#pos += extLen;
    }
    this.#sawHeader = true;
    return true;
  }
}
113
/** Type OIDs whose COPY binary representation can be decoded directly. */
const KNOWN_BINARY_OIDS = new Set([
  16, // bool
  21, // int2
  23, // int4
  20, // int8
  700, // float4
  701, // float8
  25, // text
  VARCHAR,
  BPCHAR,
  18, // "char"
  UUID,
  17, // bytea
  114, // json
  JSONB,
  TIMESTAMP,
  TIMESTAMPTZ,
  DATE,
  TIME,
  TIMETZ,
  NUMERIC,
]);
135
/**
 * Returns true when the column's binary format is known and can be decoded
 * natively. For columns where this is false, the COPY SELECT should cast
 * the column to `::text` so PG sends the text representation inside the
 * binary frame.
 */
function hasBinaryDecoder(spec) {
  if (spec.elemPgTypeClass != null) return true; // array columns
  if (spec.pgTypeClass === "e") return true; // enums arrive as label text
  return KNOWN_BINARY_OIDS.has(spec.typeOID);
}
146
/** Decoder for columns cast to `::text` in the COPY SELECT. */
function textCastDecoder(buf) {
  return buf.toString("utf8");
}
148
/**
 * Creates a specialized binary decoder for the given column spec. The
 * returned function converts a raw COPY binary field `Buffer` directly to
 * a `LiteValueType`, bypassing text parsing entirely.
 *
 * Only call this for columns where {@link hasBinaryDecoder} returns true.
 * For other columns, cast to `::text` in the SELECT and use
 * {@link textCastDecoder}.
 */
function makeBinaryDecoder(spec) {
  const {typeOID, pgTypeClass, elemPgTypeClass} = spec;
  const utf8 = (buf) => buf.toString("utf8");
  if (elemPgTypeClass != null) {
    return (buf) => decodeArray(buf);
  }
  if (pgTypeClass === "e") return utf8; // enum labels are plain text
  switch (typeOID) {
    case 16: // bool → 0|1 (SQLite-friendly)
      return (buf) => (buf[0] ? 1 : 0);
    case 21:
      return (buf) => buf.readInt16BE(0);
    case 23:
      return (buf) => buf.readInt32BE(0);
    case 20:
      return (buf) => buf.readBigInt64BE(0);
    case 700:
      return (buf) => buf.readFloatBE(0);
    case 701:
      return (buf) => buf.readDoubleBE(0);
    case 25:
    case VARCHAR:
    case BPCHAR:
    case 18:
      return utf8;
    case UUID:
      return (buf) => decodeUUID(buf);
    case 17: // bytea → detached Uint8Array copy
      return (buf) => Uint8Array.prototype.slice.call(buf);
    case 114: // json is stored as its text
      return utf8;
    case JSONB: // strip the leading jsonb version byte
      return (buf) => buf.toString("utf8", 1);
    case TIMESTAMP:
    case TIMESTAMPTZ:
      return (buf) => decodeTimestamp(buf);
    case DATE:
      return (buf) => decodeDate(buf);
    case TIME:
      return (buf) => decodeTime(buf);
    case TIMETZ:
      return (buf) => decodeTimeTZ(buf);
    case NUMERIC:
      return (buf) => decodeNumeric(buf);
    default:
      throw new Error(`No binary decoder for type OID ${typeOID}. Use hasBinaryDecoder() to check before calling makeBinaryDecoder().`);
  }
}
185
/**
 * UUID: 16 raw bytes → canonical "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx".
 */
function decodeUUID(buf) {
  const hex = buf.toString("hex");
  return `${hex.slice(0, 8)}-${hex.slice(8, 12)}-${hex.slice(12, 16)}-${hex.slice(16, 20)}-${hex.slice(20, 32)}`;
}
192
/**
 * TIMESTAMP / TIMESTAMPTZ: int64 microseconds since the PG epoch
 * (2000-01-01 UTC) → floating-point milliseconds since the Unix epoch.
 *
 * Matches the output of `timestampToFpMillis()` in `types/pg.ts`.
 *
 * Number (not BigInt) arithmetic is used for speed; the microsecond count
 * stays within safe-integer range for all practical dates (to ~year 285,000).
 */
function decodeTimestamp(buf) {
  const hi = buf.readInt32BE(0);
  const lo = buf.readUInt32BE(4);
  if (hi === PG_TIMESTAMP_INF_HI && lo === PG_TIMESTAMP_INF_LO) {
    return Infinity;
  }
  if (hi === PG_TIMESTAMP_NEG_INF_HI && lo === PG_TIMESTAMP_NEG_INF_LO) {
    return -Infinity;
  }
  const micros = hi * 4294967296 + lo;
  return micros / 1000 + PG_EPOCH_UNIX_MILLIS;
}
208
/**
 * DATE: int32 days since the PG epoch (2000-01-01) → millis since the Unix
 * epoch at UTC midnight. Matches `dateToUTCMidnight()` in `types/pg.ts`.
 */
function decodeDate(buf) {
  const pgDays = buf.readInt32BE(0);
  switch (pgDays) {
    case PG_DATE_INFINITY:
      return Infinity;
    case PG_DATE_NEG_INFINITY:
      return -Infinity;
    default:
      return (pgDays + PG_EPOCH_UNIX_DAYS) * MS_PER_DAY;
  }
}
218
/**
 * TIME: int64 microseconds since midnight → milliseconds since midnight.
 * Matches `postgresTimeToMilliseconds()` in `types/pg.ts`.
 *
 * Max value is 86,400,000,000 (~8.6e10), well within Number.MAX_SAFE_INTEGER,
 * so plain Number arithmetic on the hi/lo words is exact.
 */
function decodeTime(buf) {
  const micros = buf.readInt32BE(0) * 4294967296 + buf.readUInt32BE(4);
  return Math.trunc(micros / 1000);
}
230
/**
 * TIMETZ: int64 microseconds since midnight + int32 timezone offset in
 * seconds. PG stores the offset with inverted sign from ISO (POSIX
 * convention): positive = west of UTC, negative = east of UTC, so
 * UTC = local_time + pg_offset. Result is UTC milliseconds since midnight,
 * wrapped into [0, MS_PER_DAY).
 *
 * Max magnitude ~1.3e11 microseconds, well within Number.MAX_SAFE_INTEGER.
 */
function decodeTimeTZ(buf) {
  const localMicros = buf.readInt32BE(0) * 4294967296 + buf.readUInt32BE(4);
  const utcMicros = localMicros + buf.readInt32BE(8) * 1e6;
  let millis = Math.trunc(utcMicros / 1000);
  if (millis < 0 || millis >= MS_PER_DAY) {
    millis = ((millis % MS_PER_DAY) + MS_PER_DAY) % MS_PER_DAY;
  }
  return millis;
}
247
// Sign-word values of the NUMERIC binary header (from PG's numeric.c).
const NUMERIC_NEG = 0x4000;
const NUMERIC_NAN = 0xc000;
const NUMERIC_PINF = 0xd000;
const NUMERIC_NINF = 0xf000;
// NUMERIC digit groups are stored in base 10000.
const NBASE = 10000;
252
/**
 * NUMERIC: variable-length binary format.
 * Header: {ndigits: int16, weight: int16, sign: int16, dscale: int16}
 * followed by ndigits x int16 base-10000 digit groups.
 *
 * Converts to a JS `number` (matching the text path's `Number(x)` behavior).
 */
function decodeNumeric(buf) {
  const ndigits = buf.readInt16BE(0);
  const weight = buf.readInt16BE(2);
  const sign = buf.readUInt16BE(4);
  switch (sign) {
    case NUMERIC_NAN:
      return NaN;
    case NUMERIC_PINF:
      return Infinity;
    case NUMERIC_NINF:
      return -Infinity;
  }
  if (ndigits === 0) return 0;
  if (ndigits > 3) {
    // Too many groups to accumulate exactly in a double; go via string.
    return decodeNumericViaString(buf, ndigits, weight, sign);
  }
  let mantissa = 0;
  for (let i = 0; i < ndigits; i++) {
    mantissa = mantissa * NBASE + buf.readInt16BE(8 + i * 2);
  }
  // Scale the mantissa so the first group lands at base-10000 exponent `weight`.
  const shift = ndigits - weight - 1;
  let value = mantissa;
  if (shift > 0) value = mantissa / NBASE ** shift;
  else if (shift < 0) value = mantissa * NBASE ** -shift;
  return sign === NUMERIC_NEG ? -value : value;
}
277
/**
 * Fallback for numerics with many base-10000 digit groups, where
 * accumulating into a single integer would exceed MAX_SAFE_INTEGER.
 * Builds the decimal string and uses Number() to match the text path.
 *
 * Fix: the original mishandled negative `weight` (pure fractions). With
 * `intGroups <= 0` the decimal point was inserted at the wrong group (or
 * not at all for weight < -1) and the first stored group lost its
 * leading-zero padding, e.g. 0.00011234 decoded incorrectly.
 *
 * @param buf     raw NUMERIC field; digit groups start at byte offset 8
 * @param ndigits number of stored int16 base-10000 groups
 * @param weight  base-10000 exponent of the first stored group
 * @param sign    PG sign word (0x4000 = negative)
 * @returns the value as a JS number
 */
function decodeNumericViaString(buf, ndigits, weight, sign) {
  // Sign word for negative numerics; mirrors the NUMERIC_NEG constant.
  const NEG_SIGN = 0x4000;
  const intGroups = weight + 1; // groups left of the decimal point
  let str;
  if (intGroups <= 0) {
    // Pure fraction: "0.", then any implicit all-zero groups between the
    // decimal point and the first stored group, then every stored group
    // zero-padded to 4 digits.
    str = "0." + "0000".repeat(-intGroups);
    for (let i = 0; i < ndigits; i++) {
      str += String(buf.readInt16BE(8 + i * 2)).padStart(4, "0");
    }
  } else {
    str = "";
    for (let i = 0; i < ndigits; i++) {
      const digit = buf.readInt16BE(8 + i * 2);
      if (i === intGroups) str += "."; // crossed into fractional groups
      // Only the leading group may drop its leading zeros.
      str += i === 0 ? String(digit) : String(digit).padStart(4, "0");
    }
    // Implicit trailing zero groups for large integers (weight >= ndigits).
    if (intGroups > ndigits) str += "0".repeat((intGroups - ndigits) * 4);
  }
  return Number((sign === NEG_SIGN ? "-" : "") + str);
}
296
/**
 * Array: binary format.
 *
 * Header:
 *   int32 ndim     — number of dimensions (0 for an empty array)
 *   int32 flags    — 0 or 1 (has-nulls)
 *   int32 elem_oid — OID of the element type
 * Per dimension:
 *   int32 dim_size — number of elements in this dimension
 *   int32 dim_lb   — lower bound (usually 1; ignored here)
 * Then for each element in row-major order:
 *   int32 length   — -1 for NULL, otherwise byte length
 *   bytes          — element data
 *
 * The result is JSON-stringified for storage in SQLite (matching the
 * text path's behavior).
 */
function decodeArray(buf) {
  let pos = 0;
  const ndim = buf.readInt32BE(pos);
  pos += 8; // consume ndim; skip the has-nulls flag
  const elemOid = buf.readInt32BE(pos);
  pos += 4;
  if (ndim === 0) return "[]";
  const dims = [];
  for (let d = 0; d < ndim; d++) {
    dims.push(buf.readInt32BE(pos));
    pos += 8; // consume dim_size; skip dim_lb
  }
  const decodeElem = makeElementDecoder(elemOid);
  const readDim = (level) => {
    const out = [];
    const count = dims[level];
    if (level < ndim - 1) {
      for (let i = 0; i < count; i++) out.push(readDim(level + 1));
    } else {
      for (let i = 0; i < count; i++) {
        const len = buf.readInt32BE(pos);
        pos += 4;
        if (len === -1) {
          out.push(null);
        } else {
          out.push(decodeElem(buf.subarray(pos, pos + len)));
          pos += len;
        }
      }
    }
    return out;
  };
  return stringify(readDim(0));
}
345
/**
 * Returns a decoder function for array elements of the given type OID.
 * Elements share the binary encoding of scalar columns; the result is a
 * plain JS value (not LiteValueType) since it is destined for JSON
 * serialization. Unknown OIDs fall back to UTF-8 text.
 */
function makeElementDecoder(elemOid) {
  switch (elemOid) {
    case 16: // bool: single byte, nonzero = true
      return (buf) => Boolean(buf[0]);
    case 21: // int2
      return (buf) => buf.readInt16BE(0);
    case 23: // int4
      return (buf) => buf.readInt32BE(0);
    case 20: // int8: number when it fits safely, bigint otherwise
      return (buf) => {
        const v = buf.readBigInt64BE(0);
        const fits =
          v >= Number.MIN_SAFE_INTEGER && v <= Number.MAX_SAFE_INTEGER;
        return fits ? Number(v) : v;
      };
    case 700: // float4
      return (buf) => buf.readFloatBE(0);
    case 701: // float8
      return (buf) => buf.readDoubleBE(0);
    case 25: // text
    case VARCHAR:
    case BPCHAR:
    case 18: // "char"
      return (buf) => buf.toString("utf8");
    case UUID:
      return (buf) => decodeUUID(buf);
    case 114: // json
      return (buf) => JSON.parse(buf.toString("utf8"));
    case JSONB: // a 1-byte version prefix precedes the JSON text
      return (buf) => JSON.parse(buf.toString("utf8", 1));
    case TIMESTAMP:
    case TIMESTAMPTZ:
      return (buf) => decodeTimestamp(buf);
    case DATE:
      return (buf) => decodeDate(buf);
    case TIME:
      return (buf) => decodeTime(buf);
    case TIMETZ:
      return (buf) => decodeTimeTZ(buf);
    case NUMERIC:
      return (buf) => decodeNumeric(buf);
    default:
      // Unknown element type: hand back the raw bytes as UTF-8 text.
      return (buf) => buf.toString("utf8");
  }
}
378
+ //#endregion
379
+ export { BinaryCopyParser, hasBinaryDecoder, makeBinaryDecoder, textCastDecoder };
380
+
381
+ //# sourceMappingURL=pg-copy-binary.js.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"pg-copy-binary.js","names":["#append","#headerParsed","#tryParseHeader","#fieldsRemaining","#remaining","#buffer","#offset","#compact"],"sources":["../../../../../zero-cache/src/db/pg-copy-binary.ts"],"sourcesContent":["import {stringify} from '../../../shared/src/bigint-json.ts';\nimport type {LiteValueType} from '../types/lite.ts';\nimport {\n BOOL,\n BPCHAR,\n BYTEA,\n CHAR,\n DATE,\n FLOAT4,\n FLOAT8,\n INT2,\n INT4,\n INT8,\n JSONB,\n NUMERIC,\n TEXT,\n TIME,\n TIMESTAMP,\n TIMESTAMPTZ,\n TIMETZ,\n UUID,\n VARCHAR,\n} from '../types/pg-types.ts';\nimport {JSON as JSON_OID} from '../types/pg-types.ts';\nimport * as PostgresTypeClass from './postgres-type-class-enum.ts';\nimport type {ColumnSpec} from './specs.ts';\n\n// PostgreSQL COPY binary format signature: \"PGCOPY\\n\\xff\\r\\n\\0\"\nconst PGCOPY_SIGNATURE = Buffer.from([\n 0x50, 0x47, 0x43, 0x4f, 0x50, 0x59, 0x0a, 0xff, 0x0d, 0x0a, 0x00,\n]);\n\nconst HEADER_MIN_SIZE = 11 + 4 + 4; // signature + flags + extension length\n\n// PostgreSQL epoch is 2000-01-01T00:00:00Z.\n// Offset from Unix epoch (1970-01-01) in milliseconds.\nconst PG_EPOCH_UNIX_MILLIS = 946_684_800_000;\n\n// Days from Unix epoch (1970-01-01) to PG epoch (2000-01-01).\nconst PG_EPOCH_UNIX_DAYS = 10_957;\n\nconst MS_PER_DAY = 86_400_000;\n\n// Sentinel values for infinity in PG binary format (as hi/lo int32 pairs).\nconst PG_TIMESTAMP_INF_HI = 0x7fffffff;\nconst PG_TIMESTAMP_INF_LO = 0xffffffff;\nconst PG_TIMESTAMP_NEG_INF_HI = -0x80000000; // readInt32BE of 0x80000000\nconst PG_TIMESTAMP_NEG_INF_LO = 0;\nconst PG_DATE_INFINITY = 0x7fffffff;\nconst PG_DATE_NEG_INFINITY = -0x80000000;\n\n/**\n * Streaming parser for PostgreSQL `COPY ... 
TO STDOUT WITH (FORMAT binary)`.\n *\n * Analogous to {@link import('./pg-copy.ts').TsvParser} but for binary format.\n * Yields `Buffer | null` per field (null = SQL NULL).\n *\n * The caller tracks column position the same way as with TsvParser.\n */\nexport class BinaryCopyParser {\n #buffer: Buffer = Buffer.alloc(0);\n #offset = 0;\n #headerParsed = false;\n #fieldsRemaining = 0; // fields left in current tuple (0 = need new tuple header)\n\n *parse(chunk: Buffer): Iterable<Buffer | null> {\n this.#append(chunk);\n\n if (!this.#headerParsed) {\n if (!this.#tryParseHeader()) {\n return;\n }\n }\n\n for (;;) {\n // If we're at the start of a tuple, read the field count.\n if (this.#fieldsRemaining === 0) {\n if (this.#remaining() < 2) {\n break;\n }\n const fieldCount = this.#buffer.readInt16BE(this.#offset);\n if (fieldCount === -1) {\n // Trailer marker — end of data.\n break;\n }\n this.#offset += 2;\n this.#fieldsRemaining = fieldCount;\n }\n\n // Parse fields within the current tuple.\n while (this.#fieldsRemaining > 0) {\n if (this.#remaining() < 4) {\n // Not enough data for field length — wait for next chunk.\n this.#compact();\n return;\n }\n const fieldLen = this.#buffer.readInt32BE(this.#offset);\n this.#offset += 4;\n\n if (fieldLen === -1) {\n // NULL field.\n yield null;\n } else {\n if (this.#remaining() < fieldLen) {\n // Not enough data for field value — rewind past the length\n // we just read and wait for more data.\n this.#offset -= 4;\n this.#compact();\n return;\n }\n yield this.#buffer.subarray(this.#offset, this.#offset + fieldLen);\n this.#offset += fieldLen;\n }\n this.#fieldsRemaining--;\n }\n }\n\n this.#compact();\n }\n\n #remaining(): number {\n return this.#buffer.length - this.#offset;\n }\n\n #append(chunk: Buffer): void {\n if (this.#buffer.length === this.#offset) {\n // Fully consumed — replace.\n this.#buffer = chunk;\n this.#offset = 0;\n } else {\n // Concatenate unconsumed remainder with new chunk.\n this.#buffer = 
Buffer.concat([\n this.#buffer.subarray(this.#offset),\n chunk,\n ]);\n this.#offset = 0;\n }\n }\n\n #compact(): void {\n if (this.#offset > 0) {\n this.#buffer = this.#buffer.subarray(this.#offset);\n this.#offset = 0;\n }\n }\n\n #tryParseHeader(): boolean {\n if (this.#remaining() < HEADER_MIN_SIZE) {\n return false;\n }\n\n // Validate signature.\n for (let i = 0; i < PGCOPY_SIGNATURE.length; i++) {\n if (this.#buffer[this.#offset + i] !== PGCOPY_SIGNATURE[i]) {\n throw new Error('Invalid PGCOPY binary signature');\n }\n }\n this.#offset += 11;\n\n // Flags (int32) — currently only bit 16 (has OID column) is defined.\n // We don't use OID columns, so just skip.\n const flags = this.#buffer.readInt32BE(this.#offset);\n this.#offset += 4;\n if (flags !== 0) {\n throw new Error(`Unsupported PGCOPY flags: ${flags}`);\n }\n\n // Extension area length (int32).\n const extensionLen = this.#buffer.readInt32BE(this.#offset);\n this.#offset += 4;\n\n // Skip extension data if present.\n if (extensionLen > 0) {\n if (this.#remaining() < extensionLen) {\n // Rewind and wait for more data.\n this.#offset -= HEADER_MIN_SIZE;\n return false;\n }\n this.#offset += extensionLen;\n }\n\n this.#headerParsed = true;\n return true;\n }\n}\n\n// ---- Binary Type Decoders ----\n\nexport type BinaryDecoder = (buf: Buffer) => LiteValueType;\n\ntype BinaryColumnSpec = Pick<\n ColumnSpec,\n 'dataType' | 'pgTypeClass' | 'elemPgTypeClass'\n> & {typeOID: number};\n\nconst KNOWN_BINARY_OIDS = new Set([\n BOOL,\n INT2,\n INT4,\n INT8,\n FLOAT4,\n FLOAT8,\n TEXT,\n VARCHAR,\n BPCHAR,\n CHAR,\n UUID,\n BYTEA,\n JSON_OID,\n JSONB,\n TIMESTAMP,\n TIMESTAMPTZ,\n DATE,\n TIME,\n TIMETZ,\n NUMERIC,\n]);\n\n/**\n * Returns true if the column's binary format is known and can be decoded\n * natively. 
For columns where this returns false, the COPY SELECT should\n * cast the column to `::text` so PG sends the text representation inside\n * the binary frame.\n */\nexport function hasBinaryDecoder(spec: BinaryColumnSpec): boolean {\n if (spec.elemPgTypeClass !== null && spec.elemPgTypeClass !== undefined) {\n return true; // Array types\n }\n if (spec.pgTypeClass === PostgresTypeClass.Enum) {\n return true; // Enums are sent as UTF-8 text in binary format\n }\n return KNOWN_BINARY_OIDS.has(spec.typeOID);\n}\n\n/** Decoder for columns cast to `::text` in the COPY SELECT. */\nexport const textCastDecoder: BinaryDecoder = buf => buf.toString('utf8');\n\n/**\n * Creates a specialized binary decoder for the given column spec.\n * The returned function converts a raw COPY binary field `Buffer`\n * directly to a `LiteValueType`, bypassing text parsing entirely.\n *\n * Only call this for columns where {@link hasBinaryDecoder} returns true.\n * For other columns, cast to `::text` in the SELECT and use\n * {@link textCastDecoder}.\n */\nexport function makeBinaryDecoder(spec: BinaryColumnSpec): BinaryDecoder {\n const {typeOID, pgTypeClass, elemPgTypeClass} = spec;\n\n // Array types: elemPgTypeClass is non-null for arrays.\n if (elemPgTypeClass !== null && elemPgTypeClass !== undefined) {\n return buf => decodeArray(buf);\n }\n\n // Enum types: binary representation is UTF-8 text.\n if (pgTypeClass === PostgresTypeClass.Enum) {\n return buf => buf.toString('utf8');\n }\n\n switch (typeOID) {\n case BOOL:\n return buf => (buf[0] ? 
1 : 0);\n case INT2:\n return buf => buf.readInt16BE(0);\n case INT4:\n return buf => buf.readInt32BE(0);\n case INT8:\n return buf => buf.readBigInt64BE(0);\n case FLOAT4:\n return buf => buf.readFloatBE(0);\n case FLOAT8:\n return buf => buf.readDoubleBE(0);\n case TEXT:\n case VARCHAR:\n case BPCHAR:\n case CHAR:\n return buf => buf.toString('utf8');\n case UUID:\n return buf => decodeUUID(buf);\n case BYTEA:\n return buf => Uint8Array.prototype.slice.call(buf) as Uint8Array;\n case JSON_OID:\n return buf => buf.toString('utf8');\n case JSONB:\n // JSONB binary format has a 1-byte version prefix (currently 0x01).\n return buf => buf.toString('utf8', 1);\n case TIMESTAMP:\n case TIMESTAMPTZ:\n return buf => decodeTimestamp(buf);\n case DATE:\n return buf => decodeDate(buf);\n case TIME:\n return buf => decodeTime(buf);\n case TIMETZ:\n return buf => decodeTimeTZ(buf);\n case NUMERIC:\n return buf => decodeNumeric(buf);\n default:\n throw new Error(\n `No binary decoder for type OID ${typeOID}. ` +\n `Use hasBinaryDecoder() to check before calling makeBinaryDecoder().`,\n );\n }\n}\n\n// ---- Individual Decoders (exported for testing) ----\n\n/**\n * UUID: 16 bytes → \"xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx\"\n */\nexport function decodeUUID(buf: Buffer): string {\n const hex = buf.toString('hex');\n return (\n hex.substring(0, 8) +\n '-' +\n hex.substring(8, 12) +\n '-' +\n hex.substring(12, 16) +\n '-' +\n hex.substring(16, 20) +\n '-' +\n hex.substring(20, 32)\n );\n}\n\n/**\n * TIMESTAMP / TIMESTAMPTZ: int64 microseconds since PG epoch (2000-01-01 UTC)\n * → floating-point milliseconds since Unix epoch.\n *\n * Matches the output of `timestampToFpMillis()` in `types/pg.ts`.\n *\n * Uses Number arithmetic (avoiding BigInt) for speed. 
The microsecond value\n * fits safely in a Number for all practical dates (up to ~year 285,000).\n */\nexport function decodeTimestamp(buf: Buffer): number {\n const hi = buf.readInt32BE(0);\n const lo = buf.readUInt32BE(4);\n if (hi === PG_TIMESTAMP_INF_HI && lo === PG_TIMESTAMP_INF_LO) return Infinity;\n if (hi === PG_TIMESTAMP_NEG_INF_HI && lo === PG_TIMESTAMP_NEG_INF_LO) {\n return -Infinity;\n }\n const microseconds = hi * 0x100000000 + lo;\n return microseconds / 1000 + PG_EPOCH_UNIX_MILLIS;\n}\n\n/**\n * DATE: int32 days since PG epoch (2000-01-01) → millis since Unix epoch at\n * UTC midnight. Matches `dateToUTCMidnight()` in `types/pg.ts`.\n */\nexport function decodeDate(buf: Buffer): number {\n const pgDays = buf.readInt32BE(0);\n if (pgDays === PG_DATE_INFINITY) return Infinity;\n if (pgDays === PG_DATE_NEG_INFINITY) return -Infinity;\n return (pgDays + PG_EPOCH_UNIX_DAYS) * MS_PER_DAY;\n}\n\n/**\n * TIME: int64 microseconds since midnight → milliseconds since midnight.\n * Matches `postgresTimeToMilliseconds()` in `types/pg.ts`.\n *\n * Max value is 86,400,000,000 (~8.6e10), well within Number.MAX_SAFE_INTEGER.\n */\nexport function decodeTime(buf: Buffer): number {\n const hi = buf.readInt32BE(0);\n const lo = buf.readUInt32BE(4);\n const micros = hi * 0x100000000 + lo;\n return Math.trunc(micros / 1000);\n}\n\n/**\n * TIMETZ: int64 microseconds since midnight + int32 timezone offset in seconds.\n * PG stores the offset with inverted sign from ISO (POSIX convention):\n * positive = west of UTC, negative = east of UTC.\n * UTC = local_time + pg_offset.\n * → UTC milliseconds since midnight.\n *\n * Max value ~1.3e11 microseconds, well within Number.MAX_SAFE_INTEGER.\n */\nexport function decodeTimeTZ(buf: Buffer): number {\n const hi = buf.readInt32BE(0);\n const lo = buf.readUInt32BE(4);\n const localMicros = hi * 0x100000000 + lo;\n const tzOffsetSeconds = buf.readInt32BE(8);\n const utcMicros = localMicros + tzOffsetSeconds * 1_000_000;\n let ms = 
Math.trunc(utcMicros / 1000);\n // Normalize to [0, MS_PER_DAY).\n if (ms < 0 || ms >= MS_PER_DAY) {\n ms = ((ms % MS_PER_DAY) + MS_PER_DAY) % MS_PER_DAY;\n }\n return ms;\n}\n\n// NUMERIC binary format constants.\nconst NUMERIC_NEG = 0x4000;\nconst NUMERIC_NAN = 0xc000;\nconst NUMERIC_PINF = 0xd000;\nconst NUMERIC_NINF = 0xf000;\nconst NBASE = 10_000;\n\n/**\n * NUMERIC: variable-length binary format.\n * Header: {ndigits: int16, weight: int16, sign: int16, dscale: int16}\n * Followed by ndigits x int16 base-10000 digits.\n *\n * Converts to a JS `number` (matching the text path's `Number(x)` behavior).\n */\nexport function decodeNumeric(buf: Buffer): number {\n const ndigits = buf.readInt16BE(0);\n const weight = buf.readInt16BE(2);\n const sign = buf.readUInt16BE(4);\n // const dscale = buf.readInt16BE(6); // display scale, not needed for value\n\n if (sign === NUMERIC_NAN) {\n return NaN;\n }\n if (sign === NUMERIC_PINF) {\n return Infinity;\n }\n if (sign === NUMERIC_NINF) {\n return -Infinity;\n }\n if (ndigits === 0) {\n return 0;\n }\n\n // Accumulate base-10000 digits into an integer, then do a single\n // division at the end. Repeated `scale /= NBASE` accumulates\n // floating-point error (e.g. 9900 * 0.0001 = 0.9900000000000001).\n // A single division lets IEEE 754 round to the nearest double,\n // matching the text path's `Number(\"0.99\")` behavior.\n //\n // For numerics with many digits (ndigits > 3), intVal can exceed\n // MAX_SAFE_INTEGER. 
In that case, fall back to building a string\n // and using Number() to match the text path exactly.\n if (ndigits > 3) {\n return decodeNumericViaString(buf, ndigits, weight, sign);\n }\n\n let intVal = 0;\n for (let i = 0; i < ndigits; i++) {\n intVal = intVal * NBASE + buf.readInt16BE(8 + i * 2);\n }\n\n // weight indicates the power-of-NBASE of the first digit.\n // shift is how many base-10000 positions to divide by.\n const shift = ndigits - weight - 1;\n let result;\n if (shift > 0) {\n result = intVal / NBASE ** shift;\n } else if (shift < 0) {\n result = intVal * NBASE ** -shift;\n } else {\n result = intVal;\n }\n return sign === NUMERIC_NEG ? -result : result;\n}\n\n/**\n * Fallback for numerics with many base-10000 digits where accumulating\n * into an integer would exceed MAX_SAFE_INTEGER. Builds the decimal\n * string and uses Number() to match the text path exactly.\n */\nfunction decodeNumericViaString(\n buf: Buffer,\n ndigits: number,\n weight: number,\n sign: number,\n): number {\n // Number of base-10000 digit groups before the decimal point.\n const intGroups = weight + 1;\n\n let str = '';\n for (let i = 0; i < ndigits; i++) {\n const digit = buf.readInt16BE(8 + i * 2);\n if (i === intGroups) {\n str = str || '0';\n str += '.';\n }\n str += i === 0 ? String(digit) : String(digit).padStart(4, '0');\n }\n\n // Append trailing zero groups if the integer part extends beyond ndigits.\n if (intGroups > ndigits) {\n str += '0'.repeat((intGroups - ndigits) * 4);\n }\n\n return Number((sign === NUMERIC_NEG ? 
'-' : '') + str);\n}\n\n/**\n * Array: binary format.\n *\n * Header:\n * int32 ndim — number of dimensions (0 for empty array)\n * int32 flags — 0 or 1 (has-nulls)\n * int32 elem_oid — OID of element type\n * Per dimension:\n * int32 dim_size — number of elements in this dimension\n * int32 dim_lb — lower bound (usually 1)\n *\n * Then for each element (in row-major order):\n * int32 length — -1 for NULL, otherwise byte length\n * bytes — element data\n *\n * Result is JSON.stringify'd for storage in SQLite (matching text path behavior).\n */\nexport function decodeArray(buf: Buffer): string {\n let offset = 0;\n\n const ndim = buf.readInt32BE(offset);\n offset += 4;\n // skip flags (has-nulls)\n offset += 4;\n const elemOid = buf.readInt32BE(offset);\n offset += 4;\n\n if (ndim === 0) {\n return '[]';\n }\n\n // Read dimension sizes.\n const dims: number[] = [];\n for (let d = 0; d < ndim; d++) {\n dims.push(buf.readInt32BE(offset));\n offset += 4;\n // skip lower bound\n offset += 4;\n }\n\n const elemDecoder = makeElementDecoder(elemOid);\n\n // Recursively build the nested array structure.\n function readDimension(dim: number): unknown[] {\n const size = dims[dim];\n const arr: unknown[] = [];\n for (let i = 0; i < size; i++) {\n if (dim < ndim - 1) {\n arr.push(readDimension(dim + 1));\n } else {\n // Leaf dimension — read element.\n const elemLen = buf.readInt32BE(offset);\n offset += 4;\n if (elemLen === -1) {\n arr.push(null);\n } else {\n arr.push(elemDecoder(buf.subarray(offset, offset + elemLen)));\n offset += elemLen;\n }\n }\n }\n return arr;\n }\n\n const result = readDimension(0);\n return stringify(result);\n}\n\n/**\n * Creates a decoder for array elements. Array elements use the same\n * binary encoding as scalar columns, but we need to map the element\n * OID to the right decoder. 
Returns JS values (not LiteValueType)\n * since the result will be JSON.stringify'd.\n */\nfunction makeElementDecoder(elemOid: number): (buf: Buffer) => unknown {\n switch (elemOid) {\n case BOOL:\n return buf => (buf[0] ? true : false);\n case INT2:\n return buf => buf.readInt16BE(0);\n case INT4:\n return buf => buf.readInt32BE(0);\n case INT8:\n return buf => {\n const val = buf.readBigInt64BE(0);\n // Use number if it fits safely, otherwise bigint for JSON.\n return val >= Number.MIN_SAFE_INTEGER && val <= Number.MAX_SAFE_INTEGER\n ? Number(val)\n : val;\n };\n case FLOAT4:\n return buf => buf.readFloatBE(0);\n case FLOAT8:\n return buf => buf.readDoubleBE(0);\n case TEXT:\n case VARCHAR:\n case BPCHAR:\n case CHAR:\n return buf => buf.toString('utf8');\n case UUID:\n return buf => decodeUUID(buf);\n case JSON_OID:\n return buf => JSON.parse(buf.toString('utf8'));\n case JSONB:\n return buf => JSON.parse(buf.toString('utf8', 1));\n case TIMESTAMP:\n case TIMESTAMPTZ:\n return buf => decodeTimestamp(buf);\n case DATE:\n return buf => decodeDate(buf);\n case TIME:\n return buf => decodeTime(buf);\n case TIMETZ:\n return buf => decodeTimeTZ(buf);\n case NUMERIC:\n return buf => decodeNumeric(buf);\n default:\n return buf => buf.toString('utf8');\n 
}\n}\n"],"mappings":";;;AA4BA,IAAM,mBAAmB,OAAO,KAAK;CACnC;CAAM;CAAM;CAAM;CAAM;CAAM;CAAM;CAAM;CAAM;CAAM;CAAM;CAC7D,CAAC;AAEF,IAAM,kBAAkB;AAIxB,IAAM,uBAAuB;AAG7B,IAAM,qBAAqB;AAE3B,IAAM,aAAa;AAGnB,IAAM,sBAAsB;AAC5B,IAAM,sBAAsB;AAC5B,IAAM,0BAA0B;AAChC,IAAM,0BAA0B;AAChC,IAAM,mBAAmB;AACzB,IAAM,uBAAuB;;;;;;;;;AAU7B,IAAa,mBAAb,MAA8B;CAC5B,UAAkB,OAAO,MAAM,EAAE;CACjC,UAAU;CACV,gBAAgB;CAChB,mBAAmB;CAEnB,CAAC,MAAM,OAAwC;AAC7C,QAAA,OAAa,MAAM;AAEnB,MAAI,CAAC,MAAA;OACC,CAAC,MAAA,gBAAsB,CACzB;;AAIJ,WAAS;AAEP,OAAI,MAAA,oBAA0B,GAAG;AAC/B,QAAI,MAAA,WAAiB,GAAG,EACtB;IAEF,MAAM,aAAa,MAAA,OAAa,YAAY,MAAA,OAAa;AACzD,QAAI,eAAe,GAEjB;AAEF,UAAA,UAAgB;AAChB,UAAA,kBAAwB;;AAI1B,UAAO,MAAA,kBAAwB,GAAG;AAChC,QAAI,MAAA,WAAiB,GAAG,GAAG;AAEzB,WAAA,SAAe;AACf;;IAEF,MAAM,WAAW,MAAA,OAAa,YAAY,MAAA,OAAa;AACvD,UAAA,UAAgB;AAEhB,QAAI,aAAa,GAEf,OAAM;SACD;AACL,SAAI,MAAA,WAAiB,GAAG,UAAU;AAGhC,YAAA,UAAgB;AAChB,YAAA,SAAe;AACf;;AAEF,WAAM,MAAA,OAAa,SAAS,MAAA,QAAc,MAAA,SAAe,SAAS;AAClE,WAAA,UAAgB;;AAElB,UAAA;;;AAIJ,QAAA,SAAe;;CAGjB,aAAqB;AACnB,SAAO,MAAA,OAAa,SAAS,MAAA;;CAG/B,QAAQ,OAAqB;AAC3B,MAAI,MAAA,OAAa,WAAW,MAAA,QAAc;AAExC,SAAA,SAAe;AACf,SAAA,SAAe;SACV;AAEL,SAAA,SAAe,OAAO,OAAO,CAC3B,MAAA,OAAa,SAAS,MAAA,OAAa,EACnC,MACD,CAAC;AACF,SAAA,SAAe;;;CAInB,WAAiB;AACf,MAAI,MAAA,SAAe,GAAG;AACpB,SAAA,SAAe,MAAA,OAAa,SAAS,MAAA,OAAa;AAClD,SAAA,SAAe;;;CAInB,kBAA2B;AACzB,MAAI,MAAA,WAAiB,GAAG,gBACtB,QAAO;AAIT,OAAK,IAAI,IAAI,GAAG,IAAI,iBAAiB,QAAQ,IAC3C,KAAI,MAAA,OAAa,MAAA,SAAe,OAAO,iBAAiB,GACtD,OAAM,IAAI,MAAM,kCAAkC;AAGtD,QAAA,UAAgB;EAIhB,MAAM,QAAQ,MAAA,OAAa,YAAY,MAAA,OAAa;AACpD,QAAA,UAAgB;AAChB,MAAI,UAAU,EACZ,OAAM,IAAI,MAAM,6BAA6B,QAAQ;EAIvD,MAAM,eAAe,MAAA,OAAa,YAAY,MAAA,OAAa;AAC3D,QAAA,UAAgB;AAGhB,MAAI,eAAe,GAAG;AACpB,OAAI,MAAA,WAAiB,GAAG,cAAc;AAEpC,UAAA,UAAgB;AAChB,WAAO;;AAET,SAAA,UAAgB;;AAGlB,QAAA,eAAqB;AACrB,SAAO;;;AAaX,IAAM,oBAAoB,IAAI,IAAI;;;;;;;;CAQhC;CACA;;CAEA;;;CAGA;CACA;CACA;CACA;CACA;CACA;CACA;CACD,CAAC;;;;;;;AAQF,SAAgB,iBAAiB,MAAiC;AAChE,KAAI,KAAK,oBAAoB,QAAQ,KAAK,oBAAoB,KAAA,EAC5D,QAAO;AAET,KAAI,KAAK,gBAAgB,IACvB,QAAO;AAE
T,QAAO,kBAAkB,IAAI,KAAK,QAAQ;;;AAI5C,IAAa,mBAAiC,QAAO,IAAI,SAAS,OAAO;;;;;;;;;;AAWzE,SAAgB,kBAAkB,MAAuC;CACvE,MAAM,EAAC,SAAS,aAAa,oBAAmB;AAGhD,KAAI,oBAAoB,QAAQ,oBAAoB,KAAA,EAClD,SAAO,QAAO,YAAY,IAAI;AAIhC,KAAI,gBAAgB,IAClB,SAAO,QAAO,IAAI,SAAS,OAAO;AAGpC,SAAQ,SAAR;EACE,KAAA,GACE,SAAO,QAAQ,IAAI,KAAK,IAAI;EAC9B,KAAA,GACE,SAAO,QAAO,IAAI,YAAY,EAAE;EAClC,KAAA,GACE,SAAO,QAAO,IAAI,YAAY,EAAE;EAClC,KAAA,GACE,SAAO,QAAO,IAAI,eAAe,EAAE;EACrC,KAAA,IACE,SAAO,QAAO,IAAI,YAAY,EAAE;EAClC,KAAA,IACE,SAAO,QAAO,IAAI,aAAa,EAAE;EACnC,KAAA;EACA,KAAK;EACL,KAAK;EACL,KAAA,GACE,SAAO,QAAO,IAAI,SAAS,OAAO;EACpC,KAAK,KACH,SAAO,QAAO,WAAW,IAAI;EAC/B,KAAA,GACE,SAAO,QAAO,WAAW,UAAU,MAAM,KAAK,IAAI;EACpD,KAAA,IACE,SAAO,QAAO,IAAI,SAAS,OAAO;EACpC,KAAK,MAEH,SAAO,QAAO,IAAI,SAAS,QAAQ,EAAE;EACvC,KAAK;EACL,KAAK,YACH,SAAO,QAAO,gBAAgB,IAAI;EACpC,KAAK,KACH,SAAO,QAAO,WAAW,IAAI;EAC/B,KAAK,KACH,SAAO,QAAO,WAAW,IAAI;EAC/B,KAAK,OACH,SAAO,QAAO,aAAa,IAAI;EACjC,KAAK,QACH,SAAO,QAAO,cAAc,IAAI;EAClC,QACE,OAAM,IAAI,MACR,kCAAkC,QAAQ,uEAE3C;;;;;;AASP,SAAgB,WAAW,KAAqB;CAC9C,MAAM,MAAM,IAAI,SAAS,MAAM;AAC/B,QACE,IAAI,UAAU,GAAG,EAAE,GACnB,MACA,IAAI,UAAU,GAAG,GAAG,GACpB,MACA,IAAI,UAAU,IAAI,GAAG,GACrB,MACA,IAAI,UAAU,IAAI,GAAG,GACrB,MACA,IAAI,UAAU,IAAI,GAAG;;;;;;;;;;;AAazB,SAAgB,gBAAgB,KAAqB;CACnD,MAAM,KAAK,IAAI,YAAY,EAAE;CAC7B,MAAM,KAAK,IAAI,aAAa,EAAE;AAC9B,KAAI,OAAO,uBAAuB,OAAO,oBAAqB,QAAO;AACrE,KAAI,OAAO,2BAA2B,OAAO,wBAC3C,QAAO;AAGT,SADqB,KAAK,aAAc,MAClB,MAAO;;;;;;AAO/B,SAAgB,WAAW,KAAqB;CAC9C,MAAM,SAAS,IAAI,YAAY,EAAE;AACjC,KAAI,WAAW,iBAAkB,QAAO;AACxC,KAAI,WAAW,qBAAsB,QAAO;AAC5C,SAAQ,SAAS,sBAAsB;;;;;;;;AASzC,SAAgB,WAAW,KAAqB;CAC9C,MAAM,KAAK,IAAI,YAAY,EAAE;CAC7B,MAAM,KAAK,IAAI,aAAa,EAAE;CAC9B,MAAM,SAAS,KAAK,aAAc;AAClC,QAAO,KAAK,MAAM,SAAS,IAAK;;;;;;;;;;;AAYlC,SAAgB,aAAa,KAAqB;CAChD,MAAM,KAAK,IAAI,YAAY,EAAE;CAC7B,MAAM,KAAK,IAAI,aAAa,EAAE;CAG9B,MAAM,YAFc,KAAK,aAAc,KACf,IAAI,YAAY,EAAE,GACQ;CAClD,IAAI,KAAK,KAAK,MAAM,YAAY,IAAK;AAErC,KAAI,KAAK,KAAK,MAAM,WAClB,OAAO,KAAK,aAAc,cAAc;AAE1C,QAAO;;AAIT,IAAM,cAAc;AACpB,IAAM,cAAc;AACpB,IAAM,eAAe;AACrB,IAA
M,eAAe;AACrB,IAAM,QAAQ;;;;;;;;AASd,SAAgB,cAAc,KAAqB;CACjD,MAAM,UAAU,IAAI,YAAY,EAAE;CAClC,MAAM,SAAS,IAAI,YAAY,EAAE;CACjC,MAAM,OAAO,IAAI,aAAa,EAAE;AAGhC,KAAI,SAAS,YACX,QAAO;AAET,KAAI,SAAS,aACX,QAAO;AAET,KAAI,SAAS,aACX,QAAO;AAET,KAAI,YAAY,EACd,QAAO;AAYT,KAAI,UAAU,EACZ,QAAO,uBAAuB,KAAK,SAAS,QAAQ,KAAK;CAG3D,IAAI,SAAS;AACb,MAAK,IAAI,IAAI,GAAG,IAAI,SAAS,IAC3B,UAAS,SAAS,QAAQ,IAAI,YAAY,IAAI,IAAI,EAAE;CAKtD,MAAM,QAAQ,UAAU,SAAS;CACjC,IAAI;AACJ,KAAI,QAAQ,EACV,UAAS,SAAS,SAAS;UAClB,QAAQ,EACjB,UAAS,SAAS,SAAS,CAAC;KAE5B,UAAS;AAEX,QAAO,SAAS,cAAc,CAAC,SAAS;;;;;;;AAQ1C,SAAS,uBACP,KACA,SACA,QACA,MACQ;CAER,MAAM,YAAY,SAAS;CAE3B,IAAI,MAAM;AACV,MAAK,IAAI,IAAI,GAAG,IAAI,SAAS,KAAK;EAChC,MAAM,QAAQ,IAAI,YAAY,IAAI,IAAI,EAAE;AACxC,MAAI,MAAM,WAAW;AACnB,SAAM,OAAO;AACb,UAAO;;AAET,SAAO,MAAM,IAAI,OAAO,MAAM,GAAG,OAAO,MAAM,CAAC,SAAS,GAAG,IAAI;;AAIjE,KAAI,YAAY,QACd,QAAO,IAAI,QAAQ,YAAY,WAAW,EAAE;AAG9C,QAAO,QAAQ,SAAS,cAAc,MAAM,MAAM,IAAI;;;;;;;;;;;;;;;;;;;AAoBxD,SAAgB,YAAY,KAAqB;CAC/C,IAAI,SAAS;CAEb,MAAM,OAAO,IAAI,YAAY,OAAO;AACpC,WAAU;AAEV,WAAU;CACV,MAAM,UAAU,IAAI,YAAY,OAAO;AACvC,WAAU;AAEV,KAAI,SAAS,EACX,QAAO;CAIT,MAAM,OAAiB,EAAE;AACzB,MAAK,IAAI,IAAI,GAAG,IAAI,MAAM,KAAK;AAC7B,OAAK,KAAK,IAAI,YAAY,OAAO,CAAC;AAClC,YAAU;AAEV,YAAU;;CAGZ,MAAM,cAAc,mBAAmB,QAAQ;CAG/C,SAAS,cAAc,KAAwB;EAC7C,MAAM,OAAO,KAAK;EAClB,MAAM,MAAiB,EAAE;AACzB,OAAK,IAAI,IAAI,GAAG,IAAI,MAAM,IACxB,KAAI,MAAM,OAAO,EACf,KAAI,KAAK,cAAc,MAAM,EAAE,CAAC;OAC3B;GAEL,MAAM,UAAU,IAAI,YAAY,OAAO;AACvC,aAAU;AACV,OAAI,YAAY,GACd,KAAI,KAAK,KAAK;QACT;AACL,QAAI,KAAK,YAAY,IAAI,SAAS,QAAQ,SAAS,QAAQ,CAAC,CAAC;AAC7D,cAAU;;;AAIhB,SAAO;;AAIT,QAAO,UADQ,cAAc,EAAE,CACP;;;;;;;;AAS1B,SAAS,mBAAmB,SAA2C;AACrE,SAAQ,SAAR;EACE,KAAA,GACE,SAAO,QAAQ,IAAI,KAAK,OAAO;EACjC,KAAA,GACE,SAAO,QAAO,IAAI,YAAY,EAAE;EAClC,KAAA,GACE,SAAO,QAAO,IAAI,YAAY,EAAE;EAClC,KAAA,GACE,SAAO,QAAO;GACZ,MAAM,MAAM,IAAI,eAAe,EAAE;AAEjC,UAAO,OAAO,OAAO,oBAAoB,OAAO,OAAO,mBACnD,OAAO,IAAI,GACX;;EAER,KAAA,IACE,SAAO,QAAO,IAAI,YAAY,EAAE;EAClC,KAAA,IACE,SAAO,QAAO,IAAI,aAAa,EAAE;EACnC,KAAA;EACA,KAAK;EACL,KAAK;EACL,KAAA,GACE,
SAAO,QAAO,IAAI,SAAS,OAAO;EACpC,KAAK,KACH,SAAO,QAAO,WAAW,IAAI;EAC/B,KAAA,IACE,SAAO,QAAO,KAAK,MAAM,IAAI,SAAS,OAAO,CAAC;EAChD,KAAK,MACH,SAAO,QAAO,KAAK,MAAM,IAAI,SAAS,QAAQ,EAAE,CAAC;EACnD,KAAK;EACL,KAAK,YACH,SAAO,QAAO,gBAAgB,IAAI;EACpC,KAAK,KACH,SAAO,QAAO,WAAW,IAAI;EAC/B,KAAK,KACH,SAAO,QAAO,WAAW,IAAI;EAC/B,KAAK,OACH,SAAO,QAAO,aAAa,IAAI;EACjC,KAAK,QACH,SAAO,QAAO,cAAc,IAAI;EAClC,QACE,SAAO,QAAO,IAAI,SAAS,OAAO"}
@@ -1 +1 @@
1
- {"version":3,"file":"transaction-pool.d.ts","sourceRoot":"","sources":["../../../../../zero-cache/src/db/transaction-pool.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,kBAAkB,CAAC;AAEjD,OAAO,KAAK,QAAQ,MAAM,UAAU,CAAC;AAErC,OAAO,KAAK,EAAC,IAAI,EAAC,MAAM,6BAA6B,CAAC;AAGtD,OAAO,EAAC,KAAK,UAAU,EAAE,KAAK,mBAAmB,EAAC,MAAM,gBAAgB,CAAC;AACzE,OAAO,KAAK,KAAK,IAAI,MAAM,gBAAgB,CAAC;AAG5C,KAAK,IAAI,GAAG,IAAI,CAAC,OAAO,IAAI,CAAC,CAAC;AAE9B,KAAK,YAAY,CAAC,CAAC,IAAI,OAAO,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC;AAEtC,MAAM,MAAM,SAAS,GACjB,QAAQ,CAAC,YAAY,CAAC,CAAC,QAAQ,CAAC,GAAG,GAAG,QAAQ,CAAC,QAAQ,CAAC,GAAG,CAAC,CAAC,EAAE,CAAC,GAChE,QAAQ,CAAC,YAAY,CAAC,QAAQ,CAAC,GAAG,EAAE,CAAC,CAAC;AAE1C;;;;;GAKG;AACH,MAAM,MAAM,IAAI,GAAG,CACjB,EAAE,EAAE,mBAAmB,EACvB,EAAE,EAAE,UAAU,KACX,YAAY,CAAC,SAAS,EAAE,CAAC,CAAC;AAE/B;;;;GAIG;AACH,MAAM,MAAM,QAAQ,CAAC,CAAC,IAAI,CACxB,EAAE,EAAE,mBAAmB,EACvB,EAAE,EAAE,UAAU,KACX,YAAY,CAAC,CAAC,CAAC,CAAC;AAErB;;;;;;;GAOG;AACH,qBAAa,eAAe;;IAkB1B;;;;;;;;;;;;;;;OAeG;gBAED,EAAE,EAAE,UAAU,EACd,IAAI,EAAE,IAAI,EACV,IAAI,CAAC,EAAE,IAAI,EACX,OAAO,CAAC,EAAE,IAAI,EACd,cAAc,SAAI,EAClB,UAAU,SAAiB,EAC3B,YAAY,eAAgB;IAkB9B;;;OAGG;IACH,GAAG,CAAC,EAAE,EAAE,UAAU,GAAG,IAAI;IASzB;;;;;;OAMG;IACH,iBAAiB,CAAC,GAAG,EAAE,MAAM,EAAE,KAAK,EAAE,MAAM;iCAIf,MAAM,SAAS,MAAM;;IAKlD;;;;;;;;;;;;;;;;;;;OAmBG;IACG,IAAI;IA0GV;;;;;;;;;;;OAWG;IACH,OAAO,CAAC,IAAI,EAAE,IAAI,GAAG,OAAO,CAAC,IAAI,CAAC;IA+DlC;;;;;OAKG;IACH,eAAe,CAAC,CAAC,EAAE,QAAQ,EAAE,QAAQ,CAAC,CAAC,CAAC,GAAG,OAAO,CAAC,CAAC,CAAC;IAsDrD;;;OAGG;IACH,KAAK;IAIL;;;OAGG;IACH,OAAO;IASP;;;;;;;;;;;;;;;;;OAiBG;IAEH,GAAG,CAAC,KAAK,SAAI;IAQb;;OAEG;IACH,KAAK,CAAC,KAAK,SAAI;IAYf,SAAS,IAAI,OAAO;IAIpB;;OAEG;IACH,IAAI,CAAC,GAAG,EAAE,OAAO;CAelB;AAED,KAAK,wBAAwB,GAAG;IAC9B;;;;;OAKG;IACH,cAAc,EAAE,IAAI,CAAC;IAErB;;;;;OAKG;IACH,aAAa,EAAE,IAAI,CAAC;IAEpB;;;;OAIG;IACH,WAAW,EAAE,IAAI,CAAC;IAElB,qCAAqC;IACrC,UAAU,EAAE,OAAO,CAAC,MAAM,CAAC,CAAC;CAC7B,CAAC;AAEF;;;;GAIG;AACH,wBAAgB,qBAAqB,IAAI,wBAAwB,CAoDhE;AAED;;;;GAIG;AACH,wBAAgB,cAAc,IAAI;IAChC,IAAI,EAAE,IAAI,CAAC;I
ACX,OAAO,EAAE,IAAI,CAAC;IACd,UAAU,EAAE,OAAO,CAAC,MAAM,CAAC,CAAC;CAC7B,CAqDA;AAED;;GAEG;AACH,wBAAgB,cAAc,CAAC,UAAU,EAAE,MAAM,GAAG;IAClD,IAAI,EAAE,IAAI,CAAC;IACX,QAAQ,EAAE,OAAO,CAAC,IAAI,CAAC,CAAC;CACzB,CAYA;AAED;;;;;GAKG;AACH,qBAAa,gBAAiB,SAAQ,KAAK;gBAC7B,KAAK,CAAC,EAAE,OAAO;CAI5B;AAgFD,KAAK,WAAW,GAAG;IACjB,SAAS,EAAE,MAAM,CAAC;IAClB,IAAI,EAAE,IAAI,GAAG,MAAM,CAAC;CACrB,CAAC;AAEF,KAAK,YAAY,GAAG;IAClB,iBAAiB,EAAE,WAAW,CAAC;IAC/B,eAAe,EAAE,WAAW,CAAC;CAC9B,CAAC;AAGF,eAAO,MAAM,aAAa,EAAE,YAS3B,CAAC"}
1
+ {"version":3,"file":"transaction-pool.d.ts","sourceRoot":"","sources":["../../../../../zero-cache/src/db/transaction-pool.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,kBAAkB,CAAC;AAEjD,OAAO,KAAK,QAAQ,MAAM,UAAU,CAAC;AAErC,OAAO,KAAK,EAAC,IAAI,EAAC,MAAM,6BAA6B,CAAC;AAGtD,OAAO,EAAC,KAAK,UAAU,EAAE,KAAK,mBAAmB,EAAC,MAAM,gBAAgB,CAAC;AACzE,OAAO,KAAK,KAAK,IAAI,MAAM,gBAAgB,CAAC;AAG5C,KAAK,IAAI,GAAG,IAAI,CAAC,OAAO,IAAI,CAAC,CAAC;AAE9B,KAAK,YAAY,CAAC,CAAC,IAAI,OAAO,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC;AAEtC,MAAM,MAAM,SAAS,GACjB,QAAQ,CAAC,YAAY,CAAC,CAAC,QAAQ,CAAC,GAAG,GAAG,QAAQ,CAAC,QAAQ,CAAC,GAAG,CAAC,CAAC,EAAE,CAAC,GAChE,QAAQ,CAAC,YAAY,CAAC,QAAQ,CAAC,GAAG,EAAE,CAAC,CAAC;AAE1C;;;;;GAKG;AACH,MAAM,MAAM,IAAI,GAAG,CACjB,EAAE,EAAE,mBAAmB,EACvB,EAAE,EAAE,UAAU,KACX,YAAY,CAAC,SAAS,EAAE,CAAC,CAAC;AAE/B;;;;GAIG;AACH,MAAM,MAAM,QAAQ,CAAC,CAAC,IAAI,CACxB,EAAE,EAAE,mBAAmB,EACvB,EAAE,EAAE,UAAU,KACX,YAAY,CAAC,CAAC,CAAC,CAAC;AAErB;;;;;;;GAOG;AACH,qBAAa,eAAe;;IAkB1B;;;;;;;;;;;;;;;OAeG;gBAED,EAAE,EAAE,UAAU,EACd,IAAI,EAAE,IAAI,EACV,IAAI,CAAC,EAAE,IAAI,EACX,OAAO,CAAC,EAAE,IAAI,EACd,cAAc,SAAI,EAClB,UAAU,SAAiB,EAC3B,YAAY,eAAgB;IAkB9B;;;OAGG;IACH,GAAG,CAAC,EAAE,EAAE,UAAU,GAAG,IAAI;IASzB;;;;;;OAMG;IACH,iBAAiB,CAAC,GAAG,EAAE,MAAM,EAAE,KAAK,EAAE,MAAM;iCAIf,MAAM,SAAS,MAAM;;IAKlD;;;;;;;;;;;;;;;;;;;OAmBG;IACG,IAAI;IAuHV;;;;;;;;;;;OAWG;IACH,OAAO,CAAC,IAAI,EAAE,IAAI,GAAG,OAAO,CAAC,IAAI,CAAC;IA+DlC;;;;;OAKG;IACH,eAAe,CAAC,CAAC,EAAE,QAAQ,EAAE,QAAQ,CAAC,CAAC,CAAC,GAAG,OAAO,CAAC,CAAC,CAAC;IAsDrD;;;OAGG;IACH,KAAK;IAIL;;;OAGG;IACH,OAAO;IASP;;;;;;;;;;;;;;;;;OAiBG;IAEH,GAAG,CAAC,KAAK,SAAI;IAQb;;OAEG;IACH,KAAK,CAAC,KAAK,SAAI;IAYf,SAAS,IAAI,OAAO;IAIpB;;OAEG;IACH,IAAI,CAAC,GAAG,EAAE,OAAO;CAelB;AAED,KAAK,wBAAwB,GAAG;IAC9B;;;;;OAKG;IACH,cAAc,EAAE,IAAI,CAAC;IAErB;;;;;OAKG;IACH,aAAa,EAAE,IAAI,CAAC;IAEpB;;;;OAIG;IACH,WAAW,EAAE,IAAI,CAAC;IAElB,qCAAqC;IACrC,UAAU,EAAE,OAAO,CAAC,MAAM,CAAC,CAAC;CAC7B,CAAC;AAEF;;;;GAIG;AACH,wBAAgB,qBAAqB,IAAI,wBAAwB,CAoDhE;AAED;;;;GAIG;AACH,wBAAgB,cAAc,IAAI;IAChC,IAAI,EAAE,IAAI,CAAC;I
ACX,OAAO,EAAE,IAAI,CAAC;IACd,UAAU,EAAE,OAAO,CAAC,MAAM,CAAC,CAAC;CAC7B,CAqDA;AAED;;GAEG;AACH,wBAAgB,cAAc,CAAC,UAAU,EAAE,MAAM,GAAG;IAClD,IAAI,EAAE,IAAI,CAAC;IACX,QAAQ,EAAE,OAAO,CAAC,IAAI,CAAC,CAAC;CACzB,CAYA;AAED;;;;;GAKG;AACH,qBAAa,gBAAiB,SAAQ,KAAK;gBAC7B,KAAK,CAAC,EAAE,OAAO;CAI5B;AAgFD,KAAK,WAAW,GAAG;IACjB,SAAS,EAAE,MAAM,CAAC;IAClB,IAAI,EAAE,IAAI,GAAG,MAAM,CAAC;CACrB,CAAC;AAEF,KAAK,YAAY,GAAG;IAClB,iBAAiB,EAAE,WAAW,CAAC;IAC/B,eAAe,EAAE,WAAW,CAAC;CAC9B,CAAC;AAGF,eAAO,MAAM,aAAa,EAAE,YAS3B,CAAC"}
@@ -103,6 +103,9 @@ var TransactionPool = class {
103
103
  await Promise.all(this.#workers);
104
104
  if (numWorkers < this.#workers.length) await Promise.all(this.#workers);
105
105
  this.#lc.debug?.("transaction pool done");
106
+ const elapsed = performance.now() - this.#start;
107
+ if (elapsed > 6e4) if (this.#stmts > 0) this.#lc.warn?.(`finished long transaction with ${this.#stmts} statements (${elapsed.toFixed(3)} ms)`);
108
+ else this.#lc.warn?.(`finished long read transaction (${elapsed.toFixed(3)} ms)`);
106
109
  }
107
110
  #addWorker(db) {
108
111
  const id = this.#workers.length + 1;
@@ -1 +1 @@
1
- {"version":3,"file":"transaction-pool.js","names":["#mode","#init","#cleanup","#tasks","#workers","#initialWorkers","#maxWorkers","#timeoutTask","#lc","#stmtRunner","#numWorkers","#db","#addWorker","#numWorking","#failure","#done","#process","#start","#stmts","#readRunner","#refCount"],"sources":["../../../../../zero-cache/src/db/transaction-pool.ts"],"sourcesContent":["import type {LogContext} from '@rocicorp/logger';\nimport {type Resolver, resolver} from '@rocicorp/resolver';\nimport type postgres from 'postgres';\nimport {assert} from '../../../shared/src/asserts.ts';\nimport type {Enum} from '../../../shared/src/enum.ts';\nimport {Queue} from '../../../shared/src/queue.ts';\nimport {promiseVoid} from '../../../shared/src/resolved-promises.ts';\nimport {type PostgresDB, type PostgresTransaction} from '../types/pg.ts';\nimport type * as Mode from './mode-enum.ts';\nimport {runTx} from './run-transaction.ts';\n\ntype Mode = Enum<typeof Mode>;\n\ntype MaybePromise<T> = Promise<T> | T;\n\nexport type Statement =\n | postgres.PendingQuery<(postgres.Row & Iterable<postgres.Row>)[]>\n | postgres.PendingQuery<postgres.Row[]>;\n\n/**\n * A {@link Task} is logic run from within a transaction in a {@link TransactionPool}.\n * It returns a list of `Statements` that the transaction executes asynchronously and\n * awaits when it receives the 'done' signal.\n *\n */\nexport type Task = (\n tx: PostgresTransaction,\n lc: LogContext,\n) => MaybePromise<Statement[]>;\n\n/**\n * A {@link ReadTask} is run from within a transaction, but unlike a {@link Task},\n * the results of a ReadTask are opaque to the TransactionPool and returned to the\n * caller of {@link TransactionPool.processReadTask}.\n */\nexport type ReadTask<T> = (\n tx: PostgresTransaction,\n lc: LogContext,\n) => MaybePromise<T>;\n\n/**\n * A TransactionPool is a pool of one or more {@link postgres.TransactionSql}\n * objects that participate in processing a dynamic queue of tasks.\n *\n * This can be used for 
serializing a set of tasks that arrive asynchronously\n * to a single transaction (for writing) or performing parallel reads across\n * multiple connections at the same snapshot (e.g. read only snapshot transactions).\n */\nexport class TransactionPool {\n #lc: LogContext;\n readonly #mode: Mode;\n readonly #init: TaskRunner | undefined;\n readonly #cleanup: TaskRunner | undefined;\n readonly #tasks = new Queue<TaskRunner | Error | 'done'>();\n readonly #workers: Promise<unknown>[] = [];\n readonly #initialWorkers: number;\n readonly #maxWorkers: number;\n readonly #timeoutTask: TimeoutTasks;\n #numWorkers: number;\n #numWorking = 0;\n #db: PostgresDB | undefined; // set when running. stored to allow adaptive pool sizing.\n\n #refCount = 1;\n #done = false;\n #failure: Error | undefined;\n\n /**\n * @param init A {@link Task} that is run in each Transaction before it begins\n * processing general tasks. This can be used to to set the transaction\n * mode, export/set snapshots, etc. This will be run even if\n * {@link fail} has been called on the pool.\n * @param cleanup A {@link Task} that is run in each Transaction before it closes.\n * This will be run even if {@link fail} has been called, or if a\n * preceding Task threw an Error.\n * @param initialWorkers The initial number of transaction workers to process tasks.\n * This is the steady state number of workers that will be kept\n * alive if the TransactionPool is long lived.\n * This must be greater than 0. Defaults to 1.\n * @param maxWorkers When specified, allows the pool to grow to `maxWorkers`. This\n * must be greater than or equal to `initialWorkers`. 
On-demand\n * workers will be shut down after an idle timeout of 5 seconds.\n */\n constructor(\n lc: LogContext,\n mode: Mode,\n init?: Task,\n cleanup?: Task,\n initialWorkers = 1,\n maxWorkers = initialWorkers,\n timeoutTasks = TIMEOUT_TASKS, // Overridden for tests.\n ) {\n assert(initialWorkers > 0, 'initialWorkers must be positive');\n assert(\n maxWorkers >= initialWorkers,\n 'maxWorkers must be >= initialWorkers',\n );\n\n this.#lc = lc;\n this.#mode = mode;\n this.#init = init ? this.#stmtRunner(init) : undefined;\n this.#cleanup = cleanup ? this.#stmtRunner(cleanup) : undefined;\n this.#initialWorkers = initialWorkers;\n this.#numWorkers = initialWorkers;\n this.#maxWorkers = maxWorkers;\n this.#timeoutTask = timeoutTasks;\n }\n\n /**\n * Starts the pool of workers to process Tasks with transactions opened from the\n * specified {@link db}.\n */\n run(db: PostgresDB): this {\n assert(!this.#db, 'already running');\n this.#db = db;\n for (let i = 0; i < this.#numWorkers; i++) {\n this.#addWorker(db);\n }\n return this;\n }\n\n /**\n * Adds context parameters to internal LogContext. This is useful for context values that\n * are not known when the TransactionPool is constructed (e.g. determined after a database\n * call when the pool is running).\n *\n * Returns an object that can be used to add more parameters.\n */\n addLoggingContext(key: string, value: string) {\n this.#lc = this.#lc.withContext(key, value);\n\n return {\n addLoggingContext: (key: string, value: string) =>\n this.addLoggingContext(key, value),\n };\n }\n\n /**\n * Returns a promise that:\n *\n * * resolves after {@link setDone} has been called (or the the pool as been {@link unref}ed\n * to a 0 ref count), once all added tasks have been processed and all transactions have been\n * committed or closed.\n *\n * * rejects if processing was aborted with {@link fail} or if processing any of\n * the tasks resulted in an error. 
All uncommitted transactions will have been\n * rolled back.\n *\n * Note that partial failures are possible if processing writes with multiple workers\n * (e.g. `setDone` is called, allowing some workers to commit, after which other\n * workers encounter errors). Using a TransactionPool in this manner does not make\n * sense in terms of transactional semantics, and is thus not recommended.\n *\n * For reads, however, multiple workers is useful for performing parallel reads\n * at the same snapshot. See {@link synchronizedSnapshots} for an example.\n * Resolves or rejects when all workers are done or failed.\n */\n async done() {\n const numWorkers = this.#workers.length;\n await Promise.all(this.#workers);\n\n if (numWorkers < this.#workers.length) {\n // If workers were added after the initial set, they must be awaited to ensure\n // that the results (i.e. rejections) of all workers are accounted for. This only\n // needs to be re-done once, because the fact that the first `await` completed\n // guarantees that the pool is in a terminal state and no new workers can be added.\n await Promise.all(this.#workers);\n }\n this.#lc.debug?.('transaction pool done');\n }\n\n #addWorker(db: PostgresDB) {\n const id = this.#workers.length + 1;\n const lc = this.#lc.withContext('tx', id);\n\n const tt: TimeoutTask =\n this.#workers.length < this.#initialWorkers\n ? this.#timeoutTask.forInitialWorkers\n : this.#timeoutTask.forExtraWorkers;\n const {timeoutMs} = tt;\n const timeoutTask = tt.task === 'done' ? 'done' : this.#stmtRunner(tt.task);\n\n const worker = async (tx: PostgresTransaction) => {\n const start = performance.now();\n try {\n lc.debug?.('started transaction');\n\n let last: Promise<void> = promiseVoid;\n\n const executeTask = async (runner: TaskRunner) => {\n runner !== this.#init && this.#numWorking++;\n const {pending} = await runner.run(tx, lc, () => {\n runner !== this.#init && this.#numWorking--;\n });\n last = pending ?? 
last;\n };\n\n let task: TaskRunner | Error | 'done' =\n this.#init ?? (await this.#tasks.dequeue(timeoutTask, timeoutMs));\n\n try {\n while (task !== 'done') {\n if (\n task instanceof Error ||\n (task !== this.#init && this.#failure)\n ) {\n throw this.#failure ?? task;\n }\n await executeTask(task);\n\n // await the next task.\n task = await this.#tasks.dequeue(timeoutTask, timeoutMs);\n }\n } finally {\n // Execute the cleanup task even on failure.\n if (this.#cleanup) {\n await executeTask(this.#cleanup);\n }\n }\n\n const elapsed = performance.now() - start;\n lc.debug?.(`closing transaction (${elapsed.toFixed(3)} ms)`);\n // Given the semantics of a Postgres transaction, the last statement\n // will only succeed if all of the preceding statements succeeded.\n return last;\n } catch (e) {\n if (e !== this.#failure) {\n this.fail(e); // A failure in any worker should fail the pool.\n }\n throw e;\n }\n };\n\n const workerTx = runTx(db, worker, {mode: this.#mode})\n .catch(e => {\n if (e instanceof RollbackSignal) {\n // A RollbackSignal is used to gracefully rollback the postgres.js\n // transaction block. 
It should not be thrown up to the application.\n lc.debug?.('aborted transaction');\n } else {\n throw e;\n }\n })\n .finally(() => this.#numWorkers--);\n\n // Attach a rejection handler immediately to prevent unhandledRejections.\n // The application will handle errors when it awaits processReadTask()\n // or done().\n workerTx.catch(() => {});\n\n this.#workers.push(workerTx);\n\n // After adding the worker, enqueue a terminal signal if we are in either of the\n // terminal states (both of which prevent more tasks from being enqueued), to ensure\n // that the added worker eventually exits.\n if (this.#done) {\n this.#tasks.enqueue('done');\n }\n if (this.#failure) {\n this.#tasks.enqueue(this.#failure);\n }\n }\n\n /**\n * Processes the statements produced by the specified {@link Task},\n * returning a Promise that resolves when the statements are either processed\n * by the database or rejected.\n *\n * Note that statement failures will result in failing the entire\n * TransactionPool (per transaction semantics). However, the returned Promise\n * itself will resolve rather than reject. As such, it is fine to ignore\n * returned Promises in order to pipeline requests to the database. It is\n * recommended to occasionally await them (e.g. 
after some threshold) in\n * order to avoid memory blowup in the case of database slowness.\n */\n process(task: Task): Promise<void> {\n const r = resolver<void>();\n this.#process(this.#stmtRunner(task, r));\n return r.promise;\n }\n\n readonly #start = performance.now();\n #stmts = 0;\n\n /**\n * Implements the semantics specified in {@link process()}.\n *\n * Specifically:\n * * `freeWorker()` is called as soon as the statements are produced,\n * allowing them to be pipelined to the database.\n * * Statement errors result in failing the transaction pool.\n * * The client-supplied Resolver resolves on success or failure;\n * it is never rejected.\n */\n #stmtRunner(task: Task, r: {resolve: () => void} = resolver()): TaskRunner {\n return {\n run: async (tx, lc, freeWorker) => {\n let stmts: Statement[];\n try {\n stmts = await task(tx, lc);\n } catch (e) {\n r.resolve();\n throw e;\n } finally {\n freeWorker();\n }\n\n if (stmts.length === 0) {\n r.resolve();\n return {pending: null};\n }\n\n // Execute the statements (i.e. send to the db) immediately.\n // The last result is returned for the worker to await before\n // closing the transaction.\n const last = stmts.reduce(\n (_, stmt) =>\n stmt\n .execute()\n .then(() => {\n if (++this.#stmts % 1000 === 0) {\n const log = this.#stmts % 10000 === 0 ? 'info' : 'debug';\n const q = stmt as unknown as Query;\n lc[log]?.(\n `executed ${this.#stmts}th statement (${(performance.now() - this.#start).toFixed(3)} ms)`,\n {statement: q.string},\n );\n }\n })\n .catch(e => this.fail(e)),\n promiseVoid,\n );\n return {pending: last.then(r.resolve)};\n },\n rejected: r.resolve,\n };\n }\n\n /**\n * Processes and returns the result of executing the {@link ReadTask} from\n * within the transaction. 
An error thrown by the task will result in\n * rejecting the returned Promise, but will not affect the transaction pool\n * itself.\n */\n processReadTask<T>(readTask: ReadTask<T>): Promise<T> {\n const r = resolver<T>();\n this.#process(this.#readRunner(readTask, r));\n return r.promise;\n }\n\n /**\n * Implements the semantics specified in {@link processReadTask()}.\n *\n * Specifically:\n * * `freeWorker()` is called as soon as the result is produced,\n * before resolving the client-supplied Resolver.\n * * Errors result in rejecting the client-supplied Resolver but\n * do not affect transaction pool.\n */\n #readRunner<T>(readTask: ReadTask<T>, r: Resolver<T>): TaskRunner {\n return {\n run: async (tx, lc, freeWorker) => {\n let result: T;\n try {\n result = await readTask(tx, lc);\n freeWorker();\n r.resolve(result);\n } catch (e) {\n freeWorker();\n r.reject(e);\n }\n return {pending: null};\n },\n rejected: r.reject,\n };\n }\n\n #process(runner: TaskRunner): void {\n assert(!this.#done, 'already set done');\n if (this.#failure) {\n runner.rejected(this.#failure);\n return;\n }\n\n this.#tasks.enqueue(runner);\n\n // Check if the pool size can and should be increased.\n if (this.#numWorkers < this.#maxWorkers) {\n const outstanding = this.#tasks.size();\n\n if (outstanding > this.#numWorkers - this.#numWorking) {\n this.#db && this.#addWorker(this.#db);\n this.#numWorkers++;\n this.#lc.debug?.(`Increased pool size to ${this.#numWorkers}`);\n }\n }\n }\n\n /**\n * Ends all workers with a ROLLBACK. Throws if the pool is already done\n * or aborted.\n */\n abort() {\n this.fail(new RollbackSignal());\n }\n\n /**\n * Signals to all workers to end their transaction once all pending tasks have\n * been completed. 
Throws if the pool is already done or aborted.\n */\n setDone() {\n assert(!this.#done, 'already set done');\n this.#done = true;\n\n for (let i = 0; i < this.#numWorkers; i++) {\n this.#tasks.enqueue('done');\n }\n }\n\n /**\n * An alternative to explicitly calling {@link setDone}, `ref()` increments an internal reference\n * count, and {@link unref} decrements it. When the reference count reaches 0, {@link setDone} is\n * automatically called. A TransactionPool is initialized with a reference count of 1.\n *\n * `ref()` should be called before sharing the pool with another component, and only after the\n * pool has been started with {@link run()}. It must not be called on a TransactionPool that is\n * already done (either via {@link unref()} or {@link setDone()}. (Doing so indicates a logical\n * error in the code.)\n *\n * It follows that:\n * * The creator of the TransactionPool is responsible for running it.\n * * The TransactionPool should be ref'ed before being sharing.\n * * The receiver of the TransactionPool is only responsible for unref'ing it.\n *\n * On the other hand, a transaction pool that fails with a runtime error can still be ref'ed;\n * attempts to use the pool will result in the runtime error as expected.\n */\n // TODO: Get rid of the ref-counting stuff. 
It's no longer needed.\n ref(count = 1) {\n assert(\n this.#db !== undefined && !this.#done,\n `Cannot ref() a TransactionPool that is not running`,\n );\n this.#refCount += count;\n }\n\n /**\n * Decrements the internal reference count, automatically invoking {@link setDone} when it reaches 0.\n */\n unref(count = 1) {\n assert(\n count <= this.#refCount,\n () => `Cannot unref ${count} when refCount is ${this.#refCount}`,\n );\n\n this.#refCount -= count;\n if (this.#refCount === 0) {\n this.setDone();\n }\n }\n\n isRunning(): boolean {\n return this.#db !== undefined && !this.#done && this.#failure === undefined;\n }\n\n /**\n * Signals all workers to fail their transactions with the given {@link err}.\n */\n fail(err: unknown) {\n if (!this.#failure) {\n this.#failure = ensureError(err); // Fail fast: this is checked in the worker loop.\n // Logged for informational purposes. It is the responsibility of\n // higher level logic to classify and handle the exception.\n const level =\n this.#failure instanceof ControlFlowError ? 'debug' : 'info';\n this.#lc[level]?.(this.#failure);\n\n for (let i = 0; i < this.#numWorkers; i++) {\n // Enqueue the Error to terminate any workers waiting for tasks.\n this.#tasks.enqueue(this.#failure);\n }\n }\n }\n}\n\ntype SynchronizeSnapshotTasks = {\n /**\n * The `init` Task for the TransactionPool from which the snapshot originates.\n * The pool must have Mode.SERIALIZABLE, and will be set to READ ONLY by the\n * `exportSnapshot` init task. If the TransactionPool has multiple workers, the\n * first worker will export a snapshot that the others set.\n */\n exportSnapshot: Task;\n\n /**\n * The `cleanup` Task for the TransactionPool from which the snapshot\n * originates. 
This Task will wait for the follower pool to `setSnapshot`\n * to ensure that the snapshot is successfully shared before the originating\n * transaction is closed.\n */\n cleanupExport: Task;\n\n /**\n * The `init` Task for the TransactionPool in which workers will\n * consequently see the same snapshot as that of the first pool. The pool\n * must have Mode.SERIALIZABLE, and will have the ability to perform writes.\n */\n setSnapshot: Task;\n\n /** The ID of the shared snapshot. */\n snapshotID: Promise<string>;\n};\n\n/**\n * Init Tasks for Postgres snapshot synchronization across transactions.\n *\n * https://www.postgresql.org/docs/9.3/functions-admin.html#:~:text=Snapshot%20Synchronization%20Functions,identical%20content%20in%20the%20database.\n */\nexport function synchronizedSnapshots(): SynchronizeSnapshotTasks {\n const {\n promise: snapshotExported,\n resolve: exportSnapshot,\n reject: failExport,\n } = resolver<string>();\n\n const {\n promise: snapshotCaptured,\n resolve: captureSnapshot,\n reject: failCapture,\n } = resolver<unknown>();\n\n // Set by the first worker to run its initTask, who becomes responsible for\n // exporting the snapshot. TODO: Plumb the workerNum and use that instead.\n let firstWorkerRun = false;\n\n // Note: Neither init task should `await`, as processing in each pool can proceed\n // as soon as the statements have been sent to the db. 
However, the `cleanupExport`\n // task must `await` the result of `setSnapshot` to ensure that exporting transaction\n // does not close before the snapshot has been captured.\n return {\n exportSnapshot: tx => {\n if (!firstWorkerRun) {\n firstWorkerRun = true;\n const stmt =\n tx`SELECT pg_export_snapshot() AS snapshot; SET TRANSACTION READ ONLY;`.simple();\n // Intercept the promise to propagate the information to `snapshotExported`.\n stmt.then(result => exportSnapshot(result[0].snapshot), failExport);\n return [stmt]; // Also return the stmt so that it gets awaited (and errors handled).\n }\n return snapshotExported.then(snapshotID => [\n tx.unsafe(`SET TRANSACTION SNAPSHOT '${snapshotID}'`),\n tx`SET TRANSACTION READ ONLY`.simple(),\n ]);\n },\n\n setSnapshot: tx =>\n snapshotExported.then(snapshotID => {\n const stmt = tx.unsafe(`SET TRANSACTION SNAPSHOT '${snapshotID}'`);\n // Intercept the promise to propagate the information to `cleanupExport`.\n stmt.then(captureSnapshot, failCapture);\n return [stmt];\n }),\n\n cleanupExport: async () => {\n await snapshotCaptured;\n return [];\n },\n\n snapshotID: snapshotExported,\n };\n}\n\n/**\n * Returns `init` and `cleanup` {@link Task}s for a TransactionPool that ensure its workers\n * share a single view of the database. 
This is used for View Notifier and View Syncer logic\n * that allows multiple entities to perform parallel reads on the same snapshot of the database.\n */\nexport function sharedSnapshot(): {\n init: Task;\n cleanup: Task;\n snapshotID: Promise<string>;\n} {\n const {\n promise: snapshotExported,\n resolve: exportSnapshot,\n reject: failExport,\n } = resolver<string>();\n\n // Set by the first worker to run its initTask, who becomes responsible for\n // exporting the snapshot.\n let firstWorkerRun = false;\n\n // The LogContext of the exporting worker, used to identify its cleanup call.\n // Each worker receives a unique lc instance (via withContext('tx', id)), so\n // reference equality reliably identifies the exporting worker.\n let exporterLc: LogContext | undefined;\n\n // Set when the exporting worker's cleanup runs, signalling that the snapshot\n // is no longer needed and any subsequently spawned workers should skip their\n // initTask.\n let firstWorkerDone = false;\n\n return {\n init: (tx, lc) => {\n if (!firstWorkerRun) {\n firstWorkerRun = true;\n exporterLc = lc; // Remember which worker is the exporter.\n const stmt = tx`SELECT pg_export_snapshot() AS snapshot;`.simple();\n // Intercept the promise to propagate the information to `snapshotExported`.\n stmt.then(result => exportSnapshot(result[0].snapshot), failExport);\n return [stmt]; // Also return the stmt so that it gets awaited (and errors handled).\n }\n if (!firstWorkerDone) {\n return snapshotExported.then(snapshotID => [\n tx.unsafe(`SET TRANSACTION SNAPSHOT '${snapshotID}'`),\n ]);\n }\n lc.debug?.('All work is done. 
No need to set snapshot');\n return [];\n },\n\n cleanup: (_tx, lc) => {\n // Only the exporting worker's cleanup should disable snapshot-setting.\n // Non-exporter workers may finish early; letting them flip this flag\n // would cause subsequently spawned workers to skip SET TRANSACTION SNAPSHOT\n // and read a newer database view, violating snapshot isolation.\n if (lc === exporterLc) {\n firstWorkerDone = true;\n }\n return [];\n },\n\n snapshotID: snapshotExported,\n };\n}\n\n/**\n * @returns An `init` Task for importing a snapshot from another transaction.\n */\nexport function importSnapshot(snapshotID: string): {\n init: Task;\n imported: Promise<void>;\n} {\n const {promise: imported, resolve, reject} = resolver<void>();\n\n return {\n init: tx => {\n const stmt = tx.unsafe(`SET TRANSACTION SNAPSHOT '${snapshotID}'`);\n stmt.then(() => resolve(), reject);\n return [stmt];\n },\n\n imported,\n };\n}\n\n/**\n * A superclass of Errors used for control flow that is needed to handle\n * another Error but does not constitute an error condition itself (e.g.\n * aborting transactions after a previous one fails). Subclassing this Error\n * will result in lowering the log level from `error` to `debug`.\n */\nexport class ControlFlowError extends Error {\n constructor(cause?: unknown) {\n super();\n this.cause = cause;\n }\n}\n\n/**\n * Internal error used to rollback the worker transaction. 
This is used\n * instead of executing a `ROLLBACK` statement because the postgres.js\n * library will otherwise try to execute an extraneous `COMMIT`, which\n * results in outputting a \"no transaction in progress\" warning to the\n * database logs.\n *\n * Throwing an exception, on the other hand, executes the postgres.js\n * codepath that calls `ROLLBACK` instead.\n */\nclass RollbackSignal extends ControlFlowError {\n readonly name = 'RollbackSignal';\n readonly message = 'rolling back transaction';\n}\n\nfunction ensureError(err: unknown): Error {\n if (err instanceof Error) {\n return err;\n }\n const error = new Error();\n error.cause = err;\n return error;\n}\n\ninterface TaskRunner {\n /**\n * Manages the running of a Task or ReadTask in two phases:\n *\n * - If the task involves blocking, this is done in the worker. Once the\n * blocking is done, `freeWorker()` is invoked to signal that the worker\n * is available to run another task. Note that this should be invoked\n * *before* resolving the result to the calling thread so that a\n * subsequent task can reuse the same worker.\n *\n * - Task statements are executed on the database asynchronously. The final\n * result of this processing is encapsulated in the returned `pending`\n * Promise. 
The worker will await the last pending Promise before closing\n * the transaction.\n *\n * @param freeWorker should be called as soon as all blocking operations are\n * completed in order to return the transaction to the pool.\n * @returns A `pending` Promise indicating when the statements have been\n * processed by the database, allowing the transaction to be closed.\n * This should be `null` if there are no transaction-dependent\n * statements to await.\n */\n run(\n tx: PostgresTransaction,\n lc: LogContext,\n freeWorker: () => void,\n ): Promise<{pending: Promise<void> | null}>;\n\n /**\n * Invoked if the TransactionPool is already in a failed state when the task\n * is requested.\n */\n rejected(reason: unknown): void;\n}\n\nconst IDLE_TIMEOUT_MS = 5_000;\n\n// The keepalive interval is settable by ZERO_TRANSACTION_POOL_KEEPALIVE_MS\n// as an emergency measure and is explicitly not made available as a server\n// option. This value is function of how the zero-cache uses transactions, and\n// should never need to be \"tuned\" or adjusted for different environments.\n//\n// Note that it must be shorter than IDLE_IN_TRANSACTION_SESSION_TIMEOUT_MS\n// with sufficient buffering to account for when the process is blocked by\n// synchronous calls (e.g. to the replica).\nconst KEEPALIVE_TIMEOUT_MS = parseInt(\n process.env.ZERO_TRANSACTION_POOL_KEEPALIVE_MS ?? '5000',\n);\n\nconst KEEPALIVE_TASK: Task = (tx, lc) => {\n lc.debug?.(`sending tx keepalive`);\n return [tx`SELECT 1`.simple()];\n};\n\ntype TimeoutTask = {\n timeoutMs: number;\n task: Task | 'done';\n};\n\ntype TimeoutTasks = {\n forInitialWorkers: TimeoutTask;\n forExtraWorkers: TimeoutTask;\n};\n\n// Production timeout tasks. 
Overridden in tests.\nexport const TIMEOUT_TASKS: TimeoutTasks = {\n forInitialWorkers: {\n timeoutMs: KEEPALIVE_TIMEOUT_MS,\n task: KEEPALIVE_TASK,\n },\n forExtraWorkers: {\n timeoutMs: IDLE_TIMEOUT_MS,\n task: 'done',\n },\n};\n\n// The slice of information from the Query object in Postgres.js that gets logged for debugging.\n// https://github.com/porsager/postgres/blob/f58cd4f3affd3e8ce8f53e42799672d86cd2c70b/src/connection.js#L219\ntype Query = {string: string; parameters: object[]};\n"],"mappings":";;;;;;;;;;;;;;;AAgDA,IAAa,kBAAb,MAA6B;CAC3B;CACA;CACA;CACA;CACA,SAAkB,IAAI,OAAoC;CAC1D,WAAwC,EAAE;CAC1C;CACA;CACA;CACA;CACA,cAAc;CACd;CAEA,YAAY;CACZ,QAAQ;CACR;;;;;;;;;;;;;;;;;CAkBA,YACE,IACA,MACA,MACA,SACA,iBAAiB,GACjB,aAAa,gBACb,eAAe,eACf;AACA,SAAO,iBAAiB,GAAG,kCAAkC;AAC7D,SACE,cAAc,gBACd,uCACD;AAED,QAAA,KAAW;AACX,QAAA,OAAa;AACb,QAAA,OAAa,OAAO,MAAA,WAAiB,KAAK,GAAG,KAAA;AAC7C,QAAA,UAAgB,UAAU,MAAA,WAAiB,QAAQ,GAAG,KAAA;AACtD,QAAA,iBAAuB;AACvB,QAAA,aAAmB;AACnB,QAAA,aAAmB;AACnB,QAAA,cAAoB;;;;;;CAOtB,IAAI,IAAsB;AACxB,SAAO,CAAC,MAAA,IAAU,kBAAkB;AACpC,QAAA,KAAW;AACX,OAAK,IAAI,IAAI,GAAG,IAAI,MAAA,YAAkB,IACpC,OAAA,UAAgB,GAAG;AAErB,SAAO;;;;;;;;;CAUT,kBAAkB,KAAa,OAAe;AAC5C,QAAA,KAAW,MAAA,GAAS,YAAY,KAAK,MAAM;AAE3C,SAAO,EACL,oBAAoB,KAAa,UAC/B,KAAK,kBAAkB,KAAK,MAAM,EACrC;;;;;;;;;;;;;;;;;;;;;;CAuBH,MAAM,OAAO;EACX,MAAM,aAAa,MAAA,QAAc;AACjC,QAAM,QAAQ,IAAI,MAAA,QAAc;AAEhC,MAAI,aAAa,MAAA,QAAc,OAK7B,OAAM,QAAQ,IAAI,MAAA,QAAc;AAElC,QAAA,GAAS,QAAQ,wBAAwB;;CAG3C,WAAW,IAAgB;EACzB,MAAM,KAAK,MAAA,QAAc,SAAS;EAClC,MAAM,KAAK,MAAA,GAAS,YAAY,MAAM,GAAG;EAEzC,MAAM,KACJ,MAAA,QAAc,SAAS,MAAA,iBACnB,MAAA,YAAkB,oBAClB,MAAA,YAAkB;EACxB,MAAM,EAAC,cAAa;EACpB,MAAM,cAAc,GAAG,SAAS,SAAS,SAAS,MAAA,WAAiB,GAAG,KAAK;EAE3E,MAAM,SAAS,OAAO,OAA4B;GAChD,MAAM,QAAQ,YAAY,KAAK;AAC/B,OAAI;AACF,OAAG,QAAQ,sBAAsB;IAEjC,IAAI,OAAsB;IAE1B,MAAM,cAAc,OAAO,WAAuB;AAChD,gBAAW,MAAA,QAAc,MAAA;KACzB,MAAM,EAAC,YAAW,MAAM,OAAO,IAAI,IAAI,UAAU;AAC/C,iBAAW,MAAA,QAAc,MAAA;OACzB;AACF,YAAO,WAAW;;IAGpB,IAAI,OACF,MAAA,QAAe,MAAM,MAAA,MAAY,QAAQ,aAAa,U
AAU;AAElE,QAAI;AACF,YAAO,SAAS,QAAQ;AACtB,UACE,gBAAgB,SACf,SAAS,MAAA,QAAc,MAAA,QAExB,OAAM,MAAA,WAAiB;AAEzB,YAAM,YAAY,KAAK;AAGvB,aAAO,MAAM,MAAA,MAAY,QAAQ,aAAa,UAAU;;cAElD;AAER,SAAI,MAAA,QACF,OAAM,YAAY,MAAA,QAAc;;IAIpC,MAAM,UAAU,YAAY,KAAK,GAAG;AACpC,OAAG,QAAQ,wBAAwB,QAAQ,QAAQ,EAAE,CAAC,MAAM;AAG5D,WAAO;YACA,GAAG;AACV,QAAI,MAAM,MAAA,QACR,MAAK,KAAK,EAAE;AAEd,UAAM;;;EAIV,MAAM,WAAW,MAAM,IAAI,QAAQ,EAAC,MAAM,MAAA,MAAW,CAAC,CACnD,OAAM,MAAK;AACV,OAAI,aAAa,eAGf,IAAG,QAAQ,sBAAsB;OAEjC,OAAM;IAER,CACD,cAAc,MAAA,aAAmB;AAKpC,WAAS,YAAY,GAAG;AAExB,QAAA,QAAc,KAAK,SAAS;AAK5B,MAAI,MAAA,KACF,OAAA,MAAY,QAAQ,OAAO;AAE7B,MAAI,MAAA,QACF,OAAA,MAAY,QAAQ,MAAA,QAAc;;;;;;;;;;;;;;CAgBtC,QAAQ,MAA2B;EACjC,MAAM,IAAI,UAAgB;AAC1B,QAAA,QAAc,MAAA,WAAiB,MAAM,EAAE,CAAC;AACxC,SAAO,EAAE;;CAGX,SAAkB,YAAY,KAAK;CACnC,SAAS;;;;;;;;;;;CAYT,YAAY,MAAY,IAA2B,UAAU,EAAc;AACzE,SAAO;GACL,KAAK,OAAO,IAAI,IAAI,eAAe;IACjC,IAAI;AACJ,QAAI;AACF,aAAQ,MAAM,KAAK,IAAI,GAAG;aACnB,GAAG;AACV,OAAE,SAAS;AACX,WAAM;cACE;AACR,iBAAY;;AAGd,QAAI,MAAM,WAAW,GAAG;AACtB,OAAE,SAAS;AACX,YAAO,EAAC,SAAS,MAAK;;AAuBxB,WAAO,EAAC,SAjBK,MAAM,QAChB,GAAG,SACF,KACG,SAAS,CACT,WAAW;AACV,SAAI,EAAE,MAAA,QAAc,QAAS,GAAG;MAC9B,MAAM,MAAM,MAAA,QAAc,QAAU,IAAI,SAAS;MACjD,MAAM,IAAI;AACV,SAAG,OACD,YAAY,MAAA,MAAY,iBAAiB,YAAY,KAAK,GAAG,MAAA,OAAa,QAAQ,EAAE,CAAC,OACrF,EAAC,WAAW,EAAE,QAAO,CACtB;;MAEH,CACD,OAAM,MAAK,KAAK,KAAK,EAAE,CAAC,EAC7B,YACD,CACqB,KAAK,EAAE,QAAQ,EAAC;;GAExC,UAAU,EAAE;GACb;;;;;;;;CASH,gBAAmB,UAAmC;EACpD,MAAM,IAAI,UAAa;AACvB,QAAA,QAAc,MAAA,WAAiB,UAAU,EAAE,CAAC;AAC5C,SAAO,EAAE;;;;;;;;;;;CAYX,YAAe,UAAuB,GAA4B;AAChE,SAAO;GACL,KAAK,OAAO,IAAI,IAAI,eAAe;IACjC,IAAI;AACJ,QAAI;AACF,cAAS,MAAM,SAAS,IAAI,GAAG;AAC/B,iBAAY;AACZ,OAAE,QAAQ,OAAO;aACV,GAAG;AACV,iBAAY;AACZ,OAAE,OAAO,EAAE;;AAEb,WAAO,EAAC,SAAS,MAAK;;GAExB,UAAU,EAAE;GACb;;CAGH,SAAS,QAA0B;AACjC,SAAO,CAAC,MAAA,MAAY,mBAAmB;AACvC,MAAI,MAAA,SAAe;AACjB,UAAO,SAAS,MAAA,QAAc;AAC9B;;AAGF,QAAA,MAAY,QAAQ,OAAO;AAG3B,MAAI,MAAA,aAAmB,MAAA;OACD,MAAA,MAAY,MAAM,GAEpB,MAAA,aAAmB,MAAA,YAAkB;AACrD,UAAA,MAAY,MAAA,UAAgB,MAAA,GAAS;AACrC,U
AAA;AACA,UAAA,GAAS,QAAQ,0BAA0B,MAAA,aAAmB;;;;;;;;CASpE,QAAQ;AACN,OAAK,KAAK,IAAI,gBAAgB,CAAC;;;;;;CAOjC,UAAU;AACR,SAAO,CAAC,MAAA,MAAY,mBAAmB;AACvC,QAAA,OAAa;AAEb,OAAK,IAAI,IAAI,GAAG,IAAI,MAAA,YAAkB,IACpC,OAAA,MAAY,QAAQ,OAAO;;;;;;;;;;;;;;;;;;;;CAuB/B,IAAI,QAAQ,GAAG;AACb,SACE,MAAA,OAAa,KAAA,KAAa,CAAC,MAAA,MAC3B,qDACD;AACD,QAAA,YAAkB;;;;;CAMpB,MAAM,QAAQ,GAAG;AACf,SACE,SAAS,MAAA,gBACH,gBAAgB,MAAM,oBAAoB,MAAA,WACjD;AAED,QAAA,YAAkB;AAClB,MAAI,MAAA,aAAmB,EACrB,MAAK,SAAS;;CAIlB,YAAqB;AACnB,SAAO,MAAA,OAAa,KAAA,KAAa,CAAC,MAAA,QAAc,MAAA,YAAkB,KAAA;;;;;CAMpE,KAAK,KAAc;AACjB,MAAI,CAAC,MAAA,SAAe;AAClB,SAAA,UAAgB,YAAY,IAAI;GAGhC,MAAM,QACJ,MAAA,mBAAyB,mBAAmB,UAAU;AACxD,SAAA,GAAS,SAAS,MAAA,QAAc;AAEhC,QAAK,IAAI,IAAI,GAAG,IAAI,MAAA,YAAkB,IAEpC,OAAA,MAAY,QAAQ,MAAA,QAAc;;;;;;;AAgK1C,SAAgB,eAAe,YAG7B;CACA,MAAM,EAAC,SAAS,UAAU,SAAS,WAAU,UAAgB;AAE7D,QAAO;EACL,OAAM,OAAM;GACV,MAAM,OAAO,GAAG,OAAO,6BAA6B,WAAW,GAAG;AAClE,QAAK,WAAW,SAAS,EAAE,OAAO;AAClC,UAAO,CAAC,KAAK;;EAGf;EACD;;;;;;;;AASH,IAAa,mBAAb,cAAsC,MAAM;CAC1C,YAAY,OAAiB;AAC3B,SAAO;AACP,OAAK,QAAQ;;;;;;;;;;;;;AAcjB,IAAM,iBAAN,cAA6B,iBAAiB;CAC5C,OAAgB;CAChB,UAAmB;;AAGrB,SAAS,YAAY,KAAqB;AACxC,KAAI,eAAe,MACjB,QAAO;CAET,MAAM,wBAAQ,IAAI,OAAO;AACzB,OAAM,QAAQ;AACd,QAAO;;AAsCT,IAAM,kBAAkB;AAUxB,IAAM,uBAAuB,SAC3B,QAAQ,IAAI,sCAAsC,OACnD;AAED,IAAM,kBAAwB,IAAI,OAAO;AACvC,IAAG,QAAQ,uBAAuB;AAClC,QAAO,CAAC,EAAE,WAAW,QAAQ,CAAC;;AAchC,IAAa,gBAA8B;CACzC,mBAAmB;EACjB,WAAW;EACX,MAAM;EACP;CACD,iBAAiB;EACf,WAAW;EACX,MAAM;EACP;CACF"}
1
+ {"version":3,"file":"transaction-pool.js","names":["#mode","#init","#cleanup","#tasks","#workers","#initialWorkers","#maxWorkers","#timeoutTask","#lc","#stmtRunner","#numWorkers","#db","#addWorker","#start","#stmts","#numWorking","#failure","#done","#process","#readRunner","#refCount"],"sources":["../../../../../zero-cache/src/db/transaction-pool.ts"],"sourcesContent":["import type {LogContext} from '@rocicorp/logger';\nimport {type Resolver, resolver} from '@rocicorp/resolver';\nimport type postgres from 'postgres';\nimport {assert} from '../../../shared/src/asserts.ts';\nimport type {Enum} from '../../../shared/src/enum.ts';\nimport {Queue} from '../../../shared/src/queue.ts';\nimport {promiseVoid} from '../../../shared/src/resolved-promises.ts';\nimport {type PostgresDB, type PostgresTransaction} from '../types/pg.ts';\nimport type * as Mode from './mode-enum.ts';\nimport {runTx} from './run-transaction.ts';\n\ntype Mode = Enum<typeof Mode>;\n\ntype MaybePromise<T> = Promise<T> | T;\n\nexport type Statement =\n | postgres.PendingQuery<(postgres.Row & Iterable<postgres.Row>)[]>\n | postgres.PendingQuery<postgres.Row[]>;\n\n/**\n * A {@link Task} is logic run from within a transaction in a {@link TransactionPool}.\n * It returns a list of `Statements` that the transaction executes asynchronously and\n * awaits when it receives the 'done' signal.\n *\n */\nexport type Task = (\n tx: PostgresTransaction,\n lc: LogContext,\n) => MaybePromise<Statement[]>;\n\n/**\n * A {@link ReadTask} is run from within a transaction, but unlike a {@link Task},\n * the results of a ReadTask are opaque to the TransactionPool and returned to the\n * caller of {@link TransactionPool.processReadTask}.\n */\nexport type ReadTask<T> = (\n tx: PostgresTransaction,\n lc: LogContext,\n) => MaybePromise<T>;\n\n/**\n * A TransactionPool is a pool of one or more {@link postgres.TransactionSql}\n * objects that participate in processing a dynamic queue of tasks.\n *\n * This can be used for 
serializing a set of tasks that arrive asynchronously\n * to a single transaction (for writing) or performing parallel reads across\n * multiple connections at the same snapshot (e.g. read only snapshot transactions).\n */\nexport class TransactionPool {\n #lc: LogContext;\n readonly #mode: Mode;\n readonly #init: TaskRunner | undefined;\n readonly #cleanup: TaskRunner | undefined;\n readonly #tasks = new Queue<TaskRunner | Error | 'done'>();\n readonly #workers: Promise<unknown>[] = [];\n readonly #initialWorkers: number;\n readonly #maxWorkers: number;\n readonly #timeoutTask: TimeoutTasks;\n #numWorkers: number;\n #numWorking = 0;\n #db: PostgresDB | undefined; // set when running. stored to allow adaptive pool sizing.\n\n #refCount = 1;\n #done = false;\n #failure: Error | undefined;\n\n /**\n * @param init A {@link Task} that is run in each Transaction before it begins\n * processing general tasks. This can be used to to set the transaction\n * mode, export/set snapshots, etc. This will be run even if\n * {@link fail} has been called on the pool.\n * @param cleanup A {@link Task} that is run in each Transaction before it closes.\n * This will be run even if {@link fail} has been called, or if a\n * preceding Task threw an Error.\n * @param initialWorkers The initial number of transaction workers to process tasks.\n * This is the steady state number of workers that will be kept\n * alive if the TransactionPool is long lived.\n * This must be greater than 0. Defaults to 1.\n * @param maxWorkers When specified, allows the pool to grow to `maxWorkers`. This\n * must be greater than or equal to `initialWorkers`. 
On-demand\n * workers will be shut down after an idle timeout of 5 seconds.\n */\n constructor(\n lc: LogContext,\n mode: Mode,\n init?: Task,\n cleanup?: Task,\n initialWorkers = 1,\n maxWorkers = initialWorkers,\n timeoutTasks = TIMEOUT_TASKS, // Overridden for tests.\n ) {\n assert(initialWorkers > 0, 'initialWorkers must be positive');\n assert(\n maxWorkers >= initialWorkers,\n 'maxWorkers must be >= initialWorkers',\n );\n\n this.#lc = lc;\n this.#mode = mode;\n this.#init = init ? this.#stmtRunner(init) : undefined;\n this.#cleanup = cleanup ? this.#stmtRunner(cleanup) : undefined;\n this.#initialWorkers = initialWorkers;\n this.#numWorkers = initialWorkers;\n this.#maxWorkers = maxWorkers;\n this.#timeoutTask = timeoutTasks;\n }\n\n /**\n * Starts the pool of workers to process Tasks with transactions opened from the\n * specified {@link db}.\n */\n run(db: PostgresDB): this {\n assert(!this.#db, 'already running');\n this.#db = db;\n for (let i = 0; i < this.#numWorkers; i++) {\n this.#addWorker(db);\n }\n return this;\n }\n\n /**\n * Adds context parameters to internal LogContext. This is useful for context values that\n * are not known when the TransactionPool is constructed (e.g. determined after a database\n * call when the pool is running).\n *\n * Returns an object that can be used to add more parameters.\n */\n addLoggingContext(key: string, value: string) {\n this.#lc = this.#lc.withContext(key, value);\n\n return {\n addLoggingContext: (key: string, value: string) =>\n this.addLoggingContext(key, value),\n };\n }\n\n /**\n * Returns a promise that:\n *\n * * resolves after {@link setDone} has been called (or the the pool as been {@link unref}ed\n * to a 0 ref count), once all added tasks have been processed and all transactions have been\n * committed or closed.\n *\n * * rejects if processing was aborted with {@link fail} or if processing any of\n * the tasks resulted in an error. 
All uncommitted transactions will have been\n * rolled back.\n *\n * Note that partial failures are possible if processing writes with multiple workers\n * (e.g. `setDone` is called, allowing some workers to commit, after which other\n * workers encounter errors). Using a TransactionPool in this manner does not make\n * sense in terms of transactional semantics, and is thus not recommended.\n *\n * For reads, however, multiple workers is useful for performing parallel reads\n * at the same snapshot. See {@link synchronizedSnapshots} for an example.\n * Resolves or rejects when all workers are done or failed.\n */\n async done() {\n const numWorkers = this.#workers.length;\n await Promise.all(this.#workers);\n\n if (numWorkers < this.#workers.length) {\n // If workers were added after the initial set, they must be awaited to ensure\n // that the results (i.e. rejections) of all workers are accounted for. This only\n // needs to be re-done once, because the fact that the first `await` completed\n // guarantees that the pool is in a terminal state and no new workers can be added.\n await Promise.all(this.#workers);\n }\n this.#lc.debug?.('transaction pool done');\n\n const elapsed = performance.now() - this.#start;\n if (elapsed > 60_000) {\n if (this.#stmts > 0) {\n this.#lc.warn?.(\n `finished long transaction with ${this.#stmts} statements (${elapsed.toFixed(3)} ms)`,\n );\n } else {\n this.#lc.warn?.(\n `finished long read transaction (${elapsed.toFixed(3)} ms)`,\n );\n }\n }\n }\n\n #addWorker(db: PostgresDB) {\n const id = this.#workers.length + 1;\n const lc = this.#lc.withContext('tx', id);\n\n const tt: TimeoutTask =\n this.#workers.length < this.#initialWorkers\n ? this.#timeoutTask.forInitialWorkers\n : this.#timeoutTask.forExtraWorkers;\n const {timeoutMs} = tt;\n const timeoutTask = tt.task === 'done' ? 
'done' : this.#stmtRunner(tt.task);\n\n const worker = async (tx: PostgresTransaction) => {\n const start = performance.now();\n try {\n lc.debug?.('started transaction');\n\n let last: Promise<void> = promiseVoid;\n\n const executeTask = async (runner: TaskRunner) => {\n runner !== this.#init && this.#numWorking++;\n const {pending} = await runner.run(tx, lc, () => {\n runner !== this.#init && this.#numWorking--;\n });\n last = pending ?? last;\n };\n\n let task: TaskRunner | Error | 'done' =\n this.#init ?? (await this.#tasks.dequeue(timeoutTask, timeoutMs));\n\n try {\n while (task !== 'done') {\n if (\n task instanceof Error ||\n (task !== this.#init && this.#failure)\n ) {\n throw this.#failure ?? task;\n }\n await executeTask(task);\n\n // await the next task.\n task = await this.#tasks.dequeue(timeoutTask, timeoutMs);\n }\n } finally {\n // Execute the cleanup task even on failure.\n if (this.#cleanup) {\n await executeTask(this.#cleanup);\n }\n }\n\n const elapsed = performance.now() - start;\n lc.debug?.(`closing transaction (${elapsed.toFixed(3)} ms)`);\n // Given the semantics of a Postgres transaction, the last statement\n // will only succeed if all of the preceding statements succeeded.\n return last;\n } catch (e) {\n if (e !== this.#failure) {\n this.fail(e); // A failure in any worker should fail the pool.\n }\n throw e;\n }\n };\n\n const workerTx = runTx(db, worker, {mode: this.#mode})\n .catch(e => {\n if (e instanceof RollbackSignal) {\n // A RollbackSignal is used to gracefully rollback the postgres.js\n // transaction block. 
It should not be thrown up to the application.\n lc.debug?.('aborted transaction');\n } else {\n throw e;\n }\n })\n .finally(() => this.#numWorkers--);\n\n // Attach a rejection handler immediately to prevent unhandledRejections.\n // The application will handle errors when it awaits processReadTask()\n // or done().\n workerTx.catch(() => {});\n\n this.#workers.push(workerTx);\n\n // After adding the worker, enqueue a terminal signal if we are in either of the\n // terminal states (both of which prevent more tasks from being enqueued), to ensure\n // that the added worker eventually exits.\n if (this.#done) {\n this.#tasks.enqueue('done');\n }\n if (this.#failure) {\n this.#tasks.enqueue(this.#failure);\n }\n }\n\n /**\n * Processes the statements produced by the specified {@link Task},\n * returning a Promise that resolves when the statements are either processed\n * by the database or rejected.\n *\n * Note that statement failures will result in failing the entire\n * TransactionPool (per transaction semantics). However, the returned Promise\n * itself will resolve rather than reject. As such, it is fine to ignore\n * returned Promises in order to pipeline requests to the database. It is\n * recommended to occasionally await them (e.g. 
after some threshold) in\n * order to avoid memory blowup in the case of database slowness.\n */\n process(task: Task): Promise<void> {\n const r = resolver<void>();\n this.#process(this.#stmtRunner(task, r));\n return r.promise;\n }\n\n readonly #start = performance.now();\n #stmts = 0;\n\n /**\n * Implements the semantics specified in {@link process()}.\n *\n * Specifically:\n * * `freeWorker()` is called as soon as the statements are produced,\n * allowing them to be pipelined to the database.\n * * Statement errors result in failing the transaction pool.\n * * The client-supplied Resolver resolves on success or failure;\n * it is never rejected.\n */\n #stmtRunner(task: Task, r: {resolve: () => void} = resolver()): TaskRunner {\n return {\n run: async (tx, lc, freeWorker) => {\n let stmts: Statement[];\n try {\n stmts = await task(tx, lc);\n } catch (e) {\n r.resolve();\n throw e;\n } finally {\n freeWorker();\n }\n\n if (stmts.length === 0) {\n r.resolve();\n return {pending: null};\n }\n\n // Execute the statements (i.e. send to the db) immediately.\n // The last result is returned for the worker to await before\n // closing the transaction.\n const last = stmts.reduce(\n (_, stmt) =>\n stmt\n .execute()\n .then(() => {\n if (++this.#stmts % 1000 === 0) {\n const log = this.#stmts % 10000 === 0 ? 'info' : 'debug';\n const q = stmt as unknown as Query;\n lc[log]?.(\n `executed ${this.#stmts}th statement (${(performance.now() - this.#start).toFixed(3)} ms)`,\n {statement: q.string},\n );\n }\n })\n .catch(e => this.fail(e)),\n promiseVoid,\n );\n return {pending: last.then(r.resolve)};\n },\n rejected: r.resolve,\n };\n }\n\n /**\n * Processes and returns the result of executing the {@link ReadTask} from\n * within the transaction. 
An error thrown by the task will result in\n * rejecting the returned Promise, but will not affect the transaction pool\n * itself.\n */\n processReadTask<T>(readTask: ReadTask<T>): Promise<T> {\n const r = resolver<T>();\n this.#process(this.#readRunner(readTask, r));\n return r.promise;\n }\n\n /**\n * Implements the semantics specified in {@link processReadTask()}.\n *\n * Specifically:\n * * `freeWorker()` is called as soon as the result is produced,\n * before resolving the client-supplied Resolver.\n * * Errors result in rejecting the client-supplied Resolver but\n * do not affect transaction pool.\n */\n #readRunner<T>(readTask: ReadTask<T>, r: Resolver<T>): TaskRunner {\n return {\n run: async (tx, lc, freeWorker) => {\n let result: T;\n try {\n result = await readTask(tx, lc);\n freeWorker();\n r.resolve(result);\n } catch (e) {\n freeWorker();\n r.reject(e);\n }\n return {pending: null};\n },\n rejected: r.reject,\n };\n }\n\n #process(runner: TaskRunner): void {\n assert(!this.#done, 'already set done');\n if (this.#failure) {\n runner.rejected(this.#failure);\n return;\n }\n\n this.#tasks.enqueue(runner);\n\n // Check if the pool size can and should be increased.\n if (this.#numWorkers < this.#maxWorkers) {\n const outstanding = this.#tasks.size();\n\n if (outstanding > this.#numWorkers - this.#numWorking) {\n this.#db && this.#addWorker(this.#db);\n this.#numWorkers++;\n this.#lc.debug?.(`Increased pool size to ${this.#numWorkers}`);\n }\n }\n }\n\n /**\n * Ends all workers with a ROLLBACK. Throws if the pool is already done\n * or aborted.\n */\n abort() {\n this.fail(new RollbackSignal());\n }\n\n /**\n * Signals to all workers to end their transaction once all pending tasks have\n * been completed. 
Throws if the pool is already done or aborted.\n */\n setDone() {\n assert(!this.#done, 'already set done');\n this.#done = true;\n\n for (let i = 0; i < this.#numWorkers; i++) {\n this.#tasks.enqueue('done');\n }\n }\n\n /**\n * An alternative to explicitly calling {@link setDone}, `ref()` increments an internal reference\n * count, and {@link unref} decrements it. When the reference count reaches 0, {@link setDone} is\n * automatically called. A TransactionPool is initialized with a reference count of 1.\n *\n * `ref()` should be called before sharing the pool with another component, and only after the\n * pool has been started with {@link run()}. It must not be called on a TransactionPool that is\n * already done (either via {@link unref()} or {@link setDone()}. (Doing so indicates a logical\n * error in the code.)\n *\n * It follows that:\n * * The creator of the TransactionPool is responsible for running it.\n * * The TransactionPool should be ref'ed before being sharing.\n * * The receiver of the TransactionPool is only responsible for unref'ing it.\n *\n * On the other hand, a transaction pool that fails with a runtime error can still be ref'ed;\n * attempts to use the pool will result in the runtime error as expected.\n */\n // TODO: Get rid of the ref-counting stuff. 
It's no longer needed.\n ref(count = 1) {\n assert(\n this.#db !== undefined && !this.#done,\n `Cannot ref() a TransactionPool that is not running`,\n );\n this.#refCount += count;\n }\n\n /**\n * Decrements the internal reference count, automatically invoking {@link setDone} when it reaches 0.\n */\n unref(count = 1) {\n assert(\n count <= this.#refCount,\n () => `Cannot unref ${count} when refCount is ${this.#refCount}`,\n );\n\n this.#refCount -= count;\n if (this.#refCount === 0) {\n this.setDone();\n }\n }\n\n isRunning(): boolean {\n return this.#db !== undefined && !this.#done && this.#failure === undefined;\n }\n\n /**\n * Signals all workers to fail their transactions with the given {@link err}.\n */\n fail(err: unknown) {\n if (!this.#failure) {\n this.#failure = ensureError(err); // Fail fast: this is checked in the worker loop.\n // Logged for informational purposes. It is the responsibility of\n // higher level logic to classify and handle the exception.\n const level =\n this.#failure instanceof ControlFlowError ? 'debug' : 'info';\n this.#lc[level]?.(this.#failure);\n\n for (let i = 0; i < this.#numWorkers; i++) {\n // Enqueue the Error to terminate any workers waiting for tasks.\n this.#tasks.enqueue(this.#failure);\n }\n }\n }\n}\n\ntype SynchronizeSnapshotTasks = {\n /**\n * The `init` Task for the TransactionPool from which the snapshot originates.\n * The pool must have Mode.SERIALIZABLE, and will be set to READ ONLY by the\n * `exportSnapshot` init task. If the TransactionPool has multiple workers, the\n * first worker will export a snapshot that the others set.\n */\n exportSnapshot: Task;\n\n /**\n * The `cleanup` Task for the TransactionPool from which the snapshot\n * originates. 
This Task will wait for the follower pool to `setSnapshot`\n * to ensure that the snapshot is successfully shared before the originating\n * transaction is closed.\n */\n cleanupExport: Task;\n\n /**\n * The `init` Task for the TransactionPool in which workers will\n * consequently see the same snapshot as that of the first pool. The pool\n * must have Mode.SERIALIZABLE, and will have the ability to perform writes.\n */\n setSnapshot: Task;\n\n /** The ID of the shared snapshot. */\n snapshotID: Promise<string>;\n};\n\n/**\n * Init Tasks for Postgres snapshot synchronization across transactions.\n *\n * https://www.postgresql.org/docs/9.3/functions-admin.html#:~:text=Snapshot%20Synchronization%20Functions,identical%20content%20in%20the%20database.\n */\nexport function synchronizedSnapshots(): SynchronizeSnapshotTasks {\n const {\n promise: snapshotExported,\n resolve: exportSnapshot,\n reject: failExport,\n } = resolver<string>();\n\n const {\n promise: snapshotCaptured,\n resolve: captureSnapshot,\n reject: failCapture,\n } = resolver<unknown>();\n\n // Set by the first worker to run its initTask, who becomes responsible for\n // exporting the snapshot. TODO: Plumb the workerNum and use that instead.\n let firstWorkerRun = false;\n\n // Note: Neither init task should `await`, as processing in each pool can proceed\n // as soon as the statements have been sent to the db. 
However, the `cleanupExport`\n // task must `await` the result of `setSnapshot` to ensure that exporting transaction\n // does not close before the snapshot has been captured.\n return {\n exportSnapshot: tx => {\n if (!firstWorkerRun) {\n firstWorkerRun = true;\n const stmt =\n tx`SELECT pg_export_snapshot() AS snapshot; SET TRANSACTION READ ONLY;`.simple();\n // Intercept the promise to propagate the information to `snapshotExported`.\n stmt.then(result => exportSnapshot(result[0].snapshot), failExport);\n return [stmt]; // Also return the stmt so that it gets awaited (and errors handled).\n }\n return snapshotExported.then(snapshotID => [\n tx.unsafe(`SET TRANSACTION SNAPSHOT '${snapshotID}'`),\n tx`SET TRANSACTION READ ONLY`.simple(),\n ]);\n },\n\n setSnapshot: tx =>\n snapshotExported.then(snapshotID => {\n const stmt = tx.unsafe(`SET TRANSACTION SNAPSHOT '${snapshotID}'`);\n // Intercept the promise to propagate the information to `cleanupExport`.\n stmt.then(captureSnapshot, failCapture);\n return [stmt];\n }),\n\n cleanupExport: async () => {\n await snapshotCaptured;\n return [];\n },\n\n snapshotID: snapshotExported,\n };\n}\n\n/**\n * Returns `init` and `cleanup` {@link Task}s for a TransactionPool that ensure its workers\n * share a single view of the database. 
This is used for View Notifier and View Syncer logic\n * that allows multiple entities to perform parallel reads on the same snapshot of the database.\n */\nexport function sharedSnapshot(): {\n init: Task;\n cleanup: Task;\n snapshotID: Promise<string>;\n} {\n const {\n promise: snapshotExported,\n resolve: exportSnapshot,\n reject: failExport,\n } = resolver<string>();\n\n // Set by the first worker to run its initTask, who becomes responsible for\n // exporting the snapshot.\n let firstWorkerRun = false;\n\n // The LogContext of the exporting worker, used to identify its cleanup call.\n // Each worker receives a unique lc instance (via withContext('tx', id)), so\n // reference equality reliably identifies the exporting worker.\n let exporterLc: LogContext | undefined;\n\n // Set when the exporting worker's cleanup runs, signalling that the snapshot\n // is no longer needed and any subsequently spawned workers should skip their\n // initTask.\n let firstWorkerDone = false;\n\n return {\n init: (tx, lc) => {\n if (!firstWorkerRun) {\n firstWorkerRun = true;\n exporterLc = lc; // Remember which worker is the exporter.\n const stmt = tx`SELECT pg_export_snapshot() AS snapshot;`.simple();\n // Intercept the promise to propagate the information to `snapshotExported`.\n stmt.then(result => exportSnapshot(result[0].snapshot), failExport);\n return [stmt]; // Also return the stmt so that it gets awaited (and errors handled).\n }\n if (!firstWorkerDone) {\n return snapshotExported.then(snapshotID => [\n tx.unsafe(`SET TRANSACTION SNAPSHOT '${snapshotID}'`),\n ]);\n }\n lc.debug?.('All work is done. 
No need to set snapshot');\n return [];\n },\n\n cleanup: (_tx, lc) => {\n // Only the exporting worker's cleanup should disable snapshot-setting.\n // Non-exporter workers may finish early; letting them flip this flag\n // would cause subsequently spawned workers to skip SET TRANSACTION SNAPSHOT\n // and read a newer database view, violating snapshot isolation.\n if (lc === exporterLc) {\n firstWorkerDone = true;\n }\n return [];\n },\n\n snapshotID: snapshotExported,\n };\n}\n\n/**\n * @returns An `init` Task for importing a snapshot from another transaction.\n */\nexport function importSnapshot(snapshotID: string): {\n init: Task;\n imported: Promise<void>;\n} {\n const {promise: imported, resolve, reject} = resolver<void>();\n\n return {\n init: tx => {\n const stmt = tx.unsafe(`SET TRANSACTION SNAPSHOT '${snapshotID}'`);\n stmt.then(() => resolve(), reject);\n return [stmt];\n },\n\n imported,\n };\n}\n\n/**\n * A superclass of Errors used for control flow that is needed to handle\n * another Error but does not constitute an error condition itself (e.g.\n * aborting transactions after a previous one fails). Subclassing this Error\n * will result in lowering the log level from `error` to `debug`.\n */\nexport class ControlFlowError extends Error {\n constructor(cause?: unknown) {\n super();\n this.cause = cause;\n }\n}\n\n/**\n * Internal error used to rollback the worker transaction. 
This is used\n * instead of executing a `ROLLBACK` statement because the postgres.js\n * library will otherwise try to execute an extraneous `COMMIT`, which\n * results in outputting a \"no transaction in progress\" warning to the\n * database logs.\n *\n * Throwing an exception, on the other hand, executes the postgres.js\n * codepath that calls `ROLLBACK` instead.\n */\nclass RollbackSignal extends ControlFlowError {\n readonly name = 'RollbackSignal';\n readonly message = 'rolling back transaction';\n}\n\nfunction ensureError(err: unknown): Error {\n if (err instanceof Error) {\n return err;\n }\n const error = new Error();\n error.cause = err;\n return error;\n}\n\ninterface TaskRunner {\n /**\n * Manages the running of a Task or ReadTask in two phases:\n *\n * - If the task involves blocking, this is done in the worker. Once the\n * blocking is done, `freeWorker()` is invoked to signal that the worker\n * is available to run another task. Note that this should be invoked\n * *before* resolving the result to the calling thread so that a\n * subsequent task can reuse the same worker.\n *\n * - Task statements are executed on the database asynchronously. The final\n * result of this processing is encapsulated in the returned `pending`\n * Promise. 
The worker will await the last pending Promise before closing\n * the transaction.\n *\n * @param freeWorker should be called as soon as all blocking operations are\n * completed in order to return the transaction to the pool.\n * @returns A `pending` Promise indicating when the statements have been\n * processed by the database, allowing the transaction to be closed.\n * This should be `null` if there are no transaction-dependent\n * statements to await.\n */\n run(\n tx: PostgresTransaction,\n lc: LogContext,\n freeWorker: () => void,\n ): Promise<{pending: Promise<void> | null}>;\n\n /**\n * Invoked if the TransactionPool is already in a failed state when the task\n * is requested.\n */\n rejected(reason: unknown): void;\n}\n\nconst IDLE_TIMEOUT_MS = 5_000;\n\n// The keepalive interval is settable by ZERO_TRANSACTION_POOL_KEEPALIVE_MS\n// as an emergency measure and is explicitly not made available as a server\n// option. This value is function of how the zero-cache uses transactions, and\n// should never need to be \"tuned\" or adjusted for different environments.\n//\n// Note that it must be shorter than IDLE_IN_TRANSACTION_SESSION_TIMEOUT_MS\n// with sufficient buffering to account for when the process is blocked by\n// synchronous calls (e.g. to the replica).\nconst KEEPALIVE_TIMEOUT_MS = parseInt(\n process.env.ZERO_TRANSACTION_POOL_KEEPALIVE_MS ?? '5000',\n);\n\nconst KEEPALIVE_TASK: Task = (tx, lc) => {\n lc.debug?.(`sending tx keepalive`);\n return [tx`SELECT 1`.simple()];\n};\n\ntype TimeoutTask = {\n timeoutMs: number;\n task: Task | 'done';\n};\n\ntype TimeoutTasks = {\n forInitialWorkers: TimeoutTask;\n forExtraWorkers: TimeoutTask;\n};\n\n// Production timeout tasks. 
Overridden in tests.\nexport const TIMEOUT_TASKS: TimeoutTasks = {\n forInitialWorkers: {\n timeoutMs: KEEPALIVE_TIMEOUT_MS,\n task: KEEPALIVE_TASK,\n },\n forExtraWorkers: {\n timeoutMs: IDLE_TIMEOUT_MS,\n task: 'done',\n },\n};\n\n// The slice of information from the Query object in Postgres.js that gets logged for debugging.\n// https://github.com/porsager/postgres/blob/f58cd4f3affd3e8ce8f53e42799672d86cd2c70b/src/connection.js#L219\ntype Query = {string: string; parameters: object[]};\n"],"mappings":";;;;;;;;;;;;;;;AAgDA,IAAa,kBAAb,MAA6B;CAC3B;CACA;CACA;CACA;CACA,SAAkB,IAAI,OAAoC;CAC1D,WAAwC,EAAE;CAC1C;CACA;CACA;CACA;CACA,cAAc;CACd;CAEA,YAAY;CACZ,QAAQ;CACR;;;;;;;;;;;;;;;;;CAkBA,YACE,IACA,MACA,MACA,SACA,iBAAiB,GACjB,aAAa,gBACb,eAAe,eACf;AACA,SAAO,iBAAiB,GAAG,kCAAkC;AAC7D,SACE,cAAc,gBACd,uCACD;AAED,QAAA,KAAW;AACX,QAAA,OAAa;AACb,QAAA,OAAa,OAAO,MAAA,WAAiB,KAAK,GAAG,KAAA;AAC7C,QAAA,UAAgB,UAAU,MAAA,WAAiB,QAAQ,GAAG,KAAA;AACtD,QAAA,iBAAuB;AACvB,QAAA,aAAmB;AACnB,QAAA,aAAmB;AACnB,QAAA,cAAoB;;;;;;CAOtB,IAAI,IAAsB;AACxB,SAAO,CAAC,MAAA,IAAU,kBAAkB;AACpC,QAAA,KAAW;AACX,OAAK,IAAI,IAAI,GAAG,IAAI,MAAA,YAAkB,IACpC,OAAA,UAAgB,GAAG;AAErB,SAAO;;;;;;;;;CAUT,kBAAkB,KAAa,OAAe;AAC5C,QAAA,KAAW,MAAA,GAAS,YAAY,KAAK,MAAM;AAE3C,SAAO,EACL,oBAAoB,KAAa,UAC/B,KAAK,kBAAkB,KAAK,MAAM,EACrC;;;;;;;;;;;;;;;;;;;;;;CAuBH,MAAM,OAAO;EACX,MAAM,aAAa,MAAA,QAAc;AACjC,QAAM,QAAQ,IAAI,MAAA,QAAc;AAEhC,MAAI,aAAa,MAAA,QAAc,OAK7B,OAAM,QAAQ,IAAI,MAAA,QAAc;AAElC,QAAA,GAAS,QAAQ,wBAAwB;EAEzC,MAAM,UAAU,YAAY,KAAK,GAAG,MAAA;AACpC,MAAI,UAAU,IACZ,KAAI,MAAA,QAAc,EAChB,OAAA,GAAS,OACP,kCAAkC,MAAA,MAAY,eAAe,QAAQ,QAAQ,EAAE,CAAC,MACjF;MAED,OAAA,GAAS,OACP,mCAAmC,QAAQ,QAAQ,EAAE,CAAC,MACvD;;CAKP,WAAW,IAAgB;EACzB,MAAM,KAAK,MAAA,QAAc,SAAS;EAClC,MAAM,KAAK,MAAA,GAAS,YAAY,MAAM,GAAG;EAEzC,MAAM,KACJ,MAAA,QAAc,SAAS,MAAA,iBACnB,MAAA,YAAkB,oBAClB,MAAA,YAAkB;EACxB,MAAM,EAAC,cAAa;EACpB,MAAM,cAAc,GAAG,SAAS,SAAS,SAAS,MAAA,WAAiB,GAAG,KAAK;EAE3E,MAAM,SAAS,OAAO,OAA4B;GAChD,MAAM,QAAQ,YAAY,KAAK;AAC/B,OAAI;AACF,OAAG,QAAQ,sBAAsB;IAEjC,IAAI,OAAsB;IAE1B,MA
AM,cAAc,OAAO,WAAuB;AAChD,gBAAW,MAAA,QAAc,MAAA;KACzB,MAAM,EAAC,YAAW,MAAM,OAAO,IAAI,IAAI,UAAU;AAC/C,iBAAW,MAAA,QAAc,MAAA;OACzB;AACF,YAAO,WAAW;;IAGpB,IAAI,OACF,MAAA,QAAe,MAAM,MAAA,MAAY,QAAQ,aAAa,UAAU;AAElE,QAAI;AACF,YAAO,SAAS,QAAQ;AACtB,UACE,gBAAgB,SACf,SAAS,MAAA,QAAc,MAAA,QAExB,OAAM,MAAA,WAAiB;AAEzB,YAAM,YAAY,KAAK;AAGvB,aAAO,MAAM,MAAA,MAAY,QAAQ,aAAa,UAAU;;cAElD;AAER,SAAI,MAAA,QACF,OAAM,YAAY,MAAA,QAAc;;IAIpC,MAAM,UAAU,YAAY,KAAK,GAAG;AACpC,OAAG,QAAQ,wBAAwB,QAAQ,QAAQ,EAAE,CAAC,MAAM;AAG5D,WAAO;YACA,GAAG;AACV,QAAI,MAAM,MAAA,QACR,MAAK,KAAK,EAAE;AAEd,UAAM;;;EAIV,MAAM,WAAW,MAAM,IAAI,QAAQ,EAAC,MAAM,MAAA,MAAW,CAAC,CACnD,OAAM,MAAK;AACV,OAAI,aAAa,eAGf,IAAG,QAAQ,sBAAsB;OAEjC,OAAM;IAER,CACD,cAAc,MAAA,aAAmB;AAKpC,WAAS,YAAY,GAAG;AAExB,QAAA,QAAc,KAAK,SAAS;AAK5B,MAAI,MAAA,KACF,OAAA,MAAY,QAAQ,OAAO;AAE7B,MAAI,MAAA,QACF,OAAA,MAAY,QAAQ,MAAA,QAAc;;;;;;;;;;;;;;CAgBtC,QAAQ,MAA2B;EACjC,MAAM,IAAI,UAAgB;AAC1B,QAAA,QAAc,MAAA,WAAiB,MAAM,EAAE,CAAC;AACxC,SAAO,EAAE;;CAGX,SAAkB,YAAY,KAAK;CACnC,SAAS;;;;;;;;;;;CAYT,YAAY,MAAY,IAA2B,UAAU,EAAc;AACzE,SAAO;GACL,KAAK,OAAO,IAAI,IAAI,eAAe;IACjC,IAAI;AACJ,QAAI;AACF,aAAQ,MAAM,KAAK,IAAI,GAAG;aACnB,GAAG;AACV,OAAE,SAAS;AACX,WAAM;cACE;AACR,iBAAY;;AAGd,QAAI,MAAM,WAAW,GAAG;AACtB,OAAE,SAAS;AACX,YAAO,EAAC,SAAS,MAAK;;AAuBxB,WAAO,EAAC,SAjBK,MAAM,QAChB,GAAG,SACF,KACG,SAAS,CACT,WAAW;AACV,SAAI,EAAE,MAAA,QAAc,QAAS,GAAG;MAC9B,MAAM,MAAM,MAAA,QAAc,QAAU,IAAI,SAAS;MACjD,MAAM,IAAI;AACV,SAAG,OACD,YAAY,MAAA,MAAY,iBAAiB,YAAY,KAAK,GAAG,MAAA,OAAa,QAAQ,EAAE,CAAC,OACrF,EAAC,WAAW,EAAE,QAAO,CACtB;;MAEH,CACD,OAAM,MAAK,KAAK,KAAK,EAAE,CAAC,EAC7B,YACD,CACqB,KAAK,EAAE,QAAQ,EAAC;;GAExC,UAAU,EAAE;GACb;;;;;;;;CASH,gBAAmB,UAAmC;EACpD,MAAM,IAAI,UAAa;AACvB,QAAA,QAAc,MAAA,WAAiB,UAAU,EAAE,CAAC;AAC5C,SAAO,EAAE;;;;;;;;;;;CAYX,YAAe,UAAuB,GAA4B;AAChE,SAAO;GACL,KAAK,OAAO,IAAI,IAAI,eAAe;IACjC,IAAI;AACJ,QAAI;AACF,cAAS,MAAM,SAAS,IAAI,GAAG;AAC/B,iBAAY;AACZ,OAAE,QAAQ,OAAO;aACV,GAAG;AACV,iBAAY;AACZ,OAAE,OAAO,EAAE;;AAEb,WAAO,EAAC,SAAS,MAAK;;GAExB,UAAU,EAAE;GACb;;CAGH,SAAS,QAA0B;AACjC,SAAO,CAAC,MAAA,MAAY,mBAAmB;AACvC
,MAAI,MAAA,SAAe;AACjB,UAAO,SAAS,MAAA,QAAc;AAC9B;;AAGF,QAAA,MAAY,QAAQ,OAAO;AAG3B,MAAI,MAAA,aAAmB,MAAA;OACD,MAAA,MAAY,MAAM,GAEpB,MAAA,aAAmB,MAAA,YAAkB;AACrD,UAAA,MAAY,MAAA,UAAgB,MAAA,GAAS;AACrC,UAAA;AACA,UAAA,GAAS,QAAQ,0BAA0B,MAAA,aAAmB;;;;;;;;CASpE,QAAQ;AACN,OAAK,KAAK,IAAI,gBAAgB,CAAC;;;;;;CAOjC,UAAU;AACR,SAAO,CAAC,MAAA,MAAY,mBAAmB;AACvC,QAAA,OAAa;AAEb,OAAK,IAAI,IAAI,GAAG,IAAI,MAAA,YAAkB,IACpC,OAAA,MAAY,QAAQ,OAAO;;;;;;;;;;;;;;;;;;;;CAuB/B,IAAI,QAAQ,GAAG;AACb,SACE,MAAA,OAAa,KAAA,KAAa,CAAC,MAAA,MAC3B,qDACD;AACD,QAAA,YAAkB;;;;;CAMpB,MAAM,QAAQ,GAAG;AACf,SACE,SAAS,MAAA,gBACH,gBAAgB,MAAM,oBAAoB,MAAA,WACjD;AAED,QAAA,YAAkB;AAClB,MAAI,MAAA,aAAmB,EACrB,MAAK,SAAS;;CAIlB,YAAqB;AACnB,SAAO,MAAA,OAAa,KAAA,KAAa,CAAC,MAAA,QAAc,MAAA,YAAkB,KAAA;;;;;CAMpE,KAAK,KAAc;AACjB,MAAI,CAAC,MAAA,SAAe;AAClB,SAAA,UAAgB,YAAY,IAAI;GAGhC,MAAM,QACJ,MAAA,mBAAyB,mBAAmB,UAAU;AACxD,SAAA,GAAS,SAAS,MAAA,QAAc;AAEhC,QAAK,IAAI,IAAI,GAAG,IAAI,MAAA,YAAkB,IAEpC,OAAA,MAAY,QAAQ,MAAA,QAAc;;;;;;;AAgK1C,SAAgB,eAAe,YAG7B;CACA,MAAM,EAAC,SAAS,UAAU,SAAS,WAAU,UAAgB;AAE7D,QAAO;EACL,OAAM,OAAM;GACV,MAAM,OAAO,GAAG,OAAO,6BAA6B,WAAW,GAAG;AAClE,QAAK,WAAW,SAAS,EAAE,OAAO;AAClC,UAAO,CAAC,KAAK;;EAGf;EACD;;;;;;;;AASH,IAAa,mBAAb,cAAsC,MAAM;CAC1C,YAAY,OAAiB;AAC3B,SAAO;AACP,OAAK,QAAQ;;;;;;;;;;;;;AAcjB,IAAM,iBAAN,cAA6B,iBAAiB;CAC5C,OAAgB;CAChB,UAAmB;;AAGrB,SAAS,YAAY,KAAqB;AACxC,KAAI,eAAe,MACjB,QAAO;CAET,MAAM,wBAAQ,IAAI,OAAO;AACzB,OAAM,QAAQ;AACd,QAAO;;AAsCT,IAAM,kBAAkB;AAUxB,IAAM,uBAAuB,SAC3B,QAAQ,IAAI,sCAAsC,OACnD;AAED,IAAM,kBAAwB,IAAI,OAAO;AACvC,IAAG,QAAQ,uBAAuB;AAClC,QAAO,CAAC,EAAE,WAAW,QAAQ,CAAC;;AAchC,IAAa,gBAA8B;CACzC,mBAAmB;EACjB,WAAW;EACX,MAAM;EACP;CACD,iBAAiB;EACf,WAAW;EACX,MAAM;EACP;CACF"}
@@ -1 +1 @@
1
- {"version":3,"file":"warmup.d.ts","sourceRoot":"","sources":["../../../../../zero-cache/src/db/warmup.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,kBAAkB,CAAC;AACjD,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,gBAAgB,CAAC;AAE/C,wBAAsB,iBAAiB,CACrC,EAAE,EAAE,UAAU,EACd,EAAE,EAAE,UAAU,EACd,IAAI,EAAE,MAAM,iBAqBb"}
1
+ {"version":3,"file":"warmup.d.ts","sourceRoot":"","sources":["../../../../../zero-cache/src/db/warmup.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,kBAAkB,CAAC;AACjD,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,gBAAgB,CAAC;AAI/C,wBAAsB,iBAAiB,CACrC,EAAE,EAAE,UAAU,EACd,EAAE,EAAE,UAAU,EACd,IAAI,EAAE,MAAM,iBAsBb"}
@@ -1,6 +1,8 @@
1
1
  //#region ../zero-cache/src/db/warmup.ts
2
+ var MAX_WARMUP_CONNECTIONS = 5;
2
3
  async function warmupConnections(lc, db, name) {
3
- const { max, host } = db.options;
4
+ const { host } = db.options;
5
+ const max = Math.min(db.options.max, MAX_WARMUP_CONNECTIONS);
4
6
  await Promise.allSettled(Array.from({ length: max }, () => db`SELECT 1`.simple().execute()));
5
7
  const start = performance.now();
6
8
  const pingTimes = await Promise.all(Array.from({ length: Math.min(max, 5) }, () => db`SELECT 2`.simple().then(() => performance.now() - start, () => performance.now() - start)));