@rocicorp/zero 1.2.0-canary.10 → 1.2.0-canary.12

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (204)
  1. package/out/analyze-query/src/bin-analyze.js +25 -25
  2. package/out/analyze-query/src/bin-analyze.js.map +1 -1
  3. package/out/ast-to-zql/src/ast-to-zql.d.ts.map +1 -1
  4. package/out/ast-to-zql/src/ast-to-zql.js +2 -1
  5. package/out/ast-to-zql/src/ast-to-zql.js.map +1 -1
  6. package/out/replicache/src/btree/node.d.ts.map +1 -1
  7. package/out/replicache/src/btree/node.js +2 -2
  8. package/out/replicache/src/btree/node.js.map +1 -1
  9. package/out/replicache/src/connection-loop.js +3 -3
  10. package/out/replicache/src/connection-loop.js.map +1 -1
  11. package/out/replicache/src/deleted-clients.d.ts +0 -4
  12. package/out/replicache/src/deleted-clients.d.ts.map +1 -1
  13. package/out/replicache/src/deleted-clients.js +1 -1
  14. package/out/replicache/src/deleted-clients.js.map +1 -1
  15. package/out/replicache/src/hash.d.ts.map +1 -1
  16. package/out/replicache/src/hash.js.map +1 -1
  17. package/out/replicache/src/process-scheduler.d.ts.map +1 -1
  18. package/out/replicache/src/process-scheduler.js.map +1 -1
  19. package/out/replicache/src/request-idle.js +1 -1
  20. package/out/replicache/src/request-idle.js.map +1 -1
  21. package/out/replicache/src/sync/patch.d.ts +1 -1
  22. package/out/replicache/src/sync/patch.d.ts.map +1 -1
  23. package/out/replicache/src/sync/patch.js +1 -1
  24. package/out/replicache/src/sync/patch.js.map +1 -1
  25. package/out/shared/src/arrays.d.ts.map +1 -1
  26. package/out/shared/src/arrays.js +1 -2
  27. package/out/shared/src/arrays.js.map +1 -1
  28. package/out/shared/src/bigint-json.js +1 -1
  29. package/out/shared/src/bigint-json.js.map +1 -1
  30. package/out/shared/src/btree-set.js +1 -1
  31. package/out/shared/src/btree-set.js.map +1 -1
  32. package/out/shared/src/iterables.d.ts +7 -0
  33. package/out/shared/src/iterables.d.ts.map +1 -1
  34. package/out/shared/src/iterables.js +10 -1
  35. package/out/shared/src/iterables.js.map +1 -1
  36. package/out/shared/src/logging.d.ts.map +1 -1
  37. package/out/shared/src/logging.js +10 -9
  38. package/out/shared/src/logging.js.map +1 -1
  39. package/out/shared/src/options.js +1 -1
  40. package/out/shared/src/options.js.map +1 -1
  41. package/out/shared/src/tdigest-schema.d.ts.map +1 -1
  42. package/out/shared/src/tdigest-schema.js.map +1 -1
  43. package/out/shared/src/tdigest.d.ts.map +1 -1
  44. package/out/shared/src/tdigest.js +7 -7
  45. package/out/shared/src/tdigest.js.map +1 -1
  46. package/out/shared/src/valita.d.ts.map +1 -1
  47. package/out/shared/src/valita.js +1 -1
  48. package/out/shared/src/valita.js.map +1 -1
  49. package/out/z2s/src/sql.d.ts.map +1 -1
  50. package/out/z2s/src/sql.js +2 -1
  51. package/out/z2s/src/sql.js.map +1 -1
  52. package/out/zero/package.js +5 -6
  53. package/out/zero/package.js.map +1 -1
  54. package/out/zero/src/pg.js +1 -1
  55. package/out/zero/src/server.js +1 -1
  56. package/out/zero-cache/src/auth/load-permissions.js +1 -1
  57. package/out/zero-cache/src/auth/load-permissions.js.map +1 -1
  58. package/out/zero-cache/src/config/zero-config.d.ts +8 -0
  59. package/out/zero-cache/src/config/zero-config.d.ts.map +1 -1
  60. package/out/zero-cache/src/config/zero-config.js +16 -0
  61. package/out/zero-cache/src/config/zero-config.js.map +1 -1
  62. package/out/zero-cache/src/custom/fetch.d.ts.map +1 -1
  63. package/out/zero-cache/src/custom/fetch.js +2 -0
  64. package/out/zero-cache/src/custom/fetch.js.map +1 -1
  65. package/out/zero-cache/src/custom-queries/transform-query.js +1 -1
  66. package/out/zero-cache/src/custom-queries/transform-query.js.map +1 -1
  67. package/out/zero-cache/src/db/migration-lite.d.ts.map +1 -1
  68. package/out/zero-cache/src/db/migration-lite.js +1 -1
  69. package/out/zero-cache/src/db/migration-lite.js.map +1 -1
  70. package/out/zero-cache/src/db/migration.d.ts.map +1 -1
  71. package/out/zero-cache/src/db/migration.js +1 -1
  72. package/out/zero-cache/src/db/migration.js.map +1 -1
  73. package/out/zero-cache/src/db/pg-copy-binary.d.ts +101 -0
  74. package/out/zero-cache/src/db/pg-copy-binary.d.ts.map +1 -0
  75. package/out/zero-cache/src/db/pg-copy-binary.js +381 -0
  76. package/out/zero-cache/src/db/pg-copy-binary.js.map +1 -0
  77. package/out/zero-cache/src/db/warmup.d.ts.map +1 -1
  78. package/out/zero-cache/src/db/warmup.js +3 -1
  79. package/out/zero-cache/src/db/warmup.js.map +1 -1
  80. package/out/zero-cache/src/server/anonymous-otel-start.d.ts.map +1 -1
  81. package/out/zero-cache/src/server/anonymous-otel-start.js +2 -1
  82. package/out/zero-cache/src/server/anonymous-otel-start.js.map +1 -1
  83. package/out/zero-cache/src/server/main.js +1 -1
  84. package/out/zero-cache/src/server/main.js.map +1 -1
  85. package/out/zero-cache/src/server/syncer.d.ts.map +1 -1
  86. package/out/zero-cache/src/server/syncer.js +8 -10
  87. package/out/zero-cache/src/server/syncer.js.map +1 -1
  88. package/out/zero-cache/src/server/worker-urls.d.ts.map +1 -1
  89. package/out/zero-cache/src/server/worker-urls.js +2 -1
  90. package/out/zero-cache/src/server/worker-urls.js.map +1 -1
  91. package/out/zero-cache/src/services/change-source/change-source.d.ts +4 -0
  92. package/out/zero-cache/src/services/change-source/change-source.d.ts.map +1 -1
  93. package/out/zero-cache/src/services/change-source/custom/change-source.d.ts.map +1 -1
  94. package/out/zero-cache/src/services/change-source/custom/change-source.js +7 -4
  95. package/out/zero-cache/src/services/change-source/custom/change-source.js.map +1 -1
  96. package/out/zero-cache/src/services/change-source/pg/change-source.d.ts.map +1 -1
  97. package/out/zero-cache/src/services/change-source/pg/change-source.js +9 -1
  98. package/out/zero-cache/src/services/change-source/pg/change-source.js.map +1 -1
  99. package/out/zero-cache/src/services/change-source/pg/initial-sync.d.ts +1 -0
  100. package/out/zero-cache/src/services/change-source/pg/initial-sync.d.ts.map +1 -1
  101. package/out/zero-cache/src/services/change-source/pg/initial-sync.js +85 -5
  102. package/out/zero-cache/src/services/change-source/pg/initial-sync.js.map +1 -1
  103. package/out/zero-cache/src/services/change-source/pg/schema/shard.js +1 -1
  104. package/out/zero-cache/src/services/change-source/pg/schema/shard.js.map +1 -1
  105. package/out/zero-cache/src/services/change-streamer/broadcast.js +1 -1
  106. package/out/zero-cache/src/services/change-streamer/broadcast.js.map +1 -1
  107. package/out/zero-cache/src/services/change-streamer/change-streamer-http.js +3 -3
  108. package/out/zero-cache/src/services/change-streamer/change-streamer-http.js.map +1 -1
  109. package/out/zero-cache/src/services/change-streamer/change-streamer-service.js +1 -0
  110. package/out/zero-cache/src/services/change-streamer/change-streamer-service.js.map +1 -1
  111. package/out/zero-cache/src/services/replicator/change-processor.js +1 -1
  112. package/out/zero-cache/src/services/replicator/change-processor.js.map +1 -1
  113. package/out/zero-cache/src/services/replicator/replication-status.js.map +1 -1
  114. package/out/zero-cache/src/services/view-syncer/client-schema.d.ts.map +1 -1
  115. package/out/zero-cache/src/services/view-syncer/client-schema.js +4 -3
  116. package/out/zero-cache/src/services/view-syncer/client-schema.js.map +1 -1
  117. package/out/zero-cache/src/services/view-syncer/cvr-store.js +2 -2
  118. package/out/zero-cache/src/services/view-syncer/cvr-store.js.map +1 -1
  119. package/out/zero-cache/src/services/view-syncer/cvr.d.ts.map +1 -1
  120. package/out/zero-cache/src/services/view-syncer/cvr.js +4 -3
  121. package/out/zero-cache/src/services/view-syncer/cvr.js.map +1 -1
  122. package/out/zero-cache/src/services/view-syncer/pipeline-driver.js +3 -1
  123. package/out/zero-cache/src/services/view-syncer/pipeline-driver.js.map +1 -1
  124. package/out/zero-cache/src/services/view-syncer/snapshotter.js +1 -1
  125. package/out/zero-cache/src/services/view-syncer/snapshotter.js.map +1 -1
  126. package/out/zero-cache/src/services/view-syncer/view-syncer.js +2 -2
  127. package/out/zero-cache/src/services/view-syncer/view-syncer.js.map +1 -1
  128. package/out/zero-cache/src/types/lite.d.ts.map +1 -1
  129. package/out/zero-cache/src/types/lite.js +3 -2
  130. package/out/zero-cache/src/types/lite.js.map +1 -1
  131. package/out/zero-cache/src/types/pg-types.js +4 -1
  132. package/out/zero-cache/src/types/pg-types.js.map +1 -1
  133. package/out/zero-cache/src/types/pg.d.ts.map +1 -1
  134. package/out/zero-cache/src/types/pg.js +6 -1
  135. package/out/zero-cache/src/types/pg.js.map +1 -1
  136. package/out/zero-cache/src/types/subscription.d.ts.map +1 -1
  137. package/out/zero-cache/src/types/subscription.js +2 -2
  138. package/out/zero-cache/src/types/subscription.js.map +1 -1
  139. package/out/zero-cache/src/workers/connection.js +2 -2
  140. package/out/zero-cache/src/workers/connection.js.map +1 -1
  141. package/out/zero-cache/src/workers/syncer-ws-message-handler.d.ts +1 -1
  142. package/out/zero-cache/src/workers/syncer-ws-message-handler.d.ts.map +1 -1
  143. package/out/zero-cache/src/workers/syncer-ws-message-handler.js +18 -2
  144. package/out/zero-cache/src/workers/syncer-ws-message-handler.js.map +1 -1
  145. package/out/zero-cache/src/workers/syncer.d.ts +1 -1
  146. package/out/zero-cache/src/workers/syncer.d.ts.map +1 -1
  147. package/out/zero-cache/src/workers/syncer.js +5 -5
  148. package/out/zero-cache/src/workers/syncer.js.map +1 -1
  149. package/out/zero-client/src/client/http-string.d.ts.map +1 -1
  150. package/out/zero-client/src/client/http-string.js.map +1 -1
  151. package/out/zero-client/src/client/metrics.d.ts.map +1 -1
  152. package/out/zero-client/src/client/metrics.js +2 -1
  153. package/out/zero-client/src/client/metrics.js.map +1 -1
  154. package/out/zero-client/src/client/server-option.js +1 -1
  155. package/out/zero-client/src/client/server-option.js.map +1 -1
  156. package/out/zero-client/src/client/version.js +1 -1
  157. package/out/zero-client/src/client/zero-poke-handler.d.ts.map +1 -1
  158. package/out/zero-client/src/client/zero-poke-handler.js +1 -1
  159. package/out/zero-client/src/client/zero-poke-handler.js.map +1 -1
  160. package/out/zero-pg/src/mod.js +1 -1
  161. package/out/zero-protocol/src/ast.d.ts.map +1 -1
  162. package/out/zero-protocol/src/ast.js.map +1 -1
  163. package/out/zero-protocol/src/primary-key.d.ts.map +1 -1
  164. package/out/zero-protocol/src/primary-key.js.map +1 -1
  165. package/out/zero-protocol/src/push.d.ts.map +1 -1
  166. package/out/zero-protocol/src/push.js.map +1 -1
  167. package/out/zero-schema/src/name-mapper.js +1 -1
  168. package/out/zero-schema/src/name-mapper.js.map +1 -1
  169. package/out/zero-server/src/mod.js +1 -1
  170. package/out/zero-server/src/process-mutations.d.ts.map +1 -1
  171. package/out/zero-server/src/process-mutations.js +2 -1
  172. package/out/zero-server/src/process-mutations.js.map +1 -1
  173. package/out/zero-server/src/push-processor.d.ts +1 -0
  174. package/out/zero-server/src/push-processor.d.ts.map +1 -1
  175. package/out/zero-server/src/push-processor.js +3 -2
  176. package/out/zero-server/src/push-processor.js.map +1 -1
  177. package/out/zql/src/builder/like.js +2 -1
  178. package/out/zql/src/builder/like.js.map +1 -1
  179. package/out/zql/src/ivm/data.d.ts.map +1 -1
  180. package/out/zql/src/ivm/data.js +6 -15
  181. package/out/zql/src/ivm/data.js.map +1 -1
  182. package/out/zql/src/ivm/memory-source.d.ts.map +1 -1
  183. package/out/zql/src/ivm/memory-source.js +2 -4
  184. package/out/zql/src/ivm/memory-source.js.map +1 -1
  185. package/out/zql/src/query/complete-ordering.js +1 -1
  186. package/out/zql/src/query/complete-ordering.js.map +1 -1
  187. package/out/zql/src/query/query-impl.d.ts.map +1 -1
  188. package/out/zql/src/query/query-impl.js +2 -2
  189. package/out/zql/src/query/query-impl.js.map +1 -1
  190. package/out/zql/src/query/query-registry.d.ts.map +1 -1
  191. package/out/zql/src/query/query-registry.js +2 -1
  192. package/out/zql/src/query/query-registry.js.map +1 -1
  193. package/out/zql/src/query/ttl.js +1 -1
  194. package/out/zql/src/query/ttl.js.map +1 -1
  195. package/out/zqlite/src/sqlite-cost-model.d.ts +1 -1
  196. package/out/zqlite/src/sqlite-cost-model.d.ts.map +1 -1
  197. package/out/zqlite/src/sqlite-cost-model.js +1 -1
  198. package/out/zqlite/src/sqlite-cost-model.js.map +1 -1
  199. package/out/zqlite/src/sqlite-stat-fanout.js +1 -1
  200. package/out/zqlite/src/sqlite-stat-fanout.js.map +1 -1
  201. package/out/zqlite/src/table-source.d.ts.map +1 -1
  202. package/out/zqlite/src/table-source.js +1 -1
  203. package/out/zqlite/src/table-source.js.map +1 -1
  204. package/package.json +5 -6
@@ -1 +1 @@
1
- {"version":3,"file":"migration.d.ts","sourceRoot":"","sources":["../../../../../zero-cache/src/db/migration.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,kBAAkB,CAAC;AACjD,OAAO,KAAK,QAAQ,MAAM,UAAU,CAAC;AAGrC,OAAO,KAAK,CAAC,MAAM,+BAA+B,CAAC;AACnD,OAAO,EAAC,KAAK,UAAU,EAAE,KAAK,mBAAmB,EAAC,MAAM,gBAAgB,CAAC;AAGzE,KAAK,UAAU,GAAG,CAAC,GAAG,EAAE,UAAU,EAAE,EAAE,EAAE,mBAAmB,KAAK,OAAO,CAAC,IAAI,CAAC,CAAC;AAE9E;;;;GAIG;AACH,MAAM,MAAM,SAAS,GAAG;IACtB;;;;OAIG;IACH,aAAa,CAAC,EAAE,UAAU,CAAC;IAE3B;;;;;;;OAOG;IACH,WAAW,CAAC,EAAE,UAAU,CAAC;IAEzB;;;OAGG;IACH,cAAc,CAAC,EAAE,MAAM,CAAC;CACzB,CAAC;AAEF;;;;;;;;;;GAUG;AACH,MAAM,MAAM,uBAAuB,GAAG;IACpC,CAAC,kBAAkB,EAAE,MAAM,GAAG,SAAS,CAAC;CACzC,CAAC;AAEF;;;GAGG;AACH,wBAAsB,mBAAmB,CACvC,GAAG,EAAE,UAAU,EACf,SAAS,EAAE,MAAM,EACjB,UAAU,EAAE,MAAM,EAClB,EAAE,EAAE,UAAU,EACd,cAAc,EAAE,SAAS,EACzB,uBAAuB,EAAE,uBAAuB,GAC/C,OAAO,CAAC,IAAI,CAAC,CA6Ef;AAaD,eAAO,MAAM,cAAc;IACzB;;;;;;OAMG;;IAGH;;;;;;OAMG;;IAGH;;;;OAIG;;aAEH,CAAC;AAGH,MAAM,MAAM,cAAc,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,cAAc,CAAC,CAAC;AAG5D,wBAAsB,yBAAyB,CAC7C,GAAG,EAAE,QAAQ,CAAC,GAAG,EACjB,UAAU,EAAE,MAAM,iBAcnB;AASD,wBAAsB,iBAAiB,CACrC,GAAG,EAAE,QAAQ,CAAC,GAAG,EACjB,UAAU,EAAE,MAAM,EAClB,MAAM,UAAQ,GACb,OAAO,CAAC,cAAc,GAAG,IAAI,CAAC,CAuBhC"}
1
+ {"version":3,"file":"migration.d.ts","sourceRoot":"","sources":["../../../../../zero-cache/src/db/migration.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,kBAAkB,CAAC;AACjD,OAAO,KAAK,QAAQ,MAAM,UAAU,CAAC;AAGrC,OAAO,KAAK,CAAC,MAAM,+BAA+B,CAAC;AACnD,OAAO,EAAC,KAAK,UAAU,EAAE,KAAK,mBAAmB,EAAC,MAAM,gBAAgB,CAAC;AAGzE,KAAK,UAAU,GAAG,CAAC,GAAG,EAAE,UAAU,EAAE,EAAE,EAAE,mBAAmB,KAAK,OAAO,CAAC,IAAI,CAAC,CAAC;AAE9E;;;;GAIG;AACH,MAAM,MAAM,SAAS,GAAG;IACtB;;;;OAIG;IACH,aAAa,CAAC,EAAE,UAAU,CAAC;IAE3B;;;;;;;OAOG;IACH,WAAW,CAAC,EAAE,UAAU,CAAC;IAEzB;;;OAGG;IACH,cAAc,CAAC,EAAE,MAAM,CAAC;CACzB,CAAC;AAEF;;;;;;;;;;GAUG;AACH,MAAM,MAAM,uBAAuB,GAAG;IACpC,CAAC,kBAAkB,EAAE,MAAM,GAAG,SAAS,CAAC;CACzC,CAAC;AAEF;;;GAGG;AACH,wBAAsB,mBAAmB,CACvC,GAAG,EAAE,UAAU,EACf,SAAS,EAAE,MAAM,EACjB,UAAU,EAAE,MAAM,EAClB,EAAE,EAAE,UAAU,EACd,cAAc,EAAE,SAAS,EACzB,uBAAuB,EAAE,uBAAuB,GAC/C,OAAO,CAAC,IAAI,CAAC,CA8Ef;AAaD,eAAO,MAAM,cAAc;IACzB;;;;;;OAMG;;IAGH;;;;;;OAMG;;IAGH;;;;OAIG;;aAEH,CAAC;AAGH,MAAM,MAAM,cAAc,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,cAAc,CAAC,CAAC;AAG5D,wBAAsB,yBAAyB,CAC7C,GAAG,EAAE,QAAQ,CAAC,GAAG,EACjB,UAAU,EAAE,MAAM,iBAcnB;AASD,wBAAsB,iBAAiB,CACrC,GAAG,EAAE,QAAQ,CAAC,GAAG,EACjB,UAAU,EAAE,MAAM,EAClB,MAAM,UAAQ,GACb,OAAO,CAAC,cAAc,GAAG,IAAI,CAAC,CAuBhC"}
@@ -13,7 +13,7 @@ async function runSchemaMigrations(log, debugName, schemaName, db, setupMigratio
13
13
  const versionMigrations = sorted(incrementalMigrationMap);
14
14
  assert(versionMigrations.length, `Must specify at least one version migration`);
15
15
  assert(versionMigrations[0][0] > 0, `Versions must be non-zero positive numbers`);
16
- const codeVersion = versionMigrations[versionMigrations.length - 1][0];
16
+ const codeVersion = versionMigrations.at(-1)[0];
17
17
  log.info?.(`Checking schema for compatibility with ${debugName} at schema v${codeVersion}`);
18
18
  try {
19
19
  await runTx(db, async (tx) => {
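
The one changed line in this hunk swaps manual last-index arithmetic for Array.prototype.at. A quick equivalence check with illustrative data (not from the package):

// Illustrative only: both expressions read the destination version of the last entry.
const versionMigrations: [number, object][] = [[1, {}], [2, {}], [5, {}]];
console.log(versionMigrations[versionMigrations.length - 1][0]); // 5
console.log(versionMigrations.at(-1)?.[0]);                      // 5
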
@@ -1 +1 @@
1
- {"version":3,"file":"migration.js","names":[],"sources":["../../../../../zero-cache/src/db/migration.ts"],"sourcesContent":["import type {LogContext} from '@rocicorp/logger';\nimport type postgres from 'postgres';\nimport {assert} from '../../../shared/src/asserts.ts';\nimport {must} from '../../../shared/src/must.ts';\nimport * as v from '../../../shared/src/valita.ts';\nimport {type PostgresDB, type PostgresTransaction} from '../types/pg.ts';\nimport {runTx} from './run-transaction.ts';\n\ntype Operations = (log: LogContext, tx: PostgresTransaction) => Promise<void>;\n\n/**\n * Encapsulates the logic for setting up or upgrading to a new schema. After the\n * Migration code successfully completes, {@link runSchemaMigrations}\n * will update the schema version and commit the transaction.\n */\nexport type Migration = {\n /**\n * Perform database operations that create or alter table structure. This is\n * called at most once during lifetime of the application. If a `migrateData()`\n * operation is defined, that will be performed after `migrateSchema()` succeeds.\n */\n migrateSchema?: Operations;\n\n /**\n * Perform database operations to migrate data to the new schema. This is\n * called after `migrateSchema()` (if defined), and may be called again\n * to re-migrate data after the server was rolled back to an earlier version,\n * and rolled forward again.\n *\n * Consequently, the logic in `migrateData()` must be idempotent.\n */\n migrateData?: Operations;\n\n /**\n * Sets the `minSafeVersion` to the specified value, prohibiting running\n * any earlier code versions.\n */\n minSafeVersion?: number;\n};\n\n/**\n * Mapping of incremental migrations to move from the previous old code\n * version to next one. Versions must be non-zero.\n *\n * The schema resulting from performing incremental migrations should be\n * equivalent to that of the `setupMigration` on a blank database.\n *\n * The highest destinationVersion of this map denotes the current\n * \"code version\", and is also used as the destination version when\n * running the initial setup migration on a blank database.\n */\nexport type IncrementalMigrationMap = {\n [destinationVersion: number]: Migration;\n};\n\n/**\n * Ensures that the schema is compatible with the current code, updating and\n * migrating the schema if necessary.\n */\nexport async function runSchemaMigrations(\n log: LogContext,\n debugName: string,\n schemaName: string,\n db: PostgresDB,\n setupMigration: Migration,\n incrementalMigrationMap: IncrementalMigrationMap,\n): Promise<void> {\n log = log.withContext('initSchema', schemaName);\n\n const versionMigrations = sorted(incrementalMigrationMap);\n assert(\n versionMigrations.length,\n `Must specify at least one version migration`,\n );\n assert(\n versionMigrations[0][0] > 0,\n `Versions must be non-zero positive numbers`,\n );\n const codeVersion = versionMigrations[versionMigrations.length - 1][0];\n\n log.info?.(\n `Checking schema for compatibility with ${debugName} at schema v${codeVersion}`,\n );\n\n try {\n await runTx(db, async tx => {\n // Acquire advisory lock to prevent concurrent migrations from racing.\n // This can happen during rolling deployments when multiple pods start\n // up simultaneously. 
The lock auto-releases when the transaction ends.\n const lockName = `migrate-schema:${schemaName}`;\n await tx`SELECT pg_advisory_xact_lock(hashtext(${lockName}))`;\n\n let versions = await ensureVersionHistory(tx, schemaName);\n\n if (codeVersion < versions.minSafeVersion) {\n throw new Error(\n `Cannot run ${debugName} at schema v${codeVersion} because rollback limit is v${versions.minSafeVersion}`,\n );\n }\n\n if (versions.dataVersion > codeVersion) {\n log.info?.(\n `Data is at v${versions.dataVersion}. Resetting to v${codeVersion}`,\n );\n await updateVersionHistory(log, tx, schemaName, versions, codeVersion);\n return;\n }\n\n if (versions.dataVersion === codeVersion) {\n return;\n }\n\n const migrations =\n versions.dataVersion === 0\n ? // For an empty database (v0), only run the setup migration.\n ([[codeVersion, setupMigration]] as const)\n : versionMigrations;\n\n for (const [dest, migration] of migrations) {\n if (versions.dataVersion < dest) {\n log.info?.(\n `Migrating schema from v${versions.dataVersion} to v${dest}`,\n );\n void log.flush();\n versions = await runMigration(\n log,\n schemaName,\n tx,\n versions,\n dest,\n migration,\n );\n }\n }\n });\n\n log.info?.(`Running ${debugName} at schema v${codeVersion}`);\n } catch (e) {\n log.error?.('Error in ensureSchemaMigrated', e);\n throw e;\n } finally {\n void log.flush();\n }\n}\n\nfunction sorted(\n incrementalMigrationMap: IncrementalMigrationMap,\n): [number, Migration][] {\n const versionMigrations: [number, Migration][] = [];\n for (const [v, m] of Object.entries(incrementalMigrationMap)) {\n versionMigrations.push([Number(v), m]);\n }\n return versionMigrations.sort(([a], [b]) => a - b);\n}\n\n// Exposed for tests.\nexport const versionHistory = v.object({\n /**\n * The `schemaVersion` is highest code version that has ever been run\n * on the database, and is used to delineate the structure of the tables\n * in the database. A schemaVersion only moves forward; rolling back to\n * an earlier (safe) code version does not revert schema changes that\n * have already been applied.\n */\n schemaVersion: v.number(),\n\n /**\n * The data version is the code version of the latest server that ran.\n * Note that this may be less than the schemaVersion in the case that\n * a server is rolled back to an earlier version after a schema change.\n * In such a case, data (but not schema), may need to be re-migrated\n * when rolling forward again.\n */\n dataVersion: v.number(),\n\n /**\n * The minimum code version that is safe to run. 
This is used when\n * a schema migration is not backwards compatible with an older version\n * of the code.\n */\n minSafeVersion: v.number(),\n});\n\n// Exposed for tests.\nexport type VersionHistory = v.Infer<typeof versionHistory>;\n\n// Exposed for tests.\nexport async function createVersionHistoryTable(\n sql: postgres.Sql,\n schemaName: string,\n) {\n // Note: The `lock` column transparently ensures that at most one row exists.\n await sql`\n CREATE SCHEMA IF NOT EXISTS ${sql(schemaName)};\n CREATE TABLE IF NOT EXISTS ${sql(schemaName)}.\"versionHistory\" (\n \"dataVersion\" int NOT NULL,\n \"schemaVersion\" int NOT NULL,\n \"minSafeVersion\" int NOT NULL,\n\n lock char(1) NOT NULL CONSTRAINT DF_schema_meta_lock DEFAULT 'v',\n CONSTRAINT PK_schema_meta_lock PRIMARY KEY (lock),\n CONSTRAINT CK_schema_meta_lock CHECK (lock='v')\n );`.simple();\n}\n\nasync function ensureVersionHistory(\n sql: postgres.Sql,\n schemaName: string,\n): Promise<VersionHistory> {\n return must(await getVersionHistory(sql, schemaName, true));\n}\n\nexport async function getVersionHistory(\n sql: postgres.Sql,\n schemaName: string,\n create = false,\n): Promise<VersionHistory | null> {\n const exists = await sql`\n SELECT nspname, relname FROM pg_class\n JOIN pg_namespace ON relnamespace = pg_namespace.oid\n WHERE nspname = ${schemaName} AND relname = ${'versionHistory'}`;\n\n if (exists.length === 0) {\n if (create) {\n await createVersionHistoryTable(sql, schemaName);\n } else {\n return null;\n }\n }\n const rows = await sql`\n SELECT \"dataVersion\", \"schemaVersion\", \"minSafeVersion\"\n FROM ${sql(schemaName)}.\"versionHistory\"`;\n\n if (rows.length === 0) {\n return create\n ? {schemaVersion: 0, dataVersion: 0, minSafeVersion: 0}\n : null;\n }\n return v.parse(rows[0], versionHistory);\n}\n\nasync function updateVersionHistory(\n log: LogContext,\n sql: postgres.Sql,\n schemaName: string,\n prev: VersionHistory,\n newVersion: number,\n minSafeVersion?: number,\n): Promise<VersionHistory> {\n assert(newVersion > 0, 'newVersion must be positive');\n const versions = {\n dataVersion: newVersion,\n // The schemaVersion never moves backwards.\n schemaVersion: Math.max(newVersion, prev.schemaVersion),\n minSafeVersion: getMinSafeVersion(log, prev, minSafeVersion),\n } satisfies VersionHistory;\n\n await sql`\n INSERT INTO ${sql(schemaName)}.\"versionHistory\" ${sql(versions)}\n ON CONFLICT (lock) DO UPDATE SET ${sql(versions)}\n `;\n return versions;\n}\n\nasync function runMigration(\n log: LogContext,\n schemaName: string,\n tx: PostgresTransaction,\n versions: VersionHistory,\n destinationVersion: number,\n migration: Migration,\n): Promise<VersionHistory> {\n if (versions.schemaVersion < destinationVersion) {\n await migration.migrateSchema?.(log, tx);\n }\n if (versions.dataVersion < destinationVersion) {\n await migration.migrateData?.(log, tx);\n }\n return updateVersionHistory(\n log,\n tx,\n schemaName,\n versions,\n destinationVersion,\n migration.minSafeVersion,\n );\n}\n\n/**\n * Bumps the rollback limit [[toAtLeast]] the specified version.\n * Leaves the rollback limit unchanged if it is equal or greater.\n */\nfunction getMinSafeVersion(\n log: LogContext,\n current: VersionHistory,\n proposedSafeVersion?: number,\n): number {\n if (proposedSafeVersion === undefined) {\n return current.minSafeVersion;\n }\n if (current.minSafeVersion >= proposedSafeVersion) {\n // The rollback limit must never move backwards.\n log.debug?.(\n `rollback limit is already at ${current.minSafeVersion}, ` +\n 
`don't need to bump to ${proposedSafeVersion}`,\n );\n return current.minSafeVersion;\n }\n log.info?.(\n `bumping rollback limit from ${current.minSafeVersion} to ${proposedSafeVersion}`,\n );\n return proposedSafeVersion;\n}\n"],"mappings":";;;;;;;;;;AA2DA,eAAsB,oBACpB,KACA,WACA,YACA,IACA,gBACA,yBACe;AACf,OAAM,IAAI,YAAY,cAAc,WAAW;CAE/C,MAAM,oBAAoB,OAAO,wBAAwB;AACzD,QACE,kBAAkB,QAClB,8CACD;AACD,QACE,kBAAkB,GAAG,KAAK,GAC1B,6CACD;CACD,MAAM,cAAc,kBAAkB,kBAAkB,SAAS,GAAG;AAEpE,KAAI,OACF,0CAA0C,UAAU,cAAc,cACnE;AAED,KAAI;AACF,QAAM,MAAM,IAAI,OAAM,OAAM;AAK1B,SAAM,EAAE,yCADS,kBAAkB,aACuB;GAE1D,IAAI,WAAW,MAAM,qBAAqB,IAAI,WAAW;AAEzD,OAAI,cAAc,SAAS,eACzB,OAAM,IAAI,MACR,cAAc,UAAU,cAAc,YAAY,8BAA8B,SAAS,iBAC1F;AAGH,OAAI,SAAS,cAAc,aAAa;AACtC,QAAI,OACF,eAAe,SAAS,YAAY,kBAAkB,cACvD;AACD,UAAM,qBAAqB,KAAK,IAAI,YAAY,UAAU,YAAY;AACtE;;AAGF,OAAI,SAAS,gBAAgB,YAC3B;GAGF,MAAM,aACJ,SAAS,gBAAgB,IAEpB,CAAC,CAAC,aAAa,eAAe,CAAC,GAChC;AAEN,QAAK,MAAM,CAAC,MAAM,cAAc,WAC9B,KAAI,SAAS,cAAc,MAAM;AAC/B,QAAI,OACF,0BAA0B,SAAS,YAAY,OAAO,OACvD;AACI,QAAI,OAAO;AAChB,eAAW,MAAM,aACf,KACA,YACA,IACA,UACA,MACA,UACD;;IAGL;AAEF,MAAI,OAAO,WAAW,UAAU,cAAc,cAAc;UACrD,GAAG;AACV,MAAI,QAAQ,iCAAiC,EAAE;AAC/C,QAAM;WACE;AACH,MAAI,OAAO;;;AAIpB,SAAS,OACP,yBACuB;CACvB,MAAM,oBAA2C,EAAE;AACnD,MAAK,MAAM,CAAC,GAAG,MAAM,OAAO,QAAQ,wBAAwB,CAC1D,mBAAkB,KAAK,CAAC,OAAO,EAAE,EAAE,EAAE,CAAC;AAExC,QAAO,kBAAkB,MAAM,CAAC,IAAI,CAAC,OAAO,IAAI,EAAE;;AAIpD,IAAa,iBAAiB,eAAE,OAAO;CAQrC,eAAe,eAAE,QAAQ;CASzB,aAAa,eAAE,QAAQ;CAOvB,gBAAgB,eAAE,QAAQ;CAC3B,CAAC;AAMF,eAAsB,0BACpB,KACA,YACA;AAEA,OAAM,GAAG;kCACuB,IAAI,WAAW,CAAC;iCACjB,IAAI,WAAW,CAAC;;;;;;;;QAQzC,QAAQ;;AAGhB,eAAe,qBACb,KACA,YACyB;AACzB,QAAO,KAAK,MAAM,kBAAkB,KAAK,YAAY,KAAK,CAAC;;AAG7D,eAAsB,kBACpB,KACA,YACA,SAAS,OACuB;AAMhC,MALe,MAAM,GAAG;;;sBAGJ,WAAW,iBAAiB,oBAErC,WAAW,EACpB,KAAI,OACF,OAAM,0BAA0B,KAAK,WAAW;KAEhD,QAAO;CAGX,MAAM,OAAO,MAAM,GAAG;;cAEV,IAAI,WAAW,CAAC;AAE5B,KAAI,KAAK,WAAW,EAClB,QAAO,SACH;EAAC,eAAe;EAAG,aAAa;EAAG,gBAAgB;EAAE,GACrD;AAEN,QAAO,MAAQ,KAAK,IAAI,eAAe;;AAGzC,eAAe,qBACb,KACA,KACA,YACA,MACA,YACA,gBACyB;AACzB,QAAO,aAAa,GAAG,8BAA8B;CACrD,MAAM,WAAW;EACf,aAAa;EAEb,eAAe,KAAK,IAAI,YAAY,KAAK,cAAc;EACvD,gBAAgB,kBAAkB,KAAK,MAAM,eAAe;EAC7D;AAED,OAAM,GAAG;kBACO,IAAI,WAAW,CAAC,oBAAoB,IAAI,SAAS,CAAC;yCAC3B,IAAI,SAAS,CAAC;;AAErD,QAAO;;AAGT,eAAe,aACb,KACA,YACA,IACA,UACA,oBACA,WACyB;AACzB,KAAI,SAAS,gBAAgB,mBAC3B,OAAM,UAAU,gBAAgB,KAAK,GAAG;AAE1C,KAAI,SAAS,cAAc,mBACzB,OAAM,UAAU,cAAc,KAAK,GAAG;AAExC,QAAO,qBACL,KACA,IACA,YACA,UACA,oBACA,UAAU,eACX;;;;;;AAOH,SAAS,kBACP,KACA,SACA,qBACQ;AACR,KAAI,wBAAwB,KAAA,EAC1B,QAAO,QAAQ;AAEjB,KAAI,QAAQ,kBAAkB,qBAAqB;AAEjD,MAAI,QACF,gCAAgC,QAAQ,eAAe,0BAC5B,sBAC5B;AACD,SAAO,QAAQ;;AAEjB,KAAI,OACF,+BAA+B,QAAQ,eAAe,MAAM,sBAC7D;AACD,QAAO"}
1
+ {"version":3,"file":"migration.js","names":[],"sources":["../../../../../zero-cache/src/db/migration.ts"],"sourcesContent":["import type {LogContext} from '@rocicorp/logger';\nimport type postgres from 'postgres';\nimport {assert} from '../../../shared/src/asserts.ts';\nimport {must} from '../../../shared/src/must.ts';\nimport * as v from '../../../shared/src/valita.ts';\nimport {type PostgresDB, type PostgresTransaction} from '../types/pg.ts';\nimport {runTx} from './run-transaction.ts';\n\ntype Operations = (log: LogContext, tx: PostgresTransaction) => Promise<void>;\n\n/**\n * Encapsulates the logic for setting up or upgrading to a new schema. After the\n * Migration code successfully completes, {@link runSchemaMigrations}\n * will update the schema version and commit the transaction.\n */\nexport type Migration = {\n /**\n * Perform database operations that create or alter table structure. This is\n * called at most once during lifetime of the application. If a `migrateData()`\n * operation is defined, that will be performed after `migrateSchema()` succeeds.\n */\n migrateSchema?: Operations;\n\n /**\n * Perform database operations to migrate data to the new schema. This is\n * called after `migrateSchema()` (if defined), and may be called again\n * to re-migrate data after the server was rolled back to an earlier version,\n * and rolled forward again.\n *\n * Consequently, the logic in `migrateData()` must be idempotent.\n */\n migrateData?: Operations;\n\n /**\n * Sets the `minSafeVersion` to the specified value, prohibiting running\n * any earlier code versions.\n */\n minSafeVersion?: number;\n};\n\n/**\n * Mapping of incremental migrations to move from the previous old code\n * version to next one. Versions must be non-zero.\n *\n * The schema resulting from performing incremental migrations should be\n * equivalent to that of the `setupMigration` on a blank database.\n *\n * The highest destinationVersion of this map denotes the current\n * \"code version\", and is also used as the destination version when\n * running the initial setup migration on a blank database.\n */\nexport type IncrementalMigrationMap = {\n [destinationVersion: number]: Migration;\n};\n\n/**\n * Ensures that the schema is compatible with the current code, updating and\n * migrating the schema if necessary.\n */\nexport async function runSchemaMigrations(\n log: LogContext,\n debugName: string,\n schemaName: string,\n db: PostgresDB,\n setupMigration: Migration,\n incrementalMigrationMap: IncrementalMigrationMap,\n): Promise<void> {\n log = log.withContext('initSchema', schemaName);\n\n const versionMigrations = sorted(incrementalMigrationMap);\n assert(\n versionMigrations.length,\n `Must specify at least one version migration`,\n );\n assert(\n versionMigrations[0][0] > 0,\n `Versions must be non-zero positive numbers`,\n );\n // oxlint-disable-next-line typescript/no-non-null-assertion\n const codeVersion = versionMigrations.at(-1)![0];\n\n log.info?.(\n `Checking schema for compatibility with ${debugName} at schema v${codeVersion}`,\n );\n\n try {\n await runTx(db, async tx => {\n // Acquire advisory lock to prevent concurrent migrations from racing.\n // This can happen during rolling deployments when multiple pods start\n // up simultaneously. 
The lock auto-releases when the transaction ends.\n const lockName = `migrate-schema:${schemaName}`;\n await tx`SELECT pg_advisory_xact_lock(hashtext(${lockName}))`;\n\n let versions = await ensureVersionHistory(tx, schemaName);\n\n if (codeVersion < versions.minSafeVersion) {\n throw new Error(\n `Cannot run ${debugName} at schema v${codeVersion} because rollback limit is v${versions.minSafeVersion}`,\n );\n }\n\n if (versions.dataVersion > codeVersion) {\n log.info?.(\n `Data is at v${versions.dataVersion}. Resetting to v${codeVersion}`,\n );\n await updateVersionHistory(log, tx, schemaName, versions, codeVersion);\n return;\n }\n\n if (versions.dataVersion === codeVersion) {\n return;\n }\n\n const migrations =\n versions.dataVersion === 0\n ? // For an empty database (v0), only run the setup migration.\n ([[codeVersion, setupMigration]] as const)\n : versionMigrations;\n\n for (const [dest, migration] of migrations) {\n if (versions.dataVersion < dest) {\n log.info?.(\n `Migrating schema from v${versions.dataVersion} to v${dest}`,\n );\n void log.flush();\n versions = await runMigration(\n log,\n schemaName,\n tx,\n versions,\n dest,\n migration,\n );\n }\n }\n });\n\n log.info?.(`Running ${debugName} at schema v${codeVersion}`);\n } catch (e) {\n log.error?.('Error in ensureSchemaMigrated', e);\n throw e;\n } finally {\n void log.flush();\n }\n}\n\nfunction sorted(\n incrementalMigrationMap: IncrementalMigrationMap,\n): [number, Migration][] {\n const versionMigrations: [number, Migration][] = [];\n for (const [v, m] of Object.entries(incrementalMigrationMap)) {\n versionMigrations.push([Number(v), m]);\n }\n return versionMigrations.sort(([a], [b]) => a - b);\n}\n\n// Exposed for tests.\nexport const versionHistory = v.object({\n /**\n * The `schemaVersion` is highest code version that has ever been run\n * on the database, and is used to delineate the structure of the tables\n * in the database. A schemaVersion only moves forward; rolling back to\n * an earlier (safe) code version does not revert schema changes that\n * have already been applied.\n */\n schemaVersion: v.number(),\n\n /**\n * The data version is the code version of the latest server that ran.\n * Note that this may be less than the schemaVersion in the case that\n * a server is rolled back to an earlier version after a schema change.\n * In such a case, data (but not schema), may need to be re-migrated\n * when rolling forward again.\n */\n dataVersion: v.number(),\n\n /**\n * The minimum code version that is safe to run. 
This is used when\n * a schema migration is not backwards compatible with an older version\n * of the code.\n */\n minSafeVersion: v.number(),\n});\n\n// Exposed for tests.\nexport type VersionHistory = v.Infer<typeof versionHistory>;\n\n// Exposed for tests.\nexport async function createVersionHistoryTable(\n sql: postgres.Sql,\n schemaName: string,\n) {\n // Note: The `lock` column transparently ensures that at most one row exists.\n await sql`\n CREATE SCHEMA IF NOT EXISTS ${sql(schemaName)};\n CREATE TABLE IF NOT EXISTS ${sql(schemaName)}.\"versionHistory\" (\n \"dataVersion\" int NOT NULL,\n \"schemaVersion\" int NOT NULL,\n \"minSafeVersion\" int NOT NULL,\n\n lock char(1) NOT NULL CONSTRAINT DF_schema_meta_lock DEFAULT 'v',\n CONSTRAINT PK_schema_meta_lock PRIMARY KEY (lock),\n CONSTRAINT CK_schema_meta_lock CHECK (lock='v')\n );`.simple();\n}\n\nasync function ensureVersionHistory(\n sql: postgres.Sql,\n schemaName: string,\n): Promise<VersionHistory> {\n return must(await getVersionHistory(sql, schemaName, true));\n}\n\nexport async function getVersionHistory(\n sql: postgres.Sql,\n schemaName: string,\n create = false,\n): Promise<VersionHistory | null> {\n const exists = await sql`\n SELECT nspname, relname FROM pg_class\n JOIN pg_namespace ON relnamespace = pg_namespace.oid\n WHERE nspname = ${schemaName} AND relname = ${'versionHistory'}`;\n\n if (exists.length === 0) {\n if (create) {\n await createVersionHistoryTable(sql, schemaName);\n } else {\n return null;\n }\n }\n const rows = await sql`\n SELECT \"dataVersion\", \"schemaVersion\", \"minSafeVersion\"\n FROM ${sql(schemaName)}.\"versionHistory\"`;\n\n if (rows.length === 0) {\n return create\n ? {schemaVersion: 0, dataVersion: 0, minSafeVersion: 0}\n : null;\n }\n return v.parse(rows[0], versionHistory);\n}\n\nasync function updateVersionHistory(\n log: LogContext,\n sql: postgres.Sql,\n schemaName: string,\n prev: VersionHistory,\n newVersion: number,\n minSafeVersion?: number,\n): Promise<VersionHistory> {\n assert(newVersion > 0, 'newVersion must be positive');\n const versions = {\n dataVersion: newVersion,\n // The schemaVersion never moves backwards.\n schemaVersion: Math.max(newVersion, prev.schemaVersion),\n minSafeVersion: getMinSafeVersion(log, prev, minSafeVersion),\n } satisfies VersionHistory;\n\n await sql`\n INSERT INTO ${sql(schemaName)}.\"versionHistory\" ${sql(versions)}\n ON CONFLICT (lock) DO UPDATE SET ${sql(versions)}\n `;\n return versions;\n}\n\nasync function runMigration(\n log: LogContext,\n schemaName: string,\n tx: PostgresTransaction,\n versions: VersionHistory,\n destinationVersion: number,\n migration: Migration,\n): Promise<VersionHistory> {\n if (versions.schemaVersion < destinationVersion) {\n await migration.migrateSchema?.(log, tx);\n }\n if (versions.dataVersion < destinationVersion) {\n await migration.migrateData?.(log, tx);\n }\n return updateVersionHistory(\n log,\n tx,\n schemaName,\n versions,\n destinationVersion,\n migration.minSafeVersion,\n );\n}\n\n/**\n * Bumps the rollback limit [[toAtLeast]] the specified version.\n * Leaves the rollback limit unchanged if it is equal or greater.\n */\nfunction getMinSafeVersion(\n log: LogContext,\n current: VersionHistory,\n proposedSafeVersion?: number,\n): number {\n if (proposedSafeVersion === undefined) {\n return current.minSafeVersion;\n }\n if (current.minSafeVersion >= proposedSafeVersion) {\n // The rollback limit must never move backwards.\n log.debug?.(\n `rollback limit is already at ${current.minSafeVersion}, ` +\n 
`don't need to bump to ${proposedSafeVersion}`,\n );\n return current.minSafeVersion;\n }\n log.info?.(\n `bumping rollback limit from ${current.minSafeVersion} to ${proposedSafeVersion}`,\n );\n return proposedSafeVersion;\n}\n"],"mappings":";;;;;;;;;;AA2DA,eAAsB,oBACpB,KACA,WACA,YACA,IACA,gBACA,yBACe;AACf,OAAM,IAAI,YAAY,cAAc,WAAW;CAE/C,MAAM,oBAAoB,OAAO,wBAAwB;AACzD,QACE,kBAAkB,QAClB,8CACD;AACD,QACE,kBAAkB,GAAG,KAAK,GAC1B,6CACD;CAED,MAAM,cAAc,kBAAkB,GAAG,GAAG,CAAE;AAE9C,KAAI,OACF,0CAA0C,UAAU,cAAc,cACnE;AAED,KAAI;AACF,QAAM,MAAM,IAAI,OAAM,OAAM;AAK1B,SAAM,EAAE,yCADS,kBAAkB,aACuB;GAE1D,IAAI,WAAW,MAAM,qBAAqB,IAAI,WAAW;AAEzD,OAAI,cAAc,SAAS,eACzB,OAAM,IAAI,MACR,cAAc,UAAU,cAAc,YAAY,8BAA8B,SAAS,iBAC1F;AAGH,OAAI,SAAS,cAAc,aAAa;AACtC,QAAI,OACF,eAAe,SAAS,YAAY,kBAAkB,cACvD;AACD,UAAM,qBAAqB,KAAK,IAAI,YAAY,UAAU,YAAY;AACtE;;AAGF,OAAI,SAAS,gBAAgB,YAC3B;GAGF,MAAM,aACJ,SAAS,gBAAgB,IAEpB,CAAC,CAAC,aAAa,eAAe,CAAC,GAChC;AAEN,QAAK,MAAM,CAAC,MAAM,cAAc,WAC9B,KAAI,SAAS,cAAc,MAAM;AAC/B,QAAI,OACF,0BAA0B,SAAS,YAAY,OAAO,OACvD;AACI,QAAI,OAAO;AAChB,eAAW,MAAM,aACf,KACA,YACA,IACA,UACA,MACA,UACD;;IAGL;AAEF,MAAI,OAAO,WAAW,UAAU,cAAc,cAAc;UACrD,GAAG;AACV,MAAI,QAAQ,iCAAiC,EAAE;AAC/C,QAAM;WACE;AACH,MAAI,OAAO;;;AAIpB,SAAS,OACP,yBACuB;CACvB,MAAM,oBAA2C,EAAE;AACnD,MAAK,MAAM,CAAC,GAAG,MAAM,OAAO,QAAQ,wBAAwB,CAC1D,mBAAkB,KAAK,CAAC,OAAO,EAAE,EAAE,EAAE,CAAC;AAExC,QAAO,kBAAkB,MAAM,CAAC,IAAI,CAAC,OAAO,IAAI,EAAE;;AAIpD,IAAa,iBAAiB,eAAE,OAAO;CAQrC,eAAe,eAAE,QAAQ;CASzB,aAAa,eAAE,QAAQ;CAOvB,gBAAgB,eAAE,QAAQ;CAC3B,CAAC;AAMF,eAAsB,0BACpB,KACA,YACA;AAEA,OAAM,GAAG;kCACuB,IAAI,WAAW,CAAC;iCACjB,IAAI,WAAW,CAAC;;;;;;;;QAQzC,QAAQ;;AAGhB,eAAe,qBACb,KACA,YACyB;AACzB,QAAO,KAAK,MAAM,kBAAkB,KAAK,YAAY,KAAK,CAAC;;AAG7D,eAAsB,kBACpB,KACA,YACA,SAAS,OACuB;AAMhC,MALe,MAAM,GAAG;;;sBAGJ,WAAW,iBAAiB,oBAErC,WAAW,EACpB,KAAI,OACF,OAAM,0BAA0B,KAAK,WAAW;KAEhD,QAAO;CAGX,MAAM,OAAO,MAAM,GAAG;;cAEV,IAAI,WAAW,CAAC;AAE5B,KAAI,KAAK,WAAW,EAClB,QAAO,SACH;EAAC,eAAe;EAAG,aAAa;EAAG,gBAAgB;EAAE,GACrD;AAEN,QAAO,MAAQ,KAAK,IAAI,eAAe;;AAGzC,eAAe,qBACb,KACA,KACA,YACA,MACA,YACA,gBACyB;AACzB,QAAO,aAAa,GAAG,8BAA8B;CACrD,MAAM,WAAW;EACf,aAAa;EAEb,eAAe,KAAK,IAAI,YAAY,KAAK,cAAc;EACvD,gBAAgB,kBAAkB,KAAK,MAAM,eAAe;EAC7D;AAED,OAAM,GAAG;kBACO,IAAI,WAAW,CAAC,oBAAoB,IAAI,SAAS,CAAC;yCAC3B,IAAI,SAAS,CAAC;;AAErD,QAAO;;AAGT,eAAe,aACb,KACA,YACA,IACA,UACA,oBACA,WACyB;AACzB,KAAI,SAAS,gBAAgB,mBAC3B,OAAM,UAAU,gBAAgB,KAAK,GAAG;AAE1C,KAAI,SAAS,cAAc,mBACzB,OAAM,UAAU,cAAc,KAAK,GAAG;AAExC,QAAO,qBACL,KACA,IACA,YACA,UACA,oBACA,UAAU,eACX;;;;;;AAOH,SAAS,kBACP,KACA,SACA,qBACQ;AACR,KAAI,wBAAwB,KAAA,EAC1B,QAAO,QAAQ;AAEjB,KAAI,QAAQ,kBAAkB,qBAAqB;AAEjD,MAAI,QACF,gCAAgC,QAAQ,eAAe,0BAC5B,sBAC5B;AACD,SAAO,QAAQ;;AAEjB,KAAI,OACF,+BAA+B,QAAQ,eAAe,MAAM,sBAC7D;AACD,QAAO"}
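
The sourcesContent embedded in this map spells out the migration contract: a setup Migration for a blank database plus an IncrementalMigrationMap keyed by destination version, where migrateData must be idempotent and minSafeVersion acts as a rollback fence. The following is a minimal caller-side sketch using locally declared structural stand-ins and an invented table, purely for illustration; the real types live inside zero-cache.

// Structural stand-ins mirroring the Migration / IncrementalMigrationMap shapes
// documented above (illustration only).
type Tx = (parts: TemplateStringsArray, ...vals: unknown[]) => Promise<unknown>;
type Operations = (log: unknown, tx: Tx) => Promise<void>;
type Migration = {
  migrateSchema?: Operations; // runs at most once for a given schema version
  migrateData?: Operations;   // must be idempotent (may re-run after a rollback)
  minSafeVersion?: number;    // raises the rollback fence
};
type IncrementalMigrationMap = {[destinationVersion: number]: Migration};

// Invented example: a blank database gets the full setup at the latest version;
// existing databases step through versions 2 and 3 incrementally.
const setupMigration: Migration = {
  migrateSchema: async (_log, tx) => {
    await tx`CREATE TABLE "widgets" ("id" TEXT PRIMARY KEY, "name" TEXT NOT NULL DEFAULT '')`;
  },
};

const incrementalMigrations: IncrementalMigrationMap = {
  2: {
    migrateSchema: async (_log, tx) => {
      await tx`ALTER TABLE "widgets" ADD COLUMN IF NOT EXISTS "name" TEXT`;
    },
    migrateData: async (_log, tx) => {
      await tx`UPDATE "widgets" SET "name" = '' WHERE "name" IS NULL`;
    },
  },
  3: {
    // Dropping a column is unsafe for older code, so bump the rollback limit.
    migrateSchema: async (_log, tx) => {
      await tx`ALTER TABLE "widgets" DROP COLUMN IF EXISTS "legacy"`;
    },
    minSafeVersion: 3,
  },
};

A call like runSchemaMigrations(log, debugName, schemaName, db, setupMigration, incrementalMigrations) then applies whichever subset is needed inside a single transaction, guarded by the advisory lock added in this release.
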
@@ -0,0 +1,101 @@
1
+ import type { LiteValueType } from '../types/lite.ts';
2
+ import type { ColumnSpec } from './specs.ts';
3
+ /**
4
+ * Streaming parser for PostgreSQL `COPY ... TO STDOUT WITH (FORMAT binary)`.
5
+ *
6
+ * Analogous to {@link import('./pg-copy.ts').TsvParser} but for binary format.
7
+ * Yields `Buffer | null` per field (null = SQL NULL).
8
+ *
9
+ * The caller tracks column position the same way as with TsvParser.
10
+ */
11
+ export declare class BinaryCopyParser {
12
+ #private;
13
+ parse(chunk: Buffer): Iterable<Buffer | null>;
14
+ }
15
+ export type BinaryDecoder = (buf: Buffer) => LiteValueType;
16
+ type BinaryColumnSpec = Pick<ColumnSpec, 'dataType' | 'pgTypeClass' | 'elemPgTypeClass'> & {
17
+ typeOID: number;
18
+ };
19
+ /**
20
+ * Returns true if the column's binary format is known and can be decoded
21
+ * natively. For columns where this returns false, the COPY SELECT should
22
+ * cast the column to `::text` so PG sends the text representation inside
23
+ * the binary frame.
24
+ */
25
+ export declare function hasBinaryDecoder(spec: BinaryColumnSpec): boolean;
26
+ /** Decoder for columns cast to `::text` in the COPY SELECT. */
27
+ export declare const textCastDecoder: BinaryDecoder;
28
+ /**
29
+ * Creates a specialized binary decoder for the given column spec.
30
+ * The returned function converts a raw COPY binary field `Buffer`
31
+ * directly to a `LiteValueType`, bypassing text parsing entirely.
32
+ *
33
+ * Only call this for columns where {@link hasBinaryDecoder} returns true.
34
+ * For other columns, cast to `::text` in the SELECT and use
35
+ * {@link textCastDecoder}.
36
+ */
37
+ export declare function makeBinaryDecoder(spec: BinaryColumnSpec): BinaryDecoder;
38
+ /**
39
+ * UUID: 16 bytes → "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
40
+ */
41
+ export declare function decodeUUID(buf: Buffer): string;
42
+ /**
43
+ * TIMESTAMP / TIMESTAMPTZ: int64 microseconds since PG epoch (2000-01-01 UTC)
44
+ * → floating-point milliseconds since Unix epoch.
45
+ *
46
+ * Matches the output of `timestampToFpMillis()` in `types/pg.ts`.
47
+ *
48
+ * Uses Number arithmetic (avoiding BigInt) for speed. The microsecond value
49
+ * fits safely in a Number for all practical dates (up to ~year 285,000).
50
+ */
51
+ export declare function decodeTimestamp(buf: Buffer): number;
52
+ /**
53
+ * DATE: int32 days since PG epoch (2000-01-01) → millis since Unix epoch at
54
+ * UTC midnight. Matches `dateToUTCMidnight()` in `types/pg.ts`.
55
+ */
56
+ export declare function decodeDate(buf: Buffer): number;
57
+ /**
58
+ * TIME: int64 microseconds since midnight → milliseconds since midnight.
59
+ * Matches `postgresTimeToMilliseconds()` in `types/pg.ts`.
60
+ *
61
+ * Max value is 86,400,000,000 (~8.6e10), well within Number.MAX_SAFE_INTEGER.
62
+ */
63
+ export declare function decodeTime(buf: Buffer): number;
64
+ /**
65
+ * TIMETZ: int64 microseconds since midnight + int32 timezone offset in seconds.
66
+ * PG stores the offset with inverted sign from ISO (POSIX convention):
67
+ * positive = west of UTC, negative = east of UTC.
68
+ * UTC = local_time + pg_offset.
69
+ * → UTC milliseconds since midnight.
70
+ *
71
+ * Max value ~1.3e11 microseconds, well within Number.MAX_SAFE_INTEGER.
72
+ */
73
+ export declare function decodeTimeTZ(buf: Buffer): number;
74
+ /**
75
+ * NUMERIC: variable-length binary format.
76
+ * Header: {ndigits: int16, weight: int16, sign: int16, dscale: int16}
77
+ * Followed by ndigits x int16 base-10000 digits.
78
+ *
79
+ * Converts to a JS `number` (matching the text path's `Number(x)` behavior).
80
+ */
81
+ export declare function decodeNumeric(buf: Buffer): number;
82
+ /**
83
+ * Array: binary format.
84
+ *
85
+ * Header:
86
+ * int32 ndim — number of dimensions (0 for empty array)
87
+ * int32 flags — 0 or 1 (has-nulls)
88
+ * int32 elem_oid — OID of element type
89
+ * Per dimension:
90
+ * int32 dim_size — number of elements in this dimension
91
+ * int32 dim_lb — lower bound (usually 1)
92
+ *
93
+ * Then for each element (in row-major order):
94
+ * int32 length — -1 for NULL, otherwise byte length
95
+ * bytes — element data
96
+ *
97
+ * Result is JSON.stringify'd for storage in SQLite (matching text path behavior).
98
+ */
99
+ export declare function decodeArray(buf: Buffer): string;
100
+ export {};
101
+ //# sourceMappingURL=pg-copy-binary.d.ts.map
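
Taken together, these declarations suggest a per-column decode pipeline: columns whose binary representation is understood get a specialized decoder, and everything else is cast to ::text in the COPY SELECT and decoded as a string. Below is a rough sketch, under stated assumptions, of how a consumer might wire that up; the column specs, copyOutStream, handleRow, and the import path are hypothetical stand-ins, not part of the package.

import {
  BinaryCopyParser,
  hasBinaryDecoder,
  makeBinaryDecoder,
  textCastDecoder,
  type BinaryDecoder,
} from './pg-copy-binary.ts';

declare const copyOutStream: AsyncIterable<Buffer>; // chunks from COPY ... (FORMAT binary)
declare function handleRow(row: unknown[]): void;   // wherever decoded rows go

// Hypothetical column specs; the real ones come from the replicated table's schema.
const columns = [
  {dataType: 'uuid', typeOID: 2950, pgTypeClass: 'b', elemPgTypeClass: null},
  {dataType: 'tsvector', typeOID: 3614, pgTypeClass: 'b', elemPgTypeClass: null},
] as const;

// Columns without a known binary format would be cast to ::text in the COPY
// SELECT, so they fall back to textCastDecoder.
const decoders: BinaryDecoder[] = columns.map(c =>
  hasBinaryDecoder(c) ? makeBinaryDecoder(c) : textCastDecoder,
);

// Feed COPY chunks through the streaming parser, tracking the column index
// the same way the text (TsvParser) path does.
const parser = new BinaryCopyParser();
let col = 0;
let row: unknown[] = [];
for await (const chunk of copyOutStream) {
  for (const field of parser.parse(chunk)) {
    // null means SQL NULL; otherwise decode the raw field bytes.
    row.push(field === null ? null : decoders[col](field));
    if (++col === columns.length) {
      handleRow(row);
      row = [];
      col = 0;
    }
  }
}
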
@@ -0,0 +1 @@
1
+ {"version":3,"file":"pg-copy-binary.d.ts","sourceRoot":"","sources":["../../../../../zero-cache/src/db/pg-copy-binary.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAAC,aAAa,EAAC,MAAM,kBAAkB,CAAC;AAwBpD,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,YAAY,CAAC;AA0B3C;;;;;;;GAOG;AACH,qBAAa,gBAAgB;;IAM1B,KAAK,CAAC,KAAK,EAAE,MAAM,GAAG,QAAQ,CAAC,MAAM,GAAG,IAAI,CAAC;CAuH/C;AAID,MAAM,MAAM,aAAa,GAAG,CAAC,GAAG,EAAE,MAAM,KAAK,aAAa,CAAC;AAE3D,KAAK,gBAAgB,GAAG,IAAI,CAC1B,UAAU,EACV,UAAU,GAAG,aAAa,GAAG,iBAAiB,CAC/C,GAAG;IAAC,OAAO,EAAE,MAAM,CAAA;CAAC,CAAC;AAyBtB;;;;;GAKG;AACH,wBAAgB,gBAAgB,CAAC,IAAI,EAAE,gBAAgB,GAAG,OAAO,CAQhE;AAED,+DAA+D;AAC/D,eAAO,MAAM,eAAe,EAAE,aAA2C,CAAC;AAE1E;;;;;;;;GAQG;AACH,wBAAgB,iBAAiB,CAAC,IAAI,EAAE,gBAAgB,GAAG,aAAa,CAyDvE;AAID;;GAEG;AACH,wBAAgB,UAAU,CAAC,GAAG,EAAE,MAAM,GAAG,MAAM,CAa9C;AAED;;;;;;;;GAQG;AACH,wBAAgB,eAAe,CAAC,GAAG,EAAE,MAAM,GAAG,MAAM,CASnD;AAED;;;GAGG;AACH,wBAAgB,UAAU,CAAC,GAAG,EAAE,MAAM,GAAG,MAAM,CAK9C;AAED;;;;;GAKG;AACH,wBAAgB,UAAU,CAAC,GAAG,EAAE,MAAM,GAAG,MAAM,CAK9C;AAED;;;;;;;;GAQG;AACH,wBAAgB,YAAY,CAAC,GAAG,EAAE,MAAM,GAAG,MAAM,CAYhD;AASD;;;;;;GAMG;AACH,wBAAgB,aAAa,CAAC,GAAG,EAAE,MAAM,GAAG,MAAM,CAiDjD;AAkCD;;;;;;;;;;;;;;;;GAgBG;AACH,wBAAgB,WAAW,CAAC,GAAG,EAAE,MAAM,GAAG,MAAM,CAiD/C"}
@@ -0,0 +1,381 @@
1
+ import { stringify } from "../../../shared/src/bigint-json.js";
2
+ import { BPCHAR, DATE, JSONB, NUMERIC, TIME, TIMESTAMP, TIMESTAMPTZ, TIMETZ, UUID, VARCHAR } from "../types/pg-types.js";
3
+ //#region ../zero-cache/src/db/pg-copy-binary.ts
4
+ var PGCOPY_SIGNATURE = Buffer.from([
5
+ 80,
6
+ 71,
7
+ 67,
8
+ 79,
9
+ 80,
10
+ 89,
11
+ 10,
12
+ 255,
13
+ 13,
14
+ 10,
15
+ 0
16
+ ]);
17
+ var HEADER_MIN_SIZE = 19;
18
+ var PG_EPOCH_UNIX_MILLIS = 9466848e5;
19
+ var PG_EPOCH_UNIX_DAYS = 10957;
20
+ var MS_PER_DAY = 864e5;
21
+ var PG_TIMESTAMP_INF_HI = 2147483647;
22
+ var PG_TIMESTAMP_INF_LO = 4294967295;
23
+ var PG_TIMESTAMP_NEG_INF_HI = -2147483648;
24
+ var PG_TIMESTAMP_NEG_INF_LO = 0;
25
+ var PG_DATE_INFINITY = 2147483647;
26
+ var PG_DATE_NEG_INFINITY = -2147483648;
27
+ /**
28
+ * Streaming parser for PostgreSQL `COPY ... TO STDOUT WITH (FORMAT binary)`.
29
+ *
30
+ * Analogous to {@link import('./pg-copy.ts').TsvParser} but for binary format.
31
+ * Yields `Buffer | null` per field (null = SQL NULL).
32
+ *
33
+ * The caller tracks column position the same way as with TsvParser.
34
+ */
35
+ var BinaryCopyParser = class {
36
+ #buffer = Buffer.alloc(0);
37
+ #offset = 0;
38
+ #headerParsed = false;
39
+ #fieldsRemaining = 0;
40
+ *parse(chunk) {
41
+ this.#append(chunk);
42
+ if (!this.#headerParsed) {
43
+ if (!this.#tryParseHeader()) return;
44
+ }
45
+ for (;;) {
46
+ if (this.#fieldsRemaining === 0) {
47
+ if (this.#remaining() < 2) break;
48
+ const fieldCount = this.#buffer.readInt16BE(this.#offset);
49
+ if (fieldCount === -1) break;
50
+ this.#offset += 2;
51
+ this.#fieldsRemaining = fieldCount;
52
+ }
53
+ while (this.#fieldsRemaining > 0) {
54
+ if (this.#remaining() < 4) {
55
+ this.#compact();
56
+ return;
57
+ }
58
+ const fieldLen = this.#buffer.readInt32BE(this.#offset);
59
+ this.#offset += 4;
60
+ if (fieldLen === -1) yield null;
61
+ else {
62
+ if (this.#remaining() < fieldLen) {
63
+ this.#offset -= 4;
64
+ this.#compact();
65
+ return;
66
+ }
67
+ yield this.#buffer.subarray(this.#offset, this.#offset + fieldLen);
68
+ this.#offset += fieldLen;
69
+ }
70
+ this.#fieldsRemaining--;
71
+ }
72
+ }
73
+ this.#compact();
74
+ }
75
+ #remaining() {
76
+ return this.#buffer.length - this.#offset;
77
+ }
78
+ #append(chunk) {
79
+ if (this.#buffer.length === this.#offset) {
80
+ this.#buffer = chunk;
81
+ this.#offset = 0;
82
+ } else {
83
+ this.#buffer = Buffer.concat([this.#buffer.subarray(this.#offset), chunk]);
84
+ this.#offset = 0;
85
+ }
86
+ }
87
+ #compact() {
88
+ if (this.#offset > 0) {
89
+ this.#buffer = this.#buffer.subarray(this.#offset);
90
+ this.#offset = 0;
91
+ }
92
+ }
93
+ #tryParseHeader() {
94
+ if (this.#remaining() < HEADER_MIN_SIZE) return false;
95
+ for (let i = 0; i < PGCOPY_SIGNATURE.length; i++) if (this.#buffer[this.#offset + i] !== PGCOPY_SIGNATURE[i]) throw new Error("Invalid PGCOPY binary signature");
96
+ this.#offset += 11;
97
+ const flags = this.#buffer.readInt32BE(this.#offset);
98
+ this.#offset += 4;
99
+ if (flags !== 0) throw new Error(`Unsupported PGCOPY flags: ${flags}`);
100
+ const extensionLen = this.#buffer.readInt32BE(this.#offset);
101
+ this.#offset += 4;
102
+ if (extensionLen > 0) {
103
+ if (this.#remaining() < extensionLen) {
104
+ this.#offset -= HEADER_MIN_SIZE;
105
+ return false;
106
+ }
107
+ this.#offset += extensionLen;
108
+ }
109
+ this.#headerParsed = true;
110
+ return true;
111
+ }
112
+ };
113
+ var KNOWN_BINARY_OIDS = new Set([
114
+ 16,
115
+ 21,
116
+ 23,
117
+ 20,
118
+ 700,
119
+ 701,
120
+ 25,
121
+ VARCHAR,
122
+ BPCHAR,
123
+ 18,
124
+ UUID,
125
+ 17,
126
+ 114,
127
+ JSONB,
128
+ TIMESTAMP,
129
+ TIMESTAMPTZ,
130
+ DATE,
131
+ TIME,
132
+ TIMETZ,
133
+ NUMERIC
134
+ ]);
135
+ /**
136
+ * Returns true if the column's binary format is known and can be decoded
137
+ * natively. For columns where this returns false, the COPY SELECT should
138
+ * cast the column to `::text` so PG sends the text representation inside
139
+ * the binary frame.
140
+ */
141
+ function hasBinaryDecoder(spec) {
142
+ if (spec.elemPgTypeClass !== null && spec.elemPgTypeClass !== void 0) return true;
143
+ if (spec.pgTypeClass === "e") return true;
144
+ return KNOWN_BINARY_OIDS.has(spec.typeOID);
145
+ }
146
+ /** Decoder for columns cast to `::text` in the COPY SELECT. */
147
+ var textCastDecoder = (buf) => buf.toString("utf8");
148
+ /**
149
+ * Creates a specialized binary decoder for the given column spec.
150
+ * The returned function converts a raw COPY binary field `Buffer`
151
+ * directly to a `LiteValueType`, bypassing text parsing entirely.
152
+ *
153
+ * Only call this for columns where {@link hasBinaryDecoder} returns true.
154
+ * For other columns, cast to `::text` in the SELECT and use
155
+ * {@link textCastDecoder}.
156
+ */
157
+ function makeBinaryDecoder(spec) {
158
+ const { typeOID, pgTypeClass, elemPgTypeClass } = spec;
159
+ if (elemPgTypeClass !== null && elemPgTypeClass !== void 0) return (buf) => decodeArray(buf);
160
+ if (pgTypeClass === "e") return (buf) => buf.toString("utf8");
161
+ switch (typeOID) {
162
+ case 16: return (buf) => buf[0] ? 1 : 0;
163
+ case 21: return (buf) => buf.readInt16BE(0);
164
+ case 23: return (buf) => buf.readInt32BE(0);
165
+ case 20: return (buf) => buf.readBigInt64BE(0);
166
+ case 700: return (buf) => buf.readFloatBE(0);
167
+ case 701: return (buf) => buf.readDoubleBE(0);
168
+ case 25:
169
+ case VARCHAR:
170
+ case BPCHAR:
171
+ case 18: return (buf) => buf.toString("utf8");
172
+ case UUID: return (buf) => decodeUUID(buf);
173
+ case 17: return (buf) => Uint8Array.prototype.slice.call(buf);
174
+ case 114: return (buf) => buf.toString("utf8");
175
+ case JSONB: return (buf) => buf.toString("utf8", 1);
176
+ case TIMESTAMP:
177
+ case TIMESTAMPTZ: return (buf) => decodeTimestamp(buf);
178
+ case DATE: return (buf) => decodeDate(buf);
179
+ case TIME: return (buf) => decodeTime(buf);
180
+ case TIMETZ: return (buf) => decodeTimeTZ(buf);
181
+ case NUMERIC: return (buf) => decodeNumeric(buf);
182
+ default: throw new Error(`No binary decoder for type OID ${typeOID}. Use hasBinaryDecoder() to check before calling makeBinaryDecoder().`);
183
+ }
184
+ }
185
+ /**
186
+ * UUID: 16 bytes → "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
187
+ */
188
+ function decodeUUID(buf) {
189
+ const hex = buf.toString("hex");
190
+ return hex.substring(0, 8) + "-" + hex.substring(8, 12) + "-" + hex.substring(12, 16) + "-" + hex.substring(16, 20) + "-" + hex.substring(20, 32);
191
+ }
192
+ /**
193
+ * TIMESTAMP / TIMESTAMPTZ: int64 microseconds since PG epoch (2000-01-01 UTC)
194
+ * → floating-point milliseconds since Unix epoch.
195
+ *
196
+ * Matches the output of `timestampToFpMillis()` in `types/pg.ts`.
197
+ *
198
+ * Uses Number arithmetic (avoiding BigInt) for speed. The microsecond value
199
+ * fits safely in a Number for all practical dates (up to ~year 285,000).
200
+ */
201
+ function decodeTimestamp(buf) {
202
+ const hi = buf.readInt32BE(0);
203
+ const lo = buf.readUInt32BE(4);
204
+ if (hi === PG_TIMESTAMP_INF_HI && lo === PG_TIMESTAMP_INF_LO) return Infinity;
205
+ if (hi === PG_TIMESTAMP_NEG_INF_HI && lo === PG_TIMESTAMP_NEG_INF_LO) return -Infinity;
206
+ return (hi * 4294967296 + lo) / 1e3 + PG_EPOCH_UNIX_MILLIS;
207
+ }
208
+ /**
209
+ * DATE: int32 days since PG epoch (2000-01-01) → millis since Unix epoch at
210
+ * UTC midnight. Matches `dateToUTCMidnight()` in `types/pg.ts`.
211
+ */
212
+ function decodeDate(buf) {
213
+ const pgDays = buf.readInt32BE(0);
214
+ if (pgDays === PG_DATE_INFINITY) return Infinity;
215
+ if (pgDays === PG_DATE_NEG_INFINITY) return -Infinity;
216
+ return (pgDays + PG_EPOCH_UNIX_DAYS) * MS_PER_DAY;
217
+ }
218
+ /**
219
+ * TIME: int64 microseconds since midnight → milliseconds since midnight.
220
+ * Matches `postgresTimeToMilliseconds()` in `types/pg.ts`.
221
+ *
222
+ * Max value is 86,400,000,000 (~8.6e10), well within Number.MAX_SAFE_INTEGER.
223
+ */
224
+ function decodeTime(buf) {
225
+ const hi = buf.readInt32BE(0);
226
+ const lo = buf.readUInt32BE(4);
227
+ const micros = hi * 4294967296 + lo;
228
+ return Math.trunc(micros / 1e3);
229
+ }
230
+ /**
231
+ * TIMETZ: int64 microseconds since midnight + int32 timezone offset in seconds.
232
+ * PG stores the offset with inverted sign from ISO (POSIX convention):
233
+ * positive = west of UTC, negative = east of UTC.
234
+ * UTC = local_time + pg_offset.
235
+ * → UTC milliseconds since midnight.
236
+ *
237
+ * Max value ~1.3e11 microseconds, well within Number.MAX_SAFE_INTEGER.
238
+ */
239
+ function decodeTimeTZ(buf) {
240
+ const hi = buf.readInt32BE(0);
241
+ const lo = buf.readUInt32BE(4);
242
+ const utcMicros = hi * 4294967296 + lo + buf.readInt32BE(8) * 1e6;
243
+ let ms = Math.trunc(utcMicros / 1e3);
244
+ if (ms < 0 || ms >= MS_PER_DAY) ms = (ms % MS_PER_DAY + MS_PER_DAY) % MS_PER_DAY;
245
+ return ms;
246
+ }
247
+ var NUMERIC_NEG = 16384;
248
+ var NUMERIC_NAN = 49152;
249
+ var NUMERIC_PINF = 53248;
250
+ var NUMERIC_NINF = 61440;
251
+ var NBASE = 1e4;
252
+ /**
253
+ * NUMERIC: variable-length binary format.
254
+ * Header: {ndigits: int16, weight: int16, sign: int16, dscale: int16}
255
+ * Followed by ndigits x int16 base-10000 digits.
256
+ *
257
+ * Converts to a JS `number` (matching the text path's `Number(x)` behavior).
258
+ */
259
+ function decodeNumeric(buf) {
260
+ const ndigits = buf.readInt16BE(0);
261
+ const weight = buf.readInt16BE(2);
262
+ const sign = buf.readUInt16BE(4);
263
+ if (sign === NUMERIC_NAN) return NaN;
264
+ if (sign === NUMERIC_PINF) return Infinity;
265
+ if (sign === NUMERIC_NINF) return -Infinity;
266
+ if (ndigits === 0) return 0;
267
+ if (ndigits > 3) return decodeNumericViaString(buf, ndigits, weight, sign);
268
+ let intVal = 0;
269
+ for (let i = 0; i < ndigits; i++) intVal = intVal * NBASE + buf.readInt16BE(8 + i * 2);
270
+ const shift = ndigits - weight - 1;
271
+ let result;
272
+ if (shift > 0) result = intVal / NBASE ** shift;
273
+ else if (shift < 0) result = intVal * NBASE ** -shift;
274
+ else result = intVal;
275
+ return sign === NUMERIC_NEG ? -result : result;
276
+ }
277
+ /**
278
+ * Fallback for numerics with many base-10000 digits where accumulating
279
+ * into an integer would exceed MAX_SAFE_INTEGER. Builds the decimal
280
+ * string and uses Number() to match the text path exactly.
281
+ */
282
+ function decodeNumericViaString(buf, ndigits, weight, sign) {
283
+ const intGroups = weight + 1;
284
+ let str = "";
285
+ for (let i = 0; i < ndigits; i++) {
286
+ const digit = buf.readInt16BE(8 + i * 2);
287
+ if (i === intGroups) {
288
+ str = str || "0";
289
+ str += ".";
290
+ }
291
+ str += i === 0 ? String(digit) : String(digit).padStart(4, "0");
292
+ }
293
+ if (intGroups > ndigits) str += "0".repeat((intGroups - ndigits) * 4);
294
+ return Number((sign === NUMERIC_NEG ? "-" : "") + str);
295
+ }
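
To see why the fallback exists, consider NUMERIC 9007199254740993 (2^53 + 1), an illustrative value: its base-10000 digits are [9007, 1992, 5474, 993], so ndigits=4 exceeds the fast-path limit and the decimal string is rebuilt instead.

// Sketch of the string assembly for digits [9007, 1992, 5474, 993], weight=3.
const digits = [9007, 1992, 5474, 993];
let str = '';
for (let i = 0; i < digits.length; i++) {
  str += i === 0 ? String(digits[i]) : String(digits[i]).padStart(4, '0');
}
console.log(str);         // "9007199254740993"
console.log(Number(str)); // 9007199254740992, same rounding as the text path
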
296
+ /**
297
+ * Array: binary format.
298
+ *
299
+ * Header:
300
+ * int32 ndim — number of dimensions (0 for empty array)
301
+ * int32 flags — 0 or 1 (has-nulls)
302
+ * int32 elem_oid — OID of element type
303
+ * Per dimension:
304
+ * int32 dim_size — number of elements in this dimension
305
+ * int32 dim_lb — lower bound (usually 1)
306
+ *
307
+ * Then for each element (in row-major order):
308
+ * int32 length — -1 for NULL, otherwise byte length
309
+ * bytes — element data
310
+ *
311
+ * Result is JSON.stringify'd for storage in SQLite (matching text path behavior).
312
+ */
313
+ function decodeArray(buf) {
314
+ let offset = 0;
315
+ const ndim = buf.readInt32BE(offset);
316
+ offset += 4;
317
+ offset += 4;
318
+ const elemOid = buf.readInt32BE(offset);
319
+ offset += 4;
320
+ if (ndim === 0) return "[]";
321
+ const dims = [];
322
+ for (let d = 0; d < ndim; d++) {
323
+ dims.push(buf.readInt32BE(offset));
324
+ offset += 4;
325
+ offset += 4;
326
+ }
327
+ const elemDecoder = makeElementDecoder(elemOid);
328
+ function readDimension(dim) {
329
+ const size = dims[dim];
330
+ const arr = [];
331
+ for (let i = 0; i < size; i++) if (dim < ndim - 1) arr.push(readDimension(dim + 1));
332
+ else {
333
+ const elemLen = buf.readInt32BE(offset);
334
+ offset += 4;
335
+ if (elemLen === -1) arr.push(null);
336
+ else {
337
+ arr.push(elemDecoder(buf.subarray(offset, offset + elemLen)));
338
+ offset += elemLen;
339
+ }
340
+ }
341
+ return arr;
342
+ }
343
+ return stringify(readDimension(0));
344
+ }
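
The wire layout is easiest to see with a concrete buffer. A hypothetical ARRAY[1,2,3]::int4[] field (element OID 23) looks like the sketch below, and decodeArray would return the JSON string "[1,2,3]".

import {Buffer} from 'node:buffer';

const field = Buffer.alloc(4 * 5 + 3 * (4 + 4));
let o = 0;
o = field.writeInt32BE(1, o);   // ndim
o = field.writeInt32BE(0, o);   // flags (no nulls)
o = field.writeInt32BE(23, o);  // element OID: int4
o = field.writeInt32BE(3, o);   // dim 0 size
o = field.writeInt32BE(1, o);   // dim 0 lower bound
for (const n of [1, 2, 3]) {
  o = field.writeInt32BE(4, o); // element byte length
  o = field.writeInt32BE(n, o); // element value
}
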
345
+ /**
346
+ * Creates a decoder for array elements. Array elements use the same
347
+ * binary encoding as scalar columns, but we need to map the element
348
+ * OID to the right decoder. Returns JS values (not LiteValueType)
349
+ * since the result will be JSON.stringify'd.
350
+ */
351
+ function makeElementDecoder(elemOid) {
352
+ switch (elemOid) {
353
+ case 16: return (buf) => buf[0] ? true : false;
354
+ case 21: return (buf) => buf.readInt16BE(0);
355
+ case 23: return (buf) => buf.readInt32BE(0);
356
+ case 20: return (buf) => {
357
+ const val = buf.readBigInt64BE(0);
358
+ return val >= Number.MIN_SAFE_INTEGER && val <= Number.MAX_SAFE_INTEGER ? Number(val) : val;
359
+ };
360
+ case 700: return (buf) => buf.readFloatBE(0);
361
+ case 701: return (buf) => buf.readDoubleBE(0);
362
+ case 25:
363
+ case VARCHAR:
364
+ case BPCHAR:
365
+ case 18: return (buf) => buf.toString("utf8");
366
+ case UUID: return (buf) => decodeUUID(buf);
367
+ case 114: return (buf) => JSON.parse(buf.toString("utf8"));
368
+ case JSONB: return (buf) => JSON.parse(buf.toString("utf8", 1));
369
+ case TIMESTAMP:
370
+ case TIMESTAMPTZ: return (buf) => decodeTimestamp(buf);
371
+ case DATE: return (buf) => decodeDate(buf);
372
+ case TIME: return (buf) => decodeTime(buf);
373
+ case TIMETZ: return (buf) => decodeTimeTZ(buf);
374
+ case NUMERIC: return (buf) => decodeNumeric(buf);
375
+ default: return (buf) => buf.toString("utf8");
376
+ }
377
+ }
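
One detail worth noting: int8 (OID 20) elements are only narrowed to a JS number when they fit in the safe-integer range; larger values stay bigint, presumably so the bigint-aware stringify from shared/src/bigint-json can emit the full digits. A small sketch with a hypothetical value:

import {Buffer} from 'node:buffer';

const field = Buffer.alloc(8);
field.writeBigInt64BE(9_007_199_254_740_993n, 0); // 2^53 + 1

const val = field.readBigInt64BE(0);
const out =
  val >= Number.MIN_SAFE_INTEGER && val <= Number.MAX_SAFE_INTEGER
    ? Number(val)
    : val;
console.log(typeof out); // "bigint", left intact for serialization
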
378
+ //#endregion
379
+ export { BinaryCopyParser, hasBinaryDecoder, makeBinaryDecoder, textCastDecoder };
380
+
381
+ //# sourceMappingURL=pg-copy-binary.js.map
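
Putting the exported pieces together: a rough sketch of how a caller might drive BinaryCopyParser against a COPY ... TO STDOUT (FORMAT binary) stream. The column specs, the chunk source, and the relative import path are placeholders; the real wiring lives elsewhere in zero-cache.

import {Buffer} from 'node:buffer';
import {
  BinaryCopyParser,
  hasBinaryDecoder,
  makeBinaryDecoder,
  textCastDecoder,
} from './pg-copy-binary.js';

type Spec = Parameters<typeof hasBinaryDecoder>[0];

function decodeCopyStream(chunks: Iterable<Buffer>, specs: Spec[]) {
  // One decoder per column; columns without a native binary decoder are
  // assumed to have been cast to ::text in the COPY SELECT.
  const decoders = specs.map(s =>
    hasBinaryDecoder(s) ? makeBinaryDecoder(s) : textCastDecoder,
  );
  const parser = new BinaryCopyParser();
  const rows: unknown[][] = [];
  let row: unknown[] = [];
  for (const chunk of chunks) {
    for (const field of parser.parse(chunk)) {
      row.push(field === null ? null : decoders[row.length](field));
      if (row.length === decoders.length) {
        rows.push(row);
        row = [];
      }
    }
  }
  return rows;
}
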
@@ -0,0 +1 @@
1
+ {"version":3,"file":"pg-copy-binary.js","names":["#append","#headerParsed","#tryParseHeader","#fieldsRemaining","#remaining","#buffer","#offset","#compact"],"sources":["../../../../../zero-cache/src/db/pg-copy-binary.ts"],"sourcesContent":["import {stringify} from '../../../shared/src/bigint-json.ts';\nimport type {LiteValueType} from '../types/lite.ts';\nimport {\n BOOL,\n BPCHAR,\n BYTEA,\n CHAR,\n DATE,\n FLOAT4,\n FLOAT8,\n INT2,\n INT4,\n INT8,\n JSONB,\n NUMERIC,\n TEXT,\n TIME,\n TIMESTAMP,\n TIMESTAMPTZ,\n TIMETZ,\n UUID,\n VARCHAR,\n} from '../types/pg-types.ts';\nimport {JSON as JSON_OID} from '../types/pg-types.ts';\nimport * as PostgresTypeClass from './postgres-type-class-enum.ts';\nimport type {ColumnSpec} from './specs.ts';\n\n// PostgreSQL COPY binary format signature: \"PGCOPY\\n\\xff\\r\\n\\0\"\nconst PGCOPY_SIGNATURE = Buffer.from([\n 0x50, 0x47, 0x43, 0x4f, 0x50, 0x59, 0x0a, 0xff, 0x0d, 0x0a, 0x00,\n]);\n\nconst HEADER_MIN_SIZE = 11 + 4 + 4; // signature + flags + extension length\n\n// PostgreSQL epoch is 2000-01-01T00:00:00Z.\n// Offset from Unix epoch (1970-01-01) in milliseconds.\nconst PG_EPOCH_UNIX_MILLIS = 946_684_800_000;\n\n// Days from Unix epoch (1970-01-01) to PG epoch (2000-01-01).\nconst PG_EPOCH_UNIX_DAYS = 10_957;\n\nconst MS_PER_DAY = 86_400_000;\n\n// Sentinel values for infinity in PG binary format (as hi/lo int32 pairs).\nconst PG_TIMESTAMP_INF_HI = 0x7fffffff;\nconst PG_TIMESTAMP_INF_LO = 0xffffffff;\nconst PG_TIMESTAMP_NEG_INF_HI = -0x80000000; // readInt32BE of 0x80000000\nconst PG_TIMESTAMP_NEG_INF_LO = 0;\nconst PG_DATE_INFINITY = 0x7fffffff;\nconst PG_DATE_NEG_INFINITY = -0x80000000;\n\n/**\n * Streaming parser for PostgreSQL `COPY ... TO STDOUT WITH (FORMAT binary)`.\n *\n * Analogous to {@link import('./pg-copy.ts').TsvParser} but for binary format.\n * Yields `Buffer | null` per field (null = SQL NULL).\n *\n * The caller tracks column position the same way as with TsvParser.\n */\nexport class BinaryCopyParser {\n #buffer: Buffer = Buffer.alloc(0);\n #offset = 0;\n #headerParsed = false;\n #fieldsRemaining = 0; // fields left in current tuple (0 = need new tuple header)\n\n *parse(chunk: Buffer): Iterable<Buffer | null> {\n this.#append(chunk);\n\n if (!this.#headerParsed) {\n if (!this.#tryParseHeader()) {\n return;\n }\n }\n\n for (;;) {\n // If we're at the start of a tuple, read the field count.\n if (this.#fieldsRemaining === 0) {\n if (this.#remaining() < 2) {\n break;\n }\n const fieldCount = this.#buffer.readInt16BE(this.#offset);\n if (fieldCount === -1) {\n // Trailer marker — end of data.\n break;\n }\n this.#offset += 2;\n this.#fieldsRemaining = fieldCount;\n }\n\n // Parse fields within the current tuple.\n while (this.#fieldsRemaining > 0) {\n if (this.#remaining() < 4) {\n // Not enough data for field length — wait for next chunk.\n this.#compact();\n return;\n }\n const fieldLen = this.#buffer.readInt32BE(this.#offset);\n this.#offset += 4;\n\n if (fieldLen === -1) {\n // NULL field.\n yield null;\n } else {\n if (this.#remaining() < fieldLen) {\n // Not enough data for field value — rewind past the length\n // we just read and wait for more data.\n this.#offset -= 4;\n this.#compact();\n return;\n }\n yield this.#buffer.subarray(this.#offset, this.#offset + fieldLen);\n this.#offset += fieldLen;\n }\n this.#fieldsRemaining--;\n }\n }\n\n this.#compact();\n }\n\n #remaining(): number {\n return this.#buffer.length - this.#offset;\n }\n\n #append(chunk: Buffer): void {\n if (this.#buffer.length === this.#offset) {\n // 
Fully consumed — replace.\n this.#buffer = chunk;\n this.#offset = 0;\n } else {\n // Concatenate unconsumed remainder with new chunk.\n this.#buffer = Buffer.concat([\n this.#buffer.subarray(this.#offset),\n chunk,\n ]);\n this.#offset = 0;\n }\n }\n\n #compact(): void {\n if (this.#offset > 0) {\n this.#buffer = this.#buffer.subarray(this.#offset);\n this.#offset = 0;\n }\n }\n\n #tryParseHeader(): boolean {\n if (this.#remaining() < HEADER_MIN_SIZE) {\n return false;\n }\n\n // Validate signature.\n for (let i = 0; i < PGCOPY_SIGNATURE.length; i++) {\n if (this.#buffer[this.#offset + i] !== PGCOPY_SIGNATURE[i]) {\n throw new Error('Invalid PGCOPY binary signature');\n }\n }\n this.#offset += 11;\n\n // Flags (int32) — currently only bit 16 (has OID column) is defined.\n // We don't use OID columns, so just skip.\n const flags = this.#buffer.readInt32BE(this.#offset);\n this.#offset += 4;\n if (flags !== 0) {\n throw new Error(`Unsupported PGCOPY flags: ${flags}`);\n }\n\n // Extension area length (int32).\n const extensionLen = this.#buffer.readInt32BE(this.#offset);\n this.#offset += 4;\n\n // Skip extension data if present.\n if (extensionLen > 0) {\n if (this.#remaining() < extensionLen) {\n // Rewind and wait for more data.\n this.#offset -= HEADER_MIN_SIZE;\n return false;\n }\n this.#offset += extensionLen;\n }\n\n this.#headerParsed = true;\n return true;\n }\n}\n\n// ---- Binary Type Decoders ----\n\nexport type BinaryDecoder = (buf: Buffer) => LiteValueType;\n\ntype BinaryColumnSpec = Pick<\n ColumnSpec,\n 'dataType' | 'pgTypeClass' | 'elemPgTypeClass'\n> & {typeOID: number};\n\nconst KNOWN_BINARY_OIDS = new Set([\n BOOL,\n INT2,\n INT4,\n INT8,\n FLOAT4,\n FLOAT8,\n TEXT,\n VARCHAR,\n BPCHAR,\n CHAR,\n UUID,\n BYTEA,\n JSON_OID,\n JSONB,\n TIMESTAMP,\n TIMESTAMPTZ,\n DATE,\n TIME,\n TIMETZ,\n NUMERIC,\n]);\n\n/**\n * Returns true if the column's binary format is known and can be decoded\n * natively. For columns where this returns false, the COPY SELECT should\n * cast the column to `::text` so PG sends the text representation inside\n * the binary frame.\n */\nexport function hasBinaryDecoder(spec: BinaryColumnSpec): boolean {\n if (spec.elemPgTypeClass !== null && spec.elemPgTypeClass !== undefined) {\n return true; // Array types\n }\n if (spec.pgTypeClass === PostgresTypeClass.Enum) {\n return true; // Enums are sent as UTF-8 text in binary format\n }\n return KNOWN_BINARY_OIDS.has(spec.typeOID);\n}\n\n/** Decoder for columns cast to `::text` in the COPY SELECT. */\nexport const textCastDecoder: BinaryDecoder = buf => buf.toString('utf8');\n\n/**\n * Creates a specialized binary decoder for the given column spec.\n * The returned function converts a raw COPY binary field `Buffer`\n * directly to a `LiteValueType`, bypassing text parsing entirely.\n *\n * Only call this for columns where {@link hasBinaryDecoder} returns true.\n * For other columns, cast to `::text` in the SELECT and use\n * {@link textCastDecoder}.\n */\nexport function makeBinaryDecoder(spec: BinaryColumnSpec): BinaryDecoder {\n const {typeOID, pgTypeClass, elemPgTypeClass} = spec;\n\n // Array types: elemPgTypeClass is non-null for arrays.\n if (elemPgTypeClass !== null && elemPgTypeClass !== undefined) {\n return buf => decodeArray(buf);\n }\n\n // Enum types: binary representation is UTF-8 text.\n if (pgTypeClass === PostgresTypeClass.Enum) {\n return buf => buf.toString('utf8');\n }\n\n switch (typeOID) {\n case BOOL:\n return buf => (buf[0] ? 
1 : 0);\n case INT2:\n return buf => buf.readInt16BE(0);\n case INT4:\n return buf => buf.readInt32BE(0);\n case INT8:\n return buf => buf.readBigInt64BE(0);\n case FLOAT4:\n return buf => buf.readFloatBE(0);\n case FLOAT8:\n return buf => buf.readDoubleBE(0);\n case TEXT:\n case VARCHAR:\n case BPCHAR:\n case CHAR:\n return buf => buf.toString('utf8');\n case UUID:\n return buf => decodeUUID(buf);\n case BYTEA:\n return buf => Uint8Array.prototype.slice.call(buf) as Uint8Array;\n case JSON_OID:\n return buf => buf.toString('utf8');\n case JSONB:\n // JSONB binary format has a 1-byte version prefix (currently 0x01).\n return buf => buf.toString('utf8', 1);\n case TIMESTAMP:\n case TIMESTAMPTZ:\n return buf => decodeTimestamp(buf);\n case DATE:\n return buf => decodeDate(buf);\n case TIME:\n return buf => decodeTime(buf);\n case TIMETZ:\n return buf => decodeTimeTZ(buf);\n case NUMERIC:\n return buf => decodeNumeric(buf);\n default:\n throw new Error(\n `No binary decoder for type OID ${typeOID}. ` +\n `Use hasBinaryDecoder() to check before calling makeBinaryDecoder().`,\n );\n }\n}\n\n// ---- Individual Decoders (exported for testing) ----\n\n/**\n * UUID: 16 bytes → \"xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx\"\n */\nexport function decodeUUID(buf: Buffer): string {\n const hex = buf.toString('hex');\n return (\n hex.substring(0, 8) +\n '-' +\n hex.substring(8, 12) +\n '-' +\n hex.substring(12, 16) +\n '-' +\n hex.substring(16, 20) +\n '-' +\n hex.substring(20, 32)\n );\n}\n\n/**\n * TIMESTAMP / TIMESTAMPTZ: int64 microseconds since PG epoch (2000-01-01 UTC)\n * → floating-point milliseconds since Unix epoch.\n *\n * Matches the output of `timestampToFpMillis()` in `types/pg.ts`.\n *\n * Uses Number arithmetic (avoiding BigInt) for speed. The microsecond value\n * fits safely in a Number for all practical dates (up to ~year 285,000).\n */\nexport function decodeTimestamp(buf: Buffer): number {\n const hi = buf.readInt32BE(0);\n const lo = buf.readUInt32BE(4);\n if (hi === PG_TIMESTAMP_INF_HI && lo === PG_TIMESTAMP_INF_LO) return Infinity;\n if (hi === PG_TIMESTAMP_NEG_INF_HI && lo === PG_TIMESTAMP_NEG_INF_LO) {\n return -Infinity;\n }\n const microseconds = hi * 0x100000000 + lo;\n return microseconds / 1000 + PG_EPOCH_UNIX_MILLIS;\n}\n\n/**\n * DATE: int32 days since PG epoch (2000-01-01) → millis since Unix epoch at\n * UTC midnight. 
Matches `dateToUTCMidnight()` in `types/pg.ts`.\n */\nexport function decodeDate(buf: Buffer): number {\n const pgDays = buf.readInt32BE(0);\n if (pgDays === PG_DATE_INFINITY) return Infinity;\n if (pgDays === PG_DATE_NEG_INFINITY) return -Infinity;\n return (pgDays + PG_EPOCH_UNIX_DAYS) * MS_PER_DAY;\n}\n\n/**\n * TIME: int64 microseconds since midnight → milliseconds since midnight.\n * Matches `postgresTimeToMilliseconds()` in `types/pg.ts`.\n *\n * Max value is 86,400,000,000 (~8.6e10), well within Number.MAX_SAFE_INTEGER.\n */\nexport function decodeTime(buf: Buffer): number {\n const hi = buf.readInt32BE(0);\n const lo = buf.readUInt32BE(4);\n const micros = hi * 0x100000000 + lo;\n return Math.trunc(micros / 1000);\n}\n\n/**\n * TIMETZ: int64 microseconds since midnight + int32 timezone offset in seconds.\n * PG stores the offset with inverted sign from ISO (POSIX convention):\n * positive = west of UTC, negative = east of UTC.\n * UTC = local_time + pg_offset.\n * → UTC milliseconds since midnight.\n *\n * Max value ~1.3e11 microseconds, well within Number.MAX_SAFE_INTEGER.\n */\nexport function decodeTimeTZ(buf: Buffer): number {\n const hi = buf.readInt32BE(0);\n const lo = buf.readUInt32BE(4);\n const localMicros = hi * 0x100000000 + lo;\n const tzOffsetSeconds = buf.readInt32BE(8);\n const utcMicros = localMicros + tzOffsetSeconds * 1_000_000;\n let ms = Math.trunc(utcMicros / 1000);\n // Normalize to [0, MS_PER_DAY).\n if (ms < 0 || ms >= MS_PER_DAY) {\n ms = ((ms % MS_PER_DAY) + MS_PER_DAY) % MS_PER_DAY;\n }\n return ms;\n}\n\n// NUMERIC binary format constants.\nconst NUMERIC_NEG = 0x4000;\nconst NUMERIC_NAN = 0xc000;\nconst NUMERIC_PINF = 0xd000;\nconst NUMERIC_NINF = 0xf000;\nconst NBASE = 10_000;\n\n/**\n * NUMERIC: variable-length binary format.\n * Header: {ndigits: int16, weight: int16, sign: int16, dscale: int16}\n * Followed by ndigits x int16 base-10000 digits.\n *\n * Converts to a JS `number` (matching the text path's `Number(x)` behavior).\n */\nexport function decodeNumeric(buf: Buffer): number {\n const ndigits = buf.readInt16BE(0);\n const weight = buf.readInt16BE(2);\n const sign = buf.readUInt16BE(4);\n // const dscale = buf.readInt16BE(6); // display scale, not needed for value\n\n if (sign === NUMERIC_NAN) {\n return NaN;\n }\n if (sign === NUMERIC_PINF) {\n return Infinity;\n }\n if (sign === NUMERIC_NINF) {\n return -Infinity;\n }\n if (ndigits === 0) {\n return 0;\n }\n\n // Accumulate base-10000 digits into an integer, then do a single\n // division at the end. Repeated `scale /= NBASE` accumulates\n // floating-point error (e.g. 9900 * 0.0001 = 0.9900000000000001).\n // A single division lets IEEE 754 round to the nearest double,\n // matching the text path's `Number(\"0.99\")` behavior.\n //\n // For numerics with many digits (ndigits > 3), intVal can exceed\n // MAX_SAFE_INTEGER. In that case, fall back to building a string\n // and using Number() to match the text path exactly.\n if (ndigits > 3) {\n return decodeNumericViaString(buf, ndigits, weight, sign);\n }\n\n let intVal = 0;\n for (let i = 0; i < ndigits; i++) {\n intVal = intVal * NBASE + buf.readInt16BE(8 + i * 2);\n }\n\n // weight indicates the power-of-NBASE of the first digit.\n // shift is how many base-10000 positions to divide by.\n const shift = ndigits - weight - 1;\n let result;\n if (shift > 0) {\n result = intVal / NBASE ** shift;\n } else if (shift < 0) {\n result = intVal * NBASE ** -shift;\n } else {\n result = intVal;\n }\n return sign === NUMERIC_NEG ? 
-result : result;\n}\n\n/**\n * Fallback for numerics with many base-10000 digits where accumulating\n * into an integer would exceed MAX_SAFE_INTEGER. Builds the decimal\n * string and uses Number() to match the text path exactly.\n */\nfunction decodeNumericViaString(\n buf: Buffer,\n ndigits: number,\n weight: number,\n sign: number,\n): number {\n // Number of base-10000 digit groups before the decimal point.\n const intGroups = weight + 1;\n\n let str = '';\n for (let i = 0; i < ndigits; i++) {\n const digit = buf.readInt16BE(8 + i * 2);\n if (i === intGroups) {\n str = str || '0';\n str += '.';\n }\n str += i === 0 ? String(digit) : String(digit).padStart(4, '0');\n }\n\n // Append trailing zero groups if the integer part extends beyond ndigits.\n if (intGroups > ndigits) {\n str += '0'.repeat((intGroups - ndigits) * 4);\n }\n\n return Number((sign === NUMERIC_NEG ? '-' : '') + str);\n}\n\n/**\n * Array: binary format.\n *\n * Header:\n * int32 ndim — number of dimensions (0 for empty array)\n * int32 flags — 0 or 1 (has-nulls)\n * int32 elem_oid — OID of element type\n * Per dimension:\n * int32 dim_size — number of elements in this dimension\n * int32 dim_lb — lower bound (usually 1)\n *\n * Then for each element (in row-major order):\n * int32 length — -1 for NULL, otherwise byte length\n * bytes — element data\n *\n * Result is JSON.stringify'd for storage in SQLite (matching text path behavior).\n */\nexport function decodeArray(buf: Buffer): string {\n let offset = 0;\n\n const ndim = buf.readInt32BE(offset);\n offset += 4;\n // skip flags (has-nulls)\n offset += 4;\n const elemOid = buf.readInt32BE(offset);\n offset += 4;\n\n if (ndim === 0) {\n return '[]';\n }\n\n // Read dimension sizes.\n const dims: number[] = [];\n for (let d = 0; d < ndim; d++) {\n dims.push(buf.readInt32BE(offset));\n offset += 4;\n // skip lower bound\n offset += 4;\n }\n\n const elemDecoder = makeElementDecoder(elemOid);\n\n // Recursively build the nested array structure.\n function readDimension(dim: number): unknown[] {\n const size = dims[dim];\n const arr: unknown[] = [];\n for (let i = 0; i < size; i++) {\n if (dim < ndim - 1) {\n arr.push(readDimension(dim + 1));\n } else {\n // Leaf dimension — read element.\n const elemLen = buf.readInt32BE(offset);\n offset += 4;\n if (elemLen === -1) {\n arr.push(null);\n } else {\n arr.push(elemDecoder(buf.subarray(offset, offset + elemLen)));\n offset += elemLen;\n }\n }\n }\n return arr;\n }\n\n const result = readDimension(0);\n return stringify(result);\n}\n\n/**\n * Creates a decoder for array elements. Array elements use the same\n * binary encoding as scalar columns, but we need to map the element\n * OID to the right decoder. Returns JS values (not LiteValueType)\n * since the result will be JSON.stringify'd.\n */\nfunction makeElementDecoder(elemOid: number): (buf: Buffer) => unknown {\n switch (elemOid) {\n case BOOL:\n return buf => (buf[0] ? true : false);\n case INT2:\n return buf => buf.readInt16BE(0);\n case INT4:\n return buf => buf.readInt32BE(0);\n case INT8:\n return buf => {\n const val = buf.readBigInt64BE(0);\n // Use number if it fits safely, otherwise bigint for JSON.\n return val >= Number.MIN_SAFE_INTEGER && val <= Number.MAX_SAFE_INTEGER\n ? 
Number(val)\n : val;\n };\n case FLOAT4:\n return buf => buf.readFloatBE(0);\n case FLOAT8:\n return buf => buf.readDoubleBE(0);\n case TEXT:\n case VARCHAR:\n case BPCHAR:\n case CHAR:\n return buf => buf.toString('utf8');\n case UUID:\n return buf => decodeUUID(buf);\n case JSON_OID:\n return buf => JSON.parse(buf.toString('utf8'));\n case JSONB:\n return buf => JSON.parse(buf.toString('utf8', 1));\n case TIMESTAMP:\n case TIMESTAMPTZ:\n return buf => decodeTimestamp(buf);\n case DATE:\n return buf => decodeDate(buf);\n case TIME:\n return buf => decodeTime(buf);\n case TIMETZ:\n return buf => decodeTimeTZ(buf);\n case NUMERIC:\n return buf => decodeNumeric(buf);\n default:\n return buf => buf.toString('utf8');\n }\n}\n"],"mappings":";;;AA4BA,IAAM,mBAAmB,OAAO,KAAK;CACnC;CAAM;CAAM;CAAM;CAAM;CAAM;CAAM;CAAM;CAAM;CAAM;CAAM;CAC7D,CAAC;AAEF,IAAM,kBAAkB;AAIxB,IAAM,uBAAuB;AAG7B,IAAM,qBAAqB;AAE3B,IAAM,aAAa;AAGnB,IAAM,sBAAsB;AAC5B,IAAM,sBAAsB;AAC5B,IAAM,0BAA0B;AAChC,IAAM,0BAA0B;AAChC,IAAM,mBAAmB;AACzB,IAAM,uBAAuB;;;;;;;;;AAU7B,IAAa,mBAAb,MAA8B;CAC5B,UAAkB,OAAO,MAAM,EAAE;CACjC,UAAU;CACV,gBAAgB;CAChB,mBAAmB;CAEnB,CAAC,MAAM,OAAwC;AAC7C,QAAA,OAAa,MAAM;AAEnB,MAAI,CAAC,MAAA;OACC,CAAC,MAAA,gBAAsB,CACzB;;AAIJ,WAAS;AAEP,OAAI,MAAA,oBAA0B,GAAG;AAC/B,QAAI,MAAA,WAAiB,GAAG,EACtB;IAEF,MAAM,aAAa,MAAA,OAAa,YAAY,MAAA,OAAa;AACzD,QAAI,eAAe,GAEjB;AAEF,UAAA,UAAgB;AAChB,UAAA,kBAAwB;;AAI1B,UAAO,MAAA,kBAAwB,GAAG;AAChC,QAAI,MAAA,WAAiB,GAAG,GAAG;AAEzB,WAAA,SAAe;AACf;;IAEF,MAAM,WAAW,MAAA,OAAa,YAAY,MAAA,OAAa;AACvD,UAAA,UAAgB;AAEhB,QAAI,aAAa,GAEf,OAAM;SACD;AACL,SAAI,MAAA,WAAiB,GAAG,UAAU;AAGhC,YAAA,UAAgB;AAChB,YAAA,SAAe;AACf;;AAEF,WAAM,MAAA,OAAa,SAAS,MAAA,QAAc,MAAA,SAAe,SAAS;AAClE,WAAA,UAAgB;;AAElB,UAAA;;;AAIJ,QAAA,SAAe;;CAGjB,aAAqB;AACnB,SAAO,MAAA,OAAa,SAAS,MAAA;;CAG/B,QAAQ,OAAqB;AAC3B,MAAI,MAAA,OAAa,WAAW,MAAA,QAAc;AAExC,SAAA,SAAe;AACf,SAAA,SAAe;SACV;AAEL,SAAA,SAAe,OAAO,OAAO,CAC3B,MAAA,OAAa,SAAS,MAAA,OAAa,EACnC,MACD,CAAC;AACF,SAAA,SAAe;;;CAInB,WAAiB;AACf,MAAI,MAAA,SAAe,GAAG;AACpB,SAAA,SAAe,MAAA,OAAa,SAAS,MAAA,OAAa;AAClD,SAAA,SAAe;;;CAInB,kBAA2B;AACzB,MAAI,MAAA,WAAiB,GAAG,gBACtB,QAAO;AAIT,OAAK,IAAI,IAAI,GAAG,IAAI,iBAAiB,QAAQ,IAC3C,KAAI,MAAA,OAAa,MAAA,SAAe,OAAO,iBAAiB,GACtD,OAAM,IAAI,MAAM,kCAAkC;AAGtD,QAAA,UAAgB;EAIhB,MAAM,QAAQ,MAAA,OAAa,YAAY,MAAA,OAAa;AACpD,QAAA,UAAgB;AAChB,MAAI,UAAU,EACZ,OAAM,IAAI,MAAM,6BAA6B,QAAQ;EAIvD,MAAM,eAAe,MAAA,OAAa,YAAY,MAAA,OAAa;AAC3D,QAAA,UAAgB;AAGhB,MAAI,eAAe,GAAG;AACpB,OAAI,MAAA,WAAiB,GAAG,cAAc;AAEpC,UAAA,UAAgB;AAChB,WAAO;;AAET,SAAA,UAAgB;;AAGlB,QAAA,eAAqB;AACrB,SAAO;;;AAaX,IAAM,oBAAoB,IAAI,IAAI;;;;;;;;CAQhC;CACA;;CAEA;;;CAGA;CACA;CACA;CACA;CACA;CACA;CACA;CACD,CAAC;;;;;;;AAQF,SAAgB,iBAAiB,MAAiC;AAChE,KAAI,KAAK,oBAAoB,QAAQ,KAAK,oBAAoB,KAAA,EAC5D,QAAO;AAET,KAAI,KAAK,gBAAgB,IACvB,QAAO;AAET,QAAO,kBAAkB,IAAI,KAAK,QAAQ;;;AAI5C,IAAa,mBAAiC,QAAO,IAAI,SAAS,OAAO;;;;;;;;;;AAWzE,SAAgB,kBAAkB,MAAuC;CACvE,MAAM,EAAC,SAAS,aAAa,oBAAmB;AAGhD,KAAI,oBAAoB,QAAQ,oBAAoB,KAAA,EAClD,SAAO,QAAO,YAAY,IAAI;AAIhC,KAAI,gBAAgB,IAClB,SAAO,QAAO,IAAI,SAAS,OAAO;AAGpC,SAAQ,SAAR;EACE,KAAA,GACE,SAAO,QAAQ,IAAI,KAAK,IAAI;EAC9B,KAAA,GACE,SAAO,QAAO,IAAI,YAAY,EAAE;EAClC,KAAA,GACE,SAAO,QAAO,IAAI,YAAY,EAAE;EAClC,KAAA,GACE,SAAO,QAAO,IAAI,eAAe,EAAE;EACrC,KAAA,IACE,SAAO,QAAO,IAAI,YAAY,EAAE;EAClC,KAAA,IACE,SAAO,QAAO,IAAI,aAAa,EAAE;EACnC,KAAA;EACA,KAAK;EACL,KAAK;EACL,KAAA,GACE,SAAO,QAAO,IAAI,SAAS,OAAO;EACpC,KAAK,KACH,SAAO,QAAO,WAAW,IAAI;EAC/B,KAAA,GACE,SAAO,QAAO,WAAW,UAAU,MAAM,KAAK,IAAI;EACpD,KAAA,IACE,SAAO,QAAO,IAAI,SAAS,OAAO;EACpC,KAAK,MAEH,SAAO,QAAO,IAAI,SAAS,QAAQ,EAAE;EACvC,KAAK;EACL,KAAK,YACH,SAAO,QAAO,gBAAgB,IAAI;EACpC,KAAK,KACH,SAAO,QAAO,WAAW
,IAAI;EAC/B,KAAK,KACH,SAAO,QAAO,WAAW,IAAI;EAC/B,KAAK,OACH,SAAO,QAAO,aAAa,IAAI;EACjC,KAAK,QACH,SAAO,QAAO,cAAc,IAAI;EAClC,QACE,OAAM,IAAI,MACR,kCAAkC,QAAQ,uEAE3C;;;;;;AASP,SAAgB,WAAW,KAAqB;CAC9C,MAAM,MAAM,IAAI,SAAS,MAAM;AAC/B,QACE,IAAI,UAAU,GAAG,EAAE,GACnB,MACA,IAAI,UAAU,GAAG,GAAG,GACpB,MACA,IAAI,UAAU,IAAI,GAAG,GACrB,MACA,IAAI,UAAU,IAAI,GAAG,GACrB,MACA,IAAI,UAAU,IAAI,GAAG;;;;;;;;;;;AAazB,SAAgB,gBAAgB,KAAqB;CACnD,MAAM,KAAK,IAAI,YAAY,EAAE;CAC7B,MAAM,KAAK,IAAI,aAAa,EAAE;AAC9B,KAAI,OAAO,uBAAuB,OAAO,oBAAqB,QAAO;AACrE,KAAI,OAAO,2BAA2B,OAAO,wBAC3C,QAAO;AAGT,SADqB,KAAK,aAAc,MAClB,MAAO;;;;;;AAO/B,SAAgB,WAAW,KAAqB;CAC9C,MAAM,SAAS,IAAI,YAAY,EAAE;AACjC,KAAI,WAAW,iBAAkB,QAAO;AACxC,KAAI,WAAW,qBAAsB,QAAO;AAC5C,SAAQ,SAAS,sBAAsB;;;;;;;;AASzC,SAAgB,WAAW,KAAqB;CAC9C,MAAM,KAAK,IAAI,YAAY,EAAE;CAC7B,MAAM,KAAK,IAAI,aAAa,EAAE;CAC9B,MAAM,SAAS,KAAK,aAAc;AAClC,QAAO,KAAK,MAAM,SAAS,IAAK;;;;;;;;;;;AAYlC,SAAgB,aAAa,KAAqB;CAChD,MAAM,KAAK,IAAI,YAAY,EAAE;CAC7B,MAAM,KAAK,IAAI,aAAa,EAAE;CAG9B,MAAM,YAFc,KAAK,aAAc,KACf,IAAI,YAAY,EAAE,GACQ;CAClD,IAAI,KAAK,KAAK,MAAM,YAAY,IAAK;AAErC,KAAI,KAAK,KAAK,MAAM,WAClB,OAAO,KAAK,aAAc,cAAc;AAE1C,QAAO;;AAIT,IAAM,cAAc;AACpB,IAAM,cAAc;AACpB,IAAM,eAAe;AACrB,IAAM,eAAe;AACrB,IAAM,QAAQ;;;;;;;;AASd,SAAgB,cAAc,KAAqB;CACjD,MAAM,UAAU,IAAI,YAAY,EAAE;CAClC,MAAM,SAAS,IAAI,YAAY,EAAE;CACjC,MAAM,OAAO,IAAI,aAAa,EAAE;AAGhC,KAAI,SAAS,YACX,QAAO;AAET,KAAI,SAAS,aACX,QAAO;AAET,KAAI,SAAS,aACX,QAAO;AAET,KAAI,YAAY,EACd,QAAO;AAYT,KAAI,UAAU,EACZ,QAAO,uBAAuB,KAAK,SAAS,QAAQ,KAAK;CAG3D,IAAI,SAAS;AACb,MAAK,IAAI,IAAI,GAAG,IAAI,SAAS,IAC3B,UAAS,SAAS,QAAQ,IAAI,YAAY,IAAI,IAAI,EAAE;CAKtD,MAAM,QAAQ,UAAU,SAAS;CACjC,IAAI;AACJ,KAAI,QAAQ,EACV,UAAS,SAAS,SAAS;UAClB,QAAQ,EACjB,UAAS,SAAS,SAAS,CAAC;KAE5B,UAAS;AAEX,QAAO,SAAS,cAAc,CAAC,SAAS;;;;;;;AAQ1C,SAAS,uBACP,KACA,SACA,QACA,MACQ;CAER,MAAM,YAAY,SAAS;CAE3B,IAAI,MAAM;AACV,MAAK,IAAI,IAAI,GAAG,IAAI,SAAS,KAAK;EAChC,MAAM,QAAQ,IAAI,YAAY,IAAI,IAAI,EAAE;AACxC,MAAI,MAAM,WAAW;AACnB,SAAM,OAAO;AACb,UAAO;;AAET,SAAO,MAAM,IAAI,OAAO,MAAM,GAAG,OAAO,MAAM,CAAC,SAAS,GAAG,IAAI;;AAIjE,KAAI,YAAY,QACd,QAAO,IAAI,QAAQ,YAAY,WAAW,EAAE;AAG9C,QAAO,QAAQ,SAAS,cAAc,MAAM,MAAM,IAAI;;;;;;;;;;;;;;;;;;;AAoBxD,SAAgB,YAAY,KAAqB;CAC/C,IAAI,SAAS;CAEb,MAAM,OAAO,IAAI,YAAY,OAAO;AACpC,WAAU;AAEV,WAAU;CACV,MAAM,UAAU,IAAI,YAAY,OAAO;AACvC,WAAU;AAEV,KAAI,SAAS,EACX,QAAO;CAIT,MAAM,OAAiB,EAAE;AACzB,MAAK,IAAI,IAAI,GAAG,IAAI,MAAM,KAAK;AAC7B,OAAK,KAAK,IAAI,YAAY,OAAO,CAAC;AAClC,YAAU;AAEV,YAAU;;CAGZ,MAAM,cAAc,mBAAmB,QAAQ;CAG/C,SAAS,cAAc,KAAwB;EAC7C,MAAM,OAAO,KAAK;EAClB,MAAM,MAAiB,EAAE;AACzB,OAAK,IAAI,IAAI,GAAG,IAAI,MAAM,IACxB,KAAI,MAAM,OAAO,EACf,KAAI,KAAK,cAAc,MAAM,EAAE,CAAC;OAC3B;GAEL,MAAM,UAAU,IAAI,YAAY,OAAO;AACvC,aAAU;AACV,OAAI,YAAY,GACd,KAAI,KAAK,KAAK;QACT;AACL,QAAI,KAAK,YAAY,IAAI,SAAS,QAAQ,SAAS,QAAQ,CAAC,CAAC;AAC7D,cAAU;;;AAIhB,SAAO;;AAIT,QAAO,UADQ,cAAc,EAAE,CACP;;;;;;;;AAS1B,SAAS,mBAAmB,SAA2C;AACrE,SAAQ,SAAR;EACE,KAAA,GACE,SAAO,QAAQ,IAAI,KAAK,OAAO;EACjC,KAAA,GACE,SAAO,QAAO,IAAI,YAAY,EAAE;EAClC,KAAA,GACE,SAAO,QAAO,IAAI,YAAY,EAAE;EAClC,KAAA,GACE,SAAO,QAAO;GACZ,MAAM,MAAM,IAAI,eAAe,EAAE;AAEjC,UAAO,OAAO,OAAO,oBAAoB,OAAO,OAAO,mBACnD,OAAO,IAAI,GACX;;EAER,KAAA,IACE,SAAO,QAAO,IAAI,YAAY,EAAE;EAClC,KAAA,IACE,SAAO,QAAO,IAAI,aAAa,EAAE;EACnC,KAAA;EACA,KAAK;EACL,KAAK;EACL,KAAA,GACE,SAAO,QAAO,IAAI,SAAS,OAAO;EACpC,KAAK,KACH,SAAO,QAAO,WAAW,IAAI;EAC/B,KAAA,IACE,SAAO,QAAO,KAAK,MAAM,IAAI,SAAS,OAAO,CAAC;EAChD,KAAK,MACH,SAAO,QAAO,KAAK,MAAM,IAAI,SAAS,QAAQ,EAAE,CAAC;EACnD,KAAK;EACL,KAAK,YACH,SAAO,QAAO,gBAAgB,IAAI;EACpC,KAAK,KACH,SAAO,QAAO,WAAW,IAAI;EAC/B,KAAK,KACH,SAAO,QAAO,WAAW,IAAI;EAC/B,KAAK,OACH,SAAO,QAAO,aAAa,IAAI;EACjC,KAAK,QACH,SAAO,QAAO,cAAc,IAAI;EAClC,QACE,SAAO
,QAAO,IAAI,SAAS,OAAO"}
@@ -1 +1 @@
1
- {"version":3,"file":"warmup.d.ts","sourceRoot":"","sources":["../../../../../zero-cache/src/db/warmup.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,kBAAkB,CAAC;AACjD,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,gBAAgB,CAAC;AAE/C,wBAAsB,iBAAiB,CACrC,EAAE,EAAE,UAAU,EACd,EAAE,EAAE,UAAU,EACd,IAAI,EAAE,MAAM,iBAqBb"}
1
+ {"version":3,"file":"warmup.d.ts","sourceRoot":"","sources":["../../../../../zero-cache/src/db/warmup.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,kBAAkB,CAAC;AACjD,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,gBAAgB,CAAC;AAI/C,wBAAsB,iBAAiB,CACrC,EAAE,EAAE,UAAU,EACd,EAAE,EAAE,UAAU,EACd,IAAI,EAAE,MAAM,iBAsBb"}
@@ -1,6 +1,8 @@
1
1
  //#region ../zero-cache/src/db/warmup.ts
2
+ var MAX_WARMUP_CONNECTIONS = 5;
2
3
  async function warmupConnections(lc, db, name) {
3
- const { max, host } = db.options;
4
+ const { host } = db.options;
5
+ const max = Math.min(db.options.max, MAX_WARMUP_CONNECTIONS);
4
6
  await Promise.allSettled(Array.from({ length: max }, () => db`SELECT 1`.simple().execute()));
5
7
  const start = performance.now();
6
8
  const pingTimes = await Promise.all(Array.from({ length: Math.min(max, 5) }, () => db`SELECT 2`.simple().then(() => performance.now() - start, () => performance.now() - start)));
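
The effect of the new cap, with illustrative numbers: a pool configured with max = 30 previously fired 30 warmup queries and now fires at most 5.

const MAX_WARMUP_CONNECTIONS = 5;
const configuredMax = 30; // hypothetical pool size
const warmups = Math.min(configuredMax, MAX_WARMUP_CONNECTIONS);
console.log(warmups); // 5
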
@@ -1 +1 @@
1
- {"version":3,"file":"warmup.js","names":[],"sources":["../../../../../zero-cache/src/db/warmup.ts"],"sourcesContent":["import type {LogContext} from '@rocicorp/logger';\nimport type {PostgresDB} from '../types/pg.ts';\n\nexport async function warmupConnections(\n lc: LogContext,\n db: PostgresDB,\n name: string,\n) {\n const {max, host} = db.options;\n await Promise.allSettled(\n Array.from({length: max}, () => db`SELECT 1`.simple().execute()),\n );\n const start = performance.now();\n const pingTimes = await Promise.all(\n Array.from({length: Math.min(max, 5)}, () =>\n db`SELECT 2`.simple().then(\n () => performance.now() - start,\n () => performance.now() - start,\n ),\n ),\n );\n const average = pingTimes.reduce((l, r) => l + r, 0) / pingTimes.length;\n const log = average >= 10 ? 'warn' : 'info';\n lc[log]?.(`average ping to ${name} db@${host}: ${average.toFixed(2)} ms`);\n if (log === 'warn') {\n lc.warn?.(`ideal db ping time is < 5 ms`);\n }\n}\n"],"mappings":";AAGA,eAAsB,kBACpB,IACA,IACA,MACA;CACA,MAAM,EAAC,KAAK,SAAQ,GAAG;AACvB,OAAM,QAAQ,WACZ,MAAM,KAAK,EAAC,QAAQ,KAAI,QAAQ,EAAE,WAAW,QAAQ,CAAC,SAAS,CAAC,CACjE;CACD,MAAM,QAAQ,YAAY,KAAK;CAC/B,MAAM,YAAY,MAAM,QAAQ,IAC9B,MAAM,KAAK,EAAC,QAAQ,KAAK,IAAI,KAAK,EAAE,EAAC,QACnC,EAAE,WAAW,QAAQ,CAAC,WACd,YAAY,KAAK,GAAG,aACpB,YAAY,KAAK,GAAG,MAC3B,CACF,CACF;CACD,MAAM,UAAU,UAAU,QAAQ,GAAG,MAAM,IAAI,GAAG,EAAE,GAAG,UAAU;CACjE,MAAM,MAAM,WAAW,KAAK,SAAS;AACrC,IAAG,OAAO,mBAAmB,KAAK,MAAM,KAAK,IAAI,QAAQ,QAAQ,EAAE,CAAC,KAAK;AACzE,KAAI,QAAQ,OACV,IAAG,OAAO,+BAA+B"}
1
+ {"version":3,"file":"warmup.js","names":[],"sources":["../../../../../zero-cache/src/db/warmup.ts"],"sourcesContent":["import type {LogContext} from '@rocicorp/logger';\nimport type {PostgresDB} from '../types/pg.ts';\n\nconst MAX_WARMUP_CONNECTIONS = 5;\n\nexport async function warmupConnections(\n lc: LogContext,\n db: PostgresDB,\n name: string,\n) {\n const {host} = db.options;\n const max = Math.min(db.options.max, MAX_WARMUP_CONNECTIONS);\n await Promise.allSettled(\n Array.from({length: max}, () => db`SELECT 1`.simple().execute()),\n );\n const start = performance.now();\n const pingTimes = await Promise.all(\n Array.from({length: Math.min(max, 5)}, () =>\n db`SELECT 2`.simple().then(\n () => performance.now() - start,\n () => performance.now() - start,\n ),\n ),\n );\n const average = pingTimes.reduce((l, r) => l + r, 0) / pingTimes.length;\n const log = average >= 10 ? 'warn' : 'info';\n lc[log]?.(`average ping to ${name} db@${host}: ${average.toFixed(2)} ms`);\n if (log === 'warn') {\n lc.warn?.(`ideal db ping time is < 5 ms`);\n }\n}\n"],"mappings":";AAGA,IAAM,yBAAyB;AAE/B,eAAsB,kBACpB,IACA,IACA,MACA;CACA,MAAM,EAAC,SAAQ,GAAG;CAClB,MAAM,MAAM,KAAK,IAAI,GAAG,QAAQ,KAAK,uBAAuB;AAC5D,OAAM,QAAQ,WACZ,MAAM,KAAK,EAAC,QAAQ,KAAI,QAAQ,EAAE,WAAW,QAAQ,CAAC,SAAS,CAAC,CACjE;CACD,MAAM,QAAQ,YAAY,KAAK;CAC/B,MAAM,YAAY,MAAM,QAAQ,IAC9B,MAAM,KAAK,EAAC,QAAQ,KAAK,IAAI,KAAK,EAAE,EAAC,QACnC,EAAE,WAAW,QAAQ,CAAC,WACd,YAAY,KAAK,GAAG,aACpB,YAAY,KAAK,GAAG,MAC3B,CACF,CACF;CACD,MAAM,UAAU,UAAU,QAAQ,GAAG,MAAM,IAAI,GAAG,EAAE,GAAG,UAAU;CACjE,MAAM,MAAM,WAAW,KAAK,SAAS;AACrC,IAAG,OAAO,mBAAmB,KAAK,MAAM,KAAK,IAAI,QAAQ,QAAQ,EAAE,CAAC,KAAK;AACzE,KAAI,QAAQ,OACV,IAAG,OAAO,+BAA+B"}