orez 0.1.49 → 0.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (51)
  1. package/dist/bench/proxy-throughput.bench.d.ts +13 -0
  2. package/dist/bench/proxy-throughput.bench.d.ts.map +1 -0
  3. package/dist/bench/proxy-throughput.bench.js +249 -0
  4. package/dist/bench/proxy-throughput.bench.js.map +1 -0
  5. package/dist/browser.d.ts.map +1 -1
  6. package/dist/browser.js +1 -0
  7. package/dist/browser.js.map +1 -1
  8. package/dist/cli.js +8 -0
  9. package/dist/cli.js.map +1 -1
  10. package/dist/config.d.ts +3 -0
  11. package/dist/config.d.ts.map +1 -1
  12. package/dist/config.js +6 -0
  13. package/dist/config.js.map +1 -1
  14. package/dist/index.d.ts.map +1 -1
  15. package/dist/index.js +13 -93
  16. package/dist/index.js.map +1 -1
  17. package/dist/mutex.d.ts +2 -0
  18. package/dist/mutex.d.ts.map +1 -1
  19. package/dist/mutex.js +4 -0
  20. package/dist/mutex.js.map +1 -1
  21. package/dist/pg-proxy-browser.d.ts.map +1 -1
  22. package/dist/pg-proxy-browser.js +6 -1
  23. package/dist/pg-proxy-browser.js.map +1 -1
  24. package/dist/pg-proxy.d.ts.map +1 -1
  25. package/dist/pg-proxy.js +146 -75
  26. package/dist/pg-proxy.js.map +1 -1
  27. package/dist/pglite-ipc.d.ts +15 -0
  28. package/dist/pglite-ipc.d.ts.map +1 -1
  29. package/dist/pglite-ipc.js +24 -0
  30. package/dist/pglite-ipc.js.map +1 -1
  31. package/dist/pglite-manager.d.ts +13 -0
  32. package/dist/pglite-manager.d.ts.map +1 -1
  33. package/dist/pglite-manager.js +43 -3
  34. package/dist/pglite-manager.js.map +1 -1
  35. package/dist/pglite-worker-thread.d.ts +2 -0
  36. package/dist/pglite-worker-thread.d.ts.map +1 -1
  37. package/dist/pglite-worker-thread.js +35 -0
  38. package/dist/pglite-worker-thread.js.map +1 -1
  39. package/package.json +2 -2
  40. package/src/bench/proxy-throughput.bench.ts +343 -0
  41. package/src/browser.ts +1 -0
  42. package/src/cli.ts +9 -0
  43. package/src/config.ts +11 -0
  44. package/src/index.ts +15 -112
  45. package/src/mutex.ts +5 -0
  46. package/src/pg-proxy-browser.ts +6 -1
  47. package/src/pg-proxy.ts +159 -73
  48. package/src/pglite-ipc.ts +32 -0
  49. package/src/pglite-manager.ts +60 -3
  50. package/src/pglite-worker-thread.ts +39 -0
  51. package/src/worker/embed-integration.test.ts +1 -1
package/src/config.ts CHANGED
@@ -1,3 +1,5 @@
1
+ import { availableParallelism } from 'node:os'
2
+
1
3
  import type { PGliteOptions } from '@electric-sql/pglite'
2
4
 
3
5
  export type LogLevel = 'error' | 'warn' | 'info' | 'debug'
@@ -19,6 +21,7 @@ export interface ZeroLiteConfig {
19
21
  forceWasmSqlite: boolean
20
22
  useWorkerThreads: boolean
21
23
  singleDb: boolean
24
+ readReplicas: number
22
25
  logLevel: LogLevel
23
26
  pgliteOptions: Partial<PGliteOptions>
24
27
  zeroPublications?: string
@@ -80,6 +83,8 @@ export interface OrezConfig {
80
83
  onDbReady?: Hook
81
84
  /** command to run once all services healthy */
82
85
  onHealthy?: Hook
86
+ /** number of pglite read replicas for postgres (default: auto, 0 to disable) */
87
+ readReplicas?: number
83
88
  /** pglite options */
84
89
  pgliteOptions?: Partial<PGliteOptions>
85
90
  /** ZERO_APP_PUBLICATIONS — comma-separated publication names */
@@ -110,6 +115,12 @@ export function getConfig(overrides: Partial<ZeroLiteConfig> = {}): ZeroLiteConf
110
115
  forceWasmSqlite: overrides.forceWasmSqlite ?? false,
111
116
  useWorkerThreads: overrides.useWorkerThreads ?? true,
112
117
  singleDb: overrides.singleDb ?? false,
118
+ // singleDb shares one pglite instance for all databases — replicas make no sense
119
+ readReplicas:
120
+ (overrides.singleDb ?? false)
121
+ ? 0
122
+ : (overrides.readReplicas ??
123
+ Math.min(Math.ceil(availableParallelism() * 0.5), 4)),
113
124
  logLevel: overrides.logLevel || (process.env.OREZ_LOG_LEVEL as LogLevel) || 'warn',
114
125
  pgliteOptions: overrides.pgliteOptions || {},
115
126
  zeroPublications: overrides.zeroPublications,
package/src/index.ts CHANGED
@@ -37,6 +37,8 @@ import {
37
37
  import { installChangeTracking } from './replication/change-tracker.js'
38
38
  import { resetReplicationState } from './replication/handler.js'
39
39
  import {
40
+ applySqliteMode,
41
+ cleanupShim,
40
42
  formatNativeBootstrapInstructions,
41
43
  hasMissingNativeBinarySignature,
42
44
  inspectNativeSqliteBinary,
@@ -310,6 +312,13 @@ export async function startZeroLite(overrides: Partial<ZeroLiteConfig> = {}) {
310
312
  await installChangeTracking(db)
311
313
  }
312
314
 
315
+ // create read replicas after the primary is fully initialized
316
+ // (migrations, seed, change tracking, publications all set up)
317
+ if (config.readReplicas > 0 && config.useWorkerThreads) {
318
+ const { createReadReplicas } = await import('./pglite-manager.js')
319
+ instances.postgresReplicas = await createReadReplicas(db, config.readReplicas, config)
320
+ }
321
+
313
322
  // clean up stale sqlite replica from previous runs
314
323
  cleanupStaleReplica(config)
315
324
 
@@ -720,115 +729,6 @@ async function seedIfNeeded(db: PGlite, config: ZeroLiteConfig): Promise<void> {
720
729
  log.orez('seeded')
721
730
  }
722
731
 
723
- // install wasm shim into node_modules/@rocicorp/zero-sqlite3
724
- // backs up original and writes a conditional loader that switches based on env
725
- function installWasmShim(bedrockPath: string, zeroEntry: string): void {
726
- // find node_modules where @rocicorp/zero is installed
727
- const nodeModulesMatch = zeroEntry.match(/^(.+\/node_modules)\/@rocicorp\/zero\//)
728
- if (!nodeModulesMatch) {
729
- throw new Error(`cannot determine node_modules path from ${zeroEntry}`)
730
- }
731
- const nodeModules = nodeModulesMatch[1]
732
- const shimDir = resolve(nodeModules, '@rocicorp', 'zero-sqlite3')
733
- const indexPath = resolve(shimDir, 'index.js')
734
- const originalPath = resolve(shimDir, 'index.original.js')
735
- const wasmPath = resolve(shimDir, 'index.wasm.js')
736
-
737
- // if directory doesn't exist, create it with just the wasm shim
738
- if (!existsSync(shimDir)) {
739
- mkdirSync(shimDir, { recursive: true })
740
- }
741
-
742
- // backup original if it exists and hasn't been backed up
743
- if (existsSync(indexPath) && !existsSync(originalPath)) {
744
- const content = readFileSync(indexPath, 'utf-8')
745
- // only backup if it's not already our shim
746
- if (!content.includes('orez-sqlite-shim')) {
747
- writeFileSync(originalPath, content)
748
- log.debug.orez('backed up original zero-sqlite3/index.js')
749
- }
750
- }
751
-
752
- // write wasm implementation
753
- writeFileSync(
754
- wasmPath,
755
- `// orez wasm sqlite implementation
756
- import bedrockSqlite from '${bedrockPath}';
757
- const { Database: OrigDatabase, SqliteError } = bedrockSqlite;
758
-
759
- function Database(...args) {
760
- const db = new OrigDatabase(...args);
761
- try {
762
- db.pragma('journal_mode = wal2');
763
- db.pragma('synchronous = off');
764
- db.pragma('temp_store = memory');
765
- db.pragma('busy_timeout = 30000');
766
- } catch(e) {}
767
- return db;
768
- }
769
- Database.prototype = OrigDatabase.prototype;
770
- Database.prototype.constructor = Database;
771
- Object.keys(OrigDatabase).forEach(k => { Database[k] = OrigDatabase[k]; });
772
- Database.prototype.unsafeMode = function() { return this; };
773
- if (!Database.prototype.defaultSafeIntegers) Database.prototype.defaultSafeIntegers = function() { return this; };
774
- if (!Database.prototype.serialize) Database.prototype.serialize = function() { throw new Error('not supported in wasm'); };
775
- if (!Database.prototype.backup) Database.prototype.backup = function() { throw new Error('not supported in wasm'); };
776
-
777
- const tmpDb = new OrigDatabase(':memory:');
778
- const tmpStmt = tmpDb.prepare('SELECT 1');
779
- const SP = Object.getPrototypeOf(tmpStmt);
780
- if (!SP.safeIntegers) SP.safeIntegers = function() { return this; };
781
- SP.scanStatus = function() { return undefined; };
782
- SP.scanStatusV2 = function() { return []; };
783
- SP.scanStatusReset = function() {};
784
- tmpDb.close();
785
-
786
- Database.SQLITE_SCANSTAT_NLOOP = 0;
787
- Database.SQLITE_SCANSTAT_NVISIT = 1;
788
- Database.SQLITE_SCANSTAT_EST = 2;
789
- Database.SQLITE_SCANSTAT_NAME = 3;
790
- Database.SQLITE_SCANSTAT_EXPLAIN = 4;
791
- Database.SQLITE_SCANSTAT_SELECTID = 5;
792
- Database.SQLITE_SCANSTAT_PARENTID = 6;
793
- Database.SQLITE_SCANSTAT_NCYCLE = 7;
794
- Database.SQLITE_SCANSTAT_COMPLEX = 8;
795
-
796
- export default Database;
797
- export { SqliteError };
798
- `
799
- )
800
-
801
- // write esm loader - always uses wasm since native requires compiled binary
802
- // users who want native should use --disable-wasm-sqlite flag which skips this
803
- writeFileSync(
804
- indexPath,
805
- `// orez-sqlite-shim: wasm sqlite for zero-cache
806
- // this file was generated by orez - original backed up to index.original.js
807
- export * from './index.wasm.js';
808
- export { default } from './index.wasm.js';
809
- `
810
- )
811
-
812
- // ensure package.json exists with correct config
813
- const pkgPath = resolve(shimDir, 'package.json')
814
- if (!existsSync(pkgPath)) {
815
- writeFileSync(
816
- pkgPath,
817
- JSON.stringify(
818
- {
819
- name: '@rocicorp/zero-sqlite3',
820
- version: '0.0.0-orez-shim',
821
- main: './index.js',
822
- },
823
- null,
824
- 2
825
- ) + '\n'
826
- )
827
- }
828
-
829
- log.debug.orez(`installed wasm shim to ${shimDir}`)
830
- }
831
-
832
732
  async function startZeroCache(
833
733
  config: ZeroLiteConfig,
834
734
  logStore?: LogStore,
@@ -886,9 +786,12 @@ async function startZeroCache(
886
786
  throw new Error('zero-cache cli.js not found. install @rocicorp/zero')
887
787
  }
888
788
 
889
- // install wasm shim into node_modules/@rocicorp/zero-sqlite3
890
- if (sqliteMode === 'wasm' && sqliteModeConfig?.bedrockPath) {
891
- installWasmShim(sqliteModeConfig.bedrockPath, zeroEntry)
789
+ // apply sqlite mode shim (wasm: patches lib/index.js, native: restores original)
790
+ if (sqliteModeConfig) {
791
+ const shimResult = applySqliteMode(sqliteModeConfig)
792
+ if (!shimResult.success) {
793
+ log.orez(`warning: sqlite shim failed: ${shimResult.error}`)
794
+ }
892
795
  }
893
796
 
894
797
  // preload script to label the zero-cache child process
package/src/mutex.ts CHANGED
@@ -5,6 +5,11 @@ export class Mutex {
5
5
  private queue: Array<() => void> = []
6
6
  private head = 0
7
7
 
8
+ /** check if the mutex is currently held (non-blocking, no side effects) */
9
+ get isLocked(): boolean {
10
+ return this.locked
11
+ }
12
+
8
13
  async acquire(): Promise<void> {
9
14
  if (!this.locked) {
10
15
  this.locked = true
@@ -682,7 +682,12 @@ export async function createBrowserProxy(
682
682
  const instances: PGliteInstances =
683
683
  'postgres' in dbInput
684
684
  ? (dbInput as PGliteInstances)
685
- : { postgres: dbInput as PGlite, cvr: dbInput as PGlite, cdb: dbInput as PGlite }
685
+ : {
686
+ postgres: dbInput as PGlite,
687
+ cvr: dbInput as PGlite,
688
+ cdb: dbInput as PGlite,
689
+ postgresReplicas: [],
690
+ }
686
691
 
687
692
  // per-instance mutexes for serializing pglite access.
688
693
  // when all instances are the same object (single-db mode), share one mutex
package/src/pg-proxy.ts CHANGED
@@ -418,24 +418,62 @@ function readInt32BE(data: Uint8Array, offset: number): number {
418
418
  }
419
419
 
420
420
  /**
421
- * extract ReadyForQuery status byte from a response.
422
- * returns the status: 'I' (0x49) idle, 'T' (0x54) in transaction, 'E' (0x45) error.
423
- * returns null if no ReadyForQuery found.
421
+ * single-pass response processor: extracts ReadyForQuery status AND
422
+ * optionally strips RFQ messages + benign notices in one scan.
423
+ * replaces the previous two-function approach that scanned the buffer twice.
424
424
  */
425
- function getReadyForQueryStatus(data: Uint8Array): number | null {
425
+ interface ProcessedResponse {
426
+ data: Uint8Array
427
+ rfqStatus: number | null
428
+ }
429
+
430
+ function processResponse(data: Uint8Array, stripRfq: boolean): ProcessedResponse {
431
+ if (data.length === 0) return { data, rfqStatus: null }
432
+
433
+ let rfqStatus: number | null = null
434
+ const parts: Uint8Array[] = []
426
435
  let offset = 0
427
- let lastStatus: number | null = null
436
+ let stripped = false
437
+
428
438
  while (offset < data.length) {
439
+ const msgType = data[offset]
429
440
  if (offset + 5 > data.length) break
430
441
  const msgLen = readInt32BE(data, offset + 1)
431
442
  const totalLen = 1 + msgLen
432
443
  if (totalLen <= 0 || offset + totalLen > data.length) break
433
- if (data[offset] === 0x5a && totalLen >= 6) {
434
- lastStatus = data[offset + 5]
444
+
445
+ if (msgType === 0x5a && totalLen >= 6) {
446
+ rfqStatus = data[offset + 5]
447
+ if (stripRfq) {
448
+ stripped = true
449
+ offset += totalLen
450
+ continue
451
+ }
452
+ } else {
453
+ const code = extractNoticeCode(data, offset, totalLen)
454
+ if (code && SUPPRESS_NOTICE_CODES.has(code)) {
455
+ stripped = true
456
+ offset += totalLen
457
+ continue
458
+ }
435
459
  }
460
+
461
+ parts.push(data.subarray(offset, offset + totalLen))
436
462
  offset += totalLen
437
463
  }
438
- return lastStatus
464
+
465
+ if (!stripped) return { data, rfqStatus }
466
+ if (parts.length === 0) return { data: new Uint8Array(0), rfqStatus }
467
+ if (parts.length === 1) return { data: parts[0], rfqStatus }
468
+
469
+ const total = parts.reduce((sum, p) => sum + p.length, 0)
470
+ const result = new Uint8Array(total)
471
+ let pos = 0
472
+ for (const p of parts) {
473
+ result.set(p, pos)
474
+ pos += p.length
475
+ }
476
+ return { data: result, rfqStatus }
439
477
  }
440
478
 
441
479
  /**
@@ -492,54 +530,9 @@ function extractNoticeCode(
492
530
  return null
493
531
  }
494
532
 
495
- /**
496
- * single-pass response message filter. strips ReadyForQuery messages (when
497
- * stripRfq=true) and benign transaction state warnings in one scan.
498
- */
533
+ // legacy wrapper for callers that only need filtering (no RFQ status)
499
534
  function stripResponseMessages(data: Uint8Array, stripRfq: boolean): Uint8Array {
500
- if (data.length === 0) return data
501
-
502
- const parts: Uint8Array[] = []
503
- let offset = 0
504
- let stripped = false
505
-
506
- while (offset < data.length) {
507
- const msgType = data[offset]
508
- if (offset + 5 > data.length) break
509
- const msgLen = readInt32BE(data, offset + 1)
510
- const totalLen = 1 + msgLen
511
-
512
- if (totalLen <= 0 || offset + totalLen > data.length) break
513
-
514
- // strip ReadyForQuery (0x5a) when requested
515
- if (stripRfq && msgType === 0x5a) {
516
- stripped = true
517
- }
518
- // strip benign transaction state notices
519
- else {
520
- const code = extractNoticeCode(data, offset, totalLen)
521
- if (code && SUPPRESS_NOTICE_CODES.has(code)) {
522
- stripped = true
523
- } else {
524
- parts.push(data.subarray(offset, offset + totalLen))
525
- }
526
- }
527
-
528
- offset += totalLen
529
- }
530
-
531
- if (!stripped) return data
532
- if (parts.length === 0) return new Uint8Array(0)
533
- if (parts.length === 1) return parts[0]
534
-
535
- const total = parts.reduce((sum, p) => sum + p.length, 0)
536
- const result = new Uint8Array(total)
537
- let pos = 0
538
- for (const p of parts) {
539
- result.set(p, pos)
540
- pos += p.length
541
- }
542
- return result
535
+ return processResponse(data, stripRfq).data
543
536
  }
544
537
 
545
538
  export async function startPgProxy(
@@ -550,7 +543,12 @@ export async function startPgProxy(
550
543
  const instances: PGliteInstances =
551
544
  'postgres' in dbInput
552
545
  ? (dbInput as PGliteInstances)
553
- : { postgres: dbInput as PGlite, cvr: dbInput as PGlite, cdb: dbInput as PGlite }
546
+ : {
547
+ postgres: dbInput as PGlite,
548
+ cvr: dbInput as PGlite,
549
+ cdb: dbInput as PGlite,
550
+ postgresReplicas: [],
551
+ }
554
552
 
555
553
  // per-instance mutexes for serializing pglite access.
556
554
  // when all instances are the same object (single-db mode), share one mutex
@@ -564,6 +562,24 @@ export async function startPgProxy(
564
562
  cdb: sharedInstance ? pgMutex : new Mutex(),
565
563
  }
566
564
 
565
+ // replica state: initialized lazily when replicas become available.
566
+ // instances.postgresReplicas is populated AFTER the proxy starts
567
+ // (replicas are created after migrations/on-db-ready run via TCP proxy).
568
+ const replicaMutexes: Mutex[] = []
569
+ let replicasInitialized = false
570
+ let nextReplicaIdx = 0
571
+
572
+ function ensureReplicaState() {
573
+ if (replicasInitialized) return
574
+ const replicas = instances.postgresReplicas
575
+ if (replicas.length === 0) return
576
+ replicasInitialized = true
577
+ for (let i = 0; i < replicas.length; i++) {
578
+ replicaMutexes.push(new Mutex())
579
+ }
580
+ log.proxy(`${replicas.length} postgres read replica(s) active`)
581
+ }
582
+
567
583
  // per-instance transaction state: tracks which socket owns the current transaction
568
584
  // so we can auto-ROLLBACK stale aborted transactions from other connections
569
585
  const txStates: Record<string, PgLiteTxState> = {
@@ -585,6 +601,38 @@ export async function startPgProxy(
585
601
  return { db: instances.postgres, mutex: mutexes.postgres, txState: txStates.postgres }
586
602
  }
587
603
 
604
+ /**
605
+ * try to acquire a read replica for a postgres SELECT query.
606
+ * returns null if no replicas are available or all are busy.
607
+ * used for per-query routing: only non-transactional SELECTs can use replicas.
608
+ */
609
+ function tryAcquireReadReplica(): { db: PGlite; mutex: Mutex } | null {
610
+ ensureReplicaState()
611
+ const replicas = instances.postgresReplicas
612
+ if (replicas.length === 0) return null
613
+ // round-robin with tryAcquire: find the first available replica
614
+ for (let i = 0; i < replicas.length; i++) {
615
+ const idx = (nextReplicaIdx + i) % replicas.length
616
+ if (replicaMutexes[idx].tryAcquire()) {
617
+ nextReplicaIdx = idx + 1
618
+ return { db: replicas[idx], mutex: replicaMutexes[idx] }
619
+ }
620
+ }
621
+ return null // all replicas busy, fall back to primary
622
+ }
623
+
624
+ /**
625
+ * fan-out a write SQL to all postgres replicas.
626
+ * fire-and-forget — replica staleness is acceptable for a dev tool.
627
+ */
628
+ function fanOutWriteToReplicas(sql: string) {
629
+ const replicas = instances.postgresReplicas
630
+ if (replicas.length === 0) return
631
+ for (const replica of replicas) {
632
+ replica.exec(sql).catch(() => {})
633
+ }
634
+ }
635
+
588
636
  // signal replication handler after extended protocol writes complete.
589
637
  // 8ms leading-edge debounce: fires exactly 8ms after the FIRST write,
590
638
  // subsequent writes within that window are batched (handler polls all
@@ -779,11 +827,13 @@ export async function startPgProxy(
779
827
  proxyStats.totalExecMs += t2 - t1
780
828
  proxyStats.count++
781
829
 
782
- // update transaction state
783
- const rfqStatus = getReadyForQueryStatus(result)
784
- if (rfqStatus !== null) {
785
- txState.status = rfqStatus
786
- txState.owner = rfqStatus === 0x49 ? null : socket
830
+ // single-pass: extract tx status + strip RFQ from non-Sync messages
831
+ const stripRfqFromPipeline = msgType !== 0x53
832
+ const processed = processResponse(result, stripRfqFromPipeline)
833
+ result = processed.data
834
+ if (processed.rfqStatus !== null) {
835
+ txState.status = processed.rfqStatus
836
+ txState.owner = processed.rfqStatus === 0x49 ? null : socket
787
837
  }
788
838
 
789
839
  // release mutex on Sync (end of pipeline)
@@ -797,9 +847,6 @@ export async function startPgProxy(
797
847
  extWritePending = false
798
848
  signalWrite()
799
849
  }
800
- } else {
801
- // strip ReadyForQuery from non-Sync pipeline messages
802
- result = stripResponseMessages(result, true)
803
850
  }
804
851
 
805
852
  if (proxyStats.count % 200 === 0) {
@@ -853,6 +900,34 @@ export async function startPgProxy(
853
900
  }
854
901
  }
855
902
 
903
+ // per-query replica spillover: when the primary is busy with another query,
904
+ // route read-only SELECTs to an available replica instead of waiting.
905
+ // only for: postgres db, non-write, non-DDL, not in a transaction, primary contended.
906
+ if (
907
+ dbName === 'postgres' &&
908
+ queryNorm &&
909
+ !isWriteNormalized(queryNorm) &&
910
+ !isDDLNormalized(queryNorm) &&
911
+ txState.status === 0x49 &&
912
+ mutex.isLocked // primary is busy — try spilling to replica
913
+ ) {
914
+ const replica = tryAcquireReadReplica()
915
+ if (replica) {
916
+ const t1 = performance.now()
917
+ let result: Uint8Array
918
+ try {
919
+ result = await replica.db.execProtocolRaw(data, { syncToFs: false })
920
+ } catch (err) {
921
+ replica.mutex.release()
922
+ throw err
923
+ }
924
+ replica.mutex.release()
925
+ proxyStats.totalExecMs += performance.now() - t1
926
+ proxyStats.count++
927
+ return processResponse(result, false).data
928
+ }
929
+ }
930
+
856
931
  const execute = async (): Promise<Uint8Array> => {
857
932
  const t0 = performance.now()
858
933
  await mutex.acquire()
@@ -871,13 +946,17 @@ export async function startPgProxy(
871
946
  mutex.release()
872
947
  throw err
873
948
  }
874
- const rfqStatus = getReadyForQueryStatus(result)
875
- if (rfqStatus !== null) {
876
- txState.status = rfqStatus
877
- txState.owner = rfqStatus === 0x49 ? null : socket
878
- }
879
949
  const t2 = performance.now()
880
950
  mutex.release()
951
+
952
+ // single-pass: extract tx status + strip messages
953
+ const doStripRfq = msgType !== 0x53 && msgType !== 0x51
954
+ const processed = processResponse(result, doStripRfq)
955
+ if (processed.rfqStatus !== null) {
956
+ txState.status = processed.rfqStatus
957
+ txState.owner = processed.rfqStatus === 0x49 ? null : socket
958
+ }
959
+
881
960
  proxyStats.totalWaitMs += t1 - t0
882
961
  proxyStats.totalExecMs += t2 - t1
883
962
  proxyStats.count++
@@ -886,7 +965,7 @@ export async function startPgProxy(
886
965
  `perf: ${proxyStats.count} ops (${proxyStats.batches} batches) | mutex ${proxyStats.totalWaitMs.toFixed(0)}ms | pglite ${proxyStats.totalExecMs.toFixed(0)}ms`
887
966
  )
888
967
  }
889
- return result
968
+ return processed.data
890
969
  }
891
970
 
892
971
  let result: Uint8Array
@@ -909,12 +988,19 @@ export async function startPgProxy(
909
988
  }
910
989
  }
911
990
 
912
- const stripRfq = msgType !== 0x53 && msgType !== 0x51
913
- result = stripResponseMessages(result, stripRfq)
914
-
915
991
  // signal replication handler on postgres writes for instant sync
916
992
  if (dbName === 'postgres' && queryNorm && isWriteNormalized(queryNorm)) {
917
993
  signalReplicationChange()
994
+ if (queryText) fanOutWriteToReplicas(queryText)
995
+ }
996
+ // fan-out DDL to replicas so schema stays in sync
997
+ if (
998
+ dbName === 'postgres' &&
999
+ queryNorm &&
1000
+ isDDLNormalized(queryNorm) &&
1001
+ queryText
1002
+ ) {
1003
+ fanOutWriteToReplicas(queryText)
918
1004
  }
919
1005
 
920
1006
  return result
package/src/pglite-ipc.ts CHANGED
@@ -162,6 +162,29 @@ export class PGliteWorkerProxy {
162
162
  return new Uint8Array(result.data)
163
163
  }
164
164
 
165
+ /**
166
+ * execute multiple wire protocol messages in a single IPC round-trip.
167
+ * each message is executed sequentially in the worker, and all results
168
+ * are concatenated and returned. eliminates N-1 postMessage round-trips
169
+ * for extended protocol pipelines (Parse→Bind→Execute→Sync).
170
+ */
171
+ async execProtocolRawBatch(
172
+ messages: Uint8Array[],
173
+ options?: { syncToFs?: boolean; throwOnError?: boolean }
174
+ ): Promise<Uint8Array> {
175
+ const buffers: ArrayBuffer[] = []
176
+ for (const msg of messages) {
177
+ const buf = new ArrayBuffer(msg.byteLength)
178
+ new Uint8Array(buf).set(msg)
179
+ buffers.push(buf)
180
+ }
181
+ const result = await this.send(
182
+ { type: 'execProtocolRawBatch', buffers, options },
183
+ buffers
184
+ )
185
+ return new Uint8Array(result.data)
186
+ }
187
+
165
188
  async query<T = any>(
166
189
  sql: string,
167
190
  params?: any[]
@@ -204,6 +227,15 @@ export class PGliteWorkerProxy {
204
227
  }
205
228
  }
206
229
 
230
+ /**
231
+ * dump the PGlite data directory as a tar blob.
232
+ * used to create read replicas from an existing instance.
233
+ */
234
+ async dumpDataDir(): Promise<ArrayBuffer> {
235
+ const result = await this.send({ type: 'dumpDataDir' })
236
+ return result.data as ArrayBuffer
237
+ }
238
+
207
239
  async close(): Promise<void> {
208
240
  try {
209
241
  await this.send({ type: 'close' })
@@ -76,6 +76,8 @@ export interface PGliteInstances {
76
76
  postgres: PGlite
77
77
  cvr: PGlite
78
78
  cdb: PGlite
79
+ /** read replicas of the postgres instance (empty if disabled) */
80
+ postgresReplicas: PGlite[]
79
81
  }
80
82
 
81
83
  // shared setup extracted from the 4 factory functions below
@@ -262,7 +264,7 @@ export async function createPGliteInstances(
262
264
  ])
263
265
 
264
266
  await ensurePublication(postgres)
265
- return { postgres, cvr, cdb }
267
+ return { postgres, cvr, cdb, postgresReplicas: [] }
266
268
  }
267
269
 
268
270
  /**
@@ -318,6 +320,7 @@ export async function createPGliteWorkerInstances(
318
320
  postgres: pgProxy as unknown as PGlite,
319
321
  cvr: cvrProxy as unknown as PGlite,
320
322
  cdb: cdbProxy as unknown as PGlite,
323
+ postgresReplicas: [],
321
324
  }
322
325
  }
323
326
 
@@ -338,7 +341,7 @@ export async function createSinglePGliteInstance(
338
341
  await ensurePublication(db)
339
342
 
340
343
  // same instance for all three — pg-proxy detects this and shares a mutex
341
- return { postgres: db, cvr: db, cdb: db }
344
+ return { postgres: db, cvr: db, cdb: db, postgresReplicas: [] }
342
345
  }
343
346
 
344
347
  /**
@@ -382,7 +385,7 @@ export async function createSinglePGliteWorkerInstance(
382
385
  await ensurePublication(proxy)
383
386
 
384
387
  const db = proxy as unknown as PGlite
385
- return { postgres: db, cvr: db, cdb: db }
388
+ return { postgres: db, cvr: db, cdb: db, postgresReplicas: [] }
386
389
  }
387
390
 
388
391
  /** create a single worker-backed PGlite instance (for CVR/CDB recreation during reset) */
@@ -396,6 +399,60 @@ export function createPGliteWorker(dataDir: string, name: string): PGliteWorkerP
396
399
  })
397
400
  }
398
401
 
402
+ /**
403
+ * create read replicas of the postgres instance.
404
+ *
405
+ * dumps the primary's data directory and initializes N new worker threads
406
+ * from the dump. each replica is an independent PGlite instance on its own
407
+ * core, handling read queries concurrently.
408
+ *
409
+ * call this AFTER migrations, seed, on-db-ready — the dump captures the
410
+ * full database state at the time of cloning.
411
+ */
412
+ export async function createReadReplicas(
413
+ primary: PGlite,
414
+ count: number,
415
+ config: ZeroLiteConfig
416
+ ): Promise<PGlite[]> {
417
+ if (count <= 0) return []
418
+
419
+ const proxy = primary as unknown as PGliteWorkerProxy
420
+ if (typeof proxy.dumpDataDir !== 'function') {
421
+ log.pglite('read replicas require worker threads (dumpDataDir not available)')
422
+ return []
423
+ }
424
+
425
+ log.pglite(`creating ${count} read replica(s)...`)
426
+ const t0 = performance.now()
427
+
428
+ const dump = await proxy.dumpDataDir()
429
+ log.debug.pglite(`primary dump: ${(dump.byteLength / 1024 / 1024).toFixed(1)}MB`)
430
+
431
+ const {
432
+ dataDir: _ud,
433
+ debug: _dbg,
434
+ ...userOpts
435
+ } = config.pgliteOptions as Record<string, any>
436
+
437
+ const replicas: PGliteWorkerProxy[] = []
438
+ for (let i = 0; i < count; i++) {
439
+ const replica = new PGliteWorkerProxy({
440
+ dataDir: 'memory://',
441
+ name: `postgres-replica-${i}`,
442
+ withExtensions: true,
443
+ debug: config.logLevel === 'debug' ? 1 : 0,
444
+ pgliteOptions: userOpts,
445
+ loadDataDir: dump,
446
+ })
447
+ replicas.push(replica)
448
+ }
449
+
450
+ await Promise.all(replicas.map((r) => r.waitReady))
451
+ log.pglite(`${count} read replica(s) ready in ${(performance.now() - t0).toFixed(0)}ms`)
452
+
453
+ return replicas as unknown as PGlite[]
454
+ }
455
+
399
456
  /** run pending migrations, returns count of newly applied migrations */
400
457
  export async function runMigrations(db: PGlite, config: ZeroLiteConfig): Promise<number> {
401
458
  if (!config.migrationsDir) {