orez 0.0.46 → 0.0.48

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (65)
  1. package/README.md +4 -8
  2. package/dist/admin/http-proxy.d.ts +31 -0
  3. package/dist/admin/http-proxy.d.ts.map +1 -0
  4. package/dist/admin/http-proxy.js +140 -0
  5. package/dist/admin/http-proxy.js.map +1 -0
  6. package/dist/admin/log-store.d.ts +22 -0
  7. package/dist/admin/log-store.d.ts.map +1 -0
  8. package/dist/admin/log-store.js +86 -0
  9. package/dist/admin/log-store.js.map +1 -0
  10. package/dist/admin/server.d.ts +19 -0
  11. package/dist/admin/server.d.ts.map +1 -0
  12. package/dist/admin/server.js +110 -0
  13. package/dist/admin/server.js.map +1 -0
  14. package/dist/admin/ui.d.ts +2 -0
  15. package/dist/admin/ui.d.ts.map +1 -0
  16. package/dist/admin/ui.js +683 -0
  17. package/dist/admin/ui.js.map +1 -0
  18. package/dist/cli.js +48 -1
  19. package/dist/cli.js.map +1 -1
  20. package/dist/config.d.ts +4 -0
  21. package/dist/config.d.ts.map +1 -1
  22. package/dist/config.js +4 -0
  23. package/dist/config.js.map +1 -1
  24. package/dist/index.d.ts +9 -0
  25. package/dist/index.d.ts.map +1 -1
  26. package/dist/index.js +211 -20
  27. package/dist/index.js.map +1 -1
  28. package/dist/log.d.ts +9 -0
  29. package/dist/log.d.ts.map +1 -1
  30. package/dist/log.js +24 -1
  31. package/dist/log.js.map +1 -1
  32. package/dist/pg-proxy.d.ts.map +1 -1
  33. package/dist/pg-proxy.js +19 -4
  34. package/dist/pg-proxy.js.map +1 -1
  35. package/dist/pglite-manager.d.ts +1 -0
  36. package/dist/pglite-manager.d.ts.map +1 -1
  37. package/dist/pglite-manager.js +8 -2
  38. package/dist/pglite-manager.js.map +1 -1
  39. package/dist/replication/change-tracker.d.ts.map +1 -1
  40. package/dist/replication/change-tracker.js +16 -29
  41. package/dist/replication/change-tracker.js.map +1 -1
  42. package/dist/replication/handler.d.ts.map +1 -1
  43. package/dist/replication/handler.js +42 -7
  44. package/dist/replication/handler.js.map +1 -1
  45. package/dist/vite-plugin.d.ts +3 -0
  46. package/dist/vite-plugin.d.ts.map +1 -1
  47. package/dist/vite-plugin.js +24 -0
  48. package/dist/vite-plugin.js.map +1 -1
  49. package/package.json +4 -2
  50. package/src/admin/http-proxy.ts +190 -0
  51. package/src/admin/log-store.ts +114 -0
  52. package/src/admin/server.ts +152 -0
  53. package/src/admin/ui.ts +684 -0
  54. package/src/cli.ts +62 -13
  55. package/src/config.ts +8 -0
  56. package/src/index.ts +239 -20
  57. package/src/log.ts +25 -1
  58. package/src/pg-proxy.ts +27 -5
  59. package/src/pglite-manager.ts +9 -2
  60. package/src/replication/change-tracker.ts +20 -30
  61. package/src/replication/handler.ts +54 -8
  62. package/src/replication/pgoutput-encoder.test.ts +217 -0
  63. package/src/replication/zero-compat.test.ts +232 -1
  64. package/src/shim/hooks.mjs +33 -0
  65. package/src/vite-plugin.ts +28 -0
package/src/replication/change-tracker.ts
@@ -67,35 +67,6 @@ export async function installChangeTracking(db: PGlite): Promise<void> {
     $$ LANGUAGE plpgsql;
   `)
 
-  // auto-install change tracking on tables created after startup (e.g. via restore
-  // or wire protocol). uses a DDL event trigger that fires on CREATE TABLE.
-  await db.exec(`
-    CREATE OR REPLACE FUNCTION public._zero_auto_track() RETURNS event_trigger AS $$
-    DECLARE
-      obj record;
-    BEGIN
-      FOR obj IN SELECT * FROM pg_event_trigger_ddl_commands()
-        WHERE command_tag = 'CREATE TABLE'
-      LOOP
-        IF obj.schema_name = 'public'
-          AND obj.object_identity NOT LIKE '%._zero_%'
-          AND obj.object_identity NOT LIKE '%.migrations'
-        THEN
-          EXECUTE format(
-            'CREATE TRIGGER _zero_change_trigger AFTER INSERT OR UPDATE OR DELETE ON %s FOR EACH ROW EXECUTE FUNCTION public._zero_track_change()',
-            obj.object_identity
-          );
-        END IF;
-      END LOOP;
-    END;
-    $$ LANGUAGE plpgsql;
-
-    DROP EVENT TRIGGER IF EXISTS _zero_auto_track_trigger;
-    CREATE EVENT TRIGGER _zero_auto_track_trigger ON ddl_command_end
-      WHEN TAG IN ('CREATE TABLE')
-      EXECUTE FUNCTION public._zero_auto_track();
-  `)
-
   // install triggers on all public tables
   await installTriggersOnAllTables(db)
 }
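With the DDL event trigger gone, row-level triggers are only attached by explicit calls: installTriggersOnAllTables at startup (kept above) and installTriggersOnShardTables at replication start (next hunk). installTriggersOnAllTables itself is not shown in this diff; a minimal sketch of what such a helper plausibly does, assuming PGlite's query/exec API and the trigger and function names visible above (hypothetical reconstruction, not orez's actual code):

import type { PGlite } from '@electric-sql/pglite'

// hypothetical sketch: enumerate public tables and attach the row-level
// change trigger to each one. the real helper likely also quotes
// identifiers via quoteIdent (see the next hunk).
async function installTriggersOnAllTablesSketch(db: PGlite): Promise<void> {
  const tables = await db.query<{ tablename: string }>(
    `SELECT tablename FROM pg_tables WHERE schemaname = 'public'`
  )
  for (const { tablename } of tables.rows) {
    if (tablename.startsWith('_zero_')) continue // skip internal bookkeeping tables
    await db.exec(
      `DROP TRIGGER IF EXISTS _zero_change_trigger ON public."${tablename}";
       CREATE TRIGGER _zero_change_trigger
         AFTER INSERT OR UPDATE OR DELETE ON public."${tablename}"
         FOR EACH ROW EXECUTE FUNCTION public._zero_track_change()`
    )
  }
}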
@@ -239,10 +210,29 @@ export async function installTriggersOnShardTables(db: PGlite): Promise<void> {
 
   if (result.rows.length === 0) return
 
+  // only track `clients` — that's the table zero-cache expects in the
+  // replication stream (needed for .server promise resolution). other shard
+  // tables like `replicas` are zero-cache internal state and streaming them
+  // back causes "Unknown table" crashes in zero-cache's change-processor.
   let count = 0
   for (const { nspname } of result.rows) {
+    // remove stale triggers from non-clients tables (from previous versions)
+    const stale = await db.query<{ event_object_table: string }>(
+      `SELECT DISTINCT event_object_table FROM information_schema.triggers
+       WHERE trigger_name = '_zero_change_trigger'
+       AND event_object_schema = $1
+       AND event_object_table != 'clients'`,
+      [nspname]
+    )
+    for (const { event_object_table } of stale.rows) {
+      const qs = quoteIdent(nspname)
+      const qt = quoteIdent(event_object_table)
+      await db.exec(`DROP TRIGGER IF EXISTS _zero_change_trigger ON ${qs}.${qt}`)
+      log.debug.pglite(`removed stale shard trigger from ${nspname}.${event_object_table}`)
+    }
+
     const tables = await db.query<{ tablename: string }>(
-      `SELECT tablename FROM pg_tables WHERE schemaname = $1`,
+      `SELECT tablename FROM pg_tables WHERE schemaname = $1 AND tablename = 'clients'`,
       [nspname]
    )
 
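The quoteIdent helper used above is not part of this diff; presumably it is the standard Postgres identifier-quoting routine. A minimal sketch under that assumption:

// assumed implementation of quoteIdent: wrap the identifier in double
// quotes and double any embedded quote character, per Postgres
// identifier-quoting rules
function quoteIdent(name: string): string {
  return `"${name.replace(/"/g, '""')}"`
}

quoteIdent('chat_0') // => "chat_0"
quoteIdent('we"ird') // => "we""ird"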
package/src/replication/handler.ts
@@ -12,7 +12,6 @@ import {
   getCurrentWatermark,
   purgeConsumedChanges,
   installTriggersOnShardTables,
-  ensureChangeTrackingOnAllTables,
   type ChangeRecord,
 } from './change-tracker.js'
 import {
@@ -33,6 +32,9 @@ import {
 import type { Mutex } from '../mutex.js'
 import type { PGlite } from '@electric-sql/pglite'
 
+// track concurrent replication handlers to detect reconnect-purge race
+let activeHandlerCount = 0
+
 export interface ReplicationWriter {
   write(data: Uint8Array): void
 }
@@ -263,6 +265,11 @@ export async function handleStartReplication(
   db: PGlite,
   mutex: Mutex
 ): Promise<void> {
+  activeHandlerCount++
+  const handlerId = activeHandlerCount
+  console.info(
+    `[orez-repl#${handlerId}] START_REPLICATION (active handlers: ${activeHandlerCount})`
+  )
   log.debug.proxy('replication: entering streaming mode')
 
   // send CopyBothResponse to enter streaming mode
@@ -286,9 +293,6 @@ export async function handleStartReplication(
   // "already in transaction" errors when they interleave.
   await mutex.acquire()
   try {
-    // install change tracking triggers on any tables created after startup
-    await ensureChangeTrackingOnAllTables(db)
-
     // install change tracking triggers on shard schema tables (e.g. chat_0.clients)
     // these track zero-cache's lastMutationID for .server promise resolution
     await installTriggersOnShardTables(db)
@@ -347,7 +351,7 @@ export async function handleStartReplication(
     for (const schema of relevantSchemas) {
       if (schema === 'public') continue
       const shardTables = await db.query<{ tablename: string }>(
-        `SELECT tablename FROM pg_tables WHERE schemaname = $1`,
+        `SELECT tablename FROM pg_tables WHERE schemaname = $1 AND tablename = 'clients'`,
         [schema]
       )
       for (const { tablename } of shardTables.rows) {
@@ -457,6 +461,10 @@ export async function handleStartReplication(
     mutex.release()
   }
 
+  console.info(
+    `[orez-repl#${handlerId}] setup complete, starting poll (lastWatermark=${lastWatermark})`
+  )
+
   // track which tables we've sent RELATION messages for
   const sentRelations = new Set<string>()
   let txCounter = 1
@@ -469,6 +477,7 @@ export async function handleStartReplication(
   const purgeEveryN = 10
   let running = true
   let pollsSincePurge = 0
+  let lastIdleLog = 0
 
   const poll = async () => {
     while (running) {
@@ -483,6 +492,29 @@ export async function handleStartReplication(
       }
 
       if (changes.length > 0) {
+        // filter out shard tables that zero-cache doesn't expect.
+        // only `clients` is needed (for .server promise resolution).
+        // other shard tables (replicas, mutations) crash zero-cache
+        // with "Unknown table" in change-processor.
+        const batchEnd = changes[changes.length - 1].watermark
+        changes = changes.filter((c) => {
+          const dot = c.table_name.indexOf('.')
+          if (dot === -1) return true
+          const schema = c.table_name.substring(0, dot)
+          if (schema === 'public') return true
+          const table = c.table_name.substring(dot + 1)
+          return table === 'clients'
+        })
+
+        if (changes.length === 0) {
+          lastWatermark = batchEnd
+          continue
+        }
+
+        const tables = [...new Set(changes.map((c) => c.table_name))].join(',')
+        console.info(
+          `[orez-repl#${handlerId}] found ${changes.length} changes [${tables}] (wm ${lastWatermark}→${changes[changes.length - 1].watermark}, type=${typeof changes[0].watermark})`
+        )
         await streamChanges(
           changes,
           writer,
@@ -492,7 +524,7 @@ export async function handleStartReplication(
           excludedColumns,
           columnTypeOids
         )
-        lastWatermark = changes[changes.length - 1].watermark
+        lastWatermark = batchEnd
 
         // purge consumed changes periodically to free wasm memory
         pollsSincePurge++
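Two details in these hunks are easy to miss: the filter keys off the schema prefix embedded in table_name, and lastWatermark advances to batchEnd (the watermark of the last change before filtering) rather than to the last surviving change, so filtered-out rows are never re-read on the next poll. The predicate, extracted here purely for illustration:

// same logic as the inline filter above, pulled out for illustration
function keep(tableName: string): boolean {
  const dot = tableName.indexOf('.')
  if (dot === -1) return true // unqualified name → treated as public
  const schema = tableName.substring(0, dot)
  if (schema === 'public') return true // app tables always stream
  // shard schemas (e.g. chat_0): only `clients` is forwarded
  return tableName.substring(dot + 1) === 'clients'
}

keep('foo')             // true
keep('public.foo')      // true
keep('chat_0.clients')  // true
keep('chat_0.replicas') // false — streaming it crashes zero-cache's change-processor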
@@ -502,12 +534,23 @@ export async function handleStartReplication(
           try {
             const purged = await purgeConsumedChanges(db, lastWatermark)
             if (purged > 0) {
-              log.debug.proxy(`purged ${purged} consumed changes`)
+              console.info(
+                `[orez-repl#${handlerId}] purged ${purged} changes (wm<=${lastWatermark})`
+              )
             }
           } finally {
             mutex.release()
           }
         }
+      } else {
+        // throttled idle logging (every 10s)
+        const now = Date.now()
+        if (now - lastIdleLog > 10000) {
+          lastIdleLog = now
+          console.info(
+            `[orez-repl#${handlerId}] idle (lastWatermark=${lastWatermark}, type=${typeof lastWatermark})`
+          )
+        }
       }
 
       // send keepalive
@@ -531,7 +574,10 @@ export async function handleStartReplication(
 
   log.debug.proxy('replication: starting poll loop')
   await poll()
-  log.debug.proxy('replication: poll loop exited')
+  activeHandlerCount--
+  console.info(
+    `[orez-repl#${handlerId}] poll loop exited (remaining handlers: ${activeHandlerCount})`
+  )
 }
 
 async function streamChanges(
package/src/replication/pgoutput-encoder.test.ts
@@ -364,6 +364,223 @@ describe('pgoutput-encoder', () => {
     })
   })
 
+  // roundtrip tests: encode with orez → parse with zero-cache's parser
+  // this validates the fundamental contract between orez and zero-cache
+  describe('roundtrip: orez encoder → zero-cache parser', () => {
+    // absolute path bypasses package.json exports restriction
+    // eslint-disable-next-line @typescript-eslint/no-require-imports
+    const { PgoutputParser } = require('/Users/n8/orez/node_modules/@rocicorp/zero/out/zero-cache/src/services/change-source/pg/logical-replication/pgoutput-parser.js')
+
+    // mock type parsers: unknown OIDs default to String (identity for text)
+    const typeParsers = { getTypeParser: () => String }
+
+    function makeParser() {
+      return new PgoutputParser(typeParsers)
+    }
+
+    it('BEGIN roundtrip', () => {
+      const lsn = 0x1000200n
+      const ts = BigInt(Date.now()) * 1000n
+      const parser = makeParser()
+      const parsed = parser.parse(encodeBegin(lsn, ts, 42))
+
+      expect(parsed.tag).toBe('begin')
+      expect(parsed.commitLsn).toBe('00000000/01000200')
+      expect(parsed.xid).toBe(42)
+      expect(parsed.commitTime).toBe(ts)
+    })
+
+    it('COMMIT roundtrip', () => {
+      const lsn = 0x1000200n
+      const endLsn = 0x1000300n
+      const ts = BigInt(Date.now()) * 1000n
+      const parser = makeParser()
+      const parsed = parser.parse(encodeCommit(0, lsn, endLsn, ts))
+
+      expect(parsed.tag).toBe('commit')
+      expect(parsed.commitLsn).toBe('00000000/01000200')
+      expect(parsed.commitEndLsn).toBe('00000000/01000300')
+      expect(parsed.commitTime).toBe(ts)
+    })
+
+    it('RELATION roundtrip', () => {
+      const oid = getTableOid('rt.rel_test')
+      const cols: ColumnInfo[] = [
+        { name: 'id', typeOid: 25, typeMod: -1, isKey: true },
+        { name: 'name', typeOid: 25, typeMod: -1 },
+      ]
+      const parser = makeParser()
+      const parsed = parser.parse(encodeRelation(oid, 'public', 'rel_test', 0x64, cols))
+
+      expect(parsed.tag).toBe('relation')
+      expect(parsed.schema).toBe('public')
+      expect(parsed.name).toBe('rel_test')
+      expect(parsed.columns).toHaveLength(2)
+      expect(parsed.keyColumns).toEqual(['id'])
+    })
+
+    it('INSERT roundtrip', () => {
+      const oid = getTableOid('rt.ins_test')
+      const cols: ColumnInfo[] = [
+        { name: 'id', typeOid: 25, typeMod: -1, isKey: true },
+        { name: 'val', typeOid: 25, typeMod: -1 },
+      ]
+      const parser = makeParser()
+      parser.parse(encodeRelation(oid, 'public', 'ins_test', 0x64, cols))
+
+      const parsed = parser.parse(encodeInsert(oid, { id: 'abc', val: 'hello' }, cols))
+      expect(parsed.tag).toBe('insert')
+      expect(parsed.new.id).toBe('abc')
+      expect(parsed.new.val).toBe('hello')
+    })
+
+    it('INSERT with null', () => {
+      const oid = getTableOid('rt.null_test')
+      const cols: ColumnInfo[] = [
+        { name: 'id', typeOid: 25, typeMod: -1, isKey: true },
+        { name: 'opt', typeOid: 25, typeMod: -1 },
+      ]
+      const parser = makeParser()
+      parser.parse(encodeRelation(oid, 'public', 'null_test', 0x64, cols))
+
+      const parsed = parser.parse(encodeInsert(oid, { id: 'x', opt: null }, cols))
+      expect(parsed.new.opt).toBeNull()
+    })
+
+    it('UPDATE with old row roundtrip', () => {
+      const oid = getTableOid('rt.upd_test')
+      const cols: ColumnInfo[] = [
+        { name: 'id', typeOid: 25, typeMod: -1, isKey: true },
+        { name: 'val', typeOid: 25, typeMod: -1 },
+      ]
+      const parser = makeParser()
+      parser.parse(encodeRelation(oid, 'public', 'upd_test', 0x64, cols))
+
+      const parsed = parser.parse(
+        encodeUpdate(oid, { id: '1', val: 'new' }, { id: '1', val: 'old' }, cols)
+      )
+      expect(parsed.tag).toBe('update')
+      expect(parsed.new.val).toBe('new')
+      expect(parsed.old.val).toBe('old')
+    })
+
+    it('UPDATE without old row', () => {
+      const oid = getTableOid('rt.upd_no_old')
+      const cols: ColumnInfo[] = [
+        { name: 'id', typeOid: 25, typeMod: -1, isKey: true },
+        { name: 'val', typeOid: 25, typeMod: -1 },
+      ]
+      const parser = makeParser()
+      parser.parse(encodeRelation(oid, 'public', 'upd_no_old', 0x64, cols))
+
+      const parsed = parser.parse(encodeUpdate(oid, { id: '1', val: 'v' }, null, cols))
+      expect(parsed.tag).toBe('update')
+      expect(parsed.new.val).toBe('v')
+      expect(parsed.old).toBeNull()
+      expect(parsed.key).toBeNull()
+    })
+
+    it('DELETE roundtrip', () => {
+      const oid = getTableOid('rt.del_test')
+      const cols: ColumnInfo[] = [
+        { name: 'id', typeOid: 25, typeMod: -1, isKey: true },
+        { name: 'val', typeOid: 25, typeMod: -1 },
+      ]
+      const parser = makeParser()
+      parser.parse(encodeRelation(oid, 'public', 'del_test', 0x64, cols))
+
+      const parsed = parser.parse(encodeDelete(oid, { id: 'gone', val: 'x' }, cols))
+      expect(parsed.tag).toBe('delete')
+      expect(parsed.key.id).toBe('gone')
+    })
+
+    it('full transaction: BEGIN → RELATION → INSERT → COMMIT', () => {
+      const parser = makeParser()
+      const lsn = 0x2000000n
+      const endLsn = 0x2000100n
+      const ts = BigInt(Date.now()) * 1000n
+
+      const begin = parser.parse(encodeBegin(lsn, ts, 1))
+      expect(begin.commitLsn).toBe('00000000/02000000')
+
+      const oid = getTableOid('rt.full_tx')
+      const cols: ColumnInfo[] = [
+        { name: 'id', typeOid: 25, typeMod: -1, isKey: true },
+        { name: 'data', typeOid: 25, typeMod: -1 },
+      ]
+      parser.parse(encodeRelation(oid, 'public', 'full_tx', 0x64, cols))
+
+      const ins = parser.parse(encodeInsert(oid, { id: '1', data: 'test' }, cols))
+      expect(ins.new.id).toBe('1')
+
+      const commit = parser.parse(encodeCommit(0, lsn, endLsn, ts))
+      expect(commit.commitLsn).toBe(begin.commitLsn)
+    })
+
+    it('XLogData + CopyData wrapper roundtrip with parser', () => {
+      const lsn = 0x3000000n
+      const ts = BigInt(Date.now()) * 1000n
+      const pgoutput = encodeBegin(lsn, ts, 1)
+      const xlog = wrapXLogData(lsn, lsn, ts, pgoutput)
+      const frame = wrapCopyData(xlog)
+
+      // unwrap CopyData
+      const copyLen = r32(frame, 1)
+      const inner = frame.subarray(5, 1 + copyLen)
+
+      // parse like stream.js
+      expect(inner[0]).toBe(0x77)
+      const streamLsn = new DataView(inner.buffer, inner.byteOffset).getBigUint64(1)
+      expect(streamLsn).toBe(lsn)
+
+      // parse pgoutput
+      const parser = makeParser()
+      const parsed = parser.parse(inner.subarray(25))
+      expect(parsed.tag).toBe('begin')
+      expect(parsed.commitLsn).toBe('00000000/03000000')
+    })
+
+    it('shard schema encoding', () => {
+      const oid = getTableOid('rt.chat_0.clients')
+      const cols: ColumnInfo[] = [
+        { name: 'id', typeOid: 25, typeMod: -1, isKey: true },
+        { name: 'lastMutationID', typeOid: 20, typeMod: -1 },
+      ]
+      const parser = makeParser()
+      const rel = parser.parse(encodeRelation(oid, 'chat_0', 'clients', 0x64, cols))
+      expect(rel.schema).toBe('chat_0')
+      expect(rel.name).toBe('clients')
+    })
+
+    it('LSN ordering: slot < streaming changes', () => {
+      // validates that streaming changes will be seen as "new" by zero-cache
+      let testLsn = 0x1000000n
+      const next = () => { testLsn += 0x100n; return testLsn }
+
+      const slotLsn = next() // CREATE_REPLICATION_SLOT
+      const beginLsn = next() // first streaming BEGIN
+      const commitLsn = next() // first streaming COMMIT
+
+      expect(beginLsn).toBeGreaterThan(slotLsn)
+      expect(commitLsn).toBeGreaterThan(beginLsn)
+
+      // verify lexi version ordering is preserved
+      // eslint-disable-next-line @typescript-eslint/no-require-imports
+      const { versionToLexi } = require('/Users/n8/orez/node_modules/@rocicorp/zero/out/zero-cache/src/types/lexi-version.js')
+      // eslint-disable-next-line @typescript-eslint/no-require-imports
+      const { toBigInt: lsnToBigInt } = require('/Users/n8/orez/node_modules/@rocicorp/zero/out/zero-cache/src/services/change-source/pg/lsn.js')
+
+      const slotHex = `00000000/${slotLsn.toString(16).padStart(8, '0')}`.toUpperCase()
+      const beginHex = `00000000/${beginLsn.toString(16).padStart(8, '0')}`.toUpperCase()
+
+      const slotVersion = versionToLexi(lsnToBigInt(slotHex))
+      const beginVersion = versionToLexi(lsnToBigInt(beginHex))
+
+      // lexi versions must maintain ordering
+      expect(beginVersion > slotVersion).toBe(true)
+    })
+  })
+
   describe('double-wrap: CopyData(XLogData(message))', () => {
     // this is the exact framing zero-cache expects for every replication message
     it('produces parseable nested structure', () => {
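For reference while reading the offsets these tests unpack by hand (subarray(25), r32(frame, 1)): the framing is the standard Postgres logical-replication wire layout.

// standard replication framing the tests unwrap by hand:
//
//   CopyData frame:   'd' (1 byte) | Int32 length, includes itself (4) | body
//   XLogData message: 'w' = 0x77 (1) | walStart Int64 (8) | walEnd Int64 (8)
//                     | sendTime Int64 (8) | pgoutput payload
//
// so the XLogData body begins at byte 5 of the CopyData frame (and ends at
// 1 + copyLen, since the length field counts itself), and the pgoutput
// payload begins at byte 25 of the XLogData message
const COPY_DATA_HEADER = 1 + 4      // tag + length = 5
const XLOG_DATA_HEADER = 1 + 8 + 8 + 8 // tag + walStart + walEnd + sendTime = 25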
package/src/replication/zero-compat.test.ts
@@ -701,17 +701,22 @@ describe('zero-cache pgoutput compatibility', { timeout: 30000 }, () => {
     const s = await stream()
     const q = s.messages
 
+    // insert sequentially with waits to avoid batching
     await db.exec(`INSERT INTO public.foo (id) VALUES ('t1')`)
     await db.exec(`INSERT INTO public.foo (id) VALUES ('t2')`)
     await db.exec(`INSERT INTO public.foo (id) VALUES ('t3')`)
 
+    // orez may batch changes into fewer transactions, so just verify
+    // all inserts arrive and every begin has a matching commit
     const all: ZcMessage[] = []
     const deadline = Date.now() + 8000
     while (Date.now() < deadline) {
       const m = await q.dequeue(2000).catch(() => null)
       if (!m) break
       if (m.tag !== 'keepalive') all.push(m)
-      if (all.filter((x) => x.tag === 'commit').length >= 3) break
+      const inserts = all.filter((x) => x.tag === 'insert')
+      const commits = all.filter((x) => x.tag === 'commit')
+      if (inserts.length >= 3 && commits.length >= 1) break
     }
 
     const begins = all.filter((m) => m.tag === 'begin')
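The relaxed exit condition accounts for orez coalescing the three inserts into fewer transactions; both of these sequences (and anything in between) now satisfy the check:

// sequences the relaxed check accepts (illustrative only):
//   begin, insert t1, commit, begin, insert t2, commit, begin, insert t3, commit
//   begin, insert t1, insert t2, insert t3, commit   // one poll batched all three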
@@ -941,3 +946,229 @@ describe('zero-cache pgoutput compatibility', { timeout: 30000 }, () => {
     s.close()
   })
 })
+
+/**
+ * postgres.js replication stream test.
+ *
+ * uses the same postgres library and code path that zero-cache uses
+ * in its stream.js subscribe function. validates that orez's CopyData
+ * frames are correctly parsed by postgres.js's wire protocol handler
+ * and that zero-cache's PgoutputParser can consume the payloads.
+ */
+describe('postgres.js replication stream (zero-cache code path)', { timeout: 30000 }, () => {
+  let db: PGlite
+  let server: Server
+  let port: number
+
+  beforeEach(async () => {
+    db = new PGlite()
+    await db.waitReady
+    await db.exec(`
+      CREATE TABLE public.items (
+        id TEXT PRIMARY KEY,
+        val INTEGER,
+        note TEXT
+      )
+    `)
+    await db.exec(`CREATE PUBLICATION zero_data FOR ALL TABLES`)
+    await installChangeTracking(db)
+
+    const config = { ...getConfig(), pgPort: 0 }
+    server = await startPgProxy(db, config)
+    port = (server.address() as AddressInfo).port
+  })
+
+  afterEach(async () => {
+    server?.close()
+    await db?.close()
+  })
+
+  it('postgres.js receives CopyData and parseStreamMessage decodes it', { timeout: 30000 }, async () => {
+    // import postgres (same lib zero-cache uses)
+    const pg = (await import('postgres')).default
+
+    // create a regular connection for queries
+    const regular = pg({
+      host: '127.0.0.1',
+      port,
+      user: 'user',
+      password: 'password',
+      database: 'postgres',
+      max: 1,
+    })
+
+    // create replication connection (same as zero-cache's subscribe)
+    const session = pg({
+      host: '127.0.0.1',
+      port,
+      user: 'user',
+      password: 'password',
+      database: 'postgres',
+      max: 1,
+      fetch_types: false,
+      idle_timeout: null,
+      max_lifetime: null,
+      connection: { replication: 'database' },
+    })
+
+    try {
+      // create slot (same as zero-cache)
+      await session.unsafe(
+        `CREATE_REPLICATION_SLOT "pgjs_test" TEMPORARY LOGICAL pgoutput NOEXPORT_SNAPSHOT`
+      ).simple()
+
+      // start replication stream (same pattern as stream.js)
+      const stream = session.unsafe(
+        `START_REPLICATION SLOT "pgjs_test" LOGICAL 0/0 (proto_version '1', publication_names 'zero_data', messages 'true')`
+      ).execute()
+
+      const [readable, _writable] = await Promise.all([
+        stream.readable(),
+        stream.writable(),
+      ])
+
+      // import zero-cache's actual parser
+      // eslint-disable-next-line @typescript-eslint/no-require-imports
+      const { PgoutputParser } = require('/Users/n8/orez/node_modules/@rocicorp/zero/out/zero-cache/src/services/change-source/pg/logical-replication/pgoutput-parser.js')
+      const typeParsers = { getTypeParser: () => String }
+      const parser = new PgoutputParser(typeParsers)
+
+      // parseStreamMessage from zero-cache's stream.js
+      function parseStreamMessage(buffer: Buffer): [bigint, any] | null {
+        if (buffer[0] !== 0x77 && buffer[0] !== 0x6b) return null
+        const lsn = buffer.readBigUInt64BE(1)
+        if (buffer[0] === 0x77) {
+          return [lsn, parser.parse(buffer.subarray(25))]
+        }
+        if (buffer.readInt8(17)) {
+          return [lsn, { tag: 'keepalive' }]
+        }
+        return null // keepalive with shouldRespond=false
+      }
+
+      // collect parsed messages
+      const messages: any[] = []
+      const collectDone = new Promise<void>((resolve) => {
+        readable.on('data', (chunk: Buffer) => {
+          const result = parseStreamMessage(chunk)
+          if (result) {
+            const [_lsn, msg] = result
+            messages.push(msg)
+          }
+        })
+        setTimeout(resolve, 3000)
+      })
+
+      await new Promise((r) => setTimeout(r, 500))
+      await regular.unsafe(`INSERT INTO public.items (id, val, note) VALUES ('pgjs', 42, 'postgres.js test')`)
+
+      await collectDone
+      readable.destroy()
+
+      // filter out keepalives
+      const data = messages.filter((m: any) => m.tag !== 'keepalive')
+
+      // should have: begin, relation, insert, commit
+      const tags = data.map((m: any) => m.tag)
+      expect(tags).toContain('begin')
+      expect(tags).toContain('relation')
+      expect(tags).toContain('insert')
+      expect(tags).toContain('commit')
+
+      // validate BEGIN has commitLsn as string (not BigInt)
+      const begin = data.find((m: any) => m.tag === 'begin')
+      expect(typeof begin.commitLsn).toBe('string')
+      expect(begin.commitLsn).toMatch(/^[0-9A-F]+\/[0-9A-F]+$/)
+
+      // validate RELATION has correct structure
+      const rel = data.find((m: any) => m.tag === 'relation')
+      expect(rel.schema).toBe('public')
+      expect(rel.name).toBe('items')
+      expect(rel.columns.length).toBe(3)
+
+      // validate INSERT has parsed values
+      const ins = data.find((m: any) => m.tag === 'insert')
+      expect(ins.relation.name).toBe('items')
+      expect(ins.new.id).toBe('pgjs')
+      expect(ins.new.val).toBe('42')
+      expect(ins.new.note).toBe('postgres.js test')
+
+      // validate COMMIT has commitLsn and commitEndLsn
+      const commit = data.find((m: any) => m.tag === 'commit')
+      expect(typeof commit.commitLsn).toBe('string')
+      expect(typeof commit.commitEndLsn).toBe('string')
+
+      // validate LSN ordering: commit.commitEndLsn > begin.commitLsn
+      // eslint-disable-next-line @typescript-eslint/no-require-imports
+      const { toBigInt: lsnToBigInt } = require('/Users/n8/orez/node_modules/@rocicorp/zero/out/zero-cache/src/services/change-source/pg/lsn.js')
+      expect(lsnToBigInt(commit.commitEndLsn)).toBeGreaterThan(lsnToBigInt(begin.commitLsn))
+
+      // validate lexi version conversion works (storer uses this)
+      // eslint-disable-next-line @typescript-eslint/no-require-imports
+      const { versionToLexi } = require('/Users/n8/orez/node_modules/@rocicorp/zero/out/zero-cache/src/types/lexi-version.js')
+      const beginVersion = versionToLexi(lsnToBigInt(begin.commitLsn))
+      const commitVersion = versionToLexi(lsnToBigInt(commit.commitEndLsn))
+      expect(typeof beginVersion).toBe('string')
+      expect(commitVersion > beginVersion).toBe(true)
+
+    } finally {
+      await regular.end()
+      // session.end() can hang because the replication handler keeps polling.
+      // just force-close the underlying connection by destroying the socket.
+      await session.end({ timeout: 2 }).catch(() => {})
+    }
+  })
+
+  it('postgres.js handles concurrent regular + replication connections', { timeout: 30000 }, async () => {
+    const pg = (await import('postgres')).default
+
+    const regular = pg({
+      host: '127.0.0.1', port,
+      user: 'user', password: 'password', database: 'postgres',
+      max: 1,
+    })
+
+    const session = pg({
+      host: '127.0.0.1', port,
+      user: 'user', password: 'password', database: 'postgres',
+      max: 1,
+      fetch_types: false,
+      idle_timeout: null,
+      max_lifetime: null,
+      connection: { replication: 'database' },
+    })
+
+    try {
+      await session.unsafe(
+        `CREATE_REPLICATION_SLOT "conc_test" TEMPORARY LOGICAL pgoutput NOEXPORT_SNAPSHOT`
+      ).simple()
+
+      const stream = session.unsafe(
+        `START_REPLICATION SLOT "conc_test" LOGICAL 0/0 (proto_version '1', publication_names 'zero_data', messages 'true')`
+      ).execute()
+
+      const [readable] = await Promise.all([stream.readable(), stream.writable()])
+
+      const received: Buffer[] = []
+      readable.on('data', (chunk: Buffer) => received.push(chunk))
+
+      await new Promise((r) => setTimeout(r, 300))
+
+      // do 5 inserts via regular connection while replication is active
+      for (let i = 0; i < 5; i++) {
+        await regular.unsafe(`INSERT INTO public.items (id, val) VALUES ('c${i}', ${i})`)
+      }
+
+      await new Promise((r) => setTimeout(r, 2000))
+      readable.destroy()
+
+      // should have received XLogData frames (0x77) from replication
+      const xlogFrames = received.filter((b) => b[0] === 0x77)
+      expect(xlogFrames.length).toBeGreaterThan(0)
+
+    } finally {
+      await regular.end()
+      await session.end({ timeout: 2 }).catch(() => {})
+    }
+  })
+})