orez 0.0.49 → 0.0.51

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -3,6 +3,8 @@ import { describe, it, expect, beforeEach, afterEach } from 'vitest'
3
3
 
4
4
  import {
5
5
  installChangeTracking,
6
+ installTriggersOnShardTables,
7
+ purgeConsumedChanges,
6
8
  getChangesSince,
7
9
  getCurrentWatermark,
8
10
  } from './change-tracker'
@@ -184,3 +186,115 @@ describe('change-tracker', () => {
184
186
  expect(special[0].row_data).toMatchObject({ val: 'works' })
185
187
  })
186
188
  })
189
+
190
+ describe('shard table tracking', () => {
191
+ let db: PGlite
192
+
193
+ beforeEach(async () => {
194
+ db = new PGlite()
195
+ await db.waitReady
196
+ await installChangeTracking(db)
197
+ })
198
+
199
+ afterEach(async () => {
200
+ await db.close()
201
+ })
202
+
203
+ it('only tracks clients table in shard schemas, not replicas/mutations', async () => {
204
+ // zero-cache creates shard schemas like chat_0 with clients, replicas, mutations.
205
+ // only `clients` needs tracking — replicas/mutations changes crash zero-cache
206
+ // with "Unknown table chat_0.replicas" because they aren't in zero's schema.
207
+ await db.exec(`
208
+ CREATE SCHEMA chat_0;
209
+ CREATE TABLE chat_0.clients (
210
+ "clientGroupID" TEXT NOT NULL,
211
+ "clientID" TEXT NOT NULL,
212
+ "lastMutationID" BIGINT,
213
+ "userID" TEXT,
214
+ PRIMARY KEY ("clientGroupID", "clientID")
215
+ );
216
+ CREATE TABLE chat_0.replicas (
217
+ id TEXT PRIMARY KEY,
218
+ version TEXT,
219
+ cookie TEXT
220
+ );
221
+ CREATE TABLE chat_0.mutations (
222
+ id TEXT PRIMARY KEY,
223
+ "clientID" TEXT,
224
+ name TEXT,
225
+ args JSONB
226
+ );
227
+ `)
228
+
229
+ await installTriggersOnShardTables(db)
230
+
231
+ // insert into all three tables
232
+ await db.exec(
233
+ `INSERT INTO chat_0.clients ("clientGroupID", "clientID", "lastMutationID") VALUES ('cg1', 'c1', 1)`
234
+ )
235
+ await db.exec(`INSERT INTO chat_0.replicas (id, version) VALUES ('r1', 'v1')`)
236
+ await db.exec(
237
+ `INSERT INTO chat_0.mutations (id, "clientID", name) VALUES ('m1', 'c1', 'sendMessage')`
238
+ )
239
+
240
+ const changes = await getChangesSince(db, 0)
241
+ const tables = changes.map((c) => c.table_name)
242
+
243
+ // only clients should be tracked
244
+ expect(tables).toContain('chat_0.clients')
245
+ expect(tables).not.toContain('chat_0.replicas')
246
+ expect(tables).not.toContain('chat_0.mutations')
247
+ })
248
+
249
+ it('purges consumed changes to prevent OOM', async () => {
250
+ // _zero_changes accumulates forever in 0.0.37. with wasm pglite,
251
+ // this eventually causes OOM. we need a purge mechanism.
252
+ await db.exec(`
253
+ CREATE TABLE public.items (id SERIAL PRIMARY KEY, val TEXT)
254
+ `)
255
+ await installChangeTracking(db)
256
+
257
+ // insert some data
258
+ for (let i = 0; i < 10; i++) {
259
+ await db.exec(`INSERT INTO public.items (val) VALUES ('item${i}')`)
260
+ }
261
+
262
+ const changes = await getChangesSince(db, 0)
263
+ expect(changes).toHaveLength(10)
264
+ const lastWatermark = changes[changes.length - 1].watermark
265
+
266
+ // purge consumed changes up to the watermark we've processed
267
+ await purgeConsumedChanges(db, lastWatermark)
268
+
269
+ // after purge, no changes before that watermark should remain
270
+ const remaining = await getChangesSince(db, 0)
271
+ expect(remaining).toHaveLength(0)
272
+ })
273
+
274
+ it('tracks tables created after initial installChangeTracking', async () => {
275
+ // simulate zero-cache creating shard schema AFTER replication starts.
276
+ // in production, zero-cache creates chat_0 schema + clients table
277
+ // after the replication connection is already established.
278
+ // the change tracker must pick up these new tables.
279
+ await db.exec(`
280
+ CREATE SCHEMA chat_0;
281
+ CREATE TABLE chat_0.clients (
282
+ "clientGroupID" TEXT NOT NULL,
283
+ "clientID" TEXT NOT NULL,
284
+ "lastMutationID" BIGINT,
285
+ PRIMARY KEY ("clientGroupID", "clientID")
286
+ );
287
+ `)
288
+
289
+ // re-running installTriggersOnShardTables should pick up new tables
290
+ await installTriggersOnShardTables(db)
291
+
292
+ await db.exec(
293
+ `INSERT INTO chat_0.clients ("clientGroupID", "clientID", "lastMutationID") VALUES ('cg1', 'c1', 1)`
294
+ )
295
+
296
+ const changes = await getChangesSince(db, 0)
297
+ expect(changes).toHaveLength(1)
298
+ expect(changes[0].table_name).toBe('chat_0.clients')
299
+ })
300
+ })
@@ -158,10 +158,31 @@ export async function installTriggersOnShardTables(db: PGlite): Promise<void> {
158
158
 
159
159
  if (result.rows.length === 0) return
160
160
 
161
+ // only track `clients` — that's the table zero-cache expects in the
162
+ // replication stream (needed for .server promise resolution). other shard
163
+ // tables like `replicas` are zero-cache internal state and streaming them
164
+ // back causes "Unknown table" crashes in zero-cache's change-processor.
161
165
  let count = 0
162
166
  for (const { nspname } of result.rows) {
167
+ // remove stale triggers from non-clients tables (from previous versions)
168
+ const stale = await db.query<{ event_object_table: string }>(
169
+ `SELECT DISTINCT event_object_table FROM information_schema.triggers
170
+ WHERE trigger_name = '_zero_change_trigger'
171
+ AND event_object_schema = $1
172
+ AND event_object_table != 'clients'`,
173
+ [nspname]
174
+ )
175
+ for (const { event_object_table } of stale.rows) {
176
+ const qs = quoteIdent(nspname)
177
+ const qt = quoteIdent(event_object_table)
178
+ await db.exec(`DROP TRIGGER IF EXISTS _zero_change_trigger ON ${qs}.${qt}`)
179
+ log.debug.pglite(
180
+ `removed stale shard trigger from ${nspname}.${event_object_table}`
181
+ )
182
+ }
183
+
163
184
  const tables = await db.query<{ tablename: string }>(
164
- `SELECT tablename FROM pg_tables WHERE schemaname = $1`,
185
+ `SELECT tablename FROM pg_tables WHERE schemaname = $1 AND tablename = 'clients'`,
165
186
  [nspname]
166
187
  )
167
188
 
@@ -195,6 +216,17 @@ export async function getChangesSince(
195
216
  return result.rows
196
217
  }
197
218
 
219
+ export async function purgeConsumedChanges(
220
+ db: PGlite,
221
+ watermark: number
222
+ ): Promise<number> {
223
+ const result = await db.query<{ count: string }>(
224
+ 'WITH deleted AS (DELETE FROM public._zero_changes WHERE watermark <= $1 RETURNING 1) SELECT count(*)::text AS count FROM deleted',
225
+ [watermark]
226
+ )
227
+ return Number(result.rows[0]?.count || 0)
228
+ }
229
+
198
230
  export async function getCurrentWatermark(db: PGlite): Promise<number> {
199
231
  const result = await db.query<{ last_value: string; is_called: boolean }>(
200
232
  'SELECT last_value, is_called FROM public._zero_watermark'
@@ -10,6 +10,7 @@ import { log } from '../log.js'
10
10
  import {
11
11
  getChangesSince,
12
12
  getCurrentWatermark,
13
+ purgeConsumedChanges,
13
14
  installTriggersOnShardTables,
14
15
  type ChangeRecord,
15
16
  } from './change-tracker.js'
@@ -342,7 +343,7 @@ export async function handleStartReplication(
342
343
  for (const schema of relevantSchemas) {
343
344
  if (schema === 'public') continue
344
345
  const shardTables = await db.query<{ tablename: string }>(
345
- `SELECT tablename FROM pg_tables WHERE schemaname = $1`,
346
+ `SELECT tablename FROM pg_tables WHERE schemaname = $1 AND tablename = 'clients'`,
346
347
  [schema]
347
348
  )
348
349
  for (const { tablename } of shardTables.rows) {
@@ -457,22 +458,60 @@ export async function handleStartReplication(
457
458
  let txCounter = 1
458
459
 
459
460
  // polling + notification loop
460
- const pollInterval = 500
461
+ // adaptive: poll fast when catching up, slow when idle
462
+ const pollIntervalIdle = 500
463
+ const pollIntervalCatchUp = 20
464
+ const batchSize = 2000
465
+ const purgeEveryN = 10
466
+ const shardRescanEveryN = 20
461
467
  let running = true
468
+ let pollsSincePurge = 0
469
+ let pollsSinceShardRescan = 0
462
470
 
463
471
  const poll = async () => {
464
472
  while (running) {
465
473
  try {
474
+ // periodically re-scan for new shard schemas (e.g. chat_0 created by zero-cache)
475
+ pollsSinceShardRescan++
476
+ if (pollsSinceShardRescan >= shardRescanEveryN) {
477
+ pollsSinceShardRescan = 0
478
+ await mutex.acquire()
479
+ try {
480
+ await installTriggersOnShardTables(db)
481
+ } finally {
482
+ mutex.release()
483
+ }
484
+ }
485
+
466
486
  // acquire mutex to avoid conflicting with proxy connections
467
487
  await mutex.acquire()
468
488
  let changes: Awaited<ReturnType<typeof getChangesSince>>
469
489
  try {
470
- changes = await getChangesSince(db, lastWatermark, 100)
490
+ changes = await getChangesSince(db, lastWatermark, batchSize)
471
491
  } finally {
472
492
  mutex.release()
473
493
  }
474
494
 
475
495
  if (changes.length > 0) {
496
+ // filter out shard tables that zero-cache doesn't expect.
497
+ // only `clients` is needed (for .server promise resolution).
498
+ // other shard tables (replicas, mutations) crash zero-cache
499
+ // with "Unknown table" in change-processor.
500
+ const batchEnd = changes[changes.length - 1].watermark
501
+ changes = changes.filter((c) => {
502
+ const dot = c.table_name.indexOf('.')
503
+ if (dot === -1) return true
504
+ const schema = c.table_name.substring(0, dot)
505
+ if (schema === 'public') return true
506
+ const table = c.table_name.substring(dot + 1)
507
+ return table === 'clients'
508
+ })
509
+
510
+ if (changes.length === 0) {
511
+ lastWatermark = batchEnd
512
+ continue
513
+ }
514
+
476
515
  await streamChanges(
477
516
  changes,
478
517
  writer,
@@ -482,14 +521,31 @@ export async function handleStartReplication(
482
521
  excludedColumns,
483
522
  columnTypeOids
484
523
  )
485
- lastWatermark = changes[changes.length - 1].watermark
524
+ lastWatermark = batchEnd
525
+
526
+ // purge consumed changes periodically to free wasm memory
527
+ pollsSincePurge++
528
+ if (pollsSincePurge >= purgeEveryN) {
529
+ pollsSincePurge = 0
530
+ await mutex.acquire()
531
+ try {
532
+ const purged = await purgeConsumedChanges(db, lastWatermark)
533
+ if (purged > 0) {
534
+ log.debug.proxy(`purged ${purged} consumed changes`)
535
+ }
536
+ } finally {
537
+ mutex.release()
538
+ }
539
+ }
486
540
  }
487
541
 
488
542
  // send keepalive
489
543
  const ts = nowMicros()
490
544
  writer.write(encodeKeepalive(currentLsn, ts, false))
491
545
 
492
- await new Promise((resolve) => setTimeout(resolve, pollInterval))
546
+ // if we got a full batch, there's likely more — poll fast
547
+ const delay = changes.length >= batchSize ? pollIntervalCatchUp : pollIntervalIdle
548
+ await new Promise((resolve) => setTimeout(resolve, delay))
493
549
  } catch (err: unknown) {
494
550
  const msg = err instanceof Error ? err.message : String(err)
495
551
  log.debug.proxy(`replication poll error: ${msg}`)
@@ -1,3 +1,5 @@
1
+ import { join } from 'node:path'
2
+
1
3
  import { describe, it, expect } from 'vitest'
2
4
 
3
5
  import {
@@ -364,6 +366,238 @@ describe('pgoutput-encoder', () => {
364
366
  })
365
367
  })
366
368
 
369
+ // roundtrip tests: encode with orez → parse with zero-cache's parser
370
+ // this validates the fundamental contract between orez and zero-cache
371
+ describe('roundtrip: orez encoder → zero-cache parser', () => {
372
+ // relative path bypasses package.json exports restriction
373
+ // eslint-disable-next-line @typescript-eslint/no-require-imports
374
+ const parserPath = join(
375
+ import.meta.dirname,
376
+ '../../node_modules/@rocicorp/zero/out/zero-cache/src/services/change-source/pg/logical-replication/pgoutput-parser.js'
377
+ )
378
+ const { PgoutputParser } = require(parserPath)
379
+
380
+ // mock type parsers: unknown OIDs default to String (identity for text)
381
+ const typeParsers = { getTypeParser: () => String }
382
+
383
+ function makeParser() {
384
+ return new PgoutputParser(typeParsers)
385
+ }
386
+
387
+ it('BEGIN roundtrip', () => {
388
+ const lsn = 0x1000200n
389
+ const ts = BigInt(Date.now()) * 1000n
390
+ const parser = makeParser()
391
+ const parsed = parser.parse(encodeBegin(lsn, ts, 42))
392
+
393
+ expect(parsed.tag).toBe('begin')
394
+ expect(parsed.commitLsn).toBe('00000000/01000200')
395
+ expect(parsed.xid).toBe(42)
396
+ expect(parsed.commitTime).toBe(ts)
397
+ })
398
+
399
+ it('COMMIT roundtrip', () => {
400
+ const lsn = 0x1000200n
401
+ const endLsn = 0x1000300n
402
+ const ts = BigInt(Date.now()) * 1000n
403
+ const parser = makeParser()
404
+ const parsed = parser.parse(encodeCommit(0, lsn, endLsn, ts))
405
+
406
+ expect(parsed.tag).toBe('commit')
407
+ expect(parsed.commitLsn).toBe('00000000/01000200')
408
+ expect(parsed.commitEndLsn).toBe('00000000/01000300')
409
+ expect(parsed.commitTime).toBe(ts)
410
+ })
411
+
412
+ it('RELATION roundtrip', () => {
413
+ const oid = getTableOid('rt.rel_test')
414
+ const cols: ColumnInfo[] = [
415
+ { name: 'id', typeOid: 25, typeMod: -1, isKey: true },
416
+ { name: 'name', typeOid: 25, typeMod: -1 },
417
+ ]
418
+ const parser = makeParser()
419
+ const parsed = parser.parse(encodeRelation(oid, 'public', 'rel_test', 0x64, cols))
420
+
421
+ expect(parsed.tag).toBe('relation')
422
+ expect(parsed.schema).toBe('public')
423
+ expect(parsed.name).toBe('rel_test')
424
+ expect(parsed.columns).toHaveLength(2)
425
+ expect(parsed.keyColumns).toEqual(['id'])
426
+ })
427
+
428
+ it('INSERT roundtrip', () => {
429
+ const oid = getTableOid('rt.ins_test')
430
+ const cols: ColumnInfo[] = [
431
+ { name: 'id', typeOid: 25, typeMod: -1, isKey: true },
432
+ { name: 'val', typeOid: 25, typeMod: -1 },
433
+ ]
434
+ const parser = makeParser()
435
+ parser.parse(encodeRelation(oid, 'public', 'ins_test', 0x64, cols))
436
+
437
+ const parsed = parser.parse(encodeInsert(oid, { id: 'abc', val: 'hello' }, cols))
438
+ expect(parsed.tag).toBe('insert')
439
+ expect(parsed.new.id).toBe('abc')
440
+ expect(parsed.new.val).toBe('hello')
441
+ })
442
+
443
+ it('INSERT with null', () => {
444
+ const oid = getTableOid('rt.null_test')
445
+ const cols: ColumnInfo[] = [
446
+ { name: 'id', typeOid: 25, typeMod: -1, isKey: true },
447
+ { name: 'opt', typeOid: 25, typeMod: -1 },
448
+ ]
449
+ const parser = makeParser()
450
+ parser.parse(encodeRelation(oid, 'public', 'null_test', 0x64, cols))
451
+
452
+ const parsed = parser.parse(encodeInsert(oid, { id: 'x', opt: null }, cols))
453
+ expect(parsed.new.opt).toBeNull()
454
+ })
455
+
456
+ it('UPDATE with old row roundtrip', () => {
457
+ const oid = getTableOid('rt.upd_test')
458
+ const cols: ColumnInfo[] = [
459
+ { name: 'id', typeOid: 25, typeMod: -1, isKey: true },
460
+ { name: 'val', typeOid: 25, typeMod: -1 },
461
+ ]
462
+ const parser = makeParser()
463
+ parser.parse(encodeRelation(oid, 'public', 'upd_test', 0x64, cols))
464
+
465
+ const parsed = parser.parse(
466
+ encodeUpdate(oid, { id: '1', val: 'new' }, { id: '1', val: 'old' }, cols)
467
+ )
468
+ expect(parsed.tag).toBe('update')
469
+ expect(parsed.new.val).toBe('new')
470
+ expect(parsed.old.val).toBe('old')
471
+ })
472
+
473
+ it('UPDATE without old row', () => {
474
+ const oid = getTableOid('rt.upd_no_old')
475
+ const cols: ColumnInfo[] = [
476
+ { name: 'id', typeOid: 25, typeMod: -1, isKey: true },
477
+ { name: 'val', typeOid: 25, typeMod: -1 },
478
+ ]
479
+ const parser = makeParser()
480
+ parser.parse(encodeRelation(oid, 'public', 'upd_no_old', 0x64, cols))
481
+
482
+ const parsed = parser.parse(encodeUpdate(oid, { id: '1', val: 'v' }, null, cols))
483
+ expect(parsed.tag).toBe('update')
484
+ expect(parsed.new.val).toBe('v')
485
+ expect(parsed.old).toBeNull()
486
+ expect(parsed.key).toBeNull()
487
+ })
488
+
489
+ it('DELETE roundtrip', () => {
490
+ const oid = getTableOid('rt.del_test')
491
+ const cols: ColumnInfo[] = [
492
+ { name: 'id', typeOid: 25, typeMod: -1, isKey: true },
493
+ { name: 'val', typeOid: 25, typeMod: -1 },
494
+ ]
495
+ const parser = makeParser()
496
+ parser.parse(encodeRelation(oid, 'public', 'del_test', 0x64, cols))
497
+
498
+ const parsed = parser.parse(encodeDelete(oid, { id: 'gone', val: 'x' }, cols))
499
+ expect(parsed.tag).toBe('delete')
500
+ expect(parsed.key.id).toBe('gone')
501
+ })
502
+
503
+ it('full transaction: BEGIN → RELATION → INSERT → COMMIT', () => {
504
+ const parser = makeParser()
505
+ const lsn = 0x2000000n
506
+ const endLsn = 0x2000100n
507
+ const ts = BigInt(Date.now()) * 1000n
508
+
509
+ const begin = parser.parse(encodeBegin(lsn, ts, 1))
510
+ expect(begin.commitLsn).toBe('00000000/02000000')
511
+
512
+ const oid = getTableOid('rt.full_tx')
513
+ const cols: ColumnInfo[] = [
514
+ { name: 'id', typeOid: 25, typeMod: -1, isKey: true },
515
+ { name: 'data', typeOid: 25, typeMod: -1 },
516
+ ]
517
+ parser.parse(encodeRelation(oid, 'public', 'full_tx', 0x64, cols))
518
+
519
+ const ins = parser.parse(encodeInsert(oid, { id: '1', data: 'test' }, cols))
520
+ expect(ins.new.id).toBe('1')
521
+
522
+ const commit = parser.parse(encodeCommit(0, lsn, endLsn, ts))
523
+ expect(commit.commitLsn).toBe(begin.commitLsn)
524
+ })
525
+
526
+ it('XLogData + CopyData wrapper roundtrip with parser', () => {
527
+ const lsn = 0x3000000n
528
+ const ts = BigInt(Date.now()) * 1000n
529
+ const pgoutput = encodeBegin(lsn, ts, 1)
530
+ const xlog = wrapXLogData(lsn, lsn, ts, pgoutput)
531
+ const frame = wrapCopyData(xlog)
532
+
533
+ // unwrap CopyData
534
+ const copyLen = r32(frame, 1)
535
+ const inner = frame.subarray(5, 1 + copyLen)
536
+
537
+ // parse like stream.js
538
+ expect(inner[0]).toBe(0x77)
539
+ const streamLsn = new DataView(inner.buffer, inner.byteOffset).getBigUint64(1)
540
+ expect(streamLsn).toBe(lsn)
541
+
542
+ // parse pgoutput
543
+ const parser = makeParser()
544
+ const parsed = parser.parse(inner.subarray(25))
545
+ expect(parsed.tag).toBe('begin')
546
+ expect(parsed.commitLsn).toBe('00000000/03000000')
547
+ })
548
+
549
+ it('shard schema encoding', () => {
550
+ const oid = getTableOid('rt.chat_0.clients')
551
+ const cols: ColumnInfo[] = [
552
+ { name: 'id', typeOid: 25, typeMod: -1, isKey: true },
553
+ { name: 'lastMutationID', typeOid: 20, typeMod: -1 },
554
+ ]
555
+ const parser = makeParser()
556
+ const rel = parser.parse(encodeRelation(oid, 'chat_0', 'clients', 0x64, cols))
557
+ expect(rel.schema).toBe('chat_0')
558
+ expect(rel.name).toBe('clients')
559
+ })
560
+
561
+ it('LSN ordering: slot < streaming changes', () => {
562
+ // validates that streaming changes will be seen as "new" by zero-cache
563
+ let testLsn = 0x1000000n
564
+ const next = () => {
565
+ testLsn += 0x100n
566
+ return testLsn
567
+ }
568
+
569
+ const slotLsn = next() // CREATE_REPLICATION_SLOT
570
+ const beginLsn = next() // first streaming BEGIN
571
+ const commitLsn = next() // first streaming COMMIT
572
+
573
+ expect(beginLsn).toBeGreaterThan(slotLsn)
574
+ expect(commitLsn).toBeGreaterThan(beginLsn)
575
+
576
+ // verify lexi version ordering is preserved
577
+ // eslint-disable-next-line @typescript-eslint/no-require-imports
578
+ const lexiPath = join(
579
+ import.meta.dirname,
580
+ '../../node_modules/@rocicorp/zero/out/zero-cache/src/types/lexi-version.js'
581
+ )
582
+ const { versionToLexi } = require(lexiPath)
583
+ // eslint-disable-next-line @typescript-eslint/no-require-imports
584
+ const lsnPath = join(
585
+ import.meta.dirname,
586
+ '../../node_modules/@rocicorp/zero/out/zero-cache/src/services/change-source/pg/lsn.js'
587
+ )
588
+ const { toBigInt: lsnToBigInt } = require(lsnPath)
589
+
590
+ const slotHex = `00000000/${slotLsn.toString(16).padStart(8, '0')}`.toUpperCase()
591
+ const beginHex = `00000000/${beginLsn.toString(16).padStart(8, '0')}`.toUpperCase()
592
+
593
+ const slotVersion = versionToLexi(lsnToBigInt(slotHex))
594
+ const beginVersion = versionToLexi(lsnToBigInt(beginHex))
595
+
596
+ // lexi versions must maintain ordering
597
+ expect(beginVersion > slotVersion).toBe(true)
598
+ })
599
+ })
600
+
367
601
  describe('double-wrap: CopyData(XLogData(message))', () => {
368
602
  // this is the exact framing zero-cache expects for every replication message
369
603
  it('produces parseable nested structure', () => {
@@ -940,4 +940,108 @@ describe('zero-cache pgoutput compatibility', { timeout: 30000 }, () => {
940
940
 
941
941
  s.close()
942
942
  })
943
+
944
+ it('shard replicas/mutations changes NOT streamed (only clients)', async () => {
945
+ // zero-cache creates shard schemas (chat_0) with clients, replicas, mutations.
946
+ // if we stream replicas/mutations changes, zero-cache crashes with
947
+ // "Unknown table chat_0.replicas". only clients changes should be streamed.
948
+ await db.exec(`
949
+ CREATE SCHEMA chat_0;
950
+ CREATE TABLE chat_0.clients (
951
+ "clientGroupID" TEXT NOT NULL,
952
+ "clientID" TEXT NOT NULL,
953
+ "lastMutationID" BIGINT,
954
+ PRIMARY KEY ("clientGroupID", "clientID")
955
+ );
956
+ CREATE TABLE chat_0.replicas (
957
+ id TEXT PRIMARY KEY,
958
+ version TEXT
959
+ );
960
+ CREATE TABLE chat_0.mutations (
961
+ id TEXT PRIMARY KEY,
962
+ "clientID" TEXT,
963
+ name TEXT
964
+ );
965
+ `)
966
+ await installChangeTracking(db)
967
+
968
+ const s = await stream()
969
+ const q = s.messages
970
+
971
+ // insert into all three shard tables + a public table
972
+ await db.exec(
973
+ `INSERT INTO chat_0.clients ("clientGroupID", "clientID", "lastMutationID") VALUES ('cg1', 'c1', 1)`
974
+ )
975
+ await db.exec(`INSERT INTO chat_0.replicas (id, version) VALUES ('r1', 'v1')`)
976
+ await db.exec(
977
+ `INSERT INTO chat_0.mutations (id, "clientID", name) VALUES ('m1', 'c1', 'send')`
978
+ )
979
+ await db.exec(`INSERT INTO public.foo (id) VALUES ('normal')`)
980
+
981
+ // collect all inserts for a few seconds
982
+ const inserts: ZcInsert[] = []
983
+ const deadline = Date.now() + 4000
984
+ while (Date.now() < deadline) {
985
+ const m = await q.dequeue(1500).catch(() => null)
986
+ if (!m) break
987
+ if (m.tag === 'insert') inserts.push(m as ZcInsert)
988
+ }
989
+
990
+ // should see clients + foo inserts, but NOT replicas or mutations
991
+ const streamedTables = inserts.map((i) => `${i.relation.schema}.${i.relation.name}`)
992
+ expect(streamedTables).toContain('public.foo')
993
+ expect(streamedTables).toContain('chat_0.clients')
994
+ expect(streamedTables).not.toContain('chat_0.replicas')
995
+ expect(streamedTables).not.toContain('chat_0.mutations')
996
+
997
+ s.close()
998
+ })
999
+
1000
+ it('shard clients table created AFTER replication starts still gets tracked', async () => {
1001
+ // zero-cache creates shard schemas after the replication connection is live.
1002
+ // the poll loop must detect new shard tables and install triggers dynamically.
1003
+ const s = await stream()
1004
+ const q = s.messages
1005
+
1006
+ // give replication time to start polling
1007
+ await new Promise((r) => setTimeout(r, 300))
1008
+
1009
+ // now create shard schema (simulating zero-cache's DDL during initial sync)
1010
+ await db.exec(`
1011
+ CREATE SCHEMA chat_0;
1012
+ CREATE TABLE chat_0.clients (
1013
+ "clientGroupID" TEXT NOT NULL,
1014
+ "clientID" TEXT NOT NULL,
1015
+ "lastMutationID" BIGINT,
1016
+ PRIMARY KEY ("clientGroupID", "clientID")
1017
+ );
1018
+ `)
1019
+
1020
+ // wait for poll loop to detect new table (rescan every ~10s)
1021
+ await new Promise((r) => setTimeout(r, 12000))
1022
+
1023
+ // insert data that should be captured
1024
+ await db.exec(
1025
+ `INSERT INTO chat_0.clients ("clientGroupID", "clientID", "lastMutationID") VALUES ('cg1', 'c1', 42)`
1026
+ )
1027
+
1028
+ // the insert should appear in the replication stream
1029
+ let found = false
1030
+ const deadline = Date.now() + 5000
1031
+ while (Date.now() < deadline) {
1032
+ const m = await q.dequeue(2000).catch(() => null)
1033
+ if (!m) break
1034
+ if (m.tag === 'insert') {
1035
+ const ins = m as ZcInsert
1036
+ if (ins.relation.schema === 'chat_0' && ins.relation.name === 'clients') {
1037
+ found = true
1038
+ break
1039
+ }
1040
+ }
1041
+ }
1042
+
1043
+ expect(found).toBe(true)
1044
+
1045
+ s.close()
1046
+ })
943
1047
  })
@@ -9,7 +9,7 @@ export interface OrezPluginOptions extends Partial<ZeroLiteConfig> {
9
9
  s3Port?: number
10
10
  }
11
11
 
12
- export default function orez(options?: OrezPluginOptions): Plugin {
12
+ export function orezPlugin(options?: OrezPluginOptions): Plugin {
13
13
  let stop: (() => Promise<void>) | null = null
14
14
  let s3Server: Server | null = null
15
15