orez 0.1.21 → 0.1.23

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (61)
  1. package/dist/cli-entry.js +2 -0
  2. package/dist/cli-entry.js.map +1 -1
  3. package/dist/cli.d.ts.map +1 -1
  4. package/dist/cli.js +8 -0
  5. package/dist/cli.js.map +1 -1
  6. package/dist/config.d.ts +1 -0
  7. package/dist/config.d.ts.map +1 -1
  8. package/dist/config.js +1 -0
  9. package/dist/config.js.map +1 -1
  10. package/dist/index.d.ts.map +1 -1
  11. package/dist/index.js +43 -18
  12. package/dist/index.js.map +1 -1
  13. package/dist/pg-proxy.d.ts.map +1 -1
  14. package/dist/pg-proxy.js +93 -37
  15. package/dist/pg-proxy.js.map +1 -1
  16. package/dist/pglite-ipc.d.ts +37 -0
  17. package/dist/pglite-ipc.d.ts.map +1 -0
  18. package/dist/pglite-ipc.js +182 -0
  19. package/dist/pglite-ipc.js.map +1 -0
  20. package/dist/pglite-manager.d.ts +11 -0
  21. package/dist/pglite-manager.d.ts.map +1 -1
  22. package/dist/pglite-manager.js +72 -0
  23. package/dist/pglite-manager.js.map +1 -1
  24. package/dist/pglite-worker-thread.d.ts +15 -0
  25. package/dist/pglite-worker-thread.d.ts.map +1 -0
  26. package/dist/pglite-worker-thread.js +147 -0
  27. package/dist/pglite-worker-thread.js.map +1 -0
  28. package/dist/process-title.d.ts +2 -0
  29. package/dist/process-title.d.ts.map +1 -0
  30. package/dist/process-title.js +9 -0
  31. package/dist/process-title.js.map +1 -0
  32. package/dist/recovery.d.ts +1 -0
  33. package/dist/recovery.d.ts.map +1 -1
  34. package/dist/recovery.js +23 -13
  35. package/dist/recovery.js.map +1 -1
  36. package/dist/replication/change-tracker.d.ts +2 -0
  37. package/dist/replication/change-tracker.d.ts.map +1 -1
  38. package/dist/replication/change-tracker.js +4 -0
  39. package/dist/replication/change-tracker.js.map +1 -1
  40. package/dist/replication/handler.d.ts.map +1 -1
  41. package/dist/replication/handler.js +85 -20
  42. package/dist/replication/handler.js.map +1 -1
  43. package/package.json +2 -2
  44. package/src/cli-entry.ts +4 -0
  45. package/src/cli.ts +10 -0
  46. package/src/config.ts +2 -0
  47. package/src/index.ts +55 -19
  48. package/src/integration/replication-latency.test.ts +428 -0
  49. package/src/pg-proxy.ts +106 -39
  50. package/src/pglite-ipc.test.ts +99 -0
  51. package/src/pglite-ipc.ts +214 -0
  52. package/src/pglite-manager.ts +93 -0
  53. package/src/pglite-worker-thread.ts +172 -0
  54. package/src/process-title.ts +9 -0
  55. package/src/recovery.ts +23 -14
  56. package/src/replication/change-tracker.test.ts +2 -0
  57. package/src/replication/change-tracker.ts +5 -0
  58. package/src/replication/handler.test.ts +14 -1
  59. package/src/replication/handler.ts +83 -22
  60. package/src/replication/tcp-replication.test.ts +9 -1
  61. package/src/replication/zero-compat.test.ts +17 -1
@@ -0,0 +1,428 @@
1
+ /**
2
+ * replication latency stress test.
3
+ *
4
+ * measures the end-to-end time from a proxy write to the zero-cache
5
+ * websocket poke arriving at the client. this is the critical path
6
+ * that determines whether UI re-renders overlap with user interactions.
7
+ *
8
+ * run: vitest run src/integration/replication-latency.test.ts
9
+ */
10
+
11
+ import postgres from 'postgres'
12
+ import { describe, expect, test, beforeAll, afterAll } from 'vitest'
13
+ import WebSocket from 'ws'
14
+
15
+ import { startZeroLite } from '../index.js'
16
+ import { installChangeTracking } from '../replication/change-tracker.js'
17
+ import {
18
+ ensureTablesInPublications,
19
+ installAllowAllPermissions,
20
+ } from './test-permissions.js'
21
+
22
+ import type { PGlite } from '@electric-sql/pglite'
23
+
24
+ const SYNC_PROTOCOL_VERSION = 45
25
+
26
+ function encodeSecProtocols(
27
+ initConnectionMessage: unknown,
28
+ authToken: string | undefined
29
+ ): string {
30
+ const payload = JSON.stringify({ initConnectionMessage, authToken })
31
+ return encodeURIComponent(Buffer.from(payload, 'utf-8').toString('base64'))
32
+ }
33
+
34
+ class Queue<T> {
35
+ private items: T[] = []
36
+ private waiters: Array<{
37
+ resolve: (v: T) => void
38
+ timer?: ReturnType<typeof setTimeout>
39
+ }> = []
40
+
41
+ enqueue(item: T) {
42
+ const waiter = this.waiters.shift()
43
+ if (waiter) {
44
+ if (waiter.timer) clearTimeout(waiter.timer)
45
+ waiter.resolve(item)
46
+ } else {
47
+ this.items.push(item)
48
+ }
49
+ }
50
+
51
+ dequeue(fallback?: T, timeoutMs = 10000): Promise<T> {
52
+ if (this.items.length > 0) {
53
+ return Promise.resolve(this.items.shift()!)
54
+ }
55
+ return new Promise<T>((resolve) => {
56
+ const waiter: { resolve: (v: T) => void; timer?: ReturnType<typeof setTimeout> } = {
57
+ resolve,
58
+ }
59
+ if (fallback !== undefined) {
60
+ waiter.timer = setTimeout(() => {
61
+ const idx = this.waiters.indexOf(waiter)
62
+ if (idx >= 0) this.waiters.splice(idx, 1)
63
+ resolve(fallback)
64
+ }, timeoutMs)
65
+ }
66
+ this.waiters.push(waiter)
67
+ })
68
+ }
69
+ }
70
+
71
+ describe('replication latency', { timeout: 120000 }, () => {
72
+ let db: PGlite
73
+ let zeroPort: number
74
+ let pgPort: number
75
+ let shutdown: () => Promise<void>
76
+ let resetZeroFull: (() => Promise<void>) | undefined
77
+ let dataDir: string
78
+ let sql: ReturnType<typeof postgres>
79
+
80
+ beforeAll(async () => {
81
+ const testPgPort = 24000 + Math.floor(Math.random() * 1000)
82
+ const testZeroPort = testPgPort + 100
83
+
84
+ dataDir = `.orez-latency-test-${Date.now()}`
85
+ const result = await startZeroLite({
86
+ pgPort: testPgPort,
87
+ zeroPort: testZeroPort,
88
+ dataDir,
89
+ logLevel: 'info',
90
+ skipZeroCache: false,
91
+ })
92
+
93
+ db = result.db
94
+ zeroPort = result.zeroPort
95
+ pgPort = result.pgPort
96
+ shutdown = result.stop
97
+ resetZeroFull = result.resetZeroFull
98
+
99
+ // create test table
100
+ await db.exec(`
101
+ CREATE TABLE IF NOT EXISTS latency_test (
102
+ id TEXT PRIMARY KEY,
103
+ value TEXT,
104
+ ts BIGINT
105
+ );
106
+ `)
107
+ await ensureTablesInPublications(db, ['latency_test'])
108
+ const pubName = process.env.ZERO_APP_PUBLICATIONS?.trim()
109
+ if (pubName) {
110
+ const quotedPub = '"' + pubName.replace(/"/g, '""') + '"'
111
+ await db
112
+ .exec(`ALTER PUBLICATION ${quotedPub} ADD TABLE "public"."latency_test"`)
113
+ .catch(() => {})
114
+ await installChangeTracking(db)
115
+ }
116
+ await installAllowAllPermissions(db, ['latency_test'])
117
+ if (resetZeroFull) await resetZeroFull()
118
+
119
+ // wait for zero-cache ready
120
+ await waitForZero(zeroPort, 90000)
121
+
122
+ // connect via wire protocol (like a real app would)
123
+ sql = postgres(`postgresql://user:password@127.0.0.1:${pgPort}/postgres`, {
124
+ max: 1,
125
+ idle_timeout: 0,
126
+ })
127
+ }, 120000)
128
+
129
+ afterAll(async () => {
130
+ if (sql) await sql.end()
131
+ if (shutdown) await shutdown()
132
+ if (dataDir) {
133
+ const { rmSync } = await import('node:fs')
134
+ try {
135
+ rmSync(dataDir, { recursive: true, force: true })
136
+ } catch {}
137
+ }
138
+ })
139
+
140
+ test('measure write-to-poke latency (single inserts)', async () => {
141
+ const downstream = new Queue<unknown>()
142
+ const ws = connectAndSubscribe(zeroPort, downstream)
143
+ await drainInitialPokes(downstream)
144
+
145
+ const NUM_WRITES = 20
146
+ const latencies: number[] = []
147
+
148
+ for (let i = 0; i < NUM_WRITES; i++) {
149
+ const id = `latency-${i}-${Date.now()}`
150
+ const writeStart = performance.now()
151
+
152
+ // write through the wire protocol proxy (like a real app)
153
+ await sql`INSERT INTO latency_test (id, value, ts) VALUES (${id}, ${'test'}, ${Date.now()})`
154
+
155
+ // wait for the poke containing our row
156
+ const poke = await waitForPokeWithRow(downstream, 'latency_test', id, 10000)
157
+ const latencyMs = performance.now() - writeStart
158
+
159
+ expect(poke).toBeTruthy()
160
+ latencies.push(latencyMs)
161
+ }
162
+
163
+ ws.close()
164
+
165
+ // report
166
+ latencies.sort((a, b) => a - b)
167
+ const avg = latencies.reduce((s, v) => s + v, 0) / latencies.length
168
+ const p50 = latencies[Math.floor(latencies.length * 0.5)]
169
+ const p95 = latencies[Math.floor(latencies.length * 0.95)]
170
+ const p99 = latencies[Math.floor(latencies.length * 0.99)]
171
+ const max = latencies[latencies.length - 1]
172
+
173
+ console.log(`\n[replication latency] ${NUM_WRITES} single inserts via wire protocol:`)
174
+ console.log(
175
+ ` avg=${avg.toFixed(1)}ms p50=${p50.toFixed(1)}ms p95=${p95.toFixed(1)}ms p99=${p99.toFixed(1)}ms max=${max.toFixed(1)}ms`
176
+ )
177
+ console.log(` all: ${latencies.map((l) => l.toFixed(0)).join(', ')}ms`)
178
+
179
+ // assert reasonable latency — under 200ms avg means the UI re-render
180
+ // arrives before a user can interact with the element
181
+ expect(avg).toBeLessThan(200)
182
+ // no single write should take more than 500ms
183
+ expect(max).toBeLessThan(500)
184
+ })
185
+
186
+ test('count poke batches per single write', async () => {
187
+ // theory: orez causes 2+ poke batches per write because zero-cache
188
+ // writes shard updates back through the proxy, creating a separate
189
+ // replication batch. real postgres doesn't have this round-trip.
190
+ const downstream = new Queue<unknown>()
191
+ const ws = connectAndSubscribe(zeroPort, downstream)
192
+ await drainInitialPokes(downstream)
193
+
194
+ const id = `poke-count-${Date.now()}`
195
+ await sql`INSERT INTO latency_test (id, value, ts) VALUES (${id}, ${'count-test'}, ${Date.now()})`
196
+
197
+ // collect ALL messages for 2 seconds after the write
198
+ const messages: any[] = []
199
+ const deadline = Date.now() + 2000
200
+ while (Date.now() < deadline) {
201
+ const remaining = Math.max(100, deadline - Date.now())
202
+ const msg = (await downstream.dequeue('timeout' as any, remaining)) as any
203
+ if (msg !== 'timeout') messages.push(msg)
204
+ }
205
+
206
+ const pokeStarts = messages.filter((m) => Array.isArray(m) && m[0] === 'pokeStart')
207
+ const pokeEnds = messages.filter((m) => Array.isArray(m) && m[0] === 'pokeEnd')
208
+ const pokeParts = messages.filter((m) => Array.isArray(m) && m[0] === 'pokePart')
209
+
210
+ console.log(`\n[poke batches] after 1 INSERT:`)
211
+ console.log(
212
+ ` pokeStart=${pokeStarts.length} pokePart=${pokeParts.length} pokeEnd=${pokeEnds.length}`
213
+ )
214
+ console.log(` total messages: ${messages.length}`)
215
+ for (const msg of messages) {
216
+ if (Array.isArray(msg)) {
217
+ const type = msg[0]
218
+ if (type === 'pokePart' && msg[1]?.rowsPatch) {
219
+ const tables = msg[1].rowsPatch
220
+ .map((r: any) => `${r.op}:${r.tableName}`)
221
+ .join(', ')
222
+ console.log(` pokePart: ${tables}`)
223
+ } else {
224
+ console.log(` ${type}`)
225
+ }
226
+ }
227
+ }
228
+
229
+ // ideally just 1 poke cycle per write, but we want to measure reality
230
+ expect(pokeStarts.length).toBeGreaterThanOrEqual(1)
231
+
232
+ ws.close()
233
+ })
234
+
235
+ test('count poke batches when shard tables update', async () => {
236
+ // simulate what happens in the real app: zero-cache writes to shard
237
+ // tables (clients.lastMutationID) after processing a mutation.
238
+ // these shard writes go through the proxy and trigger replication.
239
+ const downstream = new Queue<unknown>()
240
+ const ws = connectAndSubscribe(zeroPort, downstream)
241
+ await drainInitialPokes(downstream)
242
+
243
+ const id = `shard-test-${Date.now()}`
244
+ // insert via proxy (triggers replication)
245
+ await sql`INSERT INTO latency_test (id, value, ts) VALUES (${id}, ${'shard'}, ${Date.now()})`
246
+
247
+ // now simulate a shard write (like zero-cache updating clients table)
248
+ // check if any shard schemas exist
249
+ const shardSchemas = await sql`
250
+ SELECT nspname FROM pg_namespace
251
+ WHERE nspname NOT IN ('pg_catalog', 'information_schema', 'pg_toast', 'public', '_orez')
252
+ AND nspname NOT LIKE 'pg_%'
253
+ AND nspname NOT LIKE 'zero_%'
254
+ AND nspname NOT LIKE '_zero_%'
255
+ AND nspname NOT LIKE '%/%'
256
+ `
257
+
258
+ // collect messages for 3 seconds
259
+ const messages: any[] = []
260
+ const deadline = Date.now() + 3000
261
+ while (Date.now() < deadline) {
262
+ const remaining = Math.max(100, deadline - Date.now())
263
+ const msg = (await downstream.dequeue('timeout' as any, remaining)) as any
264
+ if (msg !== 'timeout') messages.push(msg)
265
+ }
266
+
267
+ const pokeStarts = messages.filter((m) => Array.isArray(m) && m[0] === 'pokeStart')
268
+ const pokeParts = messages.filter((m) => Array.isArray(m) && m[0] === 'pokePart')
269
+
270
+ console.log(
271
+ `\n[shard poke batches] after INSERT + shard schemas=${shardSchemas.length}:`
272
+ )
273
+ console.log(` pokeStart=${pokeStarts.length} pokePart=${pokeParts.length}`)
274
+ for (const msg of messages) {
275
+ if (Array.isArray(msg) && msg[0] === 'pokePart' && msg[1]?.rowsPatch) {
276
+ const tables = msg[1].rowsPatch
277
+ .map((r: any) => `${r.op}:${r.tableName}`)
278
+ .join(', ')
279
+ console.log(` pokePart: ${tables}`)
280
+ }
281
+ }
282
+
283
+ expect(pokeStarts.length).toBeGreaterThanOrEqual(1)
284
+ ws.close()
285
+ })
286
+
287
+ test('measure rapid sequential write latency', async () => {
288
+ const downstream = new Queue<unknown>()
289
+ const ws = connectAndSubscribe(zeroPort, downstream)
290
+ await drainInitialPokes(downstream)
291
+
292
+ // simulate rapid sequential writes (like a chat app sending messages)
293
+ const NUM_WRITES = 10
294
+ const ids: string[] = []
295
+ const writeStart = performance.now()
296
+
297
+ for (let i = 0; i < NUM_WRITES; i++) {
298
+ const id = `rapid-${i}-${Date.now()}`
299
+ ids.push(id)
300
+ await sql`INSERT INTO latency_test (id, value, ts) VALUES (${id}, ${'rapid'}, ${Date.now()})`
301
+ }
302
+
303
+ const writeEnd = performance.now()
304
+
305
+ // wait for ALL rows to arrive
306
+ const receivedIds = new Set<string>()
307
+ const deadline = Date.now() + 30000
308
+ while (receivedIds.size < NUM_WRITES && Date.now() < deadline) {
309
+ const msg = (await downstream.dequeue('timeout' as any, 5000)) as any
310
+ if (msg === 'timeout') continue
311
+ if (Array.isArray(msg) && msg[0] === 'pokePart' && msg[1]?.rowsPatch) {
312
+ for (const row of msg[1].rowsPatch) {
313
+ if (row.op === 'put' && row.tableName === 'latency_test' && row.value?.id) {
314
+ receivedIds.add(row.value.id)
315
+ }
316
+ }
317
+ }
318
+ }
319
+
320
+ const totalMs = performance.now() - writeStart
321
+ const writeMs = writeEnd - writeStart
322
+ const replicationMs = totalMs - writeMs
323
+
324
+ console.log(`\n[replication latency] ${NUM_WRITES} rapid sequential inserts:`)
325
+ console.log(
326
+ ` write=${writeMs.toFixed(1)}ms replication=${replicationMs.toFixed(1)}ms total=${totalMs.toFixed(1)}ms`
327
+ )
328
+ console.log(` received ${receivedIds.size}/${NUM_WRITES} rows`)
329
+
330
+ expect(receivedIds.size).toBe(NUM_WRITES)
331
+ for (const id of ids) {
332
+ expect(receivedIds.has(id)).toBe(true)
333
+ }
334
+ // all 10 writes + replication should complete in under 3s
335
+ expect(totalMs).toBeLessThan(3000)
336
+ })
337
+
338
+ // --- helpers ---
339
+
340
+ function connectAndSubscribe(port: number, downstream: Queue<unknown>): WebSocket {
341
+ const cg = `latency-cg-${Date.now()}`
342
+ const cid = `latency-client-${Date.now()}`
343
+ const secProtocol = encodeSecProtocols(
344
+ [
345
+ 'initConnection',
346
+ {
347
+ desiredQueriesPatch: [
348
+ {
349
+ op: 'put',
350
+ hash: 'q1',
351
+ ast: {
352
+ table: 'latency_test',
353
+ orderBy: [['id', 'asc']],
354
+ },
355
+ },
356
+ ],
357
+ clientSchema: {
358
+ tables: {
359
+ latency_test: {
360
+ columns: {
361
+ id: { type: 'string' },
362
+ value: { type: 'string' },
363
+ ts: { type: 'number' },
364
+ },
365
+ primaryKey: ['id'],
366
+ },
367
+ },
368
+ },
369
+ },
370
+ ],
371
+ undefined
372
+ )
373
+ const ws = new WebSocket(
374
+ `ws://localhost:${port}/sync/v${SYNC_PROTOCOL_VERSION}/connect` +
375
+ `?clientGroupID=${cg}&clientID=${cid}&wsid=ws1&schemaVersion=1&baseCookie=&ts=${Date.now()}&lmid=0`,
376
+ secProtocol
377
+ )
378
+ ws.on('message', (data) => downstream.enqueue(JSON.parse(data.toString())))
379
+ return ws
380
+ }
381
+
382
+ async function drainInitialPokes(downstream: Queue<unknown>) {
383
+ let settled = false
384
+ const timeout = Date.now() + 30000
385
+ while (!settled && Date.now() < timeout) {
386
+ const msg = (await downstream.dequeue('timeout' as any, 3000)) as any
387
+ if (msg === 'timeout') {
388
+ settled = true
389
+ } else if (Array.isArray(msg) && msg[0] === 'pokeEnd') {
390
+ const next = (await downstream.dequeue('timeout' as any, 2000)) as any
391
+ if (next === 'timeout') settled = true
392
+ }
393
+ }
394
+ }
395
+
396
+ async function waitForPokeWithRow(
397
+ downstream: Queue<unknown>,
398
+ tableName: string,
399
+ rowId: string,
400
+ timeoutMs = 10000
401
+ ): Promise<Record<string, any> | null> {
402
+ const deadline = Date.now() + timeoutMs
403
+ while (Date.now() < deadline) {
404
+ const remaining = Math.max(500, deadline - Date.now())
405
+ const msg = (await downstream.dequeue('timeout' as any, remaining)) as any
406
+ if (msg === 'timeout') return null
407
+ if (Array.isArray(msg) && msg[0] === 'pokePart' && msg[1]?.rowsPatch) {
408
+ const match = msg[1].rowsPatch.find(
409
+ (r: any) => r.op === 'put' && r.tableName === tableName && r.value?.id === rowId
410
+ )
411
+ if (match) return match
412
+ }
413
+ }
414
+ return null
415
+ }
416
+ })
417
+
418
+ async function waitForZero(port: number, timeoutMs = 60000): Promise<void> {
419
+ const deadline = Date.now() + timeoutMs
420
+ while (Date.now() < deadline) {
421
+ try {
422
+ const res = await fetch(`http://localhost:${port}/`)
423
+ if (res.ok) return
424
+ } catch {}
425
+ await new Promise((r) => setTimeout(r, 500))
426
+ }
427
+ throw new Error(`zero-cache did not become ready within ${timeoutMs}ms`)
428
+ }
package/src/pg-proxy.ts CHANGED
@@ -470,16 +470,14 @@ export async function startPgProxy(
470
470
  return { db: instances.postgres, mutex: mutexes.postgres, txState: txStates.postgres }
471
471
  }
472
472
 
473
- // shared debounce timer for extended protocol write signaling.
474
- // 2ms trailing-edge: each Sync resets the timer, so the signal fires
475
- // 2ms after the LAST write in a pipeline (coalesces rapid sequential writes).
476
- // 2ms is enough because postgres.js pipelines are sub-ms per statement.
473
+ // signal replication handler after writes complete.
474
+ // 2ms trailing-edge debounce coalesces rapid sequential writes and ensures
475
+ // the socket response is fully flushed before the handler acquires the mutex.
477
476
  let signalTimer: ReturnType<typeof setTimeout> | null = null
478
- function debouncedSignal() {
477
+ function signalWrite() {
479
478
  if (signalTimer) clearTimeout(signalTimer)
480
479
  signalTimer = setTimeout(() => {
481
480
  signalTimer = null
482
- log.debug.proxy('ext-write: debounced signal firing')
483
481
  signalReplicationChange()
484
482
  }, 2)
485
483
  }
@@ -497,6 +495,10 @@ export async function startPgProxy(
497
495
  // track extended protocol writes (Parse with INSERT/UPDATE/DELETE/COPY/TRUNCATE)
498
496
  // so we can signal replication on Sync (0x53) after the pipeline completes
499
497
  let extWritePending = false
498
+ // hold mutex across entire extended protocol pipeline (Parse→Sync).
499
+ // prevents other connections from interleaving and corrupting PGlite's
500
+ // unnamed portal/statement state during the pipeline.
501
+ let pipelineMutexHeld = false
500
502
  // clean up pglite transaction state when a client disconnects
501
503
  socket.on('close', async () => {
502
504
  // replication sockets don't own a transaction — skip ROLLBACK
@@ -583,6 +585,94 @@ export async function startPgProxy(
583
585
  const msgType = data[0]
584
586
  const { db, mutex, txState } = getDbContext(dbName)
585
587
 
588
+ // extended protocol pipeline: hold mutex across Parse→Sync to prevent
589
+ // other connections from interleaving and corrupting unnamed portal state.
590
+ // 0x50=Parse, 0x42=Bind, 0x44=Describe, 0x45=Execute, 0x43=Close, 0x48=Flush
591
+ const isExtendedMsg =
592
+ msgType === 0x50 ||
593
+ msgType === 0x42 ||
594
+ msgType === 0x44 ||
595
+ msgType === 0x45 ||
596
+ msgType === 0x43 ||
597
+ msgType === 0x48
598
+ const isSyncInPipeline = msgType === 0x53 && pipelineMutexHeld
599
+
600
+ if (isExtendedMsg || isSyncInPipeline) {
601
+ // acquire mutex on first message of pipeline
602
+ if (!pipelineMutexHeld) {
603
+ const t0 = performance.now()
604
+ await mutex.acquire()
605
+ proxyStats.totalWaitMs += performance.now() - t0
606
+ pipelineMutexHeld = true
607
+ // auto-rollback stale transactions from other connections
608
+ if (txState.status === 0x45 && txState.owner !== socket) {
609
+ try {
610
+ await db.exec('ROLLBACK')
611
+ } catch {}
612
+ txState.status = 0x49
613
+ txState.owner = null
614
+ }
615
+ }
616
+
617
+ // detect extended protocol writes for replication signaling
618
+ if (dbName === 'postgres' && msgType === 0x50) {
619
+ const q = extractParseQuery(data)?.trimStart().toLowerCase()
620
+ if (q && /^(insert|update|delete|copy|truncate)/.test(q)) {
621
+ extWritePending = true
622
+ log.debug.proxy(`ext-write: detected ${q.slice(0, 40)}`)
623
+ }
624
+ }
625
+
626
+ // apply query rewrites
627
+ data = interceptQuery(data)
628
+
629
+ const t1 = performance.now()
630
+ let result: Uint8Array
631
+ try {
632
+ result = await db.execProtocolRaw(data, { syncToFs: false })
633
+ } catch (err) {
634
+ mutex.release()
635
+ pipelineMutexHeld = false
636
+ throw err
637
+ }
638
+ const t2 = performance.now()
639
+ proxyStats.totalExecMs += t2 - t1
640
+ proxyStats.count++
641
+
642
+ // update transaction state
643
+ const rfqStatus = getReadyForQueryStatus(result)
644
+ if (rfqStatus !== null) {
645
+ txState.status = rfqStatus
646
+ txState.owner = rfqStatus === 0x49 ? null : socket
647
+ }
648
+
649
+ // release mutex on Sync (end of pipeline)
650
+ if (msgType === 0x53) {
651
+ mutex.release()
652
+ pipelineMutexHeld = false
653
+ proxyStats.batches++
654
+
655
+ // signal replication handler on postgres writes
656
+ if (dbName === 'postgres' && extWritePending) {
657
+ extWritePending = false
658
+ signalWrite()
659
+ }
660
+ } else {
661
+ // strip ReadyForQuery from non-Sync pipeline messages
662
+ result = stripResponseMessages(result, true)
663
+ }
664
+
665
+ if (proxyStats.count % 200 === 0) {
666
+ log.debug.proxy(
667
+ `perf: ${proxyStats.count} ops (${proxyStats.batches} batches) | mutex ${proxyStats.totalWaitMs.toFixed(0)}ms | pglite ${proxyStats.totalExecMs.toFixed(0)}ms`
668
+ )
669
+ }
670
+
671
+ return result
672
+ }
673
+
674
+ // Simple Query (0x51) or standalone Sync — per-message mutex
675
+
586
676
  // check for no-op queries (only SimpleQuery has queries worth intercepting)
587
677
  if (isNoopQuery(data)) {
588
678
  if (msgType === 0x51) {
@@ -590,15 +680,6 @@ export async function startPgProxy(
590
680
  }
591
681
  }
592
682
 
593
- // detect extended protocol writes on postgres db for replication signaling
594
- if (dbName === 'postgres' && msgType === 0x50) {
595
- const q = extractParseQuery(data)?.trimStart().toLowerCase()
596
- if (q && /^(insert|update|delete|copy|truncate)/.test(q)) {
597
- extWritePending = true
598
- log.debug.proxy(`ext-write: detected ${q.slice(0, 40)}`)
599
- }
600
- }
601
-
602
683
  // intercept and rewrite queries
603
684
  data = interceptQuery(data)
604
685
 
@@ -620,10 +701,6 @@ export async function startPgProxy(
620
701
  const execute = async (): Promise<Uint8Array> => {
621
702
  const t0 = performance.now()
622
703
  await mutex.acquire()
623
- // pglite is single-connection: if a previous connection left an aborted
624
- // transaction, this connection will inherit it. auto-rollback stale
625
- // transactions from OTHER connections, but let the SAME connection
626
- // handle its own errors (e.g. ROLLBACK TO SAVEPOINT in migrations).
627
704
  if (txState.status === 0x45 && txState.owner !== socket) {
628
705
  try {
629
706
  await db.exec('ROLLBACK')
@@ -639,7 +716,6 @@ export async function startPgProxy(
639
716
  mutex.release()
640
717
  throw err
641
718
  }
642
- // update transaction state tracking
643
719
  const rfqStatus = getReadyForQueryStatus(result)
644
720
  if (rfqStatus !== null) {
645
721
  txState.status = rfqStatus
@@ -689,25 +765,16 @@ export async function startPgProxy(
689
765
  result = stripResponseMessages(result, stripRfq)
690
766
 
691
767
  // signal replication handler on postgres writes for instant sync
692
- if (dbName === 'postgres') {
693
- if (isSimpleQuery && queryText) {
694
- // immediate signal for SimpleQuery writes
695
- const q = queryText.trimStart().toLowerCase()
696
- if (
697
- q.startsWith('insert') ||
698
- q.startsWith('update') ||
699
- q.startsWith('delete') ||
700
- q.startsWith('copy') ||
701
- q.startsWith('truncate')
702
- ) {
703
- signalReplicationChange()
704
- }
705
- } else if (msgType === 0x53 && extWritePending) {
706
- // debounced signal for extended protocol writes.
707
- // fires 5ms after the last Sync in a pipeline, coalescing
708
- // rapid sequential writes (e.g. server→channel→member)
709
- extWritePending = false
710
- debouncedSignal()
768
+ if (dbName === 'postgres' && isSimpleQuery && queryText) {
769
+ const q = queryText.trimStart().toLowerCase()
770
+ if (
771
+ q.startsWith('insert') ||
772
+ q.startsWith('update') ||
773
+ q.startsWith('delete') ||
774
+ q.startsWith('copy') ||
775
+ q.startsWith('truncate')
776
+ ) {
777
+ signalReplicationChange()
711
778
  }
712
779
  }
713
780