orez 0.1.21 → 0.1.23

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (61)
  1. package/dist/cli-entry.js +2 -0
  2. package/dist/cli-entry.js.map +1 -1
  3. package/dist/cli.d.ts.map +1 -1
  4. package/dist/cli.js +8 -0
  5. package/dist/cli.js.map +1 -1
  6. package/dist/config.d.ts +1 -0
  7. package/dist/config.d.ts.map +1 -1
  8. package/dist/config.js +1 -0
  9. package/dist/config.js.map +1 -1
  10. package/dist/index.d.ts.map +1 -1
  11. package/dist/index.js +43 -18
  12. package/dist/index.js.map +1 -1
  13. package/dist/pg-proxy.d.ts.map +1 -1
  14. package/dist/pg-proxy.js +93 -37
  15. package/dist/pg-proxy.js.map +1 -1
  16. package/dist/pglite-ipc.d.ts +37 -0
  17. package/dist/pglite-ipc.d.ts.map +1 -0
  18. package/dist/pglite-ipc.js +182 -0
  19. package/dist/pglite-ipc.js.map +1 -0
  20. package/dist/pglite-manager.d.ts +11 -0
  21. package/dist/pglite-manager.d.ts.map +1 -1
  22. package/dist/pglite-manager.js +72 -0
  23. package/dist/pglite-manager.js.map +1 -1
  24. package/dist/pglite-worker-thread.d.ts +15 -0
  25. package/dist/pglite-worker-thread.d.ts.map +1 -0
  26. package/dist/pglite-worker-thread.js +147 -0
  27. package/dist/pglite-worker-thread.js.map +1 -0
  28. package/dist/process-title.d.ts +2 -0
  29. package/dist/process-title.d.ts.map +1 -0
  30. package/dist/process-title.js +9 -0
  31. package/dist/process-title.js.map +1 -0
  32. package/dist/recovery.d.ts +1 -0
  33. package/dist/recovery.d.ts.map +1 -1
  34. package/dist/recovery.js +23 -13
  35. package/dist/recovery.js.map +1 -1
  36. package/dist/replication/change-tracker.d.ts +2 -0
  37. package/dist/replication/change-tracker.d.ts.map +1 -1
  38. package/dist/replication/change-tracker.js +4 -0
  39. package/dist/replication/change-tracker.js.map +1 -1
  40. package/dist/replication/handler.d.ts.map +1 -1
  41. package/dist/replication/handler.js +85 -20
  42. package/dist/replication/handler.js.map +1 -1
  43. package/package.json +2 -2
  44. package/src/cli-entry.ts +4 -0
  45. package/src/cli.ts +10 -0
  46. package/src/config.ts +2 -0
  47. package/src/index.ts +55 -19
  48. package/src/integration/replication-latency.test.ts +428 -0
  49. package/src/pg-proxy.ts +106 -39
  50. package/src/pglite-ipc.test.ts +99 -0
  51. package/src/pglite-ipc.ts +214 -0
  52. package/src/pglite-manager.ts +93 -0
  53. package/src/pglite-worker-thread.ts +172 -0
  54. package/src/process-title.ts +9 -0
  55. package/src/recovery.ts +23 -14
  56. package/src/replication/change-tracker.test.ts +2 -0
  57. package/src/replication/change-tracker.ts +5 -0
  58. package/src/replication/handler.test.ts +14 -1
  59. package/src/replication/handler.ts +83 -22
  60. package/src/replication/tcp-replication.test.ts +9 -1
  61. package/src/replication/zero-compat.test.ts +17 -1
@@ -0,0 +1,99 @@
1
+ import { describe, test, expect, beforeAll, afterAll } from 'vitest'
2
+
3
+ import { PGliteWorkerProxy } from './pglite-ipc.js'
4
+
5
+ describe('PGliteWorkerProxy', () => {
6
+ let proxy: PGliteWorkerProxy
7
+
8
+ beforeAll(async () => {
9
+ proxy = new PGliteWorkerProxy({
10
+ dataDir: 'memory://',
11
+ name: 'test',
12
+ withExtensions: false,
13
+ debug: 0,
14
+ pgliteOptions: {},
15
+ })
16
+ await proxy.waitReady
17
+ }, 30_000)
18
+
19
+ afterAll(async () => {
20
+ await proxy.close()
21
+ })
22
+
23
+ test('exec creates table', async () => {
24
+ await proxy.exec(`
25
+ CREATE TABLE test_items (
26
+ id SERIAL PRIMARY KEY,
27
+ name TEXT NOT NULL
28
+ )
29
+ `)
30
+ })
31
+
32
+ test('query returns rows', async () => {
33
+ await proxy.exec(`INSERT INTO test_items (name) VALUES ('hello')`)
34
+ await proxy.exec(`INSERT INTO test_items (name) VALUES ('world')`)
35
+
36
+ const result = await proxy.query<{ id: number; name: string }>(
37
+ 'SELECT * FROM test_items ORDER BY id'
38
+ )
39
+ expect(result.rows).toHaveLength(2)
40
+ expect(result.rows[0].name).toBe('hello')
41
+ expect(result.rows[1].name).toBe('world')
42
+ })
43
+
44
+ test('query with params', async () => {
45
+ const result = await proxy.query<{ name: string }>(
46
+ 'SELECT name FROM test_items WHERE name = $1',
47
+ ['world']
48
+ )
49
+ expect(result.rows).toHaveLength(1)
50
+ expect(result.rows[0].name).toBe('world')
51
+ })
52
+
53
+ test('exec returns affectedRows', async () => {
54
+ const result = await proxy.exec(`DELETE FROM test_items WHERE name = 'hello'`)
55
+ expect(result[0].affectedRows).toBe(1)
56
+ })
57
+
58
+ test('execProtocolRaw handles wire protocol', async () => {
59
+ // simple query message: SELECT 1 as num
60
+ const query = 'SELECT 1 as num\0'
61
+ const encoder = new TextEncoder()
62
+ const queryBytes = encoder.encode(query)
63
+ const buf = new Uint8Array(5 + queryBytes.length)
64
+ buf[0] = 0x51 // 'Q' simple query
65
+ new DataView(buf.buffer).setInt32(1, 4 + queryBytes.length)
66
+ buf.set(queryBytes, 5)
67
+
68
+ const result = await proxy.execProtocolRaw(buf)
69
+ expect(result).toBeInstanceOf(Uint8Array)
70
+ expect(result.length).toBeGreaterThan(0)
71
+ // should contain a ReadyForQuery message (0x5a)
72
+ let hasRfq = false
73
+ for (let i = 0; i < result.length; i++) {
74
+ if (result[i] === 0x5a) {
75
+ hasRfq = true
76
+ break
77
+ }
78
+ }
79
+ expect(hasRfq).toBe(true)
80
+ })
81
+
82
+ test('listen receives notifications', async () => {
83
+ const received: string[] = []
84
+ const unsub = await proxy.listen('test_channel', (payload) => {
85
+ received.push(payload)
86
+ })
87
+
88
+ await proxy.exec(`NOTIFY test_channel, 'hello'`)
89
+ // give notification time to propagate
90
+ await new Promise((r) => setTimeout(r, 100))
91
+
92
+ expect(received).toContain('hello')
93
+ await unsub()
94
+ })
95
+
96
+ test('error propagation with SQL code', async () => {
97
+ await expect(proxy.exec('SELECT * FROM nonexistent_table')).rejects.toThrow()
98
+ })
99
+ })
@@ -0,0 +1,214 @@
1
+ /**
2
+ * PGlite worker proxy — runs in the main thread, proxies calls to a
3
+ * worker thread running the actual PGlite instance.
4
+ *
5
+ * implements the PGlite interface surface used throughout orez:
6
+ * execProtocolRaw, query, exec, listen, close.
7
+ *
8
+ * ArrayBuffers are transferred (not copied) for execProtocolRaw to
9
+ * keep IPC overhead near-zero for wire protocol data.
10
+ */
11
+
12
+ import { existsSync } from 'node:fs'
13
+ import { resolve } from 'node:path'
14
+ import { Worker } from 'node:worker_threads'
15
+
16
+ import { log } from './log.js'
17
+ import { signalReplicationChange } from './replication/handler.js'
18
+
19
+ import type { WorkerInitConfig } from './pglite-worker-thread.js'
20
+
21
/** one in-flight request awaiting the matching response message from the worker */
interface PendingRequest {
  resolve: (value: any) => void
  reject: (error: Error) => void
}
25
+
26
+ const WRITE_PREFIXES = ['insert', 'update', 'delete', 'copy', 'truncate']
27
+ // shard-internal tables that the replication handler filters out.
28
+ // signaling for these just causes spurious wakeups + mutex contention.
29
+ const SHARD_INTERNAL_TABLES = ['"replicas"', '"mutations"', '"replicationState"']
30
+ function isReplicatedWrite(sql: string): boolean {
31
+ const q = sql.trimStart().toLowerCase()
32
+ if (!WRITE_PREFIXES.some((p) => q.startsWith(p))) return false
33
+ // skip shard-internal writes (zero-cache manages these, not replicated)
34
+ for (const t of SHARD_INTERNAL_TABLES) {
35
+ if (q.includes(t.toLowerCase())) return false
36
+ }
37
+ return true
38
+ }
39
+
40
+ // resolve worker file path — .ts in dev/test (vitest), .js when compiled
41
+ function resolveWorkerPath(): string {
42
+ const dir = import.meta.dirname
43
+ const tsPath = resolve(dir, 'pglite-worker-thread.ts')
44
+ if (existsSync(tsPath)) return tsPath
45
+ return resolve(dir, 'pglite-worker-thread.js')
46
+ }
47
+
48
/**
 * Main-thread handle to a PGlite instance running in a worker thread.
 *
 * Request/response correlation: each call gets a monotonically increasing
 * id, is recorded in `pending`, and resolves/rejects when the worker echoes
 * that id back in a 'result' or 'error' message.
 */
export class PGliteWorkerProxy {
  private worker: Worker
  // in-flight requests keyed by message id
  private pending = new Map<number, PendingRequest>()
  private nextId = 1
  // LISTEN callbacks keyed by channel name
  private notificationCallbacks = new Map<string, Set<(payload: string) => void>>()
  readonly name: string

  /** resolves when the worker's PGlite instance is ready */
  readonly waitReady: Promise<void>

  constructor(config: WorkerInitConfig) {
    this.name = config.name
    const workerPath = resolveWorkerPath()

    this.worker = new Worker(workerPath, {
      workerData: config,
      name: `pglite-${config.name}`,
    })

    // set up waitReady promise, then install message handler once ready.
    // the temporary onMessage handler only handles the init handshake; it is
    // removed before the regular dispatcher is installed so exactly one
    // 'message' listener processes request traffic.
    let onReady: () => void
    this.waitReady = new Promise<void>((resolveReady, rejectReady) => {
      onReady = () => {
        log.debug.pglite(`worker ${config.name} ready`)
        resolveReady()
      }

      const onMessage = (msg: { type: string; id?: number; message?: string }) => {
        if (msg.type === 'ready') {
          this.worker.off('message', onMessage)
          this.installMessageHandler()
          onReady()
        } else if (msg.type === 'error' && msg.id === 0) {
          // id 0 is reserved by the worker for init failures
          rejectReady(new Error(msg.message))
        }
      }

      this.worker.on('message', onMessage)
      this.worker.once('error', rejectReady)
    })

    // handle unexpected worker crashes: fail every in-flight request.
    // NOTE(review): requests issued *after* a crash are never rejected and
    // would pend forever — confirm callers cannot reach that state.
    this.worker.on('error', (err) => {
      log.pglite(`worker ${config.name} error: ${err.message}`)
      for (const [, req] of this.pending) {
        req.reject(new Error(`worker crashed: ${err.message}`))
      }
      this.pending.clear()
    })

    this.worker.on('exit', (code) => {
      if (code !== 0) {
        log.pglite(`worker ${config.name} exited with code ${code}`)
        for (const [, req] of this.pending) {
          req.reject(new Error(`worker exited with code ${code}`))
        }
        this.pending.clear()
      }
    })
  }

  /**
   * install the steady-state dispatcher: routes 'notification' messages to
   * channel callbacks and everything else to the pending request with the
   * matching id. Responses with an unknown id (e.g. already rejected by a
   * crash handler) are silently dropped.
   */
  private installMessageHandler() {
    this.worker.on(
      'message',
      (msg: { type: string; id?: number; [key: string]: any }) => {
        if (msg.type === 'notification') {
          const callbacks = this.notificationCallbacks.get(msg.channel)
          if (callbacks) {
            for (const cb of callbacks) {
              // a throwing callback must not break other subscribers
              try {
                cb(msg.payload)
              } catch {}
            }
          }
          return
        }

        const req = this.pending.get(msg.id!)
        if (!req) return
        this.pending.delete(msg.id!)

        if (msg.type === 'error') {
          // rebuild the worker-side error, preserving the optional code field
          const err = new Error(msg.message) as Error & { code?: string }
          if (msg.code) err.code = msg.code
          req.reject(err)
        } else {
          req.resolve(msg)
        }
      }
    )
  }

  /**
   * post a request to the worker and return a promise for its response.
   * `transfer` buffers are moved (zero-copy) rather than cloned.
   */
  private send(msg: Record<string, unknown>, transfer?: ArrayBuffer[]): Promise<any> {
    const id = this.nextId++
    msg.id = id
    return new Promise((resolve, reject) => {
      this.pending.set(id, { resolve, reject })
      if (transfer?.length) {
        this.worker.postMessage(msg, transfer)
      } else {
        this.worker.postMessage(msg)
      }
    })
  }

  /** send raw Postgres wire-protocol bytes; returns the raw response bytes */
  async execProtocolRaw(
    data: Uint8Array,
    options?: { syncToFs?: boolean; throwOnError?: boolean }
  ): Promise<Uint8Array> {
    // copy to a transferable buffer then transfer (avoids copying in the worker)
    const buf = new ArrayBuffer(data.byteLength)
    new Uint8Array(buf).set(data)
    const result = await this.send({ type: 'execProtocolRaw', data: buf, options }, [buf])
    return new Uint8Array(result.data)
  }

  /**
   * run a parameterized query. On the 'postgres' instance, replicated write
   * statements additionally wake the replication handler after completion.
   */
  async query<T = any>(
    sql: string,
    params?: any[]
  ): Promise<{ rows: T[]; affectedRows?: number }> {
    const result = await this.send({ type: 'query', sql, params })
    if (this.name === 'postgres' && isReplicatedWrite(sql)) {
      signalReplicationChange()
    }
    return { rows: result.rows ?? [], affectedRows: result.affectedRows }
  }

  /** run one or more statements; returns per-statement { affectedRows } */
  async exec(sql: string): Promise<{ affectedRows?: number }[]> {
    const result = await this.send({ type: 'exec', sql })
    if (this.name === 'postgres' && isReplicatedWrite(sql)) {
      signalReplicationChange()
    }
    return result.results ?? []
  }

  /**
   * subscribe to a NOTIFY channel. Each call creates its own worker-side
   * subscription (identified by the request id); the returned function
   * removes this callback and best-effort unlistens on the worker.
   */
  async listen(
    channel: string,
    callback: (payload: string) => void
  ): Promise<() => Promise<void>> {
    let callbacks = this.notificationCallbacks.get(channel)
    if (!callbacks) {
      callbacks = new Set()
      this.notificationCallbacks.set(channel, callbacks)
    }
    callbacks.add(callback)

    const result = await this.send({ type: 'listen', channel })
    const listenId = result.id

    return async () => {
      callbacks!.delete(callback)
      if (callbacks!.size === 0) {
        this.notificationCallbacks.delete(channel)
      }
      await this.send({ type: 'unlisten', listenId }).catch(() => {})
    }
  }

  /** gracefully close the worker-side database, then terminate the thread */
  async close(): Promise<void> {
    try {
      await this.send({ type: 'close' })
    } catch {
      // worker may already be gone
    }
    await this.worker.terminate()
  }
}
@@ -23,6 +23,7 @@ import { uuid_ossp } from '@electric-sql/pglite/contrib/uuid_ossp'
23
23
  import { vector } from '@electric-sql/pglite/vector'
24
24
 
25
25
  import { log } from './log.js'
26
+ import { PGliteWorkerProxy } from './pglite-ipc.js'
26
27
 
27
28
  import type { ZeroLiteConfig } from './config.js'
28
29
 
@@ -210,6 +211,98 @@ export async function createPGliteInstances(
210
211
  return { postgres, cvr, cdb }
211
212
  }
212
213
 
214
+ /**
215
+ * create worker-backed pglite instances.
216
+ *
217
+ * each instance runs in its own worker thread with a separate event loop,
218
+ * so PGlite WASM execution doesn't block the proxy or replication handler.
219
+ * ArrayBuffers are transferred (not copied) for wire protocol data.
220
+ */
221
+ export async function createPGliteWorkerInstances(
222
+ config: ZeroLiteConfig
223
+ ): Promise<PGliteInstances> {
224
+ // migrate from old single-instance layout (pgdata → pgdata-postgres)
225
+ const pgliteDataDir = (config.pgliteOptions as Record<string, any>)?.dataDir
226
+ if (!pgliteDataDir || !String(pgliteDataDir).startsWith('memory://')) {
227
+ const oldDataPath = resolve(config.dataDir, 'pgdata')
228
+ const newDataPath = resolve(config.dataDir, 'pgdata-postgres')
229
+ if (existsSync(oldDataPath) && !existsSync(newDataPath)) {
230
+ renameSync(oldDataPath, newDataPath)
231
+ log.debug.pglite('migrated pgdata → pgdata-postgres')
232
+ }
233
+ }
234
+
235
+ const useMemory =
236
+ typeof pgliteDataDir === 'string' && pgliteDataDir.startsWith('memory://')
237
+ const {
238
+ dataDir: _ud,
239
+ debug: _dbg,
240
+ ...userOpts
241
+ } = config.pgliteOptions as Record<string, any>
242
+
243
+ function makeWorkerConfig(name: string, withExtensions: boolean) {
244
+ const dataPath = useMemory ? 'memory://' : resolve(config.dataDir, `pgdata-${name}`)
245
+ if (!useMemory) {
246
+ mkdirSync(dataPath, { recursive: true })
247
+ if (cleanStaleLocks(dataPath)) {
248
+ log.debug.pglite(`cleaned stale locks in ${name}`)
249
+ }
250
+ }
251
+ return {
252
+ dataDir: dataPath,
253
+ name,
254
+ withExtensions,
255
+ debug: config.logLevel === 'debug' ? 1 : 0,
256
+ pgliteOptions: userOpts,
257
+ }
258
+ }
259
+
260
+ log.pglite('starting worker threads for postgres, cvr, cdb')
261
+
262
+ // create all 3 worker proxies in parallel
263
+ const pgProxy = new PGliteWorkerProxy(makeWorkerConfig('postgres', true))
264
+ const cvrProxy = new PGliteWorkerProxy(makeWorkerConfig('cvr', false))
265
+ const cdbProxy = new PGliteWorkerProxy(makeWorkerConfig('cdb', false))
266
+
267
+ await Promise.all([pgProxy.waitReady, cvrProxy.waitReady, cdbProxy.waitReady])
268
+
269
+ log.pglite('all worker threads ready')
270
+
271
+ // postgres-specific setup
272
+ await pgProxy.exec('CREATE EXTENSION IF NOT EXISTS plpgsql')
273
+
274
+ // create publication only when explicitly configured
275
+ const pubName = process.env.ZERO_APP_PUBLICATIONS?.trim()
276
+ if (pubName) {
277
+ const pubs = await pgProxy.query<{ count: string }>(
278
+ `SELECT count(*) as count FROM pg_publication WHERE pubname = $1`,
279
+ [pubName]
280
+ )
281
+ if (Number(pubs.rows[0].count) === 0) {
282
+ const quoted = '"' + pubName.replace(/"/g, '""') + '"'
283
+ await pgProxy.exec(`CREATE PUBLICATION ${quoted}`)
284
+ }
285
+ }
286
+
287
+ // cast to PGlite — our proxy implements the same interface surface
288
+ return {
289
+ postgres: pgProxy as unknown as PGlite,
290
+ cvr: cvrProxy as unknown as PGlite,
291
+ cdb: cdbProxy as unknown as PGlite,
292
+ }
293
+ }
294
+
295
+ /** create a single worker-backed PGlite instance (for CVR/CDB recreation during reset) */
296
+ export function createPGliteWorker(dataDir: string, name: string): PGliteWorkerProxy {
297
+ return new PGliteWorkerProxy({
298
+ dataDir,
299
+ name,
300
+ withExtensions: false,
301
+ debug: 0,
302
+ pgliteOptions: {},
303
+ })
304
+ }
305
+
213
306
  /** run pending migrations, returns count of newly applied migrations */
214
307
  export async function runMigrations(db: PGlite, config: ZeroLiteConfig): Promise<number> {
215
308
  if (!config.migrationsDir) {
@@ -0,0 +1,172 @@
1
+ /**
2
+ * worker thread that runs a single PGlite instance.
3
+ *
4
+ * receives commands via parentPort messages, executes them on the PGlite
5
+ * instance, and sends results back. ArrayBuffers are transferred (not copied)
6
+ * for execProtocolRaw to minimize overhead.
7
+ */
8
+
9
+ import { parentPort, workerData } from 'node:worker_threads'
10
+
11
+ import { PGlite } from '@electric-sql/pglite'
12
+ import { btree_gin } from '@electric-sql/pglite/contrib/btree_gin'
13
+ import { btree_gist } from '@electric-sql/pglite/contrib/btree_gist'
14
+ import { citext } from '@electric-sql/pglite/contrib/citext'
15
+ import { cube } from '@electric-sql/pglite/contrib/cube'
16
+ import { earthdistance } from '@electric-sql/pglite/contrib/earthdistance'
17
+ import { fuzzystrmatch } from '@electric-sql/pglite/contrib/fuzzystrmatch'
18
+ import { hstore } from '@electric-sql/pglite/contrib/hstore'
19
+ import { ltree } from '@electric-sql/pglite/contrib/ltree'
20
+ import { pg_trgm } from '@electric-sql/pglite/contrib/pg_trgm'
21
+ import { pgcrypto } from '@electric-sql/pglite/contrib/pgcrypto'
22
+ import { uuid_ossp } from '@electric-sql/pglite/contrib/uuid_ossp'
23
+ import { vector } from '@electric-sql/pglite/vector'
24
+
25
/** configuration passed to the worker thread via workerData */
export interface WorkerInitConfig {
  // PGlite data directory; 'memory://' for an ephemeral in-memory instance
  dataDir: string
  // instance name — used for logging and thread naming
  name: string
  // whether to load the full extension set (vector, contrib modules)
  withExtensions: boolean
  // PGlite debug level (0 = off)
  debug: number
  // extra options forwarded to the PGlite constructor
  pgliteOptions?: Record<string, unknown>
}

// non-null assertion is safe: this module only ever runs as a worker thread,
// where parentPort is always set
const port = parentPort!
const config = workerData as WorkerInitConfig

// active listen subscriptions
const listeners = new Map<number, () => Promise<void>>()

let db: PGlite
40
+
41
+ async function init() {
42
+ const { dataDir: _userDataDir, debug: _dbg, ...userOpts } = config.pgliteOptions || {}
43
+
44
+ db = new PGlite({
45
+ dataDir: config.dataDir,
46
+ debug: config.debug,
47
+ relaxedDurability: true,
48
+ ...userOpts,
49
+ extensions: config.withExtensions
50
+ ? userOpts.extensions || {
51
+ vector,
52
+ pg_trgm,
53
+ pgcrypto,
54
+ uuid_ossp,
55
+ citext,
56
+ hstore,
57
+ ltree,
58
+ fuzzystrmatch,
59
+ btree_gin,
60
+ btree_gist,
61
+ cube,
62
+ earthdistance,
63
+ }
64
+ : {},
65
+ } as any)
66
+
67
+ await db.waitReady
68
+
69
+ // tune postgres internals
70
+ await db.exec(`
71
+ SET work_mem = '64MB';
72
+ SET maintenance_work_mem = '128MB';
73
+ SET effective_cache_size = '512MB';
74
+ SET random_page_cost = 1.1;
75
+ SET jit = off;
76
+ `)
77
+
78
+ port.postMessage({ type: 'ready' })
79
+ }
80
+
81
// command dispatcher: every request carries an id that is echoed back in the
// matching 'result'/'error' response so the parent can correlate them.
// NOTE(review): messages arriving before init() completes hit an undefined
// `db` and are answered via the catch below — confirm the parent never sends
// before the 'ready' handshake.
port.on('message', async (msg: { type: string; id: number; [key: string]: unknown }) => {
  const { type, id } = msg

  try {
    switch (type) {
      case 'execProtocolRaw': {
        // raw Postgres wire-protocol passthrough
        const input = new Uint8Array(msg.data as ArrayBuffer)
        const result = await db.execProtocolRaw(input, msg.options as any)
        // copy result to a transferable buffer (pglite may reuse wasm memory)
        const buf = new ArrayBuffer(result.byteLength)
        new Uint8Array(buf).set(result)
        port.postMessage({ type: 'result', id, data: buf }, [buf])
        break
      }

      case 'query': {
        const result = await db.query(msg.sql as string, msg.params as any[])
        port.postMessage({
          type: 'result',
          id,
          rows: result.rows,
          affectedRows: result.affectedRows,
        })
        break
      }

      case 'exec': {
        const result = await db.exec(msg.sql as string)
        // serialize exec results (array of { affectedRows })
        const results = result.map((r) => ({ affectedRows: r.affectedRows ?? 0 }))
        port.postMessage({ type: 'result', id, results })
        break
      }

      case 'listen': {
        // subscription is keyed by this request's id; the parent sends the
        // same id back as `listenId` when unlistening
        const channel = msg.channel as string
        const unsub = await db.listen(channel, (payload) => {
          port.postMessage({ type: 'notification', channel, payload })
        })
        listeners.set(id, unsub)
        port.postMessage({ type: 'result', id })
        break
      }

      case 'unlisten': {
        const listenId = msg.listenId as number
        const unsub = listeners.get(listenId)
        if (unsub) {
          await unsub()
          listeners.delete(listenId)
        }
        port.postMessage({ type: 'result', id })
        break
      }

      case 'close': {
        // best-effort teardown of subscriptions, then close the database
        for (const unsub of listeners.values()) {
          await unsub().catch(() => {})
        }
        listeners.clear()
        await db.close()
        port.postMessage({ type: 'result', id })
        break
      }

      default:
        port.postMessage({
          type: 'error',
          id,
          message: `unknown message type: ${type}`,
        })
    }
  } catch (err: unknown) {
    // propagate message plus the optional error code so the parent can
    // rebuild an equivalent Error object
    const error = err as { message?: string; code?: string }
    port.postMessage({
      type: 'error',
      id,
      message: error?.message || String(err),
      code: error?.code,
    })
  }
})

// id 0 is reserved for init failures — the parent rejects waitReady on it
init().catch((err: unknown) => {
  const error = err as { message?: string }
  port.postMessage({
    type: 'error',
    id: 0,
    message: `worker init failed: ${error?.message || String(err)}`,
  })
  process.exit(1)
})
@@ -0,0 +1,9 @@
1
+ export function orezTitle(label = 'orez'): string {
2
+ const cwd = process.cwd()
3
+ const home = process.env.HOME || ''
4
+ const dir =
5
+ home && cwd.startsWith(home + '/')
6
+ ? '~' + cwd.slice(home.length)
7
+ : cwd.split('/').pop()!
8
+ return `${label} (${dir})`
9
+ }
package/src/recovery.ts CHANGED
@@ -7,12 +7,13 @@ import { mkdirSync, rmSync } from 'node:fs'
7
7
  import { resolve } from 'node:path'
8
8
 
9
9
  import { log } from './log.js'
10
+ import { createPGliteWorker } from './pglite-manager.js'
10
11
 
11
12
  import type { PGlite } from '@electric-sql/pglite'
12
13
  import type { ChildProcess } from 'node:child_process'
13
14
 
14
15
  export interface RecoveryContext {
15
- config: { dataDir: string }
16
+ config: { dataDir: string; useWorkerThreads?: boolean }
16
17
  instances: {
17
18
  postgres: PGlite
18
19
  cvr: PGlite
@@ -74,19 +75,27 @@ export async function recoverFromCdcCorruption(ctx: RecoveryContext): Promise<vo
74
75
  }
75
76
 
76
77
  // recreate CVR/CDB instances
77
- const { PGlite } = await import('@electric-sql/pglite')
78
- mkdirSync(resolve(config.dataDir, 'pgdata-cvr'), { recursive: true })
79
- mkdirSync(resolve(config.dataDir, 'pgdata-cdb'), { recursive: true })
80
- instances.cvr = new PGlite({
81
- dataDir: resolve(config.dataDir, 'pgdata-cvr'),
82
- relaxedDurability: true,
83
- })
84
- instances.cdb = new PGlite({
85
- dataDir: resolve(config.dataDir, 'pgdata-cdb'),
86
- relaxedDurability: true,
87
- })
88
- await instances.cvr.waitReady
89
- await instances.cdb.waitReady
78
+ if (config.useWorkerThreads) {
79
+ const cvrProxy = createPGliteWorker(resolve(config.dataDir, 'pgdata-cvr'), 'cvr')
80
+ const cdbProxy = createPGliteWorker(resolve(config.dataDir, 'pgdata-cdb'), 'cdb')
81
+ await Promise.all([cvrProxy.waitReady, cdbProxy.waitReady])
82
+ instances.cvr = cvrProxy as unknown as PGlite
83
+ instances.cdb = cdbProxy as unknown as PGlite
84
+ } else {
85
+ const { PGlite: PGliteCtor } = await import('@electric-sql/pglite')
86
+ mkdirSync(resolve(config.dataDir, 'pgdata-cvr'), { recursive: true })
87
+ mkdirSync(resolve(config.dataDir, 'pgdata-cdb'), { recursive: true })
88
+ instances.cvr = new PGliteCtor({
89
+ dataDir: resolve(config.dataDir, 'pgdata-cvr'),
90
+ relaxedDurability: true,
91
+ })
92
+ instances.cdb = new PGliteCtor({
93
+ dataDir: resolve(config.dataDir, 'pgdata-cdb'),
94
+ relaxedDurability: true,
95
+ })
96
+ await instances.cvr.waitReady
97
+ await instances.cdb.waitReady
98
+ }
90
99
  log.orez('recreated CVR/CDB instances')
91
100
 
92
101
  // clear upstream replication tracking
@@ -4,6 +4,7 @@ import { describe, it, expect, beforeEach, afterEach } from 'vitest'
4
4
  import {
5
5
  installChangeTracking,
6
6
  installTriggersOnShardTables,
7
+ resetShardSchemaCache,
7
8
  purgeConsumedChanges,
8
9
  getChangesSince,
9
10
  getCurrentWatermark,
@@ -208,6 +209,7 @@ describe('shard table tracking', () => {
208
209
  let db: PGlite
209
210
 
210
211
  beforeEach(async () => {
212
+ resetShardSchemaCache()
211
213
  db = new PGlite()
212
214
  await db.waitReady
213
215
  await installChangeTracking(db)
@@ -153,6 +153,11 @@ async function installTriggersOnAllTables(db: PGlite): Promise<void> {
153
153
  */
154
154
  const processedShardSchemas = new Set<string>()
155
155
 
156
/**
 * reset shard schema cache (for tests).
 *
 * clears the module-level set of shard schemas already processed, so a
 * fresh database in a test is not skipped as "already processed".
 */
export function resetShardSchemaCache(): void {
  processedShardSchemas.clear()
}
160
+
156
161
  export async function installTriggersOnShardTables(db: PGlite): Promise<void> {
157
162
  const result = await db.query<{ nspname: string }>(
158
163
  `SELECT nspname FROM pg_namespace