orez 0.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +116 -0
- package/dist/config.d.ts +15 -0
- package/dist/config.d.ts.map +1 -0
- package/dist/config.js +20 -0
- package/dist/config.js.map +1 -0
- package/dist/index.d.ts +15 -0
- package/dist/index.d.ts.map +1 -0
- package/dist/index.js +195 -0
- package/dist/index.js.map +1 -0
- package/dist/pg-proxy.d.ts +14 -0
- package/dist/pg-proxy.d.ts.map +1 -0
- package/dist/pg-proxy.js +385 -0
- package/dist/pg-proxy.js.map +1 -0
- package/dist/pglite-manager.d.ts +5 -0
- package/dist/pglite-manager.d.ts.map +1 -0
- package/dist/pglite-manager.js +71 -0
- package/dist/pglite-manager.js.map +1 -0
- package/dist/replication/change-tracker.d.ts +14 -0
- package/dist/replication/change-tracker.d.ts.map +1 -0
- package/dist/replication/change-tracker.js +86 -0
- package/dist/replication/change-tracker.js.map +1 -0
- package/dist/replication/handler.d.ts +24 -0
- package/dist/replication/handler.d.ts.map +1 -0
- package/dist/replication/handler.js +300 -0
- package/dist/replication/handler.js.map +1 -0
- package/dist/replication/pgoutput-encoder.d.ts +26 -0
- package/dist/replication/pgoutput-encoder.d.ts.map +1 -0
- package/dist/replication/pgoutput-encoder.js +204 -0
- package/dist/replication/pgoutput-encoder.js.map +1 -0
- package/dist/s3-local.d.ts +8 -0
- package/dist/s3-local.d.ts.map +1 -0
- package/dist/s3-local.js +131 -0
- package/dist/s3-local.js.map +1 -0
- package/package.json +56 -0
- package/src/config.ts +40 -0
- package/src/index.ts +255 -0
- package/src/pg-proxy.ts +474 -0
- package/src/pglite-manager.ts +105 -0
- package/src/replication/change-tracker.test.ts +179 -0
- package/src/replication/change-tracker.ts +115 -0
- package/src/replication/handler.test.ts +331 -0
- package/src/replication/handler.ts +378 -0
- package/src/replication/pgoutput-encoder.test.ts +381 -0
- package/src/replication/pgoutput-encoder.ts +252 -0
- package/src/replication/tcp-replication.test.ts +824 -0
- package/src/replication/zero-compat.test.ts +882 -0
- package/src/s3-local.ts +179 -0
package/src/pg-proxy.ts
ADDED
|
@@ -0,0 +1,474 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* tcp proxy that makes pglite speak postgresql wire protocol.
|
|
3
|
+
*
|
|
4
|
+
* uses pg-gateway to handle protocol lifecycle for regular connections,
|
|
5
|
+
* and directly handles the raw socket for replication connections.
|
|
6
|
+
*
|
|
7
|
+
* regular connections: forwarded to pglite via execProtocolRaw()
|
|
8
|
+
* replication connections: intercepted, replication protocol faked
|
|
9
|
+
*/
|
|
10
|
+
|
|
11
|
+
import { createServer, type Server, type Socket } from 'node:net'
|
|
12
|
+
|
|
13
|
+
import { fromNodeSocket } from 'pg-gateway/node'
|
|
14
|
+
|
|
15
|
+
import type { PGlite } from '@electric-sql/pglite'
|
|
16
|
+
|
|
17
|
+
import type { ZeroLiteConfig } from './config'
|
|
18
|
+
import {
|
|
19
|
+
handleReplicationQuery,
|
|
20
|
+
handleStartReplication,
|
|
21
|
+
} from './replication/handler'
|
|
22
|
+
|
|
23
|
+
// database name -> search_path mapping.
// pglite hosts a single database, so the separate logical databases the client
// connects to (postgres, zero_cvr, zero_cdb) are simulated as schemas selected
// via search_path (see startPgProxy's onMessage).
const DB_SCHEMA_MAP: Record<string, string> = {
  postgres: 'public',
  zero_cvr: 'zero_cvr, public',
  zero_cdb: 'zero_cdb, public',
}
|
|
29
|
+
|
|
30
|
+
// query rewrites: make pglite look like real postgres with logical replication.
// NOTE: these regexes carry the 'g' flag and therefore have a stateful
// lastIndex; callers (interceptQuery) must reset lastIndex between uses.
const QUERY_REWRITES: Array<{ match: RegExp; replace: string }> = [
  // report wal_level as 'logical' so replication preflight checks pass
  {
    match: /current_setting\s*\(\s*'wal_level'\s*\)/gi,
    replace: "'logical'::text",
  },
  // strip READ ONLY from BEGIN
  // NOTE(review): this strips READ ONLY anywhere in any query text, not only
  // inside BEGIN — presumably acceptable for the traffic seen here; confirm.
  {
    match: /\bREAD\s+ONLY\b/gi,
    replace: '',
  },
  // redirect pg_replication_slots to our fake table
  {
    match: /\bpg_replication_slots\b/g,
    replace: 'public._zero_replication_slots',
  },
]
|
|
48
|
+
|
|
49
|
+
// queries to intercept and answer with a synthetic no-op success response
// instead of forwarding to pglite (see isNoopQuery / buildSetCompleteResponse).
// NOTE(review): presumably SET TRANSACTION SNAPSHOT targets an exported
// snapshot pglite cannot provide — confirm against the replication handler.
const NOOP_QUERY_PATTERNS = [
  /^\s*SET\s+TRANSACTION\s+SNAPSHOT\s+/i,
]
|
|
53
|
+
|
|
54
|
+
/**
|
|
55
|
+
* extract query text from a Parse message (0x50).
|
|
56
|
+
*/
|
|
57
|
+
function extractParseQuery(data: Uint8Array): string | null {
|
|
58
|
+
if (data[0] !== 0x50) return null
|
|
59
|
+
let offset = 5
|
|
60
|
+
while (offset < data.length && data[offset] !== 0) offset++
|
|
61
|
+
offset++
|
|
62
|
+
const queryStart = offset
|
|
63
|
+
while (offset < data.length && data[offset] !== 0) offset++
|
|
64
|
+
return new TextDecoder().decode(data.subarray(queryStart, offset))
|
|
65
|
+
}
|
|
66
|
+
|
|
67
|
+
/**
|
|
68
|
+
* rebuild a Parse message with a modified query string.
|
|
69
|
+
*/
|
|
70
|
+
function rebuildParseMessage(
|
|
71
|
+
data: Uint8Array,
|
|
72
|
+
newQuery: string
|
|
73
|
+
): Uint8Array {
|
|
74
|
+
let offset = 5
|
|
75
|
+
while (offset < data.length && data[offset] !== 0) offset++
|
|
76
|
+
const nameEnd = offset + 1
|
|
77
|
+
const nameBytes = data.subarray(5, nameEnd)
|
|
78
|
+
|
|
79
|
+
offset = nameEnd
|
|
80
|
+
while (offset < data.length && data[offset] !== 0) offset++
|
|
81
|
+
offset++
|
|
82
|
+
|
|
83
|
+
const suffix = data.subarray(offset)
|
|
84
|
+
const encoder = new TextEncoder()
|
|
85
|
+
const queryBytes = encoder.encode(newQuery)
|
|
86
|
+
|
|
87
|
+
const totalLen =
|
|
88
|
+
4 + nameBytes.length + queryBytes.length + 1 + suffix.length
|
|
89
|
+
const result = new Uint8Array(1 + totalLen)
|
|
90
|
+
const dv = new DataView(result.buffer)
|
|
91
|
+
result[0] = 0x50
|
|
92
|
+
dv.setInt32(1, totalLen)
|
|
93
|
+
let pos = 5
|
|
94
|
+
result.set(nameBytes, pos)
|
|
95
|
+
pos += nameBytes.length
|
|
96
|
+
result.set(queryBytes, pos)
|
|
97
|
+
pos += queryBytes.length
|
|
98
|
+
result[pos++] = 0
|
|
99
|
+
result.set(suffix, pos)
|
|
100
|
+
return result
|
|
101
|
+
}
|
|
102
|
+
|
|
103
|
+
/**
|
|
104
|
+
* rebuild a Simple Query message with a modified query string.
|
|
105
|
+
*/
|
|
106
|
+
function rebuildSimpleQuery(newQuery: string): Uint8Array {
|
|
107
|
+
const encoder = new TextEncoder()
|
|
108
|
+
const queryBytes = encoder.encode(newQuery + '\0')
|
|
109
|
+
const buf = new Uint8Array(5 + queryBytes.length)
|
|
110
|
+
buf[0] = 0x51
|
|
111
|
+
new DataView(buf.buffer).setInt32(1, 4 + queryBytes.length)
|
|
112
|
+
buf.set(queryBytes, 5)
|
|
113
|
+
return buf
|
|
114
|
+
}
|
|
115
|
+
|
|
116
|
+
/**
|
|
117
|
+
* intercept and rewrite query messages to make pglite look like real postgres.
|
|
118
|
+
*/
|
|
119
|
+
function interceptQuery(data: Uint8Array): Uint8Array {
|
|
120
|
+
const msgType = data[0]
|
|
121
|
+
|
|
122
|
+
if (msgType === 0x51) {
|
|
123
|
+
const view = new DataView(
|
|
124
|
+
data.buffer,
|
|
125
|
+
data.byteOffset,
|
|
126
|
+
data.byteLength
|
|
127
|
+
)
|
|
128
|
+
const len = view.getInt32(1)
|
|
129
|
+
let query = new TextDecoder()
|
|
130
|
+
.decode(data.subarray(5, 1 + len - 1))
|
|
131
|
+
.replace(/\0$/, '')
|
|
132
|
+
|
|
133
|
+
let modified = false
|
|
134
|
+
for (const rw of QUERY_REWRITES) {
|
|
135
|
+
if (rw.match.test(query)) {
|
|
136
|
+
query = query.replace(rw.match, rw.replace)
|
|
137
|
+
modified = true
|
|
138
|
+
rw.match.lastIndex = 0
|
|
139
|
+
}
|
|
140
|
+
rw.match.lastIndex = 0
|
|
141
|
+
}
|
|
142
|
+
|
|
143
|
+
if (modified) {
|
|
144
|
+
return rebuildSimpleQuery(query)
|
|
145
|
+
}
|
|
146
|
+
} else if (msgType === 0x50) {
|
|
147
|
+
const query = extractParseQuery(data)
|
|
148
|
+
if (query) {
|
|
149
|
+
let newQuery = query
|
|
150
|
+
let modified = false
|
|
151
|
+
for (const rw of QUERY_REWRITES) {
|
|
152
|
+
if (rw.match.test(newQuery)) {
|
|
153
|
+
newQuery = newQuery.replace(rw.match, rw.replace)
|
|
154
|
+
modified = true
|
|
155
|
+
rw.match.lastIndex = 0
|
|
156
|
+
}
|
|
157
|
+
rw.match.lastIndex = 0
|
|
158
|
+
}
|
|
159
|
+
if (modified) {
|
|
160
|
+
return rebuildParseMessage(data, newQuery)
|
|
161
|
+
}
|
|
162
|
+
}
|
|
163
|
+
}
|
|
164
|
+
|
|
165
|
+
return data
|
|
166
|
+
}
|
|
167
|
+
|
|
168
|
+
/**
|
|
169
|
+
* check if a query should be intercepted as a no-op.
|
|
170
|
+
*/
|
|
171
|
+
function isNoopQuery(data: Uint8Array): boolean {
|
|
172
|
+
let query: string | null = null
|
|
173
|
+
if (data[0] === 0x51) {
|
|
174
|
+
const view = new DataView(
|
|
175
|
+
data.buffer,
|
|
176
|
+
data.byteOffset,
|
|
177
|
+
data.byteLength
|
|
178
|
+
)
|
|
179
|
+
const len = view.getInt32(1)
|
|
180
|
+
query = new TextDecoder()
|
|
181
|
+
.decode(data.subarray(5, 1 + len - 1))
|
|
182
|
+
.replace(/\0$/, '')
|
|
183
|
+
} else if (data[0] === 0x50) {
|
|
184
|
+
query = extractParseQuery(data)
|
|
185
|
+
}
|
|
186
|
+
if (!query) return false
|
|
187
|
+
return NOOP_QUERY_PATTERNS.some((p) => p.test(query!))
|
|
188
|
+
}
|
|
189
|
+
|
|
190
|
+
/**
|
|
191
|
+
* build a synthetic "SET" command complete response.
|
|
192
|
+
*/
|
|
193
|
+
function buildSetCompleteResponse(): Uint8Array {
|
|
194
|
+
const encoder = new TextEncoder()
|
|
195
|
+
const tag = encoder.encode('SET\0')
|
|
196
|
+
const cc = new Uint8Array(1 + 4 + tag.length)
|
|
197
|
+
cc[0] = 0x43
|
|
198
|
+
new DataView(cc.buffer).setInt32(1, 4 + tag.length)
|
|
199
|
+
cc.set(tag, 5)
|
|
200
|
+
|
|
201
|
+
const rfq = new Uint8Array(6)
|
|
202
|
+
rfq[0] = 0x5a
|
|
203
|
+
new DataView(rfq.buffer).setInt32(1, 5)
|
|
204
|
+
rfq[5] = 0x54 // 'T' = in transaction
|
|
205
|
+
|
|
206
|
+
const result = new Uint8Array(cc.length + rfq.length)
|
|
207
|
+
result.set(cc, 0)
|
|
208
|
+
result.set(rfq, cc.length)
|
|
209
|
+
return result
|
|
210
|
+
}
|
|
211
|
+
|
|
212
|
+
/**
|
|
213
|
+
* build a synthetic ParseComplete response for extended protocol no-ops.
|
|
214
|
+
*/
|
|
215
|
+
function buildParseCompleteResponse(): Uint8Array {
|
|
216
|
+
const pc = new Uint8Array(5)
|
|
217
|
+
pc[0] = 0x31 // ParseComplete
|
|
218
|
+
new DataView(pc.buffer).setInt32(1, 4)
|
|
219
|
+
return pc
|
|
220
|
+
}
|
|
221
|
+
|
|
222
|
+
/**
|
|
223
|
+
* strip ReadyForQuery messages from a response buffer.
|
|
224
|
+
*/
|
|
225
|
+
function stripReadyForQuery(data: Uint8Array): Uint8Array {
|
|
226
|
+
if (data.length === 0) return data
|
|
227
|
+
|
|
228
|
+
const parts: Uint8Array[] = []
|
|
229
|
+
let offset = 0
|
|
230
|
+
while (offset < data.length) {
|
|
231
|
+
const msgType = data[offset]
|
|
232
|
+
if (offset + 5 > data.length) break
|
|
233
|
+
const msgLen = new DataView(
|
|
234
|
+
data.buffer,
|
|
235
|
+
data.byteOffset + offset + 1
|
|
236
|
+
).getInt32(0)
|
|
237
|
+
const totalLen = 1 + msgLen
|
|
238
|
+
|
|
239
|
+
if (msgType !== 0x5a) {
|
|
240
|
+
parts.push(data.subarray(offset, offset + totalLen))
|
|
241
|
+
}
|
|
242
|
+
|
|
243
|
+
offset += totalLen
|
|
244
|
+
}
|
|
245
|
+
|
|
246
|
+
if (parts.length === 0) return new Uint8Array(0)
|
|
247
|
+
if (parts.length === 1) return parts[0]
|
|
248
|
+
|
|
249
|
+
const total = parts.reduce((sum, p) => sum + p.length, 0)
|
|
250
|
+
const result = new Uint8Array(total)
|
|
251
|
+
let pos = 0
|
|
252
|
+
for (const p of parts) {
|
|
253
|
+
result.set(p, pos)
|
|
254
|
+
pos += p.length
|
|
255
|
+
}
|
|
256
|
+
return result
|
|
257
|
+
}
|
|
258
|
+
|
|
259
|
+
// simple mutex for serializing pglite access
|
|
260
|
+
class Mutex {
|
|
261
|
+
private locked = false
|
|
262
|
+
private queue: Array<() => void> = []
|
|
263
|
+
|
|
264
|
+
async acquire(): Promise<void> {
|
|
265
|
+
if (!this.locked) {
|
|
266
|
+
this.locked = true
|
|
267
|
+
return
|
|
268
|
+
}
|
|
269
|
+
return new Promise<void>((resolve) => {
|
|
270
|
+
this.queue.push(resolve)
|
|
271
|
+
})
|
|
272
|
+
}
|
|
273
|
+
|
|
274
|
+
release(): void {
|
|
275
|
+
const next = this.queue.shift()
|
|
276
|
+
if (next) {
|
|
277
|
+
next()
|
|
278
|
+
} else {
|
|
279
|
+
this.locked = false
|
|
280
|
+
}
|
|
281
|
+
}
|
|
282
|
+
}
|
|
283
|
+
|
|
284
|
+
// single shared lock: all proxied traffic to pglite is serialized through it
const mutex = new Mutex()

// module-level search_path tracking: mirrors the last SET search_path sent to
// pglite so it is only re-issued when the simulated database changes
let currentSearchPath = 'public'
|
|
288
|
+
|
|
289
|
+
/**
 * start the tcp proxy that fronts pglite with the postgres wire protocol.
 *
 * pg-gateway handles the startup/auth handshake; after that every frontend
 * message flows through onMessage, where it is either answered synthetically
 * (replication protocol, no-op queries) or rewritten and forwarded to pglite.
 *
 * @param db     the shared pglite instance all connections are multiplexed onto
 * @param config provides pgUser/pgPassword for auth and pgPort to listen on
 * @returns the listening net.Server (promise resolves once listening succeeds)
 */
export async function startPgProxy(
  db: PGlite,
  config: ZeroLiteConfig
): Promise<Server> {
  const server = createServer(async (socket: Socket) => {
    // per-connection state captured by the pg-gateway callbacks below
    let dbName = 'postgres'
    let isReplicationConnection = false

    try {
      const connection = await fromNodeSocket(socket, {
        auth: {
          method: 'password',
          getClearTextPassword() {
            return config.pgPassword
          },
          // accept only the configured user with the configured password
          validateCredentials(credentials: {
            username: string
            password: string
            clearTextPassword: string
          }) {
            return (
              credentials.password ===
                credentials.clearTextPassword &&
              credentials.username === config.pgUser
            )
          },
        },

        async onStartup(state) {
          const params = state.clientParams
          // logical-replication clients send replication=database at startup
          if (params?.replication === 'database') {
            isReplicationConnection = true
          }
          dbName = params?.database || 'postgres'
          console.info(
            `[orez] new connection: db=${dbName} user=${params?.user} replication=${params?.replication || 'none'}`
          )
          await db.waitReady
        },

        async onMessage(data, state) {
          if (!state.isAuthenticated) return

          // handle replication connections
          // NOTE(review): `connection` is referenced here before the enclosing
          // `await fromNodeSocket(...)` has resolved — this assumes pg-gateway
          // never invokes onMessage during the handshake; confirm upstream.
          if (isReplicationConnection) {
            if (data[0] === 0x51) {
              // decode the Simple Query text purely for logging
              const view = new DataView(
                data.buffer,
                data.byteOffset,
                data.byteLength
              )
              const len = view.getInt32(1)
              const query = new TextDecoder()
                .decode(data.subarray(5, 1 + len - 1))
                .replace(/\0$/, '')
              console.info(
                `[orez] repl query: ${query.slice(0, 200)}`
              )
            }
            return handleReplicationMessage(
              data,
              socket,
              db,
              connection
            )
          }

          // check for no-op queries; answer them without touching pglite
          if (isNoopQuery(data)) {
            if (data[0] === 0x51) {
              return buildSetCompleteResponse()
            } else if (data[0] === 0x50) {
              return buildParseCompleteResponse()
            }
          }

          // intercept and rewrite queries
          data = interceptQuery(data)

          // regular query: set search_path based on database name, then forward
          await mutex.acquire()
          try {
            const searchPath =
              DB_SCHEMA_MAP[dbName] || 'public'
            if (currentSearchPath !== searchPath) {
              await db.exec(
                `SET search_path TO ${searchPath}`
              )
              currentSearchPath = searchPath
            }
            let result = await db.execProtocolRaw(data, {
              throwOnError: false,
            })
            // strip ReadyForQuery from non-Sync responses: pglite appends it
            // to every response, real postgres only after Sync (0x53) or a
            // Simple Query (0x51)
            if (data[0] !== 0x53 && data[0] !== 0x51) {
              result = stripReadyForQuery(result)
            }
            return result
          } finally {
            mutex.release()
          }
        },
      })
    } catch (err) {
      // handshake/protocol failure: drop the client connection
      // NOTE(review): the error is swallowed without logging — consider
      // logging `err` here for diagnosability.
      if (!socket.destroyed) {
        socket.destroy()
      }
    }
  })

  return new Promise((resolve, reject) => {
    server.listen(config.pgPort, '127.0.0.1', () => {
      console.info(
        `[orez] pg proxy listening on port ${config.pgPort}`
      )
      resolve(server)
    })
    server.on('error', reject)
  })
}
|
|
409
|
+
|
|
410
|
+
/**
 * handle one frontend message on a replication connection.
 *
 * only Simple Query ('Q', 0x51) messages are acted on; any other message type
 * gets no response (returns undefined).
 *
 * START_REPLICATION detaches the socket from pg-gateway and streams changes
 * directly over it; other queries are first offered to handleReplicationQuery,
 * and fall through to pglite when unrecognized.
 */
async function handleReplicationMessage(
  data: Uint8Array,
  socket: Socket,
  db: PGlite,
  connection: Awaited<ReturnType<typeof fromNodeSocket>>
): Promise<Uint8Array | undefined> {
  if (data[0] !== 0x51) return undefined

  // decode the null-terminated Simple Query payload after the 5-byte header
  const view = new DataView(
    data.buffer,
    data.byteOffset,
    data.byteLength
  )
  const len = view.getInt32(1)
  const query = new TextDecoder()
    .decode(data.subarray(5, 1 + len - 1))
    .replace(/\0$/, '')
  const upper = query.trim().toUpperCase()

  // check if this is a START_REPLICATION command
  if (upper.startsWith('START_REPLICATION')) {
    // take over the raw socket: from here on the replication handler writes
    // the stream itself, bypassing pg-gateway
    await connection.detach()

    const writer = {
      write(chunk: Uint8Array) {
        if (!socket.destroyed) {
          socket.write(chunk)
        }
      },
    }

    // drain incoming standby status updates
    // NOTE(review): client status replies are discarded here, so confirmed
    // flush positions are never observed at this layer — confirm that is
    // handled (or not needed) in ./replication/handler.
    socket.on('data', (_chunk: Buffer) => {})

    socket.on('close', () => {
      socket.destroy()
    })

    // fire-and-forget: the stream lives as long as the socket does
    handleStartReplication(query, writer, db).catch((err) => {
      console.info(
        `[orez] replication stream ended: ${err}`
      )
    })
    return undefined
  }

  // handle other replication queries
  const response = await handleReplicationQuery(query, db)
  if (response) return response

  // fall through to pglite for unrecognized queries, pinning search_path
  // back to public first
  await mutex.acquire()
  try {
    const searchPath = 'public'
    if (currentSearchPath !== searchPath) {
      await db.exec(`SET search_path TO ${searchPath}`)
      currentSearchPath = searchPath
    }
    return await db.execProtocolRaw(data, {
      throwOnError: false,
    })
  } finally {
    mutex.release()
  }
}
|
|
@@ -0,0 +1,105 @@
|
|
|
1
|
+
import { readFileSync, readdirSync, existsSync, mkdirSync } from 'node:fs'
|
|
2
|
+
import { join, resolve } from 'node:path'
|
|
3
|
+
|
|
4
|
+
import { PGlite } from '@electric-sql/pglite'
|
|
5
|
+
|
|
6
|
+
import type { ZeroLiteConfig } from './config'
|
|
7
|
+
|
|
8
|
+
export async function createPGliteInstance(
|
|
9
|
+
config: ZeroLiteConfig
|
|
10
|
+
): Promise<PGlite> {
|
|
11
|
+
const dataPath = resolve(config.dataDir, 'pgdata')
|
|
12
|
+
mkdirSync(dataPath, { recursive: true })
|
|
13
|
+
|
|
14
|
+
console.info(`[orez] creating pglite instance at ${dataPath}`)
|
|
15
|
+
const db = new PGlite(dataPath)
|
|
16
|
+
|
|
17
|
+
await db.waitReady
|
|
18
|
+
console.info('[orez] pglite ready')
|
|
19
|
+
|
|
20
|
+
// create schemas for multi-db simulation
|
|
21
|
+
await db.exec('CREATE SCHEMA IF NOT EXISTS zero_cvr')
|
|
22
|
+
await db.exec('CREATE SCHEMA IF NOT EXISTS zero_cdb')
|
|
23
|
+
|
|
24
|
+
// create publication for zero-cache
|
|
25
|
+
const pubName =
|
|
26
|
+
process.env.ZERO_APP_PUBLICATIONS || 'zero_pub'
|
|
27
|
+
const pubs = await db.query<{ count: string }>(
|
|
28
|
+
`SELECT count(*) as count FROM pg_publication WHERE pubname = $1`,
|
|
29
|
+
[pubName]
|
|
30
|
+
)
|
|
31
|
+
if (Number(pubs.rows[0].count) === 0) {
|
|
32
|
+
const quoted = '"' + pubName.replace(/"/g, '""') + '"'
|
|
33
|
+
await db.exec(`CREATE PUBLICATION ${quoted} FOR ALL TABLES`)
|
|
34
|
+
}
|
|
35
|
+
|
|
36
|
+
return db
|
|
37
|
+
}
|
|
38
|
+
|
|
39
|
+
/**
 * apply drizzle-style sql migrations from config.migrationsDir to pglite.
 *
 * applied migrations are recorded in public.migrations by file name (sans
 * .sql), so each file runs at most once. ordering comes from drizzle's
 * meta/_journal.json when present, otherwise lexicographic file order.
 */
export async function runMigrations(
  db: PGlite,
  config: ZeroLiteConfig
): Promise<void> {
  const migrationsDir = resolve(config.migrationsDir)
  if (!existsSync(migrationsDir)) {
    console.info('[orez] no migrations directory found, skipping')
    return
  }

  // create migrations tracking table
  await db.exec(`
    CREATE TABLE IF NOT EXISTS public.migrations (
      id SERIAL PRIMARY KEY,
      name TEXT NOT NULL UNIQUE,
      applied_at TIMESTAMPTZ DEFAULT NOW()
    )
  `)

  // read drizzle journal for correct migration order
  const journalPath = join(migrationsDir, 'meta', '_journal.json')
  let files: string[]
  if (existsSync(journalPath)) {
    // NOTE(review): the journal shape is trusted — the JSON.parse result is
    // used without validation beyond the entries/tag access.
    const journal = JSON.parse(readFileSync(journalPath, 'utf-8'))
    files = journal.entries.map(
      (e: { tag: string }) => `${e.tag}.sql`
    )
  } else {
    files = readdirSync(migrationsDir)
      .filter((f) => f.endsWith('.sql'))
      .sort()
  }

  for (const file of files) {
    const name = file.replace(/\.sql$/, '')

    // check if already applied
    const result = await db.query<{ count: string }>(
      'SELECT count(*) as count FROM public.migrations WHERE name = $1',
      [name]
    )
    if (Number(result.rows[0].count) > 0) {
      continue
    }

    console.info(`[orez] applying migration: ${name}`)
    const sql = readFileSync(join(migrationsDir, file), 'utf-8')

    // split by drizzle's statement-breakpoint marker
    const statements = sql
      .split('--> statement-breakpoint')
      .map((s) => s.trim())
      .filter(Boolean)

    // NOTE(review): statements are not wrapped in a transaction, so a failing
    // statement can leave this migration partially applied and unrecorded —
    // confirm whether transactional application is wanted here.
    for (const stmt of statements) {
      await db.exec(stmt)
    }

    await db.query(
      'INSERT INTO public.migrations (name) VALUES ($1)',
      [name]
    )
    console.info(`[orez] applied migration: ${name}`)
  }

  console.info('[orez] migrations complete')
}
|