prisma-pglite-bridge 0.3.0 → 0.3.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.cjs CHANGED
@@ -1,617 +1,679 @@
1
- "use strict";
1
+ Object.defineProperty(exports, Symbol.toStringTag, { value: "Module" });
2
+ //#region \0rolldown/runtime.js
2
3
  var __create = Object.create;
3
4
  var __defProp = Object.defineProperty;
4
5
  var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
5
6
  var __getOwnPropNames = Object.getOwnPropertyNames;
6
7
  var __getProtoOf = Object.getPrototypeOf;
7
8
  var __hasOwnProp = Object.prototype.hasOwnProperty;
8
- var __export = (target, all) => {
9
- for (var name in all)
10
- __defProp(target, name, { get: all[name], enumerable: true });
11
- };
12
9
  var __copyProps = (to, from, except, desc) => {
13
- if (from && typeof from === "object" || typeof from === "function") {
14
- for (let key of __getOwnPropNames(from))
15
- if (!__hasOwnProp.call(to, key) && key !== except)
16
- __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
17
- }
18
- return to;
10
+ if (from && typeof from === "object" || typeof from === "function") for (var keys = __getOwnPropNames(from), i = 0, n = keys.length, key; i < n; i++) {
11
+ key = keys[i];
12
+ if (!__hasOwnProp.call(to, key) && key !== except) __defProp(to, key, {
13
+ get: ((k) => from[k]).bind(null, key),
14
+ enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable
15
+ });
16
+ }
17
+ return to;
19
18
  };
20
- var __toESM = (mod, isNodeMode, target) => (target = mod != null ? __create(__getProtoOf(mod)) : {}, __copyProps(
21
- // If the importer is in node compatibility mode or this is not an ESM
22
- // file that has been converted to a CommonJS file using a Babel-
23
- // compatible transform (i.e. "__esModule" has not been set), then set
24
- // "default" to the CommonJS "module.exports" for node compatibility.
25
- isNodeMode || !mod || !mod.__esModule ? __defProp(target, "default", { value: mod, enumerable: true }) : target,
26
- mod
27
- ));
28
- var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
29
-
30
- // src/index.ts
31
- var index_exports = {};
32
- __export(index_exports, {
33
- PGliteBridge: () => PGliteBridge,
34
- createPgliteAdapter: () => createPgliteAdapter,
35
- createPool: () => createPool
36
- });
37
- module.exports = __toCommonJS(index_exports);
38
-
39
- // src/pglite-bridge.ts
40
- var import_node_stream = require("stream");
41
-
42
- // src/session-lock.ts
43
- var STATUS_IDLE = 73;
44
- var STATUS_IN_TRANSACTION = 84;
45
- var STATUS_FAILED = 69;
46
- var createBridgeId = () => /* @__PURE__ */ Symbol("bridge");
47
- var extractRfqStatus = (response) => {
48
- if (response.length < 6) return null;
49
- const i = response.length - 6;
50
- if (response[i] === 90 && response[i + 1] === 0 && response[i + 2] === 0 && response[i + 3] === 0 && response[i + 4] === 5) {
51
- return response[i + 5] ?? null;
52
- }
53
- return null;
19
+ var __toESM = (mod, isNodeMode, target) => (target = mod != null ? __create(__getProtoOf(mod)) : {}, __copyProps(isNodeMode || !mod || !mod.__esModule ? __defProp(target, "default", {
20
+ value: mod,
21
+ enumerable: true
22
+ }) : target, mod));
23
+ //#endregion
24
+ let node_fs = require("node:fs");
25
+ let node_path = require("node:path");
26
+ let _prisma_adapter_pg = require("@prisma/adapter-pg");
27
+ let _electric_sql_pglite = require("@electric-sql/pglite");
28
+ let pg = require("pg");
29
+ pg = __toESM(pg, 1);
30
+ let node_stream = require("node:stream");
31
+ //#region src/session-lock.ts
32
+ /**
33
+ * Session-level lock for PGlite's single-session model.
34
+ *
35
+ * PGlite runs PostgreSQL in single-user mode — one session shared by all
36
+ * bridges. runExclusive serializes individual operations, but transactions
37
+ * span multiple operations. Without session-level locking, Bridge A's BEGIN
38
+ * and Bridge B's query interleave, corrupting transaction boundaries.
39
+ *
40
+ * The session lock tracks which bridge owns the session. When PGlite enters
41
+ * transaction state (ReadyForQuery status 'T' or 'E'), the owning bridge
42
+ * gets exclusive access until the transaction completes (status returns to 'I').
43
+ *
44
+ * Non-transactional operations from any bridge are allowed when no transaction
45
+ * is active — they serialize naturally through runExclusive.
46
+ */
47
+ const STATUS_IDLE = 73;
48
+ const STATUS_IN_TRANSACTION = 84;
49
+ const STATUS_FAILED = 69;
50
+ const createBridgeId = () => Symbol("bridge");
51
+ /**
52
+ * Extracts the ReadyForQuery status byte from a response buffer.
53
+ * Scans from the end since RFQ is always the last message.
54
+ * Returns null if no RFQ found.
55
+ */
56
+ const extractRfqStatus = (response) => {
57
+ if (response.length < 6) return null;
58
+ const i = response.length - 6;
59
+ if (response[i] === 90 && response[i + 1] === 0 && response[i + 2] === 0 && response[i + 3] === 0 && response[i + 4] === 5) return response[i + 5] ?? null;
60
+ return null;
54
61
  };
55
62
  var SessionLock = class {
56
- owner = null;
57
- waitQueue = [];
58
- /**
59
- * Acquire access to PGlite. Resolves immediately if no transaction is
60
- * active or if this bridge owns the current transaction. Queues otherwise.
61
- */
62
- async acquire(id) {
63
- if (this.owner === null || this.owner === id) return;
64
- return new Promise((resolve) => {
65
- this.waitQueue.push({ id, resolve });
66
- });
67
- }
68
- /**
69
- * Update session state based on the ReadyForQuery status byte.
70
- * Call after every PGlite response that contains RFQ.
71
- */
72
- updateStatus(id, status) {
73
- if (status === STATUS_IN_TRANSACTION || status === STATUS_FAILED) {
74
- this.owner = id;
75
- } else if (status === STATUS_IDLE) {
76
- if (this.owner === id) {
77
- this.owner = null;
78
- this.drainWaitQueue();
79
- }
80
- }
81
- }
82
- /**
83
- * Release ownership (e.g., when a bridge is destroyed mid-transaction).
84
- */
85
- release(id) {
86
- if (this.owner === id) {
87
- this.owner = null;
88
- this.drainWaitQueue();
89
- }
90
- }
91
- drainWaitQueue() {
92
- const waiters = this.waitQueue;
93
- this.waitQueue = [];
94
- for (const waiter of waiters) {
95
- waiter.resolve();
96
- }
97
- }
63
+ owner = null;
64
+ waitQueue = [];
65
+ /**
66
+ * Acquire access to PGlite. Resolves immediately if no transaction is
67
+ * active or if this bridge owns the current transaction. Queues otherwise.
68
+ */
69
+ async acquire(id) {
70
+ if (this.owner === null || this.owner === id) return;
71
+ return new Promise((resolve) => {
72
+ this.waitQueue.push({
73
+ id,
74
+ resolve
75
+ });
76
+ });
77
+ }
78
+ /**
79
+ * Update session state based on the ReadyForQuery status byte.
80
+ * Call after every PGlite response that contains RFQ.
81
+ */
82
+ updateStatus(id, status) {
83
+ if (status === STATUS_IN_TRANSACTION || status === STATUS_FAILED) this.owner = id;
84
+ else if (status === STATUS_IDLE) {
85
+ if (this.owner === id) {
86
+ this.owner = null;
87
+ this.drainWaitQueue();
88
+ }
89
+ }
90
+ }
91
+ /**
92
+ * Release ownership (e.g., when a bridge is destroyed mid-transaction).
93
+ */
94
+ release(id) {
95
+ if (this.owner === id) {
96
+ this.owner = null;
97
+ this.drainWaitQueue();
98
+ }
99
+ }
100
+ drainWaitQueue() {
101
+ const waiters = this.waitQueue;
102
+ this.waitQueue = [];
103
+ for (const waiter of waiters) waiter.resolve();
104
+ }
98
105
  };
99
-
100
- // src/pglite-bridge.ts
101
- var PARSE = 80;
102
- var BIND = 66;
103
- var DESCRIBE = 68;
104
- var EXECUTE = 69;
105
- var CLOSE = 67;
106
- var FLUSH = 72;
107
- var SYNC = 83;
108
- var TERMINATE = 88;
109
- var READY_FOR_QUERY = 90;
110
- var EQP_MESSAGES = /* @__PURE__ */ new Set([PARSE, BIND, DESCRIBE, EXECUTE, CLOSE, FLUSH]);
111
- var stripIntermediateReadyForQuery = (response) => {
112
- const rfqPositions = [];
113
- let offset = 0;
114
- while (offset < response.length) {
115
- if (offset + 5 >= response.length) break;
116
- if (response[offset] === READY_FOR_QUERY && response[offset + 1] === 0 && response[offset + 2] === 0 && response[offset + 3] === 0 && response[offset + 4] === 5) {
117
- rfqPositions.push(offset);
118
- offset += 6;
119
- } else {
120
- const b1 = response[offset + 1];
121
- const b2 = response[offset + 2];
122
- const b3 = response[offset + 3];
123
- const b4 = response[offset + 4];
124
- if (b1 === void 0 || b2 === void 0 || b3 === void 0 || b4 === void 0) break;
125
- const msgLen = (b1 << 24 | b2 << 16 | b3 << 8 | b4) >>> 0;
126
- if (msgLen < 4) break;
127
- offset += 1 + msgLen;
128
- }
129
- }
130
- if (rfqPositions.length <= 1) return response;
131
- const removeCount = rfqPositions.length - 1;
132
- const resultLen = response.length - removeCount * 6;
133
- const result = new Uint8Array(resultLen);
134
- let src = 0;
135
- let dst = 0;
136
- let removeIdx = 0;
137
- while (src < response.length) {
138
- const nextRemove = removeIdx < removeCount ? rfqPositions[removeIdx] ?? response.length : response.length;
139
- if (src < nextRemove) {
140
- const copyLen = nextRemove - src;
141
- result.set(response.subarray(src, src + copyLen), dst);
142
- dst += copyLen;
143
- src += copyLen;
144
- }
145
- if (removeIdx < removeCount && src === rfqPositions[removeIdx]) {
146
- src += 6;
147
- removeIdx++;
148
- }
149
- }
150
- return result;
106
+ //#endregion
107
+ //#region src/pglite-bridge.ts
108
+ /**
109
+ * PGlite bridge stream.
110
+ *
111
+ * A Duplex stream that replaces the TCP socket in pg.Client, routing
112
+ * wire protocol messages directly to an in-process PGlite instance.
113
+ *
114
+ * pg.Client writes wire protocol bytes → bridge frames messages →
115
+ * PGlite processes via execProtocolRawStream → bridge pushes responses back.
116
+ *
117
+ * Extended Query Protocol pipelines (Parse→Bind→Describe→Execute→Sync) are
118
+ * concatenated into a single buffer and sent as one atomic execProtocolRawStream
119
+ * call within one runExclusive. This prevents portal interleaving between
120
+ * concurrent bridges AND reduces async overhead (1 WASM call instead of 5).
121
+ *
122
+ * The response from a batched pipeline contains spurious ReadyForQuery messages
123
+ * after each sub-message (PGlite's single-user mode). These are stripped,
124
+ * keeping only the final ReadyForQuery after Sync.
125
+ */
126
+ const PARSE = 80;
127
+ const BIND = 66;
128
+ const DESCRIBE = 68;
129
+ const EXECUTE = 69;
130
+ const CLOSE = 67;
131
+ const FLUSH = 72;
132
+ const SYNC = 83;
133
+ const TERMINATE = 88;
134
+ const READY_FOR_QUERY = 90;
135
+ const EQP_MESSAGES = new Set([
136
+ PARSE,
137
+ BIND,
138
+ DESCRIBE,
139
+ EXECUTE,
140
+ CLOSE,
141
+ FLUSH
142
+ ]);
143
+ /**
144
+ * Strips all intermediate ReadyForQuery messages from a response, keeping
145
+ * only the last one. PGlite's single-user mode emits RFQ after every
146
+ * sub-message; pg.Client expects exactly one after Sync.
147
+ *
148
+ * Operates in-place on the response by building a list of byte ranges to
149
+ * keep, then assembling the result. Returns the original buffer (no copy)
150
+ * if there are 0 or 1 RFQ messages.
151
+ */
152
+ /** @internal exported for testing only */
153
+ const stripIntermediateReadyForQuery = (response) => {
154
+ const rfqPositions = [];
155
+ let offset = 0;
156
+ while (offset < response.length) {
157
+ if (offset + 5 >= response.length) break;
158
+ if (response[offset] === READY_FOR_QUERY && response[offset + 1] === 0 && response[offset + 2] === 0 && response[offset + 3] === 0 && response[offset + 4] === 5) {
159
+ rfqPositions.push(offset);
160
+ offset += 6;
161
+ } else {
162
+ const b1 = response[offset + 1];
163
+ const b2 = response[offset + 2];
164
+ const b3 = response[offset + 3];
165
+ const b4 = response[offset + 4];
166
+ if (b1 === void 0 || b2 === void 0 || b3 === void 0 || b4 === void 0) break;
167
+ const msgLen = (b1 << 24 | b2 << 16 | b3 << 8 | b4) >>> 0;
168
+ if (msgLen < 4) break;
169
+ offset += 1 + msgLen;
170
+ }
171
+ }
172
+ if (rfqPositions.length <= 1) return response;
173
+ const removeCount = rfqPositions.length - 1;
174
+ const resultLen = response.length - removeCount * 6;
175
+ const result = new Uint8Array(resultLen);
176
+ let src = 0;
177
+ let dst = 0;
178
+ let removeIdx = 0;
179
+ while (src < response.length) {
180
+ const nextRemove = removeIdx < removeCount ? rfqPositions[removeIdx] ?? response.length : response.length;
181
+ if (src < nextRemove) {
182
+ const copyLen = nextRemove - src;
183
+ result.set(response.subarray(src, src + copyLen), dst);
184
+ dst += copyLen;
185
+ src += copyLen;
186
+ }
187
+ if (removeIdx < removeCount && src === rfqPositions[removeIdx]) {
188
+ src += 6;
189
+ removeIdx++;
190
+ }
191
+ }
192
+ return result;
151
193
  };
152
- var concat = (parts) => {
153
- if (parts.length === 1) return parts[0] ?? new Uint8Array(0);
154
- const total = parts.reduce((sum, p) => sum + p.length, 0);
155
- const result = new Uint8Array(total);
156
- let offset = 0;
157
- for (const part of parts) {
158
- result.set(part, offset);
159
- offset += part.length;
160
- }
161
- return result;
194
+ /**
195
+ * Concatenates multiple Uint8Array views into one contiguous buffer.
196
+ */
197
+ const concat = (parts) => {
198
+ if (parts.length === 1) return parts[0] ?? new Uint8Array(0);
199
+ const total = parts.reduce((sum, p) => sum + p.length, 0);
200
+ const result = new Uint8Array(total);
201
+ let offset = 0;
202
+ for (const part of parts) {
203
+ result.set(part, offset);
204
+ offset += part.length;
205
+ }
206
+ return result;
162
207
  };
163
- var PGliteBridge = class extends import_node_stream.Duplex {
164
- pglite;
165
- sessionLock;
166
- bridgeId;
167
- /** Incoming bytes not yet compacted into buf */
168
- pending = [];
169
- pendingLen = 0;
170
- /** Compacted input buffer for message framing */
171
- buf = Buffer.alloc(0);
172
- phase = "pre_startup";
173
- draining = false;
174
- tornDown = false;
175
- /** Callbacks waiting for drain to process their data */
176
- drainQueue = [];
177
- /** Buffered EQP messages awaiting Sync */
178
- pipeline = [];
179
- pipelineLen = 0;
180
- constructor(pglite, sessionLock) {
181
- super();
182
- this.pglite = pglite;
183
- this.sessionLock = sessionLock ?? null;
184
- this.bridgeId = createBridgeId();
185
- }
186
- // ── Socket compatibility (called by pg's Connection) ──
187
- connect() {
188
- setImmediate(() => this.emit("connect"));
189
- return this;
190
- }
191
- setKeepAlive() {
192
- return this;
193
- }
194
- setNoDelay() {
195
- return this;
196
- }
197
- setTimeout() {
198
- return this;
199
- }
200
- ref() {
201
- return this;
202
- }
203
- unref() {
204
- return this;
205
- }
206
- // ── Duplex implementation ──
207
- _read() {
208
- }
209
- _write(chunk, _encoding, callback) {
210
- this.pending.push(chunk);
211
- this.pendingLen += chunk.length;
212
- this.enqueue(callback);
213
- }
214
- /** Handles corked batches — pg.Client corks during prepared queries (P+B+D+E+S) */
215
- _writev(chunks, callback) {
216
- for (const { chunk } of chunks) {
217
- this.pending.push(chunk);
218
- this.pendingLen += chunk.length;
219
- }
220
- this.enqueue(callback);
221
- }
222
- _final(callback) {
223
- this.sessionLock?.release(this.bridgeId);
224
- this.push(null);
225
- callback();
226
- }
227
- _destroy(error, callback) {
228
- this.tornDown = true;
229
- this.pipeline.length = 0;
230
- this.pipelineLen = 0;
231
- this.pending.length = 0;
232
- this.pendingLen = 0;
233
- this.sessionLock?.release(this.bridgeId);
234
- const callbacks = this.drainQueue;
235
- this.drainQueue = [];
236
- for (const cb of callbacks) {
237
- cb(error);
238
- }
239
- callback(error);
240
- }
241
- // ── Message processing ──
242
- /** Merge pending chunks into buf only when needed for framing */
243
- compact() {
244
- if (this.pending.length === 0) return;
245
- if (this.buf.length === 0 && this.pending.length === 1) {
246
- this.buf = this.pending[0];
247
- } else {
248
- this.buf = Buffer.concat([this.buf, ...this.pending]);
249
- }
250
- this.pending.length = 0;
251
- this.pendingLen = 0;
252
- }
253
- /**
254
- * Enqueue a write callback and start draining if not already running.
255
- * The callback is NOT called until drain has processed the data.
256
- */
257
- enqueue(callback) {
258
- this.drainQueue.push(callback);
259
- if (!this.draining) {
260
- this.drain().catch(() => {
261
- });
262
- }
263
- }
264
- /**
265
- * Process all pending data, looping until no new data arrives.
266
- * Fires all queued callbacks on completion or error.
267
- */
268
- async drain() {
269
- if (this.draining) return;
270
- this.draining = true;
271
- let error = null;
272
- try {
273
- while (this.pending.length > 0 || this.buf.length > 0) {
274
- if (this.tornDown) break;
275
- if (this.phase === "pre_startup") {
276
- await this.processPreStartup();
277
- }
278
- if (this.phase === "ready") {
279
- await this.processMessages();
280
- }
281
- if (this.pending.length === 0) break;
282
- }
283
- } catch (err) {
284
- error = err instanceof Error ? err : new Error(String(err));
285
- this.sessionLock?.release(this.bridgeId);
286
- } finally {
287
- this.draining = false;
288
- const callbacks = this.drainQueue;
289
- this.drainQueue = [];
290
- for (const cb of callbacks) {
291
- cb(error);
292
- }
293
- }
294
- }
295
- /**
296
- * Frames and processes the startup message.
297
- *
298
- * Format: [4 bytes: total length] [4 bytes: protocol version] [key\0value\0 pairs]
299
- * No type byte — length includes itself.
300
- */
301
- async processPreStartup() {
302
- this.compact();
303
- if (this.buf.length < 4) return;
304
- const len = this.buf.readInt32BE(0);
305
- if (this.buf.length < len) return;
306
- const message = this.buf.subarray(0, len);
307
- this.buf = this.buf.subarray(len);
308
- await this.acquireSession();
309
- await this.pglite.runExclusive(async () => {
310
- await this.execAndPush(message);
311
- });
312
- this.phase = "ready";
313
- }
314
- /**
315
- * Frames and processes regular wire protocol messages.
316
- *
317
- * Extended Query Protocol messages (Parse, Bind, Describe, Execute, Close,
318
- * Flush) are buffered in `this.pipeline`. When Sync arrives, the entire
319
- * pipeline is concatenated and sent to PGlite as one atomic
320
- * execProtocolRawStream call within one runExclusive.
321
- *
322
- * SimpleQuery messages are sent directly (they're self-contained).
323
- */
324
- async processMessages() {
325
- this.compact();
326
- while (this.buf.length >= 5) {
327
- const len = 1 + this.buf.readInt32BE(1);
328
- if (len < 5 || this.buf.length < len) break;
329
- const message = this.buf.subarray(0, len);
330
- this.buf = this.buf.subarray(len);
331
- const msgType = message[0] ?? 0;
332
- if (msgType === TERMINATE) {
333
- this.sessionLock?.release(this.bridgeId);
334
- this.push(null);
335
- return;
336
- }
337
- if (EQP_MESSAGES.has(msgType)) {
338
- this.pipeline.push(message);
339
- this.pipelineLen += message.length;
340
- continue;
341
- }
342
- if (msgType === SYNC) {
343
- this.pipeline.push(message);
344
- this.pipelineLen += message.length;
345
- await this.flushPipeline();
346
- continue;
347
- }
348
- await this.acquireSession();
349
- await this.pglite.runExclusive(async () => {
350
- await this.execAndPush(message);
351
- });
352
- }
353
- }
354
- /**
355
- * Sends the accumulated EQP pipeline as one atomic operation.
356
- *
357
- * All buffered messages are concatenated into a single buffer and sent
358
- * as one execProtocolRawStream call. This is both correct (prevents
359
- * portal interleaving) and fast (1 WASM call + 1 async boundary instead
360
- * of 5). Intermediate ReadyForQuery messages are stripped from the
361
- * combined response.
362
- */
363
- async flushPipeline() {
364
- const messages = this.pipeline;
365
- const totalLen = this.pipelineLen;
366
- this.pipeline = [];
367
- this.pipelineLen = 0;
368
- let batch;
369
- if (messages.length === 1) {
370
- batch = messages[0] ?? new Uint8Array(0);
371
- } else {
372
- batch = new Uint8Array(totalLen);
373
- let offset = 0;
374
- for (const msg of messages) {
375
- batch.set(msg, offset);
376
- offset += msg.length;
377
- }
378
- }
379
- await this.acquireSession();
380
- await this.pglite.runExclusive(async () => {
381
- const chunks = [];
382
- await this.pglite.execProtocolRawStream(batch, {
383
- onRawData: (chunk) => chunks.push(chunk)
384
- });
385
- if (this.tornDown || chunks.length === 0) return;
386
- if (chunks.length === 1) {
387
- const raw = chunks[0] ?? new Uint8Array(0);
388
- this.trackSessionStatus(raw);
389
- const cleaned2 = stripIntermediateReadyForQuery(raw);
390
- if (cleaned2.length > 0) this.push(cleaned2);
391
- return;
392
- }
393
- const combined = concat(chunks);
394
- this.trackSessionStatus(combined);
395
- const cleaned = stripIntermediateReadyForQuery(combined);
396
- if (cleaned.length > 0) this.push(cleaned);
397
- });
398
- }
399
- /**
400
- * Sends a message to PGlite and pushes response chunks directly to the
401
- * stream as they arrive. Avoids collecting and concatenating for large
402
- * multi-row responses (e.g., findMany 500 rows = ~503 onRawData chunks).
403
- *
404
- * Must be called inside runExclusive.
405
- */
406
- async execAndPush(message) {
407
- let lastChunk = null;
408
- await this.pglite.execProtocolRawStream(message, {
409
- onRawData: (chunk) => {
410
- if (!this.tornDown && chunk.length > 0) {
411
- this.push(chunk);
412
- lastChunk = chunk;
413
- }
414
- }
415
- });
416
- if (lastChunk) this.trackSessionStatus(lastChunk);
417
- }
418
- // ── Session lock helpers ──
419
- async acquireSession() {
420
- await this.sessionLock?.acquire(this.bridgeId);
421
- }
422
- trackSessionStatus(response) {
423
- if (!this.sessionLock) return;
424
- const status = extractRfqStatus(response);
425
- if (status !== null) {
426
- this.sessionLock.updateStatus(this.bridgeId, status);
427
- }
428
- }
208
+ /**
209
+ * Duplex stream that bridges `pg.Client` to an in-process PGlite instance.
210
+ *
211
+ * Replaces the TCP socket in `pg.Client` via the `stream` option. Speaks
212
+ * PostgreSQL wire protocol directly to PGlite no TCP, no serialization
213
+ * overhead beyond what the wire protocol requires.
214
+ *
215
+ * Pass to `pg.Client` or use via `createPool()` / `createPgliteAdapter()`:
216
+ *
217
+ * ```typescript
218
+ * const client = new pg.Client({
219
+ * stream: () => new PGliteBridge(pglite),
220
+ * });
221
+ * ```
222
+ */
223
+ var PGliteBridge = class extends node_stream.Duplex {
224
+ pglite;
225
+ sessionLock;
226
+ bridgeId;
227
+ /** Incoming bytes not yet compacted into buf */
228
+ pending = [];
229
+ pendingLen = 0;
230
+ /** Compacted input buffer for message framing */
231
+ buf = Buffer.alloc(0);
232
+ phase = "pre_startup";
233
+ draining = false;
234
+ tornDown = false;
235
+ /** Callbacks waiting for drain to process their data */
236
+ drainQueue = [];
237
+ /** Buffered EQP messages awaiting Sync */
238
+ pipeline = [];
239
+ pipelineLen = 0;
240
+ constructor(pglite, sessionLock) {
241
+ super();
242
+ this.pglite = pglite;
243
+ this.sessionLock = sessionLock ?? null;
244
+ this.bridgeId = createBridgeId();
245
+ }
246
+ connect() {
247
+ setImmediate(() => this.emit("connect"));
248
+ return this;
249
+ }
250
+ setKeepAlive() {
251
+ return this;
252
+ }
253
+ setNoDelay() {
254
+ return this;
255
+ }
256
+ setTimeout() {
257
+ return this;
258
+ }
259
+ ref() {
260
+ return this;
261
+ }
262
+ unref() {
263
+ return this;
264
+ }
265
+ _read() {}
266
+ _write(chunk, _encoding, callback) {
267
+ this.pending.push(chunk);
268
+ this.pendingLen += chunk.length;
269
+ this.enqueue(callback);
270
+ }
271
+ /** Handles corked batches — pg.Client corks during prepared queries (P+B+D+E+S) */
272
+ _writev(chunks, callback) {
273
+ for (const { chunk } of chunks) {
274
+ this.pending.push(chunk);
275
+ this.pendingLen += chunk.length;
276
+ }
277
+ this.enqueue(callback);
278
+ }
279
+ _final(callback) {
280
+ this.sessionLock?.release(this.bridgeId);
281
+ this.push(null);
282
+ callback();
283
+ }
284
+ _destroy(error, callback) {
285
+ this.tornDown = true;
286
+ this.pipeline.length = 0;
287
+ this.pipelineLen = 0;
288
+ this.pending.length = 0;
289
+ this.pendingLen = 0;
290
+ this.sessionLock?.release(this.bridgeId);
291
+ const callbacks = this.drainQueue;
292
+ this.drainQueue = [];
293
+ for (const cb of callbacks) cb(error);
294
+ callback(error);
295
+ }
296
+ /** Merge pending chunks into buf only when needed for framing */
297
+ compact() {
298
+ if (this.pending.length === 0) return;
299
+ if (this.buf.length === 0 && this.pending.length === 1) this.buf = this.pending[0];
300
+ else this.buf = Buffer.concat([this.buf, ...this.pending]);
301
+ this.pending.length = 0;
302
+ this.pendingLen = 0;
303
+ }
304
+ /**
305
+ * Enqueue a write callback and start draining if not already running.
306
+ * The callback is NOT called until drain has processed the data.
307
+ */
308
+ enqueue(callback) {
309
+ this.drainQueue.push(callback);
310
+ if (!this.draining) this.drain().catch(() => {});
311
+ }
312
+ /**
313
+ * Process all pending data, looping until no new data arrives.
314
+ * Fires all queued callbacks on completion or error.
315
+ */
316
+ async drain() {
317
+ if (this.draining) return;
318
+ this.draining = true;
319
+ let error = null;
320
+ try {
321
+ while (this.pending.length > 0 || this.buf.length > 0) {
322
+ if (this.tornDown) break;
323
+ if (this.phase === "pre_startup") await this.processPreStartup();
324
+ if (this.phase === "ready") await this.processMessages();
325
+ if (this.pending.length === 0) break;
326
+ }
327
+ } catch (err) {
328
+ error = err instanceof Error ? err : new Error(String(err));
329
+ this.sessionLock?.release(this.bridgeId);
330
+ } finally {
331
+ this.draining = false;
332
+ const callbacks = this.drainQueue;
333
+ this.drainQueue = [];
334
+ for (const cb of callbacks) cb(error);
335
+ }
336
+ }
337
+ /**
338
+ * Frames and processes the startup message.
339
+ *
340
+ * Format: [4 bytes: total length] [4 bytes: protocol version] [key\0value\0 pairs]
341
+ * No type byte length includes itself.
342
+ */
343
+ async processPreStartup() {
344
+ this.compact();
345
+ if (this.buf.length < 4) return;
346
+ const len = this.buf.readInt32BE(0);
347
+ if (this.buf.length < len) return;
348
+ const message = this.buf.subarray(0, len);
349
+ this.buf = this.buf.subarray(len);
350
+ await this.acquireSession();
351
+ await this.pglite.runExclusive(async () => {
352
+ await this.execAndPush(message);
353
+ });
354
+ this.phase = "ready";
355
+ }
356
+ /**
357
+ * Frames and processes regular wire protocol messages.
358
+ *
359
+ * Extended Query Protocol messages (Parse, Bind, Describe, Execute, Close,
360
+ * Flush) are buffered in `this.pipeline`. When Sync arrives, the entire
361
+ * pipeline is concatenated and sent to PGlite as one atomic
362
+ * execProtocolRawStream call within one runExclusive.
363
+ *
364
+ * SimpleQuery messages are sent directly (they're self-contained).
365
+ */
366
+ async processMessages() {
367
+ this.compact();
368
+ while (this.buf.length >= 5) {
369
+ const len = 1 + this.buf.readInt32BE(1);
370
+ if (len < 5 || this.buf.length < len) break;
371
+ const message = this.buf.subarray(0, len);
372
+ this.buf = this.buf.subarray(len);
373
+ const msgType = message[0] ?? 0;
374
+ if (msgType === TERMINATE) {
375
+ this.sessionLock?.release(this.bridgeId);
376
+ this.push(null);
377
+ return;
378
+ }
379
+ if (EQP_MESSAGES.has(msgType)) {
380
+ this.pipeline.push(message);
381
+ this.pipelineLen += message.length;
382
+ continue;
383
+ }
384
+ if (msgType === SYNC) {
385
+ this.pipeline.push(message);
386
+ this.pipelineLen += message.length;
387
+ await this.flushPipeline();
388
+ continue;
389
+ }
390
+ await this.acquireSession();
391
+ await this.pglite.runExclusive(async () => {
392
+ await this.execAndPush(message);
393
+ });
394
+ }
395
+ }
396
+ /**
397
+ * Sends the accumulated EQP pipeline as one atomic operation.
398
+ *
399
+ * All buffered messages are concatenated into a single buffer and sent
400
+ * as one execProtocolRawStream call. This is both correct (prevents
401
+ * portal interleaving) and fast (1 WASM call + 1 async boundary instead
402
+ * of 5). Intermediate ReadyForQuery messages are stripped from the
403
+ * combined response.
404
+ */
405
+ async flushPipeline() {
406
+ const messages = this.pipeline;
407
+ const totalLen = this.pipelineLen;
408
+ this.pipeline = [];
409
+ this.pipelineLen = 0;
410
+ let batch;
411
+ if (messages.length === 1) batch = messages[0] ?? new Uint8Array(0);
412
+ else {
413
+ batch = new Uint8Array(totalLen);
414
+ let offset = 0;
415
+ for (const msg of messages) {
416
+ batch.set(msg, offset);
417
+ offset += msg.length;
418
+ }
419
+ }
420
+ await this.acquireSession();
421
+ await this.pglite.runExclusive(async () => {
422
+ const chunks = [];
423
+ await this.pglite.execProtocolRawStream(batch, { onRawData: (chunk) => chunks.push(chunk) });
424
+ if (this.tornDown || chunks.length === 0) return;
425
+ if (chunks.length === 1) {
426
+ const raw = chunks[0] ?? new Uint8Array(0);
427
+ this.trackSessionStatus(raw);
428
+ const cleaned = stripIntermediateReadyForQuery(raw);
429
+ if (cleaned.length > 0) this.push(cleaned);
430
+ return;
431
+ }
432
+ const combined = concat(chunks);
433
+ this.trackSessionStatus(combined);
434
+ const cleaned = stripIntermediateReadyForQuery(combined);
435
+ if (cleaned.length > 0) this.push(cleaned);
436
+ });
437
+ }
438
+ /**
439
+ * Sends a message to PGlite and pushes response chunks directly to the
440
+ * stream as they arrive. Avoids collecting and concatenating for large
441
+ * multi-row responses (e.g., findMany 500 rows = ~503 onRawData chunks).
442
+ *
443
+ * Must be called inside runExclusive.
444
+ */
445
+ async execAndPush(message) {
446
+ let lastChunk = null;
447
+ await this.pglite.execProtocolRawStream(message, { onRawData: (chunk) => {
448
+ if (!this.tornDown && chunk.length > 0) {
449
+ this.push(chunk);
450
+ lastChunk = chunk;
451
+ }
452
+ } });
453
+ if (lastChunk) this.trackSessionStatus(lastChunk);
454
+ }
455
+ async acquireSession() {
456
+ await this.sessionLock?.acquire(this.bridgeId);
457
+ }
458
+ trackSessionStatus(response) {
459
+ if (!this.sessionLock) return;
460
+ const status = extractRfqStatus(response);
461
+ if (status !== null) this.sessionLock.updateStatus(this.bridgeId, status);
462
+ }
429
463
  };
430
-
431
- // src/create-pool.ts
432
- var import_pglite = require("@electric-sql/pglite");
433
- var import_pg = __toESM(require("pg"), 1);
434
- var { Client, Pool } = import_pg.default;
435
- var createPool = async (options = {}) => {
436
- const { dataDir, extensions, max = 5 } = options;
437
- const ownsInstance = !options.pglite;
438
- const pglite = options.pglite ?? new import_pglite.PGlite(dataDir, extensions ? { extensions } : void 0);
439
- await pglite.waitReady;
440
- const sessionLock = new SessionLock();
441
- const BridgedClient = class extends Client {
442
- constructor(config) {
443
- const cfg = typeof config === "string" ? { connectionString: config } : config ?? {};
444
- super({
445
- ...cfg,
446
- user: "postgres",
447
- database: "postgres",
448
- stream: (() => new PGliteBridge(pglite, sessionLock))
449
- });
450
- }
451
- };
452
- const pool = new Pool({
453
- Client: BridgedClient,
454
- max
455
- });
456
- const close = async () => {
457
- await pool.end();
458
- if (ownsInstance) {
459
- await pglite.close();
460
- }
461
- };
462
- return { pool, pglite, close };
464
+ //#endregion
465
+ //#region src/create-pool.ts
466
+ /**
467
+ * Pool factory — creates a pg.Pool backed by an in-process PGlite instance.
468
+ *
469
+ * Each pool connection gets its own PGliteBridge stream, all sharing the
470
+ * same PGlite WASM instance and SessionLock. The session lock ensures
471
+ * transaction isolation: when one bridge starts a transaction (BEGIN),
472
+ * it gets exclusive PGlite access until COMMIT/ROLLBACK. Non-transactional
473
+ * operations from any bridge serialize through PGlite's runExclusive mutex.
474
+ */
475
+ const { Client, Pool } = pg.default;
476
+ /**
477
+ * Creates a pg.Pool where every connection is an in-process PGlite bridge.
478
+ *
479
+ * ```typescript
480
+ * import { createPool } from 'prisma-pglite-bridge';
481
+ * import { PrismaPg } from '@prisma/adapter-pg';
482
+ * import { PrismaClient } from '@prisma/client';
483
+ *
484
+ * const { pool, close } = await createPool();
485
+ * const adapter = new PrismaPg(pool);
486
+ * const prisma = new PrismaClient({ adapter });
487
+ * ```
488
+ */
489
+ const createPool = async (options = {}) => {
490
+ const { dataDir, extensions, max = 5 } = options;
491
+ const ownsInstance = !options.pglite;
492
+ const pglite = options.pglite ?? new _electric_sql_pglite.PGlite(dataDir, extensions ? { extensions } : void 0);
493
+ await pglite.waitReady;
494
+ const sessionLock = new SessionLock();
495
+ const BridgedClient = class extends Client {
496
+ constructor(config) {
497
+ super({
498
+ ...typeof config === "string" ? { connectionString: config } : config ?? {},
499
+ user: "postgres",
500
+ database: "postgres",
501
+ stream: (() => new PGliteBridge(pglite, sessionLock))
502
+ });
503
+ }
504
+ };
505
+ const pool = new Pool({
506
+ Client: BridgedClient,
507
+ max
508
+ });
509
+ const close = async () => {
510
+ await pool.end();
511
+ if (ownsInstance) await pglite.close();
512
+ };
513
+ return {
514
+ pool,
515
+ pglite,
516
+ close
517
+ };
463
518
  };
464
-
465
- // src/create-pglite-adapter.ts
466
- var import_node_fs = require("fs");
467
- var import_node_path = require("path");
468
- var import_adapter_pg = require("@prisma/adapter-pg");
469
- var SNAPSHOT_SCHEMA = "_pglite_snapshot";
470
- var discoverMigrationsPath = async (configRoot) => {
471
- try {
472
- const { loadConfigFromFile } = await import("@prisma/config");
473
- const { config, error } = await loadConfigFromFile({ configRoot: configRoot ?? process.cwd() });
474
- if (error) return null;
475
- if (config.migrations?.path) return config.migrations.path;
476
- const schemaPath = config.schema;
477
- if (schemaPath) return (0, import_node_path.join)((0, import_node_path.dirname)(schemaPath), "migrations");
478
- return null;
479
- } catch {
480
- return null;
481
- }
519
+ //#endregion
520
+ //#region src/create-pglite-adapter.ts
521
+ /**
522
+ * Creates a Prisma adapter backed by in-process PGlite.
523
+ *
524
+ * No TCP, no Docker, no worker threads — everything runs in the same process.
525
+ * Works for testing, development, seeding, and scripts.
526
+ *
527
+ * ```typescript
528
+ * import { createPgliteAdapter } from 'prisma-pglite-bridge';
529
+ * import { PrismaClient } from '@prisma/client';
530
+ *
531
+ * const { adapter, resetDb } = await createPgliteAdapter();
532
+ * const prisma = new PrismaClient({ adapter });
533
+ *
534
+ * beforeEach(() => resetDb());
535
+ * ```
536
+ */
537
+ const SNAPSHOT_SCHEMA = "_pglite_snapshot";
538
+ /**
539
+ * Discover the migrations directory via Prisma's config API.
540
+ * Uses the same resolution as `prisma migrate dev` — reads prisma.config.ts,
541
+ * resolves paths relative to config file location.
542
+ *
543
+ * Returns null if @prisma/config is not available or config cannot be loaded.
544
+ */
545
+ const discoverMigrationsPath = async (configRoot) => {
546
+ try {
547
+ const { loadConfigFromFile } = await import("@prisma/config");
548
+ const { config, error } = await loadConfigFromFile({ configRoot: configRoot ?? process.cwd() });
549
+ if (error) return null;
550
+ if (config.migrations?.path) return config.migrations.path;
551
+ const schemaPath = config.schema;
552
+ if (schemaPath) return (0, node_path.join)((0, node_path.dirname)(schemaPath), "migrations");
553
+ return null;
554
+ } catch {
555
+ return null;
556
+ }
482
557
  };
483
- var tryReadMigrationFiles = (migrationsPath) => {
484
- if (!(0, import_node_fs.existsSync)(migrationsPath)) return null;
485
- const dirs = (0, import_node_fs.readdirSync)(migrationsPath).filter((d) => (0, import_node_fs.statSync)((0, import_node_path.join)(migrationsPath, d)).isDirectory()).sort();
486
- const sqlParts = [];
487
- for (const dir of dirs) {
488
- const sqlPath = (0, import_node_path.join)(migrationsPath, dir, "migration.sql");
489
- if ((0, import_node_fs.existsSync)(sqlPath)) {
490
- sqlParts.push((0, import_node_fs.readFileSync)(sqlPath, "utf8"));
491
- }
492
- }
493
- return sqlParts.length > 0 ? sqlParts.join("\n") : null;
558
+ /**
559
+ * Read migration SQL files from a migrations directory in directory order.
560
+ * Returns null if the directory doesn't exist or has no migration files.
561
+ */
562
+ const tryReadMigrationFiles = (migrationsPath) => {
563
+ if (!(0, node_fs.existsSync)(migrationsPath)) return null;
564
+ const dirs = (0, node_fs.readdirSync)(migrationsPath).filter((d) => (0, node_fs.statSync)((0, node_path.join)(migrationsPath, d)).isDirectory()).sort();
565
+ const sqlParts = [];
566
+ for (const dir of dirs) {
567
+ const sqlPath = (0, node_path.join)(migrationsPath, dir, "migration.sql");
568
+ if ((0, node_fs.existsSync)(sqlPath)) sqlParts.push((0, node_fs.readFileSync)(sqlPath, "utf8"));
569
+ }
570
+ return sqlParts.length > 0 ? sqlParts.join("\n") : null;
494
571
  };
495
- var resolveSQL = async (options) => {
496
- if (options.sql) return options.sql;
497
- if (options.migrationsPath) {
498
- const sql = tryReadMigrationFiles(options.migrationsPath);
499
- if (sql) return sql;
500
- throw new Error(
501
- `No migration.sql files found in ${options.migrationsPath}. Run \`prisma migrate dev\` to generate migration files.`
502
- );
503
- }
504
- const migrationsPath = await discoverMigrationsPath(options.configRoot);
505
- if (migrationsPath) {
506
- const sql = tryReadMigrationFiles(migrationsPath);
507
- if (sql) return sql;
508
- }
509
- throw new Error(
510
- "No migration files found. Run `prisma migrate dev` to generate them, or pass pre-generated SQL via the `sql` option."
511
- );
572
+ /**
573
+ * Resolve schema SQL. Priority:
574
+ * 1. Explicit `sql` option — use directly
575
+ * 2. Explicit `migrationsPath` — read migration files
576
+ * 3. Auto-discovered migrations (via prisma.config.ts) read migration files
577
+ * 4. Error — tell the user to generate migration files
578
+ */
579
+ const resolveSQL = async (options) => {
580
+ if (options.sql) return options.sql;
581
+ if (options.migrationsPath) {
582
+ const sql = tryReadMigrationFiles(options.migrationsPath);
583
+ if (sql) return sql;
584
+ throw new Error(`No migration.sql files found in ${options.migrationsPath}. Run \`prisma migrate dev\` to generate migration files.`);
585
+ }
586
+ const migrationsPath = await discoverMigrationsPath(options.configRoot);
587
+ if (migrationsPath) {
588
+ const sql = tryReadMigrationFiles(migrationsPath);
589
+ if (sql) return sql;
590
+ }
591
+ throw new Error("No migration files found. Run `prisma migrate dev` to generate them, or pass pre-generated SQL via the `sql` option.");
512
592
  };
513
- var createPgliteAdapter = async (options = {}) => {
514
- const sql = await resolveSQL(options);
515
- const {
516
- pool,
517
- pglite,
518
- close: poolClose
519
- } = await createPool({
520
- dataDir: options.dataDir,
521
- extensions: options.extensions,
522
- max: options.max
523
- });
524
- try {
525
- await pglite.exec(sql);
526
- } catch (err) {
527
- throw new Error("Failed to apply schema SQL to PGlite. Check your schema or migration files.", {
528
- cause: err
529
- });
530
- }
531
- const adapter = new import_adapter_pg.PrismaPg(pool);
532
- let cachedTables = null;
533
- let hasSnapshot = false;
534
- const discoverTables = async () => {
535
- if (cachedTables !== null) return cachedTables;
536
- const { rows } = await pglite.query(
537
- `SELECT quote_ident(schemaname) || '.' || quote_ident(tablename) AS qualified
593
+ /**
594
+ * Creates a Prisma adapter backed by an in-process PGlite instance.
595
+ *
596
+ * Applies the schema and returns a ready-to-use adapter + a `resetDb`
597
+ * function for clearing tables between tests.
598
+ */
599
+ const createPgliteAdapter = async (options = {}) => {
600
+ const sql = await resolveSQL(options);
601
+ const { pool, pglite, close: poolClose } = await createPool({
602
+ dataDir: options.dataDir,
603
+ extensions: options.extensions,
604
+ max: options.max
605
+ });
606
+ try {
607
+ await pglite.exec(sql);
608
+ } catch (err) {
609
+ throw new Error("Failed to apply schema SQL to PGlite. Check your schema or migration files.", { cause: err });
610
+ }
611
+ const adapter = new _prisma_adapter_pg.PrismaPg(pool);
612
+ let cachedTables = null;
613
+ let hasSnapshot = false;
614
+ const discoverTables = async () => {
615
+ if (cachedTables !== null) return cachedTables;
616
+ const { rows } = await pglite.query(`SELECT quote_ident(schemaname) || '.' || quote_ident(tablename) AS qualified
538
617
  FROM pg_tables
539
618
  WHERE schemaname NOT IN ('pg_catalog', 'information_schema')
540
619
  AND schemaname != '${SNAPSHOT_SCHEMA}'
541
- AND tablename NOT LIKE '_prisma%'`
542
- );
543
- cachedTables = rows.length > 0 ? rows.map((r) => r.qualified).join(", ") : "";
544
- return cachedTables;
545
- };
546
- const snapshotDb = async () => {
547
- await pglite.exec(`DROP SCHEMA IF EXISTS "${SNAPSHOT_SCHEMA}" CASCADE`);
548
- await pglite.exec(`CREATE SCHEMA "${SNAPSHOT_SCHEMA}"`);
549
- const { rows: tables } = await pglite.query(
550
- `SELECT quote_ident(tablename) AS tablename FROM pg_tables
620
+ AND tablename NOT LIKE '_prisma%'`);
621
+ cachedTables = rows.length > 0 ? rows.map((r) => r.qualified).join(", ") : "";
622
+ return cachedTables;
623
+ };
624
+ const snapshotDb = async () => {
625
+ await pglite.exec(`DROP SCHEMA IF EXISTS "${SNAPSHOT_SCHEMA}" CASCADE`);
626
+ await pglite.exec(`CREATE SCHEMA "${SNAPSHOT_SCHEMA}"`);
627
+ const { rows: tables } = await pglite.query(`SELECT quote_ident(tablename) AS tablename FROM pg_tables
551
628
  WHERE schemaname = 'public'
552
- AND tablename NOT LIKE '_prisma%'`
553
- );
554
- for (const { tablename } of tables) {
555
- await pglite.exec(
556
- `CREATE TABLE "${SNAPSHOT_SCHEMA}".${tablename} AS SELECT * FROM public.${tablename}`
557
- );
558
- }
559
- const { rows: seqs } = await pglite.query(
560
- `SELECT quote_literal(sequencename) AS name, last_value::text AS value
561
- FROM pg_sequences WHERE schemaname = 'public' AND last_value IS NOT NULL`
562
- );
563
- await pglite.exec(`CREATE TABLE "${SNAPSHOT_SCHEMA}".__sequences (name text, value bigint)`);
564
- for (const { name, value } of seqs) {
565
- await pglite.exec(`INSERT INTO "${SNAPSHOT_SCHEMA}".__sequences VALUES (${name}, ${value})`);
566
- }
567
- hasSnapshot = true;
568
- };
569
- const resetSnapshot = async () => {
570
- hasSnapshot = false;
571
- await pglite.exec(`DROP SCHEMA IF EXISTS "${SNAPSHOT_SCHEMA}" CASCADE`);
572
- };
573
- const resetDb = async () => {
574
- const tables = await discoverTables();
575
- if (hasSnapshot && tables) {
576
- try {
577
- await pglite.exec("SET session_replication_role = replica");
578
- await pglite.exec(`TRUNCATE TABLE ${tables} CASCADE`);
579
- const { rows: snapshotTables } = await pglite.query(
580
- `SELECT quote_ident(tablename) AS tablename FROM pg_tables
629
+ AND tablename NOT LIKE '_prisma%'`);
630
+ for (const { tablename } of tables) await pglite.exec(`CREATE TABLE "${SNAPSHOT_SCHEMA}".${tablename} AS SELECT * FROM public.${tablename}`);
631
+ const { rows: seqs } = await pglite.query(`SELECT quote_literal(sequencename) AS name, last_value::text AS value
632
+ FROM pg_sequences WHERE schemaname = 'public' AND last_value IS NOT NULL`);
633
+ await pglite.exec(`CREATE TABLE "${SNAPSHOT_SCHEMA}".__sequences (name text, value bigint)`);
634
+ for (const { name, value } of seqs) await pglite.exec(`INSERT INTO "${SNAPSHOT_SCHEMA}".__sequences VALUES (${name}, ${value})`);
635
+ hasSnapshot = true;
636
+ };
637
+ const resetSnapshot = async () => {
638
+ hasSnapshot = false;
639
+ await pglite.exec(`DROP SCHEMA IF EXISTS "${SNAPSHOT_SCHEMA}" CASCADE`);
640
+ };
641
+ const resetDb = async () => {
642
+ const tables = await discoverTables();
643
+ if (hasSnapshot && tables) {
644
+ try {
645
+ await pglite.exec("SET session_replication_role = replica");
646
+ await pglite.exec(`TRUNCATE TABLE ${tables} CASCADE`);
647
+ const { rows: snapshotTables } = await pglite.query(`SELECT quote_ident(tablename) AS tablename FROM pg_tables
581
648
  WHERE schemaname = '${SNAPSHOT_SCHEMA}'
582
- AND tablename != '__sequences'`
583
- );
584
- for (const { tablename } of snapshotTables) {
585
- await pglite.exec(
586
- `INSERT INTO public.${tablename} SELECT * FROM "${SNAPSHOT_SCHEMA}".${tablename}`
587
- );
588
- }
589
- } finally {
590
- await pglite.exec("SET session_replication_role = DEFAULT");
591
- }
592
- const { rows: seqs } = await pglite.query(
593
- `SELECT quote_literal(name) AS name, value::text AS value FROM "${SNAPSHOT_SCHEMA}".__sequences`
594
- );
595
- for (const { name, value } of seqs) {
596
- await pglite.exec(`SELECT setval(${name}, ${value})`);
597
- }
598
- } else if (tables) {
599
- try {
600
- await pglite.exec("SET session_replication_role = replica");
601
- await pglite.exec(`TRUNCATE TABLE ${tables} CASCADE`);
602
- } finally {
603
- await pglite.exec("SET session_replication_role = DEFAULT");
604
- }
605
- }
606
- await pglite.exec("RESET ALL");
607
- await pglite.exec("DEALLOCATE ALL");
608
- };
609
- return { adapter, pglite, resetDb, snapshotDb, resetSnapshot, close: poolClose };
649
+ AND tablename != '__sequences'`);
650
+ for (const { tablename } of snapshotTables) await pglite.exec(`INSERT INTO public.${tablename} SELECT * FROM "${SNAPSHOT_SCHEMA}".${tablename}`);
651
+ } finally {
652
+ await pglite.exec("SET session_replication_role = DEFAULT");
653
+ }
654
+ const { rows: seqs } = await pglite.query(`SELECT quote_literal(name) AS name, value::text AS value FROM "${SNAPSHOT_SCHEMA}".__sequences`);
655
+ for (const { name, value } of seqs) await pglite.exec(`SELECT setval(${name}, ${value})`);
656
+ } else if (tables) try {
657
+ await pglite.exec("SET session_replication_role = replica");
658
+ await pglite.exec(`TRUNCATE TABLE ${tables} CASCADE`);
659
+ } finally {
660
+ await pglite.exec("SET session_replication_role = DEFAULT");
661
+ }
662
+ await pglite.exec("RESET ALL");
663
+ await pglite.exec("DEALLOCATE ALL");
664
+ };
665
+ return {
666
+ adapter,
667
+ pglite,
668
+ resetDb,
669
+ snapshotDb,
670
+ resetSnapshot,
671
+ close: poolClose
672
+ };
610
673
  };
611
- // Annotate the CommonJS export names for ESM import in node:
612
- 0 && (module.exports = {
613
- PGliteBridge,
614
- createPgliteAdapter,
615
- createPool
616
- });
674
+ //#endregion
675
+ exports.PGliteBridge = PGliteBridge;
676
+ exports.createPgliteAdapter = createPgliteAdapter;
677
+ exports.createPool = createPool;
678
+
617
679
  //# sourceMappingURL=index.cjs.map