postgres-memory-server 0.1.0 → 0.2.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +104 -79
- package/dist/cli.js +687 -65
- package/dist/cli.js.map +1 -1
- package/dist/index.d.ts +65 -19
- package/dist/index.js +682 -59
- package/dist/index.js.map +1 -1
- package/package.json +5 -4
package/dist/index.js
CHANGED
|
@@ -1,7 +1,9 @@
|
|
|
1
1
|
// src/PostgresMemoryServer.ts
|
|
2
|
-
import { promises as
|
|
3
|
-
import
|
|
4
|
-
import
|
|
2
|
+
import { promises as fs2, rmSync } from "fs";
|
|
3
|
+
import os2 from "os";
|
|
4
|
+
import path2 from "path";
|
|
5
|
+
import process2 from "process";
|
|
6
|
+
import EmbeddedPostgres from "embedded-postgres";
|
|
5
7
|
import { Client } from "pg";
|
|
6
8
|
|
|
7
9
|
// src/errors.ts
|
|
@@ -23,20 +25,439 @@ var ServerStoppedError = class extends PostgresMemoryServerError {
|
|
|
23
25
|
super("The PostgresMemoryServer has already been stopped.");
|
|
24
26
|
}
|
|
25
27
|
};
|
|
28
|
+
/**
 * Raised when downloading or installing a PostgreSQL extension
 * (pg_search, vector, ...) fails. The triggering error, when available,
 * is attached via the standard `cause` option and its message is folded
 * into the error text.
 */
var ExtensionInstallError = class extends PostgresMemoryServerError {
  constructor(extensionName, cause) {
    const detail = cause?.message ?? "";
    const message = `Failed to install the "${extensionName}" extension. ${detail}`.trim();
    super(message, cause ? { cause } : void 0);
  }
};
|
|
36
|
+
|
|
37
|
+
// src/native.ts
|
|
38
|
+
import { promises as fs, readFileSync, existsSync } from "fs";
|
|
39
|
+
import { createRequire } from "module";
|
|
40
|
+
import { execFile as execFileCb } from "child_process";
|
|
41
|
+
import { promisify } from "util";
|
|
42
|
+
import net from "net";
|
|
43
|
+
import os from "os";
|
|
44
|
+
import path from "path";
|
|
45
|
+
var execFile = promisify(execFileCb);
/**
 * Ask the OS for a currently-free TCP port by binding an ephemeral
 * listener on port 0 and reading back the assigned port, then closing
 * the listener before resolving.
 *
 * Fix: `server.address()` is typed as `AddressInfo | string | null`;
 * the previous bare destructure would throw a TypeError inside the
 * listen callback (bypassing `reject`) in the unlikely case it is not
 * an object. Guard and reject explicitly instead.
 *
 * @returns {Promise<number>} a port that was free at the time of check
 */
async function getFreePort() {
  return new Promise((resolve, reject) => {
    const server = net.createServer();
    server.on("error", reject);
    server.listen(0, () => {
      const address = server.address();
      if (address === null || typeof address === "string") {
        server.close(() => reject(new Error("Could not determine a free port")));
        return;
      }
      server.close(() => resolve(address.port));
    });
  });
}
|
|
56
|
+
/**
 * Determine the PostgreSQL major version bundled by the installed
 * `embedded-postgres` package by walking up from its resolved entry
 * point until the package's own package.json is found, then taking the
 * first segment of its version string.
 *
 * @throws {Error} if embedded-postgres cannot be resolved or no
 *   matching package.json is found on the way to the filesystem root.
 */
function getPgMajorVersion() {
  const req = createRequire(import.meta.url);
  let dir = path.dirname(req.resolve("embedded-postgres"));
  while (dir !== path.dirname(dir)) {
    try {
      const raw = readFileSync(path.join(dir, "package.json"), "utf8");
      const pkg = JSON.parse(raw);
      if (pkg.name === "embedded-postgres" && pkg.version) {
        const [major] = pkg.version.split(".");
        if (major) return major;
      }
    } catch {
      // No readable/parsable package.json at this level; keep walking up.
    }
    dir = path.dirname(dir);
  }
  throw new Error(
    "Could not determine embedded-postgres version. Ensure embedded-postgres is installed."
  );
}
|
|
77
|
+
/**
 * Locate the `native` directory shipped by the platform-specific
 * `@embedded-postgres/*` binary package for the current OS/CPU, by
 * resolving that package and walking up from its entry point.
 *
 * @throws {Error} if the current platform/arch pair has no binary
 *   package, or the package exists but no `native` directory is found.
 */
function getNativeDir() {
  const platform = os.platform();
  const arch = os.arch();
  // Supported platform/arch pairs and their binary companion packages.
  const platformPkgNames = {
    darwin: {
      arm64: "@embedded-postgres/darwin-arm64",
      x64: "@embedded-postgres/darwin-x64"
    },
    linux: {
      x64: "@embedded-postgres/linux-x64",
      arm64: "@embedded-postgres/linux-arm64"
    },
    win32: {
      x64: "@embedded-postgres/windows-x64"
    }
  };
  const pkgName = platformPkgNames[platform]?.[arch];
  if (!pkgName) {
    throw new Error(`Unsupported platform: ${platform}-${arch}`);
  }
  const req = createRequire(import.meta.url);
  let dir = path.dirname(req.resolve(pkgName));
  while (dir !== path.dirname(dir)) {
    const candidate = path.join(dir, "native");
    if (existsSync(candidate)) {
      return candidate;
    }
    dir = path.dirname(dir);
  }
  throw new Error(
    `Could not find native directory for ${pkgName}. Ensure embedded-postgres is installed correctly.`
  );
}
|
|
111
|
+
/**
 * Directory used to cache downloaded extension artifacts across runs.
 * Honors XDG_CACHE_HOME when set to a non-empty value, otherwise falls
 * back to ~/.cache (note: `||` is deliberate so an empty env var also
 * falls back).
 */
function getCacheDir() {
  const root = process.env.XDG_CACHE_HOME || path.join(os.homedir(), ".cache");
  return path.join(root, "postgres-memory-server");
}
|
|
116
|
+
/**
 * Install the ParadeDB `pg_search` extension into an embedded-postgres
 * `native` directory (its lib/ and share/extension/ subtrees).
 *
 * Flow: (1) skip if the shared library and control file are already in
 * place; (2) otherwise reuse a per-version/platform cache under
 * getCacheDir(), downloading and extracting the official release
 * artifact (.pkg on macOS, .deb elsewhere) on a cache miss; (3) copy
 * the cached files into the native directory.
 *
 * @param nativeDir       root of the embedded-postgres native tree
 * @param paradedbVersion ParadeDB extension version (e.g. "0.22.5")
 * @param pgMajorVersion  PostgreSQL major version as a string
 */
async function installParadeDBExtension(nativeDir, paradedbVersion, pgMajorVersion) {
  const libDir = path.join(nativeDir, "lib", "postgresql");
  const extDir = path.join(nativeDir, "share", "postgresql", "extension");
  // NOTE(review): on macOS the .dylib name is only used for PG >= 16;
  // presumably older macOS builds ship a .so — confirm against ParadeDB releases.
  const soName = os.platform() === "darwin" && parseInt(pgMajorVersion, 10) >= 16 ? "pg_search.dylib" : "pg_search.so";
  try {
    // Already installed into this native tree — nothing to do.
    await fs.access(path.join(libDir, soName));
    await fs.access(path.join(extDir, "pg_search.control"));
    return;
  } catch {
  }
  const cacheDir = getCacheDir();
  const platform = os.platform();
  const arch = os.arch();
  // Cache entries are keyed by extension version, PG major, and platform/arch.
  const cacheKey = `paradedb-${paradedbVersion}-pg${pgMajorVersion}-${platform}-${arch}`;
  const cachedDir = path.join(cacheDir, cacheKey);
  let cached = false;
  try {
    await fs.access(path.join(cachedDir, "lib", soName));
    cached = true;
  } catch {
  }
  if (!cached) {
    // Cache miss: fetch the release artifact, extract it, and populate the cache.
    const url = buildDownloadUrl(
      paradedbVersion,
      pgMajorVersion,
      platform,
      arch
    );
    const tmpDir = await fs.mkdtemp(path.join(os.tmpdir(), "paradedb-"));
    try {
      // URL-encoded characters (e.g. %40 for "@") appear in the artifact name.
      const filename = decodeURIComponent(url.split("/").pop());
      const archivePath = path.join(tmpDir, filename);
      await downloadFile(url, archivePath);
      const extractedDir = path.join(tmpDir, "extracted");
      await fs.mkdir(extractedDir, { recursive: true });
      if (platform === "darwin") {
        await extractPkg(archivePath, extractedDir);
      } else {
        await extractDeb(archivePath, extractedDir);
      }
      const cacheLibDir2 = path.join(cachedDir, "lib");
      const cacheExtDir2 = path.join(cachedDir, "extension");
      await fs.mkdir(cacheLibDir2, { recursive: true });
      await fs.mkdir(cacheExtDir2, { recursive: true });
      // Pull the shared library out of the extracted tree.
      const soFiles = await findFiles(
        extractedDir,
        /pg_search\.(so|dylib)$/
      );
      for (const soFile of soFiles) {
        await copyFileWithPermissions(soFile, path.join(cacheLibDir2, path.basename(soFile)));
      }
      // Pull control and SQL files (pg_search.control, pg_search--*.sql).
      const extFiles = await findFiles(
        extractedDir,
        /pg_search[^/]*(\.control|\.sql)$/
      );
      for (const extFile of extFiles) {
        await copyFileWithPermissions(
          extFile,
          path.join(cacheExtDir2, path.basename(extFile))
        );
      }
    } finally {
      // Always drop the temp download/extract area, even on failure.
      await fs.rm(tmpDir, { recursive: true, force: true });
    }
  }
  // Copy from the (now populated) cache into the native tree.
  await fs.mkdir(libDir, { recursive: true });
  await fs.mkdir(extDir, { recursive: true });
  const cacheLibDir = path.join(cachedDir, "lib");
  const cacheExtDir = path.join(cachedDir, "extension");
  for (const file of await fs.readdir(cacheLibDir)) {
    await copyFileWithPermissions(path.join(cacheLibDir, file), path.join(libDir, file));
  }
  for (const file of await fs.readdir(cacheExtDir)) {
    await copyFileWithPermissions(path.join(cacheExtDir, file), path.join(extDir, file));
  }
}
|
|
192
|
+
/**
 * Build the GitHub release URL for a ParadeDB pg_search artifact.
 * macOS uses a .pkg named after the OS codename (arm64 only); Linux
 * uses a Debian bookworm .deb.
 *
 * @throws {Error} for darwin/x64 and for any platform other than
 *   darwin or linux.
 */
function buildDownloadUrl(version, pgMajorVersion, platform, arch) {
  const releaseBase = `https://github.com/paradedb/paradedb/releases/download/v${version}`;
  switch (platform) {
    case "darwin": {
      if (arch !== "arm64") {
        throw new Error(
          "ParadeDB only provides macOS binaries for arm64 (Apple Silicon). Intel Macs are not supported."
        );
      }
      const macosName = getMacOSCodename();
      // %40 is the URL-encoded "@" in the artifact name.
      return `${releaseBase}/pg_search%40${pgMajorVersion}--${version}.arm64_${macosName}.pkg`;
    }
    case "linux": {
      const debArch = arch === "arm64" ? "arm64" : "amd64";
      return `${releaseBase}/postgresql-${pgMajorVersion}-pg-search_${version}-1PARADEDB-bookworm_${debArch}.deb`;
    }
    default:
      throw new Error(
        `ParadeDB does not provide prebuilt binaries for ${platform}. Use the Docker-based preset instead.`
      );
  }
}
|
|
211
|
+
/**
 * Map the running Darwin kernel major version to the macOS release
 * codename used in ParadeDB artifact names (Darwin 24 -> Sequoia,
 * Darwin 23 -> Sonoma).
 *
 * @throws {Error} on anything older than Sonoma.
 */
function getMacOSCodename() {
  const darwinRelease = os.release();
  const darwinMajor = parseInt(darwinRelease.split(".")[0] ?? "0", 10);
  if (darwinMajor >= 24) {
    return "sequoia";
  }
  if (darwinMajor >= 23) {
    return "sonoma";
  }
  throw new Error(
    `ParadeDB requires macOS 14 (Sonoma) or later. Detected Darwin ${darwinRelease}.`
  );
}
|
|
220
|
+
/**
 * Download `url` (following redirects) and write the response body to
 * `destPath`. The body is buffered fully in memory before writing;
 * artifacts here are small enough for that to be acceptable.
 *
 * @throws {Error} on any non-2xx HTTP status.
 */
async function downloadFile(url, destPath) {
  const response = await fetch(url, { redirect: "follow" });
  if (!response.ok) {
    throw new Error(
      `Failed to download ParadeDB extension from ${url}: ${response.status} ${response.statusText}`
    );
  }
  const body = await response.arrayBuffer();
  await fs.writeFile(destPath, Buffer.from(body));
}
|
|
230
|
+
/**
 * Unpack a Debian package: `ar x` splits the outer container into
 * control/data tarballs inside `extractDir`, then the data.tar.* member
 * (whatever its compression) is untarred into `extractDir`/data.
 * Requires the `ar` and `tar` binaries on PATH.
 *
 * @throws {Error} if the archive contains no data.tar.* member.
 */
async function extractDeb(debPath, extractDir) {
  await execFile("ar", ["x", debPath], { cwd: extractDir });
  const members = await fs.readdir(extractDir);
  const dataTar = members.find((name) => name.startsWith("data.tar"));
  if (dataTar === undefined) {
    throw new Error(
      "No data.tar.* found in .deb archive. The ParadeDB package format may have changed."
    );
  }
  const dataDir = path.join(extractDir, "data");
  await fs.mkdir(dataDir, { recursive: true });
  // tar auto-detects gz/xz/zst compression of the data member.
  await execFile("tar", ["xf", path.join(extractDir, dataTar), "-C", dataDir]);
}
|
|
248
|
+
/**
 * Expand a macOS .pkg installer into `extractDir`/pkg using pkgutil.
 * `--expand-full` also unpacks the embedded Payload, not just the
 * package wrapper. macOS-only (pkgutil must be on PATH).
 */
async function extractPkg(pkgPath, extractDir) {
  const expandedDir = path.join(extractDir, "pkg");
  await execFile("pkgutil", ["--expand-full", pkgPath, expandedDir]);
}
|
|
252
|
+
/**
 * Copy a file and ensure the destination is mode 0o755 (rwxr-xr-x),
 * which shared libraries installed into the postgres lib dir need.
 *
 * Fix: `fs.writeFile`'s `mode` option only takes effect when the file
 * is newly created; if `dest` already exists (e.g. re-populating an
 * existing cache or native tree) the old permissions were silently
 * kept. An explicit chmod applies the mode in both cases.
 */
async function copyFileWithPermissions(src, dest) {
  const content = await fs.readFile(src);
  await fs.writeFile(dest, content, { mode: 0o755 });
  await fs.chmod(dest, 0o755);
}
|
|
256
|
+
/**
 * Recursively collect the full paths of all files under `dir` whose
 * basename matches `pattern`. Directories are descended depth-first in
 * readdir order; directory names themselves are never matched.
 *
 * @param {string} dir      root directory to search
 * @param {RegExp} pattern  tested against each file's basename
 * @returns {Promise<string[]>} matching absolute/relative paths
 */
async function findFiles(dir, pattern) {
  const matches = [];
  const visit = async (current) => {
    for (const entry of await fs.readdir(current, { withFileTypes: true })) {
      const entryPath = path.join(current, entry.name);
      if (entry.isDirectory()) {
        await visit(entryPath);
      } else if (pattern.test(entry.name)) {
        matches.push(entryPath);
      }
    }
  };
  await visit(dir);
  return matches;
}
|
|
272
|
+
/**
 * Install the pgvector extension into an embedded-postgres `native`
 * directory by downloading the Homebrew bottle for this platform.
 *
 * Flow: (1) skip if vector.{so,dylib} and vector.control are already in
 * place; (2) on a cache miss, resolve the bottle via the Homebrew
 * formula API, fetch an anonymous pull token from GHCR, download the
 * bottle blob, untar it, and copy the library + control/SQL files into
 * the per-platform cache; (3) copy from the cache into the native tree.
 *
 * @param nativeDir      root of the embedded-postgres native tree
 * @param pgMajorVersion PostgreSQL major version as a string
 */
async function installPgVectorExtension(nativeDir, pgMajorVersion) {
  const libDir = path.join(nativeDir, "lib", "postgresql");
  const extDir = path.join(nativeDir, "share", "postgresql", "extension");
  const soName = os.platform() === "darwin" ? "vector.dylib" : "vector.so";
  try {
    // Already installed into this native tree — nothing to do.
    await fs.access(path.join(libDir, soName));
    await fs.access(path.join(extDir, "vector.control"));
    return;
  } catch {
  }
  const platform = os.platform();
  const arch = os.arch();
  const cacheDir = getCacheDir();
  // Cache entries are keyed by PG major and platform/arch (the bottle
  // version itself is whatever the formula API currently serves).
  const cacheKey = `pgvector-pg${pgMajorVersion}-${platform}-${arch}`;
  const cachedDir = path.join(cacheDir, cacheKey);
  let cached = false;
  try {
    await fs.access(path.join(cachedDir, "lib", soName));
    cached = true;
  } catch {
  }
  if (!cached) {
    const tmpDir = await fs.mkdtemp(path.join(os.tmpdir(), "pgvector-"));
    try {
      // Resolve the bottle metadata (per-platform blob digests).
      const formulaRes = await fetch(
        "https://formulae.brew.sh/api/formula/pgvector.json"
      );
      if (!formulaRes.ok) {
        throw new Error(
          `Failed to fetch pgvector formula: ${formulaRes.status}`
        );
      }
      const formula = await formulaRes.json();
      const bottleTag = getHomebrewBottleTag(platform, arch);
      const fileInfo = formula.bottle.stable.files[bottleTag];
      if (!fileInfo) {
        throw new Error(
          `No pgvector Homebrew bottle for ${bottleTag}. Available: ${Object.keys(formula.bottle.stable.files).join(", ")}`
        );
      }
      // GHCR requires a (free, anonymous) pull token even for public blobs.
      const tokenRes = await fetch(
        "https://ghcr.io/token?scope=repository:homebrew/core/pgvector:pull"
      );
      if (!tokenRes.ok) {
        throw new Error(`Failed to get GHCR token: ${tokenRes.status}`);
      }
      const { token } = await tokenRes.json();
      const blobUrl = `https://ghcr.io/v2/homebrew/core/pgvector/blobs/sha256:${fileInfo.sha256}`;
      const blobRes = await fetch(blobUrl, {
        headers: { Authorization: `Bearer ${token}` },
        redirect: "follow"
      });
      if (!blobRes.ok) {
        throw new Error(
          `Failed to download pgvector bottle: ${blobRes.status}`
        );
      }
      const bottlePath = path.join(tmpDir, "pgvector.tar.gz");
      const buffer = Buffer.from(await blobRes.arrayBuffer());
      await fs.writeFile(bottlePath, buffer);
      const extractDir = path.join(tmpDir, "extracted");
      await fs.mkdir(extractDir, { recursive: true });
      await execFile("tar", ["xzf", bottlePath, "-C", extractDir]);
      const cacheLibDir2 = path.join(cachedDir, "lib");
      const cacheExtDir2 = path.join(cachedDir, "extension");
      await fs.mkdir(cacheLibDir2, { recursive: true });
      await fs.mkdir(cacheExtDir2, { recursive: true });
      // Prefer files built for the matching postgresql@<major> keg;
      // fall back to any vector library if the layout changed.
      const pgSubdir = `postgresql@${pgMajorVersion}`;
      let soFiles = await findFiles(
        extractDir,
        new RegExp(`${pgSubdir}.*vector\\.(so|dylib)$`)
      );
      if (soFiles.length === 0) {
        soFiles = await findFiles(extractDir, /vector\.(so|dylib)$/);
      }
      for (const f of soFiles) {
        await copyFileWithPermissions(f, path.join(cacheLibDir2, path.basename(f)));
      }
      // Same two-step lookup for control and SQL files.
      let extFiles = await findFiles(
        extractDir,
        new RegExp(`${pgSubdir}.*vector[^/]*(\\.control|\\.sql)$`)
      );
      if (extFiles.length === 0) {
        extFiles = await findFiles(extractDir, /vector[^/]*(\.control|\.sql)$/);
      }
      for (const f of extFiles) {
        await copyFileWithPermissions(f, path.join(cacheExtDir2, path.basename(f)));
      }
    } finally {
      // Always drop the temp download/extract area, even on failure.
      await fs.rm(tmpDir, { recursive: true, force: true });
    }
  }
  // Copy from the (now populated) cache into the native tree.
  await fs.mkdir(libDir, { recursive: true });
  await fs.mkdir(extDir, { recursive: true });
  const cacheLibDir = path.join(cachedDir, "lib");
  const cacheExtDir = path.join(cachedDir, "extension");
  for (const file of await fs.readdir(cacheLibDir)) {
    await copyFileWithPermissions(path.join(cacheLibDir, file), path.join(libDir, file));
  }
  for (const file of await fs.readdir(cacheExtDir)) {
    await copyFileWithPermissions(path.join(cacheExtDir, file), path.join(extDir, file));
  }
}
|
|
375
|
+
/**
 * Compute the Homebrew bottle tag for this platform/arch, as used as a
 * key in the formula API's `bottle.stable.files` map. macOS tags are
 * derived from the Darwin kernel major version (with an `arm64_` prefix
 * on Apple Silicon); Linux tags encode only the CPU architecture.
 *
 * @throws {Error} for platforms Homebrew does not bottle for.
 */
function getHomebrewBottleTag(platform, arch) {
  if (platform === "linux") {
    return arch === "arm64" ? "aarch64_linux" : "x86_64_linux";
  }
  if (platform === "darwin") {
    const darwinMajor = parseInt(os.release().split(".")[0] ?? "0", 10);
    const prefix = arch === "arm64" ? "arm64_" : "";
    let codename = "ventura";
    if (darwinMajor >= 25) {
      codename = "tahoe";
    } else if (darwinMajor >= 24) {
      codename = "sequoia";
    } else if (darwinMajor >= 23) {
      codename = "sonoma";
    }
    return `${prefix}${codename}`;
  }
  throw new Error(`No Homebrew bottles available for ${platform}-${arch}`);
}
|
|
390
|
+
// Directories younger than this (with no pid file) are left alone: they
// may belong to a server that is still starting up.
var ORPHAN_MIN_AGE_MS = 6e4;
/**
 * Best-effort sweep of leftover "postgres-memory-server-*" data
 * directories in the OS temp dir, from processes that died without
 * cleaning up. A directory is kept when its postmaster.pid points at a
 * live process (or one we lack permission to signal), or when it has no
 * pid file and is younger than `minAgeMs`. Everything else is removed.
 * All failures are swallowed — this must never break server startup.
 */
async function sweepOrphanedDataDirs(minAgeMs = ORPHAN_MIN_AGE_MS) {
  const tmpRoot = os.tmpdir();
  let names;
  try {
    names = await fs.readdir(tmpRoot);
  } catch {
    return;
  }
  const cutoff = Date.now() - minAgeMs;
  const candidates = names.filter((name) => name.startsWith("postgres-memory-server-"));
  await Promise.all(
    candidates.map(async (name) => {
      const dirPath = path.join(tmpRoot, name);
      let dirStat;
      try {
        dirStat = await fs.stat(dirPath);
      } catch {
        return;
      }
      if (!dirStat.isDirectory()) return;
      // Read the owning postgres pid from postmaster.pid, if present.
      let ownerPid = null;
      let sawPidFile = false;
      try {
        const pidText = await fs.readFile(path.join(dirPath, "postmaster.pid"), "utf8");
        sawPidFile = true;
        const head = pidText.split("\n")[0]?.trim();
        const parsedPid = head ? parseInt(head, 10) : NaN;
        if (!Number.isNaN(parsedPid) && parsedPid > 0) {
          ownerPid = parsedPid;
        }
      } catch {
      }
      if (ownerPid !== null) {
        try {
          // Signal 0 probes for existence without sending anything.
          process.kill(ownerPid, 0);
          return;
        } catch (err) {
          // EPERM means the process exists but belongs to someone else.
          if (err.code === "EPERM") {
            return;
          }
        }
      }
      // No pid file and still young: might be mid-initialisation.
      if (!sawPidFile && dirStat.mtimeMs > cutoff) {
        return;
      }
      await fs.rm(dirPath, { recursive: true, force: true }).catch(() => {
      });
    })
  );
}
|
|
442
|
+
/**
 * Split a ParadeDB version string like "0.22.5-pg17" into its extension
 * version ("0.22.5") and optional PostgreSQL major ("17"). Strings that
 * don't match the x.y.z[-pgN] shape are returned as-is in `extVersion`
 * with no `pgVersion`.
 *
 * @param {string} version
 * @returns {{extVersion: string, pgVersion?: string}}
 */
function parseParadeDBVersion(version) {
  const parts = /^(\d+\.\d+\.\d+)(?:-pg(\d+))?$/.exec(version);
  if (parts?.[1] === undefined) {
    return { extVersion: version };
  }
  return {
    extVersion: parts[1],
    pgVersion: parts[2]
  };
}
|
|
26
452
|
|
|
27
453
|
// src/presets.ts
|
|
454
|
+
var DEFAULT_PARADEDB_EXT_VERSION = "0.22.5";
|
|
455
|
+
var DEFAULT_POSTGRES_VERSION = getPgMajorVersion();
|
|
456
|
+
var DEFAULT_PARADEDB_VERSION = `${DEFAULT_PARADEDB_EXT_VERSION}-pg${DEFAULT_POSTGRES_VERSION}`;
|
|
457
|
+
var DEFAULT_POSTGRES_IMAGE = `postgres:${DEFAULT_POSTGRES_VERSION}`;
|
|
458
|
+
var DEFAULT_PARADEDB_IMAGE = `paradedb:${DEFAULT_PARADEDB_VERSION}`;
|
|
28
459
|
var POSTGRES_IMAGE_REPOSITORY = "postgres";
|
|
29
|
-
var PARADEDB_IMAGE_REPOSITORY = "paradedb
|
|
30
|
-
var DEFAULT_POSTGRES_VERSION = "17";
|
|
31
|
-
var DEFAULT_PARADEDB_VERSION = "0.22.3-pg17";
|
|
32
|
-
var DEFAULT_POSTGRES_IMAGE = getImageForVersion(
|
|
33
|
-
"postgres",
|
|
34
|
-
DEFAULT_POSTGRES_VERSION
|
|
35
|
-
);
|
|
36
|
-
var DEFAULT_PARADEDB_IMAGE = getImageForVersion(
|
|
37
|
-
"paradedb",
|
|
38
|
-
DEFAULT_PARADEDB_VERSION
|
|
39
|
-
);
|
|
460
|
+
var PARADEDB_IMAGE_REPOSITORY = "paradedb";
|
|
40
461
|
var DEFAULT_DATABASE = "testdb";
|
|
41
462
|
var DEFAULT_USERNAME = "testuser";
|
|
42
463
|
var DEFAULT_PASSWORD = "testpassword";
|
|
@@ -44,7 +465,7 @@ var PARADEDB_DEFAULT_EXTENSIONS = ["pg_search", "vector"];
|
|
|
44
465
|
function normalizeOptions(options = {}) {
|
|
45
466
|
const preset = options.preset ?? "postgres";
|
|
46
467
|
const version = options.version;
|
|
47
|
-
const image =
|
|
468
|
+
const image = getImageLabel(preset, version);
|
|
48
469
|
const database = options.database ?? DEFAULT_DATABASE;
|
|
49
470
|
const username = options.username ?? DEFAULT_USERNAME;
|
|
50
471
|
const password = options.password ?? DEFAULT_PASSWORD;
|
|
@@ -68,8 +489,11 @@ function getImageForVersion(preset, version) {
|
|
|
68
489
|
function getDefaultImage(preset) {
|
|
69
490
|
return preset === "paradedb" ? DEFAULT_PARADEDB_IMAGE : DEFAULT_POSTGRES_IMAGE;
|
|
70
491
|
}
|
|
71
|
-
function
|
|
72
|
-
|
|
492
|
+
/**
 * Resolve the image label for a preset: an explicit version pins the
 * image via getImageForVersion, otherwise the preset's default image
 * is used.
 */
function getImageLabel(preset, version) {
  return version ? getImageForVersion(preset, version) : getDefaultImage(preset);
}
|
|
74
498
|
function getDefaultExtensions(preset) {
|
|
75
499
|
return preset === "paradedb" ? [...PARADEDB_DEFAULT_EXTENSIONS] : [];
|
|
@@ -80,6 +504,21 @@ function buildInitStatements(options) {
|
|
|
80
504
|
);
|
|
81
505
|
return [...extensionStatements, ...options.initSql];
|
|
82
506
|
}
|
|
507
|
+
/**
 * Resolve the ParadeDB extension version to install. Falls back to the
 * bundled default when no version is given; when the version carries a
 * "-pgN" suffix, N must match the PostgreSQL major that
 * embedded-postgres actually provides.
 *
 * @throws {Error} on a PostgreSQL major mismatch.
 */
function resolveParadeDBVersion(version) {
  if (!version) {
    return DEFAULT_PARADEDB_EXT_VERSION;
  }
  const parsed = parseParadeDBVersion(version);
  const installedPg = DEFAULT_POSTGRES_VERSION;
  if (parsed.pgVersion && parsed.pgVersion !== installedPg) {
    throw new Error(
      `ParadeDB version "${version}" targets PostgreSQL ${parsed.pgVersion}, but embedded-postgres provides PostgreSQL ${installedPg}. Install embedded-postgres@${parsed.pgVersion}.x to match, or use version "${parsed.extVersion}" without the -pg suffix.`
    );
  }
  return parsed.extVersion;
}
|
|
83
522
|
function quoteIdentifier(name) {
|
|
84
523
|
if (/^[a-zA-Z_][a-zA-Z0-9_]*$/.test(name)) {
|
|
85
524
|
return name;
|
|
@@ -88,23 +527,117 @@ function quoteIdentifier(name) {
|
|
|
88
527
|
}
|
|
89
528
|
|
|
90
529
|
// src/PostgresMemoryServer.ts
|
|
530
|
+
// Registry of all not-yet-stopped server instances so process-exit
// handlers can remove their temp data directories.
var liveInstances = /* @__PURE__ */ new Set();
// Guards against installing the exit/signal handlers more than once.
var exitHandlersRegistered = false;
// Ensures the orphaned-data-dir sweep runs at most once per process.
var orphanSweepDone = false;
/**
 * Install process-level cleanup hooks (idempotent). On normal exit and
 * on SIGINT/SIGTERM/SIGHUP, every live instance's data directory is
 * removed synchronously via _cleanupSync().
 */
function registerExitHandlers() {
  if (exitHandlersRegistered) return;
  exitHandlersRegistered = true;
  // Synchronous, error-swallowing cleanup: at exit time there is no
  // event loop left to await anything or report failures to.
  const cleanup = () => {
    for (const instance of liveInstances) {
      try {
        instance._cleanupSync();
      } catch {
      }
    }
  };
  process2.once("exit", cleanup);
  // For signals: clean up, deregister this handler, then re-raise the
  // same signal so the default disposition (and the conventional
  // 128+signum exit status) still applies.
  const signalCleanup = (signal) => {
    cleanup();
    process2.removeListener(signal, signalCleanup);
    process2.kill(process2.pid, signal);
  };
  process2.on("SIGINT", signalCleanup);
  process2.on("SIGTERM", signalCleanup);
  process2.on("SIGHUP", signalCleanup);
}
|
|
91
554
|
var PostgresMemoryServer = class _PostgresMemoryServer {
|
|
92
|
-
constructor(
|
|
93
|
-
this.
|
|
555
|
+
constructor(pg, port, dataDir, options) {
|
|
556
|
+
this.pg = pg;
|
|
557
|
+
this.port = port;
|
|
558
|
+
this.dataDir = dataDir;
|
|
94
559
|
this.options = options;
|
|
95
560
|
this.snapshotSupported = options.database !== "postgres";
|
|
96
561
|
}
|
|
97
562
|
stopped = false;
|
|
98
563
|
snapshotSupported;
|
|
564
|
+
hasSnapshot = false;
|
|
99
565
|
static async create(options = {}) {
|
|
566
|
+
if (!orphanSweepDone) {
|
|
567
|
+
orphanSweepDone = true;
|
|
568
|
+
await sweepOrphanedDataDirs().catch(() => {
|
|
569
|
+
});
|
|
570
|
+
}
|
|
100
571
|
const normalized = normalizeOptions(options);
|
|
101
|
-
const
|
|
102
|
-
const
|
|
103
|
-
|
|
104
|
-
|
|
105
|
-
|
|
572
|
+
const port = await getFreePort();
|
|
573
|
+
const dataDir = await fs2.mkdtemp(
|
|
574
|
+
path2.join(os2.tmpdir(), "postgres-memory-server-")
|
|
575
|
+
);
|
|
576
|
+
let pg;
|
|
577
|
+
try {
|
|
578
|
+
const postgresFlags = [];
|
|
579
|
+
if (normalized.preset === "paradedb") {
|
|
580
|
+
const nativeDir = getNativeDir();
|
|
581
|
+
const extVersion = resolveParadeDBVersion(normalized.version);
|
|
582
|
+
const pgMajor = DEFAULT_POSTGRES_VERSION;
|
|
583
|
+
try {
|
|
584
|
+
await installParadeDBExtension(nativeDir, extVersion, pgMajor);
|
|
585
|
+
} catch (error) {
|
|
586
|
+
throw new ExtensionInstallError(
|
|
587
|
+
"pg_search",
|
|
588
|
+
error instanceof Error ? error : new Error(String(error))
|
|
589
|
+
);
|
|
590
|
+
}
|
|
591
|
+
if (normalized.extensions.includes("vector")) {
|
|
592
|
+
try {
|
|
593
|
+
await installPgVectorExtension(nativeDir, pgMajor);
|
|
594
|
+
} catch (error) {
|
|
595
|
+
throw new ExtensionInstallError(
|
|
596
|
+
"vector",
|
|
597
|
+
error instanceof Error ? error : new Error(String(error))
|
|
598
|
+
);
|
|
599
|
+
}
|
|
600
|
+
}
|
|
601
|
+
if (normalized.extensions.includes("pg_search") || normalized.extensions.length === 0) {
|
|
602
|
+
postgresFlags.push("-c", "shared_preload_libraries=pg_search");
|
|
603
|
+
}
|
|
604
|
+
}
|
|
605
|
+
pg = new EmbeddedPostgres({
|
|
606
|
+
databaseDir: dataDir,
|
|
607
|
+
port,
|
|
608
|
+
user: normalized.username,
|
|
609
|
+
password: normalized.password,
|
|
610
|
+
persistent: false,
|
|
611
|
+
postgresFlags,
|
|
612
|
+
onLog: () => {
|
|
613
|
+
},
|
|
614
|
+
onError: () => {
|
|
615
|
+
}
|
|
616
|
+
});
|
|
617
|
+
await pg.initialise();
|
|
618
|
+
await pg.start();
|
|
619
|
+
if (normalized.database !== "postgres") {
|
|
620
|
+
await pg.createDatabase(normalized.database);
|
|
621
|
+
}
|
|
622
|
+
const server = new _PostgresMemoryServer(pg, port, dataDir, normalized);
|
|
623
|
+
liveInstances.add(server);
|
|
624
|
+
registerExitHandlers();
|
|
625
|
+
const initStatements = buildInitStatements(normalized);
|
|
626
|
+
if (initStatements.length > 0) {
|
|
627
|
+
await server.runSql(initStatements);
|
|
628
|
+
}
|
|
629
|
+
return server;
|
|
630
|
+
} catch (error) {
|
|
631
|
+
if (pg) {
|
|
632
|
+
try {
|
|
633
|
+
await pg.stop();
|
|
634
|
+
} catch {
|
|
635
|
+
}
|
|
636
|
+
}
|
|
637
|
+
await fs2.rm(dataDir, { recursive: true, force: true }).catch(() => {
|
|
638
|
+
});
|
|
639
|
+
throw error;
|
|
106
640
|
}
|
|
107
|
-
return server;
|
|
108
641
|
}
|
|
109
642
|
static createPostgres(options = {}) {
|
|
110
643
|
return _PostgresMemoryServer.create({ ...options, preset: "postgres" });
|
|
@@ -114,15 +647,15 @@ var PostgresMemoryServer = class _PostgresMemoryServer {
|
|
|
114
647
|
}
|
|
115
648
|
getUri() {
|
|
116
649
|
this.ensureRunning();
|
|
117
|
-
return this.
|
|
650
|
+
return `postgres://${this.options.username}:${this.options.password}@localhost:${this.port}/${this.options.database}`;
|
|
118
651
|
}
|
|
119
652
|
getHost() {
|
|
120
653
|
this.ensureRunning();
|
|
121
|
-
return
|
|
654
|
+
return "localhost";
|
|
122
655
|
}
|
|
123
656
|
getPort() {
|
|
124
657
|
this.ensureRunning();
|
|
125
|
-
return this.
|
|
658
|
+
return this.port;
|
|
126
659
|
}
|
|
127
660
|
getDatabase() {
|
|
128
661
|
return this.options.database;
|
|
@@ -136,6 +669,14 @@ var PostgresMemoryServer = class _PostgresMemoryServer {
|
|
|
136
669
|
getImage() {
|
|
137
670
|
return this.options.image;
|
|
138
671
|
}
|
|
672
|
+
/**
|
|
673
|
+
* Returns the absolute path to the temporary PostgreSQL data directory
|
|
674
|
+
* for this instance. Useful for debugging or backing up state. The
|
|
675
|
+
* directory is automatically removed by `stop()`.
|
|
676
|
+
*/
|
|
677
|
+
getDataDir() {
|
|
678
|
+
return this.dataDir;
|
|
679
|
+
}
|
|
139
680
|
getConnectionOptions() {
|
|
140
681
|
return {
|
|
141
682
|
host: this.getHost(),
|
|
@@ -177,35 +718,115 @@ var PostgresMemoryServer = class _PostgresMemoryServer {
|
|
|
177
718
|
});
|
|
178
719
|
}
|
|
179
720
|
async runSqlFile(filePath) {
|
|
180
|
-
const sql = await
|
|
721
|
+
const sql = await fs2.readFile(filePath, "utf8");
|
|
181
722
|
await this.runSql(sql);
|
|
182
723
|
}
|
|
183
724
|
async runMigrationsDir(dirPath) {
|
|
184
|
-
const entries = await
|
|
725
|
+
const entries = await fs2.readdir(dirPath, { withFileTypes: true });
|
|
185
726
|
const files = entries.filter(
|
|
186
727
|
(entry) => entry.isFile() && entry.name.toLowerCase().endsWith(".sql")
|
|
187
728
|
).map((entry) => entry.name).sort((left, right) => left.localeCompare(right));
|
|
188
729
|
for (const file of files) {
|
|
189
|
-
await this.runSqlFile(
|
|
730
|
+
await this.runSqlFile(path2.join(dirPath, file));
|
|
190
731
|
}
|
|
191
732
|
return files;
|
|
192
733
|
}
|
|
734
|
+
/**
|
|
735
|
+
* Create a snapshot of the current database state.
|
|
736
|
+
* Uses PostgreSQL template databases for fast, native snapshots.
|
|
737
|
+
*/
|
|
193
738
|
async snapshot() {
|
|
194
739
|
this.ensureRunning();
|
|
195
740
|
this.ensureSnapshotSupported();
|
|
196
|
-
|
|
741
|
+
const snapshotDb = `${this.options.database}_snapshot`;
|
|
742
|
+
await this.withAdminClient(async (client) => {
|
|
743
|
+
await client.query(
|
|
744
|
+
`SELECT pg_terminate_backend(pid) FROM pg_stat_activity WHERE datname = $1 AND pid != pg_backend_pid()`,
|
|
745
|
+
[this.options.database]
|
|
746
|
+
);
|
|
747
|
+
if (this.hasSnapshot) {
|
|
748
|
+
await client.query(`DROP DATABASE IF EXISTS "${snapshotDb}"`);
|
|
749
|
+
}
|
|
750
|
+
await client.query(
|
|
751
|
+
`CREATE DATABASE "${snapshotDb}" TEMPLATE "${this.options.database}"`
|
|
752
|
+
);
|
|
753
|
+
});
|
|
754
|
+
this.hasSnapshot = true;
|
|
197
755
|
}
|
|
756
|
+
/**
|
|
757
|
+
* Restore the database to the last snapshot.
|
|
758
|
+
* Drops and recreates the database from the snapshot template.
|
|
759
|
+
*/
|
|
198
760
|
async restore() {
|
|
199
761
|
this.ensureRunning();
|
|
200
762
|
this.ensureSnapshotSupported();
|
|
201
|
-
|
|
763
|
+
if (!this.hasSnapshot) {
|
|
764
|
+
throw new Error(
|
|
765
|
+
"No snapshot exists. Call snapshot() before calling restore()."
|
|
766
|
+
);
|
|
767
|
+
}
|
|
768
|
+
const snapshotDb = `${this.options.database}_snapshot`;
|
|
769
|
+
await this.withAdminClient(async (client) => {
|
|
770
|
+
await client.query(
|
|
771
|
+
`SELECT pg_terminate_backend(pid) FROM pg_stat_activity WHERE datname = $1 AND pid != pg_backend_pid()`,
|
|
772
|
+
[this.options.database]
|
|
773
|
+
);
|
|
774
|
+
await client.query(`DROP DATABASE "${this.options.database}"`);
|
|
775
|
+
await client.query(
|
|
776
|
+
`CREATE DATABASE "${this.options.database}" TEMPLATE "${snapshotDb}"`
|
|
777
|
+
);
|
|
778
|
+
});
|
|
202
779
|
}
|
|
203
780
|
async stop() {
|
|
204
781
|
if (this.stopped) {
|
|
205
782
|
return;
|
|
206
783
|
}
|
|
207
784
|
this.stopped = true;
|
|
208
|
-
|
|
785
|
+
liveInstances.delete(this);
|
|
786
|
+
try {
|
|
787
|
+
await this.pg.stop();
|
|
788
|
+
} catch {
|
|
789
|
+
}
|
|
790
|
+
await fs2.rm(this.dataDir, { recursive: true, force: true }).catch(() => {
|
|
791
|
+
});
|
|
792
|
+
}
|
|
793
|
+
/**
|
|
794
|
+
* Synchronous cleanup for use in process exit handlers. Cannot await,
|
|
795
|
+
* so we just remove the data directory and let the OS reap the postgres
|
|
796
|
+
* child process. embedded-postgres registers its own exit hook to kill
|
|
797
|
+
* the process; this method is a backup for the data directory only.
|
|
798
|
+
*
|
|
799
|
+
* @internal
|
|
800
|
+
*/
|
|
801
|
+
_cleanupSync() {
|
|
802
|
+
if (this.stopped) {
|
|
803
|
+
return;
|
|
804
|
+
}
|
|
805
|
+
this.stopped = true;
|
|
806
|
+
liveInstances.delete(this);
|
|
807
|
+
try {
|
|
808
|
+
rmSync(this.dataDir, { recursive: true, force: true });
|
|
809
|
+
} catch {
|
|
810
|
+
}
|
|
811
|
+
}
|
|
812
|
+
/**
|
|
813
|
+
* Connect to the "postgres" system database for admin operations
|
|
814
|
+
* (snapshot, restore, etc.).
|
|
815
|
+
*/
|
|
816
|
+
async withAdminClient(callback) {
|
|
817
|
+
const client = new Client({
|
|
818
|
+
host: "localhost",
|
|
819
|
+
port: this.port,
|
|
820
|
+
database: "postgres",
|
|
821
|
+
user: this.options.username,
|
|
822
|
+
password: this.options.password
|
|
823
|
+
});
|
|
824
|
+
await client.connect();
|
|
825
|
+
try {
|
|
826
|
+
return await callback(client);
|
|
827
|
+
} finally {
|
|
828
|
+
await client.end();
|
|
829
|
+
}
|
|
209
830
|
}
|
|
210
831
|
ensureRunning() {
|
|
211
832
|
if (this.stopped) {
|
|
@@ -220,21 +841,21 @@ var PostgresMemoryServer = class _PostgresMemoryServer {
|
|
|
220
841
|
};
|
|
221
842
|
|
|
222
843
|
// src/jest.ts
|
|
223
|
-
import { promises as
|
|
844
|
+
import { promises as fs3 } from "fs";
|
|
224
845
|
import { spawn } from "child_process";
|
|
225
846
|
import { createHash } from "crypto";
|
|
226
|
-
import
|
|
847
|
+
import path3 from "path";
|
|
227
848
|
import { tmpdir } from "os";
|
|
228
|
-
import
|
|
849
|
+
import process3 from "process";
|
|
229
850
|
import { fileURLToPath, pathToFileURL } from "url";
|
|
230
851
|
var CHILD_OPTIONS_ENV_VAR = "POSTGRES_MEMORY_SERVER_CHILD_OPTIONS_B64";
|
|
231
852
|
var CHILD_SETUP_TIMEOUT_MS = 12e4;
|
|
232
853
|
var CHILD_SHUTDOWN_TIMEOUT_MS = 3e4;
|
|
233
854
|
var POLL_INTERVAL_MS = 100;
|
|
234
855
|
var DEFAULT_JEST_ENV_VAR_NAME = "DATABASE_URL";
|
|
235
|
-
var DEFAULT_JEST_STATE_FILE =
|
|
856
|
+
var DEFAULT_JEST_STATE_FILE = path3.join(
|
|
236
857
|
tmpdir(),
|
|
237
|
-
`postgres-memory-server-jest-${createHash("sha256").update(
|
|
858
|
+
`postgres-memory-server-jest-${createHash("sha256").update(process3.cwd()).digest("hex").slice(0, 12)}.json`
|
|
238
859
|
);
|
|
239
860
|
function getChildScript(childModuleUrl) {
|
|
240
861
|
return `
|
|
@@ -285,7 +906,7 @@ function createJestGlobalSetup(options = {}) {
|
|
|
285
906
|
...serverOptions
|
|
286
907
|
} = options;
|
|
287
908
|
const resolvedStateFilePath = resolveStateFilePath(stateFilePath);
|
|
288
|
-
await
|
|
909
|
+
await fs3.mkdir(path3.dirname(resolvedStateFilePath), { recursive: true });
|
|
289
910
|
const existingState = await readStateFile(resolvedStateFilePath);
|
|
290
911
|
if (existingState) {
|
|
291
912
|
await stopChildProcess(existingState.pid);
|
|
@@ -297,7 +918,7 @@ function createJestGlobalSetup(options = {}) {
|
|
|
297
918
|
envVarName,
|
|
298
919
|
payload
|
|
299
920
|
};
|
|
300
|
-
await
|
|
921
|
+
await fs3.writeFile(
|
|
301
922
|
resolvedStateFilePath,
|
|
302
923
|
JSON.stringify(state, null, 2),
|
|
303
924
|
"utf8"
|
|
@@ -312,15 +933,15 @@ function createJestGlobalTeardown(options = {}) {
|
|
|
312
933
|
return;
|
|
313
934
|
}
|
|
314
935
|
await stopChildProcess(state.pid);
|
|
315
|
-
await
|
|
936
|
+
await fs3.rm(resolvedStateFilePath, { force: true });
|
|
316
937
|
};
|
|
317
938
|
}
|
|
318
939
|
function resolveStateFilePath(stateFilePath) {
|
|
319
|
-
return stateFilePath ?
|
|
940
|
+
return stateFilePath ? path3.resolve(stateFilePath) : DEFAULT_JEST_STATE_FILE;
|
|
320
941
|
}
|
|
321
942
|
async function readStateFile(filePath) {
|
|
322
943
|
try {
|
|
323
|
-
const content = await
|
|
944
|
+
const content = await fs3.readFile(filePath, "utf8");
|
|
324
945
|
return JSON.parse(content);
|
|
325
946
|
} catch (error) {
|
|
326
947
|
if (isMissingFileError(error)) {
|
|
@@ -330,24 +951,24 @@ async function readStateFile(filePath) {
|
|
|
330
951
|
}
|
|
331
952
|
}
|
|
332
953
|
function applyConnectionEnvironment(envVarName, payload) {
|
|
333
|
-
|
|
334
|
-
|
|
335
|
-
|
|
336
|
-
|
|
337
|
-
|
|
338
|
-
|
|
339
|
-
|
|
340
|
-
|
|
954
|
+
process3.env[envVarName] = payload.uri;
|
|
955
|
+
process3.env.POSTGRES_MEMORY_SERVER_URI = payload.uri;
|
|
956
|
+
process3.env.POSTGRES_MEMORY_SERVER_HOST = payload.host;
|
|
957
|
+
process3.env.POSTGRES_MEMORY_SERVER_PORT = String(payload.port);
|
|
958
|
+
process3.env.POSTGRES_MEMORY_SERVER_DATABASE = payload.database;
|
|
959
|
+
process3.env.POSTGRES_MEMORY_SERVER_USERNAME = payload.username;
|
|
960
|
+
process3.env.POSTGRES_MEMORY_SERVER_PASSWORD = payload.password;
|
|
961
|
+
process3.env.POSTGRES_MEMORY_SERVER_IMAGE = payload.image;
|
|
341
962
|
}
|
|
342
963
|
async function startChildProcess(options) {
|
|
343
964
|
const childModuleUrl = await resolveChildModuleUrl();
|
|
344
965
|
return new Promise((resolve, reject) => {
|
|
345
966
|
const child = spawn(
|
|
346
|
-
|
|
967
|
+
process3.execPath,
|
|
347
968
|
["--input-type=module", "--eval", getChildScript(childModuleUrl)],
|
|
348
969
|
{
|
|
349
970
|
env: {
|
|
350
|
-
...
|
|
971
|
+
...process3.env,
|
|
351
972
|
[CHILD_OPTIONS_ENV_VAR]: Buffer.from(
|
|
352
973
|
JSON.stringify(options),
|
|
353
974
|
"utf8"
|
|
@@ -427,10 +1048,10 @@ async function startChildProcess(options) {
|
|
|
427
1048
|
}
|
|
428
1049
|
async function resolveChildModuleUrl() {
|
|
429
1050
|
const currentFilePath = fileURLToPath(import.meta.url);
|
|
430
|
-
const currentDirectoryPath =
|
|
431
|
-
const distEntryPath =
|
|
1051
|
+
const currentDirectoryPath = path3.dirname(currentFilePath);
|
|
1052
|
+
const distEntryPath = path3.resolve(currentDirectoryPath, "../dist/index.js");
|
|
432
1053
|
try {
|
|
433
|
-
await
|
|
1054
|
+
await fs3.access(distEntryPath);
|
|
434
1055
|
} catch (error) {
|
|
435
1056
|
if (isMissingFileError(error)) {
|
|
436
1057
|
throw new Error(
|
|
@@ -443,7 +1064,7 @@ async function resolveChildModuleUrl() {
|
|
|
443
1064
|
}
|
|
444
1065
|
async function stopChildProcess(pid) {
|
|
445
1066
|
try {
|
|
446
|
-
|
|
1067
|
+
process3.kill(pid, "SIGTERM");
|
|
447
1068
|
} catch (error) {
|
|
448
1069
|
if (isMissingProcessError(error)) {
|
|
449
1070
|
return;
|
|
@@ -454,7 +1075,7 @@ async function stopChildProcess(pid) {
|
|
|
454
1075
|
while (Date.now() < deadline) {
|
|
455
1076
|
await sleep(POLL_INTERVAL_MS);
|
|
456
1077
|
try {
|
|
457
|
-
|
|
1078
|
+
process3.kill(pid, 0);
|
|
458
1079
|
} catch (error) {
|
|
459
1080
|
if (isMissingProcessError(error)) {
|
|
460
1081
|
return;
|
|
@@ -483,10 +1104,12 @@ function isNodeErrorWithCode(error, code) {
|
|
|
483
1104
|
export {
|
|
484
1105
|
DEFAULT_JEST_ENV_VAR_NAME,
|
|
485
1106
|
DEFAULT_JEST_STATE_FILE,
|
|
1107
|
+
DEFAULT_PARADEDB_EXT_VERSION,
|
|
486
1108
|
DEFAULT_PARADEDB_IMAGE,
|
|
487
1109
|
DEFAULT_PARADEDB_VERSION,
|
|
488
1110
|
DEFAULT_POSTGRES_IMAGE,
|
|
489
1111
|
DEFAULT_POSTGRES_VERSION,
|
|
1112
|
+
ExtensionInstallError,
|
|
490
1113
|
PARADEDB_IMAGE_REPOSITORY,
|
|
491
1114
|
POSTGRES_IMAGE_REPOSITORY,
|
|
492
1115
|
PostgresMemoryServer,
|