postgresai 0.14.0-dev.7 → 0.14.0-dev.70
This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
- package/README.md +161 -61
- package/bin/postgres-ai.ts +1957 -404
- package/bun.lock +258 -0
- package/bunfig.toml +20 -0
- package/dist/bin/postgres-ai.js +29351 -1576
- package/dist/sql/01.role.sql +16 -0
- package/dist/sql/02.permissions.sql +37 -0
- package/dist/sql/03.optional_rds.sql +6 -0
- package/dist/sql/04.optional_self_managed.sql +8 -0
- package/dist/sql/05.helpers.sql +439 -0
- package/dist/sql/sql/01.role.sql +16 -0
- package/dist/sql/sql/02.permissions.sql +37 -0
- package/dist/sql/sql/03.optional_rds.sql +6 -0
- package/dist/sql/sql/04.optional_self_managed.sql +8 -0
- package/dist/sql/sql/05.helpers.sql +439 -0
- package/lib/auth-server.ts +124 -106
- package/lib/checkup-api.ts +386 -0
- package/lib/checkup.ts +1396 -0
- package/lib/config.ts +6 -3
- package/lib/init.ts +512 -156
- package/lib/issues.ts +400 -191
- package/lib/mcp-server.ts +213 -90
- package/lib/metrics-embedded.ts +79 -0
- package/lib/metrics-loader.ts +127 -0
- package/lib/supabase.ts +769 -0
- package/lib/util.ts +61 -0
- package/package.json +20 -10
- package/packages/postgres-ai/README.md +26 -0
- package/packages/postgres-ai/bin/postgres-ai.js +27 -0
- package/packages/postgres-ai/package.json +27 -0
- package/scripts/embed-metrics.ts +154 -0
- package/sql/01.role.sql +16 -0
- package/sql/02.permissions.sql +37 -0
- package/sql/03.optional_rds.sql +6 -0
- package/sql/04.optional_self_managed.sql +8 -0
- package/sql/05.helpers.sql +439 -0
- package/test/auth.test.ts +258 -0
- package/test/checkup.integration.test.ts +321 -0
- package/test/checkup.test.ts +1117 -0
- package/test/init.integration.test.ts +500 -0
- package/test/init.test.ts +527 -0
- package/test/issues.cli.test.ts +314 -0
- package/test/issues.test.ts +456 -0
- package/test/mcp-server.test.ts +988 -0
- package/test/schema-validation.test.ts +81 -0
- package/test/supabase.test.ts +568 -0
- package/test/test-utils.ts +128 -0
- package/tsconfig.json +12 -20
- package/dist/bin/postgres-ai.d.ts +0 -3
- package/dist/bin/postgres-ai.d.ts.map +0 -1
- package/dist/bin/postgres-ai.js.map +0 -1
- package/dist/lib/auth-server.d.ts +0 -31
- package/dist/lib/auth-server.d.ts.map +0 -1
- package/dist/lib/auth-server.js +0 -263
- package/dist/lib/auth-server.js.map +0 -1
- package/dist/lib/config.d.ts +0 -45
- package/dist/lib/config.d.ts.map +0 -1
- package/dist/lib/config.js +0 -181
- package/dist/lib/config.js.map +0 -1
- package/dist/lib/init.d.ts +0 -61
- package/dist/lib/init.d.ts.map +0 -1
- package/dist/lib/init.js +0 -359
- package/dist/lib/init.js.map +0 -1
- package/dist/lib/issues.d.ts +0 -75
- package/dist/lib/issues.d.ts.map +0 -1
- package/dist/lib/issues.js +0 -336
- package/dist/lib/issues.js.map +0 -1
- package/dist/lib/mcp-server.d.ts +0 -9
- package/dist/lib/mcp-server.d.ts.map +0 -1
- package/dist/lib/mcp-server.js +0 -168
- package/dist/lib/mcp-server.js.map +0 -1
- package/dist/lib/pkce.d.ts +0 -32
- package/dist/lib/pkce.d.ts.map +0 -1
- package/dist/lib/pkce.js +0 -101
- package/dist/lib/pkce.js.map +0 -1
- package/dist/lib/util.d.ts +0 -27
- package/dist/lib/util.d.ts.map +0 -1
- package/dist/lib/util.js +0 -46
- package/dist/lib/util.js.map +0 -1
- package/dist/package.json +0 -46
- package/test/init.integration.test.cjs +0 -269
- package/test/init.test.cjs +0 -69
package/test/checkup.integration.test.ts
@@ -0,0 +1,321 @@
+/**
+ * Integration tests for checkup command (express mode)
+ * Validates that CLI-generated reports match JSON schemas used by the Python reporter.
+ * This ensures compatibility between "express" and "full" (monitoring) modes.
+ */
+import { describe, test, expect, afterAll, beforeAll } from "bun:test";
+import * as fs from "fs";
+import * as os from "os";
+import * as path from "path";
+import * as net from "net";
+import { Client } from "pg";
+import { resolve } from "path";
+import { readFileSync } from "fs";
+import Ajv2020 from "ajv/dist/2020";
+
+import * as checkup from "../lib/checkup";
+
+const ajv = new Ajv2020({ allErrors: true, strict: false });
+const schemasDir = resolve(import.meta.dir, "../../reporter/schemas");
+
+function findOnPath(cmd: string): string | null {
+  const result = Bun.spawnSync(["sh", "-c", `command -v ${cmd}`]);
+  if (result.exitCode === 0) {
+    return new TextDecoder().decode(result.stdout).trim();
+  }
+  return null;
+}
+
+function findPgBin(cmd: string): string | null {
+  const p = findOnPath(cmd);
+  if (p) return p;
+  const probe = Bun.spawnSync([
+    "sh",
+    "-c",
+    `ls -1 /usr/lib/postgresql/*/bin/${cmd} 2>/dev/null | head -n 1 || true`,
+  ]);
+  const out = new TextDecoder().decode(probe.stdout).trim();
+  if (out) return out;
+  return null;
+}
+
+function havePostgresBinaries(): boolean {
+  return !!(findPgBin("initdb") && findPgBin("postgres"));
+}
+
+function isRunningAsRoot(): boolean {
+  return process.getuid?.() === 0;
+}
+
+async function getFreePort(): Promise<number> {
+  return new Promise((resolve, reject) => {
+    const srv = net.createServer();
+    srv.listen(0, "127.0.0.1", () => {
+      const addr = srv.address() as net.AddressInfo;
+      srv.close((err) => {
+        if (err) return reject(err);
+        resolve(addr.port);
+      });
+    });
+    srv.on("error", reject);
+  });
+}
+
+async function waitFor<T>(
+  fn: () => Promise<T>,
+  { timeoutMs = 10000, intervalMs = 100 } = {}
+): Promise<T> {
+  const start = Date.now();
+  while (true) {
+    try {
+      return await fn();
+    } catch (e) {
+      if (Date.now() - start > timeoutMs) throw e;
+      await new Promise((r) => setTimeout(r, intervalMs));
+    }
+  }
+}
+
+interface TempPostgres {
+  port: number;
+  socketDir: string;
+  cleanup: () => Promise<void>;
+  connect: (database?: string) => Promise<Client>;
+}
+
+async function createTempPostgres(): Promise<TempPostgres> {
+  const tmpRoot = fs.mkdtempSync(path.join(os.tmpdir(), "postgresai-checkup-"));
+  const dataDir = path.join(tmpRoot, "data");
+  const socketDir = path.join(tmpRoot, "sock");
+  fs.mkdirSync(socketDir, { recursive: true });
+
+  const initdb = findPgBin("initdb");
+  const postgresBin = findPgBin("postgres");
+  if (!initdb || !postgresBin) {
+    throw new Error("PostgreSQL binaries not found");
+  }
+
+  const init = Bun.spawnSync([initdb, "-D", dataDir, "-U", "postgres", "-A", "trust"]);
+  if (init.exitCode !== 0) {
+    throw new Error(new TextDecoder().decode(init.stderr) || new TextDecoder().decode(init.stdout));
+  }
+
+  const hbaPath = path.join(dataDir, "pg_hba.conf");
+  fs.appendFileSync(hbaPath, "\nlocal all all trust\n", "utf8");
+
+  const port = await getFreePort();
+  const postgresProc = Bun.spawn(
+    [postgresBin, "-D", dataDir, "-k", socketDir, "-h", "127.0.0.1", "-p", String(port)],
+    { stdio: ["ignore", "pipe", "pipe"] }
+  );
+
+  const cleanup = async () => {
+    postgresProc.kill("SIGTERM");
+    try {
+      // 30s timeout to handle slower CI environments gracefully
+      await waitFor(
+        async () => {
+          if (postgresProc.exitCode === null) throw new Error("still running");
+        },
+        { timeoutMs: 30000, intervalMs: 100 }
+      );
+    } catch {
+      postgresProc.kill("SIGKILL");
+    }
+    fs.rmSync(tmpRoot, { recursive: true, force: true });
+  };
+
+  const connect = async (database = "postgres"): Promise<Client> => {
+    const c = new Client({ host: socketDir, port, user: "postgres", database });
+    await c.connect();
+    return c;
+  };
+
+  // Wait for Postgres to start (30s timeout for slower CI environments)
+  await waitFor(async () => {
+    const c = await connect();
+    await c.end();
+  }, { timeoutMs: 30000, intervalMs: 100 });
+
+  return { port, socketDir, cleanup, connect };
+}
+
+function validateAgainstSchema(report: any, checkId: string): void {
+  const schemaPath = resolve(schemasDir, `${checkId}.schema.json`);
+  if (!fs.existsSync(schemaPath)) {
+    throw new Error(`Schema not found: ${schemaPath}`);
+  }
+  const schema = JSON.parse(readFileSync(schemaPath, "utf8"));
+  const validate = ajv.compile(schema);
+  const valid = validate(report);
+  if (!valid) {
+    const errors = validate.errors?.map(e => `${e.instancePath}: ${e.message}`).join(", ");
+    throw new Error(`${checkId} schema validation failed: ${errors}`);
+  }
+}
+
+// Skip tests if PostgreSQL binaries are not available
+const skipReason = !havePostgresBinaries()
+  ? "PostgreSQL binaries not available"
+  : isRunningAsRoot()
+  ? "Cannot run as root (PostgreSQL refuses)"
+  : null;
+
+// In CI, warn if integration tests are being skipped (helps catch configuration issues)
+const isCI = process.env.CI === "true" || process.env.GITLAB_CI === "true";
+if (skipReason && isCI) {
+  console.warn(`[CI WARNING] Integration tests skipped: ${skipReason}`);
+  console.warn("This may indicate a CI configuration issue - PostgreSQL binaries should be available.");
+}
+
+describe.skipIf(!!skipReason)("checkup integration: express mode schema compatibility", () => {
+  let pg: TempPostgres;
+  let client: Client;
+
+  // 60s timeout for hooks - PostgreSQL startup can take 30s+ in slow CI
+  beforeAll(async () => {
+    pg = await createTempPostgres();
+    client = await pg.connect();
+  }, { timeout: 60000 });
+
+  afterAll(async () => {
+    if (client) await client.end();
+    if (pg) await pg.cleanup();
+  }, { timeout: 60000 });
+
+  // Test all checks supported by express mode
+  const expressChecks = Object.keys(checkup.CHECK_INFO);
+
+  for (const checkId of expressChecks) {
+    test(`${checkId} report validates against shared schema`, async () => {
+      const generator = checkup.REPORT_GENERATORS[checkId];
+      expect(generator).toBeDefined();
+
+      const report = await generator(client, "test-node");
+
+      // Validate basic report structure (matching schema requirements)
+      expect(report).toHaveProperty("checkId", checkId);
+      expect(report).toHaveProperty("checkTitle");
+      expect(report).toHaveProperty("timestamptz");
+      expect(report).toHaveProperty("nodes");
+      expect(report).toHaveProperty("results");
+      expect(report.results).toHaveProperty("test-node");
+
+      // Validate against JSON schema (same schema used by Python reporter)
+      validateAgainstSchema(report, checkId);
+    });
+  }
+
+  test("generateAllReports produces valid reports for all checks", async () => {
+    const reports = await checkup.generateAllReports(client, "test-node");
+
+    expect(Object.keys(reports).length).toBe(expressChecks.length);
+
+    for (const [checkId, report] of Object.entries(reports)) {
+      validateAgainstSchema(report, checkId);
+    }
+  });
+
+  test("report structure matches Python reporter format", async () => {
+    // Generate A003 (settings) report and verify structure matches what Python produces
+    const report = await checkup.generateA003(client, "test-node");
+
+    // Check required fields match Python reporter output structure (per schema)
+    expect(report).toHaveProperty("checkId", "A003");
+    expect(report).toHaveProperty("checkTitle", "Postgres settings");
+    expect(report).toHaveProperty("timestamptz");
+    expect(report).toHaveProperty("nodes");
+    expect(report.nodes).toHaveProperty("primary");
+    expect(report.nodes).toHaveProperty("standbys");
+    expect(report).toHaveProperty("results");
+
+    // Results should have node-specific data
+    const nodeResult = report.results["test-node"];
+    expect(nodeResult).toHaveProperty("data");
+
+    // A003 should have settings as keyed object
+    expect(typeof nodeResult.data).toBe("object");
+
+    // Check postgres_version if present
+    if (nodeResult.postgres_version) {
+      expect(nodeResult.postgres_version).toHaveProperty("version");
+      expect(nodeResult.postgres_version).toHaveProperty("server_version_num");
+      expect(nodeResult.postgres_version).toHaveProperty("server_major_ver");
+      expect(nodeResult.postgres_version).toHaveProperty("server_minor_ver");
+    }
+  });
+
+  test("H001 (invalid indexes) has correct data structure", async () => {
+    const report = await checkup.generateH001(client, "test-node");
+    validateAgainstSchema(report, "H001");
+
+    const nodeResult = report.results["test-node"];
+    expect(nodeResult).toHaveProperty("data");
+    // data should be an object with indexes (may be empty on fresh DB)
+    expect(typeof nodeResult.data).toBe("object");
+  });
+
+  test("H001 returns index_definition with CREATE INDEX statement", async () => {
+    // Create a table and an index, then mark the index as invalid
+    await client.query(`
+      CREATE TABLE IF NOT EXISTS test_invalid_idx_table (id serial PRIMARY KEY, value text);
+      CREATE INDEX IF NOT EXISTS test_invalid_idx ON test_invalid_idx_table(value);
+    `);
+
+    // Mark the index as invalid (simulating a failed CONCURRENTLY build)
+    await client.query(`
+      UPDATE pg_index SET indisvalid = false
+      WHERE indexrelid = 'test_invalid_idx'::regclass;
+    `);
+
+    try {
+      const report = await checkup.generateH001(client, "test-node");
+      validateAgainstSchema(report, "H001");
+
+      const nodeResult = report.results["test-node"];
+      const dbName = Object.keys(nodeResult.data)[0];
+      expect(dbName).toBeTruthy();
+
+      const dbData = nodeResult.data[dbName] as any;
+      expect(dbData.invalid_indexes).toBeDefined();
+      expect(dbData.invalid_indexes.length).toBeGreaterThan(0);
+
+      // Find our test index
+      const testIndex = dbData.invalid_indexes.find(
+        (idx: any) => idx.index_name === "test_invalid_idx"
+      );
+      expect(testIndex).toBeDefined();
+
+      // Verify index_definition contains the actual CREATE INDEX statement
+      expect(testIndex.index_definition).toMatch(/^CREATE INDEX/);
+      expect(testIndex.index_definition).toContain("test_invalid_idx");
+      expect(testIndex.index_definition).toContain("test_invalid_idx_table");
+    } finally {
+      // Cleanup: restore the index and drop test objects
+      await client.query(`
+        UPDATE pg_index SET indisvalid = true
+        WHERE indexrelid = 'test_invalid_idx'::regclass;
+        DROP INDEX IF EXISTS test_invalid_idx;
+        DROP TABLE IF EXISTS test_invalid_idx_table;
+      `);
+    }
+  });
+
+  test("H002 (unused indexes) has correct data structure", async () => {
+    const report = await checkup.generateH002(client, "test-node");
+    validateAgainstSchema(report, "H002");
+
+    const nodeResult = report.results["test-node"];
+    expect(nodeResult).toHaveProperty("data");
+    expect(typeof nodeResult.data).toBe("object");
+  });
+
+  test("H004 (redundant indexes) has correct data structure", async () => {
+    const report = await checkup.generateH004(client, "test-node");
+    validateAgainstSchema(report, "H004");
+
+    const nodeResult = report.results["test-node"];
+    expect(nodeResult).toHaveProperty("data");
+    expect(typeof nodeResult.data).toBe("object");
+  });
+});
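
For context, here is a minimal standalone sketch of the Ajv 2020-12 validation pattern that the diff's validateAgainstSchema helper applies. The inline reportSchema and sample report below are hypothetical stand-ins for illustration; the real suite loads reporter/schemas/<checkId>.schema.json from disk and validates reports produced by lib/checkup.ts.

// Sketch of the schema-validation pattern used by the new integration tests.
// Assumptions: the inline schema and sample report are simplified, hypothetical
// versions of the reporter's shared schemas and express-mode report output.
import Ajv2020 from "ajv/dist/2020";

const ajv = new Ajv2020({ allErrors: true, strict: false });

// Hypothetical simplified schema covering the fields the tests assert on.
const reportSchema = {
  type: "object",
  required: ["checkId", "checkTitle", "timestamptz", "nodes", "results"],
  properties: {
    checkId: { type: "string" },
    checkTitle: { type: "string" },
    timestamptz: { type: "string" },
    nodes: { type: "object", required: ["primary", "standbys"] },
    results: { type: "object" },
  },
};

// Compile once, then reuse the validator for every report.
const validate = ajv.compile(reportSchema);

// Sample report shaped like the express-mode output asserted in the tests.
const report = {
  checkId: "A003",
  checkTitle: "Postgres settings",
  timestamptz: new Date().toISOString(),
  nodes: { primary: "test-node", standbys: [] },
  results: { "test-node": { data: {} } },
};

if (!validate(report)) {
  // Same error formatting the test helper uses.
  const errors = validate.errors
    ?.map((e) => `${e.instancePath}: ${e.message}`)
    .join(", ");
  throw new Error(`A003 schema validation failed: ${errors}`);
}
console.log("report matches schema");

Because the same *.schema.json files drive both this CLI-side validation and the Python reporter, a report that passes here should be accepted by the "full" (monitoring) pipeline as well, which is the compatibility guarantee the test file's header comment describes.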