postgresai 0.15.0-dev.6 → 0.15.0-dev.9

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -5,8 +5,29 @@ import type { Client } from "pg";
5
5
  // Import from source directly since we're using Bun
6
6
  import * as checkup from "../lib/checkup";
7
7
  import * as api from "../lib/checkup-api";
8
+ import * as metricsLoader from "../lib/metrics-loader";
8
9
  import { createMockClient } from "./test-utils";
9
10
 
11
+ const SUPPORTED_PG_VERSIONS = [
12
+ { major: 13, minor: 16, versionNum: "130016" },
13
+ { major: 14, minor: 12, versionNum: "140012" },
14
+ { major: 15, minor: 7, versionNum: "150007" },
15
+ { major: 16, minor: 3, versionNum: "160003" },
16
+ { major: 17, minor: 2, versionNum: "170002" },
17
+ { major: 18, minor: 0, versionNum: "180000" },
18
+ ];
19
+ const SUPPORTED_PG_MAJOR_VERSIONS = SUPPORTED_PG_VERSIONS.map(({ major }) => major);
20
+ const SECONDS_PER_DAY = 86400;
21
+ const STATS_RESET_EPOCH = Date.UTC(2024, 0, 1) / 1000;
22
+ const DAYS_SINCE_RESET = 30;
23
+ const STATS_RESET_SECONDS_SINCE_RESET = DAYS_SINCE_RESET * SECONDS_PER_DAY;
24
+ const TEST_NOW_EPOCH = STATS_RESET_EPOCH + STATS_RESET_SECONDS_SINCE_RESET;
25
+ const POSTMASTER_STARTUP_EPOCH = TEST_NOW_EPOCH - STATS_RESET_SECONDS_SINCE_RESET;
26
+ const STATS_RESET_TIME = new Date(STATS_RESET_EPOCH * 1000).toISOString();
27
+ const formatPostgresTimestamp = (epochSeconds: number) =>
28
+ new Date(epochSeconds * 1000).toISOString().replace("T", " ").replace(".000Z", "+00");
29
+ const POSTMASTER_STARTUP_TIME = formatPostgresTimestamp(POSTMASTER_STARTUP_EPOCH);
30
+ const POSTMASTER_UPTIME_SECONDS = TEST_NOW_EPOCH - POSTMASTER_STARTUP_EPOCH;
10
31
 
11
32
  function runCli(args: string[], env: Record<string, string> = {}) {
12
33
  const cliPath = resolve(import.meta.dir, "..", "bin", "postgres-ai.ts");
@@ -41,6 +62,24 @@ describe("parseVersionNum", () => {
41
62
  expect(result.minor).toBe("12");
42
63
  });
43
64
 
65
+ test("parses PG 13.16 version number", () => {
66
+ const result = checkup.parseVersionNum("130016");
67
+ expect(result.major).toBe("13");
68
+ expect(result.minor).toBe("16");
69
+ });
70
+
71
+ test("parses PG 17.2 version number", () => {
72
+ const result = checkup.parseVersionNum("170002");
73
+ expect(result.major).toBe("17");
74
+ expect(result.minor).toBe("2");
75
+ });
76
+
77
+ test("parses PG 18.0 version number", () => {
78
+ const result = checkup.parseVersionNum("180000");
79
+ expect(result.major).toBe("18");
80
+ expect(result.minor).toBe("0");
81
+ });
82
+
44
83
  test("handles empty string", () => {
45
84
  const result = checkup.parseVersionNum("");
46
85
  expect(result.major).toBe("");
@@ -86,8 +125,7 @@ describe("createBaseReport", () => {
86
125
 
87
126
  // Tests for CHECK_INFO
88
127
  describe("CHECK_INFO and REPORT_GENERATORS", () => {
89
- // Express-mode checks that have generators
90
- const expressCheckIds = ["A002", "A003", "A004", "A007", "A013", "D001", "D004", "F001", "F004", "F005", "G001", "G003", "H001", "H002", "H004"];
128
+ const expressCheckIds = Object.keys(checkup.REPORT_GENERATORS);
91
129
 
92
130
  test("CHECK_INFO contains all express-mode checks", () => {
93
131
  for (const checkId of expressCheckIds) {
@@ -152,6 +190,90 @@ describe("formatBytes", () => {
152
190
  });
153
191
  });
154
192
 
193
+ function createI001MockClient(options: {
194
+ versionRows?: any[];
195
+ ioRows?: any[];
196
+ ioError?: boolean;
197
+ resetRows?: any[];
198
+ resetError?: boolean;
199
+ } = {}) {
200
+ const queries: string[] = [];
201
+ const {
202
+ versionRows = [
203
+ { name: "server_version", setting: "16.3" },
204
+ { name: "server_version_num", setting: "160003" },
205
+ ],
206
+ ioRows = [],
207
+ ioError = false,
208
+ resetRows = [{ stats_reset_s: "86400" }],
209
+ resetError = false,
210
+ } = options;
211
+
212
+ return {
213
+ queries,
214
+ query: async (sql: string) => {
215
+ queries.push(sql);
216
+ if (sql.includes("server_version") && sql.includes("server_version_num") && sql.includes("pg_settings")) {
217
+ return { rows: versionRows };
218
+ }
219
+ if (sql.includes("pg_stat_io") && sql.includes("rollup")) {
220
+ if (ioError) {
221
+ throw new Error("I/O statistics unavailable");
222
+ }
223
+ return { rows: ioRows };
224
+ }
225
+ if (sql.includes("pg_stat_io") && sql.includes("stats_reset_s")) {
226
+ if (resetError) {
227
+ throw new Error("stats reset unavailable");
228
+ }
229
+ return { rows: resetRows };
230
+ }
231
+ throw new Error(`Unexpected query: ${sql}`);
232
+ },
233
+ };
234
+ }
235
+
236
+ const i001Rows = [
237
+ {
238
+ tag_backend_type: "client backend",
239
+ reads: "1000",
240
+ read_bytes_mb: "100",
241
+ read_time_ms: "500",
242
+ writes: "200",
243
+ write_bytes_mb: "50",
244
+ write_time_ms: "100",
245
+ writebacks: "20",
246
+ writeback_bytes_mb: "2",
247
+ writeback_time_ms: "4",
248
+ fsyncs: "1",
249
+ fsync_time_ms: "3",
250
+ extends: "6",
251
+ extend_bytes_mb: "8",
252
+ hits: "5000",
253
+ evictions: "3",
254
+ reuses: "7",
255
+ },
256
+ {
257
+ tag_backend_type: "total",
258
+ reads: "1500",
259
+ read_bytes_mb: "150",
260
+ read_time_ms: "750",
261
+ writes: "300",
262
+ write_bytes_mb: "75",
263
+ write_time_ms: "150",
264
+ writebacks: "30",
265
+ writeback_bytes_mb: "3",
266
+ writeback_time_ms: "6",
267
+ fsyncs: "2",
268
+ fsync_time_ms: "5",
269
+ extends: "9",
270
+ extend_bytes_mb: "12",
271
+ hits: "7500",
272
+ evictions: "4",
273
+ reuses: "11",
274
+ },
275
+ ];
276
+
155
277
  // Mock client tests for report generators
156
278
  describe("Report generators with mock client", () => {
157
279
  test("getPostgresVersion extracts version info", async () => {
@@ -169,6 +291,208 @@ describe("Report generators with mock client", () => {
169
291
  expect(version.server_minor_ver).toBe("3");
170
292
  });
171
293
 
294
+ test("getIOStatistics returns empty for PostgreSQL versions before 16", async () => {
295
+ const mockClient = createI001MockClient({ ioRows: i001Rows });
296
+
297
+ const defaultStats = await checkup.getIOStatistics(mockClient as any);
298
+ const stats = await checkup.getIOStatistics(mockClient as any, 15);
299
+
300
+ expect(defaultStats).toEqual([]);
301
+ expect(stats).toEqual([]);
302
+ expect(mockClient.queries).toEqual([]);
303
+ });
304
+
305
+ test("getIOStatistics skips placeholder SQL without querying", async () => {
306
+ const mockClient = createI001MockClient({ ioRows: i001Rows });
307
+
308
+ const stats = await checkup.getIOStatistics(mockClient as any, 16, "; -- pg_stat_io unavailable");
309
+ const barePlaceholderStats = await checkup.getIOStatistics(mockClient as any, 16, ";");
310
+
311
+ const whitespacePlaceholderStats = await checkup.getIOStatistics(mockClient as any, 16, " ; -- whitespace prefix");
312
+
313
+ expect(stats).toEqual([]);
314
+ expect(barePlaceholderStats).toEqual([]);
315
+ expect(whitespacePlaceholderStats).toEqual([]);
316
+ expect(mockClient.queries).toEqual([]);
317
+ });
318
+
319
+ test("getIOStatistics returns empty when the SQL result has no rows", async () => {
320
+ const mockClient = createI001MockClient({ ioRows: [] });
321
+
322
+ const stats = await checkup.getIOStatistics(mockClient as any, 16);
323
+
324
+ expect(stats).toEqual([]);
325
+ });
326
+
327
+ test("getIOStatistics catches primary query errors", async () => {
328
+ const mockClient = createI001MockClient({ ioError: true, resetRows: [] });
329
+
330
+ const stats = await checkup.getIOStatistics(mockClient as any, 16);
331
+ const report = await checkup.REPORT_GENERATORS.I001(mockClient as any, "node-01");
332
+
333
+ expect(stats).toEqual([]);
334
+ expect(report.results["node-01"].data).toEqual({
335
+ available: false,
336
+ by_backend_type: [],
337
+ analysis: {
338
+ total_read_mb: 0,
339
+ total_write_mb: 0,
340
+ total_io_time_ms: 0,
341
+ read_hit_ratio_pct: 0,
342
+ avg_read_time_ms: null,
343
+ avg_write_time_ms: null,
344
+ },
345
+ stats_reset_s: null,
346
+ });
347
+ });
348
+
349
+ test("getIOStatistics maps backend rows including extension bytes", async () => {
350
+ const mockClient = createI001MockClient({ ioRows: i001Rows });
351
+
352
+ const stats = await checkup.getIOStatistics(mockClient as any, 16);
353
+
354
+ expect(stats).toHaveLength(2);
355
+ expect(stats[0]).toMatchObject({
356
+ backend_type: "client backend",
357
+ reads: 1000,
358
+ read_bytes_mb: 100,
359
+ writes: 200,
360
+ write_bytes_mb: 50,
361
+ writebacks: 20,
362
+ writeback_bytes_mb: 2,
363
+ fsyncs: 1,
364
+ extends: 6,
365
+ extend_bytes_mb: 8,
366
+ hits: 5000,
367
+ evictions: 3,
368
+ reuses: 7,
369
+ });
370
+ });
371
+
372
+ test("generateI001 keeps report available when stats_reset query fails", async () => {
373
+ const mockClient = createI001MockClient({ ioRows: i001Rows, resetError: true });
374
+
375
+ const report = await checkup.REPORT_GENERATORS.I001(mockClient as any, "node-01");
376
+ const data = report.results["node-01"].data;
377
+
378
+ expect(report.checkId).toBe("I001");
379
+ expect(data.available).toBe(true);
380
+ expect(data.stats_reset_s).toBeNull();
381
+ expect(data.by_backend_type.map((row: any) => row.backend_type)).toEqual(["total", "client backend"]);
382
+ expect(data.by_backend_type[0].extend_bytes_mb).toBe(12);
383
+ expect(data.analysis.read_hit_ratio_pct).toBe(83.33);
384
+ });
385
+
386
+ test("generateI001 dispatches version-specific pg_stat_io SQL", async () => {
387
+ const pg16Client = createI001MockClient({ ioRows: i001Rows });
388
+ const pg18Client = createI001MockClient({
389
+ versionRows: [
390
+ { name: "server_version", setting: "18.0" },
391
+ { name: "server_version_num", setting: "180000" },
392
+ ],
393
+ ioRows: i001Rows,
394
+ });
395
+
396
+ await checkup.REPORT_GENERATORS.I001(pg16Client as any, "node-01");
397
+ await checkup.REPORT_GENERATORS.I001(pg18Client as any, "node-01");
398
+
399
+ const pg16MetricSql = pg16Client.queries.find((sql) => sql.includes("pg_stat_io") && sql.includes("rollup"));
400
+ const pg18MetricSql = pg18Client.queries.find((sql) => sql.includes("pg_stat_io") && sql.includes("rollup"));
401
+
402
+ expect(pg16MetricSql).toContain("sum(coalesce(reads, 0) * op_bytes)");
403
+ expect(pg16MetricSql).toContain("sum(coalesce(extends, 0) * op_bytes)");
404
+ expect(pg18MetricSql).toContain("sum(coalesce(read_bytes, 0))");
405
+ expect(pg18MetricSql).toContain("sum(coalesce(extend_bytes, 0))");
406
+ expect(pg18MetricSql).toContain("sum(coalesce(writebacks, 0) * coalesce(op_bytes, 0))");
407
+ });
408
+
409
+ test("generateI001 returns unavailable on PostgreSQL 16 when ioStats are empty", async () => {
410
+ const mockClient = createI001MockClient({ ioRows: [], resetRows: [] });
411
+
412
+ const report = await checkup.REPORT_GENERATORS.I001(mockClient as any, "node-01");
413
+ const data = report.results["node-01"].data;
414
+
415
+ expect(data.available).toBe(false);
416
+ expect(data.by_backend_type).toEqual([]);
417
+ expect(data.stats_reset_s).toBeNull();
418
+ expect(data.analysis).toEqual({
419
+ total_read_mb: 0,
420
+ total_write_mb: 0,
421
+ total_io_time_ms: 0,
422
+ read_hit_ratio_pct: 0,
423
+ avg_read_time_ms: null,
424
+ avg_write_time_ms: null,
425
+ });
426
+ });
427
+
428
+ test("generateI001 returns unavailable on PostgreSQL before 16 without querying pg_stat_io", async () => {
429
+ const mockClient = createI001MockClient({
430
+ versionRows: [
431
+ { name: "server_version", setting: "15.4" },
432
+ { name: "server_version_num", setting: "150004" },
433
+ ],
434
+ });
435
+
436
+ const report = await checkup.REPORT_GENERATORS.I001(mockClient as any, "node-01");
437
+ const data = report.results["node-01"].data;
438
+
439
+ expect(data.available).toBe(false);
440
+ expect(data.min_version_required).toBe("16");
441
+ expect(data.by_backend_type).toEqual([]);
442
+ expect(mockClient.queries.every((sql) => !sql.includes("pg_stat_io"))).toBe(true);
443
+ });
444
+
445
+ test("generateI001 handles unknown PostgreSQL version as unavailable", async () => {
446
+ const mockClient = createI001MockClient({
447
+ versionRows: [
448
+ { name: "server_version", setting: "unknown" },
449
+ { name: "server_version_num", setting: "unknown" },
450
+ ],
451
+ });
452
+
453
+ const report = await checkup.REPORT_GENERATORS.I001(mockClient as any, "node-01");
454
+ const data = report.results["node-01"].data;
455
+
456
+ expect(data.available).toBe(false);
457
+ expect(data.min_version_required).toBe("16");
458
+ expect(data.by_backend_type).toEqual([]);
459
+ });
460
+
461
+ test("generateI001 handles zero-request hit ratio and empty averages", async () => {
462
+ const mockClient = createI001MockClient({
463
+ ioRows: [{
464
+ tag_backend_type: "total",
465
+ reads: "0",
466
+ read_bytes_mb: "0",
467
+ read_time_ms: "0",
468
+ writes: "0",
469
+ write_bytes_mb: "0",
470
+ write_time_ms: "0",
471
+ writebacks: "0",
472
+ writeback_bytes_mb: "0",
473
+ writeback_time_ms: "0",
474
+ fsyncs: "0",
475
+ fsync_time_ms: "0",
476
+ extends: "0",
477
+ extend_bytes_mb: "0",
478
+ hits: "0",
479
+ evictions: "0",
480
+ reuses: "0",
481
+ }],
482
+ resetRows: [],
483
+ });
484
+
485
+ const report = await checkup.REPORT_GENERATORS.I001(mockClient as any, "node-01");
486
+ const data = report.results["node-01"].data;
487
+ const analysis = data.analysis;
488
+
489
+ expect(data.available).toBe(true);
490
+ expect(data.stats_reset_s).toBeNull();
491
+ expect(analysis.read_hit_ratio_pct).toBe(0);
492
+ expect(analysis.avg_read_time_ms).toBeNull();
493
+ expect(analysis.avg_write_time_ms).toBeNull();
494
+ });
495
+
172
496
  test("getSettings transforms rows to keyed object", async () => {
173
497
  const mockClient = createMockClient({
174
498
  settingsRows: [
@@ -1290,6 +1614,90 @@ describe("checkup-api", () => {
1290
1614
  }
1291
1615
  expect(attempts).toBe(2); // Should retry on ECONNRESET
1292
1616
  });
1617
+
1618
+ // Transport selection — pick http/https by URL protocol, but refuse HTTP
1619
+ // to non-loopback hosts unless CHECKUP_ALLOW_HTTP=1 is set (prevents
1620
+ // typo-driven plaintext API-key leaks like http://api.postgres.ai/...).
1621
+ describe("transport selection", () => {
1622
+ test("https URL does not trip the guard (network error expected)", async () => {
1623
+ let caught: Error | null = null;
1624
+ try {
1625
+ await api.createCheckupReport({
1626
+ apiKey: "dummy",
1627
+ apiBaseUrl: "https://127.0.0.1:1/api", // port 1 — connect refused
1628
+ project: "p",
1629
+ });
1630
+ } catch (e) {
1631
+ caught = e as Error;
1632
+ }
1633
+ expect(caught).not.toBeNull();
1634
+ expect(caught!.message).not.toMatch(/Refusing to send API key/);
1635
+ });
1636
+
1637
+ test("http on loopback does not trip the guard (network error expected)", async () => {
1638
+ // IPv6 loopback is written as `[::1]` in URLs; WHATWG URL preserves
1639
+ // the brackets in .hostname, so the guard must strip them before
1640
+ // matching the allowlist.
1641
+ for (const host of ["localhost", "127.0.0.1", "[::1]"]) {
1642
+ let caught: Error | null = null;
1643
+ try {
1644
+ await api.createCheckupReport({
1645
+ apiKey: "dummy",
1646
+ apiBaseUrl: `http://${host}:1/api`, // port 1 — connect refused
1647
+ project: "p",
1648
+ });
1649
+ } catch (e) {
1650
+ caught = e as Error;
1651
+ }
1652
+ expect(caught).not.toBeNull();
1653
+ expect(caught!.message).not.toMatch(/Refusing to send API key/);
1654
+ }
1655
+ });
1656
+
1657
+ test("http to non-loopback host is refused by the guard", async () => {
1658
+ const saved = process.env.CHECKUP_ALLOW_HTTP;
1659
+ delete process.env.CHECKUP_ALLOW_HTTP;
1660
+ try {
1661
+ let caught: Error | null = null;
1662
+ try {
1663
+ await api.createCheckupReport({
1664
+ apiKey: "dummy",
1665
+ apiBaseUrl: "http://example.com/api",
1666
+ project: "p",
1667
+ });
1668
+ } catch (e) {
1669
+ caught = e as Error;
1670
+ }
1671
+ expect(caught).not.toBeNull();
1672
+ expect(caught!.message).toMatch(/Refusing to send API key over plaintext HTTP/);
1673
+ expect(caught!.message).toMatch(/example\.com/);
1674
+ } finally {
1675
+ if (saved !== undefined) process.env.CHECKUP_ALLOW_HTTP = saved;
1676
+ }
1677
+ });
1678
+
1679
+ test("CHECKUP_ALLOW_HTTP=1 bypasses the guard for non-loopback hosts", async () => {
1680
+ const saved = process.env.CHECKUP_ALLOW_HTTP;
1681
+ process.env.CHECKUP_ALLOW_HTTP = "1";
1682
+ try {
1683
+ let caught: Error | null = null;
1684
+ try {
1685
+ await api.createCheckupReport({
1686
+ apiKey: "dummy",
1687
+ apiBaseUrl: "http://127.0.0.2:1/api", // non-loopback-match hostname, connect refused port
1688
+ project: "p",
1689
+ });
1690
+ } catch (e) {
1691
+ caught = e as Error;
1692
+ }
1693
+ expect(caught).not.toBeNull();
1694
+ expect(caught!.message).not.toMatch(/Refusing to send API key/);
1695
+ } finally {
1696
+ if (saved === undefined) delete process.env.CHECKUP_ALLOW_HTTP;
1697
+ else process.env.CHECKUP_ALLOW_HTTP = saved;
1698
+ }
1699
+ });
1700
+ });
1293
1701
  });
1294
1702
 
1295
1703
  // Tests for checkup-summary module
@@ -1822,4 +2230,882 @@ describe("checkup-summary", () => {
1822
2230
  });
1823
2231
  });
1824
2232
 
2233
+ // Postgres version compatibility tests (PG13-PG18)
2234
+ describe("Postgres version compatibility (PG13-PG18)", () => {
2235
+ /**
2236
+ * Version-matrix fixture invariants:
2237
+ * - version_num uses Postgres major * 10000 + minor encoding.
2238
+ * - shared_buffers 16384 * 8kB is 128 MiB.
2239
+ * - days_since_reset is derived by production code from the stats_reset metric's
2240
+ * seconds_since_reset field, not from Date.now().
2241
+ * - postmaster uptime, uptime text, and postmaster startup timestamp are derived
2242
+ * from TEST_NOW_EPOCH and POSTMASTER_STARTUP_EPOCH and must stay in sync.
2243
+ */
2244
+ const createVersionMockData = (major: number, minor: number) => ({
2245
+ versionRows: [
2246
+ { name: "server_version", setting: `${major}.${minor}` },
2247
+ { name: "server_version_num", setting: `${major}${String(minor).padStart(4, "0")}` },
2248
+ ],
2249
+ settingsRows: [
2250
+ {
2251
+ tag_setting_name: "shared_buffers",
2252
+ tag_setting_value: "16384",
2253
+ tag_unit: "8kB",
2254
+ tag_category: "Resource Usage / Memory",
2255
+ tag_vartype: "integer",
2256
+ is_default: 0,
2257
+ setting_normalized: "134217728",
2258
+ unit_normalized: "bytes",
2259
+ },
2260
+ {
2261
+ tag_setting_name: "autovacuum_vacuum_scale_factor",
2262
+ tag_setting_value: "0.2",
2263
+ tag_unit: "",
2264
+ tag_category: "Autovacuum",
2265
+ tag_vartype: "real",
2266
+ is_default: 0,
2267
+ setting_normalized: null,
2268
+ unit_normalized: null,
2269
+ },
2270
+ {
2271
+ tag_setting_name: "log_min_duration_statement",
2272
+ tag_setting_value: "-1",
2273
+ tag_unit: "ms",
2274
+ tag_category: "Reporting and Logging / When to Log",
2275
+ tag_vartype: "integer",
2276
+ is_default: 1,
2277
+ setting_normalized: null,
2278
+ unit_normalized: null,
2279
+ },
2280
+ {
2281
+ tag_setting_name: "deadlock_timeout",
2282
+ tag_setting_value: "1000",
2283
+ tag_unit: "ms",
2284
+ tag_category: "Lock Management",
2285
+ tag_vartype: "integer",
2286
+ is_default: 1,
2287
+ setting_normalized: null,
2288
+ unit_normalized: null,
2289
+ },
2290
+ ],
2291
+ databaseSizesRows: [{ datname: "postgres", size_bytes: "1073741824" }],
2292
+ dbStatsRows: [{
2293
+ numbackends: 5,
2294
+ xact_commit: 100,
2295
+ xact_rollback: 1,
2296
+ blks_read: 100,
2297
+ blks_hit: 900,
2298
+ tup_returned: 500,
2299
+ tup_fetched: 400,
2300
+ tup_inserted: 50,
2301
+ tup_updated: 30,
2302
+ tup_deleted: 10,
2303
+ deadlocks: 0,
2304
+ temp_files: 0,
2305
+ temp_bytes: 0,
2306
+ postmaster_uptime_s: POSTMASTER_UPTIME_SECONDS,
2307
+ }],
2308
+ connectionStatesRows: [{ state: "active", count: 2 }, { state: "idle", count: 3 }],
2309
+ uptimeRows: [{ start_time: new Date(POSTMASTER_STARTUP_EPOCH * 1000), uptime: `${DAYS_SINCE_RESET} days` }],
2310
+ invalidIndexesRows: [],
2311
+ unusedIndexesRows: [],
2312
+ redundantIndexesRows: [],
2313
+ });
2314
+
2315
+ const pgVersions = SUPPORTED_PG_VERSIONS;
2316
+
2317
+ // A003 surfaces the full pg_settings projection. A007 uses value, not setting,
2318
+ // and intentionally omits context/vartype because altered settings expose a smaller shape.
2319
+ const expectedSharedBuffersSetting = {
2320
+ setting: "16384",
2321
+ unit: "8kB",
2322
+ category: "Resource Usage / Memory",
2323
+ context: "",
2324
+ vartype: "integer",
2325
+ pretty_value: "128.00 MiB",
2326
+ };
2327
+
2328
+ const expectedSharedBuffersAlteredSetting = {
2329
+ value: "16384",
2330
+ unit: "8kB",
2331
+ category: "Resource Usage / Memory",
2332
+ pretty_value: "128.00 MiB",
2333
+ };
2334
+
2335
+ const expectedAutovacuumSetting = {
2336
+ setting: "0.2",
2337
+ unit: "",
2338
+ category: "Autovacuum",
2339
+ context: "",
2340
+ vartype: "real",
2341
+ pretty_value: "0.2",
2342
+ };
2343
+
2344
+ const expectedAutovacuumAlteredSetting = {
2345
+ value: "0.2",
2346
+ unit: "",
2347
+ category: "Autovacuum",
2348
+ pretty_value: "0.2",
2349
+ };
2350
+
2351
+ const expectedLogMinDurationSetting = {
2352
+ setting: "-1",
2353
+ unit: "ms",
2354
+ category: "Reporting and Logging / When to Log",
2355
+ context: "",
2356
+ vartype: "integer",
2357
+ pretty_value: "-1",
2358
+ };
2359
+
2360
+ const expectedDeadlockTimeoutSetting = {
2361
+ setting: "1000",
2362
+ unit: "ms",
2363
+ category: "Lock Management",
2364
+ context: "",
2365
+ vartype: "integer",
2366
+ pretty_value: "1000",
2367
+ };
2368
+
2369
+ const expectedDatabaseSizeBytes = 1073741824;
2370
+
2371
+ describe("getPostgresVersion extracts correct version for each PG version", () => {
2372
+ for (const { major, minor, versionNum } of pgVersions) {
2373
+ test(`PG ${major}.${minor}`, async () => {
2374
+ const mockClient = createMockClient(createVersionMockData(major, minor));
2375
+ const version = await checkup.getPostgresVersion(mockClient as any);
2376
+
2377
+ expect(version.version).toBe(`${major}.${minor}`);
2378
+ expect(version.server_version_num).toBe(versionNum);
2379
+ expect(version.server_major_ver).toBe(String(major));
2380
+ expect(version.server_minor_ver).toBe(String(minor));
2381
+ });
2382
+ }
2383
+ });
2384
+
2385
+ describe("generateA002 (major version) works for each PG version", () => {
2386
+ for (const { major, minor } of pgVersions) {
2387
+ test(`PG ${major}.${minor}`, async () => {
2388
+ const mockClient = createMockClient(createVersionMockData(major, minor));
2389
+ const report = await checkup.generateA002(mockClient as any, "test-node");
1825
2390
 
2391
+ expect(report.checkId).toBe("A002");
2392
+ expect(report.checkTitle).toBe("Postgres major version");
2393
+ expect(report.results["test-node"].data.version.version).toBe(`${major}.${minor}`);
2394
+ expect(report.results["test-node"].data.version.server_major_ver).toBe(String(major));
2395
+ expect(report.results["test-node"].data.version.server_minor_ver).toBe(String(minor));
2396
+ });
2397
+ }
2398
+ });
2399
+
2400
+ describe("generateA013 (minor version) works for each PG version", () => {
2401
+ for (const { major, minor } of pgVersions) {
2402
+ test(`PG ${major}.${minor}`, async () => {
2403
+ const mockClient = createMockClient(createVersionMockData(major, minor));
2404
+ const report = await checkup.generateA013(mockClient as any, "test-node");
2405
+
2406
+ expect(report.checkId).toBe("A013");
2407
+ expect(report.checkTitle).toBe("Postgres minor version");
2408
+ expect(report.results["test-node"].data.version.server_minor_ver).toBe(String(minor));
2409
+ expect(report.results["test-node"].data.version.version).toBe(`${major}.${minor}`);
2410
+ });
2411
+ }
2412
+ });
2413
+
2414
+ describe("generateA003 (settings) works for each PG version", () => {
2415
+ for (const { major, minor } of pgVersions) {
2416
+ test(`PG ${major}.${minor}`, async () => {
2417
+ const mockClient = createMockClient(createVersionMockData(major, minor));
2418
+ const report = await checkup.generateA003(mockClient as any, "test-node");
2419
+
2420
+ expect(report.checkId).toBe("A003");
2421
+ expect(report.checkTitle).toBe("Postgres settings");
2422
+ expect(report.results["test-node"].data.shared_buffers).toEqual(expectedSharedBuffersSetting);
2423
+ expect(report.results["test-node"].postgres_version?.version).toBe(`${major}.${minor}`);
2424
+ expect(report.results["test-node"].postgres_version?.server_major_ver).toBe(String(major));
2425
+ });
2426
+ }
2427
+ });
2428
+
2429
+ describe("generateA007 (altered settings) works for each PG version", () => {
2430
+ for (const { major, minor } of pgVersions) {
2431
+ test(`PG ${major}.${minor}`, async () => {
2432
+ const mockClient = createMockClient(createVersionMockData(major, minor));
2433
+ const report = await checkup.generateA007(mockClient as any, "test-node");
2434
+
2435
+ expect(report.checkId).toBe("A007");
2436
+ expect(report.checkTitle).toBe("Altered settings");
2437
+ expect(report.results["test-node"].data.shared_buffers).toEqual(expectedSharedBuffersAlteredSetting);
2438
+ expect(report.results["test-node"].postgres_version?.version).toBe(`${major}.${minor}`);
2439
+ });
2440
+ }
2441
+ });
2442
+
2443
+ describe("generateA004 (cluster info) works for each PG version", () => {
2444
+ for (const { major, minor } of pgVersions) {
2445
+ test(`PG ${major}.${minor}`, async () => {
2446
+ const mockClient = createMockClient(createVersionMockData(major, minor));
2447
+ const report = await checkup.generateA004(mockClient as any, "test-node");
2448
+
2449
+ expect(report.checkId).toBe("A004");
2450
+ expect(report.checkTitle).toBe("Cluster information");
2451
+ const data = report.results["test-node"].data;
2452
+ expect(data.general_info.total_connections.value).toBe("5");
2453
+ expect(data.general_info.cache_hit_ratio.value).toBe("90.00");
2454
+ expect(data.general_info.connections_active.value).toBe("2");
2455
+ expect(data.database_sizes.postgres).toBe(expectedDatabaseSizeBytes);
2456
+ expect(report.results["test-node"].postgres_version?.version).toBe(`${major}.${minor}`);
2457
+ });
2458
+ }
2459
+ });
2460
+
2461
+ describe("generateH001 (invalid indexes) works for each PG version", () => {
2462
+ for (const { major, minor } of pgVersions) {
2463
+ test(`PG ${major}.${minor}`, async () => {
2464
+ const mockClient = createMockClient(createVersionMockData(major, minor));
2465
+ const report = await checkup.generateH001(mockClient as any, "test-node");
2466
+
2467
+ expect(report.checkId).toBe("H001");
2468
+ expect(report.checkTitle).toBe("Invalid indexes");
2469
+ expect(report.results["test-node"].data.testdb).toEqual({
2470
+ invalid_indexes: [],
2471
+ total_count: 0,
2472
+ total_size_bytes: 0,
2473
+ total_size_pretty: "0 B",
2474
+ database_size_bytes: expectedDatabaseSizeBytes,
2475
+ database_size_pretty: "1.00 GiB",
2476
+ });
2477
+ expect(report.results["test-node"].postgres_version?.version).toBe(`${major}.${minor}`);
2478
+ });
2479
+ }
2480
+ });
2481
+
2482
+ describe("generateH002 (unused indexes) works for each PG version", () => {
2483
+ for (const { major, minor } of pgVersions) {
2484
+ test(`PG ${major}.${minor}`, async () => {
2485
+ const mockClient = createMockClient(createVersionMockData(major, minor));
2486
+ const report = await checkup.generateH002(mockClient as any, "test-node");
2487
+
2488
+ expect(report.checkId).toBe("H002");
2489
+ expect(report.checkTitle).toBe("Unused indexes");
2490
+ expect(report.results["test-node"].data.testdb).toEqual({
2491
+ unused_indexes: [],
2492
+ total_count: 0,
2493
+ total_size_bytes: 0,
2494
+ total_size_pretty: "0 B",
2495
+ database_size_bytes: expectedDatabaseSizeBytes,
2496
+ database_size_pretty: "1.00 GiB",
2497
+ stats_reset: {
2498
+ stats_reset_epoch: STATS_RESET_EPOCH,
2499
+ stats_reset_time: STATS_RESET_TIME,
2500
+ days_since_reset: DAYS_SINCE_RESET,
2501
+ postmaster_startup_epoch: STATS_RESET_EPOCH,
2502
+ postmaster_startup_time: POSTMASTER_STARTUP_TIME,
2503
+ },
2504
+ });
2505
+ expect(report.results["test-node"].postgres_version?.version).toBe(`${major}.${minor}`);
2506
+ });
2507
+ }
2508
+ });
2509
+
2510
+ describe("generateH004 (redundant indexes) works for each PG version", () => {
2511
+ for (const { major, minor } of pgVersions) {
2512
+ test(`PG ${major}.${minor}`, async () => {
2513
+ const mockClient = createMockClient(createVersionMockData(major, minor));
2514
+ const report = await checkup.generateH004(mockClient as any, "test-node");
2515
+
2516
+ expect(report.checkId).toBe("H004");
2517
+ expect(report.checkTitle).toBe("Redundant indexes");
2518
+ expect(report.results["test-node"].data.testdb).toEqual({
2519
+ redundant_indexes: [],
2520
+ total_count: 0,
2521
+ total_size_bytes: 0,
2522
+ total_size_pretty: "0 B",
2523
+ database_size_bytes: expectedDatabaseSizeBytes,
2524
+ database_size_pretty: "1.00 GiB",
2525
+ });
2526
+ expect(report.results["test-node"].postgres_version?.version).toBe(`${major}.${minor}`);
2527
+ });
2528
+ }
2529
+ });
2530
+
2531
+ describe("index reports surface non-empty rows for each PG version", () => {
2532
+ for (const { major, minor } of pgVersions) {
2533
+ test(`PG ${major}.${minor}`, async () => {
2534
+ const mockClient = createMockClient({
2535
+ ...createVersionMockData(major, minor),
2536
+ invalidIndexesRows: [
2537
+ {
2538
+ schema_name: "public",
2539
+ table_name: "orders",
2540
+ index_name: "orders_status_idx_invalid",
2541
+ relation_name: "orders",
2542
+ index_size_bytes: "2097152",
2543
+ index_definition: "CREATE INDEX orders_status_idx_invalid ON public.orders USING btree (status)",
2544
+ supports_fk: false,
2545
+ is_pk: false,
2546
+ is_unique: false,
2547
+ constraint_name: null,
2548
+ table_row_estimate: "50000",
2549
+ has_valid_duplicate: true,
2550
+ valid_index_name: "orders_status_idx",
2551
+ valid_index_definition: "CREATE INDEX orders_status_idx ON public.orders USING btree (status)",
2552
+ },
2553
+ ],
2554
+ unusedIndexesRows: [
2555
+ {
2556
+ schema_name: "public",
2557
+ table_name: "logs",
2558
+ index_name: "logs_created_idx",
2559
+ index_definition: "CREATE INDEX logs_created_idx ON public.logs USING btree (created_at)",
2560
+ reason: "Never Used Indexes",
2561
+ index_size_bytes: "4194304",
2562
+ idx_scan: "0",
2563
+ idx_is_btree: true,
2564
+ supports_fk: false,
2565
+ },
2566
+ ],
2567
+ redundantIndexesRows: [
2568
+ {
2569
+ schema_name: "public",
2570
+ table_name: "orders",
2571
+ index_name: "orders_user_id_idx",
2572
+ relation_name: "orders",
2573
+ access_method: "btree",
2574
+ reason: "public.orders_user_id_created_idx",
2575
+ index_size_bytes: "2097152",
2576
+ table_size_bytes: "16777216",
2577
+ index_usage: "0",
2578
+ supports_fk: false,
2579
+ index_definition: "CREATE INDEX orders_user_id_idx ON public.orders USING btree (user_id)",
2580
+ redundant_to_json: JSON.stringify([
2581
+ {
2582
+ index_name: "public.orders_user_id_created_idx",
2583
+ index_definition: "CREATE INDEX orders_user_id_created_idx ON public.orders USING btree (user_id, created_at)",
2584
+ index_size_bytes: 1048576,
2585
+ },
2586
+ ]),
2587
+ },
2588
+ ],
2589
+ });
2590
+
2591
+ const invalidReport = await checkup.generateH001(mockClient as any, "test-node");
2592
+ expect(invalidReport.results["test-node"].data.testdb).toEqual({
2593
+ invalid_indexes: [
2594
+ {
2595
+ schema_name: "public",
2596
+ table_name: "orders",
2597
+ index_name: "orders_status_idx_invalid",
2598
+ relation_name: "orders",
2599
+ index_size_bytes: 2097152,
2600
+ index_size_pretty: "2.00 MiB",
2601
+ index_definition: "CREATE INDEX orders_status_idx_invalid ON public.orders USING btree (status)",
2602
+ supports_fk: false,
2603
+ is_pk: false,
2604
+ is_unique: false,
2605
+ constraint_name: null,
2606
+ table_row_estimate: 50000,
2607
+ has_valid_duplicate: true,
2608
+ valid_duplicate_name: "orders_status_idx",
2609
+ valid_duplicate_definition: "CREATE INDEX orders_status_idx ON public.orders USING btree (status)",
2610
+ },
2611
+ ],
2612
+ total_count: 1,
2613
+ total_size_bytes: 2097152,
2614
+ total_size_pretty: "2.00 MiB",
2615
+ database_size_bytes: expectedDatabaseSizeBytes,
2616
+ database_size_pretty: "1.00 GiB",
2617
+ });
2618
+
2619
+ const unusedReport = await checkup.generateH002(mockClient as any, "test-node");
2620
+ expect(unusedReport.results["test-node"].data.testdb).toEqual({
2621
+ unused_indexes: [
2622
+ {
2623
+ schema_name: "public",
2624
+ table_name: "logs",
2625
+ index_name: "logs_created_idx",
2626
+ index_definition: "CREATE INDEX logs_created_idx ON public.logs USING btree (created_at)",
2627
+ reason: "Never Used Indexes",
2628
+ idx_scan: 0,
2629
+ index_size_bytes: 4194304,
2630
+ idx_is_btree: true,
2631
+ supports_fk: false,
2632
+ index_size_pretty: "4.00 MiB",
2633
+ },
2634
+ ],
2635
+ total_count: 1,
2636
+ total_size_bytes: 4194304,
2637
+ total_size_pretty: "4.00 MiB",
2638
+ database_size_bytes: expectedDatabaseSizeBytes,
2639
+ database_size_pretty: "1.00 GiB",
2640
+ stats_reset: {
2641
+ stats_reset_epoch: STATS_RESET_EPOCH,
2642
+ stats_reset_time: STATS_RESET_TIME,
2643
+ days_since_reset: DAYS_SINCE_RESET,
2644
+ postmaster_startup_epoch: STATS_RESET_EPOCH,
2645
+ postmaster_startup_time: POSTMASTER_STARTUP_TIME,
2646
+ },
2647
+ });
2648
+
2649
+ const redundantReport = await checkup.generateH004(mockClient as any, "test-node");
2650
+ expect(redundantReport.results["test-node"].data.testdb).toEqual({
2651
+ redundant_indexes: [
2652
+ {
2653
+ schema_name: "public",
2654
+ table_name: "orders",
2655
+ index_name: "orders_user_id_idx",
2656
+ relation_name: "orders",
2657
+ access_method: "btree",
2658
+ reason: "public.orders_user_id_created_idx",
2659
+ index_size_bytes: 2097152,
2660
+ table_size_bytes: 16777216,
2661
+ index_usage: 0,
2662
+ supports_fk: false,
2663
+ index_definition: "CREATE INDEX orders_user_id_idx ON public.orders USING btree (user_id)",
2664
+ index_size_pretty: "2.00 MiB",
2665
+ table_size_pretty: "16.00 MiB",
2666
+ redundant_to: [
2667
+ {
2668
+ index_name: "public.orders_user_id_created_idx",
2669
+ index_definition: "CREATE INDEX orders_user_id_created_idx ON public.orders USING btree (user_id, created_at)",
2670
+ index_size_bytes: 1048576,
2671
+ index_size_pretty: "1.00 MiB",
2672
+ },
2673
+ ],
2674
+ },
2675
+ ],
2676
+ total_count: 1,
2677
+ total_size_bytes: 2097152,
2678
+ total_size_pretty: "2.00 MiB",
2679
+ database_size_bytes: expectedDatabaseSizeBytes,
2680
+ database_size_pretty: "1.00 GiB",
2681
+ });
2682
+ });
2683
+ }
2684
+ });
2685
+
2686
// D004 inspects pg_stat_statements / pg_stat_kcache availability and settings.
describe("generateD004 (pg_stat_statements) works for each PG version", () => {
  // Baseline: with the default version mock (no extension rows), both
  // extension statuses must report "not available" with zeroed counters.
  for (const { major, minor } of pgVersions) {
    test(`PG ${major}.${minor}`, async () => {
      const mockClient = createMockClient(createVersionMockData(major, minor));
      const report = await checkup.REPORT_GENERATORS.D004(mockClient as any, "test-node");

      expect(report.checkId).toBe("D004");
      expect(report.checkTitle).toBe("pg_stat_statements and pg_stat_kcache settings");
      const data = report.results["test-node"].data;
      expect(data).toEqual({
        settings: {},
        pg_stat_statements_status: {
          extension_available: false,
          metrics_count: 0,
          total_calls: 0,
          sample_queries: [],
        },
        pg_stat_kcache_status: {
          extension_available: false,
          metrics_count: 0,
          total_exec_time: 0,
          total_user_time: 0,
          total_system_time: 0,
          sample_queries: [],
        },
      });
    });
  }

  // Populated path: extension rows present. Note the mock rows carry numeric
  // values as strings (as the pg driver returns them); the report is expected
  // to coerce calls/times to numbers in its output.
  test("surfaces populated extension metrics", async () => {
    const mockClient = createMockClient({
      ...createVersionMockData(16, 3),
      settingsRows: [
        {
          tag_setting_name: "pg_stat_statements.max",
          tag_setting_value: "5000",
          tag_unit: "",
          tag_category: "Custom",
          tag_vartype: "integer",
          is_default: 0,
          setting_normalized: null,
          unit_normalized: null,
        },
        {
          // is_default: 1 — still expected to be surfaced in `settings` below.
          tag_setting_name: "pg_stat_kcache.linux_hz",
          tag_setting_value: "100",
          tag_unit: "",
          tag_category: "Custom",
          tag_vartype: "integer",
          is_default: 1,
          setting_normalized: null,
          unit_normalized: null,
        },
      ],
      pgStatStatementsExtensionRows: [{ exists: 1 }],
      pgStatStatementsStatsRows: [{ cnt: "2", total_calls: "42" }],
      pgStatStatementsSampleRows: [
        { queryid: "101", user: "app", database: "testdb", calls: "40" },
        { queryid: "202", user: "worker", database: "testdb", calls: "2" },
      ],
      pgStatKcacheExtensionRows: [{ exists: 1 }],
      pgStatKcacheStatsRows: [{ cnt: "1", total_exec_time: "12.5", total_user_time: "8.5", total_system_time: "4" }],
      pgStatKcacheSampleRows: [{ queryid: "101", user: "app", exec_total_time: "12.5" }],
    });

    const report = await checkup.REPORT_GENERATORS.D004(mockClient as any, "test-node");
    expect(report.results["test-node"].data).toEqual({
      settings: {
        "pg_stat_statements.max": {
          setting: "5000",
          unit: "",
          category: "Custom",
          context: "",
          vartype: "integer",
          pretty_value: "5000",
        },
        "pg_stat_kcache.linux_hz": {
          setting: "100",
          unit: "",
          category: "Custom",
          context: "",
          vartype: "integer",
          pretty_value: "100",
        },
      },
      pg_stat_statements_status: {
        extension_available: true,
        metrics_count: 2,
        // calls arrive as strings ("40", "2") and must come back as numbers.
        total_calls: 42,
        sample_queries: [
          { queryid: "101", user: "app", database: "testdb", calls: 40 },
          { queryid: "202", user: "worker", database: "testdb", calls: 2 },
        ],
      },
      pg_stat_kcache_status: {
        extension_available: true,
        metrics_count: 1,
        total_exec_time: 12.5,
        total_user_time: 8.5,
        total_system_time: 4,
        sample_queries: [{ queryid: "101", user: "app", exec_total_time: 12.5 }],
      },
    });
  });
});
2791
+
2792
+ describe("generateF001 (autovacuum settings) works for each PG version", () => {
2793
+ for (const { major, minor } of pgVersions) {
2794
+ test(`PG ${major}.${minor}`, async () => {
2795
+ const mockClient = createMockClient(createVersionMockData(major, minor));
2796
+ const report = await checkup.REPORT_GENERATORS.F001(mockClient as any, "test-node");
2797
+
2798
+ expect(report.checkId).toBe("F001");
2799
+ expect(report.checkTitle).toBe("Autovacuum: current settings");
2800
+ expect(report.results["test-node"].data).toEqual({
2801
+ autovacuum_vacuum_scale_factor: expectedAutovacuumSetting,
2802
+ });
2803
+ expect(report.results["test-node"].postgres_version?.version).toBe(`${major}.${minor}`);
2804
+ });
2805
+ }
2806
+ });
2807
+
2808
+ describe("generateG001 (memory settings) works for each PG version", () => {
2809
+ for (const { major, minor } of pgVersions) {
2810
+ test(`PG ${major}.${minor}`, async () => {
2811
+ const mockClient = createMockClient(createVersionMockData(major, minor));
2812
+ const report = await checkup.REPORT_GENERATORS.G001(mockClient as any, "test-node");
2813
+
2814
+ expect(report.checkId).toBe("G001");
2815
+ expect(report.checkTitle).toBe("Memory-related settings");
2816
+ const data = report.results["test-node"].data;
2817
+ expect(data).toEqual({
2818
+ settings: {
2819
+ shared_buffers: expectedSharedBuffersSetting,
2820
+ },
2821
+ analysis: {
2822
+ estimated_total_memory_usage: {
2823
+ shared_buffers_bytes: 134217728,
2824
+ shared_buffers_pretty: "128.00 MiB",
2825
+ wal_buffers_bytes: 4194304,
2826
+ wal_buffers_pretty: "4.00 MiB",
2827
+ shared_memory_total_bytes: 138412032,
2828
+ shared_memory_total_pretty: "132.00 MiB",
2829
+ work_mem_per_connection_bytes: 4194304,
2830
+ work_mem_per_connection_pretty: "4.00 MiB",
2831
+ max_work_mem_usage_bytes: 419430400,
2832
+ max_work_mem_usage_pretty: "400.00 MiB",
2833
+ maintenance_work_mem_bytes: 67108864,
2834
+ maintenance_work_mem_pretty: "64.00 MiB",
2835
+ effective_cache_size_bytes: 4294967296,
2836
+ effective_cache_size_pretty: "4.00 GiB",
2837
+ },
2838
+ },
2839
+ });
2840
+ expect(report.results["test-node"].postgres_version?.version).toBe(`${major}.${minor}`);
2841
+ });
2842
+ }
2843
+ });
2844
+
2845
+ describe("generateAllReports works for each PG version", () => {
2846
+ for (const { major, minor, versionNum } of pgVersions) {
2847
+ test(`PG ${major}.${minor}`, async () => {
2848
+ const mockClient = createMockClient(createVersionMockData(major, minor));
2849
+ const reports = await checkup.generateAllReports(mockClient as any, "test-node");
2850
+ const expectedVersion = {
2851
+ version: `${major}.${minor}`,
2852
+ server_version_num: versionNum,
2853
+ server_major_ver: String(major),
2854
+ server_minor_ver: String(minor),
2855
+ };
2856
+
2857
+ // Verify all express-mode checks are generated from the same source of truth.
2858
+ const expectedChecks = Object.keys(checkup.REPORT_GENERATORS).sort();
2859
+ expect(Object.keys(reports).sort()).toEqual(expectedChecks);
2860
+ for (const checkId of expectedChecks) {
2861
+ expect(reports[checkId]?.checkId).toBe(checkId);
2862
+ expect(reports[checkId].results["test-node"]).toBeDefined();
2863
+ }
2864
+
2865
+ // Verify every generated report has a concrete payload shape.
2866
+ expect(reports.A002.results["test-node"].data).toEqual({ version: expectedVersion });
2867
+ expect(reports.A003.results["test-node"].data).toEqual({
2868
+ shared_buffers: expectedSharedBuffersSetting,
2869
+ autovacuum_vacuum_scale_factor: expectedAutovacuumSetting,
2870
+ log_min_duration_statement: expectedLogMinDurationSetting,
2871
+ deadlock_timeout: expectedDeadlockTimeoutSetting,
2872
+ });
2873
+ expect(reports.A004.results["test-node"].data.database_sizes).toEqual({ postgres: expectedDatabaseSizeBytes });
2874
+ expect(reports.A004.results["test-node"].data.general_info.total_connections.value).toBe("5");
2875
+ expect(reports.A004.results["test-node"].data.general_info.uptime.value).toBe("30 days 0:00:00");
2876
+ expect(reports.A007.results["test-node"].data).toEqual({
2877
+ shared_buffers: expectedSharedBuffersAlteredSetting,
2878
+ autovacuum_vacuum_scale_factor: expectedAutovacuumAlteredSetting,
2879
+ });
2880
+ expect(reports.A013.results["test-node"].data).toEqual({ version: expectedVersion });
2881
+ expect(reports.D001.results["test-node"].data).toEqual({
2882
+ log_min_duration_statement: expectedLogMinDurationSetting,
2883
+ });
2884
+ expect(reports.D004.results["test-node"].data).toEqual({
2885
+ settings: {},
2886
+ pg_stat_statements_status: {
2887
+ extension_available: false,
2888
+ metrics_count: 0,
2889
+ total_calls: 0,
2890
+ sample_queries: [],
2891
+ },
2892
+ pg_stat_kcache_status: {
2893
+ extension_available: false,
2894
+ metrics_count: 0,
2895
+ total_exec_time: 0,
2896
+ total_user_time: 0,
2897
+ total_system_time: 0,
2898
+ sample_queries: [],
2899
+ },
2900
+ });
2901
+ expect(reports.F001.results["test-node"].data).toEqual({
2902
+ autovacuum_vacuum_scale_factor: expectedAutovacuumSetting,
2903
+ });
2904
+ expect(reports.F004.results["test-node"].data.testdb).toEqual({
2905
+ bloated_tables: [],
2906
+ total_count: 0,
2907
+ total_bloat_size_bytes: 0,
2908
+ total_bloat_size_pretty: "0 B",
2909
+ database_size_bytes: expectedDatabaseSizeBytes,
2910
+ database_size_pretty: "1.00 GiB",
2911
+ });
2912
+ expect(reports.F005.results["test-node"].data.testdb).toEqual({
2913
+ bloated_indexes: [],
2914
+ total_count: 0,
2915
+ total_bloat_size_bytes: 0,
2916
+ total_bloat_size_pretty: "0 B",
2917
+ database_size_bytes: expectedDatabaseSizeBytes,
2918
+ database_size_pretty: "1.00 GiB",
2919
+ });
2920
+ expect(reports.G001.results["test-node"].data.settings).toEqual({
2921
+ shared_buffers: expectedSharedBuffersSetting,
2922
+ });
2923
+ expect(reports.G001.results["test-node"].data.analysis.estimated_total_memory_usage.shared_buffers_bytes).toBe(134217728);
2924
+ expect(reports.G003.results["test-node"].data).toEqual({
2925
+ settings: {
2926
+ deadlock_timeout: expectedDeadlockTimeoutSetting,
2927
+ },
2928
+ deadlock_stats: { deadlocks: 0, conflicts: 0, stats_reset: null },
2929
+ });
2930
+ expect(reports.H001.results["test-node"].data.testdb).toEqual({
2931
+ invalid_indexes: [],
2932
+ total_count: 0,
2933
+ total_size_bytes: 0,
2934
+ total_size_pretty: "0 B",
2935
+ database_size_bytes: expectedDatabaseSizeBytes,
2936
+ database_size_pretty: "1.00 GiB",
2937
+ });
2938
+ expect(reports.H002.results["test-node"].data.testdb).toEqual({
2939
+ unused_indexes: [],
2940
+ total_count: 0,
2941
+ total_size_bytes: 0,
2942
+ total_size_pretty: "0 B",
2943
+ database_size_bytes: expectedDatabaseSizeBytes,
2944
+ database_size_pretty: "1.00 GiB",
2945
+ stats_reset: {
2946
+ stats_reset_epoch: STATS_RESET_EPOCH,
2947
+ stats_reset_time: STATS_RESET_TIME,
2948
+ days_since_reset: DAYS_SINCE_RESET,
2949
+ postmaster_startup_epoch: STATS_RESET_EPOCH,
2950
+ postmaster_startup_time: POSTMASTER_STARTUP_TIME,
2951
+ },
2952
+ });
2953
+ expect(reports.H004.results["test-node"].data.testdb).toEqual({
2954
+ redundant_indexes: [],
2955
+ total_count: 0,
2956
+ total_size_bytes: 0,
2957
+ total_size_pretty: "0 B",
2958
+ database_size_bytes: expectedDatabaseSizeBytes,
2959
+ database_size_pretty: "1.00 GiB",
2960
+ });
2961
+
2962
+ // Verify postgres_version is set in reports that include it.
2963
+ expect(reports.A003.results["test-node"].postgres_version).toEqual(expectedVersion);
2964
+ expect(reports.A004.results["test-node"].postgres_version).toEqual(expectedVersion);
2965
+ });
2966
+ }
2967
+ });
2968
+ });
2969
+
2970
+ // Tests for version-aware SQL query selection
2971
+ describe("Version-aware SQL query selection (PG13-PG18)", () => {
2972
+ const pgVersions = SUPPORTED_PG_MAJOR_VERSIONS;
2973
+
2974
+ // All metrics registered in metrics.yml.
2975
+ const allMetrics = metricsLoader.listMetricNames();
2976
+ expect(allMetrics.length).toBeGreaterThan(0);
2977
+ const sqlStartPattern = /^\s*(with|select)\b/i;
2978
+
2979
+ describe("All metrics from metrics.yml return valid SQL for each PG version", () => {
2980
+ for (const pgVersion of pgVersions) {
2981
+ describe(`PG${pgVersion}`, () => {
2982
+ for (const metric of allMetrics) {
2983
+ test(`${metric}`, () => {
2984
+ const sql = metricsLoader.getMetricSql(metric, pgVersion);
2985
+ expect(typeof sql).toBe("string");
2986
+ expect(sql.length).toBeGreaterThan(0);
2987
+
2988
+ const trimmedSql = sql.trim();
2989
+ if (trimmedSql.startsWith(";")) {
2990
+ expect(metric).toBe("pg_stat_io");
2991
+ expect(pgVersion).toBeLessThan(16);
2992
+ expect(trimmedSql).toMatch(/pg_stat_io only available/i);
2993
+ return;
2994
+ }
2995
+
2996
+ expect(trimmedSql).toMatch(sqlStartPattern);
2997
+ expect(trimmedSql.toLowerCase()).toMatch(/\bfrom\b/);
2998
+ expect(trimmedSql).not.toMatch(/\{\{.*\}\}/);
2999
+ expect(trimmedSql).not.toMatch(/\$\{.*\}/);
3000
+ });
3001
+ }
3002
+ });
3003
+ }
3004
+ });
3005
+
3006
+ describe("getMetricSql rejects invalid version inputs", () => {
3007
+ const invalidVersions = [0, -1, Number.NaN];
3008
+
3009
+ for (const pgVersion of invalidVersions) {
3010
+ test(`settings rejects Postgres ${String(pgVersion)}`, () => {
3011
+ expect(() => metricsLoader.getMetricSql("settings", pgVersion)).toThrow(/No compatible SQL version/);
3012
+ });
3013
+ }
3014
+
3015
+ test("rejects versions older than the oldest keyed SQL", () => {
3016
+ for (const metric of allMetrics) {
3017
+ expect(() => metricsLoader.getMetricSql(metric, 10)).toThrow(/No compatible SQL version/);
3018
+ }
3019
+ });
3020
+
3021
+ test("rejects unknown metric names", () => {
3022
+ expect(() => metricsLoader.getMetricSql("not_a_metric", 16)).toThrow(/Metric "not_a_metric" not found/);
3023
+ });
3024
+ });
3025
+
3026
+ describe("getMetricSql selects the nearest compatible version", () => {
3027
+ test("uses exact and previous keyed SQL for mid-range versions", () => {
3028
+ const definition = metricsLoader.getMetricDefinition("db_stats")!;
3029
+ expect(typeof definition.sqls["11"]).toBe("string");
3030
+ expect(typeof definition.sqls["12"]).toBe("string");
3031
+ expect(typeof definition.sqls["14"]).toBe("string");
3032
+ expect(typeof definition.sqls["15"]).toBe("string");
3033
+
3034
+ expect(metricsLoader.getMetricSql("db_stats", 12)).toBe(definition.sqls["12"]);
3035
+ expect(metricsLoader.getMetricSql("db_stats", 13)).toBe(definition.sqls["12"]);
3036
+ expect(metricsLoader.getMetricSql("db_stats", 14)).toBe(definition.sqls["14"]);
3037
+ expect(metricsLoader.getMetricSql("db_stats", 19)).toBe(definition.sqls["15"]);
3038
+ expect(definition.sqls["12"]).not.toBe(definition.sqls["11"]);
3039
+ expect(definition.sqls["15"]).not.toBe(definition.sqls["14"]);
3040
+ });
3041
+
3042
+ test("uses a metric's oldest SQL when no newer key exists below the requested version", () => {
3043
+ const definition = metricsLoader.getMetricDefinition("settings")!;
3044
+ expect(typeof definition.sqls["11"]).toBe("string");
3045
+ expect(metricsLoader.getMetricSql("settings", 12)).toBe(definition.sqls["11"]);
3046
+ expect(metricsLoader.getMetricSql("settings", 19)).toBe(definition.sqls["11"]);
3047
+ });
3048
+ });
3049
+
3050
+ describe("getMetricDefinition returns metadata for all metrics", () => {
3051
+ for (const metric of allMetrics) {
3052
+ test(`${metric} has definition with versioned SQL`, () => {
3053
+ const definition = metricsLoader.getMetricDefinition(metric);
3054
+ expect(definition).toBeTruthy();
3055
+ expect(definition?.sqls).toBeTruthy();
3056
+ expect(typeof definition?.sqls).toBe("object");
3057
+ const entries = Object.entries(definition!.sqls);
3058
+ expect(entries.length).toBeGreaterThan(0);
3059
+ for (const [versionKey, sql] of entries) {
3060
+ const version = Number(versionKey);
3061
+ const trimmedSql = sql.trim();
3062
+ expect(Number.isInteger(version)).toBe(true);
3063
+ expect(version).toBeGreaterThan(0);
3064
+ expect(typeof sql).toBe("string");
3065
+ expect(sql.length).toBeGreaterThan(0);
3066
+ if (trimmedSql.startsWith(";")) {
3067
+ expect(metric).toBe("pg_stat_io");
3068
+ expect(version).toBe(11);
3069
+ expect(trimmedSql).toMatch(/pg_stat_io only available/i);
3070
+ continue;
3071
+ }
3072
+ expect(trimmedSql).toMatch(sqlStartPattern);
3073
+ }
3074
+ });
3075
+ }
3076
+ });
3077
+
3078
+ test("listMetricNames returns all expected core metrics", () => {
3079
+ const names = metricsLoader.listMetricNames();
3080
+ expect(Array.isArray(names)).toBe(true);
3081
+ expect(new Set(names).size).toBe(names.length);
3082
+ const coreMetrics = [
3083
+ "settings",
3084
+ "db_stats",
3085
+ "db_size",
3086
+ "stats_reset",
3087
+ "pg_invalid_indexes",
3088
+ "unused_indexes",
3089
+ "redundant_indexes",
3090
+ ];
3091
+ expect(names.length).toBeGreaterThanOrEqual(coreMetrics.length);
3092
+ for (const metric of coreMetrics) {
3093
+ expect(names).toContain(metric);
3094
+ }
3095
+ });
3096
+
3097
+ test("METRIC_NAMES maps every express report metric", () => {
3098
+ expect(metricsLoader.METRIC_NAMES).toEqual({
3099
+ H001: "pg_invalid_indexes",
3100
+ H002: "unused_indexes",
3101
+ H004: "redundant_indexes",
3102
+ F004: "pg_table_bloat",
3103
+ F005: "pg_btree_bloat",
3104
+ settings: "settings",
3105
+ dbStats: "db_stats",
3106
+ dbSize: "db_size",
3107
+ statsReset: "stats_reset",
3108
+ I001: "pg_stat_io",
3109
+ });
3110
+ });
3111
+ });