postgresai 0.14.0-dev.53 → 0.14.0-dev.55
This diff compares publicly available package versions as published to their public registries. It is provided for informational purposes only.
- package/README.md +65 -38
- package/bin/postgres-ai.ts +461 -12
- package/bun.lock +3 -1
- package/bunfig.toml +19 -0
- package/dist/bin/postgres-ai.js +2208 -224
- package/lib/auth-server.ts +52 -5
- package/lib/checkup-api.ts +386 -0
- package/lib/checkup.ts +1327 -0
- package/lib/config.ts +3 -0
- package/lib/issues.ts +5 -41
- package/lib/metrics-embedded.ts +79 -0
- package/lib/metrics-loader.ts +127 -0
- package/lib/util.ts +61 -0
- package/package.json +14 -6
- package/packages/postgres-ai/README.md +26 -0
- package/packages/postgres-ai/bin/postgres-ai.js +27 -0
- package/packages/postgres-ai/package.json +27 -0
- package/scripts/embed-metrics.ts +154 -0
- package/test/auth.test.ts +258 -0
- package/test/checkup.integration.test.ts +273 -0
- package/test/checkup.test.ts +890 -0
- package/test/init.integration.test.ts +36 -33
- package/test/schema-validation.test.ts +81 -0
- package/test/test-utils.ts +122 -0
- package/dist/sql/01.role.sql +0 -16
- package/dist/sql/02.permissions.sql +0 -37
- package/dist/sql/03.optional_rds.sql +0 -6
- package/dist/sql/04.optional_self_managed.sql +0 -8
- package/dist/sql/05.helpers.sql +0 -415
package/test/init.integration.test.ts
@@ -326,41 +326,44 @@ describe.skipIf(skipTests)("integration: prepare-db", () => {
     }
   });
 
-  test("--verify returns 0 when ok and non-zero when missing", async () => {
-    pg = await createTempPostgres();
-
-    try {
-      // Prepare: run init
-      {
-        const r = runCliInit([pg.adminUri, "--password", "monpw", "--skip-optional-permissions"]);
-        expect(r.status).toBe(0);
+  test(
+    "--verify returns 0 when ok and non-zero when missing",
+    async () => {
+      pg = await createTempPostgres();
+
+      try {
+        // Prepare: run init
+        {
+          const r = runCliInit([pg.adminUri, "--password", "monpw", "--skip-optional-permissions"]);
+          expect(r.status).toBe(0);
+        }
+
+        // Verify should pass
+        {
+          const r = runCliInit([pg.adminUri, "--verify", "--skip-optional-permissions"]);
+          expect(r.status).toBe(0);
+          expect(r.stdout).toMatch(/prepare-db verify: OK/i);
+        }
+
+        // Break a required privilege and ensure verify fails
+        {
+          const c = new Client({ connectionString: pg.adminUri });
+          await c.connect();
+          await c.query("revoke select on pg_catalog.pg_index from public");
+          await c.query("revoke select on pg_catalog.pg_index from postgres_ai_mon");
+          await c.end();
+        }
+        {
+          const r = runCliInit([pg.adminUri, "--verify", "--skip-optional-permissions"]);
+          expect(r.status).not.toBe(0);
+          expect(r.stderr).toMatch(/prepare-db verify failed/i);
+          expect(r.stderr).toMatch(/pg_catalog\.pg_index/i);
+        }
+      } finally {
+        await pg.cleanup();
       }
-
-      // Verify should pass
-      {
-        const r = runCliInit([pg.adminUri, "--verify", "--skip-optional-permissions"]);
-        expect(r.status).toBe(0);
-        expect(r.stdout).toMatch(/prepare-db verify: OK/i);
-      }
-
-      // Break a required privilege and ensure verify fails
-      {
-        const c = new Client({ connectionString: pg.adminUri });
-        await c.connect();
-        await c.query("revoke select on pg_catalog.pg_index from public");
-        await c.query("revoke select on pg_catalog.pg_index from postgres_ai_mon");
-        await c.end();
-      }
-      {
-        const r = runCliInit([pg.adminUri, "--verify", "--skip-optional-permissions"]);
-        expect(r.status).not.toBe(0);
-        expect(r.stderr).toMatch(/prepare-db verify failed/i);
-        expect(r.stderr).toMatch(/pg_catalog\.pg_index/i);
-      }
-    } finally {
-      await pg.cleanup();
     }
-  });
+  );
 
   test("--reset-password updates the monitoring role login password", async () => {
     pg = await createTempPostgres();
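The reworked test above pivots on one requirement: the monitoring role must keep `select` on `pg_catalog.pg_index`. A minimal sketch of the kind of probe a `--verify` path can run — `has_table_privilege()` is a standard Postgres function, but the exact checks and messages the CLI performs are not shown in this diff, so treat the function below as illustrative:

```ts
import { Client } from "pg";

// Sketch of a privilege probe; has_table_privilege() is built into Postgres.
// The role name appears in the test above; everything else is illustrative.
async function verifyRequiredPrivileges(adminUri: string): Promise<boolean> {
  const c = new Client({ connectionString: adminUri });
  await c.connect();
  try {
    const { rows } = await c.query(
      "select has_table_privilege($1, 'pg_catalog.pg_index', 'select') as ok",
      ["postgres_ai_mon"],
    );
    if (!rows[0]?.ok) {
      console.error("prepare-db verify failed: missing select on pg_catalog.pg_index");
      return false;
    }
    return true;
  } finally {
    await c.end();
  }
}
```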
package/test/schema-validation.test.ts (new file)
@@ -0,0 +1,81 @@
+/**
+ * JSON Schema validation tests for express checkup reports.
+ * Validates that generated reports match schemas in reporter/schemas/.
+ */
+import { describe, test, expect } from "bun:test";
+import { resolve } from "path";
+import { readFileSync } from "fs";
+import Ajv2020 from "ajv/dist/2020";
+
+import * as checkup from "../lib/checkup";
+import { createMockClient } from "./test-utils";
+
+const ajv = new Ajv2020({ allErrors: true, strict: false });
+const schemasDir = resolve(import.meta.dir, "../../reporter/schemas");
+
+function validateAgainstSchema(report: any, checkId: string): void {
+  const schemaPath = resolve(schemasDir, `${checkId}.schema.json`);
+  const schema = JSON.parse(readFileSync(schemaPath, "utf8"));
+  const validate = ajv.compile(schema);
+  const valid = validate(report);
+  if (!valid) {
+    const errors = validate.errors?.map(e => `${e.instancePath}: ${e.message}`).join(", ");
+    throw new Error(`${checkId} schema validation failed: ${errors}`);
+  }
+}
+
+// Test data for index reports
+const indexTestData = {
+  H001: {
+    emptyRows: { invalidIndexesRows: [] },
+    dataRows: {
+      invalidIndexesRows: [
+        { schema_name: "public", table_name: "users", index_name: "users_email_idx", relation_name: "users", index_size_bytes: "1048576", supports_fk: false },
+      ],
+    },
+  },
+  H002: {
+    emptyRows: { unusedIndexesRows: [] },
+    dataRows: {
+      unusedIndexesRows: [
+        { schema_name: "public", table_name: "logs", index_name: "logs_created_idx", index_definition: "CREATE INDEX logs_created_idx ON public.logs USING btree (created_at)", reason: "Never Used Indexes", idx_scan: "0", index_size_bytes: "8388608", idx_is_btree: true, supports_fk: false },
+      ],
+    },
+  },
+  H004: {
+    emptyRows: { redundantIndexesRows: [] },
+    dataRows: {
+      redundantIndexesRows: [
+        { schema_name: "public", table_name: "orders", index_name: "orders_user_id_idx", relation_name: "orders", access_method: "btree", reason: "public.orders_user_id_created_idx", index_size_bytes: "2097152", table_size_bytes: "16777216", index_usage: "0", supports_fk: false, index_definition: "CREATE INDEX orders_user_id_idx ON public.orders USING btree (user_id)", redundant_to_json: JSON.stringify([{ index_name: "public.orders_user_id_created_idx", index_definition: "CREATE INDEX ...", index_size_bytes: 1048576 }]) },
+      ],
+    },
+  },
+};
+
+describe("Schema validation", () => {
+  // Index health checks (H001, H002, H004) - test empty and with data
+  for (const [checkId, testData] of Object.entries(indexTestData)) {
+    const generator = checkup.REPORT_GENERATORS[checkId];
+
+    test(`${checkId} validates with empty data`, async () => {
+      const mockClient = createMockClient(testData.emptyRows);
+      const report = await generator(mockClient as any, "node-01");
+      validateAgainstSchema(report, checkId);
+    });
+
+    test(`${checkId} validates with sample data`, async () => {
+      const mockClient = createMockClient(testData.dataRows);
+      const report = await generator(mockClient as any, "node-01");
+      validateAgainstSchema(report, checkId);
+    });
+  }
+
+  // Settings reports (D004, F001, G001) - single test each
+  for (const checkId of ["D004", "F001", "G001"]) {
+    test(`${checkId} validates against schema`, async () => {
+      const mockClient = createMockClient();
+      const report = await checkup.REPORT_GENERATORS[checkId](mockClient as any, "node-01");
+      validateAgainstSchema(report, checkId);
+    });
+  }
+});
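The schemas themselves (`reporter/schemas/*.schema.json`) sit outside this package diff. For orientation, here is a hypothetical minimal schema in the 2020-12 dialect that `validateAgainstSchema` could compile for H001; every field name below is illustrative except those echoed from the test data above:

```ts
import Ajv2020 from "ajv/dist/2020";

// Hypothetical stand-in for reporter/schemas/H001.schema.json — the real
// schema is not part of this diff; the shape is illustrative only.
const h001Schema = {
  $schema: "https://json-schema.org/draft/2020-12/schema",
  type: "object",
  properties: {
    invalid_indexes: {
      type: "array",
      items: {
        type: "object",
        required: ["schema_name", "table_name", "index_name"],
        properties: {
          schema_name: { type: "string" },
          table_name: { type: "string" },
          index_name: { type: "string" },
          index_size_bytes: { type: "string" },
          supports_fk: { type: "boolean" },
        },
      },
    },
  },
};

const validate = new Ajv2020({ allErrors: true, strict: false }).compile(h001Schema);
console.log(validate({ invalid_indexes: [] })); // true
```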
package/test/test-utils.ts (new file)
@@ -0,0 +1,122 @@
+/**
+ * Shared test utilities for CLI tests.
+ */
+
+export interface MockClientOptions {
+  /** Database name returned by current_database() queries (default: "testdb") */
+  databaseName?: string;
+  /** Version rows for pg_settings version query (default: PG 16.3) */
+  versionRows?: any[];
+  settingsRows?: any[];
+  databaseSizesRows?: any[];
+  dbStatsRows?: any[];
+  connectionStatesRows?: any[];
+  uptimeRows?: any[];
+  invalidIndexesRows?: any[];
+  unusedIndexesRows?: any[];
+  redundantIndexesRows?: any[];
+}
+
+const DEFAULT_VERSION_ROWS = [
+  { name: "server_version", setting: "16.3" },
+  { name: "server_version_num", setting: "160003" },
+];
+
+const defaultSettingsRows = [
+  { tag_setting_name: "shared_buffers", tag_setting_value: "128MB", tag_unit: "", tag_category: "Resource Usage / Memory", tag_vartype: "string", is_default: 1, setting_normalized: null, unit_normalized: null },
+  { tag_setting_name: "work_mem", tag_setting_value: "4MB", tag_unit: "", tag_category: "Resource Usage / Memory", tag_vartype: "string", is_default: 1, setting_normalized: null, unit_normalized: null },
+  { tag_setting_name: "autovacuum", tag_setting_value: "on", tag_unit: "", tag_category: "Autovacuum", tag_vartype: "bool", is_default: 1, setting_normalized: null, unit_normalized: null },
+  { tag_setting_name: "pg_stat_statements.max", tag_setting_value: "5000", tag_unit: "", tag_category: "Custom", tag_vartype: "integer", is_default: 0, setting_normalized: null, unit_normalized: null },
+];
+
+/**
+ * Create a mock PostgreSQL client for testing report generators.
+ * Routes SQL queries to appropriate mock data based on query patterns.
+ */
+export function createMockClient(options: MockClientOptions = {}) {
+  const {
+    databaseName = "testdb",
+    versionRows = DEFAULT_VERSION_ROWS,
+    settingsRows = defaultSettingsRows,
+    databaseSizesRows = [],
+    dbStatsRows = [],
+    connectionStatesRows = [],
+    uptimeRows = [],
+    invalidIndexesRows = [],
+    unusedIndexesRows = [],
+    redundantIndexesRows = [],
+  } = options;
+
+  return {
+    query: async (sql: string) => {
+      // Version query (simple inline - used by getPostgresVersion)
+      if (sql.includes("server_version") && sql.includes("server_version_num") && sql.includes("pg_settings") && !sql.includes("tag_setting_name")) {
+        return { rows: versionRows };
+      }
+      // Settings metric query (from metrics.yml - has tag_setting_name, tag_setting_value)
+      if (sql.includes("tag_setting_name") && sql.includes("tag_setting_value") && sql.includes("pg_settings")) {
+        return { rows: settingsRows };
+      }
+      // Database sizes (simple inline - lists all databases)
+      if (sql.includes("pg_database") && sql.includes("pg_database_size") && sql.includes("datistemplate")) {
+        return { rows: databaseSizesRows };
+      }
+      // db_size metric (current database size from metrics.yml)
+      if (sql.includes("pg_database_size(current_database())") && sql.includes("size_b")) {
+        return { rows: [{ tag_datname: databaseName, size_b: "1073741824" }] };
+      }
+      // db_stats metric (from metrics.yml)
+      if (sql.includes("pg_stat_database") && sql.includes("xact_commit") && sql.includes("pg_control_system")) {
+        return { rows: dbStatsRows };
+      }
+      // Stats reset metric (from metrics.yml)
+      if (sql.includes("stats_reset") && sql.includes("pg_stat_database") && sql.includes("seconds_since_reset")) {
+        return { rows: [{ tag_database_name: databaseName, stats_reset_epoch: "1704067200", seconds_since_reset: "2592000" }] };
+      }
+      // Postmaster startup time (simple inline - used by getStatsReset)
+      if (sql.includes("pg_postmaster_start_time") && sql.includes("postmaster_startup_epoch")) {
+        return { rows: [{ postmaster_startup_epoch: "1704067200", postmaster_startup_time: "2024-01-01 00:00:00+00" }] };
+      }
+      // Connection states (simple inline)
+      if (sql.includes("pg_stat_activity") && sql.includes("state") && sql.includes("group by")) {
+        return { rows: connectionStatesRows };
+      }
+      // Uptime info (simple inline)
+      if (sql.includes("pg_postmaster_start_time()") && sql.includes("uptime") && !sql.includes("postmaster_startup_epoch")) {
+        return { rows: uptimeRows };
+      }
+      // Invalid indexes (H001) - from metrics.yml
+      if (sql.includes("indisvalid = false") && sql.includes("fk_indexes")) {
+        return { rows: invalidIndexesRows };
+      }
+      // Unused indexes (H002) - from metrics.yml
+      if (sql.includes("Never Used Indexes") && sql.includes("idx_scan = 0")) {
+        return { rows: unusedIndexesRows };
+      }
+      // Redundant indexes (H004) - from metrics.yml
+      if (sql.includes("redundant_indexes_grouped") && sql.includes("columns like")) {
+        return { rows: redundantIndexesRows };
+      }
+      // D004: pg_stat_statements extension check
+      if (sql.includes("pg_extension") && sql.includes("pg_stat_statements")) {
+        return { rows: [] };
+      }
+      // D004: pg_stat_kcache extension check
+      if (sql.includes("pg_extension") && sql.includes("pg_stat_kcache")) {
+        return { rows: [] };
+      }
+      // G001: Memory settings query
+      if (sql.includes("pg_size_bytes") && sql.includes("shared_buffers") && sql.includes("work_mem")) {
+        return { rows: [{
+          shared_buffers_bytes: "134217728",
+          wal_buffers_bytes: "4194304",
+          work_mem_bytes: "4194304",
+          maintenance_work_mem_bytes: "67108864",
+          effective_cache_size_bytes: "4294967296",
+          max_connections: 100,
+        }] };
+      }
+      throw new Error(`Unexpected query: ${sql}`);
+    },
+  };
+}
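Usage follows the schema-validation tests above: override only the row sets a given check consumes and let everything else hit the defaults (unmatched queries throw, so a missing mock route fails loudly). A condensed sketch:

```ts
import * as checkup from "../lib/checkup";
import { createMockClient } from "./test-utils";

// Drive the H001 generator with a single invalid-index row; every other
// query the generator issues is answered by createMockClient's defaults.
async function demoH001() {
  const client = createMockClient({
    invalidIndexesRows: [
      { schema_name: "public", table_name: "users", index_name: "users_email_idx",
        relation_name: "users", index_size_bytes: "1048576", supports_fk: false },
    ],
  });
  return checkup.REPORT_GENERATORS["H001"](client as any, "node-01");
}
```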
package/dist/sql/01.role.sql (deleted)
@@ -1,16 +0,0 @@
--- Role creation / password update (template-filled by cli/lib/init.ts)
---
--- Always uses a race-safe pattern (create if missing, then always alter to set the password):
--- do $$ begin
---   if not exists (select 1 from pg_catalog.pg_roles where rolname = '...') then
---     begin
---       create user "..." with password '...';
---     exception when duplicate_object then
---       null;
---     end;
---   end if;
---   alter user "..." with password '...';
--- end $$;
-{{ROLE_STMT}}
-
-
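For reference, here is what the deleted template's `{{ROLE_STMT}}` placeholder expands to once filled in. The role name and password are the values the integration test above uses; the TypeScript wrapper is a sketch, not the actual `cli/lib/init.ts` code (real code must also escape identifiers and literals):

```ts
// Sketch: render the race-safe role statement the deleted template described.
// Quoting/escaping details of the real cli/lib/init.ts are assumptions.
function roleStmt(role: string, password: string): string {
  return `
do $$ begin
  if not exists (select 1 from pg_catalog.pg_roles where rolname = '${role}') then
    begin
      create user "${role}" with password '${password}';
    exception when duplicate_object then
      null;
    end;
  end if;
  alter user "${role}" with password '${password}';
end $$;`;
}

// e.g. await client.query(roleStmt("postgres_ai_mon", "monpw"));
```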
package/dist/sql/02.permissions.sql (deleted)
@@ -1,37 +0,0 @@
--- Required permissions for postgres_ai monitoring user (template-filled by cli/lib/init.ts)
-
--- Allow connect
-grant connect on database {{DB_IDENT}} to {{ROLE_IDENT}};
-
--- Standard monitoring privileges
-grant pg_monitor to {{ROLE_IDENT}};
-grant select on pg_catalog.pg_index to {{ROLE_IDENT}};
-
--- Create postgres_ai schema for our objects
-create schema if not exists postgres_ai;
-grant usage on schema postgres_ai to {{ROLE_IDENT}};
-
--- For bloat analysis: expose pg_statistic via a view
-create or replace view postgres_ai.pg_statistic as
-select
-  n.nspname as schemaname,
-  c.relname as tablename,
-  a.attname,
-  s.stanullfrac as null_frac,
-  s.stawidth as avg_width,
-  false as inherited
-from pg_catalog.pg_statistic s
-join pg_catalog.pg_class c on c.oid = s.starelid
-join pg_catalog.pg_namespace n on n.oid = c.relnamespace
-join pg_catalog.pg_attribute a on a.attrelid = s.starelid and a.attnum = s.staattnum
-where a.attnum > 0 and not a.attisdropped;
-
-grant select on postgres_ai.pg_statistic to {{ROLE_IDENT}};
-
--- Hardened clusters sometimes revoke PUBLIC on schema public
-grant usage on schema public to {{ROLE_IDENT}};
-
--- Keep search_path predictable; postgres_ai first so our objects are found
-alter user {{ROLE_IDENT}} set search_path = postgres_ai, "$user", public, pg_catalog;
-
-
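`pg_catalog.pg_statistic` is readable only by superusers, which is why the deleted template exposed just the columns bloat estimation needs through a view created by the admin and granted `select` on that instead. An illustrative consumer: the column names come from the view definition above, but the aggregation itself is an assumption about how a bloat check might use it:

```ts
// Illustrative: estimated average row width per table, a classic input to
// bloat estimation, read via the postgres_ai.pg_statistic view.
const avgWidthSql = `
  select schemaname, tablename,
         sum(avg_width * (1 - null_frac)) as est_row_width
  from postgres_ai.pg_statistic
  group by schemaname, tablename`;
// const { rows } = await monClient.query(avgWidthSql);
```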
package/dist/sql/04.optional_self_managed.sql (deleted)
@@ -1,8 +0,0 @@
--- Optional permissions for self-managed Postgres (best effort)
-
-grant execute on function pg_catalog.pg_stat_file(text) to {{ROLE_IDENT}};
-grant execute on function pg_catalog.pg_stat_file(text, boolean) to {{ROLE_IDENT}};
-grant execute on function pg_catalog.pg_ls_dir(text) to {{ROLE_IDENT}};
-grant execute on function pg_catalog.pg_ls_dir(text, boolean, boolean) to {{ROLE_IDENT}};
-
-
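These grants only succeed on self-managed Postgres, hence "best effort" (the parallel `03.optional_rds.sql`, also deleted in this release, presumably covered managed services). An illustrative probe with one of the granted functions — `pg_ls_dir()` is standard `pg_catalog`:

```ts
// Illustrative: count WAL segment files via the granted pg_ls_dir().
// Works only where the grant above succeeded (self-managed Postgres);
// WAL segment names are 24 hex characters.
const walCountSql = `select count(*) as wal_files
  from pg_ls_dir('pg_wal') as f
  where f ~ '^[0-9A-F]{24}$'`;
// const { rows } = await monClient.query(walCountSql);
```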