@indiekitai/pg-dash 0.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +196 -0
- package/README.zh-CN.md +196 -0
- package/dist/cli.js +2285 -0
- package/dist/cli.js.map +1 -0
- package/dist/mcp.js +940 -0
- package/dist/mcp.js.map +1 -0
- package/dist/ui/assets/index-IOgce042.js +105 -0
- package/dist/ui/assets/index-XqyTBg6o.css +1 -0
- package/dist/ui/index.html +13 -0
- package/package.json +60 -0
package/dist/mcp.js
ADDED
|
@@ -0,0 +1,940 @@
|
|
|
1
|
+
#!/usr/bin/env node
// NOTE(review): the published bundle contained this shebang twice (lines 1-2).
// Node only strips a hashbang at the very first byte of the file; a second
// "#!" line is a SyntaxError under the ECMAScript hashbang grammar, so the
// duplicate is removed here. Confirm the build banner config emits it once.
|
|
3
|
+
|
|
4
|
+
// src/mcp.ts
|
|
5
|
+
import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
|
|
6
|
+
import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js";
|
|
7
|
+
import { Pool } from "pg";
|
|
8
|
+
import { z } from "zod";
|
|
9
|
+
|
|
10
|
+
// src/server/queries/overview.ts
|
|
11
|
+
async function getOverview(pool2) {
  // High-level instance summary: server version, uptime, current database
  // size, database count, and connection usage vs. max_connections.
  // Borrows a single client so every probe runs on the same connection.
  const conn = await pool2.connect();
  try {
    const versionRes = await conn.query("SHOW server_version");
    const uptimeRes = await conn.query(
      "SELECT now() - pg_postmaster_start_time() AS uptime"
    );
    const sizeRes = await conn.query(
      "SELECT pg_size_pretty(pg_database_size(current_database())) AS size"
    );
    const countRes = await conn.query(
      "SELECT count(*)::int AS count FROM pg_database WHERE NOT datistemplate"
    );
    const connRes = await conn.query(`
      SELECT
        (SELECT count(*)::int FROM pg_stat_activity WHERE state = 'active') AS active,
        (SELECT count(*)::int FROM pg_stat_activity WHERE state = 'idle') AS idle,
        (SELECT setting::int FROM pg_settings WHERE name = 'max_connections') AS max
    `);
    // Every probe returns exactly one row.
    const [{ server_version }] = versionRes.rows;
    return {
      version: server_version,
      uptime: uptimeRes.rows[0].uptime,
      dbSize: sizeRes.rows[0].size,
      databaseCount: countRes.rows[0].count,
      connections: connRes.rows[0]
    };
  } finally {
    // Always return the client to the pool, even on query failure.
    conn.release();
  }
}
|
|
41
|
+
|
|
42
|
+
// src/server/queries/tables.ts
|
|
43
|
+
async function getTables(pool2) {
  // Per-table storage and tuple statistics from pg_stat_user_tables,
  // ordered largest-first by total relation size (table + indexes + toast).
  const conn = await pool2.connect();
  try {
    const sql = `
      SELECT
        schemaname AS schema,
        relname AS name,
        pg_size_pretty(pg_total_relation_size(relid)) AS total_size,
        pg_total_relation_size(relid) AS size_bytes,
        n_live_tup AS rows,
        n_dead_tup AS dead_tuples,
        CASE WHEN n_live_tup > 0
          THEN round(n_dead_tup::numeric / n_live_tup * 100, 1)
          ELSE 0 END AS dead_pct
      FROM pg_stat_user_tables
      ORDER BY pg_total_relation_size(relid) DESC
    `;
    const { rows } = await conn.query(sql);
    return rows;
  } finally {
    conn.release();
  }
}
|
|
65
|
+
|
|
66
|
+
// src/server/queries/schema.ts
|
|
67
|
+
async function getSchemaTableDetail(pool2, tableName) {
  // Full per-table report: sizes, columns, indexes, constraints, foreign
  // keys, and up to 10 sample rows. Returns null when the table is not found.
  const conn = await pool2.connect();
  try {
    // Accept "table" (defaults to the public schema) or "schema.table".
    const parts = tableName.split(".");
    const qualified = parts.length > 1;
    const schema = qualified ? parts[0] : "public";
    const name = qualified ? parts[1] : parts[0];
    // Existence check plus size/maintenance stats in one catalog query.
    const infoRes = await conn.query(`
      SELECT
        c.relname AS name, n.nspname AS schema,
        pg_size_pretty(pg_total_relation_size(c.oid)) AS total_size,
        pg_size_pretty(pg_relation_size(c.oid)) AS table_size,
        pg_size_pretty(pg_total_relation_size(c.oid) - pg_relation_size(c.oid)) AS index_size,
        pg_size_pretty(pg_relation_size(c.reltoastrelid)) AS toast_size,
        s.n_live_tup AS row_count, s.n_dead_tup AS dead_tuples,
        s.last_vacuum, s.last_autovacuum, s.last_analyze, s.last_autoanalyze,
        s.seq_scan, s.idx_scan
      FROM pg_class c
      JOIN pg_namespace n ON c.relnamespace = n.oid
      LEFT JOIN pg_stat_user_tables s ON s.relid = c.oid
      WHERE c.relname = $1 AND n.nspname = $2 AND c.relkind = 'r'
    `, [name, schema]);
    if (infoRes.rows.length === 0) return null;
    const columnsRes = await conn.query(`
      SELECT
        a.attname AS name,
        pg_catalog.format_type(a.atttypid, a.atttypmod) AS type,
        NOT a.attnotnull AS nullable,
        pg_get_expr(d.adbin, d.adrelid) AS default_value,
        col_description(a.attrelid, a.attnum) AS description
      FROM pg_attribute a
      LEFT JOIN pg_attrdef d ON a.attrelid = d.adrelid AND a.attnum = d.adnum
      WHERE a.attrelid = (SELECT c.oid FROM pg_class c JOIN pg_namespace n ON c.relnamespace = n.oid WHERE c.relname = $1 AND n.nspname = $2)
        AND a.attnum > 0 AND NOT a.attisdropped
      ORDER BY a.attnum
    `, [name, schema]);
    const indexesRes = await conn.query(`
      SELECT
        i.relname AS name,
        am.amname AS type,
        pg_size_pretty(pg_relation_size(i.oid)) AS size,
        pg_get_indexdef(idx.indexrelid) AS definition,
        idx.indisunique AS is_unique,
        idx.indisprimary AS is_primary,
        s.idx_scan, s.idx_tup_read, s.idx_tup_fetch
      FROM pg_index idx
      JOIN pg_class i ON idx.indexrelid = i.oid
      JOIN pg_class t ON idx.indrelid = t.oid
      JOIN pg_namespace n ON t.relnamespace = n.oid
      JOIN pg_am am ON i.relam = am.oid
      LEFT JOIN pg_stat_user_indexes s ON s.indexrelid = i.oid
      WHERE t.relname = $1 AND n.nspname = $2
      ORDER BY i.relname
    `, [name, schema]);
    // PK first, then unique / FK / check / exclusion constraints.
    const constraintsRes = await conn.query(`
      SELECT
        conname AS name,
        CASE contype WHEN 'p' THEN 'PRIMARY KEY' WHEN 'f' THEN 'FOREIGN KEY'
          WHEN 'u' THEN 'UNIQUE' WHEN 'c' THEN 'CHECK' WHEN 'x' THEN 'EXCLUDE' END AS type,
        pg_get_constraintdef(oid) AS definition
      FROM pg_constraint
      WHERE conrelid = (SELECT c.oid FROM pg_class c JOIN pg_namespace n ON c.relnamespace = n.oid WHERE c.relname = $1 AND n.nspname = $2)
      ORDER BY
        CASE contype WHEN 'p' THEN 1 WHEN 'u' THEN 2 WHEN 'f' THEN 3 WHEN 'c' THEN 4 ELSE 5 END
    `, [name, schema]);
    const fkRes = await conn.query(`
      SELECT
        conname AS name,
        a.attname AS column_name,
        confrelid::regclass::text AS referenced_table,
        af.attname AS referenced_column
      FROM pg_constraint c
      JOIN pg_attribute a ON a.attrelid = c.conrelid AND a.attnum = ANY(c.conkey)
      JOIN pg_attribute af ON af.attrelid = c.confrelid AND af.attnum = ANY(c.confkey)
      WHERE c.contype = 'f'
        AND c.conrelid = (SELECT cl.oid FROM pg_class cl JOIN pg_namespace n ON cl.relnamespace = n.oid WHERE cl.relname = $1 AND n.nspname = $2)
    `, [name, schema]);
    // Sample rows are best-effort: permission or type errors must not sink
    // the rest of the report. Identifiers are escaped, not interpolated raw.
    let sampleData = [];
    try {
      const sample = await conn.query(
        `SELECT * FROM ${conn.escapeIdentifier(schema)}.${conn.escapeIdentifier(name)} LIMIT 10`
      );
      sampleData = sample.rows;
    } catch (err) {
      console.error("[schema] Error:", err.message);
    }
    return {
      ...infoRes.rows[0],
      columns: columnsRes.rows,
      indexes: indexesRes.rows,
      constraints: constraintsRes.rows,
      foreignKeys: fkRes.rows,
      sampleData
    };
  } finally {
    conn.release();
  }
}
|
|
164
|
+
|
|
165
|
+
// src/server/queries/activity.ts
|
|
166
|
+
async function getActivity(pool2) {
  // Snapshot of current sessions from pg_stat_activity, excluding this
  // connection and background workers (state IS NULL). Active queries sort
  // first, then idle-in-transaction, then everything else, oldest first.
  const conn = await pool2.connect();
  try {
    const res = await conn.query(`
      SELECT
        pid,
        COALESCE(query, '') AS query,
        COALESCE(state, 'unknown') AS state,
        wait_event,
        wait_event_type,
        CASE WHEN state = 'active' THEN (now() - query_start)::text
             WHEN state = 'idle in transaction' THEN (now() - state_change)::text
             ELSE NULL END AS duration,
        client_addr::text,
        COALESCE(application_name, '') AS application_name,
        backend_start::text
      FROM pg_stat_activity
      WHERE pid != pg_backend_pid()
        AND state IS NOT NULL
      ORDER BY
        CASE state
          WHEN 'active' THEN 1
          WHEN 'idle in transaction' THEN 2
          ELSE 3
        END,
        query_start ASC NULLS LAST
    `);
    return res.rows;
  } finally {
    conn.release();
  }
}
|
|
198
|
+
|
|
199
|
+
// src/server/advisor.ts
|
|
200
|
+
var SEVERITY_WEIGHT = { critical: 20, warning: 8, info: 3 };
// Health score: start from a perfect 100 and subtract a per-issue penalty.
// Repeat issues of the same severity are discounted — full weight for the
// first 5, half for the next 10, quarter thereafter — so one noisy check
// cannot single-handedly zero the score. Result is clamped to [0, 100].
function computeAdvisorScore(issues) {
  const seen = { critical: 0, warning: 0, info: 0 };
  let penalty = 0;
  for (const { severity } of issues) {
    const nth = ++seen[severity];
    const base = SEVERITY_WEIGHT[severity];
    penalty += nth <= 5 ? base : nth <= 15 ? base * 0.5 : base * 0.25;
  }
  return Math.max(0, Math.min(100, Math.round(100 - penalty)));
}
|
|
214
|
+
// Map a 0-100 advisor score to a letter grade. Bands are inclusive at their
// lower bound (90+ A, 80+ B, 70+ C, 50+ D); anything below 50 is an F.
function gradeFromScore(score) {
  const bands = [[90, "A"], [80, "B"], [70, "C"], [50, "D"]];
  for (const [floor, grade] of bands) {
    if (score >= floor) return grade;
  }
  return "F";
}
|
|
221
|
+
// Score each advisor category independently from its own slice of issues,
// returning { [category]: { score, grade, count } } for the four categories.
function computeBreakdown(issues) {
  const result = {};
  for (const cat of ["performance", "maintenance", "schema", "security"]) {
    const catIssues = issues.filter((issue) => issue.category === cat);
    const score = computeAdvisorScore(catIssues);
    result[cat] = { score, grade: gradeFromScore(score), count: catIssues.length };
  }
  return result;
}
|
|
231
|
+
async function getAdvisorReport(pool2) {
|
|
232
|
+
const client = await pool2.connect();
|
|
233
|
+
const issues = [];
|
|
234
|
+
try {
|
|
235
|
+
try {
|
|
236
|
+
const r = await client.query(`
|
|
237
|
+
SELECT schemaname, relname, seq_scan, seq_tup_read, n_live_tup,
|
|
238
|
+
pg_size_pretty(pg_total_relation_size(relid)) AS size
|
|
239
|
+
FROM pg_stat_user_tables
|
|
240
|
+
WHERE n_live_tup > 10000 AND seq_scan > 100
|
|
241
|
+
ORDER BY seq_tup_read DESC LIMIT 10
|
|
242
|
+
`);
|
|
243
|
+
for (const row of r.rows) {
|
|
244
|
+
issues.push({
|
|
245
|
+
id: `perf-seq-scan-${row.schemaname}-${row.relname}`,
|
|
246
|
+
severity: row.seq_scan > 1e3 ? "warning" : "info",
|
|
247
|
+
category: "performance",
|
|
248
|
+
title: `High sequential scans on ${row.relname}`,
|
|
249
|
+
description: `Table ${row.schemaname}.${row.relname} (${row.n_live_tup} rows, ${row.size}) has ${row.seq_scan} sequential scans reading ${Number(row.seq_tup_read).toLocaleString()} tuples. Consider adding indexes on frequently filtered columns.`,
|
|
250
|
+
fix: `-- Identify commonly filtered columns and add indexes:
|
|
251
|
+
-- EXPLAIN ANALYZE SELECT * FROM ${row.schemaname}.${row.relname} WHERE <your_condition>;
|
|
252
|
+
CREATE INDEX CONCURRENTLY idx_${row.relname}_<column> ON ${row.schemaname}.${row.relname} (<column>);`,
|
|
253
|
+
impact: "Queries will continue to do full table scans, degrading performance as the table grows.",
|
|
254
|
+
effort: "moderate"
|
|
255
|
+
});
|
|
256
|
+
}
|
|
257
|
+
} catch (err) {
|
|
258
|
+
console.error("[advisor] Error checking seq scans:", err.message);
|
|
259
|
+
}
|
|
260
|
+
try {
|
|
261
|
+
const r = await client.query(`
|
|
262
|
+
SELECT
|
|
263
|
+
schemaname, relname, indexrelname,
|
|
264
|
+
pg_relation_size(indexrelid) AS idx_size,
|
|
265
|
+
pg_relation_size(relid) AS tbl_size,
|
|
266
|
+
pg_size_pretty(pg_relation_size(indexrelid)) AS idx_size_pretty,
|
|
267
|
+
pg_size_pretty(pg_relation_size(relid)) AS tbl_size_pretty
|
|
268
|
+
FROM pg_stat_user_indexes
|
|
269
|
+
WHERE pg_relation_size(indexrelid) > 1048576
|
|
270
|
+
AND pg_relation_size(indexrelid) > pg_relation_size(relid) * 3
|
|
271
|
+
ORDER BY pg_relation_size(indexrelid) DESC LIMIT 10
|
|
272
|
+
`);
|
|
273
|
+
for (const row of r.rows) {
|
|
274
|
+
issues.push({
|
|
275
|
+
id: `perf-bloated-idx-${row.indexrelname}`,
|
|
276
|
+
severity: "warning",
|
|
277
|
+
category: "performance",
|
|
278
|
+
title: `Bloated index ${row.indexrelname}`,
|
|
279
|
+
description: `Index ${row.indexrelname} on ${row.relname} is ${row.idx_size_pretty} but the table is only ${row.tbl_size_pretty}. The index may need rebuilding.`,
|
|
280
|
+
fix: `REINDEX INDEX CONCURRENTLY ${row.schemaname}.${row.indexrelname};`,
|
|
281
|
+
impact: "Bloated indexes waste disk space and slow down queries that use them.",
|
|
282
|
+
effort: "quick"
|
|
283
|
+
});
|
|
284
|
+
}
|
|
285
|
+
} catch (err) {
|
|
286
|
+
console.error("[advisor] Error checking bloated indexes:", err.message);
|
|
287
|
+
}
|
|
288
|
+
try {
|
|
289
|
+
const r = await client.query(`
|
|
290
|
+
SELECT schemaname, relname, n_dead_tup, n_live_tup,
|
|
291
|
+
CASE WHEN n_live_tup > 0 THEN round(n_dead_tup::numeric / n_live_tup * 100, 1) ELSE 0 END AS dead_pct,
|
|
292
|
+
pg_size_pretty(pg_total_relation_size(relid)) AS size
|
|
293
|
+
FROM pg_stat_user_tables
|
|
294
|
+
WHERE n_live_tup > 1000 AND n_dead_tup::float / GREATEST(n_live_tup, 1) > 0.1
|
|
295
|
+
ORDER BY n_dead_tup DESC LIMIT 10
|
|
296
|
+
`);
|
|
297
|
+
for (const row of r.rows) {
|
|
298
|
+
const pct = parseFloat(row.dead_pct);
|
|
299
|
+
issues.push({
|
|
300
|
+
id: `perf-bloat-${row.schemaname}-${row.relname}`,
|
|
301
|
+
severity: pct > 30 ? "critical" : "warning",
|
|
302
|
+
category: "performance",
|
|
303
|
+
title: `Table bloat on ${row.relname} (${row.dead_pct}% dead)`,
|
|
304
|
+
description: `${row.schemaname}.${row.relname} has ${Number(row.n_dead_tup).toLocaleString()} dead tuples (${row.dead_pct}% of ${Number(row.n_live_tup).toLocaleString()} live rows). Size: ${row.size}.`,
|
|
305
|
+
fix: `VACUUM FULL ${row.schemaname}.${row.relname};`,
|
|
306
|
+
impact: "Dead tuples waste storage and degrade scan performance.",
|
|
307
|
+
effort: pct > 30 ? "moderate" : "quick"
|
|
308
|
+
});
|
|
309
|
+
}
|
|
310
|
+
} catch (err) {
|
|
311
|
+
console.error("[advisor] Error checking table bloat:", err.message);
|
|
312
|
+
}
|
|
313
|
+
try {
|
|
314
|
+
const r = await client.query(`
|
|
315
|
+
SELECT schemaname, relname,
|
|
316
|
+
heap_blks_hit, heap_blks_read,
|
|
317
|
+
CASE WHEN (heap_blks_hit + heap_blks_read) = 0 THEN 1
|
|
318
|
+
ELSE heap_blks_hit::float / (heap_blks_hit + heap_blks_read) END AS ratio
|
|
319
|
+
FROM pg_statio_user_tables
|
|
320
|
+
WHERE (heap_blks_hit + heap_blks_read) > 100
|
|
321
|
+
ORDER BY ratio ASC LIMIT 5
|
|
322
|
+
`);
|
|
323
|
+
for (const row of r.rows) {
|
|
324
|
+
const ratio = parseFloat(row.ratio);
|
|
325
|
+
if (ratio < 0.9) {
|
|
326
|
+
issues.push({
|
|
327
|
+
id: `perf-cache-${row.schemaname}-${row.relname}`,
|
|
328
|
+
severity: ratio < 0.5 ? "critical" : "warning",
|
|
329
|
+
category: "performance",
|
|
330
|
+
title: `Poor cache hit ratio on ${row.relname}`,
|
|
331
|
+
description: `Table ${row.schemaname}.${row.relname} has a cache hit ratio of ${(ratio * 100).toFixed(1)}%. Most reads are going to disk.`,
|
|
332
|
+
fix: `-- Consider increasing shared_buffers or reducing working set:
|
|
333
|
+
SHOW shared_buffers;`,
|
|
334
|
+
impact: "Disk reads are orders of magnitude slower than memory reads.",
|
|
335
|
+
effort: "involved"
|
|
336
|
+
});
|
|
337
|
+
}
|
|
338
|
+
}
|
|
339
|
+
} catch (err) {
|
|
340
|
+
console.error("[advisor] Error checking cache efficiency:", err.message);
|
|
341
|
+
}
|
|
342
|
+
try {
|
|
343
|
+
const extCheck = await client.query("SELECT 1 FROM pg_extension WHERE extname = 'pg_stat_statements'");
|
|
344
|
+
if (extCheck.rows.length > 0) {
|
|
345
|
+
const r = await client.query(`
|
|
346
|
+
SELECT query, calls, mean_exec_time, total_exec_time,
|
|
347
|
+
round(mean_exec_time::numeric, 2) AS mean_ms,
|
|
348
|
+
round(total_exec_time::numeric / 1000, 2) AS total_sec
|
|
349
|
+
FROM pg_stat_statements
|
|
350
|
+
WHERE query NOT LIKE '%pg_stat%' AND query NOT LIKE '%pg_catalog%'
|
|
351
|
+
AND mean_exec_time > 100
|
|
352
|
+
ORDER BY mean_exec_time DESC LIMIT 5
|
|
353
|
+
`);
|
|
354
|
+
for (const row of r.rows) {
|
|
355
|
+
issues.push({
|
|
356
|
+
id: `perf-slow-${row.query.slice(0, 30).replace(/\W/g, "_")}`,
|
|
357
|
+
severity: parseFloat(row.mean_ms) > 1e3 ? "warning" : "info",
|
|
358
|
+
category: "performance",
|
|
359
|
+
title: `Slow query (avg ${row.mean_ms}ms)`,
|
|
360
|
+
description: `Query averaging ${row.mean_ms}ms over ${row.calls} calls (total: ${row.total_sec}s): ${row.query.slice(0, 200)}`,
|
|
361
|
+
fix: `EXPLAIN ANALYZE ${row.query.slice(0, 500)};`,
|
|
362
|
+
impact: "Slow queries degrade overall database responsiveness.",
|
|
363
|
+
effort: "moderate"
|
|
364
|
+
});
|
|
365
|
+
}
|
|
366
|
+
}
|
|
367
|
+
} catch (err) {
|
|
368
|
+
console.error("[advisor] Error checking slow queries:", err.message);
|
|
369
|
+
}
|
|
370
|
+
try {
|
|
371
|
+
const r = await client.query(`
|
|
372
|
+
SELECT schemaname, relname, last_vacuum, last_autovacuum, n_dead_tup
|
|
373
|
+
FROM pg_stat_user_tables
|
|
374
|
+
WHERE n_live_tup > 100
|
|
375
|
+
AND (last_vacuum IS NULL AND last_autovacuum IS NULL
|
|
376
|
+
OR GREATEST(last_vacuum, last_autovacuum) < now() - interval '7 days')
|
|
377
|
+
ORDER BY n_dead_tup DESC LIMIT 15
|
|
378
|
+
`);
|
|
379
|
+
for (const row of r.rows) {
|
|
380
|
+
const never = !row.last_vacuum && !row.last_autovacuum;
|
|
381
|
+
issues.push({
|
|
382
|
+
id: `maint-vacuum-${row.schemaname}-${row.relname}`,
|
|
383
|
+
severity: never ? "warning" : "info",
|
|
384
|
+
category: "maintenance",
|
|
385
|
+
title: `VACUUM ${never ? "never run" : "overdue"} on ${row.relname}`,
|
|
386
|
+
description: `${row.schemaname}.${row.relname} ${never ? "has never been vacuumed" : "was last vacuumed over 7 days ago"}. Dead tuples: ${Number(row.n_dead_tup).toLocaleString()}.`,
|
|
387
|
+
fix: `VACUUM ANALYZE ${row.schemaname}.${row.relname};`,
|
|
388
|
+
impact: "Dead tuples accumulate, increasing table size and degrading query performance.",
|
|
389
|
+
effort: "quick"
|
|
390
|
+
});
|
|
391
|
+
}
|
|
392
|
+
} catch (err) {
|
|
393
|
+
console.error("[advisor] Error checking vacuum overdue:", err.message);
|
|
394
|
+
}
|
|
395
|
+
try {
|
|
396
|
+
const r = await client.query(`
|
|
397
|
+
SELECT schemaname, relname
|
|
398
|
+
FROM pg_stat_user_tables
|
|
399
|
+
WHERE n_live_tup > 100
|
|
400
|
+
AND last_analyze IS NULL AND last_autoanalyze IS NULL
|
|
401
|
+
AND NOT EXISTS (
|
|
402
|
+
SELECT 1 FROM pg_stat_user_tables t2
|
|
403
|
+
WHERE t2.relname = pg_stat_user_tables.relname
|
|
404
|
+
AND (t2.last_vacuum IS NULL AND t2.last_autovacuum IS NULL)
|
|
405
|
+
)
|
|
406
|
+
LIMIT 10
|
|
407
|
+
`);
|
|
408
|
+
for (const row of r.rows) {
|
|
409
|
+
issues.push({
|
|
410
|
+
id: `maint-analyze-${row.schemaname}-${row.relname}`,
|
|
411
|
+
severity: "info",
|
|
412
|
+
category: "maintenance",
|
|
413
|
+
title: `ANALYZE never run on ${row.relname}`,
|
|
414
|
+
description: `${row.schemaname}.${row.relname} has never been analyzed. The query planner may choose suboptimal plans.`,
|
|
415
|
+
fix: `ANALYZE ${row.schemaname}.${row.relname};`,
|
|
416
|
+
impact: "Without statistics, the query planner makes poor estimates leading to slow queries.",
|
|
417
|
+
effort: "quick"
|
|
418
|
+
});
|
|
419
|
+
}
|
|
420
|
+
} catch (err) {
|
|
421
|
+
console.error("[advisor] Error checking analyze overdue:", err.message);
|
|
422
|
+
}
|
|
423
|
+
try {
|
|
424
|
+
const r = await client.query(`
|
|
425
|
+
SELECT datname, age(datfrozenxid) AS xid_age
|
|
426
|
+
FROM pg_database
|
|
427
|
+
WHERE datname = current_database()
|
|
428
|
+
`);
|
|
429
|
+
for (const row of r.rows) {
|
|
430
|
+
const age = parseInt(row.xid_age);
|
|
431
|
+
if (age > 1e9) {
|
|
432
|
+
issues.push({
|
|
433
|
+
id: `maint-xid-wraparound`,
|
|
434
|
+
severity: "critical",
|
|
435
|
+
category: "maintenance",
|
|
436
|
+
title: `Transaction ID wraparound risk`,
|
|
437
|
+
description: `Database ${row.datname} has datfrozenxid age of ${age.toLocaleString()}. Wraparound occurs at ~2 billion.`,
|
|
438
|
+
fix: `VACUUM FREEZE;`,
|
|
439
|
+
impact: "If wraparound occurs, PostgreSQL will shut down to prevent data loss.",
|
|
440
|
+
effort: "involved"
|
|
441
|
+
});
|
|
442
|
+
} else if (age > 5e8) {
|
|
443
|
+
issues.push({
|
|
444
|
+
id: `maint-xid-warning`,
|
|
445
|
+
severity: "warning",
|
|
446
|
+
category: "maintenance",
|
|
447
|
+
title: `Transaction ID age is high`,
|
|
448
|
+
description: `Database ${row.datname} has datfrozenxid age of ${age.toLocaleString()}.`,
|
|
449
|
+
fix: `VACUUM FREEZE;`,
|
|
450
|
+
impact: "Approaching transaction ID wraparound threshold.",
|
|
451
|
+
effort: "moderate"
|
|
452
|
+
});
|
|
453
|
+
}
|
|
454
|
+
}
|
|
455
|
+
} catch (err) {
|
|
456
|
+
console.error("[advisor] Error checking xid wraparound:", err.message);
|
|
457
|
+
}
|
|
458
|
+
try {
|
|
459
|
+
const r = await client.query(`
|
|
460
|
+
SELECT pid, state, now() - state_change AS idle_duration,
|
|
461
|
+
client_addr::text, application_name,
|
|
462
|
+
extract(epoch from now() - state_change)::int AS idle_seconds
|
|
463
|
+
FROM pg_stat_activity
|
|
464
|
+
WHERE state IN ('idle', 'idle in transaction')
|
|
465
|
+
AND now() - state_change > interval '10 minutes'
|
|
466
|
+
AND pid != pg_backend_pid()
|
|
467
|
+
`);
|
|
468
|
+
for (const row of r.rows) {
|
|
469
|
+
const isIdleTx = row.state === "idle in transaction";
|
|
470
|
+
issues.push({
|
|
471
|
+
id: `maint-idle-${row.pid}`,
|
|
472
|
+
severity: isIdleTx ? "warning" : "info",
|
|
473
|
+
category: "maintenance",
|
|
474
|
+
title: `${isIdleTx ? "Idle in transaction" : "Idle connection"} (PID ${row.pid})`,
|
|
475
|
+
description: `PID ${row.pid} from ${row.client_addr || "local"} (${row.application_name || "unknown"}) has been ${row.state} for ${Math.round(row.idle_seconds / 60)} minutes.`,
|
|
476
|
+
fix: `SELECT pg_terminate_backend(${row.pid});`,
|
|
477
|
+
impact: isIdleTx ? "Idle-in-transaction connections hold locks and prevent VACUUM." : "Idle connections consume connection slots.",
|
|
478
|
+
effort: "quick"
|
|
479
|
+
});
|
|
480
|
+
}
|
|
481
|
+
} catch (err) {
|
|
482
|
+
console.error("[advisor] Error checking idle connections:", err.message);
|
|
483
|
+
}
|
|
484
|
+
try {
|
|
485
|
+
const r = await client.query(`
|
|
486
|
+
SELECT c.relname AS table_name, n.nspname AS schema
|
|
487
|
+
FROM pg_class c
|
|
488
|
+
JOIN pg_namespace n ON c.relnamespace = n.oid
|
|
489
|
+
WHERE c.relkind = 'r' AND n.nspname = 'public'
|
|
490
|
+
AND NOT EXISTS (
|
|
491
|
+
SELECT 1 FROM pg_constraint con WHERE con.conrelid = c.oid AND con.contype = 'p'
|
|
492
|
+
)
|
|
493
|
+
`);
|
|
494
|
+
for (const row of r.rows) {
|
|
495
|
+
issues.push({
|
|
496
|
+
id: `schema-no-pk-${row.schema}-${row.table_name}`,
|
|
497
|
+
severity: "warning",
|
|
498
|
+
category: "schema",
|
|
499
|
+
title: `Missing primary key on ${row.table_name}`,
|
|
500
|
+
description: `Table ${row.schema}.${row.table_name} has no primary key. This can cause replication issues and makes row identification unreliable.`,
|
|
501
|
+
fix: `ALTER TABLE ${row.schema}.${row.table_name} ADD PRIMARY KEY (<column>);`,
|
|
502
|
+
impact: "No primary key means no unique row identity, problematic for replication and ORMs.",
|
|
503
|
+
effort: "moderate"
|
|
504
|
+
});
|
|
505
|
+
}
|
|
506
|
+
} catch (err) {
|
|
507
|
+
console.error("[advisor] Error checking missing primary keys:", err.message);
|
|
508
|
+
}
|
|
509
|
+
try {
|
|
510
|
+
const r = await client.query(`
|
|
511
|
+
SELECT schemaname, relname, indexrelname, idx_scan,
|
|
512
|
+
pg_size_pretty(pg_relation_size(indexrelid)) AS idx_size,
|
|
513
|
+
pg_relation_size(indexrelid) AS idx_bytes
|
|
514
|
+
FROM pg_stat_user_indexes
|
|
515
|
+
WHERE idx_scan = 0
|
|
516
|
+
AND indexrelname NOT LIKE '%_pkey'
|
|
517
|
+
AND pg_relation_size(indexrelid) > 1048576
|
|
518
|
+
ORDER BY pg_relation_size(indexrelid) DESC LIMIT 10
|
|
519
|
+
`);
|
|
520
|
+
for (const row of r.rows) {
|
|
521
|
+
issues.push({
|
|
522
|
+
id: `schema-unused-idx-${row.indexrelname}`,
|
|
523
|
+
severity: "warning",
|
|
524
|
+
category: "schema",
|
|
525
|
+
title: `Unused index ${row.indexrelname} (${row.idx_size})`,
|
|
526
|
+
description: `Index ${row.indexrelname} on ${row.relname} has never been used (0 scans) and takes ${row.idx_size}.`,
|
|
527
|
+
fix: `DROP INDEX CONCURRENTLY ${row.schemaname}.${row.indexrelname};`,
|
|
528
|
+
impact: "Unused indexes waste disk space and slow down writes.",
|
|
529
|
+
effort: "quick"
|
|
530
|
+
});
|
|
531
|
+
}
|
|
532
|
+
} catch (err) {
|
|
533
|
+
console.error("[advisor] Error checking unused indexes:", err.message);
|
|
534
|
+
}
|
|
535
|
+
try {
|
|
536
|
+
const r = await client.query(`
|
|
537
|
+
SELECT array_agg(idx.indexrelid::regclass::text) AS indexes,
|
|
538
|
+
idx.indrelid::regclass::text AS table_name,
|
|
539
|
+
pg_size_pretty(sum(pg_relation_size(idx.indexrelid))) AS total_size
|
|
540
|
+
FROM pg_index idx
|
|
541
|
+
GROUP BY idx.indrelid, idx.indkey
|
|
542
|
+
HAVING count(*) > 1
|
|
543
|
+
`);
|
|
544
|
+
for (const row of r.rows) {
|
|
545
|
+
issues.push({
|
|
546
|
+
id: `schema-dup-idx-${row.table_name}-${row.indexes[0]}`,
|
|
547
|
+
severity: "warning",
|
|
548
|
+
category: "schema",
|
|
549
|
+
title: `Duplicate indexes on ${row.table_name}`,
|
|
550
|
+
description: `These indexes cover the same columns on ${row.table_name}: ${row.indexes.join(", ")}. Total wasted space: ${row.total_size}.`,
|
|
551
|
+
fix: `-- Keep one, drop the rest:
|
|
552
|
+
DROP INDEX CONCURRENTLY ${row.indexes.slice(1).join(";\nDROP INDEX CONCURRENTLY ")};`,
|
|
553
|
+
impact: "Duplicate indexes double the write overhead and waste disk space.",
|
|
554
|
+
effort: "quick"
|
|
555
|
+
});
|
|
556
|
+
}
|
|
557
|
+
} catch (err) {
|
|
558
|
+
console.error("[advisor] Error checking duplicate indexes:", err.message);
|
|
559
|
+
}
|
|
560
|
+
try {
|
|
561
|
+
const r = await client.query(`
|
|
562
|
+
SELECT
|
|
563
|
+
conrelid::regclass::text AS table_name,
|
|
564
|
+
a.attname AS column_name,
|
|
565
|
+
confrelid::regclass::text AS referenced_table
|
|
566
|
+
FROM pg_constraint c
|
|
567
|
+
JOIN pg_attribute a ON a.attrelid = c.conrelid AND a.attnum = ANY(c.conkey)
|
|
568
|
+
WHERE c.contype = 'f'
|
|
569
|
+
AND NOT EXISTS (
|
|
570
|
+
SELECT 1 FROM pg_index i
|
|
571
|
+
WHERE i.indrelid = c.conrelid
|
|
572
|
+
AND a.attnum = ANY(i.indkey)
|
|
573
|
+
)
|
|
574
|
+
`);
|
|
575
|
+
for (const row of r.rows) {
|
|
576
|
+
issues.push({
|
|
577
|
+
id: `schema-fk-no-idx-${row.table_name}-${row.column_name}`,
|
|
578
|
+
severity: "warning",
|
|
579
|
+
category: "schema",
|
|
580
|
+
title: `Missing index on FK column ${row.table_name}.${row.column_name}`,
|
|
581
|
+
description: `Foreign key column ${row.column_name} on ${row.table_name} (references ${row.referenced_table}) has no index. This causes slow JOINs and cascading deletes.`,
|
|
582
|
+
fix: `CREATE INDEX CONCURRENTLY idx_${row.table_name.replace(/\./g, "_")}_${row.column_name} ON ${row.table_name} (${row.column_name});`,
|
|
583
|
+
impact: "JOINs and cascading deletes on this FK will require full table scans.",
|
|
584
|
+
effort: "quick"
|
|
585
|
+
});
|
|
586
|
+
}
|
|
587
|
+
} catch (err) {
|
|
588
|
+
console.error("[advisor] Error checking missing FK indexes:", err.message);
|
|
589
|
+
}
|
|
590
|
+
try {
|
|
591
|
+
const r = await client.query(`
|
|
592
|
+
SELECT blocked_locks.pid AS blocked_pid,
|
|
593
|
+
blocking_locks.pid AS blocking_pid,
|
|
594
|
+
blocked_activity.query AS blocked_query
|
|
595
|
+
FROM pg_catalog.pg_locks blocked_locks
|
|
596
|
+
JOIN pg_catalog.pg_locks blocking_locks ON blocking_locks.locktype = blocked_locks.locktype
|
|
597
|
+
AND blocking_locks.database IS NOT DISTINCT FROM blocked_locks.database
|
|
598
|
+
AND blocking_locks.relation IS NOT DISTINCT FROM blocked_locks.relation
|
|
599
|
+
AND blocking_locks.page IS NOT DISTINCT FROM blocked_locks.page
|
|
600
|
+
AND blocking_locks.tuple IS NOT DISTINCT FROM blocked_locks.tuple
|
|
601
|
+
AND blocking_locks.virtualxid IS NOT DISTINCT FROM blocked_locks.virtualxid
|
|
602
|
+
AND blocking_locks.transactionid IS NOT DISTINCT FROM blocked_locks.transactionid
|
|
603
|
+
AND blocking_locks.classid IS NOT DISTINCT FROM blocked_locks.classid
|
|
604
|
+
AND blocking_locks.objid IS NOT DISTINCT FROM blocked_locks.objid
|
|
605
|
+
AND blocking_locks.objsubid IS NOT DISTINCT FROM blocked_locks.objsubid
|
|
606
|
+
AND blocking_locks.pid != blocked_locks.pid
|
|
607
|
+
JOIN pg_catalog.pg_stat_activity blocked_activity ON blocked_activity.pid = blocked_locks.pid
|
|
608
|
+
WHERE NOT blocked_locks.granted
|
|
609
|
+
`);
|
|
610
|
+
for (const row of r.rows) {
|
|
611
|
+
issues.push({
|
|
612
|
+
id: `perf-lock-blocked-${row.blocked_pid}`,
|
|
613
|
+
severity: "warning",
|
|
614
|
+
category: "performance",
|
|
615
|
+
title: `Blocked query (PID ${row.blocked_pid} blocked by PID ${row.blocking_pid})`,
|
|
616
|
+
description: `PID ${row.blocked_pid} is waiting for a lock held by PID ${row.blocking_pid}. Query: ${(row.blocked_query || "").slice(0, 200)}`,
|
|
617
|
+
fix: `SELECT pg_cancel_backend(${row.blocking_pid});`,
|
|
618
|
+
impact: "Blocked queries cause cascading delays and potential timeouts.",
|
|
619
|
+
effort: "quick"
|
|
620
|
+
});
|
|
621
|
+
}
|
|
622
|
+
} catch (err) {
|
|
623
|
+
console.error("[advisor] Error checking locks:", err.message);
|
|
624
|
+
}
|
|
625
|
+
try {
|
|
626
|
+
const r = await client.query(`
|
|
627
|
+
SELECT CASE WHEN pg_is_in_recovery()
|
|
628
|
+
THEN pg_wal_lsn_diff(pg_last_wal_receive_lsn(), pg_last_wal_replay_lsn())
|
|
629
|
+
ELSE 0 END AS lag_bytes
|
|
630
|
+
`);
|
|
631
|
+
const lagBytes = parseInt(r.rows[0]?.lag_bytes ?? "0");
|
|
632
|
+
if (lagBytes > 1048576) {
|
|
633
|
+
issues.push({
|
|
634
|
+
id: `perf-replication-lag`,
|
|
635
|
+
severity: lagBytes > 104857600 ? "critical" : "warning",
|
|
636
|
+
category: "performance",
|
|
637
|
+
title: `Replication lag: ${(lagBytes / 1048576).toFixed(1)} MB`,
|
|
638
|
+
description: `WAL replay is lagging by ${(lagBytes / 1048576).toFixed(1)} MB. This indicates the replica is falling behind.`,
|
|
639
|
+
fix: `-- Check replication status:
|
|
640
|
+
SELECT * FROM pg_stat_replication;`,
|
|
641
|
+
impact: "High replication lag means the replica has stale data and failover may lose transactions.",
|
|
642
|
+
effort: "involved"
|
|
643
|
+
});
|
|
644
|
+
}
|
|
645
|
+
} catch (err) {
|
|
646
|
+
console.error("[advisor] Error checking replication lag:", err.message);
|
|
647
|
+
}
|
|
648
|
+
try {
|
|
649
|
+
const r = await client.query(`
|
|
650
|
+
SELECT checkpoints_req, checkpoints_timed,
|
|
651
|
+
CASE WHEN (checkpoints_req + checkpoints_timed) = 0 THEN 0
|
|
652
|
+
ELSE round(checkpoints_req::numeric / (checkpoints_req + checkpoints_timed) * 100, 1) END AS req_pct
|
|
653
|
+
FROM pg_stat_bgwriter
|
|
654
|
+
`);
|
|
655
|
+
const reqPct = parseFloat(r.rows[0]?.req_pct ?? "0");
|
|
656
|
+
if (reqPct > 50) {
|
|
657
|
+
issues.push({
|
|
658
|
+
id: `maint-checkpoint-frequency`,
|
|
659
|
+
severity: reqPct > 80 ? "warning" : "info",
|
|
660
|
+
category: "maintenance",
|
|
661
|
+
title: `${reqPct}% of checkpoints are requested (not timed)`,
|
|
662
|
+
description: `${r.rows[0]?.checkpoints_req} requested vs ${r.rows[0]?.checkpoints_timed} timed checkpoints. High requested checkpoints indicate checkpoint_completion_target or max_wal_size may need tuning.`,
|
|
663
|
+
fix: `-- Increase max_wal_size:
|
|
664
|
+
ALTER SYSTEM SET max_wal_size = '2GB';
|
|
665
|
+
SELECT pg_reload_conf();`,
|
|
666
|
+
impact: "Frequent requested checkpoints cause I/O spikes and degrade performance.",
|
|
667
|
+
effort: "moderate"
|
|
668
|
+
});
|
|
669
|
+
}
|
|
670
|
+
} catch (err) {
|
|
671
|
+
console.error("[advisor] Error checking checkpoint frequency:", err.message);
|
|
672
|
+
}
|
|
673
|
+
try {
|
|
674
|
+
const r = await client.query(`SELECT setting FROM pg_settings WHERE name = 'autovacuum'`);
|
|
675
|
+
if (r.rows[0]?.setting === "off") {
|
|
676
|
+
issues.push({
|
|
677
|
+
id: `maint-autovacuum-disabled`,
|
|
678
|
+
severity: "critical",
|
|
679
|
+
category: "maintenance",
|
|
680
|
+
title: `Autovacuum is disabled`,
|
|
681
|
+
description: `Autovacuum is turned off. Dead tuples will accumulate and transaction ID wraparound becomes a risk.`,
|
|
682
|
+
fix: `ALTER SYSTEM SET autovacuum = on;
|
|
683
|
+
SELECT pg_reload_conf();`,
|
|
684
|
+
impact: "Without autovacuum, tables bloat indefinitely and risk transaction ID wraparound shutdown.",
|
|
685
|
+
effort: "quick"
|
|
686
|
+
});
|
|
687
|
+
}
|
|
688
|
+
} catch (err) {
|
|
689
|
+
console.error("[advisor] Error checking autovacuum:", err.message);
|
|
690
|
+
}
|
|
691
|
+
try {
|
|
692
|
+
const sbRes = await client.query(`SELECT setting, unit FROM pg_settings WHERE name = 'shared_buffers'`);
|
|
693
|
+
const memRes = await client.query(`
|
|
694
|
+
SELECT (SELECT setting::bigint FROM pg_settings WHERE name = 'shared_buffers') *
|
|
695
|
+
(SELECT setting::bigint FROM pg_settings WHERE name = 'block_size') AS shared_bytes
|
|
696
|
+
`);
|
|
697
|
+
const sharedBytes = parseInt(memRes.rows[0]?.shared_bytes ?? "0");
|
|
698
|
+
if (sharedBytes > 0 && sharedBytes < 128 * 1024 * 1024) {
|
|
699
|
+
issues.push({
|
|
700
|
+
id: `perf-shared-buffers-low`,
|
|
701
|
+
severity: "warning",
|
|
702
|
+
category: "performance",
|
|
703
|
+
title: `shared_buffers is only ${(sharedBytes / 1048576).toFixed(0)} MB`,
|
|
704
|
+
description: `shared_buffers is set to ${sbRes.rows[0]?.setting}${sbRes.rows[0]?.unit || ""}. Recommended: ~25% of system RAM, typically at least 256MB for production.`,
|
|
705
|
+
fix: `ALTER SYSTEM SET shared_buffers = '256MB';
|
|
706
|
+
-- Requires restart`,
|
|
707
|
+
impact: "Low shared_buffers means more disk I/O and poor cache hit ratios.",
|
|
708
|
+
effort: "involved"
|
|
709
|
+
});
|
|
710
|
+
}
|
|
711
|
+
} catch (err) {
|
|
712
|
+
console.error("[advisor] Error checking shared_buffers:", err.message);
|
|
713
|
+
}
|
|
714
|
+
try {
|
|
715
|
+
const r = await client.query(`SELECT setting, unit FROM pg_settings WHERE name = 'work_mem'`);
|
|
716
|
+
const workMemKB = parseInt(r.rows[0]?.setting ?? "0");
|
|
717
|
+
if (workMemKB > 0 && workMemKB < 4096) {
|
|
718
|
+
issues.push({
|
|
719
|
+
id: `perf-work-mem-low`,
|
|
720
|
+
severity: "info",
|
|
721
|
+
category: "performance",
|
|
722
|
+
title: `work_mem is only ${workMemKB < 1024 ? workMemKB + "kB" : (workMemKB / 1024).toFixed(0) + "MB"}`,
|
|
723
|
+
description: `work_mem is ${r.rows[0]?.setting}${r.rows[0]?.unit || ""}. Low work_mem causes sorts and hash operations to spill to disk.`,
|
|
724
|
+
fix: `ALTER SYSTEM SET work_mem = '16MB';
|
|
725
|
+
SELECT pg_reload_conf();`,
|
|
726
|
+
impact: "Operations that exceed work_mem use temporary disk files, which is much slower.",
|
|
727
|
+
effort: "quick"
|
|
728
|
+
});
|
|
729
|
+
}
|
|
730
|
+
} catch (err) {
|
|
731
|
+
console.error("[advisor] Error checking work_mem:", err.message);
|
|
732
|
+
}
|
|
733
|
+
try {
|
|
734
|
+
const r = await client.query(`
|
|
735
|
+
SELECT pid, usename, client_addr::text
|
|
736
|
+
FROM pg_stat_activity
|
|
737
|
+
WHERE usename IN (SELECT rolname FROM pg_roles WHERE rolsuper)
|
|
738
|
+
AND client_addr IS NOT NULL
|
|
739
|
+
AND client_addr::text NOT IN ('127.0.0.1', '::1')
|
|
740
|
+
AND pid != pg_backend_pid()
|
|
741
|
+
`);
|
|
742
|
+
for (const row of r.rows) {
|
|
743
|
+
issues.push({
|
|
744
|
+
id: `sec-superuser-remote-${row.pid}`,
|
|
745
|
+
severity: "critical",
|
|
746
|
+
category: "security",
|
|
747
|
+
title: `Superuser ${row.usename} connected from ${row.client_addr}`,
|
|
748
|
+
description: `Superuser ${row.usename} has an active connection from non-localhost address ${row.client_addr}. This is a security risk.`,
|
|
749
|
+
fix: `-- Restrict superuser access in pg_hba.conf to localhost only.
|
|
750
|
+
-- Then: SELECT pg_reload_conf();`,
|
|
751
|
+
impact: "Remote superuser access is a significant security vulnerability.",
|
|
752
|
+
effort: "moderate"
|
|
753
|
+
});
|
|
754
|
+
}
|
|
755
|
+
} catch (err) {
|
|
756
|
+
console.error("[advisor] Error checking superuser connections:", err.message);
|
|
757
|
+
}
|
|
758
|
+
try {
|
|
759
|
+
const r = await client.query(`SELECT setting FROM pg_settings WHERE name = 'ssl'`);
|
|
760
|
+
if (r.rows[0]?.setting === "off") {
|
|
761
|
+
issues.push({
|
|
762
|
+
id: `sec-ssl-off`,
|
|
763
|
+
severity: "warning",
|
|
764
|
+
category: "security",
|
|
765
|
+
title: `SSL is disabled`,
|
|
766
|
+
description: `SSL is turned off. Database connections are not encrypted.`,
|
|
767
|
+
fix: `-- Enable SSL in postgresql.conf:
|
|
768
|
+
-- ssl = on
|
|
769
|
+
-- ssl_cert_file = 'server.crt'
|
|
770
|
+
-- ssl_key_file = 'server.key'
|
|
771
|
+
SELECT pg_reload_conf();`,
|
|
772
|
+
impact: "Database traffic can be intercepted and read in transit.",
|
|
773
|
+
effort: "involved"
|
|
774
|
+
});
|
|
775
|
+
}
|
|
776
|
+
} catch (err) {
|
|
777
|
+
console.error("[advisor] Error checking SSL check:", err.message);
|
|
778
|
+
}
|
|
779
|
+
try {
|
|
780
|
+
const r = await client.query(`
|
|
781
|
+
SELECT type, database, user_name, auth_method
|
|
782
|
+
FROM pg_hba_file_rules
|
|
783
|
+
WHERE auth_method = 'trust' AND type != 'local'
|
|
784
|
+
LIMIT 5
|
|
785
|
+
`);
|
|
786
|
+
for (const row of r.rows) {
|
|
787
|
+
issues.push({
|
|
788
|
+
id: `sec-trust-auth-${row.database}-${row.user_name}`,
|
|
789
|
+
severity: "critical",
|
|
790
|
+
category: "security",
|
|
791
|
+
title: `Trust authentication for ${row.user_name}@${row.database}`,
|
|
792
|
+
description: `HBA rule allows trust (no password) authentication for ${row.type} connections to ${row.database} as ${row.user_name}.`,
|
|
793
|
+
fix: `-- Change auth_method from 'trust' to 'scram-sha-256' in pg_hba.conf
|
|
794
|
+
-- Then: SELECT pg_reload_conf();`,
|
|
795
|
+
impact: "Anyone can connect without a password.",
|
|
796
|
+
effort: "moderate"
|
|
797
|
+
});
|
|
798
|
+
}
|
|
799
|
+
} catch (err) {
|
|
800
|
+
console.error("[advisor] Error checking trust auth:", err.message);
|
|
801
|
+
}
|
|
802
|
+
const score = computeAdvisorScore(issues);
|
|
803
|
+
return {
|
|
804
|
+
score,
|
|
805
|
+
grade: gradeFromScore(score),
|
|
806
|
+
issues,
|
|
807
|
+
breakdown: computeBreakdown(issues)
|
|
808
|
+
};
|
|
809
|
+
} finally {
|
|
810
|
+
client.release();
|
|
811
|
+
}
|
|
812
|
+
}
|
|
813
|
+
/**
 * Allow-list gate for the `pg_dash_fix` tool: decides whether a SQL string is
 * a single, safe maintenance operation that may be executed against the
 * database. Anything not explicitly allowed is rejected.
 *
 * @param {string} sql - raw SQL submitted by the MCP client
 * @returns {boolean} true only for exactly one whitelisted statement
 *
 * NOTE: statements are split on ";" without a SQL tokenizer, so a legitimate
 * single statement containing ";" inside a string literal is rejected (a safe
 * false negative).
 */
function isSafeFix(sql) {
  const trimmed = sql.trim();
  if (!trimmed) return false;
  // Exactly one statement — strip one trailing ";" then split; this blocks
  // piggy-backed commands like "VACUUM; DROP TABLE users".
  const statements = trimmed.replace(/;\s*$/, "").split(";").map((s) => s.trim()).filter(Boolean);
  if (statements.length !== 1) return false;
  const upper = statements[0].toUpperCase();
  // EXPLAIN ANALYZE is permitted only for SELECT. Use a regex gate so any
  // whitespace between the keywords is accepted (the old startsWith gate
  // required exactly one space while the replace below accepted \s+).
  if (/^EXPLAIN\s+ANALYZE\b/.test(upper)) {
    const afterExplain = upper.replace(/^EXPLAIN\s+ANALYZE\s+/, "").trimStart();
    return afterExplain.startsWith("SELECT");
  }
  const ALLOWED_PREFIXES = [
    "VACUUM",
    "ANALYZE",
    "REINDEX",
    "CREATE INDEX CONCURRENTLY",
    "DROP INDEX CONCURRENTLY",
    "SELECT PG_TERMINATE_BACKEND(",
    "SELECT PG_CANCEL_BACKEND("
  ];
  // Prefix must end at a word boundary: "VACUUMX" must not pass as "VACUUM".
  // Prefixes ending in "(" are already delimited by the paren itself.
  return ALLOWED_PREFIXES.some((p) => {
    if (!upper.startsWith(p)) return false;
    if (p.endsWith("(")) return true;
    const next = upper.charAt(p.length);
    return next === "" || /[\s(;]/.test(next);
  });
}
|
|
834
|
+
|
|
835
|
+
// src/mcp.ts
|
|
836
|
+
import Database from "better-sqlite3";
|
|
837
|
+
import path from "path";
|
|
838
|
+
import os from "os";
|
|
839
|
+
import fs from "fs";
|
|
840
|
+
// ---- MCP process bootstrap ------------------------------------------------
// Connection string comes from argv[2] or the environment; without it the
// server cannot do anything useful, so print usage and exit.
const connString = process.argv[2] || process.env.PG_DASH_CONNECTION_STRING;
if (!connString) {
  console.error("Usage: pg-dash-mcp <connection-string>");
  console.error(" or set PG_DASH_CONNECTION_STRING env var");
  process.exit(1);
}
const pool = new Pool({ connectionString: connString });

// Local data directory shared with the pg-dash dashboard server; created
// eagerly so read-only opens below have a stable path to probe.
const dataDir = process.env.PG_DASH_DATA_DIR || path.join(os.homedir(), ".pg-dash");
fs.mkdirSync(dataDir, { recursive: true });

/**
 * Open a SQLite file inside dataDir read-only, if it exists.
 * The MCP process never writes these files (the dashboard server does), so
 * a missing or unreadable file is not fatal — tools report "no data" instead.
 * @param {string} file - file name relative to dataDir (e.g. "schema.db")
 * @returns {object | null} better-sqlite3 Database handle, or null
 */
function openReadonlyDb(file) {
  try {
    const dbPath = path.join(dataDir, file);
    if (fs.existsSync(dbPath)) return new Database(dbPath, { readonly: true });
  } catch (err) {
    console.error("[mcp] Error:", err.message);
  }
  return null;
}

const schemaDb = openReadonlyDb("schema.db");
const alertsDb = openReadonlyDb("alerts.db");

const server = new McpServer({ name: "pg-dash", version: "0.1.0" });
|
|
864
|
+
// Tool: one-shot database overview (server version, uptime, size, connections).
server.tool(
  "pg_dash_overview",
  "Get database overview (version, uptime, size, connections)",
  {},
  async () => {
    try {
      const overview = await getOverview(pool);
      const text = JSON.stringify(overview, null, 2);
      return { content: [{ type: "text", text }] };
    } catch (err) {
      return { content: [{ type: "text", text: `Error: ${err.message}` }], isError: true };
    }
  }
);
|
|
872
|
+
// Tool: health advisor report (score, grade, issue list, category breakdown).
server.tool(
  "pg_dash_health",
  "Get health advisor report with score, grade, and issues",
  {},
  async () => {
    try {
      const report = await getAdvisorReport(pool);
      const text = JSON.stringify(report, null, 2);
      return { content: [{ type: "text", text }] };
    } catch (err) {
      return { content: [{ type: "text", text: `Error: ${err.message}` }], isError: true };
    }
  }
);
|
|
880
|
+
// Tool: table inventory with sizes and row counts.
server.tool(
  "pg_dash_tables",
  "List all tables with sizes and row counts",
  {},
  async () => {
    try {
      const tables = await getTables(pool);
      const text = JSON.stringify(tables, null, 2);
      return { content: [{ type: "text", text }] };
    } catch (err) {
      return { content: [{ type: "text", text: `Error: ${err.message}` }], isError: true };
    }
  }
);
|
|
888
|
+
// Tool: detail view for one table; accepts bare or schema-qualified names.
server.tool(
  "pg_dash_table_detail",
  "Get detailed info about a specific table",
  { table: z.string().describe("Table name (e.g. 'users' or 'public.users')") },
  async ({ table }) => {
    try {
      const detail = await getSchemaTableDetail(pool, table);
      if (!detail) {
        return { content: [{ type: "text", text: "Table not found" }], isError: true };
      }
      return { content: [{ type: "text", text: JSON.stringify(detail, null, 2) }] };
    } catch (err) {
      return { content: [{ type: "text", text: `Error: ${err.message}` }], isError: true };
    }
  }
);
|
|
897
|
+
// Tool: snapshot of current activity (running queries, connection states).
server.tool(
  "pg_dash_activity",
  "Get current database activity (active queries, connections)",
  {},
  async () => {
    try {
      const activity = await getActivity(pool);
      const text = JSON.stringify(activity, null, 2);
      return { content: [{ type: "text", text }] };
    } catch (err) {
      return { content: [{ type: "text", text: `Error: ${err.message}` }], isError: true };
    }
  }
);
|
|
905
|
+
// Tool: recent schema changes, read from the local tracking DB when present.
server.tool(
  "pg_dash_schema_changes",
  "Get recent schema changes",
  {},
  async () => {
    try {
      if (!schemaDb) {
        return { content: [{ type: "text", text: "No schema tracking data available. Run pg-dash server first." }] };
      }
      const rows = schemaDb
        .prepare("SELECT * FROM schema_changes ORDER BY timestamp DESC LIMIT 50")
        .all();
      return { content: [{ type: "text", text: JSON.stringify(rows, null, 2) }] };
    } catch (err) {
      return { content: [{ type: "text", text: `Error: ${err.message}` }], isError: true };
    }
  }
);
|
|
914
|
+
// Tool: execute a single whitelisted maintenance statement. The isSafeFix
// allow-list is the only gate between MCP input and the database.
server.tool(
  "pg_dash_fix",
  "Execute a safe fix (VACUUM, ANALYZE, REINDEX, etc.)",
  { sql: z.string().describe("SQL to execute (must be a safe operation)") },
  async ({ sql }) => {
    try {
      if (!isSafeFix(sql)) {
        return {
          content: [{ type: "text", text: "Operation not allowed. Only VACUUM, ANALYZE, REINDEX, CREATE/DROP INDEX CONCURRENTLY, pg_terminate_backend, pg_cancel_backend, and EXPLAIN ANALYZE are permitted." }],
          isError: true
        };
      }
      const client = await pool.connect();
      try {
        const startedAt = Date.now();
        const result = await client.query(sql);
        const payload = {
          ok: true,
          duration: Date.now() - startedAt,
          rowCount: result.rowCount,
          rows: result.rows || []
        };
        return { content: [{ type: "text", text: JSON.stringify(payload, null, 2) }] };
      } finally {
        client.release();
      }
    } catch (err) {
      return { content: [{ type: "text", text: `Error: ${err.message}` }], isError: true };
    }
  }
);
|
|
929
|
+
// Tool: alert history, read from the local alerts DB when present.
server.tool(
  "pg_dash_alerts",
  "Get alert history",
  {},
  async () => {
    try {
      if (!alertsDb) {
        return { content: [{ type: "text", text: "No alerts data available. Run pg-dash server first." }] };
      }
      const rows = alertsDb
        .prepare("SELECT * FROM alert_history ORDER BY timestamp DESC LIMIT 50")
        .all();
      return { content: [{ type: "text", text: JSON.stringify(rows, null, 2) }] };
    } catch (err) {
      return { content: [{ type: "text", text: `Error: ${err.message}` }], isError: true };
    }
  }
);
|
|
938
|
+
// Start serving MCP requests over stdio (top-level await; blocks until the
// transport closes). `const` instead of `var` — the binding is never reassigned.
const transport = new StdioServerTransport();
await server.connect(transport);
|
|
940
|
+
//# sourceMappingURL=mcp.js.map
|