@indiekitai/pg-dash 0.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/cli.js ADDED
@@ -0,0 +1,2285 @@
1
+ #!/usr/bin/env node
2
// esbuild bundler helpers: lazy module initialization and getter-based exports.
var __defProp = Object.defineProperty;
var __getOwnPropNames = Object.getOwnPropertyNames;
// __esm(fn, res): wraps a module body (the sole method of `fn`) in an
// initializer that runs it at most once. After the first call, `fn` is
// cleared to 0 so later calls just return the cached `res`.
var __esm = (fn, res) => function __init() {
  if (fn) {
    const key = __getOwnPropNames(fn)[0];
    const body = fn[key];
    fn = 0;
    res = (0, body)(fn);
  }
  return res;
};
// __export(target, all): defines every key of `all` on `target` as an
// enumerable lazy getter, mirroring ESM live-binding semantics.
var __export = (target, all) => {
  for (const name of Object.keys(all)) {
    __defProp(target, name, { get: all[name], enumerable: true });
  }
};
11
+
12
// src/server/advisor.ts
// Export namespace for the advisor module. Entries are wired as live getters
// (via __export) so they can reference the functions below before the
// module's own statements have executed — standard esbuild CJS-interop output.
var advisor_exports = {};
__export(advisor_exports, {
  computeAdvisorScore: () => computeAdvisorScore,
  getAdvisorReport: () => getAdvisorReport,
  gradeFromScore: () => gradeFromScore,
  isSafeFix: () => isSafeFix
});
20
/**
 * Score a set of advisor issues on a 0-100 scale, starting from 100 and
 * subtracting a per-severity weight (SEVERITY_WEIGHT) for each issue.
 * Repeats of the same severity count progressively less: full weight for the
 * first 5 occurrences, half for the next 10, a quarter beyond that.
 * @param {Array<{severity: "critical"|"warning"|"info"}>} issues
 * @returns {number} integer score clamped to [0, 100]
 */
function computeAdvisorScore(issues) {
  const seen = { critical: 0, warning: 0, info: 0 };
  let penalty = 0;
  for (const { severity } of issues) {
    seen[severity] += 1;
    const occurrence = seen[severity];
    const weight = SEVERITY_WEIGHT[severity];
    // Diminishing returns: 100% weight, then 50%, then 25%.
    const factor = occurrence <= 5 ? 1 : occurrence <= 15 ? 0.5 : 0.25;
    penalty += weight * factor;
  }
  return Math.max(0, Math.min(100, Math.round(100 - penalty)));
}
33
/**
 * Map a 0-100 advisor score to a letter grade.
 * Bands: >=90 A, >=80 B, >=70 C, >=50 D, otherwise F.
 * @param {number} score
 * @returns {"A"|"B"|"C"|"D"|"F"}
 */
function gradeFromScore(score) {
  const bands = [
    [90, "A"],
    [80, "B"],
    [70, "C"],
    [50, "D"]
  ];
  for (const [floor, grade] of bands) {
    if (score >= floor) return grade;
  }
  return "F";
}
40
/**
 * Build a per-category summary across the four advisor categories.
 * Each entry carries the sub-score, its letter grade, and the issue count.
 * @param {Array<{category: string}>} issues
 * @returns {Record<string, {score: number, grade: string, count: number}>}
 */
function computeBreakdown(issues) {
  const result = {};
  for (const category of ["performance", "maintenance", "schema", "security"]) {
    const subset = issues.filter((issue) => issue.category === category);
    const score = computeAdvisorScore(subset);
    result[category] = {
      score,
      grade: gradeFromScore(score),
      count: subset.length
    };
  }
  return result;
}
50
/**
 * Run a battery of read-only health checks (SELECT/SHOW only) against the
 * database and assemble an advisor report: { score, grade, issues, breakdown }.
 *
 * Every check is wrapped in its own try/catch so a single failure — e.g. a
 * view that requires privileges or a newer/older PG version — only skips that
 * check and logs to stderr; the report is still built from what succeeded.
 *
 * One client is checked out for the whole run and released in the finally.
 *
 * @param {import("pg").Pool} pool
 * @returns {Promise<{score: number, grade: string, issues: Array, breakdown: Object}>}
 */
async function getAdvisorReport(pool) {
  const client = await pool.connect();
  const issues = [];
  try {
    // Check: large tables with heavy sequential-scan traffic (likely missing indexes).
    try {
      const r = await client.query(`
        SELECT schemaname, relname, seq_scan, seq_tup_read, n_live_tup,
               pg_size_pretty(pg_total_relation_size(relid)) AS size
        FROM pg_stat_user_tables
        WHERE n_live_tup > 10000 AND seq_scan > 100
        ORDER BY seq_tup_read DESC LIMIT 10
      `);
      for (const row of r.rows) {
        issues.push({
          id: `perf-seq-scan-${row.schemaname}-${row.relname}`,
          severity: row.seq_scan > 1e3 ? "warning" : "info",
          category: "performance",
          title: `High sequential scans on ${row.relname}`,
          description: `Table ${row.schemaname}.${row.relname} (${row.n_live_tup} rows, ${row.size}) has ${row.seq_scan} sequential scans reading ${Number(row.seq_tup_read).toLocaleString()} tuples. Consider adding indexes on frequently filtered columns.`,
          fix: `-- Identify commonly filtered columns and add indexes:
-- EXPLAIN ANALYZE SELECT * FROM ${row.schemaname}.${row.relname} WHERE <your_condition>;
CREATE INDEX CONCURRENTLY idx_${row.relname}_<column> ON ${row.schemaname}.${row.relname} (<column>);`,
          impact: "Queries will continue to do full table scans, degrading performance as the table grows.",
          effort: "moderate"
        });
      }
    } catch (err) {
      console.error("[advisor] Error checking seq scans:", err.message);
    }
    // Check: indexes more than 3x their table's size (>1 MiB) — bloat candidates.
    try {
      const r = await client.query(`
        SELECT
          schemaname, relname, indexrelname,
          pg_relation_size(indexrelid) AS idx_size,
          pg_relation_size(relid) AS tbl_size,
          pg_size_pretty(pg_relation_size(indexrelid)) AS idx_size_pretty,
          pg_size_pretty(pg_relation_size(relid)) AS tbl_size_pretty
        FROM pg_stat_user_indexes
        WHERE pg_relation_size(indexrelid) > 1048576
          AND pg_relation_size(indexrelid) > pg_relation_size(relid) * 3
        ORDER BY pg_relation_size(indexrelid) DESC LIMIT 10
      `);
      for (const row of r.rows) {
        issues.push({
          id: `perf-bloated-idx-${row.indexrelname}`,
          severity: "warning",
          category: "performance",
          title: `Bloated index ${row.indexrelname}`,
          description: `Index ${row.indexrelname} on ${row.relname} is ${row.idx_size_pretty} but the table is only ${row.tbl_size_pretty}. The index may need rebuilding.`,
          fix: `REINDEX INDEX CONCURRENTLY ${row.schemaname}.${row.indexrelname};`,
          impact: "Bloated indexes waste disk space and slow down queries that use them.",
          effort: "quick"
        });
      }
    } catch (err) {
      console.error("[advisor] Error checking bloated indexes:", err.message);
    }
    // Check: tables whose dead-tuple ratio exceeds 10% (severity bumps at >30%).
    try {
      const r = await client.query(`
        SELECT schemaname, relname, n_dead_tup, n_live_tup,
               CASE WHEN n_live_tup > 0 THEN round(n_dead_tup::numeric / n_live_tup * 100, 1) ELSE 0 END AS dead_pct,
               pg_size_pretty(pg_total_relation_size(relid)) AS size
        FROM pg_stat_user_tables
        WHERE n_live_tup > 1000 AND n_dead_tup::float / GREATEST(n_live_tup, 1) > 0.1
        ORDER BY n_dead_tup DESC LIMIT 10
      `);
      for (const row of r.rows) {
        const pct = parseFloat(row.dead_pct);
        issues.push({
          id: `perf-bloat-${row.schemaname}-${row.relname}`,
          severity: pct > 30 ? "critical" : "warning",
          category: "performance",
          title: `Table bloat on ${row.relname} (${row.dead_pct}% dead)`,
          description: `${row.schemaname}.${row.relname} has ${Number(row.n_dead_tup).toLocaleString()} dead tuples (${row.dead_pct}% of ${Number(row.n_live_tup).toLocaleString()} live rows). Size: ${row.size}.`,
          // NOTE(review): VACUUM FULL takes an ACCESS EXCLUSIVE lock; surfaced
          // as a suggestion only — verify callers gate it before auto-running.
          fix: `VACUUM FULL ${row.schemaname}.${row.relname};`,
          impact: "Dead tuples waste storage and degrade scan performance.",
          effort: pct > 30 ? "moderate" : "quick"
        });
      }
    } catch (err) {
      console.error("[advisor] Error checking table bloat:", err.message);
    }
    // Check: worst heap cache hit ratios; only flagged below 90% (critical < 50%).
    try {
      const r = await client.query(`
        SELECT schemaname, relname,
               heap_blks_hit, heap_blks_read,
               CASE WHEN (heap_blks_hit + heap_blks_read) = 0 THEN 1
                    ELSE heap_blks_hit::float / (heap_blks_hit + heap_blks_read) END AS ratio
        FROM pg_statio_user_tables
        WHERE (heap_blks_hit + heap_blks_read) > 100
        ORDER BY ratio ASC LIMIT 5
      `);
      for (const row of r.rows) {
        const ratio = parseFloat(row.ratio);
        if (ratio < 0.9) {
          issues.push({
            id: `perf-cache-${row.schemaname}-${row.relname}`,
            severity: ratio < 0.5 ? "critical" : "warning",
            category: "performance",
            title: `Poor cache hit ratio on ${row.relname}`,
            description: `Table ${row.schemaname}.${row.relname} has a cache hit ratio of ${(ratio * 100).toFixed(1)}%. Most reads are going to disk.`,
            fix: `-- Consider increasing shared_buffers or reducing working set:
SHOW shared_buffers;`,
            impact: "Disk reads are orders of magnitude slower than memory reads.",
            effort: "involved"
          });
        }
      }
    } catch (err) {
      console.error("[advisor] Error checking cache efficiency:", err.message);
    }
    // Check: slowest statements (mean > 100ms) via pg_stat_statements, if installed.
    try {
      const extCheck = await client.query("SELECT 1 FROM pg_extension WHERE extname = 'pg_stat_statements'");
      if (extCheck.rows.length > 0) {
        const r = await client.query(`
          SELECT query, calls, mean_exec_time, total_exec_time,
                 round(mean_exec_time::numeric, 2) AS mean_ms,
                 round(total_exec_time::numeric / 1000, 2) AS total_sec
          FROM pg_stat_statements
          WHERE query NOT LIKE '%pg_stat%' AND query NOT LIKE '%pg_catalog%'
            AND mean_exec_time > 100
          ORDER BY mean_exec_time DESC LIMIT 5
        `);
        for (const row of r.rows) {
          issues.push({
            // Issue id derived from the first 30 chars of the query text.
            id: `perf-slow-${row.query.slice(0, 30).replace(/\W/g, "_")}`,
            severity: parseFloat(row.mean_ms) > 1e3 ? "warning" : "info",
            category: "performance",
            title: `Slow query (avg ${row.mean_ms}ms)`,
            description: `Query averaging ${row.mean_ms}ms over ${row.calls} calls (total: ${row.total_sec}s): ${row.query.slice(0, 200)}`,
            // NOTE(review): slicing to 500 chars can truncate mid-statement,
            // producing invalid SQL in the suggested fix — confirm downstream
            // only displays this (isSafeFix limits auto-run to full SELECTs).
            fix: `EXPLAIN ANALYZE ${row.query.slice(0, 500)};`,
            impact: "Slow queries degrade overall database responsiveness.",
            effort: "moderate"
          });
        }
      }
    } catch (err) {
      console.error("[advisor] Error checking slow queries:", err.message);
    }
    // Check: tables never vacuumed, or not vacuumed in the last 7 days.
    try {
      const r = await client.query(`
        SELECT schemaname, relname, last_vacuum, last_autovacuum, n_dead_tup
        FROM pg_stat_user_tables
        WHERE n_live_tup > 100
          AND (last_vacuum IS NULL AND last_autovacuum IS NULL
               OR GREATEST(last_vacuum, last_autovacuum) < now() - interval '7 days')
        ORDER BY n_dead_tup DESC LIMIT 15
      `);
      for (const row of r.rows) {
        const never = !row.last_vacuum && !row.last_autovacuum;
        issues.push({
          id: `maint-vacuum-${row.schemaname}-${row.relname}`,
          severity: never ? "warning" : "info",
          category: "maintenance",
          title: `VACUUM ${never ? "never run" : "overdue"} on ${row.relname}`,
          description: `${row.schemaname}.${row.relname} ${never ? "has never been vacuumed" : "was last vacuumed over 7 days ago"}. Dead tuples: ${Number(row.n_dead_tup).toLocaleString()}.`,
          fix: `VACUUM ANALYZE ${row.schemaname}.${row.relname};`,
          impact: "Dead tuples accumulate, increasing table size and degrading query performance.",
          effort: "quick"
        });
      }
    } catch (err) {
      console.error("[advisor] Error checking vacuum overdue:", err.message);
    }
    // Check: tables never analyzed (skipping those already flagged by the
    // never-vacuumed check above, via the NOT EXISTS subquery).
    try {
      const r = await client.query(`
        SELECT schemaname, relname
        FROM pg_stat_user_tables
        WHERE n_live_tup > 100
          AND last_analyze IS NULL AND last_autoanalyze IS NULL
          AND NOT EXISTS (
            SELECT 1 FROM pg_stat_user_tables t2
            WHERE t2.relname = pg_stat_user_tables.relname
              AND (t2.last_vacuum IS NULL AND t2.last_autovacuum IS NULL)
          )
        LIMIT 10
      `);
      for (const row of r.rows) {
        issues.push({
          id: `maint-analyze-${row.schemaname}-${row.relname}`,
          severity: "info",
          category: "maintenance",
          title: `ANALYZE never run on ${row.relname}`,
          description: `${row.schemaname}.${row.relname} has never been analyzed. The query planner may choose suboptimal plans.`,
          fix: `ANALYZE ${row.schemaname}.${row.relname};`,
          impact: "Without statistics, the query planner makes poor estimates leading to slow queries.",
          effort: "quick"
        });
      }
    } catch (err) {
      console.error("[advisor] Error checking analyze overdue:", err.message);
    }
    // Check: transaction ID age of the current database (warn > 500M, critical > 1B).
    try {
      const r = await client.query(`
        SELECT datname, age(datfrozenxid) AS xid_age
        FROM pg_database
        WHERE datname = current_database()
      `);
      for (const row of r.rows) {
        const age = parseInt(row.xid_age);
        if (age > 1e9) {
          issues.push({
            id: `maint-xid-wraparound`,
            severity: "critical",
            category: "maintenance",
            title: `Transaction ID wraparound risk`,
            description: `Database ${row.datname} has datfrozenxid age of ${age.toLocaleString()}. Wraparound occurs at ~2 billion.`,
            fix: `VACUUM FREEZE;`,
            impact: "If wraparound occurs, PostgreSQL will shut down to prevent data loss.",
            effort: "involved"
          });
        } else if (age > 5e8) {
          issues.push({
            id: `maint-xid-warning`,
            severity: "warning",
            category: "maintenance",
            title: `Transaction ID age is high`,
            description: `Database ${row.datname} has datfrozenxid age of ${age.toLocaleString()}.`,
            fix: `VACUUM FREEZE;`,
            impact: "Approaching transaction ID wraparound threshold.",
            effort: "moderate"
          });
        }
      }
    } catch (err) {
      console.error("[advisor] Error checking xid wraparound:", err.message);
    }
    // Check: connections idle (or idle in transaction) for more than 10 minutes.
    try {
      const r = await client.query(`
        SELECT pid, state, now() - state_change AS idle_duration,
               client_addr::text, application_name,
               extract(epoch from now() - state_change)::int AS idle_seconds
        FROM pg_stat_activity
        WHERE state IN ('idle', 'idle in transaction')
          AND now() - state_change > interval '10 minutes'
          AND pid != pg_backend_pid()
      `);
      for (const row of r.rows) {
        const isIdleTx = row.state === "idle in transaction";
        issues.push({
          id: `maint-idle-${row.pid}`,
          severity: isIdleTx ? "warning" : "info",
          category: "maintenance",
          title: `${isIdleTx ? "Idle in transaction" : "Idle connection"} (PID ${row.pid})`,
          description: `PID ${row.pid} from ${row.client_addr || "local"} (${row.application_name || "unknown"}) has been ${row.state} for ${Math.round(row.idle_seconds / 60)} minutes.`,
          fix: `SELECT pg_terminate_backend(${row.pid});`,
          impact: isIdleTx ? "Idle-in-transaction connections hold locks and prevent VACUUM." : "Idle connections consume connection slots.",
          effort: "quick"
        });
      }
    } catch (err) {
      console.error("[advisor] Error checking idle connections:", err.message);
    }
    // Check: public-schema tables without a primary key constraint.
    try {
      const r = await client.query(`
        SELECT c.relname AS table_name, n.nspname AS schema
        FROM pg_class c
        JOIN pg_namespace n ON c.relnamespace = n.oid
        WHERE c.relkind = 'r' AND n.nspname = 'public'
          AND NOT EXISTS (
            SELECT 1 FROM pg_constraint con WHERE con.conrelid = c.oid AND con.contype = 'p'
          )
      `);
      for (const row of r.rows) {
        issues.push({
          id: `schema-no-pk-${row.schema}-${row.table_name}`,
          severity: "warning",
          category: "schema",
          title: `Missing primary key on ${row.table_name}`,
          description: `Table ${row.schema}.${row.table_name} has no primary key. This can cause replication issues and makes row identification unreliable.`,
          fix: `ALTER TABLE ${row.schema}.${row.table_name} ADD PRIMARY KEY (<column>);`,
          impact: "No primary key means no unique row identity, problematic for replication and ORMs.",
          effort: "moderate"
        });
      }
    } catch (err) {
      console.error("[advisor] Error checking missing primary keys:", err.message);
    }
    // Check: never-scanned non-PK indexes larger than 1 MiB.
    try {
      const r = await client.query(`
        SELECT schemaname, relname, indexrelname, idx_scan,
               pg_size_pretty(pg_relation_size(indexrelid)) AS idx_size,
               pg_relation_size(indexrelid) AS idx_bytes
        FROM pg_stat_user_indexes
        WHERE idx_scan = 0
          AND indexrelname NOT LIKE '%_pkey'
          AND pg_relation_size(indexrelid) > 1048576
        ORDER BY pg_relation_size(indexrelid) DESC LIMIT 10
      `);
      for (const row of r.rows) {
        issues.push({
          id: `schema-unused-idx-${row.indexrelname}`,
          severity: "warning",
          category: "schema",
          title: `Unused index ${row.indexrelname} (${row.idx_size})`,
          description: `Index ${row.indexrelname} on ${row.relname} has never been used (0 scans) and takes ${row.idx_size}.`,
          fix: `DROP INDEX CONCURRENTLY ${row.schemaname}.${row.indexrelname};`,
          impact: "Unused indexes waste disk space and slow down writes.",
          effort: "quick"
        });
      }
    } catch (err) {
      console.error("[advisor] Error checking unused indexes:", err.message);
    }
    // Check: indexes sharing the exact same (table, column-list) signature.
    try {
      const r = await client.query(`
        SELECT array_agg(idx.indexrelid::regclass::text) AS indexes,
               idx.indrelid::regclass::text AS table_name,
               pg_size_pretty(sum(pg_relation_size(idx.indexrelid))) AS total_size
        FROM pg_index idx
        GROUP BY idx.indrelid, idx.indkey
        HAVING count(*) > 1
      `);
      for (const row of r.rows) {
        issues.push({
          id: `schema-dup-idx-${row.table_name}-${row.indexes[0]}`,
          severity: "warning",
          category: "schema",
          title: `Duplicate indexes on ${row.table_name}`,
          description: `These indexes cover the same columns on ${row.table_name}: ${row.indexes.join(", ")}. Total wasted space: ${row.total_size}.`,
          fix: `-- Keep one, drop the rest:
DROP INDEX CONCURRENTLY ${row.indexes.slice(1).join(";\nDROP INDEX CONCURRENTLY ")};`,
          impact: "Duplicate indexes double the write overhead and waste disk space.",
          effort: "quick"
        });
      }
    } catch (err) {
      console.error("[advisor] Error checking duplicate indexes:", err.message);
    }
    // Check: foreign-key columns with no covering index on the referencing table.
    try {
      const r = await client.query(`
        SELECT
          conrelid::regclass::text AS table_name,
          a.attname AS column_name,
          confrelid::regclass::text AS referenced_table
        FROM pg_constraint c
        JOIN pg_attribute a ON a.attrelid = c.conrelid AND a.attnum = ANY(c.conkey)
        WHERE c.contype = 'f'
          AND NOT EXISTS (
            SELECT 1 FROM pg_index i
            WHERE i.indrelid = c.conrelid
              AND a.attnum = ANY(i.indkey)
          )
      `);
      for (const row of r.rows) {
        issues.push({
          id: `schema-fk-no-idx-${row.table_name}-${row.column_name}`,
          severity: "warning",
          category: "schema",
          title: `Missing index on FK column ${row.table_name}.${row.column_name}`,
          description: `Foreign key column ${row.column_name} on ${row.table_name} (references ${row.referenced_table}) has no index. This causes slow JOINs and cascading deletes.`,
          fix: `CREATE INDEX CONCURRENTLY idx_${row.table_name.replace(/\./g, "_")}_${row.column_name} ON ${row.table_name} (${row.column_name});`,
          impact: "JOINs and cascading deletes on this FK will require full table scans.",
          effort: "quick"
        });
      }
    } catch (err) {
      console.error("[advisor] Error checking missing FK indexes:", err.message);
    }
    // Check: currently blocked backends and the PIDs blocking them
    // (classic pg_locks self-join pairing ungranted locks with their holders).
    try {
      const r = await client.query(`
        SELECT blocked_locks.pid AS blocked_pid,
               blocking_locks.pid AS blocking_pid,
               blocked_activity.query AS blocked_query
        FROM pg_catalog.pg_locks blocked_locks
        JOIN pg_catalog.pg_locks blocking_locks ON blocking_locks.locktype = blocked_locks.locktype
          AND blocking_locks.database IS NOT DISTINCT FROM blocked_locks.database
          AND blocking_locks.relation IS NOT DISTINCT FROM blocked_locks.relation
          AND blocking_locks.page IS NOT DISTINCT FROM blocked_locks.page
          AND blocking_locks.tuple IS NOT DISTINCT FROM blocked_locks.tuple
          AND blocking_locks.virtualxid IS NOT DISTINCT FROM blocked_locks.virtualxid
          AND blocking_locks.transactionid IS NOT DISTINCT FROM blocked_locks.transactionid
          AND blocking_locks.classid IS NOT DISTINCT FROM blocked_locks.classid
          AND blocking_locks.objid IS NOT DISTINCT FROM blocked_locks.objid
          AND blocking_locks.objsubid IS NOT DISTINCT FROM blocked_locks.objsubid
          AND blocking_locks.pid != blocked_locks.pid
        JOIN pg_catalog.pg_stat_activity blocked_activity ON blocked_activity.pid = blocked_locks.pid
        WHERE NOT blocked_locks.granted
      `);
      for (const row of r.rows) {
        issues.push({
          id: `perf-lock-blocked-${row.blocked_pid}`,
          severity: "warning",
          category: "performance",
          title: `Blocked query (PID ${row.blocked_pid} blocked by PID ${row.blocking_pid})`,
          description: `PID ${row.blocked_pid} is waiting for a lock held by PID ${row.blocking_pid}. Query: ${(row.blocked_query || "").slice(0, 200)}`,
          fix: `SELECT pg_cancel_backend(${row.blocking_pid});`,
          impact: "Blocked queries cause cascading delays and potential timeouts.",
          effort: "quick"
        });
      }
    } catch (err) {
      console.error("[advisor] Error checking locks:", err.message);
    }
    // Check: WAL replay lag when this server is a recovering replica
    // (lag_bytes is 0 on a primary, so no issue is raised there).
    try {
      const r = await client.query(`
        SELECT CASE WHEN pg_is_in_recovery()
               THEN pg_wal_lsn_diff(pg_last_wal_receive_lsn(), pg_last_wal_replay_lsn())
               ELSE 0 END AS lag_bytes
      `);
      const lagBytes = parseInt(r.rows[0]?.lag_bytes ?? "0");
      if (lagBytes > 1048576) {
        issues.push({
          id: `perf-replication-lag`,
          severity: lagBytes > 104857600 ? "critical" : "warning",
          category: "performance",
          title: `Replication lag: ${(lagBytes / 1048576).toFixed(1)} MB`,
          description: `WAL replay is lagging by ${(lagBytes / 1048576).toFixed(1)} MB. This indicates the replica is falling behind.`,
          fix: `-- Check replication status:
SELECT * FROM pg_stat_replication;`,
          impact: "High replication lag means the replica has stale data and failover may lose transactions.",
          effort: "involved"
        });
      }
    } catch (err) {
      console.error("[advisor] Error checking replication lag:", err.message);
    }
    // Check: share of requested (vs. timed) checkpoints; flagged above 50%.
    // NOTE(review): these pg_stat_bgwriter columns moved in PG 17
    // (pg_stat_checkpointer); there this check fails and is logged — confirm.
    try {
      const r = await client.query(`
        SELECT checkpoints_req, checkpoints_timed,
               CASE WHEN (checkpoints_req + checkpoints_timed) = 0 THEN 0
                    ELSE round(checkpoints_req::numeric / (checkpoints_req + checkpoints_timed) * 100, 1) END AS req_pct
        FROM pg_stat_bgwriter
      `);
      const reqPct = parseFloat(r.rows[0]?.req_pct ?? "0");
      if (reqPct > 50) {
        issues.push({
          id: `maint-checkpoint-frequency`,
          severity: reqPct > 80 ? "warning" : "info",
          category: "maintenance",
          title: `${reqPct}% of checkpoints are requested (not timed)`,
          description: `${r.rows[0]?.checkpoints_req} requested vs ${r.rows[0]?.checkpoints_timed} timed checkpoints. High requested checkpoints indicate checkpoint_completion_target or max_wal_size may need tuning.`,
          fix: `-- Increase max_wal_size:
ALTER SYSTEM SET max_wal_size = '2GB';
SELECT pg_reload_conf();`,
          impact: "Frequent requested checkpoints cause I/O spikes and degrade performance.",
          effort: "moderate"
        });
      }
    } catch (err) {
      console.error("[advisor] Error checking checkpoint frequency:", err.message);
    }
    // Check: autovacuum disabled globally.
    try {
      const r = await client.query(`SELECT setting FROM pg_settings WHERE name = 'autovacuum'`);
      if (r.rows[0]?.setting === "off") {
        issues.push({
          id: `maint-autovacuum-disabled`,
          severity: "critical",
          category: "maintenance",
          title: `Autovacuum is disabled`,
          description: `Autovacuum is turned off. Dead tuples will accumulate and transaction ID wraparound becomes a risk.`,
          fix: `ALTER SYSTEM SET autovacuum = on;
SELECT pg_reload_conf();`,
          impact: "Without autovacuum, tables bloat indefinitely and risk transaction ID wraparound shutdown.",
          effort: "quick"
        });
      }
    } catch (err) {
      console.error("[advisor] Error checking autovacuum:", err.message);
    }
    // Check: shared_buffers below 128 MiB (computed as setting * block_size).
    try {
      const sbRes = await client.query(`SELECT setting, unit FROM pg_settings WHERE name = 'shared_buffers'`);
      const memRes = await client.query(`
        SELECT (SELECT setting::bigint FROM pg_settings WHERE name = 'shared_buffers') *
               (SELECT setting::bigint FROM pg_settings WHERE name = 'block_size') AS shared_bytes
      `);
      const sharedBytes = parseInt(memRes.rows[0]?.shared_bytes ?? "0");
      if (sharedBytes > 0 && sharedBytes < 128 * 1024 * 1024) {
        issues.push({
          id: `perf-shared-buffers-low`,
          severity: "warning",
          category: "performance",
          title: `shared_buffers is only ${(sharedBytes / 1048576).toFixed(0)} MB`,
          description: `shared_buffers is set to ${sbRes.rows[0]?.setting}${sbRes.rows[0]?.unit || ""}. Recommended: ~25% of system RAM, typically at least 256MB for production.`,
          fix: `ALTER SYSTEM SET shared_buffers = '256MB';
-- Requires restart`,
          impact: "Low shared_buffers means more disk I/O and poor cache hit ratios.",
          effort: "involved"
        });
      }
    } catch (err) {
      console.error("[advisor] Error checking shared_buffers:", err.message);
    }
    // Check: work_mem under 4 MB (pg_settings reports work_mem in kB units).
    try {
      const r = await client.query(`SELECT setting, unit FROM pg_settings WHERE name = 'work_mem'`);
      const workMemKB = parseInt(r.rows[0]?.setting ?? "0");
      if (workMemKB > 0 && workMemKB < 4096) {
        issues.push({
          id: `perf-work-mem-low`,
          severity: "info",
          category: "performance",
          title: `work_mem is only ${workMemKB < 1024 ? workMemKB + "kB" : (workMemKB / 1024).toFixed(0) + "MB"}`,
          description: `work_mem is ${r.rows[0]?.setting}${r.rows[0]?.unit || ""}. Low work_mem causes sorts and hash operations to spill to disk.`,
          fix: `ALTER SYSTEM SET work_mem = '16MB';
SELECT pg_reload_conf();`,
          impact: "Operations that exceed work_mem use temporary disk files, which is much slower.",
          effort: "quick"
        });
      }
    } catch (err) {
      console.error("[advisor] Error checking work_mem:", err.message);
    }
    // Check: superuser sessions connected from non-loopback addresses.
    // NOTE(review): the NOT IN list only covers plain '127.0.0.1'/'::1';
    // other loopback textual forms would still be flagged — confirm intent.
    try {
      const r = await client.query(`
        SELECT pid, usename, client_addr::text
        FROM pg_stat_activity
        WHERE usename IN (SELECT rolname FROM pg_roles WHERE rolsuper)
          AND client_addr IS NOT NULL
          AND client_addr::text NOT IN ('127.0.0.1', '::1')
          AND pid != pg_backend_pid()
      `);
      for (const row of r.rows) {
        issues.push({
          id: `sec-superuser-remote-${row.pid}`,
          severity: "critical",
          category: "security",
          title: `Superuser ${row.usename} connected from ${row.client_addr}`,
          description: `Superuser ${row.usename} has an active connection from non-localhost address ${row.client_addr}. This is a security risk.`,
          fix: `-- Restrict superuser access in pg_hba.conf to localhost only.
-- Then: SELECT pg_reload_conf();`,
          impact: "Remote superuser access is a significant security vulnerability.",
          effort: "moderate"
        });
      }
    } catch (err) {
      console.error("[advisor] Error checking superuser connections:", err.message);
    }
    // Check: SSL disabled server-wide.
    try {
      const r = await client.query(`SELECT setting FROM pg_settings WHERE name = 'ssl'`);
      if (r.rows[0]?.setting === "off") {
        issues.push({
          id: `sec-ssl-off`,
          severity: "warning",
          category: "security",
          title: `SSL is disabled`,
          description: `SSL is turned off. Database connections are not encrypted.`,
          fix: `-- Enable SSL in postgresql.conf:
-- ssl = on
-- ssl_cert_file = 'server.crt'
-- ssl_key_file = 'server.key'
SELECT pg_reload_conf();`,
          impact: "Database traffic can be intercepted and read in transit.",
          effort: "involved"
        });
      }
    } catch (err) {
      console.error("[advisor] Error checking SSL check:", err.message);
    }
    // Check: non-local pg_hba rules using passwordless 'trust' authentication.
    try {
      const r = await client.query(`
        SELECT type, database, user_name, auth_method
        FROM pg_hba_file_rules
        WHERE auth_method = 'trust' AND type != 'local'
        LIMIT 5
      `);
      for (const row of r.rows) {
        issues.push({
          id: `sec-trust-auth-${row.database}-${row.user_name}`,
          severity: "critical",
          category: "security",
          title: `Trust authentication for ${row.user_name}@${row.database}`,
          description: `HBA rule allows trust (no password) authentication for ${row.type} connections to ${row.database} as ${row.user_name}.`,
          fix: `-- Change auth_method from 'trust' to 'scram-sha-256' in pg_hba.conf
-- Then: SELECT pg_reload_conf();`,
          impact: "Anyone can connect without a password.",
          effort: "moderate"
        });
      }
    } catch (err) {
      console.error("[advisor] Error checking trust auth:", err.message);
    }
    // Aggregate the collected issues into the final report shape.
    const score = computeAdvisorScore(issues);
    return {
      score,
      grade: gradeFromScore(score),
      issues,
      breakdown: computeBreakdown(issues)
    };
  } finally {
    client.release();
  }
}
632
/**
 * Whitelist check deciding whether an advisor "fix" SQL snippet is safe to
 * execute automatically. Only a single statement is accepted, and it must be
 * either `EXPLAIN ANALYZE <select>` or start with one of a fixed set of
 * maintenance prefixes (VACUUM / ANALYZE / REINDEX / CREATE-DROP INDEX
 * CONCURRENTLY / pg_terminate_backend / pg_cancel_backend).
 *
 * @param {string} sql - candidate fix statement (may end with ';').
 * @returns {boolean} true when the statement matches the whitelist.
 */
function isSafeFix(sql) {
  const trimmed = sql.trim();
  if (!trimmed) return false;
  // Enforce exactly one statement. The split is naive — a ';' inside a string
  // literal makes a safe statement look multi-statement — but that fails
  // closed (rejects), which is acceptable for a safety gate.
  const statements = trimmed.replace(/;\s*$/, "").split(";").map((s) => s.trim()).filter(Boolean);
  if (statements.length !== 1) return false;
  const upper = statements[0].toUpperCase();
  // EXPLAIN ANALYZE executes the statement, so it is only allowed for SELECTs.
  // Bug fix: detection previously used startsWith("EXPLAIN ANALYZE") (single
  // literal space) while stripping used /^EXPLAIN\s+ANALYZE\s+/, so forms like
  // "EXPLAIN  ANALYZE SELECT ..." or "EXPLAIN\nANALYZE SELECT ..." were
  // inconsistently rejected. Use the same whitespace-tolerant pattern for both.
  const EXPLAIN_ANALYZE = /^EXPLAIN\s+ANALYZE\s+/;
  if (EXPLAIN_ANALYZE.test(upper)) {
    const afterExplain = upper.replace(EXPLAIN_ANALYZE, "").trimStart();
    return afterExplain.startsWith("SELECT");
  }
  const ALLOWED_PREFIXES = [
    "VACUUM",
    "ANALYZE",
    "REINDEX",
    "CREATE INDEX CONCURRENTLY",
    "DROP INDEX CONCURRENTLY",
    "SELECT PG_TERMINATE_BACKEND(",
    "SELECT PG_CANCEL_BACKEND("
  ];
  return ALLOWED_PREFIXES.some((p) => upper.startsWith(p));
}
653
// Module state for src/server/advisor.ts, assigned inside the __esm
// initializer so the bundled module evaluates lazily on first init_advisor().
var SEVERITY_WEIGHT;
var init_advisor = __esm({
  "src/server/advisor.ts"() {
    "use strict";
    // Score penalty per issue, keyed by severity (see computeAdvisorScore).
    SEVERITY_WEIGHT = { critical: 20, warning: 8, info: 3 };
  }
});
660
+
661
+ // src/cli.ts
662
+ import { parseArgs } from "util";
663
+
664
+ // src/server/index.ts
665
+ import { Hono } from "hono";
666
+ import path2 from "path";
667
+ import fs2 from "fs";
668
+ import os2 from "os";
669
+ import { fileURLToPath } from "url";
670
+ import { Pool } from "pg";
671
+
672
+ // src/server/queries/overview.ts
673
/**
 * Gather high-level server stats: version, uptime, current database size,
 * database count, and connection usage vs. max_connections.
 * @param {import("pg").Pool} pool
 * @returns {Promise<{version: string, uptime: *, dbSize: string, databaseCount: number, connections: Object}>}
 */
async function getOverview(pool) {
  const client = await pool.connect();
  try {
    // All queries run sequentially on the single checked-out client.
    const versionRes = await client.query("SHOW server_version");
    const uptimeRes = await client.query(
      "SELECT now() - pg_postmaster_start_time() AS uptime"
    );
    const sizeRes = await client.query(
      "SELECT pg_size_pretty(pg_database_size(current_database())) AS size"
    );
    const countRes = await client.query(
      "SELECT count(*)::int AS count FROM pg_database WHERE NOT datistemplate"
    );
    const connRes = await client.query(`
      SELECT
        (SELECT count(*)::int FROM pg_stat_activity WHERE state = 'active') AS active,
        (SELECT count(*)::int FROM pg_stat_activity WHERE state = 'idle') AS idle,
        (SELECT setting::int FROM pg_settings WHERE name = 'max_connections') AS max
    `);
    return {
      version: versionRes.rows[0].server_version,
      uptime: uptimeRes.rows[0].uptime,
      dbSize: sizeRes.rows[0].size,
      databaseCount: countRes.rows[0].count,
      connections: connRes.rows[0]
    };
  } finally {
    client.release();
  }
}
703
+
704
+ // src/server/queries/databases.ts
705
/**
 * List all non-template databases with human-readable and raw byte sizes,
 * ordered largest first.
 * @param {import("pg").Pool} pool
 * @returns {Promise<Array<{name: string, size: string, size_bytes: *}>>}
 */
async function getDatabases(pool) {
  const client = await pool.connect();
  try {
    const { rows } = await client.query(`
      SELECT datname AS name,
             pg_size_pretty(pg_database_size(datname)) AS size,
             pg_database_size(datname) AS size_bytes
      FROM pg_database
      WHERE NOT datistemplate
      ORDER BY pg_database_size(datname) DESC
    `);
    return rows;
  } finally {
    client.release();
  }
}
721
+
722
+ // src/server/queries/tables.ts
723
/**
 * List user tables with size, live/dead tuple counts, and the dead-tuple
 * percentage, ordered by total relation size (largest first).
 * @param {import("pg").Pool} pool
 * @returns {Promise<Array<Object>>}
 */
async function getTables(pool) {
  const client = await pool.connect();
  try {
    const { rows } = await client.query(`
      SELECT
        schemaname AS schema,
        relname AS name,
        pg_size_pretty(pg_total_relation_size(relid)) AS total_size,
        pg_total_relation_size(relid) AS size_bytes,
        n_live_tup AS rows,
        n_dead_tup AS dead_tuples,
        CASE WHEN n_live_tup > 0
             THEN round(n_dead_tup::numeric / n_live_tup * 100, 1)
             ELSE 0 END AS dead_pct
      FROM pg_stat_user_tables
      ORDER BY pg_total_relation_size(relid) DESC
    `);
    return rows;
  } finally {
    client.release();
  }
}
745
+
746
+ // src/server/queries/activity.ts
747
/**
 * Snapshot of current backend activity (excluding this session), ordered so
 * active sessions come first, then idle-in-transaction, then the rest, each
 * group oldest query first.
 * @param {import("pg").Pool} pool
 * @returns {Promise<Array<Object>>}
 */
async function getActivity(pool) {
  const client = await pool.connect();
  try {
    const { rows } = await client.query(`
      SELECT
        pid,
        COALESCE(query, '') AS query,
        COALESCE(state, 'unknown') AS state,
        wait_event,
        wait_event_type,
        CASE WHEN state = 'active' THEN (now() - query_start)::text
             WHEN state = 'idle in transaction' THEN (now() - state_change)::text
             ELSE NULL END AS duration,
        client_addr::text,
        COALESCE(application_name, '') AS application_name,
        backend_start::text
      FROM pg_stat_activity
      WHERE pid != pg_backend_pid()
        AND state IS NOT NULL
      ORDER BY
        CASE state
          WHEN 'active' THEN 1
          WHEN 'idle in transaction' THEN 2
          ELSE 3
        END,
        query_start ASC NULLS LAST
    `);
    return rows;
  } finally {
    client.release();
  }
}
779
+
780
// src/server/queries/slow-queries.ts
/**
 * Top 50 statements by total execution time from pg_stat_statements.
 *
 * Slow-query data is best-effort: when the extension is not installed, or
 * the query fails (insufficient privileges, older extension versions that
 * lack total_exec_time/mean_exec_time), an empty list is returned instead
 * of an error.
 *
 * @param {import('pg').Pool} pool
 * @returns {Promise<object[]>} rows with query text, call counts and timings
 */
async function getSlowQueries(pool) {
  const client = await pool.connect();
  try {
    // The extension is optional; bail out quietly when it is absent.
    const extCheck = await client.query(
      "SELECT 1 FROM pg_extension WHERE extname = 'pg_stat_statements'"
    );
    if (extCheck.rows.length === 0) {
      return [];
    }
    const r = await client.query(`
      SELECT
        queryid::text,
        query,
        calls::int,
        total_exec_time AS total_time,
        mean_exec_time AS mean_time,
        rows::int,
        round(total_exec_time::numeric / 1000, 2)::text || 's' AS total_time_pretty,
        round(mean_exec_time::numeric, 2)::text || 'ms' AS mean_time_pretty
      FROM pg_stat_statements
      WHERE query NOT LIKE '%pg_stat%'
        AND query NOT LIKE '%pg_catalog%'
      ORDER BY total_exec_time DESC
      LIMIT 50
    `);
    return r.rows;
  } catch (err) {
    // FIX: previously a bare `catch {}` swallowed every failure silently.
    // Keep the best-effort degradation to [], but surface the cause in logs.
    console.error("[slow-queries] Error:", err.message);
    return [];
  } finally {
    client.release();
  }
}
813
+
814
+ // src/server/index.ts
815
+ init_advisor();
816
+
817
// src/server/queries/schema.ts
/**
 * All user tables (relkind 'r') outside the system schemas, with total /
 * heap / index sizes, live row counts and table comments.
 *
 * @param {import('pg').Pool} pool
 * @returns {Promise<object[]>} rows ordered by total relation size, descending
 */
async function getSchemaTables(pool) {
  const conn = await pool.connect();
  try {
    const { rows } = await conn.query(`
      SELECT
        c.relname AS name,
        n.nspname AS schema,
        pg_size_pretty(pg_total_relation_size(c.oid)) AS total_size,
        pg_total_relation_size(c.oid) AS total_size_bytes,
        pg_size_pretty(pg_relation_size(c.oid)) AS table_size,
        pg_size_pretty(pg_total_relation_size(c.oid) - pg_relation_size(c.oid)) AS index_size,
        s.n_live_tup AS row_count,
        obj_description(c.oid) AS description
      FROM pg_class c
      JOIN pg_namespace n ON c.relnamespace = n.oid
      LEFT JOIN pg_stat_user_tables s ON s.relid = c.oid
      WHERE c.relkind = 'r' AND n.nspname NOT IN ('pg_catalog', 'information_schema')
      ORDER BY pg_total_relation_size(c.oid) DESC
    `);
    return rows;
  } finally {
    conn.release();
  }
}
842
/**
 * Fetch full detail for a single table: sizes and maintenance stats,
 * columns, indexes, constraints, foreign keys, and up to 10 sample rows.
 *
 * @param {import('pg').Pool} pool
 * @param {string} tableName - "schema.table" or a bare table name
 *   (bare names are resolved against the public schema)
 * @returns {Promise<object|null>} null when no matching table exists
 */
async function getSchemaTableDetail(pool, tableName) {
  const client = await pool.connect();
  try {
    // Split "schema.table"; default to the public schema for bare names.
    const parts = tableName.split(".");
    const schema = parts.length > 1 ? parts[0] : "public";
    const name = parts.length > 1 ? parts[1] : parts[0];
    // Base table info plus vacuum/analyze timestamps and scan counters.
    const tableInfo = await client.query(`
      SELECT
        c.relname AS name, n.nspname AS schema,
        pg_size_pretty(pg_total_relation_size(c.oid)) AS total_size,
        pg_size_pretty(pg_relation_size(c.oid)) AS table_size,
        pg_size_pretty(pg_total_relation_size(c.oid) - pg_relation_size(c.oid)) AS index_size,
        pg_size_pretty(pg_relation_size(c.reltoastrelid)) AS toast_size,
        s.n_live_tup AS row_count, s.n_dead_tup AS dead_tuples,
        s.last_vacuum, s.last_autovacuum, s.last_analyze, s.last_autoanalyze,
        s.seq_scan, s.idx_scan
      FROM pg_class c
      JOIN pg_namespace n ON c.relnamespace = n.oid
      LEFT JOIN pg_stat_user_tables s ON s.relid = c.oid
      WHERE c.relname = $1 AND n.nspname = $2 AND c.relkind = 'r'
    `, [name, schema]);
    if (tableInfo.rows.length === 0) return null;
    // Column definitions (system and dropped columns excluded).
    const columns = await client.query(`
      SELECT
        a.attname AS name,
        pg_catalog.format_type(a.atttypid, a.atttypmod) AS type,
        NOT a.attnotnull AS nullable,
        pg_get_expr(d.adbin, d.adrelid) AS default_value,
        col_description(a.attrelid, a.attnum) AS description
      FROM pg_attribute a
      LEFT JOIN pg_attrdef d ON a.attrelid = d.adrelid AND a.attnum = d.adnum
      WHERE a.attrelid = (SELECT c.oid FROM pg_class c JOIN pg_namespace n ON c.relnamespace = n.oid WHERE c.relname = $1 AND n.nspname = $2)
        AND a.attnum > 0 AND NOT a.attisdropped
      ORDER BY a.attnum
    `, [name, schema]);
    // Indexes with access method, size, definition and usage counters.
    const indexes = await client.query(`
      SELECT
        i.relname AS name,
        am.amname AS type,
        pg_size_pretty(pg_relation_size(i.oid)) AS size,
        pg_get_indexdef(idx.indexrelid) AS definition,
        idx.indisunique AS is_unique,
        idx.indisprimary AS is_primary,
        s.idx_scan, s.idx_tup_read, s.idx_tup_fetch
      FROM pg_index idx
      JOIN pg_class i ON idx.indexrelid = i.oid
      JOIN pg_class t ON idx.indrelid = t.oid
      JOIN pg_namespace n ON t.relnamespace = n.oid
      JOIN pg_am am ON i.relam = am.oid
      LEFT JOIN pg_stat_user_indexes s ON s.indexrelid = i.oid
      WHERE t.relname = $1 AND n.nspname = $2
      ORDER BY i.relname
    `, [name, schema]);
    // All constraints, ordered primary key → unique → FK → check → other.
    const constraints = await client.query(`
      SELECT
        conname AS name,
        CASE contype WHEN 'p' THEN 'PRIMARY KEY' WHEN 'f' THEN 'FOREIGN KEY'
          WHEN 'u' THEN 'UNIQUE' WHEN 'c' THEN 'CHECK' WHEN 'x' THEN 'EXCLUDE' END AS type,
        pg_get_constraintdef(oid) AS definition
      FROM pg_constraint
      WHERE conrelid = (SELECT c.oid FROM pg_class c JOIN pg_namespace n ON c.relnamespace = n.oid WHERE c.relname = $1 AND n.nspname = $2)
      ORDER BY
        CASE contype WHEN 'p' THEN 1 WHEN 'u' THEN 2 WHEN 'f' THEN 3 WHEN 'c' THEN 4 ELSE 5 END
    `, [name, schema]);
    // Outbound foreign keys. NOTE(review): the ANY(conkey)/ANY(confkey)
    // joins look like they emit one row per column pairing for multi-column
    // FKs — confirm against a composite-key table before relying on it.
    const foreignKeys = await client.query(`
      SELECT
        conname AS name,
        a.attname AS column_name,
        confrelid::regclass::text AS referenced_table,
        af.attname AS referenced_column
      FROM pg_constraint c
      JOIN pg_attribute a ON a.attrelid = c.conrelid AND a.attnum = ANY(c.conkey)
      JOIN pg_attribute af ON af.attrelid = c.confrelid AND af.attnum = ANY(c.confkey)
      WHERE c.contype = 'f'
        AND c.conrelid = (SELECT cl.oid FROM pg_class cl JOIN pg_namespace n ON cl.relnamespace = n.oid WHERE cl.relname = $1 AND n.nspname = $2)
    `, [name, schema]);
    // Sample rows are best-effort: identifiers are escaped (prevents SQL
    // injection through tableName) and failures are logged, not propagated.
    let sampleData = [];
    try {
      const sample = await client.query(
        `SELECT * FROM ${client.escapeIdentifier(schema)}.${client.escapeIdentifier(name)} LIMIT 10`
      );
      sampleData = sample.rows;
    } catch (err) {
      console.error("[schema] Error:", err.message);
    }
    return {
      ...tableInfo.rows[0],
      columns: columns.rows,
      indexes: indexes.rows,
      constraints: constraints.rows,
      foreignKeys: foreignKeys.rows,
      sampleData
    };
  } finally {
    client.release();
  }
}
939
/**
 * Every index outside the system schemas, with access method, size,
 * definition and usage counters.
 *
 * @param {import('pg').Pool} pool
 * @returns {Promise<object[]>} rows ordered by index size, descending
 */
async function getSchemaIndexes(pool) {
  const conn = await pool.connect();
  try {
    const { rows } = await conn.query(`
      SELECT
        n.nspname AS schema,
        t.relname AS table_name,
        i.relname AS name,
        am.amname AS type,
        pg_size_pretty(pg_relation_size(i.oid)) AS size,
        pg_relation_size(i.oid) AS size_bytes,
        pg_get_indexdef(idx.indexrelid) AS definition,
        idx.indisunique AS is_unique,
        idx.indisprimary AS is_primary,
        s.idx_scan, s.idx_tup_read, s.idx_tup_fetch
      FROM pg_index idx
      JOIN pg_class i ON idx.indexrelid = i.oid
      JOIN pg_class t ON idx.indrelid = t.oid
      JOIN pg_namespace n ON t.relnamespace = n.oid
      JOIN pg_am am ON i.relam = am.oid
      LEFT JOIN pg_stat_user_indexes s ON s.indexrelid = i.oid
      WHERE n.nspname NOT IN ('pg_catalog', 'information_schema')
      ORDER BY pg_relation_size(i.oid) DESC
    `);
    return rows;
  } finally {
    conn.release();
  }
}
968
/**
 * User-defined functions, procedures, aggregates and window functions
 * (everything in pg_proc outside the system schemas), including source text.
 *
 * @param {import('pg').Pool} pool
 * @returns {Promise<object[]>} rows ordered by schema then name
 */
async function getSchemaFunctions(pool) {
  const conn = await pool.connect();
  try {
    const { rows } = await conn.query(`
      SELECT
        n.nspname AS schema,
        p.proname AS name,
        pg_get_function_result(p.oid) AS return_type,
        pg_get_function_arguments(p.oid) AS arguments,
        l.lanname AS language,
        p.prosrc AS source,
        CASE p.prokind WHEN 'f' THEN 'function' WHEN 'p' THEN 'procedure' WHEN 'a' THEN 'aggregate' WHEN 'w' THEN 'window' END AS kind
      FROM pg_proc p
      JOIN pg_namespace n ON p.pronamespace = n.oid
      JOIN pg_language l ON p.prolang = l.oid
      WHERE n.nspname NOT IN ('pg_catalog', 'information_schema')
      ORDER BY n.nspname, p.proname
    `);
    return rows;
  } finally {
    conn.release();
  }
}
991
/**
 * Installed extensions with their version, owning schema and comment.
 *
 * @param {import('pg').Pool} pool
 * @returns {Promise<object[]>} rows ordered alphabetically by extension name
 */
async function getSchemaExtensions(pool) {
  const conn = await pool.connect();
  try {
    const { rows } = await conn.query(`
      SELECT extname AS name, extversion AS installed_version,
             n.nspname AS schema, obj_description(e.oid) AS description
      FROM pg_extension e
      JOIN pg_namespace n ON e.extnamespace = n.oid
      ORDER BY extname
    `);
    return rows;
  } finally {
    conn.release();
  }
}
1006
/**
 * User-defined enum types with their labels in declared sort order.
 *
 * @param {import('pg').Pool} pool
 * @returns {Promise<object[]>} rows of { name, schema, values[] }
 */
async function getSchemaEnums(pool) {
  const conn = await pool.connect();
  try {
    const { rows } = await conn.query(`
      SELECT
        t.typname AS name,
        n.nspname AS schema,
        array_agg(e.enumlabel ORDER BY e.enumsortorder) AS values
      FROM pg_type t
      JOIN pg_namespace n ON t.typnamespace = n.oid
      JOIN pg_enum e ON t.oid = e.enumtypid
      WHERE n.nspname NOT IN ('pg_catalog', 'information_schema')
      GROUP BY t.typname, n.nspname
      ORDER BY t.typname
    `);
    return rows;
  } finally {
    conn.release();
  }
}
1026
+
1027
+ // src/server/timeseries.ts
1028
+ import Database from "better-sqlite3";
1029
+ import path from "path";
1030
+ import os from "os";
1031
+ import fs from "fs";
1032
// Storage defaults: ~/.pg-dash/metrics.db, keep 7 days of samples.
var DEFAULT_DIR = path.join(os.homedir(), ".pg-dash");
var DEFAULT_RETENTION_DAYS = 7;
/**
 * Append-only metric store backed by a local better-sqlite3 database.
 * Rows are (timestamp, metric, value); reads go through the composite
 * (metric, timestamp) index created below.
 */
var TimeseriesStore = class {
  db;          // better-sqlite3 Database handle
  insertStmt;  // prepared INSERT reused by insert()/insertMany()
  retentionMs; // prune() deletes rows older than now - retentionMs
  /**
   * @param {string} [dataDir] - directory for metrics.db (created if missing)
   * @param {number} [retentionDays=7] - how long samples are kept
   */
  constructor(dataDir, retentionDays = DEFAULT_RETENTION_DAYS) {
    const dir = dataDir || DEFAULT_DIR;
    fs.mkdirSync(dir, { recursive: true });
    const dbPath = path.join(dir, "metrics.db");
    this.db = new Database(dbPath);
    this.retentionMs = retentionDays * 24 * 60 * 60 * 1e3;
    // WAL mode so the collector's writes don't block dashboard reads.
    this.db.pragma("journal_mode = WAL");
    this.db.exec(`
      CREATE TABLE IF NOT EXISTS metrics (
        timestamp INTEGER NOT NULL,
        metric TEXT NOT NULL,
        value REAL NOT NULL
      );
      CREATE INDEX IF NOT EXISTS idx_metrics_metric_ts ON metrics(metric, timestamp);
    `);
    this.insertStmt = this.db.prepare(
      "INSERT INTO metrics (timestamp, metric, value) VALUES (?, ?, ?)"
    );
  }
  // Record a single sample; timestamp defaults to now.
  insert(metric, value, timestamp) {
    this.insertStmt.run(timestamp ?? Date.now(), metric, value);
  }
  // Record a batch of samples atomically in one sqlite transaction.
  insertMany(points) {
    const tx = this.db.transaction((pts) => {
      for (const p of pts) {
        this.insertStmt.run(p.timestamp, p.metric, p.value);
      }
    });
    tx(points);
  }
  // Samples for one metric within [startMs, endMs] (endMs defaults to now),
  // in ascending timestamp order.
  query(metric, startMs, endMs) {
    const end = endMs ?? Date.now();
    return this.db.prepare(
      "SELECT timestamp, value FROM metrics WHERE metric = ? AND timestamp >= ? AND timestamp <= ? ORDER BY timestamp"
    ).all(metric, startMs, end);
  }
  // Most recent sample per metric, as { metric: { timestamp, value } }.
  // With no argument, every metric ever recorded is included.
  latest(metrics) {
    const result = {};
    if (metrics && metrics.length > 0) {
      const placeholders = metrics.map(() => "?").join(",");
      const rows = this.db.prepare(
        `SELECT m.metric, m.timestamp, m.value FROM metrics m INNER JOIN (SELECT metric, MAX(timestamp) as max_ts FROM metrics WHERE metric IN (${placeholders}) GROUP BY metric) g ON m.metric = g.metric AND m.timestamp = g.max_ts`
      ).all(...metrics);
      for (const r of rows) result[r.metric] = { timestamp: r.timestamp, value: r.value };
    } else {
      const rows = this.db.prepare(
        "SELECT m.metric, m.timestamp, m.value FROM metrics m INNER JOIN (SELECT metric, MAX(timestamp) as max_ts FROM metrics GROUP BY metric) g ON m.metric = g.metric AND m.timestamp = g.max_ts"
      ).all();
      for (const r of rows) result[r.metric] = { timestamp: r.timestamp, value: r.value };
    }
    return result;
  }
  // Delete samples older than the retention window; returns rows removed.
  prune() {
    const cutoff = Date.now() - this.retentionMs;
    const info = this.db.prepare("DELETE FROM metrics WHERE timestamp < ?").run(cutoff);
    return info.changes;
  }
  close() {
    this.db.close();
  }
};
1099
+
1100
// src/server/collector.ts
/**
 * Periodically samples PostgreSQL statistics and writes them to the
 * time-series store. Cumulative counters (commits, rollbacks, tuple
 * activity, ...) are converted to per-second rates by diffing against the
 * previous sample.
 */
var Collector = class {
  /**
   * @param {import('pg').Pool} pool - database to sample
   * @param {{ insertMany(points): void, prune(): number }} store - metrics sink
   * @param {number} [intervalMs=30000] - sampling period
   */
  constructor(pool, store, intervalMs = 3e4) {
    this.pool = pool;
    this.store = store;
    this.intervalMs = intervalMs;
  }
  timer = null;
  // FIX: the hourly prune interval was previously created anonymously in
  // start() and never cleared, leaking a timer after stop().
  pruneTimer = null;
  prev = null;         // previous counter sample, for rate computation
  lastSnapshot = {};   // most recent snapshot, exposed via getLastSnapshot()
  /** Take an immediate sample, then repeat every intervalMs; prune hourly. */
  start() {
    this.collect().catch((err) => console.error("[collector] Initial collection failed:", err));
    this.timer = setInterval(() => {
      this.collect().catch((err) => console.error("[collector] Collection failed:", err));
    }, this.intervalMs);
    this.pruneTimer = setInterval(() => this.store.prune(), 60 * 60 * 1e3);
  }
  /** Stop both the sampling and the pruning timers. */
  stop() {
    if (this.timer) {
      clearInterval(this.timer);
      this.timer = null;
    }
    if (this.pruneTimer) {
      clearInterval(this.pruneTimer);
      this.pruneTimer = null;
    }
  }
  /** @returns {object} shallow copy of the latest snapshot */
  getLastSnapshot() {
    return { ...this.lastSnapshot };
  }
  /**
   * Sample connection counts, database-wide counters and replication lag.
   * On query failure the partial snapshot is returned without being stored.
   * @returns {Promise<object>} metric name → numeric value
   */
  async collect() {
    const now = Date.now();
    const snapshot = {};
    try {
      const client = await this.pool.connect();
      try {
        // Connection state breakdown.
        const connRes = await client.query(`
          SELECT
            count(*) FILTER (WHERE state = 'active')::int AS active,
            count(*) FILTER (WHERE state = 'idle')::int AS idle,
            count(*)::int AS total
          FROM pg_stat_activity
        `);
        const conn = connRes.rows[0];
        snapshot.connections_active = conn.active;
        snapshot.connections_idle = conn.idle;
        snapshot.connections_total = conn.total;
        // Cumulative counters + cache ratio + size for the current database.
        const dbRes = await client.query(`
          SELECT
            xact_commit, xact_rollback, deadlocks, temp_bytes,
            tup_inserted, tup_updated, tup_deleted,
            CASE WHEN (blks_hit + blks_read) = 0 THEN 1
              ELSE blks_hit::float / (blks_hit + blks_read) END AS cache_ratio,
            pg_database_size(current_database()) AS db_size
          FROM pg_stat_database WHERE datname = current_database()
        `);
        const db = dbRes.rows[0];
        if (db) {
          snapshot.cache_hit_ratio = parseFloat(db.cache_ratio);
          snapshot.db_size_bytes = parseInt(db.db_size);
          const cur = {
            timestamp: now,
            xact_commit: parseInt(db.xact_commit),
            xact_rollback: parseInt(db.xact_rollback),
            deadlocks: parseInt(db.deadlocks),
            temp_bytes: parseInt(db.temp_bytes),
            tup_inserted: parseInt(db.tup_inserted),
            tup_updated: parseInt(db.tup_updated),
            tup_deleted: parseInt(db.tup_deleted)
          };
          // Rates require a previous sample; Math.max(0, ...) guards against
          // counter resets (e.g. pg_stat_reset or server restart).
          if (this.prev) {
            const dtSec = (now - this.prev.timestamp) / 1e3;
            if (dtSec > 0) {
              snapshot.tps_commit = Math.max(0, (cur.xact_commit - this.prev.xact_commit) / dtSec);
              snapshot.tps_rollback = Math.max(0, (cur.xact_rollback - this.prev.xact_rollback) / dtSec);
              snapshot.deadlocks = Math.max(0, cur.deadlocks - this.prev.deadlocks);
              snapshot.temp_bytes = Math.max(0, cur.temp_bytes - this.prev.temp_bytes);
              snapshot.tuple_inserted = Math.max(0, (cur.tup_inserted - this.prev.tup_inserted) / dtSec);
              snapshot.tuple_updated = Math.max(0, (cur.tup_updated - this.prev.tup_updated) / dtSec);
              snapshot.tuple_deleted = Math.max(0, (cur.tup_deleted - this.prev.tup_deleted) / dtSec);
            }
          }
          this.prev = cur;
        }
        // Replication lag only applies on standbys; tolerate failure
        // (e.g. insufficient privileges) by reporting zero.
        try {
          const repRes = await client.query(`
            SELECT CASE WHEN pg_is_in_recovery()
              THEN pg_wal_lsn_diff(pg_last_wal_receive_lsn(), pg_last_wal_replay_lsn())
              ELSE 0 END AS lag_bytes
          `);
          snapshot.replication_lag_bytes = parseInt(repRes.rows[0]?.lag_bytes ?? "0");
        } catch {
          snapshot.replication_lag_bytes = 0;
        }
      } finally {
        client.release();
      }
    } catch (err) {
      console.error("[collector] Error collecting metrics:", err.message);
      return snapshot;
    }
    // Persist every collected metric under the same timestamp.
    const points = Object.entries(snapshot).map(([metric, value]) => ({
      timestamp: now,
      metric,
      value
    }));
    if (points.length > 0) {
      this.store.insertMany(points);
    }
    this.lastSnapshot = snapshot;
    return snapshot;
  }
};
1209
+
1210
// src/server/schema-diff.ts
/**
 * Compute the list of schema changes between two snapshots (as produced by
 * SchemaTracker.buildSnapshot). Each change is
 * { change_type: 'added'|'removed'|'modified', object_type, table_name, detail }.
 *
 * Decomposed into per-object-kind helpers below; output order and the
 * detail strings are unchanged from the original monolithic version.
 *
 * @param {{tables: object[], enums?: object[]}} oldSnap
 * @param {{tables: object[], enums?: object[]}} newSnap
 * @returns {object[]} changes, empty when the snapshots are identical
 */
function diffSnapshots(oldSnap, newSnap) {
  const changes = [];
  const oldTableMap = new Map(oldSnap.tables.map((t) => [`${t.schema}.${t.name}`, t]));
  const newTableMap = new Map(newSnap.tables.map((t) => [`${t.schema}.${t.name}`, t]));
  // Whole-table additions and removals first.
  for (const key of newTableMap.keys()) {
    if (!oldTableMap.has(key)) {
      changes.push({ change_type: "added", object_type: "table", table_name: key, detail: `Table ${key} added` });
    }
  }
  for (const key of oldTableMap.keys()) {
    if (!newTableMap.has(key)) {
      changes.push({ change_type: "removed", object_type: "table", table_name: key, detail: `Table ${key} removed` });
    }
  }
  // Member-level diffs for tables present in both snapshots.
  for (const [key, newTable] of newTableMap) {
    const oldTable = oldTableMap.get(key);
    if (!oldTable) continue;
    diffTableColumns(changes, key, oldTable, newTable);
    diffTableIndexes(changes, key, oldTable, newTable);
    diffTableConstraints(changes, key, oldTable, newTable);
  }
  diffEnumTypes(changes, oldSnap.enums || [], newSnap.enums || []);
  return changes;
}
// Column additions, removals, and type/nullable/default modifications.
function diffTableColumns(changes, key, oldTable, newTable) {
  const oldCols = new Map(oldTable.columns.map((c) => [c.name, c]));
  const newCols = new Map(newTable.columns.map((c) => [c.name, c]));
  for (const [name, col] of newCols) {
    const oldCol = oldCols.get(name);
    if (!oldCol) {
      changes.push({ change_type: "added", object_type: "column", table_name: key, detail: `Column ${name} added (${col.type})` });
      continue;
    }
    if (oldCol.type !== col.type) {
      changes.push({ change_type: "modified", object_type: "column", table_name: key, detail: `Column ${name} type changed: ${oldCol.type} \u2192 ${col.type}` });
    }
    if (oldCol.nullable !== col.nullable) {
      changes.push({ change_type: "modified", object_type: "column", table_name: key, detail: `Column ${name} nullable changed: ${oldCol.nullable} \u2192 ${col.nullable}` });
    }
    if (oldCol.default_value !== col.default_value) {
      changes.push({ change_type: "modified", object_type: "column", table_name: key, detail: `Column ${name} default changed: ${oldCol.default_value ?? "NULL"} \u2192 ${col.default_value ?? "NULL"}` });
    }
  }
  for (const name of oldCols.keys()) {
    if (!newCols.has(name)) {
      changes.push({ change_type: "removed", object_type: "column", table_name: key, detail: `Column ${name} removed` });
    }
  }
}
// Index additions, removals, and definition changes.
function diffTableIndexes(changes, key, oldTable, newTable) {
  const oldIdx = new Map(oldTable.indexes.map((i) => [i.name, i]));
  const newIdx = new Map(newTable.indexes.map((i) => [i.name, i]));
  for (const [name, idx] of newIdx) {
    if (!oldIdx.has(name)) {
      changes.push({ change_type: "added", object_type: "index", table_name: key, detail: `Index ${name} added` });
    } else if (oldIdx.get(name).definition !== idx.definition) {
      changes.push({ change_type: "modified", object_type: "index", table_name: key, detail: `Index ${name} definition changed` });
    }
  }
  for (const name of oldIdx.keys()) {
    if (!newIdx.has(name)) {
      changes.push({ change_type: "removed", object_type: "index", table_name: key, detail: `Index ${name} removed` });
    }
  }
}
// Constraint additions, removals, and definition changes.
function diffTableConstraints(changes, key, oldTable, newTable) {
  const oldCon = new Map(oldTable.constraints.map((c) => [c.name, c]));
  const newCon = new Map(newTable.constraints.map((c) => [c.name, c]));
  for (const [name, con] of newCon) {
    if (!oldCon.has(name)) {
      changes.push({ change_type: "added", object_type: "constraint", table_name: key, detail: `Constraint ${name} added (${con.type})` });
    } else if (oldCon.get(name).definition !== con.definition) {
      changes.push({ change_type: "modified", object_type: "constraint", table_name: key, detail: `Constraint ${name} definition changed` });
    }
  }
  for (const name of oldCon.keys()) {
    if (!newCon.has(name)) {
      changes.push({ change_type: "removed", object_type: "constraint", table_name: key, detail: `Constraint ${name} removed` });
    }
  }
}
// Enum type additions/removals and per-value changes. Enums are not tied
// to a table, so table_name is null for these entries.
function diffEnumTypes(changes, oldList, newList) {
  const oldEnums = new Map(oldList.map((e) => [`${e.schema}.${e.name}`, e]));
  const newEnums = new Map(newList.map((e) => [`${e.schema}.${e.name}`, e]));
  for (const [key, en] of newEnums) {
    const oldEn = oldEnums.get(key);
    if (!oldEn) {
      changes.push({ change_type: "added", object_type: "enum", table_name: null, detail: `Enum ${key} added (${en.values.join(", ")})` });
      continue;
    }
    for (const v of en.values.filter((v) => !oldEn.values.includes(v))) {
      changes.push({ change_type: "modified", object_type: "enum", table_name: null, detail: `Enum ${key}: value '${v}' added` });
    }
    for (const v of oldEn.values.filter((v) => !en.values.includes(v))) {
      changes.push({ change_type: "modified", object_type: "enum", table_name: null, detail: `Enum ${key}: value '${v}' removed` });
    }
  }
  for (const key of oldEnums.keys()) {
    if (!newEnums.has(key)) {
      changes.push({ change_type: "removed", object_type: "enum", table_name: null, detail: `Enum ${key} removed` });
    }
  }
}
1304
+
1305
// src/server/schema-tracker.ts
/**
 * Periodically snapshots the full database schema into SQLite and records
 * the diff against the previous snapshot as individual change rows.
 * Uses the schema query helpers (getSchemaTables / getSchemaTableDetail /
 * getSchemaEnums) and diffSnapshots defined elsewhere in this bundle.
 */
var SchemaTracker = class {
  db;          // better-sqlite3 handle (shared with the rest of the server)
  pool;        // pg Pool used to read the live schema
  intervalMs;  // snapshot period (default 6 hours)
  timer = null;
  constructor(db, pool, intervalMs = 6 * 60 * 60 * 1e3) {
    this.db = db;
    this.pool = pool;
    this.intervalMs = intervalMs;
    this.initTables();
  }
  // Create the snapshot/change tables on first use (idempotent).
  initTables() {
    this.db.exec(`
      CREATE TABLE IF NOT EXISTS schema_snapshots (
        id INTEGER PRIMARY KEY AUTOINCREMENT,
        timestamp INTEGER NOT NULL,
        snapshot TEXT NOT NULL
      );
      CREATE TABLE IF NOT EXISTS schema_changes (
        id INTEGER PRIMARY KEY AUTOINCREMENT,
        snapshot_id INTEGER NOT NULL,
        timestamp INTEGER NOT NULL,
        change_type TEXT NOT NULL,
        object_type TEXT NOT NULL,
        table_name TEXT,
        detail TEXT NOT NULL,
        FOREIGN KEY (snapshot_id) REFERENCES schema_snapshots(id)
      );
    `);
  }
  /**
   * Capture the current schema, persist it, diff it against the most recent
   * earlier snapshot, and store any detected changes in one transaction.
   * @returns {Promise<{snapshotId: number, changes: object[]}>}
   */
  async takeSnapshot() {
    const snapshot = await this.buildSnapshot();
    const now = Date.now();
    const json = JSON.stringify(snapshot);
    const info = this.db.prepare("INSERT INTO schema_snapshots (timestamp, snapshot) VALUES (?, ?)").run(now, json);
    const snapshotId = Number(info.lastInsertRowid);
    // Latest snapshot strictly before this one; none on the very first run.
    const prev = this.db.prepare("SELECT snapshot FROM schema_snapshots WHERE id < ? ORDER BY id DESC LIMIT 1").get(snapshotId);
    let changes = [];
    if (prev) {
      const oldSnap = JSON.parse(prev.snapshot);
      changes = diffSnapshots(oldSnap, snapshot);
      if (changes.length > 0) {
        const insert = this.db.prepare("INSERT INTO schema_changes (snapshot_id, timestamp, change_type, object_type, table_name, detail) VALUES (?, ?, ?, ?, ?, ?)");
        const tx = this.db.transaction((chs) => {
          for (const c of chs) {
            insert.run(snapshotId, now, c.change_type, c.object_type, c.table_name, c.detail);
          }
        });
        tx(changes);
      }
    }
    return { snapshotId, changes };
  }
  // Build a JSON-serializable snapshot: per-table columns/indexes/constraints
  // plus enum types. Table details are fetched concurrently via Promise.all.
  async buildSnapshot() {
    const tables = await getSchemaTables(this.pool);
    const enums = await getSchemaEnums(this.pool);
    const detailedTables = await Promise.all(
      tables.map(async (t) => {
        const detail = await getSchemaTableDetail(this.pool, `${t.schema}.${t.name}`);
        if (!detail) return null;
        return {
          name: detail.name,
          schema: detail.schema,
          columns: detail.columns.map((c) => ({
            name: c.name,
            type: c.type,
            nullable: c.nullable,
            default_value: c.default_value
          })),
          indexes: detail.indexes.map((i) => ({
            name: i.name,
            definition: i.definition,
            is_unique: i.is_unique,
            is_primary: i.is_primary
          })),
          constraints: detail.constraints.map((c) => ({
            name: c.name,
            type: c.type,
            definition: c.definition
          }))
        };
      })
    );
    return {
      tables: detailedTables.filter(Boolean),
      enums: enums.map((e) => ({ name: e.name, schema: e.schema, values: e.values }))
    };
  }
  // Take an immediate snapshot, then repeat every intervalMs.
  start() {
    this.takeSnapshot().catch((err) => console.error("Schema snapshot error:", err.message));
    this.timer = setInterval(() => {
      this.takeSnapshot().catch((err) => console.error("Schema snapshot error:", err.message));
    }, this.intervalMs);
  }
  stop() {
    if (this.timer) {
      clearInterval(this.timer);
      this.timer = null;
    }
  }
  // API helpers
  // Recent snapshot ids/timestamps, newest first.
  getHistory(limit = 30) {
    return this.db.prepare("SELECT id, timestamp FROM schema_snapshots ORDER BY id DESC LIMIT ?").all(limit);
  }
  // Changes since a timestamp, or the 100 most recent when omitted.
  getChanges(since) {
    if (since) {
      return this.db.prepare("SELECT * FROM schema_changes WHERE timestamp >= ? ORDER BY timestamp DESC").all(since);
    }
    return this.db.prepare("SELECT * FROM schema_changes ORDER BY timestamp DESC LIMIT 100").all();
  }
  // Changes recorded by the most recent snapshot only.
  getLatestChanges() {
    const latest = this.db.prepare("SELECT id FROM schema_snapshots ORDER BY id DESC LIMIT 1").get();
    if (!latest) return [];
    return this.db.prepare("SELECT * FROM schema_changes WHERE snapshot_id = ? ORDER BY id").all(latest.id);
  }
  // Diff two stored snapshots by id; null when either id is unknown.
  getDiff(fromId, toId) {
    const from = this.db.prepare("SELECT snapshot FROM schema_snapshots WHERE id = ?").get(fromId);
    const to = this.db.prepare("SELECT snapshot FROM schema_snapshots WHERE id = ?").get(toId);
    if (!from || !to) return null;
    return diffSnapshots(JSON.parse(from.snapshot), JSON.parse(to.snapshot));
  }
};
1428
+
1429
// src/server/alerts.ts
// Built-in alert rules, inserted once when the alert tables are first
// created (see AlertManager.initTables). These are only seed values —
// rules are editable/deletable at runtime through the rules API.
var DEFAULT_RULES = [
  { name: "Connection utilization > 80%", metric: "connection_util", operator: "gt", threshold: 80, severity: "warning", enabled: 1, cooldown_minutes: 60 },
  { name: "Connection utilization > 90%", metric: "connection_util", operator: "gt", threshold: 90, severity: "critical", enabled: 1, cooldown_minutes: 30 },
  { name: "Cache hit ratio < 99%", metric: "cache_hit_pct", operator: "lt", threshold: 99, severity: "warning", enabled: 1, cooldown_minutes: 60 },
  { name: "Cache hit ratio < 95%", metric: "cache_hit_pct", operator: "lt", threshold: 95, severity: "critical", enabled: 1, cooldown_minutes: 30 },
  { name: "Long-running query > 5 min", metric: "long_query_count", operator: "gt", threshold: 0, severity: "warning", enabled: 1, cooldown_minutes: 15 },
  { name: "Idle in transaction > 10 min", metric: "idle_in_tx_count", operator: "gt", threshold: 0, severity: "warning", enabled: 1, cooldown_minutes: 15 },
  { name: "Health score below D", metric: "health_score", operator: "lt", threshold: 50, severity: "warning", enabled: 1, cooldown_minutes: 120 }
];
1439
/**
 * Stores alert rules and fired-alert history in SQLite, evaluates rules
 * against metric snapshots, and optionally POSTs fired alerts to a webhook.
 */
var AlertManager = class {
  db;          // better-sqlite3 handle
  webhookUrl;  // optional notification endpoint; null disables webhooks
  constructor(db, webhookUrl) {
    this.db = db;
    this.webhookUrl = webhookUrl || null;
    this.initTables();
  }
  // Create the rule/history tables on first use and seed DEFAULT_RULES
  // only when the rules table is completely empty.
  initTables() {
    this.db.exec(`
      CREATE TABLE IF NOT EXISTS alert_rules (
        id INTEGER PRIMARY KEY AUTOINCREMENT,
        name TEXT NOT NULL,
        metric TEXT NOT NULL,
        operator TEXT NOT NULL,
        threshold REAL NOT NULL,
        severity TEXT NOT NULL DEFAULT 'warning',
        enabled INTEGER DEFAULT 1,
        cooldown_minutes INTEGER DEFAULT 60
      );
      CREATE TABLE IF NOT EXISTS alert_history (
        id INTEGER PRIMARY KEY AUTOINCREMENT,
        rule_id INTEGER NOT NULL,
        timestamp INTEGER NOT NULL,
        value REAL NOT NULL,
        message TEXT NOT NULL,
        notified INTEGER DEFAULT 0,
        FOREIGN KEY (rule_id) REFERENCES alert_rules(id)
      );
    `);
    const count = this.db.prepare("SELECT COUNT(*) as c FROM alert_rules").get().c;
    if (count === 0) {
      const insert = this.db.prepare("INSERT INTO alert_rules (name, metric, operator, threshold, severity, enabled, cooldown_minutes) VALUES (?, ?, ?, ?, ?, ?, ?)");
      const tx = this.db.transaction(() => {
        for (const r of DEFAULT_RULES) {
          insert.run(r.name, r.metric, r.operator, r.threshold, r.severity, r.enabled, r.cooldown_minutes);
        }
      });
      tx();
    }
  }
  getRules() {
    return this.db.prepare("SELECT * FROM alert_rules ORDER BY id").all();
  }
  // Insert a rule; enabled defaults to 1 and cooldown to 60 minutes.
  // Returns the rule with its newly assigned id.
  addRule(rule) {
    const info = this.db.prepare("INSERT INTO alert_rules (name, metric, operator, threshold, severity, enabled, cooldown_minutes) VALUES (?, ?, ?, ?, ?, ?, ?)").run(
      rule.name,
      rule.metric,
      rule.operator,
      rule.threshold,
      rule.severity,
      rule.enabled ?? 1,
      rule.cooldown_minutes ?? 60
    );
    return { ...rule, id: Number(info.lastInsertRowid) };
  }
  // Merge partial updates over the existing row; false when id is unknown.
  updateRule(id, updates) {
    const existing = this.db.prepare("SELECT * FROM alert_rules WHERE id = ?").get(id);
    if (!existing) return false;
    const merged = { ...existing, ...updates };
    this.db.prepare("UPDATE alert_rules SET name=?, metric=?, operator=?, threshold=?, severity=?, enabled=?, cooldown_minutes=? WHERE id=?").run(
      merged.name,
      merged.metric,
      merged.operator,
      merged.threshold,
      merged.severity,
      merged.enabled,
      merged.cooldown_minutes,
      id
    );
    return true;
  }
  // True when a row was actually deleted.
  deleteRule(id) {
    const info = this.db.prepare("DELETE FROM alert_rules WHERE id = ?").run(id);
    return info.changes > 0;
  }
  getHistory(limit = 50) {
    return this.db.prepare("SELECT * FROM alert_history ORDER BY timestamp DESC LIMIT ?").all(limit);
  }
  /**
   * Check all enabled rules against current metric values.
   * `metrics` is a map of metric name → current value.
   */
  checkAlerts(metrics) {
    const rules = this.db.prepare("SELECT * FROM alert_rules WHERE enabled = 1").all();
    const fired = [];
    const now = Date.now();
    for (const rule of rules) {
      const value = metrics[rule.metric];
      // Skip rules for metrics not present in this snapshot.
      if (value === void 0) continue;
      const triggered = this.evaluateRule(rule, value);
      if (!triggered) continue;
      // Per-rule cooldown: suppress re-firing within cooldown_minutes of
      // the most recent alert for the same rule.
      const lastAlert = this.db.prepare(
        "SELECT timestamp FROM alert_history WHERE rule_id = ? ORDER BY timestamp DESC LIMIT 1"
      ).get(rule.id);
      if (lastAlert && now - lastAlert.timestamp < rule.cooldown_minutes * 60 * 1e3) {
        continue;
      }
      const message = `${rule.name}: ${rule.metric} = ${value} (threshold: ${rule.operator} ${rule.threshold})`;
      const info = this.db.prepare("INSERT INTO alert_history (rule_id, timestamp, value, message, notified) VALUES (?, ?, ?, ?, 0)").run(
        rule.id,
        now,
        value,
        message
      );
      const entry = { id: Number(info.lastInsertRowid), rule_id: rule.id, timestamp: now, value, message, notified: 0 };
      fired.push(entry);
      const icon = rule.severity === "critical" ? "\u{1F534}" : rule.severity === "warning" ? "\u{1F7E1}" : "\u{1F535}";
      console.log(`[alert] ${icon} ${message}`);
      if (this.webhookUrl) {
        // Fire-and-forget: webhook failures are logged, never propagated.
        this.sendWebhook(rule, entry).catch((err) => console.error("[alert] Webhook failed:", err.message));
      }
    }
    return fired;
  }
  // gt/lt/eq comparison against the rule threshold; unknown operators
  // never trigger.
  evaluateRule(rule, value) {
    switch (rule.operator) {
      case "gt":
        return value > rule.threshold;
      case "lt":
        return value < rule.threshold;
      case "eq":
        return value === rule.threshold;
      default:
        return false;
    }
  }
  // POST the alert payload to the configured webhook; on success the
  // history row is marked notified. Errors are logged and swallowed.
  async sendWebhook(rule, entry) {
    if (!this.webhookUrl) return;
    try {
      await fetch(this.webhookUrl, {
        method: "POST",
        headers: { "Content-Type": "application/json" },
        body: JSON.stringify({
          severity: rule.severity,
          rule: rule.name,
          metric: rule.metric,
          value: entry.value,
          message: entry.message,
          timestamp: entry.timestamp
        })
      });
      this.db.prepare("UPDATE alert_history SET notified = 1 WHERE id = ?").run(entry.id);
    } catch (err) {
      console.error("[alert] Webhook error:", err.message);
    }
  }
};
1587
+
1588
+ // src/server/index.ts
1589
+ import Database2 from "better-sqlite3";
1590
+ import { WebSocketServer, WebSocket } from "ws";
1591
+ import http from "http";
1592
// Recreate __dirname for the bundled ESM output (ES modules have no __dirname).
var __dirname = path2.dirname(fileURLToPath(import.meta.url));
// Time-range keys accepted by /api/metrics, mapped to their span in milliseconds.
var RANGE_MAP = {
  "5m": 5 * 60 * 1e3,
  "15m": 15 * 60 * 1e3,
  "1h": 60 * 60 * 1e3,
  "6h": 6 * 60 * 60 * 1e3,
  "24h": 24 * 60 * 60 * 1e3,
  "7d": 7 * 24 * 60 * 60 * 1e3
};
1601
/**
 * Boot the pg-dash server: verify database connectivity, start the metric
 * collector, schema tracker and alert manager, then serve the HTTP API,
 * static UI and WebSocket stream until SIGINT/SIGTERM.
 *
 * With `opts.json` set it instead dumps a one-shot health report to stdout
 * and exits (CI-friendly mode).
 *
 * In server mode this function never resolves — the trailing
 * `await new Promise(() => {})` keeps the process alive; teardown happens
 * via the signal handlers and process.exit().
 */
async function startServer(opts) {
  const pool = new Pool({ connectionString: opts.connectionString });
  // Fail fast with a readable message if PostgreSQL is unreachable.
  try {
    const client = await pool.connect();
    client.release();
  } catch (err) {
    console.error(`Failed to connect to PostgreSQL: ${err.message}`);
    process.exit(1);
  }
  // One-shot JSON mode: gather the full report in parallel, print, exit.
  if (opts.json) {
    try {
      const [overview, advisor, databases, tables] = await Promise.all([
        getOverview(pool),
        getAdvisorReport(pool),
        getDatabases(pool),
        getTables(pool)
      ]);
      console.log(JSON.stringify({ overview, advisor, databases, tables }, null, 2));
    } catch (err) {
      console.error(JSON.stringify({ error: err.message }));
      process.exit(1);
    }
    await pool.end();
    process.exit(0);
  }
  const dataDir = opts.dataDir || path2.join(os2.homedir(), ".pg-dash");
  fs2.mkdirSync(dataDir, { recursive: true });
  // BUGFIX: pass the resolved dataDir (with the ~/.pg-dash default applied),
  // not the raw opts.dataDir, which is undefined when --data-dir is omitted.
  const store = new TimeseriesStore(dataDir, opts.retentionDays);
  const intervalMs = (opts.interval || 30) * 1e3;
  const longQueryThreshold = opts.longQueryThreshold || 5;
  const collector = new Collector(pool, store, intervalMs);
  console.log(` Collecting metrics every ${intervalMs / 1e3}s...`);
  collector.start();
  // Schema snapshots live in their own SQLite database under dataDir.
  const schemaDbPath = path2.join(dataDir, "schema.db");
  const schemaDb = new Database2(schemaDbPath);
  schemaDb.pragma("journal_mode = WAL");
  const snapshotIntervalMs = (opts.snapshotInterval || 6) * 60 * 60 * 1e3;
  const schemaTracker = new SchemaTracker(schemaDb, pool, snapshotIntervalMs);
  schemaTracker.start();
  console.log(" Schema change tracking enabled");
  // Alert rules/history live in a second SQLite database.
  const alertsDbPath = path2.join(dataDir, "alerts.db");
  const alertsDb = new Database2(alertsDbPath);
  alertsDb.pragma("journal_mode = WAL");
  const alertManager = new AlertManager(alertsDb, opts.webhook);
  console.log(" Alert monitoring enabled");
  const app = new Hono();
  // Optional auth middleware: Bearer token (header or ?token= query param)
  // and/or HTTP Basic. Any configured method that matches lets the request
  // through; otherwise respond 401.
  // NOTE(review): comparisons use plain `===`, which is not timing-safe;
  // consider crypto.timingSafeEqual for the token/credential checks.
  if (opts.auth || opts.token) {
    app.use("*", async (c, next) => {
      const authHeader = c.req.header("authorization") || "";
      if (opts.token) {
        if (authHeader === `Bearer ${opts.token}`) return next();
      }
      if (opts.auth) {
        const [user, pass] = opts.auth.split(":");
        const expected = "Basic " + Buffer.from(`${user}:${pass}`).toString("base64");
        if (authHeader === expected) return next();
      }
      // Query-string token fallback (used by the browser UI / WebSocket URL).
      const url = new URL(c.req.url, "http://localhost");
      if (opts.token && url.searchParams.get("token") === opts.token) return next();
      if (opts.auth) {
        c.header("WWW-Authenticate", 'Basic realm="pg-dash"');
      }
      return c.text("Unauthorized", 401);
    });
  }
  // --- Live database inspection endpoints -------------------------------
  app.get("/api/overview", async (c) => {
    try {
      return c.json(await getOverview(pool));
    } catch (err) {
      return c.json({ error: err.message }, 500);
    }
  });
  app.get("/api/databases", async (c) => {
    try {
      return c.json(await getDatabases(pool));
    } catch (err) {
      return c.json({ error: err.message }, 500);
    }
  });
  app.get("/api/tables", async (c) => {
    try {
      return c.json(await getTables(pool));
    } catch (err) {
      return c.json({ error: err.message }, 500);
    }
  });
  // Historical metrics from the local timeseries store (not PostgreSQL).
  app.get("/api/metrics", (c) => {
    try {
      const metric = c.req.query("metric");
      const range = c.req.query("range") || "1h";
      if (!metric) return c.json({ error: "metric param required" }, 400);
      const rangeMs = RANGE_MAP[range] || RANGE_MAP["1h"];
      const now = Date.now();
      const data = store.query(metric, now - rangeMs, now);
      return c.json(data);
    } catch (err) {
      return c.json({ error: err.message }, 500);
    }
  });
  app.get("/api/metrics/latest", (c) => {
    try {
      const snapshot = collector.getLastSnapshot();
      return c.json(snapshot);
    } catch (err) {
      return c.json({ error: err.message }, 500);
    }
  });
  app.get("/api/activity", async (c) => {
    try {
      return c.json(await getActivity(pool));
    } catch (err) {
      return c.json({ error: err.message }, 500);
    }
  });
  app.get("/api/queries", async (c) => {
    try {
      return c.json(await getSlowQueries(pool));
    } catch (err) {
      return c.json({ error: err.message }, 500);
    }
  });
  // Cancel a running backend query by pid (pg_cancel_backend is the
  // polite, non-terminating cancel).
  app.post("/api/activity/:pid/cancel", async (c) => {
    try {
      const pid = parseInt(c.req.param("pid"), 10);
      const client = await pool.connect();
      try {
        await client.query("SELECT pg_cancel_backend($1)", [pid]);
        return c.json({ ok: true });
      } finally {
        client.release();
      }
    } catch (err) {
      return c.json({ error: err.message }, 500);
    }
  });
  app.get("/api/advisor", async (c) => {
    try {
      return c.json(await getAdvisorReport(pool));
    } catch (err) {
      return c.json({ error: err.message }, 500);
    }
  });
  // Execute an advisor-suggested fix. isSafeFix() restricts the statement
  // to a whitelist of maintenance operations before it ever reaches the DB.
  app.post("/api/fix", async (c) => {
    try {
      const body = await c.req.json();
      const sql = body?.sql?.trim();
      if (!sql) return c.json({ error: "sql field required" }, 400);
      if (!isSafeFix(sql)) return c.json({ error: "Operation not allowed. Only VACUUM, ANALYZE, REINDEX, CREATE/DROP INDEX CONCURRENTLY, pg_terminate_backend, pg_cancel_backend, and EXPLAIN ANALYZE are permitted." }, 403);
      const client = await pool.connect();
      try {
        const start = Date.now();
        const result = await client.query(sql);
        const duration = Date.now() - start;
        return c.json({ ok: true, duration, rowCount: result.rowCount, rows: result.rows || [] });
      } finally {
        client.release();
      }
    } catch (err) {
      return c.json({ error: err.message }, 500);
    }
  });
  // --- Schema browsing & change-tracking endpoints ----------------------
  app.get("/api/schema/tables", async (c) => {
    try {
      return c.json(await getSchemaTables(pool));
    } catch (err) {
      return c.json({ error: err.message }, 500);
    }
  });
  app.get("/api/schema/tables/:name", async (c) => {
    try {
      const name = c.req.param("name");
      const detail = await getSchemaTableDetail(pool, name);
      if (!detail) return c.json({ error: "Table not found" }, 404);
      return c.json(detail);
    } catch (err) {
      return c.json({ error: err.message }, 500);
    }
  });
  app.get("/api/schema/indexes", async (c) => {
    try {
      return c.json(await getSchemaIndexes(pool));
    } catch (err) {
      return c.json({ error: err.message }, 500);
    }
  });
  app.get("/api/schema/functions", async (c) => {
    try {
      return c.json(await getSchemaFunctions(pool));
    } catch (err) {
      return c.json({ error: err.message }, 500);
    }
  });
  app.get("/api/schema/extensions", async (c) => {
    try {
      return c.json(await getSchemaExtensions(pool));
    } catch (err) {
      return c.json({ error: err.message }, 500);
    }
  });
  app.get("/api/schema/enums", async (c) => {
    try {
      return c.json(await getSchemaEnums(pool));
    } catch (err) {
      return c.json({ error: err.message }, 500);
    }
  });
  app.get("/api/schema/history", (c) => {
    try {
      const limit = parseInt(c.req.query("limit") || "30", 10);
      return c.json(schemaTracker.getHistory(limit));
    } catch (err) {
      return c.json({ error: err.message }, 500);
    }
  });
  app.get("/api/schema/changes", (c) => {
    try {
      const since = c.req.query("since");
      return c.json(schemaTracker.getChanges(since ? parseInt(since, 10) : void 0));
    } catch (err) {
      return c.json({ error: err.message }, 500);
    }
  });
  app.get("/api/schema/changes/latest", (c) => {
    try {
      return c.json(schemaTracker.getLatestChanges());
    } catch (err) {
      return c.json({ error: err.message }, 500);
    }
  });
  app.get("/api/schema/diff", (c) => {
    try {
      const from = parseInt(c.req.query("from") || "0", 10);
      const to = parseInt(c.req.query("to") || "0", 10);
      if (!from || !to) return c.json({ error: "from and to params required" }, 400);
      const diff = schemaTracker.getDiff(from, to);
      if (!diff) return c.json({ error: "Snapshot not found" }, 404);
      return c.json(diff);
    } catch (err) {
      return c.json({ error: err.message }, 500);
    }
  });
  app.post("/api/schema/snapshot", async (c) => {
    try {
      const result = await schemaTracker.takeSnapshot();
      return c.json(result);
    } catch (err) {
      return c.json({ error: err.message }, 500);
    }
  });
  // --- Alert rule CRUD & history ----------------------------------------
  app.get("/api/alerts/rules", (c) => {
    try {
      return c.json(alertManager.getRules());
    } catch (err) {
      return c.json({ error: err.message }, 500);
    }
  });
  app.post("/api/alerts/rules", async (c) => {
    try {
      const body = await c.req.json();
      const rule = alertManager.addRule(body);
      return c.json(rule, 201);
    } catch (err) {
      return c.json({ error: err.message }, 500);
    }
  });
  app.put("/api/alerts/rules/:id", async (c) => {
    try {
      const id = parseInt(c.req.param("id"), 10);
      const body = await c.req.json();
      const ok = alertManager.updateRule(id, body);
      if (!ok) return c.json({ error: "Rule not found" }, 404);
      return c.json({ ok: true });
    } catch (err) {
      return c.json({ error: err.message }, 500);
    }
  });
  app.delete("/api/alerts/rules/:id", (c) => {
    try {
      const id = parseInt(c.req.param("id"), 10);
      const ok = alertManager.deleteRule(id);
      if (!ok) return c.json({ error: "Rule not found" }, 404);
      return c.json({ ok: true });
    } catch (err) {
      return c.json({ error: err.message }, 500);
    }
  });
  app.get("/api/alerts/history", (c) => {
    try {
      const limit = parseInt(c.req.query("limit") || "50", 10);
      return c.json(alertManager.getHistory(limit));
    } catch (err) {
      return c.json({ error: err.message }, 500);
    }
  });
  // --- Static UI (SPA) ---------------------------------------------------
  const uiPath = path2.resolve(__dirname, "ui");
  const MIME_TYPES = {
    ".html": "text/html",
    ".js": "application/javascript",
    ".css": "text/css",
    ".json": "application/json",
    ".png": "image/png",
    ".jpg": "image/jpeg",
    ".svg": "image/svg+xml",
    ".ico": "image/x-icon",
    ".woff": "font/woff",
    ".woff2": "font/woff2"
  };
  app.get("/*", async (c) => {
    const urlPath = c.req.path === "/" ? "/index.html" : c.req.path;
    const filePath = path2.join(uiPath, urlPath);
    // SPA fallback: any missing/unreadable path serves index.html.
    const serveIndex = async () => {
      try {
        const html = await fs2.promises.readFile(path2.join(uiPath, "index.html"));
        return new Response(html, { headers: { "content-type": "text/html" } });
      } catch (err) {
        console.error("[static] Error reading index.html:", err.message);
        return c.text("Not Found", 404);
      }
    };
    // SECURITY: path.join normalizes ".." segments, so a crafted request
    // path could escape uiPath. Reject anything that resolves outside it.
    if (filePath !== uiPath && !filePath.startsWith(uiPath + path2.sep)) {
      return serveIndex();
    }
    try {
      const content = await fs2.promises.readFile(filePath);
      const ext = path2.extname(filePath);
      const contentType = MIME_TYPES[ext] || "application/octet-stream";
      return new Response(content, { headers: { "content-type": contentType } });
    } catch {
      return serveIndex();
    }
  });
  // Bridge Node's http server onto Hono's fetch-style handler: buffer the
  // request body, build a WHATWG Request, and stream the Response back.
  const server = http.createServer(async (req, res) => {
    const chunks = [];
    for await (const chunk of req) chunks.push(chunk);
    const body = Buffer.concat(chunks);
    const url = new URL(req.url || "/", `http://localhost:${opts.port}`);
    const init = {
      method: req.method,
      headers: req.headers
    };
    if (req.method !== "GET" && req.method !== "HEAD" && body.length > 0) {
      init.body = body;
    }
    const request = new Request(url.toString(), init);
    app.fetch(request).then((response) => {
      res.writeHead(response.status, Object.fromEntries(response.headers.entries()));
      response.arrayBuffer().then((buf) => {
        res.end(Buffer.from(buf));
      });
    }).catch(() => {
      res.writeHead(500);
      res.end("Internal Server Error");
    });
  });
  // WebSocket endpoint shares the HTTP server; when auth is configured the
  // upgrade is gated with the same token/basic checks as the REST API.
  const wss = new WebSocketServer({
    server,
    path: "/ws",
    verifyClient: opts.auth || opts.token ? (info, cb) => {
      const url = new URL(info.req.url || "/", `http://localhost:${opts.port}`);
      const qToken = url.searchParams.get("token");
      if (opts.token && qToken === opts.token) return cb(true);
      const authHeader = info.req.headers["authorization"] || "";
      if (opts.token && authHeader === `Bearer ${opts.token}`) return cb(true);
      if (opts.auth) {
        const [user, pass] = opts.auth.split(":");
        const expected = "Basic " + Buffer.from(`${user}:${pass}`).toString("base64");
        if (authHeader === expected) return cb(true);
      }
      cb(false, 401, "Unauthorized");
    } : void 0
  });
  const clients = /* @__PURE__ */ new Set();
  wss.on("connection", (ws) => {
    clients.add(ws);
    // Push the most recent snapshot immediately so the UI isn't blank
    // until the next collection cycle.
    const snap = collector.getLastSnapshot();
    if (Object.keys(snap).length > 0) {
      ws.send(JSON.stringify({ type: "metrics", data: snap }));
    }
    ws.on("close", () => clients.delete(ws));
    ws.on("error", () => clients.delete(ws));
  });
  // Wrap the collector's collect() so every cycle also (a) broadcasts
  // metrics + activity to connected WebSocket clients and (b) derives the
  // alert-input metrics and runs the alert rules.
  let collectCycleCount = 0;
  const origCollect = collector.collect.bind(collector);
  collector.collect = async () => {
    const snapshot = await origCollect();
    if (clients.size > 0 && Object.keys(snapshot).length > 0) {
      const metricsMsg = JSON.stringify({ type: "metrics", data: snapshot });
      let activityData = [];
      try {
        activityData = await getActivity(pool);
      } catch (err) {
        console.error("[ws] Error fetching activity:", err.message);
      }
      const activityMsg = JSON.stringify({ type: "activity", data: activityData });
      for (const ws of clients) {
        if (ws.readyState === WebSocket.OPEN) {
          ws.send(metricsMsg);
          ws.send(activityMsg);
        }
      }
    }
    if (Object.keys(snapshot).length > 0) {
      try {
        const alertMetrics = {};
        // Connection utilization as a percentage of max_connections.
        if (snapshot.connections_total !== void 0) {
          const client = await pool.connect();
          try {
            const r = await client.query("SELECT setting::int AS max FROM pg_settings WHERE name = 'max_connections'");
            const max = r.rows[0]?.max || 100;
            alertMetrics.connection_util = snapshot.connections_total / max * 100;
          } finally {
            client.release();
          }
        }
        if (snapshot.cache_hit_ratio !== void 0) {
          alertMetrics.cache_hit_pct = snapshot.cache_hit_ratio * 100;
        }
        // Count queries active longer than the configured threshold.
        // longQueryThreshold is a number resolved from the CLI above, so
        // interpolating it into the interval literal is not reachable by
        // untrusted input.
        try {
          const client = await pool.connect();
          try {
            const r = await client.query(`SELECT count(*)::int AS c FROM pg_stat_activity WHERE state = 'active' AND now() - query_start > interval '${longQueryThreshold} minutes' AND pid != pg_backend_pid()`);
            alertMetrics.long_query_count = r.rows[0]?.c || 0;
          } finally {
            client.release();
          }
        } catch (err) {
          console.error("[alerts] Error checking long queries:", err.message);
        }
        // Count sessions stuck idle-in-transaction for more than 10 minutes.
        try {
          const client = await pool.connect();
          try {
            const r = await client.query("SELECT count(*)::int AS c FROM pg_stat_activity WHERE state = 'idle in transaction' AND now() - state_change > interval '10 minutes'");
            alertMetrics.idle_in_tx_count = r.rows[0]?.c || 0;
          } finally {
            client.release();
          }
        } catch (err) {
          console.error("[alerts] Error checking idle-in-tx:", err.message);
        }
        // The advisor report is expensive; refresh health_score only every
        // 10th collection cycle.
        collectCycleCount++;
        if (collectCycleCount % 10 === 0) {
          try {
            const report = await getAdvisorReport(pool);
            alertMetrics.health_score = report.score;
          } catch (err) {
            console.error("[alerts] Error checking health score:", err.message);
          }
        }
        const fired = alertManager.checkAlerts(alertMetrics);
        if (fired.length > 0 && clients.size > 0) {
          const alertMsg = JSON.stringify({ type: "alerts", data: fired });
          for (const ws of clients) {
            if (ws.readyState === WebSocket.OPEN) {
              ws.send(alertMsg);
            }
          }
        }
      } catch (err) {
        console.error("[alerts] Error checking alerts:", err.message);
      }
    }
    return snapshot;
  };
  const bindAddr = opts.bind || "127.0.0.1";
  server.listen(opts.port, bindAddr, async () => {
    console.log(`
 pg-dash running at http://${bindAddr}:${opts.port}
`);
    if (opts.open) {
      try {
        const openMod = await import("open");
        await openMod.default(`http://localhost:${opts.port}`);
      } catch (err) {
        console.error("[open] Failed to open browser:", err.message);
      }
    }
  });
  // Graceful teardown on SIGINT/SIGTERM. Guarded so a second signal while
  // shutdown is in flight doesn't double-close handles.
  let shuttingDown = false;
  const shutdown = async () => {
    if (shuttingDown) return;
    shuttingDown = true;
    console.log("\n Shutting down gracefully...");
    collector.stop();
    schemaTracker.stop();
    wss.close();
    server.close();
    store.close();
    schemaDb.close();
    alertsDb.close();
    await pool.end();
    console.log(" Goodbye!");
    process.exit(0);
  };
  process.on("SIGINT", shutdown);
  process.on("SIGTERM", shutdown);
  // Keep the process alive forever; exit paths are process.exit() above.
  await new Promise(() => {
  });
}
2090
+
2091
+ // src/cli.ts
2092
+ import fs3 from "fs";
2093
+ import path3 from "path";
2094
+ import { fileURLToPath as fileURLToPath2 } from "url";
2095
// Install top-level error handlers: these override Node's default behavior
// of terminating on uncaught exceptions/rejections, so the dashboard logs
// the error and keeps running instead of dying mid-collection.
process.on("uncaughtException", (err) => {
  console.error("Uncaught exception:", err);
});
process.on("unhandledRejection", (err) => {
  console.error("Unhandled rejection:", err);
});
2101
// Parse CLI flags with node:util parseArgs. Positionals carry the optional
// subcommand ("check" / "schema-diff") and/or the connection string; every
// option is documented in the --help text below.
var { values, positionals } = parseArgs({
  allowPositionals: true,
  options: {
    port: { type: "string", short: "p", default: "3480" },
    bind: { type: "string", default: "127.0.0.1" },
    auth: { type: "string" },
    token: { type: "string" },
    webhook: { type: "string" },
    "no-open": { type: "boolean", default: false },
    json: { type: "boolean", default: false },
    host: { type: "string" },
    user: { type: "string", short: "u" },
    password: { type: "string" },
    db: { type: "string", short: "d" },
    "pg-port": { type: "string" },
    "data-dir": { type: "string" },
    interval: { type: "string", short: "i" },
    "retention-days": { type: "string" },
    "snapshot-interval": { type: "string" },
    "long-query-threshold": { type: "string" },
    help: { type: "boolean", short: "h" },
    version: { type: "boolean", short: "v" },
    threshold: { type: "string" },
    format: { type: "string", short: "f" }
  }
});
2127
// `--version`: report the version read from the package's own
// package.json; if the metadata cannot be read, say so explicitly rather
// than printing a number that drifts out of date with each release
// (previously a hard-coded "v0.1.0").
if (values.version) {
  try {
    const __dirname2 = path3.dirname(fileURLToPath2(import.meta.url));
    const pkg = JSON.parse(fs3.readFileSync(path3.resolve(__dirname2, "../package.json"), "utf-8"));
    console.log(`pg-dash v${pkg.version}`);
  } catch {
    console.log("pg-dash (version unknown)");
  }
  process.exit(0);
}
2137
// `--help`: print usage and exit. Keep this text in sync with the
// parseArgs option table above.
if (values.help) {
  console.log(`
 pg-dash \u2014 Lightweight PostgreSQL Monitoring Dashboard

 Usage:
   pg-dash <connection-string>
   pg-dash check <connection-string> Run health check and exit
   pg-dash schema-diff <connection-string> Show latest schema changes
   pg-dash --host localhost --user postgres --db mydb

 Options:
   -p, --port <port> Dashboard port (default: 3480)
   --bind <addr> Bind address (default: 127.0.0.1)
   --auth <user:pass> Basic auth credentials (user:password)
   --token <token> Bearer token for authentication
   --webhook <url> Webhook URL for alert notifications
   --no-open Don't auto-open browser (default: opens)
   --json Dump health check as JSON and exit
   --host <host> PostgreSQL host
   -u, --user <user> PostgreSQL user
   --password <pass> PostgreSQL password
   --db, -d <database> PostgreSQL database
   --pg-port <port> PostgreSQL port (default: 5432)
   --data-dir <dir> Data directory for metrics (default: ~/.pg-dash)
   -i, --interval <sec> Collection interval in seconds (default: 30)
   --retention-days <N> Metrics retention in days (default: 7)
   --snapshot-interval <h> Schema snapshot interval in hours (default: 6)
   --long-query-threshold <min> Long query threshold in minutes (default: 5)
   --threshold <score> Health score threshold for check command (default: 70)
   -f, --format <fmt> Output format: text|json (default: text)
   -v, --version Show version
   -h, --help Show this help

 Environment variables:
   PG_DASH_RETENTION_DAYS, PG_DASH_SNAPSHOT_INTERVAL, PG_DASH_LONG_QUERY_THRESHOLD
`);
  process.exit(0);
}
2175
// First positional selects the mode: "check", "schema-diff", or (default)
// a connection string for the dashboard server.
var subcommand = positionals[0];
2176
/**
 * Determine the PostgreSQL connection string for the current invocation.
 * Prefers the positional argument at `startIdx`; otherwise assembles a
 * postgresql:// URL from the --host/--user/--password/--db/--pg-port
 * flags. Exits the process with an error when neither source is present.
 */
function resolveConnectionString(startIdx = 0) {
  const positional = positionals[startIdx];
  if (positional) return positional;
  if (!values.host) {
    console.error("Error: provide a connection string or --host\n\nRun pg-dash --help for usage.");
    process.exit(1);
  }
  const user = values.user || "postgres";
  const pass = values.password ? `:${values.password}` : "";
  const host = values.host;
  const pgPort = values["pg-port"] || "5432";
  const db = values.db || "postgres";
  return `postgresql://${user}${pass}@${host}:${pgPort}/${db}`;
}
2193
// Subcommand dispatch (top-level await; this is the bundled ESM entry).
if (subcommand === "check") {
  // `pg-dash check`: run the advisor once, print text or JSON, and exit
  // non-zero when the health score falls below --threshold (CI-friendly).
  const connectionString = resolveConnectionString(1);
  const threshold = parseInt(values.threshold || "70", 10);
  const format = values.format || "text";
  const { Pool: Pool2 } = await import("pg");
  // Lazily initialize the bundled advisor module (esbuild __esm wrapper).
  const { getAdvisorReport: getAdvisorReport2 } = await Promise.resolve().then(() => (init_advisor(), advisor_exports));
  const pool = new Pool2({ connectionString });
  try {
    const report = await getAdvisorReport2(pool);
    if (format === "json") {
      console.log(JSON.stringify(report, null, 2));
    } else {
      console.log(`
 Health Score: ${report.score}/100 (Grade: ${report.grade})
`);
      for (const [cat, b] of Object.entries(report.breakdown)) {
        console.log(` ${cat.padEnd(14)} ${b.grade} (${b.score}/100) \u2014 ${b.count} issue${b.count !== 1 ? "s" : ""}`);
      }
      if (report.issues.length > 0) {
        console.log(`
 Issues (${report.issues.length}):
`);
        for (const issue of report.issues) {
          const icon = issue.severity === "critical" ? "\u{1F534}" : issue.severity === "warning" ? "\u{1F7E1}" : "\u{1F535}";
          console.log(` ${icon} [${issue.severity}] ${issue.title}`);
        }
      }
      console.log();
    }
    await pool.end();
    // Exit code communicates pass/fail against the score threshold.
    process.exit(report.score < threshold ? 1 : 0);
  } catch (err) {
    console.error(`Error: ${err.message}`);
    await pool.end();
    process.exit(1);
  }
} else if (subcommand === "schema-diff") {
  // `pg-dash schema-diff`: print recent schema changes from the local
  // SQLite snapshot store. The connection string is resolved only to
  // validate the CLI arguments — the value itself is not used here; the
  // diff reads solely from schema.db collected by a prior server run.
  const connectionString = resolveConnectionString(1);
  const dataDir = values["data-dir"] || path3.join((await import("os")).homedir(), ".pg-dash");
  const schemaDbPath = path3.join(dataDir, "schema.db");
  if (!fs3.existsSync(schemaDbPath)) {
    console.error("No schema tracking data found. Run pg-dash server first to collect schema snapshots.");
    process.exit(1);
  }
  const Database3 = (await import("better-sqlite3")).default;
  const db = new Database3(schemaDbPath, { readonly: true });
  const changes = db.prepare("SELECT * FROM schema_changes ORDER BY timestamp DESC LIMIT 50").all();
  db.close();
  if (changes.length === 0) {
    console.log("No schema changes detected.");
  } else {
    console.log(`
 Schema Changes (${changes.length}):
`);
    for (const c of changes) {
      // ANSI-colored +/-/~ markers for added/removed/modified objects.
      const icon = c.change_type === "added" ? "\uFF0B" : c.change_type === "removed" ? "\u2212" : "~";
      const color = c.change_type === "added" ? "\x1B[32m" : c.change_type === "removed" ? "\x1B[31m" : "\x1B[33m";
      console.log(` ${color}${icon}\x1B[0m ${c.detail}${c.table_name ? ` (${c.table_name})` : ""} \u2014 ${new Date(c.timestamp).toLocaleString()}`);
    }
    console.log();
  }
  process.exit(0);
} else {
  // Default mode: resolve options (flags take precedence over env vars,
  // then built-in defaults) and start the long-running dashboard server.
  const connectionString = resolveConnectionString(0);
  const port = parseInt(values.port, 10);
  const bind = values.bind || process.env.PG_DASH_BIND || "127.0.0.1";
  const interval = values.interval ? parseInt(values.interval, 10) : void 0;
  const retentionDays = parseInt(values["retention-days"] || process.env.PG_DASH_RETENTION_DAYS || "7", 10);
  const snapshotInterval = parseInt(values["snapshot-interval"] || process.env.PG_DASH_SNAPSHOT_INTERVAL || "6", 10);
  const longQueryThreshold = parseInt(values["long-query-threshold"] || process.env.PG_DASH_LONG_QUERY_THRESHOLD || "5", 10);
  const auth = values.auth || void 0;
  const token = values.token || void 0;
  const webhook = values.webhook || void 0;
  // Warn loudly when binding to all interfaces without any auth configured.
  if (bind === "0.0.0.0" && !auth && !token) {
    console.warn("\n \u26A0\uFE0F WARNING: Dashboard is exposed without authentication. Use --auth or --token.\n");
  }
  await startServer({
    connectionString,
    port,
    bind,
    open: !values["no-open"],
    json: values.json,
    dataDir: values["data-dir"],
    interval,
    retentionDays,
    snapshotInterval,
    longQueryThreshold,
    auth,
    token,
    webhook
  });
}
2285
+ //# sourceMappingURL=cli.js.map