@indiekitai/pg-dash 0.2.0 → 0.3.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/cli.js CHANGED
@@ -47,7 +47,7 @@ function computeBreakdown(issues) {
47
47
  }
48
48
  return result;
49
49
  }
50
- async function getAdvisorReport(pool) {
50
+ async function getAdvisorReport(pool, longQueryThreshold = 5) {
51
51
  const client = await pool.connect();
52
52
  const issues = [];
53
53
  try {
@@ -281,7 +281,7 @@ SHOW shared_buffers;`,
281
281
  extract(epoch from now() - state_change)::int AS idle_seconds
282
282
  FROM pg_stat_activity
283
283
  WHERE state IN ('idle', 'idle in transaction')
284
- AND now() - state_change > interval '10 minutes'
284
+ AND now() - state_change > interval '${longQueryThreshold} minutes'
285
285
  AND pid != pg_backend_pid()
286
286
  `);
287
287
  for (const row of r.rows) {
@@ -777,253 +777,9 @@ async function getActivity(pool) {
777
777
  }
778
778
  }
779
779
 
780
- // src/server/queries/slow-queries.ts
781
- async function getSlowQueries(pool) {
782
- const client = await pool.connect();
783
- try {
784
- const extCheck = await client.query(
785
- "SELECT 1 FROM pg_extension WHERE extname = 'pg_stat_statements'"
786
- );
787
- if (extCheck.rows.length === 0) {
788
- return [];
789
- }
790
- const r = await client.query(`
791
- SELECT
792
- queryid::text,
793
- query,
794
- calls::int,
795
- total_exec_time AS total_time,
796
- mean_exec_time AS mean_time,
797
- rows::int,
798
- round(total_exec_time::numeric / 1000, 2)::text || 's' AS total_time_pretty,
799
- round(mean_exec_time::numeric, 2)::text || 'ms' AS mean_time_pretty
800
- FROM pg_stat_statements
801
- WHERE query NOT LIKE '%pg_stat%'
802
- AND query NOT LIKE '%pg_catalog%'
803
- ORDER BY total_exec_time DESC
804
- LIMIT 50
805
- `);
806
- return r.rows;
807
- } catch {
808
- return [];
809
- } finally {
810
- client.release();
811
- }
812
- }
813
-
814
780
  // src/server/index.ts
815
781
  init_advisor();
816
782
 
817
- // src/server/queries/schema.ts
818
- async function getSchemaTables(pool) {
819
- const client = await pool.connect();
820
- try {
821
- const r = await client.query(`
822
- SELECT
823
- c.relname AS name,
824
- n.nspname AS schema,
825
- pg_size_pretty(pg_total_relation_size(c.oid)) AS total_size,
826
- pg_total_relation_size(c.oid) AS total_size_bytes,
827
- pg_size_pretty(pg_relation_size(c.oid)) AS table_size,
828
- pg_size_pretty(pg_total_relation_size(c.oid) - pg_relation_size(c.oid)) AS index_size,
829
- s.n_live_tup AS row_count,
830
- obj_description(c.oid) AS description
831
- FROM pg_class c
832
- JOIN pg_namespace n ON c.relnamespace = n.oid
833
- LEFT JOIN pg_stat_user_tables s ON s.relid = c.oid
834
- WHERE c.relkind = 'r' AND n.nspname NOT IN ('pg_catalog', 'information_schema')
835
- ORDER BY pg_total_relation_size(c.oid) DESC
836
- `);
837
- return r.rows;
838
- } finally {
839
- client.release();
840
- }
841
- }
842
- async function getSchemaTableDetail(pool, tableName) {
843
- const client = await pool.connect();
844
- try {
845
- const parts = tableName.split(".");
846
- const schema = parts.length > 1 ? parts[0] : "public";
847
- const name = parts.length > 1 ? parts[1] : parts[0];
848
- const tableInfo = await client.query(`
849
- SELECT
850
- c.relname AS name, n.nspname AS schema,
851
- pg_size_pretty(pg_total_relation_size(c.oid)) AS total_size,
852
- pg_size_pretty(pg_relation_size(c.oid)) AS table_size,
853
- pg_size_pretty(pg_total_relation_size(c.oid) - pg_relation_size(c.oid)) AS index_size,
854
- pg_size_pretty(pg_relation_size(c.reltoastrelid)) AS toast_size,
855
- s.n_live_tup AS row_count, s.n_dead_tup AS dead_tuples,
856
- s.last_vacuum, s.last_autovacuum, s.last_analyze, s.last_autoanalyze,
857
- s.seq_scan, s.idx_scan
858
- FROM pg_class c
859
- JOIN pg_namespace n ON c.relnamespace = n.oid
860
- LEFT JOIN pg_stat_user_tables s ON s.relid = c.oid
861
- WHERE c.relname = $1 AND n.nspname = $2 AND c.relkind = 'r'
862
- `, [name, schema]);
863
- if (tableInfo.rows.length === 0) return null;
864
- const columns = await client.query(`
865
- SELECT
866
- a.attname AS name,
867
- pg_catalog.format_type(a.atttypid, a.atttypmod) AS type,
868
- NOT a.attnotnull AS nullable,
869
- pg_get_expr(d.adbin, d.adrelid) AS default_value,
870
- col_description(a.attrelid, a.attnum) AS description
871
- FROM pg_attribute a
872
- LEFT JOIN pg_attrdef d ON a.attrelid = d.adrelid AND a.attnum = d.adnum
873
- WHERE a.attrelid = (SELECT c.oid FROM pg_class c JOIN pg_namespace n ON c.relnamespace = n.oid WHERE c.relname = $1 AND n.nspname = $2)
874
- AND a.attnum > 0 AND NOT a.attisdropped
875
- ORDER BY a.attnum
876
- `, [name, schema]);
877
- const indexes = await client.query(`
878
- SELECT
879
- i.relname AS name,
880
- am.amname AS type,
881
- pg_size_pretty(pg_relation_size(i.oid)) AS size,
882
- pg_get_indexdef(idx.indexrelid) AS definition,
883
- idx.indisunique AS is_unique,
884
- idx.indisprimary AS is_primary,
885
- s.idx_scan, s.idx_tup_read, s.idx_tup_fetch
886
- FROM pg_index idx
887
- JOIN pg_class i ON idx.indexrelid = i.oid
888
- JOIN pg_class t ON idx.indrelid = t.oid
889
- JOIN pg_namespace n ON t.relnamespace = n.oid
890
- JOIN pg_am am ON i.relam = am.oid
891
- LEFT JOIN pg_stat_user_indexes s ON s.indexrelid = i.oid
892
- WHERE t.relname = $1 AND n.nspname = $2
893
- ORDER BY i.relname
894
- `, [name, schema]);
895
- const constraints = await client.query(`
896
- SELECT
897
- conname AS name,
898
- CASE contype WHEN 'p' THEN 'PRIMARY KEY' WHEN 'f' THEN 'FOREIGN KEY'
899
- WHEN 'u' THEN 'UNIQUE' WHEN 'c' THEN 'CHECK' WHEN 'x' THEN 'EXCLUDE' END AS type,
900
- pg_get_constraintdef(oid) AS definition
901
- FROM pg_constraint
902
- WHERE conrelid = (SELECT c.oid FROM pg_class c JOIN pg_namespace n ON c.relnamespace = n.oid WHERE c.relname = $1 AND n.nspname = $2)
903
- ORDER BY
904
- CASE contype WHEN 'p' THEN 1 WHEN 'u' THEN 2 WHEN 'f' THEN 3 WHEN 'c' THEN 4 ELSE 5 END
905
- `, [name, schema]);
906
- const foreignKeys = await client.query(`
907
- SELECT
908
- conname AS name,
909
- a.attname AS column_name,
910
- confrelid::regclass::text AS referenced_table,
911
- af.attname AS referenced_column
912
- FROM pg_constraint c
913
- JOIN pg_attribute a ON a.attrelid = c.conrelid AND a.attnum = ANY(c.conkey)
914
- JOIN pg_attribute af ON af.attrelid = c.confrelid AND af.attnum = ANY(c.confkey)
915
- WHERE c.contype = 'f'
916
- AND c.conrelid = (SELECT cl.oid FROM pg_class cl JOIN pg_namespace n ON cl.relnamespace = n.oid WHERE cl.relname = $1 AND n.nspname = $2)
917
- `, [name, schema]);
918
- let sampleData = [];
919
- try {
920
- const sample = await client.query(
921
- `SELECT * FROM ${client.escapeIdentifier(schema)}.${client.escapeIdentifier(name)} LIMIT 10`
922
- );
923
- sampleData = sample.rows;
924
- } catch (err) {
925
- console.error("[schema] Error:", err.message);
926
- }
927
- return {
928
- ...tableInfo.rows[0],
929
- columns: columns.rows,
930
- indexes: indexes.rows,
931
- constraints: constraints.rows,
932
- foreignKeys: foreignKeys.rows,
933
- sampleData
934
- };
935
- } finally {
936
- client.release();
937
- }
938
- }
939
- async function getSchemaIndexes(pool) {
940
- const client = await pool.connect();
941
- try {
942
- const r = await client.query(`
943
- SELECT
944
- n.nspname AS schema,
945
- t.relname AS table_name,
946
- i.relname AS name,
947
- am.amname AS type,
948
- pg_size_pretty(pg_relation_size(i.oid)) AS size,
949
- pg_relation_size(i.oid) AS size_bytes,
950
- pg_get_indexdef(idx.indexrelid) AS definition,
951
- idx.indisunique AS is_unique,
952
- idx.indisprimary AS is_primary,
953
- s.idx_scan, s.idx_tup_read, s.idx_tup_fetch
954
- FROM pg_index idx
955
- JOIN pg_class i ON idx.indexrelid = i.oid
956
- JOIN pg_class t ON idx.indrelid = t.oid
957
- JOIN pg_namespace n ON t.relnamespace = n.oid
958
- JOIN pg_am am ON i.relam = am.oid
959
- LEFT JOIN pg_stat_user_indexes s ON s.indexrelid = i.oid
960
- WHERE n.nspname NOT IN ('pg_catalog', 'information_schema')
961
- ORDER BY pg_relation_size(i.oid) DESC
962
- `);
963
- return r.rows;
964
- } finally {
965
- client.release();
966
- }
967
- }
968
- async function getSchemaFunctions(pool) {
969
- const client = await pool.connect();
970
- try {
971
- const r = await client.query(`
972
- SELECT
973
- n.nspname AS schema,
974
- p.proname AS name,
975
- pg_get_function_result(p.oid) AS return_type,
976
- pg_get_function_arguments(p.oid) AS arguments,
977
- l.lanname AS language,
978
- p.prosrc AS source,
979
- CASE p.prokind WHEN 'f' THEN 'function' WHEN 'p' THEN 'procedure' WHEN 'a' THEN 'aggregate' WHEN 'w' THEN 'window' END AS kind
980
- FROM pg_proc p
981
- JOIN pg_namespace n ON p.pronamespace = n.oid
982
- JOIN pg_language l ON p.prolang = l.oid
983
- WHERE n.nspname NOT IN ('pg_catalog', 'information_schema')
984
- ORDER BY n.nspname, p.proname
985
- `);
986
- return r.rows;
987
- } finally {
988
- client.release();
989
- }
990
- }
991
- async function getSchemaExtensions(pool) {
992
- const client = await pool.connect();
993
- try {
994
- const r = await client.query(`
995
- SELECT extname AS name, extversion AS installed_version,
996
- n.nspname AS schema, obj_description(e.oid) AS description
997
- FROM pg_extension e
998
- JOIN pg_namespace n ON e.extnamespace = n.oid
999
- ORDER BY extname
1000
- `);
1001
- return r.rows;
1002
- } finally {
1003
- client.release();
1004
- }
1005
- }
1006
- async function getSchemaEnums(pool) {
1007
- const client = await pool.connect();
1008
- try {
1009
- const r = await client.query(`
1010
- SELECT
1011
- t.typname AS name,
1012
- n.nspname AS schema,
1013
- array_agg(e.enumlabel ORDER BY e.enumsortorder) AS values
1014
- FROM pg_type t
1015
- JOIN pg_namespace n ON t.typnamespace = n.oid
1016
- JOIN pg_enum e ON t.oid = e.enumtypid
1017
- WHERE n.nspname NOT IN ('pg_catalog', 'information_schema')
1018
- GROUP BY t.typname, n.nspname
1019
- ORDER BY t.typname
1020
- `);
1021
- return r.rows;
1022
- } finally {
1023
- client.release();
1024
- }
1025
- }
1026
-
1027
783
  // src/server/timeseries.ts
1028
784
  import Database from "better-sqlite3";
1029
785
  import path from "path";
@@ -1035,11 +791,15 @@ var TimeseriesStore = class {
1035
791
  db;
1036
792
  insertStmt;
1037
793
  retentionMs;
1038
- constructor(dataDir, retentionDays = DEFAULT_RETENTION_DAYS) {
1039
- const dir = dataDir || DEFAULT_DIR;
1040
- fs.mkdirSync(dir, { recursive: true });
1041
- const dbPath = path.join(dir, "metrics.db");
1042
- this.db = new Database(dbPath);
794
+ constructor(dbOrDir, retentionDays = DEFAULT_RETENTION_DAYS) {
795
+ if (dbOrDir instanceof Database) {
796
+ this.db = dbOrDir;
797
+ } else {
798
+ const dir = dbOrDir || DEFAULT_DIR;
799
+ fs.mkdirSync(dir, { recursive: true });
800
+ const dbPath = path.join(dir, "metrics.db");
801
+ this.db = new Database(dbPath);
802
+ }
1043
803
  this.retentionMs = retentionDays * 24 * 60 * 60 * 1e3;
1044
804
  this.db.pragma("journal_mode = WAL");
1045
805
  this.db.exec(`
@@ -1105,6 +865,7 @@ var Collector = class {
1105
865
  this.intervalMs = intervalMs;
1106
866
  }
1107
867
  timer = null;
868
+ pruneTimer = null;
1108
869
  prev = null;
1109
870
  lastSnapshot = {};
1110
871
  start() {
@@ -1112,13 +873,17 @@ var Collector = class {
1112
873
  this.timer = setInterval(() => {
1113
874
  this.collect().catch((err) => console.error("[collector] Collection failed:", err));
1114
875
  }, this.intervalMs);
1115
- setInterval(() => this.store.prune(), 60 * 60 * 1e3);
876
+ this.pruneTimer = setInterval(() => this.store.prune(), 60 * 60 * 1e3);
1116
877
  }
1117
878
  stop() {
1118
879
  if (this.timer) {
1119
880
  clearInterval(this.timer);
1120
881
  this.timer = null;
1121
882
  }
883
+ if (this.pruneTimer) {
884
+ clearInterval(this.pruneTimer);
885
+ this.pruneTimer = null;
886
+ }
1122
887
  }
1123
888
  getLastSnapshot() {
1124
889
  return { ...this.lastSnapshot };
@@ -1177,6 +942,17 @@ var Collector = class {
1177
942
  }
1178
943
  this.prev = cur;
1179
944
  }
945
+ try {
946
+ const tsRes = await client.query(`SELECT spcname, pg_tablespace_size(oid) AS size FROM pg_tablespace`);
947
+ let totalTablespaceSize = 0;
948
+ for (const row of tsRes.rows) {
949
+ totalTablespaceSize += parseInt(row.size);
950
+ }
951
+ if (totalTablespaceSize > 0) {
952
+ snapshot.disk_used_bytes = totalTablespaceSize;
953
+ }
954
+ } catch {
955
+ }
1180
956
  try {
1181
957
  const repRes = await client.query(`
1182
958
  SELECT CASE WHEN pg_is_in_recovery()
@@ -1191,21 +967,231 @@ var Collector = class {
1191
967
  client.release();
1192
968
  }
1193
969
  } catch (err) {
1194
- console.error("[collector] Error collecting metrics:", err.message);
1195
- return snapshot;
1196
- }
1197
- const points = Object.entries(snapshot).map(([metric, value]) => ({
1198
- timestamp: now,
1199
- metric,
1200
- value
1201
- }));
1202
- if (points.length > 0) {
1203
- this.store.insertMany(points);
970
+ console.error("[collector] Error collecting metrics:", err.message);
971
+ return snapshot;
972
+ }
973
+ const points = Object.entries(snapshot).map(([metric, value]) => ({
974
+ timestamp: now,
975
+ metric,
976
+ value
977
+ }));
978
+ if (points.length > 0) {
979
+ this.store.insertMany(points);
980
+ }
981
+ this.lastSnapshot = snapshot;
982
+ return snapshot;
983
+ }
984
+ };
985
+
986
+ // src/server/queries/schema.ts
987
+ async function getSchemaTables(pool) {
988
+ const client = await pool.connect();
989
+ try {
990
+ const r = await client.query(`
991
+ SELECT
992
+ c.relname AS name,
993
+ n.nspname AS schema,
994
+ pg_size_pretty(pg_total_relation_size(c.oid)) AS total_size,
995
+ pg_total_relation_size(c.oid) AS total_size_bytes,
996
+ pg_size_pretty(pg_relation_size(c.oid)) AS table_size,
997
+ pg_size_pretty(pg_total_relation_size(c.oid) - pg_relation_size(c.oid)) AS index_size,
998
+ s.n_live_tup AS row_count,
999
+ obj_description(c.oid) AS description
1000
+ FROM pg_class c
1001
+ JOIN pg_namespace n ON c.relnamespace = n.oid
1002
+ LEFT JOIN pg_stat_user_tables s ON s.relid = c.oid
1003
+ WHERE c.relkind = 'r' AND n.nspname NOT IN ('pg_catalog', 'information_schema')
1004
+ ORDER BY pg_total_relation_size(c.oid) DESC
1005
+ `);
1006
+ return r.rows;
1007
+ } finally {
1008
+ client.release();
1009
+ }
1010
+ }
1011
+ async function getSchemaTableDetail(pool, tableName) {
1012
+ const client = await pool.connect();
1013
+ try {
1014
+ const parts = tableName.split(".");
1015
+ const schema = parts.length > 1 ? parts[0] : "public";
1016
+ const name = parts.length > 1 ? parts[1] : parts[0];
1017
+ const tableInfo = await client.query(`
1018
+ SELECT
1019
+ c.relname AS name, n.nspname AS schema,
1020
+ pg_size_pretty(pg_total_relation_size(c.oid)) AS total_size,
1021
+ pg_size_pretty(pg_relation_size(c.oid)) AS table_size,
1022
+ pg_size_pretty(pg_total_relation_size(c.oid) - pg_relation_size(c.oid)) AS index_size,
1023
+ pg_size_pretty(pg_relation_size(c.reltoastrelid)) AS toast_size,
1024
+ s.n_live_tup AS row_count, s.n_dead_tup AS dead_tuples,
1025
+ s.last_vacuum, s.last_autovacuum, s.last_analyze, s.last_autoanalyze,
1026
+ s.seq_scan, s.idx_scan
1027
+ FROM pg_class c
1028
+ JOIN pg_namespace n ON c.relnamespace = n.oid
1029
+ LEFT JOIN pg_stat_user_tables s ON s.relid = c.oid
1030
+ WHERE c.relname = $1 AND n.nspname = $2 AND c.relkind = 'r'
1031
+ `, [name, schema]);
1032
+ if (tableInfo.rows.length === 0) return null;
1033
+ const columns = await client.query(`
1034
+ SELECT
1035
+ a.attname AS name,
1036
+ pg_catalog.format_type(a.atttypid, a.atttypmod) AS type,
1037
+ NOT a.attnotnull AS nullable,
1038
+ pg_get_expr(d.adbin, d.adrelid) AS default_value,
1039
+ col_description(a.attrelid, a.attnum) AS description
1040
+ FROM pg_attribute a
1041
+ LEFT JOIN pg_attrdef d ON a.attrelid = d.adrelid AND a.attnum = d.adnum
1042
+ WHERE a.attrelid = (SELECT c.oid FROM pg_class c JOIN pg_namespace n ON c.relnamespace = n.oid WHERE c.relname = $1 AND n.nspname = $2)
1043
+ AND a.attnum > 0 AND NOT a.attisdropped
1044
+ ORDER BY a.attnum
1045
+ `, [name, schema]);
1046
+ const indexes = await client.query(`
1047
+ SELECT
1048
+ i.relname AS name,
1049
+ am.amname AS type,
1050
+ pg_size_pretty(pg_relation_size(i.oid)) AS size,
1051
+ pg_get_indexdef(idx.indexrelid) AS definition,
1052
+ idx.indisunique AS is_unique,
1053
+ idx.indisprimary AS is_primary,
1054
+ s.idx_scan, s.idx_tup_read, s.idx_tup_fetch
1055
+ FROM pg_index idx
1056
+ JOIN pg_class i ON idx.indexrelid = i.oid
1057
+ JOIN pg_class t ON idx.indrelid = t.oid
1058
+ JOIN pg_namespace n ON t.relnamespace = n.oid
1059
+ JOIN pg_am am ON i.relam = am.oid
1060
+ LEFT JOIN pg_stat_user_indexes s ON s.indexrelid = i.oid
1061
+ WHERE t.relname = $1 AND n.nspname = $2
1062
+ ORDER BY i.relname
1063
+ `, [name, schema]);
1064
+ const constraints = await client.query(`
1065
+ SELECT
1066
+ conname AS name,
1067
+ CASE contype WHEN 'p' THEN 'PRIMARY KEY' WHEN 'f' THEN 'FOREIGN KEY'
1068
+ WHEN 'u' THEN 'UNIQUE' WHEN 'c' THEN 'CHECK' WHEN 'x' THEN 'EXCLUDE' END AS type,
1069
+ pg_get_constraintdef(oid) AS definition
1070
+ FROM pg_constraint
1071
+ WHERE conrelid = (SELECT c.oid FROM pg_class c JOIN pg_namespace n ON c.relnamespace = n.oid WHERE c.relname = $1 AND n.nspname = $2)
1072
+ ORDER BY
1073
+ CASE contype WHEN 'p' THEN 1 WHEN 'u' THEN 2 WHEN 'f' THEN 3 WHEN 'c' THEN 4 ELSE 5 END
1074
+ `, [name, schema]);
1075
+ const foreignKeys = await client.query(`
1076
+ SELECT
1077
+ conname AS name,
1078
+ a.attname AS column_name,
1079
+ confrelid::regclass::text AS referenced_table,
1080
+ af.attname AS referenced_column
1081
+ FROM pg_constraint c
1082
+ JOIN pg_attribute a ON a.attrelid = c.conrelid AND a.attnum = ANY(c.conkey)
1083
+ JOIN pg_attribute af ON af.attrelid = c.confrelid AND af.attnum = ANY(c.confkey)
1084
+ WHERE c.contype = 'f'
1085
+ AND c.conrelid = (SELECT cl.oid FROM pg_class cl JOIN pg_namespace n ON cl.relnamespace = n.oid WHERE cl.relname = $1 AND n.nspname = $2)
1086
+ `, [name, schema]);
1087
+ let sampleData = [];
1088
+ try {
1089
+ const sample = await client.query(
1090
+ `SELECT * FROM ${client.escapeIdentifier(schema)}.${client.escapeIdentifier(name)} LIMIT 10`
1091
+ );
1092
+ sampleData = sample.rows;
1093
+ } catch (err) {
1094
+ console.error("[schema] Error:", err.message);
1204
1095
  }
1205
- this.lastSnapshot = snapshot;
1206
- return snapshot;
1096
+ return {
1097
+ ...tableInfo.rows[0],
1098
+ columns: columns.rows,
1099
+ indexes: indexes.rows,
1100
+ constraints: constraints.rows,
1101
+ foreignKeys: foreignKeys.rows,
1102
+ sampleData
1103
+ };
1104
+ } finally {
1105
+ client.release();
1207
1106
  }
1208
- };
1107
+ }
1108
+ async function getSchemaIndexes(pool) {
1109
+ const client = await pool.connect();
1110
+ try {
1111
+ const r = await client.query(`
1112
+ SELECT
1113
+ n.nspname AS schema,
1114
+ t.relname AS table_name,
1115
+ i.relname AS name,
1116
+ am.amname AS type,
1117
+ pg_size_pretty(pg_relation_size(i.oid)) AS size,
1118
+ pg_relation_size(i.oid) AS size_bytes,
1119
+ pg_get_indexdef(idx.indexrelid) AS definition,
1120
+ idx.indisunique AS is_unique,
1121
+ idx.indisprimary AS is_primary,
1122
+ s.idx_scan, s.idx_tup_read, s.idx_tup_fetch
1123
+ FROM pg_index idx
1124
+ JOIN pg_class i ON idx.indexrelid = i.oid
1125
+ JOIN pg_class t ON idx.indrelid = t.oid
1126
+ JOIN pg_namespace n ON t.relnamespace = n.oid
1127
+ JOIN pg_am am ON i.relam = am.oid
1128
+ LEFT JOIN pg_stat_user_indexes s ON s.indexrelid = i.oid
1129
+ WHERE n.nspname NOT IN ('pg_catalog', 'information_schema')
1130
+ ORDER BY pg_relation_size(i.oid) DESC
1131
+ `);
1132
+ return r.rows;
1133
+ } finally {
1134
+ client.release();
1135
+ }
1136
+ }
1137
+ async function getSchemaFunctions(pool) {
1138
+ const client = await pool.connect();
1139
+ try {
1140
+ const r = await client.query(`
1141
+ SELECT
1142
+ n.nspname AS schema,
1143
+ p.proname AS name,
1144
+ pg_get_function_result(p.oid) AS return_type,
1145
+ pg_get_function_arguments(p.oid) AS arguments,
1146
+ l.lanname AS language,
1147
+ p.prosrc AS source,
1148
+ CASE p.prokind WHEN 'f' THEN 'function' WHEN 'p' THEN 'procedure' WHEN 'a' THEN 'aggregate' WHEN 'w' THEN 'window' END AS kind
1149
+ FROM pg_proc p
1150
+ JOIN pg_namespace n ON p.pronamespace = n.oid
1151
+ JOIN pg_language l ON p.prolang = l.oid
1152
+ WHERE n.nspname NOT IN ('pg_catalog', 'information_schema')
1153
+ ORDER BY n.nspname, p.proname
1154
+ `);
1155
+ return r.rows;
1156
+ } finally {
1157
+ client.release();
1158
+ }
1159
+ }
1160
+ async function getSchemaExtensions(pool) {
1161
+ const client = await pool.connect();
1162
+ try {
1163
+ const r = await client.query(`
1164
+ SELECT extname AS name, extversion AS installed_version,
1165
+ n.nspname AS schema, obj_description(e.oid) AS description
1166
+ FROM pg_extension e
1167
+ JOIN pg_namespace n ON e.extnamespace = n.oid
1168
+ ORDER BY extname
1169
+ `);
1170
+ return r.rows;
1171
+ } finally {
1172
+ client.release();
1173
+ }
1174
+ }
1175
+ async function getSchemaEnums(pool) {
1176
+ const client = await pool.connect();
1177
+ try {
1178
+ const r = await client.query(`
1179
+ SELECT
1180
+ t.typname AS name,
1181
+ n.nspname AS schema,
1182
+ array_agg(e.enumlabel ORDER BY e.enumsortorder) AS values
1183
+ FROM pg_type t
1184
+ JOIN pg_namespace n ON t.typnamespace = n.oid
1185
+ JOIN pg_enum e ON t.oid = e.enumtypid
1186
+ WHERE n.nspname NOT IN ('pg_catalog', 'information_schema')
1187
+ GROUP BY t.typname, n.nspname
1188
+ ORDER BY t.typname
1189
+ `);
1190
+ return r.rows;
1191
+ } finally {
1192
+ client.release();
1193
+ }
1194
+ }
1209
1195
 
1210
1196
  // src/server/schema-diff.ts
1211
1197
  function diffSnapshots(oldSnap, newSnap) {
@@ -1426,6 +1412,92 @@ var SchemaTracker = class {
1426
1412
  }
1427
1413
  };
1428
1414
 
1415
+ // src/server/notifiers.ts
1416
+ var SEVERITY_COLORS = {
1417
+ critical: { hex: "#e74c3c", decimal: 15158332, emoji: "\u{1F534}" },
1418
+ warning: { hex: "#f39c12", decimal: 15965202, emoji: "\u{1F7E1}" },
1419
+ info: { hex: "#3498db", decimal: 3447003, emoji: "\u{1F535}" }
1420
+ };
1421
+ function detectWebhookType(url) {
1422
+ if (url.includes("hooks.slack.com")) return "slack";
1423
+ if (url.includes("discord.com/api/webhooks") || url.includes("discordapp.com")) return "discord";
1424
+ return "generic";
1425
+ }
1426
+ function formatSlackMessage(alert, rule) {
1427
+ const colors = SEVERITY_COLORS[rule.severity] || SEVERITY_COLORS.info;
1428
+ return {
1429
+ attachments: [
1430
+ {
1431
+ color: colors.hex,
1432
+ blocks: [
1433
+ {
1434
+ type: "section",
1435
+ text: {
1436
+ type: "mrkdwn",
1437
+ text: `${colors.emoji} *pg-dash Alert: ${rule.name}*`
1438
+ }
1439
+ },
1440
+ {
1441
+ type: "section",
1442
+ fields: [
1443
+ { type: "mrkdwn", text: `*Metric:*
1444
+ ${rule.metric}` },
1445
+ { type: "mrkdwn", text: `*Current Value:*
1446
+ ${alert.value}` },
1447
+ { type: "mrkdwn", text: `*Threshold:*
1448
+ ${rule.operator} ${rule.threshold}` },
1449
+ { type: "mrkdwn", text: `*Severity:*
1450
+ ${rule.severity}` },
1451
+ { type: "mrkdwn", text: `*Timestamp:*
1452
+ ${new Date(alert.timestamp).toISOString()}` }
1453
+ ]
1454
+ }
1455
+ ]
1456
+ }
1457
+ ]
1458
+ };
1459
+ }
1460
+ function formatDiscordMessage(alert, rule) {
1461
+ const colors = SEVERITY_COLORS[rule.severity] || SEVERITY_COLORS.info;
1462
+ return {
1463
+ embeds: [
1464
+ {
1465
+ title: `${colors.emoji} pg-dash Alert: ${rule.name}`,
1466
+ color: colors.decimal,
1467
+ fields: [
1468
+ { name: "Metric", value: rule.metric, inline: true },
1469
+ { name: "Current Value", value: String(alert.value), inline: true },
1470
+ { name: "Threshold", value: `${rule.operator} ${rule.threshold}`, inline: true },
1471
+ { name: "Severity", value: rule.severity, inline: true },
1472
+ { name: "Timestamp", value: new Date(alert.timestamp).toISOString(), inline: false }
1473
+ ],
1474
+ footer: { text: "pg-dash \xB7 PostgreSQL Monitoring" }
1475
+ }
1476
+ ]
1477
+ };
1478
+ }
1479
+ function formatGenericWebhook(alert, rule) {
1480
+ return {
1481
+ severity: rule.severity,
1482
+ rule: rule.name,
1483
+ metric: rule.metric,
1484
+ value: alert.value,
1485
+ message: alert.message,
1486
+ timestamp: alert.timestamp
1487
+ };
1488
+ }
1489
+ function formatWebhookPayload(alert, rule, webhookUrl) {
1490
+ const type = detectWebhookType(webhookUrl);
1491
+ switch (type) {
1492
+ case "slack":
1493
+ return formatSlackMessage(alert, rule);
1494
+ case "discord":
1495
+ return formatDiscordMessage(alert, rule);
1496
+ default:
1497
+ return formatGenericWebhook(alert, rule);
1498
+ }
1499
+ }
1500
+
1429
1501
  // src/server/alerts.ts
1430
1502
  var DEFAULT_RULES = [
1431
1503
  { name: "Connection utilization > 80%", metric: "connection_util", operator: "gt", threshold: 80, severity: "warning", enabled: 1, cooldown_minutes: 60 },
@@ -1434,7 +1506,9 @@ var DEFAULT_RULES = [
1434
1506
  { name: "Cache hit ratio < 95%", metric: "cache_hit_pct", operator: "lt", threshold: 95, severity: "critical", enabled: 1, cooldown_minutes: 30 },
1435
1507
  { name: "Long-running query > 5 min", metric: "long_query_count", operator: "gt", threshold: 0, severity: "warning", enabled: 1, cooldown_minutes: 15 },
1436
1508
  { name: "Idle in transaction > 10 min", metric: "idle_in_tx_count", operator: "gt", threshold: 0, severity: "warning", enabled: 1, cooldown_minutes: 15 },
1437
- { name: "Health score below D", metric: "health_score", operator: "lt", threshold: 50, severity: "warning", enabled: 1, cooldown_minutes: 120 }
1509
+ { name: "Health score below D", metric: "health_score", operator: "lt", threshold: 50, severity: "warning", enabled: 1, cooldown_minutes: 120 },
1510
+ { name: "Database size growth > 10% in 24h", metric: "db_growth_pct_24h", operator: "gt", threshold: 10, severity: "warning", enabled: 1, cooldown_minutes: 60 },
1511
+ { name: "Predicted disk full within 7 days", metric: "days_until_full", operator: "lt", threshold: 7, severity: "critical", enabled: 1, cooldown_minutes: 360 }
1438
1512
  ];
1439
1513
  var AlertManager = class {
1440
1514
  db;
@@ -1563,20 +1637,55 @@ var AlertManager = class {
1563
1637
  return false;
1564
1638
  }
1565
1639
  }
1640
+ getWebhookUrl() {
1641
+ return this.webhookUrl;
1642
+ }
1643
+ getWebhookType() {
1644
+ if (!this.webhookUrl) return null;
1645
+ return detectWebhookType(this.webhookUrl);
1646
+ }
1647
+ async sendTestWebhook() {
1648
+ if (!this.webhookUrl) return { ok: false, type: "none", error: "No webhook URL configured" };
1649
+ const type = detectWebhookType(this.webhookUrl);
1650
+ const testRule = {
1651
+ id: 0,
1652
+ name: "Test Alert",
1653
+ metric: "test_metric",
1654
+ operator: "gt",
1655
+ threshold: 80,
1656
+ severity: "info",
1657
+ enabled: 1,
1658
+ cooldown_minutes: 60
1659
+ };
1660
+ const testEntry = {
1661
+ id: 0,
1662
+ rule_id: 0,
1663
+ timestamp: Date.now(),
1664
+ value: 85,
1665
+ message: "Test Alert: test_metric = 85 (threshold: gt 80)",
1666
+ notified: 0
1667
+ };
1668
+ try {
1669
+ const payload = formatWebhookPayload(testEntry, testRule, this.webhookUrl);
1670
+ const res = await fetch(this.webhookUrl, {
1671
+ method: "POST",
1672
+ headers: { "Content-Type": "application/json" },
1673
+ body: JSON.stringify(payload)
1674
+ });
1675
+ if (!res.ok) return { ok: false, type, error: `HTTP ${res.status}` };
1676
+ return { ok: true, type };
1677
+ } catch (err) {
1678
+ return { ok: false, type, error: err.message };
1679
+ }
1680
+ }
1566
1681
  async sendWebhook(rule, entry) {
1567
1682
  if (!this.webhookUrl) return;
1568
1683
  try {
1684
+ const payload = formatWebhookPayload(entry, rule, this.webhookUrl);
1569
1685
  await fetch(this.webhookUrl, {
1570
1686
  method: "POST",
1571
1687
  headers: { "Content-Type": "application/json" },
1572
- body: JSON.stringify({
1573
- severity: rule.severity,
1574
- rule: rule.name,
1575
- metric: rule.metric,
1576
- value: entry.value,
1577
- message: entry.message,
1578
- timestamp: entry.timestamp
1579
- })
1688
+ body: JSON.stringify(payload)
1580
1689
  });
1581
1690
  this.db.prepare("UPDATE alert_history SET notified = 1 WHERE id = ?").run(entry.id);
1582
1691
  } catch (err) {
@@ -1585,84 +1694,8 @@ var AlertManager = class {
1585
1694
  }
1586
1695
  };
1587
1696
 
1588
- // src/server/index.ts
1589
- import Database2 from "better-sqlite3";
1590
- import { WebSocketServer, WebSocket } from "ws";
1591
- import http from "http";
1592
- var __dirname = path2.dirname(fileURLToPath(import.meta.url));
1593
- var RANGE_MAP = {
1594
- "5m": 5 * 60 * 1e3,
1595
- "15m": 15 * 60 * 1e3,
1596
- "1h": 60 * 60 * 1e3,
1597
- "6h": 6 * 60 * 60 * 1e3,
1598
- "24h": 24 * 60 * 60 * 1e3,
1599
- "7d": 7 * 24 * 60 * 60 * 1e3
1600
- };
1601
- async function startServer(opts) {
1602
- const pool = new Pool({ connectionString: opts.connectionString });
1603
- try {
1604
- const client = await pool.connect();
1605
- client.release();
1606
- } catch (err) {
1607
- console.error(`Failed to connect to PostgreSQL: ${err.message}`);
1608
- process.exit(1);
1609
- }
1610
- if (opts.json) {
1611
- try {
1612
- const [overview, advisor, databases, tables] = await Promise.all([
1613
- getOverview(pool),
1614
- getAdvisorReport(pool),
1615
- getDatabases(pool),
1616
- getTables(pool)
1617
- ]);
1618
- console.log(JSON.stringify({ overview, advisor, databases, tables }, null, 2));
1619
- } catch (err) {
1620
- console.error(JSON.stringify({ error: err.message }));
1621
- process.exit(1);
1622
- }
1623
- await pool.end();
1624
- process.exit(0);
1625
- }
1626
- const dataDir = opts.dataDir || path2.join(os2.homedir(), ".pg-dash");
1627
- fs2.mkdirSync(dataDir, { recursive: true });
1628
- const store = new TimeseriesStore(opts.dataDir, opts.retentionDays);
1629
- const intervalMs = (opts.interval || 30) * 1e3;
1630
- const longQueryThreshold = opts.longQueryThreshold || 5;
1631
- const collector = new Collector(pool, store, intervalMs);
1632
- console.log(` Collecting metrics every ${intervalMs / 1e3}s...`);
1633
- collector.start();
1634
- const schemaDbPath = path2.join(dataDir, "schema.db");
1635
- const schemaDb = new Database2(schemaDbPath);
1636
- schemaDb.pragma("journal_mode = WAL");
1637
- const snapshotIntervalMs = (opts.snapshotInterval || 6) * 60 * 60 * 1e3;
1638
- const schemaTracker = new SchemaTracker(schemaDb, pool, snapshotIntervalMs);
1639
- schemaTracker.start();
1640
- console.log(" Schema change tracking enabled");
1641
- const alertsDbPath = path2.join(dataDir, "alerts.db");
1642
- const alertsDb = new Database2(alertsDbPath);
1643
- alertsDb.pragma("journal_mode = WAL");
1644
- const alertManager = new AlertManager(alertsDb, opts.webhook);
1645
- console.log(" Alert monitoring enabled");
1646
- const app = new Hono();
1647
- if (opts.auth || opts.token) {
1648
- app.use("*", async (c, next) => {
1649
- const authHeader = c.req.header("authorization") || "";
1650
- if (opts.token) {
1651
- if (authHeader === `Bearer ${opts.token}`) return next();
1652
- }
1653
- if (opts.auth) {
1654
- const [user, pass] = opts.auth.split(":");
1655
- const expected = "Basic " + Buffer.from(`${user}:${pass}`).toString("base64");
1656
- if (authHeader === expected) return next();
1657
- }
1658
- const url = new URL(c.req.url, "http://localhost");
1659
- if (opts.token && url.searchParams.get("token") === opts.token) return next();
1660
- if (opts.auth) {
1661
- c.header("WWW-Authenticate", 'Basic realm="pg-dash"');
1662
- }
1663
- return c.text("Unauthorized", 401);
1664
- });
1665
- }
1697
+ // src/server/routes/overview.ts
1698
+ function registerOverviewRoutes(app, pool) {
1666
1699
  app.get("/api/overview", async (c) => {
1667
1700
  try {
1668
1701
  return c.json(await getOverview(pool));
@@ -1684,6 +1717,18 @@ async function startServer(opts) {
1684
1717
  return c.json({ error: err.message }, 500);
1685
1718
  }
1686
1719
  });
1720
+ }
1721
+
1722
// src/server/routes/metrics.ts
// Time-range labels accepted by the metrics endpoints, mapped to milliseconds.
var RANGE_MAP = {
  "5m": 3e5,     // 5 minutes
  "15m": 9e5,    // 15 minutes
  "1h": 36e5,    // 1 hour
  "6h": 216e5,   // 6 hours
  "24h": 864e5,  // 24 hours
  "7d": 6048e5   // 7 days
};
1731
+ function registerMetricsRoutes(app, store, collector) {
1687
1732
  app.get("/api/metrics", (c) => {
1688
1733
  try {
1689
1734
  const metric = c.req.query("metric");
@@ -1705,6 +1750,44 @@ async function startServer(opts) {
1705
1750
  return _c.json({ error: err.message }, 500);
1706
1751
  }
1707
1752
  });
1753
+ }
1754
+
1755
+ // src/server/queries/slow-queries.ts
1756
+ async function getSlowQueries(pool) {
1757
+ const client = await pool.connect();
1758
+ try {
1759
+ const extCheck = await client.query(
1760
+ "SELECT 1 FROM pg_extension WHERE extname = 'pg_stat_statements'"
1761
+ );
1762
+ if (extCheck.rows.length === 0) {
1763
+ return [];
1764
+ }
1765
+ const r = await client.query(`
1766
+ SELECT
1767
+ queryid::text,
1768
+ query,
1769
+ calls::int,
1770
+ total_exec_time AS total_time,
1771
+ mean_exec_time AS mean_time,
1772
+ rows::int,
1773
+ round(total_exec_time::numeric / 1000, 2)::text || 's' AS total_time_pretty,
1774
+ round(mean_exec_time::numeric, 2)::text || 'ms' AS mean_time_pretty
1775
+ FROM pg_stat_statements
1776
+ WHERE query NOT LIKE '%pg_stat%'
1777
+ AND query NOT LIKE '%pg_catalog%'
1778
+ ORDER BY total_exec_time DESC
1779
+ LIMIT 50
1780
+ `);
1781
+ return r.rows;
1782
+ } catch {
1783
+ return [];
1784
+ } finally {
1785
+ client.release();
1786
+ }
1787
+ }
1788
+
1789
+ // src/server/routes/activity.ts
1790
+ function registerActivityRoutes(app, pool) {
1708
1791
  app.get("/api/activity", async (c) => {
1709
1792
  try {
1710
1793
  return c.json(await getActivity(pool));
@@ -1733,9 +1816,14 @@ async function startServer(opts) {
1733
1816
  return c.json({ error: err.message }, 500);
1734
1817
  }
1735
1818
  });
1819
+ }
1820
+
1821
+ // src/server/routes/advisor.ts
1822
+ init_advisor();
1823
+ function registerAdvisorRoutes(app, pool, longQueryThreshold) {
1736
1824
  app.get("/api/advisor", async (c) => {
1737
1825
  try {
1738
- return c.json(await getAdvisorReport(pool));
1826
+ return c.json(await getAdvisorReport(pool, longQueryThreshold));
1739
1827
  } catch (err) {
1740
1828
  return c.json({ error: err.message }, 500);
1741
1829
  }
@@ -1759,6 +1847,10 @@ async function startServer(opts) {
1759
1847
  return c.json({ error: err.message }, 500);
1760
1848
  }
1761
1849
  });
1850
+ }
1851
+
1852
+ // src/server/routes/schema.ts
1853
+ function registerSchemaRoutes(app, pool, schemaTracker) {
1762
1854
  app.get("/api/schema/tables", async (c) => {
1763
1855
  try {
1764
1856
  return c.json(await getSchemaTables(pool));
@@ -1847,6 +1939,10 @@ async function startServer(opts) {
1847
1939
  return c.json({ error: err.message }, 500);
1848
1940
  }
1849
1941
  });
1942
+ }
1943
+
1944
+ // src/server/routes/alerts.ts
1945
+ function registerAlertsRoutes(app, alertManager) {
1850
1946
  app.get("/api/alerts/rules", (c) => {
1851
1947
  try {
1852
1948
  return c.json(alertManager.getRules());
@@ -1884,6 +1980,24 @@ async function startServer(opts) {
1884
1980
  return c.json({ error: err.message }, 500);
1885
1981
  }
1886
1982
  });
1983
+ app.get("/api/alerts/webhook-info", (c) => {
1984
+ try {
1985
+ const url = alertManager.getWebhookUrl();
1986
+ const type = alertManager.getWebhookType();
1987
+ const masked = url ? url.replace(/\/[^/]{8,}$/, "/****") : null;
1988
+ return c.json({ url: masked, type: type || "none", configured: !!url });
1989
+ } catch (err) {
1990
+ return c.json({ error: err.message }, 500);
1991
+ }
1992
+ });
1993
+ app.post("/api/alerts/test-webhook", async (c) => {
1994
+ try {
1995
+ const result = await alertManager.sendTestWebhook();
1996
+ return c.json(result, result.ok ? 200 : 400);
1997
+ } catch (err) {
1998
+ return c.json({ error: err.message }, 500);
1999
+ }
2000
+ });
1887
2001
  app.get("/api/alerts/history", (c) => {
1888
2002
  try {
1889
2003
  const limit = parseInt(c.req.query("limit") || "50");
@@ -1892,6 +2006,512 @@ async function startServer(opts) {
1892
2006
  return c.json({ error: err.message }, 500);
1893
2007
  }
1894
2008
  });
2009
+ }
2010
+
2011
+ // src/server/routes/explain.ts
2012
+ var DDL_PATTERN = /\b(CREATE|DROP|ALTER|TRUNCATE|GRANT|REVOKE)\b/i;
2013
+ function registerExplainRoutes(app, pool) {
2014
+ app.post("/api/explain", async (c) => {
2015
+ try {
2016
+ const body = await c.req.json();
2017
+ const query = body?.query?.trim();
2018
+ if (!query) return c.json({ error: "Missing query" }, 400);
2019
+ if (DDL_PATTERN.test(query)) return c.json({ error: "DDL statements are not allowed" }, 400);
2020
+ const client = await pool.connect();
2021
+ try {
2022
+ await client.query("SET statement_timeout = '30s'");
2023
+ await client.query("BEGIN");
2024
+ try {
2025
+ const r = await client.query(`EXPLAIN (ANALYZE, BUFFERS, FORMAT JSON) ${query}`);
2026
+ await client.query("ROLLBACK");
2027
+ await client.query("RESET statement_timeout");
2028
+ return c.json({ plan: r.rows[0]["QUERY PLAN"] });
2029
+ } catch (err) {
2030
+ await client.query("ROLLBACK").catch(() => {
2031
+ });
2032
+ await client.query("RESET statement_timeout").catch(() => {
2033
+ });
2034
+ return c.json({ error: err.message }, 400);
2035
+ }
2036
+ } finally {
2037
+ client.release();
2038
+ }
2039
+ } catch (err) {
2040
+ return c.json({ error: err.message }, 500);
2041
+ }
2042
+ });
2043
+ }
2044
+
2045
+ // src/server/disk-prediction.ts
2046
/**
 * Ordinary least-squares fit of y = slope * x + intercept.
 *
 * @param points array of {x, y} samples.
 * @returns {slope, intercept, r2} — the fitted line plus the coefficient of
 *   determination. Degenerate inputs (fewer than two points, or zero variance
 *   in x) yield a flat/zero result instead of NaN.
 */
function linearRegression(points) {
  const n = points.length;
  if (n < 2) return { slope: 0, intercept: 0, r2: 0 };
  // Note: the original also accumulated sum(y^2), but it was never used —
  // dropped as dead work.
  let sumX = 0, sumY = 0, sumXY = 0, sumX2 = 0;
  for (const p of points) {
    sumX += p.x;
    sumY += p.y;
    sumXY += p.x * p.y;
    sumX2 += p.x * p.x;
  }
  const denom = n * sumX2 - sumX * sumX;
  // All x identical: slope is undefined — report a horizontal line at the mean.
  if (denom === 0) return { slope: 0, intercept: sumY / n, r2: 0 };
  const slope = (n * sumXY - sumX * sumY) / denom;
  const intercept = (sumY - slope * sumX) / n;
  const meanY = sumY / n;
  let ssTot = 0, ssRes = 0;
  for (const p of points) {
    ssTot += (p.y - meanY) ** 2;
    ssRes += (p.y - (slope * p.x + intercept)) ** 2;
  }
  // ssTot === 0 means y is constant: a perfect fit by definition.
  const r2 = ssTot === 0 ? 1 : Math.max(0, 1 - ssRes / ssTot);
  return { slope, intercept, r2 };
}
2070
+ var DiskPredictor = class {
2071
+ /**
2072
+ * Predict disk growth based on historical metric data.
2073
+ * @param store TimeseriesStore instance
2074
+ * @param metric Metric name (e.g. "db_size_bytes")
2075
+ * @param daysAhead How many days to project ahead
2076
+ * @param maxDiskBytes Optional max disk capacity for "days until full" calc
2077
+ */
2078
+ predict(store, metric, daysAhead, maxDiskBytes) {
2079
+ const now = Date.now();
2080
+ const data = store.query(metric, now - 30 * 24 * 60 * 60 * 1e3, now);
2081
+ if (data.length < 2) return null;
2082
+ const timeSpanMs = data[data.length - 1].timestamp - data[0].timestamp;
2083
+ if (timeSpanMs < 24 * 60 * 60 * 1e3) return null;
2084
+ const currentBytes = data[data.length - 1].value;
2085
+ const t0 = data[0].timestamp;
2086
+ const points = data.map((d) => ({
2087
+ x: (d.timestamp - t0) / (24 * 60 * 60 * 1e3),
2088
+ // days
2089
+ y: d.value
2090
+ }));
2091
+ const { slope, r2 } = linearRegression(points);
2092
+ const growthRatePerDay = slope;
2093
+ let predictedFullDate = null;
2094
+ let daysUntilFull = null;
2095
+ if (maxDiskBytes && growthRatePerDay > 0) {
2096
+ const remainingBytes = maxDiskBytes - currentBytes;
2097
+ daysUntilFull = remainingBytes / growthRatePerDay;
2098
+ if (daysUntilFull > 0 && daysUntilFull < 365 * 10) {
2099
+ predictedFullDate = new Date(now + daysUntilFull * 24 * 60 * 60 * 1e3);
2100
+ }
2101
+ }
2102
+ return {
2103
+ currentBytes,
2104
+ growthRatePerDay,
2105
+ predictedFullDate,
2106
+ daysUntilFull: daysUntilFull !== null && daysUntilFull > 0 ? daysUntilFull : null,
2107
+ confidence: r2
2108
+ };
2109
+ }
2110
+ };
2111
+
2112
// src/server/routes/disk.ts
// History-range labels accepted by /api/disk/history, mapped to milliseconds.
var RANGE_MAP2 = {
  "24h": 24 * 60 * 60 * 1e3,
  "7d": 7 * 24 * 60 * 60 * 1e3,
  "30d": 30 * 24 * 60 * 60 * 1e3
};
// Mounts the disk-usage, growth-prediction and size-history endpoints.
// `store` holds the sampled "db_size_bytes" time series used for trends.
function registerDiskRoutes(app, pool, store) {
  const predictor = new DiskPredictor();
  // GET /api/disk/usage — current footprint: database size, per-tablespace
  // sizes, and the 20 largest user tables (total / heap / index split).
  app.get("/api/disk/usage", async (c) => {
    try {
      const client = await pool.connect();
      try {
        const dbRes = await client.query(`
          SELECT pg_database_size(current_database()) AS db_size,
            (SELECT setting FROM pg_settings WHERE name = 'data_directory') AS data_dir
        `);
        const { db_size, data_dir } = dbRes.rows[0];
        const tsRes = await client.query(`SELECT spcname, pg_tablespace_size(oid) AS size FROM pg_tablespace`);
        const tablespaces = tsRes.rows.map((r) => ({
          name: r.spcname,
          // size arrives as a string (bigint column); coerce to number.
          size: parseInt(r.size)
        }));
        // quote_ident protects schema/table names that need quoting when the
        // regclass string is rebuilt for the size functions.
        const tableRes = await client.query(`
          SELECT schemaname, relname,
            pg_total_relation_size(quote_ident(schemaname) || '.' || quote_ident(relname)) as total_size,
            pg_relation_size(quote_ident(schemaname) || '.' || quote_ident(relname)) as table_size,
            pg_indexes_size(quote_ident(schemaname) || '.' || quote_ident(relname)) as index_size
          FROM pg_stat_user_tables
          ORDER BY pg_total_relation_size(quote_ident(schemaname) || '.' || quote_ident(relname)) DESC
          LIMIT 20
        `);
        const tables = tableRes.rows.map((r) => ({
          schema: r.schemaname,
          name: r.relname,
          totalSize: parseInt(r.total_size),
          tableSize: parseInt(r.table_size),
          indexSize: parseInt(r.index_size)
        }));
        return c.json({
          dbSize: parseInt(db_size),
          dataDir: data_dir,
          tablespaces,
          tables
        });
      } finally {
        client.release();
      }
    } catch (err) {
      return c.json({ error: err.message }, 500);
    }
  });
  // GET /api/disk/prediction — linear-regression growth forecast over stored
  // samples; optional ?maxDisk (bytes) enables the "days until full" estimate.
  // A non-numeric maxDisk parses to NaN, which predict() treats as absent.
  app.get("/api/disk/prediction", (c) => {
    try {
      const days = parseInt(c.req.query("days") || "30");
      const maxDisk = c.req.query("maxDisk") ? parseInt(c.req.query("maxDisk")) : void 0;
      const prediction = predictor.predict(store, "db_size_bytes", days, maxDisk);
      return c.json({ prediction });
    } catch (err) {
      return c.json({ error: err.message }, 500);
    }
  });
  // GET /api/disk/history — raw db_size_bytes samples over the requested
  // range; unknown range labels fall back to 24h.
  app.get("/api/disk/history", (c) => {
    try {
      const range = c.req.query("range") || "24h";
      const rangeMs = RANGE_MAP2[range] || RANGE_MAP2["24h"];
      const now = Date.now();
      const data = store.query("db_size_bytes", now - rangeMs, now);
      return c.json(data);
    } catch (err) {
      return c.json({ error: err.message }, 500);
    }
  });
}
2185
+
2186
// src/server/query-stats.ts
var DEFAULT_RETENTION_DAYS2 = 7;
// Persists periodic per-query snapshots of pg_stat_statements into SQLite.
// pg_stat_statements exposes cumulative counters; this store records the
// per-interval *delta* for each queryid so trends can be charted over time.
var QueryStatsStore = class {
  db;           // better-sqlite3 handle (owned by the caller)
  insertStmt;   // prepared INSERT reused for every row
  retentionMs;  // how long rows are kept before prune() deletes them
  // Last raw snapshot keyed by queryid, used to compute deltas.
  prev = /* @__PURE__ */ new Map();
  timer = null;       // periodic snapshot interval handle
  pruneTimer = null;  // hourly prune interval handle
  constructor(db, retentionDays = DEFAULT_RETENTION_DAYS2) {
    this.db = db;
    this.retentionMs = retentionDays * 24 * 60 * 60 * 1e3;
    this.db.exec(`
      CREATE TABLE IF NOT EXISTS query_stats (
        timestamp INTEGER NOT NULL,
        queryid TEXT NOT NULL,
        query TEXT,
        calls INTEGER,
        total_exec_time REAL,
        mean_exec_time REAL,
        min_exec_time REAL,
        max_exec_time REAL,
        rows INTEGER,
        shared_blks_hit INTEGER,
        shared_blks_read INTEGER
      );
      CREATE INDEX IF NOT EXISTS idx_qs_queryid_ts ON query_stats(queryid, timestamp);
    `);
    this.insertStmt = this.db.prepare(
      `INSERT INTO query_stats (timestamp, queryid, query, calls, total_exec_time, mean_exec_time, min_exec_time, max_exec_time, rows, shared_blks_hit, shared_blks_read)
       VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`
    );
  }
  // Take an immediate snapshot, then repeat every intervalMs; prune hourly.
  // Failures are logged, never thrown — collection is best effort.
  startPeriodicSnapshot(pool, intervalMs = 5 * 60 * 1e3) {
    this.snapshot(pool).catch(
      (err) => console.error("[query-stats] Initial snapshot failed:", err.message)
    );
    this.timer = setInterval(() => {
      this.snapshot(pool).catch(
        (err) => console.error("[query-stats] Snapshot failed:", err.message)
      );
    }, intervalMs);
    this.pruneTimer = setInterval(() => this.prune(), 60 * 60 * 1e3);
  }
  // Stop both periodic timers; safe to call more than once.
  stop() {
    if (this.timer) {
      clearInterval(this.timer);
      this.timer = null;
    }
    if (this.pruneTimer) {
      clearInterval(this.pruneTimer);
      this.pruneTimer = null;
    }
  }
  // Read pg_stat_statements and store one row per query.
  // First snapshot: raw cumulative values are stored as-is to seed history.
  // Later snapshots: per-interval deltas vs. `prev` (Math.max(0, ...) guards
  // against counter resets, e.g. pg_stat_statements_reset()). Queries first
  // seen mid-run are skipped until the next snapshot establishes a baseline.
  // Returns the number of rows written; 0 on any failure.
  async snapshot(pool) {
    const client = await pool.connect();
    try {
      const extCheck = await client.query(
        "SELECT 1 FROM pg_extension WHERE extname = 'pg_stat_statements'"
      );
      if (extCheck.rows.length === 0) return 0;
      const r = await client.query(`
        SELECT
          queryid::text,
          query,
          calls::int,
          total_exec_time,
          mean_exec_time,
          min_exec_time,
          max_exec_time,
          rows::int,
          shared_blks_hit::int,
          shared_blks_read::int
        FROM pg_stat_statements
        WHERE query NOT LIKE '%pg_stat%'
          AND query NOT LIKE '%pg_catalog%'
          AND queryid IS NOT NULL
      `);
      const now = Date.now();
      const hasPrev = this.prev.size > 0;
      let count = 0;
      // All inserts happen in a single SQLite transaction for atomicity/speed.
      const tx = this.db.transaction((rows) => {
        for (const row of rows) {
          const prev = this.prev.get(row.queryid);
          if (hasPrev && prev) {
            const deltaCalls = Math.max(0, row.calls - prev.calls);
            // Nothing executed this interval — skip the row entirely.
            if (deltaCalls === 0) continue;
            const deltaTime = Math.max(0, row.total_exec_time - prev.total_exec_time);
            const deltaRows = Math.max(0, row.rows - prev.rows);
            const deltaHit = Math.max(0, row.shared_blks_hit - prev.shared_blks_hit);
            const deltaRead = Math.max(0, row.shared_blks_read - prev.shared_blks_read);
            // Recompute mean over the interval; min/max stay cumulative since
            // pg_stat_statements does not expose per-interval extrema.
            const meanTime = deltaCalls > 0 ? deltaTime / deltaCalls : 0;
            this.insertStmt.run(
              now,
              row.queryid,
              row.query,
              deltaCalls,
              deltaTime,
              meanTime,
              row.min_exec_time,
              row.max_exec_time,
              deltaRows,
              deltaHit,
              deltaRead
            );
            count++;
          } else if (!hasPrev) {
            this.insertStmt.run(
              now,
              row.queryid,
              row.query,
              row.calls,
              row.total_exec_time,
              row.mean_exec_time,
              row.min_exec_time,
              row.max_exec_time,
              row.rows,
              row.shared_blks_hit,
              row.shared_blks_read
            );
            count++;
          }
        }
      });
      tx(r.rows);
      // Replace the baseline with this snapshot's raw cumulative values.
      this.prev.clear();
      for (const row of r.rows) {
        this.prev.set(row.queryid, row);
      }
      return count;
    } catch (err) {
      console.error("[query-stats] Error snapshotting:", err.message);
      return 0;
    } finally {
      client.release();
    }
  }
  /** Insert a row directly (for testing) */
  insertRow(row) {
    this.insertStmt.run(
      row.timestamp,
      row.queryid,
      row.query,
      row.calls,
      row.total_exec_time,
      row.mean_exec_time,
      row.min_exec_time,
      row.max_exec_time,
      row.rows,
      row.shared_blks_hit,
      row.shared_blks_read
    );
  }
  // Raw snapshot rows for one queryid within [startMs, endMs] (endMs defaults
  // to now), ordered by timestamp ascending.
  getTrend(queryid, startMs, endMs) {
    const end = endMs ?? Date.now();
    return this.db.prepare(
      `SELECT timestamp, queryid, query, calls, total_exec_time, mean_exec_time,
              min_exec_time, max_exec_time, rows, shared_blks_hit, shared_blks_read
       FROM query_stats
       WHERE queryid = ? AND timestamp >= ? AND timestamp <= ?
       ORDER BY timestamp`
    ).all(queryid, startMs, end);
  }
  // Aggregate stats per queryid over a window. orderBy is mapped through a
  // fixed ternary whitelist before being interpolated into the SQL, so no
  // injection is possible; unknown values fall back to mean time.
  getTopQueries(startMs, endMs, orderBy = "total_time", limit = 20) {
    const orderCol = orderBy === "total_time" ? "SUM(total_exec_time)" : orderBy === "calls" ? "SUM(calls)" : "AVG(mean_exec_time)";
    return this.db.prepare(
      `SELECT queryid,
              MAX(query) as query,
              SUM(calls) as total_calls,
              SUM(total_exec_time) as total_exec_time,
              AVG(mean_exec_time) as mean_exec_time,
              SUM(rows) as total_rows
       FROM query_stats
       WHERE timestamp >= ? AND timestamp <= ?
       GROUP BY queryid
       ORDER BY ${orderCol} DESC
       LIMIT ?`
    ).all(startMs, endMs, limit);
  }
  // Delete rows older than the retention window (override via retentionMs).
  // Returns the number of rows deleted.
  prune(retentionMs) {
    const cutoff = Date.now() - (retentionMs ?? this.retentionMs);
    const info = this.db.prepare("DELETE FROM query_stats WHERE timestamp < ?").run(cutoff);
    return info.changes;
  }
  // Intentional no-op: the SQLite handle is shared and closed by the caller.
  close() {
  }
};
2373
+
2374
+ // src/server/routes/query-stats.ts
2375
+ var RANGE_MAP3 = {
2376
+ "1h": 60 * 60 * 1e3,
2377
+ "6h": 6 * 60 * 60 * 1e3,
2378
+ "24h": 24 * 60 * 60 * 1e3,
2379
+ "7d": 7 * 24 * 60 * 60 * 1e3
2380
+ };
2381
+ function registerQueryStatsRoutes(app, store) {
2382
+ app.get("/api/query-stats/top", (c) => {
2383
+ try {
2384
+ const range = c.req.query("range") || "1h";
2385
+ const orderBy = c.req.query("orderBy") || "total_time";
2386
+ const limit = parseInt(c.req.query("limit") || "20", 10);
2387
+ const rangeMs = RANGE_MAP3[range] || RANGE_MAP3["1h"];
2388
+ const now = Date.now();
2389
+ const data = store.getTopQueries(now - rangeMs, now, orderBy, limit);
2390
+ return c.json(data);
2391
+ } catch (err) {
2392
+ return c.json({ error: err.message }, 500);
2393
+ }
2394
+ });
2395
+ app.get("/api/query-stats/trend/:queryid", (c) => {
2396
+ try {
2397
+ const queryid = c.req.param("queryid");
2398
+ const range = c.req.query("range") || "1h";
2399
+ const rangeMs = RANGE_MAP3[range] || RANGE_MAP3["1h"];
2400
+ const now = Date.now();
2401
+ const data = store.getTrend(queryid, now - rangeMs, now);
2402
+ return c.json(data);
2403
+ } catch (err) {
2404
+ return c.json({ error: err.message }, 500);
2405
+ }
2406
+ });
2407
+ }
2408
+
2409
+ // src/server/index.ts
2410
+ import Database2 from "better-sqlite3";
2411
+ import { WebSocketServer, WebSocket } from "ws";
2412
+ import http from "http";
2413
+ var __dirname = path2.dirname(fileURLToPath(import.meta.url));
2414
+ async function startServer(opts) {
2415
+ const pool = new Pool({ connectionString: opts.connectionString });
2416
+ try {
2417
+ const client = await pool.connect();
2418
+ client.release();
2419
+ } catch (err) {
2420
+ console.error(`Failed to connect to PostgreSQL: ${err.message}`);
2421
+ process.exit(1);
2422
+ }
2423
+ const longQueryThreshold = opts.longQueryThreshold || 5;
2424
+ const diskPredictor = new DiskPredictor();
2425
+ if (opts.json) {
2426
+ try {
2427
+ const [overview, advisor, databases, tables] = await Promise.all([
2428
+ getOverview(pool),
2429
+ getAdvisorReport(pool, longQueryThreshold),
2430
+ getDatabases(pool),
2431
+ getTables(pool)
2432
+ ]);
2433
+ console.log(JSON.stringify({ overview, advisor, databases, tables }, null, 2));
2434
+ } catch (err) {
2435
+ console.error(JSON.stringify({ error: err.message }));
2436
+ process.exit(1);
2437
+ }
2438
+ await pool.end();
2439
+ process.exit(0);
2440
+ }
2441
+ const dataDir = opts.dataDir || path2.join(os2.homedir(), ".pg-dash");
2442
+ fs2.mkdirSync(dataDir, { recursive: true });
2443
+ const metricsDbPath = path2.join(dataDir, "metrics.db");
2444
+ const metricsDb = new Database2(metricsDbPath);
2445
+ metricsDb.pragma("journal_mode = WAL");
2446
+ const store = new TimeseriesStore(metricsDb, opts.retentionDays);
2447
+ const intervalMs = (opts.interval || 30) * 1e3;
2448
+ const collector = new Collector(pool, store, intervalMs);
2449
+ console.log(` Collecting metrics every ${intervalMs / 1e3}s...`);
2450
+ collector.start();
2451
+ const schemaDbPath = path2.join(dataDir, "schema.db");
2452
+ const schemaDb = new Database2(schemaDbPath);
2453
+ schemaDb.pragma("journal_mode = WAL");
2454
+ const snapshotIntervalMs = (opts.snapshotInterval || 6) * 60 * 60 * 1e3;
2455
+ const schemaTracker = new SchemaTracker(schemaDb, pool, snapshotIntervalMs);
2456
+ schemaTracker.start();
2457
+ console.log(" Schema change tracking enabled");
2458
+ const alertsDbPath = path2.join(dataDir, "alerts.db");
2459
+ const alertsDb = new Database2(alertsDbPath);
2460
+ alertsDb.pragma("journal_mode = WAL");
2461
+ const alertManager = new AlertManager(alertsDb, opts.webhook);
2462
+ console.log(" Alert monitoring enabled");
2463
+ const queryStatsStore = new QueryStatsStore(metricsDb, opts.retentionDays);
2464
+ const querySnapshotIntervalMs = (opts.queryStatsInterval || 5) * 60 * 1e3;
2465
+ queryStatsStore.startPeriodicSnapshot(pool, querySnapshotIntervalMs);
2466
+ console.log(` Query stats snapshots every ${querySnapshotIntervalMs / 6e4}m`);
2467
+ const app = new Hono();
2468
+ if (opts.token) {
2469
+ app.post("/api/auth", async (c) => {
2470
+ try {
2471
+ const body = await c.req.json();
2472
+ if (body?.token === opts.token) {
2473
+ c.header("Set-Cookie", `pg-dash-token=${opts.token}; Path=/; HttpOnly; SameSite=Strict; Max-Age=86400`);
2474
+ return c.json({ ok: true });
2475
+ }
2476
+ return c.json({ error: "Invalid token" }, 401);
2477
+ } catch {
2478
+ return c.json({ error: "Invalid request" }, 400);
2479
+ }
2480
+ });
2481
+ }
2482
+ if (opts.auth || opts.token) {
2483
+ app.use("*", async (c, next) => {
2484
+ const authHeader = c.req.header("authorization") || "";
2485
+ if (opts.token) {
2486
+ if (authHeader === `Bearer ${opts.token}`) return next();
2487
+ }
2488
+ if (opts.auth) {
2489
+ const [user, pass] = opts.auth.split(":");
2490
+ const expected = "Basic " + Buffer.from(`${user}:${pass}`).toString("base64");
2491
+ if (authHeader === expected) return next();
2492
+ }
2493
+ const url = new URL(c.req.url, "http://localhost");
2494
+ if (opts.token && url.searchParams.get("token") === opts.token) return next();
2495
+ if (opts.token) {
2496
+ const cookies = c.req.header("cookie") || "";
2497
+ const match = cookies.match(/(?:^|;\s*)pg-dash-token=([^;]*)/);
2498
+ if (match && match[1] === opts.token) return next();
2499
+ }
2500
+ if (opts.auth) {
2501
+ c.header("WWW-Authenticate", 'Basic realm="pg-dash"');
2502
+ }
2503
+ return c.text("Unauthorized", 401);
2504
+ });
2505
+ }
2506
+ registerOverviewRoutes(app, pool);
2507
+ registerMetricsRoutes(app, store, collector);
2508
+ registerActivityRoutes(app, pool);
2509
+ registerAdvisorRoutes(app, pool, longQueryThreshold);
2510
+ registerSchemaRoutes(app, pool, schemaTracker);
2511
+ registerAlertsRoutes(app, alertManager);
2512
+ registerExplainRoutes(app, pool);
2513
+ registerDiskRoutes(app, pool, store);
2514
+ registerQueryStatsRoutes(app, queryStatsStore);
1895
2515
  const uiPath = path2.resolve(__dirname, "ui");
1896
2516
  const MIME_TYPES = {
1897
2517
  ".html": "text/html",
@@ -1960,6 +2580,11 @@ async function startServer(opts) {
1960
2580
  const expected = "Basic " + Buffer.from(`${user}:${pass}`).toString("base64");
1961
2581
  if (authHeader === expected) return cb(true);
1962
2582
  }
2583
+ if (opts.token) {
2584
+ const cookies = info.req.headers["cookie"] || "";
2585
+ const match = cookies.match(/(?:^|;\s*)pg-dash-token=([^;]*)/);
2586
+ if (match && match[1] === opts.token) return cb(true);
2587
+ }
1963
2588
  cb(false, 401, "Unauthorized");
1964
2589
  } : void 0
1965
2590
  });
@@ -2012,7 +2637,7 @@ async function startServer(opts) {
2012
2637
  try {
2013
2638
  const client = await pool.connect();
2014
2639
  try {
2015
- const r = await client.query(`SELECT count(*)::int AS c FROM pg_stat_activity WHERE state = 'active' AND now() - query_start > interval '${longQueryThreshold} minutes' AND pid != pg_backend_pid()`);
2640
+ const r = await client.query(`SELECT count(*)::int AS c FROM pg_stat_activity WHERE state = 'active' AND now() - query_start > $1 * interval '1 minute' AND pid != pg_backend_pid()`, [longQueryThreshold]);
2016
2641
  alertMetrics.long_query_count = r.rows[0]?.c || 0;
2017
2642
  } finally {
2018
2643
  client.release();
@@ -2023,7 +2648,7 @@ async function startServer(opts) {
2023
2648
  try {
2024
2649
  const client = await pool.connect();
2025
2650
  try {
2026
- const r = await client.query("SELECT count(*)::int AS c FROM pg_stat_activity WHERE state = 'idle in transaction' AND now() - state_change > interval '10 minutes'");
2651
+ const r = await client.query(`SELECT count(*)::int AS c FROM pg_stat_activity WHERE state = 'idle in transaction' AND now() - state_change > $1 * interval '1 minute'`, [longQueryThreshold]);
2027
2652
  alertMetrics.idle_in_tx_count = r.rows[0]?.c || 0;
2028
2653
  } finally {
2029
2654
  client.release();
@@ -2034,11 +2659,33 @@ async function startServer(opts) {
2034
2659
  collectCycleCount++;
2035
2660
  if (collectCycleCount % 10 === 0) {
2036
2661
  try {
2037
- const report = await getAdvisorReport(pool);
2662
+ const report = await getAdvisorReport(pool, longQueryThreshold);
2038
2663
  alertMetrics.health_score = report.score;
2039
2664
  } catch (err) {
2040
2665
  console.error("[alerts] Error checking health score:", err.message);
2041
2666
  }
2667
+ try {
2668
+ if (snapshot.db_size_bytes !== void 0) {
2669
+ const dayAgo = Date.now() - 24 * 60 * 60 * 1e3;
2670
+ const oldData = store.query("db_size_bytes", dayAgo, dayAgo + 5 * 60 * 1e3);
2671
+ if (oldData.length > 0) {
2672
+ const oldVal = oldData[0].value;
2673
+ if (oldVal > 0) {
2674
+ alertMetrics.db_growth_pct_24h = (snapshot.db_size_bytes - oldVal) / oldVal * 100;
2675
+ }
2676
+ }
2677
+ }
2678
+ } catch (err) {
2679
+ console.error("[alerts] Error computing db_growth_pct_24h:", err.message);
2680
+ }
2681
+ try {
2682
+ const pred = diskPredictor.predict(store, "db_size_bytes", 30);
2683
+ if (pred?.daysUntilFull !== null && pred?.daysUntilFull !== void 0) {
2684
+ alertMetrics.days_until_full = pred.daysUntilFull;
2685
+ }
2686
+ } catch (err) {
2687
+ console.error("[alerts] Error computing days_until_full:", err.message);
2688
+ }
2042
2689
  }
2043
2690
  const fired = alertManager.checkAlerts(alertMetrics);
2044
2691
  if (fired.length > 0 && clients.size > 0) {
@@ -2073,9 +2720,10 @@ async function startServer(opts) {
2073
2720
  console.log("\n Shutting down gracefully...");
2074
2721
  collector.stop();
2075
2722
  schemaTracker.stop();
2723
+ queryStatsStore.stop();
2076
2724
  wss.close();
2077
2725
  server.close();
2078
- store.close();
2726
+ metricsDb.close();
2079
2727
  schemaDb.close();
2080
2728
  alertsDb.close();
2081
2729
  await pool.end();
@@ -2106,6 +2754,8 @@ var { values, positionals } = parseArgs({
2106
2754
  auth: { type: "string" },
2107
2755
  token: { type: "string" },
2108
2756
  webhook: { type: "string" },
2757
+ "slack-webhook": { type: "string" },
2758
+ "discord-webhook": { type: "string" },
2109
2759
  "no-open": { type: "boolean", default: false },
2110
2760
  json: { type: "boolean", default: false },
2111
2761
  host: { type: "string" },
@@ -2117,6 +2767,7 @@ var { values, positionals } = parseArgs({
2117
2767
  interval: { type: "string", short: "i" },
2118
2768
  "retention-days": { type: "string" },
2119
2769
  "snapshot-interval": { type: "string" },
2770
+ "query-stats-interval": { type: "string" },
2120
2771
  "long-query-threshold": { type: "string" },
2121
2772
  help: { type: "boolean", short: "h" },
2122
2773
  version: { type: "boolean", short: "v" },
@@ -2150,6 +2801,8 @@ Options:
2150
2801
  --auth <user:pass> Basic auth credentials (user:password)
2151
2802
  --token <token> Bearer token for authentication
2152
2803
  --webhook <url> Webhook URL for alert notifications
2804
+ --slack-webhook <url> Slack webhook URL (convenience alias)
2805
+ --discord-webhook <url> Discord webhook URL (convenience alias)
2153
2806
  --no-open Don't auto-open browser (default: opens)
2154
2807
  --json Dump health check as JSON and exit
2155
2808
  --host <host> PostgreSQL host
@@ -2161,6 +2814,7 @@ Options:
2161
2814
  -i, --interval <sec> Collection interval in seconds (default: 30)
2162
2815
  --retention-days <N> Metrics retention in days (default: 7)
2163
2816
  --snapshot-interval <h> Schema snapshot interval in hours (default: 6)
2817
+ --query-stats-interval <min> Query stats snapshot interval in minutes (default: 5)
2164
2818
  --long-query-threshold <min> Long query threshold in minutes (default: 5)
2165
2819
  --threshold <score> Health score threshold for check command (default: 70)
2166
2820
  -f, --format <fmt> Output format: text|json (default: text)
@@ -2198,7 +2852,8 @@ if (subcommand === "check") {
2198
2852
  const { getAdvisorReport: getAdvisorReport2 } = await Promise.resolve().then(() => (init_advisor(), advisor_exports));
2199
2853
  const pool = new Pool2({ connectionString });
2200
2854
  try {
2201
- const report = await getAdvisorReport2(pool);
2855
+ const lqt = parseInt(values["long-query-threshold"] || process.env.PG_DASH_LONG_QUERY_THRESHOLD || "5", 10);
2856
+ const report = await getAdvisorReport2(pool, lqt);
2202
2857
  if (format === "json") {
2203
2858
  console.log(JSON.stringify(report, null, 2));
2204
2859
  } else {
@@ -2259,10 +2914,11 @@ if (subcommand === "check") {
2259
2914
  const interval = values.interval ? parseInt(values.interval, 10) : void 0;
2260
2915
  const retentionDays = parseInt(values["retention-days"] || process.env.PG_DASH_RETENTION_DAYS || "7", 10);
2261
2916
  const snapshotInterval = parseInt(values["snapshot-interval"] || process.env.PG_DASH_SNAPSHOT_INTERVAL || "6", 10);
2917
+ const queryStatsInterval = parseInt(values["query-stats-interval"] || process.env.PG_DASH_QUERY_STATS_INTERVAL || "5", 10);
2262
2918
  const longQueryThreshold = parseInt(values["long-query-threshold"] || process.env.PG_DASH_LONG_QUERY_THRESHOLD || "5", 10);
2263
2919
  const auth = values.auth || void 0;
2264
2920
  const token = values.token || void 0;
2265
- const webhook = values.webhook || void 0;
2921
+ const webhook = values["slack-webhook"] || values["discord-webhook"] || values.webhook || void 0;
2266
2922
  if (bind === "0.0.0.0" && !auth && !token) {
2267
2923
  console.warn("\n \u26A0\uFE0F WARNING: Dashboard is exposed without authentication. Use --auth or --token.\n");
2268
2924
  }
@@ -2276,6 +2932,7 @@ if (subcommand === "check") {
2276
2932
  interval,
2277
2933
  retentionDays,
2278
2934
  snapshotInterval,
2935
+ queryStatsInterval,
2279
2936
  longQueryThreshold,
2280
2937
  auth,
2281
2938
  token,