postgresai 0.14.0-dev.76 → 0.14.0-dev.78

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/bin/postgres-ai.ts CHANGED
@@ -20,6 +20,7 @@ import { maskSecret } from "../lib/util";
  import { createInterface } from "readline";
  import * as childProcess from "child_process";
  import { REPORT_GENERATORS, CHECK_INFO, generateAllReports } from "../lib/checkup";
+ import { getAllCheckupEntries, getCheckupEntry, isValidCheckupCode } from "../lib/checkup-dictionary";
  import { createCheckupReport, uploadCheckupReportJson, RpcError, formatRpcErrorForDisplay, withRetry } from "../lib/checkup-api";

  // Singleton readline interface for stdin prompts
@@ -1641,10 +1642,18 @@ program
    }
  });

+ // Build help text showing all checks from dictionary with express-mode indicators
+ const expressCheckIds = new Set(Object.keys(REPORT_GENERATORS));
+ const allChecks = getAllCheckupEntries();
+ const checkHelpLines = allChecks.map((entry) => {
+   const isExpress = expressCheckIds.has(entry.code);
+   return ` ${entry.code}: ${entry.title}${isExpress ? "" : " (*)"}`;
+ });
+
  program
    .command("checkup [conn]")
    .description("generate health check reports directly from PostgreSQL (express mode)")
-   .option("--check-id <id>", `specific check to run: ${Object.keys(CHECK_INFO).join(", ")}, or ALL`, "ALL")
+   .option("--check-id <id>", `specific check to run (see list below), or ALL`, "ALL")
    .option("--node-name <name>", "node name for reports", "node-01")
    .option("--output <path>", "output directory for JSON files")
    .option("--[no-]upload", "upload JSON results to PostgresAI (default: enabled; requires API key)", undefined)
@@ -1658,7 +1667,10 @@ program
    [
      "",
      "Available checks:",
-     ...Object.entries(CHECK_INFO).map(([id, title]) => ` ${id}: ${title}`),
+     ...checkHelpLines,
+     "",
+     " (*) = not yet available in express mode (coming soon)",
+     " ALL = run all express-mode checks",
      "",
      "Examples:",
      "  postgresai checkup postgresql://user:pass@host:5432/db",
@@ -1725,8 +1737,16 @@ program
        const generator = REPORT_GENERATORS[checkId];
        if (!generator) {
          spinner.stop();
-         console.error(`Unknown check ID: ${opts.checkId}`);
-         console.error(`Available: ${Object.keys(CHECK_INFO).join(", ")}, ALL`);
+         // Check if it's a valid check ID from the dictionary (just not implemented in express mode)
+         const dictEntry = getCheckupEntry(checkId);
+         if (dictEntry) {
+           console.error(`Check ${checkId} (${dictEntry.title}) is not yet available in express mode.`);
+           console.error(`Express-mode checks: ${Object.keys(CHECK_INFO).join(", ")}`);
+           console.error(`\nFull checkup reports are available at: https://postgres.ai/checkup`);
+         } else {
+           console.error(`Unknown check ID: ${opts.checkId}`);
+           console.error(`See 'postgresai checkup --help' for available checks.`);
+         }
          process.exitCode = 1;
          return;
        }
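This branch distinguishes two failure modes. Illustratively, assuming F002 remains dictionary-only and REPORT_GENERATORS key order is as listed elsewhere in this diff, `postgresai checkup --check-id F002 <conn>` now prints:

Check F002 (Autovacuum: transaction ID wraparound) is not yet available in express mode.
Express-mode checks: A002, A003, A004, A007, A013, D004, F001, G001, H001, H002, H004
Full checkup reports are available at: https://postgres.ai/checkup

A code absent from the dictionary entirely (say, Z999) still gets the generic "Unknown check ID" message; both paths set the process exit code to 1.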
package/dist/bin/postgres-ai.js CHANGED
@@ -13064,7 +13064,7 @@ var {
  // package.json
  var package_default = {
    name: "postgresai",
-   version: "0.14.0-dev.76",
+   version: "0.14.0-dev.78",
    description: "postgres_ai CLI",
    license: "Apache-2.0",
    private: false,
@@ -13090,15 +13090,17 @@ var package_default = {
  },
  scripts: {
    "embed-metrics": "bun run scripts/embed-metrics.ts",
-   build: `bun run embed-metrics && bun build ./bin/postgres-ai.ts --outdir ./dist/bin --target node && node -e "const fs=require('fs');const f='./dist/bin/postgres-ai.js';fs.writeFileSync(f,fs.readFileSync(f,'utf8').replace('#!/usr/bin/env bun','#!/usr/bin/env node'))" && cp -r ./sql ./dist/sql`,
+   "embed-checkup-dictionary": "bun run scripts/embed-checkup-dictionary.ts",
+   "embed-all": "bun run embed-metrics && bun run embed-checkup-dictionary",
+   build: `bun run embed-all && bun build ./bin/postgres-ai.ts --outdir ./dist/bin --target node && node -e "const fs=require('fs');const f='./dist/bin/postgres-ai.js';fs.writeFileSync(f,fs.readFileSync(f,'utf8').replace('#!/usr/bin/env bun','#!/usr/bin/env node'))" && cp -r ./sql ./dist/sql`,
    prepublishOnly: "npm run build",
    start: "bun ./bin/postgres-ai.ts --help",
    "start:node": "node ./dist/bin/postgres-ai.js --help",
-   dev: "bun run embed-metrics && bun --watch ./bin/postgres-ai.ts",
-   test: "bun run embed-metrics && bun test",
-   "test:fast": "bun run embed-metrics && bun test --coverage=false",
-   "test:coverage": "bun run embed-metrics && bun test --coverage && echo 'Coverage report: cli/coverage/lcov-report/index.html'",
-   typecheck: "bun run embed-metrics && bunx tsc --noEmit"
+   dev: "bun run embed-all && bun --watch ./bin/postgres-ai.ts",
+   test: "bun run embed-all && bun test",
+   "test:fast": "bun run embed-all && bun test",
+   "test:coverage": "bun run embed-all && bun test --coverage && echo 'Coverage report: cli/coverage/lcov-report/index.html'",
+   typecheck: "bun run embed-all && bunx tsc --noEmit"
  },
  dependencies: {
    "@modelcontextprotocol/sdk": "^1.20.2",
@@ -15887,7 +15889,7 @@ var Result = import_lib.default.Result;
  var TypeOverrides = import_lib.default.TypeOverrides;
  var defaults = import_lib.default.defaults;
  // package.json
- var version = "0.14.0-dev.76";
+ var version = "0.14.0-dev.78";
  var package_default2 = {
    name: "postgresai",
    version,
@@ -15916,15 +15918,17 @@ var package_default2 = {
  },
  scripts: {
    "embed-metrics": "bun run scripts/embed-metrics.ts",
-   build: `bun run embed-metrics && bun build ./bin/postgres-ai.ts --outdir ./dist/bin --target node && node -e "const fs=require('fs');const f='./dist/bin/postgres-ai.js';fs.writeFileSync(f,fs.readFileSync(f,'utf8').replace('#!/usr/bin/env bun','#!/usr/bin/env node'))" && cp -r ./sql ./dist/sql`,
+   "embed-checkup-dictionary": "bun run scripts/embed-checkup-dictionary.ts",
+   "embed-all": "bun run embed-metrics && bun run embed-checkup-dictionary",
+   build: `bun run embed-all && bun build ./bin/postgres-ai.ts --outdir ./dist/bin --target node && node -e "const fs=require('fs');const f='./dist/bin/postgres-ai.js';fs.writeFileSync(f,fs.readFileSync(f,'utf8').replace('#!/usr/bin/env bun','#!/usr/bin/env node'))" && cp -r ./sql ./dist/sql`,
    prepublishOnly: "npm run build",
    start: "bun ./bin/postgres-ai.ts --help",
    "start:node": "node ./dist/bin/postgres-ai.js --help",
-   dev: "bun run embed-metrics && bun --watch ./bin/postgres-ai.ts",
-   test: "bun run embed-metrics && bun test",
-   "test:fast": "bun run embed-metrics && bun test --coverage=false",
-   "test:coverage": "bun run embed-metrics && bun test --coverage && echo 'Coverage report: cli/coverage/lcov-report/index.html'",
-   typecheck: "bun run embed-metrics && bunx tsc --noEmit"
+   dev: "bun run embed-all && bun --watch ./bin/postgres-ai.ts",
+   test: "bun run embed-all && bun test",
+   "test:fast": "bun run embed-all && bun test",
+   "test:coverage": "bun run embed-all && bun test --coverage && echo 'Coverage report: cli/coverage/lcov-report/index.html'",
+   typecheck: "bun run embed-all && bunx tsc --noEmit"
  },
  dependencies: {
    "@modelcontextprotocol/sdk": "^1.20.2",
@@ -26542,6 +26546,572 @@ function transformMetricRow(row) {
  return result;
  }

+ // lib/checkup-dictionary-embedded.ts
+ var CHECKUP_DICTIONARY_DATA = [
+   {
+     code: "A001",
+     title: "System information",
+     description: "OS, kernel, hardware details",
+     category: "system",
+     sort_order: null,
+     is_system_report: false
+   },
+   {
+     code: "A002",
+     title: "Postgres major version",
+     description: "Major version and end-of-life status",
+     category: "system",
+     sort_order: null,
+     is_system_report: false
+   },
+   {
+     code: "A003",
+     title: "Postgres settings",
+     description: "Full list of current settings",
+     category: "system",
+     sort_order: null,
+     is_system_report: false
+   },
+   {
+     code: "A004",
+     title: "Cluster information",
+     description: "Data directory, cluster name, system identifier",
+     category: "system",
+     sort_order: null,
+     is_system_report: false
+   },
+   {
+     code: "A005",
+     title: "Extensions",
+     description: "Installed and available extensions",
+     category: "system",
+     sort_order: null,
+     is_system_report: false
+   },
+   {
+     code: "A006",
+     title: "Postgres setting deviations",
+     description: "Settings that differ from defaults",
+     category: "system",
+     sort_order: null,
+     is_system_report: false
+   },
+   {
+     code: "A007",
+     title: "Altered settings",
+     description: "Settings changed via ALTER SYSTEM or per-user/database",
+     category: "system",
+     sort_order: null,
+     is_system_report: false
+   },
+   {
+     code: "A008",
+     title: "Disk usage and file system type",
+     description: "Disk space, mount points, filesystem types",
+     category: "system",
+     sort_order: null,
+     is_system_report: false
+   },
+   {
+     code: "A009",
+     title: "Tablespaces",
+     description: "Tablespace locations and usage",
+     category: "system",
+     sort_order: null,
+     is_system_report: false
+   },
+   {
+     code: "A010",
+     title: "Corruption control",
+     description: "Checksums, wal_log_hints, backup safety",
+     category: "system",
+     sort_order: null,
+     is_system_report: false
+   },
+   {
+     code: "A011",
+     title: "Connection management and pooling",
+     description: "Connection limits and pooler detection",
+     category: "system",
+     sort_order: null,
+     is_system_report: false
+   },
+   {
+     code: "A012",
+     title: "Anti-crash checks",
+     description: "fsync, full_page_writes, synchronous_commit",
+     category: "system",
+     sort_order: null,
+     is_system_report: false
+   },
+   {
+     code: "A013",
+     title: "Postgres minor version",
+     description: "Minor version and available updates",
+     category: "system",
+     sort_order: null,
+     is_system_report: false
+   },
+   {
+     code: "B001",
+     title: "SLO/SLA, RPO, RTO",
+     description: "Recovery objectives and current capabilities",
+     category: "backup",
+     sort_order: null,
+     is_system_report: false
+   },
+   {
+     code: "B002",
+     title: "File system and mount flags",
+     description: "Mount options affecting durability",
+     category: "backup",
+     sort_order: null,
+     is_system_report: false
+   },
+   {
+     code: "B003",
+     title: "Full and incremental backups",
+     description: "Backup tooling and schedule",
+     category: "backup",
+     sort_order: null,
+     is_system_report: false
+   },
+   {
+     code: "B004",
+     title: "WAL archiving",
+     description: "Archive configuration and status",
+     category: "backup",
+     sort_order: null,
+     is_system_report: false
+   },
+   {
+     code: "B005",
+     title: "Backup restore testing and monitoring",
+     description: "Restore testing practices and alerting",
+     category: "backup",
+     sort_order: null,
+     is_system_report: false
+   },
+   {
+     code: "C001",
+     title: "SLO/SLA",
+     description: "Availability objectives",
+     category: "replication",
+     sort_order: null,
+     is_system_report: false
+   },
+   {
+     code: "C002",
+     title: "Replication mode and method",
+     description: "Sync/async, streaming/WAL shipping",
+     category: "replication",
+     sort_order: null,
+     is_system_report: false
+   },
+   {
+     code: "C003",
+     title: "Single points of failure",
+     description: "SPOF analysis",
+     category: "replication",
+     sort_order: null,
+     is_system_report: false
+   },
+   {
+     code: "C004",
+     title: "Failover",
+     description: "Failover mechanism and automation",
+     category: "replication",
+     sort_order: null,
+     is_system_report: false
+   },
+   {
+     code: "C005",
+     title: "Switchover",
+     description: "Planned switchover procedures",
+     category: "replication",
+     sort_order: null,
+     is_system_report: false
+   },
+   {
+     code: "C006",
+     title: "Delayed replica",
+     description: "Delayed standby configuration",
+     category: "replication",
+     sort_order: null,
+     is_system_report: false
+   },
+   {
+     code: "C007",
+     title: "Replication slots and lag",
+     description: "Slot status, lag, standby feedback",
+     category: "replication",
+     sort_order: null,
+     is_system_report: false
+   },
+   {
+     code: "D001",
+     title: "Logging settings",
+     description: "Log destination, verbosity, rotation",
+     category: "monitoring",
+     sort_order: null,
+     is_system_report: false
+   },
+   {
+     code: "D002",
+     title: "Useful Linux tools",
+     description: "Recommended OS-level tooling",
+     category: "monitoring",
+     sort_order: null,
+     is_system_report: false
+   },
+   {
+     code: "D003",
+     title: "Monitoring metrics",
+     description: "Key metrics to track",
+     category: "monitoring",
+     sort_order: null,
+     is_system_report: false
+   },
+   {
+     code: "D004",
+     title: "pg_stat_statements and pg_stat_kcache",
+     description: "Query stats extension settings",
+     category: "monitoring",
+     sort_order: null,
+     is_system_report: false
+   },
+   {
+     code: "D005",
+     title: "track_io_timing and auto_explain",
+     description: "I/O timing and plan logging",
+     category: "monitoring",
+     sort_order: null,
+     is_system_report: false
+   },
+   {
+     code: "D006",
+     title: "Recommended DBA toolsets",
+     description: "Useful third-party tools",
+     category: "monitoring",
+     sort_order: null,
+     is_system_report: false
+   },
+   {
+     code: "D007",
+     title: "Postgres troubleshooting tools",
+     description: "Built-in and ecosystem diagnostic tools",
+     category: "monitoring",
+     sort_order: null,
+     is_system_report: false
+   },
+   {
+     code: "E001",
+     title: "WAL and checkpoint settings",
+     description: "WAL size, checkpoint timing, I/O limits",
+     category: "wal",
+     sort_order: null,
+     is_system_report: false
+   },
+   {
+     code: "E002",
+     title: "Checkpoint and bgwriter activity",
+     description: "Checkpoint frequency, bgwriter stats",
+     category: "wal",
+     sort_order: null,
+     is_system_report: false
+   },
+   {
+     code: "F001",
+     title: "Autovacuum: current settings",
+     description: "Autovacuum configuration",
+     category: "vacuum",
+     sort_order: null,
+     is_system_report: false
+   },
+   {
+     code: "F002",
+     title: "Autovacuum: transaction ID wraparound",
+     description: "XID age and wraparound risk",
+     category: "vacuum",
+     sort_order: null,
+     is_system_report: false
+   },
+   {
+     code: "F003",
+     title: "Autovacuum: dead tuples",
+     description: "Dead tuple counts and cleanup status",
+     category: "vacuum",
+     sort_order: null,
+     is_system_report: false
+   },
+   {
+     code: "F004",
+     title: "Autovacuum: heap bloat estimate",
+     description: "Estimated table bloat",
+     category: "vacuum",
+     sort_order: null,
+     is_system_report: false
+   },
+   {
+     code: "F005",
+     title: "Autovacuum: index bloat estimate",
+     description: "Estimated index bloat",
+     category: "vacuum",
+     sort_order: null,
+     is_system_report: false
+   },
+   {
+     code: "F006",
+     title: "Precise heap bloat analysis",
+     description: "Detailed table bloat measurement",
+     category: "vacuum",
+     sort_order: null,
+     is_system_report: false
+   },
+   {
+     code: "F007",
+     title: "Precise index bloat analysis",
+     description: "Detailed index bloat measurement",
+     category: "vacuum",
+     sort_order: null,
+     is_system_report: false
+   },
+   {
+     code: "F008",
+     title: "Autovacuum: resource usage",
+     description: "Autovacuum I/O and CPU impact",
+     category: "vacuum",
+     sort_order: null,
+     is_system_report: false
+   },
+   {
+     code: "G001",
+     title: "Memory-related settings",
+     description: "shared_buffers, work_mem, maintenance_work_mem",
+     category: "tuning",
+     sort_order: null,
+     is_system_report: false
+   },
+   {
+     code: "G002",
+     title: "Connections and current activity",
+     description: "Connection usage and active sessions",
+     category: "tuning",
+     sort_order: null,
+     is_system_report: false
+   },
+   {
+     code: "G003",
+     title: "Timeouts, locks, deadlocks",
+     description: "Lock settings and deadlock stats",
+     category: "tuning",
+     sort_order: null,
+     is_system_report: false
+   },
+   {
+     code: "G004",
+     title: "Query planner settings",
+     description: "Planner cost and behavior settings",
+     category: "tuning",
+     sort_order: null,
+     is_system_report: false
+   },
+   {
+     code: "G005",
+     title: "I/O settings",
+     description: "effective_io_concurrency, random_page_cost",
+     category: "tuning",
+     sort_order: null,
+     is_system_report: false
+   },
+   {
+     code: "G006",
+     title: "Statistics target settings",
+     description: "default_statistics_target and per-column targets",
+     category: "tuning",
+     sort_order: null,
+     is_system_report: false
+   },
+   {
+     code: "H001",
+     title: "Invalid indexes",
+     description: "Indexes that failed to build",
+     category: "indexes",
+     sort_order: null,
+     is_system_report: false
+   },
+   {
+     code: "H002",
+     title: "Unused indexes",
+     description: "Indexes with no scans",
+     category: "indexes",
+     sort_order: null,
+     is_system_report: false
+   },
+   {
+     code: "H003",
+     title: "Non-indexed foreign keys",
+     description: "FKs missing supporting indexes",
+     category: "indexes",
+     sort_order: null,
+     is_system_report: false
+   },
+   {
+     code: "H004",
+     title: "Redundant indexes",
+     description: "Indexes covered by other indexes",
+     category: "indexes",
+     sort_order: null,
+     is_system_report: false
+   },
+   {
+     code: "J001",
+     title: "Capacity planning",
+     description: "Growth trends and resource projections",
+     category: "capacity",
+     sort_order: null,
+     is_system_report: false
+   },
+   {
+     code: "K001",
+     title: "Globally aggregated query metrics",
+     description: "Overall query stats summary",
+     category: "queries",
+     sort_order: null,
+     is_system_report: false
+   },
+   {
+     code: "K002",
+     title: "Workload type",
+     description: "Read/write/mixed workload classification",
+     category: "queries",
+     sort_order: null,
+     is_system_report: false
+   },
+   {
+     code: "K003",
+     title: "Top queries by total time",
+     description: "Highest total execution + planning time",
+     category: "queries",
+     sort_order: null,
+     is_system_report: false
+   },
+   {
+     code: "K004",
+     title: "Top queries by temp bytes written",
+     description: "Queries spilling to disk",
+     category: "queries",
+     sort_order: null,
+     is_system_report: false
+   },
+   {
+     code: "K005",
+     title: "Top queries by WAL generation",
+     description: "Queries generating most WAL",
+     category: "queries",
+     sort_order: null,
+     is_system_report: false
+   },
+   {
+     code: "K006",
+     title: "Top queries by shared blocks read",
+     description: "Queries with most disk reads",
+     category: "queries",
+     sort_order: null,
+     is_system_report: false
+   },
+   {
+     code: "K007",
+     title: "Top queries by shared blocks hit",
+     description: "Queries with most buffer hits",
+     category: "queries",
+     sort_order: null,
+     is_system_report: false
+   },
+   {
+     code: "K008",
+     title: "Top queries by shared blocks accessed",
+     description: "Queries touching most data",
+     category: "queries",
+     sort_order: null,
+     is_system_report: false
+   },
+   {
+     code: "L001",
+     title: "Table sizes",
+     description: "Table and toast sizes",
+     category: "schema",
+     sort_order: null,
+     is_system_report: false
+   },
+   {
+     code: "L002",
+     title: "Data types being used",
+     description: "Column type distribution",
+     category: "schema",
+     sort_order: null,
+     is_system_report: false
+   },
+   {
+     code: "L003",
+     title: "Integer overflow risks in primary keys",
+     description: "PKs approaching integer limits",
+     category: "schema",
+     sort_order: null,
+     is_system_report: false
+   },
+   {
+     code: "L004",
+     title: "Tables without primary key",
+     description: "Tables missing PK or unique constraint",
+     category: "schema",
+     sort_order: null,
+     is_system_report: false
+   },
+   {
+     code: "M001",
+     title: "Top queries by mean execution time",
+     description: "Slowest queries on average",
+     category: "queries",
+     sort_order: null,
+     is_system_report: false
+   },
+   {
+     code: "M002",
+     title: "Top queries by rows processed",
+     description: "Queries touching most rows",
+     category: "queries",
+     sort_order: null,
+     is_system_report: false
+   },
+   {
+     code: "M003",
+     title: "Top queries by I/O time",
+     description: "Queries with highest I/O wait",
+     category: "queries",
+     sort_order: null,
+     is_system_report: false
+   },
+   {
+     code: "N001",
+     title: "Wait events by type and query",
+     description: "Wait event analysis",
+     category: "waits",
+     sort_order: null,
+     is_system_report: false
+   }
+ ];
+
+ // lib/checkup-dictionary.ts
+ var dictionaryByCode = new Map(CHECKUP_DICTIONARY_DATA.map((entry) => [entry.code, entry]));
+ function buildCheckInfoMap() {
+   const result = {};
+   for (const entry of CHECKUP_DICTIONARY_DATA) {
+     result[entry.code] = entry.title;
+   }
+   return result;
+ }
+
  // lib/checkup.ts
  var __dirname = "/builds/postgres-ai/postgres_ai/cli/lib";
  var SECONDS_PER_DAY = 86400;
@@ -27298,19 +27868,15 @@ var REPORT_GENERATORS = {
  H002: generateH002,
  H004: generateH004
  };
- var CHECK_INFO = {
-   A002: "Postgres major version",
-   A003: "Postgres settings",
-   A004: "Cluster information",
-   A007: "Altered settings",
-   A013: "Postgres minor version",
-   D004: "pg_stat_statements and pg_stat_kcache settings",
-   F001: "Autovacuum: current settings",
-   G001: "Memory-related settings",
-   H001: "Invalid indexes",
-   H002: "Unused indexes",
-   H004: "Redundant indexes"
- };
+ var CHECK_INFO = (() => {
+   const fullMap = buildCheckInfoMap();
+   const expressCheckIds = Object.keys(REPORT_GENERATORS);
+   const filtered = {};
+   for (const checkId of expressCheckIds) {
+     filtered[checkId] = fullMap[checkId] || checkId;
+   }
+   return filtered;
+ })();
  async function generateAllReports(client, nodeName = "node-01", onProgress) {
    const reports = {};
    const entries = Object.entries(REPORT_GENERATORS);
@@ -27329,6 +27895,15 @@ async function generateAllReports(client, nodeName = "node-01", onProgress) {
  return reports;
  }

+ // lib/checkup-dictionary.ts
+ var dictionaryByCode2 = new Map(CHECKUP_DICTIONARY_DATA.map((entry) => [entry.code, entry]));
+ function getAllCheckupEntries() {
+   return CHECKUP_DICTIONARY_DATA;
+ }
+ function getCheckupEntry(code) {
+   return dictionaryByCode2.get(code.toUpperCase()) ?? null;
+ }
+
  // lib/checkup-api.ts
  import * as https from "https";
  import { URL as URL3 } from "url";
@@ -28835,10 +29410,19 @@ program2.command("unprepare-db [conn]").description("remove monitoring setup: dr
  closeReadline();
  }
  });
- program2.command("checkup [conn]").description("generate health check reports directly from PostgreSQL (express mode)").option("--check-id <id>", `specific check to run: ${Object.keys(CHECK_INFO).join(", ")}, or ALL`, "ALL").option("--node-name <name>", "node name for reports", "node-01").option("--output <path>", "output directory for JSON files").option("--[no-]upload", "upload JSON results to PostgresAI (default: enabled; requires API key)", undefined).option("--project <project>", "project name or ID for remote upload (used with --upload; defaults to config defaultProject; auto-generated on first run)").option("--json", "output JSON to stdout (implies --no-upload)").addHelpText("after", [
+ var expressCheckIds = new Set(Object.keys(REPORT_GENERATORS));
+ var allChecks = getAllCheckupEntries();
+ var checkHelpLines = allChecks.map((entry) => {
+   const isExpress = expressCheckIds.has(entry.code);
+   return ` ${entry.code}: ${entry.title}${isExpress ? "" : " (*)"}`;
+ });
+ program2.command("checkup [conn]").description("generate health check reports directly from PostgreSQL (express mode)").option("--check-id <id>", `specific check to run (see list below), or ALL`, "ALL").option("--node-name <name>", "node name for reports", "node-01").option("--output <path>", "output directory for JSON files").option("--[no-]upload", "upload JSON results to PostgresAI (default: enabled; requires API key)", undefined).option("--project <project>", "project name or ID for remote upload (used with --upload; defaults to config defaultProject; auto-generated on first run)").option("--json", "output JSON to stdout (implies --no-upload)").addHelpText("after", [
  "",
  "Available checks:",
- ...Object.entries(CHECK_INFO).map(([id, title]) => ` ${id}: ${title}`),
+ ...checkHelpLines,
+ "",
+ " (*) = not yet available in express mode (coming soon)",
+ " ALL = run all express-mode checks",
  "",
  "Examples:",
  " postgresai checkup postgresql://user:pass@host:5432/db",
@@ -28894,8 +29478,16 @@ program2.command("checkup [conn]").description("generate health check reports di
  const generator = REPORT_GENERATORS[checkId];
  if (!generator) {
    spinner.stop();
-   console.error(`Unknown check ID: ${opts.checkId}`);
-   console.error(`Available: ${Object.keys(CHECK_INFO).join(", ")}, ALL`);
+   const dictEntry = getCheckupEntry(checkId);
+   if (dictEntry) {
+     console.error(`Check ${checkId} (${dictEntry.title}) is not yet available in express mode.`);
+     console.error(`Express-mode checks: ${Object.keys(CHECK_INFO).join(", ")}`);
+     console.error(`
+ Full checkup reports are available at: https://postgres.ai/checkup`);
+   } else {
+     console.error(`Unknown check ID: ${opts.checkId}`);
+     console.error(`See 'postgresai checkup --help' for available checks.`);
+   }
    process.exitCode = 1;
    return;
  }
package/lib/checkup-dictionary.ts ADDED
@@ -0,0 +1,113 @@
+ /**
+  * Checkup Dictionary Module
+  * =========================
+  * Provides access to the checkup report dictionary data embedded at build time.
+  *
+  * The dictionary is fetched from https://postgres.ai/api/general/checkup_dictionary
+  * during the build process and embedded into checkup-dictionary-embedded.ts.
+  *
+  * This ensures no API calls are made at runtime while keeping the data up-to-date.
+  */
+
+ import { CHECKUP_DICTIONARY_DATA } from "./checkup-dictionary-embedded";
+
+ /**
+  * A checkup dictionary entry describing a single check type.
+  */
+ export interface CheckupDictionaryEntry {
+   /** Unique check code (e.g., "A001", "H002") */
+   code: string;
+   /** Human-readable title for the check */
+   title: string;
+   /** Brief description of what the check covers */
+   description: string;
+   /** Category grouping (e.g., "system", "indexes", "vacuum") */
+   category: string;
+   /** Optional sort order within category */
+   sort_order: number | null;
+   /** Whether this is a system-level report */
+   is_system_report: boolean;
+ }
+
+ /**
+  * Module-level cache for O(1) lookups by code.
+  * Initialized at module load time from embedded data.
+  */
+ const dictionaryByCode: Map<string, CheckupDictionaryEntry> = new Map(
+   CHECKUP_DICTIONARY_DATA.map((entry) => [entry.code, entry])
+ );
+
+ /**
+  * Get all checkup dictionary entries.
+  *
+  * @returns Array of all checkup dictionary entries
+  */
+ export function getAllCheckupEntries(): CheckupDictionaryEntry[] {
+   return CHECKUP_DICTIONARY_DATA;
+ }
+
+ /**
+  * Get a checkup dictionary entry by its code.
+  *
+  * @param code - The check code (e.g., "A001", "H002")
+  * @returns The dictionary entry or null if not found
+  */
+ export function getCheckupEntry(code: string): CheckupDictionaryEntry | null {
+   return dictionaryByCode.get(code.toUpperCase()) ?? null;
+ }
+
+ /**
+  * Get the title for a checkup code.
+  *
+  * @param code - The check code (e.g., "A001", "H002")
+  * @returns The title or the code itself if not found
+  */
+ export function getCheckupTitle(code: string): string {
+   const entry = getCheckupEntry(code);
+   return entry?.title ?? code;
+ }
+
+ /**
+  * Check if a code exists in the dictionary.
+  *
+  * @param code - The check code to validate
+  * @returns True if the code exists in the dictionary
+  */
+ export function isValidCheckupCode(code: string): boolean {
+   return dictionaryByCode.has(code.toUpperCase());
+ }
+
+ /**
+  * Get all check codes as an array.
+  *
+  * @returns Array of all check codes (e.g., ["A001", "A002", ...])
+  */
+ export function getAllCheckupCodes(): string[] {
+   return CHECKUP_DICTIONARY_DATA.map((entry) => entry.code);
+ }
+
+ /**
+  * Get checkup entries filtered by category.
+  *
+  * @param category - The category to filter by (e.g., "indexes", "vacuum")
+  * @returns Array of entries in the specified category
+  */
+ export function getCheckupEntriesByCategory(category: string): CheckupDictionaryEntry[] {
+   return CHECKUP_DICTIONARY_DATA.filter(
+     (entry) => entry.category.toLowerCase() === category.toLowerCase()
+   );
+ }
+
+ /**
+  * Build a code-to-title mapping object.
+  * Useful for backwards compatibility with CHECK_INFO style usage.
+  *
+  * @returns Object mapping check codes to titles (e.g., { "A001": "System information", ... })
+  */
+ export function buildCheckInfoMap(): Record<string, string> {
+   const result: Record<string, string> = {};
+   for (const entry of CHECKUP_DICTIONARY_DATA) {
+     result[entry.code] = entry.title;
+   }
+   return result;
+ }
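Taken together, the module is a thin synchronous lookup layer over build-time data. A minimal consumer sketch (codes taken from the embedded dictionary above; Z999 is a deliberately invalid example):

import { getCheckupEntry, getCheckupTitle, isValidCheckupCode, getCheckupEntriesByCategory } from "./checkup-dictionary";

// Lookups uppercase the code first, so they are case-insensitive.
const entry = getCheckupEntry("h002");   // -> { code: "H002", title: "Unused indexes", ... }
const title = getCheckupTitle("H002");   // -> "Unused indexes" (falls back to the raw code if unknown)
const ok = isValidCheckupCode("Z999");   // -> false: not in the dictionary
const idx = getCheckupEntriesByCategory("indexes"); // -> the H001, H002, H003, H004 entries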
package/lib/checkup.ts CHANGED
@@ -51,6 +51,7 @@ import * as fs from "fs";
  import * as path from "path";
  import * as pkg from "../package.json";
  import { getMetricSql, transformMetricRow, METRIC_NAMES } from "./metrics-loader";
+ import { getCheckupTitle, buildCheckInfoMap } from "./checkup-dictionary";

  // Time constants
  const SECONDS_PER_DAY = 86400;
@@ -1344,21 +1345,27 @@ export const REPORT_GENERATORS: Record<string, (client: Client, nodeName: string
  };

  /**
-  * Check IDs and titles
+  * Check IDs and titles.
+  *
+  * This mapping is built from the embedded checkup dictionary, which is
+  * fetched from https://postgres.ai/api/general/checkup_dictionary at build time.
+  *
+  * For the full dictionary (all available checks), use the checkup-dictionary module.
+  * CHECK_INFO is filtered to only include checks that have express-mode generators.
   */
- export const CHECK_INFO: Record<string, string> = {
-   A002: "Postgres major version",
-   A003: "Postgres settings",
-   A004: "Cluster information",
-   A007: "Altered settings",
-   A013: "Postgres minor version",
-   D004: "pg_stat_statements and pg_stat_kcache settings",
-   F001: "Autovacuum: current settings",
-   G001: "Memory-related settings",
-   H001: "Invalid indexes",
-   H002: "Unused indexes",
-   H004: "Redundant indexes",
- };
+ export const CHECK_INFO: Record<string, string> = (() => {
+   // Build the full dictionary map
+   const fullMap = buildCheckInfoMap();
+
+   // Filter to only include checks that have express-mode generators
+   const expressCheckIds = Object.keys(REPORT_GENERATORS);
+   const filtered: Record<string, string> = {};
+   for (const checkId of expressCheckIds) {
+     // Use dictionary title if available, otherwise use a fallback
+     filtered[checkId] = fullMap[checkId] || checkId;
+   }
+   return filtered;
+ })();

  /**
   * Generate all available health check reports.
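One side effect worth noting: titles now come verbatim from the dictionary, so D004 resolves to "pg_stat_statements and pg_stat_kcache" rather than the old literal's "pg_stat_statements and pg_stat_kcache settings". The test changes later in this diff drop the exact-title assertion for D004 accordingly, asserting only that each express-mode check has a non-empty title.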
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "postgresai",
-   "version": "0.14.0-dev.76",
+   "version": "0.14.0-dev.78",
    "description": "postgres_ai CLI",
    "license": "Apache-2.0",
    "private": false,
@@ -26,15 +26,17 @@
  },
  "scripts": {
    "embed-metrics": "bun run scripts/embed-metrics.ts",
-   "build": "bun run embed-metrics && bun build ./bin/postgres-ai.ts --outdir ./dist/bin --target node && node -e \"const fs=require('fs');const f='./dist/bin/postgres-ai.js';fs.writeFileSync(f,fs.readFileSync(f,'utf8').replace('#!/usr/bin/env bun','#!/usr/bin/env node'))\" && cp -r ./sql ./dist/sql",
+   "embed-checkup-dictionary": "bun run scripts/embed-checkup-dictionary.ts",
+   "embed-all": "bun run embed-metrics && bun run embed-checkup-dictionary",
+   "build": "bun run embed-all && bun build ./bin/postgres-ai.ts --outdir ./dist/bin --target node && node -e \"const fs=require('fs');const f='./dist/bin/postgres-ai.js';fs.writeFileSync(f,fs.readFileSync(f,'utf8').replace('#!/usr/bin/env bun','#!/usr/bin/env node'))\" && cp -r ./sql ./dist/sql",
    "prepublishOnly": "npm run build",
    "start": "bun ./bin/postgres-ai.ts --help",
    "start:node": "node ./dist/bin/postgres-ai.js --help",
-   "dev": "bun run embed-metrics && bun --watch ./bin/postgres-ai.ts",
-   "test": "bun run embed-metrics && bun test",
-   "test:fast": "bun run embed-metrics && bun test --coverage=false",
-   "test:coverage": "bun run embed-metrics && bun test --coverage && echo 'Coverage report: cli/coverage/lcov-report/index.html'",
-   "typecheck": "bun run embed-metrics && bunx tsc --noEmit"
+   "dev": "bun run embed-all && bun --watch ./bin/postgres-ai.ts",
+   "test": "bun run embed-all && bun test",
+   "test:fast": "bun run embed-all && bun test",
+   "test:coverage": "bun run embed-all && bun test --coverage && echo 'Coverage report: cli/coverage/lcov-report/index.html'",
+   "typecheck": "bun run embed-all && bunx tsc --noEmit"
  },
  "dependencies": {
    "@modelcontextprotocol/sdk": "^1.20.2",
package/scripts/embed-checkup-dictionary.ts ADDED
@@ -0,0 +1,106 @@
+ #!/usr/bin/env bun
+ /**
+  * Build script to fetch checkup dictionary from API and embed it.
+  *
+  * This script fetches from https://postgres.ai/api/general/checkup_dictionary
+  * and generates cli/lib/checkup-dictionary-embedded.ts with the data embedded.
+  *
+  * The generated file is NOT committed to git - it's regenerated at build time.
+  *
+  * Usage: bun run scripts/embed-checkup-dictionary.ts
+  */
+
+ import * as fs from "fs";
+ import * as path from "path";
+
+ // API endpoint - always available without auth
+ const DICTIONARY_URL = "https://postgres.ai/api/general/checkup_dictionary";
+
+ // Output path relative to cli/ directory
+ const CLI_DIR = path.resolve(__dirname, "..");
+ const OUTPUT_PATH = path.resolve(CLI_DIR, "lib/checkup-dictionary-embedded.ts");
+
+ // Request timeout (10 seconds)
+ const FETCH_TIMEOUT_MS = 10_000;
+
+ interface CheckupDictionaryEntry {
+   code: string;
+   title: string;
+   description: string;
+   category: string;
+   sort_order: number | null;
+   is_system_report: boolean;
+ }
+
+ function generateTypeScript(data: CheckupDictionaryEntry[], sourceUrl: string): string {
+   const lines: string[] = [
+     "// AUTO-GENERATED FILE - DO NOT EDIT",
+     `// Generated from: ${sourceUrl}`,
+     `// Generated at: ${new Date().toISOString()}`,
+     "// To regenerate: bun run embed-checkup-dictionary",
+     "",
+     'import { CheckupDictionaryEntry } from "./checkup-dictionary";',
+     "",
+     "/**",
+     " * Embedded checkup dictionary data fetched from API at build time.",
+     " * Contains all available checkup report codes, titles, and metadata.",
+     " */",
+     `export const CHECKUP_DICTIONARY_DATA: CheckupDictionaryEntry[] = ${JSON.stringify(data, null, 2)};`,
+     "",
+   ];
+   return lines.join("\n");
+ }
+
+ async function fetchWithTimeout(url: string, timeoutMs: number): Promise<Response> {
+   const controller = new AbortController();
+   const timeoutId = setTimeout(() => controller.abort(), timeoutMs);
+
+   try {
+     const response = await fetch(url, { signal: controller.signal });
+     return response;
+   } finally {
+     clearTimeout(timeoutId);
+   }
+ }
+
+ async function main() {
+   console.log(`Fetching checkup dictionary from: ${DICTIONARY_URL}`);
+
+   try {
+     const response = await fetchWithTimeout(DICTIONARY_URL, FETCH_TIMEOUT_MS);
+
+     if (!response.ok) {
+       throw new Error(`HTTP ${response.status}: ${response.statusText}`);
+     }
+
+     const data: CheckupDictionaryEntry[] = await response.json();
+
+     if (!Array.isArray(data)) {
+       throw new Error("Expected array response from API");
+     }
+
+     // Validate entries have required fields
+     for (const entry of data) {
+       if (!entry.code || !entry.title) {
+         throw new Error(`Invalid entry missing code or title: ${JSON.stringify(entry)}`);
+       }
+     }
+
+     const tsCode = generateTypeScript(data, DICTIONARY_URL);
+     fs.writeFileSync(OUTPUT_PATH, tsCode, "utf8");
+
+     console.log(`Generated: ${OUTPUT_PATH}`);
+     console.log(`Dictionary contains ${data.length} entries`);
+   } catch (err) {
+     const errorMsg = err instanceof Error ? err.message : String(err);
+     console.warn(`Warning: Failed to fetch checkup dictionary: ${errorMsg}`);
+     console.warn("Generating empty dictionary as fallback");
+
+     // Generate empty dictionary to allow build to proceed
+     const fallbackTs = generateTypeScript([], `N/A (fetch failed: ${errorMsg})`);
+     fs.writeFileSync(OUTPUT_PATH, fallbackTs, "utf8");
+     console.log(`Generated fallback dictionary at ${OUTPUT_PATH}`);
+   }
+ }
+
+ main();
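For reference, generateTypeScript above emits a file of roughly this shape (timestamp illustrative, entries abridged). Since CheckupDictionaryEntry is an interface, the import back into checkup-dictionary is erased at compile time, so the apparent module cycle is harmless at runtime:

// AUTO-GENERATED FILE - DO NOT EDIT
// Generated from: https://postgres.ai/api/general/checkup_dictionary
// Generated at: 2026-01-13T00:00:00.000Z
// To regenerate: bun run embed-checkup-dictionary

import { CheckupDictionaryEntry } from "./checkup-dictionary";

/**
 * Embedded checkup dictionary data fetched from API at build time.
 * Contains all available checkup report codes, titles, and metadata.
 */
export const CHECKUP_DICTIONARY_DATA: CheckupDictionaryEntry[] = [
  {
    "code": "A001",
    "title": "System information",
    "description": "OS, kernel, hardware details",
    "category": "system",
    "sort_order": null,
    "is_system_report": false
  },
  // ...remaining entries, serialized via JSON.stringify(data, null, 2)
];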
@@ -85,28 +85,27 @@ describe("createBaseReport", () => {

  // Tests for CHECK_INFO
  describe("CHECK_INFO and REPORT_GENERATORS", () => {
-   const expectedChecks: Record<string, string> = {
-     A002: "Postgres major version",
-     A003: "Postgres settings",
-     A004: "Cluster information",
-     A007: "Altered settings",
-     A013: "Postgres minor version",
-     D004: "pg_stat_statements and pg_stat_kcache settings",
-     F001: "Autovacuum: current settings",
-     G001: "Memory-related settings",
-     H001: "Invalid indexes",
-     H002: "Unused indexes",
-     H004: "Redundant indexes",
-   };
-
-   test("CHECK_INFO contains all expected checks with correct descriptions", () => {
-     for (const [checkId, description] of Object.entries(expectedChecks)) {
-       expect(checkup.CHECK_INFO[checkId]).toBe(description);
+   // Express-mode checks that have generators
+   const expressCheckIds = ["A002", "A003", "A004", "A007", "A013", "D004", "F001", "G001", "H001", "H002", "H004"];
+
+   test("CHECK_INFO contains all express-mode checks", () => {
+     for (const checkId of expressCheckIds) {
+       expect(checkup.CHECK_INFO[checkId]).toBeDefined();
+       expect(typeof checkup.CHECK_INFO[checkId]).toBe("string");
+       expect(checkup.CHECK_INFO[checkId].length).toBeGreaterThan(0);
      }
    });

+   test("CHECK_INFO titles are loaded from embedded dictionary", () => {
+     // Verify a few known titles match the API dictionary
+     // These are canonical titles from postgres.ai/api/general/checkup_dictionary
+     expect(checkup.CHECK_INFO["A002"]).toBe("Postgres major version");
+     expect(checkup.CHECK_INFO["H001"]).toBe("Invalid indexes");
+     expect(checkup.CHECK_INFO["H002"]).toBe("Unused indexes");
+   });
+
    test("REPORT_GENERATORS has function for each check", () => {
-     for (const checkId of Object.keys(expectedChecks)) {
+     for (const checkId of expressCheckIds) {
        expect(typeof checkup.REPORT_GENERATORS[checkId]).toBe("function");
      }
    });
@@ -1,79 +0,0 @@
1
- // AUTO-GENERATED FILE - DO NOT EDIT
2
- // Generated from config/pgwatch-prometheus/metrics.yml by scripts/embed-metrics.ts
3
- // Generated at: 2026-01-13T04:02:15.672Z
4
-
5
- /**
6
- * Metric definition from metrics.yml
7
- */
8
- export interface MetricDefinition {
9
- description?: string;
10
- sqls: Record<number, string>; // PG major version -> SQL query
11
- gauges?: string[];
12
- statement_timeout_seconds?: number;
13
- }
14
-
15
- /**
16
- * Embedded metrics for express mode reports.
17
- * Only includes metrics required for CLI checkup reports.
18
- */
19
- export const METRICS: Record<string, MetricDefinition> = {
20
- "settings": {
21
- description: "This metric collects various PostgreSQL server settings and configurations. It provides insights into the server's configuration, including version, memory settings, and other important parameters. This metric is useful for monitoring server settings and ensuring optimal performance. Note: For lock_timeout and statement_timeout, we use reset_val instead of setting because pgwatch overrides these during metric collection, which would mask the actual configured values.",
22
- sqls: {
23
- 11: "with base as ( /* pgwatch_generated */\n select\n name,\n -- Use reset_val for lock_timeout/statement_timeout because pgwatch overrides them\n -- during collection (lock_timeout=100ms, statement_timeout per-metric).\n case\n when name in ('lock_timeout', 'statement_timeout') then reset_val\n else setting\n end as effective_setting,\n unit,\n category,\n vartype,\n -- For lock_timeout/statement_timeout, compare reset_val with boot_val\n -- since source becomes 'session' during collection.\n case\n when name in ('lock_timeout', 'statement_timeout') then (reset_val = boot_val)\n else (source = 'default')\n end as is_default_bool\n from pg_settings\n), with_numeric as (\n select\n *,\n case\n when effective_setting ~ '^-?[0-9]+$' then effective_setting::bigint\n else null\n end as numeric_value\n from base\n)\nselect\n (extract(epoch from now()) * 1e9)::int8 as epoch_ns,\n current_database() as tag_datname,\n name as tag_setting_name,\n effective_setting as tag_setting_value,\n unit as tag_unit,\n category as tag_category,\n vartype as tag_vartype,\n numeric_value,\n case\n when numeric_value is null then null\n when unit = '8kB' then numeric_value * 8192\n when unit = 'kB' then numeric_value * 1024\n when unit = 'MB' then numeric_value * 1024 * 1024\n when unit = 'B' then numeric_value\n when unit = 'ms' then numeric_value::numeric / 1000\n when unit = 's' then numeric_value::numeric\n when unit = 'min' then numeric_value::numeric * 60\n else null\n end as setting_normalized,\n case unit\n when '8kB' then 'bytes'\n when 'kB' then 'bytes'\n when 'MB' then 'bytes'\n when 'B' then 'bytes'\n when 'ms' then 'seconds'\n when 's' then 'seconds'\n when 'min' then 'seconds'\n else null\n end as unit_normalized,\n case when is_default_bool then 1 else 0 end as is_default,\n 1 as configured\nfrom with_numeric",
24
- },
25
- gauges: ["*"],
26
- statement_timeout_seconds: 15,
27
- },
28
- "db_stats": {
29
- description: "Retrieves key statistics from the PostgreSQL `pg_stat_database` view, providing insights into the current database's performance. It returns the number of backends, transaction commits and rollbacks, buffer reads and hits, tuple statistics, conflicts, temporary files and bytes, deadlocks, block read and write times, postmaster uptime, backup duration, recovery status, system identifier, and invalid indexes. This metric helps administrators monitor database activity and performance.",
30
- sqls: {
31
- 11: "select /* pgwatch_generated */\n (extract(epoch from now()) * 1e9)::int8 as epoch_ns,\n current_database() as tag_datname,\n numbackends,\n xact_commit,\n xact_rollback,\n blks_read,\n blks_hit,\n tup_returned,\n tup_fetched,\n tup_inserted,\n tup_updated,\n tup_deleted,\n conflicts,\n temp_files,\n temp_bytes,\n deadlocks,\n blk_read_time,\n blk_write_time,\n extract(epoch from (now() - pg_postmaster_start_time()))::int8 as postmaster_uptime_s,\n case when pg_is_in_recovery() then 1 else 0 end as in_recovery_int,\n system_identifier::text as tag_sys_id,\n (select count(*) from pg_index i\n where not indisvalid\n and not exists ( /* leave out ones that are being actively rebuilt */\n select * from pg_locks l\n join pg_stat_activity a using (pid)\n where l.relation = i.indexrelid\n and a.state = 'active'\n and a.query ~* 'concurrently'\n )) as invalid_indexes\nfrom\n pg_stat_database, pg_control_system()\nwhere\n datname = current_database()",
32
- 12: "select /* pgwatch_generated */\n (extract(epoch from now()) * 1e9)::int8 as epoch_ns,\n current_database() as tag_datname,\n numbackends,\n xact_commit,\n xact_rollback,\n blks_read,\n blks_hit,\n tup_returned,\n tup_fetched,\n tup_inserted,\n tup_updated,\n tup_deleted,\n conflicts,\n temp_files,\n temp_bytes,\n deadlocks,\n blk_read_time,\n blk_write_time,\n extract(epoch from (now() - pg_postmaster_start_time()))::int8 as postmaster_uptime_s,\n extract(epoch from (now() - pg_backup_start_time()))::int8 as backup_duration_s,\n checksum_failures,\n extract(epoch from (now() - checksum_last_failure))::int8 as checksum_last_failure_s,\n case when pg_is_in_recovery() then 1 else 0 end as in_recovery_int,\n system_identifier::text as tag_sys_id,\n (select count(*) from pg_index i\n where not indisvalid\n and not exists ( /* leave out ones that are being actively rebuilt */\n select * from pg_locks l\n join pg_stat_activity a using (pid)\n where l.relation = i.indexrelid\n and a.state = 'active'\n and a.query ~* 'concurrently'\n )) as invalid_indexes\nfrom\n pg_stat_database, pg_control_system()\nwhere\n datname = current_database()",
33
- 14: "select /* pgwatch_generated */\n (extract(epoch from now()) * 1e9)::int8 as epoch_ns,\n current_database() as tag_datname,\n numbackends,\n xact_commit,\n xact_rollback,\n blks_read,\n blks_hit,\n tup_returned,\n tup_fetched,\n tup_inserted,\n tup_updated,\n tup_deleted,\n conflicts,\n temp_files,\n temp_bytes,\n deadlocks,\n blk_read_time,\n blk_write_time,\n extract(epoch from (now() - pg_postmaster_start_time()))::int8 as postmaster_uptime_s,\n extract(epoch from (now() - pg_backup_start_time()))::int8 as backup_duration_s,\n checksum_failures,\n extract(epoch from (now() - checksum_last_failure))::int8 as checksum_last_failure_s,\n case when pg_is_in_recovery() then 1 else 0 end as in_recovery_int,\n system_identifier::text as tag_sys_id,\n session_time::int8,\n active_time::int8,\n idle_in_transaction_time::int8,\n sessions,\n sessions_abandoned,\n sessions_fatal,\n sessions_killed,\n (select count(*) from pg_index i\n where not indisvalid\n and not exists ( /* leave out ones that are being actively rebuilt */\n select * from pg_locks l\n join pg_stat_activity a using (pid)\n where l.relation = i.indexrelid\n and a.state = 'active'\n and a.query ~* 'concurrently'\n )) as invalid_indexes\nfrom\n pg_stat_database, pg_control_system()\nwhere\n datname = current_database()",
34
- 15: "select /* pgwatch_generated */\n (extract(epoch from now()) * 1e9)::int8 as epoch_ns,\n current_database() as tag_datname,\n numbackends,\n xact_commit,\n xact_rollback,\n blks_read,\n blks_hit,\n tup_returned,\n tup_fetched,\n tup_inserted,\n tup_updated,\n tup_deleted,\n conflicts,\n temp_files,\n temp_bytes,\n deadlocks,\n blk_read_time,\n blk_write_time,\n extract(epoch from (now() - pg_postmaster_start_time()))::int8 as postmaster_uptime_s,\n checksum_failures,\n extract(epoch from (now() - checksum_last_failure))::int8 as checksum_last_failure_s,\n case when pg_is_in_recovery() then 1 else 0 end as in_recovery_int,\n system_identifier::text as tag_sys_id,\n session_time::int8,\n active_time::int8,\n idle_in_transaction_time::int8,\n sessions,\n sessions_abandoned,\n sessions_fatal,\n sessions_killed,\n (select count(*) from pg_index i\n where not indisvalid\n and not exists ( /* leave out ones that are being actively rebuilt */\n select * from pg_locks l\n join pg_stat_activity a using (pid)\n where l.relation = i.indexrelid\n and a.state = 'active'\n and a.query ~* 'concurrently'\n )) as invalid_indexes\nfrom\n pg_stat_database, pg_control_system()\nwhere\n datname = current_database()",
35
- },
36
- gauges: ["*"],
37
- statement_timeout_seconds: 15,
38
- },
39
- "db_size": {
40
- description: "Retrieves the size of the current database and the size of the `pg_catalog` schema, providing insights into the storage usage of the database. It returns the size in bytes for both the current database and the catalog schema. This metric helps administrators monitor database size and storage consumption.",
41
- sqls: {
42
- 11: "select /* pgwatch_generated */\n (extract(epoch from now()) * 1e9)::int8 as epoch_ns,\n current_database() as tag_datname,\n pg_database_size(current_database()) as size_b,\n (select sum(pg_total_relation_size(c.oid))::int8\n from pg_class c join pg_namespace n on n.oid = c.relnamespace\n where nspname = 'pg_catalog' and relkind = 'r'\n ) as catalog_size_b",
43
- },
44
- gauges: ["*"],
45
- statement_timeout_seconds: 300,
46
- },
47
- "pg_invalid_indexes": {
48
- description: "This metric identifies invalid indexes in the database with decision tree data for remediation. It provides insights into whether to DROP (if duplicate exists), RECREATE (if backs constraint), or flag as UNCERTAIN (if additional RCA is needed to check query plans). Decision tree: 1) Valid duplicate exists -> DROP, 2) Backs PK/UNIQUE constraint -> RECREATE, 3) Table < 10K rows -> RECREATE (small tables rebuild quickly, typically under 1 second), 4) Otherwise -> UNCERTAIN (need query plan analysis to assess impact).",
49
- sqls: {
50
- 11: "with fk_indexes as ( /* pgwatch_generated */\n select\n schemaname as schema_name,\n indexrelid,\n (indexrelid::regclass)::text as index_name,\n (relid::regclass)::text as table_name,\n (confrelid::regclass)::text as fk_table_ref,\n array_to_string(indclass, ', ') as opclasses\n from pg_stat_all_indexes\n join pg_index using (indexrelid)\n left join pg_constraint\n on array_to_string(indkey, ',') = array_to_string(conkey, ',')\n and schemaname = (connamespace::regnamespace)::text\n and conrelid = relid\n and contype = 'f'\n where idx_scan = 0\n and indisunique is false\n and conkey is not null\n),\n-- Find valid indexes that could be duplicates (same table, same columns)\nvalid_duplicates as (\n select\n inv.indexrelid as invalid_indexrelid,\n val.indexrelid as valid_indexrelid,\n (val.indexrelid::regclass)::text as valid_index_name,\n pg_get_indexdef(val.indexrelid) as valid_index_definition\n from pg_index inv\n join pg_index val on inv.indrelid = val.indrelid -- same table\n and inv.indkey = val.indkey -- same columns (in same order)\n and inv.indexrelid != val.indexrelid -- different index\n and val.indisvalid = true -- valid index\n where inv.indisvalid = false\n),\ndata as (\n select\n pci.relname as tag_index_name,\n pn.nspname as tag_schema_name,\n pct.relname as tag_table_name,\n coalesce(nullif(quote_ident(pn.nspname), 'public') || '.', '') || quote_ident(pct.relname) as tag_relation_name,\n pg_get_indexdef(pidx.indexrelid) as index_definition,\n pg_relation_size(pidx.indexrelid) as index_size_bytes,\n -- Constraint info\n pidx.indisprimary as is_pk,\n pidx.indisunique as is_unique,\n con.conname as constraint_name,\n -- Table row estimate\n pct.reltuples::bigint as table_row_estimate,\n -- Valid duplicate check\n (vd.valid_indexrelid is not null) as has_valid_duplicate,\n vd.valid_index_name,\n vd.valid_index_definition,\n -- FK support check\n ((\n select count(1)\n from fk_indexes fi\n where fi.fk_table_ref = pct.relname\n and fi.opclasses like (array_to_string(pidx.indclass, ', ') || '%')\n ) > 0)::int as supports_fk\n from pg_index pidx\n join pg_class pci on pci.oid = pidx.indexrelid\n join pg_class pct on pct.oid = pidx.indrelid\n left join pg_namespace pn on pn.oid = pct.relnamespace\n left join pg_constraint con on con.conindid = pidx.indexrelid\n left join valid_duplicates vd on vd.invalid_indexrelid = pidx.indexrelid\n where pidx.indisvalid = false\n),\nnum_data as (\n select\n row_number() over () as num,\n data.*\n from data\n)\nselect\n (extract(epoch from now()) * 1e9)::int8 as epoch_ns,\n current_database() as tag_datname,\n num_data.*\nfrom num_data\nlimit 1000;\n",
51
- },
52
- gauges: ["*"],
53
- statement_timeout_seconds: 15,
54
- },
55
- "unused_indexes": {
56
- description: "This metric identifies unused indexes in the database. It provides insights into the number of unused indexes and their details. This metric helps administrators identify and fix unused indexes to improve database performance.",
57
- sqls: {
58
- 11: "with fk_indexes as ( /* pgwatch_generated */\n select\n n.nspname as schema_name,\n ci.relname as index_name,\n cr.relname as table_name,\n (confrelid::regclass)::text as fk_table_ref,\n array_to_string(indclass, ', ') as opclasses\n from pg_index i\n join pg_class ci on ci.oid = i.indexrelid and ci.relkind = 'i'\n join pg_class cr on cr.oid = i.indrelid and cr.relkind = 'r'\n join pg_namespace n on n.oid = ci.relnamespace\n join pg_constraint cn on cn.conrelid = cr.oid\n left join pg_stat_all_indexes as si on si.indexrelid = i.indexrelid\n where\n contype = 'f'\n and i.indisunique is false\n and conkey is not null\n and ci.relpages > 5\n and si.idx_scan < 10\n), table_scans as (\n select relid,\n tables.idx_scan + tables.seq_scan as all_scans,\n ( tables.n_tup_ins + tables.n_tup_upd + tables.n_tup_del ) as writes,\n pg_relation_size(relid) as table_size\n from pg_stat_all_tables as tables\n join pg_class c on c.oid = relid\n where c.relpages > 5\n), indexes as (\n select\n i.indrelid,\n i.indexrelid,\n n.nspname as schema_name,\n cr.relname as table_name,\n ci.relname as index_name,\n si.idx_scan,\n pg_relation_size(i.indexrelid) as index_bytes,\n ci.relpages,\n (case when a.amname = 'btree' then true else false end) as idx_is_btree,\n array_to_string(i.indclass, ', ') as opclasses\n from pg_index i\n join pg_class ci on ci.oid = i.indexrelid and ci.relkind = 'i'\n join pg_class cr on cr.oid = i.indrelid and cr.relkind = 'r'\n join pg_namespace n on n.oid = ci.relnamespace\n join pg_am a on ci.relam = a.oid\n left join pg_stat_all_indexes as si on si.indexrelid = i.indexrelid\n where\n i.indisunique = false\n and i.indisvalid = true\n and ci.relpages > 5\n), index_ratios as (\n select\n i.indexrelid as index_id,\n i.schema_name,\n i.table_name,\n i.index_name,\n idx_scan,\n all_scans,\n round(( case when all_scans = 0 then 0.0::numeric\n else idx_scan::numeric/all_scans * 100 end), 2) as index_scan_pct,\n writes,\n round((case when writes = 0 then idx_scan::numeric else idx_scan::numeric/writes end), 2)\n as scans_per_write,\n index_bytes as index_size_bytes,\n table_size as table_size_bytes,\n i.relpages,\n idx_is_btree,\n i.opclasses,\n (\n select count(1)\n from fk_indexes fi\n where fi.fk_table_ref = i.table_name\n and fi.schema_name = i.schema_name\n and fi.opclasses like (i.opclasses || '%')\n ) > 0 as supports_fk\n from indexes i\n join table_scans ts on ts.relid = i.indrelid\n)\nselect\n 'Never Used Indexes' as tag_reason,\n current_database() as tag_datname,\n index_id,\n schema_name as tag_schema_name,\n table_name as tag_table_name,\n index_name as tag_index_name,\n pg_get_indexdef(index_id) as index_definition,\n idx_scan,\n all_scans,\n index_scan_pct,\n writes,\n scans_per_write,\n index_size_bytes,\n table_size_bytes,\n relpages,\n idx_is_btree,\n opclasses as tag_opclasses,\n supports_fk\nfrom index_ratios\nwhere\n idx_scan = 0\n and idx_is_btree\norder by index_size_bytes desc\nlimit 1000;\n",
59
- },
60
- gauges: ["*"],
61
- statement_timeout_seconds: 15,
62
- },
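The heart of the unused_indexes query is the cumulative pg_stat_all_indexes.idx_scan counter. A stripped-down sketch of the never-used test, with the full query's extra conditions (B-tree only, more than 5 pages, foreign-key support, scan/write ratios) omitted:

    -- Non-unique indexes never scanned since the last stats reset, largest first.
    -- Unique indexes are excluded: they enforce constraints even when unscanned.
    select
        s.schemaname,
        s.relname as table_name,
        s.indexrelname as index_name,
        pg_size_pretty(pg_relation_size(s.indexrelid)) as index_size
    from pg_stat_all_indexes s
    join pg_index i on i.indexrelid = s.indexrelid
    where s.idx_scan = 0
      and not i.indisunique
    order by pg_relation_size(s.indexrelid) desc;

Because idx_scan only counts since the last statistics reset, pair this with the stats_reset metric below before treating a zero as proof of disuse.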
63
- "redundant_indexes": {
64
- description: "This metric identifies redundant indexes that can potentially be dropped to save storage space and improve write performance. It analyzes index relationships and finds indexes that are covered by other indexes, considering column order, operator classes, and foreign key constraints. Uses the exact logic from tmp.sql with JSON aggregation and proper thresholds.",
65
- sqls: {
66
- 11: "with fk_indexes as ( /* pgwatch_generated */\n select\n n.nspname as schema_name,\n ci.relname as index_name,\n cr.relname as table_name,\n (confrelid::regclass)::text as fk_table_ref,\n array_to_string(indclass, ', ') as opclasses\n from pg_index i\n join pg_class ci on ci.oid = i.indexrelid and ci.relkind = 'i'\n join pg_class cr on cr.oid = i.indrelid and cr.relkind = 'r'\n join pg_namespace n on n.oid = ci.relnamespace\n join pg_constraint cn on cn.conrelid = cr.oid\n left join pg_stat_all_indexes as si on si.indexrelid = i.indexrelid\n where\n contype = 'f'\n and i.indisunique is false\n and conkey is not null\n and ci.relpages > 5\n and si.idx_scan < 10\n),\n-- Redundant indexes\nindex_data as (\n select\n *,\n indkey::text as columns,\n array_to_string(indclass, ', ') as opclasses\n from pg_index i\n join pg_class ci on ci.oid = i.indexrelid and ci.relkind = 'i'\n where indisvalid = true and ci.relpages > 5\n), redundant_indexes as (\n select\n i2.indexrelid as index_id,\n tnsp.nspname as schema_name,\n trel.relname as table_name,\n pg_relation_size(trel.oid) as table_size_bytes,\n irel.relname as index_name,\n am1.amname as access_method,\n (i1.indexrelid::regclass)::text as reason,\n i1.indexrelid as reason_index_id,\n pg_get_indexdef(i1.indexrelid) main_index_def,\n pg_relation_size(i1.indexrelid) main_index_size_bytes,\n pg_get_indexdef(i2.indexrelid) index_def,\n pg_relation_size(i2.indexrelid) index_size_bytes,\n s.idx_scan as index_usage,\n quote_ident(tnsp.nspname) as formated_schema_name,\n coalesce(nullif(quote_ident(tnsp.nspname), 'public') || '.', '') || quote_ident(irel.relname) as formated_index_name,\n quote_ident(trel.relname) as formated_table_name,\n coalesce(nullif(quote_ident(tnsp.nspname), 'public') || '.', '') || quote_ident(trel.relname) as formated_relation_name,\n i2.opclasses\n from (\n select indrelid, indexrelid, opclasses, indclass, indexprs, indpred, indisprimary, indisunique, columns\n from index_data\n order by indexrelid\n ) as i1\n join index_data as i2 on (\n i1.indrelid = i2.indrelid -- same table\n and i1.indexrelid <> i2.indexrelid -- NOT same index\n )\n inner join pg_opclass op1 on i1.indclass[0] = op1.oid\n inner join pg_opclass op2 on i2.indclass[0] = op2.oid\n inner join pg_am am1 on op1.opcmethod = am1.oid\n inner join pg_am am2 on op2.opcmethod = am2.oid\n join pg_stat_all_indexes as s on s.indexrelid = i2.indexrelid\n join pg_class as trel on trel.oid = i2.indrelid\n join pg_namespace as tnsp on trel.relnamespace = tnsp.oid\n join pg_class as irel on irel.oid = i2.indexrelid\n where\n not i2.indisprimary -- index 1 is not primary\n and not i2.indisunique -- index 1 is not unique (unique indexes serve constraint purpose)\n and am1.amname = am2.amname -- same access type\n and i1.columns like (i2.columns || '%') -- index 2 includes all columns from index 1\n and i1.opclasses like (i2.opclasses || '%')\n -- index expressions is same\n and pg_get_expr(i1.indexprs, i1.indrelid) is not distinct from pg_get_expr(i2.indexprs, i2.indrelid)\n -- index predicates is same\n and pg_get_expr(i1.indpred, i1.indrelid) is not distinct from pg_get_expr(i2.indpred, i2.indrelid)\n), redundant_indexes_fk as (\n select\n ri.*,\n ((\n select count(1)\n from fk_indexes fi\n where\n fi.fk_table_ref = ri.table_name\n and fi.opclasses like (ri.opclasses || '%')\n ) > 0)::int as supports_fk\n from redundant_indexes ri\n),\n-- Cut recursive links\nredundant_indexes_tmp_num as (\n select row_number() over () num, rig.*\n from redundant_indexes_fk rig\n), 
redundant_indexes_tmp_links as (\n select\n ri1.*,\n ri2.num as r_num\n from redundant_indexes_tmp_num ri1\n left join redundant_indexes_tmp_num ri2 on ri2.reason_index_id = ri1.index_id and ri1.reason_index_id = ri2.index_id\n), redundant_indexes_tmp_cut as (\n select\n *\n from redundant_indexes_tmp_links\n where num < r_num or r_num is null\n), redundant_indexes_cut_grouped as (\n select\n distinct(num),\n *\n from redundant_indexes_tmp_cut\n order by index_size_bytes desc\n), redundant_indexes_grouped as (\n select\n index_id,\n schema_name as tag_schema_name,\n table_name,\n table_size_bytes,\n index_name as tag_index_name,\n access_method as tag_access_method,\n string_agg(distinct reason, ', ') as tag_reason,\n index_size_bytes,\n index_usage,\n index_def as index_definition,\n formated_index_name as tag_index_name,\n formated_schema_name as tag_schema_name,\n formated_table_name as tag_table_name,\n formated_relation_name as tag_relation_name,\n supports_fk::int as supports_fk,\n json_agg(\n distinct jsonb_build_object(\n 'index_name', reason,\n 'index_definition', main_index_def,\n 'index_size_bytes', main_index_size_bytes\n )\n )::text as redundant_to_json\n from redundant_indexes_cut_grouped\n group by\n index_id,\n table_size_bytes,\n schema_name,\n table_name,\n index_name,\n access_method,\n index_def,\n index_size_bytes,\n index_usage,\n formated_index_name,\n formated_schema_name,\n formated_table_name,\n formated_relation_name,\n supports_fk\n order by index_size_bytes desc\n)\nselect * from redundant_indexes_grouped\nlimit 1000;\n",
67
- },
68
- gauges: ["*"],
69
- statement_timeout_seconds: 15,
70
- },
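The covering test at the core of redundant_indexes is a textual prefix match on pg_index.indkey, the ordered vector of indexed column numbers. A reduced sketch of just that test; the full query above additionally matches operator classes, index expressions, and predicates, and cuts mutual (A covers B, B covers A) links:

    -- Report i2 as redundant when another valid index i1 on the same table
    -- starts with exactly i2's columns. Caveat shared with the full query:
    -- the text prefix match can mis-fire across multi-digit column numbers
    -- (e.g. indkey '1 2' is treated as a prefix of '1 23').
    select
        (i2.indexrelid::regclass)::text as redundant_index,
        (i1.indexrelid::regclass)::text as covered_by
    from pg_index i1
    join pg_index i2
        on i1.indrelid = i2.indrelid
        and i1.indexrelid <> i2.indexrelid
        and i1.indkey::text like (i2.indkey::text || '%')
    where not i2.indisprimary
      and not i2.indisunique
      and i1.indisvalid
      and i2.indisvalid;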
71
- "stats_reset": {
72
- description: "This metric tracks when statistics were last reset at the database level. It provides visibility into the freshness of statistics data, which is essential for understanding the reliability of usage metrics. A recent reset time indicates that usage statistics may not reflect long-term patterns. Note that Postgres tracks stats resets at the database level, not per-index or per-table.",
73
- sqls: {
74
- 11: "select /* pgwatch_generated */\n datname as tag_database_name,\n extract(epoch from stats_reset)::int as stats_reset_epoch,\n extract(epoch from now() - stats_reset)::int as seconds_since_reset\nfrom pg_stat_database\nwhere datname = current_database()\n and stats_reset is not null;\n",
75
- },
76
- gauges: ["stats_reset_epoch","seconds_since_reset"],
77
- statement_timeout_seconds: 15,
78
- },
79
- };
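All of the usage-based checks above (idx_scan = 0, idx_scan < 10) read cumulative counters, which is why the dictionary ends with stats_reset. A companion sketch showing the relationship; pg_stat_reset() requires appropriate privileges and is left commented out:

    -- How long have statistics been accumulating in the current database?
    select
        datname,
        stats_reset,
        now() - stats_reset as collecting_for
    from pg_stat_database
    where datname = current_database();

    -- Resetting zeroes idx_scan and friends, making the unused/redundant
    -- checks above meaningless until fresh traffic accumulates:
    -- select pg_stat_reset();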