postgresai 0.14.0 → 0.15.0-dev.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -12833,7 +12833,7 @@ var require_formats = __commonJS((exports) => {
  }
  var TIME = /^(\d\d):(\d\d):(\d\d(?:\.\d+)?)(z|([+-])(\d\d)(?::?(\d\d))?)?$/i;
  function getTime(strictTimeZone) {
- return function time(str2) {
+ return function time3(str2) {
  const matches = TIME.exec(str2);
  if (!matches)
  return false;
@@ -13064,7 +13064,7 @@ var {
  // package.json
  var package_default = {
  name: "postgresai",
- version: "0.14.0",
+ version: "0.15.0-dev.1",
  description: "postgres_ai CLI",
  license: "Apache-2.0",
  private: false,
@@ -13141,7 +13141,8 @@ function readConfig() {
  apiKey: null,
  baseUrl: null,
  orgId: null,
- defaultProject: null
+ defaultProject: null,
+ projectName: null
  };
  const userConfigPath = getConfigPath();
  if (fs.existsSync(userConfigPath)) {
@@ -13152,6 +13153,7 @@ function readConfig() {
  config.baseUrl = parsed.baseUrl ?? null;
  config.orgId = parsed.orgId ?? null;
  config.defaultProject = parsed.defaultProject ?? null;
+ config.projectName = parsed.projectName ?? null;
  return config;
  } catch (err) {
  const message = err instanceof Error ? err.message : String(err);
@@ -15890,7 +15892,7 @@ var Result = import_lib.default.Result;
  var TypeOverrides = import_lib.default.TypeOverrides;
  var defaults = import_lib.default.defaults;
  // package.json
- var version = "0.14.0";
+ var version = "0.15.0-dev.1";
  var package_default2 = {
  name: "postgresai",
  version,
@@ -15970,7 +15972,8 @@ function readConfig2() {
  apiKey: null,
  baseUrl: null,
  orgId: null,
- defaultProject: null
+ defaultProject: null,
+ projectName: null
  };
  const userConfigPath = getConfigPath2();
  if (fs2.existsSync(userConfigPath)) {
@@ -15981,6 +15984,7 @@ function readConfig2() {
  config.baseUrl = parsed.baseUrl ?? null;
  config.orgId = parsed.orgId ?? null;
  config.defaultProject = parsed.defaultProject ?? null;
+ config.projectName = parsed.projectName ?? null;
  return config;
  } catch (err) {
  const message = err instanceof Error ? err.message : String(err);
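
Note: both copies of the config reader in the bundle (readConfig and readConfig2) gain the same projectName field. A minimal sketch of the shape they return after this change, inferred from the defaults above (the interface name is illustrative; the published bundle itself is untyped JavaScript):

  // Sketch only: field names come from the diff; this type is not part of the package.
  interface PostgresAiConfig {
    apiKey: string | null;
    baseUrl: string | null;
    orgId: string | null;
    defaultProject: string | null;
    projectName: string | null; // new in 0.15.0-dev.1
  }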
@@ -26525,6 +26529,187 @@ where datname = current_database()
  },
  gauges: ["stats_reset_epoch", "seconds_since_reset"],
  statement_timeout_seconds: 15
+ },
+ pg_table_bloat: {
+ description: "This metric analyzes estimated table bloat by calculating the estimated vs actual table pages and sizes. It provides insights into estimated bloat percentage, real size, extra size due to estimated bloat, and estimated bloat size considering fill factor. This metric helps administrators identify tables that may need maintenance like VACUUM FULL or table reorganization.",
+ sqls: {
+ 11: `select current_database() as tag_datname, schemaname as tag_schemaname, tblname as tag_tblname, (bs*tblpages)/(1024*1024)::float as real_size_mib, /* pgwatch_generated */
+ (tblpages-est_tblpages)*bs as extra_size,
+ case when tblpages > 0 and tblpages - est_tblpages > 0
+ then 100 * (tblpages - est_tblpages)/tblpages::float
+ else 0
+ end as extra_pct, fillfactor,
+ case when tblpages - est_tblpages_ff > 0
+ then (tblpages-est_tblpages_ff)*bs
+ else 0
+ end as bloat_size,
+ case when tblpages > 0 and tblpages - est_tblpages_ff > 0
+ then 100 * (tblpages - est_tblpages_ff)/tblpages::float
+ else 0
+ end as bloat_pct, is_na
+ -- , tpl_hdr_size, tpl_data_size, (pst).free_percent + (pst).dead_tuple_percent as real_frag -- (DEBUG INFO)
+ from (
+ select ceil( reltuples / ( (bs-page_hdr)/tpl_size ) ) + ceil( toasttuples / 4 ) as est_tblpages,
+ ceil( reltuples / ( (bs-page_hdr)*fillfactor/(tpl_size*100) ) ) + ceil( toasttuples / 4 ) as est_tblpages_ff,
+ tblpages, fillfactor, bs, tblid, schemaname, tblname, heappages, toastpages, is_na
+ -- , tpl_hdr_size, tpl_data_size, pgstattuple(tblid) as pst -- (DEBUG INFO)
+ from (
+ select
+ ( 4 + tpl_hdr_size + tpl_data_size + (2*ma)
+ - case when tpl_hdr_size%ma = 0 then ma else tpl_hdr_size%ma end
+ - case when ceil(tpl_data_size)::int%ma = 0 then ma else ceil(tpl_data_size)::int%ma end
+ ) as tpl_size, bs - page_hdr as size_per_block, (heappages + toastpages) as tblpages, heappages,
+ toastpages, reltuples, toasttuples, bs, page_hdr, tblid, schemaname, tblname, fillfactor, is_na
+ -- , tpl_hdr_size, tpl_data_size
+ from (
+ select
+ tbl.oid as tblid, ns.nspname as schemaname, tbl.relname as tblname, tbl.reltuples,
+ tbl.relpages as heappages, coalesce(toast.relpages, 0) as toastpages,
+ coalesce(toast.reltuples, 0) as toasttuples,
+ coalesce(substring(
+ array_to_string(tbl.reloptions, ' ')
+ from 'fillfactor=([0-9]+)')::smallint, 100) as fillfactor,
+ current_setting('block_size')::numeric as bs,
+ case when version()~'mingw32' or version()~'64-bit|x86_64|ppc64|ia64|amd64' then 8 else 4 end as ma,
+ 24 as page_hdr,
+ 23 + case when max(coalesce(s.null_frac,0)) > 0 then ( 7 + count(s.attname) ) / 8 else 0::int end
+ + case when bool_or(att.attname = 'oid' and att.attnum < 0) then 4 else 0 end as tpl_hdr_size,
+ sum( (1-coalesce(s.null_frac, 0)) * coalesce(s.avg_width, 0) ) as tpl_data_size,
+ (bool_or(att.atttypid = 'pg_catalog.name'::regtype)
+ or sum(case when att.attnum > 0 then 1 else 0 end) <> count(s.attname))::int as is_na
+ from pg_attribute as att
+ join pg_class as tbl on att.attrelid = tbl.oid
+ join pg_namespace as ns on ns.oid = tbl.relnamespace
+ left join postgres_ai.pg_statistic as s on s.schemaname=ns.nspname
+ and s.tablename = tbl.relname and s.inherited=false and s.attname=att.attname
+ left join pg_class as toast on tbl.reltoastrelid = toast.oid
+ where not att.attisdropped
+ and tbl.relkind in ('r','m')
+ group by 1,2,3,4,5,6,7,8,9,10
+ order by 2,3
+ ) as s
+ ) as s2
+ ) as s3
+ -- where not is_na
+ -- and tblpages*((pst).free_percent + (pst).dead_tuple_percent)::float4/100 >= 1
+ where (bs * tblpages::float / (1024 * 1024)) > 1 /* exclude tables below 1 MiB */
+ order by is_na = 0 desc, bloat_pct desc
+ limit 1000
+ `
+ },
+ gauges: ["real_size_mib", "extra_size", "extra_pct", "fillfactor", "bloat_size", "bloat_pct", "is_na", "reltuples"],
+ statement_timeout_seconds: 300
+ },
+ pg_btree_bloat: {
+ description: "This metric analyzes estimated index bloat by calculating the estimated vs actual index pages and sizes. It provides insights into estimated bloat percentage, real size, extra size due to estimated bloat, and estimated bloat size considering fill factor. This metric helps administrators identify indexes that may need maintenance like VACUUM FULL or index reorganization.",
+ sqls: {
+ 11: `select /* pgwatch_generated */
+ current_database() as tag_datname, nspname as tag_schemaname, tblname as tag_tblname, idxname as tag_idxname,
+ (bs*(relpages)/(1024*1024))::float as real_size_mib,
+ (pg_relation_size(tbloid)/(1024*1024))::float as table_size_mib,
+ (bs*(relpages-est_pages))::float as extra_size,
+ 100 * (relpages-est_pages)::float / relpages as extra_pct,
+ fillfactor,
+ case when relpages > est_pages_ff
+ then bs*(relpages-est_pages_ff)
+ else 0
+ end as bloat_size,
+ 100 * (relpages-est_pages_ff)::float / relpages as bloat_pct,
+ is_na
+ -- , 100-(pst).avg_leaf_density as pst_avg_bloat, est_pages, index_tuple_hdr_bm, maxalign, pagehdr, nulldatawidth, nulldatahdrwidth, reltuples, relpages -- (DEBUG INFO)
+ from (
+ select coalesce(1 +
+ ceil(reltuples/floor((bs-pageopqdata-pagehdr)/(4+nulldatahdrwidth)::float)), 0 -- ItemIdData size + computed avg size of a tuple (nulldatahdrwidth)
+ ) as est_pages,
+ coalesce(1 +
+ ceil(reltuples/floor((bs-pageopqdata-pagehdr)*fillfactor/(100*(4+nulldatahdrwidth)::float))), 0
+ ) as est_pages_ff,
+ bs, nspname, tblname, idxname, relpages, fillfactor, is_na, tbloid
+ -- , pgstatindex(idxoid) as pst, index_tuple_hdr_bm, maxalign, pagehdr, nulldatawidth, nulldatahdrwidth, reltuples -- (DEBUG INFO)
+ from (
+ select maxalign, bs, nspname, tblname, idxname, reltuples, relpages, idxoid, fillfactor, tbloid,
+ ( index_tuple_hdr_bm +
+ maxalign - case -- Add padding to the index tuple header to align on MAXALIGN
+ when index_tuple_hdr_bm%maxalign = 0 then maxalign
+ else index_tuple_hdr_bm%maxalign
+ end
+ + nulldatawidth + maxalign - case -- Add padding to the data to align on MAXALIGN
+ when nulldatawidth = 0 then 0
+ when nulldatawidth::integer%maxalign = 0 then maxalign
+ else nulldatawidth::integer%maxalign
+ end
+ )::numeric as nulldatahdrwidth, pagehdr, pageopqdata, is_na
+ -- , index_tuple_hdr_bm, nulldatawidth -- (DEBUG INFO)
+ from (
+ select n.nspname, i.tblname, i.idxname, i.reltuples, i.relpages, i.tbloid,
+ i.idxoid, i.fillfactor, current_setting('block_size')::numeric as bs,
+ case -- MAXALIGN: 4 on 32bits, 8 on 64bits (and mingw32 ?)
+ when version() ~ 'mingw32' or version() ~ '64-bit|x86_64|ppc64|ia64|amd64' then 8
+ else 4
+ end as maxalign,
+ /* per page header, fixed size: 20 for 7.X, 24 for others */
+ 24 as pagehdr,
+ /* per page btree opaque data */
+ 16 as pageopqdata,
+ /* per tuple header: add IndexAttributeBitMapData if some cols are null-able */
+ case when max(coalesce(s.null_frac,0)) = 0
+ then 8 -- IndexTupleData size
+ else 8 + (( 32 + 8 - 1 ) / 8) -- IndexTupleData size + IndexAttributeBitMapData size ( max num filed per index + 8 - 1 /8)
+ end as index_tuple_hdr_bm,
+ /* data len: we remove null values save space using it fractionnal part from stats */
+ sum( (1-coalesce(s.null_frac, 0)) * coalesce(s.avg_width, 1024)) as nulldatawidth,
+ (max( case when i.atttypid = 'pg_catalog.name'::regtype then 1 else 0 end ) > 0)::int as is_na
+ from (
+ select ct.relname as tblname, ct.relnamespace,
+ ic.idxname, ic.attpos, ic.indkey, ic.indkey[ic.attpos], ic.reltuples, ic.relpages, ic.tbloid, ic.idxoid, ic.fillfactor,
+ coalesce(a1.attnum, a2.attnum) as attnum, coalesce(a1.attname, a2.attname) as attname, coalesce(a1.atttypid, a2.atttypid) as atttypid,
+ case when a1.attnum is null
+ then ic.idxname
+ else ct.relname
+ end as attrelname
+ from (
+ select idxname, reltuples, relpages, tbloid, idxoid, fillfactor, indkey,
+ pg_catalog.generate_series(1,indnatts) as attpos
+ from (
+ select ci.relname as idxname, ci.reltuples, ci.relpages, i.indrelid as tbloid,
+ i.indexrelid as idxoid,
+ coalesce(substring(
+ array_to_string(ci.reloptions, ' ')
+ from 'fillfactor=([0-9]+)')::smallint, 90) as fillfactor,
+ i.indnatts,
+ pg_catalog.string_to_array(pg_catalog.textin(
+ pg_catalog.int2vectorout(i.indkey)),' ')::int[] as indkey
+ from pg_catalog.pg_index i
+ join pg_catalog.pg_class ci on ci.oid = i.indexrelid
+ where ci.relam=(select oid from pg_am where amname = 'btree')
+ and ci.relpages > 0
+ ) as idx_data
+ ) as ic
+ join pg_catalog.pg_class ct on ct.oid = ic.tbloid
+ left join pg_catalog.pg_attribute a1 on
+ ic.indkey[ic.attpos] <> 0
+ and a1.attrelid = ic.tbloid
+ and a1.attnum = ic.indkey[ic.attpos]
+ left join pg_catalog.pg_attribute a2 on
+ ic.indkey[ic.attpos] = 0
+ and a2.attrelid = ic.idxoid
+ and a2.attnum = ic.attpos
+ ) i
+ join pg_catalog.pg_namespace n on n.oid = i.relnamespace
+ join postgres_ai.pg_statistic s on s.schemaname = n.nspname
+ and s.tablename = i.attrelname
+ and s.attname = i.attname
+ group by 1,2,3,4,5,6,7,8,9,10,11,12
+ ) as rows_data_stats
+ ) as rows_hdr_pdg_stats
+ ) as relation_stats
+ where (bs * relpages::float / (1024 * 1024)) > 1 /* exclude indexes below 1 MiB */
+ order by is_na = 0 desc, bloat_pct desc
+ limit 1000
+ `
+ },
+ gauges: ["real_size_mib", "table_size_mib", "extra_size", "extra_pct", "fillfactor", "bloat_size", "bloat_pct", "is_na", "reltuples"],
+ statement_timeout_seconds: 15
  }
  };

@@ -26545,6 +26730,8 @@ var METRIC_NAMES = {
  H001: "pg_invalid_indexes",
  H002: "unused_indexes",
  H004: "redundant_indexes",
+ F004: "pg_table_bloat",
+ F005: "pg_btree_bloat",
  settings: "settings",
  dbStats: "db_stats",
  dbSize: "db_size",
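
Note: F004 and F005 now map to the two new bloat metrics. The generators below fetch their query via getMetricSql(METRIC_NAMES.F004, pgMajorVersion); the helper's body is not part of this diff, so the following is only a sketch of the usual convention for version-keyed metric SQL (pick the highest sqls key that does not exceed the server's major version):

  // Hedged sketch, not the package's actual getMetricSql implementation.
  function pickMetricSql(sqls: Record<number, string>, pgMajor: number): string | null {
    const eligible = Object.keys(sqls)
      .map(Number)
      .filter((v) => v <= pgMajor)
      .sort((a, b) => b - a); // prefer the newest eligible variant
    return eligible.length > 0 ? sqls[eligible[0]] : null;
  }

  // With sqls = { 11: "select ..." }, every server on Postgres 11 or newer
  // resolves to the same query; anything older gets null (metric skipped).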
@@ -27805,6 +27992,149 @@ async function generateF001(client, nodeName) {
  };
  return report;
  }
+ async function generateF004(client, nodeName) {
+ const report = createBaseReport("F004", "Autovacuum: heap bloat (estimated)", nodeName);
+ const postgresVersion = await getPostgresVersion(client);
+ const pgMajorVersion = parseInt(postgresVersion.server_major_ver, 10);
+ let bloatedTables = [];
+ try {
+ const sql = getMetricSql(METRIC_NAMES.F004, pgMajorVersion);
+ const bloatResult = await client.query(sql);
+ const vacuumStatsResult = await client.query(`
+ SELECT schemaname, relname, last_vacuum, last_autovacuum
+ FROM pg_stat_user_tables
+ `);
+ const vacuumStats = new Map;
+ for (const row of vacuumStatsResult.rows) {
+ const key = `${row.schemaname}.${row.relname}`;
+ const vacuumTime = row.last_vacuum || row.last_autovacuum;
+ vacuumStats.set(key, {
+ last_vacuum: vacuumTime ? new Date(vacuumTime).toISOString() : null,
+ last_vacuum_epoch: vacuumTime ? Math.floor(new Date(vacuumTime).getTime() / 1000) : 0
+ });
+ }
+ bloatedTables = bloatResult.rows.map((row) => {
+ const t = transformMetricRow(row);
+ const schemaName = String(t.schemaname || "");
+ const tableName = String(t.tblname || "");
+ const realSizeBytes = Math.round((parseFloat(String(t.real_size_mib)) || 0) * 1024 * 1024);
+ const extraSize = parseInt(String(t.extra_size || 0), 10);
+ const bloatSize = parseInt(String(t.bloat_size || 0), 10);
+ const vacuumInfo = vacuumStats.get(`${schemaName}.${tableName}`) || {
+ last_vacuum: null,
+ last_vacuum_epoch: 0
+ };
+ return {
+ schema_name: schemaName,
+ table_name: tableName,
+ real_size: realSizeBytes,
+ extra_size: extraSize,
+ extra_pct: parseFloat(String(t.extra_pct)) || 0,
+ bloat_size: bloatSize,
+ bloat_pct: parseFloat(String(t.bloat_pct)) || 0,
+ fillfactor: parseInt(String(t.fillfactor || 100), 10),
+ last_vacuum: vacuumInfo.last_vacuum,
+ last_vacuum_epoch: vacuumInfo.last_vacuum_epoch,
+ real_size_pretty: formatBytes(realSizeBytes),
+ extra_size_pretty: formatBytes(extraSize),
+ bloat_size_pretty: formatBytes(bloatSize)
+ };
+ });
+ } catch (err) {
+ const errorMsg = err instanceof Error ? err.message : String(err);
+ console.log(`[F004] Error estimating table bloat: ${errorMsg}`);
+ }
+ const { datname: dbName, size_bytes: dbSizeBytes } = await getCurrentDatabaseInfo(client, pgMajorVersion);
+ const totalCount = bloatedTables.length;
+ const totalBloatSizeBytes = bloatedTables.reduce((sum, t) => sum + t.bloat_size, 0);
+ const dbEntry = {
+ bloated_tables: bloatedTables,
+ total_count: totalCount,
+ total_bloat_size_bytes: totalBloatSizeBytes,
+ total_bloat_size_pretty: formatBytes(totalBloatSizeBytes),
+ database_size_bytes: dbSizeBytes,
+ database_size_pretty: formatBytes(dbSizeBytes)
+ };
+ report.results[nodeName] = {
+ data: { [dbName]: dbEntry },
+ postgres_version: postgresVersion
+ };
+ return report;
+ }
+ async function generateF005(client, nodeName) {
+ const report = createBaseReport("F005", "Autovacuum: index bloat (estimated)", nodeName);
+ const postgresVersion = await getPostgresVersion(client);
+ const pgMajorVersion = parseInt(postgresVersion.server_major_ver, 10);
+ let bloatedIndexes = [];
+ try {
+ const sql = getMetricSql(METRIC_NAMES.F005, pgMajorVersion);
+ const bloatResult = await client.query(sql);
+ const vacuumStatsResult = await client.query(`
+ SELECT schemaname, relname, last_vacuum, last_autovacuum
+ FROM pg_stat_user_tables
+ `);
+ const vacuumStats = new Map;
+ for (const row of vacuumStatsResult.rows) {
+ const key = `${row.schemaname}.${row.relname}`;
+ const vacuumTime = row.last_vacuum || row.last_autovacuum;
+ vacuumStats.set(key, {
+ last_vacuum: vacuumTime ? new Date(vacuumTime).toISOString() : null,
+ last_vacuum_epoch: vacuumTime ? Math.floor(new Date(vacuumTime).getTime() / 1000) : 0
+ });
+ }
+ bloatedIndexes = bloatResult.rows.map((row) => {
+ const t = transformMetricRow(row);
+ const schemaName = String(t.schemaname || "");
+ const tableName = String(t.tblname || "");
+ const indexName = String(t.idxname || "");
+ const realSizeBytes = Math.round((parseFloat(String(t.real_size_mib)) || 0) * 1024 * 1024);
+ const tableSizeBytes = Math.round((parseFloat(String(t.table_size_mib)) || 0) * 1024 * 1024);
+ const extraSize = parseInt(String(t.extra_size || 0), 10);
+ const bloatSize = parseInt(String(t.bloat_size || 0), 10);
+ const vacuumInfo = vacuumStats.get(`${schemaName}.${tableName}`) || {
+ last_vacuum: null,
+ last_vacuum_epoch: 0
+ };
+ return {
+ schema_name: schemaName,
+ table_name: tableName,
+ index_name: indexName,
+ real_size: realSizeBytes,
+ table_size: tableSizeBytes,
+ extra_size: extraSize,
+ extra_pct: parseFloat(String(t.extra_pct)) || 0,
+ bloat_size: bloatSize,
+ bloat_pct: parseFloat(String(t.bloat_pct)) || 0,
+ fillfactor: parseInt(String(t.fillfactor || 90), 10),
+ last_vacuum: vacuumInfo.last_vacuum,
+ last_vacuum_epoch: vacuumInfo.last_vacuum_epoch,
+ real_size_pretty: formatBytes(realSizeBytes),
+ table_size_pretty: formatBytes(tableSizeBytes),
+ extra_size_pretty: formatBytes(extraSize),
+ bloat_size_pretty: formatBytes(bloatSize)
+ };
+ });
+ } catch (err) {
+ const errorMsg = err instanceof Error ? err.message : String(err);
+ console.log(`[F005] Error estimating index bloat: ${errorMsg}`);
+ }
+ const { datname: dbName, size_bytes: dbSizeBytes } = await getCurrentDatabaseInfo(client, pgMajorVersion);
+ const totalCount = bloatedIndexes.length;
+ const totalBloatSizeBytes = bloatedIndexes.reduce((sum, idx) => sum + idx.bloat_size, 0);
+ const dbEntry = {
+ bloated_indexes: bloatedIndexes,
+ total_count: totalCount,
+ total_bloat_size_bytes: totalBloatSizeBytes,
+ total_bloat_size_pretty: formatBytes(totalBloatSizeBytes),
+ database_size_bytes: dbSizeBytes,
+ database_size_pretty: formatBytes(dbSizeBytes)
+ };
+ report.results[nodeName] = {
+ data: { [dbName]: dbEntry },
+ postgres_version: postgresVersion
+ };
+ return report;
+ }
  async function generateG001(client, nodeName) {
  const report = createBaseReport("G001", "Memory-related settings", nodeName);
  const postgresVersion = await getPostgresVersion(client);
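
Note: both new generators assemble the same report envelope as the existing ones, keyed by node and then by database name. A condensed, illustrative sketch of one F004 node entry, with field names copied from generateF004 above and all values invented (the exact *_pretty strings depend on formatBytes, which is not shown in this diff):

  // Illustrative only; every value below is made up.
  const exampleF004NodeResult = {
    data: {
      appdb: {
        bloated_tables: [{
          schema_name: "public",
          table_name: "orders",
          real_size: 536870912,   // bytes, derived from real_size_mib * 1024 * 1024
          extra_size: 104857600,
          extra_pct: 19.5,
          bloat_size: 94371840,
          bloat_pct: 17.6,
          fillfactor: 100,
          last_vacuum: "2025-01-01T00:00:00.000Z",
          last_vacuum_epoch: 1735689600,
          real_size_pretty: "512 MB", // formatBytes output; exact formatting not in this diff
          extra_size_pretty: "100 MB",
          bloat_size_pretty: "90 MB"
        }],
        total_count: 1,
        total_bloat_size_bytes: 94371840,
        total_bloat_size_pretty: "90 MB",
        database_size_bytes: 1073741824,
        database_size_pretty: "1 GB"
      }
    },
    postgres_version: { server_major_ver: "16" }
  };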
@@ -27955,6 +28285,8 @@ var REPORT_GENERATORS = {
  D001: generateD001,
  D004: generateD004,
  F001: generateF001,
+ F004: generateF004,
+ F005: generateF005,
  G001: generateG001,
  G003: generateG003,
  H001: generateH001,
@@ -28472,6 +28804,15 @@ function summarizeG003(nodeData) {
  }

  // bin/postgres-ai.ts
+ var nodeVersion = parseInt(process.versions.node.split(".")[0], 10);
+ if (nodeVersion < 18) {
+ console.error(`\x1B[31mError: postgresai requires Node 18 or higher.\x1B[0m`);
+ console.error(`You are running Node.js ${process.versions.node}.`);
+ console.error(`Please upgrade to Node.js 20 LTS or Node.js 22 for security updates.`);
+ console.error(`
+ Download: https://nodejs.org/`);
+ process.exit(1);
+ }
  var rl = null;
  function getReadline() {
  if (!rl) {
@@ -30031,6 +30372,64 @@ function checkRunningContainers() {
  return { running: false, containers: [] };
  }
  }
+ function registerMonitoringInstance(apiKey, projectName, opts) {
+ const { apiBaseUrl } = resolveBaseUrls2(opts);
+ const url = `${apiBaseUrl}/rpc/monitoring_instance_register`;
+ const debug = opts?.debug;
+ if (debug) {
+ console.log(`
+ Debug: Registering monitoring instance...`);
+ console.log(`Debug: POST ${url}`);
+ console.log(`Debug: project_name=${projectName}`);
+ }
+ fetch(url, {
+ method: "POST",
+ headers: {
+ "Content-Type": "application/json"
+ },
+ body: JSON.stringify({
+ api_token: apiKey,
+ project_name: projectName
+ })
+ }).then(async (res) => {
+ const body = await res.text().catch(() => "");
+ if (!res.ok) {
+ if (debug) {
+ console.log(`Debug: Monitoring registration failed: HTTP ${res.status}`);
+ console.log(`Debug: Response: ${body}`);
+ }
+ return;
+ }
+ if (debug) {
+ console.log(`Debug: Monitoring registration response: ${body}`);
+ }
+ }).catch((err) => {
+ if (debug) {
+ console.log(`Debug: Monitoring registration error: ${err.message}`);
+ }
+ });
+ }
+ function updatePgwatchConfig(configPath, updates) {
+ let lines = [];
+ if (fs5.existsSync(configPath)) {
+ const stats = fs5.statSync(configPath);
+ if (!stats.isDirectory()) {
+ const content = fs5.readFileSync(configPath, "utf8");
+ lines = content.split(/\r?\n/).filter((l) => l.trim() !== "");
+ }
+ }
+ for (const [key, value] of Object.entries(updates)) {
+ const existingIndex = lines.findIndex((l) => l.startsWith(key + "="));
+ if (existingIndex >= 0) {
+ lines[existingIndex] = `${key}=${value}`;
+ } else {
+ lines.push(`${key}=${value}`);
+ }
+ }
+ fs5.writeFileSync(configPath, lines.join(`
+ `) + `
+ `, { encoding: "utf8", mode: 384 });
+ }
  async function runCompose(args, grafanaPassword) {
  let composeFile;
  let projectDir;
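
Note: updatePgwatchConfig is a small upsert over key=value lines: existing keys are rewritten in place, new keys are appended, blank lines are dropped, and the file is written back with mode 384 (octal 0o600, owner read/write only). This is what lets the two hunks further down replace their single-key fs5.writeFileSync calls without clobbering other settings. A minimal usage sketch (the path and values are illustrative):

  // Hypothetical calls; the path is made up for illustration.
  updatePgwatchConfig("/home/user/.postgres_ai/.pgwatch-config", { api_key: "pk-example" });
  updatePgwatchConfig("/home/user/.postgres_ai/.pgwatch-config", { project_name: "my_project" });
  // The file now contains both lines, each written exactly once:
  //   api_key=pk-example
  //   project_name=my_project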
@@ -30092,9 +30491,9 @@ program2.command("help", { isDefault: true }).description("show help").action(() => {
  program2.outputHelp();
  });
  var mon = program2.command("mon").description("monitoring services management");
- mon.command("local-install").description("install local monitoring stack (generate config, start services)").option("--demo", "demo mode with sample database", false).option("--api-key <key>", "Postgres AI API key for automated report uploads").option("--db-url <url>", "PostgreSQL connection URL to monitor").option("--tag <tag>", "Docker image tag to use (e.g., 0.14.0, 0.14.0-dev.33)").option("-y, --yes", "accept all defaults and skip interactive prompts", false).action(async (opts) => {
+ mon.command("local-install").description("install local monitoring stack (generate config, start services)").option("--demo", "demo mode with sample database", false).option("--api-key <key>", "Postgres AI API key for automated report uploads").option("--db-url <url>", "PostgreSQL connection URL to monitor").option("--tag <tag>", "Docker image tag to use (e.g., 0.14.0, 0.14.0-dev.33)").option("--project <name>", "Docker Compose project name (default: postgres_ai)").option("-y, --yes", "accept all defaults and skip interactive prompts", false).action(async (opts) => {
  const globalOpts = program2.opts();
- const apiKey = opts.apiKey || globalOpts.apiKey;
+ let apiKey = opts.apiKey || globalOpts.apiKey;
  console.log(`
  =================================`);
  console.log(" PostgresAI monitoring local install");
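
Note: local-install gains a --project <name> option, and apiKey switches from const to let so that a key entered at the interactive prompt (see the later hunk adding apiKey = trimmedKey) can reach the registerMonitoringInstance call at the end of the install. An illustrative invocation using only flags that appear in this diff, with placeholder values:

  postgres-ai mon local-install --project my_project --api-key pk-example --db-url postgres://user:pass@host:5432/db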
@@ -30105,6 +30504,12 @@ mon.command("local-install").description("install local monitoring stack (genera
  const { projectDir } = await resolveOrInitPaths();
  console.log(`Project directory: ${projectDir}
  `);
+ if (opts.project) {
+ const cfgPath2 = path5.resolve(projectDir, ".pgwatch-config");
+ updatePgwatchConfig(cfgPath2, { project_name: opts.project });
+ console.log(`Using project name: ${opts.project}
+ `);
+ }
  const envFile = path5.resolve(projectDir, ".env");
  let existingRegistry = null;
  let existingPassword = null;
@@ -30161,11 +30566,7 @@ Use demo mode without API key: postgres-ai mon local-install --demo`);
  if (apiKey) {
  console.log("Using API key provided via --api-key parameter");
  writeConfig({ apiKey });
- fs5.writeFileSync(path5.resolve(projectDir, ".pgwatch-config"), `api_key=${apiKey}
- `, {
- encoding: "utf8",
- mode: 384
- });
+ updatePgwatchConfig(path5.resolve(projectDir, ".pgwatch-config"), { api_key: apiKey });
  console.log(`\u2713 API key saved
  `);
  } else if (opts.yes) {
@@ -30182,11 +30583,8 @@ Use demo mode without API key: postgres-ai mon local-install --demo`);
  const trimmedKey = inputApiKey.trim();
  if (trimmedKey) {
  writeConfig({ apiKey: trimmedKey });
- fs5.writeFileSync(path5.resolve(projectDir, ".pgwatch-config"), `api_key=${trimmedKey}
- `, {
- encoding: "utf8",
- mode: 384
- });
+ updatePgwatchConfig(path5.resolve(projectDir, ".pgwatch-config"), { api_key: trimmedKey });
+ apiKey = trimmedKey;
  console.log(`\u2713 API key saved
  `);
  break;
@@ -30393,6 +30791,13 @@ You can provide either:`);
  }
  console.log(`\u2713 Services started
  `);
+ if (apiKey && !opts.demo) {
+ const projectName = opts.project || "postgres-ai-monitoring";
+ registerMonitoringInstance(apiKey, projectName, {
+ apiBaseUrl: globalOpts.apiBaseUrl,
+ debug: !!process.env.DEBUG
+ });
+ }
  console.log("=================================");
  console.log(" Local install completed!");
  console.log(`=================================