postgresai 0.14.0-dev.81 → 0.14.0-dev.82
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/bin/postgres-ai.js +220 -6
- package/lib/checkup-dictionary.ts +3 -2
- package/lib/checkup.ts +268 -0
- package/package.json +2 -2
- package/test/checkup.test.ts +1 -1
package/dist/bin/postgres-ai.js
CHANGED
@@ -13064,7 +13064,7 @@ var {
 // package.json
 var package_default = {
   name: "postgresai",
-  version: "0.14.0-dev.81",
+  version: "0.14.0-dev.82",
   description: "postgres_ai CLI",
   license: "Apache-2.0",
   private: false,
@@ -13098,7 +13098,7 @@ var package_default = {
   "start:node": "node ./dist/bin/postgres-ai.js --help",
   dev: "bun run embed-all && bun --watch ./bin/postgres-ai.ts",
   test: "bun run embed-all && bun test",
-  "test:fast": "bun run embed-all && bun test",
+  "test:fast": "bun run embed-all && bun test --coverage=false",
   "test:coverage": "bun run embed-all && bun test --coverage && echo 'Coverage report: cli/coverage/lcov-report/index.html'",
   typecheck: "bun run embed-all && bunx tsc --noEmit"
 },
@@ -15889,7 +15889,7 @@ var Result = import_lib.default.Result;
 var TypeOverrides = import_lib.default.TypeOverrides;
 var defaults = import_lib.default.defaults;
 // package.json
-var version = "0.14.0-dev.81";
+var version = "0.14.0-dev.82";
 var package_default2 = {
   name: "postgresai",
   version,
@@ -15926,7 +15926,7 @@ var package_default2 = {
   "start:node": "node ./dist/bin/postgres-ai.js --help",
   dev: "bun run embed-all && bun --watch ./bin/postgres-ai.ts",
   test: "bun run embed-all && bun test",
-  "test:fast": "bun run embed-all && bun test",
+  "test:fast": "bun run embed-all && bun test --coverage=false",
   "test:coverage": "bun run embed-all && bun test --coverage && echo 'Coverage report: cli/coverage/lcov-report/index.html'",
   typecheck: "bun run embed-all && bunx tsc --noEmit"
 },
@@ -27103,7 +27103,7 @@ var CHECKUP_DICTIONARY_DATA = [
 ];

 // lib/checkup-dictionary.ts
-var dictionaryByCode = new Map(CHECKUP_DICTIONARY_DATA.map((entry) => [entry.code, entry]));
+var dictionaryByCode = new Map(CHECKUP_DICTIONARY_DATA.map((entry) => [entry.code.toUpperCase(), entry]));
 function buildCheckInfoMap() {
   const result = {};
   for (const entry of CHECKUP_DICTIONARY_DATA) {
@@ -27789,6 +27789,218 @@ async function generateF001(client, nodeName) {
   };
   return report;
 }
+async function generateF004(client, nodeName) {
+  const report = createBaseReport("F004", "Autovacuum: heap bloat estimate", nodeName);
+  const postgresVersion = await getPostgresVersion(client);
+  let bloatData = [];
+  let bloatError = null;
+  try {
+    const result = await client.query(`
+      select
+        schemaname,
+        tblname,
+        (bs * tblpages)::bigint as real_size_bytes,
+        pg_size_pretty((bs * tblpages)::bigint) as real_size_pretty,
+        ((tblpages - est_tblpages) * bs)::bigint as extra_size_bytes,
+        pg_size_pretty(((tblpages - est_tblpages) * bs)::bigint) as extra_size_pretty,
+        case when tblpages > 0 and tblpages - est_tblpages > 0
+          then round(100.0 * (tblpages - est_tblpages) / tblpages, 2)
+          else 0
+        end as extra_pct,
+        fillfactor,
+        case when tblpages - est_tblpages_ff > 0
+          then ((tblpages - est_tblpages_ff) * bs)::bigint
+          else 0
+        end as bloat_size_bytes,
+        pg_size_pretty(case when tblpages - est_tblpages_ff > 0
+          then ((tblpages - est_tblpages_ff) * bs)::bigint
+          else 0
+        end) as bloat_size_pretty,
+        case when tblpages > 0 and tblpages - est_tblpages_ff > 0
+          then round(100.0 * (tblpages - est_tblpages_ff) / tblpages, 2)
+          else 0
+        end as bloat_pct,
+        is_na::text
+      from (
+        select
+          ceil(reltuples / ((bs - page_hdr) / tpl_size)) + ceil(toasttuples / 4) as est_tblpages,
+          ceil(reltuples / ((bs - page_hdr) * fillfactor / (tpl_size * 100))) + ceil(toasttuples / 4) as est_tblpages_ff,
+          tblpages, fillfactor, bs, schemaname, tblname, is_na
+        from (
+          select
+            (4 + tpl_hdr_size + tpl_data_size + (2 * ma)
+              - case when tpl_hdr_size % ma = 0 then ma else tpl_hdr_size % ma end
+              - case when ceil(tpl_data_size)::int % ma = 0 then ma else ceil(tpl_data_size)::int % ma end
+            ) as tpl_size,
+            (heappages + toastpages) as tblpages,
+            reltuples, toasttuples, bs, page_hdr, schemaname, tblname, fillfactor, is_na
+          from (
+            select
+              ns.nspname as schemaname,
+              tbl.relname as tblname,
+              tbl.reltuples,
+              tbl.relpages as heappages,
+              coalesce(toast.relpages, 0) as toastpages,
+              coalesce(toast.reltuples, 0) as toasttuples,
+              coalesce(substring(array_to_string(tbl.reloptions, ' ') from 'fillfactor=([0-9]+)')::smallint, 100) as fillfactor,
+              current_setting('block_size')::numeric as bs,
+              case when version() ~ 'mingw32' or version() ~ '64-bit|x86_64|ppc64|ia64|amd64' then 8 else 4 end as ma,
+              24 as page_hdr,
+              23 + case when max(coalesce(s.null_frac, 0)) > 0 then (7 + count(s.attname)) / 8 else 0::int end
+                + case when bool_or(att.attname = 'oid' and att.attnum < 0) then 4 else 0 end as tpl_hdr_size,
+              sum((1 - coalesce(s.null_frac, 0)) * coalesce(s.avg_width, 0)) as tpl_data_size,
+              (bool_or(att.atttypid = 'pg_catalog.name'::regtype)
+                or sum(case when att.attnum > 0 then 1 else 0 end) <> count(s.attname))::int as is_na
+            from pg_attribute as att
+              join pg_class as tbl on att.attrelid = tbl.oid
+              join pg_namespace as ns on ns.oid = tbl.relnamespace
+              left join pg_stats as s on s.schemaname = ns.nspname
+                and s.tablename = tbl.relname and s.attname = att.attname
+              left join pg_class as toast on tbl.reltoastrelid = toast.oid
+            where not att.attisdropped
+              and tbl.relkind in ('r', 'm')
+              and ns.nspname not in ('pg_catalog', 'information_schema', 'pg_toast')
+            group by ns.nspname, tbl.relname, tbl.reltuples, tbl.relpages, toast.relpages, toast.reltuples, tbl.reloptions
+          ) as s
+        ) as s2
+      ) as s3
+      where tblpages > 0 and (bs * tblpages) >= 1024 * 1024 -- exclude tables < 1 MiB
+      order by bloat_size_bytes desc
+      limit 100
+    `);
+    bloatData = result.rows;
+  } catch (err) {
+    const errorMsg = err instanceof Error ? err.message : String(err);
+    console.log(`[F004] Error estimating table bloat: ${errorMsg}`);
+    bloatError = errorMsg;
+  }
+  report.results[nodeName] = {
+    data: {
+      tables: bloatData,
+      ...bloatError && { error: bloatError }
+    },
+    postgres_version: postgresVersion
+  };
+  return report;
+}
+async function generateF005(client, nodeName) {
+  const report = createBaseReport("F005", "Autovacuum: index bloat estimate", nodeName);
+  const postgresVersion = await getPostgresVersion(client);
+  let bloatData = [];
+  let bloatError = null;
+  try {
+    const result = await client.query(`
+      select
+        nspname as schemaname,
+        tblname,
+        idxname,
+        (bs * relpages)::bigint as real_size_bytes,
+        pg_size_pretty((bs * relpages)::bigint) as real_size_pretty,
+        pg_relation_size(tbloid)::bigint as table_size_bytes,
+        pg_size_pretty(pg_relation_size(tbloid)) as table_size_pretty,
+        ((relpages - est_pages) * bs)::bigint as extra_size_bytes,
+        pg_size_pretty(((relpages - est_pages) * bs)::bigint) as extra_size_pretty,
+        round(100.0 * (relpages - est_pages) / relpages, 2) as extra_pct,
+        fillfactor,
+        case when relpages > est_pages_ff
+          then ((relpages - est_pages_ff) * bs)::bigint
+          else 0
+        end as bloat_size_bytes,
+        pg_size_pretty(case when relpages > est_pages_ff
+          then ((relpages - est_pages_ff) * bs)::bigint
+          else 0
+        end) as bloat_size_pretty,
+        case when relpages > est_pages_ff
+          then round(100.0 * (relpages - est_pages_ff) / relpages, 2)
+          else 0
+        end as bloat_pct,
+        is_na::text
+      from (
+        select
+          coalesce(1 + ceil(reltuples / floor((bs - pageopqdata - pagehdr) / (4 + nulldatahdrwidth)::float)), 0) as est_pages,
+          coalesce(1 + ceil(reltuples / floor((bs - pageopqdata - pagehdr) * fillfactor / (100 * (4 + nulldatahdrwidth)::float))), 0) as est_pages_ff,
+          bs, nspname, tblname, idxname, relpages, fillfactor, is_na, tbloid
+        from (
+          select
+            maxalign, bs, nspname, tblname, idxname, reltuples, relpages, idxoid, fillfactor, tbloid,
+            (index_tuple_hdr_bm + maxalign
+              - case when index_tuple_hdr_bm % maxalign = 0 then maxalign else index_tuple_hdr_bm % maxalign end
+              + nulldatawidth + maxalign
+              - case when nulldatawidth = 0 then 0
+                  when nulldatawidth::integer % maxalign = 0 then maxalign
+                  else nulldatawidth::integer % maxalign end
+            )::numeric as nulldatahdrwidth,
+            pagehdr, pageopqdata, is_na
+          from (
+            select
+              n.nspname, i.tblname, i.idxname, i.reltuples, i.relpages, i.tbloid, i.idxoid, i.fillfactor,
+              current_setting('block_size')::numeric as bs,
+              case when version() ~ 'mingw32' or version() ~ '64-bit|x86_64|ppc64|ia64|amd64' then 8 else 4 end as maxalign,
+              24 as pagehdr,
+              16 as pageopqdata,
+              case when max(coalesce(s.null_frac, 0)) = 0
+                then 8
+                else 8 + ((32 + 8 - 1) / 8)
+              end as index_tuple_hdr_bm,
+              sum((1 - coalesce(s.null_frac, 0)) * coalesce(s.avg_width, 1024)) as nulldatawidth,
+              (max(case when i.atttypid = 'pg_catalog.name'::regtype then 1 else 0 end) > 0)::int as is_na
+            from (
+              select
+                ct.relname as tblname, ct.relnamespace, ic.idxname, ic.attpos, ic.indkey,
+                ic.indkey[ic.attpos], ic.reltuples, ic.relpages, ic.tbloid, ic.idxoid, ic.fillfactor,
+                coalesce(a1.attnum, a2.attnum) as attnum,
+                coalesce(a1.attname, a2.attname) as attname,
+                coalesce(a1.atttypid, a2.atttypid) as atttypid,
+                case when a1.attnum is null then ic.idxname else ct.relname end as attrelname
+              from (
+                select
+                  idxname, reltuples, relpages, tbloid, idxoid, fillfactor,
+                  indkey, generate_subscripts(indkey, 1) as attpos
+                from (
+                  select
+                    ci.relname as idxname, ci.reltuples, ci.relpages, i.indrelid as tbloid,
+                    i.indexrelid as idxoid,
+                    coalesce(substring(array_to_string(ci.reloptions, ' ') from 'fillfactor=([0-9]+)')::smallint, 90) as fillfactor,
+                    i.indkey
+                  from pg_index as i
+                    join pg_class as ci on ci.oid = i.indexrelid
+                    join pg_namespace as ns on ns.oid = ci.relnamespace
+                    join pg_am as am on am.oid = ci.relam
+                  where am.amname = 'btree'
+                    and ci.relpages > 0
+                    and ns.nspname not in ('pg_catalog', 'information_schema', 'pg_toast')
+                ) as idx_data
+              ) as ic
+                join pg_class as ct on ct.oid = ic.tbloid
+                left join pg_attribute as a1 on a1.attrelid = ic.idxoid and a1.attnum = ic.indkey[ic.attpos]
+                left join pg_attribute as a2 on a2.attrelid = ic.tbloid and a2.attnum = ic.indkey[ic.attpos]
+            ) as i
+              join pg_namespace as n on n.oid = i.relnamespace
+              left join pg_stats as s on s.schemaname = n.nspname
+                and s.tablename = i.attrelname and s.attname = i.attname
+            group by n.nspname, i.tblname, i.idxname, i.reltuples, i.relpages, i.tbloid, i.idxoid, i.fillfactor
+          ) as rows_data_stats
+        ) as rows_hdr_pdg_stats
+      ) as relation_stats
+      where relpages > 0 and (bs * relpages) >= 1024 * 1024 -- exclude indexes < 1 MiB
+      order by bloat_size_bytes desc
+      limit 100
+    `);
+    bloatData = result.rows;
+  } catch (err) {
+    const errorMsg = err instanceof Error ? err.message : String(err);
+    console.log(`[F005] Error estimating index bloat: ${errorMsg}`);
+    bloatError = errorMsg;
+  }
+  report.results[nodeName] = {
+    data: {
+      indexes: bloatData,
+      ...bloatError && { error: bloatError }
+    },
+    postgres_version: postgresVersion
+  };
+  return report;
+}
 async function generateG001(client, nodeName) {
   const report = createBaseReport("G001", "Memory-related settings", nodeName);
   const postgresVersion = await getPostgresVersion(client);
@@ -27939,6 +28151,8 @@ var REPORT_GENERATORS = {
   D001: generateD001,
   D004: generateD004,
   F001: generateF001,
+  F004: generateF004,
+  F005: generateF005,
   G001: generateG001,
   G003: generateG003,
   H001: generateH001,
@@ -27973,7 +28187,7 @@ async function generateAllReports(client, nodeName = "node-01", onProgress) {
 }

 // lib/checkup-dictionary.ts
-var dictionaryByCode2 = new Map(CHECKUP_DICTIONARY_DATA.map((entry) => [entry.code, entry]));
+var dictionaryByCode2 = new Map(CHECKUP_DICTIONARY_DATA.map((entry) => [entry.code.toUpperCase(), entry]));
 function getCheckupEntry(code) {
   return dictionaryByCode2.get(code.toUpperCase()) ?? null;
 }
package/lib/checkup-dictionary.ts
CHANGED
@@ -32,9 +32,10 @@ export interface CheckupDictionaryEntry {
 /**
  * Module-level cache for O(1) lookups by code.
  * Initialized at module load time from embedded data.
+ * Keys are normalized to uppercase for case-insensitive lookups.
  */
 const dictionaryByCode: Map<string, CheckupDictionaryEntry> = new Map(
-  CHECKUP_DICTIONARY_DATA.map((entry) => [entry.code, entry])
+  CHECKUP_DICTIONARY_DATA.map((entry) => [entry.code.toUpperCase(), entry])
 );

 /**
@@ -49,7 +50,7 @@ export function getAllCheckupEntries(): CheckupDictionaryEntry[] {
 /**
  * Get a checkup dictionary entry by its code.
  *
- * @param code - The check code (e.g., "A001", "H002")
+ * @param code - The check code (e.g., "A001", "H002"). Lookup is case-insensitive.
  * @returns The dictionary entry or null if not found
  */
 export function getCheckupEntry(code: string): CheckupDictionaryEntry | null {
package/lib/checkup.ts
CHANGED
@@ -1242,6 +1242,272 @@ async function generateF001(client: Client, nodeName: string): Promise<Report> {
   return report;
 }

+/**
+ * Generate F004 report - Autovacuum: heap bloat estimate
+ *
+ * Estimates table bloat based on statistical analysis of table pages vs expected pages.
+ * Uses pg_stats for column statistics to estimate row sizes.
+ */
+async function generateF004(client: Client, nodeName: string): Promise<Report> {
+  const report = createBaseReport("F004", "Autovacuum: heap bloat estimate", nodeName);
+  const postgresVersion = await getPostgresVersion(client);
+
+  interface TableBloatRow {
+    schemaname: string;
+    tblname: string;
+    real_size_bytes: string;
+    real_size_pretty: string;
+    extra_size_bytes: string;
+    extra_size_pretty: string;
+    extra_pct: string;
+    fillfactor: string;
+    bloat_size_bytes: string;
+    bloat_size_pretty: string;
+    bloat_pct: string;
+    is_na: string;
+  }
+
+  let bloatData: TableBloatRow[] = [];
+  let bloatError: string | null = null;
+
+  try {
+    const result = await client.query<TableBloatRow>(`
+      select
+        schemaname,
+        tblname,
+        (bs * tblpages)::bigint as real_size_bytes,
+        pg_size_pretty((bs * tblpages)::bigint) as real_size_pretty,
+        ((tblpages - est_tblpages) * bs)::bigint as extra_size_bytes,
+        pg_size_pretty(((tblpages - est_tblpages) * bs)::bigint) as extra_size_pretty,
+        case when tblpages > 0 and tblpages - est_tblpages > 0
+          then round(100.0 * (tblpages - est_tblpages) / tblpages, 2)
+          else 0
+        end as extra_pct,
+        fillfactor,
+        case when tblpages - est_tblpages_ff > 0
+          then ((tblpages - est_tblpages_ff) * bs)::bigint
+          else 0
+        end as bloat_size_bytes,
+        pg_size_pretty(case when tblpages - est_tblpages_ff > 0
+          then ((tblpages - est_tblpages_ff) * bs)::bigint
+          else 0
+        end) as bloat_size_pretty,
+        case when tblpages > 0 and tblpages - est_tblpages_ff > 0
+          then round(100.0 * (tblpages - est_tblpages_ff) / tblpages, 2)
+          else 0
+        end as bloat_pct,
+        is_na::text
+      from (
+        select
+          ceil(reltuples / ((bs - page_hdr) / tpl_size)) + ceil(toasttuples / 4) as est_tblpages,
+          ceil(reltuples / ((bs - page_hdr) * fillfactor / (tpl_size * 100))) + ceil(toasttuples / 4) as est_tblpages_ff,
+          tblpages, fillfactor, bs, schemaname, tblname, is_na
+        from (
+          select
+            (4 + tpl_hdr_size + tpl_data_size + (2 * ma)
+              - case when tpl_hdr_size % ma = 0 then ma else tpl_hdr_size % ma end
+              - case when ceil(tpl_data_size)::int % ma = 0 then ma else ceil(tpl_data_size)::int % ma end
+            ) as tpl_size,
+            (heappages + toastpages) as tblpages,
+            reltuples, toasttuples, bs, page_hdr, schemaname, tblname, fillfactor, is_na
+          from (
+            select
+              ns.nspname as schemaname,
+              tbl.relname as tblname,
+              tbl.reltuples,
+              tbl.relpages as heappages,
+              coalesce(toast.relpages, 0) as toastpages,
+              coalesce(toast.reltuples, 0) as toasttuples,
+              coalesce(substring(array_to_string(tbl.reloptions, ' ') from 'fillfactor=([0-9]+)')::smallint, 100) as fillfactor,
+              current_setting('block_size')::numeric as bs,
+              case when version() ~ 'mingw32' or version() ~ '64-bit|x86_64|ppc64|ia64|amd64' then 8 else 4 end as ma,
+              24 as page_hdr,
+              23 + case when max(coalesce(s.null_frac, 0)) > 0 then (7 + count(s.attname)) / 8 else 0::int end
+                + case when bool_or(att.attname = 'oid' and att.attnum < 0) then 4 else 0 end as tpl_hdr_size,
+              sum((1 - coalesce(s.null_frac, 0)) * coalesce(s.avg_width, 0)) as tpl_data_size,
+              (bool_or(att.atttypid = 'pg_catalog.name'::regtype)
+                or sum(case when att.attnum > 0 then 1 else 0 end) <> count(s.attname))::int as is_na
+            from pg_attribute as att
+              join pg_class as tbl on att.attrelid = tbl.oid
+              join pg_namespace as ns on ns.oid = tbl.relnamespace
+              left join pg_stats as s on s.schemaname = ns.nspname
+                and s.tablename = tbl.relname and s.attname = att.attname
+              left join pg_class as toast on tbl.reltoastrelid = toast.oid
+            where not att.attisdropped
+              and tbl.relkind in ('r', 'm')
+              and ns.nspname not in ('pg_catalog', 'information_schema', 'pg_toast')
+            group by ns.nspname, tbl.relname, tbl.reltuples, tbl.relpages, toast.relpages, toast.reltuples, tbl.reloptions
+          ) as s
+        ) as s2
+      ) as s3
+      where tblpages > 0 and (bs * tblpages) >= 1024 * 1024 -- exclude tables < 1 MiB
+      order by bloat_size_bytes desc
+      limit 100
+    `);
+    bloatData = result.rows;
+  } catch (err) {
+    const errorMsg = err instanceof Error ? err.message : String(err);
+    console.log(`[F004] Error estimating table bloat: ${errorMsg}`);
+    bloatError = errorMsg;
+  }
+
+  report.results[nodeName] = {
+    data: {
+      tables: bloatData,
+      ...(bloatError && { error: bloatError }),
+    },
+    postgres_version: postgresVersion,
+  };
+
+  return report;
+}
+
+/**
+ * Generate F005 report - Autovacuum: index bloat estimate
+ *
+ * Estimates B-tree index bloat based on statistical analysis of index pages vs expected pages.
+ */
+async function generateF005(client: Client, nodeName: string): Promise<Report> {
+  const report = createBaseReport("F005", "Autovacuum: index bloat estimate", nodeName);
+  const postgresVersion = await getPostgresVersion(client);
+
+  interface IndexBloatRow {
+    schemaname: string;
+    tblname: string;
+    idxname: string;
+    real_size_bytes: string;
+    real_size_pretty: string;
+    table_size_bytes: string;
+    table_size_pretty: string;
+    extra_size_bytes: string;
+    extra_size_pretty: string;
+    extra_pct: string;
+    fillfactor: string;
+    bloat_size_bytes: string;
+    bloat_size_pretty: string;
+    bloat_pct: string;
+    is_na: string;
+  }
+
+  let bloatData: IndexBloatRow[] = [];
+  let bloatError: string | null = null;
+
+  try {
+    const result = await client.query<IndexBloatRow>(`
+      select
+        nspname as schemaname,
+        tblname,
+        idxname,
+        (bs * relpages)::bigint as real_size_bytes,
+        pg_size_pretty((bs * relpages)::bigint) as real_size_pretty,
+        pg_relation_size(tbloid)::bigint as table_size_bytes,
+        pg_size_pretty(pg_relation_size(tbloid)) as table_size_pretty,
+        ((relpages - est_pages) * bs)::bigint as extra_size_bytes,
+        pg_size_pretty(((relpages - est_pages) * bs)::bigint) as extra_size_pretty,
+        round(100.0 * (relpages - est_pages) / relpages, 2) as extra_pct,
+        fillfactor,
+        case when relpages > est_pages_ff
+          then ((relpages - est_pages_ff) * bs)::bigint
+          else 0
+        end as bloat_size_bytes,
+        pg_size_pretty(case when relpages > est_pages_ff
+          then ((relpages - est_pages_ff) * bs)::bigint
+          else 0
+        end) as bloat_size_pretty,
+        case when relpages > est_pages_ff
+          then round(100.0 * (relpages - est_pages_ff) / relpages, 2)
+          else 0
+        end as bloat_pct,
+        is_na::text
+      from (
+        select
+          coalesce(1 + ceil(reltuples / floor((bs - pageopqdata - pagehdr) / (4 + nulldatahdrwidth)::float)), 0) as est_pages,
+          coalesce(1 + ceil(reltuples / floor((bs - pageopqdata - pagehdr) * fillfactor / (100 * (4 + nulldatahdrwidth)::float))), 0) as est_pages_ff,
+          bs, nspname, tblname, idxname, relpages, fillfactor, is_na, tbloid
+        from (
+          select
+            maxalign, bs, nspname, tblname, idxname, reltuples, relpages, idxoid, fillfactor, tbloid,
+            (index_tuple_hdr_bm + maxalign
+              - case when index_tuple_hdr_bm % maxalign = 0 then maxalign else index_tuple_hdr_bm % maxalign end
+              + nulldatawidth + maxalign
+              - case when nulldatawidth = 0 then 0
+                  when nulldatawidth::integer % maxalign = 0 then maxalign
+                  else nulldatawidth::integer % maxalign end
+            )::numeric as nulldatahdrwidth,
+            pagehdr, pageopqdata, is_na
+          from (
+            select
+              n.nspname, i.tblname, i.idxname, i.reltuples, i.relpages, i.tbloid, i.idxoid, i.fillfactor,
+              current_setting('block_size')::numeric as bs,
+              case when version() ~ 'mingw32' or version() ~ '64-bit|x86_64|ppc64|ia64|amd64' then 8 else 4 end as maxalign,
+              24 as pagehdr,
+              16 as pageopqdata,
+              case when max(coalesce(s.null_frac, 0)) = 0
+                then 8
+                else 8 + ((32 + 8 - 1) / 8)
+              end as index_tuple_hdr_bm,
+              sum((1 - coalesce(s.null_frac, 0)) * coalesce(s.avg_width, 1024)) as nulldatawidth,
+              (max(case when i.atttypid = 'pg_catalog.name'::regtype then 1 else 0 end) > 0)::int as is_na
+            from (
+              select
+                ct.relname as tblname, ct.relnamespace, ic.idxname, ic.attpos, ic.indkey,
+                ic.indkey[ic.attpos], ic.reltuples, ic.relpages, ic.tbloid, ic.idxoid, ic.fillfactor,
+                coalesce(a1.attnum, a2.attnum) as attnum,
+                coalesce(a1.attname, a2.attname) as attname,
+                coalesce(a1.atttypid, a2.atttypid) as atttypid,
+                case when a1.attnum is null then ic.idxname else ct.relname end as attrelname
+              from (
+                select
+                  idxname, reltuples, relpages, tbloid, idxoid, fillfactor,
+                  indkey, generate_subscripts(indkey, 1) as attpos
+                from (
+                  select
+                    ci.relname as idxname, ci.reltuples, ci.relpages, i.indrelid as tbloid,
+                    i.indexrelid as idxoid,
+                    coalesce(substring(array_to_string(ci.reloptions, ' ') from 'fillfactor=([0-9]+)')::smallint, 90) as fillfactor,
+                    i.indkey
+                  from pg_index as i
+                    join pg_class as ci on ci.oid = i.indexrelid
+                    join pg_namespace as ns on ns.oid = ci.relnamespace
+                    join pg_am as am on am.oid = ci.relam
+                  where am.amname = 'btree'
+                    and ci.relpages > 0
+                    and ns.nspname not in ('pg_catalog', 'information_schema', 'pg_toast')
+                ) as idx_data
+              ) as ic
+                join pg_class as ct on ct.oid = ic.tbloid
+                left join pg_attribute as a1 on a1.attrelid = ic.idxoid and a1.attnum = ic.indkey[ic.attpos]
+                left join pg_attribute as a2 on a2.attrelid = ic.tbloid and a2.attnum = ic.indkey[ic.attpos]
+            ) as i
+              join pg_namespace as n on n.oid = i.relnamespace
+              left join pg_stats as s on s.schemaname = n.nspname
+                and s.tablename = i.attrelname and s.attname = i.attname
+            group by n.nspname, i.tblname, i.idxname, i.reltuples, i.relpages, i.tbloid, i.idxoid, i.fillfactor
+          ) as rows_data_stats
+        ) as rows_hdr_pdg_stats
+      ) as relation_stats
+      where relpages > 0 and (bs * relpages) >= 1024 * 1024 -- exclude indexes < 1 MiB
+      order by bloat_size_bytes desc
+      limit 100
+    `);
+    bloatData = result.rows;
+  } catch (err) {
+    const errorMsg = err instanceof Error ? err.message : String(err);
+    console.log(`[F005] Error estimating index bloat: ${errorMsg}`);
+    bloatError = errorMsg;
+  }
+
+  report.results[nodeName] = {
+    data: {
+      indexes: bloatData,
+      ...(bloatError && { error: bloatError }),
+    },
+    postgres_version: postgresVersion,
+  };
+
+  return report;
+}
+
 /**
  * Generate G001 report - Memory-related settings
  */
@@ -1446,6 +1712,8 @@ export const REPORT_GENERATORS: Record<string, (client: Client, nodeName: string
   D001: generateD001,
   D004: generateD004,
   F001: generateF001,
+  F004: generateF004,
+  F005: generateF005,
   G001: generateG001,
   G003: generateG003,
   H001: generateH001,
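Since F004 and F005 are now registered in REPORT_GENERATORS alongside the existing checks, they can be driven the same way as the others. A minimal sketch, where the pg import, the relative import path, and the connection string are assumptions and "node-01" mirrors the default node name used by generateAllReports:

import { Client } from "pg";
import { REPORT_GENERATORS } from "./lib/checkup";

async function runBloatChecks(connectionString: string): Promise<void> {
  const client = new Client({ connectionString });
  await client.connect();
  try {
    // F004 = heap bloat estimate, F005 = index bloat estimate.
    for (const code of ["F004", "F005"]) {
      const report = await REPORT_GENERATORS[code](client, "node-01");
      // Each generator stores its rows under results[nodeName].data
      // (tables for F004, indexes for F005); if the estimation query
      // failed, the data object carries an error field instead.
      console.log(code, report.results["node-01"].data);
    }
  } finally {
    await client.end();
  }
}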
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "postgresai",
-  "version": "0.14.0-dev.81",
+  "version": "0.14.0-dev.82",
   "description": "postgres_ai CLI",
   "license": "Apache-2.0",
   "private": false,
@@ -34,7 +34,7 @@
     "start:node": "node ./dist/bin/postgres-ai.js --help",
     "dev": "bun run embed-all && bun --watch ./bin/postgres-ai.ts",
     "test": "bun run embed-all && bun test",
-    "test:fast": "bun run embed-all && bun test",
+    "test:fast": "bun run embed-all && bun test --coverage=false",
     "test:coverage": "bun run embed-all && bun test --coverage && echo 'Coverage report: cli/coverage/lcov-report/index.html'",
     "typecheck": "bun run embed-all && bunx tsc --noEmit"
   },
package/test/checkup.test.ts
CHANGED
@@ -86,7 +86,7 @@ describe("createBaseReport", () => {
 // Tests for CHECK_INFO
 describe("CHECK_INFO and REPORT_GENERATORS", () => {
   // Express-mode checks that have generators
-  const expressCheckIds = ["A002", "A003", "A004", "A007", "A013", "D001", "D004", "F001", "G001", "G003", "H001", "H002", "H004"];
+  const expressCheckIds = ["A002", "A003", "A004", "A007", "A013", "D001", "D004", "F001", "F004", "F005", "G001", "G003", "H001", "H002", "H004"];

   test("CHECK_INFO contains all express-mode checks", () => {
     for (const checkId of expressCheckIds) {