postgresai 0.14.0-dev.80 → 0.14.0-dev.82
This diff compares the contents of publicly released package versions as they appear in their public registries; it is provided for informational purposes only.
- package/dist/bin/postgres-ai.js +279 -6
- package/lib/checkup-dictionary.ts +3 -2
- package/lib/checkup.ts +345 -0
- package/package.json +2 -2
- package/test/checkup.test.ts +1 -1
package/dist/bin/postgres-ai.js
CHANGED
```diff
@@ -13064,7 +13064,7 @@ var {
 // package.json
 var package_default = {
   name: "postgresai",
-  version: "0.14.0-dev.80",
+  version: "0.14.0-dev.82",
   description: "postgres_ai CLI",
   license: "Apache-2.0",
   private: false,
@@ -13098,7 +13098,7 @@ var package_default = {
     "start:node": "node ./dist/bin/postgres-ai.js --help",
     dev: "bun run embed-all && bun --watch ./bin/postgres-ai.ts",
     test: "bun run embed-all && bun test",
-    "test:fast": "bun run embed-all && bun test",
+    "test:fast": "bun run embed-all && bun test --coverage=false",
     "test:coverage": "bun run embed-all && bun test --coverage && echo 'Coverage report: cli/coverage/lcov-report/index.html'",
     typecheck: "bun run embed-all && bunx tsc --noEmit"
   },
@@ -15889,7 +15889,7 @@ var Result = import_lib.default.Result;
 var TypeOverrides = import_lib.default.TypeOverrides;
 var defaults = import_lib.default.defaults;
 // package.json
-var version = "0.14.0-dev.80";
+var version = "0.14.0-dev.82";
 var package_default2 = {
   name: "postgresai",
   version,
@@ -15926,7 +15926,7 @@ var package_default2 = {
     "start:node": "node ./dist/bin/postgres-ai.js --help",
     dev: "bun run embed-all && bun --watch ./bin/postgres-ai.ts",
     test: "bun run embed-all && bun test",
-    "test:fast": "bun run embed-all && bun test",
+    "test:fast": "bun run embed-all && bun test --coverage=false",
     "test:coverage": "bun run embed-all && bun test --coverage && echo 'Coverage report: cli/coverage/lcov-report/index.html'",
     typecheck: "bun run embed-all && bunx tsc --noEmit"
   },
@@ -27103,7 +27103,7 @@ var CHECKUP_DICTIONARY_DATA = [
 ];
 
 // lib/checkup-dictionary.ts
-var dictionaryByCode = new Map(CHECKUP_DICTIONARY_DATA.map((entry) => [entry.code, entry]));
+var dictionaryByCode = new Map(CHECKUP_DICTIONARY_DATA.map((entry) => [entry.code.toUpperCase(), entry]));
 function buildCheckInfoMap() {
   const result = {};
   for (const entry of CHECKUP_DICTIONARY_DATA) {
@@ -27789,6 +27789,218 @@ async function generateF001(client, nodeName) {
   };
   return report;
 }
+async function generateF004(client, nodeName) {
+  const report = createBaseReport("F004", "Autovacuum: heap bloat estimate", nodeName);
+  const postgresVersion = await getPostgresVersion(client);
+  let bloatData = [];
+  let bloatError = null;
+  try {
+    const result = await client.query(`
+      select
+        schemaname,
+        tblname,
+        (bs * tblpages)::bigint as real_size_bytes,
+        pg_size_pretty((bs * tblpages)::bigint) as real_size_pretty,
+        ((tblpages - est_tblpages) * bs)::bigint as extra_size_bytes,
+        pg_size_pretty(((tblpages - est_tblpages) * bs)::bigint) as extra_size_pretty,
+        case when tblpages > 0 and tblpages - est_tblpages > 0
+          then round(100.0 * (tblpages - est_tblpages) / tblpages, 2)
+          else 0
+        end as extra_pct,
+        fillfactor,
+        case when tblpages - est_tblpages_ff > 0
+          then ((tblpages - est_tblpages_ff) * bs)::bigint
+          else 0
+        end as bloat_size_bytes,
+        pg_size_pretty(case when tblpages - est_tblpages_ff > 0
+          then ((tblpages - est_tblpages_ff) * bs)::bigint
+          else 0
+        end) as bloat_size_pretty,
+        case when tblpages > 0 and tblpages - est_tblpages_ff > 0
+          then round(100.0 * (tblpages - est_tblpages_ff) / tblpages, 2)
+          else 0
+        end as bloat_pct,
+        is_na::text
+      from (
+        select
+          ceil(reltuples / ((bs - page_hdr) / tpl_size)) + ceil(toasttuples / 4) as est_tblpages,
+          ceil(reltuples / ((bs - page_hdr) * fillfactor / (tpl_size * 100))) + ceil(toasttuples / 4) as est_tblpages_ff,
+          tblpages, fillfactor, bs, schemaname, tblname, is_na
+        from (
+          select
+            (4 + tpl_hdr_size + tpl_data_size + (2 * ma)
+              - case when tpl_hdr_size % ma = 0 then ma else tpl_hdr_size % ma end
+              - case when ceil(tpl_data_size)::int % ma = 0 then ma else ceil(tpl_data_size)::int % ma end
+            ) as tpl_size,
+            (heappages + toastpages) as tblpages,
+            reltuples, toasttuples, bs, page_hdr, schemaname, tblname, fillfactor, is_na
+          from (
+            select
+              ns.nspname as schemaname,
+              tbl.relname as tblname,
+              tbl.reltuples,
+              tbl.relpages as heappages,
+              coalesce(toast.relpages, 0) as toastpages,
+              coalesce(toast.reltuples, 0) as toasttuples,
+              coalesce(substring(array_to_string(tbl.reloptions, ' ') from 'fillfactor=([0-9]+)')::smallint, 100) as fillfactor,
+              current_setting('block_size')::numeric as bs,
+              case when version() ~ 'mingw32' or version() ~ '64-bit|x86_64|ppc64|ia64|amd64' then 8 else 4 end as ma,
+              24 as page_hdr,
+              23 + case when max(coalesce(s.null_frac, 0)) > 0 then (7 + count(s.attname)) / 8 else 0::int end
+                + case when bool_or(att.attname = 'oid' and att.attnum < 0) then 4 else 0 end as tpl_hdr_size,
+              sum((1 - coalesce(s.null_frac, 0)) * coalesce(s.avg_width, 0)) as tpl_data_size,
+              (bool_or(att.atttypid = 'pg_catalog.name'::regtype)
+                or sum(case when att.attnum > 0 then 1 else 0 end) <> count(s.attname))::int as is_na
+            from pg_attribute as att
+            join pg_class as tbl on att.attrelid = tbl.oid
+            join pg_namespace as ns on ns.oid = tbl.relnamespace
+            left join pg_stats as s on s.schemaname = ns.nspname
+              and s.tablename = tbl.relname and s.attname = att.attname
+            left join pg_class as toast on tbl.reltoastrelid = toast.oid
+            where not att.attisdropped
+              and tbl.relkind in ('r', 'm')
+              and ns.nspname not in ('pg_catalog', 'information_schema', 'pg_toast')
+            group by ns.nspname, tbl.relname, tbl.reltuples, tbl.relpages, toast.relpages, toast.reltuples, tbl.reloptions
+          ) as s
+        ) as s2
+      ) as s3
+      where tblpages > 0 and (bs * tblpages) >= 1024 * 1024 -- exclude tables < 1 MiB
+      order by bloat_size_bytes desc
+      limit 100
+    `);
+    bloatData = result.rows;
+  } catch (err) {
+    const errorMsg = err instanceof Error ? err.message : String(err);
+    console.log(`[F004] Error estimating table bloat: ${errorMsg}`);
+    bloatError = errorMsg;
+  }
+  report.results[nodeName] = {
+    data: {
+      tables: bloatData,
+      ...bloatError && { error: bloatError }
+    },
+    postgres_version: postgresVersion
+  };
+  return report;
+}
+async function generateF005(client, nodeName) {
+  const report = createBaseReport("F005", "Autovacuum: index bloat estimate", nodeName);
+  const postgresVersion = await getPostgresVersion(client);
+  let bloatData = [];
+  let bloatError = null;
+  try {
+    const result = await client.query(`
+      select
+        nspname as schemaname,
+        tblname,
+        idxname,
+        (bs * relpages)::bigint as real_size_bytes,
+        pg_size_pretty((bs * relpages)::bigint) as real_size_pretty,
+        pg_relation_size(tbloid)::bigint as table_size_bytes,
+        pg_size_pretty(pg_relation_size(tbloid)) as table_size_pretty,
+        ((relpages - est_pages) * bs)::bigint as extra_size_bytes,
+        pg_size_pretty(((relpages - est_pages) * bs)::bigint) as extra_size_pretty,
+        round(100.0 * (relpages - est_pages) / relpages, 2) as extra_pct,
+        fillfactor,
+        case when relpages > est_pages_ff
+          then ((relpages - est_pages_ff) * bs)::bigint
+          else 0
+        end as bloat_size_bytes,
+        pg_size_pretty(case when relpages > est_pages_ff
+          then ((relpages - est_pages_ff) * bs)::bigint
+          else 0
+        end) as bloat_size_pretty,
+        case when relpages > est_pages_ff
+          then round(100.0 * (relpages - est_pages_ff) / relpages, 2)
+          else 0
+        end as bloat_pct,
+        is_na::text
+      from (
+        select
+          coalesce(1 + ceil(reltuples / floor((bs - pageopqdata - pagehdr) / (4 + nulldatahdrwidth)::float)), 0) as est_pages,
+          coalesce(1 + ceil(reltuples / floor((bs - pageopqdata - pagehdr) * fillfactor / (100 * (4 + nulldatahdrwidth)::float))), 0) as est_pages_ff,
+          bs, nspname, tblname, idxname, relpages, fillfactor, is_na, tbloid
+        from (
+          select
+            maxalign, bs, nspname, tblname, idxname, reltuples, relpages, idxoid, fillfactor, tbloid,
+            (index_tuple_hdr_bm + maxalign
+              - case when index_tuple_hdr_bm % maxalign = 0 then maxalign else index_tuple_hdr_bm % maxalign end
+              + nulldatawidth + maxalign
+              - case when nulldatawidth = 0 then 0
+                when nulldatawidth::integer % maxalign = 0 then maxalign
+                else nulldatawidth::integer % maxalign end
+            )::numeric as nulldatahdrwidth,
+            pagehdr, pageopqdata, is_na
+          from (
+            select
+              n.nspname, i.tblname, i.idxname, i.reltuples, i.relpages, i.tbloid, i.idxoid, i.fillfactor,
+              current_setting('block_size')::numeric as bs,
+              case when version() ~ 'mingw32' or version() ~ '64-bit|x86_64|ppc64|ia64|amd64' then 8 else 4 end as maxalign,
+              24 as pagehdr,
+              16 as pageopqdata,
+              case when max(coalesce(s.null_frac, 0)) = 0
+                then 8
+                else 8 + ((32 + 8 - 1) / 8)
+              end as index_tuple_hdr_bm,
+              sum((1 - coalesce(s.null_frac, 0)) * coalesce(s.avg_width, 1024)) as nulldatawidth,
+              (max(case when i.atttypid = 'pg_catalog.name'::regtype then 1 else 0 end) > 0)::int as is_na
+            from (
+              select
+                ct.relname as tblname, ct.relnamespace, ic.idxname, ic.attpos, ic.indkey,
+                ic.indkey[ic.attpos], ic.reltuples, ic.relpages, ic.tbloid, ic.idxoid, ic.fillfactor,
+                coalesce(a1.attnum, a2.attnum) as attnum,
+                coalesce(a1.attname, a2.attname) as attname,
+                coalesce(a1.atttypid, a2.atttypid) as atttypid,
+                case when a1.attnum is null then ic.idxname else ct.relname end as attrelname
+              from (
+                select
+                  idxname, reltuples, relpages, tbloid, idxoid, fillfactor,
+                  indkey, generate_subscripts(indkey, 1) as attpos
+                from (
+                  select
+                    ci.relname as idxname, ci.reltuples, ci.relpages, i.indrelid as tbloid,
+                    i.indexrelid as idxoid,
+                    coalesce(substring(array_to_string(ci.reloptions, ' ') from 'fillfactor=([0-9]+)')::smallint, 90) as fillfactor,
+                    i.indkey
+                  from pg_index as i
+                  join pg_class as ci on ci.oid = i.indexrelid
+                  join pg_namespace as ns on ns.oid = ci.relnamespace
+                  join pg_am as am on am.oid = ci.relam
+                  where am.amname = 'btree'
+                    and ci.relpages > 0
+                    and ns.nspname not in ('pg_catalog', 'information_schema', 'pg_toast')
+                ) as idx_data
+              ) as ic
+              join pg_class as ct on ct.oid = ic.tbloid
+              left join pg_attribute as a1 on a1.attrelid = ic.idxoid and a1.attnum = ic.indkey[ic.attpos]
+              left join pg_attribute as a2 on a2.attrelid = ic.tbloid and a2.attnum = ic.indkey[ic.attpos]
+            ) as i
+            join pg_namespace as n on n.oid = i.relnamespace
+            left join pg_stats as s on s.schemaname = n.nspname
+              and s.tablename = i.attrelname and s.attname = i.attname
+            group by n.nspname, i.tblname, i.idxname, i.reltuples, i.relpages, i.tbloid, i.idxoid, i.fillfactor
+          ) as rows_data_stats
+        ) as rows_hdr_pdg_stats
+      ) as relation_stats
+      where relpages > 0 and (bs * relpages) >= 1024 * 1024 -- exclude indexes < 1 MiB
+      order by bloat_size_bytes desc
+      limit 100
+    `);
+    bloatData = result.rows;
+  } catch (err) {
+    const errorMsg = err instanceof Error ? err.message : String(err);
+    console.log(`[F005] Error estimating index bloat: ${errorMsg}`);
+    bloatError = errorMsg;
+  }
+  report.results[nodeName] = {
+    data: {
+      indexes: bloatData,
+      ...bloatError && { error: bloatError }
+    },
+    postgres_version: postgresVersion
+  };
+  return report;
+}
 async function generateG001(client, nodeName) {
   const report = createBaseReport("G001", "Memory-related settings", nodeName);
   const postgresVersion = await getPostgresVersion(client);
@@ -27872,6 +28084,64 @@ async function generateG001(client, nodeName) {
   };
   return report;
 }
+async function generateG003(client, nodeName) {
+  const report = createBaseReport("G003", "Timeouts, locks, deadlocks", nodeName);
+  const postgresVersion = await getPostgresVersion(client);
+  const pgMajorVersion = parseInt(postgresVersion.server_major_ver, 10) || 16;
+  const allSettings = await getSettings(client, pgMajorVersion);
+  const lockTimeoutSettingNames = [
+    "lock_timeout",
+    "statement_timeout",
+    "idle_in_transaction_session_timeout",
+    "idle_session_timeout",
+    "deadlock_timeout",
+    "max_locks_per_transaction",
+    "max_pred_locks_per_transaction",
+    "max_pred_locks_per_relation",
+    "max_pred_locks_per_page",
+    "log_lock_waits",
+    "transaction_timeout"
+  ];
+  const lockSettings = {};
+  for (const name of lockTimeoutSettingNames) {
+    if (allSettings[name]) {
+      lockSettings[name] = allSettings[name];
+    }
+  }
+  let deadlockStats = null;
+  let deadlockError = null;
+  try {
+    const statsResult = await client.query(`
+      select
+        coalesce(sum(deadlocks), 0)::bigint as deadlocks,
+        coalesce(sum(conflicts), 0)::bigint as conflicts,
+        min(stats_reset)::text as stats_reset
+      from pg_stat_database
+      where datname = current_database()
+    `);
+    if (statsResult.rows.length > 0) {
+      const row = statsResult.rows[0];
+      deadlockStats = {
+        deadlocks: parseInt(row.deadlocks, 10),
+        conflicts: parseInt(row.conflicts, 10),
+        stats_reset: row.stats_reset || null
+      };
+    }
+  } catch (err) {
+    const errorMsg = err instanceof Error ? err.message : String(err);
+    console.log(`[G003] Error querying deadlock stats: ${errorMsg}`);
+    deadlockError = errorMsg;
+  }
+  report.results[nodeName] = {
+    data: {
+      settings: lockSettings,
+      deadlock_stats: deadlockStats,
+      ...deadlockError && { deadlock_stats_error: deadlockError }
+    },
+    postgres_version: postgresVersion
+  };
+  return report;
+}
 var REPORT_GENERATORS = {
   A002: generateA002,
   A003: generateA003,
@@ -27881,7 +28151,10 @@ var REPORT_GENERATORS = {
   D001: generateD001,
   D004: generateD004,
   F001: generateF001,
+  F004: generateF004,
+  F005: generateF005,
   G001: generateG001,
+  G003: generateG003,
   H001: generateH001,
   H002: generateH002,
   H004: generateH004
@@ -27914,7 +28187,7 @@ async function generateAllReports(client, nodeName = "node-01", onProgress) {
 }
 
 // lib/checkup-dictionary.ts
-var dictionaryByCode2 = new Map(CHECKUP_DICTIONARY_DATA.map((entry) => [entry.code, entry]));
+var dictionaryByCode2 = new Map(CHECKUP_DICTIONARY_DATA.map((entry) => [entry.code.toUpperCase(), entry]));
 function getCheckupEntry(code) {
   return dictionaryByCode2.get(code.toUpperCase()) ?? null;
 }
```
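For orientation, the heap bloat estimate added in `generateF004` compares the pages a table actually occupies against the pages its rows would need given the block size, page header, average tuple width, and fillfactor; `generateF005` does the analogous computation for B-tree indexes. The snippet below is a minimal TypeScript sketch of that arithmetic for a single table, mirroring the formulas in the SQL above; the function name and input shape are illustrative and not part of the package.

```ts
// Hypothetical helper (not part of postgresai): mirrors, for one table, the
// arithmetic the F004 query performs server-side.
interface HeapBloatInput {
  reltuples: number;   // estimated live rows (pg_class.reltuples)
  toasttuples: number; // rows in the TOAST relation, 0 if none
  tblpages: number;    // heap pages + TOAST pages actually on disk
  tplSize: number;     // average tuple size in bytes, incl. header and alignment
  fillfactor: number;  // table fillfactor, 100 unless overridden in reloptions
  blockSize?: number;  // current_setting('block_size'), normally 8192
}

function estimateHeapBloat(t: HeapBloatInput) {
  const bs = t.blockSize ?? 8192;
  const pageHdr = 24; // page header size assumed by the query
  // Pages the rows would need at 100% packing density.
  const estPages =
    Math.ceil(t.reltuples / ((bs - pageHdr) / t.tplSize)) + Math.ceil(t.toasttuples / 4);
  // Same, but leaving the fillfactor headroom free on each page.
  const estPagesFf =
    Math.ceil(t.reltuples / (((bs - pageHdr) * t.fillfactor) / (t.tplSize * 100))) +
    Math.ceil(t.toasttuples / 4);
  const bloatPages = Math.max(t.tblpages - estPagesFf, 0);
  return {
    extra_size_bytes: Math.max(t.tblpages - estPages, 0) * bs,
    bloat_size_bytes: bloatPages * bs,
    bloat_pct: t.tblpages > 0 ? (100 * bloatPages) / t.tblpages : 0,
  };
}

// Example: ~1M rows of ~100-byte tuples spread over 32,768 pages (256 MiB).
console.log(estimateHeapBloat({ reltuples: 1e6, toasttuples: 0, tblpages: 32768, tplSize: 100, fillfactor: 100 }));
```

The real query derives the tuple size from `pg_stats` averages, which is why tables with missing column statistics (or `name`-typed columns) are flagged via `is_na` rather than silently estimated.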
package/lib/checkup-dictionary.ts
CHANGED

```diff
@@ -32,9 +32,10 @@ export interface CheckupDictionaryEntry {
 /**
  * Module-level cache for O(1) lookups by code.
  * Initialized at module load time from embedded data.
+ * Keys are normalized to uppercase for case-insensitive lookups.
  */
 const dictionaryByCode: Map<string, CheckupDictionaryEntry> = new Map(
-  CHECKUP_DICTIONARY_DATA.map((entry) => [entry.code, entry])
+  CHECKUP_DICTIONARY_DATA.map((entry) => [entry.code.toUpperCase(), entry])
 );
 
 /**
@@ -49,7 +50,7 @@ export function getAllCheckupEntries(): CheckupDictionaryEntry[] {
 /**
  * Get a checkup dictionary entry by its code.
  *
- * @param code - The check code (e.g., "A001", "H002")
+ * @param code - The check code (e.g., "A001", "H002"). Lookup is case-insensitive.
  * @returns The dictionary entry or null if not found
  */
 export function getCheckupEntry(code: string): CheckupDictionaryEntry | null {
```
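The dictionary change above uppercases map keys at build time, so `getCheckupEntry`, which already uppercases its argument, now matches entries regardless of the casing used in the embedded data. A self-contained sketch of the same pattern (the entry titles are taken from this release's new checks, but the snippet itself is illustrative, not package code):

```ts
interface Entry {
  code: string;
  title: string;
}

// Entry codes as they might appear in the embedded data (casing may vary).
const data: Entry[] = [
  { code: "F004", title: "Autovacuum: heap bloat estimate" },
  { code: "g003", title: "Timeouts, locks, deadlocks" },
];

// Keys are uppercased once, when the map is built...
const byCode = new Map(data.map((e) => [e.code.toUpperCase(), e]));

// ...so lookups can uppercase the caller's input and always agree with the keys.
function lookup(code: string): Entry | null {
  return byCode.get(code.toUpperCase()) ?? null;
}

console.log(lookup("f004")?.title); // "Autovacuum: heap bloat estimate"
console.log(lookup("G003")?.title); // "Timeouts, locks, deadlocks"
```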
package/lib/checkup.ts
CHANGED
```diff
@@ -1242,6 +1242,272 @@ async function generateF001(client: Client, nodeName: string): Promise<Report> {
   return report;
 }
 
+/**
+ * Generate F004 report - Autovacuum: heap bloat estimate
+ *
+ * Estimates table bloat based on statistical analysis of table pages vs expected pages.
+ * Uses pg_stats for column statistics to estimate row sizes.
+ */
+async function generateF004(client: Client, nodeName: string): Promise<Report> {
+  const report = createBaseReport("F004", "Autovacuum: heap bloat estimate", nodeName);
+  const postgresVersion = await getPostgresVersion(client);
+
+  interface TableBloatRow {
+    schemaname: string;
+    tblname: string;
+    real_size_bytes: string;
+    real_size_pretty: string;
+    extra_size_bytes: string;
+    extra_size_pretty: string;
+    extra_pct: string;
+    fillfactor: string;
+    bloat_size_bytes: string;
+    bloat_size_pretty: string;
+    bloat_pct: string;
+    is_na: string;
+  }
+
+  let bloatData: TableBloatRow[] = [];
+  let bloatError: string | null = null;
+
+  try {
+    const result = await client.query<TableBloatRow>(`
+      select
+        schemaname,
+        tblname,
+        (bs * tblpages)::bigint as real_size_bytes,
+        pg_size_pretty((bs * tblpages)::bigint) as real_size_pretty,
+        ((tblpages - est_tblpages) * bs)::bigint as extra_size_bytes,
+        pg_size_pretty(((tblpages - est_tblpages) * bs)::bigint) as extra_size_pretty,
+        case when tblpages > 0 and tblpages - est_tblpages > 0
+          then round(100.0 * (tblpages - est_tblpages) / tblpages, 2)
+          else 0
+        end as extra_pct,
+        fillfactor,
+        case when tblpages - est_tblpages_ff > 0
+          then ((tblpages - est_tblpages_ff) * bs)::bigint
+          else 0
+        end as bloat_size_bytes,
+        pg_size_pretty(case when tblpages - est_tblpages_ff > 0
+          then ((tblpages - est_tblpages_ff) * bs)::bigint
+          else 0
+        end) as bloat_size_pretty,
+        case when tblpages > 0 and tblpages - est_tblpages_ff > 0
+          then round(100.0 * (tblpages - est_tblpages_ff) / tblpages, 2)
+          else 0
+        end as bloat_pct,
+        is_na::text
+      from (
+        select
+          ceil(reltuples / ((bs - page_hdr) / tpl_size)) + ceil(toasttuples / 4) as est_tblpages,
+          ceil(reltuples / ((bs - page_hdr) * fillfactor / (tpl_size * 100))) + ceil(toasttuples / 4) as est_tblpages_ff,
+          tblpages, fillfactor, bs, schemaname, tblname, is_na
+        from (
+          select
+            (4 + tpl_hdr_size + tpl_data_size + (2 * ma)
+              - case when tpl_hdr_size % ma = 0 then ma else tpl_hdr_size % ma end
+              - case when ceil(tpl_data_size)::int % ma = 0 then ma else ceil(tpl_data_size)::int % ma end
+            ) as tpl_size,
+            (heappages + toastpages) as tblpages,
+            reltuples, toasttuples, bs, page_hdr, schemaname, tblname, fillfactor, is_na
+          from (
+            select
+              ns.nspname as schemaname,
+              tbl.relname as tblname,
+              tbl.reltuples,
+              tbl.relpages as heappages,
+              coalesce(toast.relpages, 0) as toastpages,
+              coalesce(toast.reltuples, 0) as toasttuples,
+              coalesce(substring(array_to_string(tbl.reloptions, ' ') from 'fillfactor=([0-9]+)')::smallint, 100) as fillfactor,
+              current_setting('block_size')::numeric as bs,
+              case when version() ~ 'mingw32' or version() ~ '64-bit|x86_64|ppc64|ia64|amd64' then 8 else 4 end as ma,
+              24 as page_hdr,
+              23 + case when max(coalesce(s.null_frac, 0)) > 0 then (7 + count(s.attname)) / 8 else 0::int end
+                + case when bool_or(att.attname = 'oid' and att.attnum < 0) then 4 else 0 end as tpl_hdr_size,
+              sum((1 - coalesce(s.null_frac, 0)) * coalesce(s.avg_width, 0)) as tpl_data_size,
+              (bool_or(att.atttypid = 'pg_catalog.name'::regtype)
+                or sum(case when att.attnum > 0 then 1 else 0 end) <> count(s.attname))::int as is_na
+            from pg_attribute as att
+            join pg_class as tbl on att.attrelid = tbl.oid
+            join pg_namespace as ns on ns.oid = tbl.relnamespace
+            left join pg_stats as s on s.schemaname = ns.nspname
+              and s.tablename = tbl.relname and s.attname = att.attname
+            left join pg_class as toast on tbl.reltoastrelid = toast.oid
+            where not att.attisdropped
+              and tbl.relkind in ('r', 'm')
+              and ns.nspname not in ('pg_catalog', 'information_schema', 'pg_toast')
+            group by ns.nspname, tbl.relname, tbl.reltuples, tbl.relpages, toast.relpages, toast.reltuples, tbl.reloptions
+          ) as s
+        ) as s2
+      ) as s3
+      where tblpages > 0 and (bs * tblpages) >= 1024 * 1024 -- exclude tables < 1 MiB
+      order by bloat_size_bytes desc
+      limit 100
+    `);
+    bloatData = result.rows;
+  } catch (err) {
+    const errorMsg = err instanceof Error ? err.message : String(err);
+    console.log(`[F004] Error estimating table bloat: ${errorMsg}`);
+    bloatError = errorMsg;
+  }
+
+  report.results[nodeName] = {
+    data: {
+      tables: bloatData,
+      ...(bloatError && { error: bloatError }),
+    },
+    postgres_version: postgresVersion,
+  };
+
+  return report;
+}
+
+/**
+ * Generate F005 report - Autovacuum: index bloat estimate
+ *
+ * Estimates B-tree index bloat based on statistical analysis of index pages vs expected pages.
+ */
+async function generateF005(client: Client, nodeName: string): Promise<Report> {
+  const report = createBaseReport("F005", "Autovacuum: index bloat estimate", nodeName);
+  const postgresVersion = await getPostgresVersion(client);
+
+  interface IndexBloatRow {
+    schemaname: string;
+    tblname: string;
+    idxname: string;
+    real_size_bytes: string;
+    real_size_pretty: string;
+    table_size_bytes: string;
+    table_size_pretty: string;
+    extra_size_bytes: string;
+    extra_size_pretty: string;
+    extra_pct: string;
+    fillfactor: string;
+    bloat_size_bytes: string;
+    bloat_size_pretty: string;
+    bloat_pct: string;
+    is_na: string;
+  }
+
+  let bloatData: IndexBloatRow[] = [];
+  let bloatError: string | null = null;
+
+  try {
+    const result = await client.query<IndexBloatRow>(`
+      select
+        nspname as schemaname,
+        tblname,
+        idxname,
+        (bs * relpages)::bigint as real_size_bytes,
+        pg_size_pretty((bs * relpages)::bigint) as real_size_pretty,
+        pg_relation_size(tbloid)::bigint as table_size_bytes,
+        pg_size_pretty(pg_relation_size(tbloid)) as table_size_pretty,
+        ((relpages - est_pages) * bs)::bigint as extra_size_bytes,
+        pg_size_pretty(((relpages - est_pages) * bs)::bigint) as extra_size_pretty,
+        round(100.0 * (relpages - est_pages) / relpages, 2) as extra_pct,
+        fillfactor,
+        case when relpages > est_pages_ff
+          then ((relpages - est_pages_ff) * bs)::bigint
+          else 0
+        end as bloat_size_bytes,
+        pg_size_pretty(case when relpages > est_pages_ff
+          then ((relpages - est_pages_ff) * bs)::bigint
+          else 0
+        end) as bloat_size_pretty,
+        case when relpages > est_pages_ff
+          then round(100.0 * (relpages - est_pages_ff) / relpages, 2)
+          else 0
+        end as bloat_pct,
+        is_na::text
+      from (
+        select
+          coalesce(1 + ceil(reltuples / floor((bs - pageopqdata - pagehdr) / (4 + nulldatahdrwidth)::float)), 0) as est_pages,
+          coalesce(1 + ceil(reltuples / floor((bs - pageopqdata - pagehdr) * fillfactor / (100 * (4 + nulldatahdrwidth)::float))), 0) as est_pages_ff,
+          bs, nspname, tblname, idxname, relpages, fillfactor, is_na, tbloid
+        from (
+          select
+            maxalign, bs, nspname, tblname, idxname, reltuples, relpages, idxoid, fillfactor, tbloid,
+            (index_tuple_hdr_bm + maxalign
+              - case when index_tuple_hdr_bm % maxalign = 0 then maxalign else index_tuple_hdr_bm % maxalign end
+              + nulldatawidth + maxalign
+              - case when nulldatawidth = 0 then 0
+                when nulldatawidth::integer % maxalign = 0 then maxalign
+                else nulldatawidth::integer % maxalign end
+            )::numeric as nulldatahdrwidth,
+            pagehdr, pageopqdata, is_na
+          from (
+            select
+              n.nspname, i.tblname, i.idxname, i.reltuples, i.relpages, i.tbloid, i.idxoid, i.fillfactor,
+              current_setting('block_size')::numeric as bs,
+              case when version() ~ 'mingw32' or version() ~ '64-bit|x86_64|ppc64|ia64|amd64' then 8 else 4 end as maxalign,
+              24 as pagehdr,
+              16 as pageopqdata,
+              case when max(coalesce(s.null_frac, 0)) = 0
+                then 8
+                else 8 + ((32 + 8 - 1) / 8)
+              end as index_tuple_hdr_bm,
+              sum((1 - coalesce(s.null_frac, 0)) * coalesce(s.avg_width, 1024)) as nulldatawidth,
+              (max(case when i.atttypid = 'pg_catalog.name'::regtype then 1 else 0 end) > 0)::int as is_na
+            from (
+              select
+                ct.relname as tblname, ct.relnamespace, ic.idxname, ic.attpos, ic.indkey,
+                ic.indkey[ic.attpos], ic.reltuples, ic.relpages, ic.tbloid, ic.idxoid, ic.fillfactor,
+                coalesce(a1.attnum, a2.attnum) as attnum,
+                coalesce(a1.attname, a2.attname) as attname,
+                coalesce(a1.atttypid, a2.atttypid) as atttypid,
+                case when a1.attnum is null then ic.idxname else ct.relname end as attrelname
+              from (
+                select
+                  idxname, reltuples, relpages, tbloid, idxoid, fillfactor,
+                  indkey, generate_subscripts(indkey, 1) as attpos
+                from (
+                  select
+                    ci.relname as idxname, ci.reltuples, ci.relpages, i.indrelid as tbloid,
+                    i.indexrelid as idxoid,
+                    coalesce(substring(array_to_string(ci.reloptions, ' ') from 'fillfactor=([0-9]+)')::smallint, 90) as fillfactor,
+                    i.indkey
+                  from pg_index as i
+                  join pg_class as ci on ci.oid = i.indexrelid
+                  join pg_namespace as ns on ns.oid = ci.relnamespace
+                  join pg_am as am on am.oid = ci.relam
+                  where am.amname = 'btree'
+                    and ci.relpages > 0
+                    and ns.nspname not in ('pg_catalog', 'information_schema', 'pg_toast')
+                ) as idx_data
+              ) as ic
+              join pg_class as ct on ct.oid = ic.tbloid
+              left join pg_attribute as a1 on a1.attrelid = ic.idxoid and a1.attnum = ic.indkey[ic.attpos]
+              left join pg_attribute as a2 on a2.attrelid = ic.tbloid and a2.attnum = ic.indkey[ic.attpos]
+            ) as i
+            join pg_namespace as n on n.oid = i.relnamespace
+            left join pg_stats as s on s.schemaname = n.nspname
+              and s.tablename = i.attrelname and s.attname = i.attname
+            group by n.nspname, i.tblname, i.idxname, i.reltuples, i.relpages, i.tbloid, i.idxoid, i.fillfactor
+          ) as rows_data_stats
+        ) as rows_hdr_pdg_stats
+      ) as relation_stats
+      where relpages > 0 and (bs * relpages) >= 1024 * 1024 -- exclude indexes < 1 MiB
+      order by bloat_size_bytes desc
+      limit 100
+    `);
+    bloatData = result.rows;
+  } catch (err) {
+    const errorMsg = err instanceof Error ? err.message : String(err);
+    console.log(`[F005] Error estimating index bloat: ${errorMsg}`);
+    bloatError = errorMsg;
+  }
+
+  report.results[nodeName] = {
+    data: {
+      indexes: bloatData,
+      ...(bloatError && { error: bloatError }),
+    },
+    postgres_version: postgresVersion,
+  };
+
+  return report;
+}
+
 /**
  * Generate G001 report - Memory-related settings
  */
@@ -1358,6 +1624,82 @@ async function generateG001(client: Client, nodeName: string): Promise<Report> {
   return report;
 }
 
+/**
+ * Generate G003 report - Timeouts, locks, deadlocks
+ *
+ * Collects timeout and lock-related settings, plus deadlock statistics.
+ */
+async function generateG003(client: Client, nodeName: string): Promise<Report> {
+  const report = createBaseReport("G003", "Timeouts, locks, deadlocks", nodeName);
+  const postgresVersion = await getPostgresVersion(client);
+  const pgMajorVersion = parseInt(postgresVersion.server_major_ver, 10) || 16;
+  const allSettings = await getSettings(client, pgMajorVersion);
+
+  // Timeout and lock-related setting names
+  const lockTimeoutSettingNames = [
+    "lock_timeout",
+    "statement_timeout",
+    "idle_in_transaction_session_timeout",
+    "idle_session_timeout",
+    "deadlock_timeout",
+    "max_locks_per_transaction",
+    "max_pred_locks_per_transaction",
+    "max_pred_locks_per_relation",
+    "max_pred_locks_per_page",
+    "log_lock_waits",
+    "transaction_timeout",
+  ];
+
+  const lockSettings: Record<string, SettingInfo> = {};
+  for (const name of lockTimeoutSettingNames) {
+    if (allSettings[name]) {
+      lockSettings[name] = allSettings[name];
+    }
+  }
+
+  // Get deadlock statistics from pg_stat_database
+  let deadlockStats: {
+    deadlocks: number;
+    conflicts: number;
+    stats_reset: string | null;
+  } | null = null;
+  let deadlockError: string | null = null;
+
+  try {
+    const statsResult = await client.query(`
+      select
+        coalesce(sum(deadlocks), 0)::bigint as deadlocks,
+        coalesce(sum(conflicts), 0)::bigint as conflicts,
+        min(stats_reset)::text as stats_reset
+      from pg_stat_database
+      where datname = current_database()
+    `);
+    if (statsResult.rows.length > 0) {
+      const row = statsResult.rows[0];
+      deadlockStats = {
+        deadlocks: parseInt(row.deadlocks, 10),
+        conflicts: parseInt(row.conflicts, 10),
+        stats_reset: row.stats_reset || null,
+      };
+    }
+  } catch (err) {
+    const errorMsg = err instanceof Error ? err.message : String(err);
+    console.log(`[G003] Error querying deadlock stats: ${errorMsg}`);
+    deadlockError = errorMsg;
+  }
+
+  report.results[nodeName] = {
+    data: {
+      settings: lockSettings,
+      deadlock_stats: deadlockStats,
+      ...(deadlockError && { deadlock_stats_error: deadlockError }),
+    },
+    postgres_version: postgresVersion,
+  };
+
+  return report;
+}
+
 /**
  * Available report generators
  */
@@ -1370,7 +1712,10 @@ export const REPORT_GENERATORS: Record<string, (client: Client, nodeName: string
   D001: generateD001,
   D004: generateD004,
   F001: generateF001,
+  F004: generateF004,
+  F005: generateF005,
   G001: generateG001,
+  G003: generateG003,
   H001: generateH001,
   H002: generateH002,
   H004: generateH004,
```
package/package.json
CHANGED
```diff
@@ -1,6 +1,6 @@
 {
   "name": "postgresai",
-  "version": "0.14.0-dev.80",
+  "version": "0.14.0-dev.82",
   "description": "postgres_ai CLI",
   "license": "Apache-2.0",
   "private": false,
@@ -34,7 +34,7 @@
     "start:node": "node ./dist/bin/postgres-ai.js --help",
     "dev": "bun run embed-all && bun --watch ./bin/postgres-ai.ts",
     "test": "bun run embed-all && bun test",
-    "test:fast": "bun run embed-all && bun test",
+    "test:fast": "bun run embed-all && bun test --coverage=false",
     "test:coverage": "bun run embed-all && bun test --coverage && echo 'Coverage report: cli/coverage/lcov-report/index.html'",
     "typecheck": "bun run embed-all && bunx tsc --noEmit"
   },
```
package/test/checkup.test.ts
CHANGED
```diff
@@ -86,7 +86,7 @@ describe("createBaseReport", () => {
 // Tests for CHECK_INFO
 describe("CHECK_INFO and REPORT_GENERATORS", () => {
   // Express-mode checks that have generators
-  const expressCheckIds = ["A002", "A003", "A004", "A007", "A013", "D001", "D004", "F001", "G001", "H001", "H002", "H004"];
+  const expressCheckIds = ["A002", "A003", "A004", "A007", "A013", "D001", "D004", "F001", "F004", "F005", "G001", "G003", "H001", "H002", "H004"];
 
   test("CHECK_INFO contains all express-mode checks", () => {
     for (const checkId of expressCheckIds) {
```