postgresai 0.14.0-dev.53 → 0.14.0-dev.54
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +34 -35
- package/bin/postgres-ai.ts +436 -4
- package/bun.lock +3 -1
- package/bunfig.toml +11 -0
- package/dist/bin/postgres-ai.js +2184 -218
- package/lib/auth-server.ts +52 -5
- package/lib/checkup-api.ts +386 -0
- package/lib/checkup.ts +1327 -0
- package/lib/config.ts +3 -0
- package/lib/issues.ts +5 -41
- package/lib/metrics-embedded.ts +79 -0
- package/lib/metrics-loader.ts +127 -0
- package/lib/util.ts +61 -0
- package/package.json +12 -6
- package/packages/postgres-ai/README.md +26 -0
- package/packages/postgres-ai/bin/postgres-ai.js +27 -0
- package/packages/postgres-ai/package.json +27 -0
- package/scripts/embed-metrics.ts +154 -0
- package/test/checkup.integration.test.ts +273 -0
- package/test/checkup.test.ts +890 -0
- package/test/init.integration.test.ts +36 -33
- package/test/schema-validation.test.ts +81 -0
- package/test/test-utils.ts +122 -0
- package/dist/sql/01.role.sql +0 -16
- package/dist/sql/02.permissions.sql +0 -37
- package/dist/sql/03.optional_rds.sql +0 -6
- package/dist/sql/04.optional_self_managed.sql +0 -8
- package/dist/sql/05.helpers.sql +0 -415
package/lib/checkup.ts
ADDED
|
@@ -0,0 +1,1327 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Express Checkup Module
|
|
3
|
+
* ======================
|
|
4
|
+
* Generates JSON health check reports directly from PostgreSQL without Prometheus.
|
|
5
|
+
*
|
|
6
|
+
* ARCHITECTURAL DECISIONS
|
|
7
|
+
* -----------------------
|
|
8
|
+
*
|
|
9
|
+
* 1. SINGLE SOURCE OF TRUTH FOR SQL QUERIES
|
|
10
|
+
* Complex metrics (index health, settings, db_stats) are loaded from
|
|
11
|
+
* config/pgwatch-prometheus/metrics.yml via getMetricSql() from metrics-loader.ts.
|
|
12
|
+
*
|
|
13
|
+
* Simple queries (version, database list, connection states, uptime) use
|
|
14
|
+
* inline SQL as they're trivial and CLI-specific.
|
|
15
|
+
*
|
|
16
|
+
* 2. JSON SCHEMA COMPLIANCE
|
|
17
|
+
* All generated reports MUST comply with JSON schemas in reporter/schemas/.
|
|
18
|
+
* These schemas define the expected format for both:
|
|
19
|
+
* - Full-fledged monitoring reporter output
|
|
20
|
+
* - Express checkup output
|
|
21
|
+
*
|
|
22
|
+
* Before adding or modifying a report, verify the corresponding schema exists
|
|
23
|
+
* and ensure the output matches. Run schema validation tests to confirm.
|
|
24
|
+
*
|
|
25
|
+
* 3. ERROR HANDLING STRATEGY
|
|
26
|
+
* Functions follow two patterns based on criticality:
|
|
27
|
+
*
|
|
28
|
+
* PROPAGATING (throws on error):
|
|
29
|
+
* - Core data functions: getPostgresVersion, getSettings, getAlteredSettings,
|
|
30
|
+
* getDatabaseSizes, getInvalidIndexes, getUnusedIndexes, getRedundantIndexes
|
|
31
|
+
* - If these fail, the entire report should fail (data is required)
|
|
32
|
+
* - Callers should handle errors at the report generation level
|
|
33
|
+
*
|
|
34
|
+
* GRACEFUL DEGRADATION (catches errors, includes error in output):
|
|
35
|
+
* - Optional/supplementary queries: pg_stat_statements, pg_stat_kcache checks,
|
|
36
|
+
* memory calculations, postmaster startup time
|
|
37
|
+
* - These are nice-to-have; missing data shouldn't fail the whole report
|
|
38
|
+
* - Errors are logged and included in report output for visibility
|
|
39
|
+
*
|
|
40
|
+
* ADDING NEW REPORTS
|
|
41
|
+
* ------------------
|
|
42
|
+
* 1. Add/verify the metric exists in config/pgwatch-prometheus/metrics.yml
|
|
43
|
+
* 2. Add the metric name mapping to METRIC_NAMES in metrics-loader.ts
|
|
44
|
+
* 3. Verify JSON schema exists in reporter/schemas/{CHECK_ID}.schema.json
|
|
45
|
+
* 4. Implement the generator function using getMetricSql()
|
|
46
|
+
* 5. Add schema validation test in test/schema-validation.test.ts
|
|
47
|
+
*/
|
|
48
|
+
|
|
49
|
+
import { Client } from "pg";
|
|
50
|
+
import * as fs from "fs";
|
|
51
|
+
import * as path from "path";
|
|
52
|
+
import * as pkg from "../package.json";
|
|
53
|
+
import { getMetricSql, transformMetricRow, METRIC_NAMES } from "./metrics-loader";
|
|
54
|
+
|
|
55
|
+
// Time constants — used for uptime formatting (getClusterInfo) and for
// converting seconds-since-reset into days (getStatsReset).
const SECONDS_PER_DAY = 86400;
const SECONDS_PER_HOUR = 3600;
const SECONDS_PER_MINUTE = 60;
|
|
59
|
+
|
|
60
|
+
/**
|
|
61
|
+
* Convert various boolean representations to boolean.
|
|
62
|
+
* PostgreSQL returns booleans as true/false, 1/0, 't'/'f', or 'true'/'false'
|
|
63
|
+
* depending on context (query result, JDBC driver, etc.).
|
|
64
|
+
*/
|
|
65
|
+
function toBool(val: unknown): boolean {
|
|
66
|
+
return val === true || val === 1 || val === "t" || val === "true";
|
|
67
|
+
}
|
|
68
|
+
|
|
69
|
+
/**
 * PostgreSQL version information, as derived from pg_settings
 * (server_version / server_version_num) by getPostgresVersion().
 */
export interface PostgresVersion {
  version: string;             // human-readable, e.g. "16.2"
  server_version_num: string;  // numeric form, e.g. "160002"
  server_major_ver: string;    // derived major, e.g. "16" ("" when unparsable)
  server_minor_ver: string;    // derived minor, e.g. "2" ("" when unparsable)
}

/**
 * Setting information from pg_settings, built by getSettings() from the
 * 'settings' metric.
 */
export interface SettingInfo {
  setting: string;       // raw value
  unit: string;
  category: string;
  context: string;       // always "" — not available in the monitoring metric
  vartype: string;
  pretty_value: string;  // human-friendly rendering (bytes/time normalized)
}

/**
 * Altered (non-default) setting (A007 report) — subset of SettingInfo.
 */
export interface AlteredSetting {
  value: string;
  unit: string;
  category: string;
  pretty_value: string;
}

/**
 * Cluster metric (A004 report): one named value with its unit and a short
 * human-readable description.
 */
export interface ClusterMetric {
  value: string;
  unit: string;
  description: string;
}

/**
 * Invalid index entry (H001) - matches H001.schema.json invalidIndex
 */
export interface InvalidIndex {
  schema_name: string;
  table_name: string;
  index_name: string;
  relation_name: string;
  index_size_bytes: number;
  index_size_pretty: string;
  supports_fk: boolean;  // whether the index backs a foreign-key constraint
}

/**
 * Unused index entry (H002) - matches H002.schema.json unusedIndex
 */
export interface UnusedIndex {
  schema_name: string;
  table_name: string;
  index_name: string;
  index_definition: string;
  reason: string;       // why the index is considered unused
  idx_scan: number;     // scan count since stats were last reset
  index_size_bytes: number;
  idx_is_btree: boolean;
  supports_fk: boolean;
  index_size_pretty: string;
}

/**
 * Stats reset info for H002 - matches H002.schema.json statsReset.
 * Fields are null when the underlying data is unavailable.
 */
export interface StatsReset {
  stats_reset_epoch: number | null;
  stats_reset_time: string | null;       // ISO-8601, derived from the epoch
  days_since_reset: number | null;
  postmaster_startup_epoch: number | null;
  postmaster_startup_time: string | null;
  /** Set when postmaster startup time query fails - indicates data availability issue */
  postmaster_startup_error?: string;
}

/**
 * Index that makes another index redundant (H004).
 * Used in RedundantIndex.redundant_to to show which indexes cover this one.
 */
export interface RedundantToIndex {
  index_name: string;
  index_definition: string;
  index_size_bytes: number;
  index_size_pretty: string;
}

/**
 * Redundant index entry (H004) - matches H004.schema.json redundantIndex
 */
export interface RedundantIndex {
  schema_name: string;
  table_name: string;
  index_name: string;
  relation_name: string;
  access_method: string;  // e.g. btree, gin
  reason: string;
  index_size_bytes: number;
  table_size_bytes: number;
  index_usage: number;
  supports_fk: boolean;
  index_definition: string;
  index_size_pretty: string;
  table_size_pretty: string;
  redundant_to: RedundantToIndex[];  // covering indexes ([] when parse failed)
  /** Set when redundant_to_json parsing fails - indicates data quality issue */
  redundant_to_parse_error?: string;
}

/**
 * Per-node result for reports: free-form check data plus (optionally) the
 * node's PostgreSQL version.
 */
export interface NodeResult {
  data: Record<string, any>;
  postgres_version?: PostgresVersion;
}

/**
 * Report structure matching the JSON schemas in reporter/schemas/.
 */
export interface Report {
  version: string | null;          // tool version
  build_ts: string | null;
  generation_mode: string | null;
  checkId: string;                 // e.g. "H001"
  checkTitle: string;
  timestamptz: string;             // report generation time
  nodes: {
    primary: string;
    standbys: string[];
  };
  results: Record<string, NodeResult>;  // keyed by node name
}
|
|
209
|
+
|
|
210
|
+
/**
|
|
211
|
+
* Parse PostgreSQL version number into major and minor components
|
|
212
|
+
*/
|
|
213
|
+
export function parseVersionNum(versionNum: string): { major: string; minor: string } {
|
|
214
|
+
if (!versionNum || versionNum.length < 6) {
|
|
215
|
+
return { major: "", minor: "" };
|
|
216
|
+
}
|
|
217
|
+
try {
|
|
218
|
+
const num = parseInt(versionNum, 10);
|
|
219
|
+
return {
|
|
220
|
+
major: Math.floor(num / 10000).toString(),
|
|
221
|
+
minor: (num % 10000).toString(),
|
|
222
|
+
};
|
|
223
|
+
} catch (err) {
|
|
224
|
+
// parseInt shouldn't throw, but handle edge cases defensively
|
|
225
|
+
const errorMsg = err instanceof Error ? err.message : String(err);
|
|
226
|
+
console.log(`[parseVersionNum] Warning: Failed to parse "${versionNum}": ${errorMsg}`);
|
|
227
|
+
return { major: "", minor: "" };
|
|
228
|
+
}
|
|
229
|
+
}
|
|
230
|
+
|
|
231
|
+
/**
|
|
232
|
+
* Format bytes to human readable string using binary units (1024-based).
|
|
233
|
+
* Uses IEC standard: KiB, MiB, GiB, etc.
|
|
234
|
+
*
|
|
235
|
+
* Note: PostgreSQL's pg_size_pretty() uses kB/MB/GB with 1024 base (technically
|
|
236
|
+
* incorrect SI usage), but we follow IEC binary units per project style guide.
|
|
237
|
+
*/
|
|
238
|
+
export function formatBytes(bytes: number): string {
|
|
239
|
+
if (bytes === 0) return "0 B";
|
|
240
|
+
if (bytes < 0) return `-${formatBytes(-bytes)}`; // Handle negative values
|
|
241
|
+
if (!Number.isFinite(bytes)) return `${bytes} B`; // Handle NaN/Infinity
|
|
242
|
+
const units = ["B", "KiB", "MiB", "GiB", "TiB", "PiB"];
|
|
243
|
+
const i = Math.min(Math.floor(Math.log(bytes) / Math.log(1024)), units.length - 1);
|
|
244
|
+
return `${(bytes / Math.pow(1024, i)).toFixed(2)} ${units[i]}`;
|
|
245
|
+
}
|
|
246
|
+
|
|
247
|
+
/**
|
|
248
|
+
* Format a setting's pretty value from the normalized value and unit.
|
|
249
|
+
* The settings metric provides setting_normalized (bytes or seconds) and unit_normalized.
|
|
250
|
+
*/
|
|
251
|
+
function formatSettingPrettyValue(
|
|
252
|
+
settingNormalized: number | null,
|
|
253
|
+
unitNormalized: string | null,
|
|
254
|
+
rawValue: string
|
|
255
|
+
): string {
|
|
256
|
+
if (settingNormalized === null || unitNormalized === null) {
|
|
257
|
+
return rawValue;
|
|
258
|
+
}
|
|
259
|
+
|
|
260
|
+
if (unitNormalized === "bytes") {
|
|
261
|
+
return formatBytes(settingNormalized);
|
|
262
|
+
}
|
|
263
|
+
|
|
264
|
+
if (unitNormalized === "seconds") {
|
|
265
|
+
// Format time values with appropriate units based on magnitude:
|
|
266
|
+
// - Sub-second values (< 1s): show in milliseconds for precision
|
|
267
|
+
// - Small values (< 60s): show in seconds
|
|
268
|
+
// - Larger values (>= 60s): show in minutes for readability
|
|
269
|
+
const MS_PER_SECOND = 1000;
|
|
270
|
+
if (settingNormalized < 1) {
|
|
271
|
+
return `${(settingNormalized * MS_PER_SECOND).toFixed(0)} ms`;
|
|
272
|
+
} else if (settingNormalized < SECONDS_PER_MINUTE) {
|
|
273
|
+
return `${settingNormalized} s`;
|
|
274
|
+
} else {
|
|
275
|
+
return `${(settingNormalized / SECONDS_PER_MINUTE).toFixed(1)} min`;
|
|
276
|
+
}
|
|
277
|
+
}
|
|
278
|
+
|
|
279
|
+
return rawValue;
|
|
280
|
+
}
|
|
281
|
+
|
|
282
|
+
/**
|
|
283
|
+
* Get PostgreSQL version information.
|
|
284
|
+
* Uses simple inline SQL (trivial query, CLI-specific).
|
|
285
|
+
*
|
|
286
|
+
* @throws {Error} If database query fails (propagating - critical data)
|
|
287
|
+
*/
|
|
288
|
+
export async function getPostgresVersion(client: Client): Promise<PostgresVersion> {
|
|
289
|
+
const result = await client.query(`
|
|
290
|
+
select name, setting
|
|
291
|
+
from pg_settings
|
|
292
|
+
where name in ('server_version', 'server_version_num')
|
|
293
|
+
`);
|
|
294
|
+
|
|
295
|
+
let version = "";
|
|
296
|
+
let serverVersionNum = "";
|
|
297
|
+
|
|
298
|
+
for (const row of result.rows) {
|
|
299
|
+
if (row.name === "server_version") {
|
|
300
|
+
version = row.setting;
|
|
301
|
+
} else if (row.name === "server_version_num") {
|
|
302
|
+
serverVersionNum = row.setting;
|
|
303
|
+
}
|
|
304
|
+
}
|
|
305
|
+
|
|
306
|
+
const { major, minor } = parseVersionNum(serverVersionNum);
|
|
307
|
+
|
|
308
|
+
return {
|
|
309
|
+
version,
|
|
310
|
+
server_version_num: serverVersionNum,
|
|
311
|
+
server_major_ver: major,
|
|
312
|
+
server_minor_ver: minor,
|
|
313
|
+
};
|
|
314
|
+
}
|
|
315
|
+
|
|
316
|
+
/**
|
|
317
|
+
* Get all PostgreSQL settings
|
|
318
|
+
* Uses 'settings' metric from metrics.yml
|
|
319
|
+
*/
|
|
320
|
+
export async function getSettings(client: Client, pgMajorVersion: number = 16): Promise<Record<string, SettingInfo>> {
|
|
321
|
+
const sql = getMetricSql(METRIC_NAMES.settings, pgMajorVersion);
|
|
322
|
+
const result = await client.query(sql);
|
|
323
|
+
const settings: Record<string, SettingInfo> = {};
|
|
324
|
+
|
|
325
|
+
for (const row of result.rows) {
|
|
326
|
+
// The settings metric uses tag_setting_name, tag_setting_value, etc.
|
|
327
|
+
const name = row.tag_setting_name;
|
|
328
|
+
const settingValue = row.tag_setting_value;
|
|
329
|
+
const unit = row.tag_unit || "";
|
|
330
|
+
const category = row.tag_category || "";
|
|
331
|
+
const vartype = row.tag_vartype || "";
|
|
332
|
+
const settingNormalized = row.setting_normalized !== null ? parseFloat(row.setting_normalized) : null;
|
|
333
|
+
const unitNormalized = row.unit_normalized || null;
|
|
334
|
+
|
|
335
|
+
settings[name] = {
|
|
336
|
+
setting: settingValue,
|
|
337
|
+
unit,
|
|
338
|
+
category,
|
|
339
|
+
context: "", // Not available in the monitoring metric
|
|
340
|
+
vartype,
|
|
341
|
+
pretty_value: formatSettingPrettyValue(settingNormalized, unitNormalized, settingValue),
|
|
342
|
+
};
|
|
343
|
+
}
|
|
344
|
+
|
|
345
|
+
return settings;
|
|
346
|
+
}
|
|
347
|
+
|
|
348
|
+
/**
|
|
349
|
+
* Get altered (non-default) PostgreSQL settings
|
|
350
|
+
* Uses 'settings' metric from metrics.yml and filters for non-default
|
|
351
|
+
*/
|
|
352
|
+
export async function getAlteredSettings(client: Client, pgMajorVersion: number = 16): Promise<Record<string, AlteredSetting>> {
|
|
353
|
+
const sql = getMetricSql(METRIC_NAMES.settings, pgMajorVersion);
|
|
354
|
+
const result = await client.query(sql);
|
|
355
|
+
const settings: Record<string, AlteredSetting> = {};
|
|
356
|
+
|
|
357
|
+
for (const row of result.rows) {
|
|
358
|
+
// Filter for non-default settings (is_default = 0 means non-default)
|
|
359
|
+
if (!toBool(row.is_default)) {
|
|
360
|
+
const name = row.tag_setting_name;
|
|
361
|
+
const settingValue = row.tag_setting_value;
|
|
362
|
+
const unit = row.tag_unit || "";
|
|
363
|
+
const category = row.tag_category || "";
|
|
364
|
+
const settingNormalized = row.setting_normalized !== null ? parseFloat(row.setting_normalized) : null;
|
|
365
|
+
const unitNormalized = row.unit_normalized || null;
|
|
366
|
+
|
|
367
|
+
settings[name] = {
|
|
368
|
+
value: settingValue,
|
|
369
|
+
unit,
|
|
370
|
+
category,
|
|
371
|
+
pretty_value: formatSettingPrettyValue(settingNormalized, unitNormalized, settingValue),
|
|
372
|
+
};
|
|
373
|
+
}
|
|
374
|
+
}
|
|
375
|
+
|
|
376
|
+
return settings;
|
|
377
|
+
}
|
|
378
|
+
|
|
379
|
+
/**
|
|
380
|
+
* Get database sizes (all non-template databases)
|
|
381
|
+
* Uses simple inline SQL (lists all databases, CLI-specific)
|
|
382
|
+
*/
|
|
383
|
+
export async function getDatabaseSizes(client: Client): Promise<Record<string, number>> {
|
|
384
|
+
const result = await client.query(`
|
|
385
|
+
select
|
|
386
|
+
datname,
|
|
387
|
+
pg_database_size(datname) as size_bytes
|
|
388
|
+
from pg_database
|
|
389
|
+
where datistemplate = false
|
|
390
|
+
order by size_bytes desc
|
|
391
|
+
`);
|
|
392
|
+
const sizes: Record<string, number> = {};
|
|
393
|
+
|
|
394
|
+
for (const row of result.rows) {
|
|
395
|
+
sizes[row.datname] = parseInt(row.size_bytes, 10);
|
|
396
|
+
}
|
|
397
|
+
|
|
398
|
+
return sizes;
|
|
399
|
+
}
|
|
400
|
+
|
|
401
|
+
/**
|
|
402
|
+
* Get cluster general info metrics
|
|
403
|
+
* Uses 'db_stats' metric and inline SQL for connection states/uptime
|
|
404
|
+
*/
|
|
405
|
+
export async function getClusterInfo(client: Client, pgMajorVersion: number = 16): Promise<Record<string, ClusterMetric>> {
|
|
406
|
+
const info: Record<string, ClusterMetric> = {};
|
|
407
|
+
|
|
408
|
+
// Get database statistics from db_stats metric
|
|
409
|
+
const dbStatsSql = getMetricSql(METRIC_NAMES.dbStats, pgMajorVersion);
|
|
410
|
+
const statsResult = await client.query(dbStatsSql);
|
|
411
|
+
if (statsResult.rows.length > 0) {
|
|
412
|
+
const stats = statsResult.rows[0];
|
|
413
|
+
|
|
414
|
+
info.total_connections = {
|
|
415
|
+
value: String(stats.numbackends || 0),
|
|
416
|
+
unit: "connections",
|
|
417
|
+
description: "Current database connections",
|
|
418
|
+
};
|
|
419
|
+
|
|
420
|
+
info.total_commits = {
|
|
421
|
+
value: String(stats.xact_commit || 0),
|
|
422
|
+
unit: "transactions",
|
|
423
|
+
description: "Total committed transactions",
|
|
424
|
+
};
|
|
425
|
+
|
|
426
|
+
info.total_rollbacks = {
|
|
427
|
+
value: String(stats.xact_rollback || 0),
|
|
428
|
+
unit: "transactions",
|
|
429
|
+
description: "Total rolled back transactions",
|
|
430
|
+
};
|
|
431
|
+
|
|
432
|
+
const blocksHit = parseInt(stats.blks_hit || "0", 10);
|
|
433
|
+
const blocksRead = parseInt(stats.blks_read || "0", 10);
|
|
434
|
+
const totalBlocks = blocksHit + blocksRead;
|
|
435
|
+
const cacheHitRatio = totalBlocks > 0 ? ((blocksHit / totalBlocks) * 100).toFixed(2) : "0.00";
|
|
436
|
+
|
|
437
|
+
info.cache_hit_ratio = {
|
|
438
|
+
value: cacheHitRatio,
|
|
439
|
+
unit: "%",
|
|
440
|
+
description: "Buffer cache hit ratio",
|
|
441
|
+
};
|
|
442
|
+
|
|
443
|
+
info.blocks_read = {
|
|
444
|
+
value: String(blocksRead),
|
|
445
|
+
unit: "blocks",
|
|
446
|
+
description: "Total disk blocks read",
|
|
447
|
+
};
|
|
448
|
+
|
|
449
|
+
info.blocks_hit = {
|
|
450
|
+
value: String(blocksHit),
|
|
451
|
+
unit: "blocks",
|
|
452
|
+
description: "Total buffer cache hits",
|
|
453
|
+
};
|
|
454
|
+
|
|
455
|
+
info.tuples_returned = {
|
|
456
|
+
value: String(stats.tup_returned || 0),
|
|
457
|
+
unit: "rows",
|
|
458
|
+
description: "Total rows returned by queries",
|
|
459
|
+
};
|
|
460
|
+
|
|
461
|
+
info.tuples_fetched = {
|
|
462
|
+
value: String(stats.tup_fetched || 0),
|
|
463
|
+
unit: "rows",
|
|
464
|
+
description: "Total rows fetched by queries",
|
|
465
|
+
};
|
|
466
|
+
|
|
467
|
+
info.tuples_inserted = {
|
|
468
|
+
value: String(stats.tup_inserted || 0),
|
|
469
|
+
unit: "rows",
|
|
470
|
+
description: "Total rows inserted",
|
|
471
|
+
};
|
|
472
|
+
|
|
473
|
+
info.tuples_updated = {
|
|
474
|
+
value: String(stats.tup_updated || 0),
|
|
475
|
+
unit: "rows",
|
|
476
|
+
description: "Total rows updated",
|
|
477
|
+
};
|
|
478
|
+
|
|
479
|
+
info.tuples_deleted = {
|
|
480
|
+
value: String(stats.tup_deleted || 0),
|
|
481
|
+
unit: "rows",
|
|
482
|
+
description: "Total rows deleted",
|
|
483
|
+
};
|
|
484
|
+
|
|
485
|
+
info.total_deadlocks = {
|
|
486
|
+
value: String(stats.deadlocks || 0),
|
|
487
|
+
unit: "deadlocks",
|
|
488
|
+
description: "Total deadlocks detected",
|
|
489
|
+
};
|
|
490
|
+
|
|
491
|
+
info.temp_files_created = {
|
|
492
|
+
value: String(stats.temp_files || 0),
|
|
493
|
+
unit: "files",
|
|
494
|
+
description: "Total temporary files created",
|
|
495
|
+
};
|
|
496
|
+
|
|
497
|
+
const tempBytes = parseInt(stats.temp_bytes || "0", 10);
|
|
498
|
+
info.temp_bytes_written = {
|
|
499
|
+
value: formatBytes(tempBytes),
|
|
500
|
+
unit: "bytes",
|
|
501
|
+
description: "Total temporary file bytes written",
|
|
502
|
+
};
|
|
503
|
+
|
|
504
|
+
// Uptime from db_stats
|
|
505
|
+
if (stats.postmaster_uptime_s) {
|
|
506
|
+
const uptimeSeconds = parseInt(stats.postmaster_uptime_s, 10);
|
|
507
|
+
const days = Math.floor(uptimeSeconds / SECONDS_PER_DAY);
|
|
508
|
+
const hours = Math.floor((uptimeSeconds % SECONDS_PER_DAY) / SECONDS_PER_HOUR);
|
|
509
|
+
const minutes = Math.floor((uptimeSeconds % SECONDS_PER_HOUR) / SECONDS_PER_MINUTE);
|
|
510
|
+
info.uptime = {
|
|
511
|
+
value: `${days} days ${hours}:${String(minutes).padStart(2, "0")}:${String(uptimeSeconds % SECONDS_PER_MINUTE).padStart(2, "0")}`,
|
|
512
|
+
unit: "interval",
|
|
513
|
+
description: "Server uptime",
|
|
514
|
+
};
|
|
515
|
+
}
|
|
516
|
+
}
|
|
517
|
+
|
|
518
|
+
// Get connection states (simple inline SQL)
|
|
519
|
+
const connResult = await client.query(`
|
|
520
|
+
select
|
|
521
|
+
coalesce(state, 'null') as state,
|
|
522
|
+
count(*) as count
|
|
523
|
+
from pg_stat_activity
|
|
524
|
+
group by state
|
|
525
|
+
`);
|
|
526
|
+
for (const row of connResult.rows) {
|
|
527
|
+
const stateKey = `connections_${row.state.replace(/\s+/g, "_")}`;
|
|
528
|
+
info[stateKey] = {
|
|
529
|
+
value: String(row.count),
|
|
530
|
+
unit: "connections",
|
|
531
|
+
description: `Connections in '${row.state}' state`,
|
|
532
|
+
};
|
|
533
|
+
}
|
|
534
|
+
|
|
535
|
+
// Get uptime info (simple inline SQL)
|
|
536
|
+
const uptimeResult = await client.query(`
|
|
537
|
+
select
|
|
538
|
+
pg_postmaster_start_time() as start_time,
|
|
539
|
+
current_timestamp - pg_postmaster_start_time() as uptime
|
|
540
|
+
`);
|
|
541
|
+
if (uptimeResult.rows.length > 0) {
|
|
542
|
+
const uptime = uptimeResult.rows[0];
|
|
543
|
+
const startTime = uptime.start_time instanceof Date
|
|
544
|
+
? uptime.start_time.toISOString()
|
|
545
|
+
: String(uptime.start_time);
|
|
546
|
+
info.start_time = {
|
|
547
|
+
value: startTime,
|
|
548
|
+
unit: "timestamp",
|
|
549
|
+
description: "PostgreSQL server start time",
|
|
550
|
+
};
|
|
551
|
+
if (!info.uptime) {
|
|
552
|
+
info.uptime = {
|
|
553
|
+
value: String(uptime.uptime),
|
|
554
|
+
unit: "interval",
|
|
555
|
+
description: "Server uptime",
|
|
556
|
+
};
|
|
557
|
+
}
|
|
558
|
+
}
|
|
559
|
+
|
|
560
|
+
return info;
|
|
561
|
+
}
|
|
562
|
+
|
|
563
|
+
/**
|
|
564
|
+
* Get invalid indexes from the database (H001).
|
|
565
|
+
* Invalid indexes are indexes that failed to build (e.g., due to CONCURRENTLY failure).
|
|
566
|
+
*
|
|
567
|
+
* @param client - Connected PostgreSQL client
|
|
568
|
+
* @param pgMajorVersion - PostgreSQL major version (default: 16)
|
|
569
|
+
* @returns Array of invalid index entries with size and FK support info
|
|
570
|
+
*/
|
|
571
|
+
export async function getInvalidIndexes(client: Client, pgMajorVersion: number = 16): Promise<InvalidIndex[]> {
|
|
572
|
+
const sql = getMetricSql(METRIC_NAMES.H001, pgMajorVersion);
|
|
573
|
+
const result = await client.query(sql);
|
|
574
|
+
return result.rows.map((row) => {
|
|
575
|
+
const transformed = transformMetricRow(row);
|
|
576
|
+
const indexSizeBytes = parseInt(String(transformed.index_size_bytes || 0), 10);
|
|
577
|
+
return {
|
|
578
|
+
schema_name: String(transformed.schema_name || ""),
|
|
579
|
+
table_name: String(transformed.table_name || ""),
|
|
580
|
+
index_name: String(transformed.index_name || ""),
|
|
581
|
+
relation_name: String(transformed.relation_name || ""),
|
|
582
|
+
index_size_bytes: indexSizeBytes,
|
|
583
|
+
index_size_pretty: formatBytes(indexSizeBytes),
|
|
584
|
+
supports_fk: toBool(transformed.supports_fk),
|
|
585
|
+
};
|
|
586
|
+
});
|
|
587
|
+
}
|
|
588
|
+
|
|
589
|
+
/**
|
|
590
|
+
* Get unused indexes from the database (H002).
|
|
591
|
+
* Unused indexes have zero scans since stats were last reset.
|
|
592
|
+
*
|
|
593
|
+
* @param client - Connected PostgreSQL client
|
|
594
|
+
* @param pgMajorVersion - PostgreSQL major version (default: 16)
|
|
595
|
+
* @returns Array of unused index entries with scan counts and FK support info
|
|
596
|
+
*/
|
|
597
|
+
export async function getUnusedIndexes(client: Client, pgMajorVersion: number = 16): Promise<UnusedIndex[]> {
|
|
598
|
+
const sql = getMetricSql(METRIC_NAMES.H002, pgMajorVersion);
|
|
599
|
+
const result = await client.query(sql);
|
|
600
|
+
return result.rows.map((row) => {
|
|
601
|
+
const transformed = transformMetricRow(row);
|
|
602
|
+
const indexSizeBytes = parseInt(String(transformed.index_size_bytes || 0), 10);
|
|
603
|
+
return {
|
|
604
|
+
schema_name: String(transformed.schema_name || ""),
|
|
605
|
+
table_name: String(transformed.table_name || ""),
|
|
606
|
+
index_name: String(transformed.index_name || ""),
|
|
607
|
+
index_definition: String(transformed.index_definition || ""),
|
|
608
|
+
reason: String(transformed.reason || ""),
|
|
609
|
+
idx_scan: parseInt(String(transformed.idx_scan || 0), 10),
|
|
610
|
+
index_size_bytes: indexSizeBytes,
|
|
611
|
+
idx_is_btree: toBool(transformed.idx_is_btree),
|
|
612
|
+
supports_fk: toBool(transformed.supports_fk),
|
|
613
|
+
index_size_pretty: formatBytes(indexSizeBytes),
|
|
614
|
+
};
|
|
615
|
+
});
|
|
616
|
+
}
|
|
617
|
+
|
|
618
|
+
/**
|
|
619
|
+
* Get stats reset info (H002)
|
|
620
|
+
* SQL loaded from config/pgwatch-prometheus/metrics.yml (stats_reset)
|
|
621
|
+
*/
|
|
622
|
+
export async function getStatsReset(client: Client, pgMajorVersion: number = 16): Promise<StatsReset> {
|
|
623
|
+
const sql = getMetricSql(METRIC_NAMES.statsReset, pgMajorVersion);
|
|
624
|
+
const result = await client.query(sql);
|
|
625
|
+
const row = result.rows[0] || {};
|
|
626
|
+
|
|
627
|
+
// The stats_reset metric returns stats_reset_epoch and seconds_since_reset
|
|
628
|
+
// We need to calculate additional fields
|
|
629
|
+
const statsResetEpoch = row.stats_reset_epoch ? parseFloat(row.stats_reset_epoch) : null;
|
|
630
|
+
const secondsSinceReset = row.seconds_since_reset ? parseInt(row.seconds_since_reset, 10) : null;
|
|
631
|
+
|
|
632
|
+
// Calculate stats_reset_time from epoch
|
|
633
|
+
const statsResetTime = statsResetEpoch
|
|
634
|
+
? new Date(statsResetEpoch * 1000).toISOString()
|
|
635
|
+
: null;
|
|
636
|
+
|
|
637
|
+
// Calculate days since reset
|
|
638
|
+
const daysSinceReset = secondsSinceReset !== null
|
|
639
|
+
? Math.floor(secondsSinceReset / SECONDS_PER_DAY)
|
|
640
|
+
: null;
|
|
641
|
+
|
|
642
|
+
// Get postmaster startup time separately (simple inline SQL)
|
|
643
|
+
// This is supplementary data - errors are captured in output, not propagated
|
|
644
|
+
let postmasterStartupEpoch: number | null = null;
|
|
645
|
+
let postmasterStartupTime: string | null = null;
|
|
646
|
+
let postmasterStartupError: string | undefined;
|
|
647
|
+
try {
|
|
648
|
+
const pmResult = await client.query(`
|
|
649
|
+
select
|
|
650
|
+
extract(epoch from pg_postmaster_start_time()) as postmaster_startup_epoch,
|
|
651
|
+
pg_postmaster_start_time()::text as postmaster_startup_time
|
|
652
|
+
`);
|
|
653
|
+
if (pmResult.rows.length > 0) {
|
|
654
|
+
postmasterStartupEpoch = pmResult.rows[0].postmaster_startup_epoch
|
|
655
|
+
? parseFloat(pmResult.rows[0].postmaster_startup_epoch)
|
|
656
|
+
: null;
|
|
657
|
+
postmasterStartupTime = pmResult.rows[0].postmaster_startup_time || null;
|
|
658
|
+
}
|
|
659
|
+
} catch (err) {
|
|
660
|
+
const errorMsg = err instanceof Error ? err.message : String(err);
|
|
661
|
+
postmasterStartupError = `Failed to query postmaster start time: ${errorMsg}`;
|
|
662
|
+
console.log(`[getStatsReset] Warning: ${postmasterStartupError}`);
|
|
663
|
+
}
|
|
664
|
+
|
|
665
|
+
const statsResult: StatsReset = {
|
|
666
|
+
stats_reset_epoch: statsResetEpoch,
|
|
667
|
+
stats_reset_time: statsResetTime,
|
|
668
|
+
days_since_reset: daysSinceReset,
|
|
669
|
+
postmaster_startup_epoch: postmasterStartupEpoch,
|
|
670
|
+
postmaster_startup_time: postmasterStartupTime,
|
|
671
|
+
};
|
|
672
|
+
|
|
673
|
+
// Only include error field if there was an error (keeps output clean)
|
|
674
|
+
if (postmasterStartupError) {
|
|
675
|
+
statsResult.postmaster_startup_error = postmasterStartupError;
|
|
676
|
+
}
|
|
677
|
+
|
|
678
|
+
return statsResult;
|
|
679
|
+
}
|
|
680
|
+
|
|
681
|
+
/**
|
|
682
|
+
* Get current database name and size
|
|
683
|
+
* Uses 'db_size' metric from metrics.yml
|
|
684
|
+
*/
|
|
685
|
+
export async function getCurrentDatabaseInfo(client: Client, pgMajorVersion: number = 16): Promise<{ datname: string; size_bytes: number }> {
|
|
686
|
+
const sql = getMetricSql(METRIC_NAMES.dbSize, pgMajorVersion);
|
|
687
|
+
const result = await client.query(sql);
|
|
688
|
+
const row = result.rows[0] || {};
|
|
689
|
+
|
|
690
|
+
// db_size metric returns tag_datname and size_b
|
|
691
|
+
return {
|
|
692
|
+
datname: row.tag_datname || "postgres",
|
|
693
|
+
size_bytes: parseInt(row.size_b || "0", 10),
|
|
694
|
+
};
|
|
695
|
+
}
|
|
696
|
+
|
|
697
|
+
/**
|
|
698
|
+
* Type guard to validate redundant_to_json item structure.
|
|
699
|
+
* Returns true if item is a valid object (may have expected properties).
|
|
700
|
+
*/
|
|
701
|
+
function isValidRedundantToItem(item: unknown): item is Record<string, unknown> {
|
|
702
|
+
return typeof item === "object" && item !== null && !Array.isArray(item);
|
|
703
|
+
}
|
|
704
|
+
|
|
705
|
+
/**
|
|
706
|
+
* Get redundant indexes from the database (H004).
|
|
707
|
+
* Redundant indexes are covered by other indexes (same leading columns).
|
|
708
|
+
*
|
|
709
|
+
* @param client - Connected PostgreSQL client
|
|
710
|
+
* @param pgMajorVersion - PostgreSQL major version (default: 16)
|
|
711
|
+
* @returns Array of redundant index entries with covering index info
|
|
712
|
+
*/
|
|
713
|
+
export async function getRedundantIndexes(client: Client, pgMajorVersion: number = 16): Promise<RedundantIndex[]> {
|
|
714
|
+
const sql = getMetricSql(METRIC_NAMES.H004, pgMajorVersion);
|
|
715
|
+
const result = await client.query(sql);
|
|
716
|
+
return result.rows.map((row) => {
|
|
717
|
+
const transformed = transformMetricRow(row);
|
|
718
|
+
const indexSizeBytes = parseInt(String(transformed.index_size_bytes || 0), 10);
|
|
719
|
+
const tableSizeBytes = parseInt(String(transformed.table_size_bytes || 0), 10);
|
|
720
|
+
|
|
721
|
+
// Parse redundant_to JSON array (indexes that make this one redundant)
|
|
722
|
+
let redundantTo: RedundantToIndex[] = [];
|
|
723
|
+
let parseError: string | undefined;
|
|
724
|
+
try {
|
|
725
|
+
const jsonStr = String(transformed.redundant_to_json || "[]");
|
|
726
|
+
const parsed = JSON.parse(jsonStr);
|
|
727
|
+
if (Array.isArray(parsed)) {
|
|
728
|
+
redundantTo = parsed
|
|
729
|
+
.filter(isValidRedundantToItem)
|
|
730
|
+
.map((item) => {
|
|
731
|
+
const sizeBytes = parseInt(String(item.index_size_bytes ?? 0), 10);
|
|
732
|
+
return {
|
|
733
|
+
index_name: String(item.index_name ?? ""),
|
|
734
|
+
index_definition: String(item.index_definition ?? ""),
|
|
735
|
+
index_size_bytes: sizeBytes,
|
|
736
|
+
index_size_pretty: formatBytes(sizeBytes),
|
|
737
|
+
};
|
|
738
|
+
});
|
|
739
|
+
}
|
|
740
|
+
} catch (err) {
|
|
741
|
+
const errorMsg = err instanceof Error ? err.message : String(err);
|
|
742
|
+
const indexName = String(transformed.index_name || "unknown");
|
|
743
|
+
parseError = `Failed to parse redundant_to_json: ${errorMsg}`;
|
|
744
|
+
console.log(`[H004] Warning: ${parseError} for index "${indexName}"`);
|
|
745
|
+
}
|
|
746
|
+
|
|
747
|
+
const result: RedundantIndex = {
|
|
748
|
+
schema_name: String(transformed.schema_name || ""),
|
|
749
|
+
table_name: String(transformed.table_name || ""),
|
|
750
|
+
index_name: String(transformed.index_name || ""),
|
|
751
|
+
relation_name: String(transformed.relation_name || ""),
|
|
752
|
+
access_method: String(transformed.access_method || ""),
|
|
753
|
+
reason: String(transformed.reason || ""),
|
|
754
|
+
index_size_bytes: indexSizeBytes,
|
|
755
|
+
table_size_bytes: tableSizeBytes,
|
|
756
|
+
index_usage: parseInt(String(transformed.index_usage || 0), 10),
|
|
757
|
+
supports_fk: toBool(transformed.supports_fk),
|
|
758
|
+
index_definition: String(transformed.index_definition || ""),
|
|
759
|
+
index_size_pretty: formatBytes(indexSizeBytes),
|
|
760
|
+
table_size_pretty: formatBytes(tableSizeBytes),
|
|
761
|
+
redundant_to: redundantTo,
|
|
762
|
+
};
|
|
763
|
+
|
|
764
|
+
// Only include parse error field if there was an error (keeps output clean)
|
|
765
|
+
if (parseError) {
|
|
766
|
+
result.redundant_to_parse_error = parseError;
|
|
767
|
+
}
|
|
768
|
+
|
|
769
|
+
return result;
|
|
770
|
+
});
|
|
771
|
+
}
|
|
772
|
+
|
|
773
|
+
/**
|
|
774
|
+
* Create base report structure
|
|
775
|
+
*/
|
|
776
|
+
export function createBaseReport(
|
|
777
|
+
checkId: string,
|
|
778
|
+
checkTitle: string,
|
|
779
|
+
nodeName: string
|
|
780
|
+
): Report {
|
|
781
|
+
const buildTs = resolveBuildTs();
|
|
782
|
+
return {
|
|
783
|
+
version: pkg.version || null,
|
|
784
|
+
build_ts: buildTs,
|
|
785
|
+
generation_mode: "express",
|
|
786
|
+
checkId,
|
|
787
|
+
checkTitle,
|
|
788
|
+
timestamptz: new Date().toISOString(),
|
|
789
|
+
nodes: {
|
|
790
|
+
primary: nodeName,
|
|
791
|
+
standbys: [],
|
|
792
|
+
},
|
|
793
|
+
results: {},
|
|
794
|
+
};
|
|
795
|
+
}
|
|
796
|
+
|
|
797
|
+
function readTextFileSafe(p: string): string | null {
|
|
798
|
+
try {
|
|
799
|
+
const value = fs.readFileSync(p, "utf8").trim();
|
|
800
|
+
return value || null;
|
|
801
|
+
} catch {
|
|
802
|
+
// Intentionally silent: this is a "safe" read that returns null on any error
|
|
803
|
+
// (file not found, permission denied, etc.) - used for optional config files
|
|
804
|
+
return null;
|
|
805
|
+
}
|
|
806
|
+
}
|
|
807
|
+
|
|
808
|
+
function resolveBuildTs(): string | null {
|
|
809
|
+
// Follow reporter.py approach: read BUILD_TS from filesystem, with env override.
|
|
810
|
+
// Default: /BUILD_TS (useful in container images).
|
|
811
|
+
const envPath = process.env.PGAI_BUILD_TS_FILE;
|
|
812
|
+
const p = (envPath && envPath.trim()) ? envPath.trim() : "/BUILD_TS";
|
|
813
|
+
|
|
814
|
+
const fromFile = readTextFileSafe(p);
|
|
815
|
+
if (fromFile) return fromFile;
|
|
816
|
+
|
|
817
|
+
// Fallback for packaged CLI: allow placing BUILD_TS next to dist/ (package root).
|
|
818
|
+
// dist/lib/checkup.js => package root: dist/..
|
|
819
|
+
try {
|
|
820
|
+
const pkgRoot = path.resolve(__dirname, "..");
|
|
821
|
+
const fromPkgFile = readTextFileSafe(path.join(pkgRoot, "BUILD_TS"));
|
|
822
|
+
if (fromPkgFile) return fromPkgFile;
|
|
823
|
+
} catch (err) {
|
|
824
|
+
// Path resolution failing is unexpected - warn about it
|
|
825
|
+
const errorMsg = err instanceof Error ? err.message : String(err);
|
|
826
|
+
console.warn(`[resolveBuildTs] Warning: path resolution failed: ${errorMsg}`);
|
|
827
|
+
}
|
|
828
|
+
|
|
829
|
+
// Last resort: use package.json mtime as an approximation (non-null, stable-ish).
|
|
830
|
+
try {
|
|
831
|
+
const pkgJsonPath = path.resolve(__dirname, "..", "package.json");
|
|
832
|
+
const st = fs.statSync(pkgJsonPath);
|
|
833
|
+
return st.mtime.toISOString();
|
|
834
|
+
} catch (err) {
|
|
835
|
+
// package.json not found is expected in some environments (e.g., bundled) - debug only
|
|
836
|
+
if (process.env.DEBUG) {
|
|
837
|
+
const errorMsg = err instanceof Error ? err.message : String(err);
|
|
838
|
+
console.log(`[resolveBuildTs] Could not stat package.json, using current time: ${errorMsg}`);
|
|
839
|
+
}
|
|
840
|
+
return new Date().toISOString();
|
|
841
|
+
}
|
|
842
|
+
}
|
|
843
|
+
|
|
844
|
+
// ============================================================================
|
|
845
|
+
// Unified Report Generator Helpers
|
|
846
|
+
// ============================================================================
|
|
847
|
+
|
|
848
|
+
/**
|
|
849
|
+
* Generate a simple version report (A002, A013).
|
|
850
|
+
* These reports only contain PostgreSQL version information.
|
|
851
|
+
*/
|
|
852
|
+
async function generateVersionReport(
|
|
853
|
+
client: Client,
|
|
854
|
+
nodeName: string,
|
|
855
|
+
checkId: string,
|
|
856
|
+
checkTitle: string
|
|
857
|
+
): Promise<Report> {
|
|
858
|
+
const report = createBaseReport(checkId, checkTitle, nodeName);
|
|
859
|
+
const postgresVersion = await getPostgresVersion(client);
|
|
860
|
+
report.results[nodeName] = { data: { version: postgresVersion } };
|
|
861
|
+
return report;
|
|
862
|
+
}
|
|
863
|
+
|
|
864
|
+
/**
|
|
865
|
+
* Generate a settings-based report (A003, A007).
|
|
866
|
+
* Fetches settings using provided function and includes postgres_version.
|
|
867
|
+
*/
|
|
868
|
+
async function generateSettingsReport(
|
|
869
|
+
client: Client,
|
|
870
|
+
nodeName: string,
|
|
871
|
+
checkId: string,
|
|
872
|
+
checkTitle: string,
|
|
873
|
+
fetchSettings: (client: Client, pgMajorVersion: number) => Promise<Record<string, unknown>>
|
|
874
|
+
): Promise<Report> {
|
|
875
|
+
const report = createBaseReport(checkId, checkTitle, nodeName);
|
|
876
|
+
const postgresVersion = await getPostgresVersion(client);
|
|
877
|
+
const pgMajorVersion = parseInt(postgresVersion.server_major_ver, 10) || 16;
|
|
878
|
+
const settings = await fetchSettings(client, pgMajorVersion);
|
|
879
|
+
report.results[nodeName] = { data: settings, postgres_version: postgresVersion };
|
|
880
|
+
return report;
|
|
881
|
+
}
|
|
882
|
+
|
|
883
|
+
/**
|
|
884
|
+
* Generate an index report (H001, H002, H004).
|
|
885
|
+
* Common structure: index list + totals + database info, keyed by database name.
|
|
886
|
+
*/
|
|
887
|
+
async function generateIndexReport<T extends { index_size_bytes: number }>(
|
|
888
|
+
client: Client,
|
|
889
|
+
nodeName: string,
|
|
890
|
+
checkId: string,
|
|
891
|
+
checkTitle: string,
|
|
892
|
+
indexFieldName: string,
|
|
893
|
+
fetchIndexes: (client: Client, pgMajorVersion: number) => Promise<T[]>,
|
|
894
|
+
extraFields?: (client: Client, pgMajorVersion: number) => Promise<Record<string, unknown>>
|
|
895
|
+
): Promise<Report> {
|
|
896
|
+
const report = createBaseReport(checkId, checkTitle, nodeName);
|
|
897
|
+
const postgresVersion = await getPostgresVersion(client);
|
|
898
|
+
const pgMajorVersion = parseInt(postgresVersion.server_major_ver, 10) || 16;
|
|
899
|
+
const indexes = await fetchIndexes(client, pgMajorVersion);
|
|
900
|
+
const { datname: dbName, size_bytes: dbSizeBytes } = await getCurrentDatabaseInfo(client, pgMajorVersion);
|
|
901
|
+
|
|
902
|
+
const totalCount = indexes.length;
|
|
903
|
+
const totalSizeBytes = indexes.reduce((sum, idx) => sum + idx.index_size_bytes, 0);
|
|
904
|
+
|
|
905
|
+
const dbEntry: Record<string, unknown> = {
|
|
906
|
+
[indexFieldName]: indexes,
|
|
907
|
+
total_count: totalCount,
|
|
908
|
+
total_size_bytes: totalSizeBytes,
|
|
909
|
+
total_size_pretty: formatBytes(totalSizeBytes),
|
|
910
|
+
database_size_bytes: dbSizeBytes,
|
|
911
|
+
database_size_pretty: formatBytes(dbSizeBytes),
|
|
912
|
+
};
|
|
913
|
+
|
|
914
|
+
// Add extra fields if provided (e.g., stats_reset for H002)
|
|
915
|
+
if (extraFields) {
|
|
916
|
+
Object.assign(dbEntry, await extraFields(client, pgMajorVersion));
|
|
917
|
+
}
|
|
918
|
+
|
|
919
|
+
report.results[nodeName] = { data: { [dbName]: dbEntry }, postgres_version: postgresVersion };
|
|
920
|
+
return report;
|
|
921
|
+
}
|
|
922
|
+
|
|
923
|
+
// ============================================================================
|
|
924
|
+
// Report Generators (using unified helpers)
|
|
925
|
+
// ============================================================================
|
|
926
|
+
|
|
927
|
+
/** Generate A002 report - Postgres major version */
|
|
928
|
+
export const generateA002 = (client: Client, nodeName = "node-01") =>
|
|
929
|
+
generateVersionReport(client, nodeName, "A002", "Postgres major version");
|
|
930
|
+
|
|
931
|
+
/** Generate A003 report - Postgres settings */
|
|
932
|
+
export const generateA003 = (client: Client, nodeName = "node-01") =>
|
|
933
|
+
generateSettingsReport(client, nodeName, "A003", "Postgres settings", getSettings);
|
|
934
|
+
|
|
935
|
+
/** Generate A004 report - Cluster information (custom structure) */
|
|
936
|
+
export async function generateA004(client: Client, nodeName: string = "node-01"): Promise<Report> {
|
|
937
|
+
const report = createBaseReport("A004", "Cluster information", nodeName);
|
|
938
|
+
const postgresVersion = await getPostgresVersion(client);
|
|
939
|
+
const pgMajorVersion = parseInt(postgresVersion.server_major_ver, 10) || 16;
|
|
940
|
+
report.results[nodeName] = {
|
|
941
|
+
data: {
|
|
942
|
+
general_info: await getClusterInfo(client, pgMajorVersion),
|
|
943
|
+
database_sizes: await getDatabaseSizes(client),
|
|
944
|
+
},
|
|
945
|
+
postgres_version: postgresVersion,
|
|
946
|
+
};
|
|
947
|
+
return report;
|
|
948
|
+
}
|
|
949
|
+
|
|
950
|
+
/** Generate A007 report - Altered settings */
|
|
951
|
+
export const generateA007 = (client: Client, nodeName = "node-01") =>
|
|
952
|
+
generateSettingsReport(client, nodeName, "A007", "Altered settings", getAlteredSettings);
|
|
953
|
+
|
|
954
|
+
/** Generate A013 report - Postgres minor version */
|
|
955
|
+
export const generateA013 = (client: Client, nodeName = "node-01") =>
|
|
956
|
+
generateVersionReport(client, nodeName, "A013", "Postgres minor version");
|
|
957
|
+
|
|
958
|
+
/** Generate H001 report - Invalid indexes */
|
|
959
|
+
export const generateH001 = (client: Client, nodeName = "node-01") =>
|
|
960
|
+
generateIndexReport(client, nodeName, "H001", "Invalid indexes", "invalid_indexes", getInvalidIndexes);
|
|
961
|
+
|
|
962
|
+
/** Generate H002 report - Unused indexes (includes stats_reset) */
|
|
963
|
+
export const generateH002 = (client: Client, nodeName = "node-01") =>
|
|
964
|
+
generateIndexReport(client, nodeName, "H002", "Unused indexes", "unused_indexes", getUnusedIndexes,
|
|
965
|
+
async (c, v) => ({ stats_reset: await getStatsReset(c, v) }));
|
|
966
|
+
|
|
967
|
+
/** Generate H004 report - Redundant indexes */
|
|
968
|
+
export const generateH004 = (client: Client, nodeName = "node-01") =>
|
|
969
|
+
generateIndexReport(client, nodeName, "H004", "Redundant indexes", "redundant_indexes", getRedundantIndexes);
|
|
970
|
+
|
|
971
|
+
/**
 * Generate D004 report - pg_stat_statements and pg_stat_kcache settings.
 *
 * Uses graceful degradation: extension queries are wrapped in try-catch
 * because extensions may not be installed. Errors are included in the
 * report output rather than failing the entire report.
 *
 * @param client - Connected PostgreSQL client
 * @param nodeName - Node identifier used as the results key
 * @returns Report containing filtered settings plus per-extension status
 */
async function generateD004(client: Client, nodeName: string): Promise<Report> {
  const report = createBaseReport("D004", "pg_stat_statements and pg_stat_kcache settings", nodeName);
  const postgresVersion = await getPostgresVersion(client);
  // Fall back to 16 when the major version string cannot be parsed.
  const pgMajorVersion = parseInt(postgresVersion.server_major_ver, 10) || 16;
  const allSettings = await getSettings(client, pgMajorVersion);

  // Filter settings related to pg_stat_statements and pg_stat_kcache
  // (prefix match on the GUC name, e.g. "pg_stat_statements.max").
  const pgssSettings: Record<string, SettingInfo> = {};
  for (const [name, setting] of Object.entries(allSettings)) {
    if (name.startsWith("pg_stat_statements") || name.startsWith("pg_stat_kcache")) {
      pgssSettings[name] = setting;
    }
  }

  // Check pg_stat_statements extension; all counters default to
  // "unavailable" values so a failed probe still yields a full report.
  let pgssAvailable = false;
  let pgssMetricsCount = 0;
  let pgssTotalCalls = 0;
  let pgssError: string | null = null;
  const pgssSampleQueries: Array<{ queryid: string; user: string; database: string; calls: number }> = [];

  try {
    // Probe pg_catalog for the extension before touching its view:
    // querying pg_stat_statements directly would error if not installed.
    const extCheck = await client.query(
      "select 1 from pg_extension where extname = 'pg_stat_statements'"
    );
    if (extCheck.rows.length > 0) {
      pgssAvailable = true;
      const statsResult = await client.query(`
        select count(*) as cnt, coalesce(sum(calls), 0) as total_calls
        from pg_stat_statements
      `);
      pgssMetricsCount = parseInt(statsResult.rows[0]?.cnt || "0", 10);
      pgssTotalCalls = parseInt(statsResult.rows[0]?.total_calls || "0", 10);

      // Get sample queries (top 5 by calls); user/database resolved via
      // left joins so rows with dropped roles/databases still appear.
      const sampleResult = await client.query(`
        select
          queryid::text as queryid,
          coalesce(usename, 'unknown') as "user",
          coalesce(datname, 'unknown') as database,
          calls
        from pg_stat_statements s
        left join pg_database d on s.dbid = d.oid
        left join pg_user u on s.userid = u.usesysid
        order by calls desc
        limit 5
      `);
      for (const row of sampleResult.rows) {
        pgssSampleQueries.push({
          queryid: row.queryid,
          user: row.user,
          database: row.database,
          calls: parseInt(row.calls, 10),
        });
      }
    }
  } catch (err) {
    // Graceful degradation: record the error in the report instead of throwing.
    const errorMsg = err instanceof Error ? err.message : String(err);
    console.log(`[D004] Error querying pg_stat_statements: ${errorMsg}`);
    pgssError = errorMsg;
  }

  // Check pg_stat_kcache extension (same graceful-degradation pattern).
  let kcacheAvailable = false;
  let kcacheMetricsCount = 0;
  let kcacheTotalExecTime = 0;
  let kcacheTotalUserTime = 0;
  let kcacheTotalSystemTime = 0;
  let kcacheError: string | null = null;
  const kcacheSampleQueries: Array<{ queryid: string; user: string; exec_total_time: number }> = [];

  try {
    const extCheck = await client.query(
      "select 1 from pg_extension where extname = 'pg_stat_kcache'"
    );
    if (extCheck.rows.length > 0) {
      kcacheAvailable = true;
      // NOTE(review): uses exec_user_time/exec_system_time column names —
      // presumably requires pg_stat_kcache >= 2.2; confirm minimum version.
      const statsResult = await client.query(`
        select
          count(*) as cnt,
          coalesce(sum(exec_user_time + exec_system_time), 0) as total_exec_time,
          coalesce(sum(exec_user_time), 0) as total_user_time,
          coalesce(sum(exec_system_time), 0) as total_system_time
        from pg_stat_kcache
      `);
      kcacheMetricsCount = parseInt(statsResult.rows[0]?.cnt || "0", 10);
      kcacheTotalExecTime = parseFloat(statsResult.rows[0]?.total_exec_time || "0");
      kcacheTotalUserTime = parseFloat(statsResult.rows[0]?.total_user_time || "0");
      kcacheTotalSystemTime = parseFloat(statsResult.rows[0]?.total_system_time || "0");

      // Get sample queries (top 5 by exec time)
      const sampleResult = await client.query(`
        select
          queryid::text as queryid,
          coalesce(usename, 'unknown') as "user",
          (exec_user_time + exec_system_time) as exec_total_time
        from pg_stat_kcache k
        left join pg_user u on k.userid = u.usesysid
        order by (exec_user_time + exec_system_time) desc
        limit 5
      `);
      for (const row of sampleResult.rows) {
        kcacheSampleQueries.push({
          queryid: row.queryid,
          user: row.user,
          exec_total_time: parseFloat(row.exec_total_time),
        });
      }
    }
  } catch (err) {
    const errorMsg = err instanceof Error ? err.message : String(err);
    console.log(`[D004] Error querying pg_stat_kcache: ${errorMsg}`);
    kcacheError = errorMsg;
  }

  report.results[nodeName] = {
    data: {
      settings: pgssSettings,
      pg_stat_statements_status: {
        extension_available: pgssAvailable,
        metrics_count: pgssMetricsCount,
        total_calls: pgssTotalCalls,
        sample_queries: pgssSampleQueries,
        // error key is only present when the probe failed (keeps output clean)
        ...(pgssError && { error: pgssError }),
      },
      pg_stat_kcache_status: {
        extension_available: kcacheAvailable,
        metrics_count: kcacheMetricsCount,
        total_exec_time: kcacheTotalExecTime,
        total_user_time: kcacheTotalUserTime,
        total_system_time: kcacheTotalSystemTime,
        sample_queries: kcacheSampleQueries,
        ...(kcacheError && { error: kcacheError }),
      },
    },
    postgres_version: postgresVersion,
  };

  return report;
}
|
|
1118
|
+
|
|
1119
|
+
/**
|
|
1120
|
+
* Generate F001 report - Autovacuum: current settings
|
|
1121
|
+
*/
|
|
1122
|
+
async function generateF001(client: Client, nodeName: string): Promise<Report> {
|
|
1123
|
+
const report = createBaseReport("F001", "Autovacuum: current settings", nodeName);
|
|
1124
|
+
const postgresVersion = await getPostgresVersion(client);
|
|
1125
|
+
const pgMajorVersion = parseInt(postgresVersion.server_major_ver, 10) || 16;
|
|
1126
|
+
const allSettings = await getSettings(client, pgMajorVersion);
|
|
1127
|
+
|
|
1128
|
+
// Filter autovacuum-related settings
|
|
1129
|
+
const autovacuumSettings: Record<string, SettingInfo> = {};
|
|
1130
|
+
for (const [name, setting] of Object.entries(allSettings)) {
|
|
1131
|
+
if (name.includes("autovacuum") || name.includes("vacuum")) {
|
|
1132
|
+
autovacuumSettings[name] = setting;
|
|
1133
|
+
}
|
|
1134
|
+
}
|
|
1135
|
+
|
|
1136
|
+
report.results[nodeName] = {
|
|
1137
|
+
data: autovacuumSettings,
|
|
1138
|
+
postgres_version: postgresVersion,
|
|
1139
|
+
};
|
|
1140
|
+
|
|
1141
|
+
return report;
|
|
1142
|
+
}
|
|
1143
|
+
|
|
1144
|
+
/**
 * Generate G001 report - Memory-related settings.
 *
 * Collects a fixed list of memory-related GUCs and computes rough memory
 * usage estimates (shared memory total, theoretical max work_mem usage).
 * Estimate calculation is best-effort: failures are recorded in the report
 * under analysis.error rather than aborting.
 *
 * @param client - Connected PostgreSQL client
 * @param nodeName - Node identifier used as the results key
 * @returns Report with settings plus an analysis section
 */
async function generateG001(client: Client, nodeName: string): Promise<Report> {
  const report = createBaseReport("G001", "Memory-related settings", nodeName);
  const postgresVersion = await getPostgresVersion(client);
  // Fall back to 16 when the major version string cannot be parsed.
  const pgMajorVersion = parseInt(postgresVersion.server_major_ver, 10) || 16;
  const allSettings = await getSettings(client, pgMajorVersion);

  // Memory-related setting names (explicit allow-list; only those present
  // in allSettings are included in the output).
  const memorySettingNames = [
    "shared_buffers",
    "work_mem",
    "maintenance_work_mem",
    "effective_cache_size",
    "wal_buffers",
    "temp_buffers",
    "max_connections",
    "autovacuum_work_mem",
    "hash_mem_multiplier",
    "logical_decoding_work_mem",
    "max_stack_depth",
    "max_prepared_transactions",
    "max_locks_per_transaction",
    "max_pred_locks_per_transaction",
  ];

  const memorySettings: Record<string, SettingInfo> = {};
  for (const name of memorySettingNames) {
    if (allSettings[name]) {
      memorySettings[name] = allSettings[name];
    }
  }

  // Calculate memory usage estimates; each *_bytes field is paired with a
  // human-readable *_pretty twin.
  interface MemoryUsage {
    shared_buffers_bytes: number;
    shared_buffers_pretty: string;
    wal_buffers_bytes: number;
    wal_buffers_pretty: string;
    shared_memory_total_bytes: number;
    shared_memory_total_pretty: string;
    work_mem_per_connection_bytes: number;
    work_mem_per_connection_pretty: string;
    max_work_mem_usage_bytes: number;
    max_work_mem_usage_pretty: string;
    maintenance_work_mem_bytes: number;
    maintenance_work_mem_pretty: string;
    effective_cache_size_bytes: number;
    effective_cache_size_pretty: string;
  }

  // Empty object when the estimate query fails (error captured below).
  let memoryUsage: MemoryUsage | Record<string, never> = {};
  let memoryError: string | null = null;

  try {
    // Get actual byte values from PostgreSQL: pg_size_bytes parses the
    // unit-suffixed values current_setting returns (e.g. "128MB").
    // NOTE(review): assumes current_setting('wal_buffers') never returns the
    // raw "-1" auto sentinel at runtime — confirm on a server with default
    // wal_buffers.
    const memQuery = await client.query(`
      select
        pg_size_bytes(current_setting('shared_buffers')) as shared_buffers_bytes,
        pg_size_bytes(current_setting('wal_buffers')) as wal_buffers_bytes,
        pg_size_bytes(current_setting('work_mem')) as work_mem_bytes,
        pg_size_bytes(current_setting('maintenance_work_mem')) as maintenance_work_mem_bytes,
        pg_size_bytes(current_setting('effective_cache_size')) as effective_cache_size_bytes,
        current_setting('max_connections')::int as max_connections
    `);

    if (memQuery.rows.length > 0) {
      const row = memQuery.rows[0];
      const sharedBuffersBytes = parseInt(row.shared_buffers_bytes, 10);
      const walBuffersBytes = parseInt(row.wal_buffers_bytes, 10);
      const workMemBytes = parseInt(row.work_mem_bytes, 10);
      const maintenanceWorkMemBytes = parseInt(row.maintenance_work_mem_bytes, 10);
      const effectiveCacheSizeBytes = parseInt(row.effective_cache_size_bytes, 10);
      const maxConnections = row.max_connections;

      // Rough totals: shared memory = shared_buffers + wal_buffers;
      // theoretical worst case work_mem = work_mem * max_connections
      // (real usage can exceed this per connection with parallel plans).
      const sharedMemoryTotal = sharedBuffersBytes + walBuffersBytes;
      const maxWorkMemUsage = workMemBytes * maxConnections;

      memoryUsage = {
        shared_buffers_bytes: sharedBuffersBytes,
        shared_buffers_pretty: formatBytes(sharedBuffersBytes),
        wal_buffers_bytes: walBuffersBytes,
        wal_buffers_pretty: formatBytes(walBuffersBytes),
        shared_memory_total_bytes: sharedMemoryTotal,
        shared_memory_total_pretty: formatBytes(sharedMemoryTotal),
        work_mem_per_connection_bytes: workMemBytes,
        work_mem_per_connection_pretty: formatBytes(workMemBytes),
        max_work_mem_usage_bytes: maxWorkMemUsage,
        max_work_mem_usage_pretty: formatBytes(maxWorkMemUsage),
        maintenance_work_mem_bytes: maintenanceWorkMemBytes,
        maintenance_work_mem_pretty: formatBytes(maintenanceWorkMemBytes),
        effective_cache_size_bytes: effectiveCacheSizeBytes,
        effective_cache_size_pretty: formatBytes(effectiveCacheSizeBytes),
      };
    }
  } catch (err) {
    // Best-effort: surface the error in the analysis section instead of throwing.
    const errorMsg = err instanceof Error ? err.message : String(err);
    console.log(`[G001] Error calculating memory usage: ${errorMsg}`);
    memoryError = errorMsg;
  }

  report.results[nodeName] = {
    data: {
      settings: memorySettings,
      analysis: {
        estimated_total_memory_usage: memoryUsage,
        // error key only present when estimation failed (keeps output clean)
        ...(memoryError && { error: memoryError }),
      },
    },
    postgres_version: postgresVersion,
  };

  return report;
}
|
|
1259
|
+
|
|
1260
|
+
/**
 * Available report generators.
 *
 * Maps check ID (e.g. "H001") to its generator function. Iterated in
 * insertion order by generateAllReports. Generators declared with a default
 * nodeName still satisfy the (client, nodeName) signature.
 */
export const REPORT_GENERATORS: Record<string, (client: Client, nodeName: string) => Promise<Report>> = {
  A002: generateA002,
  A003: generateA003,
  A004: generateA004,
  A007: generateA007,
  A013: generateA013,
  D004: generateD004,
  F001: generateF001,
  G001: generateG001,
  H001: generateH001,
  H002: generateH002,
  H004: generateH004,
};
|
|
1276
|
+
|
|
1277
|
+
/**
 * Check IDs and titles.
 *
 * Human-readable titles keyed by check ID; keys should stay in sync with
 * REPORT_GENERATORS (generateAllReports falls back to the raw check ID
 * when a title is missing).
 */
export const CHECK_INFO: Record<string, string> = {
  A002: "Postgres major version",
  A003: "Postgres settings",
  A004: "Cluster information",
  A007: "Altered settings",
  A013: "Postgres minor version",
  D004: "pg_stat_statements and pg_stat_kcache settings",
  F001: "Autovacuum: current settings",
  G001: "Memory-related settings",
  H001: "Invalid indexes",
  H002: "Unused indexes",
  H004: "Redundant indexes",
};
|
|
1293
|
+
|
|
1294
|
+
/**
|
|
1295
|
+
* Generate all available health check reports.
|
|
1296
|
+
* This is the main entry point for express mode checkup generation.
|
|
1297
|
+
*
|
|
1298
|
+
* @param client - Connected PostgreSQL client
|
|
1299
|
+
* @param nodeName - Node identifier for the report (default: "node-01")
|
|
1300
|
+
* @param onProgress - Optional callback for progress updates during generation
|
|
1301
|
+
* @returns Object mapping check IDs (e.g., "H001", "A002") to their reports
|
|
1302
|
+
* @throws {Error} If any critical report generation fails
|
|
1303
|
+
*/
|
|
1304
|
+
export async function generateAllReports(
|
|
1305
|
+
client: Client,
|
|
1306
|
+
nodeName: string = "node-01",
|
|
1307
|
+
onProgress?: (info: { checkId: string; checkTitle: string; index: number; total: number }) => void
|
|
1308
|
+
): Promise<Record<string, Report>> {
|
|
1309
|
+
const reports: Record<string, Report> = {};
|
|
1310
|
+
|
|
1311
|
+
const entries = Object.entries(REPORT_GENERATORS);
|
|
1312
|
+
const total = entries.length;
|
|
1313
|
+
let index = 0;
|
|
1314
|
+
|
|
1315
|
+
for (const [checkId, generator] of entries) {
|
|
1316
|
+
index += 1;
|
|
1317
|
+
onProgress?.({
|
|
1318
|
+
checkId,
|
|
1319
|
+
checkTitle: CHECK_INFO[checkId] || checkId,
|
|
1320
|
+
index,
|
|
1321
|
+
total,
|
|
1322
|
+
});
|
|
1323
|
+
reports[checkId] = await generator(client, nodeName);
|
|
1324
|
+
}
|
|
1325
|
+
|
|
1326
|
+
return reports;
|
|
1327
|
+
}
|