postgresai 0.14.0-dev.51 → 0.14.0-dev.52
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/bin/postgres-ai.ts +0 -324
- package/bun.lock +1 -3
- package/dist/bin/postgres-ai.js +171 -1712
- package/dist/sql/01.role.sql +16 -0
- package/dist/sql/02.permissions.sql +37 -0
- package/dist/sql/03.optional_rds.sql +6 -0
- package/dist/sql/04.optional_self_managed.sql +8 -0
- package/dist/sql/05.helpers.sql +415 -0
- package/lib/auth-server.ts +75 -65
- package/lib/config.ts +0 -3
- package/package.json +2 -4
- package/test/init.integration.test.ts +6 -6
- package/lib/checkup-api.ts +0 -175
- package/lib/checkup.ts +0 -1141
- package/lib/metrics-loader.ts +0 -514
- package/test/checkup.test.ts +0 -1016
- package/test/schema-validation.test.ts +0 -260
package/lib/checkup.ts
DELETED
|
@@ -1,1141 +0,0 @@
|
|
|
1
|
-
/**
|
|
2
|
-
* Express Checkup Module
|
|
3
|
-
* ======================
|
|
4
|
-
* Generates JSON health check reports directly from PostgreSQL without Prometheus.
|
|
5
|
-
*
|
|
6
|
-
* ARCHITECTURAL DECISIONS
|
|
7
|
-
* -----------------------
|
|
8
|
-
*
|
|
9
|
-
* 1. SINGLE SOURCE OF TRUTH FOR SQL QUERIES
|
|
10
|
-
* All SQL queries MUST be loaded from config/pgwatch-prometheus/metrics.yml
|
|
11
|
-
* via getMetricSql() from metrics-loader.ts.
|
|
12
|
-
*
|
|
13
|
-
* DO NOT copy-paste or inline SQL queries in this file!
|
|
14
|
-
*
|
|
15
|
-
* The metrics.yml file is the single source of truth for metric extraction
|
|
16
|
-
* logic, shared between:
|
|
17
|
-
* - Full-fledged monitoring (Prometheus/pgwatch)
|
|
18
|
-
* - Express checkup (this CLI tool)
|
|
19
|
-
*
|
|
20
|
-
* This ensures consistency and avoids maintenance burden of duplicate queries.
|
|
21
|
-
*
|
|
22
|
-
* 2. JSON SCHEMA COMPLIANCE
|
|
23
|
-
* All generated reports MUST comply with JSON schemas in reporter/schemas/.
|
|
24
|
-
* These schemas define the expected format for both:
|
|
25
|
-
* - Full-fledged monitoring reporter output
|
|
26
|
-
* - Express checkup output
|
|
27
|
-
*
|
|
28
|
-
* Before adding or modifying a report, verify the corresponding schema exists
|
|
29
|
-
* and ensure the output matches. Run schema validation tests to confirm.
|
|
30
|
-
*
|
|
31
|
-
* ADDING NEW REPORTS
|
|
32
|
-
* ------------------
|
|
33
|
-
* 1. Add/verify the metric exists in config/pgwatch-prometheus/metrics.yml
|
|
34
|
-
* 2. Add the metric name mapping to METRIC_NAMES in metrics-loader.ts
|
|
35
|
-
* 3. Verify JSON schema exists in reporter/schemas/{CHECK_ID}.schema.json
|
|
36
|
-
* 4. Implement the generator function using getMetricSql()
|
|
37
|
-
* 5. Add schema validation test in test/schema-validation.test.ts
|
|
38
|
-
*/
|
|
39
|
-
|
|
40
|
-
import { Client } from "pg";
|
|
41
|
-
import * as fs from "fs";
|
|
42
|
-
import * as path from "path";
|
|
43
|
-
import * as pkg from "../package.json";
|
|
44
|
-
import { getMetricSql, transformMetricRow, METRIC_NAMES } from "./metrics-loader";
|
|
45
|
-
|
|
46
|
-
/**
|
|
47
|
-
* PostgreSQL version information
|
|
48
|
-
*/
|
|
49
|
-
export interface PostgresVersion {
|
|
50
|
-
version: string;
|
|
51
|
-
server_version_num: string;
|
|
52
|
-
server_major_ver: string;
|
|
53
|
-
server_minor_ver: string;
|
|
54
|
-
}
|
|
55
|
-
|
|
56
|
-
/**
|
|
57
|
-
* Setting information from pg_settings
|
|
58
|
-
*/
|
|
59
|
-
export interface SettingInfo {
|
|
60
|
-
setting: string;
|
|
61
|
-
unit: string;
|
|
62
|
-
category: string;
|
|
63
|
-
context: string;
|
|
64
|
-
vartype: string;
|
|
65
|
-
pretty_value: string;
|
|
66
|
-
}
|
|
67
|
-
|
|
68
|
-
/**
|
|
69
|
-
* Altered setting (A007) - subset of SettingInfo
|
|
70
|
-
*/
|
|
71
|
-
export interface AlteredSetting {
|
|
72
|
-
value: string;
|
|
73
|
-
unit: string;
|
|
74
|
-
category: string;
|
|
75
|
-
pretty_value: string;
|
|
76
|
-
}
|
|
77
|
-
|
|
78
|
-
/**
|
|
79
|
-
* Cluster metric (A004)
|
|
80
|
-
*/
|
|
81
|
-
export interface ClusterMetric {
|
|
82
|
-
value: string;
|
|
83
|
-
unit: string;
|
|
84
|
-
description: string;
|
|
85
|
-
}
|
|
86
|
-
|
|
87
|
-
/**
|
|
88
|
-
* Invalid index entry (H001) - matches H001.schema.json invalidIndex
|
|
89
|
-
*/
|
|
90
|
-
export interface InvalidIndex {
|
|
91
|
-
schema_name: string;
|
|
92
|
-
table_name: string;
|
|
93
|
-
index_name: string;
|
|
94
|
-
relation_name: string;
|
|
95
|
-
index_size_bytes: number;
|
|
96
|
-
index_size_pretty: string;
|
|
97
|
-
supports_fk: boolean;
|
|
98
|
-
}
|
|
99
|
-
|
|
100
|
-
/**
|
|
101
|
-
* Unused index entry (H002) - matches H002.schema.json unusedIndex
|
|
102
|
-
*/
|
|
103
|
-
export interface UnusedIndex {
|
|
104
|
-
schema_name: string;
|
|
105
|
-
table_name: string;
|
|
106
|
-
index_name: string;
|
|
107
|
-
index_definition: string;
|
|
108
|
-
reason: string;
|
|
109
|
-
idx_scan: number;
|
|
110
|
-
index_size_bytes: number;
|
|
111
|
-
idx_is_btree: boolean;
|
|
112
|
-
supports_fk: boolean;
|
|
113
|
-
index_size_pretty: string;
|
|
114
|
-
}
|
|
115
|
-
|
|
116
|
-
/**
|
|
117
|
-
* Stats reset info for H002 - matches H002.schema.json statsReset
|
|
118
|
-
*/
|
|
119
|
-
export interface StatsReset {
|
|
120
|
-
stats_reset_epoch: number | null;
|
|
121
|
-
stats_reset_time: string | null;
|
|
122
|
-
days_since_reset: number | null;
|
|
123
|
-
postmaster_startup_epoch: number | null;
|
|
124
|
-
postmaster_startup_time: string | null;
|
|
125
|
-
}
|
|
126
|
-
|
|
127
|
-
/**
|
|
128
|
-
* Redundant index entry (H004) - matches H004.schema.json redundantIndex
|
|
129
|
-
*/
|
|
130
|
-
/**
|
|
131
|
-
* Covering index definition (the index that makes another index redundant)
|
|
132
|
-
*/
|
|
133
|
-
export interface CoveringIndex {
|
|
134
|
-
index_name: string;
|
|
135
|
-
index_definition: string;
|
|
136
|
-
}
|
|
137
|
-
|
|
138
|
-
export interface RedundantIndex {
|
|
139
|
-
schema_name: string;
|
|
140
|
-
table_name: string;
|
|
141
|
-
index_name: string;
|
|
142
|
-
relation_name: string;
|
|
143
|
-
access_method: string;
|
|
144
|
-
reason: string;
|
|
145
|
-
index_size_bytes: number;
|
|
146
|
-
table_size_bytes: number;
|
|
147
|
-
index_usage: number;
|
|
148
|
-
supports_fk: boolean;
|
|
149
|
-
index_definition: string;
|
|
150
|
-
index_size_pretty: string;
|
|
151
|
-
table_size_pretty: string;
|
|
152
|
-
covering_indexes: CoveringIndex[];
|
|
153
|
-
}
|
|
154
|
-
|
|
155
|
-
/**
|
|
156
|
-
* Node result for reports
|
|
157
|
-
*/
|
|
158
|
-
export interface NodeResult {
|
|
159
|
-
data: Record<string, any>;
|
|
160
|
-
postgres_version?: PostgresVersion;
|
|
161
|
-
}
|
|
162
|
-
|
|
163
|
-
/**
|
|
164
|
-
* Report structure matching JSON schemas
|
|
165
|
-
*/
|
|
166
|
-
export interface Report {
|
|
167
|
-
version: string | null;
|
|
168
|
-
build_ts: string | null;
|
|
169
|
-
generation_mode: string | null;
|
|
170
|
-
checkId: string;
|
|
171
|
-
checkTitle: string;
|
|
172
|
-
timestamptz: string;
|
|
173
|
-
nodes: {
|
|
174
|
-
primary: string;
|
|
175
|
-
standbys: string[];
|
|
176
|
-
};
|
|
177
|
-
results: Record<string, NodeResult>;
|
|
178
|
-
}
|
|
179
|
-
|
|
180
|
-
/**
|
|
181
|
-
* Parse PostgreSQL version number into major and minor components
|
|
182
|
-
*/
|
|
183
|
-
export function parseVersionNum(versionNum: string): { major: string; minor: string } {
|
|
184
|
-
if (!versionNum || versionNum.length < 6) {
|
|
185
|
-
return { major: "", minor: "" };
|
|
186
|
-
}
|
|
187
|
-
try {
|
|
188
|
-
const num = parseInt(versionNum, 10);
|
|
189
|
-
return {
|
|
190
|
-
major: Math.floor(num / 10000).toString(),
|
|
191
|
-
minor: (num % 10000).toString(),
|
|
192
|
-
};
|
|
193
|
-
} catch {
|
|
194
|
-
return { major: "", minor: "" };
|
|
195
|
-
}
|
|
196
|
-
}
|
|
197
|
-
|
|
198
|
-
/**
|
|
199
|
-
* Format bytes to human readable string using binary units (1024-based).
|
|
200
|
-
* Uses IEC standard: KiB, MiB, GiB, etc.
|
|
201
|
-
*
|
|
202
|
-
* Note: PostgreSQL's pg_size_pretty() uses kB/MB/GB with 1024 base (technically
|
|
203
|
-
* incorrect SI usage), but we follow IEC binary units per project style guide.
|
|
204
|
-
*/
|
|
205
|
-
export function formatBytes(bytes: number): string {
|
|
206
|
-
if (bytes === 0) return "0 B";
|
|
207
|
-
const units = ["B", "KiB", "MiB", "GiB", "TiB", "PiB"];
|
|
208
|
-
const i = Math.floor(Math.log(bytes) / Math.log(1024));
|
|
209
|
-
return `${(bytes / Math.pow(1024, i)).toFixed(2)} ${units[i]}`;
|
|
210
|
-
}
|
|
211
|
-
|
|
212
|
-
/**
|
|
213
|
-
* Get PostgreSQL version information
|
|
214
|
-
* SQL loaded from config/pgwatch-prometheus/metrics.yml (express_version)
|
|
215
|
-
*/
|
|
216
|
-
export async function getPostgresVersion(client: Client): Promise<PostgresVersion> {
|
|
217
|
-
const sql = getMetricSql(METRIC_NAMES.version);
|
|
218
|
-
const result = await client.query(sql);
|
|
219
|
-
|
|
220
|
-
let version = "";
|
|
221
|
-
let serverVersionNum = "";
|
|
222
|
-
|
|
223
|
-
for (const row of result.rows) {
|
|
224
|
-
if (row.name === "server_version") {
|
|
225
|
-
version = row.setting;
|
|
226
|
-
} else if (row.name === "server_version_num") {
|
|
227
|
-
serverVersionNum = row.setting;
|
|
228
|
-
}
|
|
229
|
-
}
|
|
230
|
-
|
|
231
|
-
const { major, minor } = parseVersionNum(serverVersionNum);
|
|
232
|
-
|
|
233
|
-
return {
|
|
234
|
-
version,
|
|
235
|
-
server_version_num: serverVersionNum,
|
|
236
|
-
server_major_ver: major,
|
|
237
|
-
server_minor_ver: minor,
|
|
238
|
-
};
|
|
239
|
-
}
|
|
240
|
-
|
|
241
|
-
/**
|
|
242
|
-
* Get all PostgreSQL settings
|
|
243
|
-
* SQL loaded from config/pgwatch-prometheus/metrics.yml (express_settings)
|
|
244
|
-
*/
|
|
245
|
-
export async function getSettings(client: Client): Promise<Record<string, SettingInfo>> {
|
|
246
|
-
const sql = getMetricSql(METRIC_NAMES.settings);
|
|
247
|
-
const result = await client.query(sql);
|
|
248
|
-
const settings: Record<string, SettingInfo> = {};
|
|
249
|
-
|
|
250
|
-
for (const row of result.rows) {
|
|
251
|
-
settings[row.name] = {
|
|
252
|
-
setting: row.setting,
|
|
253
|
-
unit: row.unit || "",
|
|
254
|
-
category: row.category,
|
|
255
|
-
context: row.context,
|
|
256
|
-
vartype: row.vartype,
|
|
257
|
-
pretty_value: row.pretty_value,
|
|
258
|
-
};
|
|
259
|
-
}
|
|
260
|
-
|
|
261
|
-
return settings;
|
|
262
|
-
}
|
|
263
|
-
|
|
264
|
-
/**
|
|
265
|
-
* Get altered (non-default) PostgreSQL settings
|
|
266
|
-
* SQL loaded from config/pgwatch-prometheus/metrics.yml (express_altered_settings)
|
|
267
|
-
*/
|
|
268
|
-
export async function getAlteredSettings(client: Client): Promise<Record<string, AlteredSetting>> {
|
|
269
|
-
const sql = getMetricSql(METRIC_NAMES.alteredSettings);
|
|
270
|
-
const result = await client.query(sql);
|
|
271
|
-
const settings: Record<string, AlteredSetting> = {};
|
|
272
|
-
|
|
273
|
-
for (const row of result.rows) {
|
|
274
|
-
settings[row.name] = {
|
|
275
|
-
value: row.setting,
|
|
276
|
-
unit: row.unit || "",
|
|
277
|
-
category: row.category,
|
|
278
|
-
pretty_value: row.pretty_value,
|
|
279
|
-
};
|
|
280
|
-
}
|
|
281
|
-
|
|
282
|
-
return settings;
|
|
283
|
-
}
|
|
284
|
-
|
|
285
|
-
/**
|
|
286
|
-
* Get database sizes
|
|
287
|
-
* SQL loaded from config/pgwatch-prometheus/metrics.yml (express_database_sizes)
|
|
288
|
-
*/
|
|
289
|
-
export async function getDatabaseSizes(client: Client): Promise<Record<string, number>> {
|
|
290
|
-
const sql = getMetricSql(METRIC_NAMES.databaseSizes);
|
|
291
|
-
const result = await client.query(sql);
|
|
292
|
-
const sizes: Record<string, number> = {};
|
|
293
|
-
|
|
294
|
-
for (const row of result.rows) {
|
|
295
|
-
sizes[row.datname] = parseInt(row.size_bytes, 10);
|
|
296
|
-
}
|
|
297
|
-
|
|
298
|
-
return sizes;
|
|
299
|
-
}
|
|
300
|
-
|
|
301
|
-
/**
|
|
302
|
-
* Get cluster general info metrics
|
|
303
|
-
* SQL loaded from config/pgwatch-prometheus/metrics.yml (express_cluster_stats, express_connection_states, express_uptime)
|
|
304
|
-
*/
|
|
305
|
-
export async function getClusterInfo(client: Client): Promise<Record<string, ClusterMetric>> {
|
|
306
|
-
const info: Record<string, ClusterMetric> = {};
|
|
307
|
-
|
|
308
|
-
// Get cluster statistics
|
|
309
|
-
const clusterStatsSql = getMetricSql(METRIC_NAMES.clusterStats);
|
|
310
|
-
const statsResult = await client.query(clusterStatsSql);
|
|
311
|
-
if (statsResult.rows.length > 0) {
|
|
312
|
-
const stats = statsResult.rows[0];
|
|
313
|
-
|
|
314
|
-
info.total_connections = {
|
|
315
|
-
value: String(stats.total_connections || 0),
|
|
316
|
-
unit: "connections",
|
|
317
|
-
description: "Total active database connections",
|
|
318
|
-
};
|
|
319
|
-
|
|
320
|
-
info.total_commits = {
|
|
321
|
-
value: String(stats.total_commits || 0),
|
|
322
|
-
unit: "transactions",
|
|
323
|
-
description: "Total committed transactions",
|
|
324
|
-
};
|
|
325
|
-
|
|
326
|
-
info.total_rollbacks = {
|
|
327
|
-
value: String(stats.total_rollbacks || 0),
|
|
328
|
-
unit: "transactions",
|
|
329
|
-
description: "Total rolled back transactions",
|
|
330
|
-
};
|
|
331
|
-
|
|
332
|
-
const blocksHit = parseInt(stats.blocks_hit || "0", 10);
|
|
333
|
-
const blocksRead = parseInt(stats.blocks_read || "0", 10);
|
|
334
|
-
const totalBlocks = blocksHit + blocksRead;
|
|
335
|
-
const cacheHitRatio = totalBlocks > 0 ? ((blocksHit / totalBlocks) * 100).toFixed(2) : "0.00";
|
|
336
|
-
|
|
337
|
-
info.cache_hit_ratio = {
|
|
338
|
-
value: cacheHitRatio,
|
|
339
|
-
unit: "%",
|
|
340
|
-
description: "Buffer cache hit ratio",
|
|
341
|
-
};
|
|
342
|
-
|
|
343
|
-
info.blocks_read = {
|
|
344
|
-
value: String(blocksRead),
|
|
345
|
-
unit: "blocks",
|
|
346
|
-
description: "Total disk blocks read",
|
|
347
|
-
};
|
|
348
|
-
|
|
349
|
-
info.blocks_hit = {
|
|
350
|
-
value: String(blocksHit),
|
|
351
|
-
unit: "blocks",
|
|
352
|
-
description: "Total buffer cache hits",
|
|
353
|
-
};
|
|
354
|
-
|
|
355
|
-
info.tuples_returned = {
|
|
356
|
-
value: String(stats.tuples_returned || 0),
|
|
357
|
-
unit: "rows",
|
|
358
|
-
description: "Total rows returned by queries",
|
|
359
|
-
};
|
|
360
|
-
|
|
361
|
-
info.tuples_fetched = {
|
|
362
|
-
value: String(stats.tuples_fetched || 0),
|
|
363
|
-
unit: "rows",
|
|
364
|
-
description: "Total rows fetched by queries",
|
|
365
|
-
};
|
|
366
|
-
|
|
367
|
-
info.tuples_inserted = {
|
|
368
|
-
value: String(stats.tuples_inserted || 0),
|
|
369
|
-
unit: "rows",
|
|
370
|
-
description: "Total rows inserted",
|
|
371
|
-
};
|
|
372
|
-
|
|
373
|
-
info.tuples_updated = {
|
|
374
|
-
value: String(stats.tuples_updated || 0),
|
|
375
|
-
unit: "rows",
|
|
376
|
-
description: "Total rows updated",
|
|
377
|
-
};
|
|
378
|
-
|
|
379
|
-
info.tuples_deleted = {
|
|
380
|
-
value: String(stats.tuples_deleted || 0),
|
|
381
|
-
unit: "rows",
|
|
382
|
-
description: "Total rows deleted",
|
|
383
|
-
};
|
|
384
|
-
|
|
385
|
-
info.total_deadlocks = {
|
|
386
|
-
value: String(stats.total_deadlocks || 0),
|
|
387
|
-
unit: "deadlocks",
|
|
388
|
-
description: "Total deadlocks detected",
|
|
389
|
-
};
|
|
390
|
-
|
|
391
|
-
info.temp_files_created = {
|
|
392
|
-
value: String(stats.temp_files_created || 0),
|
|
393
|
-
unit: "files",
|
|
394
|
-
description: "Total temporary files created",
|
|
395
|
-
};
|
|
396
|
-
|
|
397
|
-
const tempBytes = parseInt(stats.temp_bytes_written || "0", 10);
|
|
398
|
-
info.temp_bytes_written = {
|
|
399
|
-
value: formatBytes(tempBytes),
|
|
400
|
-
unit: "bytes",
|
|
401
|
-
description: "Total temporary file bytes written",
|
|
402
|
-
};
|
|
403
|
-
}
|
|
404
|
-
|
|
405
|
-
// Get connection states
|
|
406
|
-
const connStatesSql = getMetricSql(METRIC_NAMES.connectionStates);
|
|
407
|
-
const connResult = await client.query(connStatesSql);
|
|
408
|
-
for (const row of connResult.rows) {
|
|
409
|
-
const stateKey = `connections_${row.state.replace(/\s+/g, "_")}`;
|
|
410
|
-
info[stateKey] = {
|
|
411
|
-
value: String(row.count),
|
|
412
|
-
unit: "connections",
|
|
413
|
-
description: `Connections in '${row.state}' state`,
|
|
414
|
-
};
|
|
415
|
-
}
|
|
416
|
-
|
|
417
|
-
// Get uptime info
|
|
418
|
-
const uptimeSql = getMetricSql(METRIC_NAMES.uptimeInfo);
|
|
419
|
-
const uptimeResult = await client.query(uptimeSql);
|
|
420
|
-
if (uptimeResult.rows.length > 0) {
|
|
421
|
-
const uptime = uptimeResult.rows[0];
|
|
422
|
-
info.start_time = {
|
|
423
|
-
value: uptime.start_time.toISOString(),
|
|
424
|
-
unit: "timestamp",
|
|
425
|
-
description: "PostgreSQL server start time",
|
|
426
|
-
};
|
|
427
|
-
info.uptime = {
|
|
428
|
-
value: uptime.uptime,
|
|
429
|
-
unit: "interval",
|
|
430
|
-
description: "Server uptime",
|
|
431
|
-
};
|
|
432
|
-
}
|
|
433
|
-
|
|
434
|
-
return info;
|
|
435
|
-
}
|
|
436
|
-
|
|
437
|
-
/**
|
|
438
|
-
* Get invalid indexes (H001)
|
|
439
|
-
* SQL loaded from config/pgwatch-prometheus/metrics.yml (pg_invalid_indexes)
|
|
440
|
-
*/
|
|
441
|
-
export async function getInvalidIndexes(client: Client): Promise<InvalidIndex[]> {
|
|
442
|
-
const sql = getMetricSql(METRIC_NAMES.H001);
|
|
443
|
-
const result = await client.query(sql);
|
|
444
|
-
return result.rows.map((row) => {
|
|
445
|
-
const transformed = transformMetricRow(row);
|
|
446
|
-
const indexSizeBytes = parseInt(String(transformed.index_size_bytes || 0), 10);
|
|
447
|
-
return {
|
|
448
|
-
schema_name: String(transformed.schema_name || ""),
|
|
449
|
-
table_name: String(transformed.table_name || ""),
|
|
450
|
-
index_name: String(transformed.index_name || ""),
|
|
451
|
-
relation_name: String(transformed.relation_name || ""),
|
|
452
|
-
index_size_bytes: indexSizeBytes,
|
|
453
|
-
index_size_pretty: formatBytes(indexSizeBytes),
|
|
454
|
-
supports_fk: transformed.supports_fk === true || transformed.supports_fk === 1,
|
|
455
|
-
};
|
|
456
|
-
});
|
|
457
|
-
}
|
|
458
|
-
|
|
459
|
-
/**
|
|
460
|
-
* Get unused indexes (H002)
|
|
461
|
-
* SQL loaded from config/pgwatch-prometheus/metrics.yml (unused_indexes)
|
|
462
|
-
*/
|
|
463
|
-
export async function getUnusedIndexes(client: Client): Promise<UnusedIndex[]> {
|
|
464
|
-
const sql = getMetricSql(METRIC_NAMES.H002);
|
|
465
|
-
const result = await client.query(sql);
|
|
466
|
-
return result.rows.map((row) => {
|
|
467
|
-
const transformed = transformMetricRow(row);
|
|
468
|
-
const indexSizeBytes = parseInt(String(transformed.index_size_bytes || 0), 10);
|
|
469
|
-
return {
|
|
470
|
-
schema_name: String(transformed.schema_name || ""),
|
|
471
|
-
table_name: String(transformed.table_name || ""),
|
|
472
|
-
index_name: String(transformed.index_name || ""),
|
|
473
|
-
index_definition: String(transformed.index_definition || ""),
|
|
474
|
-
reason: String(transformed.reason || ""),
|
|
475
|
-
idx_scan: parseInt(String(transformed.idx_scan || 0), 10),
|
|
476
|
-
index_size_bytes: indexSizeBytes,
|
|
477
|
-
idx_is_btree: transformed.idx_is_btree === true || transformed.idx_is_btree === "t",
|
|
478
|
-
supports_fk: transformed.supports_fk === true || transformed.supports_fk === 1,
|
|
479
|
-
index_size_pretty: formatBytes(indexSizeBytes),
|
|
480
|
-
};
|
|
481
|
-
});
|
|
482
|
-
}
|
|
483
|
-
|
|
484
|
-
/**
|
|
485
|
-
* Get stats reset info (H002)
|
|
486
|
-
*/
|
|
487
|
-
/**
|
|
488
|
-
* Get stats reset info (H002)
|
|
489
|
-
* SQL loaded from config/pgwatch-prometheus/metrics.yml (express_stats_reset)
|
|
490
|
-
*/
|
|
491
|
-
export async function getStatsReset(client: Client): Promise<StatsReset> {
|
|
492
|
-
const sql = getMetricSql(METRIC_NAMES.statsReset);
|
|
493
|
-
const result = await client.query(sql);
|
|
494
|
-
const row = result.rows[0] || {};
|
|
495
|
-
return {
|
|
496
|
-
stats_reset_epoch: row.stats_reset_epoch ? parseFloat(row.stats_reset_epoch) : null,
|
|
497
|
-
stats_reset_time: row.stats_reset_time || null,
|
|
498
|
-
days_since_reset: row.days_since_reset ? parseInt(row.days_since_reset, 10) : null,
|
|
499
|
-
postmaster_startup_epoch: row.postmaster_startup_epoch ? parseFloat(row.postmaster_startup_epoch) : null,
|
|
500
|
-
postmaster_startup_time: row.postmaster_startup_time || null,
|
|
501
|
-
};
|
|
502
|
-
}
|
|
503
|
-
|
|
504
|
-
/**
|
|
505
|
-
* Get current database name and size
|
|
506
|
-
* SQL loaded from config/pgwatch-prometheus/metrics.yml (express_current_database)
|
|
507
|
-
*/
|
|
508
|
-
export async function getCurrentDatabaseInfo(client: Client): Promise<{ datname: string; size_bytes: number }> {
|
|
509
|
-
const sql = getMetricSql(METRIC_NAMES.currentDatabase);
|
|
510
|
-
const result = await client.query(sql);
|
|
511
|
-
const row = result.rows[0] || {};
|
|
512
|
-
return {
|
|
513
|
-
datname: row.datname || "postgres",
|
|
514
|
-
size_bytes: parseInt(row.size_bytes, 10) || 0,
|
|
515
|
-
};
|
|
516
|
-
}
|
|
517
|
-
|
|
518
|
-
/**
|
|
519
|
-
* Get redundant indexes (H004)
|
|
520
|
-
* SQL loaded from config/pgwatch-prometheus/metrics.yml (redundant_indexes)
|
|
521
|
-
*/
|
|
522
|
-
export async function getRedundantIndexes(client: Client): Promise<RedundantIndex[]> {
|
|
523
|
-
const sql = getMetricSql(METRIC_NAMES.H004);
|
|
524
|
-
const result = await client.query(sql);
|
|
525
|
-
return result.rows.map((row) => {
|
|
526
|
-
const transformed = transformMetricRow(row);
|
|
527
|
-
const indexSizeBytes = parseInt(String(transformed.index_size_bytes || 0), 10);
|
|
528
|
-
const tableSizeBytes = parseInt(String(transformed.table_size_bytes || 0), 10);
|
|
529
|
-
|
|
530
|
-
// Parse covering_indexes JSON array
|
|
531
|
-
let coveringIndexes: CoveringIndex[] = [];
|
|
532
|
-
try {
|
|
533
|
-
const jsonStr = String(transformed.covering_indexes_json || "[]");
|
|
534
|
-
const parsed = JSON.parse(jsonStr);
|
|
535
|
-
if (Array.isArray(parsed)) {
|
|
536
|
-
coveringIndexes = parsed.map((item: any) => ({
|
|
537
|
-
index_name: String(item.index_name || ""),
|
|
538
|
-
index_definition: String(item.index_definition || ""),
|
|
539
|
-
}));
|
|
540
|
-
}
|
|
541
|
-
} catch {
|
|
542
|
-
// If JSON parsing fails, leave as empty array
|
|
543
|
-
}
|
|
544
|
-
|
|
545
|
-
return {
|
|
546
|
-
schema_name: String(transformed.schema_name || ""),
|
|
547
|
-
table_name: String(transformed.table_name || ""),
|
|
548
|
-
index_name: String(transformed.index_name || ""),
|
|
549
|
-
relation_name: String(transformed.relation_name || ""),
|
|
550
|
-
access_method: String(transformed.access_method || ""),
|
|
551
|
-
reason: String(transformed.reason || ""),
|
|
552
|
-
index_size_bytes: indexSizeBytes,
|
|
553
|
-
table_size_bytes: tableSizeBytes,
|
|
554
|
-
index_usage: parseInt(String(transformed.index_usage || 0), 10),
|
|
555
|
-
supports_fk: transformed.supports_fk === true || transformed.supports_fk === 1,
|
|
556
|
-
index_definition: String(transformed.index_definition || ""),
|
|
557
|
-
index_size_pretty: formatBytes(indexSizeBytes),
|
|
558
|
-
table_size_pretty: formatBytes(tableSizeBytes),
|
|
559
|
-
covering_indexes: coveringIndexes,
|
|
560
|
-
};
|
|
561
|
-
});
|
|
562
|
-
}
|
|
563
|
-
|
|
564
|
-
/**
|
|
565
|
-
* Create base report structure
|
|
566
|
-
*/
|
|
567
|
-
export function createBaseReport(
|
|
568
|
-
checkId: string,
|
|
569
|
-
checkTitle: string,
|
|
570
|
-
nodeName: string
|
|
571
|
-
): Report {
|
|
572
|
-
const buildTs = resolveBuildTs();
|
|
573
|
-
return {
|
|
574
|
-
version: pkg.version || null,
|
|
575
|
-
build_ts: buildTs,
|
|
576
|
-
generation_mode: "express",
|
|
577
|
-
checkId,
|
|
578
|
-
checkTitle,
|
|
579
|
-
timestamptz: new Date().toISOString(),
|
|
580
|
-
nodes: {
|
|
581
|
-
primary: nodeName,
|
|
582
|
-
standbys: [],
|
|
583
|
-
},
|
|
584
|
-
results: {},
|
|
585
|
-
};
|
|
586
|
-
}
|
|
587
|
-
|
|
588
|
-
function readTextFileSafe(p: string): string | null {
|
|
589
|
-
try {
|
|
590
|
-
const value = fs.readFileSync(p, "utf8").trim();
|
|
591
|
-
return value || null;
|
|
592
|
-
} catch {
|
|
593
|
-
return null;
|
|
594
|
-
}
|
|
595
|
-
}
|
|
596
|
-
|
|
597
|
-
function resolveBuildTs(): string | null {
|
|
598
|
-
// Follow reporter.py approach: read BUILD_TS from filesystem, with env override.
|
|
599
|
-
// Default: /BUILD_TS (useful in container images).
|
|
600
|
-
const envPath = process.env.PGAI_BUILD_TS_FILE;
|
|
601
|
-
const p = (envPath && envPath.trim()) ? envPath.trim() : "/BUILD_TS";
|
|
602
|
-
|
|
603
|
-
const fromFile = readTextFileSafe(p);
|
|
604
|
-
if (fromFile) return fromFile;
|
|
605
|
-
|
|
606
|
-
// Fallback for packaged CLI: allow placing BUILD_TS next to dist/ (package root).
|
|
607
|
-
// dist/lib/checkup.js => package root: dist/..
|
|
608
|
-
try {
|
|
609
|
-
const pkgRoot = path.resolve(__dirname, "..");
|
|
610
|
-
const fromPkgFile = readTextFileSafe(path.join(pkgRoot, "BUILD_TS"));
|
|
611
|
-
if (fromPkgFile) return fromPkgFile;
|
|
612
|
-
} catch {
|
|
613
|
-
// ignore
|
|
614
|
-
}
|
|
615
|
-
|
|
616
|
-
// Last resort: use package.json mtime as an approximation (non-null, stable-ish).
|
|
617
|
-
try {
|
|
618
|
-
const pkgJsonPath = path.resolve(__dirname, "..", "package.json");
|
|
619
|
-
const st = fs.statSync(pkgJsonPath);
|
|
620
|
-
return st.mtime.toISOString();
|
|
621
|
-
} catch {
|
|
622
|
-
return new Date().toISOString();
|
|
623
|
-
}
|
|
624
|
-
}
|
|
625
|
-
|
|
626
|
-
/**
|
|
627
|
-
* Generate A002 report - Postgres major version
|
|
628
|
-
*/
|
|
629
|
-
export async function generateA002(client: Client, nodeName: string = "node-01"): Promise<Report> {
|
|
630
|
-
const report = createBaseReport("A002", "Postgres major version", nodeName);
|
|
631
|
-
const postgresVersion = await getPostgresVersion(client);
|
|
632
|
-
|
|
633
|
-
report.results[nodeName] = {
|
|
634
|
-
data: {
|
|
635
|
-
version: postgresVersion,
|
|
636
|
-
},
|
|
637
|
-
};
|
|
638
|
-
|
|
639
|
-
return report;
|
|
640
|
-
}
|
|
641
|
-
|
|
642
|
-
/**
|
|
643
|
-
* Generate A003 report - Postgres settings
|
|
644
|
-
*/
|
|
645
|
-
export async function generateA003(client: Client, nodeName: string = "node-01"): Promise<Report> {
|
|
646
|
-
const report = createBaseReport("A003", "Postgres settings", nodeName);
|
|
647
|
-
const settings = await getSettings(client);
|
|
648
|
-
const postgresVersion = await getPostgresVersion(client);
|
|
649
|
-
|
|
650
|
-
report.results[nodeName] = {
|
|
651
|
-
data: settings,
|
|
652
|
-
postgres_version: postgresVersion,
|
|
653
|
-
};
|
|
654
|
-
|
|
655
|
-
return report;
|
|
656
|
-
}
|
|
657
|
-
|
|
658
|
-
/**
|
|
659
|
-
* Generate A004 report - Cluster information
|
|
660
|
-
*/
|
|
661
|
-
export async function generateA004(client: Client, nodeName: string = "node-01"): Promise<Report> {
|
|
662
|
-
const report = createBaseReport("A004", "Cluster information", nodeName);
|
|
663
|
-
const generalInfo = await getClusterInfo(client);
|
|
664
|
-
const databaseSizes = await getDatabaseSizes(client);
|
|
665
|
-
const postgresVersion = await getPostgresVersion(client);
|
|
666
|
-
|
|
667
|
-
report.results[nodeName] = {
|
|
668
|
-
data: {
|
|
669
|
-
general_info: generalInfo,
|
|
670
|
-
database_sizes: databaseSizes,
|
|
671
|
-
},
|
|
672
|
-
postgres_version: postgresVersion,
|
|
673
|
-
};
|
|
674
|
-
|
|
675
|
-
return report;
|
|
676
|
-
}
|
|
677
|
-
|
|
678
|
-
/**
|
|
679
|
-
* Generate A007 report - Altered settings
|
|
680
|
-
*/
|
|
681
|
-
export async function generateA007(client: Client, nodeName: string = "node-01"): Promise<Report> {
|
|
682
|
-
const report = createBaseReport("A007", "Altered settings", nodeName);
|
|
683
|
-
const alteredSettings = await getAlteredSettings(client);
|
|
684
|
-
const postgresVersion = await getPostgresVersion(client);
|
|
685
|
-
|
|
686
|
-
report.results[nodeName] = {
|
|
687
|
-
data: alteredSettings,
|
|
688
|
-
postgres_version: postgresVersion,
|
|
689
|
-
};
|
|
690
|
-
|
|
691
|
-
return report;
|
|
692
|
-
}
|
|
693
|
-
|
|
694
|
-
/**
|
|
695
|
-
* Generate A013 report - Postgres minor version
|
|
696
|
-
*/
|
|
697
|
-
export async function generateA013(client: Client, nodeName: string = "node-01"): Promise<Report> {
|
|
698
|
-
const report = createBaseReport("A013", "Postgres minor version", nodeName);
|
|
699
|
-
const postgresVersion = await getPostgresVersion(client);
|
|
700
|
-
|
|
701
|
-
report.results[nodeName] = {
|
|
702
|
-
data: {
|
|
703
|
-
version: postgresVersion,
|
|
704
|
-
},
|
|
705
|
-
};
|
|
706
|
-
|
|
707
|
-
return report;
|
|
708
|
-
}
|
|
709
|
-
|
|
710
|
-
/**
|
|
711
|
-
* Generate H001 report - Invalid indexes
|
|
712
|
-
*/
|
|
713
|
-
export async function generateH001(client: Client, nodeName: string = "node-01"): Promise<Report> {
|
|
714
|
-
const report = createBaseReport("H001", "Invalid indexes", nodeName);
|
|
715
|
-
const invalidIndexes = await getInvalidIndexes(client);
|
|
716
|
-
const postgresVersion = await getPostgresVersion(client);
|
|
717
|
-
|
|
718
|
-
// Get current database name and size
|
|
719
|
-
const { datname: dbName, size_bytes: dbSizeBytes } = await getCurrentDatabaseInfo(client);
|
|
720
|
-
|
|
721
|
-
// Calculate totals
|
|
722
|
-
const totalCount = invalidIndexes.length;
|
|
723
|
-
const totalSizeBytes = invalidIndexes.reduce((sum, idx) => sum + idx.index_size_bytes, 0);
|
|
724
|
-
|
|
725
|
-
// Structure data by database name per schema
|
|
726
|
-
report.results[nodeName] = {
|
|
727
|
-
data: {
|
|
728
|
-
[dbName]: {
|
|
729
|
-
invalid_indexes: invalidIndexes,
|
|
730
|
-
total_count: totalCount,
|
|
731
|
-
total_size_bytes: totalSizeBytes,
|
|
732
|
-
total_size_pretty: formatBytes(totalSizeBytes),
|
|
733
|
-
database_size_bytes: dbSizeBytes,
|
|
734
|
-
database_size_pretty: formatBytes(dbSizeBytes),
|
|
735
|
-
},
|
|
736
|
-
},
|
|
737
|
-
postgres_version: postgresVersion,
|
|
738
|
-
};
|
|
739
|
-
|
|
740
|
-
return report;
|
|
741
|
-
}
|
|
742
|
-
|
|
743
|
-
/**
|
|
744
|
-
* Generate H002 report - Unused indexes
|
|
745
|
-
*/
|
|
746
|
-
export async function generateH002(client: Client, nodeName: string = "node-01"): Promise<Report> {
|
|
747
|
-
const report = createBaseReport("H002", "Unused indexes", nodeName);
|
|
748
|
-
const unusedIndexes = await getUnusedIndexes(client);
|
|
749
|
-
const postgresVersion = await getPostgresVersion(client);
|
|
750
|
-
const statsReset = await getStatsReset(client);
|
|
751
|
-
|
|
752
|
-
// Get current database name and size
|
|
753
|
-
const { datname: dbName, size_bytes: dbSizeBytes } = await getCurrentDatabaseInfo(client);
|
|
754
|
-
|
|
755
|
-
// Calculate totals
|
|
756
|
-
const totalCount = unusedIndexes.length;
|
|
757
|
-
const totalSizeBytes = unusedIndexes.reduce((sum, idx) => sum + idx.index_size_bytes, 0);
|
|
758
|
-
|
|
759
|
-
// Structure data by database name per schema
|
|
760
|
-
report.results[nodeName] = {
|
|
761
|
-
data: {
|
|
762
|
-
[dbName]: {
|
|
763
|
-
unused_indexes: unusedIndexes,
|
|
764
|
-
total_count: totalCount,
|
|
765
|
-
total_size_bytes: totalSizeBytes,
|
|
766
|
-
total_size_pretty: formatBytes(totalSizeBytes),
|
|
767
|
-
database_size_bytes: dbSizeBytes,
|
|
768
|
-
database_size_pretty: formatBytes(dbSizeBytes),
|
|
769
|
-
stats_reset: statsReset,
|
|
770
|
-
},
|
|
771
|
-
},
|
|
772
|
-
postgres_version: postgresVersion,
|
|
773
|
-
};
|
|
774
|
-
|
|
775
|
-
return report;
|
|
776
|
-
}
|
|
777
|
-
|
|
778
|
-
/**
|
|
779
|
-
* Generate H004 report - Redundant indexes
|
|
780
|
-
*/
|
|
781
|
-
export async function generateH004(client: Client, nodeName: string = "node-01"): Promise<Report> {
|
|
782
|
-
const report = createBaseReport("H004", "Redundant indexes", nodeName);
|
|
783
|
-
const redundantIndexes = await getRedundantIndexes(client);
|
|
784
|
-
const postgresVersion = await getPostgresVersion(client);
|
|
785
|
-
|
|
786
|
-
// Get current database name and size
|
|
787
|
-
const { datname: dbName, size_bytes: dbSizeBytes } = await getCurrentDatabaseInfo(client);
|
|
788
|
-
|
|
789
|
-
// Calculate totals
|
|
790
|
-
const totalCount = redundantIndexes.length;
|
|
791
|
-
const totalSizeBytes = redundantIndexes.reduce((sum, idx) => sum + idx.index_size_bytes, 0);
|
|
792
|
-
|
|
793
|
-
// Structure data by database name per schema
|
|
794
|
-
report.results[nodeName] = {
|
|
795
|
-
data: {
|
|
796
|
-
[dbName]: {
|
|
797
|
-
redundant_indexes: redundantIndexes,
|
|
798
|
-
total_count: totalCount,
|
|
799
|
-
total_size_bytes: totalSizeBytes,
|
|
800
|
-
total_size_pretty: formatBytes(totalSizeBytes),
|
|
801
|
-
database_size_bytes: dbSizeBytes,
|
|
802
|
-
database_size_pretty: formatBytes(dbSizeBytes),
|
|
803
|
-
},
|
|
804
|
-
},
|
|
805
|
-
postgres_version: postgresVersion,
|
|
806
|
-
};
|
|
807
|
-
|
|
808
|
-
return report;
|
|
809
|
-
}
|
|
810
|
-
|
|
811
|
-
/**
 * Generate D004 report - pg_stat_statements and pg_stat_kcache settings
 *
 * Collects all server settings whose names start with "pg_stat_statements"
 * or "pg_stat_kcache", then probes each extension: whether it is installed,
 * how many rows its stats view holds, aggregate counters, and a small sample
 * of top queries. Probe failures are swallowed and reported as
 * extension_available: false.
 *
 * @param client - Connected PostgreSQL client.
 * @param nodeName - Key under which results are stored in the report.
 * @returns The populated D004 report.
 */
async function generateD004(client: Client, nodeName: string): Promise<Report> {
  const report = createBaseReport("D004", "pg_stat_statements and pg_stat_kcache settings", nodeName);
  const postgresVersion = await getPostgresVersion(client);
  const allSettings = await getSettings(client);

  // Filter settings related to pg_stat_statements and pg_stat_kcache
  const pgssSettings: Record<string, SettingInfo> = {};
  for (const [name, setting] of Object.entries(allSettings)) {
    if (name.startsWith("pg_stat_statements") || name.startsWith("pg_stat_kcache")) {
      pgssSettings[name] = setting;
    }
  }

  // Check pg_stat_statements extension
  let pgssAvailable = false;
  let pgssMetricsCount = 0;
  let pgssTotalCalls = 0;
  const pgssSampleQueries: Array<{ queryid: string; user: string; database: string; calls: number }> = [];

  try {
    // Installed-extension check; absence leaves pgssAvailable = false.
    const extCheck = await client.query(
      "select 1 from pg_extension where extname = 'pg_stat_statements'"
    );
    if (extCheck.rows.length > 0) {
      pgssAvailable = true;
      const statsResult = await client.query(`
        select count(*) as cnt, coalesce(sum(calls), 0) as total_calls
        from pg_stat_statements
      `);
      // parseInt on row values — presumably the driver returns count()/sum()
      // (bigint/numeric) as strings; verify against the pg client config.
      pgssMetricsCount = parseInt(statsResult.rows[0]?.cnt || "0", 10);
      pgssTotalCalls = parseInt(statsResult.rows[0]?.total_calls || "0", 10);

      // Get sample queries (top 5 by calls)
      const sampleResult = await client.query(`
        select
          queryid::text as queryid,
          coalesce(usename, 'unknown') as "user",
          coalesce(datname, 'unknown') as database,
          calls
        from pg_stat_statements s
        left join pg_database d on s.dbid = d.oid
        left join pg_user u on s.userid = u.usesysid
        order by calls desc
        limit 5
      `);
      for (const row of sampleResult.rows) {
        pgssSampleQueries.push({
          queryid: row.queryid,
          user: row.user,
          database: row.database,
          calls: parseInt(row.calls, 10),
        });
      }
    }
  } catch {
    // Extension not available or accessible
  }

  // Check pg_stat_kcache extension
  let kcacheAvailable = false;
  let kcacheMetricsCount = 0;
  let kcacheTotalExecTime = 0;
  let kcacheTotalUserTime = 0;
  let kcacheTotalSystemTime = 0;
  const kcacheSampleQueries: Array<{ queryid: string; user: string; exec_total_time: number }> = [];

  try {
    // Installed-extension check; absence leaves kcacheAvailable = false.
    const extCheck = await client.query(
      "select 1 from pg_extension where extname = 'pg_stat_kcache'"
    );
    if (extCheck.rows.length > 0) {
      kcacheAvailable = true;
      const statsResult = await client.query(`
        select
          count(*) as cnt,
          coalesce(sum(exec_user_time + exec_system_time), 0) as total_exec_time,
          coalesce(sum(exec_user_time), 0) as total_user_time,
          coalesce(sum(exec_system_time), 0) as total_system_time
        from pg_stat_kcache
      `);
      kcacheMetricsCount = parseInt(statsResult.rows[0]?.cnt || "0", 10);
      // Times are fractional, hence parseFloat (vs parseInt for counters above).
      kcacheTotalExecTime = parseFloat(statsResult.rows[0]?.total_exec_time || "0");
      kcacheTotalUserTime = parseFloat(statsResult.rows[0]?.total_user_time || "0");
      kcacheTotalSystemTime = parseFloat(statsResult.rows[0]?.total_system_time || "0");

      // Get sample queries (top 5 by exec time)
      const sampleResult = await client.query(`
        select
          queryid::text as queryid,
          coalesce(usename, 'unknown') as "user",
          (exec_user_time + exec_system_time) as exec_total_time
        from pg_stat_kcache k
        left join pg_user u on k.userid = u.usesysid
        order by (exec_user_time + exec_system_time) desc
        limit 5
      `);
      for (const row of sampleResult.rows) {
        kcacheSampleQueries.push({
          queryid: row.queryid,
          user: row.user,
          exec_total_time: parseFloat(row.exec_total_time),
        });
      }
    }
  } catch {
    // Extension not available or accessible
  }

  report.results[nodeName] = {
    data: {
      settings: pgssSettings,
      pg_stat_statements_status: {
        extension_available: pgssAvailable,
        metrics_count: pgssMetricsCount,
        total_calls: pgssTotalCalls,
        sample_queries: pgssSampleQueries,
      },
      pg_stat_kcache_status: {
        extension_available: kcacheAvailable,
        metrics_count: kcacheMetricsCount,
        total_exec_time: kcacheTotalExecTime,
        total_user_time: kcacheTotalUserTime,
        total_system_time: kcacheTotalSystemTime,
        sample_queries: kcacheSampleQueries,
      },
    },
    postgres_version: postgresVersion,
  };

  return report;
}
|
|
945
|
-
|
|
946
|
-
/**
|
|
947
|
-
* Generate F001 report - Autovacuum: current settings
|
|
948
|
-
*/
|
|
949
|
-
async function generateF001(client: Client, nodeName: string): Promise<Report> {
|
|
950
|
-
const report = createBaseReport("F001", "Autovacuum: current settings", nodeName);
|
|
951
|
-
const postgresVersion = await getPostgresVersion(client);
|
|
952
|
-
const allSettings = await getSettings(client);
|
|
953
|
-
|
|
954
|
-
// Filter autovacuum-related settings
|
|
955
|
-
const autovacuumSettings: Record<string, SettingInfo> = {};
|
|
956
|
-
for (const [name, setting] of Object.entries(allSettings)) {
|
|
957
|
-
if (name.includes("autovacuum") || name.includes("vacuum")) {
|
|
958
|
-
autovacuumSettings[name] = setting;
|
|
959
|
-
}
|
|
960
|
-
}
|
|
961
|
-
|
|
962
|
-
report.results[nodeName] = {
|
|
963
|
-
data: autovacuumSettings,
|
|
964
|
-
postgres_version: postgresVersion,
|
|
965
|
-
};
|
|
966
|
-
|
|
967
|
-
return report;
|
|
968
|
-
}
|
|
969
|
-
|
|
970
|
-
/**
 * Generate G001 report - Memory-related settings
 *
 * Collects a fixed list of memory-related server settings and computes a
 * best-effort estimate of memory usage (shared memory total and the maximum
 * possible work_mem footprint across max_connections). If the estimate
 * queries fail, the analysis object is left empty.
 *
 * @param client - Connected PostgreSQL client.
 * @param nodeName - Key under which results are stored in the report.
 * @returns The populated G001 report.
 */
async function generateG001(client: Client, nodeName: string): Promise<Report> {
  const report = createBaseReport("G001", "Memory-related settings", nodeName);
  const postgresVersion = await getPostgresVersion(client);
  const allSettings = await getSettings(client);

  // Memory-related setting names
  const memorySettingNames = [
    "shared_buffers",
    "work_mem",
    "maintenance_work_mem",
    "effective_cache_size",
    "wal_buffers",
    "temp_buffers",
    "max_connections",
    "autovacuum_work_mem",
    "hash_mem_multiplier",
    "logical_decoding_work_mem",
    "max_stack_depth",
    "max_prepared_transactions",
    "max_locks_per_transaction",
    "max_pred_locks_per_transaction",
  ];

  // Keep only the settings that actually exist on this server version.
  const memorySettings: Record<string, SettingInfo> = {};
  for (const name of memorySettingNames) {
    if (allSettings[name]) {
      memorySettings[name] = allSettings[name];
    }
  }

  // Calculate memory usage estimates
  interface MemoryUsage {
    shared_buffers_bytes: number;
    shared_buffers_pretty: string;
    wal_buffers_bytes: number;
    wal_buffers_pretty: string;
    shared_memory_total_bytes: number;
    shared_memory_total_pretty: string;
    work_mem_per_connection_bytes: number;
    work_mem_per_connection_pretty: string;
    max_work_mem_usage_bytes: number;
    max_work_mem_usage_pretty: string;
    maintenance_work_mem_bytes: number;
    maintenance_work_mem_pretty: string;
    effective_cache_size_bytes: number;
    effective_cache_size_pretty: string;
  }

  // Empty object is the "couldn't compute" sentinel (allowed by the schema).
  let memoryUsage: MemoryUsage | Record<string, never> = {};

  try {
    // Get actual byte values from PostgreSQL
    // pg_size_bytes() converts human-readable settings like "128MB" server-side.
    const memQuery = await client.query(`
      select
        pg_size_bytes(current_setting('shared_buffers')) as shared_buffers_bytes,
        pg_size_bytes(current_setting('wal_buffers')) as wal_buffers_bytes,
        pg_size_bytes(current_setting('work_mem')) as work_mem_bytes,
        pg_size_bytes(current_setting('maintenance_work_mem')) as maintenance_work_mem_bytes,
        pg_size_bytes(current_setting('effective_cache_size')) as effective_cache_size_bytes,
        current_setting('max_connections')::int as max_connections
    `);

    if (memQuery.rows.length > 0) {
      const row = memQuery.rows[0];
      // parseInt on row values — presumably the driver returns bigint columns
      // as strings; verify against the pg client config.
      const sharedBuffersBytes = parseInt(row.shared_buffers_bytes, 10);
      const walBuffersBytes = parseInt(row.wal_buffers_bytes, 10);
      const workMemBytes = parseInt(row.work_mem_bytes, 10);
      const maintenanceWorkMemBytes = parseInt(row.maintenance_work_mem_bytes, 10);
      const effectiveCacheSizeBytes = parseInt(row.effective_cache_size_bytes, 10);
      const maxConnections = row.max_connections;

      const sharedMemoryTotal = sharedBuffersBytes + walBuffersBytes;
      // Upper bound: every connection using one full work_mem allocation.
      const maxWorkMemUsage = workMemBytes * maxConnections;

      memoryUsage = {
        shared_buffers_bytes: sharedBuffersBytes,
        shared_buffers_pretty: formatBytes(sharedBuffersBytes),
        wal_buffers_bytes: walBuffersBytes,
        wal_buffers_pretty: formatBytes(walBuffersBytes),
        shared_memory_total_bytes: sharedMemoryTotal,
        shared_memory_total_pretty: formatBytes(sharedMemoryTotal),
        work_mem_per_connection_bytes: workMemBytes,
        work_mem_per_connection_pretty: formatBytes(workMemBytes),
        max_work_mem_usage_bytes: maxWorkMemUsage,
        max_work_mem_usage_pretty: formatBytes(maxWorkMemUsage),
        maintenance_work_mem_bytes: maintenanceWorkMemBytes,
        maintenance_work_mem_pretty: formatBytes(maintenanceWorkMemBytes),
        effective_cache_size_bytes: effectiveCacheSizeBytes,
        effective_cache_size_pretty: formatBytes(effectiveCacheSizeBytes),
      };
    }
  } catch {
    // If we can't calculate, leave empty object (schema allows this)
  }

  report.results[nodeName] = {
    data: {
      settings: memorySettings,
      analysis: {
        estimated_total_memory_usage: memoryUsage,
      },
    },
    postgres_version: postgresVersion,
  };

  return report;
}
|
|
1080
|
-
|
|
1081
|
-
/**
 * Available report generators
 *
 * Maps check IDs to their generator functions. Key order matters:
 * generateAllReports iterates this object via Object.entries, so insertion
 * order determines execution and progress-reporting order.
 */
export const REPORT_GENERATORS: Record<string, (client: Client, nodeName: string) => Promise<Report>> = {
  A002: generateA002,
  A003: generateA003,
  A004: generateA004,
  A007: generateA007,
  A013: generateA013,
  D004: generateD004,
  F001: generateF001,
  G001: generateG001,
  H001: generateH001,
  H002: generateH002,
  H004: generateH004,
};
|
|
1097
|
-
|
|
1098
|
-
/**
 * Check IDs and titles
 *
 * Human-readable titles keyed by check ID; used for progress messages in
 * generateAllReports. Should stay in sync with REPORT_GENERATORS keys.
 */
export const CHECK_INFO: Record<string, string> = {
  A002: "Postgres major version",
  A003: "Postgres settings",
  A004: "Cluster information",
  A007: "Altered settings",
  A013: "Postgres minor version",
  D004: "pg_stat_statements and pg_stat_kcache settings",
  F001: "Autovacuum: current settings",
  G001: "Memory-related settings",
  H001: "Invalid indexes",
  H002: "Unused indexes",
  H004: "Redundant indexes",
};
|
|
1114
|
-
|
|
1115
|
-
/**
|
|
1116
|
-
* Generate all available reports
|
|
1117
|
-
*/
|
|
1118
|
-
export async function generateAllReports(
|
|
1119
|
-
client: Client,
|
|
1120
|
-
nodeName: string = "node-01",
|
|
1121
|
-
onProgress?: (info: { checkId: string; checkTitle: string; index: number; total: number }) => void
|
|
1122
|
-
): Promise<Record<string, Report>> {
|
|
1123
|
-
const reports: Record<string, Report> = {};
|
|
1124
|
-
|
|
1125
|
-
const entries = Object.entries(REPORT_GENERATORS);
|
|
1126
|
-
const total = entries.length;
|
|
1127
|
-
let index = 0;
|
|
1128
|
-
|
|
1129
|
-
for (const [checkId, generator] of entries) {
|
|
1130
|
-
index += 1;
|
|
1131
|
-
onProgress?.({
|
|
1132
|
-
checkId,
|
|
1133
|
-
checkTitle: CHECK_INFO[checkId] || checkId,
|
|
1134
|
-
index,
|
|
1135
|
-
total,
|
|
1136
|
-
});
|
|
1137
|
-
reports[checkId] = await generator(client, nodeName);
|
|
1138
|
-
}
|
|
1139
|
-
|
|
1140
|
-
return reports;
|
|
1141
|
-
}
|