postgres-scout-mcp 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,617 @@
1
+ import { z } from 'zod';
2
+ import { ensureDatabaseExists, executeInternalQuery, getCurrentDatabaseName } from '../utils/database.js';
3
+ import { sanitizeIdentifier } from '../utils/sanitize.js';
4
// Input schemas (zod) for the maintenance tools exported at the bottom of this
// file. Defaults are applied by zod, so handlers can rely on these fields.

// analyzeTableBloat: which schema (and optionally one table) to inspect, and
// the dead-tuple percentage at which a table is reported as bloated.
const AnalyzeTableBloatSchema = z.object({
  schema: z.string().optional().default('public'),
  table: z.string().optional(),
  thresholdPercent: z.number().optional().default(20)
});
// suggestVacuum: thresholds below which a table is not worth vacuuming.
const SuggestVacuumSchema = z.object({
  schema: z.string().optional().default('public'),
  minDeadTuples: z.number().optional().default(1000),
  minBloatPercent: z.number().optional().default(10)
});
// getHealthScore: optional database name; the handler requires it to match
// the currently connected database.
const GetHealthScoreSchema = z.object({
  database: z.string().optional()
});
// getSlowQueries: filtering and ordering for pg_stat_statements output. The
// enum restricts orderBy to known columns (it is interpolated into SQL).
const GetSlowQueriesSchema = z.object({
  minDurationMs: z.number().optional().default(100),
  limit: z.number().optional().default(10),
  orderBy: z.enum(['total_time', 'mean_time', 'calls']).optional().default('total_time')
});
22
/**
 * Analyze dead-tuple bloat for tables in a schema, plus a simple size/usage
 * heuristic for indexes, and produce VACUUM / REINDEX recommendations.
 *
 * @param {object} connection - connection handle forwarded to executeInternalQuery
 * @param {object} logger - logger exposing info(scope, message, meta)
 * @param {{schema: string, table?: string, thresholdPercent: number}} args
 *   parsed AnalyzeTableBloatSchema input (defaults already applied)
 * @returns {Promise<{schema: string, table?: string, thresholdPercent: number,
 *   analysis: object[], recommendations: string[]}>}
 */
export async function analyzeTableBloat(connection, logger, args) {
  const { schema, table, thresholdPercent } = args;
  logger.info('analyzeTableBloat', 'Analyzing table bloat', { schema, table });
  // Values are still passed as bind parameters below; sanitizing is defense in depth.
  const sanitizedSchema = sanitizeIdentifier(schema);
  const tableFilter = table ? `AND c.relname = $2` : '';
  const params = table ? [sanitizedSchema, sanitizeIdentifier(table)] : [sanitizedSchema];
  // Per-table dead/live tuple statistics, worst dead-tuple ratio first.
  const tableBloatQuery = `
    SELECT
      s.schemaname as schema,
      s.relname as table,
      'table' as type,
      pg_stat_get_live_tuples(c.oid) as live_tuples,
      pg_stat_get_dead_tuples(c.oid) as dead_tuples,
      pg_total_relation_size(c.oid) as total_size,
      CASE
        WHEN pg_stat_get_live_tuples(c.oid) + pg_stat_get_dead_tuples(c.oid) = 0 THEN 0
        ELSE ROUND((pg_stat_get_dead_tuples(c.oid)::numeric /
          (pg_stat_get_live_tuples(c.oid) + pg_stat_get_dead_tuples(c.oid))) * 100, 2)
      END as bloat_percent,
      pg_size_pretty(pg_total_relation_size(c.oid)) as total_size_pretty,
      pg_stat_get_last_vacuum_time(c.oid) as last_vacuum,
      pg_stat_get_last_autovacuum_time(c.oid) as last_autovacuum
    FROM pg_stat_user_tables s
    JOIN pg_class c ON c.relname = s.relname AND c.relnamespace = (
      SELECT oid FROM pg_namespace WHERE nspname = s.schemaname
    )
    WHERE s.schemaname = $1
    ${tableFilter}
    ORDER BY (pg_stat_get_dead_tuples(c.oid)::numeric /
      NULLIF(pg_stat_get_live_tuples(c.oid) + pg_stat_get_dead_tuples(c.oid), 0)) DESC NULLS LAST
  `;
  // Index sizes and scan counts; actual index bloat is not measured here, only
  // estimated from size vs. usage below.
  const indexBloatQuery = `
    SELECT
      sui.schemaname as schema,
      sui.relname as table,
      sui.indexrelname as index_name,
      'index' as type,
      pg_relation_size(sui.indexrelid) as size_bytes,
      pg_size_pretty(pg_relation_size(sui.indexrelid)) as size,
      sui.idx_scan as scans
    FROM pg_stat_user_indexes sui
    WHERE sui.schemaname = $1
    ${tableFilter ? tableFilter.replace('c.relname', 'sui.relname') : ''}
    AND pg_relation_size(sui.indexrelid) > 0
    ORDER BY pg_relation_size(sui.indexrelid) DESC
    LIMIT 50
  `;
  const [tableBloatResult, indexBloatResult] = await Promise.all([
    executeInternalQuery(connection, logger, { query: tableBloatQuery, params }),
    executeInternalQuery(connection, logger, { query: indexBloatQuery, params })
  ]);
  const analysis = [];
  // Tables: report anything over the percentage threshold, or with a large
  // absolute dead-tuple count regardless of ratio.
  for (const row of tableBloatResult.rows) {
    const bloatPercent = parseFloat(row.bloat_percent || '0');
    const deadTuples = parseInt(row.dead_tuples || '0', 10);
    const liveTuples = parseInt(row.live_tuples || '0', 10);
    if (bloatPercent >= thresholdPercent || deadTuples >= 1000) {
      const totalTuples = liveTuples + deadTuples;
      // Guard the division: an empty table can pass the filter when
      // thresholdPercent <= 0, and 0/0 would otherwise yield NaN.
      const bloatBytes = totalTuples > 0
        ? Math.round((deadTuples / totalTuples) * parseInt(row.total_size, 10))
        : 0;
      analysis.push({
        schema: row.schema,
        table: row.table,
        type: 'table',
        liveTuples,
        deadTuples,
        totalSize: row.total_size_pretty,
        bloatPercent,
        bloatBytes,
        wastedSpace: (bloatBytes / 1024 / 1024).toFixed(2) + ' MB',
        lastVacuum: row.last_vacuum,
        lastAutoVacuum: row.last_autovacuum
      });
    }
  }
  // Indexes: a large (>10 MB) but rarely scanned index is flagged as
  // potentially bloated / droppable. This is a heuristic, not a measurement.
  for (const row of indexBloatResult.rows) {
    const sizeBytes = parseInt(row.size_bytes, 10);
    const scans = parseInt(row.scans || '0', 10);
    if (sizeBytes > 10 * 1024 * 1024 && scans < 100) {
      analysis.push({
        schema: row.schema,
        table: row.table,
        type: 'index',
        indexName: row.index_name,
        size: row.size,
        sizeBytes,
        scans,
        estimatedBloatPercent: 'unknown',
        recommendation: scans === 0 ? 'Consider dropping unused index' : 'Consider REINDEX if performance degrades'
      });
    }
  }
  // Human-readable summary with ready-to-run commands for the worst offenders.
  const recommendations = [];
  const highBloatTables = analysis.filter(a => a.type === 'table' && a.bloatPercent > 30);
  const mediumBloatTables = analysis.filter(a => a.type === 'table' && a.bloatPercent >= 20 && a.bloatPercent <= 30);
  if (highBloatTables.length > 0) {
    recommendations.push(`⚠ ${highBloatTables.length} tables with >30% bloat - consider VACUUM FULL during maintenance window`);
    highBloatTables.slice(0, 3).forEach(t => {
      recommendations.push(`  VACUUM FULL ANALYZE ${t.schema}.${t.table};`);
    });
  }
  if (mediumBloatTables.length > 0) {
    recommendations.push(`${mediumBloatTables.length} tables with 20-30% bloat - schedule regular VACUUM`);
    mediumBloatTables.slice(0, 3).forEach(t => {
      recommendations.push(`  VACUUM ANALYZE ${t.schema}.${t.table};`);
    });
  }
  const unusedIndexes = analysis.filter(a => a.type === 'index' && a.scans === 0);
  if (unusedIndexes.length > 0) {
    recommendations.push(`${unusedIndexes.length} unused indexes - consider dropping to reduce bloat`);
  }
  if (analysis.length === 0) {
    recommendations.push('✓ No significant bloat detected');
  }
  return {
    schema,
    ...(table && { table }),
    thresholdPercent,
    analysis,
    recommendations
  };
}
147
/**
 * Recommend prioritized VACUUM commands for tables in a schema whose dead-tuple
 * counts and bloat percentages exceed the given thresholds.
 *
 * @param {object} connection - connection handle forwarded to executeInternalQuery
 * @param {object} logger - logger exposing info(scope, message, meta)
 * @param {{schema: string, minDeadTuples: number, minBloatPercent: number}} args
 *   parsed SuggestVacuumSchema input (defaults already applied)
 * @returns {Promise<{schema: string, tablesNeedingVacuum: number, recommendations: object[]}>}
 */
export async function suggestVacuum(connection, logger, args) {
  const { schema, minDeadTuples, minBloatPercent } = args;
  logger.info('suggestVacuum', 'Suggesting VACUUM operations', { schema });
  const sanitizedSchema = sanitizeIdentifier(schema);
  // Dead-tuple stats per table; minDeadTuples is filtered in SQL, the
  // percentage threshold is applied in JS below.
  const query = `
    SELECT
      s.schemaname as schema,
      s.relname as table,
      pg_stat_get_live_tuples(c.oid) as live_tuples,
      pg_stat_get_dead_tuples(c.oid) as dead_tuples,
      CASE
        WHEN pg_stat_get_live_tuples(c.oid) + pg_stat_get_dead_tuples(c.oid) = 0 THEN 0
        ELSE ROUND((pg_stat_get_dead_tuples(c.oid)::numeric /
          (pg_stat_get_live_tuples(c.oid) + pg_stat_get_dead_tuples(c.oid))) * 100, 2)
      END as dead_tuples_percent,
      pg_stat_get_last_vacuum_time(c.oid) as last_vacuum,
      pg_stat_get_last_autovacuum_time(c.oid) as last_autovacuum,
      pg_total_relation_size(c.oid) as total_size,
      pg_size_pretty(pg_total_relation_size(c.oid)) as size_pretty
    FROM pg_stat_user_tables s
    JOIN pg_class c ON c.relname = s.relname AND c.relnamespace = (
      SELECT oid FROM pg_namespace WHERE nspname = s.schemaname
    )
    WHERE s.schemaname = $1
    AND pg_stat_get_dead_tuples(c.oid) >= $2
    ORDER BY pg_stat_get_dead_tuples(c.oid) DESC
  `;
  const result = await executeInternalQuery(connection, logger, {
    query,
    params: [sanitizedSchema, minDeadTuples]
  });
  const recommendations = [];
  for (const row of result.rows) {
    const deadTuples = parseInt(row.dead_tuples || '0', 10);
    const liveTuples = parseInt(row.live_tuples || '0', 10);
    const deadPercent = parseFloat(row.dead_tuples_percent || '0');
    const totalSize = parseInt(row.total_size, 10);
    if (deadPercent < minBloatPercent)
      continue;
    const lastVacuum = row.last_vacuum || row.last_autovacuum;
    const daysSinceVacuum = lastVacuum
      ? Math.floor((Date.now() - new Date(lastVacuum).getTime()) / (1000 * 60 * 60 * 24))
      : null;
    let priority = 'low';
    // Fix: build the suggested command from the sanitized schema name. The raw
    // arg was previously interpolated here even though the query itself only
    // ever saw the sanitized value, so the emitted command could reference a
    // different (and unescaped) identifier.
    let command = `VACUUM ANALYZE ${sanitizedSchema}.${row.table};`;
    let warning = null;
    if (deadPercent > 50) {
      priority = 'critical';
      // VACUUM FULL rewrites the table and needs an exclusive lock.
      command = `VACUUM FULL ANALYZE ${sanitizedSchema}.${row.table};`;
      warning = `⚠ ${deadPercent.toFixed(1)}% bloat - VACUUM FULL recommended (requires exclusive lock)`;
    }
    else if (deadPercent > 30) {
      priority = 'high';
    }
    else if (deadPercent > 20) {
      priority = 'medium';
    }
    // Rough duration estimate based on total relation size; VACUUM FULL is
    // far slower because it rewrites the table.
    let estimatedDuration = '< 1 minute';
    if (totalSize > 1024 * 1024 * 1024) { // > 1GB
      estimatedDuration = command.includes('FULL') ? '10-30 minutes' : '1-5 minutes';
    }
    else if (totalSize > 100 * 1024 * 1024) { // > 100MB
      estimatedDuration = command.includes('FULL') ? '1-10 minutes' : '< 1 minute';
    }
    recommendations.push({
      table: row.table,
      schema: row.schema,
      deadTuples,
      liveTuples,
      deadTuplesPercent: deadPercent,
      bloatPercent: deadPercent,
      lastVacuum: row.last_vacuum,
      lastAutoVacuum: row.last_autovacuum,
      daysSinceVacuum,
      priority,
      command,
      estimatedDuration,
      recommendConcurrent: false, // VACUUM doesn't support CONCURRENTLY
      ...(warning && { warning })
    });
  }
  // Most urgent first.
  const priorityOrder = { critical: 0, high: 1, medium: 2, low: 3 };
  recommendations.sort((a, b) => priorityOrder[a.priority] - priorityOrder[b.priority]);
  return {
    schema,
    tablesNeedingVacuum: recommendations.length,
    recommendations
  };
}
239
/**
 * Compute a 0-100 health score for the currently connected database from
 * cache, index, bloat, connection, activity and replication metrics.
 *
 * @param {object} connection - connection handle forwarded to the metric helpers
 * @param {object} logger - logger exposing info(scope, message, meta)
 * @param {{database?: string}} args - optional database name; if given it must
 *   match the current connection, otherwise an Error is thrown
 * @returns {Promise<{database: string, overallScore: number,
 *   scoreBreakdown: object, issues: object[], recommendations: string[]}>}
 */
export async function getHealthScore(connection, logger, args) {
  const currentDatabase = await getCurrentDatabaseName(connection, logger);
  const requestedDatabase = args.database;
  if (requestedDatabase) {
    // Validate the name exists even when it doesn't match, so the caller gets
    // a "no such database" error before the reconnect hint.
    await ensureDatabaseExists(connection, logger, requestedDatabase);
    if (requestedDatabase !== currentDatabase) {
      throw new Error(`Connected to "${currentDatabase}". Reconnect to "${requestedDatabase}" to calculate its health score.`);
    }
  }
  logger.info('getHealthScore', 'Calculating health score', { database: currentDatabase });
  // Gather multiple health metrics in parallel; the helpers are independent.
  const [cacheStats, connectionStats, bloatStats, indexStats, activityStats, replicationStats] = await Promise.all([
    getCacheHitRatio(connection, logger),
    getConnectionHealth(connection, logger),
    getBloatHealth(connection, logger),
    getIndexHealth(connection, logger),
    getActivityHealth(connection, logger),
    getReplicationHealth(connection, logger)
  ]);
  // Component scores (each { score: 0-100, value, status }).
  const scoreBreakdown = {
    cacheHitRatio: calculateCacheScore(cacheStats.ratio),
    indexUsage: calculateIndexScore(indexStats.usage),
    bloat: calculateBloatScore(bloatStats.avgBloat),
    connectionUsage: calculateConnectionScore(connectionStats.usage),
    deadTuples: calculateDeadTuplesScore(bloatStats.avgDeadPercent),
    longRunningQueries: calculateLongQueryScore(activityStats.longRunning),
    replicationLag: calculateReplicationScore(replicationStats.lag)
  };
  // Overall score is a weighted average; weights sum to 1.0.
  const weights = {
    cacheHitRatio: 0.20,
    indexUsage: 0.15,
    bloat: 0.15,
    connectionUsage: 0.10,
    deadTuples: 0.15,
    longRunningQueries: 0.15,
    replicationLag: 0.10
  };
  let overallScore = 0;
  for (const [key, weight] of Object.entries(weights)) {
    overallScore += scoreBreakdown[key].score * weight;
  }
  overallScore = Math.round(overallScore);
  // Turn the component scores into actionable issues / positive notes.
  const issues = [];
  const recommendations = [];
  if (scoreBreakdown.cacheHitRatio.score < 90) {
    issues.push({
      severity: 'warning',
      category: 'cache',
      message: `Cache hit ratio is ${(cacheStats.ratio * 100).toFixed(1)}%`,
      recommendation: 'Consider increasing shared_buffers or investigating query patterns'
    });
  }
  else {
    recommendations.push(`✓ Cache hit ratio is excellent (${(cacheStats.ratio * 100).toFixed(1)}%)`);
  }
  if (scoreBreakdown.bloat.score < 70) {
    issues.push({
      severity: 'warning',
      category: 'bloat',
      message: `Average table bloat is ${bloatStats.avgBloat.toFixed(1)}%`,
      recommendation: 'Schedule VACUUM for bloated tables during maintenance window'
    });
  }
  if (scoreBreakdown.indexUsage.score < 80) {
    issues.push({
      severity: 'info',
      category: 'indexes',
      message: `${indexStats.unusedCount} indexes are unused`,
      recommendation: `Consider dropping unused indexes to save ${indexStats.unusedSizeMB.toFixed(0)} MB`
    });
  }
  // calculateConnectionScore only yields 100/80/60/40, so ">90" means 100
  // (<=50% usage) and "<50" means 40 (>90% usage).
  if (scoreBreakdown.connectionUsage.score > 90) {
    recommendations.push(`✓ Connection pool usage is healthy (${(connectionStats.usage * 100).toFixed(0)}% of max)`);
  }
  else if (scoreBreakdown.connectionUsage.score < 50) {
    issues.push({
      severity: 'critical',
      category: 'connections',
      message: `Using ${(connectionStats.usage * 100).toFixed(0)}% of max connections`,
      recommendation: 'Increase max_connections or investigate connection leaks'
    });
  }
  return {
    database: currentDatabase,
    overallScore,
    scoreBreakdown,
    issues,
    recommendations
  };
}
332
/**
 * List the slowest statements from pg_stat_statements, with per-query tuning
 * hints. Throws if the pg_stat_statements extension is not installed.
 *
 * @param {object} connection - connection handle forwarded to executeInternalQuery
 * @param {object} logger - logger exposing info(scope, message, meta)
 * @param {{minDurationMs: number, limit: number, orderBy: 'total_time'|'mean_time'|'calls'}} args
 *   parsed GetSlowQueriesSchema input (defaults already applied)
 * @returns {Promise<{minDurationMs: number, queriesFound: number, slowQueries: object[]}>}
 * @throws {Error} when pg_stat_statements is not installed
 */
export async function getSlowQueries(connection, logger, args) {
  const { minDurationMs, limit, orderBy } = args;
  logger.info('getSlowQueries', 'Analyzing slow queries');
  // pg_stat_statements is optional; fail fast with an actionable message.
  const extensionCheck = `
    SELECT COUNT(*) as count
    FROM pg_extension
    WHERE extname = 'pg_stat_statements'
  `;
  const extResult = await executeInternalQuery(connection, logger, { query: extensionCheck });
  const hasExtension = parseInt(extResult.rows[0]?.count || '0', 10) > 0;
  if (!hasExtension) {
    throw new Error('pg_stat_statements extension not installed. Install with: CREATE EXTENSION pg_stat_statements;');
  }
  // Map the validated enum to an ORDER BY clause. Safe to interpolate below
  // because zod restricts orderBy to these three keys.
  const orderByMap = {
    total_time: 'total_exec_time DESC',
    mean_time: 'mean_exec_time DESC',
    calls: 'calls DESC'
  };
  // NOTE(review): the *_exec_time columns exist in pg_stat_statements 1.8+
  // (PostgreSQL 13+); older servers use total_time/mean_time — confirm the
  // minimum supported server version.
  const query = `
    SELECT
      query,
      calls,
      total_exec_time as total_time_ms,
      mean_exec_time as mean_time_ms,
      min_exec_time as min_time_ms,
      max_exec_time as max_time_ms,
      stddev_exec_time as stddev_time_ms,
      rows,
      CASE WHEN calls > 0 THEN rows::numeric / calls ELSE 0 END as rows_per_call,
      100.0 * shared_blks_hit / NULLIF(shared_blks_hit + shared_blks_read, 0) as cache_hit_percent
    FROM pg_stat_statements
    WHERE mean_exec_time >= $1
    AND query NOT LIKE '%pg_stat_statements%'
    ORDER BY ${orderByMap[orderBy]}
    LIMIT $2
  `;
  const result = await executeInternalQuery(connection, logger, {
    query,
    params: [minDurationMs, limit]
  });
  const slowQueries = result.rows.map(row => {
    const meanTime = parseFloat(row.mean_time_ms || '0');
    const cacheHit = parseFloat(row.cache_hit_percent || '0');
    // Heuristic per-query advice based on timing, cache behavior and shape.
    const recommendations = [];
    if (meanTime > 1000) {
      recommendations.push('⚠ Very slow query (>1s average) - investigate and optimize');
    }
    if (cacheHit < 90) {
      recommendations.push(`⚠ Low cache hit ratio (${cacheHit.toFixed(1)}%) - data mostly from disk`);
    }
    if (row.query.toLowerCase().includes('select *')) {
      recommendations.push('Consider selecting only needed columns instead of SELECT *');
    }
    const rowsPerCall = parseFloat(row.rows_per_call || '0');
    if (rowsPerCall > 1000) {
      recommendations.push(`High rows per call (${Math.round(rowsPerCall)}) - consider pagination or filtering`);
    }
    return {
      // Truncate to keep responses bounded; pg_stat_statements can store long texts.
      query: row.query.substring(0, 500),
      calls: parseInt(row.calls, 10),
      totalTimeMs: parseFloat(row.total_time_ms).toFixed(2),
      meanTimeMs: meanTime.toFixed(2),
      minTimeMs: parseFloat(row.min_time_ms).toFixed(2),
      maxTimeMs: parseFloat(row.max_time_ms).toFixed(2),
      stddevTimeMs: parseFloat(row.stddev_time_ms || '0').toFixed(2),
      rows: parseInt(row.rows || '0', 10),
      rowsPerCall: Math.round(rowsPerCall),
      cacheHitPercent: cacheHit.toFixed(1),
      ...(recommendations.length > 0 && { recommendations })
    };
  });
  return {
    minDurationMs,
    queriesFound: slowQueries.length,
    slowQueries
  };
}
410
+ // Helper functions for health scoring
411
/**
 * Fraction of block reads served from the buffer cache for the current
 * database. Reports 1.0 when no blocks have been read yet.
 */
async function getCacheHitRatio(connection, logger) {
  const sql = `
    SELECT
      CASE
        WHEN (blks_hit + blks_read) = 0 THEN 1.0
        ELSE blks_hit::numeric / (blks_hit + blks_read)
      END as ratio
    FROM pg_stat_database
    WHERE datname = current_database()
  `;
  const { rows } = await executeInternalQuery(connection, logger, { query: sql });
  const ratio = parseFloat(rows[0]?.ratio || '1');
  return { ratio };
}
424
/**
 * Current backend count as a fraction of max_connections (null usage from
 * the NULLIF guard is coerced to 0).
 */
async function getConnectionHealth(connection, logger) {
  const sql = `
    SELECT
      (SELECT COUNT(*) FROM pg_stat_activity)::numeric /
      NULLIF((SELECT setting::int FROM pg_settings WHERE name = 'max_connections'), 0) as usage
  `;
  const { rows } = await executeInternalQuery(connection, logger, { query: sql });
  const usage = parseFloat(rows[0]?.usage || '0');
  return { usage };
}
433
/**
 * Average dead-tuple percentage across all user tables. The same figure feeds
 * both the "bloat" and "deadTuples" components of the health score.
 *
 * @returns {Promise<{avgBloat: number, avgDeadPercent: number}>}
 */
async function getBloatHealth(connection, logger) {
  // NOTE: avg_bloat and avg_dead_percent are computed from the identical
  // expression today; both columns are kept to preserve the result shape.
  // Fix: qualify the pg_class join by namespace (as every other query in this
  // file does) so same-named tables in different schemas are not cross-matched.
  const query = `
    SELECT
      AVG(
        CASE
          WHEN pg_stat_get_live_tuples(c.oid) + pg_stat_get_dead_tuples(c.oid) = 0 THEN 0
          ELSE (pg_stat_get_dead_tuples(c.oid)::numeric /
            (pg_stat_get_live_tuples(c.oid) + pg_stat_get_dead_tuples(c.oid))) * 100
        END
      ) as avg_bloat,
      AVG(
        CASE
          WHEN pg_stat_get_live_tuples(c.oid) + pg_stat_get_dead_tuples(c.oid) = 0 THEN 0
          ELSE (pg_stat_get_dead_tuples(c.oid)::numeric /
            (pg_stat_get_live_tuples(c.oid) + pg_stat_get_dead_tuples(c.oid))) * 100
        END
      ) as avg_dead_percent
    FROM pg_stat_user_tables s
    JOIN pg_class c ON c.relname = s.relname AND c.relnamespace = (
      SELECT oid FROM pg_namespace WHERE nspname = s.schemaname
    )
  `;
  const result = await executeInternalQuery(connection, logger, { query });
  return {
    avgBloat: parseFloat(result.rows[0]?.avg_bloat || '0'),
    avgDeadPercent: parseFloat(result.rows[0]?.avg_dead_percent || '0')
  };
}
459
/**
 * Unused-index count/size and the fraction of user indexes that have been
 * scanned at least once (usage defaults to 1 when there are no indexes).
 */
async function getIndexHealth(connection, logger) {
  const sql = `
    SELECT
      COUNT(*) FILTER (WHERE idx_scan = 0) as unused_count,
      SUM(pg_relation_size(indexrelid)) FILTER (WHERE idx_scan = 0) as unused_size,
      COUNT(*) FILTER (WHERE idx_scan > 0)::numeric / NULLIF(COUNT(*), 0) as usage_ratio
    FROM pg_stat_user_indexes
  `;
  const { rows } = await executeInternalQuery(connection, logger, { query: sql });
  const stats = rows[0];
  return {
    unusedCount: parseInt(stats?.unused_count || '0', 10),
    unusedSizeMB: parseInt(stats?.unused_size || '0', 10) / 1024 / 1024,
    usage: parseFloat(stats?.usage_ratio || '1')
  };
}
474
/**
 * Number of backends that have been actively executing the same statement
 * for more than 30 seconds.
 */
async function getActivityHealth(connection, logger) {
  const sql = `
    SELECT
      COUNT(*) FILTER (
        WHERE state = 'active'
        AND EXTRACT(EPOCH FROM (NOW() - query_start)) > 30
      ) as long_running
    FROM pg_stat_activity
  `;
  const { rows } = await executeInternalQuery(connection, logger, { query: sql });
  const longRunning = parseInt(rows[0]?.long_running || '0', 10);
  return { longRunning };
}
488
/**
 * Maximum replication lag in seconds across connected standbys; 0 when there
 * is no replication or the query fails (best-effort by design).
 */
async function getReplicationHealth(connection, logger) {
  const sql = `
    SELECT COALESCE(MAX(EXTRACT(EPOCH FROM (NOW() - pg_last_xact_replay_timestamp()))), 0) as lag
    FROM pg_stat_replication
  `;
  try {
    const { rows } = await executeInternalQuery(connection, logger, { query: sql });
    return { lag: parseFloat(rows[0]?.lag || '0') };
  }
  catch {
    // Deliberately swallowed: replication views/functions may be unavailable
    // on some setups; treat that as "no lag" rather than failing the score.
    return { lag: 0 };
  }
}
501
/**
 * Band a cache hit ratio (0-1) into a health score.
 * Bands on the percentage: <80 poor, <90 fair, <95 good, else excellent.
 */
function calculateCacheScore(ratio) {
  const percent = ratio * 100;
  if (percent < 80) {
    return { score: 50, value: ratio, status: 'poor' };
  }
  if (percent < 90) {
    return { score: 70, value: ratio, status: 'fair' };
  }
  if (percent < 95) {
    return { score: 85, value: ratio, status: 'good' };
  }
  return { score: 100, value: ratio, status: 'excellent' };
}
519
/**
 * Score index usage (fraction of indexes ever scanned, 0-1): the score is the
 * rounded percentage, with status bands >90/>80/>70.
 */
function calculateIndexScore(usage) {
  const percent = usage * 100;
  let status;
  if (percent > 90) {
    status = 'excellent';
  } else if (percent > 80) {
    status = 'good';
  } else if (percent > 70) {
    status = 'fair';
  } else {
    status = 'poor';
  }
  return { score: Math.round(percent), value: usage, status };
}
525
/**
 * Band an average bloat percentage into a health score:
 * >30 poor, >20 fair, >10 good, else excellent.
 */
function calculateBloatScore(bloatPercent) {
  if (bloatPercent > 30) {
    return { score: 50, value: bloatPercent, status: 'poor' };
  }
  if (bloatPercent > 20) {
    return { score: 70, value: bloatPercent, status: 'fair' };
  }
  if (bloatPercent > 10) {
    return { score: 85, value: bloatPercent, status: 'good' };
  }
  return { score: 100, value: bloatPercent, status: 'excellent' };
}
542
/**
 * Band connection-pool usage (0-1 fraction of max_connections):
 * >0.9 critical, >0.7 fair, >0.5 good, else excellent.
 */
function calculateConnectionScore(usage) {
  if (usage > 0.9) {
    return { score: 40, value: usage, status: 'critical' };
  }
  if (usage > 0.7) {
    return { score: 60, value: usage, status: 'fair' };
  }
  if (usage > 0.5) {
    return { score: 80, value: usage, status: 'good' };
  }
  return { score: 100, value: usage, status: 'excellent' };
}
559
// Dead-tuple percentage uses the same banding as table bloat, so delegate.
function calculateDeadTuplesScore(deadPercent) {
  return calculateBloatScore(deadPercent);
}
562
/**
 * Band the number of long-running (>30s active) queries:
 * >10 poor, >5 fair, >2 good, else excellent.
 */
function calculateLongQueryScore(count) {
  if (count > 10) {
    return { score: 50, value: count, status: 'poor' };
  }
  if (count > 5) {
    return { score: 70, value: count, status: 'fair' };
  }
  if (count > 2) {
    return { score: 85, value: count, status: 'good' };
  }
  return { score: 100, value: count, status: 'excellent' };
}
579
/**
 * Band replication lag in seconds: exactly 0 is treated as "no replication"
 * (n/a, full score); otherwise >60 critical, >10 fair, >5 good, else excellent.
 */
function calculateReplicationScore(lag) {
  if (lag === 0) {
    return { score: 100, value: 0, status: 'n/a' };
  }
  if (lag > 60) {
    return { score: 50, value: lag, status: 'critical' };
  }
  if (lag > 10) {
    return { score: 70, value: lag, status: 'fair' };
  }
  if (lag > 5) {
    return { score: 85, value: lag, status: 'good' };
  }
  return { score: 100, value: lag, status: 'excellent' };
}
599
// Registry of maintenance tools: each entry pairs a zod input schema with its
// handler(connection, logger, args). Consumed by the server when exposing
// these tools over MCP.
export const maintenanceTools = {
  analyzeTableBloat: {
    schema: AnalyzeTableBloatSchema,
    handler: analyzeTableBloat
  },
  suggestVacuum: {
    schema: SuggestVacuumSchema,
    handler: suggestVacuum
  },
  getHealthScore: {
    schema: GetHealthScoreSchema,
    handler: getHealthScore
  },
  getSlowQueries: {
    schema: GetSlowQueriesSchema,
    handler: getSlowQueries
  }
};
617
+ //# sourceMappingURL=maintenance.js.map