postgres-scout-mcp 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,369 @@
1
+ import { z } from 'zod';
2
+ import { executeInternalQuery } from '../utils/database.js';
3
+ import { sanitizeIdentifier } from '../utils/sanitize.js';
4
// Input schema for getLiveMetrics: total sampling window, snapshot spacing,
// and an optional list of metric families.
const GetLiveMetricsSchema = z.object({
  duration: z.number().optional().default(10000), // total sampling window in ms
  interval: z.number().optional().default(1000), // spacing between snapshots in ms
  // NOTE(review): `metrics` is validated here but never read by the handler
  // below, which always collects every family — confirm whether filtering
  // was intended.
  metrics: z.array(z.enum(['queries', 'connections', 'locks', 'transactions', 'cache'])).optional()
});
9
// Input schema for getHottestTables: which schema to sample, how many tables
// to report, how long to measure activity deltas, and the ranking criterion.
const GetHottestTablesSchema = z.object({
  schema: z.string().optional().default('public'), // PostgreSQL schema to inspect
  limit: z.number().optional().default(10), // max tables in the result
  sampleDuration: z.number().optional().default(5000), // delta-measurement window in ms
  orderBy: z.enum(['seq_scan', 'idx_scan', 'writes', 'size']).optional().default('seq_scan')
});
15
// Input schema for getTableMetrics: the target table (required), its schema,
// and whether trend data should be included.
const GetTableMetricsSchema = z.object({
  table: z.string(), // table name (unqualified)
  schema: z.string().optional().default('public'),
  // NOTE(review): `includeTrends` is accepted but not acted on by the handler
  // below — confirm whether trend collection is still planned.
  includeTrends: z.boolean().optional().default(true)
});
20
/**
 * Takes one point-in-time snapshot of server health counters: query states,
 * connection usage, transaction totals, cache hit ratio, and lock counts.
 * All five statistics queries are issued in parallel.
 *
 * @param connection - database connection handle, passed through to executeInternalQuery
 * @param logger - structured logger, passed through to executeInternalQuery
 * @returns snapshot object keyed by metric family, stamped with an ISO timestamp
 */
async function collectMetricSnapshot(connection, logger) {
  // Numeric columns arrive from the driver as strings; coerce defensively.
  const toInt = (value) => parseInt(value || '0', 10);

  const sqlActivity = `
    SELECT
      COUNT(*) FILTER (WHERE state = 'active') as active,
      COUNT(*) FILTER (WHERE state = 'idle') as idle,
      COUNT(*) FILTER (WHERE state = 'idle in transaction') as idle_in_transaction,
      COUNT(*) FILTER (WHERE wait_event IS NOT NULL AND state = 'active') as waiting
    FROM pg_stat_activity
    WHERE backend_type = 'client backend'
  `;
  const sqlConnections = `
    SELECT
      (SELECT count(*) FROM pg_stat_activity WHERE backend_type = 'client backend') as current,
      setting::int as max
    FROM pg_settings
    WHERE name = 'max_connections'
  `;
  const sqlTransactions = `
    SELECT
      xact_commit as committed,
      xact_rollback as rolled_back
    FROM pg_stat_database
    WHERE datname = current_database()
  `;
  const sqlCache = `
    SELECT
      COALESCE(
        ROUND(
          SUM(blks_hit) * 100.0 / NULLIF(SUM(blks_hit) + SUM(blks_read), 0),
          2
        ),
        0
      ) as hit_ratio,
      SUM(blks_hit) as blocks_hit,
      SUM(blks_read) as blocks_read
    FROM pg_stat_database
    WHERE datname = current_database()
  `;
  const sqlLocks = `
    SELECT
      COUNT(*) as total,
      COUNT(*) FILTER (WHERE NOT granted) as waiting
    FROM pg_locks
  `;

  const run = (query) => executeInternalQuery(connection, logger, { query, params: [] });
  const [activityRes, connRes, txRes, cacheRes, lockRes] = await Promise.all([
    run(sqlActivity),
    run(sqlConnections),
    run(sqlTransactions),
    run(sqlCache),
    run(sqlLocks)
  ]);

  const [activityRow] = activityRes.rows;
  const [connRow] = connRes.rows;
  const [txRow] = txRes.rows;
  const [cacheRow] = cacheRes.rows;
  const [lockRow] = lockRes.rows;

  const currentConnections = toInt(connRow.current);
  // Fall back to the PostgreSQL default of 100 if max_connections is absent.
  const maxConnections = parseInt(connRow.max || '100', 10);

  return {
    timestamp: new Date().toISOString(),
    queries: {
      active: toInt(activityRow.active),
      idle: toInt(activityRow.idle),
      idleInTransaction: toInt(activityRow.idle_in_transaction),
      waiting: toInt(activityRow.waiting)
    },
    connections: {
      current: currentConnections,
      max: maxConnections,
      usagePercent: Math.round((currentConnections / maxConnections) * 100)
    },
    transactions: {
      committed: toInt(txRow.committed),
      rolledBack: toInt(txRow.rolled_back)
    },
    cache: {
      hitRatio: parseFloat(cacheRow.hit_ratio || '0'),
      blocksHit: toInt(cacheRow.blocks_hit),
      blocksRead: toInt(cacheRow.blocks_read)
    },
    locks: {
      total: toInt(lockRow.total),
      waiting: toInt(lockRow.waiting)
    }
  };
}
106
/**
 * Samples server-wide metrics repeatedly over a time window and summarizes
 * averages, peaks, counter deltas, and detected issues.
 *
 * @param connection - database connection handle
 * @param logger - structured logger
 * @param {{duration: number, interval: number}} args - window and spacing in ms
 * @returns {{summary: object, issues?: string[], snapshots: object[]}}
 * @throws {Error} if interval is not a positive number of milliseconds
 */
export async function getLiveMetrics(connection, logger, args) {
  const { duration, interval } = args;
  logger.info('getLiveMetrics', 'Collecting live metrics', { duration, interval });
  if (!(interval > 0)) {
    throw new Error(`interval must be a positive number of milliseconds, got ${interval}`);
  }
  // Always take at least one sample: previously duration < interval produced
  // zero iterations, leaving snapshots empty and crashing below when reading
  // snapshots[0].transactions.
  const iterations = Math.max(1, Math.floor(duration / interval));
  const snapshots = [];
  for (let i = 0; i < iterations; i++) {
    const snapshot = await collectMetricSnapshot(connection, logger);
    snapshots.push(snapshot);
    // No trailing sleep after the final sample.
    if (i < iterations - 1) {
      await new Promise(resolve => setTimeout(resolve, interval));
    }
  }
  const first = snapshots[0];
  const last = snapshots[snapshots.length - 1];
  const summary = {
    duration: `${duration}ms`,
    samples: snapshots.length,
    averages: {
      activeQueries: Math.round(snapshots.reduce((sum, s) => sum + s.queries.active, 0) / snapshots.length),
      connectionUsage: Math.round(snapshots.reduce((sum, s) => sum + s.connections.usagePercent, 0) / snapshots.length),
      // Ratios kept to one decimal place.
      cacheHitRatio: Math.round(snapshots.reduce((sum, s) => sum + s.cache.hitRatio, 0) / snapshots.length * 10) / 10,
      waitingLocks: Math.round(snapshots.reduce((sum, s) => sum + s.locks.waiting, 0) / snapshots.length * 10) / 10
    },
    peaks: {
      maxActiveQueries: Math.max(...snapshots.map(s => s.queries.active)),
      maxConnections: Math.max(...snapshots.map(s => s.connections.current)),
      maxWaitingLocks: Math.max(...snapshots.map(s => s.locks.waiting))
    },
    // Cumulative-counter deltas between the first and last snapshot.
    deltas: {
      transactionsCommitted: last.transactions.committed - first.transactions.committed,
      transactionsRolledBack: last.transactions.rolledBack - first.transactions.rolledBack,
      blocksRead: last.cache.blocksRead - first.cache.blocksRead
    }
  };
  // Heuristic health checks over the sampled window.
  const issues = [];
  if (summary.averages.connectionUsage > 80) {
    issues.push(`High connection usage: ${summary.averages.connectionUsage}%`);
  }
  if (summary.averages.cacheHitRatio < 90) {
    issues.push(`Low cache hit ratio: ${summary.averages.cacheHitRatio}%`);
  }
  if (summary.peaks.maxWaitingLocks > 5) {
    issues.push(`Lock contention detected: up to ${summary.peaks.maxWaitingLocks} waiting locks`);
  }
  return {
    summary,
    issues: issues.length > 0 ? issues : undefined,
    snapshots
  };
}
156
/**
 * Identifies the most active tables in a schema by sampling pg_stat_user_tables
 * twice and ranking tables by the activity delta over the sampling window.
 *
 * @param connection - database connection handle
 * @param logger - structured logger
 * @param {{schema: string, limit: number, sampleDuration: number, orderBy: string}} args
 * @returns ranked table list with per-window activity and cumulative totals
 */
export async function getHottestTables(connection, logger, args) {
  const { schema, limit, sampleDuration, orderBy } = args;
  logger.info('getHottestTables', 'Identifying hottest tables', { schema, orderBy });
  const sanitizedSchema = sanitizeIdentifier(schema);
  // schemaname is compared as a plain value, so bind it as a parameter rather
  // than interpolating it into the SQL text. total_bytes is included so the
  // orderBy='size' option (advertised by the schema but previously falling
  // through to the default sort) can actually rank by relation size.
  const statsQuery = `
    SELECT
      relname as table_name,
      seq_scan,
      seq_tup_read,
      idx_scan,
      idx_tup_fetch,
      n_tup_ins,
      n_tup_upd,
      n_tup_del,
      pg_total_relation_size(relid) as total_bytes
    FROM pg_stat_user_tables
    WHERE schemaname = $1
  `;
  const beforeResult = await executeInternalQuery(connection, logger, { query: statsQuery, params: [sanitizedSchema] });
  const beforeStats = new Map(beforeResult.rows.map(r => [r.table_name, r]));
  // Wait for activity to accumulate, then re-read the counters.
  await new Promise(resolve => setTimeout(resolve, sampleDuration));
  const afterResult = await executeInternalQuery(connection, logger, { query: statsQuery, params: [sanitizedSchema] });
  const activity = afterResult.rows.map(after => {
    // A table created mid-sample has no "before" row; using "after" yields zero deltas.
    const before = beforeStats.get(after.table_name) || after;
    const delta = (field) => parseInt(after[field] || '0', 10) - parseInt(before[field] || '0', 10);
    const seqScanDelta = delta('seq_scan');
    const idxScanDelta = delta('idx_scan');
    const insertsDelta = delta('n_tup_ins');
    const updatesDelta = delta('n_tup_upd');
    const deletesDelta = delta('n_tup_del');
    return {
      table: after.table_name,
      sizeBytes: parseInt(after.total_bytes || '0', 10),
      activity: {
        seqScans: seqScanDelta,
        idxScans: idxScanDelta,
        totalScans: seqScanDelta + idxScanDelta,
        writes: insertsDelta + updatesDelta + deletesDelta,
        inserts: insertsDelta,
        updates: updatesDelta,
        deletes: deletesDelta
      },
      totals: {
        seqScans: parseInt(after.seq_scan || '0', 10),
        idxScans: parseInt(after.idx_scan || '0', 10)
      }
    };
  });
  let sortKey;
  switch (orderBy) {
    case 'seq_scan':
      sortKey = a => a.activity.seqScans;
      break;
    case 'idx_scan':
      sortKey = a => a.activity.idxScans;
      break;
    case 'writes':
      sortKey = a => a.activity.writes;
      break;
    case 'size':
      sortKey = a => a.sizeBytes;
      break;
    default:
      sortKey = a => a.activity.totalScans;
  }
  // Only report tables that saw some activity during the window.
  const sorted = activity
    .filter(a => a.activity.totalScans > 0 || a.activity.writes > 0)
    .sort((a, b) => sortKey(b) - sortKey(a))
    .slice(0, limit);
  return {
    schema,
    sampleDuration: `${sampleDuration}ms`,
    orderBy,
    tables: sorted,
    totalTablesWithActivity: sorted.length
  };
}
227
/**
 * Collects detailed statistics for one table: size, row counts, scan and
 * modification counters, buffer-cache hit ratios, maintenance history, and
 * heuristic recommendations.
 *
 * @param connection - database connection handle
 * @param logger - structured logger
 * @param {{table: string, schema: string, includeTrends: boolean}} args
 * @returns metrics object, or `{error}` if the table does not exist
 */
export async function getTableMetrics(connection, logger, args) {
  // NOTE(review): includeTrends is accepted but not currently used — confirm.
  const { table, schema, includeTrends } = args;
  logger.info('getTableMetrics', 'Getting table metrics', { schema, table });
  const sanitizedSchema = sanitizeIdentifier(schema);
  const sanitizedTable = sanitizeIdentifier(table);
  // schemaname/relname are compared as plain values — bind them as parameters.
  const statsQuery = `
    SELECT
      relname as table_name,
      seq_scan,
      seq_tup_read,
      idx_scan,
      idx_tup_fetch,
      n_tup_ins,
      n_tup_upd,
      n_tup_del,
      n_tup_hot_upd,
      n_live_tup,
      n_dead_tup,
      n_mod_since_analyze,
      last_vacuum,
      last_autovacuum,
      last_analyze,
      last_autoanalyze,
      vacuum_count,
      autovacuum_count,
      analyze_count,
      autoanalyze_count
    FROM pg_stat_user_tables
    WHERE schemaname = $1 AND relname = $2
  `;
  // Check existence via the stats query FIRST. The size query casts to
  // regclass and raises an error for a missing table; running it inside a
  // Promise.all alongside the stats query rejected the whole call and made
  // the friendly "not found" branch below unreachable.
  const statsResult = await executeInternalQuery(connection, logger, { query: statsQuery, params: [sanitizedSchema, sanitizedTable] });
  if (statsResult.rows.length === 0) {
    return { error: `Table ${schema}.${table} not found` };
  }
  // Identifiers cannot be bound as parameters; rely on sanitizeIdentifier and
  // cast consistently to regclass for all three size functions.
  const qualified = `${sanitizedSchema}.${sanitizedTable}`;
  const sizeQuery = `
    SELECT
      pg_total_relation_size('${qualified}'::regclass) as total_bytes,
      pg_relation_size('${qualified}'::regclass) as table_bytes,
      pg_indexes_size('${qualified}'::regclass) as index_bytes
  `;
  const ioQuery = `
    SELECT
      heap_blks_read,
      heap_blks_hit,
      idx_blks_read,
      idx_blks_hit,
      toast_blks_read,
      toast_blks_hit
    FROM pg_statio_user_tables
    WHERE schemaname = $1 AND relname = $2
  `;
  const [sizeResult, ioResult] = await Promise.all([
    executeInternalQuery(connection, logger, { query: sizeQuery, params: [] }),
    executeInternalQuery(connection, logger, { query: ioQuery, params: [sanitizedSchema, sanitizedTable] })
  ]);
  const stats = statsResult.rows[0];
  const size = sizeResult.rows[0];
  const io = ioResult.rows[0];
  const liveTuples = parseInt(stats.n_live_tup || '0', 10);
  const deadTuples = parseInt(stats.n_dead_tup || '0', 10);
  const heapHit = parseInt(io?.heap_blks_hit || '0', 10);
  const heapRead = parseInt(io?.heap_blks_read || '0', 10);
  const idxHit = parseInt(io?.idx_blks_hit || '0', 10);
  const idxRead = parseInt(io?.idx_blks_read || '0', 10);
  const result = {
    table: `${schema}.${table}`,
    size: {
      total: formatBytes(parseInt(size.total_bytes || '0', 10)),
      table: formatBytes(parseInt(size.table_bytes || '0', 10)),
      indexes: formatBytes(parseInt(size.index_bytes || '0', 10))
    },
    rows: {
      live: liveTuples,
      dead: deadTuples,
      deadPercent: liveTuples > 0 ? Math.round((deadTuples / (liveTuples + deadTuples)) * 100) : 0
    },
    scans: {
      sequential: parseInt(stats.seq_scan || '0', 10),
      index: parseInt(stats.idx_scan || '0', 10),
      seqTuplesRead: parseInt(stats.seq_tup_read || '0', 10),
      idxTuplesFetched: parseInt(stats.idx_tup_fetch || '0', 10)
    },
    modifications: {
      inserts: parseInt(stats.n_tup_ins || '0', 10),
      updates: parseInt(stats.n_tup_upd || '0', 10),
      deletes: parseInt(stats.n_tup_del || '0', 10),
      hotUpdates: parseInt(stats.n_tup_hot_upd || '0', 10),
      modsSinceAnalyze: parseInt(stats.n_mod_since_analyze || '0', 10)
    },
    io: {
      // No block reads at all counts as a perfect (100%) hit ratio.
      heapHitRatio: heapHit + heapRead > 0 ? Math.round((heapHit / (heapHit + heapRead)) * 100) : 100,
      indexHitRatio: idxHit + idxRead > 0 ? Math.round((idxHit / (idxHit + idxRead)) * 100) : 100
    },
    maintenance: {
      lastVacuum: stats.last_vacuum,
      lastAutovacuum: stats.last_autovacuum,
      lastAnalyze: stats.last_analyze,
      lastAutoanalyze: stats.last_autoanalyze,
      vacuumCount: parseInt(stats.vacuum_count || '0', 10),
      analyzeCount: parseInt(stats.analyze_count || '0', 10)
    }
  };
  // Heuristic maintenance/tuning hints based on the counters above.
  const recommendations = [];
  if (result.rows.deadPercent > 20) {
    recommendations.push(`High dead tuple ratio (${result.rows.deadPercent}%) - consider VACUUM`);
  }
  if (result.scans.sequential > result.scans.index * 10 && result.scans.sequential > 100) {
    recommendations.push('High sequential scan ratio - review indexes');
  }
  if (result.io.heapHitRatio < 90) {
    recommendations.push(`Low heap cache hit ratio (${result.io.heapHitRatio}%) - consider increasing shared_buffers`);
  }
  if (result.modifications.modsSinceAnalyze > liveTuples * 0.1) {
    recommendations.push('Many modifications since last ANALYZE - statistics may be stale');
  }
  if (recommendations.length > 0) {
    result.recommendations = recommendations;
  }
  return result;
}
347
/**
 * Formats a byte count as a human-readable string, e.g. 1536 -> "1.5 KB".
 *
 * @param {number} bytes - byte count (expected non-negative)
 * @returns {string} value rounded to at most two decimals plus a unit suffix
 */
function formatBytes(bytes) {
  // Guard 0/negative/non-finite input: Math.log would otherwise produce
  // -Infinity/NaN and the original returned "NaN undefined".
  if (!Number.isFinite(bytes) || bytes <= 0) {
    return '0 B';
  }
  const k = 1024;
  const sizes = ['B', 'KB', 'MB', 'GB', 'TB'];
  // Clamp the unit index so sub-1 values use 'B' and petabyte-scale values
  // use 'TB' instead of indexing past the end of the array.
  const exponent = Math.floor(Math.log(bytes) / Math.log(k));
  const i = Math.min(sizes.length - 1, Math.max(0, exponent));
  return `${parseFloat((bytes / Math.pow(k, i)).toFixed(2))} ${sizes[i]}`;
}
355
// Tool registry consumed by the MCP server: maps each tool name to its zod
// input schema (used for argument validation) and its handler implementation.
export const liveMonitoringTools = {
  // Repeated whole-server snapshots over a time window.
  getLiveMetrics: {
    schema: GetLiveMetricsSchema,
    handler: getLiveMetrics
  },
  // Per-table activity deltas sampled over a window.
  getHottestTables: {
    schema: GetHottestTablesSchema,
    handler: getHottestTables
  },
  // Deep-dive statistics for a single table.
  getTableMetrics: {
    schema: GetTableMetricsSchema,
    handler: getTableMetrics
  }
};
369
+ //# sourceMappingURL=live-monitoring.js.map