postgres-scout-mcp 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,286 @@
1
+ import { z } from 'zod';
2
+ import { executeInternalQuery } from '../utils/database.js';
3
// Input schema for getCurrentActivity: whether idle sessions are listed and
// the minimum query duration (ms) a session must exceed to be included.
const GetCurrentActivitySchema = z.object({
  includeIdle: z.boolean().optional().default(false),
  minDurationMs: z.number().optional().default(0)
});
// Input schema for analyzeLocks: whether not-yet-granted (waiting) locks
// are included alongside granted ones.
const AnalyzeLocksSchema = z.object({
  includeWaiting: z.boolean().optional().default(true)
});
// Input schema for getIndexUsage: target schema name, minimum index size
// filter in megabytes, and whether never-scanned indexes are listed.
const GetIndexUsageSchema = z.object({
  schema: z.string().optional().default('public'),
  minSizeMB: z.number().optional().default(0),
  includeUnused: z.boolean().optional().default(true)
});
15
/**
 * Snapshot live sessions from pg_stat_activity together with aggregate
 * connection statistics, and attach warnings/recommendations for
 * long-running or idle-in-transaction sessions.
 *
 * @param connection - database connection handle passed to executeInternalQuery
 * @param logger - structured logger
 * @param args - { includeIdle, minDurationMs } per GetCurrentActivitySchema
 * @returns { totalConnections, activeQueries, idleConnections,
 *            idleInTransaction, queries, recommendations }
 * @throws Error when minDurationMs is not a finite number
 */
export async function getCurrentActivity(connection, logger, args) {
  const { includeIdle, minDurationMs } = args;
  logger.info('getCurrentActivity', 'Getting current database activity');
  if (!Number.isFinite(minDurationMs)) {
    throw new Error('minDurationMs must be a finite number');
  }
  // Optional SQL fragments. minDurationMs was validated finite above, so the
  // numeric interpolation cannot inject anything.
  const stateFilter = includeIdle ? '' : "AND state != 'idle'";
  let durationFilter = '';
  if (minDurationMs > 0) {
    durationFilter = `AND EXTRACT(EPOCH FROM (NOW() - query_start)) * 1000 >= ${Number(minDurationMs)}`;
  }
  // Per-session listing, excluding this tool's own backend.
  const query = `
    SELECT
      pid,
      usename as user,
      datname as database,
      state,
      query,
      EXTRACT(EPOCH FROM (NOW() - query_start)) * 1000 as duration_ms,
      wait_event_type,
      wait_event,
      backend_type
    FROM pg_stat_activity
    WHERE pid != pg_backend_pid()
    ${stateFilter}
    ${durationFilter}
    ORDER BY query_start DESC
    LIMIT 100
  `;
  // Aggregate counts by session state.
  const statsQuery = `
    SELECT
      COUNT(*) as total_connections,
      COUNT(*) FILTER (WHERE state = 'active') as active_queries,
      COUNT(*) FILTER (WHERE state = 'idle') as idle_connections,
      COUNT(*) FILTER (WHERE state = 'idle in transaction') as idle_in_transaction
    FROM pg_stat_activity
    WHERE pid != pg_backend_pid()
  `;
  // Independent reads — issue them concurrently.
  const [activityResult, summaryResult] = await Promise.all([
    executeInternalQuery(connection, logger, { query }),
    executeInternalQuery(connection, logger, { query: statsQuery })
  ]);
  const summary = summaryResult.rows[0];
  const queries = activityResult.rows.map((row) => {
    const durationMs = parseFloat(row.duration_ms || '0');
    const warnings = [];
    if (durationMs > 30000 && row.state === 'idle in transaction') {
      warnings.push('⚠ Long idle transaction - potential lock holder');
    }
    if (durationMs > 30000 && row.state === 'active') {
      warnings.push('⚠ Long running query - monitor for timeout');
    }
    // Query text is truncated to keep the payload small.
    const entry = {
      pid: parseInt(row.pid, 10),
      user: row.user,
      database: row.database,
      state: row.state,
      query: row.query?.substring(0, 500),
      durationMs: Math.round(durationMs),
      waitEventType: row.wait_event_type,
      waitEvent: row.wait_event,
      backendType: row.backend_type
    };
    if (warnings.length > 0) {
      entry.warnings = warnings;
    }
    return entry;
  });
  const recommendations = [];
  const idleInTransaction = parseInt(summary.idle_in_transaction || '0', 10);
  if (idleInTransaction > 0) {
    recommendations.push(`${idleInTransaction} idle transactions - check application connection handling`);
  }
  const longRunning = queries.filter((q) => q.durationMs > 5000).length;
  if (longRunning > 0) {
    recommendations.push(`${longRunning} queries running for >5s - monitor for timeouts`);
  }
  return {
    totalConnections: parseInt(summary.total_connections || '0', 10),
    activeQueries: parseInt(summary.active_queries || '0', 10),
    idleConnections: parseInt(summary.idle_connections || '0', 10),
    idleInTransaction,
    queries,
    recommendations
  };
}
97
/**
 * Inspect relation-level locks from pg_locks and report blocking
 * relationships between sessions.
 *
 * @param connection - database connection handle passed to executeInternalQuery
 * @param logger - structured logger
 * @param args - { includeWaiting } per AnalyzeLocksSchema
 * @returns { activeLocks, blockingQueries, locks, blockingInfo, recommendations }
 */
export async function analyzeLocks(connection, logger, args) {
  const { includeWaiting } = args;
  logger.info('analyzeLocks', 'Analyzing database locks');
  // Relation locks joined (LEFT) to the holding session's activity row.
  // blocked_pid is a best-effort correlated lookup: one waiting pid (if any)
  // queued on the same relation. Waiting locks excluded when includeWaiting=false.
  const query = `
    SELECT
      l.locktype,
      l.database,
      l.relation::regclass::text as relation,
      l.mode,
      l.granted,
      l.pid,
      a.query,
      EXTRACT(EPOCH FROM (NOW() - a.query_start)) * 1000 as duration_ms,
      (SELECT pid FROM pg_locks WHERE NOT granted AND relation = l.relation AND pid != l.pid LIMIT 1) as blocked_pid
    FROM pg_locks l
    LEFT JOIN pg_stat_activity a ON a.pid = l.pid
    WHERE l.relation IS NOT NULL
    ${includeWaiting ? '' : 'AND l.granted = true'}
    ORDER BY l.granted, duration_ms DESC
    LIMIT 100
  `;
  // Pairs each waiting (NOT granted) lock with a granted lock on the same
  // relation held by a different pid. NOTE(review): matching on relation only
  // can pair locks whose modes don't actually conflict — treat as indicative.
  const blockingQuery = `
    SELECT
      blocking.pid as blocking_pid,
      blocked.pid as blocked_pid,
      blocking_activity.query as blocking_query,
      blocked_activity.query as blocked_query,
      EXTRACT(EPOCH FROM (NOW() - blocked_activity.query_start)) * 1000 as wait_time_ms
    FROM pg_locks blocked
    JOIN pg_stat_activity blocked_activity ON blocked_activity.pid = blocked.pid
    JOIN pg_locks blocking ON blocking.relation = blocked.relation
      AND blocking.granted = true
      AND blocking.pid != blocked.pid
    JOIN pg_stat_activity blocking_activity ON blocking_activity.pid = blocking.pid
    WHERE NOT blocked.granted
      AND blocked_activity.state = 'active'
    LIMIT 50
  `;
  // Two independent snapshot reads — run concurrently.
  const [result, blockingResult] = await Promise.all([
    executeInternalQuery(connection, logger, { query }),
    executeInternalQuery(connection, logger, { query: blockingQuery })
  ]);
  // Normalize lock rows; query text truncated to bound the payload.
  const locks = result.rows.map(row => ({
    lockType: row.locktype,
    database: row.database,
    relation: row.relation,
    mode: row.mode,
    granted: row.granted,
    pid: parseInt(row.pid, 10),
    query: row.query?.substring(0, 200),
    durationMs: Math.round(parseFloat(row.duration_ms || '0')),
    // blockedPid is only attached when the correlated lookup found a waiter.
    ...(row.blocked_pid && { blockedPid: parseInt(row.blocked_pid, 10) })
  }));
  const blockingInfo = blockingResult.rows.map(row => ({
    blockingPid: parseInt(row.blocking_pid, 10),
    blockedPid: parseInt(row.blocked_pid, 10),
    blockingQuery: row.blocking_query?.substring(0, 200),
    blockedQuery: row.blocked_query?.substring(0, 200),
    waitTimeMs: Math.round(parseFloat(row.wait_time_ms || '0'))
  }));
  // Human-readable guidance derived from the blocking pairs.
  const recommendations = [];
  if (blockingInfo.length > 0) {
    recommendations.push(`⚠ ${blockingInfo.length} blocking queries detected`);
    blockingInfo.forEach(b => {
      recommendations.push(`PID ${b.blockingPid} is blocking PID ${b.blockedPid} (waiting ${Math.round(b.waitTimeMs / 1000)}s)`);
    });
    recommendations.push('Consider shorter transactions to reduce lock contention');
  }
  else {
    recommendations.push('✓ No blocking queries detected');
  }
  return {
    activeLocks: locks.length,
    blockingQueries: blockingInfo.length,
    locks,
    blockingInfo,
    recommendations
  };
}
176
/**
 * Report per-index usage statistics for one schema: size, scan counts, a
 * usage classification, and drop suggestions for unused non-primary indexes.
 *
 * @param connection - database connection handle passed to executeInternalQuery
 * @param logger - structured logger
 * @param args - { schema, minSizeMB, includeUnused } per GetIndexUsageSchema
 * @returns { schema, totalIndexes, totalSizeMB, unusedCount, indexes, recommendations }
 * @throws Error when minSizeMB is not a finite number
 */
export async function getIndexUsage(connection, logger, args) {
  const { schema, minSizeMB, includeUnused } = args;
  logger.info('getIndexUsage', 'Analyzing index usage', { schema });
  if (!Number.isFinite(minSizeMB)) {
    throw new Error('minSizeMB must be a finite number');
  }
  // BUG FIX: this filter previously used pg_relation_size(i.indexrelid), but
  // `i` aliases pg_class, which has no indexrelid column — any minSizeMB > 0
  // made the query fail. i.oid is the index's own oid, matching the size
  // expressions used in the SELECT list below.
  const sizeFilter = minSizeMB > 0 ? `AND pg_relation_size(i.oid) >= ${Number(minSizeMB) * 1024 * 1024}` : '';
  const usageFilter = includeUnused ? '' : 'AND (s.idx_scan IS NULL OR s.idx_scan > 0)';
  // Index metadata + scan stats; schema name travels as a bound parameter.
  const query = `
    SELECT
      n.nspname as schema,
      t.relname as table,
      i.relname as index,
      pg_size_pretty(pg_relation_size(i.oid)) as size,
      pg_relation_size(i.oid) as size_bytes,
      COALESCE(s.idx_scan, 0) as scans,
      COALESCE(s.idx_tup_read, 0) as tuples_read,
      COALESCE(s.idx_tup_fetch, 0) as tuples_fetched,
      ix.indisunique as unique,
      ix.indisprimary as primary
    FROM pg_class i
    JOIN pg_index ix ON ix.indexrelid = i.oid
    JOIN pg_class t ON t.oid = ix.indrelid
    JOIN pg_namespace n ON n.oid = i.relnamespace
    LEFT JOIN pg_stat_user_indexes s ON s.indexrelid = i.oid
    WHERE n.nspname = $1
    AND i.relkind = 'i'
    ${sizeFilter}
    ${usageFilter}
    ORDER BY pg_relation_size(i.oid) DESC
    LIMIT 100
  `;
  const result = await executeInternalQuery(connection, logger, {
    query,
    params: [schema]
  });
  // Double-quote identifiers in generated DDL so mixed-case or unusual names
  // stay valid; embedded double quotes are doubled per SQL quoting rules.
  const quoteIdent = (name) => `"${String(name).replace(/"/g, '""')}"`;
  const indexes = result.rows.map(row => {
    const scans = parseInt(row.scans || '0', 10);
    const sizeBytes = parseInt(row.size_bytes, 10);
    const sizeMB = (sizeBytes / 1024 / 1024).toFixed(2);
    // Classify by scan count and attach a per-index recommendation.
    let usage = 'unknown';
    let recommendation = '';
    if (scans === 0) {
      usage = 'unused';
      recommendation = `⚠ Never used - consider dropping to save ${sizeMB} MB`;
    }
    else if (scans < 100) {
      usage = 'rarely used';
      recommendation = 'Rarely used - evaluate if necessary';
    }
    else if (scans < 1000) {
      usage = 'occasionally used';
      recommendation = 'Occasionally used';
    }
    else {
      usage = 'frequently used';
      recommendation = '✓ Frequently used - keep';
    }
    return {
      schema: row.schema,
      table: row.table,
      index: row.index,
      size: row.size,
      sizeMB: parseFloat(sizeMB),
      scans,
      tuplesRead: parseInt(row.tuples_read || '0', 10),
      tuplesFetched: parseInt(row.tuples_fetched || '0', 10),
      unique: row.unique,
      primary: row.primary,
      usage,
      recommendation,
      // Primary-key indexes never get a drop suggestion.
      ...(scans === 0 && !row.primary && { dropCommand: `DROP INDEX CONCURRENTLY ${quoteIdent(row.schema)}.${quoteIdent(row.index)};` })
    };
  });
  const unusedIndexes = indexes.filter(idx => idx.usage === 'unused' && !idx.primary);
  const unusedSizeMB = unusedIndexes.reduce((sum, idx) => sum + idx.sizeMB, 0);
  const recommendations = [];
  if (unusedIndexes.length > 0) {
    recommendations.push(`${unusedIndexes.length} unused indexes consuming ${unusedSizeMB.toFixed(2)} MB`);
    recommendations.push('Drop unused indexes to reduce write overhead and save space');
    unusedIndexes.slice(0, 5).forEach(idx => {
      recommendations.push(`Consider: DROP INDEX CONCURRENTLY ${quoteIdent(idx.schema)}.${quoteIdent(idx.index)};`);
    });
  }
  else {
    recommendations.push('✓ All indexes are being used');
  }
  return {
    schema,
    totalIndexes: indexes.length,
    totalSizeMB: indexes.reduce((sum, idx) => sum + idx.sizeMB, 0).toFixed(2),
    unusedCount: unusedIndexes.length,
    indexes,
    recommendations
  };
}
272
/**
 * Tool registry for the monitoring module: maps each tool name to its zod
 * input schema and async handler.
 */
export const monitoringTools = {
  getCurrentActivity: { schema: GetCurrentActivitySchema, handler: getCurrentActivity },
  analyzeLocks: { schema: AnalyzeLocksSchema, handler: analyzeLocks },
  getIndexUsage: { schema: GetIndexUsageSchema, handler: getIndexUsage }
};
286
+ //# sourceMappingURL=monitoring.js.map
@@ -0,0 +1,410 @@
1
+ import { z } from 'zod';
2
+ import { executeInternalQuery } from '../utils/database.js';
3
+ import { sanitizeIdentifier, escapeIdentifier, parseIntSafe } from '../utils/sanitize.js';
4
+ import { buildWhereClause, WhereConditionSchema } from '../utils/query-builder.js';
5
/**
 * Clamp a client-supplied maxRows value to the server-enforced ceiling.
 * The ceiling is read from MAX_MUTATION_ROWS, falling back to 10000 when
 * the variable is unset or unparsable.
 */
function clampMaxRows(clientMaxRows) {
  const configuredCeiling = process.env.MAX_MUTATION_ROWS || '10000';
  const serverCeiling = parseIntSafe(configuredCeiling, 10000);
  return Math.min(clientMaxRows, serverCeiling);
}
9
// Preview request for a prospective UPDATE: read-only; `limit` caps how many
// sample rows the preview returns.
export const PreviewUpdateSchema = z.object({
  table: z.string(),
  schema: z.string().optional().default('public'),
  where: z.array(WhereConditionSchema),
  limit: z.number().optional().default(5)
});
// Preview request for a prospective DELETE: same shape as PreviewUpdateSchema.
export const PreviewDeleteSchema = z.object({
  table: z.string(),
  schema: z.string().optional().default('public'),
  where: z.array(WhereConditionSchema),
  limit: z.number().optional().default(5)
});
// Guarded UPDATE: SET must be non-empty; an empty WHERE requires explicit
// allowEmptyWhere opt-in; maxRows is further clamped server-side (clampMaxRows).
export const SafeUpdateSchema = z.object({
  table: z.string(),
  schema: z.string().optional().default('public'),
  set: z.record(z.any()).refine(obj => Object.keys(obj).length > 0, { message: 'SET must have at least one column' }),
  where: z.array(WhereConditionSchema),
  dryRun: z.boolean().optional().default(false),
  maxRows: z.number().optional().default(1000),
  allowEmptyWhere: z.boolean().optional().default(false)
});
// Guarded INSERT: each entry of `rows` is a JSON-encoded array of values whose
// length must match `columns` (validated in safeInsert). Not exported —
// registered only via mutationTools.
const SafeInsertSchema = z.object({
  table: z.string(),
  schema: z.string().optional().default('public'),
  columns: z.array(z.string()),
  rows: z.array(z.string()),
  dryRun: z.boolean().optional().default(false),
  maxRows: z.number().optional().default(1000),
  onConflict: z.enum(['error', 'skip']).optional().default('error'),
});
// Guarded DELETE: same safety knobs as SafeUpdateSchema, minus SET.
export const SafeDeleteSchema = z.object({
  table: z.string(),
  schema: z.string().optional().default('public'),
  where: z.array(WhereConditionSchema),
  dryRun: z.boolean().optional().default(false),
  maxRows: z.number().optional().default(1000),
  allowEmptyWhere: z.boolean().optional().default(false)
});
47
/**
 * Safety gate for WHERE conditions on mutating/previewing operations.
 *
 * Non-empty WHERE → { valid: true }.
 * Empty WHERE with allowEmpty → { valid: true } plus an all-rows warning.
 * Empty WHERE without allowEmpty → { valid: false } with a context-specific
 * hint ('preview' asks for a condition; mutations mention allowEmptyWhere).
 */
function validateWhereClause(where, allowEmpty, context = 'mutate') {
  if (where.length > 0) {
    return { valid: true };
  }
  if (allowEmpty) {
    return {
      valid: true,
      warning: 'WARNING: This will affect ALL rows in the table.'
    };
  }
  const hint = context === 'preview'
    ? 'Provide at least one WHERE condition.'
    : 'Set allowEmptyWhere=true to proceed.';
  return {
    valid: false,
    warning: `No WHERE conditions provided. This would affect ALL rows. ${hint}`
  };
}
65
/**
 * Build a severity-graded warning string for an operation affecting `count`
 * rows: CRITICAL above 10000, WARNING above 1000, Note above 100, otherwise
 * undefined (no warning needed).
 */
function getOperationWarning(count, operation) {
  const formatted = count.toLocaleString();
  if (count > 10000) {
    return `CRITICAL: This ${operation} will affect ${formatted} rows. Consider using smaller batches.`;
  }
  if (count > 1000) {
    return `WARNING: This ${operation} will affect ${formatted} rows.`;
  }
  if (count > 100) {
    return `Note: This ${operation} will affect ${formatted} rows.`;
  }
  return undefined;
}
77
/**
 * Preview an UPDATE: report how many rows the WHERE clause matches and
 * return a small sample, without modifying anything.
 *
 * @param connection - database connection handle passed to executeInternalQuery
 * @param logger - structured logger
 * @param args - { table, schema, where, limit } per PreviewUpdateSchema
 * @returns { blocked, reason } when WHERE is empty, otherwise
 *          { willAffect, sampleDocuments, samplesShown, message, filterWarning }
 * @throws Error when limit is not a usable non-negative finite number
 */
export async function previewUpdate(connection, logger, args) {
  const { table, schema, where, limit } = args;
  logger.info('previewUpdate', 'Previewing UPDATE operation', { schema, table });
  // FIX: `limit` is interpolated into SQL below; previously NaN/Infinity/
  // negative values produced invalid SQL (e.g. "LIMIT NaN"). Validate and
  // truncate fractions to an integer before use.
  if (!Number.isFinite(limit) || limit < 0) {
    throw new Error('limit must be a non-negative finite number');
  }
  const sampleLimit = Math.trunc(limit);
  const sanitizedSchema = sanitizeIdentifier(schema);
  const sanitizedTable = sanitizeIdentifier(table);
  // Previews never allow an empty WHERE; 'preview' tailors the hint text.
  const validation = validateWhereClause(where, false, 'preview');
  if (!validation.valid) {
    return {
      blocked: true,
      reason: validation.warning
    };
  }
  const whereResult = buildWhereClause(where);
  const countQuery = `
    SELECT COUNT(*) as count
    FROM ${sanitizedSchema}.${sanitizedTable}
    ${whereResult.clause ? `WHERE ${whereResult.clause}` : ''}
  `;
  const sampleQuery = `
    SELECT *
    FROM ${sanitizedSchema}.${sanitizedTable}
    ${whereResult.clause ? `WHERE ${whereResult.clause}` : ''}
    LIMIT ${sampleLimit}
  `;
  // Count and sample are independent reads; run them in parallel.
  const [countResult, sampleResult] = await Promise.all([
    executeInternalQuery(connection, logger, { query: countQuery, params: whereResult.params }),
    executeInternalQuery(connection, logger, { query: sampleQuery, params: whereResult.params })
  ]);
  const affectedCount = parseInt(countResult.rows[0]?.count || '0', 10);
  const warning = getOperationWarning(affectedCount, 'UPDATE');
  return {
    willAffect: affectedCount,
    sampleDocuments: sampleResult.rows,
    samplesShown: sampleResult.rows.length,
    message: warning || (affectedCount <= 10
      ? `Will update ${affectedCount} row${affectedCount !== 1 ? 's' : ''}`
      : undefined),
    filterWarning: validation.warning
  };
}
117
/**
 * Preview a DELETE: report how many rows the WHERE clause matches and
 * return a small sample, without deleting anything.
 *
 * @param connection - database connection handle passed to executeInternalQuery
 * @param logger - structured logger
 * @param args - { table, schema, where, limit } per PreviewDeleteSchema
 * @returns { blocked, reason } when WHERE is empty, otherwise
 *          { willDelete, sampleDocuments, samplesShown, message, filterWarning }
 * @throws Error when limit is not a usable non-negative finite number
 */
export async function previewDelete(connection, logger, args) {
  const { table, schema, where, limit } = args;
  logger.info('previewDelete', 'Previewing DELETE operation', { schema, table });
  // FIX: `limit` is interpolated into SQL below; previously NaN/Infinity/
  // negative values produced invalid SQL (e.g. "LIMIT NaN"). Validate and
  // truncate fractions to an integer before use.
  if (!Number.isFinite(limit) || limit < 0) {
    throw new Error('limit must be a non-negative finite number');
  }
  const sampleLimit = Math.trunc(limit);
  const sanitizedSchema = sanitizeIdentifier(schema);
  const sanitizedTable = sanitizeIdentifier(table);
  // Previews never allow an empty WHERE; 'preview' tailors the hint text.
  const validation = validateWhereClause(where, false, 'preview');
  if (!validation.valid) {
    return {
      blocked: true,
      reason: validation.warning
    };
  }
  const whereResult = buildWhereClause(where);
  const countQuery = `
    SELECT COUNT(*) as count
    FROM ${sanitizedSchema}.${sanitizedTable}
    ${whereResult.clause ? `WHERE ${whereResult.clause}` : ''}
  `;
  const sampleQuery = `
    SELECT *
    FROM ${sanitizedSchema}.${sanitizedTable}
    ${whereResult.clause ? `WHERE ${whereResult.clause}` : ''}
    LIMIT ${sampleLimit}
  `;
  // Count and sample are independent reads; run them in parallel.
  const [countResult, sampleResult] = await Promise.all([
    executeInternalQuery(connection, logger, { query: countQuery, params: whereResult.params }),
    executeInternalQuery(connection, logger, { query: sampleQuery, params: whereResult.params })
  ]);
  const deleteCount = parseInt(countResult.rows[0]?.count || '0', 10);
  const warning = getOperationWarning(deleteCount, 'DELETE');
  return {
    willDelete: deleteCount,
    sampleDocuments: sampleResult.rows,
    samplesShown: sampleResult.rows.length,
    message: warning || (deleteCount <= 10
      ? `Will delete ${deleteCount} row${deleteCount !== 1 ? 's' : ''}`
      : undefined),
    filterWarning: validation.warning
  };
}
157
/**
 * Execute a guarded UPDATE: validates the WHERE clause, pre-counts affected
 * rows, blocks the operation when the count exceeds the server-clamped
 * maxRows, and supports a dry run that only reports what would happen.
 *
 * Identifiers are sanitized before interpolation; all values travel as bound
 * parameters. NOTE(review): the count and the UPDATE are separate statements,
 * so the matched-row set can change between them — confirm whether callers
 * need transactional consistency.
 *
 * @param connection - database connection handle passed to executeInternalQuery
 * @param logger - structured logger
 * @param args - per SafeUpdateSchema
 * @returns a blocked report, a dry-run report, or
 *          { success, operation, table, rowsAffected, message, whereWarning }
 */
export async function safeUpdate(connection, logger, args) {
  const { table, schema, set, where, dryRun, maxRows: clientMaxRows, allowEmptyWhere } = args;
  // The client's requested limit is capped by the server-side ceiling.
  const maxRows = clampMaxRows(clientMaxRows);
  logger.info('safeUpdate', 'Executing safe UPDATE', { schema, table, dryRun });
  const sanitizedSchema = sanitizeIdentifier(schema);
  const sanitizedTable = sanitizeIdentifier(table);
  const validation = validateWhereClause(where, allowEmptyWhere);
  if (!validation.valid) {
    return {
      blocked: true,
      reason: validation.warning
    };
  }
  // Build WHERE once for count/sample queries (startParam=1, no SET params)
  const previewWhereResult = buildWhereClause(where);
  const countQuery = `
    SELECT COUNT(*) as count
    FROM ${sanitizedSchema}.${sanitizedTable}
    ${previewWhereResult.clause ? `WHERE ${previewWhereResult.clause}` : ''}
  `;
  const countResult = await executeInternalQuery(connection, logger, { query: countQuery, params: previewWhereResult.params });
  const affectedCount = parseInt(countResult.rows[0]?.count || '0', 10);
  // Hard stop before touching data when the blast radius is too large.
  if (affectedCount > maxRows) {
    return {
      blocked: true,
      reason: `Operation blocked: Would affect ${affectedCount.toLocaleString()} rows, exceeds maxRows limit of ${maxRows.toLocaleString()}.`,
      suggestion: 'Use previewUpdate() to see affected rows, or increase maxRows limit.'
    };
  }
  if (dryRun) {
    // Dry run: report the count plus a 5-row sample; change nothing.
    const sampleQuery = `
      SELECT *
      FROM ${sanitizedSchema}.${sanitizedTable}
      ${previewWhereResult.clause ? `WHERE ${previewWhereResult.clause}` : ''}
      LIMIT 5
    `;
    const sampleResult = await executeInternalQuery(connection, logger, { query: sampleQuery, params: previewWhereResult.params });
    return {
      dryRun: true,
      operation: 'UPDATE',
      table: `${schema}.${table}`,
      wouldAffect: affectedCount,
      sampleRows: sampleResult.rows,
      setClause: set,
      message: getOperationWarning(affectedCount, 'UPDATE'),
      whereWarning: validation.warning
    };
  }
  // Build SET clause with parameterized values
  const setClauses = [];
  const setParams = [];
  let paramIndex = 1;
  for (const [column, value] of Object.entries(set)) {
    // Column names are sanitized then quoted; values become $n placeholders.
    setClauses.push(`${escapeIdentifier(sanitizeIdentifier(column))} = $${paramIndex}`);
    setParams.push(value);
    paramIndex++;
  }
  const setClause = setClauses.join(', ');
  // Build WHERE with offset after SET params
  // (placeholder numbering must continue from paramIndex so SET and WHERE
  // params line up in the single allParams array).
  const whereResult = buildWhereClause(where, paramIndex);
  const allParams = [...setParams, ...whereResult.params];
  const updateQuery = `
    UPDATE ${sanitizedSchema}.${sanitizedTable}
    SET ${setClause}
    ${whereResult.clause ? `WHERE ${whereResult.clause}` : ''}
  `;
  const result = await executeInternalQuery(connection, logger, { query: updateQuery, params: allParams });
  return {
    success: true,
    operation: 'UPDATE',
    table: `${schema}.${table}`,
    rowsAffected: result.rowCount,
    message: getOperationWarning(result.rowCount || 0, 'UPDATE'),
    whereWarning: validation.warning
  };
}
233
/**
 * Execute a guarded DELETE: validates the WHERE clause, pre-counts the rows
 * that would be removed, blocks the operation when the count exceeds the
 * server-clamped maxRows, and supports a dry run.
 *
 * NOTE(review): the count and the DELETE are separate statements, so the
 * matched-row set can change between them — confirm whether callers need
 * transactional consistency.
 *
 * @param connection - database connection handle passed to executeInternalQuery
 * @param logger - structured logger
 * @param args - per SafeDeleteSchema
 * @returns a blocked report, a dry-run report, or
 *          { success, operation, table, rowsDeleted, message, whereWarning }
 */
export async function safeDelete(connection, logger, args) {
  const { table, schema, where, dryRun, maxRows: clientMaxRows, allowEmptyWhere } = args;
  // The client's requested limit is capped by the server-side ceiling.
  const maxRows = clampMaxRows(clientMaxRows);
  logger.info('safeDelete', 'Executing safe DELETE', { schema, table, dryRun });
  const sanitizedSchema = sanitizeIdentifier(schema);
  const sanitizedTable = sanitizeIdentifier(table);
  const validation = validateWhereClause(where, allowEmptyWhere);
  if (!validation.valid) {
    return {
      blocked: true,
      reason: validation.warning
    };
  }
  const whereResult = buildWhereClause(where);
  const countQuery = `
    SELECT COUNT(*) as count
    FROM ${sanitizedSchema}.${sanitizedTable}
    ${whereResult.clause ? `WHERE ${whereResult.clause}` : ''}
  `;
  const countResult = await executeInternalQuery(connection, logger, { query: countQuery, params: whereResult.params });
  const deleteCount = parseInt(countResult.rows[0]?.count || '0', 10);
  // Hard stop before touching data when the blast radius is too large.
  if (deleteCount > maxRows) {
    return {
      blocked: true,
      reason: `Operation blocked: Would delete ${deleteCount.toLocaleString()} rows, exceeds maxRows limit of ${maxRows.toLocaleString()}.`,
      suggestion: 'Use previewDelete() to see affected rows, or increase maxRows limit.'
    };
  }
  if (dryRun) {
    // Dry run: report the count plus a 5-row sample; delete nothing.
    const sampleQuery = `
      SELECT *
      FROM ${sanitizedSchema}.${sanitizedTable}
      ${whereResult.clause ? `WHERE ${whereResult.clause}` : ''}
      LIMIT 5
    `;
    const sampleResult = await executeInternalQuery(connection, logger, { query: sampleQuery, params: whereResult.params });
    return {
      dryRun: true,
      operation: 'DELETE',
      table: `${schema}.${table}`,
      wouldDelete: deleteCount,
      sampleRows: sampleResult.rows,
      message: getOperationWarning(deleteCount, 'DELETE'),
      whereWarning: validation.warning
    };
  }
  const deleteQuery = `
    DELETE FROM ${sanitizedSchema}.${sanitizedTable}
    ${whereResult.clause ? `WHERE ${whereResult.clause}` : ''}
  `;
  const result = await executeInternalQuery(connection, logger, { query: deleteQuery, params: whereResult.params });
  return {
    success: true,
    operation: 'DELETE',
    table: `${schema}.${table}`,
    rowsDeleted: result.rowCount,
    message: getOperationWarning(result.rowCount || 0, 'DELETE'),
    whereWarning: validation.warning
  };
}
293
// Rows per INSERT statement; bounds statement size and placeholder count
// (columns × INSERT_BATCH_SIZE parameters per batch).
const INSERT_BATCH_SIZE = 500;
/**
 * Execute a guarded batch INSERT. Each entry in `rows` is a JSON-encoded
 * array of values aligned positionally with `columns`. Validates shape and
 * row count, supports a dry run, and inserts in INSERT_BATCH_SIZE chunks.
 *
 * NOTE(review): batches are issued as separate statements — a failure mid-way
 * can leave earlier batches applied; confirm whether callers wrap this in a
 * transaction.
 *
 * @param connection - database connection handle passed to executeInternalQuery
 * @param logger - structured logger
 * @param args - per SafeInsertSchema
 * @returns a blocked report, a dry-run report, or
 *          { success, operation, table, rowsInserted, rows }
 */
export async function safeInsert(connection, logger, args) {
  const { table, schema, columns, rows, dryRun, maxRows: clientMaxRows, onConflict } = args;
  const maxRows = clampMaxRows(clientMaxRows);
  logger.info('safeInsert', 'Executing safe INSERT', { schema, table, dryRun });
  // Validation guards
  if (!columns.length) {
    return { blocked: true, reason: 'No columns specified.' };
  }
  if (!rows.length) {
    return { blocked: true, reason: 'No rows provided.' };
  }
  if (rows.length > maxRows) {
    return {
      blocked: true,
      reason: `Row count (${rows.length}) exceeds maxRows limit (${maxRows}).`,
    };
  }
  // Parse and validate each row
  // (must be a JSON array whose length matches the column list).
  const parsedRows = [];
  for (let i = 0; i < rows.length; i++) {
    let parsed;
    try {
      parsed = JSON.parse(rows[i]);
    }
    catch {
      return { blocked: true, reason: `Invalid row JSON at index ${i}: ${rows[i]}` };
    }
    if (!Array.isArray(parsed)) {
      return { blocked: true, reason: `Invalid row JSON at index ${i}: expected array` };
    }
    if (parsed.length !== columns.length) {
      return {
        blocked: true,
        reason: `Row ${i} has ${parsed.length} values but ${columns.length} columns expected.`,
      };
    }
    parsedRows.push(parsed);
  }
  const sanitizedSchema = sanitizeIdentifier(schema);
  const sanitizedTable = sanitizeIdentifier(table);
  const sanitizedColumns = columns.map(c => sanitizeIdentifier(c));
  if (dryRun) {
    // Dry run: echo what would be inserted, with the first 5 rows as a sample.
    return {
      dryRun: true,
      operation: 'INSERT',
      table: `${schema}.${table}`,
      wouldInsert: parsedRows.length,
      columns,
      sampleRows: parsedRows.slice(0, 5),
    };
  }
  // Execute in batches
  let totalInserted = 0;
  const allReturnedRows = [];
  for (let batchStart = 0; batchStart < parsedRows.length; batchStart += INSERT_BATCH_SIZE) {
    const batch = parsedRows.slice(batchStart, batchStart + INSERT_BATCH_SIZE);
    // One positional $n placeholder per value, numbered across the whole batch.
    const valuePlaceholders = [];
    const params = [];
    let paramIndex = 1;
    for (const row of batch) {
      const rowPlaceholders = [];
      for (const value of row) {
        rowPlaceholders.push(`$${paramIndex}`);
        params.push(value);
        paramIndex++;
      }
      valuePlaceholders.push(`(${rowPlaceholders.join(', ')})`);
    }
    let query = `INSERT INTO ${sanitizedSchema}.${sanitizedTable} (${sanitizedColumns.join(', ')}) VALUES ${valuePlaceholders.join(', ')}`;
    if (onConflict === 'skip') {
      // Conflicting rows are silently skipped (and not counted/returned).
      query += ' ON CONFLICT DO NOTHING';
    }
    query += ' RETURNING *';
    const result = await executeInternalQuery(connection, logger, { query, params });
    totalInserted += result.rowCount || 0;
    allReturnedRows.push(...result.rows);
  }
  return {
    success: true,
    operation: 'INSERT',
    table: `${schema}.${table}`,
    rowsInserted: totalInserted,
    rows: allReturnedRows,
  };
}
379
/**
 * @internal Exposed for testing only
 * Returns true when the given WHERE conditions would be REJECTED by the
 * safety check (i.e. an empty WHERE with allowEmptyWhere=false).
 */
export function _testNormalizeWhereForSafety(where) {
  const validation = validateWhereClause(where, false);
  return !validation.valid;
}
384
/**
 * @internal Exposed for testing only
 * Applies the server-side maxRows clamp (MAX_MUTATION_ROWS, default 10000).
 */
export function _testClampMaxRows(maxRows) {
  return clampMaxRows(maxRows);
}
388
/**
 * Tool registry for the mutation module: maps each tool name to its zod
 * input schema and async handler.
 */
export const mutationTools = {
  previewUpdate: { schema: PreviewUpdateSchema, handler: previewUpdate },
  previewDelete: { schema: PreviewDeleteSchema, handler: previewDelete },
  safeUpdate: { schema: SafeUpdateSchema, handler: safeUpdate },
  safeDelete: { schema: SafeDeleteSchema, handler: safeDelete },
  safeInsert: { schema: SafeInsertSchema, handler: safeInsert }
};
410
+ //# sourceMappingURL=mutations.js.map