mcp-db-analyzer 0.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +381 -0
- package/build/analyzers/bloat.js +150 -0
- package/build/analyzers/connections.js +183 -0
- package/build/analyzers/indexes.js +251 -0
- package/build/analyzers/query.js +241 -0
- package/build/analyzers/relationships.js +210 -0
- package/build/analyzers/schema.js +259 -0
- package/build/analyzers/slow-queries.js +127 -0
- package/build/analyzers/suggestions.js +166 -0
- package/build/analyzers/vacuum.js +187 -0
- package/build/db-mysql.js +80 -0
- package/build/db-postgres.js +73 -0
- package/build/db-sqlite.js +36 -0
- package/build/db.js +41 -0
- package/build/errors.js +13 -0
- package/build/index.js +380 -0
- package/build/license.js +114 -0
- package/package.json +65 -0
|
@@ -0,0 +1,166 @@
|
|
|
1
|
+
import { query, getDriverType } from "../db.js";
|
|
2
|
+
/**
 * Suggest missing indexes by analyzing scan patterns,
 * and cross-reference with unused indexes that waste resources.
 *
 * Dispatches to driver-specific implementations for SQLite/MySQL;
 * the default path below targets PostgreSQL statistics views.
 */
export async function suggestMissingIndexes(schema = "public") {
  switch (getDriverType()) {
    case "sqlite":
      return suggestMissingIndexesSqlite();
    case "mysql":
      return suggestMissingIndexesMysql(schema);
    default:
      break;
  }
  // PostgreSQL: heavily seq-scanned tables that have never used an index.
  const missingSql = `
    SELECT
      relname AS table_name,
      seq_scan::text,
      COALESCE(idx_scan, 0)::text AS idx_scan,
      n_live_tup::text,
      pg_size_pretty(pg_table_size(quote_ident(schemaname) || '.' || quote_ident(relname))) AS table_size
    FROM pg_stat_user_tables
    WHERE schemaname = $1
      AND seq_scan > 1000
      AND COALESCE(idx_scan, 0) = 0
    ORDER BY seq_scan DESC
  `;
  // Non-primary-key indexes that have never been scanned.
  const unusedSql = `
    SELECT
      s.relname AS table_name,
      s.indexrelname AS index_name,
      pg_size_pretty(pg_relation_size(s.indexrelid)) AS index_size,
      i.indexdef AS index_def
    FROM pg_stat_user_indexes s
    JOIN pg_indexes i
      ON s.schemaname = i.schemaname
      AND s.relname = i.tablename
      AND s.indexrelname = i.indexname
    WHERE s.schemaname = $1
      AND s.idx_scan = 0
      AND s.indexrelname NOT LIKE '%_pkey'
    ORDER BY pg_relation_size(s.indexrelid) DESC
  `;
  const needsIndex = await query(missingSql, [schema]);
  const unused = await query(unusedSql, [schema]);
  return formatSuggestions(needsIndex.rows, unused.rows, schema);
}
|
|
45
|
+
/**
 * SQLite variant: report user tables that have no indexes at all and more
 * than 100 rows. SQLite keeps no scan statistics, so this is heuristic only.
 *
 * @returns {Promise<string>} Markdown report.
 */
async function suggestMissingIndexesSqlite() {
  // Find tables with no indexes
  const tables = await query(`
    SELECT name FROM sqlite_master
    WHERE type = 'table' AND name NOT LIKE 'sqlite_%'
    ORDER BY name
  `);
  const tablesWithoutIndexes = [];
  for (const table of tables.rows) {
    const indexes = await query(`
      SELECT name FROM sqlite_master
      WHERE type = 'index' AND tbl_name = ? AND name NOT LIKE 'sqlite_%'
    `, [table.name]);
    if (indexes.rows.length === 0) {
      // Table names cannot be bound as parameters, so they must be inlined.
      // Escape embedded double quotes so a name like my"table cannot break
      // out of the quoted identifier (SQL-identifier escaping: " -> "").
      const ident = `"${table.name.replace(/"/g, '""')}"`;
      const countResult = await query(`SELECT count(*) as cnt FROM ${ident}`);
      const cnt = countResult.rows[0]?.cnt ?? 0;
      // Only flag tables large enough for a full scan to matter.
      if (cnt > 100) {
        tablesWithoutIndexes.push({
          table_name: table.name,
          row_count: String(cnt),
        });
      }
    }
  }
  const lines = [`## Index Suggestions (SQLite)\n`];
  if (tablesWithoutIndexes.length > 0) {
    lines.push(`### Tables Without Indexes (${tablesWithoutIndexes.length} found)\n`);
    lines.push("| Table | Rows |");
    lines.push("|-------|------|");
    for (const t of tablesWithoutIndexes) {
      lines.push(`| ${t.table_name} | ${t.row_count} |`);
    }
    lines.push("\n**Tip**: Use `explain_query` to check which queries do full table scans.");
  }
  else {
    lines.push("All tables with >100 rows have at least one index.\n");
  }
  lines.push("\n**Note**: SQLite does not track scan statistics. Use `EXPLAIN QUERY PLAN` to identify slow queries.");
  return lines.join("\n");
}
|
|
85
|
+
/**
 * MySQL variant: report tables with no secondary (non-PRIMARY) indexes and
 * secondary indexes that have never been read, using information_schema and
 * performance_schema.
 *
 * NOTE(review): TABLE_ROWS and COUNT_READ are estimates/counters, not exact
 * values — treat the reported numbers as indicative, not precise.
 *
 * @param {string} schema - Database (schema) name to inspect.
 * @returns {Promise<string>} Markdown report, or a help message when
 *   performance_schema is unavailable.
 */
async function suggestMissingIndexesMysql(schema) {
  try {
    // Tables with no indexes beyond PRIMARY
    const needsIndex = await query(`
      SELECT
        t.TABLE_NAME AS table_name,
        CAST(COALESCE(tio.COUNT_READ, 0) AS CHAR) AS seq_scan,
        '0' AS idx_scan,
        CAST(t.TABLE_ROWS AS CHAR) AS n_live_tup,
        CONCAT(ROUND(t.DATA_LENGTH / 1024 / 1024, 2), ' MB') AS table_size
      FROM information_schema.TABLES t
      LEFT JOIN performance_schema.table_io_waits_summary_by_table tio
        ON tio.OBJECT_SCHEMA = t.TABLE_SCHEMA AND tio.OBJECT_NAME = t.TABLE_NAME
      WHERE t.TABLE_SCHEMA = ?
        AND t.TABLE_TYPE = 'BASE TABLE'
        AND t.TABLE_ROWS > 1000
        AND NOT EXISTS (
          SELECT 1 FROM information_schema.STATISTICS s
          WHERE s.TABLE_SCHEMA = t.TABLE_SCHEMA
            AND s.TABLE_NAME = t.TABLE_NAME
            AND s.INDEX_NAME != 'PRIMARY'
        )
      ORDER BY t.TABLE_ROWS DESC
    `, [schema]);
    // Unused non-primary indexes
    // (index size derived from mysql.innodb_index_stats pages * page size;
    //  may be missing for some indexes, hence the COALESCE to 0)
    const unused = await query(`
      SELECT
        s.OBJECT_NAME AS table_name,
        s.INDEX_NAME AS index_name,
        CONCAT(ROUND(COALESCE(ist.STAT_VALUE, 0) * @@innodb_page_size / 1024 / 1024, 2), ' MB') AS index_size,
        CONCAT('INDEX ', s.INDEX_NAME, ' ON ', s.OBJECT_NAME) AS index_def
      FROM performance_schema.table_io_waits_summary_by_index_usage s
      LEFT JOIN mysql.innodb_index_stats ist
        ON ist.database_name = s.OBJECT_SCHEMA
        AND ist.table_name = s.OBJECT_NAME
        AND ist.index_name = s.INDEX_NAME
        AND ist.stat_name = 'size'
      WHERE s.OBJECT_SCHEMA = ?
        AND s.INDEX_NAME IS NOT NULL
        AND s.INDEX_NAME != 'PRIMARY'
        AND (s.COUNT_READ = 0 OR s.COUNT_READ IS NULL)
      ORDER BY COALESCE(ist.STAT_VALUE, 0) DESC
    `, [schema]);
    return formatSuggestions(needsIndex.rows, unused.rows, schema);
  }
  catch {
    // Deliberate best-effort: performance_schema may be disabled or the
    // user may lack SELECT on it; return guidance instead of failing.
    return "## Index Suggestions\n\nUnable to query performance_schema. Ensure performance_schema is enabled (it is ON by default in MySQL 5.7+) and the user has SELECT privilege on performance_schema tables.";
  }
}
|
|
134
|
+
/**
 * Render the missing/unused index findings as a Markdown report.
 *
 * @param {Array<object>} needsIndex - Rows describing tables with no useful indexes.
 * @param {Array<object>} unused - Rows describing never-scanned indexes.
 * @param {string} schema - Schema name, echoed into the heading.
 * @returns {string} Markdown text.
 */
function formatSuggestions(needsIndex, unused, schema) {
  const out = [`## Index Suggestions — schema '${schema}'\n`];
  if (needsIndex.length === 0) {
    out.push("### No critically unindexed tables found.\n");
  } else {
    out.push(
      `### Tables Missing Indexes (${needsIndex.length} found)\n`,
      "These tables have high scan counts with no non-primary indexes. They may be full-table-scanned on every query.\n",
      "| Table | Seq Scans | Index Scans | Rows | Size |",
      "|-------|-----------|-------------|------|------|",
      ...needsIndex.map((r) => `| ${r.table_name} | ${r.seq_scan} | ${r.idx_scan} | ${r.n_live_tup} | ${r.table_size} |`),
      "\n**Next step**: Use `explain_query` to analyze your most common queries against these tables, then create indexes on the columns used in WHERE and JOIN clauses.\n",
    );
  }
  if (unused.length === 0) {
    out.push("### No unused indexes found.\n");
  } else {
    out.push(
      `### Unused Indexes (${unused.length} found)\n`,
      "These non-primary-key indexes have zero scans. They slow down writes and waste storage.\n",
      "| Table | Index | Size | Definition |",
      "|-------|-------|------|------------|",
      ...unused.map((i) => `| ${i.table_name} | ${i.index_name} | ${i.index_size} | \`${i.index_def}\` |`),
      "\n**Recommended**: Drop unused indexes after confirming with your team:\n",
      ...unused.map((i) => `\`\`\`sql\nDROP INDEX ${i.index_name} ON ${schema}.${i.table_name};\n\`\`\``),
    );
  }
  return out.join("\n");
}
|
|
@@ -0,0 +1,187 @@
|
|
|
1
|
+
import { query, getDriverType } from "../db.js";
|
|
2
|
+
/**
 * Analyze PostgreSQL VACUUM maintenance status.
 *
 * Checks dead tuple ratios, vacuum staleness, autovacuum configuration,
 * and identifies tables that need manual VACUUM attention.
 * PostgreSQL only — returns unsupported message for MySQL/SQLite.
 */
export async function analyzeVacuum(schema = "public") {
  switch (getDriverType()) {
    case "sqlite":
      return "## VACUUM Analysis\n\nSQLite does not use autovacuum in the same way as PostgreSQL. Run `VACUUM` manually to defragment the database file.";
    case "mysql":
      return "## VACUUM Analysis\n\nMySQL/InnoDB does not use VACUUM. Use `OPTIMIZE TABLE` to reclaim space from fragmented tables. See the `analyze_table_bloat` tool for fragmentation analysis.";
    default:
      break;
  }
  // Per-table vacuum/analyze counters and timestamps, worst offenders first.
  const tableStats = await query(`
    SELECT
      relname AS table_name,
      n_live_tup::text,
      n_dead_tup::text,
      last_vacuum::text,
      last_autovacuum::text,
      last_analyze::text,
      last_autoanalyze::text,
      vacuum_count::text,
      autovacuum_count::text,
      analyze_count::text,
      autoanalyze_count::text
    FROM pg_stat_user_tables
    WHERE schemaname = $1
    ORDER BY n_dead_tup DESC
  `, [schema]);
  if (tableStats.rows.length === 0) {
    return `No user tables found in schema '${schema}'.`;
  }
  // Global autovacuum configuration from pg_settings.
  const avSettings = await query(`
    SELECT name, setting
    FROM pg_settings
    WHERE name LIKE 'autovacuum%'
    ORDER BY name
  `);
  const findings = analyzeFindings(tableStats.rows, avSettings.rows);
  return formatVacuumReport(schema, tableStats.rows, avSettings.rows, findings);
}
|
|
48
|
+
/**
 * Derive maintenance findings from pg_stat_user_tables rows and
 * autovacuum settings.
 *
 * @param {Array<object>} tables - Rows with stringified tuple/vacuum counters.
 * @param {Array<{name: string, setting: string}>} settings - pg_settings rows.
 * @returns {Array<{severity: string, table: string|null, message: string, recommendation: string}>}
 */
export function analyzeFindings(tables, settings) {
  const findings = [];
  const report = (severity, table, message, recommendation) => {
    findings.push({ severity, table, message, recommendation });
  };
  // Global check: is autovacuum switched off entirely?
  const autovac = settings.find((s) => s.name === "autovacuum");
  if (autovac?.setting === "off") {
    report(
      "CRITICAL",
      null,
      "Autovacuum is DISABLED globally",
      "Enable autovacuum immediately: ALTER SYSTEM SET autovacuum = on; SELECT pg_reload_conf();",
    );
  }
  for (const row of tables) {
    const live = Number.parseInt(row.n_live_tup, 10) || 0;
    const dead = Number.parseInt(row.n_dead_tup, 10) || 0;
    const total = live + dead;
    const deadRatio = total > 0 ? dead / total : 0;
    const pct = `${(deadRatio * 100).toFixed(1)}% dead tuples (${dead} dead / ${total} total)`;
    // Dead-tuple thresholds: >20% (and >100 dead) is critical,
    // >10% (and >50 dead) is a warning.
    if (deadRatio > 0.20 && dead > 100) {
      report("CRITICAL", row.table_name, pct, `Run: VACUUM ANALYZE ${row.table_name};`);
    } else if (deadRatio > 0.10 && dead > 50) {
      report("WARNING", row.table_name, pct, `Run: VACUUM ANALYZE ${row.table_name};`);
    }
    // Non-empty table that has never been vacuumed at all.
    const manualVacs = Number.parseInt(row.vacuum_count, 10) || 0;
    const autoVacs = Number.parseInt(row.autovacuum_count, 10) || 0;
    if (manualVacs === 0 && autoVacs === 0 && total > 0) {
      report(
        "WARNING",
        row.table_name,
        "Table has never been vacuumed (manual or auto)",
        `Run: VACUUM ANALYZE ${row.table_name};`,
      );
    }
    // Non-empty table that has never had planner statistics collected.
    const manualAnalyzes = Number.parseInt(row.analyze_count, 10) || 0;
    const autoAnalyzes = Number.parseInt(row.autoanalyze_count, 10) || 0;
    if (manualAnalyzes === 0 && autoAnalyzes === 0 && total > 0) {
      report(
        "INFO",
        row.table_name,
        "Table has never been analyzed — query planner statistics may be stale",
        `Run: ANALYZE ${row.table_name};`,
      );
    }
  }
  return findings;
}
|
|
107
|
+
/**
 * Render the vacuum analysis as a Markdown report: findings grouped by
 * severity, tables needing VACUUM, a full table overview, and the
 * autovacuum configuration.
 *
 * @param {string} schema - Schema name echoed in the heading.
 * @param {Array<object>} tables - pg_stat_user_tables rows (string counters).
 * @param {Array<{name: string, setting: string}>} settings - autovacuum settings.
 * @param {Array<object>} findings - Output of analyzeFindings().
 * @returns {string} Markdown text.
 */
export function formatVacuumReport(schema, tables, settings, findings) {
  const out = [`## VACUUM Analysis — schema '${schema}'\n`];
  // Helper: parse a row's live/dead counters once.
  const tupleCounts = (row) => {
    const live = Number.parseInt(row.n_live_tup, 10) || 0;
    const dead = Number.parseInt(row.n_dead_tup, 10) || 0;
    const total = live + dead;
    const deadPct = total > 0 ? ((dead / total) * 100).toFixed(1) : "0.0";
    return { live, dead, total, deadPct };
  };
  const critical = findings.filter((f) => f.severity === "CRITICAL");
  const warnings = findings.filter((f) => f.severity === "WARNING");
  const info = findings.filter((f) => f.severity === "INFO");
  if (findings.length === 0) {
    out.push("### All tables are well-maintained.\n");
    out.push("No vacuum issues detected. Autovacuum appears to be working correctly.\n");
  } else {
    out.push(`### Findings: ${critical.length} critical, ${warnings.length} warnings, ${info.length} info\n`);
    const section = (title, group) => {
      if (group.length === 0) {
        return;
      }
      out.push(`#### ${title}\n`);
      for (const f of group) {
        const label = f.table ? `**${f.table}**: ` : "";
        out.push(`- ${label}${f.message}`);
        out.push(` > ${f.recommendation}\n`);
      }
    };
    section("Critical Issues", critical);
    section("Warnings", warnings);
    section("Info", info);
  }
  // Tables over the 10% dead-tuple threshold.
  const needsVacuum = tables.filter((t) => {
    const { dead, total } = tupleCounts(t);
    return total > 0 && dead / total > 0.10;
  });
  if (needsVacuum.length > 0) {
    out.push(`### Tables Needing VACUUM (${needsVacuum.length})\n`);
    out.push("| Table | Dead Tuples | Dead % | Last Vacuum | Last Autovacuum |");
    out.push("|-------|-------------|--------|-------------|-----------------|");
    for (const row of needsVacuum) {
      const { dead, deadPct } = tupleCounts(row);
      out.push(`| ${row.table_name} | ${dead} | ${deadPct}% | ${row.last_vacuum || "Never"} | ${row.last_autovacuum || "Never"} |`);
    }
    out.push("");
  }
  // Full overview of every table in the schema.
  out.push("### All Tables\n");
  out.push("| Table | Live | Dead | Dead % | Vacuum Count | Autovacuum Count | Last Vacuum | Last Analyze |");
  out.push("|-------|------|------|--------|--------------|------------------|-------------|--------------|");
  for (const row of tables) {
    const { live, dead, deadPct } = tupleCounts(row);
    const lastVac = row.last_vacuum || row.last_autovacuum || "Never";
    const lastAn = row.last_analyze || row.last_autoanalyze || "Never";
    out.push(`| ${row.table_name} | ${live} | ${dead} | ${deadPct}% | ${row.vacuum_count} | ${row.autovacuum_count} | ${lastVac} | ${lastAn} |`);
  }
  if (settings.length > 0) {
    out.push("\n### Autovacuum Configuration\n");
    out.push("| Setting | Value |");
    out.push("|---------|-------|");
    for (const s of settings) {
      out.push(`| ${s.name} | ${s.setting} |`);
    }
  }
  return out.join("\n");
}
|
|
@@ -0,0 +1,80 @@
|
|
|
1
|
+
import mysql from "mysql2/promise";
|
|
2
|
+
/**
 * Wrap a low-level connection failure in an actionable error, masking any
 * credentials embedded in the original message (e.g. in a connection URI).
 *
 * @param {unknown} err - The caught error (or any thrown value).
 * @returns {Error} A new error with configuration guidance appended.
 */
function wrapConnectionError(err) {
  const raw = err instanceof Error ? err.message : String(err);
  const safe = raw.replace(/\/\/[^@]+@/g, "//****:****@");
  const guidance = [
    `Cannot connect to MySQL: ${safe}`,
    "",
    "Configure connection using one of:",
    " DATABASE_URL=mysql://user:pass@host:3306/dbname",
    " or individual vars: MYSQL_HOST, MYSQL_PORT, MYSQL_DATABASE, MYSQL_USER, MYSQL_PASSWORD",
  ];
  return new Error(guidance.join("\n"));
}
|
|
10
|
+
/**
 * Create the MySQL adapter backed by a lazily-initialized mysql2 pool.
 *
 * query() runs inside a READ ONLY transaction that is always rolled back;
 * queryUnsafe() executes directly with no guard.
 */
export function createMysqlAdapter() {
  let pool = null;
  function getPool() {
    if (!pool) {
      const uri = process.env.DATABASE_URL;
      if (uri) {
        pool = mysql.createPool(uri);
      }
      else {
        pool = mysql.createPool({
          host: process.env.MYSQL_HOST || process.env.DB_HOST || "localhost",
          port: parseInt(process.env.MYSQL_PORT || process.env.DB_PORT || "3306", 10),
          database: process.env.MYSQL_DATABASE || process.env.DB_NAME,
          user: process.env.MYSQL_USER || process.env.DB_USER,
          password: process.env.MYSQL_PASSWORD || process.env.DB_PASSWORD,
        });
      }
    }
    return pool;
  }
  // Acquire a pooled connection, translating failures into a helpful error.
  async function acquire() {
    try {
      return await getPool().getConnection();
    }
    catch (err) {
      throw wrapConnectionError(err);
    }
  }
  return {
    driver: "mysql",
    async query(sql, params) {
      const conn = await acquire();
      try {
        // BUGFIX: the previous `SET SESSION TRANSACTION READ ONLY` is
        // session-scoped and survives conn.release(), so read-only mode
        // leaked into later queryUnsafe() calls on the same pooled
        // connection. A one-shot READ ONLY transaction has no such leak.
        await conn.query("START TRANSACTION READ ONLY");
        try {
          const [rows] = await conn.query(sql, params);
          await conn.rollback();
          return { rows };
        }
        catch (err) {
          await conn.rollback();
          throw err;
        }
      }
      finally {
        conn.release();
      }
    },
    async queryUnsafe(sql, params) {
      const conn = await acquire();
      try {
        const [rows] = await conn.query(sql, params);
        return { rows };
      }
      finally {
        conn.release();
      }
    },
    async close() {
      if (pool) {
        await pool.end();
        pool = null;
      }
    },
  };
}
|
|
@@ -0,0 +1,73 @@
|
|
|
1
|
+
import pg from "pg";
|
|
2
|
+
const { Pool } = pg;
|
|
3
|
+
/**
 * Wrap a low-level connection failure in an actionable error, masking any
 * credentials embedded in the original message (e.g. in a connection URI).
 *
 * @param {unknown} err - The caught error (or any thrown value).
 * @returns {Error} A new error with configuration guidance appended.
 */
function wrapConnectionError(err) {
  const raw = err instanceof Error ? err.message : String(err);
  const safe = raw.replace(/\/\/[^@]+@/g, "//****:****@");
  const guidance = [
    `Cannot connect to PostgreSQL: ${safe}`,
    "",
    "Configure connection using one of:",
    " DATABASE_URL=postgres://user:pass@host:5432/dbname",
    " or individual vars: PGHOST, PGPORT, PGDATABASE, PGUSER, PGPASSWORD",
  ];
  return new Error(guidance.join("\n"));
}
|
|
11
|
+
/**
 * Create the PostgreSQL adapter backed by a lazily-initialized pg Pool.
 *
 * query() runs inside an explicit READ ONLY transaction that is always
 * rolled back; queryUnsafe() executes directly with no guard.
 */
export function createPostgresAdapter() {
  let pool = null;
  function getPool() {
    if (!pool) {
      const connectionString = process.env.DATABASE_URL;
      if (connectionString) {
        pool = new Pool({ connectionString });
      }
      else {
        pool = new Pool({
          host: process.env.PGHOST || "localhost",
          port: parseInt(process.env.PGPORT || "5432", 10),
          database: process.env.PGDATABASE || "postgres",
          user: process.env.PGUSER,
          password: process.env.PGPASSWORD,
        });
      }
    }
    return pool;
  }
  // Acquire a pooled client, translating failures into a helpful error.
  async function acquire() {
    try {
      return await getPool().connect();
    }
    catch (err) {
      throw wrapConnectionError(err);
    }
  }
  return {
    driver: "postgres",
    async query(sql, params) {
      const client = await acquire();
      try {
        // BUGFIX: the previous bare `SET TRANSACTION READ ONLY` ran outside
        // a transaction block, which PostgreSQL treats as a no-op (it only
        // emits a warning) — so the read-only guard never applied. Run the
        // statement inside an explicit READ ONLY transaction instead, and
        // always roll back so nothing is committed.
        await client.query("BEGIN TRANSACTION READ ONLY");
        try {
          const result = await client.query(sql, params);
          return { rows: result.rows };
        }
        finally {
          await client.query("ROLLBACK");
        }
      }
      finally {
        client.release();
      }
    },
    async queryUnsafe(sql, params) {
      const client = await acquire();
      try {
        const result = await client.query(sql, params);
        return { rows: result.rows };
      }
      finally {
        client.release();
      }
    },
    async close() {
      if (pool) {
        await pool.end();
        pool = null;
      }
    },
  };
}
|
|
@@ -0,0 +1,36 @@
|
|
|
1
|
+
import Database from "better-sqlite3";
|
|
2
|
+
/**
 * Create the SQLite adapter backed by a lazily-opened read-only
 * better-sqlite3 database. Path comes from DATABASE_URL, SQLITE_PATH,
 * or DB_PATH.
 */
export function createSqliteAdapter() {
  let db = null;
  function getDb() {
    if (!db) {
      const dbPath = process.env.DATABASE_URL ||
        process.env.SQLITE_PATH ||
        process.env.DB_PATH;
      if (!dbPath) {
        throw new Error("SQLite path not configured. Set DATABASE_URL, SQLITE_PATH, or DB_PATH.");
      }
      db = new Database(dbPath, { readonly: true });
      try {
        // BUGFIX: changing the journal mode requires write access, so this
        // pragma throws on a read-only connection unless the file is
        // already in WAL mode. Treat it as best-effort — the adapter works
        // either way.
        db.pragma("journal_mode = WAL");
      }
      catch {
        // read-only connection cannot convert the journal mode; ignore
      }
    }
    return db;
  }
  // Prepare and execute a statement; statements that return no rows
  // (e.g. ANALYZE) would make stmt.all() throw, so route them via run().
  function execute(sql, params) {
    const stmt = getDb().prepare(sql);
    if (stmt.reader) {
      return { rows: params ? stmt.all(...params) : stmt.all() };
    }
    if (params) {
      stmt.run(...params);
    }
    else {
      stmt.run();
    }
    return { rows: [] };
  }
  return {
    driver: "sqlite",
    async query(sql, params) {
      return execute(sql, params);
    },
    async queryUnsafe(sql, params) {
      return execute(sql, params);
    },
    async close() {
      if (db) {
        db.close();
        db = null;
      }
    },
  };
}
|
package/build/db.js
ADDED
|
@@ -0,0 +1,41 @@
|
|
|
1
|
+
// Active database adapter; installed via setAdapter()/initDriver().
let adapter = null;
// Driver name of the active adapter; reported as "postgres" before init.
let driverType = "postgres";

/** Return the name of the currently configured driver. */
export function getDriverType() {
  return driverType;
}

/** Install an adapter and record its driver name. */
export function setAdapter(a) {
  adapter = a;
  driverType = a.driver;
}

// Fetch the installed adapter, failing loudly if none has been set up yet.
function getAdapter() {
  if (adapter === null) {
    throw new Error("Database adapter not initialized. Call initDriver() first.");
  }
  return adapter;
}

/**
 * Lazily load and install the adapter for the requested driver.
 * Unknown/omitted drivers fall back to PostgreSQL.
 */
export async function initDriver(driver) {
  switch (driver) {
    case "mysql": {
      const { createMysqlAdapter } = await import("./db-mysql.js");
      setAdapter(createMysqlAdapter());
      break;
    }
    case "sqlite": {
      const { createSqliteAdapter } = await import("./db-sqlite.js");
      setAdapter(createSqliteAdapter());
      break;
    }
    default: {
      const { createPostgresAdapter } = await import("./db-postgres.js");
      setAdapter(createPostgresAdapter());
    }
  }
}

/** Run a statement through the adapter's guarded (read-only) query path. */
export async function query(sql, params) {
  return getAdapter().query(sql, params);
}

/** Run a statement through the adapter without the read-only guard. */
export async function queryUnsafe(sql, params) {
  return getAdapter().queryUnsafe(sql, params);
}

/** Close and forget the active adapter, if any. */
export async function closePool() {
  if (adapter !== null) {
    await adapter.close();
    adapter = null;
  }
}
|
package/build/errors.js
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
1
|
+
// Patterns that indicate the underlying failure was a connectivity problem
// rather than a bad query. Intentionally no /g flag: a sticky/global regex
// would make .test() stateful across calls.
const CONNECTION_ERROR_RE = /ECONNREFUSED|ENOTFOUND|ETIMEDOUT|EHOSTUNREACH|getaddrinfo|connect ECONNRESET|password authentication failed|Access denied|no pg_hba\.conf|connection refused|Connection lost|SQLITE_CANTOPEN/i;

/**
 * Format tool errors with connection-specific guidance when applicable.
 * Sanitizes credentials from error messages.
 *
 * @param {string} context - What the tool was doing ("listing tables", ...).
 * @param {unknown} err - The caught error (or any thrown value).
 * @returns {string} User-facing error text.
 */
export function formatToolError(context, err) {
  const raw = err instanceof Error ? err.message : String(err);
  const safe = raw.replace(/\/\/[^@]+@/g, "//****:****@");
  if (!CONNECTION_ERROR_RE.test(raw)) {
    return `Error ${context}: ${safe}`;
  }
  return [
    `Error ${context}: ${safe}`,
    "",
    "This looks like a database connection issue. Check your configuration:",
    "- Set DATABASE_URL environment variable with a valid connection string",
    "- Or use driver-specific variables (PGHOST, MYSQL_HOST, SQLITE_PATH)",
    "- Ensure the database server is running and accessible",
  ].join("\n");
}
|