@levnikolaevich/hex-line-mcp 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +293 -0
- package/benchmark.mjs +1180 -0
- package/hook.mjs +299 -0
- package/lib/bulk-replace.mjs +55 -0
- package/lib/changes.mjs +174 -0
- package/lib/coerce.mjs +43 -0
- package/lib/edit.mjs +420 -0
- package/lib/graph-enrich.mjs +208 -0
- package/lib/hash.mjs +109 -0
- package/lib/info.mjs +109 -0
- package/lib/normalize.mjs +106 -0
- package/lib/outline.mjs +200 -0
- package/lib/read.mjs +129 -0
- package/lib/search.mjs +132 -0
- package/lib/security.mjs +114 -0
- package/lib/setup.mjs +132 -0
- package/lib/tree.mjs +162 -0
- package/lib/update-check.mjs +56 -0
- package/lib/verify.mjs +54 -0
- package/package.json +57 -0
- package/server.mjs +368 -0
package/lib/edit.mjs
ADDED
|
@@ -0,0 +1,420 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Hash-verified file editing with diff output.
|
|
3
|
+
*
|
|
4
|
+
* Supports:
|
|
5
|
+
* - Range-based: range "ab.12-cd.15" + checksum
|
|
6
|
+
* - Anchor-based: set_line, replace_lines, insert_after
|
|
7
|
+
* - Text-based: replace { old_text, new_text, all }
|
|
8
|
+
* - dry_run preview, noop detection, diff output
|
|
9
|
+
*/
|
|
10
|
+
|
|
11
|
+
import { readFileSync, writeFileSync } from "node:fs";
|
|
12
|
+
import { diffLines } from "diff";
|
|
13
|
+
import { fnv1a, lineTag } from "./hash.mjs";
|
|
14
|
+
import { validatePath } from "./security.mjs";
|
|
15
|
+
import { getGraphDB, blastRadius, getRelativePath } from "./graph-enrich.mjs";
|
|
16
|
+
|
|
17
|
+
// Code points that render like the ASCII hyphen-minus (U+002D) but are distinct
// characters: hyphen, non-breaking hyphen, figure dash, en dash, em dash,
// minus sign, small hyphen-minus, fullwidth hyphen-minus.
const CONFUSABLE_HYPHENS = /[\u2010\u2011\u2012\u2013\u2014\u2212\uFE63\uFF0D]/g;

/**
 * Collapse Unicode hyphen look-alikes to the ASCII hyphen-minus so that
 * hash/text matching is tolerant of confusable input.
 * @param {string} input
 * @returns {string} input with every confusable hyphen replaced by "-"
 */
function normalizeConfusables(input) {
  return input.replace(CONFUSABLE_HYPHENS, () => "-");
}
|
|
26
|
+
|
|
27
|
+
/**
 * Re-apply the original block's leading indentation to replacement lines.
 * The first original line's indent becomes the baseline; each replacement
 * line that shares the replacement's own baseline indent is re-based onto
 * the original indent, preserving relative indentation structure.
 * @param {string[]} origLines - lines being replaced
 * @param {string[]} newLines - proposed replacement lines
 * @returns {string[]} replacement lines with adjusted indentation
 */
function restoreIndent(origLines, newLines) {
  if (origLines.length === 0 || newLines.length === 0) return newLines;

  const leadingWs = (s) => s.match(/^\s*/)[0];
  const wantIndent = leadingWs(origLines[0]);
  const haveIndent = leadingWs(newLines[0]);
  if (wantIndent === haveIndent) return newLines;

  const reindented = [];
  for (const line of newLines) {
    if (line.trim() === "") {
      // Blank/whitespace-only lines keep whatever whitespace they have.
      reindented.push(line);
    } else if (line.startsWith(haveIndent)) {
      reindented.push(wantIndent + line.slice(haveIndent.length));
    } else {
      // Line is shallower than the baseline; leave it untouched.
      reindented.push(line);
    }
  }
  return reindented;
}
|
|
42
|
+
|
|
43
|
+
/**
 * Index every line by its 2-char hash tag, keeping only tags that occur
 * exactly once. Short tags collide; ambiguous tags are dropped entirely so
 * global relocation can never land on the wrong duplicate.
 * @param {string[]} lines
 * @returns {Map<string, number>} unique tag → line index (0-based)
 */
function buildHashIndex(lines) {
  const unique = new Map();
  const seenTwice = new Set();
  lines.forEach((text, index) => {
    const tag = lineTag(fnv1a(text));
    if (seenTwice.has(tag)) return;
    if (unique.has(tag)) {
      // Second sighting: evict the first entry and blacklist the tag.
      unique.delete(tag);
      seenTwice.add(tag);
      return;
    }
    unique.set(tag, index);
  });
  return unique;
}
|
|
60
|
+
|
|
61
|
+
/**
 * Resolve a "tag.lineNum" anchor to a 0-based line index, tolerating drift.
 *
 * Resolution strategy, in order:
 *   1. Exact: the tag at lineNum matches.
 *   2. Fuzzy: scan ±5 lines around lineNum for the tag, nearest first.
 *   3. Whitespace-tolerant: within ±5 lines, compare whitespace-stripped
 *      text AND the tag (both must match).
 *   4. Confusable-tolerant: within ±10 lines, re-hash each line after
 *      normalizing unicode hyphens and compare tags.
 *   5. Global relocation: look the tag up in the pre-built unique-tag index.
 *
 * On failure, throws an Error whose message embeds a fresh tag.num snippet
 * of the surrounding lines so the agent can retry without re-reading.
 *
 * @param {string[]} lines - current file lines
 * @param {number} lineNum - 1-based line number from the anchor
 * @param {string} expectedTag - 2-char hash tag from the anchor
 * @param {Map<string, number>} [hashIndex] - unique tag → index map from buildHashIndex
 * @returns {number} 0-based index of the resolved line
 * @throws {Error} when lineNum is out of range or no strategy matches
 */
function findLine(lines, lineNum, expectedTag, hashIndex) {
  const idx = lineNum - 1;
  if (idx < 0 || idx >= lines.length) {
    throw new Error(`Line ${lineNum} out of range (1-${lines.length})`);
  }

  const actual = lineTag(fnv1a(lines[idx]));
  if (actual === expectedTag) return idx;

  // Fuzzy: search +-5, nearest offsets first (+d before -d at each distance)
  for (let d = 1; d <= 5; d++) {
    for (const off of [d, -d]) {
      const c = idx + off;
      if (c >= 0 && c < lines.length && lineTag(fnv1a(lines[c])) === expectedTag) return c;
    }
  }

  // Whitespace-tolerant: same non-whitespace content near idx, tag re-checked
  const stripped = lines[idx].replace(/\s+/g, "");
  if (stripped.length > 0) {
    for (let j = Math.max(0, idx - 5); j <= Math.min(lines.length - 1, idx + 5); j++) {
      if (lines[j].replace(/\s+/g, "") === stripped && lineTag(fnv1a(lines[j])) === expectedTag) return j;
    }
  }

  // Confusable normalization: try matching after normalizing unicode hyphens.
  // NOTE(review): tags match /[a-z2-7]{2}/ (see parseRef) and so never contain
  // confusable hyphens — normalizing expectedTag and the computed tag is a
  // no-op; the meaningful step is hashing the *normalized line* text.
  const normalizedExpected = normalizeConfusables(expectedTag);
  for (let i = Math.max(0, idx - 10); i <= Math.min(lines.length - 1, idx + 10); i++) {
    const normalizedActual = normalizeConfusables(lineTag(fnv1a(normalizeConfusables(lines[i]))));
    if (normalizedActual === normalizedExpected) return i;
  }

  // Global hash relocation: search entire file via pre-built unique-tag index
  if (hashIndex) {
    const relocated = hashIndex.get(expectedTag);
    if (relocated !== undefined) return relocated;
  }

  // Build snippet with fresh hashes so agent can retry without re-reading
  const start = Math.max(0, idx - 5);
  const end = Math.min(lines.length, idx + 6);
  const snippet = lines.slice(start, end).map((line, i) => {
    const num = start + i + 1;
    const tag = lineTag(fnv1a(line));
    return `${tag}.${num}\t${line}`;
  }).join("\n");

  throw new Error(
    `Hash mismatch line ${lineNum}: expected ${expectedTag}, got ${actual}.\n\n` +
    `Current content (lines ${start + 1}-${end}):\n${snippet}\n\n` +
    `Tip: Use updated hashes above for retry.`
  );
}
|
|
118
|
+
|
|
119
|
+
/**
 * Parse an anchor reference of the form "<tag>.<line>", e.g. "ab.12".
 * Tags are exactly two chars from [a-z2-7]; the line part is decimal digits.
 * @param {string} ref
 * @returns {{tag: string, line: number}} parsed tag and 1-based line number
 * @throws {Error} when the ref does not match the expected shape
 */
function parseRef(ref) {
  const match = /^([a-z2-7]{2})\.(\d+)$/.exec(ref.trim());
  if (match === null) throw new Error(`Bad ref: "${ref}". Expected "ab.12"`);
  const [, tag, lineDigits] = match;
  return { tag, line: Number.parseInt(lineDigits, 10) };
}
|
|
127
|
+
|
|
128
|
+
/**
 * Context diff via `diff` package (Myers O(ND) algorithm).
 * Returns compact hunks with ±ctx context lines, or null if no changes.
 *
 * Output line formats:
 *   `-<oldNum>| text`  removed line (numbered against the old content)
 *   `+<newNum>| text`  added line (numbered against the new content)
 *   ` <oldNum>| text`  unchanged context line
 *   `...`              elided unchanged lines
 *
 * @param {string[]} oldLines
 * @param {string[]} newLines
 * @param {number} [ctx=3] - context lines shown around each change
 * @returns {string|null} joined diff text, or null when nothing changed
 */
export function simpleDiff(oldLines, newLines, ctx = 3) {
  // Trailing "\n" makes the final line a complete line for diffLines.
  const oldText = oldLines.join("\n") + "\n";
  const newText = newLines.join("\n") + "\n";
  const parts = diffLines(oldText, newText);

  const out = [];
  let oldNum = 1, newNum = 1;
  // True while the previous part was a change — tells the next unchanged
  // part that it must emit leading context.
  let lastChange = false;

  for (let i = 0; i < parts.length; i++) {
    const part = parts[i];
    // Strip the final newline so split() doesn't yield a trailing empty line.
    const lines = part.value.replace(/\n$/, "").split("\n");

    if (part.added || part.removed) {
      for (const line of lines) {
        if (part.removed) { out.push(`-${oldNum}| ${line}`); oldNum++; }
        else { out.push(`+${newNum}| ${line}`); newNum++; }
      }
      lastChange = true;
    } else {
      // Unchanged part: only emit context when adjacent to a change.
      const next = i < parts.length - 1 && (parts[i + 1].added || parts[i + 1].removed);
      if (lastChange || next) {
        let start = 0, end = lines.length;
        // Only leading context needed: keep just the last ctx lines.
        if (!lastChange) start = Math.max(0, end - ctx);
        // Only trailing context needed: keep just the first ctx lines.
        if (!next && end - start > ctx) end = start + ctx;
        if (start > 0) { out.push(`...`); oldNum += start; newNum += start; }
        for (let k = start; k < end; k++) {
          out.push(` ${oldNum}| ${lines[k]}`);
          oldNum++; newNum++;
        }
        if (end < lines.length) {
          out.push(`...`);
          // Keep counters in sync across the elided tail.
          oldNum += lines.length - end;
          newNum += lines.length - end;
        }
      } else {
        // Far from any change: just advance the counters.
        oldNum += lines.length;
        newNum += lines.length;
      }
      lastChange = false;
    }
  }
  return out.length ? out.join("\n") : null;
}
|
|
176
|
+
|
|
177
|
+
/**
 * Find the longest common substring between `haystack` and `needle`.
 * Only the first 200 chars of `needle` are considered, to bound cost.
 *
 * Uses the classic DP over common-suffix lengths with a rolling 1-D array:
 * O(|haystack| * min(|needle|, 200)) time, O(min(|needle|, 200)) space.
 * (The previous scan advanced its needle cursor independently of the match
 * position, comparing misaligned offsets after any mismatch, so it did not
 * actually find the longest common substring.)
 *
 * @param {string} haystack - text to locate the match in
 * @param {string} needle - text to match against (first 200 chars used)
 * @returns {{pos: number, len: number}} position of the match in `haystack`
 *   and its length; { pos: 0, len: 0 } when either input is empty
 */
function longestCommonSubstring(haystack, needle) {
  if (!haystack || !needle) return { pos: 0, len: 0 };
  const sample = needle.slice(0, 200);
  let bestLen = 0;
  let bestPos = 0;
  // prev[j] = length of the common suffix of haystack[..i-1] and sample[..j]
  let prev = new Array(sample.length).fill(0);
  let curr = new Array(sample.length).fill(0);
  for (let i = 0; i < haystack.length; i++) {
    for (let j = 0; j < sample.length; j++) {
      if (haystack[i] === sample[j]) {
        curr[j] = (j > 0 ? prev[j - 1] : 0) + 1;
        if (curr[j] > bestLen) {
          bestLen = curr[j];
          bestPos = i - curr[j] + 1; // start of this run in haystack
        }
      } else {
        curr[j] = 0;
      }
    }
    [prev, curr] = [curr, prev]; // reuse arrays instead of reallocating
  }
  return { pos: bestPos, len: bestLen };
}
|
|
197
|
+
|
|
198
|
+
/**
 * Build a snippet of up to 10 numbered lines around a character position in
 * normalized (LF-only) content. Used to anchor error messages.
 * @param {string} norm - newline-normalized content
 * @param {number} charPos - 0-based character offset into `norm`
 * @returns {{start: number, end: number, text: string}} 1-based first line,
 *   last line, and the numbered snippet text
 */
function buildSnippet(norm, charPos) {
  const lines = norm.split("\n");
  // Find which line the charPos falls on. If charPos is at/past the end of
  // the content, fall back to the LAST line — previously targetLine stayed 0
  // in that case, showing the top of the file instead of the end.
  let targetLine = lines.length - 1;
  let cumulative = 0;
  for (let i = 0; i < lines.length; i++) {
    cumulative += lines[i].length + 1; // +1 for \n
    if (cumulative > charPos) { targetLine = i; break; }
  }
  const half = 5;
  const start = Math.max(0, targetLine - half);
  const end = Math.min(lines.length, start + 10);
  const snippetLines = [];
  for (let i = start; i < end; i++) {
    snippetLines.push(` ${i + 1}| ${lines[i]}`);
  }
  return { start: start + 1, end, text: snippetLines.join("\n") };
}
|
|
219
|
+
|
|
220
|
+
/**
 * Fuzzy text replacement.
 *
 * Matching order:
 *   1. Exact substring match (after CRLF → LF normalization of all inputs).
 *   2. Confusable-tolerant match: content and needle are both normalized
 *      (unicode hyphens → "-") and matched again; the replacement is still
 *      spliced into the ORIGINAL (un-normalized) content. Normalization is
 *      1:1 per character, so normalized indices are valid in `norm` too.
 *   3. Failure: throws TEXT_NOT_FOUND with a snippet near the closest
 *      partial match (longest-common-substring anchor).
 *
 * Without `all`, a second occurrence raises MULTIPLE_MATCHES rather than
 * silently replacing the first.
 *
 * @param {string} content - full file content
 * @param {string} oldText - text to find
 * @param {string} newText - replacement text
 * @param {boolean} all - replace every occurrence instead of requiring uniqueness
 * @returns {string} updated content (LF line endings)
 * @throws {Error} TEXT_NOT_FOUND or MULTIPLE_MATCHES
 */
function textReplace(content, oldText, newText, all) {
  const norm = content.replace(/\r\n/g, "\n");
  const normOld = oldText.replace(/\r\n/g, "\n");
  const normNew = newText.replace(/\r\n/g, "\n");

  let idx = norm.indexOf(normOld);
  let confusableMatch = false;
  if (idx === -1) {
    // Confusable normalization: try matching after normalizing unicode hyphens
    const normContent = normalizeConfusables(norm);
    const normSearch = normalizeConfusables(normOld);
    const confIdx = normContent.indexOf(normSearch);
    if (confIdx !== -1) {
      idx = confIdx;
      confusableMatch = true;
    } else {
      // No match at all: anchor the error snippet on the longest common
      // substring when meaningful (> 3 chars), else the middle of the file.
      const { pos, len } = longestCommonSubstring(norm, normOld);
      const anchor = len > 3 ? pos : Math.floor(norm.length / 2);
      const snip = buildSnippet(norm, anchor);
      throw new Error(
        `TEXT_NOT_FOUND: "${normOld.slice(0, 100)}..." not found.\n\n` +
        `Nearest content (lines ${snip.start}-${snip.end}):\n${snip.text}\n\n` +
        `Tip: Re-read file or adjust old_text to match actual content.`
      );
    }
  }

  // Determine the match length in original content (same as normOld.length for both paths)
  const matchLen = normOld.length;

  if (all) {
    if (confusableMatch) {
      // Replace all via normalized matching: positions come from the
      // normalized text; slices are taken from the original text.
      const normContent = normalizeConfusables(norm);
      const normSearch = normalizeConfusables(normOld);
      let result = "";
      let pos = 0;
      let searchIdx = normContent.indexOf(normSearch, pos);
      while (searchIdx !== -1) {
        result += norm.slice(pos, searchIdx) + normNew;
        pos = searchIdx + matchLen;
        searchIdx = normContent.indexOf(normSearch, pos);
      }
      result += norm.slice(pos);
      return result;
    }
    return norm.split(normOld).join(normNew);
  }

  // Check for multiple matches (single-replacement mode must be unambiguous)
  if (confusableMatch) {
    const normContent = normalizeConfusables(norm);
    const normSearch = normalizeConfusables(normOld);
    if (normContent.indexOf(normSearch, idx + 1) !== -1) {
      throw new Error("MULTIPLE_MATCHES: Found multiple occurrences. Use all:true or add more context for unique match.");
    }
  } else if (norm.indexOf(normOld, idx + 1) !== -1) {
    throw new Error("MULTIPLE_MATCHES: Found multiple occurrences. Use all:true or add more context for unique match.");
  }

  return norm.slice(0, idx) + normNew + norm.slice(idx + matchLen);
}
|
|
285
|
+
|
|
286
|
+
/**
 * Drop replacement lines that merely echo the range's boundary anchors.
 * Agents frequently repeat the start and/or end anchor lines inside their
 * replacement text; keeping them would duplicate boundary content.
 * Comparison is whitespace-trimmed on both sides.
 * @param {string[]} lines - current file lines
 * @param {number} startIdx - 0-based index of the range's first line
 * @param {number} endIdx - 0-based index of the range's last line
 * @param {string[]} newLines - proposed replacement lines
 * @returns {string[]} a new array with boundary echoes removed
 */
function stripBoundaryEcho(lines, startIdx, endIdx, newLines) {
  const echoesStart =
    newLines.length > 0 && lines[startIdx].trim() === newLines[0].trim();
  const remaining = newLines.slice(echoesStart ? 1 : 0);
  const echoesEnd =
    remaining.length > 0 &&
    lines[endIdx].trim() === remaining[remaining.length - 1].trim();
  return echoesEnd ? remaining.slice(0, -1) : remaining;
}
|
|
303
|
+
|
|
304
|
+
/**
 * Apply edits to a file.
 *
 * Two edit families are supported and applied in this order:
 *   1. Anchor edits (set_line, replace_lines, insert_after) — sorted and
 *      applied bottom-to-top so earlier splices don't shift later anchors.
 *   2. Text edits (replace { old_text, new_text, all }) — applied against
 *      the joined content after all anchor edits.
 *
 * Throws NOOP_EDIT when the combined result is byte-identical to the
 * original. On success the result message carries a compact diff, plus an
 * optional "blast radius" warning listing cross-file dependents of the
 * changed range (silently skipped when no graph DB is available).
 *
 * @param {string} filePath
 * @param {Array} edits - parsed edit objects
 * @param {object} opts - { dryRun }
 * @returns {string} result message with diff
 * @throws {Error} on invalid path, bad anchors, failed text match, or noop
 */
export function editFile(filePath, edits, opts = {}) {
  const real = validatePath(filePath);
  // Normalize CRLF up front; the file is always written back with LF endings.
  const original = readFileSync(real, "utf-8").replace(/\r\n/g, "\n");
  const lines = original.split("\n");
  const origLines = [...lines];

  // Build hash index once for global relocation in findLine
  const hashIndex = buildHashIndex(lines);

  // Separate anchor edits from text-replace edits
  const anchored = [];
  const texts = [];

  for (const e of edits) {
    if (e.set_line || e.replace_lines || e.insert_after) anchored.push(e);
    else if (e.replace) texts.push(e);
    else throw new Error(`Unknown edit type: ${JSON.stringify(e)}`);
  }

  // Sort anchor edits bottom-to-top (descending anchor line) so splices
  // lower in the file never invalidate anchors above them.
  const sorted = anchored.map((e) => {
    let sortKey;
    if (e.set_line) sortKey = parseRef(e.set_line.anchor).line;
    else if (e.replace_lines) sortKey = parseRef(e.replace_lines.start_anchor).line;
    else if (e.insert_after) sortKey = parseRef(e.insert_after.anchor).line;
    return { ...e, _k: sortKey };
  }).sort((a, b) => b._k - a._k);

  // Apply anchor edits
  for (const e of sorted) {
    if (e.set_line) {
      const { tag, line } = parseRef(e.set_line.anchor);
      const idx = findLine(lines, line, tag, hashIndex);
      const txt = e.set_line.new_text;
      // Falsy new_text (except the number 0) deletes the line.
      if (!txt && txt !== 0) {
        lines.splice(idx, 1);
      } else {
        const origLine = [lines[idx]];
        const newLines = restoreIndent(origLine, String(txt).split("\n"));
        lines.splice(idx, 1, ...newLines);
      }
    } else if (e.replace_lines) {
      const s = parseRef(e.replace_lines.start_anchor);
      const en = parseRef(e.replace_lines.end_anchor);
      const si = findLine(lines, s.line, s.tag, hashIndex);
      const ei = findLine(lines, en.line, en.tag, hashIndex);
      const txt = e.replace_lines.new_text;
      // Falsy new_text (except the number 0) deletes the whole range.
      if (!txt && txt !== 0) {
        lines.splice(si, ei - si + 1);
      } else {
        // NOTE(review): this `origLines` intentionally shadows the outer
        // whole-file snapshot within this branch only.
        const origLines = lines.slice(si, ei + 1);
        let newLines = stripBoundaryEcho(lines, si, ei, String(txt).split("\n"));
        newLines = restoreIndent(origLines, newLines);
        lines.splice(si, ei - si + 1, ...newLines);
      }
    } else if (e.insert_after) {
      const { tag, line } = parseRef(e.insert_after.anchor);
      const idx = findLine(lines, line, tag, hashIndex);
      lines.splice(idx + 1, 0, ...e.insert_after.text.split("\n"));
    }
  }

  // Apply text replacements
  let content = lines.join("\n");
  for (const e of texts) {
    if (!e.replace.old_text) throw new Error("replace.old_text required");
    content = textReplace(content, e.replace.old_text, e.replace.new_text || "", e.replace.all || false);
  }

  if (original === content) {
    throw new Error("NOOP_EDIT: All edits produced identical content. File unchanged. Re-read to verify current state.");
  }

  const diff = simpleDiff(origLines, content.split("\n"));

  if (opts.dryRun) {
    let msg = `Dry run: ${filePath} would change (${content.split("\n").length} lines)`;
    if (diff) msg += `\n\nDiff:\n\`\`\`diff\n${diff}\n\`\`\``;
    return msg;
  }

  writeFileSync(real, content, "utf-8");
  let msg = `Updated ${filePath} (${content.split("\n").length} lines)`;
  if (diff) msg += `\n\nDiff:\n\`\`\`diff\n${diff}\n\`\`\``;

  // Blast radius warning (optional — silent if no graph DB)
  try {
    const db = getGraphDB(real);
    const relFile = db ? getRelativePath(real) : null;
    if (db && relFile) {
      // Find changed line range from diff.
      // NOTE(review): this local `diffLines` shadows the `diff` package's
      // diffLines import inside this block.
      const diffLines = diff.split("\n");
      let minLine = Infinity, maxLine = 0;
      for (const dl of diffLines) {
        const m = dl.match(/^[+-](\d+)\|/);
        if (m) { const n = +m[1]; if (n < minLine) minLine = n; if (n > maxLine) maxLine = n; }
      }
      if (minLine <= maxLine) {
        const affected = blastRadius(db, relFile, minLine, maxLine);
        if (affected.length > 0) {
          const list = affected.map(a => `${a.name} (${a.file}:${a.line})`).join(", ");
          msg += `\n\n\u26A0 Blast radius: ${affected.length} dependents in other files\n ${list}`;
        }
      }
    }
  } catch { /* silent — graph enrichment is strictly best-effort */ }

  return msg;
}
|
|
@@ -0,0 +1,208 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Graph enrichment for hex-line tools.
|
|
3
|
+
*
|
|
4
|
+
* Reads .codegraph/index.db (created by hex-graph-mcp) in readonly mode.
|
|
5
|
+
* Provides symbol annotations for outline, read_file, grep_search, edit_file.
|
|
6
|
+
*
|
|
7
|
+
* Lazy singleton: DB opened once per session, reused across calls.
|
|
8
|
+
* Graceful fallback: if better-sqlite3 or DB missing → returns null silently.
|
|
9
|
+
*/
|
|
10
|
+
|
|
11
|
+
import { existsSync } from "node:fs";
|
|
12
|
+
import { join, dirname, relative } from "node:path";
|
|
13
|
+
import { createRequire } from "node:module";
|
|
14
|
+
|
|
15
|
+
// Lazily-opened readonly graph DB handle; cached for the whole session.
let _db = null;

// Latched to true after a failed open attempt (missing better-sqlite3,
// unreadable DB file) so later getGraphDB calls bail out immediately.
let _unavailable = false;
|
|
18
|
+
|
|
19
|
+
/**
 * Get readonly graph DB for a project root.
 * Returns null if DB missing or better-sqlite3 not installed.
 *
 * Lazy singleton: the first successful open is cached in `_db`; any failure
 * latches `_unavailable` so later calls short-circuit instead of retrying.
 *
 * NOTE(review): the cache is per-process, not per-project — the first
 * project's DB wins for the rest of the session.
 *
 * @param {string} filePath - any file path inside the project
 * @returns {object|null} better-sqlite3 Database instance or null
 */
export function getGraphDB(filePath) {
  if (_unavailable) return null;
  if (_db) return _db;

  try {
    const projectRoot = findProjectRoot(filePath);
    if (!projectRoot) return null;

    const dbPath = join(projectRoot, ".codegraph", "index.db");
    if (!existsSync(dbPath)) return null;

    // createRequire lets this ES module load the optional CJS dependency.
    const require = createRequire(import.meta.url);
    const Database = require("better-sqlite3");
    _db = new Database(dbPath, { readonly: true });

    return _db;
  } catch {
    // Deliberate best-effort: graph enrichment is optional, so any failure
    // disables it silently rather than breaking the calling tool.
    _unavailable = true;
    return null;
  }
}
|
|
46
|
+
|
|
47
|
+
/**
 * Produce the "[N↓ M↑]" call-count annotation for a named symbol:
 * N = outgoing call edges (callees), M = incoming call edges (callers).
 * @param {object} db - better-sqlite3 instance
 * @param {string} file - relative file path
 * @param {string} name - symbol name
 * @returns {string|null} annotation, or null when the symbol is unknown,
 *   has no call edges at all, or any DB access fails
 */
export function symbolAnnotation(db, file, name) {
  try {
    const lookup = db.prepare(
      "SELECT id FROM nodes WHERE file = ? AND name = ? AND kind != 'import' LIMIT 1"
    );
    const node = lookup.get(file, name);
    if (!node) return null;

    const countEdges = (sql) => db.prepare(sql).get(node.id).c;
    const callees = countEdges(
      "SELECT COUNT(*) as c FROM edges WHERE source_id = ? AND kind = 'calls'"
    );
    const callers = countEdges(
      "SELECT COUNT(*) as c FROM edges WHERE target_id = ? AND kind = 'calls'"
    );

    return callees === 0 && callers === 0
      ? null
      : `[${callees}\u2193 ${callers}\u2191]`;
  } catch {
    return null;
  }
}
|
|
74
|
+
|
|
75
|
+
/**
 * Collect call-count annotations for every symbol defined in a file,
 * ordered by starting line (used for the read_file "Graph:" header).
 * @param {object} db - better-sqlite3 instance
 * @param {string} file - relative file path
 * @returns {Array<{name, kind, callees, callers}>} empty array on any failure
 */
export function fileAnnotations(db, file) {
  try {
    const symbols = db.prepare(
      "SELECT id, name, kind FROM nodes WHERE file = ? AND kind != 'import' ORDER BY line_start"
    ).all(file);

    return symbols.map(({ id, name, kind }) => ({
      name,
      kind,
      callees: db.prepare(
        "SELECT COUNT(*) as c FROM edges WHERE source_id = ? AND kind = 'calls'"
      ).get(id).c,
      callers: db.prepare(
        "SELECT COUNT(*) as c FROM edges WHERE target_id = ? AND kind = 'calls'"
      ).get(id).c,
    }));
  } catch {
    return [];
  }
}
|
|
107
|
+
|
|
108
|
+
/**
 * Blast radius: symbols in OTHER files that call anything overlapping the
 * given line range of `file`. Deduplicated by (file, name); capped at 10.
 * @param {object} db - better-sqlite3 instance
 * @param {string} file - relative file path
 * @param {number} startLine
 * @param {number} endLine
 * @returns {Array<{name, file, line}>} affected symbols (max 10); empty on failure
 */
export function blastRadius(db, file, startLine, endLine) {
  try {
    const touched = db.prepare(
      "SELECT id, name FROM nodes WHERE file = ? AND kind != 'import' AND line_start <= ? AND line_end >= ?"
    ).all(file, endLine, startLine);
    if (touched.length === 0) return [];

    const dependentsOf = db.prepare(
      "SELECT n.name, n.file, n.line_start FROM edges e JOIN nodes n ON n.id = e.source_id WHERE e.target_id = ? AND e.kind = 'calls'"
    );

    const seenKeys = new Set();
    const impacted = [];
    for (const { id } of touched) {
      for (const dep of dependentsOf.all(id)) {
        if (dep.file === file) continue; // same-file callers are not "blast radius"
        const key = `${dep.file}:${dep.name}`;
        if (seenKeys.has(key)) continue;
        seenKeys.add(key);
        impacted.push({ name: dep.name, file: dep.file, line: dep.line_start });
      }
    }
    return impacted.slice(0, 10);
  } catch {
    return [];
  }
}
|
|
146
|
+
|
|
147
|
+
/**
 * Annotate a grep match with the enclosing symbol's kind and call counts,
 * e.g. "[fn 5↓ 3↑]" or just "[fn]" when the symbol has no call edges.
 * @param {object} db - better-sqlite3 instance
 * @param {string} file - relative file path
 * @param {number} line - 1-based line number of the match
 * @returns {string|null} annotation, or null when no symbol encloses the
 *   line or any DB access fails
 */
export function matchAnnotation(db, file, line) {
  try {
    const node = db.prepare(
      "SELECT id, name, kind FROM nodes WHERE file = ? AND kind != 'import' AND line_start <= ? AND line_end >= ? LIMIT 1"
    ).get(file, line, line);
    if (!node) return null;

    const abbrev = { function: "fn", class: "cls", method: "mtd", variable: "var" };
    const kindShort = abbrev[node.kind] || node.kind;

    const [callees, callers] = [
      "SELECT COUNT(*) as c FROM edges WHERE source_id = ? AND kind = 'calls'",
      "SELECT COUNT(*) as c FROM edges WHERE target_id = ? AND kind = 'calls'",
    ].map((sql) => db.prepare(sql).get(node.id).c);

    if (callees === 0 && callers === 0) return `[${kindShort}]`;
    return `[${kindShort} ${callees}\u2193 ${callers}\u2191]`;
  } catch {
    return null;
  }
}
|
|
176
|
+
|
|
177
|
+
/**
 * Convert an absolute path into the project-root-relative form used by the
 * graph DB (forward slashes on every platform).
 * @param {string} filePath - absolute file path
 * @returns {string|null} relative path with forward slashes, or null when
 *   no project root can be located
 */
export function getRelativePath(filePath) {
  const projectRoot = findProjectRoot(filePath);
  if (projectRoot === null) return null;
  const rel = relative(projectRoot, filePath);
  return rel.replace(/\\/g, "/");
}
|
|
187
|
+
|
|
188
|
+
// --- Helpers ---
|
|
189
|
+
|
|
190
|
+
/**
 * Walk up from a file's directory looking for the project root.
 * A directory containing .codegraph/index.db is the strongest signal and is
 * preferred; the nearest .git directory is the fallback. Each walk is capped
 * at 10 parent levels.
 * @param {string} filePath
 * @returns {string|null} project root directory, or null if none found
 */
function findProjectRoot(filePath) {
  const ascend = (...marker) => {
    let dir = dirname(filePath);
    for (let depth = 0; depth < 10; depth++) {
      if (existsSync(join(dir, ...marker))) return dir;
      const parent = dirname(dir);
      if (parent === dir) break; // reached the filesystem root
      dir = parent;
    }
    return null;
  };
  return ascend(".codegraph", "index.db") ?? ascend(".git");
}
|