@baitong-dev/filesystem-mcp 0.0.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/descriptions/edit.txt +10 -0
- package/descriptions/read.txt +13 -0
- package/descriptions/write.txt +7 -0
- package/dist/edit.d.ts +14 -0
- package/dist/edit.js +591 -0
- package/dist/index.d.ts +2 -0
- package/dist/index.js +30 -0
- package/dist/read.d.ts +2 -0
- package/dist/read.js +293 -0
- package/dist/write.d.ts +2 -0
- package/dist/write.js +85 -0
- package/package.json +37 -0
|
@@ -0,0 +1,10 @@
|
|
|
1
|
+
Performs exact string replacements in files.
|
|
2
|
+
|
|
3
|
+
Usage:
|
|
4
|
+
- You must use your `Read` tool at least once in the conversation before editing. This tool will error if you attempt an edit without reading the file.
|
|
5
|
+
- When editing text from Read tool output, ensure you preserve the exact indentation (tabs/spaces) as it appears AFTER the line number prefix. The line number prefix format is: line number + colon + space (e.g., `1: `). Everything after that space is the actual file content to match. Never include any part of the line number prefix in the oldString or newString.
|
|
6
|
+
- ALWAYS prefer editing existing files in the codebase. NEVER write new files unless explicitly required.
|
|
7
|
+
- Only use emojis if the user explicitly requests it. Avoid adding emojis to files unless asked.
|
|
8
|
+
- The edit will FAIL if `oldString` is not found in the file with an error "oldString not found in content".
|
|
9
|
+
- The edit will FAIL if `oldString` is found multiple times in the file with an error "Found multiple matches for oldString. Provide more surrounding lines in oldString to identify the correct match." Either provide a larger string with more surrounding context to make it unique or use `replaceAll` to change every instance of `oldString`.
|
|
10
|
+
- Use `replaceAll` for replacing and renaming strings across the file. This parameter is useful if you want to rename a variable for instance.
|
|
@@ -0,0 +1,13 @@
|
|
|
1
|
+
Read a file or directory from the local filesystem. If the path does not exist, an error is returned.
|
|
2
|
+
|
|
3
|
+
Usage:
|
|
4
|
+
- The filePath parameter should be an absolute path.
|
|
5
|
+
- By default, this tool returns up to 2000 lines from the start of the file.
|
|
6
|
+
- The offset parameter is the line number to start from (1-indexed).
|
|
7
|
+
- To read later sections, call this tool again with a larger offset.
|
|
8
|
+
- Use the grep tool to find specific content in large files or files with long lines.
|
|
9
|
+
- If you are unsure of the correct file path, use the glob tool to look up filenames by glob pattern.
|
|
10
|
+
- Contents are returned with each line prefixed by its line number as `<line>: <content>`. For example, if a file has contents "foo\n", you will receive "1: foo\n". For directories, entries are returned one per line (without line numbers) with a trailing `/` for subdirectories.
|
|
11
|
+
- Any line longer than 2000 characters is truncated.
|
|
12
|
+
- Call this tool in parallel when you know there are multiple files you want to read.
|
|
13
|
+
- Avoid tiny repeated slices (30 line chunks). If you need more context, read a larger window.
|
|
@@ -0,0 +1,7 @@
|
|
|
1
|
+
Writes a file to the local filesystem.
|
|
2
|
+
|
|
3
|
+
Usage:
|
|
4
|
+
- This tool will overwrite the existing file if there is one at the provided path.
|
|
5
|
+
- If this is an existing file, you MUST use the Read tool first to read the file's contents. This tool will fail if you did not read the file first.
|
|
6
|
+
- ALWAYS prefer editing existing files in the codebase. NEVER write new files unless explicitly required.
|
|
7
|
+
- Only use emojis if the user explicitly requests it. Avoid writing emojis to files unless asked.
|
package/dist/edit.d.ts
ADDED
|
@@ -0,0 +1,14 @@
|
|
|
1
|
+
import { McpServer } from '@modelcontextprotocol/sdk/server/mcp.js';
/**
 * A matching strategy: given the full file content and the text to find,
 * yields candidate substrings of `content` that correspond to `find`.
 */
export type Replacer = (content: string, find: string) => Generator<string, void, unknown>;
/** Exact-string match: yields `find` verbatim. */
export declare const SimpleReplacer: Replacer;
/** Line-by-line match ignoring leading/trailing whitespace on each line. */
export declare const LineTrimmedReplacer: Replacer;
/** Anchors on the first/last lines of `find`; scores middle lines by similarity. */
export declare const BlockAnchorReplacer: Replacer;
/** Match with all whitespace runs collapsed to single spaces. */
export declare const WhitespaceNormalizedReplacer: Replacer;
/** Match ignoring a uniform leading-indentation offset. */
export declare const IndentationFlexibleReplacer: Replacer;
/** Match after decoding common backslash escapes (\n, \t, quotes, ...). */
export declare const EscapeNormalizedReplacer: Replacer;
/** Yields every exact occurrence of `find` (supports replace-all semantics). */
export declare const MultiOccurrenceReplacer: Replacer;
/** Match the trimmed form of `find` when it carries surrounding whitespace. */
export declare const TrimmedBoundaryReplacer: Replacer;
/** Anchor-based block match requiring most middle lines to agree when trimmed. */
export declare const ContextAwareReplacer: Replacer;
/** Strips the common leading indentation from a unified diff's content lines. */
export declare function trimDiff(diff: string): string;
/**
 * Replaces `oldString` with `newString` using the strategy chain above.
 * Throws when nothing matches or when a match is ambiguous and
 * `replaceAll` is false.
 */
export declare function replace(content: string, oldString: string, newString: string, replaceAll?: boolean): string;
/** Registers the `edit` tool on the given MCP server. */
export declare function registerEditTool(server: McpServer): void;
|
package/dist/edit.js
ADDED
|
@@ -0,0 +1,591 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
var __importDefault = (this && this.__importDefault) || function (mod) {
|
|
3
|
+
return (mod && mod.__esModule) ? mod : { "default": mod };
|
|
4
|
+
};
|
|
5
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
6
|
+
exports.ContextAwareReplacer = exports.TrimmedBoundaryReplacer = exports.MultiOccurrenceReplacer = exports.EscapeNormalizedReplacer = exports.IndentationFlexibleReplacer = exports.WhitespaceNormalizedReplacer = exports.BlockAnchorReplacer = exports.LineTrimmedReplacer = exports.SimpleReplacer = void 0;
|
|
7
|
+
exports.trimDiff = trimDiff;
|
|
8
|
+
exports.replace = replace;
|
|
9
|
+
exports.registerEditTool = registerEditTool;
|
|
10
|
+
const edit_txt_1 = __importDefault(require("../descriptions/edit.txt"));
|
|
11
|
+
const zod_1 = __importDefault(require("zod"));
|
|
12
|
+
const path_1 = __importDefault(require("path"));
|
|
13
|
+
const mcp_helpers_1 = require("@baitong-dev/mcp-helpers");
|
|
14
|
+
const diff_1 = require("diff");
|
|
15
|
+
// const MAX_DIAGNOSTICS_PER_FILE = 20
|
|
16
|
+
// Converts CRLF ("\r\n") sequences to LF ("\n") so diff generation
// compares content consistently across platforms.
function normalizeLineEndings(text) {
    return text.split('\r\n').join('\n');
}
|
|
19
|
+
// Similarity thresholds for block anchor fallback matching.
// 0.0 means the similarity gate is effectively disabled when only one
// candidate block exists — the first/last line anchors alone are trusted.
const SINGLE_CANDIDATE_SIMILARITY_THRESHOLD = 0.0;
// With several candidate blocks, the best one must clear this average
// middle-line similarity to be accepted.
const MULTIPLE_CANDIDATES_SIMILARITY_THRESHOLD = 0.3;
|
|
22
|
+
/**
 * Levenshtein edit distance between two strings (insertions, deletions,
 * and substitutions each cost 1). Used to score how similar two trimmed
 * lines are during fuzzy block matching.
 */
function levenshtein(a, b) {
    // An empty string's distance to the other is simply its length.
    if (a === '' || b === '') {
        return Math.max(a.length, b.length);
    }
    // Rolling two-row dynamic program: prev = row i-1, curr = row i.
    let prev = Array.from({ length: b.length + 1 }, (_, j) => j);
    for (let i = 1; i <= a.length; i++) {
        const curr = [i];
        for (let j = 1; j <= b.length; j++) {
            const substitutionCost = a[i - 1] === b[j - 1] ? 0 : 1;
            curr[j] = Math.min(prev[j] + 1, curr[j - 1] + 1, prev[j - 1] + substitutionCost);
        }
        prev = curr;
    }
    return prev[b.length];
}
|
|
39
|
+
// Trivial strategy: the only candidate is the search string itself,
// matched verbatim. Content is unused.
const SimpleReplacer = function* (_content, find) {
    yield find;
};
exports.SimpleReplacer = SimpleReplacer;
|
|
43
|
+
// Matches `find` line-by-line, comparing each line with surrounding
// whitespace trimmed, and yields the original (untrimmed) text of every
// matching region so the caller can replace it verbatim.
const LineTrimmedReplacer = function* (content, find) {
    const contentLines = content.split('\n');
    const targetLines = find.split('\n');
    // A trailing newline in `find` leaves an empty final entry; drop it.
    if (targetLines[targetLines.length - 1] === '') {
        targetLines.pop();
    }
    // Character offset at which each content line begins (accounts for
    // the '\n' after every line).
    const lineStarts = [0];
    for (const line of contentLines) {
        lineStarts.push(lineStarts[lineStarts.length - 1] + line.length + 1);
    }
    for (let start = 0; start + targetLines.length <= contentLines.length; start++) {
        const allMatch = targetLines.every((target, offset) => contentLines[start + offset].trim() === target.trim());
        if (!allMatch) {
            continue;
        }
        const lastLine = start + targetLines.length - 1;
        const from = lineStarts[start];
        const to = lineStarts[lastLine] + contentLines[lastLine].length;
        yield content.substring(from, to);
    }
};
exports.LineTrimmedReplacer = LineTrimmedReplacer;
|
|
76
|
+
/**
 * Fuzzy matcher that anchors on the first and last lines of `find`
 * (compared trimmed) and scores the middle lines by Levenshtein
 * similarity. Yields the matched region of `content`, if any.
 * Requires at least 3 search lines so both anchors are meaningful.
 */
const BlockAnchorReplacer = function* (content, find) {
    const originalLines = content.split('\n');
    const searchLines = find.split('\n');
    if (searchLines.length < 3) {
        return;
    }
    // Drop the empty trailing entry produced by a terminal newline.
    if (searchLines[searchLines.length - 1] === '') {
        searchLines.pop();
    }
    const firstLineSearch = searchLines[0].trim();
    const lastLineSearch = searchLines[searchLines.length - 1].trim();
    const searchBlockSize = searchLines.length;
    // Collect all candidate positions where both anchors match
    const candidates = [];
    for (let i = 0; i < originalLines.length; i++) {
        if (originalLines[i].trim() !== firstLineSearch) {
            continue;
        }
        // Look for the matching last line after this first line
        // (j starts at i + 2 so the block spans at least 3 lines).
        for (let j = i + 2; j < originalLines.length; j++) {
            if (originalLines[j].trim() === lastLineSearch) {
                candidates.push({ startLine: i, endLine: j });
                break; // Only match the first occurrence of the last line
            }
        }
    }
    // Return immediately if no candidates
    if (candidates.length === 0) {
        return;
    }
    // Handle single candidate scenario (using relaxed threshold)
    if (candidates.length === 1) {
        const { startLine, endLine } = candidates[0];
        const actualBlockSize = endLine - startLine + 1;
        let similarity = 0;
        const linesToCheck = Math.min(searchBlockSize - 2, actualBlockSize - 2); // Middle lines only
        if (linesToCheck > 0) {
            for (let j = 1; j < searchBlockSize - 1 && j < actualBlockSize - 1; j++) {
                const originalLine = originalLines[startLine + j].trim();
                const searchLine = searchLines[j].trim();
                const maxLen = Math.max(originalLine.length, searchLine.length);
                if (maxLen === 0) {
                    continue;
                }
                const distance = levenshtein(originalLine, searchLine);
                similarity += (1 - distance / maxLen) / linesToCheck;
                // Exit early when threshold is reached
                // NOTE(review): with SINGLE_CANDIDATE_SIMILARITY_THRESHOLD = 0.0
                // this condition is true from the first iteration, so the loop
                // runs at most once and the score is effectively unused here.
                if (similarity >= SINGLE_CANDIDATE_SIMILARITY_THRESHOLD) {
                    break;
                }
            }
        }
        else {
            // No middle lines to compare, just accept based on anchors
            similarity = 1.0;
        }
        if (similarity >= SINGLE_CANDIDATE_SIMILARITY_THRESHOLD) {
            // Convert the line range to character offsets within `content`
            // (+1 per preceding line for its '\n').
            let matchStartIndex = 0;
            for (let k = 0; k < startLine; k++) {
                matchStartIndex += originalLines[k].length + 1;
            }
            let matchEndIndex = matchStartIndex;
            for (let k = startLine; k <= endLine; k++) {
                matchEndIndex += originalLines[k].length;
                if (k < endLine) {
                    matchEndIndex += 1; // Add newline character except for the last line
                }
            }
            yield content.substring(matchStartIndex, matchEndIndex);
        }
        return;
    }
    // Calculate similarity for multiple candidates
    let bestMatch = null;
    let maxSimilarity = -1;
    for (const candidate of candidates) {
        const { startLine, endLine } = candidate;
        const actualBlockSize = endLine - startLine + 1;
        let similarity = 0;
        const linesToCheck = Math.min(searchBlockSize - 2, actualBlockSize - 2); // Middle lines only
        if (linesToCheck > 0) {
            for (let j = 1; j < searchBlockSize - 1 && j < actualBlockSize - 1; j++) {
                const originalLine = originalLines[startLine + j].trim();
                const searchLine = searchLines[j].trim();
                const maxLen = Math.max(originalLine.length, searchLine.length);
                if (maxLen === 0) {
                    continue;
                }
                const distance = levenshtein(originalLine, searchLine);
                similarity += 1 - distance / maxLen;
            }
            similarity /= linesToCheck; // Average similarity
        }
        else {
            // No middle lines to compare, just accept based on anchors
            similarity = 1.0;
        }
        if (similarity > maxSimilarity) {
            maxSimilarity = similarity;
            bestMatch = candidate;
        }
    }
    // Threshold judgment
    if (maxSimilarity >= MULTIPLE_CANDIDATES_SIMILARITY_THRESHOLD && bestMatch) {
        const { startLine, endLine } = bestMatch;
        let matchStartIndex = 0;
        for (let k = 0; k < startLine; k++) {
            matchStartIndex += originalLines[k].length + 1;
        }
        let matchEndIndex = matchStartIndex;
        for (let k = startLine; k <= endLine; k++) {
            matchEndIndex += originalLines[k].length;
            if (k < endLine) {
                matchEndIndex += 1;
            }
        }
        yield content.substring(matchStartIndex, matchEndIndex);
    }
};
exports.BlockAnchorReplacer = BlockAnchorReplacer;
|
|
196
|
+
// Matches `find` with all whitespace runs collapsed to single spaces.
// Tries each single line first (whole-line equality, then a
// whitespace-flexible substring regex), then same-height multi-line
// windows.
const WhitespaceNormalizedReplacer = function* (content, find) {
    const collapse = (text) => text.replace(/\s+/g, ' ').trim();
    const wanted = collapse(find);
    const lines = content.split('\n');
    for (const line of lines) {
        if (collapse(line) === wanted) {
            yield line;
            continue;
        }
        // Full line did not match; look for a whitespace-flexible substring.
        if (!collapse(line).includes(wanted)) {
            continue;
        }
        const words = find.trim().split(/\s+/);
        if (words.length === 0) {
            continue;
        }
        // Escape regex metacharacters in each word, then allow any
        // whitespace between words.
        const escaped = words.map(word => word.replace(/[.*+?^${}()|[\]\\]/g, '\\$&'));
        try {
            const matched = line.match(new RegExp(escaped.join('\\s+')));
            if (matched) {
                yield matched[0];
            }
        }
        catch {
            // Invalid regex pattern, skip this line.
        }
    }
    // Multi-line: compare each window of the same height, collapsed.
    const findLines = find.split('\n');
    if (findLines.length > 1) {
        for (let i = 0; i + findLines.length <= lines.length; i++) {
            const window = lines.slice(i, i + findLines.length).join('\n');
            if (collapse(window) === wanted) {
                yield window;
            }
        }
    }
};
exports.WhitespaceNormalizedReplacer = WhitespaceNormalizedReplacer;
|
|
242
|
+
// Matches `find` ignoring a uniform leading-indentation difference: the
// search text and each same-height window of content are compared after
// removing their own common minimum indentation.
const IndentationFlexibleReplacer = function* (content, find) {
    const dedent = (text) => {
        const textLines = text.split('\n');
        const indented = textLines.filter(line => line.trim().length > 0);
        if (indented.length === 0) {
            return text;
        }
        // Smallest leading-whitespace width among non-blank lines.
        let minIndent = Infinity;
        for (const line of indented) {
            const leading = line.match(/^(\s*)/);
            minIndent = Math.min(minIndent, leading ? leading[1].length : 0);
        }
        return textLines
            .map(line => (line.trim().length === 0 ? line : line.slice(minIndent)))
            .join('\n');
    };
    const wanted = dedent(find);
    const contentLines = content.split('\n');
    const height = find.split('\n').length;
    for (let i = 0; i + height <= contentLines.length; i++) {
        const window = contentLines.slice(i, i + height).join('\n');
        if (dedent(window) === wanted) {
            yield window;
        }
    }
};
exports.IndentationFlexibleReplacer = IndentationFlexibleReplacer;
|
|
265
|
+
// Matches `find` after decoding common backslash escapes (\n, \t, \r,
// quotes, backtick, backslash, backslash-newline, \$) — both in the
// search string and in candidate blocks of the content.
const EscapeNormalizedReplacer = function* (content, find) {
    const DECODED = {
        n: '\n',
        t: '\t',
        r: '\r',
        "'": "'",
        '"': '"',
        '`': '`',
        '\\': '\\',
        '\n': '\n',
        $: '$'
    };
    const unescapeString = (str) => str.replace(/\\(n|t|r|'|"|`|\\|\n|\$)/g, (whole, escapedChar) => DECODED[escapedChar] ?? whole);
    const unescapedFind = unescapeString(find);
    // Direct hit: the decoded search string appears verbatim.
    if (content.includes(unescapedFind)) {
        yield unescapedFind;
    }
    // Otherwise scan same-height blocks whose decoded form matches the
    // decoded search string.
    const lines = content.split('\n');
    const findLines = unescapedFind.split('\n');
    for (let i = 0; i + findLines.length <= lines.length; i++) {
        const block = lines.slice(i, i + findLines.length).join('\n');
        if (unescapeString(block) === unescapedFind) {
            yield block;
        }
    }
};
exports.EscapeNormalizedReplacer = EscapeNormalizedReplacer;
|
|
309
|
+
// Yields the exact search string once per occurrence in `content`,
// letting the replace() caller decide between single-match and
// replace-all semantics.
const MultiOccurrenceReplacer = function* (content, find) {
    // Guard: indexOf('') never returns -1 (an empty needle matches at
    // every position), so an empty `find` would spin forever below.
    // Treat it as "no occurrences".
    if (find === '') {
        return;
    }
    let startIndex = 0;
    while (true) {
        const index = content.indexOf(find, startIndex);
        if (index === -1)
            break;
        yield find;
        startIndex = index + find.length;
    }
};
exports.MultiOccurrenceReplacer = MultiOccurrenceReplacer;
|
|
322
|
+
// When `find` carries leading/trailing whitespace, also try matching its
// trimmed form, plus any same-height window of content whose trimmed
// text equals it.
const TrimmedBoundaryReplacer = function* (content, find) {
    const trimmedFind = find.trim();
    // Nothing to gain when trimming changes nothing.
    if (trimmedFind === find) {
        return;
    }
    if (content.includes(trimmedFind)) {
        yield trimmedFind;
    }
    const lines = content.split('\n');
    const height = find.split('\n').length;
    for (let i = 0; i + height <= lines.length; i++) {
        const window = lines.slice(i, i + height).join('\n');
        if (window.trim() === trimmedFind) {
            yield window;
        }
    }
};
exports.TrimmedBoundaryReplacer = TrimmedBoundaryReplacer;
|
|
343
|
+
/**
 * Matches blocks by their first and last lines (trimmed) as context
 * anchors, accepting a block only when it has exactly as many lines as
 * `find` and at least half of its non-empty middle lines match when
 * trimmed. At most one last-line candidate is considered per anchor.
 */
const ContextAwareReplacer = function* (content, find) {
    const findLines = find.split('\n');
    if (findLines.length < 3) {
        // Need at least 3 lines to have meaningful context
        return;
    }
    // Remove trailing empty line if present
    if (findLines[findLines.length - 1] === '') {
        findLines.pop();
    }
    const contentLines = content.split('\n');
    // Extract first and last lines as context anchors
    const firstLine = findLines[0].trim();
    const lastLine = findLines[findLines.length - 1].trim();
    // Find blocks that start and end with the context anchors
    for (let i = 0; i < contentLines.length; i++) {
        if (contentLines[i].trim() !== firstLine)
            continue;
        // Look for the matching last line (j starts at i + 2 so the
        // block spans at least three lines).
        for (let j = i + 2; j < contentLines.length; j++) {
            if (contentLines[j].trim() === lastLine) {
                // Found a potential context block
                const blockLines = contentLines.slice(i, j + 1);
                const block = blockLines.join('\n');
                // Check if the middle content has reasonable similarity
                // (simple heuristic: at least 50% of non-empty lines should match when trimmed)
                if (blockLines.length === findLines.length) {
                    let matchingLines = 0;
                    let totalNonEmptyLines = 0;
                    for (let k = 1; k < blockLines.length - 1; k++) {
                        const blockLine = blockLines[k].trim();
                        const findLine = findLines[k].trim();
                        if (blockLine.length > 0 || findLine.length > 0) {
                            totalNonEmptyLines++;
                            if (blockLine === findLine) {
                                matchingLines++;
                            }
                        }
                    }
                    if (totalNonEmptyLines === 0 || matchingLines / totalNonEmptyLines >= 0.5) {
                        yield block;
                        break; // Only match the first occurrence
                    }
                }
                // Whether or not the block was accepted, only the first
                // last-line candidate after each anchor is considered.
                break;
            }
        }
    }
};
exports.ContextAwareReplacer = ContextAwareReplacer;
|
|
393
|
+
/**
 * Removes the common leading indentation shared by every non-blank
 * content line (+ / - / context) of a unified diff, leaving the
 * "---" / "+++" headers and all other lines untouched.
 */
function trimDiff(diff) {
    const lines = diff.split('\n');
    // A content line starts with '+', '-', or ' ' but is not a file header.
    const isContentLine = (line) => (line.startsWith('+') || line.startsWith('-') || line.startsWith(' ')) &&
        !line.startsWith('---') &&
        !line.startsWith('+++');
    const contentLines = lines.filter(isContentLine);
    if (contentLines.length === 0)
        return diff;
    // Smallest indentation across all non-blank content-line bodies.
    let minIndent = Infinity;
    for (const line of contentLines) {
        const body = line.slice(1);
        if (body.trim().length > 0) {
            const leading = body.match(/^(\s*)/);
            if (leading)
                minIndent = Math.min(minIndent, leading[1].length);
        }
    }
    if (minIndent === Infinity || minIndent === 0)
        return diff;
    // Keep the +/-/space marker, strip minIndent characters of body indent.
    return lines
        .map(line => (isContentLine(line) ? line[0] + line.slice(1 + minIndent) : line))
        .join('\n');
}
|
|
423
|
+
/**
 * Replaces `oldString` with `newString` in `content`, trying a sequence
 * of matching strategies ordered from exact to most permissive; the
 * first unambiguous hit wins.
 *
 * @param content    full file text to edit
 * @param oldString  text to locate (possibly approximately)
 * @param newString  replacement text; must differ from oldString
 * @param replaceAll replace every occurrence of the matched string
 * @returns the edited content
 * @throws if oldString === newString, if no strategy finds a match, or
 *   if every match found is ambiguous and replaceAll is false
 */
function replace(content, oldString, newString, replaceAll = false) {
    if (oldString === newString) {
        throw new Error('No changes to apply: oldString and newString are identical.');
    }
    let notFound = true;
    for (const replacer of [
        exports.SimpleReplacer,
        exports.LineTrimmedReplacer,
        exports.BlockAnchorReplacer,
        exports.WhitespaceNormalizedReplacer,
        exports.IndentationFlexibleReplacer,
        exports.EscapeNormalizedReplacer,
        exports.TrimmedBoundaryReplacer,
        exports.ContextAwareReplacer,
        exports.MultiOccurrenceReplacer
    ]) {
        for (const search of replacer(content, oldString)) {
            const index = content.indexOf(search);
            if (index === -1)
                continue;
            // At least one strategy produced a present candidate.
            notFound = false;
            if (replaceAll) {
                return content.replaceAll(search, newString);
            }
            // Ambiguous candidate (appears more than once): try the next
            // candidate / strategy rather than guessing which occurrence.
            const lastIndex = content.lastIndexOf(search);
            if (index !== lastIndex)
                continue;
            return content.substring(0, index) + newString + content.substring(index + search.length);
        }
    }
    if (notFound) {
        throw new Error('Could not find oldString in the file. It must match exactly, including whitespace, indentation, and line endings.');
    }
    // Candidates were found but every one was ambiguous.
    throw new Error('Found multiple matches for oldString. Provide more surrounding context to make the match unique.');
}
|
|
458
|
+
/**
 * Registers the `edit` tool on the MCP server. The tool performs an
 * exact (or fuzzily recovered) string replacement in a file under a
 * per-file lock, enforcing read-before-edit via FileTime, and returns a
 * unified diff plus addition/deletion counts in its structured output.
 *
 * NOTE(review): inputSchema is passed as z.object(...); some MCP SDK
 * versions expect a raw Zod shape object here — confirm against the SDK
 * version in package.json.
 */
function registerEditTool(server) {
    server.registerTool('edit', {
        description: edit_txt_1.default,
        inputSchema: zod_1.default.object({
            filePath: zod_1.default.string().describe('The absolute path to the file to modify'),
            oldString: zod_1.default.string().describe('The text to replace'),
            newString: zod_1.default
                .string()
                .describe('The text to replace it with (must be different from oldString)'),
            replaceAll: zod_1.default
                .boolean()
                .optional()
                .describe('Replace all occurrences of oldString (default false)'),
            toolInfo: mcp_helpers_1.Tool.Info
        })
    }, async (args) => {
        if (!args.filePath) {
            throw new Error('filePath is required');
        }
        if (args.oldString === args.newString) {
            throw new Error('No changes to apply: oldString and newString are identical.');
        }
        // Relative paths are resolved against the MCP workspace root.
        const filePath = path_1.default.isAbsolute(args.filePath)
            ? args.filePath
            : path_1.default.join(mcp_helpers_1.MCP_WORKSPACE_DIR, args.filePath);
        // await assertExternalDirectory(ctx, filePath)
        let diff = '';
        let contentOld = '';
        let contentNew = '';
        const sessionID = args.toolInfo.sessionId;
        // Serialize concurrent edits to the same file.
        await mcp_helpers_1.FileTime.withLock(filePath, async () => {
            if (args.oldString === '') {
                // Empty oldString means "create/overwrite the file with newString".
                // NOTE(review): contentOld stays '' here, so for an existing file
                // the reported diff/filediff treat the previous content as empty.
                // const existed = await Filesystem.exists(filePath)
                contentNew = args.newString;
                diff = trimDiff((0, diff_1.createTwoFilesPatch)(filePath, filePath, contentOld, contentNew));
                // await ctx.ask({
                //     permission: 'edit',
                //     patterns: [path.relative(Instance.worktree, filePath)],
                //     always: ['*'],
                //     metadata: {
                //         filepath: filePath,
                //         diff
                //     }
                // })
                await mcp_helpers_1.Filesystem.write(filePath, args.newString);
                // await Bus.publish(File.Event.Edited, {
                //     file: filePath
                // })
                // await Bus.publish(FileWatcher.Event.Updated, {
                //     file: filePath,
                //     event: existed ? 'change' : 'add'
                // })
                // Record the write so subsequent edits pass the freshness check.
                mcp_helpers_1.FileTime.read(sessionID, filePath);
                return;
            }
            const stats = await mcp_helpers_1.Filesystem.stat(filePath);
            if (!stats)
                throw new Error(`File ${filePath} not found`);
            if (stats.isDirectory())
                throw new Error(`Path is a directory, not a file: ${filePath}`);
            // Enforce read-before-edit: throws if this session has not read
            // the file since its last modification.
            await mcp_helpers_1.FileTime.assert(sessionID, filePath);
            contentOld = await mcp_helpers_1.Filesystem.readText(filePath);
            contentNew = replace(contentOld, args.oldString, args.newString, args.replaceAll);
            diff = trimDiff((0, diff_1.createTwoFilesPatch)(filePath, filePath, normalizeLineEndings(contentOld), normalizeLineEndings(contentNew)));
            // await ctx.ask({
            //     permission: 'edit',
            //     patterns: [path.relative(Instance.worktree, filePath)],
            //     always: ['*'],
            //     metadata: {
            //         filepath: filePath,
            //         diff
            //     }
            // })
            await mcp_helpers_1.Filesystem.write(filePath, contentNew);
            // await Bus.publish(File.Event.Edited, {
            //     file: filePath
            // })
            // await Bus.publish(FileWatcher.Event.Updated, {
            //     file: filePath,
            //     event: 'change'
            // })
            // Re-read after the write so the reported diff reflects what
            // actually landed on disk (e.g. any write-time normalization).
            contentNew = await mcp_helpers_1.Filesystem.readText(filePath);
            diff = trimDiff((0, diff_1.createTwoFilesPatch)(filePath, filePath, normalizeLineEndings(contentOld), normalizeLineEndings(contentNew)));
            mcp_helpers_1.FileTime.read(sessionID, filePath);
        });
        // Summarize the change for structured output.
        const filediff = {
            file: filePath,
            before: contentOld,
            after: contentNew,
            additions: 0,
            deletions: 0
        };
        for (const change of (0, diff_1.diffLines)(contentOld, contentNew)) {
            if (change.added)
                filediff.additions += change.count || 0;
            if (change.removed)
                filediff.deletions += change.count || 0;
        }
        // ctx.metadata({
        //     metadata: {
        //         diff,
        //         filediff,
        //         diagnostics: {}
        //     }
        // })
        const output = 'Edit applied successfully.';
        // await LSP.touchFile(filePath, true)
        // const diagnostics = await LSP.diagnostics()
        // const normalizedFilePath = Filesystem.normalizePath(filePath)
        // const issues = diagnostics[normalizedFilePath] ?? []
        // const errors = issues.filter(item => item.severity === 1)
        // if (errors.length > 0) {
        //     const limited = errors.slice(0, MAX_DIAGNOSTICS_PER_FILE)
        //     const suffix =
        //         errors.length > MAX_DIAGNOSTICS_PER_FILE
        //             ? `\n... and ${errors.length - MAX_DIAGNOSTICS_PER_FILE} more`
        //             : ''
        //     output += `\n\nLSP errors detected in this file, please fix:\n<diagnostics file="${filePath}">\n${limited.map(LSP.Diagnostic.pretty).join('\n')}${suffix}\n</diagnostics>`
        // }
        const result = {
            metadata: {
                // diagnostics,
                diff,
                filediff
            },
            title: `${path_1.default.relative(mcp_helpers_1.MCP_WORKSPACE_DIR, filePath)}`,
            output
        };
        return {
            content: [{ type: 'text', text: result.output }],
            structuredContent: result
        };
    });
}
|
package/dist/index.d.ts
ADDED
package/dist/index.js
ADDED
|
@@ -0,0 +1,30 @@
|
|
|
1
|
+
#!/usr/bin/env bun
"use strict";
// Entry point: wires the filesystem MCP server (read/edit/write tools)
// to a stdio transport.
Object.defineProperty(exports, "__esModule", { value: true });
const stdio_js_1 = require("@modelcontextprotocol/sdk/server/stdio.js");
const mcp_js_1 = require("@modelcontextprotocol/sdk/server/mcp.js");
const edit_1 = require("./edit");
const read_1 = require("./read");
const write_1 = require("./write");
const MCP_NAME = 'Filesystem MCP';
// NOTE(review): the published package version is 0.0.2 but this constant
// says 0.0.1 — consider deriving it from package.json to avoid drift.
const MCP_VERSION = '0.0.1';
const server = new mcp_js_1.McpServer({
    name: MCP_NAME,
    version: MCP_VERSION
}, {
    capabilities: {
        logging: {}
    }
});
// Register the three filesystem tools before connecting the transport.
(0, read_1.registerReadTool)(server);
(0, edit_1.registerEditTool)(server);
(0, write_1.registerWriteTool)(server);
async function main() {
    const transport = new stdio_js_1.StdioServerTransport();
    await server.connect(transport);
    // Log to stderr: stdout is reserved for the MCP stdio protocol.
    console.error(`${MCP_NAME} Server v${MCP_VERSION} running`);
}
main().catch(error => {
    console.error('Fatal error in main():', error);
    process.exit(1);
});
|
package/dist/read.d.ts
ADDED
package/dist/read.js
ADDED
|
@@ -0,0 +1,293 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
|
|
3
|
+
if (k2 === undefined) k2 = k;
|
|
4
|
+
var desc = Object.getOwnPropertyDescriptor(m, k);
|
|
5
|
+
if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
|
|
6
|
+
desc = { enumerable: true, get: function() { return m[k]; } };
|
|
7
|
+
}
|
|
8
|
+
Object.defineProperty(o, k2, desc);
|
|
9
|
+
}) : (function(o, m, k, k2) {
|
|
10
|
+
if (k2 === undefined) k2 = k;
|
|
11
|
+
o[k2] = m[k];
|
|
12
|
+
}));
|
|
13
|
+
var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
|
|
14
|
+
Object.defineProperty(o, "default", { enumerable: true, value: v });
|
|
15
|
+
}) : function(o, v) {
|
|
16
|
+
o["default"] = v;
|
|
17
|
+
});
|
|
18
|
+
var __importStar = (this && this.__importStar) || (function () {
|
|
19
|
+
var ownKeys = function(o) {
|
|
20
|
+
ownKeys = Object.getOwnPropertyNames || function (o) {
|
|
21
|
+
var ar = [];
|
|
22
|
+
for (var k in o) if (Object.prototype.hasOwnProperty.call(o, k)) ar[ar.length] = k;
|
|
23
|
+
return ar;
|
|
24
|
+
};
|
|
25
|
+
return ownKeys(o);
|
|
26
|
+
};
|
|
27
|
+
return function (mod) {
|
|
28
|
+
if (mod && mod.__esModule) return mod;
|
|
29
|
+
var result = {};
|
|
30
|
+
if (mod != null) for (var k = ownKeys(mod), i = 0; i < k.length; i++) if (k[i] !== "default") __createBinding(result, mod, k[i]);
|
|
31
|
+
__setModuleDefault(result, mod);
|
|
32
|
+
return result;
|
|
33
|
+
};
|
|
34
|
+
})();
|
|
35
|
+
var __importDefault = (this && this.__importDefault) || function (mod) {
|
|
36
|
+
return (mod && mod.__esModule) ? mod : { "default": mod };
|
|
37
|
+
};
|
|
38
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
39
|
+
exports.registerReadTool = registerReadTool;
|
|
40
|
+
const edit_txt_1 = __importDefault(require("../descriptions/edit.txt"));
|
|
41
|
+
const zod_1 = __importDefault(require("zod"));
|
|
42
|
+
const path_1 = __importDefault(require("path"));
|
|
43
|
+
const mcp_helpers_1 = require("@baitong-dev/mcp-helpers");
|
|
44
|
+
const fs_1 = require("fs");
|
|
45
|
+
const fs = __importStar(require("fs/promises"));
|
|
46
|
+
const readline_1 = require("readline");
|
|
47
|
+
// Pagination and output-size limits for the read tool.
const DEFAULT_READ_LIMIT = 2000; // max lines returned per call when 'limit' is omitted
const MAX_LINE_LENGTH = 2000; // single lines longer than this are truncated in place
const MAX_LINE_SUFFIX = `... (line truncated to ${MAX_LINE_LENGTH} chars)`;
const MAX_BYTES = 50 * 1024; // hard cap on total content bytes returned per call
const MAX_BYTES_LABEL = `${MAX_BYTES / 1024} KB`; // human-readable form used in truncation notices
|
|
52
|
+
async function isBinaryFile(filepath, fileSize) {
    // Extensions that are always treated as binary, regardless of content.
    const binaryExtensions = new Set([
        '.zip', '.tar', '.gz', '.exe', '.dll', '.so', '.class', '.jar',
        '.war', '.7z', '.doc', '.docx', '.xls', '.xlsx', '.ppt', '.pptx',
        '.odt', '.ods', '.odp', '.bin', '.dat', '.obj', '.o', '.a',
        '.lib', '.wasm', '.pyc', '.pyo'
    ]);
    if (binaryExtensions.has(path_1.default.extname(filepath).toLowerCase())) {
        return true;
    }
    // An empty file cannot contain binary data.
    if (fileSize === 0) {
        return false;
    }
    const handle = await fs.open(filepath, 'r');
    try {
        // Sample at most the first 4 KiB of the file.
        const sampleLength = Math.min(4096, fileSize);
        const sample = Buffer.alloc(sampleLength);
        const { bytesRead } = await handle.read(sample, 0, sampleLength, 0);
        if (bytesRead === 0) {
            return false;
        }
        let suspicious = 0;
        for (let i = 0; i < bytesRead; i++) {
            const byte = sample[i];
            // A NUL byte is a strong indicator of binary content.
            if (byte === 0) {
                return true;
            }
            // Control characters outside the \t..\r range (9-13) are non-printable.
            if (byte < 9 || (byte > 13 && byte < 32)) {
                suspicious++;
            }
        }
        // Treat as binary when more than 30% of the sampled bytes are non-printable.
        return suspicious / bytesRead > 0.3;
    }
    finally {
        await handle.close();
    }
}
|
|
112
|
+
function registerReadTool(server) {
    // Registers the `read` tool: returns file contents with `N: ` line-number
    // prefixes, or a directory listing, both paginated via offset/limit.
    server.registerTool('read', {
        // NOTE(review): this loads ../descriptions/edit.txt (see module requires),
        // which documents the *edit* tool — likely should load read.txt instead.
        description: edit_txt_1.default,
        // NOTE(review): some MCP SDK versions expect a raw Zod shape here rather
        // than a z.object(...) — confirm against the pinned SDK version.
        inputSchema: zod_1.default.object({
            filePath: zod_1.default.string().describe('The absolute path to the file or directory to read'),
            offset: zod_1.default.coerce
                .number()
                .describe('The line number to start reading from (1-indexed)')
                .optional(),
            limit: zod_1.default.coerce
                .number()
                .describe('The maximum number of lines to read (defaults to 2000)')
                .optional(),
            toolInfo: mcp_helpers_1.Tool.Info
        })
    }, async (args) => {
        // Validate pagination input before touching the filesystem.
        if (args.offset !== undefined && args.offset < 1) {
            throw new Error('offset must be greater than or equal to 1');
        }
        const sessionID = args.toolInfo.sessionId;
        // Relative paths are resolved against the MCP workspace root.
        let filepath = args.filePath;
        if (!path_1.default.isAbsolute(filepath)) {
            filepath = path_1.default.resolve(mcp_helpers_1.MCP_WORKSPACE_DIR, filepath);
        }
        const title = path_1.default.relative(mcp_helpers_1.MCP_WORKSPACE_DIR, filepath);
        const stat = await mcp_helpers_1.Filesystem.stat(filepath);
        // TODO(upstream): external-directory and permission checks are not yet ported.
        // await assertExternalDirectory(ctx, filepath, {
        //     bypass: Boolean(ctx.extra?.['bypassCwdCheck']),
        //     kind: stat?.isDirectory() ? 'directory' : 'file'
        // })
        // await ctx.ask({
        //     permission: 'read',
        //     patterns: [filepath],
        //     always: ['*'],
        //     metadata: {}
        // })
        if (!stat) {
            // Missing file: offer up to three near-miss suggestions from the same
            // directory (substring match either direction, case-insensitive).
            const dir = path_1.default.dirname(filepath);
            const base = path_1.default.basename(filepath);
            const suggestions = await fs
                .readdir(dir)
                .then(entries => entries
                .filter(entry => entry.toLowerCase().includes(base.toLowerCase()) ||
                base.toLowerCase().includes(entry.toLowerCase()))
                .map(entry => path_1.default.join(dir, entry))
                .slice(0, 3))
                .catch(() => []); // directory itself unreadable: fall back to plain error
            if (suggestions.length > 0) {
                throw new Error(`File not found: ${filepath}\n\nDid you mean one of these?\n${suggestions.join('\n')}`);
            }
            throw new Error(`File not found: ${filepath}`);
        }
        if (stat.isDirectory()) {
            // Directory mode: list entries, marking directories (and symlinks that
            // resolve to directories) with a trailing '/'.
            const dirents = await fs.readdir(filepath, { withFileTypes: true });
            const entries = await Promise.all(dirents.map(async (dirent) => {
                if (dirent.isDirectory())
                    return dirent.name + '/';
                if (dirent.isSymbolicLink()) {
                    // fs.stat follows the link; a broken link yields undefined.
                    const target = await fs.stat(path_1.default.join(filepath, dirent.name)).catch(() => undefined);
                    if (target?.isDirectory())
                        return dirent.name + '/';
                }
                return dirent.name;
            }));
            entries.sort((a, b) => a.localeCompare(b));
            // Apply the same offset/limit pagination used for file lines.
            const limit = args.limit ?? DEFAULT_READ_LIMIT;
            const offset = args.offset ?? 1;
            const start = offset - 1;
            const sliced = entries.slice(start, start + limit);
            const truncated = start + sliced.length < entries.length;
            const output = [
                `<path>${filepath}</path>`,
                `<type>directory</type>`,
                `<entries>`,
                sliced.join('\n'),
                truncated
                    ? `\n(Showing ${sliced.length} of ${entries.length} entries. Use 'offset' parameter to read beyond entry ${offset + sliced.length})`
                    : `\n(${entries.length} entries)`,
                `</entries>`
            ].join('\n');
            const result = {
                title,
                output,
                metadata: {
                    preview: sliced.slice(0, 20).join('\n'),
                    truncated,
                    loaded: []
                }
            };
            return {
                content: [{ type: 'text', text: result.output }],
                structuredContent: result
            };
        }
        // File mode: refuse binary content outright.
        const isBinary = await isBinaryFile(filepath, Number(stat.size));
        if (isBinary)
            throw new Error(`Cannot read binary file: ${filepath}`);
        const stream = (0, fs_1.createReadStream)(filepath, { encoding: 'utf8' });
        const rl = (0, readline_1.createInterface)({
            input: stream,
            // Note: we use the crlfDelay option to recognize all instances of CR LF
            // ('\r\n') in file as a single line break.
            crlfDelay: Infinity
        });
        const limit = args.limit ?? DEFAULT_READ_LIMIT;
        const offset = args.offset ?? 1;
        const start = offset - 1;
        const raw = []; // lines collected for output (post-truncation)
        let bytes = 0; // running byte budget consumed by `raw`
        let lines = 0; // total lines seen so far in the file
        let truncatedByBytes = false; // stopped because MAX_BYTES was hit
        let hasMoreLines = false; // file has lines beyond what we return
        try {
            for await (const text of rl) {
                lines += 1;
                // Skip lines before the requested offset.
                if (lines <= start)
                    continue;
                // Past the line limit: keep counting total lines but collect nothing.
                if (raw.length >= limit) {
                    hasMoreLines = true;
                    continue;
                }
                // Truncate overlong individual lines in place.
                const line = text.length > MAX_LINE_LENGTH
                    ? text.substring(0, MAX_LINE_LENGTH) + MAX_LINE_SUFFIX
                    : text;
                // +1 accounts for the joining '\n' between collected lines.
                const size = Buffer.byteLength(line, 'utf-8') + (raw.length > 0 ? 1 : 0);
                if (bytes + size > MAX_BYTES) {
                    // Byte cap reached: stop reading entirely (total line count will
                    // be incomplete in this case, so it is not reported below).
                    truncatedByBytes = true;
                    hasMoreLines = true;
                    break;
                }
                raw.push(line);
                bytes += size;
            }
        }
        finally {
            rl.close();
            stream.destroy();
        }
        // Offset beyond EOF is an error, except reading an empty file from line 1.
        if (lines < offset && !(lines === 0 && offset === 1)) {
            throw new Error(`Offset ${offset} is out of range for this file (${lines} lines)`);
        }
        // Prefix each returned line with its 1-indexed line number.
        const content = raw.map((line, index) => {
            return `${index + offset}: ${line}`;
        });
        const preview = raw.slice(0, 20).join('\n');
        let output = [`<path>${filepath}</path>`, `<type>file</type>`, '<content>'].join('\n');
        output += content.join('\n');
        const totalLines = lines;
        const lastReadLine = offset + raw.length - 1;
        const nextOffset = lastReadLine + 1;
        const truncated = hasMoreLines || truncatedByBytes;
        if (truncatedByBytes) {
            output += `\n\n(Output capped at ${MAX_BYTES_LABEL}. Showing lines ${offset}-${lastReadLine}. Use offset=${nextOffset} to continue.)`;
        }
        else if (hasMoreLines) {
            output += `\n\n(Showing lines ${offset}-${lastReadLine} of ${totalLines}. Use offset=${nextOffset} to continue.)`;
        }
        else {
            output += `\n\n(End of file - total ${totalLines} lines)`;
        }
        output += '\n</content>';
        // just warms the lsp client
        // LSP.touchFile(filepath, false)
        // Record that this session has read the file, enabling later edit/write
        // staleness checks (FileTime.assert).
        mcp_helpers_1.FileTime.read(sessionID, filepath);
        // if (args.instructions.length > 0) {
        //     output += `\n\n<system-reminder>\n${args.instructions.map(i => i.content).join('\n\n')}\n</system-reminder>`
        // }
        const result = {
            title,
            output,
            metadata: {
                preview,
                truncated
                // loaded: instructions.map(i => i.filepath)
            }
        };
        return {
            content: [{ type: 'text', text: result.output }],
            structuredContent: result
        };
    });
}
|
package/dist/write.d.ts
ADDED
package/dist/write.js
ADDED
|
@@ -0,0 +1,85 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
var __importDefault = (this && this.__importDefault) || function (mod) {
|
|
3
|
+
return (mod && mod.__esModule) ? mod : { "default": mod };
|
|
4
|
+
};
|
|
5
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
6
|
+
exports.registerWriteTool = registerWriteTool;
|
|
7
|
+
const edit_txt_1 = __importDefault(require("../descriptions/edit.txt"));
|
|
8
|
+
const zod_1 = __importDefault(require("zod"));
|
|
9
|
+
const path_1 = __importDefault(require("path"));
|
|
10
|
+
const mcp_helpers_1 = require("@baitong-dev/mcp-helpers");
|
|
11
|
+
function registerWriteTool(server) {
|
|
12
|
+
server.registerTool('write', {
|
|
13
|
+
description: edit_txt_1.default,
|
|
14
|
+
inputSchema: zod_1.default.object({
|
|
15
|
+
content: zod_1.default.string().describe('The content to write to the file'),
|
|
16
|
+
filePath: zod_1.default
|
|
17
|
+
.string()
|
|
18
|
+
.describe('The absolute path to the file to write (must be absolute, not relative)'),
|
|
19
|
+
toolInfo: mcp_helpers_1.Tool.Info
|
|
20
|
+
})
|
|
21
|
+
}, async (args) => {
|
|
22
|
+
const sessionID = args.toolInfo.sessionId;
|
|
23
|
+
const filepath = path_1.default.isAbsolute(args.filePath)
|
|
24
|
+
? args.filePath
|
|
25
|
+
: path_1.default.join(mcp_helpers_1.MCP_WORKSPACE_DIR, args.filePath);
|
|
26
|
+
// await assertExternalDirectory(sessionID, filepath)
|
|
27
|
+
const exists = await mcp_helpers_1.Filesystem.exists(filepath);
|
|
28
|
+
// const contentOld = exists ? await Filesystem.readText(filepath) : ''
|
|
29
|
+
if (exists)
|
|
30
|
+
await mcp_helpers_1.FileTime.assert(sessionID, filepath);
|
|
31
|
+
// const diff = trimDiff(createTwoFilesPatch(filepath, filepath, contentOld, args.content))
|
|
32
|
+
// await ctx.ask({
|
|
33
|
+
// permission: 'edit',
|
|
34
|
+
// patterns: [path.relative(Instance.worktree, filepath)],
|
|
35
|
+
// always: ['*'],
|
|
36
|
+
// metadata: {
|
|
37
|
+
// filepath,
|
|
38
|
+
// diff
|
|
39
|
+
// }
|
|
40
|
+
// })
|
|
41
|
+
await mcp_helpers_1.Filesystem.write(filepath, args.content);
|
|
42
|
+
// await Bus.publish(File.Event.Edited, {
|
|
43
|
+
// file: filepath
|
|
44
|
+
// })
|
|
45
|
+
// await Bus.publish(FileWatcher.Event.Updated, {
|
|
46
|
+
// file: filepath,
|
|
47
|
+
// event: exists ? 'change' : 'add'
|
|
48
|
+
// })
|
|
49
|
+
mcp_helpers_1.FileTime.read(sessionID, filepath);
|
|
50
|
+
const output = 'Wrote file successfully.';
|
|
51
|
+
// await LSP.touchFile(filepath, true)
|
|
52
|
+
// const diagnostics = await LSP.diagnostics()
|
|
53
|
+
// const normalizedFilepath = Filesystem.normalizePath(filepath)
|
|
54
|
+
// let projectDiagnosticsCount = 0
|
|
55
|
+
// for (const [file, issues] of Object.entries(diagnostics)) {
|
|
56
|
+
// const errors = issues.filter(item => item.severity === 1)
|
|
57
|
+
// if (errors.length === 0) continue
|
|
58
|
+
// const limited = errors.slice(0, MAX_DIAGNOSTICS_PER_FILE)
|
|
59
|
+
// const suffix =
|
|
60
|
+
// errors.length > MAX_DIAGNOSTICS_PER_FILE
|
|
61
|
+
// ? `\n... and ${errors.length - MAX_DIAGNOSTICS_PER_FILE} more`
|
|
62
|
+
// : ''
|
|
63
|
+
// if (file === normalizedFilepath) {
|
|
64
|
+
// output += `\n\nLSP errors detected in this file, please fix:\n<diagnostics file="${filepath}">\n${limited.map(LSP.Diagnostic.pretty).join('\n')}${suffix}\n</diagnostics>`
|
|
65
|
+
// continue
|
|
66
|
+
// }
|
|
67
|
+
// if (projectDiagnosticsCount >= MAX_PROJECT_DIAGNOSTICS_FILES) continue
|
|
68
|
+
// projectDiagnosticsCount++
|
|
69
|
+
// output += `\n\nLSP errors detected in other files:\n<diagnostics file="${file}">\n${limited.map(LSP.Diagnostic.pretty).join('\n')}${suffix}\n</diagnostics>`
|
|
70
|
+
// }
|
|
71
|
+
const result = {
|
|
72
|
+
title: path_1.default.relative(mcp_helpers_1.MCP_WORKSPACE_DIR, filepath),
|
|
73
|
+
metadata: {
|
|
74
|
+
// diagnostics,
|
|
75
|
+
filepath,
|
|
76
|
+
exists: exists
|
|
77
|
+
},
|
|
78
|
+
output
|
|
79
|
+
};
|
|
80
|
+
return {
|
|
81
|
+
content: [{ type: 'text', text: result.output }],
|
|
82
|
+
structuredContent: result
|
|
83
|
+
};
|
|
84
|
+
});
|
|
85
|
+
}
|
package/package.json
ADDED
|
@@ -0,0 +1,37 @@
|
|
|
1
|
+
{
|
|
2
|
+
"name": "@baitong-dev/filesystem-mcp",
|
|
3
|
+
"version": "0.0.2",
|
|
4
|
+
"main": "./dist/index.js",
|
|
5
|
+
"bin": {
|
|
6
|
+
"@baitong-dev/filesystem-mcp": "./dist/index.js"
|
|
7
|
+
},
|
|
8
|
+
"files": [
|
|
9
|
+
"dist",
|
|
10
|
+
"descriptions",
|
|
11
|
+
"README.md"
|
|
12
|
+
],
|
|
13
|
+
"keywords": [
|
|
14
|
+
"mcp",
|
|
15
|
+
"filesystem",
|
|
16
|
+
"write",
|
|
17
|
+
"read",
|
|
18
|
+
"edit"
|
|
19
|
+
],
|
|
20
|
+
"description": "MCP server exposing filesystem read, write, and edit tools",
|
|
21
|
+
"dependencies": {
|
|
22
|
+
"@modelcontextprotocol/sdk": "^1.25.1",
|
|
23
|
+
"diff": "^8.0.4",
|
|
24
|
+
"zod": "^4.3.4",
|
|
25
|
+
"@baitong-dev/mcp-helpers": "0.0.7"
|
|
26
|
+
},
|
|
27
|
+
"devDependencies": {
|
|
28
|
+
"typescript": "^5.9.2"
|
|
29
|
+
},
|
|
30
|
+
"publishConfig": {
|
|
31
|
+
"access": "public",
|
|
32
|
+
"registry": "https://registry.npmjs.org"
|
|
33
|
+
},
|
|
34
|
+
"scripts": {
|
|
35
|
+
"tsc": "tsc ./src/index.ts --declaration --module commonjs --target es2021 --esModuleInterop --outDir ./dist"
|
|
36
|
+
}
|
|
37
|
+
}
|