@mrxkun/mcfast-mcp 1.0.2 → 1.0.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (3)
  1. package/README.md +1 -1
  2. package/package.json +1 -1
  3. package/src/index.js +126 -5
package/README.md CHANGED
@@ -81,7 +81,7 @@ replace: "api.example.com"
81
81
  ## 🔒 Privacy & Security
82
82
 
83
83
  - **Zero Persistence:** Code is processed in-memory and discarded immediately
84
- - **Open Source Client:** Audit the source at [github.com/ndpmmo/mcfast](https://github.com/ndpmmo/mcfast). Current stable version: `1.0.2`.
84
+ - **Open Source Client:** Audit the source at [github.com/ndpmmo/mcfast](https://github.com/ndpmmo/mcfast). Current stable version: `1.0.4`.
85
85
  - **Token Masking:** Your `MCFAST_TOKEN` is never logged
86
86
 
87
87
  ---
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@mrxkun/mcfast-mcp",
3
- "version": "1.0.2",
3
+ "version": "1.0.4",
4
4
  "description": "Ultra-fast code editing via Mercury Coder Cloud API.",
5
5
  "type": "module",
6
6
  "bin": {
package/src/index.js CHANGED
@@ -8,6 +8,8 @@
8
8
  import { Server } from "@modelcontextprotocol/sdk/server/index.js";
9
9
  import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js";
10
10
  import { ListToolsRequestSchema, CallToolRequestSchema } from "@modelcontextprotocol/sdk/types.js";
11
+ import fs from "fs/promises";
12
+ import path from "path";
11
13
 
12
14
  const API_URL = "https://mcfast.vercel.app/api/v1";
13
15
  const TOKEN = process.env.MCFAST_TOKEN;
@@ -74,6 +76,42 @@ server.setRequestHandler(ListToolsRequestSchema, async () => {
74
76
  },
75
77
  required: ["files", "search", "replace"]
76
78
  }
79
+ },
80
+ {
81
+ name: "search_code_ai",
82
+ description: "Intelligently search for code patterns across multiple files. Returns matches with surrounding context.",
83
+ inputSchema: {
84
+ type: "object",
85
+ properties: {
86
+ query: {
87
+ type: "string",
88
+ description: "Search query (supports natural language or literal strings)"
89
+ },
90
+ files: {
91
+ type: "object",
92
+ description: "Map of file paths to content to search within.",
93
+ additionalProperties: { type: "string" }
94
+ },
95
+ contextLines: {
96
+ type: "number",
97
+ description: "Number of lines to show before/after each match (default: 2)"
98
+ }
99
+ },
100
+ required: ["query", "files"]
101
+ }
102
+ },
103
+ {
104
+ name: "edit_file",
105
+ description: "Edit a local file by providing the target path and the new code content. This tool writes directly to the filesystem.",
106
+ inputSchema: {
107
+ type: "object",
108
+ properties: {
109
+ path: { type: "string", description: "Absolute or relative path to the file." },
110
+ content: { type: "string", description: "The full new content of the file." },
111
+ instruction: { type: "string", description: "Briefly what you changed (for logging)." }
112
+ },
113
+ required: ["path", "content"]
114
+ }
77
115
  }
78
116
  ],
79
117
  };
@@ -88,18 +126,34 @@ server.setRequestHandler(CallToolRequestSchema, async (request) => {
88
126
  if (name === "apply_fast") {
89
127
  return await handleApplyFast(args);
90
128
  } else if (name === "apply_search_replace") {
91
- // Placeholder: You could implement a dedicated endpoint for this later to save tokens
92
- // For now, we route it to apply_fast with a synthesized instruction
93
129
  return await handleApplyFast({
94
130
  instruction: `Replace checking for exact match:\nSEARCH:\n${args.search}\n\nREPLACE WITH:\n${args.replace}`,
95
131
  files: args.files,
96
- dryRun: false
132
+ dryRun: args.dryRun || false
97
133
  });
134
+ } else if (name === "search_code_ai") {
135
+ return await handleSearchCodeAI(args);
136
+ } else if (name === "edit_file") {
137
+ return await handleEditFile(args);
98
138
  }
99
139
 
100
140
  throw new Error(`Tool not found: ${name}`);
101
141
  });
102
142
 
143
/**
 * Persist the provided content to a file on the local filesystem.
 * Returns an MCP text result describing success or failure; never throws
 * across the MCP boundary.
 *
 * NOTE(review): `filePath` is caller-controlled and written with no
 * sandboxing or path validation, and `instruction` is accepted but never
 * used — confirm this trust model is intentional before shipping.
 *
 * @param {{ path: string, content: string, instruction?: string }} args
 * @returns {Promise<{ content: Array<{type: string, text: string}>, isError?: boolean }>}
 */
async function handleEditFile({ path: filePath, content, instruction = "" }) {
  let resultText;
  let failed = false;
  try {
    await fs.writeFile(filePath, content, "utf8");
    resultText = `✅ File saved successfully: ${filePath}`;
  } catch (writeError) {
    resultText = `❌ Failed to write file: ${writeError.message}`;
    failed = true;
  }
  const response = { content: [{ type: "text", text: resultText }] };
  if (failed) {
    response.isError = true;
  }
  return response;
}
156
+
103
157
  async function handleApplyFast({ instruction, files, dryRun }) {
104
158
  try {
105
159
  const response = await fetch(`${API_URL}/apply`, {
@@ -121,8 +175,26 @@ async function handleApplyFast({ instruction, files, dryRun }) {
121
175
 
122
176
  const data = await response.json();
123
177
 
124
- // Format the result nicely for the AI Agent consuming this tool
125
- let output = `✅ Edit Applied Successfully via ${data.strategy} (${data.latency_ms}ms)\n\n`;
178
+ // AUTOMATIC FILE WRITING LOGIC
179
+ const writtenFiles = [];
180
+ if (!dryRun && data.files) {
181
+ for (const [filePath, content] of Object.entries(data.files)) {
182
+ try {
183
+ await fs.writeFile(filePath, content, 'utf8');
184
+ writtenFiles.push(filePath);
185
+ } catch (writeErr) {
186
+ console.error(`Failed to write ${filePath}:`, writeErr);
187
+ }
188
+ }
189
+ }
190
+
191
+ // Format the result nicely
192
+ let output = `✅ Edit Processed via ${data.strategy} (${data.latency_ms}ms)\n`;
193
+ if (writtenFiles.length > 0) {
194
+ output += `💾 Automatically saved ${writtenFiles.length} files to disk.\n\n`;
195
+ } else if (dryRun) {
196
+ output += `🔍 DRY RUN: No files were modified on disk.\n\n`;
197
+ }
126
198
 
127
199
  if (data.diffs && Object.keys(data.diffs).length > 0) {
128
200
  for (const [path, diff] of Object.entries(data.diffs)) {
@@ -144,6 +216,55 @@ async function handleApplyFast({ instruction, files, dryRun }) {
144
216
  }
145
217
  }
146
218
 
219
/**
 * Search the supplied files for `query` by POSTing them to the remote
 * `/search-ai` endpoint, then format each match with its context lines.
 *
 * NOTE(review): this uploads full file contents to the remote API —
 * confirm this matches the package's stated "zero persistence" privacy
 * model before relying on it.
 *
 * @param {{ query: string, files: Record<string, string>, contextLines?: number }} args
 * @returns {Promise<{ content: Array<{type: string, text: string}>, isError?: boolean }>}
 */
async function handleSearchCodeAI({ query, files, contextLines = 2 }) {
  try {
    const response = await fetch(`${API_URL}/search-ai`, {
      method: "POST",
      headers: {
        "Content-Type": "application/json",
        "Authorization": `Bearer ${TOKEN}`,
      },
      body: JSON.stringify({ query, files, contextLines }),
    });

    if (!response.ok) {
      const errorText = await response.text();
      return {
        content: [{ type: "text", text: `Search Error (${response.status}): ${errorText}` }],
        isError: true,
      };
    }

    const data = await response.json();

    // Bug fix: the previous version wrote "\\n", emitting a literal
    // backslash-n instead of a newline. Every other handler in this file
    // (e.g. handleApplyFast) uses real "\n" in its output.
    let output = `🔍 Found ${data.totalMatches} matches for "${query}"\n\n`;

    if (data.results.length === 0) {
      output += "No matches found.";
    } else {
      data.results.forEach((result, i) => {
        output += `📄 ${result.file}:${result.lineNumber}\n`;
        result.context.forEach((ctx) => {
          const prefix = ctx.isMatch ? "→ " : " ";
          output += `${prefix}${ctx.lineNumber}: ${ctx.content}\n`;
        });
        // Blank line between result groups, but not after the last one.
        if (i < data.results.length - 1) output += "\n";
      });
    }

    return {
      content: [{ type: "text", text: output }],
    };

  } catch (error) {
    return {
      content: [{ type: "text", text: `Search Connection Error: ${error.message}` }],
      isError: true,
    };
  }
}
267
+
147
268
  /**
148
269
  * Start Server
149
270
  */