@azerate/claudette-mcp 1.0.0 → 1.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/__tests__/checkpoints.test.d.ts +1 -0
- package/dist/__tests__/checkpoints.test.js +59 -0
- package/dist/__tests__/utils.test.d.ts +1 -0
- package/dist/__tests__/utils.test.js +28 -0
- package/dist/__tests__/workspace.test.d.ts +1 -0
- package/dist/__tests__/workspace.test.js +30 -0
- package/dist/actions.d.ts +6 -0
- package/dist/actions.js +30 -0
- package/dist/checkpoints.d.ts +21 -0
- package/dist/checkpoints.js +247 -0
- package/dist/errors.d.ts +9 -0
- package/dist/errors.js +37 -0
- package/dist/git.d.ts +6 -0
- package/dist/git.js +34 -0
- package/dist/index.js +557 -431
- package/dist/scripts.d.ts +29 -0
- package/dist/scripts.js +61 -0
- package/dist/utils.d.ts +1 -0
- package/dist/utils.js +12 -0
- package/dist/workspace.d.ts +12 -0
- package/dist/workspace.js +74 -0
- package/package.json +1 -1
package/dist/index.js
CHANGED
@@ -2,444 +2,27 @@
 import { Server } from "@modelcontextprotocol/sdk/server/index.js";
 import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js";
 import { CallToolRequestSchema, ListToolsRequestSchema, } from "@modelcontextprotocol/sdk/types.js";
-
-import {
-import {
-import {
+// Import from extracted modules
+import { getScripts, getScriptOutput, startScript, stopScript } from './scripts.js';
+import { getWorkspaceConfig, saveWorkspaceConfig, clearMemory, deleteMemoryNotes, replaceMemory } from './workspace.js';
+import { getCheckpoints, createCheckpoint, restoreCheckpoint, deleteCheckpoint } from './checkpoints.js';
+import { getGitChanges } from './git.js';
+import { getTypeScriptErrors } from './errors.js';
+import { getPendingAction } from './actions.js';
 // Claudette server API base URL
 const CLAUDETTE_API = "http://localhost:52001";
-//
-
-const WORKSPACE_CONFIGS_DIR = join(DATA_DIR, "workspace-configs");
-// Strip ANSI escape codes
-function stripAnsi(str) {
-return str
-.replace(/\x1b\[[0-9;]*[a-zA-Z]/g, '')
-.replace(/\x1b\][^\x07]*\x07/g, '')
-.replace(/\x1b[()][AB012]/g, '')
-.replace(/\x1b[>=]/g, '')
-.replace(/\x07/g, '')
-.replace(/\r/g, '')
-.replace(/\x1b\[\?[0-9;]*[a-zA-Z]/g, '');
-}
-// Get list of scripts from workspace
-async function getScripts(workspacePath) {
-try {
-const response = await fetch(`${CLAUDETTE_API}/api/scripts?path=${encodeURIComponent(workspacePath)}`);
-if (!response.ok)
-return { scripts: [], hasPackageJson: false };
-return await response.json();
-}
-catch {
-return { scripts: [], hasPackageJson: false };
-}
-}
-// Get script output
-async function getScriptOutput(workspacePath, scriptName) {
-try {
-const response = await fetch(`${CLAUDETTE_API}/api/scripts/status?path=${encodeURIComponent(workspacePath)}&scriptName=${encodeURIComponent(scriptName)}`);
-if (!response.ok)
-return null;
-const data = await response.json();
-if (!data.found || !data.script)
-return null;
-// Join output lines and strip ANSI codes
-const output = data.script.output.join('');
-return stripAnsi(output);
-}
-catch {
-return null;
-}
-}
-// Start a script
-async function startScript(workspacePath, scriptName) {
-try {
-const response = await fetch(`${CLAUDETTE_API}/api/scripts/run`, {
-method: 'POST',
-headers: { 'Content-Type': 'application/json' },
-body: JSON.stringify({ path: workspacePath, scriptName, autoLaunchBrowser: true }),
-});
-return await response.json();
-}
-catch (err) {
-return { success: false, error: err.message };
-}
-}
-// Stop a script
-async function stopScript(workspacePath, scriptName) {
-try {
-const response = await fetch(`${CLAUDETTE_API}/api/scripts/stop`, {
-method: 'POST',
-headers: { 'Content-Type': 'application/json' },
-body: JSON.stringify({ path: workspacePath, scriptName }),
-});
-return await response.json();
-}
-catch (err) {
-return { success: false, error: err.message };
-}
-}
-// Read pending action from file (file-based approach for reliability)
-function getPendingAction(workspacePath) {
-const filePath = join(workspacePath, '.claudette', 'pending-action.json');
-if (!existsSync(filePath)) {
-return null;
-}
-try {
-const content = readFileSync(filePath, 'utf-8');
-const action = JSON.parse(content);
-// Check if action is too old (5 minutes)
-if (Date.now() - action.timestamp > 5 * 60 * 1000) {
-unlinkSync(filePath);
-return null;
-}
-// Delete the file after reading (consume it)
-unlinkSync(filePath);
-return action;
-}
-catch {
-// Try to clean up corrupted file
-try {
-unlinkSync(filePath);
-}
-catch { }
-return null;
-}
-}
-// Get workspace config path
-function getWorkspaceConfigPath(workspacePath) {
-const safeName = workspacePath.replace(/[\\/:*?"<>|]/g, "-").replace(/--+/g, "-");
-return join(WORKSPACE_CONFIGS_DIR, `${safeName}.json`);
-}
-// Read workspace config
-function getWorkspaceConfig(workspacePath) {
-const configPath = getWorkspaceConfigPath(workspacePath);
-if (!existsSync(configPath))
-return null;
-try {
-return JSON.parse(readFileSync(configPath, "utf-8"));
-}
-catch {
-return null;
-}
-}
-// Save workspace config
-function saveWorkspaceConfig(config) {
-if (!existsSync(WORKSPACE_CONFIGS_DIR)) {
-mkdirSync(WORKSPACE_CONFIGS_DIR, { recursive: true });
-}
-const configPath = getWorkspaceConfigPath(config.path);
-writeFileSync(configPath, JSON.stringify(config, null, 2));
-}
-// Check TypeScript errors
-function getTypeScriptErrors(workspacePath) {
-const errors = [];
-try {
-// Check if tsconfig exists
-if (!existsSync(join(workspacePath, "tsconfig.json"))) {
-return [];
-}
-// Run TypeScript compiler
-execSync("npx tsc --noEmit 2>&1", {
-cwd: workspacePath,
-encoding: "utf-8",
-maxBuffer: 10 * 1024 * 1024,
-});
-}
-catch (err) {
-const output = err.stdout || err.message || "";
-// Parse TypeScript errors: src/file.ts(10,5): error TS2322: ...
-const errorRegex = /^(.+?)\((\d+),(\d+)\):\s*(error|warning)\s*(TS\d+):\s*(.+)$/gm;
-let match;
-while ((match = errorRegex.exec(output)) !== null) {
-errors.push({
-file: match[1],
-line: parseInt(match[2], 10),
-column: parseInt(match[3], 10),
-severity: match[4],
-code: match[5],
-message: match[6],
-});
-}
-}
-return errors;
-}
-// Get checkpoints (git stashes)
-function getCheckpoints(workspacePath) {
-const checkpoints = [];
-try {
-const output = execSync("git stash list", {
-cwd: workspacePath,
-encoding: "utf-8",
-});
-if (!output.trim())
-return [];
-const lines = output.trim().split("\n");
-for (const line of lines) {
-const match = line.match(/^(stash@\{(\d+)\}):\s*(?:On\s+\w+:\s*)?(?:WIP on \w+:\s*\w+\s*)?(.+)$/);
-if (match) {
-const id = match[1];
-let message = match[3].trim().replace(/^[a-f0-9]+\s+/, "");
-try {
-const dateOutput = execSync(`git log -1 --format="%ci" ${id}`, {
-cwd: workspacePath,
-encoding: "utf-8",
-}).trim();
-let filesChanged = 0;
-try {
-const diffOutput = execSync(`git stash show ${id} --stat`, {
-cwd: workspacePath,
-encoding: "utf-8",
-});
-const filesMatch = diffOutput.match(/(\d+)\s+files?\s+changed/);
-if (filesMatch)
-filesChanged = parseInt(filesMatch[1]);
-}
-catch { }
-checkpoints.push({ id, message, timestamp: dateOutput, filesChanged });
-}
-catch { }
-}
-}
-}
-catch { }
-return checkpoints;
-}
-// Create a checkpoint
-function createCheckpoint(workspacePath, message) {
-try {
-const status = execSync("git status --porcelain", {
-cwd: workspacePath,
-encoding: "utf-8",
-}).trim();
-if (!status) {
-return { success: false, error: "No changes to checkpoint" };
-}
-// Stage tracked files
-try {
-execSync("git add -u", { cwd: workspacePath, encoding: "utf-8", stdio: ["pipe", "pipe", "pipe"] });
-}
-catch { }
-// Create stash without removing changes
-const stashHash = execSync("git stash create", {
-cwd: workspacePath,
-encoding: "utf-8",
-stdio: ["pipe", "pipe", "pipe"],
-}).trim();
-if (!stashHash) {
-return { success: false, error: "Failed to create checkpoint" };
-}
-// Generate name from changes if not provided
-const stashMessage = message || generateCheckpointName(status);
-execSync(`git stash store -m "${stashMessage.replace(/"/g, '\\"')}" ${stashHash}`, {
-cwd: workspacePath,
-encoding: "utf-8",
-});
-return { success: true };
-}
-catch (err) {
-return { success: false, error: err.message };
-}
-}
-// Generate checkpoint name from git status
-function generateCheckpointName(status) {
-const lines = status.trim().split("\n").filter(Boolean);
-const changes = {
-modified: [], added: [], deleted: [],
-};
-for (const line of lines) {
-const code = line.substring(0, 2);
-const file = line.substring(3).split("/").pop() || line.substring(3);
-if (code.includes("M"))
-changes.modified.push(file);
-else if (code.includes("A") || code.includes("?"))
-changes.added.push(file);
-else if (code.includes("D"))
-changes.deleted.push(file);
-}
-const parts = [];
-if (changes.modified.length > 0) {
-parts.push(changes.modified.length <= 2 ? `Edit ${changes.modified.join(", ")}` : `Edit ${changes.modified.length} files`);
-}
-if (changes.added.length > 0) {
-parts.push(changes.added.length <= 2 ? `Add ${changes.added.join(", ")}` : `Add ${changes.added.length} files`);
-}
-if (changes.deleted.length > 0) {
-parts.push(changes.deleted.length <= 2 ? `Delete ${changes.deleted.join(", ")}` : `Delete ${changes.deleted.length} files`);
-}
-if (parts.length === 0)
-return `Checkpoint ${new Date().toLocaleTimeString()}`;
-let result = parts.join(", ");
-return result.length > 60 ? result.substring(0, 57) + "..." : result;
-}
-// Restore a checkpoint
-function restoreCheckpoint(workspacePath, id) {
-try {
-// Parse original stash index to calculate new index after auto-save
-const stashMatch = id.match(/stash@\{(\d+)\}/);
-if (!stashMatch) {
-return { success: false, error: "Invalid checkpoint ID" };
-}
-let stashIndex = parseInt(stashMatch[1], 10);
-let autoSaveCreated = false;
-// Auto-save current changes first
-const status = execSync("git status --porcelain", {
-cwd: workspacePath,
-encoding: "utf-8",
-}).trim();
-if (status) {
-try {
-execSync("git add -u", { cwd: workspacePath, encoding: "utf-8", stdio: ["pipe", "pipe", "pipe"] });
-}
-catch { }
-const stashHash = execSync("git stash create", {
-cwd: workspacePath,
-encoding: "utf-8",
-stdio: ["pipe", "pipe", "pipe"],
-}).trim();
-if (stashHash) {
-execSync(`git stash store -m "Auto-save before restore" ${stashHash}`, {
-cwd: workspacePath,
-encoding: "utf-8",
-});
-// Stash indices shifted up by 1
-stashIndex += 1;
-autoSaveCreated = true;
-}
-}
-// Reset working directory completely
-execSync("git reset --hard HEAD", {
-cwd: workspacePath,
-encoding: "utf-8",
-stdio: ["pipe", "pipe", "pipe"],
-});
-// Clean untracked files to prevent conflicts
-execSync("git clean -fd", {
-cwd: workspacePath,
-encoding: "utf-8",
-stdio: ["pipe", "pipe", "pipe"],
-});
-// Apply the checkpoint with adjusted index
-try {
-execSync(`git stash apply stash@{${stashIndex}}`, {
-cwd: workspacePath,
-encoding: "utf-8",
-stdio: ["pipe", "pipe", "pipe"],
-});
-}
-catch (applyErr) {
-// Check if there are conflict markers in the working directory
-try {
-const conflictCheck = execSync("git diff --check", {
-cwd: workspacePath,
-encoding: "utf-8",
-stdio: ["pipe", "pipe", "pipe"],
-});
-}
-catch (diffErr) {
-const diffOutput = diffErr.stdout || diffErr.message || "";
-if (diffOutput.includes("conflict") || diffOutput.includes("leftover")) {
-// Conflicts detected - abort and reset to clean state
-execSync("git reset --hard HEAD", {
-cwd: workspacePath,
-encoding: "utf-8",
-stdio: ["pipe", "pipe", "pipe"],
-});
-execSync("git clean -fd", {
-cwd: workspacePath,
-encoding: "utf-8",
-stdio: ["pipe", "pipe", "pipe"],
-});
-return {
-success: false,
-error: "Checkpoint restore failed due to conflicts. Working directory reset to clean state." +
-(autoSaveCreated ? " Your changes were saved as stash@{0}." : "")
-};
-}
-}
-// Also check for conflict markers in files directly
-try {
-const grepConflict = execSync("git grep -l \"^<<<<<<<\" || true", {
-cwd: workspacePath,
-encoding: "utf-8",
-stdio: ["pipe", "pipe", "pipe"],
-}).trim();
-if (grepConflict) {
-// Conflict markers found - abort
-execSync("git reset --hard HEAD", {
-cwd: workspacePath,
-encoding: "utf-8",
-stdio: ["pipe", "pipe", "pipe"],
-});
-execSync("git clean -fd", {
-cwd: workspacePath,
-encoding: "utf-8",
-stdio: ["pipe", "pipe", "pipe"],
-});
-return {
-success: false,
-error: "Checkpoint restore created merge conflicts. Aborted and reset to clean state." +
-(autoSaveCreated ? " Your changes were saved as stash@{0}." : "")
-};
-}
-}
-catch { }
-// If we got here, the apply error wasn't about conflicts - re-throw
-throw applyErr;
-}
-return {
-success: true,
-autoSaveId: autoSaveCreated ? "stash@{0}" : undefined
-};
-}
-catch (err) {
-return { success: false, error: err.message };
-}
-}
-// Delete a checkpoint
-function deleteCheckpoint(workspacePath, id) {
+// Helper to log refactor activity
+async function logRefactorActivity(workspacePath, type, action, message, details) {
 try {
-
-
-
+await fetch(`${CLAUDETTE_API}/api/refactor/activity`, {
+method: "POST",
+headers: { "Content-Type": "application/json" },
+body: JSON.stringify({ path: workspacePath, type, action, message, details }),
 });
-return { success: true };
-}
-catch (err) {
-return { success: false, error: err.message };
-}
-}
-// Get git changes
-function getGitChanges(workspacePath) {
-const changes = [];
-try {
-const output = execSync("git status --porcelain", {
-cwd: workspacePath,
-encoding: "utf-8",
-});
-const lines = output.trim().split("\n").filter(Boolean);
-for (const line of lines) {
-const staged = line[0] !== " " && line[0] !== "?";
-const statusCode = line.substring(0, 2).trim();
-const file = line.substring(3);
-let status = "unknown";
-if (statusCode.includes("M"))
-status = "modified";
-else if (statusCode.includes("A"))
-status = "added";
-else if (statusCode.includes("D"))
-status = "deleted";
-else if (statusCode.includes("R"))
-status = "renamed";
-else if (statusCode.includes("?"))
-status = "untracked";
-changes.push({ file, status, staged });
-}
 }
 catch {
-//
+// Ignore logging errors - don't break the main flow
 }
-return changes;
 }
 // Create the MCP server
 const server = new Server({
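
The extracted modules referenced by the new imports (scripts.js, workspace.js, checkpoints.js, git.js, errors.js, actions.js) are new files in 1.2.0 and their contents are not part of this hunk; the deleted helpers above most likely moved into them with their signatures intact. A minimal sketch of what scripts.js plausibly exports, reconstructed from the removed inline code rather than taken from the published file:

// scripts.js - hypothetical reconstruction based on the helpers deleted from index.js above
const CLAUDETTE_API = "http://localhost:52001";

// Fetch the list of npm scripts the Claudette server knows about for a workspace
export async function getScripts(workspacePath) {
    try {
        const response = await fetch(`${CLAUDETTE_API}/api/scripts?path=${encodeURIComponent(workspacePath)}`);
        if (!response.ok)
            return { scripts: [], hasPackageJson: false };
        return await response.json();
    }
    catch {
        return { scripts: [], hasPackageJson: false };
    }
}

// startScript, stopScript and getScriptOutput would follow the same fetch-and-fall-back pattern.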
@@ -514,6 +97,58 @@ server.setRequestHandler(ListToolsRequestSchema, async () => {
 required: ["workspace_path", "note"],
 },
 },
+{
+name: "clear_memory",
+description: "Clear all project memory notes. Use this before writing a fresh summary.",
+inputSchema: {
+type: "object",
+properties: {
+workspace_path: {
+type: "string",
+description: "Path to the workspace directory",
+},
+},
+required: ["workspace_path"],
+},
+},
+{
+name: "delete_memory",
+description: "Delete specific memory notes by their index (0-based). Use get_memory first to see indices.",
+inputSchema: {
+type: "object",
+properties: {
+workspace_path: {
+type: "string",
+description: "Path to the workspace directory",
+},
+indices: {
+type: "array",
+items: { type: "number" },
+description: "Array of note indices to delete (0-based)",
+},
+},
+required: ["workspace_path", "indices"],
+},
+},
+{
+name: "replace_memory",
+description: "Replace all memory with new notes. Use for summarizing/consolidating existing notes into fewer, more concise notes.",
+inputSchema: {
+type: "object",
+properties: {
+workspace_path: {
+type: "string",
+description: "Path to the workspace directory",
+},
+notes: {
+type: "array",
+items: { type: "string" },
+description: "Array of new notes to replace all existing memory",
+},
+},
+required: ["workspace_path", "notes"],
+},
+},
 {
 name: "get_checkpoints",
 description: "Get all saved checkpoints (snapshots) for the workspace. Checkpoints allow reverting to previous states.",
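
The three schemas above are plain MCP tool declarations; a client invokes them through the standard tools/call request with arguments matching the inputSchema. An illustrative payload (workspace path and notes are made-up example values):

// Example JSON-RPC request an MCP client might send for the new replace_memory tool
const exampleReplaceMemoryCall = {
    jsonrpc: "2.0",
    id: 42,
    method: "tools/call",
    params: {
        name: "replace_memory",
        arguments: {
            workspace_path: "/home/user/projects/my-app",
            notes: ["Vite + React front end", "Claudette server runs on localhost:52001"],
        },
    },
};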
@@ -664,6 +299,126 @@ server.setRequestHandler(ListToolsRequestSchema, async () => {
 required: ["workspace_path"],
 },
 },
+{
+name: "create_refactor_plan",
+description: "Analyze workspace for files needing refactoring and create a verified plan. Automatically detects test framework (Jest, Vitest, pytest, Go, etc.), runs tests to capture baseline, and generates step-by-step refactoring instructions with test verification checkpoints. IMPORTANT: Always use verify_refactor after each extraction to ensure no regressions.",
+inputSchema: {
+type: "object",
+properties: {
+workspace_path: {
+type: "string",
+description: "Path to the workspace directory",
+},
+},
+required: ["workspace_path"],
+},
+},
+{
+name: "get_refactor_plan",
+description: "Get the current refactor plan for the workspace. Returns the full plan with file analysis and suggestions. Use this to read the plan before performing refactoring.",
+inputSchema: {
+type: "object",
+properties: {
+workspace_path: {
+type: "string",
+description: "Path to the workspace directory",
+},
+},
+required: ["workspace_path"],
+},
+},
+{
+name: "complete_refactor",
+description: "Mark the refactoring as complete and delete the refactor plan file. Call this after you have finished all refactoring tasks.",
+inputSchema: {
+type: "object",
+properties: {
+workspace_path: {
+type: "string",
+description: "Path to the workspace directory",
+},
+},
+required: ["workspace_path"],
+},
+},
+{
+name: "verify_refactor",
+description: "Run tests and compare against baseline to verify refactoring didn't break anything. Use this AFTER each extract_module step. Returns success if tests pass and match baseline, or detailed failure info if regression detected.",
+inputSchema: {
+type: "object",
+properties: {
+workspace_path: {
+type: "string",
+description: "Path to the workspace directory",
+},
+},
+required: ["workspace_path"],
+},
+},
+{
+name: "complete_refactor_step",
+description: "Mark a refactoring step as completed. Call this after successfully completing each step in the refactor plan.",
+inputSchema: {
+type: "object",
+properties: {
+workspace_path: {
+type: "string",
+description: "Path to the workspace directory",
+},
+step_id: {
+type: "number",
+description: "The step ID to mark as completed",
+},
+},
+required: ["workspace_path", "step_id"],
+},
+},
+{
+name: "get_next_refactor_step",
+description: "Get the next uncompleted step in the refactor plan. Returns the step details or null if all steps are complete.",
+inputSchema: {
+type: "object",
+properties: {
+workspace_path: {
+type: "string",
+description: "Path to the workspace directory",
+},
+},
+required: ["workspace_path"],
+},
+},
+{
+name: "run_tests",
+description: "Run Jest tests for the workspace. Returns test results including passed/failed counts and failure messages.",
+inputSchema: {
+type: "object",
+properties: {
+workspace_path: {
+type: "string",
+description: "Path to the workspace directory",
+},
+test_file: {
+type: "string",
+description: "Optional specific test file to run",
+},
+},
+required: ["workspace_path"],
+},
+},
+{
+name: "get_test_results",
+description: "Get the latest Jest test results for the workspace. Use this after running tests to see detailed results.",
+inputSchema: {
+type: "object",
+properties: {
+workspace_path: {
+type: "string",
+description: "Path to the workspace directory",
+},
+},
+required: ["workspace_path"],
+},
+},
 ],
 };
 });
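
The refactor plan these tools operate on is written by the Claudette server to .claudette/refactor-plan.json; its schema is not published in this package. Inferring only from the fields the handlers later in this diff read back (workspace, status, baseline, steps, files), the file plausibly looks something like the following sketch - an inference with example values, not an authoritative schema:

// Inferred shape of .claudette/refactor-plan.json, pieced together from the handler code below
const examplePlan = {
    workspace: "/home/user/projects/my-app",
    created: "2024-01-01T12:00:00.000Z",
    status: "in-progress",
    testFramework: "jest",
    hasTests: true,
    baseline: { passed: 120, timestamp: "2024-01-01T12:00:05.000Z" },
    totalFiles: 2,
    criticalCount: 1,
    warningCount: 1,
    currentStep: 1,
    steps: [
        { id: 1, type: "extract_module", description: "Extract script helpers", file: "dist/index.js", targetFile: "dist/scripts.js", functions: ["getScripts", "startScript"], completed: false },
        { id: 2, type: "verify_tests", description: "Run tests against the baseline", completed: false },
    ],
    files: [
        { relativePath: "dist/index.js", severity: "critical", lines: 882, functionCount: 20, fileSize: 38, suggestionCount: 1, suggestions: [{ description: "Extract script helpers", targetFile: "dist/scripts.js" }] },
    ],
};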
@@ -739,6 +494,42 @@ server.setRequestHandler(CallToolRequestSchema, async (request) => {
 saveWorkspaceConfig(config);
 return { content: [{ type: "text", text: `Added to project memory: "${note}"` }] };
 }
+case "clear_memory": {
+const workspacePath = args?.workspace_path;
+if (!workspacePath) {
+return { content: [{ type: "text", text: "Error: workspace_path is required" }] };
+}
+const success = clearMemory(workspacePath);
+if (success) {
+return { content: [{ type: "text", text: "Project memory cleared." }] };
+}
+else {
+return { content: [{ type: "text", text: "No memory to clear or workspace not found." }] };
+}
+}
+case "delete_memory": {
+const workspacePath = args?.workspace_path;
+const indices = args?.indices;
+if (!workspacePath || !indices || !Array.isArray(indices)) {
+return { content: [{ type: "text", text: "Error: workspace_path and indices array are required" }] };
+}
+const deleted = deleteMemoryNotes(workspacePath, indices);
+return { content: [{ type: "text", text: `Deleted ${deleted} note(s) from project memory.` }] };
+}
+case "replace_memory": {
+const workspacePath = args?.workspace_path;
+const notes = args?.notes;
+if (!workspacePath || !notes || !Array.isArray(notes)) {
+return { content: [{ type: "text", text: "Error: workspace_path and notes array are required" }] };
+}
+const success = replaceMemory(workspacePath, notes);
+if (success) {
+return { content: [{ type: "text", text: `Replaced memory with ${notes.length} new note(s).` }] };
+}
+else {
+return { content: [{ type: "text", text: "Failed to replace memory." }] };
+}
+}
 case "get_checkpoints": {
 const workspacePath = args?.workspace_path;
 if (!workspacePath) {
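
clearMemory, deleteMemoryNotes and replaceMemory come from the new workspace.js module, which this diff does not show. Given the getWorkspaceConfig/saveWorkspaceConfig pair that used to live in index.js (removed in the first hunk), a plausible sketch of the two list-editing helpers follows - a reconstruction that assumes notes are stored on a memory array of the per-workspace config, which the diff does not confirm:

// workspace.js - hypothetical sketch; getWorkspaceConfig/saveWorkspaceConfig are the config
// helpers that moved into this module, and config.memory is an assumed field name
export function deleteMemoryNotes(workspacePath, indices) {
    const config = getWorkspaceConfig(workspacePath);
    if (!config || !Array.isArray(config.memory))
        return 0;
    const toDelete = new Set(indices);
    const before = config.memory.length;
    config.memory = config.memory.filter((_, i) => !toDelete.has(i));
    saveWorkspaceConfig(config);
    return before - config.memory.length;
}

export function replaceMemory(workspacePath, notes) {
    const config = getWorkspaceConfig(workspacePath) || { path: workspacePath };
    config.memory = [...notes];
    saveWorkspaceConfig(config);
    return true;
}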
@@ -877,6 +668,341 @@ server.setRequestHandler(CallToolRequestSchema, async (request) => {
 return { content: [{ type: "text", text: "No pending quick actions from Claudette IDE." }] };
 }
 }
+case "create_refactor_plan": {
+const workspacePath = args?.workspace_path;
+if (!workspacePath) {
+return { content: [{ type: "text", text: "Error: workspace_path is required" }] };
+}
+try {
+const response = await fetch(`${CLAUDETTE_API}/api/refactor/analyze`, {
+method: "POST",
+headers: { "Content-Type": "application/json" },
+body: JSON.stringify({ path: workspacePath }),
+});
+const data = await response.json();
+if (data.success) {
+const plan = data.plan;
+let result = `Refactor Plan Created\n${"=".repeat(50)}\n\n`;
+result += `Total files needing attention: ${plan.totalFiles}\n`;
+result += ` - Critical: ${plan.criticalCount}\n`;
+result += ` - Warning: ${plan.warningCount}\n\n`;
+if (plan.files.length > 0) {
+result += `Files:\n`;
+for (const file of plan.files) {
+const icon = file.severity === 'critical' ? '🔴' : '⚠️';
+result += `${icon} ${file.relativePath}\n`;
+result += ` Lines: ${file.lines}, Functions: ${file.functionCount}, Size: ${file.fileSize}KB\n`;
+if (file.suggestionCount > 0) {
+result += ` Suggestions: ${file.suggestionCount}\n`;
+}
+}
+}
+result += `\nPlan saved to .claudette/refactor-plan.json`;
+result += `\nUse get_refactor_plan to see detailed suggestions.`;
+return { content: [{ type: "text", text: result }] };
+}
+else {
+return { content: [{ type: "text", text: `Failed to create refactor plan: ${data.error}` }] };
+}
+}
+catch (err) {
+return { content: [{ type: "text", text: `Error creating refactor plan: ${err.message}` }] };
+}
+}
+case "get_refactor_plan": {
+const workspacePath = args?.workspace_path;
+if (!workspacePath) {
+return { content: [{ type: "text", text: "Error: workspace_path is required" }] };
+}
+try {
+await logRefactorActivity(workspacePath, 'info', 'read_plan', 'Reading refactor plan...');
+const response = await fetch(`${CLAUDETTE_API}/api/refactor/plan?path=${encodeURIComponent(workspacePath)}`);
+if (response.status === 404) {
+return { content: [{ type: "text", text: "No refactor plan found. Use create_refactor_plan first." }] };
+}
+const plan = await response.json();
+await logRefactorActivity(workspacePath, 'info', 'read_plan', `Plan loaded: ${plan.steps?.length || 0} steps, ${plan.totalFiles} files`, { totalFiles: plan.totalFiles, stepsCount: plan.steps?.length });
+let result = `Refactor Plan\n${"=".repeat(50)}\n\n`;
+result += `Workspace: ${plan.workspace}\n`;
+result += `Created: ${plan.created}\n`;
+result += `Status: ${plan.status}\n`;
+// Show test framework and baseline info
+if (plan.testFramework) {
+result += `Test Framework: ${plan.testFramework}${plan.hasTests ? '' : ' (not detected)'}\n`;
+}
+if (plan.baseline) {
+result += `Baseline: ${plan.baseline.passed} tests passing (captured ${plan.baseline.timestamp})\n`;
+}
+result += `\n`;
+// Show steps if available
+if (plan.steps && plan.steps.length > 0) {
+result += `REFACTORING STEPS\n${"─".repeat(40)}\n`;
+for (const step of plan.steps) {
+const status = step.completed ? '✅' : (step.id === plan.currentStep ? '▶️' : '⬜');
+result += `${status} Step ${step.id}: [${step.type}] ${step.description}\n`;
+if (step.file) {
+result += ` File: ${step.file}\n`;
+}
+if (step.targetFile) {
+result += ` Target: ${step.targetFile}\n`;
+}
+if (step.functions && step.functions.length > 0) {
+result += ` Functions: ${step.functions.slice(0, 5).join(', ')}${step.functions.length > 5 ? '...' : ''}\n`;
+}
+}
+result += `\n`;
+}
+result += `FILES NEEDING REFACTORING\n${"─".repeat(40)}\n`;
+for (const file of plan.files) {
+const icon = file.severity === 'critical' ? '🔴 CRITICAL' : '⚠️ WARNING';
+result += `\n${icon}: ${file.relativePath}\n`;
+result += `Lines: ${file.lines} | Functions: ${file.functionCount} | Size: ${file.fileSize}KB\n`;
+if (file.suggestions && file.suggestions.length > 0) {
+for (const suggestion of file.suggestions) {
+result += ` 📦 ${suggestion.description}\n`;
+result += ` → ${suggestion.targetFile}\n`;
+}
+}
+}
+result += `\n${"=".repeat(50)}\n`;
+result += `WORKFLOW: Use get_next_refactor_step → do extraction → verify_refactor → complete_refactor_step\n`;
+result += `After all steps complete, call complete_refactor to clean up.`;
+return { content: [{ type: "text", text: result }] };
+}
+catch (err) {
+return { content: [{ type: "text", text: `Error reading refactor plan: ${err.message}` }] };
+}
+}
+case "complete_refactor": {
+const workspacePath = args?.workspace_path;
+if (!workspacePath) {
+return { content: [{ type: "text", text: "Error: workspace_path is required" }] };
+}
+try {
+const response = await fetch(`${CLAUDETTE_API}/api/refactor/plan?path=${encodeURIComponent(workspacePath)}`, {
+method: "DELETE",
+});
+const data = await response.json();
+if (data.success) {
+return { content: [{ type: "text", text: "Refactoring complete! Plan file has been deleted." }] };
+}
+else {
+return { content: [{ type: "text", text: "No refactor plan found to delete." }] };
+}
+}
+catch (err) {
+return { content: [{ type: "text", text: `Error completing refactor: ${err.message}` }] };
+}
+}
+case "verify_refactor": {
+const workspacePath = args?.workspace_path;
+if (!workspacePath) {
+return { content: [{ type: "text", text: "Error: workspace_path is required" }] };
+}
+try {
+await logRefactorActivity(workspacePath, 'info', 'verify', 'Running tests...');
+const response = await fetch(`${CLAUDETTE_API}/api/refactor/verify`, {
+method: "POST",
+headers: { "Content-Type": "application/json" },
+body: JSON.stringify({ path: workspacePath }),
+});
+const result = await response.json();
+// Log test results
+await logRefactorActivity(workspacePath, result.success ? 'success' : 'error', 'verify', result.success
+? `Tests passed: ${result.current?.passed}/${result.current?.total}`
+: `Tests failed: ${result.current?.failed} failures`, {
+testsPassed: result.current?.passed,
+testsFailed: result.current?.failed,
+testsTotal: result.current?.total,
+});
+let output = `Refactor Verification\n${"=".repeat(50)}\n\n`;
+output += `${result.success ? '✅ PASSED' : '❌ FAILED'}\n\n`;
+output += `${result.message}\n`;
+if (result.current) {
+output += `\nCurrent Test Results:\n`;
+output += ` Passed: ${result.current.passed}\n`;
+output += ` Failed: ${result.current.failed}\n`;
+output += ` Total: ${result.current.total}\n`;
+}
+if (result.comparison) {
+output += `\nComparison to Baseline:\n`;
+if (result.comparison.passedDiff !== 0) {
+output += ` Passed: ${result.comparison.passedDiff > 0 ? '+' : ''}${result.comparison.passedDiff}\n`;
+}
+if (result.comparison.failedDiff !== 0) {
+output += ` Failed: ${result.comparison.failedDiff > 0 ? '+' : ''}${result.comparison.failedDiff}\n`;
+}
+if (result.comparison.newTests.length > 0) {
+output += ` New tests: ${result.comparison.newTests.length}\n`;
+}
+if (result.comparison.removedTests.length > 0) {
+output += ` Removed tests: ${result.comparison.removedTests.join(', ')}\n`;
+}
+}
+return { content: [{ type: "text", text: output }] };
+}
+catch (err) {
+return { content: [{ type: "text", text: `Error verifying refactor: ${err.message}` }] };
+}
+}
+case "complete_refactor_step": {
+const workspacePath = args?.workspace_path;
+const stepId = args?.step_id;
+if (!workspacePath || stepId === undefined) {
+return { content: [{ type: "text", text: "Error: workspace_path and step_id are required" }] };
+}
+try {
+const response = await fetch(`${CLAUDETTE_API}/api/refactor/step/complete`, {
+method: "POST",
+headers: { "Content-Type": "application/json" },
+body: JSON.stringify({ path: workspacePath, stepId }),
+});
+const data = await response.json();
+if (data.success) {
+await logRefactorActivity(workspacePath, 'success', 'complete_step', `Step ${stepId} completed`, { stepId });
+return { content: [{ type: "text", text: `Step ${stepId} marked as completed. Use get_next_refactor_step to see what's next.` }] };
+}
+else {
+return { content: [{ type: "text", text: `Failed to complete step: ${data.error}` }] };
+}
+}
+catch (err) {
+return { content: [{ type: "text", text: `Error completing step: ${err.message}` }] };
+}
+}
+case "get_next_refactor_step": {
+const workspacePath = args?.workspace_path;
+if (!workspacePath) {
+return { content: [{ type: "text", text: "Error: workspace_path is required" }] };
+}
+try {
+const response = await fetch(`${CLAUDETTE_API}/api/refactor/step/next?path=${encodeURIComponent(workspacePath)}`);
+const data = await response.json();
+if (!data.hasNext) {
+await logRefactorActivity(workspacePath, 'success', 'all_complete', 'All refactoring steps completed!');
+return { content: [{ type: "text", text: "All refactoring steps completed! Use complete_refactor to clean up the plan." }] };
+}
+const step = data.step;
+// Log step info with file details
+await logRefactorActivity(workspacePath, 'step', 'next_step', `Step ${step.id}: ${step.description}`, { stepId: step.id, file: step.file });
+let output = `Next Refactoring Step\n${"=".repeat(50)}\n\n`;
+output += `Step ${step.id}: ${step.type.toUpperCase()}\n`;
+output += `Description: ${step.description}\n`;
+if (step.file) {
+output += `Source File: ${step.file}\n`;
+}
+if (step.targetFile) {
+output += `Target File: ${step.targetFile}\n`;
+}
+if (step.functions && step.functions.length > 0) {
+output += `Functions to extract:\n`;
+for (const fn of step.functions) {
+output += ` - ${fn}\n`;
+}
+}
+output += `\n${"─".repeat(40)}\n`;
+if (step.type === 'verify_tests') {
+output += `ACTION: Run verify_refactor to check tests against baseline.`;
+}
+else if (step.type === 'extract_module') {
+output += `ACTION: Extract the listed functions to the target file, then run verify_refactor.`;
+}
+else if (step.type === 'verify_build') {
+output += `ACTION: Run build command and verify_refactor for final check.`;
+}
+output += `\nAfter completing, call complete_refactor_step with step_id=${step.id}`;
+return { content: [{ type: "text", text: output }] };
+}
+catch (err) {
+return { content: [{ type: "text", text: `Error getting next step: ${err.message}` }] };
+}
+}
+case "run_tests": {
+const workspacePath = args?.workspace_path;
+const testFile = args?.test_file;
+if (!workspacePath) {
+return { content: [{ type: "text", text: "Error: workspace_path is required" }] };
+}
+try {
+// Start the tests
+const response = await fetch(`${CLAUDETTE_API}/api/jest/run`, {
+method: "POST",
+headers: { "Content-Type": "application/json" },
+body: JSON.stringify({ path: workspacePath, testFile }),
+});
+const data = await response.json();
+if (!data.success) {
+return { content: [{ type: "text", text: `Failed to start tests: ${data.error}` }] };
+}
+// Wait for tests to complete (poll for results)
+let attempts = 0;
+const maxAttempts = 60; // 60 seconds max
+while (attempts < maxAttempts) {
+await new Promise(resolve => setTimeout(resolve, 1000));
+attempts++;
+const statusRes = await fetch(`${CLAUDETTE_API}/api/jest/status?path=${encodeURIComponent(workspacePath)}`);
+const status = await statusRes.json();
+if (!status.isRunning && status.lastResult) {
+const result = status.lastResult;
+let output = `Test Results\n${"=".repeat(50)}\n\n`;
+output += `Status: ${result.numFailedTests === 0 ? "✅ PASSED" : "❌ FAILED"}\n`;
+output += `Suites: ${result.numPassedTestSuites}/${result.numTotalTestSuites} passed\n`;
+output += `Tests: ${result.numPassedTests}/${result.numTotalTests} passed\n`;
+if (result.numFailedTests > 0 && result.testResults) {
+output += `\nFailed Tests:\n`;
+for (const test of result.testResults) {
+if (test.status === 'failed') {
+output += `\n❌ ${test.name}\n`;
+if (test.failureMessages) {
+output += test.failureMessages.join('\n');
+}
+}
+}
+}
+return { content: [{ type: "text", text: output }] };
+}
+}
+return { content: [{ type: "text", text: "Tests are still running. Use get_test_results to check the results." }] };
+}
+catch (err) {
+return { content: [{ type: "text", text: `Error running tests: ${err.message}` }] };
+}
+}
+case "get_test_results": {
+const workspacePath = args?.workspace_path;
+if (!workspacePath) {
+return { content: [{ type: "text", text: "Error: workspace_path is required" }] };
+}
+try {
+const response = await fetch(`${CLAUDETTE_API}/api/jest/status?path=${encodeURIComponent(workspacePath)}`);
+const status = await response.json();
+if (status.isRunning) {
+return { content: [{ type: "text", text: "Tests are currently running..." }] };
+}
+if (!status.lastResult) {
+return { content: [{ type: "text", text: "No test results found. Run tests first with run_tests." }] };
+}
+const result = status.lastResult;
+let output = `Test Results\n${"=".repeat(50)}\n\n`;
+output += `Status: ${result.numFailedTests === 0 ? "✅ PASSED" : "❌ FAILED"}\n`;
+output += `Suites: ${result.numPassedTestSuites}/${result.numTotalTestSuites} passed\n`;
+output += `Tests: ${result.numPassedTests}/${result.numTotalTests} passed\n`;
+if (result.testResults && result.testResults.length > 0) {
+output += `\nTest Details:\n`;
+for (const test of result.testResults) {
+const icon = test.status === 'passed' ? '✅' : '❌';
+output += `${icon} ${test.name} (${test.duration}ms)\n`;
+if (test.status === 'failed' && test.failureMessages) {
+output += ` ${test.failureMessages.join('\n ')}\n`;
+}
+}
+}
+return { content: [{ type: "text", text: output }] };
+}
+catch (err) {
+return { content: [{ type: "text", text: `Error getting test results: ${err.message}` }] };
+}
+}
 default:
 return { content: [{ type: "text", text: `Unknown tool: ${name}` }] };
 }