@wonderwhy-er/desktop-commander 0.1.39 → 0.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +88 -5
- package/dist/config-manager.d.ts +2 -0
- package/dist/config-manager.js +3 -1
- package/dist/config.d.ts +1 -0
- package/dist/config.js +1 -0
- package/dist/handlers/filesystem-handlers.js +37 -3
- package/dist/handlers/fuzzy-search-log-handlers.d.ts +13 -0
- package/dist/handlers/fuzzy-search-log-handlers.js +179 -0
- package/dist/server.js +37 -19
- package/dist/setup-claude-server.js +0 -20
- package/dist/tools/edit.js +46 -5
- package/dist/tools/filesystem.d.ts +7 -5
- package/dist/tools/filesystem.js +52 -24
- package/dist/tools/schemas.d.ts +9 -0
- package/dist/tools/schemas.js +3 -0
- package/dist/utils/fuzzySearchLogger.d.ts +30 -0
- package/dist/utils/fuzzySearchLogger.js +126 -0
- package/dist/utils/trackTools.d.ts +6 -0
- package/dist/utils/trackTools.js +54 -0
- package/dist/version.d.ts +1 -1
- package/dist/version.js +1 -1
- package/package.json +7 -2
package/README.md
CHANGED

@@ -58,6 +58,10 @@ Execute long-running terminal commands on your computer and manage processes thr
 - Multiple file support
 - Pattern-based replacements
 - vscode-ripgrep based recursive code or text search in folders
+- Comprehensive audit logging:
+  - All tool calls are automatically logged
+  - Log rotation with 10MB size limit
+  - Detailed timestamps and arguments

 ## Installation
 First, ensure you've downloaded and installed the [Claude Desktop app](https://claude.ai/download) and you have [npm installed](https://docs.npmjs.com/downloading-and-installing-node-js-and-npm).

@@ -140,24 +144,24 @@ The server provides a comprehensive set of tools organized into several categori

 | Category | Tool | Description |
 |----------|------|-------------|
-| **Configuration** | `get_config` | Get the complete server configuration as JSON (includes blockedCommands, defaultShell, allowedDirectories) |
-| | `set_config_value` | Set a specific configuration value by key. Available settings: <br>• `blockedCommands`: Array of shell commands that cannot be executed<br>• `defaultShell`: Shell to use for commands (e.g., bash, zsh, powershell)<br>• `allowedDirectories`: Array of filesystem paths the server can access for file operations (⚠️ terminal commands can still access files outside these directories) |
+| **Configuration** | `get_config` | Get the complete server configuration as JSON (includes blockedCommands, defaultShell, allowedDirectories, fileReadLineLimit, fileWriteLineLimit, telemetryEnabled) |
+| | `set_config_value` | Set a specific configuration value by key. Available settings: <br>• `blockedCommands`: Array of shell commands that cannot be executed<br>• `defaultShell`: Shell to use for commands (e.g., bash, zsh, powershell)<br>• `allowedDirectories`: Array of filesystem paths the server can access for file operations (⚠️ terminal commands can still access files outside these directories)<br>• `fileReadLineLimit`: Maximum lines to read at once (default: 1000)<br>• `fileWriteLineLimit`: Maximum lines to write at once (default: 50)<br>• `telemetryEnabled`: Enable/disable telemetry (boolean) |
 | **Terminal** | `execute_command` | Execute a terminal command with configurable timeout and shell selection |
 | | `read_output` | Read new output from a running terminal session |
 | | `force_terminate` | Force terminate a running terminal session |
 | | `list_sessions` | List all active terminal sessions |
 | | `list_processes` | List all running processes with detailed information |
 | | `kill_process` | Terminate a running process by PID |
-| **Filesystem** | `read_file` | Read contents from local filesystem or URLs (supports
+| **Filesystem** | `read_file` | Read contents from local filesystem or URLs with line-based pagination (supports offset and length parameters) |
 | | `read_multiple_files` | Read multiple files simultaneously |
-| | `write_file` |
+| | `write_file` | Write file contents with options for rewrite or append mode (uses configurable line limits) |
 | | `create_directory` | Create a new directory or ensure it exists |
 | | `list_directory` | Get detailed listing of files and directories |
 | | `move_file` | Move or rename files and directories |
 | | `search_files` | Find files by name using case-insensitive substring matching |
 | | `search_code` | Search for text/code patterns within file contents using ripgrep |
 | | `get_file_info` | Retrieve detailed metadata about a file or directory |
-| **Text Editing** | `edit_block` | Apply
+| **Text Editing** | `edit_block` | Apply targeted text replacements with enhanced prompting for smaller edits (includes character-level diff feedback) |

 ### Tool Usage Examples

@@ -181,6 +185,18 @@ console.log("new message");
 >>>>>>> REPLACE
 ```

+### Enhanced Edit Block Features
+
+The `edit_block` tool includes several enhancements for better reliability:
+
+1. **Improved Prompting**: Tool descriptions now emphasize making multiple small, focused edits rather than one large change
+2. **Fuzzy Search Fallback**: When exact matches fail, it performs fuzzy search and provides detailed feedback
+3. **Character-level Diffs**: Shows exactly what's different using `{-removed-}{+added+}` format
+4. **Multiple Occurrence Support**: Can replace multiple instances with `expected_replacements` parameter
+5. **Comprehensive Logging**: All fuzzy searches are logged for analysis and debugging
+
+When a search fails, you'll see detailed information about the closest match found, including similarity percentage, execution time, and character differences. All these details are automatically logged for later analysis using the fuzzy search log tools.
+
 ### URL Support
 - `read_file` can now fetch content from both local files and URLs
 - Example: `read_file` with `isUrl: true` parameter to read from web resources

@@ -189,6 +205,69 @@ console.log("new message");
 - Claude can see and analyze the actual image content
 - Default 30-second timeout for URL requests

+## Fuzzy Search Log Analysis (npm scripts)
+
+The fuzzy search logging system includes convenient npm scripts for analyzing logs outside of the MCP environment:
+
+```bash
+# View recent fuzzy search logs
+npm run logs:view -- --count 20
+
+# Analyze patterns and performance
+npm run logs:analyze -- --threshold 0.8
+
+# Export logs to CSV or JSON
+npm run logs:export -- --format json --output analysis.json
+
+# Clear all logs (with confirmation)
+npm run logs:clear
+```
+
+For detailed documentation on these scripts, see [scripts/README.md](scripts/README.md).
+
+## Fuzzy Search Logs
+
+Desktop Commander includes comprehensive logging for fuzzy search operations in the `edit_block` tool. When an exact match isn't found, the system performs a fuzzy search and logs detailed information for analysis.
+
+### What Gets Logged
+
+Every fuzzy search operation logs:
+- **Search and found text**: The text you're looking for vs. what was found
+- **Similarity score**: How close the match is (0-100%)
+- **Execution time**: How long the search took
+- **Character differences**: Detailed diff showing exactly what's different
+- **File metadata**: Extension, search/found text lengths
+- **Character codes**: Specific character codes causing differences
+
+### Log Location
+
+Logs are automatically saved to:
+- **macOS/Linux**: `~/.claude-server-commander-logs/fuzzy-search.log`
+- **Windows**: `%USERPROFILE%\.claude-server-commander-logs\fuzzy-search.log`
+
+### What You'll Learn
+
+The fuzzy search logs help you understand:
+1. **Why exact matches fail**: Common issues like whitespace differences, line endings, or character encoding
+2. **Performance patterns**: How search complexity affects execution time
+3. **File type issues**: Which file extensions commonly have matching problems
+4. **Character encoding problems**: Specific character codes that cause diffs
+
+## Audit Logging
+
+Desktop Commander now includes comprehensive logging for all tool calls:
+
+### What Gets Logged
+- Every tool call is logged with timestamp, tool name, and arguments (sanitized for privacy)
+- Logs are rotated automatically when they reach 10MB in size
+
+### Log Location
+Logs are saved to:
+- **macOS/Linux**: `~/.claude-server-commander/claude_tool_call.log`
+- **Windows**: `%USERPROFILE%\.claude-server-commander\claude_tool_call.log`
+
+This audit trail helps with debugging, security monitoring, and understanding how Claude is interacting with your system.
+
 ## Handling Long-Running Commands

 For commands that may take a while:

@@ -297,6 +376,8 @@ This project extends the MCP Filesystem Server to enable:
 Created as part of exploring Claude MCPs: https://youtube.com/live/TlbjFDbl5Us

 ## DONE
+- **20-05-2025 v0.1.40 Release** - Added audit logging for all tool calls, improved line-based file operations, enhanced edit_block with better prompting for smaller edits, added explicit telemetry opt-out prompting
+- **05-05-2025 Fuzzy Search Logging** - Added comprehensive logging system for fuzzy search operations with detailed analysis tools, character-level diffs, and performance metrics to help debug edit_block failures
 - **29-04-2025 Telemetry Opt Out through configuration** - There is now setting to disable telemetry in config, ask in chat
 - **23-04-2025 Enhanced edit functionality** - Improved format, added fuzzy search and multi-occurrence replacements, should fail less and use edit block more often
 - **16-04-2025 Better configurations** - Improved settings for allowed paths, commands and shell environments

@@ -333,11 +414,13 @@ The following features are currently being explored:
 <ul style="list-style-type: none; padding: 0;">
   <li>🌟 <a href="https://github.com/sponsors/wonderwhy-er"><strong>GitHub Sponsors</strong></a> - Recurring support</li>
   <li>☕ <a href="https://www.buymeacoffee.com/wonderwhyer"><strong>Buy Me A Coffee</strong></a> - One-time contributions</li>
+  <li>💖 <a href="https://www.patreon.com/c/EduardsRuzga"><strong>Patreon</strong></a> - Become a patron and support us monthly</li>
   <li>⭐ <a href="https://github.com/wonderwhy-er/DesktopCommanderMCP"><strong>Star on GitHub</strong></a> - Help others discover the project</li>
 </ul>
 </div>
 </div>

+
 ### Supporters Hall of Fame

 Generous supporters are featured here. Thank you for helping make this project possible!
package/dist/config-manager.d.ts
CHANGED
package/dist/config-manager.js
CHANGED

@@ -101,7 +101,9 @@ class ConfigManager {
             ],
             defaultShell: os.platform() === 'win32' ? 'powershell.exe' : 'bash',
             allowedDirectories: [],
-            telemetryEnabled: true // Default to opt-out approach (telemetry on by default)
+            telemetryEnabled: true, // Default to opt-out approach (telemetry on by default)
+            fileWriteLineLimit: 50, // Default line limit for file write operations (changed from 100)
+            fileReadLineLimit: 1000 // Default line limit for file read operations (changed from character-based)
         };
     }
     /**
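The two new defaults above can be changed at runtime through the set_config_value tool. The exact argument shape of SetConfigValueArgsSchema is not expanded in this diff, so the key/value form below is an assumption based on the tool description; only the key names are confirmed.

```typescript
// Assumed { key, value } argument shape for set_config_value; the keys
// fileWriteLineLimit and fileReadLineLimit are the ones added in this release.
const raiseWriteLimit = { key: "fileWriteLineLimit", value: 100 };
const lowerReadLimit = { key: "fileReadLineLimit", value: 500 };
```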
package/dist/config.d.ts
CHANGED
package/dist/config.js
CHANGED

@@ -6,4 +6,5 @@ const CONFIG_DIR = path.join(USER_HOME, '.claude-server-commander');
 // Paths relative to the config directory
 export const CONFIG_FILE = path.join(CONFIG_DIR, 'config.json');
 export const TOOL_CALL_FILE = path.join(CONFIG_DIR, 'claude_tool_call.log');
+export const TOOL_CALL_FILE_MAX_SIZE = 1024 * 1024 * 10; // 10 MB
 export const DEFAULT_COMMAND_TIMEOUT = 1000; // milliseconds
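TOOL_CALL_FILE_MAX_SIZE pairs with the audit-log rotation mentioned in the README (rotate at 10MB). The rotation itself lives in dist/utils/trackTools.js, which this diff lists but does not expand, so the following is only a plausible sketch of a size-based rotation check, not the package's code.

```typescript
import { stat, rename } from 'fs/promises';

// Sketch only: rotate the audit log once it exceeds the 10 MB cap.
const TOOL_CALL_FILE = '/home/user/.claude-server-commander/claude_tool_call.log'; // hypothetical path
const TOOL_CALL_FILE_MAX_SIZE = 1024 * 1024 * 10;

async function rotateIfNeeded(): Promise<void> {
  try {
    const { size } = await stat(TOOL_CALL_FILE);
    if (size > TOOL_CALL_FILE_MAX_SIZE) {
      await rename(TOOL_CALL_FILE, `${TOOL_CALL_FILE}.${Date.now()}.old`);
    }
  } catch {
    // Missing file: nothing to rotate yet.
  }
}
```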
package/dist/handlers/filesystem-handlers.js
CHANGED

@@ -1,6 +1,7 @@
 import { readFile, readMultipleFiles, writeFile, createDirectory, listDirectory, moveFile, searchFiles, getFileInfo } from '../tools/filesystem.js';
 import { withTimeout } from '../utils/withTimeout.js';
 import { createErrorResponse } from '../error-handlers.js';
+import { configManager } from '../config-manager.js';
 import { ReadFileArgsSchema, ReadMultipleFilesArgsSchema, WriteFileArgsSchema, CreateDirectoryArgsSchema, ListDirectoryArgsSchema, MoveFileArgsSchema, SearchFilesArgsSchema, GetFileInfoArgsSchema } from '../tools/schemas.js';
 /**
  * Helper function to check if path contains an error

@@ -19,9 +20,22 @@ function getErrorFromPath(path) {
  */
 export async function handleReadFile(args) {
     const HANDLER_TIMEOUT = 60000; // 60 seconds total operation timeout
+    // Add input validation
+    if (args === null || args === undefined) {
+        return createErrorResponse('No arguments provided for read_file command');
+    }
     const readFileOperation = async () => {
         const parsed = ReadFileArgsSchema.parse(args);
-
+        // Get the configuration for file read limits
+        const config = await configManager.getConfig();
+        if (!config) {
+            return createErrorResponse('Configuration not available');
+        }
+        const defaultLimit = config.fileReadLineLimit ?? 1000;
+        // Use the provided limits or defaults
+        const offset = parsed.offset ?? 0;
+        const length = parsed.length ?? defaultLimit;
+        const fileResult = await readFile(parsed.path, parsed.isUrl, offset, length);
         if (fileResult.isImage) {
             // For image files, return as an image content type
             return {

@@ -103,9 +117,29 @@ export async function handleReadMultipleFiles(args) {
 export async function handleWriteFile(args) {
     try {
         const parsed = WriteFileArgsSchema.parse(args);
-
+        // Get the line limit from configuration
+        const config = await configManager.getConfig();
+        const MAX_LINES = config.fileWriteLineLimit ?? 50; // Default to 50 if not set
+        // Strictly enforce line count limit
+        const lines = parsed.content.split('\n');
+        const lineCount = lines.length;
+        let errorMessage = "";
+        if (lineCount > MAX_LINES) {
+            errorMessage = `File was written with warning: Line count limit exceeded: ${lineCount} lines (maximum: ${MAX_LINES}).
+
+SOLUTION: Split your content into smaller chunks:
+1. First chunk: write_file(path, firstChunk, {mode: 'rewrite'})
+2. Additional chunks: write_file(path, nextChunk, {mode: 'append'})`;
+        }
+        // Pass the mode parameter to writeFile
+        await writeFile(parsed.path, parsed.content, parsed.mode);
+        // Provide more informative message based on mode
+        const modeMessage = parsed.mode === 'append' ? 'appended to' : 'wrote to';
         return {
-            content: [{
+            content: [{
+                type: "text",
+                text: `Successfully ${modeMessage} ${parsed.path} (${lineCount} lines) ${errorMessage}`
+            }],
         };
     }
     catch (error) {
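Note that the handler above does not reject an oversized write: it performs the write and appends a warning to the success message. A hypothetical call that would take that path, assuming the default fileWriteLineLimit of 50:

```typescript
// Hypothetical write_file arguments: 120 lines of content against a limit of 50.
// Per the handler above, the file is still written and the response text carries
// the "Line count limit exceeded: 120 lines (maximum: 50)" warning with the
// suggested rewrite/append chunking steps.
const args = {
  path: "/home/user/project/big-file.txt", // hypothetical path
  content: Array.from({ length: 120 }, (_, i) => `line ${i + 1}`).join("\n"),
  mode: "rewrite" as const,
};
```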
package/dist/handlers/fuzzy-search-log-handlers.d.ts
ADDED

@@ -0,0 +1,13 @@
import { ServerResult } from '../types.js';
/**
 * View recent fuzzy search logs
 */
export declare function handleViewFuzzySearchLogs(args: unknown): Promise<ServerResult>;
/**
 * Analyze fuzzy search logs to identify patterns and issues
 */
export declare function handleAnalyzeFuzzySearchLogs(args: unknown): Promise<ServerResult>;
/**
 * Clear fuzzy search logs
 */
export declare function handleClearFuzzySearchLogs(args: unknown): Promise<ServerResult>;
package/dist/handlers/fuzzy-search-log-handlers.js
ADDED

@@ -0,0 +1,179 @@
import { fuzzySearchLogger } from '../utils/fuzzySearchLogger.js';
import { createErrorResponse } from '../error-handlers.js';
import { ViewFuzzySearchLogsArgsSchema, AnalyzeFuzzySearchLogsArgsSchema, ClearFuzzySearchLogsArgsSchema } from '../tools/schemas.js';
/**
 * View recent fuzzy search logs
 */
export async function handleViewFuzzySearchLogs(args) {
    try {
        const parsed = ViewFuzzySearchLogsArgsSchema.parse(args);
        const logs = await fuzzySearchLogger.getRecentLogs(parsed.count);
        const logPath = await fuzzySearchLogger.getLogPath();
        if (logs.length === 0) {
            return {
                content: [{
                    type: "text",
                    text: `No fuzzy search logs found. Log file location: ${logPath}`
                }],
            };
        }
        // Parse and format logs for better readability
        const formattedLogs = logs.map((log, index) => {
            const parts = log.split('\t');
            if (parts.length >= 16) {
                const [timestamp, searchText, foundText, similarity, executionTime, exactMatchCount, expectedReplacements, fuzzyThreshold, belowThreshold, diff, searchLength, foundLength, fileExtension, characterCodes, uniqueCharacterCount, diffLength] = parts;
                return `
--- Log Entry ${index + 1} ---
Timestamp: ${timestamp}
File Extension: ${fileExtension}
Search Text: ${searchText.replace(/\\n/g, '\n').replace(/\\t/g, '\t')}
Found Text: ${foundText.replace(/\\n/g, '\n').replace(/\\t/g, '\t')}
Similarity: ${(parseFloat(similarity) * 100).toFixed(2)}%
Execution Time: ${parseFloat(executionTime).toFixed(2)}ms
Exact Match Count: ${exactMatchCount}
Expected Replacements: ${expectedReplacements}
Below Threshold: ${belowThreshold}
Diff: ${diff.replace(/\\n/g, '\n').replace(/\\t/g, '\t')}
Search Length: ${searchLength}
Found Length: ${foundLength}
Character Codes: ${characterCodes}
Unique Characters: ${uniqueCharacterCount}
Diff Length: ${diffLength}
`;
            }
            return `Malformed log entry: ${log}`;
        }).join('\n');
        return {
            content: [{
                type: "text",
                text: `Recent Fuzzy Search Logs (${logs.length} entries):\n\n${formattedLogs}\n\nLog file location: ${logPath}`
            }],
        };
    }
    catch (error) {
        return createErrorResponse(`Failed to view fuzzy search logs: ${error instanceof Error ? error.message : String(error)}`);
    }
}
/**
 * Analyze fuzzy search logs to identify patterns and issues
 */
export async function handleAnalyzeFuzzySearchLogs(args) {
    try {
        const parsed = AnalyzeFuzzySearchLogsArgsSchema.parse(args);
        const logs = await fuzzySearchLogger.getRecentLogs(100); // Analyze more logs
        const logPath = await fuzzySearchLogger.getLogPath();
        if (logs.length === 0) {
            return {
                content: [{
                    type: "text",
                    text: `No fuzzy search logs found. Log file location: ${logPath}`
                }],
            };
        }
        // Parse logs and gather statistics
        let totalEntries = 0;
        let exactMatches = 0;
        let fuzzyMatches = 0;
        let failures = 0;
        let belowThresholdCount = 0;
        const executionTimes = [];
        const similarities = [];
        const fileExtensions = new Map();
        const commonCharacterCodes = new Map();
        for (const log of logs) {
            const parts = log.split('\t');
            if (parts.length >= 16) {
                totalEntries++;
                const [timestamp, searchText, foundText, similarity, executionTime, exactMatchCount, expectedReplacements, fuzzyThreshold, belowThreshold, diff, searchLength, foundLength, fileExtension, characterCodes, uniqueCharacterCount, diffLength] = parts;
                const simValue = parseFloat(similarity);
                const execTime = parseFloat(executionTime);
                const exactCount = parseInt(exactMatchCount);
                const belowThresh = belowThreshold === 'true';
                if (exactCount > 0) {
                    exactMatches++;
                }
                else if (simValue >= parsed.failureThreshold) {
                    fuzzyMatches++;
                }
                else {
                    failures++;
                }
                if (belowThresh) {
                    belowThresholdCount++;
                }
                executionTimes.push(execTime);
                similarities.push(simValue);
                // Track file extensions
                fileExtensions.set(fileExtension, (fileExtensions.get(fileExtension) || 0) + 1);
                // Track character codes that appear in diffs
                if (characterCodes && characterCodes !== '') {
                    const codes = characterCodes.split(',');
                    for (const code of codes) {
                        const key = code.split(':')[0];
                        commonCharacterCodes.set(key, (commonCharacterCodes.get(key) || 0) + 1);
                    }
                }
            }
        }
        // Calculate statistics
        const avgExecutionTime = executionTimes.reduce((a, b) => a + b, 0) / executionTimes.length;
        const avgSimilarity = similarities.reduce((a, b) => a + b, 0) / similarities.length;
        // Sort by frequency
        const sortedExtensions = Array.from(fileExtensions.entries()).sort((a, b) => b[1] - a[1]);
        const sortedCharCodes = Array.from(commonCharacterCodes.entries()).sort((a, b) => b[1] - a[1]);
        const analysis = `
=== Fuzzy Search Analysis ===

Total Entries: ${totalEntries}
Exact Matches: ${exactMatches} (${((exactMatches / totalEntries) * 100).toFixed(2)}%)
Fuzzy Matches: ${fuzzyMatches} (${((fuzzyMatches / totalEntries) * 100).toFixed(2)}%)
Failures: ${failures} (${((failures / totalEntries) * 100).toFixed(2)}%)
Below Threshold: ${belowThresholdCount} (${((belowThresholdCount / totalEntries) * 100).toFixed(2)}%)

Performance:
Average Execution Time: ${avgExecutionTime.toFixed(2)}ms
Average Similarity: ${(avgSimilarity * 100).toFixed(2)}%

File Extensions (Top 5):
${sortedExtensions.slice(0, 5).map(([ext, count]) => `${ext || 'none'}: ${count} times`).join('\n')}

Common Character Codes in Diffs (Top 5):
${sortedCharCodes.slice(0, 5).map(([code, count]) => {
    const charCode = parseInt(code);
    const char = String.fromCharCode(charCode);
    const display = charCode < 32 || charCode > 126 ? `\\x${charCode.toString(16).padStart(2, '0')}` : char;
    return `${code} [${display}]: ${count} times`;
}).join('\n')}

Log file location: ${logPath}
`;
        return {
            content: [{
                type: "text",
                text: analysis
            }],
        };
    }
    catch (error) {
        return createErrorResponse(`Failed to analyze fuzzy search logs: ${error instanceof Error ? error.message : String(error)}`);
    }
}
/**
 * Clear fuzzy search logs
 */
export async function handleClearFuzzySearchLogs(args) {
    try {
        ClearFuzzySearchLogsArgsSchema.parse(args);
        await fuzzySearchLogger.clearLog();
        const logPath = await fuzzySearchLogger.getLogPath();
        return {
            content: [{
                type: "text",
                text: `Fuzzy search logs cleared. Log file location: ${logPath}`
            }],
        };
    }
    catch (error) {
        return createErrorResponse(`Failed to clear fuzzy search logs: ${error instanceof Error ? error.message : String(error)}`);
    }
}
package/dist/server.js
CHANGED

@@ -3,8 +3,10 @@ import { CallToolRequestSchema, ListToolsRequestSchema, ListResourcesRequestSche
 import { zodToJsonSchema } from "zod-to-json-schema";
 // Shared constants for tool descriptions
 const PATH_GUIDANCE = `IMPORTANT: Always use absolute paths (starting with '/' or drive letter like 'C:\\') for reliability. Relative paths may fail as they depend on the current working directory. Tilde paths (~/...) might not work in all contexts. Unless the user explicitly asks for relative paths, use absolute paths.`;
+const CMD_PREFIX_DESCRIPTION = `This command can be referenced as "DC: ..." or "use Desktop Commander to ..." in your instructions.`;
 import { ExecuteCommandArgsSchema, ReadOutputArgsSchema, ForceTerminateArgsSchema, ListSessionsArgsSchema, KillProcessArgsSchema, ReadFileArgsSchema, ReadMultipleFilesArgsSchema, WriteFileArgsSchema, CreateDirectoryArgsSchema, ListDirectoryArgsSchema, MoveFileArgsSchema, SearchFilesArgsSchema, GetFileInfoArgsSchema, SearchCodeArgsSchema, GetConfigArgsSchema, SetConfigValueArgsSchema, ListProcessesArgsSchema, EditBlockArgsSchema, } from './tools/schemas.js';
 import { getConfig, setConfigValue } from './tools/config.js';
+import { trackToolCall } from './utils/trackTools.js';
 import { VERSION } from './version.js';
 import { capture } from "./utils/capture.js";
 console.error("Loading server.ts");

@@ -41,45 +43,58 @@ server.setRequestHandler(ListToolsRequestSchema, async () => {
         // Configuration tools
         {
             name: "get_config",
-            description:
+            description: `Get the complete server configuration as JSON. Config includes fields for: blockedCommands (array of blocked shell commands), defaultShell (shell to use for commands), allowedDirectories (paths the server can access), fileReadLineLimit (max lines for read_file, default 1000), fileWriteLineLimit (max lines per write_file call, default 50), telemetryEnabled (boolean for telemetry opt-in/out). ${CMD_PREFIX_DESCRIPTION}`,
             inputSchema: zodToJsonSchema(GetConfigArgsSchema),
         },
         {
             name: "set_config_value",
-            description:
+            description: `Set a specific configuration value by key. WARNING: Should be used in a separate chat from file operations and command execution to prevent security issues. Config keys include: blockedCommands (array), defaultShell (string), allowedDirectories (array of paths), fileReadLineLimit (number, max lines for read_file), fileWriteLineLimit (number, max lines per write_file call), telemetryEnabled (boolean). IMPORTANT: Setting allowedDirectories to an empty array ([]) allows full access to the entire file system, regardless of the operating system. ${CMD_PREFIX_DESCRIPTION}`,
             inputSchema: zodToJsonSchema(SetConfigValueArgsSchema),
         },
         // Filesystem tools
         {
             name: "read_file",
-            description: `Read the
+            description: `Read the contents of a file from the file system or a URL with optional offset and length parameters. Prefer this over 'execute_command' with cat/type for viewing files. Supports partial file reading with 'offset' (start line, default: 0) and 'length' (max lines to read, default: configurable via 'fileReadLineLimit' setting, initially 1000). When reading from the file system, only works within allowed directories. Can fetch content from URLs when isUrl parameter is set to true (URLs are always read in full regardless of offset/length). Handles text files normally and image files are returned as viewable images. Recognized image types: PNG, JPEG, GIF, WebP. ${PATH_GUIDANCE} ${CMD_PREFIX_DESCRIPTION}`,
             inputSchema: zodToJsonSchema(ReadFileArgsSchema),
         },
         {
             name: "read_multiple_files",
-            description: `Read the contents of multiple files simultaneously. Each file's content is returned with its path as a reference. Handles text files normally and renders images as viewable content. Recognized image types: PNG, JPEG, GIF, WebP. Failed reads for individual files won't stop the entire operation. Only works within allowed directories. ${PATH_GUIDANCE}`,
+            description: `Read the contents of multiple files simultaneously. Each file's content is returned with its path as a reference. Handles text files normally and renders images as viewable content. Recognized image types: PNG, JPEG, GIF, WebP. Failed reads for individual files won't stop the entire operation. Only works within allowed directories. ${PATH_GUIDANCE} ${CMD_PREFIX_DESCRIPTION}`,
             inputSchema: zodToJsonSchema(ReadMultipleFilesArgsSchema),
         },
         {
             name: "write_file",
-            description: `
+            description: `Write or append to file contents with a configurable line limit per call (default: 50 lines). THIS IS A STRICT REQUIREMENT. ANY file with more than the configured limit MUST BE written in chunks or IT WILL FAIL.
+
+            NEVER attempt to write more than the configured line limit at once.
+
+            REQUIRED PROCESS FOR LARGE FILES:
+            1. FIRST → write_file(filePath, firstChunk, {mode: 'rewrite'})
+            2. THEN → write_file(filePath, secondChunk, {mode: 'append'})
+            3. THEN → write_file(filePath, thirdChunk, {mode: 'append'})
+            ... and so on for each chunk
+
+            If asked to continue writing do not restart from beginning, read end of file to see where you stopped and continue from there
+
+            Files over the line limit (configurable via 'fileWriteLineLimit' setting) WILL BE REJECTED if not broken into chunks as described above.
+            Only works within allowed directories. ${PATH_GUIDANCE} ${CMD_PREFIX_DESCRIPTION}`,
             inputSchema: zodToJsonSchema(WriteFileArgsSchema),
         },
         {
             name: "create_directory",
-            description: `Create a new directory or ensure a directory exists. Can create multiple nested directories in one operation. Only works within allowed directories. ${PATH_GUIDANCE}`,
+            description: `Create a new directory or ensure a directory exists. Can create multiple nested directories in one operation. Only works within allowed directories. ${PATH_GUIDANCE} ${CMD_PREFIX_DESCRIPTION}`,
             inputSchema: zodToJsonSchema(CreateDirectoryArgsSchema),
         },
         {
             name: "list_directory",
-            description: `Get a detailed listing of all files and directories in a specified path. Use this instead of 'execute_command' with ls/dir commands. Results distinguish between files and directories with [FILE] and [DIR] prefixes. Only works within allowed directories. ${PATH_GUIDANCE}`,
+            description: `Get a detailed listing of all files and directories in a specified path. Use this instead of 'execute_command' with ls/dir commands. Results distinguish between files and directories with [FILE] and [DIR] prefixes. Only works within allowed directories. ${PATH_GUIDANCE} ${CMD_PREFIX_DESCRIPTION}`,
             inputSchema: zodToJsonSchema(ListDirectoryArgsSchema),
         },
         {
             name: "move_file",
             description: `Move or rename files and directories.
             Can move files between directories and rename them in a single operation.
-            Both source and destination must be within allowed directories. ${PATH_GUIDANCE}`,
+            Both source and destination must be within allowed directories. ${PATH_GUIDANCE} ${CMD_PREFIX_DESCRIPTION}`,
             inputSchema: zodToJsonSchema(MoveFileArgsSchema),
         },
         {

@@ -88,7 +103,7 @@ server.setRequestHandler(ListToolsRequestSchema, async () => {
             Use this instead of 'execute_command' with find/dir/ls for locating files.
             Searches through all subdirectories from the starting path.
             Has a default timeout of 30 seconds which can be customized using the timeoutMs parameter.
-            Only searches within allowed directories. ${PATH_GUIDANCE}`,
+            Only searches within allowed directories. ${PATH_GUIDANCE} ${CMD_PREFIX_DESCRIPTION}`,
             inputSchema: zodToJsonSchema(SearchFilesArgsSchema),
         },
         {

@@ -99,14 +114,14 @@ server.setRequestHandler(ListToolsRequestSchema, async () => {
             Supports regular expressions, file pattern filtering, and context lines.
             Has a default timeout of 30 seconds which can be customized.
             Only searches within allowed directories.
-            ${PATH_GUIDANCE}`,
+            ${PATH_GUIDANCE} ${CMD_PREFIX_DESCRIPTION}`,
             inputSchema: zodToJsonSchema(SearchCodeArgsSchema),
         },
         {
             name: "get_file_info",
             description: `Retrieve detailed metadata about a file or directory including size, creation time, last modified time,
             permissions, and type.
-            Only works within allowed directories. ${PATH_GUIDANCE}`,
+            Only works within allowed directories. ${PATH_GUIDANCE} ${CMD_PREFIX_DESCRIPTION}`,
             inputSchema: zodToJsonSchema(GetFileInfoArgsSchema),
         },
         // Note: list_allowed_directories removed - use get_config to check allowedDirectories

@@ -121,8 +136,9 @@ server.setRequestHandler(ListToolsRequestSchema, async () => {
             To replace multiple occurrences, provide the expected_replacements parameter with the exact number of matches expected.
             UNIQUENESS REQUIREMENT: When expected_replacements=1 (default), include the minimal amount of context necessary (typically 1-3 lines) before and after the change point, with exact whitespace and indentation.
             When editing multiple sections, make separate edit_block calls for each distinct change rather than one large replacement.
-            When a close but non-exact match is found, a character-level diff is shown in the format: common_prefix{-removed-}{+added+}common_suffix to help you identify what's different.
-
+            When a close but non-exact match is found, a character-level diff is shown in the format: common_prefix{-removed-}{+added+}common_suffix to help you identify what's different.
+            Similar to write_file, there is a configurable line limit (fileWriteLineLimit) that warns if the edited file exceeds this limit. If this happens, consider breaking your edits into smaller, more focused changes.
+            ${PATH_GUIDANCE} ${CMD_PREFIX_DESCRIPTION}`,
             inputSchema: zodToJsonSchema(EditBlockArgsSchema),
         },
         // Terminal tools

@@ -131,32 +147,32 @@ server.setRequestHandler(ListToolsRequestSchema, async () => {
             description: `Execute a terminal command with timeout.
             Command will continue running in background if it doesn't complete within timeout.
             NOTE: For file operations, prefer specialized tools like read_file, search_code, list_directory instead of cat, grep, or ls commands.
-            ${PATH_GUIDANCE}`,
+            ${PATH_GUIDANCE} ${CMD_PREFIX_DESCRIPTION}`,
             inputSchema: zodToJsonSchema(ExecuteCommandArgsSchema),
         },
         {
             name: "read_output",
-            description:
+            description: `Read new output from a running terminal session. ${CMD_PREFIX_DESCRIPTION}`,
             inputSchema: zodToJsonSchema(ReadOutputArgsSchema),
         },
         {
             name: "force_terminate",
-            description:
+            description: `Force terminate a running terminal session. ${CMD_PREFIX_DESCRIPTION}`,
             inputSchema: zodToJsonSchema(ForceTerminateArgsSchema),
         },
         {
             name: "list_sessions",
-            description:
+            description: `List all active terminal sessions. ${CMD_PREFIX_DESCRIPTION}`,
             inputSchema: zodToJsonSchema(ListSessionsArgsSchema),
         },
         {
             name: "list_processes",
-            description:
+            description: `List all running processes. Returns process information including PID, command name, CPU usage, and memory usage. ${CMD_PREFIX_DESCRIPTION}`,
             inputSchema: zodToJsonSchema(ListProcessesArgsSchema),
         },
         {
             name: "kill_process",
-            description:
+            description: `Terminate a running process by PID. Use with caution as this will forcefully terminate the specified process. ${CMD_PREFIX_DESCRIPTION}`,
             inputSchema: zodToJsonSchema(KillProcessArgsSchema),
         },
     ],

@@ -174,6 +190,8 @@ server.setRequestHandler(CallToolRequestSchema, async (request) => {
     capture('server_call_tool', {
         name
     });
+    // Track tool call
+    trackToolCall(name, args);
     // Using a more structured approach with dedicated handlers
     switch (name) {
         // Config tools
package/dist/setup-claude-server.js
CHANGED

@@ -385,26 +385,6 @@ function updateSetupStep(index, status, error = null) {
     }
 }

-try {
-    // Only dependency is node-machine-id
-    const machineIdInitStep = addSetupStep('initialize_machine_id');
-    try {
-        const machineIdModule = await import('node-machine-id');
-        // Get a unique user ID
-        uniqueUserId = machineIdModule.machineIdSync();
-        updateSetupStep(machineIdInitStep, 'completed');
-    } catch (error) {
-        // Fall back to a semi-unique identifier if machine-id is not available
-        uniqueUserId = `${platform()}-${process.env.USER || process.env.USERNAME || 'unknown'}-${Date.now()}`;
-        updateSetupStep(machineIdInitStep, 'fallback', error);
-    }
-} catch (error) {
-    addSetupStep('initialize_machine_id', 'failed', error);
-}
-
-
-
-
 async function execAsync(command) {
     const execStep = addSetupStep(`exec_${command.substring(0, 20)}...`);
     return new Promise((resolve, reject) => {
package/dist/tools/edit.js
CHANGED

@@ -4,6 +4,8 @@ import { capture } from '../utils/capture.js';
 import { EditBlockArgsSchema } from "./schemas.js";
 import path from 'path';
 import { detectLineEnding, normalizeLineEndings } from '../utils/lineEndingHandler.js';
+import { configManager } from '../config-manager.js';
+import { fuzzySearchLogger } from '../utils/fuzzySearchLogger.js';
 /**
  * Threshold for fuzzy matching - similarity must be at least this value to be considered
  * (0-1 scale where 1 is perfect match and 0 is completely different)

@@ -76,11 +78,14 @@ export async function performSearchReplace(filePath, block, expectedReplacements
     // Capture file extension in telemetry without capturing the file path
     capture('server_edit_block', { fileExtension: fileExtension });
     // Read file as plain string
-    const { content } = await readFile(filePath);
+    const { content } = await readFile(filePath, false, 0, Number.MAX_SAFE_INTEGER);
     // Make sure content is a string
     if (typeof content !== 'string') {
         throw new Error('Wrong content for file ' + filePath);
     }
+    // Get the line limit from configuration
+    const config = await configManager.getConfig();
+    const MAX_LINES = config.fileWriteLineLimit ?? 50; // Default to 50 if not set
     // Detect file's line ending style
     const fileLineEnding = detectLineEnding(content);
     // Normalize search string to match file's line endings

@@ -109,11 +114,22 @@ export async function performSearchReplace(filePath, block, expectedReplacements
             // Replace all occurrences using split and join for multiple replacements
             newContent = newContent.split(normalizedSearch).join(normalizeLineEndings(block.replace, fileLineEnding));
         }
+        // Check if search or replace text has too many lines
+        const searchLines = block.search.split('\n').length;
+        const replaceLines = block.replace.split('\n').length;
+        const maxLines = Math.max(searchLines, replaceLines);
+        let warningMessage = "";
+        if (maxLines > MAX_LINES) {
+            const problemText = searchLines > replaceLines ? 'search text' : 'replacement text';
+            warningMessage = `\n\nWARNING: The ${problemText} has ${maxLines} lines (maximum: ${MAX_LINES}).
+
+RECOMMENDATION: For large search/replace operations, consider breaking them into smaller chunks with fewer lines.`;
+        }
         await writeFile(filePath, newContent);
         return {
             content: [{
                 type: "text",
-                text: `Successfully applied ${expectedReplacements} edit${expectedReplacements > 1 ? 's' : ''} to ${filePath}`
+                text: `Successfully applied ${expectedReplacements} edit${expectedReplacements > 1 ? 's' : ''} to ${filePath}${warningMessage}`
             }],
         };
     }

@@ -142,6 +158,27 @@ export async function performSearchReplace(filePath, block, expectedReplacements
     const diff = highlightDifferences(block.search, fuzzyResult.value);
     // Count character codes in diff
     const characterCodeData = getCharacterCodeData(block.search, fuzzyResult.value);
+    // Create comprehensive log entry
+    const logEntry = {
+        timestamp: new Date(),
+        searchText: block.search,
+        foundText: fuzzyResult.value,
+        similarity: similarity,
+        executionTime: executionTime,
+        exactMatchCount: count,
+        expectedReplacements: expectedReplacements,
+        fuzzyThreshold: FUZZY_THRESHOLD,
+        belowThreshold: similarity < FUZZY_THRESHOLD,
+        diff: diff,
+        searchLength: block.search.length,
+        foundLength: fuzzyResult.value.length,
+        fileExtension: fileExtension,
+        characterCodes: characterCodeData.report,
+        uniqueCharacterCount: characterCodeData.uniqueCount,
+        diffLength: characterCodeData.diffLength
+    };
+    // Log to file
+    await fuzzySearchLogger.log(logEntry);
     // Combine all fuzzy search data for single capture
     const fuzzySearchData = {
         similarity: similarity,

@@ -165,8 +202,10 @@ export async function performSearchReplace(filePath, block, expectedReplacements
                 type: "text",
                 text: `Exact match not found, but found a similar text with ${Math.round(similarity * 100)}% similarity (found in ${executionTime.toFixed(2)}ms):\n\n` +
                     `Differences:\n${diff}\n\n` +
-                    `To replace this text, use the exact text found in the file
-
+                    `To replace this text, use the exact text found in the file.\n\n` +
+                    `Log entry saved for analysis. Use the following command to check the log:\n` +
+                    `Check log: ${await fuzzySearchLogger.getLogPath()}`
+            }], // TODO
         };
     }
     else {

@@ -181,7 +220,9 @@ export async function performSearchReplace(filePath, block, expectedReplacements
                 type: "text",
                 text: `Search content not found in ${filePath}. The closest match was "${fuzzyResult.value}" ` +
                     `with only ${Math.round(similarity * 100)}% similarity, which is below the ${Math.round(FUZZY_THRESHOLD * 100)}% threshold. ` +
-                    `(Fuzzy search completed in ${executionTime.toFixed(2)}ms)`
+                    `(Fuzzy search completed in ${executionTime.toFixed(2)}ms)\n\n` +
+                    `Log entry saved for analysis. Use the following command to check the log:\n` +
+                    `Check log: ${await fuzzySearchLogger.getLogPath()}`
             }],
         };
     }
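The logEntry.diff value above comes from highlightDifferences, whose output format is described in the edit_block tool description as common_prefix{-removed-}{+added+}common_suffix. The implementation itself is not part of this diff, so the sample output below is illustrative only.

```typescript
// Illustrative only: the shape of the character-level diff that edit_block
// reports (and logs) when an exact match fails.
const searchText = 'const timeout = 1000;';
const foundText = 'const timeout = 3000;';
// A diff in the documented format would read:
// "const timeout = {-1-}{+3+}000;"
```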
package/dist/tools/filesystem.d.ts
CHANGED

@@ -22,19 +22,21 @@ export declare function readFileFromUrl(url: string): Promise<FileResult>;
 /**
  * Read file content from the local filesystem
  * @param filePath Path to the file
- * @param
+ * @param offset Starting line number to read from (default: 0)
+ * @param length Maximum number of lines to read (default: from config or 1000)
  * @returns File content or file result with metadata
  */
-export declare function readFileFromDisk(filePath: string): Promise<FileResult>;
+export declare function readFileFromDisk(filePath: string, offset?: number, length?: number): Promise<FileResult>;
 /**
  * Read a file from either the local filesystem or a URL
  * @param filePath Path to the file or URL
- * @param returnMetadata Whether to return metadata with the content
  * @param isUrl Whether the path is a URL
+ * @param offset Starting line number to read from (default: 0)
+ * @param length Maximum number of lines to read (default: from config or 1000)
  * @returns File content or file result with metadata
  */
-export declare function readFile(filePath: string, isUrl?: boolean): Promise<FileResult>;
-export declare function writeFile(filePath: string, content: string): Promise<void>;
+export declare function readFile(filePath: string, isUrl?: boolean, offset?: number, length?: number): Promise<FileResult>;
+export declare function writeFile(filePath: string, content: string, mode?: 'rewrite' | 'append'): Promise<void>;
 export interface MultiFileResult {
     path: string;
     content?: string;
package/dist/tools/filesystem.js
CHANGED

@@ -208,38 +208,41 @@ export async function readFileFromUrl(url) {
 /**
  * Read file content from the local filesystem
  * @param filePath Path to the file
- * @param
+ * @param offset Starting line number to read from (default: 0)
+ * @param length Maximum number of lines to read (default: from config or 1000)
  * @returns File content or file result with metadata
  */
-export async function readFileFromDisk(filePath) {
+export async function readFileFromDisk(filePath, offset = 0, length) {
+    // Add validation for required parameters
+    if (!filePath || typeof filePath !== 'string') {
+        throw new Error('Invalid file path provided');
+    }
     // Import the MIME type utilities
     const { getMimeType, isImageFile } = await import('./mime-types.js');
+    // Get default length from config if not provided
+    if (length === undefined) {
+        const config = await configManager.getConfig();
+        length = config.fileReadLineLimit ?? 1000; // Default to 1000 lines if not set
+    }
     const validPath = await validatePath(filePath);
     // Get file extension for telemetry using path module consistently
     const fileExtension = path.extname(validPath).toLowerCase();
     // Check file size before attempting to read
     try {
         const stats = await fs.stat(validPath);
-        const MAX_SIZE = 100 * 1024; // 100KB limit
-        if (stats.size > MAX_SIZE) {
-            const message = `File too large (${(stats.size / 1024).toFixed(2)}KB > ${MAX_SIZE / 1024}KB limit)`;
-            // Capture file extension in telemetry without capturing the file path
-            capture('server_read_file_large', { fileExtension: fileExtension });
-            return {
-                content: message,
-                mimeType: 'text/plain',
-                isImage: false
-            };
-        }
         // Capture file extension in telemetry without capturing the file path
-        capture('server_read_file', {
+        capture('server_read_file', {
+            fileExtension: fileExtension,
+            offset: offset,
+            length: length,
+            fileSize: stats.size
+        });
     }
     catch (error) {
         console.error('error catch ' + error);
         const errorMessage = error instanceof Error ? error.message : String(error);
         capture('server_read_file_error', { error: errorMessage, fileExtension: fileExtension });
         // If we can't stat the file, continue anyway and let the read operation handle errors
-        //console.error(`Failed to stat file ${validPath}:`, error);
     }
     // Detect the MIME type based on file extension
     const mimeType = getMimeType(validPath);

@@ -249,15 +252,30 @@ export async function readFileFromDisk(filePath) {
     const readOperation = async () => {
         if (isImage) {
             // For image files, read as Buffer and convert to base64
+            // Images are always read in full, ignoring offset and length
             const buffer = await fs.readFile(validPath);
             const content = buffer.toString('base64');
             return { content, mimeType, isImage };
         }
         else {
-            // For all other files, try to read as UTF-8 text
+            // For all other files, try to read as UTF-8 text with line-based offset and length
             try {
+                // Read the entire file first
                 const buffer = await fs.readFile(validPath);
-                const
+                const fullContent = buffer.toString('utf-8');
+                // Split into lines for line-based access
+                const lines = fullContent.split('\n');
+                const totalLines = lines.length;
+                // Apply line-based offset and length
+                const startLine = Math.min(offset, totalLines);
+                const endLine = Math.min(startLine + length, totalLines);
+                const selectedLines = lines.slice(startLine, endLine);
+                const truncatedContent = selectedLines.join('\n');
+                // Add an informational message if truncated
+                let content = truncatedContent;
+                if (offset > 0 || endLine < totalLines) {
+                    content = `[Reading ${endLine - startLine} lines from line ${offset} of ${totalLines} total lines]\n\n${truncatedContent}`;
+                }
                 return { content, mimeType, isImage };
             }
             catch (error) {

@@ -279,22 +297,32 @@ export async function readFileFromDisk(filePath) {
 /**
  * Read a file from either the local filesystem or a URL
  * @param filePath Path to the file or URL
- * @param returnMetadata Whether to return metadata with the content
  * @param isUrl Whether the path is a URL
+ * @param offset Starting line number to read from (default: 0)
+ * @param length Maximum number of lines to read (default: from config or 1000)
  * @returns File content or file result with metadata
  */
-export async function readFile(filePath, isUrl) {
+export async function readFile(filePath, isUrl, offset, length) {
     return isUrl
         ? readFileFromUrl(filePath)
-        : readFileFromDisk(filePath);
+        : readFileFromDisk(filePath, offset, length);
 }
-export async function writeFile(filePath, content) {
+export async function writeFile(filePath, content, mode = 'rewrite') {
     const validPath = await validatePath(filePath);
     // Get file extension for telemetry
     const fileExtension = path.extname(validPath).toLowerCase();
-    // Capture file extension in telemetry without capturing the file path
-    capture('server_write_file', {
-
+    // Capture file extension and operation details in telemetry without capturing the file path
+    capture('server_write_file', {
+        fileExtension: fileExtension,
+        mode: mode,
+    });
+    // Use different fs methods based on mode
+    if (mode === 'append') {
+        await fs.appendFile(validPath, content);
+    }
+    else {
+        await fs.writeFile(validPath, content);
+    }
 }
 export async function readMultipleFiles(paths) {
     return Promise.all(paths.map(async (filePath) => {
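The line-based pagination added to readFileFromDisk can be summarised with a small standalone sketch that mirrors the slicing logic shown above, including the informational header prepended when the result is truncated.

```typescript
// Minimal sketch mirroring the offset/length slicing in readFileFromDisk above.
function sliceLines(fullContent: string, offset: number, length: number): string {
  const lines = fullContent.split('\n');
  const totalLines = lines.length;
  const startLine = Math.min(offset, totalLines);
  const endLine = Math.min(startLine + length, totalLines);
  const selected = lines.slice(startLine, endLine).join('\n');
  // When the read is partial, a bracketed header reports what was returned.
  return offset > 0 || endLine < totalLines
    ? `[Reading ${endLine - startLine} lines from line ${offset} of ${totalLines} total lines]\n\n${selected}`
    : selected;
}
```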
package/dist/tools/schemas.d.ts
CHANGED

@@ -49,12 +49,18 @@ export declare const KillProcessArgsSchema: z.ZodObject<{
 export declare const ReadFileArgsSchema: z.ZodObject<{
     path: z.ZodString;
     isUrl: z.ZodDefault<z.ZodOptional<z.ZodBoolean>>;
+    offset: z.ZodDefault<z.ZodOptional<z.ZodNumber>>;
+    length: z.ZodDefault<z.ZodOptional<z.ZodNumber>>;
 }, "strip", z.ZodTypeAny, {
     path: string;
+    length: number;
     isUrl: boolean;
+    offset: number;
 }, {
     path: string;
+    length?: number | undefined;
     isUrl?: boolean | undefined;
+    offset?: number | undefined;
 }>;
 export declare const ReadMultipleFilesArgsSchema: z.ZodObject<{
     paths: z.ZodArray<z.ZodString, "many">;

@@ -66,12 +72,15 @@ export declare const ReadMultipleFilesArgsSchema: z.ZodObject<{
 export declare const WriteFileArgsSchema: z.ZodObject<{
     path: z.ZodString;
     content: z.ZodString;
+    mode: z.ZodDefault<z.ZodEnum<["rewrite", "append"]>>;
 }, "strip", z.ZodTypeAny, {
     path: string;
     content: string;
+    mode: "rewrite" | "append";
 }, {
     path: string;
     content: string;
+    mode?: "rewrite" | "append" | undefined;
 }>;
 export declare const CreateDirectoryArgsSchema: z.ZodObject<{
     path: z.ZodString;
package/dist/tools/schemas.js
CHANGED
|
@@ -28,6 +28,8 @@ export const KillProcessArgsSchema = z.object({
|
|
|
28
28
|
export const ReadFileArgsSchema = z.object({
|
|
29
29
|
path: z.string(),
|
|
30
30
|
isUrl: z.boolean().optional().default(false),
|
|
31
|
+
offset: z.number().optional().default(0),
|
|
32
|
+
length: z.number().optional().default(1000),
|
|
31
33
|
});
|
|
32
34
|
export const ReadMultipleFilesArgsSchema = z.object({
|
|
33
35
|
paths: z.array(z.string()),
|
|
@@ -35,6 +37,7 @@ export const ReadMultipleFilesArgsSchema = z.object({
|
|
|
35
37
|
export const WriteFileArgsSchema = z.object({
|
|
36
38
|
path: z.string(),
|
|
37
39
|
content: z.string(),
|
|
40
|
+
mode: z.enum(['rewrite', 'append']).default('rewrite'),
|
|
38
41
|
});
|
|
39
42
|
export const CreateDirectoryArgsSchema = z.object({
|
|
40
43
|
path: z.string(),
|
|
package/dist/utils/fuzzySearchLogger.d.ts
ADDED
|
@@ -0,0 +1,30 @@
|
|
|
1
|
+
export interface FuzzySearchLogEntry {
|
|
2
|
+
timestamp: Date;
|
|
3
|
+
searchText: string;
|
|
4
|
+
foundText: string;
|
|
5
|
+
similarity: number;
|
|
6
|
+
executionTime: number;
|
|
7
|
+
exactMatchCount: number;
|
|
8
|
+
expectedReplacements: number;
|
|
9
|
+
fuzzyThreshold: number;
|
|
10
|
+
belowThreshold: boolean;
|
|
11
|
+
diff: string;
|
|
12
|
+
searchLength: number;
|
|
13
|
+
foundLength: number;
|
|
14
|
+
fileExtension: string;
|
|
15
|
+
characterCodes: string;
|
|
16
|
+
uniqueCharacterCount: number;
|
|
17
|
+
diffLength: number;
|
|
18
|
+
}
|
|
19
|
+
declare class FuzzySearchLogger {
|
|
20
|
+
private logPath;
|
|
21
|
+
private initialized;
|
|
22
|
+
constructor();
|
|
23
|
+
private ensureLogFile;
|
|
24
|
+
log(entry: FuzzySearchLogEntry): Promise<void>;
|
|
25
|
+
getLogPath(): Promise<string>;
|
|
26
|
+
getRecentLogs(count?: number): Promise<string[]>;
|
|
27
|
+
clearLog(): Promise<void>;
|
|
28
|
+
}
|
|
29
|
+
export declare const fuzzySearchLogger: FuzzySearchLogger;
|
|
30
|
+
export {};
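For orientation, a hedged sketch of constructing and logging a `FuzzySearchLogEntry` in TypeScript; every value below is invented for illustration, and the unit of `executionTime` is assumed to be milliseconds.

```ts
// Illustrative only: a hypothetical entry for a match below the similarity threshold.
import { fuzzySearchLogger, type FuzzySearchLogEntry } from './dist/utils/fuzzySearchLogger.js';

const entry: FuzzySearchLogEntry = {
  timestamp: new Date(),
  searchText: 'const x = 1;',
  foundText: 'const x = 1',
  similarity: 0.92,
  executionTime: 3.4,            // assumed to be milliseconds
  exactMatchCount: 0,
  expectedReplacements: 1,
  fuzzyThreshold: 0.95,
  belowThreshold: true,
  diff: '-const x = 1;\n+const x = 1',
  searchLength: 12,
  foundLength: 11,
  fileExtension: '.ts',
  characterCodes: '99,111,110,115,116',
  uniqueCharacterCount: 10,
  diffLength: 26,
};

await fuzzySearchLogger.log(entry);
```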
|
package/dist/utils/fuzzySearchLogger.js
ADDED
|
@@ -0,0 +1,126 @@
|
|
|
1
|
+
import fs from 'fs/promises';
|
|
2
|
+
import path from 'path';
|
|
3
|
+
import os from 'os';
|
|
4
|
+
class FuzzySearchLogger {
|
|
5
|
+
constructor() {
|
|
6
|
+
this.initialized = false;
|
|
7
|
+
// Create log file in a dedicated directory
|
|
8
|
+
const logDir = path.join(os.homedir(), '.claude-server-commander-logs');
|
|
9
|
+
this.logPath = path.join(logDir, 'fuzzy-search.log');
|
|
10
|
+
}
|
|
11
|
+
async ensureLogFile() {
|
|
12
|
+
if (this.initialized)
|
|
13
|
+
return;
|
|
14
|
+
try {
|
|
15
|
+
// Create log directory if it doesn't exist
|
|
16
|
+
const logDir = path.dirname(this.logPath);
|
|
17
|
+
await fs.mkdir(logDir, { recursive: true });
|
|
18
|
+
// Check if log file exists, create with headers if not
|
|
19
|
+
try {
|
|
20
|
+
await fs.access(this.logPath);
|
|
21
|
+
}
|
|
22
|
+
catch {
|
|
23
|
+
// File doesn't exist, create with headers
|
|
24
|
+
const headers = [
|
|
25
|
+
'timestamp',
|
|
26
|
+
'searchText',
|
|
27
|
+
'foundText',
|
|
28
|
+
'similarity',
|
|
29
|
+
'executionTime',
|
|
30
|
+
'exactMatchCount',
|
|
31
|
+
'expectedReplacements',
|
|
32
|
+
'fuzzyThreshold',
|
|
33
|
+
'belowThreshold',
|
|
34
|
+
'diff',
|
|
35
|
+
'searchLength',
|
|
36
|
+
'foundLength',
|
|
37
|
+
'fileExtension',
|
|
38
|
+
'characterCodes',
|
|
39
|
+
'uniqueCharacterCount',
|
|
40
|
+
'diffLength'
|
|
41
|
+
].join('\t');
|
|
42
|
+
await fs.writeFile(this.logPath, headers + '\n');
|
|
43
|
+
}
|
|
44
|
+
this.initialized = true;
|
|
45
|
+
}
|
|
46
|
+
catch (error) {
|
|
47
|
+
console.error('Failed to initialize fuzzy search log file:', error);
|
|
48
|
+
throw error;
|
|
49
|
+
}
|
|
50
|
+
}
|
|
51
|
+
async log(entry) {
|
|
52
|
+
try {
|
|
53
|
+
await this.ensureLogFile();
|
|
54
|
+
// Convert entry to tab-separated string
|
|
55
|
+
const logLine = [
|
|
56
|
+
entry.timestamp.toISOString(),
|
|
57
|
+
entry.searchText.replace(/\n/g, '\\n').replace(/\t/g, '\\t'),
|
|
58
|
+
entry.foundText.replace(/\n/g, '\\n').replace(/\t/g, '\\t'),
|
|
59
|
+
entry.similarity.toString(),
|
|
60
|
+
entry.executionTime.toString(),
|
|
61
|
+
entry.exactMatchCount.toString(),
|
|
62
|
+
entry.expectedReplacements.toString(),
|
|
63
|
+
entry.fuzzyThreshold.toString(),
|
|
64
|
+
entry.belowThreshold.toString(),
|
|
65
|
+
entry.diff.replace(/\n/g, '\\n').replace(/\t/g, '\\t'),
|
|
66
|
+
entry.searchLength.toString(),
|
|
67
|
+
entry.foundLength.toString(),
|
|
68
|
+
entry.fileExtension,
|
|
69
|
+
entry.characterCodes,
|
|
70
|
+
entry.uniqueCharacterCount.toString(),
|
|
71
|
+
entry.diffLength.toString()
|
|
72
|
+
].join('\t');
|
|
73
|
+
await fs.appendFile(this.logPath, logLine + '\n');
|
|
74
|
+
}
|
|
75
|
+
catch (error) {
|
|
76
|
+
console.error('Failed to write to fuzzy search log:', error);
|
|
77
|
+
}
|
|
78
|
+
}
|
|
79
|
+
async getLogPath() {
|
|
80
|
+
await this.ensureLogFile();
|
|
81
|
+
return this.logPath;
|
|
82
|
+
}
|
|
83
|
+
async getRecentLogs(count = 10) {
|
|
84
|
+
try {
|
|
85
|
+
await this.ensureLogFile();
|
|
86
|
+
const content = await fs.readFile(this.logPath, 'utf-8');
|
|
87
|
+
const lines = content.split('\n').filter(line => line.trim());
|
|
88
|
+
// Return last N lines (excluding header)
|
|
89
|
+
return lines.slice(-count - 1, -1);
|
|
90
|
+
}
|
|
91
|
+
catch (error) {
|
|
92
|
+
console.error('Failed to read fuzzy search logs:', error);
|
|
93
|
+
return [];
|
|
94
|
+
}
|
|
95
|
+
}
|
|
96
|
+
async clearLog() {
|
|
97
|
+
try {
|
|
98
|
+
// Recreate with just headers
|
|
99
|
+
const headers = [
|
|
100
|
+
'timestamp',
|
|
101
|
+
'searchText',
|
|
102
|
+
'foundText',
|
|
103
|
+
'similarity',
|
|
104
|
+
'executionTime',
|
|
105
|
+
'exactMatchCount',
|
|
106
|
+
'expectedReplacements',
|
|
107
|
+
'fuzzyThreshold',
|
|
108
|
+
'belowThreshold',
|
|
109
|
+
'diff',
|
|
110
|
+
'searchLength',
|
|
111
|
+
'foundLength',
|
|
112
|
+
'fileExtension',
|
|
113
|
+
'characterCodes',
|
|
114
|
+
'uniqueCharacterCount',
|
|
115
|
+
'diffLength'
|
|
116
|
+
].join('\t');
|
|
117
|
+
await fs.writeFile(this.logPath, headers + '\n');
|
|
118
|
+
console.log('Fuzzy search log cleared');
|
|
119
|
+
}
|
|
120
|
+
catch (error) {
|
|
121
|
+
console.error('Failed to clear fuzzy search log:', error);
|
|
122
|
+
}
|
|
123
|
+
}
|
|
124
|
+
}
|
|
125
|
+
// Singleton instance
|
|
126
|
+
export const fuzzySearchLogger = new FuzzySearchLogger();
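Reading the log back is equally simple; a brief sketch, assuming the tab-separated column order shown in the headers above.

```ts
// Illustrative only: inspect the TSV log in ~/.claude-server-commander-logs/fuzzy-search.log.
import { fuzzySearchLogger } from './dist/utils/fuzzySearchLogger.js';

console.log('log file:', await fuzzySearchLogger.getLogPath());

// Last five entries as raw tab-separated lines (header excluded).
for (const line of await fuzzySearchLogger.getRecentLogs(5)) {
  const [timestamp, searchText, , similarity] = line.split('\t');
  console.log(timestamp, similarity, searchText);
}
```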
|
package/dist/utils/trackTools.js
ADDED
|
@@ -0,0 +1,54 @@
|
|
|
1
|
+
import * as fs from 'fs';
|
|
2
|
+
import * as path from 'path';
|
|
3
|
+
import { TOOL_CALL_FILE, TOOL_CALL_FILE_MAX_SIZE } from '../config.js';
|
|
4
|
+
// Ensure the directory for the log file exists
|
|
5
|
+
const logDir = path.dirname(TOOL_CALL_FILE);
|
|
6
|
+
await fs.promises.mkdir(logDir, { recursive: true });
|
|
7
|
+
/**
|
|
8
|
+
* Track tool calls and save them to a log file
|
|
9
|
+
* @param toolName Name of the tool being called
|
|
10
|
+
* @param args Arguments passed to the tool (optional)
|
|
11
|
+
*/
|
|
12
|
+
export async function trackToolCall(toolName, args) {
|
|
13
|
+
try {
|
|
14
|
+
// Get current timestamp
|
|
15
|
+
const timestamp = new Date().toISOString();
|
|
16
|
+
// Format the log entry
|
|
17
|
+
const logEntry = `${timestamp} | ${toolName.padEnd(20, ' ')}${args ? `\t| Arguments: ${JSON.stringify(args)}` : ''}\n`;
|
|
18
|
+
// Check if file exists and get its size
|
|
19
|
+
let fileSize = 0;
|
|
20
|
+
try {
|
|
21
|
+
const stats = await fs.promises.stat(TOOL_CALL_FILE);
|
|
22
|
+
fileSize = stats.size;
|
|
23
|
+
}
|
|
24
|
+
catch (err) {
|
|
25
|
+
// File doesn't exist yet, size remains 0
|
|
26
|
+
}
|
|
27
|
+
// If file size is 10MB or larger, rotate the log file
|
|
28
|
+
if (fileSize >= TOOL_CALL_FILE_MAX_SIZE) {
|
|
29
|
+
const fileExt = path.extname(TOOL_CALL_FILE);
|
|
30
|
+
const fileBase = path.basename(TOOL_CALL_FILE, fileExt);
|
|
31
|
+
const dirName = path.dirname(TOOL_CALL_FILE);
|
|
32
|
+
// Create a timestamp-based filename for the old log
|
|
33
|
+
const date = new Date();
|
|
34
|
+
const rotateTimestamp = `${date.getFullYear()}-${String(date.getMonth() + 1).padStart(2, '0')}-${String(date.getDate()).padStart(2, '0')}_${String(date.getHours()).padStart(2, '0')}-${String(date.getMinutes()).padStart(2, '0')}-${String(date.getSeconds()).padStart(2, '0')}`;
|
|
35
|
+
const newFileName = path.join(dirName, `${fileBase}_${rotateTimestamp}${fileExt}`);
|
|
36
|
+
// Rename the current file
|
|
37
|
+
await fs.promises.rename(TOOL_CALL_FILE, newFileName);
|
|
38
|
+
}
|
|
39
|
+
// Append to log file (if file was renamed, this will create a new file)
|
|
40
|
+
await fs.promises.appendFile(TOOL_CALL_FILE, logEntry, 'utf8');
|
|
41
|
+
}
|
|
42
|
+
catch (error) {
|
|
43
|
+
const errorMessage = error instanceof Error ? error.message : String(error);
|
|
44
|
+
const { capture } = await import('./capture.js');
|
|
45
|
+
// Capture the logging failure in telemetry (error message and tool name only)
|
|
46
|
+
// so that tool-call tracking problems can be monitored
|
|
47
|
+
await capture('server_track_tool_call_error', {
|
|
48
|
+
error: errorMessage,
|
|
49
|
+
toolName
|
|
50
|
+
});
|
|
51
|
+
// Don't let logging errors affect the main functionality
|
|
52
|
+
console.error(`Error logging tool call: ${error instanceof Error ? error.message : String(error)}`);
|
|
53
|
+
}
|
|
54
|
+
}
|
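A short sketch of how this tracker might be called around a tool dispatch; the tool names and arguments are hypothetical, and `TOOL_CALL_FILE` / `TOOL_CALL_FILE_MAX_SIZE` come from `config.js`, which is outside this excerpt.

```ts
// Illustrative only: record tool invocations in the rotating audit log.
import { trackToolCall } from './dist/utils/trackTools.js';

// With arguments: the JSON-serialized args are appended after the tool name.
await trackToolCall('read_file', { path: '/tmp/example.txt', offset: 0, length: 1000 });

// Without arguments: only the timestamp and padded tool name are logged.
await trackToolCall('list_sessions');
```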
package/dist/version.d.ts
CHANGED
|
@@ -1 +1 @@
|
|
|
1
|
-
export declare const VERSION = "0.
|
|
1
|
+
export declare const VERSION = "0.2.0";
|
package/dist/version.js
CHANGED
|
@@ -1 +1 @@
|
|
|
1
|
-
export const VERSION = '0.1.39';
|
|
1
|
+
export const VERSION = '0.2.0';
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@wonderwhy-er/desktop-commander",
|
|
3
|
-
"version": "0.
|
|
3
|
+
"version": "0.2.0",
|
|
4
4
|
"description": "MCP server for terminal operations and file editing",
|
|
5
5
|
"license": "MIT",
|
|
6
6
|
"author": "Eduards Ruzga",
|
|
@@ -34,7 +34,11 @@
|
|
|
34
34
|
"test": "node test/run-all-tests.js",
|
|
35
35
|
"link:local": "npm run build && npm link",
|
|
36
36
|
"unlink:local": "npm unlink",
|
|
37
|
-
"inspector": "npx @modelcontextprotocol/inspector dist/index.js"
|
|
37
|
+
"inspector": "npx @modelcontextprotocol/inspector dist/index.js",
|
|
38
|
+
"logs:view": "npm run build && node scripts/view-fuzzy-logs.js",
|
|
39
|
+
"logs:analyze": "npm run build && node scripts/analyze-fuzzy-logs.js",
|
|
40
|
+
"logs:clear": "npm run build && node scripts/clear-fuzzy-logs.js",
|
|
41
|
+
"logs:export": "npm run build && node scripts/export-fuzzy-logs.js"
|
|
38
42
|
},
|
|
39
43
|
"publishConfig": {
|
|
40
44
|
"access": "public"
|
|
@@ -69,6 +73,7 @@
|
|
|
69
73
|
},
|
|
70
74
|
"devDependencies": {
|
|
71
75
|
"@types/node": "^20.17.24",
|
|
76
|
+
"commander": "^13.1.0",
|
|
72
77
|
"nexe": "^5.0.0-beta.4",
|
|
73
78
|
"nodemon": "^3.0.2",
|
|
74
79
|
"shx": "^0.3.4",
|