protoagent 0.0.5 → 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +99 -19
- package/dist/App.js +602 -0
- package/dist/agentic-loop.js +492 -525
- package/dist/cli.js +39 -0
- package/dist/components/CollapsibleBox.js +26 -0
- package/dist/components/ConfigDialog.js +40 -0
- package/dist/components/ConsolidatedToolMessage.js +41 -0
- package/dist/components/FormattedMessage.js +93 -0
- package/dist/components/Table.js +275 -0
- package/dist/config.js +171 -0
- package/dist/mcp.js +170 -0
- package/dist/providers.js +137 -0
- package/dist/sessions.js +161 -0
- package/dist/skills.js +229 -0
- package/dist/sub-agent.js +103 -0
- package/dist/system-prompt.js +131 -0
- package/dist/tools/bash.js +178 -0
- package/dist/tools/edit-file.js +65 -171
- package/dist/tools/index.js +79 -134
- package/dist/tools/list-directory.js +20 -73
- package/dist/tools/read-file.js +57 -101
- package/dist/tools/search-files.js +74 -162
- package/dist/tools/todo.js +57 -140
- package/dist/tools/webfetch.js +310 -0
- package/dist/tools/write-file.js +44 -135
- package/dist/utils/approval.js +69 -0
- package/dist/utils/compactor.js +87 -0
- package/dist/utils/cost-tracker.js +26 -81
- package/dist/utils/format-message.js +26 -0
- package/dist/utils/logger.js +101 -307
- package/dist/utils/path-validation.js +74 -0
- package/package.json +45 -51
- package/LICENSE +0 -21
- package/dist/config/client.js +0 -315
- package/dist/config/commands.js +0 -223
- package/dist/config/manager.js +0 -117
- package/dist/config/mcp-commands.js +0 -266
- package/dist/config/mcp-manager.js +0 -240
- package/dist/config/mcp-types.js +0 -28
- package/dist/config/providers.js +0 -229
- package/dist/config/setup.js +0 -209
- package/dist/config/system-prompt.js +0 -397
- package/dist/config/types.js +0 -4
- package/dist/index.js +0 -229
- package/dist/tools/create-directory.js +0 -76
- package/dist/tools/directory-operations.js +0 -195
- package/dist/tools/file-operations.js +0 -211
- package/dist/tools/run-shell-command.js +0 -746
- package/dist/tools/search-operations.js +0 -179
- package/dist/tools/shell-operations.js +0 -342
- package/dist/tools/task-complete.js +0 -26
- package/dist/tools/view-directory-tree.js +0 -125
- package/dist/tools.js +0 -2
- package/dist/utils/conversation-compactor.js +0 -140
- package/dist/utils/enhanced-prompt.js +0 -23
- package/dist/utils/file-operations-approval.js +0 -373
- package/dist/utils/interrupt-handler.js +0 -127
- package/dist/utils/user-cancellation.js +0 -34
|
@@ -0,0 +1,310 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* webfetch tool — Fetch and process web content
|
|
3
|
+
*
|
|
4
|
+
* Features:
|
|
5
|
+
* - Single URL fetch per invocation
|
|
6
|
+
* - Three output formats: text, markdown, html
|
|
7
|
+
* - Configurable timeout (default 30s, max 120s)
|
|
8
|
+
* - 5MB response size limit + 2MB output limit
|
|
9
|
+
* - HTML to text/markdown conversion
|
|
10
|
+
* - AbortController support for cancellation
|
|
11
|
+
* - Robust HTML entity decoding
|
|
12
|
+
* - Proper redirect limiting
|
|
13
|
+
* - Charset-aware content decoding
|
|
14
|
+
*/
|
|
15
|
+
import { convert } from 'html-to-text';
|
|
16
|
+
const MAX_RESPONSE_SIZE = 5 * 1024 * 1024; // 5MB
|
|
17
|
+
const MAX_OUTPUT_SIZE = 2 * 1024 * 1024; // 2MB
|
|
18
|
+
const MAX_REDIRECTS = 10;
|
|
19
|
+
const MAX_URL_LENGTH = 4096;
|
|
20
|
+
const FETCH_HEADERS = {
|
|
21
|
+
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36',
|
|
22
|
+
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
|
|
23
|
+
'Accept-Language': 'en-US,en;q=0.9',
|
|
24
|
+
'Accept-Encoding': 'gzip, deflate',
|
|
25
|
+
'DNT': '1',
|
|
26
|
+
'Connection': 'keep-alive',
|
|
27
|
+
'Upgrade-Insecure-Requests': '1',
|
|
28
|
+
};
|
|
29
|
+
// Text-based MIME types that are safe to process
|
|
30
|
+
const TEXT_MIME_TYPES = [
|
|
31
|
+
'text/',
|
|
32
|
+
'application/json',
|
|
33
|
+
'application/xml',
|
|
34
|
+
'application/x-www-form-urlencoded',
|
|
35
|
+
'application/atom+xml',
|
|
36
|
+
'application/rss+xml',
|
|
37
|
+
'application/javascript',
|
|
38
|
+
'application/typescript',
|
|
39
|
+
];
|
|
40
|
+
// Lazy-loaded Turndown instance (CJS module — dynamic import avoids forcing esbuild CJS output)
let _turndownService = null;
/**
 * Return the shared TurndownService, creating it on first use.
 * Script/style/meta/link elements are stripped from the conversion.
 */
async function getTurndownService() {
    if (_turndownService === null) {
        const turndownModule = await import('turndown');
        const TurndownService = turndownModule.default;
        const service = new TurndownService({
            headingStyle: 'atx',
            codeBlockStyle: 'fenced',
            bulletListMarker: '-',
            emDelimiter: '*',
        });
        service.remove(['script', 'style', 'meta', 'link']);
        _turndownService = service;
    }
    return _turndownService;
}
|
|
55
|
+
// Lazy-loaded he module (CJS module)
let _he = null;
/** Return the `he` entity-decoding module, importing it on first use. */
async function getHe() {
    if (_he === null) {
        const heModule = await import('he');
        _he = heModule.default;
    }
    return _he;
}
|
|
64
|
+
/**
 * Check whether a MIME type (or full Content-Type value) is text-based,
 * i.e. matches one of the allowed prefixes/types in TEXT_MIME_TYPES.
 */
function isTextMimeType(mimeType) {
    for (const allowed of TEXT_MIME_TYPES) {
        if (mimeType.includes(allowed)) {
            return true;
        }
    }
    return false;
}
|
|
70
|
+
/**
 * Detect whether fetched content is HTML, trusting the Content-Type
 * header first and falling back to sniffing the document prefix.
 */
function detectHTML(content, contentType) {
    // Trust an explicit HTML content type from the server.
    if (contentType.includes('text/html')) {
        return true;
    }
    // Otherwise sniff the first 1KB for a typical HTML opening tag.
    const head = content.slice(0, 1024).trim().toLowerCase();
    return /^(?:<!doctype html|<html|<head|<body|<meta)/.test(head);
}
|
|
82
|
+
/**
 * Parse the charset from a Content-Type header value.
 * Falls back to 'utf-8' when the charset is absent or not supported
 * by TextDecoder.
 */
function parseCharset(contentType) {
    const match = contentType.match(/charset=([^\s;]+)/i);
    if (!match) {
        return 'utf-8';
    }
    const charset = match[1].replace(/['"]/g, '');
    try {
        // TextDecoder throws on unknown encoding labels — validate before use.
        new TextDecoder(charset);
        return charset;
    }
    catch {
        return 'utf-8';
    }
}
|
|
100
|
+
/**
 * Truncate output to roughly maxSize characters, appending a note that
 * records the original length. Short output is returned unchanged.
 */
function truncateOutput(output, maxSize) {
    if (output.length <= maxSize) {
        return output;
    }
    // Reserve ~100 chars for the truncation notice, but always keep at least 100.
    const keep = Math.max(100, maxSize - 100);
    const notice = `\n\n[Content truncated: ${output.length} characters exceeds ${maxSize} limit]`;
    return output.slice(0, keep) + notice;
}
|
|
111
|
+
// OpenAI-style function-tool definition advertised to the model for webfetch.
export const webfetchTool = {
    type: 'function',
    function: {
        name: 'webfetch',
        description: 'Fetch and process content from a web URL. Supports text (plain text extraction), markdown (HTML to markdown conversion), or html (raw HTML) output formats.',
        parameters: {
            type: 'object',
            properties: {
                url: {
                    type: 'string',
                    description: 'HTTP(S) URL to fetch (must start with http:// or https://)',
                },
                format: {
                    type: 'string',
                    enum: ['text', 'markdown', 'html'],
                    description: 'Output format: text (plain text), markdown (HTML to markdown), or html (raw HTML)',
                },
                timeout: {
                    type: 'number',
                    description: 'Timeout in seconds (default 30, min 1, max 120)',
                },
            },
            // timeout is optional; webfetch() defaults it to 30 seconds.
            required: ['url', 'format'],
        },
    },
};
|
|
137
|
+
/**
 * Convert HTML to plain text using the html-to-text library.
 * Falls back to a crude regex-based tag strip if the library throws.
 */
function htmlToText(html) {
    try {
        const options = {
            wordwrap: 120,
            selectors: [
                { selector: 'img', options: { ignoreHref: true } },
                { selector: 'a', options: { ignoreHref: true } },
            ],
        };
        return convert(html, options);
    }
    catch (error) {
        // Fallback: drop script/style blocks, strip remaining tags,
        // then remove blank lines.
        const stripped = html
            .replace(/<script\b[^<]*(?:(?!<\/script>)<[^<]*)*<\/script>/gi, '')
            .replace(/<style\b[^<]*(?:(?!<\/style>)<[^<]*)*<\/style>/gi, '')
            .replace(/<[^>]+>/g, ' ');
        const kept = [];
        for (const rawLine of stripped.split('\n')) {
            const line = rawLine.trim();
            if (line.length > 0) {
                kept.push(line);
            }
        }
        return kept.join('\n');
    }
}
|
|
162
|
+
/**
 * Convert HTML to Markdown using the cached Turndown instance.
 * Falls back to wrapping the raw HTML in a fenced code block on failure.
 */
async function htmlToMarkdown(html) {
    try {
        const service = await getTurndownService();
        return service.turndown(html);
    }
    catch (error) {
        return `\`\`\`html\n${html}\n\`\`\``;
    }
}
|
|
175
|
+
/**
 * Fetch a URL, following at most MAX_REDIRECTS redirects.
 *
 * Redirects are handled manually (`redirect: 'manual'`) so the chain can
 * be counted; relative Location headers are resolved against the current
 * URL.
 *
 * @param url - Absolute HTTP(S) URL to fetch
 * @param signal - AbortSignal used for timeout/cancellation
 * @returns The final (non-redirect) Response
 * @throws Error when the redirect chain exceeds MAX_REDIRECTS
 */
async function fetchWithRedirectLimit(url, signal) {
    let redirectCount = 0;
    let currentUrl = url;
    while (redirectCount < MAX_REDIRECTS) {
        const response = await fetch(currentUrl, {
            signal,
            headers: FETCH_HEADERS,
            redirect: 'manual', // Handle redirects manually to count them
        });
        // Follow 3xx responses that carry a Location header.
        if (response.status >= 300 && response.status < 400) {
            const location = response.headers.get('location');
            if (location) {
                // Fix: cancel the redirect response body so the connection is
                // released; previously intermediate bodies were never consumed.
                if (response.body) {
                    await response.body.cancel().catch(() => undefined);
                }
                redirectCount++;
                // Resolve relative Location headers against the current URL.
                currentUrl = new URL(location, currentUrl).href;
                continue;
            }
        }
        return response;
    }
    throw new Error(`Too many redirects (max ${MAX_REDIRECTS})`);
}
|
|
203
|
+
/**
 * Fetch and process a URL.
 *
 * @param url - HTTP(S) URL to fetch
 * @param format - Output format: 'text', 'markdown', or 'html'
 * @param timeout - Optional timeout in seconds (default 30, max 120)
 * @returns Object with output, title, and metadata
 * @throws Error on validation, network, or processing failures
 */
export async function webfetch(url, format, timeout) {
    // Validate URL scheme and length before any network activity.
    if (!url.startsWith('http://') && !url.startsWith('https://')) {
        throw new Error('Invalid URL format. Must start with http:// or https://');
    }
    if (url.length > MAX_URL_LENGTH) {
        throw new Error(`URL too long (${url.length} characters, max ${MAX_URL_LENGTH})`);
    }
    // Validate format
    if (!['text', 'markdown', 'html'].includes(format)) {
        throw new Error("Invalid format. Must be 'text', 'markdown', or 'html'");
    }
    // Clamp timeout to the 120s ceiling; values below 1s are rejected.
    const timeoutSeconds = Math.min(timeout ?? 30, 120);
    if (timeoutSeconds < 1) {
        throw new Error('Timeout must be between 1 and 120 seconds');
    }
    // One AbortController covers the whole operation (fetch + redirects).
    const controller = new AbortController();
    const timeoutId = setTimeout(() => controller.abort(), timeoutSeconds * 1000);
    try {
        const startTime = Date.now();
        const response = await fetchWithRedirectLimit(url, controller.signal);
        if (!response.ok) {
            throw new Error(`HTTP ${response.status} error: ${response.statusText}`);
        }
        // Cheap size pre-check from the header before downloading the body.
        // Fix: parseInt now gets an explicit radix (was parseInt(contentLength)).
        const contentLength = response.headers.get('content-length');
        if (contentLength && Number.parseInt(contentLength, 10) > MAX_RESPONSE_SIZE) {
            throw new Error(`Response too large (exceeds 5MB limit). Content-Length: ${contentLength}`);
        }
        const contentType = response.headers.get('content-type') ?? 'text/plain';
        // Only text-based content types are processed.
        if (!isTextMimeType(contentType)) {
            throw new Error(`Content type '${contentType}' is not supported. Only text-based formats are allowed.`);
        }
        const arrayBuffer = await response.arrayBuffer();
        // Content-Length can lie (or be absent) — re-check the real size.
        if (arrayBuffer.byteLength > MAX_RESPONSE_SIZE) {
            throw new Error(`Response too large (exceeds 5MB limit). Size: ${arrayBuffer.byteLength}`);
        }
        // Decode the body using the charset declared in the header (utf-8 fallback).
        const charset = parseCharset(contentType);
        const decoder = new TextDecoder(charset, { fatal: false });
        const content = decoder.decode(arrayBuffer);
        const isHTML = detectHTML(content, contentType);
        // Format content based on requested format.
        let output;
        if (format === 'text') {
            output = isHTML ? htmlToText(content) : content;
        }
        else if (format === 'markdown') {
            // Non-HTML content is wrapped in a fenced code block as-is.
            output = isHTML ? await htmlToMarkdown(content) : `\`\`\`\n${content}\n\`\`\``;
        }
        else {
            // format === 'html'
            output = content;
        }
        // Decode HTML entities ONLY for text/markdown formats (not for raw HTML).
        if (format !== 'html') {
            const he = await getHe();
            output = he.decode(output);
        }
        // Truncate output if too large.
        output = truncateOutput(output, MAX_OUTPUT_SIZE);
        const fetchTime = Date.now() - startTime;
        const title = `${url} (${contentType})`;
        const metadata = {
            url,
            format,
            contentType,
            charset,
            contentLength: arrayBuffer.byteLength,
            outputLength: output.length,
            fetchTime,
        };
        return { output, title, metadata };
    }
    catch (error) {
        // AbortError means our timeout fired (or the caller cancelled).
        if (error instanceof Error && error.name === 'AbortError') {
            throw new Error(`Fetch timeout after ${timeoutSeconds} seconds`);
        }
        // Re-throw our own errors as-is.
        if (error instanceof Error) {
            throw error;
        }
        // Wrap non-Error throwables.
        throw new Error(`Failed to fetch ${url}: ${String(error)}`);
    }
    finally {
        clearTimeout(timeoutId);
    }
}
|
package/dist/tools/write-file.js
CHANGED
|
@@ -1,144 +1,53 @@
|
|
|
1
|
-
|
|
2
|
-
|
|
3
|
-
|
|
4
|
-
import
|
|
5
|
-
import
|
|
6
|
-
import {
|
|
7
|
-
|
|
8
|
-
const workingDirectory = process.cwd();
|
|
9
|
-
// Security utilities
|
|
10
|
-
function normalizePath(p) {
|
|
11
|
-
return path.normalize(p);
|
|
12
|
-
}
|
|
13
|
-
async function validatePath(requestedPath) {
|
|
14
|
-
const absolute = path.isAbsolute(requestedPath)
|
|
15
|
-
? path.resolve(requestedPath)
|
|
16
|
-
: path.resolve(workingDirectory, requestedPath);
|
|
17
|
-
const normalizedRequested = normalizePath(absolute);
|
|
18
|
-
// Check if path is within working directory
|
|
19
|
-
if (!normalizedRequested.startsWith(workingDirectory)) {
|
|
20
|
-
throw new Error(`Access denied - path outside working directory: ${absolute}`);
|
|
21
|
-
}
|
|
22
|
-
// Handle symlinks by checking their real path
|
|
23
|
-
try {
|
|
24
|
-
const realPath = await fs.realpath(absolute);
|
|
25
|
-
const normalizedReal = normalizePath(realPath);
|
|
26
|
-
if (!normalizedReal.startsWith(workingDirectory)) {
|
|
27
|
-
throw new Error(`Access denied - symlink target outside working directory: ${realPath}`);
|
|
28
|
-
}
|
|
29
|
-
return realPath;
|
|
30
|
-
}
|
|
31
|
-
catch (error) {
|
|
32
|
-
// For new files that don't exist yet, verify parent directory
|
|
33
|
-
if (error.code === 'ENOENT') {
|
|
34
|
-
const parentDir = path.dirname(absolute);
|
|
35
|
-
try {
|
|
36
|
-
const realParentPath = await fs.realpath(parentDir);
|
|
37
|
-
const normalizedParent = normalizePath(realParentPath);
|
|
38
|
-
if (!normalizedParent.startsWith(workingDirectory)) {
|
|
39
|
-
throw new Error(`Access denied - parent directory outside working directory: ${realParentPath}`);
|
|
40
|
-
}
|
|
41
|
-
return absolute;
|
|
42
|
-
}
|
|
43
|
-
catch {
|
|
44
|
-
throw new Error(`Parent directory does not exist: ${parentDir}`);
|
|
45
|
-
}
|
|
46
|
-
}
|
|
47
|
-
throw error;
|
|
48
|
-
}
|
|
49
|
-
}
|
|
50
|
-
export async function writeFile(filePath, content) {
|
|
51
|
-
try {
|
|
52
|
-
const validPath = await validatePath(filePath);
|
|
53
|
-
// Check if file already exists for approval context
|
|
54
|
-
let fileExists = false;
|
|
55
|
-
try {
|
|
56
|
-
await fs.access(validPath);
|
|
57
|
-
fileExists = true;
|
|
58
|
-
}
|
|
59
|
-
catch {
|
|
60
|
-
// File doesn't exist, which is expected for write operations
|
|
61
|
-
}
|
|
62
|
-
// Request user approval for write operation
|
|
63
|
-
const approvalContext = {
|
|
64
|
-
operation: 'write',
|
|
65
|
-
filePath: filePath,
|
|
66
|
-
description: fileExists
|
|
67
|
-
? `Overwrite existing file with ${content.length} characters of new content`
|
|
68
|
-
: `Create new file with ${content.length} characters of content`,
|
|
69
|
-
contentPreview: undefined, // Will be shown in the enhanced preview
|
|
70
|
-
newContent: content,
|
|
71
|
-
changeContext: {
|
|
72
|
-
linesAdded: content.split('\n').length,
|
|
73
|
-
linesRemoved: fileExists ? 0 : 0, // We don't know the old content yet
|
|
74
|
-
totalLines: content.split('\n').length,
|
|
75
|
-
affectedLineNumbers: []
|
|
76
|
-
}
|
|
77
|
-
};
|
|
78
|
-
// Request user approval for write operation (throws UserCancellationError if cancelled)
|
|
79
|
-
await requestFileOperationApproval(approvalContext);
|
|
80
|
-
logger.debug(`📝 Writing file: ${filePath} (${content.length} chars)`, { component: 'WriteFile', operation: 'writeFile' });
|
|
81
|
-
try {
|
|
82
|
-
// Security: 'wx' flag ensures exclusive creation - fails if file/symlink exists,
|
|
83
|
-
// preventing writes through pre-existing symlinks
|
|
84
|
-
await fs.writeFile(validPath, content, { encoding: "utf-8", flag: 'wx' });
|
|
85
|
-
logger.info(`✅ Created new file: ${filePath}`, { component: 'WriteFile', operation: 'writeFile' });
|
|
86
|
-
}
|
|
87
|
-
catch (error) {
|
|
88
|
-
if (error.code === 'EEXIST') {
|
|
89
|
-
// Security: Use atomic rename to prevent race conditions where symlinks
|
|
90
|
-
// could be created between validation and write. Rename operations
|
|
91
|
-
// replace the target file atomically and don't follow symlinks.
|
|
92
|
-
const tempPath = `${validPath}.${randomBytes(16).toString('hex')}.tmp`;
|
|
93
|
-
try {
|
|
94
|
-
await fs.writeFile(tempPath, content, 'utf-8');
|
|
95
|
-
await fs.rename(tempPath, validPath);
|
|
96
|
-
logger.info(`✅ Overwrote existing file: ${filePath}`, { component: 'WriteFile', operation: 'writeFile' });
|
|
97
|
-
}
|
|
98
|
-
catch (renameError) {
|
|
99
|
-
try {
|
|
100
|
-
await fs.unlink(tempPath);
|
|
101
|
-
}
|
|
102
|
-
catch { }
|
|
103
|
-
throw renameError;
|
|
104
|
-
}
|
|
105
|
-
}
|
|
106
|
-
else {
|
|
107
|
-
throw error;
|
|
108
|
-
}
|
|
109
|
-
}
|
|
110
|
-
return `Successfully wrote to ${filePath}`;
|
|
111
|
-
}
|
|
112
|
-
catch (error) {
|
|
113
|
-
// Re-throw UserCancellationError without modification
|
|
114
|
-
if (isUserCancellation(error)) {
|
|
115
|
-
throw error;
|
|
116
|
-
}
|
|
117
|
-
if (error instanceof Error) {
|
|
118
|
-
throw new Error(`Failed to write file: ${error.message}`);
|
|
119
|
-
}
|
|
120
|
-
throw new Error('Failed to write file: Unknown error');
|
|
121
|
-
}
|
|
122
|
-
}
|
|
123
|
-
// Tool definition
|
|
1
|
+
/**
|
|
2
|
+
* write_file tool — Create or overwrite a file. Requires approval.
|
|
3
|
+
*/
|
|
4
|
+
import fs from 'node:fs/promises';
|
|
5
|
+
import path from 'node:path';
|
|
6
|
+
import { validatePath } from '../utils/path-validation.js';
|
|
7
|
+
import { requestApproval } from '../utils/approval.js';
|
|
124
8
|
// OpenAI-style function-tool definition advertised to the model for write_file.
export const writeFileTool = {
    type: 'function',
    function: {
        name: 'write_file',
        description: 'Create a new file or overwrite an existing file with the given content. Prefer edit_file for modifying existing files.',
        parameters: {
            type: 'object',
            properties: {
                file_path: { type: 'string', description: 'Path to the file to write (relative to working directory).' },
                content: { type: 'string', description: 'The full content to write to the file.' },
            },
            required: ['file_path', 'content'],
        },
    },
};
|
|
23
|
+
/**
 * Create or overwrite a file at filePath with the given content.
 * Requires interactive approval; the write is atomic (temp file + rename).
 *
 * @param filePath - Path relative to the working directory
 * @param content - Full file content to write
 * @param sessionId - Session identifier used to scope approvals
 * @returns A human-readable status message
 */
export async function writeFile(filePath, content, sessionId) {
    const validated = await validatePath(filePath);
    // Large payloads are shortened to head + tail for the approval prompt.
    let preview;
    if (content.length > 500) {
        preview = `${content.slice(0, 250)}\n... (${content.length} chars total) ...\n${content.slice(-250)}`;
    }
    else {
        preview = content;
    }
    const approved = await requestApproval({
        id: `write-${Date.now()}`,
        type: 'file_write',
        description: `Write file: ${filePath}`,
        detail: preview,
        sessionId,
        sessionScopeKey: `file_write:${validated}`,
    });
    if (!approved) {
        return `Operation cancelled: write to ${filePath} was rejected by user.`;
    }
    // Ensure the parent directory exists before writing.
    const dir = path.dirname(validated);
    await fs.mkdir(dir, { recursive: true });
    // Atomic write: write a temp file in the same directory, then rename
    // over the target so readers never observe a half-written file.
    const tmpPath = path.join(dir, `.protoagent-write-${process.pid}-${Date.now()}-${path.basename(validated)}`);
    try {
        await fs.writeFile(tmpPath, content, 'utf8');
        await fs.rename(tmpPath, validated);
    }
    finally {
        // Best-effort cleanup of the temp file if the rename did not happen.
        await fs.rm(tmpPath, { force: true }).catch(() => undefined);
    }
    const lineCount = content.split('\n').length;
    return `Successfully wrote ${lineCount} lines to ${filePath}`;
}
|
|
@@ -0,0 +1,69 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Approval system for destructive operations.
|
|
3
|
+
*
|
|
4
|
+
* Two categories of approval:
|
|
5
|
+
* 1. File operations (write_file, edit_file)
|
|
6
|
+
* 2. Shell commands (non-whitelisted)
|
|
7
|
+
*
|
|
8
|
+
* Approval can be granted:
|
|
9
|
+
* - Per-operation (one-time)
|
|
10
|
+
* - Per-operation-type for the session (e.g., "approve all writes")
|
|
11
|
+
* - Globally via --dangerously-accept-all
|
|
12
|
+
*
|
|
13
|
+
* In the Ink UI, approvals are handled by emitting an event and waiting
|
|
14
|
+
* for the UI to resolve it (instead of blocking on stdin with inquirer).
|
|
15
|
+
*/
|
|
16
|
+
// Global state
let dangerouslyAcceptAll = false;
// Approval keys ("<sessionId>:<scope>") granted for the rest of the session.
const sessionApprovals = new Set();
// Callback that the Ink UI provides to handle interactive approval.
let approvalHandler = null;
/** Enable/disable the --dangerously-accept-all auto-approve mode. */
export function setDangerouslyAcceptAll(value) {
    dangerouslyAcceptAll = value;
}
/** @returns whether --dangerously-accept-all auto-approval is active */
export function isDangerouslyAcceptAll() {
    return dangerouslyAcceptAll;
}
/** Register the interactive approval callback (provided by the UI). */
export function setApprovalHandler(handler) {
    approvalHandler = handler;
}
/** Remove the interactive approval callback; requests then fail closed. */
export function clearApprovalHandler() {
    approvalHandler = null;
}
/** Drop all per-session approvals. */
export function clearSessionApprovals() {
    sessionApprovals.clear();
}
/** Build the Set key used for session-scoped approvals. */
function getApprovalScopeKey(req) {
    const sessionId = req.sessionId ?? '__global__';
    const scope = req.sessionScopeKey ?? req.type;
    return `${sessionId}:${scope}`;
}
/**
 * Request approval for an operation. Returns true if approved.
 *
 * Check order:
 * 1. --dangerously-accept-all → auto-approve
 * 2. Session approval for this type → auto-approve
 * 3. Interactive prompt via the UI handler
 * 4. No handler registered → reject (fail closed)
 */
export async function requestApproval(req) {
    if (dangerouslyAcceptAll)
        return true;
    const sessionKey = getApprovalScopeKey(req);
    if (sessionApprovals.has(sessionKey))
        return true;
    if (!approvalHandler) {
        // Fail closed: without a UI handler we cannot ask the user.
        return false;
    }
    const response = await approvalHandler(req);
    switch (response) {
        case 'approve_once':
            return true;
        case 'approve_session':
            sessionApprovals.add(sessionKey);
            return true;
        case 'reject':
            return false;
        default:
            // Fix: fail closed on unrecognised handler responses — the original
            // switch had no default and returned undefined here.
            return false;
    }
}
|
|
@@ -0,0 +1,87 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Conversation compaction.
|
|
3
|
+
*
|
|
4
|
+
* When the conversation approaches the context window limit (≥ 90%),
|
|
5
|
+
* the compactor summarises the older messages using the LLM and replaces
|
|
6
|
+
* them with a compact summary. The most recent messages are kept
|
|
7
|
+
* verbatim so the agent doesn't lose immediate context.
|
|
8
|
+
*/
|
|
9
|
+
import { estimateConversationTokens } from './cost-tracker.js';
|
|
10
|
+
import { logger } from './logger.js';
|
|
11
|
+
const RECENT_MESSAGES_TO_KEEP = 5;
|
|
12
|
+
// A "protected" skill message is a tool result carrying injected skill
// content; it must survive compaction verbatim.
function isProtectedSkillMessage(message) {
    if (message.role !== 'tool') {
        return false;
    }
    const { content } = message;
    return typeof content === 'string' && content.includes('<skill_content ');
}
|
|
15
|
+
const COMPRESSION_PROMPT = `You are a conversation state manager. Your job is to compress a conversation history into a compact summary that preserves all important context.
|
|
16
|
+
|
|
17
|
+
Produce a structured summary in this format:
|
|
18
|
+
|
|
19
|
+
<state_snapshot>
|
|
20
|
+
<overall_goal>What the user is trying to accomplish</overall_goal>
|
|
21
|
+
<key_knowledge>Important facts, conventions, constraints discovered</key_knowledge>
|
|
22
|
+
<file_system_state>Files created, read, modified, or deleted (with paths)</file_system_state>
|
|
23
|
+
<recent_actions>Last significant actions and their outcomes</recent_actions>
|
|
24
|
+
<current_plan>Current step-by-step plan with status: [DONE], [IN PROGRESS], [TODO]</current_plan>
|
|
25
|
+
</state_snapshot>
|
|
26
|
+
|
|
27
|
+
Be thorough but concise. Do not lose any information that would be needed to continue the conversation.`;
|
|
28
|
+
/**
 * Compact a conversation if it exceeds the context window threshold.
 * Returns the original messages if compaction isn't needed or fails.
 */
export async function compactIfNeeded(client, model, messages, contextWindow, currentTokens) {
    const percentUsed = (currentTokens / contextWindow) * 100;
    // Only compact once ≥ 90% of the context window is in use.
    if (percentUsed < 90) {
        return messages;
    }
    logger.info(`Compacting conversation (${percentUsed.toFixed(1)}% of context window used)`);
    try {
        return await compactConversation(client, model, messages);
    }
    catch (err) {
        // Compaction is best-effort: never fail the agent loop because of it.
        logger.error(`Compaction failed, continuing with original messages: ${err}`);
        return messages;
    }
}
|
|
45
|
+
/**
 * Summarise the middle of the conversation with the LLM and rebuild the
 * message list as: original system message + summary message + protected
 * skill messages + the last RECENT_MESSAGES_TO_KEEP messages verbatim.
 *
 * @throws Error when the model returns an empty summary
 */
async function compactConversation(client, model, messages) {
    // Separate system message, history to compress, and recent messages
    const systemMessage = messages[0];
    const recentMessages = messages.slice(-RECENT_MESSAGES_TO_KEEP);
    const middleMessages = messages.slice(1, messages.length - RECENT_MESSAGES_TO_KEEP);
    // Skill-content tool messages are kept verbatim rather than summarised.
    const protectedMessages = middleMessages.filter(isProtectedSkillMessage);
    const historyToCompress = middleMessages.filter((message) => !isProtectedSkillMessage(message));
    if (historyToCompress.length === 0) {
        logger.debug('Nothing to compact — conversation too short');
        return messages;
    }
    // Build compression request: each message rendered as "[role]: content",
    // falling back to serialised tool_calls for assistant tool-call messages.
    const compressionMessages = [
        { role: 'system', content: COMPRESSION_PROMPT },
        {
            role: 'user',
            content: `Here is the conversation history to compress:\n\n${historyToCompress
                .map((m) => `[${m.role}]: ${m.content || JSON.stringify(m.tool_calls || '')}`)
                .join('\n\n')}`,
        },
    ];
    // Low temperature keeps the summary deterministic and factual.
    const response = await client.chat.completions.create({
        model,
        messages: compressionMessages,
        max_tokens: 2000,
        temperature: 0.1,
    });
    const summary = response.choices[0]?.message?.content;
    if (!summary) {
        throw new Error('Compression returned empty response');
    }
    // Reconstruct: system + summary + recent messages
    const compacted = [
        systemMessage,
        { role: 'system', content: `Previous conversation summary:\n\n${summary}` },
        ...protectedMessages,
        ...recentMessages,
    ];
    const oldTokens = estimateConversationTokens(messages);
    const newTokens = estimateConversationTokens(compacted);
    logger.info(`Compacted ${oldTokens} → ${newTokens} tokens (${((1 - newTokens / oldTokens) * 100).toFixed(0)}% reduction)`);
    return compacted;
}
|