mulby-cli 1.1.5
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/PLUGIN_DEVELOP_PROMPT.md +1164 -0
- package/README.md +852 -0
- package/assets/default-icon.png +0 -0
- package/dist/commands/ai-session.js +44 -0
- package/dist/commands/build.js +111 -0
- package/dist/commands/config-ai.js +291 -0
- package/dist/commands/config.js +53 -0
- package/dist/commands/create/ai-create.js +183 -0
- package/dist/commands/create/assets.js +53 -0
- package/dist/commands/create/basic.js +72 -0
- package/dist/commands/create/index.js +73 -0
- package/dist/commands/create/react.js +136 -0
- package/dist/commands/create/templates/basic.js +383 -0
- package/dist/commands/create/templates/react/backend.js +72 -0
- package/dist/commands/create/templates/react/config.js +166 -0
- package/dist/commands/create/templates/react/docs.js +78 -0
- package/dist/commands/create/templates/react/hooks.js +469 -0
- package/dist/commands/create/templates/react/index.js +41 -0
- package/dist/commands/create/templates/react/types.js +1228 -0
- package/dist/commands/create/templates/react/ui.js +528 -0
- package/dist/commands/create/templates/react.js +1888 -0
- package/dist/commands/dev.js +141 -0
- package/dist/commands/pack.js +160 -0
- package/dist/commands/resume.js +97 -0
- package/dist/commands/test-ui.js +50 -0
- package/dist/index.js +71 -0
- package/dist/services/ai/PLUGIN_API.md +1102 -0
- package/dist/services/ai/PLUGIN_DEVELOP_PROMPT.md +1164 -0
- package/dist/services/ai/context-manager.js +639 -0
- package/dist/services/ai/index.js +88 -0
- package/dist/services/ai/knowledge.js +52 -0
- package/dist/services/ai/prompts.js +114 -0
- package/dist/services/ai/providers/base.js +38 -0
- package/dist/services/ai/providers/claude.js +284 -0
- package/dist/services/ai/providers/deepseek.js +28 -0
- package/dist/services/ai/providers/gemini.js +191 -0
- package/dist/services/ai/providers/glm.js +31 -0
- package/dist/services/ai/providers/minimax.js +27 -0
- package/dist/services/ai/providers/openai.js +177 -0
- package/dist/services/ai/tools.js +204 -0
- package/dist/services/ai-generator.js +968 -0
- package/dist/services/config-manager.js +117 -0
- package/dist/services/dependency-manager.js +236 -0
- package/dist/services/file-writer.js +66 -0
- package/dist/services/plan-adapter.js +244 -0
- package/dist/services/plan-command-handler.js +172 -0
- package/dist/services/plan-manager.js +502 -0
- package/dist/services/session-manager.js +113 -0
- package/dist/services/task-analyzer.js +136 -0
- package/dist/services/tui/index.js +57 -0
- package/dist/services/tui/store.js +123 -0
- package/dist/types/ai.js +172 -0
- package/dist/types/plan.js +2 -0
- package/dist/ui/Terminal.js +56 -0
- package/dist/ui/components/InputArea.js +176 -0
- package/dist/ui/components/LogArea.js +19 -0
- package/dist/ui/components/PlanPanel.js +69 -0
- package/dist/ui/components/SelectArea.js +13 -0
- package/package.json +45 -0
|
@@ -0,0 +1,968 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
|
|
3
|
+
if (k2 === undefined) k2 = k;
|
|
4
|
+
var desc = Object.getOwnPropertyDescriptor(m, k);
|
|
5
|
+
if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
|
|
6
|
+
desc = { enumerable: true, get: function() { return m[k]; } };
|
|
7
|
+
}
|
|
8
|
+
Object.defineProperty(o, k2, desc);
|
|
9
|
+
}) : (function(o, m, k, k2) {
|
|
10
|
+
if (k2 === undefined) k2 = k;
|
|
11
|
+
o[k2] = m[k];
|
|
12
|
+
}));
|
|
13
|
+
var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
|
|
14
|
+
Object.defineProperty(o, "default", { enumerable: true, value: v });
|
|
15
|
+
}) : function(o, v) {
|
|
16
|
+
o["default"] = v;
|
|
17
|
+
});
|
|
18
|
+
var __importStar = (this && this.__importStar) || (function () {
|
|
19
|
+
var ownKeys = function(o) {
|
|
20
|
+
ownKeys = Object.getOwnPropertyNames || function (o) {
|
|
21
|
+
var ar = [];
|
|
22
|
+
for (var k in o) if (Object.prototype.hasOwnProperty.call(o, k)) ar[ar.length] = k;
|
|
23
|
+
return ar;
|
|
24
|
+
};
|
|
25
|
+
return ownKeys(o);
|
|
26
|
+
};
|
|
27
|
+
return function (mod) {
|
|
28
|
+
if (mod && mod.__esModule) return mod;
|
|
29
|
+
var result = {};
|
|
30
|
+
if (mod != null) for (var k = ownKeys(mod), i = 0; i < k.length; i++) if (k[i] !== "default") __createBinding(result, mod, k[i]);
|
|
31
|
+
__setModuleDefault(result, mod);
|
|
32
|
+
return result;
|
|
33
|
+
};
|
|
34
|
+
})();
|
|
35
|
+
var __importDefault = (this && this.__importDefault) || function (mod) {
|
|
36
|
+
return (mod && mod.__esModule) ? mod : { "default": mod };
|
|
37
|
+
};
|
|
38
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
39
|
+
exports.AIAgent = void 0;
|
|
40
|
+
const chalk_1 = __importDefault(require("chalk"));
|
|
41
|
+
const fs = __importStar(require("fs-extra"));
|
|
42
|
+
const path = __importStar(require("path"));
|
|
43
|
+
const child_process_1 = require("child_process");
|
|
44
|
+
const ai_1 = require("./ai");
|
|
45
|
+
const session_manager_1 = require("./session-manager");
|
|
46
|
+
const file_writer_1 = require("./file-writer");
|
|
47
|
+
const prompts_1 = require("./ai/prompts");
|
|
48
|
+
const tools_1 = require("./ai/tools");
|
|
49
|
+
const context_manager_1 = require("./ai/context-manager");
|
|
50
|
+
const tui_1 = require("./tui");
|
|
51
|
+
const react_1 = require("../commands/create/react");
|
|
52
|
+
const plan_manager_1 = require("./plan-manager");
|
|
53
|
+
const plan_command_handler_1 = require("./plan-command-handler");
|
|
54
|
+
const task_analyzer_1 = require("./task-analyzer");
|
|
55
|
+
class AIAgent {
|
|
56
|
+
constructor(session, systemPrompt) {
|
|
57
|
+
this.session = session;
|
|
58
|
+
this.systemPrompt = systemPrompt;
|
|
59
|
+
this.aiService = ai_1.AIServiceFactory.create();
|
|
60
|
+
this.sessionManager = session_manager_1.SessionManager.getInstance();
|
|
61
|
+
this.autoApproveCommands = false;
|
|
62
|
+
this.currentPlan = null;
|
|
63
|
+
this.lastCompressedTokens = 0; // 记录上次压缩后的 token 数
|
|
64
|
+
this.fileWriter = new file_writer_1.FileWriter(session.targetDir);
|
|
65
|
+
this.planManager = new plan_manager_1.PlanManager(path.join(session.targetDir, '.mulby'));
|
|
66
|
+
this.planCommandHandler = new plan_command_handler_1.PlanCommandHandler(this.planManager, () => this.currentPlan, (plan) => { this.currentPlan = plan; }, async (plan) => { await this.planManager.savePlan(plan, this.session.id); });
|
|
67
|
+
}
|
|
68
|
+
async start(options = {}) {
|
|
69
|
+
tui_1.tui.start();
|
|
70
|
+
tui_1.tui.log(chalk_1.default.blue('🤖 AI Agent 已启动...'));
|
|
71
|
+
// Load existing plan if available
|
|
72
|
+
try {
|
|
73
|
+
this.currentPlan = await this.planManager.loadSessionPlan(this.session.id);
|
|
74
|
+
if (this.currentPlan) {
|
|
75
|
+
tui_1.tui.log(chalk_1.default.cyan(`📋 已加载任务计划: ${this.currentPlan.goal}`));
|
|
76
|
+
const summary = this.planManager.getProgressSummary(this.currentPlan);
|
|
77
|
+
tui_1.tui.log(chalk_1.default.gray(` 进度: ${summary.completed}/${summary.total} (${Math.round(summary.percentage)}%)`));
|
|
78
|
+
}
|
|
79
|
+
}
|
|
80
|
+
catch (error) {
|
|
81
|
+
// No plan exists, that's fine
|
|
82
|
+
}
|
|
83
|
+
// Initialize history if empty
|
|
84
|
+
if (this.session.conversationHistory.length === 0) {
|
|
85
|
+
this.session.conversationHistory.push({
|
|
86
|
+
role: 'system',
|
|
87
|
+
content: this.systemPrompt || prompts_1.SYSTEM_PROMPT
|
|
88
|
+
});
|
|
89
|
+
}
|
|
90
|
+
if (this.session.conversationHistory.length > 0) {
|
|
91
|
+
await this.checkAndCompressContext();
|
|
92
|
+
}
|
|
93
|
+
if (options.waitForInput) {
|
|
94
|
+
await this.handleUserInteraction();
|
|
95
|
+
}
|
|
96
|
+
await this.runLoop();
|
|
97
|
+
}
|
|
98
|
+
async runLoop() {
    // Main agent loop: one iteration == one model turn. Runs until the
    // session reaches a terminal status ('completed' or 'failed').
    let loopCount = 0;
    while (this.session.status !== 'completed' && this.session.status !== 'failed') {
        loopCount++;
        try {
            // 0.1 Compress conversation history if it has grown past the budget.
            await this.checkAndCompressContext();
            // 0.2 Refresh the "Current Project Structure" section of the system
            // prompt (message 0) with the latest file tree so the model always
            // sees the current workspace layout. The section is located by its
            // fixed header + fence and replaced in place; on first run (or after
            // migration from an older prompt) it is appended instead.
            if (this.session.conversationHistory.length > 0 && this.session.conversationHistory[0].role === 'system') {
                const currentFileMap = await this.generateFileMap();
                const currentContent = this.session.conversationHistory[0].content;
                // Content may be a structured (non-string) payload for some providers.
                let sysContent = typeof currentContent === 'string' ? currentContent : '';
                const mapHeader = '## Current Project Structure';
                if (sysContent.includes(mapHeader)) {
                    // Replace the existing fenced file tree in place.
                    sysContent = sysContent.replace(/## Current Project Structure\n\(Auto-updated file tree\)\n```[\s\S]*?```/, `## Current Project Structure\n(Auto-updated file tree)\n\`\`\`\n${currentFileMap}\n\`\`\``);
                }
                else {
                    // Append the section on first run or after migration.
                    sysContent += `\n\n## Current Project Structure\n(Auto-updated file tree)\n\`\`\`\n${currentFileMap}\n\`\`\``;
                }
                this.session.conversationHistory[0].content = sysContent;
            }
            tui_1.tui.setStatus(`Thinking... (Turn ${loopCount})`);
            const startTime = Date.now();
            // Streaming state for <think>...</think> segments injected by providers.
            let thinkingBuffer = '';
            let isThinking = false;
            let lastStatusUpdate = 0;
            // 1. Stream one model turn, surfacing reasoning text in the status bar.
            const response = await this.aiService.chatStream(this.session.conversationHistory, (chunk) => {
                // Strip provider-added thinking tags and track whether we are
                // inside a thinking segment.
                if (chunk.includes('<think>')) {
                    isThinking = true;
                    chunk = chunk.replace('<think>', '').trimStart();
                }
                if (chunk.includes('</think>')) {
                    isThinking = false;
                    chunk = chunk.replace('</think>', '').trimEnd();
                    tui_1.tui.setStatus(`Thinking completed. Generating response...`);
                }
                if (isThinking && chunk) {
                    thinkingBuffer += chunk;
                    // Show the tail of the thinking text (last 80 chars, newlines
                    // flattened). Throttled to ~10fps to avoid TUI lag.
                    const now = Date.now();
                    if (now - lastStatusUpdate > 100) {
                        const display = thinkingBuffer.slice(-80).replace(/\n/g, ' ');
                        tui_1.tui.setStatus(`Thinking: ${display}`);
                        lastStatusUpdate = now;
                    }
                }
            }, {
                tools: tools_1.PLUGIN_GENERATION_TOOLS,
                toolChoice: 'auto'
            });
            // Log turn duration and (when reported) token usage.
            const duration = ((Date.now() - startTime) / 1000).toFixed(1);
            let usageInfo = '';
            if (response.usage) {
                usageInfo = `, ${response.usage.totalTokens} tokens`;
            }
            tui_1.tui.log(chalk_1.default.gray(`Thinking... (Turn ${loopCount}) - ${duration}s${usageInfo}`));
            // 2. Record the assistant message (content + reasoning + tool calls)
            // and persist the session after every history mutation.
            const assistantMsg = {
                role: 'assistant',
                content: response.content,
                reasoning_content: response.reasoning_content,
                tool_calls: response.toolCalls
            };
            this.session.conversationHistory.push(assistantMsg);
            this.sessionManager.saveSession(this.session);
            if (response.content) {
                tui_1.tui.log(chalk_1.default.white('AI: ' + response.content));
            }
            // 3. Execute each requested tool call and append its result as a
            // 'tool' message keyed by tool_call_id.
            if (response.toolCalls && response.toolCalls.length > 0) {
                for (const toolCall of response.toolCalls) {
                    const toolName = toolCall.function.name;
                    const toolCallId = toolCall.id;
                    let toolArgs;
                    try {
                        toolArgs = JSON.parse(toolCall.function.arguments);
                    }
                    catch (parseError) {
                        tui_1.tui.log(chalk_1.default.red(`[Tool] JSON 解析失败: ${parseError.message}`));
                        tui_1.tui.log(chalk_1.default.gray(` 原始参数: ${toolCall.function.arguments.slice(0, 200)}...`));
                        // Feed the parse failure back so the model can retry with valid JSON.
                        this.session.conversationHistory.push({
                            role: 'tool',
                            tool_call_id: toolCallId,
                            name: toolName,
                            content: `Error: Failed to parse tool arguments - ${parseError.message}. Please retry with valid JSON.`
                        });
                        this.sessionManager.saveSession(this.session);
                        continue; // next tool call
                    }
                    tui_1.tui.log(chalk_1.default.cyan(`[Tool] Calling ${toolName}...`));
                    let result;
                    try {
                        tui_1.tui.setStatus(`Executing ${toolName}...`);
                        result = await this.executeTool(toolName, toolArgs);
                    }
                    catch (e) {
                        // Tool failures become result strings; the loop keeps running.
                        result = `Error executing tool ${toolName}: ${e.message}`;
                        tui_1.tui.log(chalk_1.default.red(`[Tool Error] ${result}`));
                    }
                    this.session.conversationHistory.push({
                        role: 'tool',
                        tool_call_id: toolCallId,
                        name: toolName,
                        content: result
                    });
                    this.sessionManager.saveSession(this.session);
                }
            }
            else {
                // No tool calls: the model is talking or (implicitly) asking.
                // It should use the 'ask_user' tool, but if it simply stops
                // acting we hand control to the user rather than spin.
                await this.handleUserInteraction();
            }
        }
        catch (error) {
            tui_1.tui.log(chalk_1.default.red('\n❌ Agent 发生错误: ' + error.message));
            // JSON/truncation errors usually mean the output was cut off by the
            // token limit; offer a compress-and-retry recovery path.
            const isJsonError = error.message.includes('JSON') || error.message.includes('Unterminated string');
            if (isJsonError) {
                tui_1.tui.log(chalk_1.default.yellow('⚠️ 检测到 JSON 解析错误,通常是因为输出被截断 (上下文过长)。'));
                const shouldRecover = await this.safePromptTui('是否尝试压缩上下文并重试本轮对话? (Y/n) [默认: Y]');
                if (shouldRecover.toLowerCase() !== 'n') {
                    // 1. Shrink the context.
                    await this.compressContext();
                    // 2. Drop the trailing assistant message if it was the partial
                    // one whose tool arguments failed to parse.
                    const lastMsg = this.session.conversationHistory[this.session.conversationHistory.length - 1];
                    if (lastMsg && lastMsg.role === 'assistant') {
                        this.session.conversationHistory.pop();
                        tui_1.tui.log(chalk_1.default.gray(' Info: Removed partial/failed assistant message from history.'));
                    }
                    this.sessionManager.saveSession(this.session);
                    tui_1.tui.log(chalk_1.default.green('✅ 恢复成功,正在重试...'));
                    continue; // retry this turn
                }
            }
            // Generic retry prompt; anything other than 'y' fails the session.
            const action = await this.safePromptTui('是否重试?(y/n/exit)');
            if (action.toLowerCase() === 'y') {
                continue;
            }
            this.session.status = 'failed';
            this.session.error = error.message;
            this.sessionManager.saveSession(this.session);
            tui_1.tui.stop();
            return;
        }
    }
    tui_1.tui.stop();
}
|
|
281
|
+
async executeTool(name, args) {
|
|
282
|
+
switch (name) {
|
|
283
|
+
case 'read_file':
|
|
284
|
+
return await this.handleReadFile(args.path);
|
|
285
|
+
case 'replace_in_file':
|
|
286
|
+
return await this.handleReplaceInFile(args.path, args.target, args.replacement);
|
|
287
|
+
case 'write_file':
|
|
288
|
+
return await this.handleWriteFile(args.path, args.content);
|
|
289
|
+
case 'run_command':
|
|
290
|
+
return await this.handleRunCommand(args.command);
|
|
291
|
+
case 'ask_user':
|
|
292
|
+
return await this.handleAskUser(args.question);
|
|
293
|
+
case 'scaffold_project':
|
|
294
|
+
return await this.handleScaffoldProject(args.reason);
|
|
295
|
+
case 'finish':
|
|
296
|
+
return await this.handleFinish(args.summary);
|
|
297
|
+
// New Tools
|
|
298
|
+
case 'list_dir':
|
|
299
|
+
return await this.handleListDir(args.path);
|
|
300
|
+
case 'search_files':
|
|
301
|
+
return await this.handleSearchFiles(args.query, args.path);
|
|
302
|
+
case 'read_file_outline':
|
|
303
|
+
return await this.handleReadFileOutline(args.path);
|
|
304
|
+
case 'delete_file':
|
|
305
|
+
return await this.handleDeleteFile(args.path);
|
|
306
|
+
case 'move_file':
|
|
307
|
+
return await this.handleMoveFile(args.source, args.destination);
|
|
308
|
+
case 'fetch_url':
|
|
309
|
+
return await this.handleFetchUrl(args.url);
|
|
310
|
+
case 'check_types':
|
|
311
|
+
return await this.handleCheckTypes();
|
|
312
|
+
// Legacy/Deprecated
|
|
313
|
+
case 'plan_files':
|
|
314
|
+
return "Tool 'plan_files' is deprecated. Please use read_file/write_file directly.";
|
|
315
|
+
default:
|
|
316
|
+
throw new Error(`Unknown tool: ${name}`);
|
|
317
|
+
}
|
|
318
|
+
}
|
|
319
|
+
// --- New Tool Handlers ---
|
|
320
|
+
async handleListDir(dirPath) {
|
|
321
|
+
const fullPath = path.resolve(this.session.targetDir, dirPath);
|
|
322
|
+
if (!await fs.pathExists(fullPath))
|
|
323
|
+
return `Directory not found: ${dirPath}`;
|
|
324
|
+
try {
|
|
325
|
+
const files = await fs.readdir(fullPath);
|
|
326
|
+
const detailed = await Promise.all(files.map(async (f) => {
|
|
327
|
+
const stat = await fs.stat(path.join(fullPath, f));
|
|
328
|
+
return `${f}${stat.isDirectory() ? '/' : ''}`;
|
|
329
|
+
}));
|
|
330
|
+
return `Contents of ${dirPath}:\n${detailed.join('\n')}`;
|
|
331
|
+
}
|
|
332
|
+
catch (e) {
|
|
333
|
+
return `Error listing directory: ${e.message}`;
|
|
334
|
+
}
|
|
335
|
+
}
|
|
336
|
+
async handleSearchFiles(query, searchPath = '.') {
|
|
337
|
+
const fullPath = path.resolve(this.session.targetDir, searchPath);
|
|
338
|
+
if (!await fs.pathExists(fullPath))
|
|
339
|
+
return `Path not found: ${searchPath}`;
|
|
340
|
+
const results = [];
|
|
341
|
+
try {
|
|
342
|
+
// Recursive walk
|
|
343
|
+
const walk = async (dir) => {
|
|
344
|
+
const files = await fs.readdir(dir);
|
|
345
|
+
for (const file of files) {
|
|
346
|
+
if (['node_modules', '.git', 'dist'].includes(file))
|
|
347
|
+
continue;
|
|
348
|
+
const fPath = path.join(dir, file);
|
|
349
|
+
const stat = await fs.stat(fPath);
|
|
350
|
+
if (stat.isDirectory()) {
|
|
351
|
+
await walk(fPath);
|
|
352
|
+
}
|
|
353
|
+
else {
|
|
354
|
+
const content = await fs.readFile(fPath, 'utf-8');
|
|
355
|
+
if (content.includes(query)) { // Simple string match, regex support would need eval or new RegExp
|
|
356
|
+
// If query is regex-like, try regex?
|
|
357
|
+
// The prompt says "String or Regex".
|
|
358
|
+
// Let's safe-guard: if it looks like regex, use regex.
|
|
359
|
+
// Actually, simple includes is safer for now.
|
|
360
|
+
// If user wants regex, we can try new RegExp(query).
|
|
361
|
+
let match = false;
|
|
362
|
+
try {
|
|
363
|
+
if (new RegExp(query).test(content))
|
|
364
|
+
match = true;
|
|
365
|
+
}
|
|
366
|
+
catch {
|
|
367
|
+
if (content.includes(query))
|
|
368
|
+
match = true;
|
|
369
|
+
}
|
|
370
|
+
if (match) {
|
|
371
|
+
// Find line number
|
|
372
|
+
const lines = content.split('\n');
|
|
373
|
+
lines.forEach((line, idx) => {
|
|
374
|
+
if (line.includes(query) || (new RegExp(query).test(line))) {
|
|
375
|
+
results.push(`${path.relative(this.session.targetDir, fPath)}:${idx + 1}: ${line.trim().slice(0, 100)}`);
|
|
376
|
+
}
|
|
377
|
+
});
|
|
378
|
+
}
|
|
379
|
+
}
|
|
380
|
+
}
|
|
381
|
+
}
|
|
382
|
+
};
|
|
383
|
+
await walk(fullPath);
|
|
384
|
+
return results.length > 0 ? results.join('\n') : 'No matches found.';
|
|
385
|
+
}
|
|
386
|
+
catch (e) {
|
|
387
|
+
return `Error searching files: ${e.message}`;
|
|
388
|
+
}
|
|
389
|
+
}
|
|
390
|
+
async handleReadFileOutline(filePath) {
|
|
391
|
+
const fullPath = path.resolve(this.session.targetDir, filePath);
|
|
392
|
+
if (!await fs.pathExists(fullPath))
|
|
393
|
+
return `File not found: ${filePath}`;
|
|
394
|
+
try {
|
|
395
|
+
const content = await fs.readFile(fullPath, 'utf-8');
|
|
396
|
+
const lines = content.split('\n');
|
|
397
|
+
const outline = lines
|
|
398
|
+
.map((line, idx) => ({ line, idx: idx + 1 }))
|
|
399
|
+
.filter(({ line }) => {
|
|
400
|
+
const l = line.trim();
|
|
401
|
+
return (l.startsWith('export ') ||
|
|
402
|
+
l.startsWith('class ') ||
|
|
403
|
+
l.startsWith('function ') ||
|
|
404
|
+
l.startsWith('interface ') ||
|
|
405
|
+
l.startsWith('type ') ||
|
|
406
|
+
l.match(/^const\s+[A-Z_]+\s+=/) // Constants
|
|
407
|
+
) && !l.startsWith('export default'); // Handle default exports maybe?
|
|
408
|
+
})
|
|
409
|
+
.map(({ line, idx }) => `${idx}: ${line.trim()}`);
|
|
410
|
+
return outline.length > 0 ? outline.join('\n') : 'No outline elements found (only imports or internal code?).';
|
|
411
|
+
}
|
|
412
|
+
catch (e) {
|
|
413
|
+
return `Error reading outline: ${e.message}`;
|
|
414
|
+
}
|
|
415
|
+
}
|
|
416
|
+
async handleDeleteFile(filePath) {
|
|
417
|
+
const fullPath = path.resolve(this.session.targetDir, filePath);
|
|
418
|
+
try {
|
|
419
|
+
await fs.remove(fullPath);
|
|
420
|
+
tui_1.tui.log(chalk_1.default.yellow(` ✓ Deleted ${filePath}`));
|
|
421
|
+
return `Deleted ${filePath}`;
|
|
422
|
+
}
|
|
423
|
+
catch (e) {
|
|
424
|
+
return `Error deleting file: ${e.message}`;
|
|
425
|
+
}
|
|
426
|
+
}
|
|
427
|
+
async handleMoveFile(source, dest) {
|
|
428
|
+
const fullSource = path.resolve(this.session.targetDir, source);
|
|
429
|
+
const fullDest = path.resolve(this.session.targetDir, dest);
|
|
430
|
+
try {
|
|
431
|
+
await fs.move(fullSource, fullDest, { overwrite: true });
|
|
432
|
+
tui_1.tui.log(chalk_1.default.yellow(` ✓ Moved ${source} -> ${dest}`));
|
|
433
|
+
return `Moved ${source} to ${dest}`;
|
|
434
|
+
}
|
|
435
|
+
catch (e) {
|
|
436
|
+
return `Error moving file: ${e.message}`;
|
|
437
|
+
}
|
|
438
|
+
}
|
|
439
|
+
async handleFetchUrl(url) {
|
|
440
|
+
tui_1.tui.log(chalk_1.default.cyan(` 🌐 Fetching ${url}...`));
|
|
441
|
+
try {
|
|
442
|
+
// Using global fetch (Node 18+)
|
|
443
|
+
const response = await fetch(url);
|
|
444
|
+
if (!response.ok)
|
|
445
|
+
return `Failed to fetch: ${response.status} ${response.statusText}`;
|
|
446
|
+
const text = await response.text();
|
|
447
|
+
// Simple naive HTML to text or just return raw?
|
|
448
|
+
// Use a simple heuristic to strip tags if HTML?
|
|
449
|
+
// For now return raw but truncated
|
|
450
|
+
const maxLength = 20000;
|
|
451
|
+
const content = text.length > maxLength
|
|
452
|
+
? text.slice(0, maxLength) + `\n...(Truncated ${text.length - maxLength} chars)`
|
|
453
|
+
: text;
|
|
454
|
+
return content;
|
|
455
|
+
}
|
|
456
|
+
catch (e) {
|
|
457
|
+
return `Fetch error: ${e.message}`;
|
|
458
|
+
}
|
|
459
|
+
}
|
|
460
|
+
async handleCheckTypes() {
|
|
461
|
+
tui_1.tui.log(chalk_1.default.cyan(` 🛡️ Checking types...`));
|
|
462
|
+
try {
|
|
463
|
+
return await this.handleRunCommand('npm run build'); // Commonly runs tsc
|
|
464
|
+
// Or explicitly: npx tsc --noEmit
|
|
465
|
+
}
|
|
466
|
+
catch (e) {
|
|
467
|
+
return `Type check failed: ${e.message}`;
|
|
468
|
+
}
|
|
469
|
+
}
|
|
470
|
+
async handleReadFile(filePath) {
|
|
471
|
+
const fullPath = path.resolve(this.session.targetDir, filePath);
|
|
472
|
+
if (!await fs.pathExists(fullPath)) {
|
|
473
|
+
return `File not found: ${filePath}`;
|
|
474
|
+
}
|
|
475
|
+
return await fs.readFile(fullPath, 'utf-8');
|
|
476
|
+
}
|
|
477
|
+
async handleWriteFile(filePath, content) {
|
|
478
|
+
await this.fileWriter.writeFile(filePath, content);
|
|
479
|
+
tui_1.tui.log(chalk_1.default.green(` ✓ Wrote ${filePath}`));
|
|
480
|
+
return `Successfully wrote file: ${filePath}`;
|
|
481
|
+
}
|
|
482
|
+
async handleScaffoldProject(reason) {
|
|
483
|
+
tui_1.tui.log(chalk_1.default.cyan(`📦 正在生成项目脚手架... (${reason})`));
|
|
484
|
+
const targetDir = this.session.targetDir;
|
|
485
|
+
const pluginName = this.session.pluginName || path.basename(targetDir);
|
|
486
|
+
try {
|
|
487
|
+
await (0, react_1.createReactProject)(targetDir, pluginName);
|
|
488
|
+
tui_1.tui.log(chalk_1.default.green('✓ 脚手架创建完成'));
|
|
489
|
+
return `Project scaffold created successfully at ${targetDir}. The following files were generated:
|
|
490
|
+
- package.json
|
|
491
|
+
- manifest.json
|
|
492
|
+
- vite.config.ts
|
|
493
|
+
- tsconfig.json
|
|
494
|
+
- src/ui/App.tsx
|
|
495
|
+
- src/ui/main.tsx
|
|
496
|
+
- src/ui/styles.css
|
|
497
|
+
- src/ui/index.html
|
|
498
|
+
- src/main.ts
|
|
499
|
+
|
|
500
|
+
Now you can start implementing the features by modifying these files.`;
|
|
501
|
+
}
|
|
502
|
+
catch (e) {
|
|
503
|
+
return `Failed to create scaffold: ${e.message}`;
|
|
504
|
+
}
|
|
505
|
+
}
|
|
506
|
+
async handleReplaceInFile(filePath, target, replacement) {
|
|
507
|
+
const fullPath = path.resolve(this.session.targetDir, filePath);
|
|
508
|
+
if (!await fs.pathExists(fullPath)) {
|
|
509
|
+
return `File not found: ${filePath}`;
|
|
510
|
+
}
|
|
511
|
+
const content = await fs.readFile(fullPath, 'utf-8');
|
|
512
|
+
if (!content.includes(target)) {
|
|
513
|
+
// Check for potential whitespace/formatting issues causing mismatch
|
|
514
|
+
// For now, strict match failure
|
|
515
|
+
return `Error: Target string not found in file. Please ensure 'target' matches exactly (including indentation). You might want to use read_file first to verify constraint.`;
|
|
516
|
+
}
|
|
517
|
+
const parts = content.split(target);
|
|
518
|
+
if (parts.length > 2) {
|
|
519
|
+
return `Error: Target string found multiple times (${parts.length - 1} times). Please provide a more unique target string context to ensure correct replacement.`;
|
|
520
|
+
}
|
|
521
|
+
const newContent = content.replace(target, replacement);
|
|
522
|
+
await this.fileWriter.writeFile(filePath, newContent);
|
|
523
|
+
tui_1.tui.log(chalk_1.default.green(` ✓ Modified ${filePath}`));
|
|
524
|
+
return `Successfully replaced content in ${filePath}.`;
|
|
525
|
+
}
|
|
526
|
+
async handleRunCommand(command) {
|
|
527
|
+
// Security check? whitelist?
|
|
528
|
+
// simple whitelist for now
|
|
529
|
+
const allowed = ['npm install', 'npm i', 'yarn add', 'pnpm add', 'mkdir', 'touch'];
|
|
530
|
+
const isAllowed = allowed.some(p => command.startsWith(p));
|
|
531
|
+
if (!isAllowed && !this.autoApproveCommands) {
|
|
532
|
+
const confirm = await this.safePromptTui(`AI wants to run command: "${command}". Allow? (y/n/a[lways])`);
|
|
533
|
+
const lower = confirm.toLowerCase();
|
|
534
|
+
if (lower === 'a' || lower === 'always') {
|
|
535
|
+
this.autoApproveCommands = true;
|
|
536
|
+
}
|
|
537
|
+
else if (lower !== 'y') {
|
|
538
|
+
return "User denied command execution.";
|
|
539
|
+
}
|
|
540
|
+
}
|
|
541
|
+
tui_1.tui.log(chalk_1.default.yellow(` > Executing: ${command}`));
|
|
542
|
+
return new Promise((resolve, reject) => {
|
|
543
|
+
const child = (0, child_process_1.spawn)(command, {
|
|
544
|
+
cwd: this.session.targetDir,
|
|
545
|
+
shell: true,
|
|
546
|
+
stdio: ['ignore', 'pipe', 'pipe']
|
|
547
|
+
});
|
|
548
|
+
let stdout = '';
|
|
549
|
+
let stderr = '';
|
|
550
|
+
child.stdout.on('data', (d) => stdout += d.toString());
|
|
551
|
+
child.stderr.on('data', (d) => stderr += d.toString());
|
|
552
|
+
child.on('close', (code) => {
|
|
553
|
+
if (code === 0) {
|
|
554
|
+
resolve(`Command executed successfully.\nOutput: ${stdout}`);
|
|
555
|
+
}
|
|
556
|
+
else {
|
|
557
|
+
resolve(`Command failed with code ${code}.\nStderr: ${stderr}\nStdout: ${stdout}`);
|
|
558
|
+
}
|
|
559
|
+
});
|
|
560
|
+
child.on('error', (err) => resolve(`Command execution error: ${err.message}`));
|
|
561
|
+
});
|
|
562
|
+
}
|
|
563
|
+
// ... (previous methods)
|
|
564
|
+
// Centralized handler for user input to intercept Slash Commands
|
|
565
|
+
async promptUser(message) {
|
|
566
|
+
tui_1.tui.setStatus('Waiting for user input...');
|
|
567
|
+
const prefix = chalk_1.default.blue('›');
|
|
568
|
+
// Use TUI prompt
|
|
569
|
+
const input = await tui_1.tui.prompt(`${prefix} ${message}`);
|
|
570
|
+
if (input.startsWith('/')) {
|
|
571
|
+
const handled = await this.handleSlashCommand(input);
|
|
572
|
+
if (handled) {
|
|
573
|
+
// If command handled (e.g. /tokens), we prompt again effectively (or return null to loop)
|
|
574
|
+
// For simplified flow, we return null to indicate "no input for AI yet, handled by system"
|
|
575
|
+
return null;
|
|
576
|
+
}
|
|
577
|
+
// If /exit, handleSlashCommand handles process exit or session ending
|
|
578
|
+
}
|
|
579
|
+
return input;
|
|
580
|
+
}
|
|
581
|
+
    /**
     * Executes a slash command typed by the user.
     * @param {string} command - Raw input beginning with '/' (e.g. "/use openai").
     * @returns {Promise<boolean>} true when the command was fully handled by the
     *   system; false when the input should still be processed by the AI
     *   (currently only "/plan <requirement>", which queues a planning prompt).
     */
    async handleSlashCommand(command) {
        const [cmd, ...args] = command.split(' ');
        switch (cmd) {
            case '/exit':
            case '/quit':
                tui_1.tui.log(chalk_1.default.yellow('👋 Exiting session...'));
                // Check if we are exiting while tool calls are pending (e.g. at a prompt inside a tool)
                const last = this.session.conversationHistory[this.session.conversationHistory.length - 1];
                if (last && last.role === 'assistant' && last.tool_calls && last.tool_calls.length > 0) {
                    tui_1.tui.log(chalk_1.default.gray('Closing pending tool calls...'));
                    // Synthesize a tool result for each pending call so the saved
                    // history remains well-formed (an assistant tool_call without a
                    // matching tool reply is invalid on resume).
                    for (const call of last.tool_calls) {
                        this.session.conversationHistory.push({
                            role: 'tool',
                            tool_call_id: call.id,
                            name: call.function.name,
                            content: 'Session exited by user.'
                        });
                    }
                }
                this.session.status = 'completed';
                this.sessionManager.saveSession(this.session);
                tui_1.tui.stop();
                // Give the TUI a moment to finish its cleanup before exiting
                setTimeout(() => process.exit(0), 100);
                return true;
            case '/clear':
                tui_1.tui.log(chalk_1.default.yellow('🧹 Clearing context (keeping system prompt)...'));
                // Keep only the system message (if any) so the agent retains its instructions
                const systemPrompt = this.session.conversationHistory.find(m => m.role === 'system');
                this.session.conversationHistory = systemPrompt ? [systemPrompt] : [];
                this.sessionManager.saveSession(this.session);
                return true;
            case '/tokens':
                const count = context_manager_1.ContextManager.estimateTokenCount(this.session.conversationHistory);
                tui_1.tui.log(chalk_1.default.cyan(`📊 Current Context: ~${count} tokens (${this.session.conversationHistory.length} messages)`));
                return true;
            case '/compress':
                tui_1.tui.log(chalk_1.default.yellow('📦 Compressing context...'));
                await this.compressContext();
                return true;
            case '/use':
                // Switch Provider
                if (args.length === 0) {
                    // No argument: show an interactive picker of configured providers
                    const providers = ai_1.AIServiceFactory.listProviders();
                    const defaultProvider = ai_1.AIServiceFactory.getDefaultProvider();
                    const current = this.currentProvider || defaultProvider || providers[0];
                    const items = providers.map(p => ({
                        label: p === current ? `${p} (Current)` : p,
                        value: p
                    }));
                    tui_1.tui.log(chalk_1.default.cyan('Select AI Provider:'));
                    const selected = await tui_1.tui.select(items);
                    if (selected) {
                        const config = ai_1.AIServiceFactory.getProviderConfig(selected);
                        if (config) {
                            this.currentProvider = selected;
                            // Reset the model so the new provider's default is used
                            this.currentModel = undefined;
                            this.aiService = ai_1.AIServiceFactory.create(selected);
                            tui_1.tui.log(chalk_1.default.green(`✓ Switched to "${selected}" (${config.provider})`));
                        }
                    }
                }
                else {
                    // Argument given: switch directly by configuration name
                    const providerName = args[0];
                    const config = ai_1.AIServiceFactory.getProviderConfig(providerName);
                    if (!config) {
                        tui_1.tui.log(chalk_1.default.red(`❌ Configuration "${providerName}" not found`));
                    }
                    else {
                        this.currentProvider = providerName;
                        this.currentModel = undefined;
                        this.aiService = ai_1.AIServiceFactory.create(providerName);
                        tui_1.tui.log(chalk_1.default.green(`✓ Switched to "${providerName}" (${config.provider})`));
                    }
                }
                return true;
            case '/model':
                // Switch Model
                if (args.length === 0) {
                    // No argument: offer the predefined model list for the active provider
                    const providers = ai_1.AIServiceFactory.listProviders();
                    const defaultProvider = ai_1.AIServiceFactory.getDefaultProvider();
                    const currentProviderName = this.currentProvider || defaultProvider || providers[0];
                    const config = ai_1.AIServiceFactory.getProviderConfig(currentProviderName);
                    if (config) {
                        const currentModel = this.currentModel || config.model || 'Default';
                        // Import PROVIDER_MODELS lazily (dynamic require) rather than at module load
                        const { PROVIDER_MODELS } = await Promise.resolve().then(() => __importStar(require('../types/ai')));
                        const availableModels = PROVIDER_MODELS[config.provider];
                        if (availableModels && availableModels.length > 0) {
                            const items = availableModels.map(m => ({
                                label: m === currentModel ? `${m} (Current)` : m,
                                value: m
                            }));
                            tui_1.tui.log(chalk_1.default.cyan(`Select Model for ${currentProviderName}:`));
                            const selected = await tui_1.tui.select(items);
                            if (selected) {
                                this.currentModel = selected;
                                this.aiService = ai_1.AIServiceFactory.create(currentProviderName, selected);
                                tui_1.tui.log(chalk_1.default.green(`✓ Switched model to "${selected}"`));
                            }
                        }
                        else {
                            tui_1.tui.log(chalk_1.default.yellow(`No models predefined for provider type: ${config.provider}`));
                        }
                    }
                }
                else {
                    // Argument given: use the model name verbatim (joined, so it may contain spaces)
                    const modelName = args.join(' ');
                    const providers = ai_1.AIServiceFactory.listProviders();
                    const defaultProvider = ai_1.AIServiceFactory.getDefaultProvider();
                    const currentProviderName = this.currentProvider || defaultProvider || providers[0];
                    this.currentModel = modelName;
                    this.aiService = ai_1.AIServiceFactory.create(currentProviderName, modelName);
                    tui_1.tui.log(chalk_1.default.green(`✓ Switched model to "${modelName}"`));
                }
                return true;
            case '/help':
                tui_1.tui.log(chalk_1.default.green(`
Available Commands:
/exit, /quit - Save and exit
/clear - Clear conversation history (keeps system prompt)
/tokens - Show estimated token usage
/compress - Manually compress context
/use [name] - Switch AI provider
/model [name] - Switch model
/plan [需求] - Show plan or force plan mode
/help - Show this help
`));
                return true;
            case '/plan':
                const result = await this.planCommandHandler.handlePlanCommand(args);
                if (!result.handled && result.requirement) {
                    // Force plan mode - add to conversation for AI to create plan
                    this.session.conversationHistory.push({
                        role: 'user',
                        content: `请为以下任务创建一个详细的执行计划:\n\n${result.requirement}\n\n请列出具体的步骤(作为 todo list),然后开始执行。`
                    });
                    this.sessionManager.saveSession(this.session);
                    return false; // Let AI process
                }
                return true;
            default:
                tui_1.tui.log(chalk_1.default.red(`Unknown command: ${cmd}`));
                return true;
        }
    }
|
|
726
|
+
async checkAndCompressContext() {
|
|
727
|
+
const inputTokens = context_manager_1.ContextManager.estimateTokenCount(this.session.conversationHistory);
|
|
728
|
+
// Get model's context window and max output tokens
|
|
729
|
+
const contextWindow = this.aiService.getContextWindow();
|
|
730
|
+
const maxOutputTokens = this.aiService.getMaxOutputTokens();
|
|
731
|
+
// Total tokens = input + output
|
|
732
|
+
const totalTokens = inputTokens + maxOutputTokens;
|
|
733
|
+
// 1. Growth check: avoid frequent compression
|
|
734
|
+
const growth = inputTokens - this.lastCompressedTokens;
|
|
735
|
+
const MIN_GROWTH = contextWindow * 0.08; // 8% growth threshold
|
|
736
|
+
if (this.lastCompressedTokens > 0 && growth < MIN_GROWTH) {
|
|
737
|
+
return; // Skip if growth is insufficient
|
|
738
|
+
}
|
|
739
|
+
// 2. Tiered compression zones (based on total tokens including output)
|
|
740
|
+
const SAFE_ZONE = contextWindow * 0.5; // 50% - safe, no compression
|
|
741
|
+
const LIGHT_ZONE = contextWindow * 0.65; // 65% - light compression (prune tool outputs)
|
|
742
|
+
const MEDIUM_ZONE = contextWindow * 0.78; // 78% - medium compression (summarize + keep important)
|
|
743
|
+
const HEAVY_ZONE = contextWindow * 0.88; // 88% - heavy compression (aggressive summarization)
|
|
744
|
+
const CRITICAL_ZONE = contextWindow * 0.95; // 95% - critical compression (keep only recent)
|
|
745
|
+
if (totalTokens < SAFE_ZONE) {
|
|
746
|
+
return; // Safe zone, no action needed
|
|
747
|
+
}
|
|
748
|
+
// Calculate target input tokens (reserve space for output)
|
|
749
|
+
const getTargetInputTokens = (percentage) => {
|
|
750
|
+
return Math.floor(contextWindow * percentage) - maxOutputTokens;
|
|
751
|
+
};
|
|
752
|
+
if (totalTokens < LIGHT_ZONE) {
|
|
753
|
+
// Light compression: only prune tool outputs
|
|
754
|
+
tui_1.tui.log(chalk_1.default.yellow(`⚠️ Context at ${inputTokens} input + ${maxOutputTokens} output = ${totalTokens} tokens (${Math.round(totalTokens / contextWindow * 100)}% of ${contextWindow}). Applying light compression...`));
|
|
755
|
+
await this.lightCompress();
|
|
756
|
+
const afterTokens = context_manager_1.ContextManager.estimateTokenCount(this.session.conversationHistory);
|
|
757
|
+
tui_1.tui.log(chalk_1.default.green(`✅ Light compression: ${inputTokens} -> ${afterTokens} tokens.`));
|
|
758
|
+
this.lastCompressedTokens = afterTokens;
|
|
759
|
+
}
|
|
760
|
+
else if (totalTokens < MEDIUM_ZONE) {
|
|
761
|
+
// Medium compression: target 50% of context window (minus output)
|
|
762
|
+
const target = getTargetInputTokens(0.5);
|
|
763
|
+
tui_1.tui.log(chalk_1.default.yellow(`⚠️ Context at ${inputTokens} input + ${maxOutputTokens} output = ${totalTokens} tokens (${Math.round(totalTokens / contextWindow * 100)}% of ${contextWindow}). Applying medium compression (target: ${target})...`));
|
|
764
|
+
await this.compressContext(target);
|
|
765
|
+
const afterTokens = context_manager_1.ContextManager.estimateTokenCount(this.session.conversationHistory);
|
|
766
|
+
tui_1.tui.log(chalk_1.default.green(`✅ Medium compression: ${inputTokens} -> ${afterTokens} tokens.`));
|
|
767
|
+
this.lastCompressedTokens = afterTokens;
|
|
768
|
+
}
|
|
769
|
+
else if (totalTokens < HEAVY_ZONE) {
|
|
770
|
+
// Heavy compression: target 35% of context window (minus output)
|
|
771
|
+
const target = getTargetInputTokens(0.35);
|
|
772
|
+
tui_1.tui.log(chalk_1.default.red(`⚠️ Context at ${inputTokens} input + ${maxOutputTokens} output = ${totalTokens} tokens (${Math.round(totalTokens / contextWindow * 100)}% of ${contextWindow}). Applying heavy compression (target: ${target})...`));
|
|
773
|
+
await this.compressContext(target);
|
|
774
|
+
const afterTokens = context_manager_1.ContextManager.estimateTokenCount(this.session.conversationHistory);
|
|
775
|
+
tui_1.tui.log(chalk_1.default.green(`✅ Heavy compression: ${inputTokens} -> ${afterTokens} tokens.`));
|
|
776
|
+
this.lastCompressedTokens = afterTokens;
|
|
777
|
+
}
|
|
778
|
+
else if (totalTokens < CRITICAL_ZONE) {
|
|
779
|
+
// Critical compression: target 25% of context window (minus output)
|
|
780
|
+
const target = getTargetInputTokens(0.25);
|
|
781
|
+
tui_1.tui.log(chalk_1.default.red(`🚨 Context at ${inputTokens} input + ${maxOutputTokens} output = ${totalTokens} tokens (${Math.round(totalTokens / contextWindow * 100)}% of ${contextWindow}). Applying critical compression (target: ${target})...`));
|
|
782
|
+
await this.forceAggressiveCompress(target);
|
|
783
|
+
const afterTokens = context_manager_1.ContextManager.estimateTokenCount(this.session.conversationHistory);
|
|
784
|
+
tui_1.tui.log(chalk_1.default.green(`✅ Critical compression: ${inputTokens} -> ${afterTokens} tokens.`));
|
|
785
|
+
this.lastCompressedTokens = afterTokens;
|
|
786
|
+
}
|
|
787
|
+
else {
|
|
788
|
+
// Emergency: over 95%, immediate aggressive compression
|
|
789
|
+
const target = getTargetInputTokens(0.15);
|
|
790
|
+
tui_1.tui.log(chalk_1.default.red(`🚨 EMERGENCY: Context at ${inputTokens} input + ${maxOutputTokens} output = ${totalTokens} tokens (${Math.round(totalTokens / contextWindow * 100)}% of ${contextWindow}). Forcing aggressive compression (target: ${target})...`));
|
|
791
|
+
await this.forceAggressiveCompress(target);
|
|
792
|
+
const afterTokens = context_manager_1.ContextManager.estimateTokenCount(this.session.conversationHistory);
|
|
793
|
+
tui_1.tui.log(chalk_1.default.green(`✅ Emergency compression: ${inputTokens} -> ${afterTokens} tokens.`));
|
|
794
|
+
this.lastCompressedTokens = afterTokens;
|
|
795
|
+
}
|
|
796
|
+
}
|
|
797
|
+
async lightCompress() {
|
|
798
|
+
// Only prune tool outputs, no summarization
|
|
799
|
+
this.session.conversationHistory = context_manager_1.ContextManager.lightCompress(this.session.conversationHistory);
|
|
800
|
+
this.sessionManager.saveSession(this.session);
|
|
801
|
+
tui_1.tui.log(chalk_1.default.green('✅ Light compression applied (tool outputs pruned).'));
|
|
802
|
+
}
|
|
803
|
+
async compressContext(targetTokens = 8000) {
|
|
804
|
+
const beforeTokens = context_manager_1.ContextManager.estimateTokenCount(this.session.conversationHistory);
|
|
805
|
+
// Simple text summarizer - no JSON parsing to avoid truncation/format issues
|
|
806
|
+
const summarizer = async (text) => {
|
|
807
|
+
const prompt = `请将以下技术对话历史总结为简洁的上下文摘要,帮助后续继续对话。
|
|
808
|
+
|
|
809
|
+
格式要求(纯文本,不要用 JSON 或 markdown):
|
|
810
|
+
- 用户目标:(1-2句话描述核心需求)
|
|
811
|
+
- 关键决策:(已做出的重要决定,用逗号分隔)
|
|
812
|
+
- 当前状态:(进度和状态)
|
|
813
|
+
- 修改的文件:(涉及的文件路径,用逗号分隔)
|
|
814
|
+
- 待完成:(剩余任务,用逗号分隔)
|
|
815
|
+
|
|
816
|
+
对话历史:
|
|
817
|
+
${text}
|
|
818
|
+
|
|
819
|
+
请直接输出摘要,不要用代码块或其他格式。`;
|
|
820
|
+
try {
|
|
821
|
+
const result = await this.aiService.chat([
|
|
822
|
+
{ role: 'system', content: '你是一个帮助生成对话摘要的助手。直接输出纯文本摘要,不要使用 JSON、markdown 或代码块格式。' },
|
|
823
|
+
{ role: 'user', content: prompt }
|
|
824
|
+
], { toolChoice: 'none' });
|
|
825
|
+
return result.content || '无法生成摘要';
|
|
826
|
+
}
|
|
827
|
+
catch (error) {
|
|
828
|
+
console.warn('Failed to generate summary:', error);
|
|
829
|
+
// Ultra simple fallback
|
|
830
|
+
return `对话摘要生成失败,保留原始上下文的最后部分。`;
|
|
831
|
+
}
|
|
832
|
+
};
|
|
833
|
+
this.session.conversationHistory = await context_manager_1.ContextManager.compressHistory(this.session.conversationHistory, targetTokens, summarizer);
|
|
834
|
+
const afterTokens = context_manager_1.ContextManager.estimateTokenCount(this.session.conversationHistory);
|
|
835
|
+
this.sessionManager.saveSession(this.session);
|
|
836
|
+
if (afterTokens >= beforeTokens * 0.9) {
|
|
837
|
+
// Compression didn't help much - this usually means most messages are important
|
|
838
|
+
// or the summarization couldn't reduce much. Just log a warning.
|
|
839
|
+
tui_1.tui.log(chalk_1.default.yellow(`⚠️ Compression limited: ${beforeTokens} -> ${afterTokens} tokens (most content deemed important).`));
|
|
840
|
+
}
|
|
841
|
+
else {
|
|
842
|
+
tui_1.tui.log(chalk_1.default.green(`✅ Context compressed: ${beforeTokens} -> ${afterTokens} tokens.`));
|
|
843
|
+
}
|
|
844
|
+
}
|
|
845
|
+
/**
|
|
846
|
+
* Force aggressive compression - uses the same summarization logic as compressContext
|
|
847
|
+
* but with a more aggressive target. The key is still summarization, not deletion.
|
|
848
|
+
*/
|
|
849
|
+
async forceAggressiveCompress(targetTokens) {
|
|
850
|
+
// Simply delegate to compressContext with the aggressive target
|
|
851
|
+
// This ensures we still get summarization of removed messages
|
|
852
|
+
await this.compressContext(targetTokens);
|
|
853
|
+
}
|
|
854
|
+
async handleAskUser(question) {
|
|
855
|
+
tui_1.tui.log(chalk_1.default.magenta(`\n🤖 AI Question: ${question}`));
|
|
856
|
+
while (true) {
|
|
857
|
+
const answer = await this.promptUser('Your Answer:');
|
|
858
|
+
if (answer !== null)
|
|
859
|
+
return answer;
|
|
860
|
+
// If answer is null, it meant a slash command was executed, so we loop again to ask for input.
|
|
861
|
+
}
|
|
862
|
+
}
|
|
863
|
+
    /**
     * Collects the next user turn and appends it to the conversation history.
     * Loops until real input (or an explicit empty Enter) is received:
     * - null from promptUser means a slash command was executed → re-prompt;
     * - non-empty input may trigger task analysis and an optional planning
     *   detour before being pushed to the history;
     * - empty input simply continues the agent loop without a new message.
     */
    async handleUserInteraction() {
        while (true) {
            const input = await this.promptUser('用户输入 (或直接回车继续):');
            if (input === null)
                continue; // Slash command executed
            if (input && input.trim()) {
                // Only analyze for complex tasks if no active plan
                if (!this.currentPlan && !task_analyzer_1.TaskAnalyzer.shouldSkipAnalysis(input)) {
                    tui_1.tui.log(chalk_1.default.gray('正在分析任务...'));
                    const analysis = await task_analyzer_1.TaskAnalyzer.analyze(input);
                    // If complex task, suggest planning
                    if (analysis.shouldPlan) {
                        tui_1.tui.log(chalk_1.default.cyan(`\n📊 ${task_analyzer_1.TaskAnalyzer.getAnalysisDescription(analysis)}`));
                        const choice = await tui_1.tui.select([
                            { label: '创建计划后执行 (推荐)', value: 'plan' },
                            { label: '直接执行', value: 'direct' }
                        ]);
                        if (choice === 'plan') {
                            // Ask AI to create plan first: queue a planning request
                            // instead of the raw input, then leave the loop.
                            this.session.conversationHistory.push({
                                role: 'user',
                                content: `请为以下任务创建一个执行计划,列出具体步骤,然后开始执行:\n\n${input}`
                            });
                            this.sessionManager.saveSession(this.session);
                            break;
                        }
                        // else: direct execution, fall through
                    }
                }
                // Default path: push the user's input verbatim.
                this.session.conversationHistory.push({
                    role: 'user',
                    content: input
                });
                this.sessionManager.saveSession(this.session);
                break;
            }
            else {
                // Empty input (Enter) — continue without adding a message.
                break;
            }
        }
    }
|
|
905
|
+
// Helper to allow slash commands during any prompt
|
|
906
|
+
async safePromptTui(message) {
|
|
907
|
+
while (true) {
|
|
908
|
+
const input = await tui_1.tui.prompt(message);
|
|
909
|
+
if (input.startsWith('/')) {
|
|
910
|
+
const handled = await this.handleSlashCommand(input);
|
|
911
|
+
if (handled)
|
|
912
|
+
continue; // Loop back to prompt if handled (unless exit killed process)
|
|
913
|
+
}
|
|
914
|
+
return input;
|
|
915
|
+
}
|
|
916
|
+
}
|
|
917
|
+
// ... (rest of the class)
|
|
918
|
+
async handleFinish(summary) {
|
|
919
|
+
tui_1.tui.log(chalk_1.default.green('\n✅ AI 认为任务已完成:'));
|
|
920
|
+
tui_1.tui.log(chalk_1.default.white(summary));
|
|
921
|
+
tui_1.tui.log(chalk_1.default.gray('(输入新需求继续,或输入 /exit 退出)'));
|
|
922
|
+
const input = await this.safePromptTui('请输入修改需求:');
|
|
923
|
+
// Since we are strictly "continuing" if the user gave input (otherwise /exit would happen in safePromptTui),
|
|
924
|
+
// we essentially treat this as a user feedback loop.
|
|
925
|
+
return `User provided new requirement after previous completion: ${input}`;
|
|
926
|
+
}
|
|
927
|
+
/**
|
|
928
|
+
* Generates a simplified file tree for the context.
|
|
929
|
+
* Ignores node_modules, .git, dist, etc.
|
|
930
|
+
*/
|
|
931
|
+
async generateFileMap() {
|
|
932
|
+
const rootDir = this.session.targetDir;
|
|
933
|
+
let fileMap = '';
|
|
934
|
+
const walk = async (currentDir, indent) => {
|
|
935
|
+
try {
|
|
936
|
+
const files = await fs.readdir(currentDir);
|
|
937
|
+
// Sort: directories first, then files
|
|
938
|
+
files.sort((a, b) => {
|
|
939
|
+
return a.localeCompare(b);
|
|
940
|
+
});
|
|
941
|
+
for (const file of files) {
|
|
942
|
+
if (['node_modules', '.git', 'dist', '.DS_Store', 'package-lock.json', 'yarn.lock'].includes(file))
|
|
943
|
+
continue;
|
|
944
|
+
const fullPath = path.join(currentDir, file);
|
|
945
|
+
const stats = await fs.stat(fullPath);
|
|
946
|
+
if (stats.isDirectory()) {
|
|
947
|
+
fileMap += `${indent}${file}/\n`;
|
|
948
|
+
await walk(fullPath, indent + ' ');
|
|
949
|
+
}
|
|
950
|
+
else {
|
|
951
|
+
fileMap += `${indent}${file}\n`;
|
|
952
|
+
}
|
|
953
|
+
}
|
|
954
|
+
}
|
|
955
|
+
catch (e) {
|
|
956
|
+
fileMap += `${indent}(Error reading directory)\n`;
|
|
957
|
+
}
|
|
958
|
+
};
|
|
959
|
+
if (await fs.pathExists(rootDir)) {
|
|
960
|
+
await walk(rootDir, '');
|
|
961
|
+
}
|
|
962
|
+
else {
|
|
963
|
+
fileMap = '(Target directory not created yet)';
|
|
964
|
+
}
|
|
965
|
+
return fileMap.trim();
|
|
966
|
+
}
|
|
967
|
+
}
|
|
968
|
+
exports.AIAgent = AIAgent;
|