snow-ai 0.3.37 → 0.4.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/agents/promptOptimizeAgent.d.ts +54 -0
- package/dist/agents/promptOptimizeAgent.js +268 -0
- package/dist/api/anthropic.js +16 -10
- package/dist/hooks/useConversation.js +6 -2
- package/dist/i18n/lang/en.js +2 -0
- package/dist/i18n/lang/es.js +3 -1
- package/dist/i18n/lang/ja.js +3 -1
- package/dist/i18n/lang/ko.js +3 -1
- package/dist/i18n/lang/zh-TW.js +2 -0
- package/dist/i18n/lang/zh.js +2 -0
- package/dist/i18n/types.d.ts +2 -0
- package/dist/mcp/bash.js +8 -1
- package/dist/mcp/filesystem.js +17 -12
- package/dist/ui/components/ChatInput.js +1 -1
- package/dist/ui/components/DiffViewer.d.ts +1 -1
- package/dist/ui/components/DiffViewer.js +101 -91
- package/dist/ui/components/FileList.js +22 -11
- package/dist/ui/components/ToolResultPreview.js +16 -4
- package/dist/ui/pages/ChatScreen.js +51 -7
- package/dist/ui/pages/ConfigScreen.js +36 -0
- package/dist/utils/apiConfig.d.ts +2 -0
- package/dist/utils/sessionManager.d.ts +1 -0
- package/package.json +2 -1
package/dist/agents/promptOptimizeAgent.d.ts ADDED
@@ -0,0 +1,54 @@
+import { type ChatMessage } from '../api/chat.js';
+/**
+ * Prompt Optimization Agent Service
+ *
+ * Optimizes user prompts for better AI understanding and response quality.
+ * This service operates using the basic model for efficient, low-cost optimization.
+ *
+ * Features:
+ * - Uses basicModel for efficient prompt optimization
+ * - Follows the same API routing as main flow (chat, responses, gemini, anthropic)
+ * - Filters context to only include user->assistant pairs without tool calls
+ * - Returns optimized prompt that preserves user intent while improving clarity
+ * - Silent execution with error handling to prevent main flow disruption
+ */
+export declare class PromptOptimizeAgent {
+    private modelName;
+    private requestMethod;
+    private initialized;
+    /**
+     * Initialize the prompt optimization agent with current configuration
+     * @returns true if initialized successfully, false otherwise
+     */
+    private initialize;
+    /**
+     * Check if prompt optimization agent is available
+     */
+    isAvailable(): Promise<boolean>;
+    /**
+     * Call the model with streaming API and assemble complete response
+     * Uses the same routing logic as main flow for consistency
+     *
+     * @param messages - Chat messages
+     * @param abortSignal - Optional abort signal to cancel the request
+     */
+    private callModel;
+    /**
+     * Filter conversation history to only include user->assistant pairs without tool calls
+     * This creates a lightweight context for prompt optimization
+     *
+     * @param messages - Full conversation history
+     * @returns Filtered messages containing only user->assistant exchanges
+     */
+    private filterContextMessages;
+    /**
+     * Optimize user prompt for better AI understanding
+     *
+     * @param userPrompt - Original user prompt
+     * @param conversationHistory - Full conversation history for context
+     * @param abortSignal - Optional abort signal to cancel optimization
+     * @returns Optimized prompt, or original prompt if optimization fails
+     */
+    optimizePrompt(userPrompt: string, conversationHistory: ChatMessage[], abortSignal?: AbortSignal): Promise<string>;
+}
+export declare const promptOptimizeAgent: PromptOptimizeAgent;
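Note: the declaration above is the agent's entire public surface - one isAvailable() probe plus optimizePrompt(). A minimal usage sketch against that contract (the call site, sample history, and import path are hypothetical guesses at the published layout):

    import { promptOptimizeAgent } from 'snow-ai/dist/agents/promptOptimizeAgent.js';

    const controller = new AbortController();
    const history = [
        { role: 'user', content: 'refactor the parser' },
        { role: 'assistant', content: 'Which parser module do you mean?' },
    ];
    // Per the declared contract, this resolves to the original prompt if
    // optimization fails or the agent is unavailable.
    const optimized = await promptOptimizeAgent.optimizePrompt(
        'the one in src/parse.ts', history, controller.signal);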
package/dist/agents/promptOptimizeAgent.js ADDED
@@ -0,0 +1,268 @@
+import { getOpenAiConfig } from '../utils/apiConfig.js';
+import { logger } from '../utils/logger.js';
+import { createStreamingChatCompletion } from '../api/chat.js';
+import { createStreamingResponse } from '../api/responses.js';
+import { createStreamingGeminiCompletion } from '../api/gemini.js';
+import { createStreamingAnthropicCompletion } from '../api/anthropic.js';
+/**
+ * Prompt Optimization Agent Service
+ *
+ * Optimizes user prompts for better AI understanding and response quality.
+ * This service operates using the basic model for efficient, low-cost optimization.
+ *
+ * Features:
+ * - Uses basicModel for efficient prompt optimization
+ * - Follows the same API routing as main flow (chat, responses, gemini, anthropic)
+ * - Filters context to only include user->assistant pairs without tool calls
+ * - Returns optimized prompt that preserves user intent while improving clarity
+ * - Silent execution with error handling to prevent main flow disruption
+ */
+export class PromptOptimizeAgent {
+    constructor() {
+        Object.defineProperty(this, "modelName", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: ''
+        });
+        Object.defineProperty(this, "requestMethod", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: 'chat'
+        });
+        Object.defineProperty(this, "initialized", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: false
+        });
+    }
+    /**
+     * Initialize the prompt optimization agent with current configuration
+     * @returns true if initialized successfully, false otherwise
+     */
+    async initialize() {
+        try {
+            const config = getOpenAiConfig();
+            // Check if basic model is configured
+            if (!config.basicModel) {
+                logger.warn('Prompt optimize agent: Basic model not configured');
+                return false;
+            }
+            this.modelName = config.basicModel;
+            this.requestMethod = config.requestMethod;
+            this.initialized = true;
+            return true;
+        }
+        catch (error) {
+            logger.warn('Prompt optimize agent: Failed to initialize:', error);
+            return false;
+        }
+    }
+    /**
+     * Check if prompt optimization agent is available
+     */
+    async isAvailable() {
+        if (!this.initialized) {
+            return await this.initialize();
+        }
+        return true;
+    }
+    /**
+     * Call the model with streaming API and assemble complete response
+     * Uses the same routing logic as main flow for consistency
+     *
+     * @param messages - Chat messages
+     * @param abortSignal - Optional abort signal to cancel the request
+     */
+    async callModel(messages, abortSignal) {
+        let streamGenerator;
+        // Route to appropriate streaming API based on request method
+        switch (this.requestMethod) {
+            case 'anthropic':
+                streamGenerator = createStreamingAnthropicCompletion({
+                    model: this.modelName,
+                    messages,
+                    max_tokens: 1000, // Limited tokens for prompt optimization
+                    includeBuiltinSystemPrompt: false,
+                    disableThinking: true, // Agents don't use Extended Thinking
+                }, abortSignal);
+                break;
+            case 'gemini':
+                streamGenerator = createStreamingGeminiCompletion({
+                    model: this.modelName,
+                    messages,
+                    includeBuiltinSystemPrompt: false,
+                }, abortSignal);
+                break;
+            case 'responses':
+                streamGenerator = createStreamingResponse({
+                    model: this.modelName,
+                    messages,
+                    stream: true,
+                    includeBuiltinSystemPrompt: false,
+                }, abortSignal);
+                break;
+            case 'chat':
+            default:
+                streamGenerator = createStreamingChatCompletion({
+                    model: this.modelName,
+                    messages,
+                    stream: true,
+                    includeBuiltinSystemPrompt: false,
+                }, abortSignal);
+                break;
+        }
+        // Assemble complete content from streaming response
+        let completeContent = '';
+        try {
+            for await (const chunk of streamGenerator) {
+                // Check abort signal
+                if (abortSignal?.aborted) {
+                    throw new Error('Request aborted');
+                }
+                // Handle different chunk formats based on request method
+                if (this.requestMethod === 'chat') {
+                    // Chat API uses standard OpenAI format
+                    if (chunk.choices && chunk.choices[0]?.delta?.content) {
+                        completeContent += chunk.choices[0].delta.content;
+                    }
+                }
+                else {
+                    // Responses, Gemini, and Anthropic APIs use unified format
+                    if (chunk.type === 'content' && chunk.content) {
+                        completeContent += chunk.content;
+                    }
+                }
+            }
+        }
+        catch (streamError) {
+            logger.error('Prompt optimize agent: Streaming error:', streamError);
+            throw streamError;
+        }
+        return completeContent;
+    }
+    /**
+     * Filter conversation history to only include user->assistant pairs without tool calls
+     * This creates a lightweight context for prompt optimization
+     *
+     * @param messages - Full conversation history
+     * @returns Filtered messages containing only user->assistant exchanges
+     */
+    filterContextMessages(messages) {
+        const filtered = [];
+        for (const msg of messages) {
+            // Only include user and assistant messages
+            if (msg.role === 'user' || msg.role === 'assistant') {
+                // For assistant messages, skip if they contain tool calls
+                if (msg.role === 'assistant') {
+                    // Check if message has tool_calls (OpenAI format) or tool_use content (Anthropic format)
+                    const hasToolCalls = !!msg.tool_calls;
+                    const hasToolUseContent = Array.isArray(msg.content) &&
+                        msg.content.some((c) => c.type === 'tool_use' || c.type === 'tool_call');
+                    if (hasToolCalls || hasToolUseContent) {
+                        continue; // Skip assistant messages with tool calls
+                    }
+                }
+                // Add message to filtered list
+                filtered.push(msg);
+            }
+        }
+        return filtered;
+    }
+    /**
+     * Optimize user prompt for better AI understanding
+     *
+     * @param userPrompt - Original user prompt
+     * @param conversationHistory - Full conversation history for context
+     * @param abortSignal - Optional abort signal to cancel optimization
+     * @returns Optimized prompt, or original prompt if optimization fails
+     */
+    async optimizePrompt(userPrompt, conversationHistory, abortSignal) {
+        const available = await this.isAvailable();
+        if (!available) {
+            return userPrompt;
+        }
+        try {
+            // Check word count - if prompt > 100 words, skip optimization
+            // User likely provided detailed/important original text that should be preserved as-is
+            const wordCount = userPrompt.trim().split(/\s+/).length;
+            if (wordCount > 100) {
+                return userPrompt;
+            }
+            // Filter conversation history to lightweight context (only user<->assistant, no tool calls)
+            const contextMessages = this.filterContextMessages(conversationHistory);
+            // Build context summary if there's conversation history
+            let contextSummary = '';
+            if (contextMessages.length > 0) {
+                // Take last 8 messages to keep context focused, but use full content (no truncation)
+                const recentContext = contextMessages.slice(-8);
+                contextSummary =
+                    '\n\nRecent conversation context:\n' +
+                        recentContext
+                            .map((msg) => {
+                            const content = typeof msg.content === 'string'
+                                ? msg.content
+                                : JSON.stringify(msg.content);
+                            // Use full message content (no truncation)
+                            return `${msg.role}: ${content}`;
+                        })
+                            .join('\n');
+            }
+            const optimizationPrompt = `You are a prompt optimization assistant. Your task is to improve user prompts for better AI understanding while maintaining HIGH FIDELITY to the original content.
+
+User's original prompt:
+${userPrompt}${contextSummary}
+
+Your optimization goals (in priority order):
+1. **HIGH FIDELITY REQUIREMENT**: Preserve ALL important information, details, and requirements from the user's original prompt - DO NOT lose or omit any critical content
+2. Preserve the EXACT SAME LANGUAGE as the user (if Chinese, stay Chinese; if English, stay English)
+3. Keep the core intent and meaning unchanged
+4. Make the prompt clearer and more specific ONLY if vague - if already clear, keep it as-is
+5. Add relevant context if the user is asking follow-up questions
+6. Break down complex requests into clear requirements without losing details
+7. Keep the tone natural and conversational
+8. DO NOT add unnecessary formality or change the user's communication style
+9. If the prompt is already clear and specific, return it as-is
+
+CRITICAL RULES:
+- NEVER remove important details, specific requirements, file paths, code snippets, or technical specifications
+- NEVER simplify the prompt if it means losing user-provided information
+- When in doubt, prefer preserving the original over optimizing
+- The goal is CLARITY, not BREVITY - keep all important content
+
+IMPORTANT: Output ONLY the optimized prompt text. No explanations, no meta-commentary, no JSON format. Just the optimized prompt itself.`;
+            const messages = [
+                {
+                    role: 'user',
+                    content: optimizationPrompt,
+                },
+            ];
+            const optimizedPrompt = await this.callModel(messages, abortSignal);
+            if (!optimizedPrompt || optimizedPrompt.trim().length === 0) {
+                logger.warn('Prompt optimize agent: Empty response, using original prompt');
+                return userPrompt;
+            }
+            // Clean up the response (remove any markdown formatting if present)
+            let cleanedPrompt = optimizedPrompt.trim();
+            // Remove markdown code blocks if present
+            const codeBlockMatch = cleanedPrompt.match(/```[\s\S]*?\n([\s\S]*?)```/);
+            if (codeBlockMatch) {
+                cleanedPrompt = codeBlockMatch[1].trim();
+            }
+            // If optimized prompt is suspiciously short or looks like it failed, use original
+            if (cleanedPrompt.length < userPrompt.length * 0.3) {
+                logger.warn('Prompt optimize agent: Optimized prompt too short, using original');
+                return userPrompt;
+            }
+            return cleanedPrompt;
+        }
+        catch (error) {
+            logger.error('Prompt optimize agent: Failed to optimize prompt', error);
+            return userPrompt;
+        }
+    }
+}
+// Export singleton instance
+export const promptOptimizeAgent = new PromptOptimizeAgent();
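The acceptance rules buried in optimizePrompt are the interesting part: prompts over 100 words are never rewritten, and a rewrite is discarded when it comes back empty or shorter than 30% of the original. A standalone sketch of that rule (the function name is illustrative, not from the package):

    function acceptOptimized(original: string, candidate: string): string {
        // Long prompts are assumed to be deliberate detail - skip optimization.
        if (original.trim().split(/\s+/).length > 100) return original;
        const cleaned = candidate.trim();
        // Reject empty output and suspiciously short rewrites (< 30% of original length).
        if (cleaned.length === 0 || cleaned.length < original.length * 0.3) return original;
        return cleaned;
    }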
package/dist/api/anthropic.js CHANGED
@@ -6,6 +6,8 @@ import { logger } from '../utils/logger.js';
 import { addProxyToFetchOptions } from '../utils/proxyUtils.js';
 import { saveUsageToFile } from '../utils/usageLogger.js';
 let anthropicConfig = null;
+// Persistent userId that remains the same until application restart
+let persistentUserId = null;
 function getAnthropicConfig() {
     if (!anthropicConfig) {
         const config = getOpenAiConfig();
@@ -27,18 +29,22 @@ function getAnthropicConfig() {
 }
 export function resetAnthropicClient() {
     anthropicConfig = null;
+    persistentUserId = null; // Reset userId on client reset
 }
 /**
- * Generate a user_id
+ * Generate a persistent user_id that remains the same until application restart
+ * Format: user_<hash>_account__session_<uuid>
  * This matches Anthropic's expected format for tracking and caching
- * The hash is based on sessionId only to keep it consistent within the same session
  */
-function
-
-
-
-
-
+function getPersistentUserId() {
+    if (!persistentUserId) {
+        const sessionId = randomUUID();
+        const hash = createHash('sha256')
+            .update(`anthropic_user_${sessionId}`)
+            .digest('hex');
+        persistentUserId = `user_${hash}_account__session_${sessionId}`;
+    }
+    return persistentUserId;
 }
 /**
  * Convert OpenAI-style tools to Anthropic tool format
@@ -286,8 +292,8 @@ export async function* createStreamingAnthropicCompletion(options, abortSignal,
     yield* withRetryGenerator(async function* () {
         const config = getAnthropicConfig();
         const { system, messages } = convertToAnthropicMessages(options.messages, options.includeBuiltinSystemPrompt !== false);
-
-        const userId =
+        // Use persistent userId that remains the same until application restart
+        const userId = getPersistentUserId();
         const requestBody = {
             model: options.model,
             max_tokens: options.max_tokens || 4096,
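The effect of this change: instead of minting a fresh user_id per request, the module memoizes one for the process lifetime, giving Anthropic a stable identity for tracking and prompt caching until resetAnthropicClient() or a restart clears it. The pattern in isolation (the crypto imports are shown explicitly here; the real file presumably already has them in scope, since the hunk calls them):

    import { createHash, randomUUID } from 'crypto';

    let cachedUserId: string | null = null;
    function stableUserId(): string {
        if (!cachedUserId) {
            const sessionId = randomUUID();
            const hash = createHash('sha256')
                .update(`anthropic_user_${sessionId}`)
                .digest('hex');
            cachedUserId = `user_${hash}_account__session_${sessionId}`;
        }
        return cachedUserId; // identical for every subsequent request
    }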
package/dist/hooks/useConversation.js CHANGED
@@ -781,7 +781,9 @@ export async function handleConversationWithTools(options) {
             break;
         }
         // After tool execution completes and before sending results to the AI, check whether compression is needed
-
+        const config = getOpenAiConfig();
+        if (config.enableAutoCompress !== false &&
+            options.getCurrentContextPercentage &&
             shouldAutoCompress(options.getCurrentContextPercentage())) {
             try {
                 // Show the compression notice message
@@ -934,7 +936,9 @@ export async function handleConversationWithTools(options) {
         const pendingMessages = options.getPendingMessages();
         if (pendingMessages.length > 0) {
             // Check token usage; if >= 80%, run auto-compression first
-
+            const config = getOpenAiConfig();
+            if (config.enableAutoCompress !== false &&
+                options.getCurrentContextPercentage &&
                 shouldAutoCompress(options.getCurrentContextPercentage())) {
                 try {
                     // Show the compression notice message
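Both hunks install the same gate: compression only runs when enableAutoCompress is not explicitly false and a context-percentage getter was passed in. Reduced to its shape (shouldAutoCompress is the package's own helper; the 80% threshold is assumed from the inline comments):

    declare function shouldAutoCompress(pct: number): boolean; // package helper, assumed to check >= 80%

    function shouldRunCompression(
        config: { enableAutoCompress?: boolean },
        getPct?: () => number,
    ): boolean {
        const enabled = config.enableAutoCompress !== false; // undefined counts as enabled
        return enabled && !!getPct && shouldAutoCompress(getPct());
    }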
package/dist/i18n/lang/en.js CHANGED
@@ -119,6 +119,8 @@ export const en = {
     apiKey: 'API Key:',
     requestMethod: 'Request Method:',
     anthropicBeta: 'Anthropic Beta:',
+    enablePromptOptimization: 'Enable Prompt Optimization:',
+    enableAutoCompress: 'Enable Auto Compression:',
     thinkingEnabled: 'Thinking Enabled:',
     thinkingBudgetTokens: 'Thinking Budget Tokens:',
     geminiThinkingEnabled: 'Gemini Thinking Enabled:',
package/dist/i18n/lang/es.js CHANGED
@@ -119,7 +119,9 @@ export const es = {
     apiKey: 'Clave API:',
     requestMethod: 'Método de Solicitud:',
     anthropicBeta: 'Anthropic Beta:',
-
+    enablePromptOptimization: 'Enable Prompt Optimization:',
+    enableAutoCompress: 'Enable Auto Compression:',
+    thinkingEnabled: 'Thinking Enabled:',
     thinkingBudgetTokens: 'Tokens de Presupuesto de Pensamiento:',
     geminiThinkingEnabled: 'Habilitar Pensamiento Gemini:',
     geminiThinkingBudget: 'Presupuesto de Pensamiento Gemini:',
package/dist/i18n/lang/ja.js CHANGED
@@ -119,7 +119,9 @@ export const ja = {
     apiKey: 'APIキー:',
     requestMethod: 'リクエスト方式:',
     anthropicBeta: 'Anthropic Beta:',
-
+    enablePromptOptimization: 'Enable Prompt Optimization:',
+    enableAutoCompress: 'Enable Auto Compression:',
+    thinkingEnabled: 'Thinking Enabled:',
     thinkingBudgetTokens: '思考予算トークン数:',
     geminiThinkingEnabled: 'Gemini思考を有効化:',
     geminiThinkingBudget: 'Gemini思考予算:',
package/dist/i18n/lang/ko.js CHANGED
@@ -119,7 +119,9 @@ export const ko = {
     apiKey: 'API 키:',
     requestMethod: '요청 방식:',
     anthropicBeta: 'Anthropic Beta:',
-
+    enablePromptOptimization: 'Enable Prompt Optimization:',
+    enableAutoCompress: 'Enable Auto Compression:',
+    thinkingEnabled: 'Thinking Enabled:',
     thinkingBudgetTokens: '사고 예산 토큰 수:',
     geminiThinkingEnabled: 'Gemini 사고 활성화:',
     geminiThinkingBudget: 'Gemini 사고 예산:',
package/dist/i18n/lang/zh-TW.js CHANGED
@@ -119,6 +119,8 @@ export const zhTW = {
     apiKey: 'API 金鑰:',
     requestMethod: '請求方式:',
     anthropicBeta: 'Anthropic Beta:',
+    enablePromptOptimization: '啟用提示詞優化:',
+    enableAutoCompress: '啟用自動壓縮:',
     thinkingEnabled: '啟用思考模式:',
     thinkingBudgetTokens: '思考預算令牌數:',
     geminiThinkingEnabled: '啟用 Gemini 思考:',
package/dist/i18n/lang/zh.js CHANGED
@@ -119,6 +119,8 @@ export const zh = {
     apiKey: 'API 密钥:',
     requestMethod: '请求方式:',
     anthropicBeta: 'Anthropic Beta:',
+    enablePromptOptimization: '启用提示词优化:',
+    enableAutoCompress: '启用自动压缩:',
     thinkingEnabled: '启用思考模式:',
     thinkingBudgetTokens: '思考预算令牌数:',
     geminiThinkingEnabled: '启用 Gemini 思考:',
package/dist/i18n/types.d.ts CHANGED
@@ -120,6 +120,8 @@ export type TranslationKeys = {
     apiKey: string;
     requestMethod: string;
     anthropicBeta: string;
+    enablePromptOptimization: string;
+    enableAutoCompress: string;
     thinkingEnabled: string;
     thinkingBudgetTokens: string;
     geminiThinkingEnabled: string;
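Since every locale object is typed as TranslationKeys, adding the two required string fields to types.d.ts is what forces all six lang files above to gain them in the same release (es/ja/ko ship the English strings as placeholders for now). The mechanism, in miniature:

    type Keys = { anthropicBeta: string; enablePromptOptimization: string; enableAutoCompress: string };

    // Omitting enableAutoCompress here would be a compile-time error:
    const en: Keys = {
        anthropicBeta: 'Anthropic Beta:',
        enablePromptOptimization: 'Enable Prompt Optimization:',
        enableAutoCompress: 'Enable Auto Compression:',
    };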
package/dist/mcp/bash.js CHANGED
@@ -65,7 +65,14 @@ export class TerminalCommandService {
             childProcess.on('error', reject);
             childProcess.on('close', (code, signal) => {
                 if (signal) {
-
+                    // Process was killed by signal (e.g., timeout, manual kill)
+                    // CRITICAL: Still preserve stdout/stderr for debugging
+                    const error = new Error(`Process killed by signal ${signal}`);
+                    error.code = code || 1;
+                    error.stdout = stdoutData;
+                    error.stderr = stderrData;
+                    error.signal = signal;
+                    reject(error);
                 }
                 else if (code === 0) {
                     resolve({ stdout: stdoutData, stderr: stderrData });
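Previously (judging by the truncated removed line) a signal-killed child rejected without its captured output; the new branch attaches stdout/stderr to the error object. A self-contained sketch of the same pattern:

    import { spawn } from 'child_process';

    function run(cmd: string, args: string[]): Promise<{ stdout: string; stderr: string }> {
        return new Promise((resolve, reject) => {
            const child = spawn(cmd, args);
            let stdoutData = '';
            let stderrData = '';
            child.stdout.on('data', (d) => (stdoutData += d));
            child.stderr.on('data', (d) => (stderrData += d));
            child.on('error', reject);
            child.on('close', (code, signal) => {
                if (signal) {
                    // Killed by a signal (timeout, manual kill): keep output for debugging.
                    reject(Object.assign(new Error(`Process killed by signal ${signal}`), {
                        code: code ?? 1, stdout: stdoutData, stderr: stderrData, signal,
                    }));
                }
                else if (code === 0) {
                    resolve({ stdout: stdoutData, stderr: stderrData });
                }
                else {
                    reject(Object.assign(new Error(`Exited with code ${code}`), {
                        code, stdout: stdoutData, stderr: stderrData,
                    }));
                }
            });
        });
    }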
package/dist/mcp/filesystem.js CHANGED
@@ -1,7 +1,6 @@
 import { promises as fs } from 'fs';
 import * as path from 'path';
-import
-import { promisify } from 'util';
+import * as prettier from 'prettier';
 // IDE connection supports both VSCode and JetBrains IDEs
 import { vscodeConnection } from '../utils/vscodeConnection.js';
 import { incrementalSnapshotManager } from '../utils/incrementalSnapshot.js';
@@ -16,7 +15,6 @@ import { parseFileSymbols } from './utils/aceCodeSearch/symbol.utils.js';
 // Notebook utilities for automatic note retrieval
 import { queryNotebook } from '../utils/notebookManager.js';
 const { resolve, dirname, isAbsolute } = path;
-const execAsync = promisify(exec);
 /**
  * Filesystem MCP Service
  * Provides basic file operations: read, create, and delete files
@@ -681,11 +679,14 @@ export class FilesystemMCPService {
         const shouldFormat = this.prettierSupportedExtensions.includes(fileExtension);
         if (shouldFormat) {
             try {
-
-
+                // Use Prettier API for better performance (avoids npx overhead)
+                const prettierConfig = await prettier.resolveConfig(fullPath);
+                finalContent = await prettier.format(modifiedContent, {
+                    filepath: fullPath,
+                    ...prettierConfig,
                 });
-                //
-
+                // Write formatted content back to file
+                await fs.writeFile(fullPath, finalContent, 'utf-8');
                 finalLines = finalContent.split('\n');
                 finalTotalLines = finalLines.length;
                 finalContextEnd = Math.min(finalTotalLines, contextStart + (contextEnd - contextStart) + lineDifference);
@@ -912,11 +913,15 @@ export class FilesystemMCPService {
         const shouldFormat = this.prettierSupportedExtensions.includes(fileExtension);
         if (shouldFormat) {
             try {
-
-
+                // Use Prettier API for better performance (avoids npx overhead)
+                const prettierConfig = await prettier.resolveConfig(fullPath);
+                const newContent = modifiedLines.join('\n');
+                const formattedContent = await prettier.format(newContent, {
+                    filepath: fullPath,
+                    ...prettierConfig,
                 });
-                //
-
+                // Write formatted content back to file
+                await fs.writeFile(fullPath, formattedContent, 'utf-8');
                 finalLines = formattedContent.split('\n');
                 finalTotalLines = finalLines.length;
                 // Recalculate the context end line based on formatted content
@@ -1072,7 +1077,7 @@ export const filesystemService = new FilesystemMCPService();
 export const mcpTools = [
     {
         name: 'filesystem-read',
-        description: 'Read file content with line numbers. **Read only when the actual file or folder path is found or provided by the user, do not make random guesses
+        description: 'Read file content with line numbers. **Read only when the actual file or folder path is found or provided by the user, do not make random guesses,Search for specific documents or line numbers before reading more accurately** **SUPPORTS MULTIPLE FILES WITH FLEXIBLE LINE RANGES**: Pass either (1) a single file path (string), (2) array of file paths (strings) with unified startLine/endLine, or (3) array of file config objects with per-file line ranges. **INTEGRATED DIRECTORY LISTING**: When filePath is a directory, automatically lists its contents instead of throwing error. ⚠️ **IMPORTANT WORKFLOW**: (1) ALWAYS use ACE search tools FIRST (ace-text_search/ace-search_symbols/ace-file_outline) to locate the relevant code, (2) ONLY use filesystem-read when you know the approximate location and need precise line numbers for editing. **ANTI-PATTERN**: Reading files line-by-line from the top wastes tokens - use search instead! **USAGE**: Call without parameters to read entire file(s), or specify startLine/endLine for partial reads. Returns content with line numbers (format: "123→code") for precise editing. **EXAMPLES**: (A) Unified: filePath=["a.ts", "b.ts"], startLine=1, endLine=500 reads lines 1-500 from both. (B) Per-file: filePath=[{path:"a.ts", startLine:1, endLine:300}, {path:"b.ts", startLine:100, endLine:550}] reads different ranges from each file. (C) Directory: filePath="./src" returns list of files in src/.',
         inputSchema: {
             type: 'object',
             properties: {
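The removed promisify(exec) import suggests the old implementation shelled out (the added comment says it went through npx); the new code formats in-process. Minimal sketch of the Prettier Node API flow - awaiting prettier.format works under both Prettier 2 (returns a string) and Prettier 3 (returns a Promise), though the added @types/prettier ^2.7.3 dev dependency points at the v2 typings:

    import { promises as fs } from 'fs';
    import * as prettier from 'prettier';

    async function formatAndWrite(fullPath: string, content: string): Promise<string> {
        // Respect the project's Prettier config if one resolves for this path.
        const config = await prettier.resolveConfig(fullPath);
        // filepath lets Prettier infer the parser from the file extension.
        const formatted = await prettier.format(content, { filepath: fullPath, ...config });
        await fs.writeFile(fullPath, formatted, 'utf-8');
        return formatted;
    }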
package/dist/ui/components/ChatInput.js CHANGED
@@ -346,7 +346,7 @@ export default function ChatInput({ onSubmit, onCommand, placeholder = 'Type you
     React.createElement(Text, { color: color },
         percentage.toFixed(1),
         "%"),
-    React.createElement(Text, null, " "),
+    React.createElement(Text, null, " \u00B7 "),
     React.createElement(Text, { color: color }, formatNumber(totalInputTokens)),
     React.createElement(Text, null, t.chatScreen.tokens),
     hasCacheMetrics && (React.createElement(React.Fragment, null,
package/dist/ui/components/DiffViewer.d.ts CHANGED
@@ -7,5 +7,5 @@ interface Props {
     completeNewContent?: string;
     startLineNumber?: number;
 }
-export default function DiffViewer({ oldContent, newContent, filename, completeOldContent, completeNewContent, startLineNumber, }: Props): React.JSX.Element;
+export default function DiffViewer({ oldContent, newContent, filename, completeOldContent, completeNewContent, startLineNumber, }: Props): React.JSX.Element | null;
 export {};
package/dist/ui/components/DiffViewer.js CHANGED
@@ -1,4 +1,4 @@
-import React from 'react';
+import React, { useMemo } from 'react';
 import { Box, Text } from 'ink';
 import * as Diff from 'diff';
 // Helper function to strip line numbers from content (format: "123→content")
@@ -23,109 +23,119 @@ export default function DiffViewer({ oldContent = '', newContent, filename, comp
         : stripLineNumbers(newContent);
     // If no old content, show as new file creation
     const isNewFile = !diffOldContent || diffOldContent.trim() === '';
-
+    // Memoize new file rendering to avoid re-splitting lines on every render
+    const newFileContent = useMemo(() => {
+        if (!isNewFile)
+            return null;
         const allLines = diffNewContent.split('\n');
         return (React.createElement(Box, { flexDirection: "column" },
             React.createElement(Box, { marginBottom: 1 },
                 React.createElement(Text, { bold: true, color: "green" }, "[New File]"),
-                filename &&
-
-                filename))
+                filename && React.createElement(Text, { color: "cyan" },
+                    " ",
+                    filename)),
             React.createElement(Box, { flexDirection: "column" }, allLines.map((line, index) => (React.createElement(Text, { key: index, color: "white", backgroundColor: "#006400" },
                 "+ ",
                 line))))));
+    }, [isNewFile, diffNewContent, filename]);
+    if (isNewFile) {
+        return newFileContent;
     }
-    //
-    const
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-    });
-    // Find diff hunks (groups of changes with context)
-    const hunks = [];
-    const contextLines = 3; // Number of context lines before and after changes
-    for (let i = 0; i < allChanges.length; i++) {
-        const change = allChanges[i];
-        if (change?.type !== 'unchanged') {
-            // Found a change, create a hunk
-            const hunkStart = Math.max(0, i - contextLines);
-            let hunkEnd = i;
-            // Extend the hunk to include all consecutive changes
-            while (hunkEnd < allChanges.length - 1) {
-                const nextChange = allChanges[hunkEnd + 1];
-                if (!nextChange)
-                    break;
-                // If next line is a change, extend the hunk
-                if (nextChange.type !== 'unchanged') {
-                    hunkEnd++;
-                    continue;
+    // Memoize expensive diff calculation - only recompute when content changes
+    const hunks = useMemo(() => {
+        // Generate line-by-line diff
+        const diffResult = Diff.diffLines(diffOldContent, diffNewContent);
+        const allChanges = [];
+        let oldLineNum = startLineNumber;
+        let newLineNum = startLineNumber;
+        diffResult.forEach(part => {
+            const lines = part.value.replace(/\n$/, '').split('\n');
+            lines.forEach(line => {
+                if (part.added) {
+                    allChanges.push({
+                        type: 'added',
+                        content: line,
+                        oldLineNum: null,
+                        newLineNum: newLineNum++,
+                    });
+                }
+                else if (part.removed) {
+                    allChanges.push({
+                        type: 'removed',
+                        content: line,
+                        oldLineNum: oldLineNum++,
+                        newLineNum: null,
+                    });
+                }
+                else {
+                    allChanges.push({
+                        type: 'unchanged',
+                        content: line,
+                        oldLineNum: oldLineNum++,
+                        newLineNum: newLineNum++,
+                    });
                 }
-
-
-
-
-
+            });
+        });
+        // Find diff hunks (groups of changes with context)
+        const computedHunks = [];
+        const contextLines = 3; // Number of context lines before and after changes
+        for (let i = 0; i < allChanges.length; i++) {
+            const change = allChanges[i];
+            if (change?.type !== 'unchanged') {
+                // Found a change, create a hunk
+                const hunkStart = Math.max(0, i - contextLines);
+                let hunkEnd = i;
+                // Extend the hunk to include all consecutive changes
+                while (hunkEnd < allChanges.length - 1) {
+                    const nextChange = allChanges[hunkEnd + 1];
+                    if (!nextChange)
+                        break;
+                    // If next line is a change, extend the hunk
+                    if (nextChange.type !== 'unchanged') {
+                        hunkEnd++;
+                        continue;
+                    }
+                    // If there are more changes within context distance, extend the hunk
+                    let hasMoreChanges = false;
+                    for (let j = hunkEnd + 1; j < Math.min(allChanges.length, hunkEnd + 1 + contextLines * 2); j++) {
+                        if (allChanges[j]?.type !== 'unchanged') {
+                            hasMoreChanges = true;
+                            break;
+                        }
+                    }
+                    if (hasMoreChanges) {
+                        hunkEnd++;
+                    }
+                    else {
                         break;
                     }
                 }
-
-
-
-
-
+                // Add context lines after the hunk
+                hunkEnd = Math.min(allChanges.length - 1, hunkEnd + contextLines);
+                // Extract the hunk
+                const hunkChanges = allChanges.slice(hunkStart, hunkEnd + 1);
+                const firstChange = hunkChanges[0];
+                const lastChange = hunkChanges[hunkChanges.length - 1];
+                if (firstChange && lastChange) {
+                    computedHunks.push({
+                        startLine: firstChange.oldLineNum || firstChange.newLineNum || 1,
+                        endLine: lastChange.oldLineNum || lastChange.newLineNum || 1,
+                        changes: hunkChanges,
+                    });
                 }
+                // Skip to the end of this hunk
+                i = hunkEnd;
             }
-            // Add context lines after the hunk
-            hunkEnd = Math.min(allChanges.length - 1, hunkEnd + contextLines);
-            // Extract the hunk
-            const hunkChanges = allChanges.slice(hunkStart, hunkEnd + 1);
-            const firstChange = hunkChanges[0];
-            const lastChange = hunkChanges[hunkChanges.length - 1];
-            if (firstChange && lastChange) {
-                hunks.push({
-                    startLine: firstChange.oldLineNum || firstChange.newLineNum || 1,
-                    endLine: lastChange.oldLineNum || lastChange.newLineNum || 1,
-                    changes: hunkChanges,
-                });
-            }
-            // Skip to the end of this hunk
-            i = hunkEnd;
         }
-
+        return computedHunks;
+    }, [diffOldContent, diffNewContent, startLineNumber]);
     return (React.createElement(Box, { flexDirection: "column" },
         React.createElement(Box, { marginBottom: 1 },
             React.createElement(Text, { bold: true, color: "yellow" }, "[File Modified]"),
-            filename &&
-
-            filename))
+            filename && React.createElement(Text, { color: "cyan" },
+                " ",
+                filename)),
         React.createElement(Box, { flexDirection: "column" },
             hunks.map((hunk, hunkIndex) => (React.createElement(Box, { key: hunkIndex, flexDirection: "column", marginBottom: 1 },
                 React.createElement(Text, { color: "cyan", dimColor: true },
@@ -136,10 +146,10 @@ export default function DiffViewer({ oldContent = '', newContent, filename, comp
                     " @@"),
                 hunk.changes.map((change, changeIndex) => {
                     // Calculate line number to display
-                    const lineNum = change.type === 'added'
-
-
-
+                    const lineNum = change.type === 'added' ? change.newLineNum : change.oldLineNum;
+                    const lineNumStr = lineNum
+                        ? String(lineNum).padStart(4, ' ')
+                        : '    ';
                     if (change.type === 'added') {
                         return (React.createElement(Text, { key: changeIndex, color: "white", backgroundColor: "#006400" },
                             lineNumStr,
@@ -155,7 +165,7 @@ export default function DiffViewer({ oldContent = '', newContent, filename, comp
                     // Unchanged lines (context)
                     return (React.createElement(Text, { key: changeIndex, dimColor: true },
                         lineNumStr,
-                        "
+                        "   ",
                         change.content));
                 })))),
         hunks.length > 1 && (React.createElement(Box, { marginTop: 1 },
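The DiffViewer restructuring is a memoization pass: the Diff.diffLines call and the hunk grouping now live inside useMemo keyed on the two content strings, so ink re-renders stop recomputing the diff, and the early returns hand back memoized values instead of inline JSX. The shape of the change, reduced to a hook (hunk grouping elided):

    import { useMemo } from 'react';
    import * as Diff from 'diff';

    function useLineDiff(oldText: string, newText: string) {
        // Recomputed only when oldText/newText change, not on every render.
        return useMemo(() => Diff.diffLines(oldText, newText), [oldText, newText]);
    }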
package/dist/ui/components/FileList.js CHANGED
@@ -15,9 +15,15 @@ const FileList = memo(forwardRef(({ query, selectedIndex, visible, maxItems = 10
         ? Math.min(maxItems, MAX_DISPLAY_ITEMS)
         : MAX_DISPLAY_ITEMS;
 }, [maxItems]);
-// Get files from directory - optimized for performance with
+// Get files from directory - optimized for performance with depth limit
 const loadFiles = useCallback(async () => {
+    const MAX_DEPTH = 5; // Limit recursion depth to prevent performance issues
+    const MAX_FILES = 1000; // Reduced from 2000 for better performance
     const getFilesRecursively = async (dir, depth = 0) => {
+        // Stop recursion if depth limit reached
+        if (depth > MAX_DEPTH) {
+            return [];
+        }
         try {
             const entries = await fs.promises.readdir(dir, {
                 withFileTypes: true,
@@ -48,6 +54,10 @@ const FileList = memo(forwardRef(({ query, selectedIndex, visible, maxItems = 10
                 '.env',
             ];
             for (const entry of entries) {
+                // Early exit if we've collected enough files
+                if (result.length >= MAX_FILES) {
+                    break;
+                }
                 // Skip hidden files and ignore patterns
                 if (entry.name.startsWith('.') ||
                     ignorePatterns.includes(entry.name)) {
@@ -70,20 +80,18 @@ const FileList = memo(forwardRef(({ query, selectedIndex, visible, maxItems = 10
                     !path.isAbsolute(relativePath)) {
                     relativePath = './' + relativePath;
                 }
+                // Normalize to forward slashes for cross-platform consistency
+                relativePath = relativePath.replace(/\\/g, '/');
                 result.push({
                     name: entry.name,
                     path: relativePath,
                     isDirectory: entry.isDirectory(),
                 });
-                // Recursively get files from subdirectories
-                if (entry.isDirectory()) {
+                // Recursively get files from subdirectories with depth limit
+                if (entry.isDirectory() && depth < MAX_DEPTH) {
                     const subFiles = await getFilesRecursively(fullPath, depth + 1);
                     result = result.concat(subFiles);
                 }
-                // Limit total files for performance (increased from 500 to 2000)
-                if (result.length > 2000) {
-                    break;
-                }
             }
             return result;
         }
@@ -211,12 +219,15 @@ const FileList = memo(forwardRef(({ query, selectedIndex, visible, maxItems = 10
     }
     return results;
 }, [files, rootPath, terminalWidth]);
-// Load files
+// Load files when component becomes visible
+// This ensures the file list is always fresh without complex file watching
 useEffect(() => {
-    if (visible
-
+    if (!visible) {
+        return;
     }
-
+    // Always reload when becoming visible to ensure fresh data
+    loadFiles();
+}, [visible, rootPath, loadFiles]);
 // State for filtered files (needed for async content search)
 const [allFilteredFiles, setAllFilteredFiles] = useState([]);
 // Filter files based on query and search mode with debounce
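The scan is now bounded twice: recursion stops below MAX_DEPTH (5), and each directory loop breaks once MAX_FILES (1000, down from 2000) entries are collected - previously the size check only ran after a whole subdirectory had already been concatenated. A standalone sketch:

    import * as fs from 'fs';
    import * as path from 'path';

    const MAX_DEPTH = 5;
    const MAX_FILES = 1000;

    async function walk(dir: string, depth = 0): Promise<string[]> {
        if (depth > MAX_DEPTH) return []; // stop descending
        let result: string[] = [];
        const entries = await fs.promises.readdir(dir, { withFileTypes: true });
        for (const entry of entries) {
            if (result.length >= MAX_FILES) break; // early exit inside the loop
            if (entry.name.startsWith('.')) continue; // skip hidden entries
            const fullPath = path.join(dir, entry.name);
            // Normalize to forward slashes for cross-platform consistency.
            result.push(fullPath.replace(/\\/g, '/'));
            if (entry.isDirectory() && depth < MAX_DEPTH) {
                result = result.concat(await walk(fullPath, depth + 1));
            }
        }
        return result;
    }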
package/dist/ui/components/ToolResultPreview.js CHANGED
@@ -76,18 +76,30 @@ function renderTerminalExecutePreview(data, isSubAgentInternal) {
             hasStderr &&
                 ` (${data.stderr.trim().split('\n').length} lines stderr)`)));
     }
-    // For main flow tools, show
+    // For main flow tools, show summary unless there's an error
+    // Only show full output when exitCode !== 0 or has stderr
+    const showFullOutput = hasError || hasStderr;
+    if (!showFullOutput) {
+        // Success case - show compact summary
+        const stdoutLines = hasStdout ? data.stdout.trim().split('\n').length : 0;
+        return (React.createElement(Box, { marginLeft: 2 },
+            React.createElement(Text, { color: "green", dimColor: true },
+                "\u2514\u2500 \u2713 Exit code: ",
+                data.exitCode,
+                hasStdout && ` (${stdoutLines} ${stdoutLines === 1 ? 'line' : 'lines'} output)`)));
+    }
+    // Error case - show full details
     return (React.createElement(Box, { flexDirection: "column", marginLeft: 2 },
         React.createElement(Text, { color: "gray", dimColor: true },
             "\u251C\u2500 command: ",
             data.command),
-        React.createElement(Text, { color:
+        React.createElement(Text, { color: "red", bold: true },
             "\u251C\u2500 exitCode: ",
             data.exitCode,
-
+            " \u26A0\uFE0F FAILED"),
         hasStdout && (React.createElement(Box, { flexDirection: "column" },
             React.createElement(Text, { color: "gray", dimColor: true }, "\u251C\u2500 stdout:"),
-            React.createElement(Box, { marginLeft: 2, flexDirection: "column" }, data.stdout.split('\n').map((line, idx) => (React.createElement(Text, { key: idx, color:
+            React.createElement(Box, { marginLeft: 2, flexDirection: "column" }, data.stdout.split('\n').map((line, idx) => (React.createElement(Text, { key: idx, color: "yellow" }, line)))))),
         hasStderr && (React.createElement(Box, { flexDirection: "column" },
             React.createElement(Text, { color: "red", bold: true }, "\u251C\u2500 stderr:"),
            React.createElement(Box, { marginLeft: 2, flexDirection: "column" }, data.stderr.split('\n').map((line, idx) => (React.createElement(Text, { key: idx, color: "red" }, line)))))),
package/dist/ui/pages/ChatScreen.js CHANGED
@@ -22,6 +22,7 @@ import { sessionManager } from '../../utils/sessionManager.js';
 import { useSessionSave } from '../../hooks/useSessionSave.js';
 import { useToolConfirmation } from '../../hooks/useToolConfirmation.js';
 import { handleConversationWithTools } from '../../hooks/useConversation.js';
+import { promptOptimizeAgent } from '../../agents/promptOptimizeAgent.js';
 import { useVSCodeState } from '../../hooks/useVSCodeState.js';
 import { useSnapshotState } from '../../hooks/useSnapshotState.js';
 import { useStreamingState } from '../../hooks/useStreamingState.js';
@@ -675,8 +676,9 @@ export default function ChatScreen({ skipWelcome }) {
     await processMessage(message, images);
 };
 const processMessage = async (message, images, useBasicModel, hideUserMessage) => {
-    // Check token usage; if >= 80%
-
+    // Check token usage; if >= 80% and auto-compression is enabled in config, run auto-compression first
+    const autoCompressConfig = getOpenAiConfig();
+    if (autoCompressConfig.enableAutoCompress !== false && shouldAutoCompress(currentContextPercentageRef.current)) {
         setIsCompressing(true);
         setCompressionError(null);
         try {
@@ -717,7 +719,7 @@ export default function ChatScreen({ skipWelcome }) {
     }
     // Clear any previous retry status when starting a new request
     streamingState.setRetryStatus(null);
-    // Parse and validate file references
+    // Parse and validate file references (use original message for immediate UI display)
     const { cleanContent, validFiles } = await parseAndValidateFileReferences(message);
     // Separate image files from regular files
     const imageFiles = validFiles.filter(f => f.isImage && f.imageData && f.mimeType);
@@ -735,7 +737,7 @@ export default function ChatScreen({ skipWelcome }) {
             mimeType: f.mimeType,
         })),
     ];
-    // Only add user message to UI if not hidden
+    // Only add user message to UI if not hidden (display the original user message)
     if (!hideUserMessage) {
         const userMessage = {
             role: 'user',
@@ -749,16 +751,58 @@ export default function ChatScreen({ skipWelcome }) {
     // Create new abort controller for this request
     const controller = new AbortController();
     streamingState.setAbortController(controller);
+    // Optimize user prompt in the background (silent execution)
+    let originalMessage = message;
+    let optimizedMessage = message;
+    let optimizedCleanContent = cleanContent;
+    // Check if prompt optimization is enabled in config
+    const config = getOpenAiConfig();
+    const isOptimizationEnabled = config.enablePromptOptimization !== false; // Default to true
+    if (isOptimizationEnabled) {
+        try {
+            // Convert current UI messages to ChatMessage format for context
+            const conversationHistory = messages
+                .filter(m => m.role === 'user' || m.role === 'assistant')
+                .map(m => ({
+                role: m.role,
+                content: typeof m.content === 'string' ? m.content : '',
+            }));
+            // Try to optimize the prompt (background execution)
+            optimizedMessage = await promptOptimizeAgent.optimizePrompt(message, conversationHistory, controller.signal);
+            // Re-parse the optimized message to get clean content for AI
+            if (optimizedMessage !== originalMessage) {
+                const optimizedParsed = await parseAndValidateFileReferences(optimizedMessage);
+                optimizedCleanContent = optimizedParsed.cleanContent;
+            }
+        }
+        catch (error) {
+            // If optimization fails, silently fall back to original message
+            logger.warn('Prompt optimization failed, using original:', error);
+        }
+    }
     try {
-        // Create message for AI with file read instructions and editor context
-        const messageForAI = createMessageWithFileInstructions(
+        // Create message for AI with file read instructions and editor context (use the optimized content)
+        const messageForAI = createMessageWithFileInstructions(optimizedCleanContent, regularFiles, vscodeState.vscodeConnected ? vscodeState.editorContext : undefined);
+        // Wrap saveMessage to add originalContent for user messages
+        const saveMessageWithOriginal = async (msg) => {
+            // If this is a user message and we have an optimized version, add originalContent
+            if (msg.role === 'user' && optimizedMessage !== originalMessage) {
+                await saveMessage({
+                    ...msg,
+                    originalContent: originalMessage,
+                });
+            }
+            else {
+                await saveMessage(msg);
+            }
+        };
         // Start conversation with tool support
         await handleConversationWithTools({
             userContent: messageForAI,
             imageContents,
             controller,
             messages,
-            saveMessage,
+            saveMessage: saveMessageWithOriginal,
             setMessages,
             setStreamTokenCount: streamingState.setStreamTokenCount,
             requestToolConfirmation,
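The integration contract visible in these hunks: the UI and the persisted originalContent keep the user's exact words, only the model sees the optimized text, and any optimizer failure falls back silently. Condensed sketch (saveMessage and the message shape are the app's own; this mirrors the hunk rather than quoting it):

    import { promptOptimizeAgent } from 'snow-ai/dist/agents/promptOptimizeAgent.js';

    async function sendOptimized(
        message: string,
        save: (m: Record<string, unknown>) => Promise<void>,
        signal?: AbortSignal,
    ) {
        let optimized = message;
        try {
            optimized = await promptOptimizeAgent.optimizePrompt(message, [], signal);
        }
        catch {
            // Optimization must never block the main flow - fall back silently.
        }
        const userMsg: Record<string, unknown> = { role: 'user', content: optimized };
        // Persist the user's exact words whenever they differ from what the model sees.
        if (optimized !== message) userMsg.originalContent = message;
        await save(userMsg);
    }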
package/dist/ui/pages/ConfigScreen.js CHANGED
@@ -51,6 +51,8 @@ export default function ConfigScreen({ onBack, onSave, inlineMode = false, }) {
     const [apiKey, setApiKey] = useState('');
     const [requestMethod, setRequestMethod] = useState('chat');
     const [anthropicBeta, setAnthropicBeta] = useState(false);
+    const [enablePromptOptimization, setEnablePromptOptimization] = useState(true);
+    const [enableAutoCompress, setEnableAutoCompress] = useState(true);
     const [thinkingEnabled, setThinkingEnabled] = useState(false);
     const [thinkingBudgetTokens, setThinkingBudgetTokens] = useState(10000);
     const [geminiThinkingEnabled, setGeminiThinkingEnabled] = useState(false);
@@ -101,6 +103,8 @@ export default function ConfigScreen({ onBack, onSave, inlineMode = false, }) {
     'baseUrl',
     'apiKey',
     'requestMethod',
+    'enablePromptOptimization',
+    'enableAutoCompress',
     ...(requestMethod === 'anthropic'
         ? [
             'anthropicBeta',
@@ -167,6 +171,8 @@ export default function ConfigScreen({ onBack, onSave, inlineMode = false, }) {
     setApiKey(config.apiKey);
     setRequestMethod(config.requestMethod || 'chat');
     setAnthropicBeta(config.anthropicBeta || false);
+    setEnablePromptOptimization(config.enablePromptOptimization !== false); // Default to true
+    setEnableAutoCompress(config.enableAutoCompress !== false); // Default to true
     setThinkingEnabled(config.thinking?.type === 'enabled' || false);
     setThinkingBudgetTokens(config.thinking?.budget_tokens || 10000);
     setGeminiThinkingEnabled(config.geminiThinking?.enabled || false);
@@ -328,6 +334,8 @@ export default function ConfigScreen({ onBack, onSave, inlineMode = false, }) {
     apiKey,
     requestMethod,
     anthropicBeta,
+    enablePromptOptimization,
+    enableAutoCompress,
     advancedModel,
     basicModel,
     maxContextTokens,
@@ -380,6 +388,8 @@ export default function ConfigScreen({ onBack, onSave, inlineMode = false, }) {
     apiKey,
     requestMethod,
     anthropicBeta,
+    enablePromptOptimization,
+    enableAutoCompress,
     thinking: thinkingEnabled
         ? { type: 'enabled', budget_tokens: thinkingBudgetTokens }
         : undefined,
@@ -460,6 +470,26 @@ export default function ConfigScreen({ onBack, onSave, inlineMode = false, }) {
                     anthropicBeta ? t.configScreen.enabled : t.configScreen.disabled,
                     " ",
                     t.configScreen.toggleHint))));
+        case 'enablePromptOptimization':
+            return (React.createElement(Box, { key: field, flexDirection: "column" },
+                React.createElement(Text, { color: isActive ? 'green' : 'white' },
+                    isActive ? '❯ ' : '  ',
+                    t.configScreen.enablePromptOptimization),
+                React.createElement(Box, { marginLeft: 3 },
+                    React.createElement(Text, { color: "gray" },
+                        enablePromptOptimization ? t.configScreen.enabled : t.configScreen.disabled,
+                        " ",
+                        t.configScreen.toggleHint))));
+        case 'enableAutoCompress':
+            return (React.createElement(Box, { key: field, flexDirection: "column" },
+                React.createElement(Text, { color: isActive ? 'green' : 'white' },
+                    isActive ? '❯ ' : '  ',
+                    t.configScreen.enableAutoCompress),
+                React.createElement(Box, { marginLeft: 3 },
+                    React.createElement(Text, { color: "gray" },
+                        enableAutoCompress ? t.configScreen.enabled : t.configScreen.disabled,
+                        " ",
+                        t.configScreen.toggleHint))));
         case 'thinkingEnabled':
             return (React.createElement(Box, { key: field, flexDirection: "column" },
                 React.createElement(Text, { color: isActive ? 'green' : 'white' },
@@ -809,6 +839,12 @@ export default function ConfigScreen({ onBack, onSave, inlineMode = false, }) {
     if (currentField === 'anthropicBeta') {
         setAnthropicBeta(!anthropicBeta);
     }
+    else if (currentField === 'enablePromptOptimization') {
+        setEnablePromptOptimization(!enablePromptOptimization);
+    }
+    else if (currentField === 'enableAutoCompress') {
+        setEnableAutoCompress(!enableAutoCompress);
+    }
     else if (currentField === 'thinkingEnabled') {
         setThinkingEnabled(!thinkingEnabled);
     }
package/dist/utils/apiConfig.d.ts CHANGED
@@ -27,6 +27,8 @@ export interface ApiConfig {
     thinking?: ThinkingConfig;
     geminiThinking?: GeminiThinkingConfig;
     responsesReasoning?: ResponsesReasoningConfig;
+    enablePromptOptimization?: boolean;
+    enableAutoCompress?: boolean;
 }
 export interface MCPServer {
     url?: string;
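Both new ApiConfig fields are optional booleans that default to on, which is why every read site in this release compares with !== false instead of truthiness: configs written by pre-0.4 versions leave the fields undefined and keep both features enabled. The idiom:

    interface ApiConfig {
        enablePromptOptimization?: boolean;
        enableAutoCompress?: boolean;
    }

    const legacy: ApiConfig = {}; // config file written by an older version
    const optimizeOn = legacy.enablePromptOptimization !== false; // true
    const compressOn = legacy.enableAutoCompress !== false;       // true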
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "snow-ai",
-  "version": "0.3.37",
+  "version": "0.4.1",
   "description": "Intelligent Command Line Assistant powered by AI",
   "license": "MIT",
   "bin": {
@@ -65,6 +65,7 @@
   "devDependencies": {
     "@sindresorhus/tsconfig": "^3.0.1",
     "@types/diff": "^7.0.2",
+    "@types/prettier": "^2.7.3",
     "@types/react": "^18.0.32",
     "@types/ws": "^8.5.8",
     "@vdemedes/prettier-config": "^2.0.1",