snow-ai 0.3.6 → 0.3.8
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/agents/compactAgent.js +7 -3
- package/dist/agents/reviewAgent.d.ts +50 -0
- package/dist/agents/reviewAgent.js +264 -0
- package/dist/agents/summaryAgent.d.ts +34 -8
- package/dist/agents/summaryAgent.js +167 -164
- package/dist/api/anthropic.d.ts +1 -0
- package/dist/api/anthropic.js +118 -78
- package/dist/api/chat.d.ts +2 -1
- package/dist/api/chat.js +82 -52
- package/dist/api/gemini.d.ts +1 -0
- package/dist/api/gemini.js +110 -64
- package/dist/api/responses.d.ts +10 -1
- package/dist/api/responses.js +127 -79
- package/dist/api/systemPrompt.d.ts +1 -1
- package/dist/api/systemPrompt.js +36 -7
- package/dist/api/types.d.ts +8 -0
- package/dist/app.js +15 -2
- package/dist/hooks/useCommandHandler.d.ts +1 -0
- package/dist/hooks/useCommandHandler.js +102 -1
- package/dist/hooks/useCommandPanel.d.ts +2 -1
- package/dist/hooks/useCommandPanel.js +19 -1
- package/dist/hooks/useConversation.d.ts +4 -1
- package/dist/hooks/useConversation.js +91 -29
- package/dist/hooks/useKeyboardInput.js +19 -0
- package/dist/hooks/useSnapshotState.d.ts +2 -0
- package/dist/hooks/useTerminalFocus.js +13 -3
- package/dist/mcp/aceCodeSearch.d.ts +2 -76
- package/dist/mcp/aceCodeSearch.js +31 -467
- package/dist/mcp/bash.d.ts +1 -8
- package/dist/mcp/bash.js +20 -40
- package/dist/mcp/filesystem.d.ts +131 -111
- package/dist/mcp/filesystem.js +212 -375
- package/dist/mcp/ideDiagnostics.js +2 -4
- package/dist/mcp/todo.d.ts +1 -17
- package/dist/mcp/todo.js +11 -15
- package/dist/mcp/types/aceCodeSearch.types.d.ts +92 -0
- package/dist/mcp/types/aceCodeSearch.types.js +4 -0
- package/dist/mcp/types/bash.types.d.ts +13 -0
- package/dist/mcp/types/bash.types.js +4 -0
- package/dist/mcp/types/filesystem.types.d.ts +135 -0
- package/dist/mcp/types/filesystem.types.js +4 -0
- package/dist/mcp/types/todo.types.d.ts +27 -0
- package/dist/mcp/types/todo.types.js +4 -0
- package/dist/mcp/types/websearch.types.d.ts +30 -0
- package/dist/mcp/types/websearch.types.js +4 -0
- package/dist/mcp/utils/aceCodeSearch/filesystem.utils.d.ts +34 -0
- package/dist/mcp/utils/aceCodeSearch/filesystem.utils.js +146 -0
- package/dist/mcp/utils/aceCodeSearch/language.utils.d.ts +14 -0
- package/dist/mcp/utils/aceCodeSearch/language.utils.js +99 -0
- package/dist/mcp/utils/aceCodeSearch/search.utils.d.ts +31 -0
- package/dist/mcp/utils/aceCodeSearch/search.utils.js +136 -0
- package/dist/mcp/utils/aceCodeSearch/symbol.utils.d.ts +20 -0
- package/dist/mcp/utils/aceCodeSearch/symbol.utils.js +141 -0
- package/dist/mcp/utils/bash/security.utils.d.ts +20 -0
- package/dist/mcp/utils/bash/security.utils.js +34 -0
- package/dist/mcp/utils/filesystem/batch-operations.utils.d.ts +39 -0
- package/dist/mcp/utils/filesystem/batch-operations.utils.js +182 -0
- package/dist/mcp/utils/filesystem/code-analysis.utils.d.ts +18 -0
- package/dist/mcp/utils/filesystem/code-analysis.utils.js +165 -0
- package/dist/mcp/utils/filesystem/match-finder.utils.d.ts +16 -0
- package/dist/mcp/utils/filesystem/match-finder.utils.js +85 -0
- package/dist/mcp/utils/filesystem/similarity.utils.d.ts +22 -0
- package/dist/mcp/utils/filesystem/similarity.utils.js +75 -0
- package/dist/mcp/utils/todo/date.utils.d.ts +9 -0
- package/dist/mcp/utils/todo/date.utils.js +14 -0
- package/dist/mcp/utils/websearch/browser.utils.d.ts +8 -0
- package/dist/mcp/utils/websearch/browser.utils.js +58 -0
- package/dist/mcp/utils/websearch/text.utils.d.ts +16 -0
- package/dist/mcp/utils/websearch/text.utils.js +39 -0
- package/dist/mcp/websearch.d.ts +1 -31
- package/dist/mcp/websearch.js +21 -97
- package/dist/ui/components/ChatInput.d.ts +3 -1
- package/dist/ui/components/ChatInput.js +12 -5
- package/dist/ui/components/CommandPanel.d.ts +2 -1
- package/dist/ui/components/CommandPanel.js +18 -3
- package/dist/ui/components/MarkdownRenderer.d.ts +1 -2
- package/dist/ui/components/MarkdownRenderer.js +25 -153
- package/dist/ui/components/MessageList.js +5 -5
- package/dist/ui/components/PendingMessages.js +1 -1
- package/dist/ui/components/PendingToolCalls.d.ts +11 -0
- package/dist/ui/components/PendingToolCalls.js +35 -0
- package/dist/ui/components/SessionListScreen.js +37 -17
- package/dist/ui/components/ToolResultPreview.d.ts +1 -1
- package/dist/ui/components/ToolResultPreview.js +119 -155
- package/dist/ui/components/UsagePanel.d.ts +2 -0
- package/dist/ui/components/UsagePanel.js +360 -0
- package/dist/ui/pages/ChatScreen.d.ts +5 -0
- package/dist/ui/pages/ChatScreen.js +164 -85
- package/dist/ui/pages/ConfigScreen.js +23 -19
- package/dist/ui/pages/HeadlessModeScreen.js +2 -4
- package/dist/ui/pages/SubAgentConfigScreen.js +17 -17
- package/dist/ui/pages/SystemPromptConfigScreen.js +7 -6
- package/dist/utils/chatExporter.d.ts +9 -0
- package/dist/utils/chatExporter.js +126 -0
- package/dist/utils/commandExecutor.d.ts +3 -3
- package/dist/utils/commandExecutor.js +4 -4
- package/dist/utils/commands/export.d.ts +2 -0
- package/dist/utils/commands/export.js +12 -0
- package/dist/utils/commands/home.d.ts +2 -0
- package/dist/utils/commands/home.js +12 -0
- package/dist/utils/commands/init.js +3 -3
- package/dist/utils/commands/review.d.ts +2 -0
- package/dist/utils/commands/review.js +81 -0
- package/dist/utils/commands/role.d.ts +2 -0
- package/dist/utils/commands/role.js +37 -0
- package/dist/utils/commands/usage.d.ts +2 -0
- package/dist/utils/commands/usage.js +12 -0
- package/dist/utils/contextCompressor.js +99 -367
- package/dist/utils/fileDialog.d.ts +9 -0
- package/dist/utils/fileDialog.js +74 -0
- package/dist/utils/incrementalSnapshot.d.ts +7 -0
- package/dist/utils/incrementalSnapshot.js +35 -0
- package/dist/utils/mcpToolsManager.js +12 -12
- package/dist/utils/messageFormatter.js +89 -6
- package/dist/utils/proxyUtils.d.ts +15 -0
- package/dist/utils/proxyUtils.js +50 -0
- package/dist/utils/retryUtils.d.ts +27 -0
- package/dist/utils/retryUtils.js +114 -2
- package/dist/utils/sessionConverter.js +11 -0
- package/dist/utils/sessionManager.d.ts +7 -5
- package/dist/utils/sessionManager.js +60 -82
- package/dist/utils/terminal.js +4 -3
- package/dist/utils/toolDisplayConfig.d.ts +16 -0
- package/dist/utils/toolDisplayConfig.js +42 -0
- package/dist/utils/usageLogger.d.ts +11 -0
- package/dist/utils/usageLogger.js +99 -0
- package/package.json +3 -7

package/dist/utils/contextCompressor.js
@@ -1,58 +1,29 @@
-import { getOpenAiConfig,
-import {
+import { getOpenAiConfig, getCustomSystemPrompt } from './apiConfig.js';
+import { getSystemPrompt } from '../api/systemPrompt.js';
+import { createStreamingChatCompletion } from '../api/chat.js';
+import { createStreamingResponse } from '../api/responses.js';
+import { createStreamingGeminiCompletion } from '../api/gemini.js';
+import { createStreamingAnthropicCompletion } from '../api/anthropic.js';
 /**
  * Compression request prompt - asks AI to summarize conversation with focus on task continuity
  */
 const COMPRESSION_PROMPT = 'Please provide a concise summary of our conversation so far. Focus on: 1) The current task or goal we are working on, 2) Key decisions and approaches we have agreed upon, 3) Important context needed to continue, 4) Any pending or unfinished work. Keep it brief but ensure I can seamlessly continue assisting with the task.';
 /**
- *
+ * Prepare messages for compression by adding system prompt and compression request
  */
-
-    const decoder = new TextDecoder();
-    let buffer = '';
-    while (true) {
-        const { done, value } = await reader.read();
-        if (done)
-            break;
-        buffer += decoder.decode(value, { stream: true });
-        const lines = buffer.split('\n');
-        buffer = lines.pop() || '';
-        for (const line of lines) {
-            const trimmed = line.trim();
-            if (!trimmed || trimmed.startsWith(':'))
-                continue;
-            if (trimmed === 'data: [DONE]') {
-                return;
-            }
-            if (trimmed.startsWith('data: ')) {
-                const data = trimmed.slice(6);
-                try {
-                    yield JSON.parse(data);
-                }
-                catch (e) {
-                    console.error('Failed to parse SSE data:', data);
-                }
-            }
-        }
-    }
-}
-/**
- * Compress context using OpenAI Chat Completions API
- */
-async function compressWithChatCompletions(baseUrl, apiKey, modelName, conversationMessages, systemPrompt) {
-    const customHeaders = getCustomHeaders();
-    // Build messages with system prompt support
+function prepareMessagesForCompression(conversationMessages, customSystemPrompt) {
     const messages = [];
-
+    // Add system prompt (handled by API modules)
+    if (customSystemPrompt) {
         // If custom system prompt exists: custom as system, default as first user message
-    messages.push({ role: 'system', content:
-    messages.push({ role: 'user', content:
+        messages.push({ role: 'system', content: customSystemPrompt });
+        messages.push({ role: 'user', content: getSystemPrompt() });
     }
     else {
         // No custom system prompt: default as system
-    messages.push({ role: 'system', content:
+        messages.push({ role: 'system', content: getSystemPrompt() });
     }
-    // Add all conversation history (exclude system messages)
+    // Add all conversation history (exclude system and tool messages)
     for (const msg of conversationMessages) {
        if (msg.role !== 'system' && msg.role !== 'tool') {
             messages.push({
@@ -66,42 +37,31 @@ async function compressWithChatCompletions(baseUrl, apiKey, modelName, conversat
         role: 'user',
         content: COMPRESSION_PROMPT,
     });
-
-
-
-
-
-
-
-    const response = await fetch(`${baseUrl}/chat/completions`, {
-        method: 'POST',
-        headers: {
-            'Content-Type': 'application/json',
-            'Authorization': `Bearer ${apiKey}`,
-            ...customHeaders
-        },
-        body: JSON.stringify(requestPayload)
-    });
-    if (!response.ok) {
-        const errorText = await response.text();
-        throw new Error(`OpenAI API error: ${response.status} ${response.statusText} - ${errorText}`);
-    }
-    if (!response.body) {
-        throw new Error('No response body from OpenAI API');
-    }
+    return messages;
+}
+/**
+ * Compress context using OpenAI Chat Completions API (reuses chat.ts)
+ */
+async function compressWithChatCompletions(modelName, conversationMessages, customSystemPrompt) {
+    const messages = prepareMessagesForCompression(conversationMessages, customSystemPrompt);
     let summary = '';
     let usage = {
         prompt_tokens: 0,
         completion_tokens: 0,
         total_tokens: 0,
     };
-
-
-
-
+    // Use the existing streaming API from chat.ts (includes proxy support)
+    for await (const chunk of createStreamingChatCompletion({
+        model: modelName,
+        messages,
+        stream: true,
+    })) {
+        // Collect content
+        if (chunk.type === 'content' && chunk.content) {
+            summary += chunk.content;
         }
-        // Collect usage info
-        if (chunk.usage) {
+        // Collect usage info
+        if (chunk.type === 'usage' && chunk.usage) {
             usage = {
                 prompt_tokens: chunk.usage.prompt_tokens || 0,
                 completion_tokens: chunk.usage.completion_tokens || 0,
@@ -112,334 +72,111 @@ async function compressWithChatCompletions(baseUrl, apiKey, modelName, conversat
     if (!summary) {
         throw new Error('Failed to generate summary from compact model');
     }
-    return {
-        summary,
-        usage,
-    };
+    return { summary, usage };
 }
 /**
- * Compress context using OpenAI Responses API
+ * Compress context using OpenAI Responses API (reuses responses.ts)
  */
-async function compressWithResponses(
-    const
-    // Build instructions
-    const instructions = systemPrompt || SYSTEM_PROMPT;
-    // Build input array with conversation history
-    const input = [];
-    // If custom system prompt exists, add default as first user message
-    if (systemPrompt) {
-        input.push({
-            type: 'message',
-            role: 'user',
-            content: [{ type: 'input_text', text: SYSTEM_PROMPT }],
-        });
-    }
-    // Add all conversation history (exclude system messages)
-    for (const msg of conversationMessages) {
-        if (msg.role !== 'system' && msg.role !== 'tool') {
-            input.push({
-                type: 'message',
-                role: msg.role,
-                content: [{
-                    type: msg.role === 'user' ? 'input_text' : 'output_text',
-                    text: msg.content,
-                }],
-            });
-        }
-    }
-    // Add compression request as final user message
-    input.push({
-        type: 'message',
-        role: 'user',
-        content: [{
-            type: 'input_text',
-            text: COMPRESSION_PROMPT,
-        }],
-    });
-    // Build request payload (no tools for compression)
-    const requestPayload = {
-        model: modelName,
-        instructions,
-        input,
-        stream: true,
-    };
-    const response = await fetch(`${baseUrl}/responses`, {
-        method: 'POST',
-        headers: {
-            'Content-Type': 'application/json',
-            'Authorization': `Bearer ${apiKey}`,
-            ...customHeaders
-        },
-        body: JSON.stringify(requestPayload)
-    });
-    if (!response.ok) {
-        const errorText = await response.text();
-        throw new Error(`OpenAI Responses API error: ${response.status} ${response.statusText} - ${errorText}`);
-    }
-    if (!response.body) {
-        throw new Error('No response body from OpenAI Responses API');
-    }
+async function compressWithResponses(modelName, conversationMessages, customSystemPrompt) {
+    const messages = prepareMessagesForCompression(conversationMessages, customSystemPrompt);
     let summary = '';
     let usage = {
         prompt_tokens: 0,
         completion_tokens: 0,
         total_tokens: 0,
     };
-
-
-
-
-
-
-
-
+    // Use the existing streaming API from responses.ts (includes proxy support)
+    for await (const chunk of createStreamingResponse({
+        model: modelName,
+        messages,
+        stream: true,
+    })) {
+        // Collect content
+        if (chunk.type === 'content' && chunk.content) {
+            summary += chunk.content;
         }
-        //
-        if (
-
-
-            usage
-
-
-            total_tokens: (response.usage.input_tokens || 0) + (response.usage.output_tokens || 0),
-        };
-    }
+        // Collect usage info
+        if (chunk.type === 'usage' && chunk.usage) {
+            usage = {
+                prompt_tokens: chunk.usage.prompt_tokens || 0,
+                completion_tokens: chunk.usage.completion_tokens || 0,
+                total_tokens: chunk.usage.total_tokens || 0,
+            };
         }
     }
     if (!summary) {
         throw new Error('Failed to generate summary from compact model (Responses API)');
     }
-    return {
-        summary,
-        usage,
-    };
+    return { summary, usage };
 }
 /**
- * Compress context using Gemini API
+ * Compress context using Gemini API (reuses gemini.ts)
  */
-async function compressWithGemini(
-    const
-    // Build system instruction
-    const systemInstruction = systemPrompt || SYSTEM_PROMPT;
-    // Build contents array with conversation history
-    const contents = [];
-    // If custom system prompt exists, add default as first user message
-    if (systemPrompt) {
-        contents.push({
-            role: 'user',
-            parts: [{ text: SYSTEM_PROMPT }],
-        });
-    }
-    // Add all conversation history (exclude system messages)
-    for (const msg of conversationMessages) {
-        if (msg.role !== 'system' && msg.role !== 'tool') {
-            contents.push({
-                role: msg.role === 'assistant' ? 'model' : 'user',
-                parts: [{ text: msg.content }],
-            });
-        }
-    }
-    // Add compression request as final user message
-    contents.push({
-        role: 'user',
-        parts: [{
-            text: COMPRESSION_PROMPT,
-        }],
-    });
-    const requestBody = {
-        contents,
-        systemInstruction: systemInstruction ? { parts: [{ text: systemInstruction }] } : undefined,
-    };
-    // Extract model name
-    const effectiveBaseUrl = baseUrl && baseUrl !== 'https://api.openai.com/v1'
-        ? baseUrl
-        : 'https://generativelanguage.googleapis.com/v1beta';
-    const model = modelName.startsWith('models/') ? modelName : `models/${modelName}`;
-    const url = `${effectiveBaseUrl}/${model}:streamGenerateContent?key=${apiKey}&alt=sse`;
-    const response = await fetch(url, {
-        method: 'POST',
-        headers: {
-            'Content-Type': 'application/json',
-            'Authorization': `Bearer ${apiKey}`,
-            ...customHeaders
-        },
-        body: JSON.stringify(requestBody)
-    });
-    if (!response.ok) {
-        const errorText = await response.text();
-        throw new Error(`Gemini API error: ${response.status} ${response.statusText} - ${errorText}`);
-    }
-    if (!response.body) {
-        throw new Error('No response body from Gemini API');
-    }
+async function compressWithGemini(modelName, conversationMessages, customSystemPrompt) {
+    const messages = prepareMessagesForCompression(conversationMessages, customSystemPrompt);
     let summary = '';
     let usage = {
         prompt_tokens: 0,
         completion_tokens: 0,
         total_tokens: 0,
     };
-    //
-    const
-
-
-
-
-    if (
-
-
-
-
-
-
-
-
-
-            const data = trimmed.slice(6);
-            try {
-                const chunk = JSON.parse(data);
-                // Process candidates
-                if (chunk.candidates && chunk.candidates.length > 0) {
-                    const candidate = chunk.candidates[0];
-                    if (candidate.content && candidate.content.parts) {
-                        for (const part of candidate.content.parts) {
-                            if (part.text) {
-                                summary += part.text;
-                            }
-                        }
-                    }
-                }
-                // Collect usage info
-                if (chunk.usageMetadata) {
-                    usage = {
-                        prompt_tokens: chunk.usageMetadata.promptTokenCount || 0,
-                        completion_tokens: chunk.usageMetadata.candidatesTokenCount || 0,
-                        total_tokens: chunk.usageMetadata.totalTokenCount || 0,
-                    };
-                }
-            }
-            catch (e) {
-                console.error('Failed to parse Gemini SSE data:', data);
-            }
-        }
+    // Use the existing streaming API from gemini.ts (includes proxy support)
+    for await (const chunk of createStreamingGeminiCompletion({
+        model: modelName,
+        messages,
+    })) {
+        // Collect content
+        if (chunk.type === 'content' && chunk.content) {
+            summary += chunk.content;
+        }
+        // Collect usage info
+        if (chunk.type === 'usage' && chunk.usage) {
+            usage = {
+                prompt_tokens: chunk.usage.prompt_tokens || 0,
+                completion_tokens: chunk.usage.completion_tokens || 0,
+                total_tokens: chunk.usage.total_tokens || 0,
+            };
+        }
     }
     if (!summary) {
         throw new Error('Failed to generate summary from Gemini model');
     }
-    return {
-        summary,
-        usage,
-    };
+    return { summary, usage };
 }
 /**
- * Compress context using Anthropic API
+ * Compress context using Anthropic API (reuses anthropic.ts)
  */
-async function compressWithAnthropic(
-    const
-    // Build messages array with conversation history
-    const messages = [];
-    // If custom system prompt exists, add default as first user message
-    if (systemPrompt) {
-        messages.push({ role: 'user', content: SYSTEM_PROMPT });
-    }
-    // Add all conversation history (exclude system messages)
-    for (const msg of conversationMessages) {
-        if (msg.role !== 'system' && msg.role !== 'tool') {
-            messages.push({
-                role: msg.role,
-                content: msg.content,
-            });
-        }
-    }
-    // Add compression request as final user message
-    messages.push({
-        role: 'user',
-        content: COMPRESSION_PROMPT,
-    });
-    // Anthropic uses system parameter separately
-    const systemParam = systemPrompt || SYSTEM_PROMPT;
-    // Build request payload (no tools for compression)
-    const requestPayload = {
-        model: modelName,
-        max_tokens: 4096,
-        system: systemParam,
-        messages,
-        stream: true
-    };
-    const effectiveBaseUrl = baseUrl && baseUrl !== 'https://api.openai.com/v1'
-        ? baseUrl
-        : 'https://api.anthropic.com/v1';
-    const response = await fetch(`${effectiveBaseUrl}/messages`, {
-        method: 'POST',
-        headers: {
-            'Content-Type': 'application/json',
-            'x-api-key': apiKey,
-            'authorization': `Bearer ${apiKey}`,
-            ...customHeaders
-        },
-        body: JSON.stringify(requestPayload)
-    });
-    if (!response.ok) {
-        const errorText = await response.text();
-        throw new Error(`Anthropic API error: ${response.status} ${response.statusText} - ${errorText}`);
-    }
-    if (!response.body) {
-        throw new Error('No response body from Anthropic API');
-    }
+async function compressWithAnthropic(modelName, conversationMessages, customSystemPrompt) {
+    const messages = prepareMessagesForCompression(conversationMessages, customSystemPrompt);
     let summary = '';
     let usage = {
         prompt_tokens: 0,
         completion_tokens: 0,
         total_tokens: 0,
     };
-    //
-    const
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-        }
-        if (trimmed.startsWith('data: ')) {
-            const data = trimmed.slice(6);
-            try {
-                const event = JSON.parse(data);
-                if (event.type === 'content_block_delta' && event.delta?.type === 'text_delta') {
-                    summary += event.delta.text;
-                }
-                // Collect usage info from message_start event
-                if (event.type === 'message_start' && event.message?.usage) {
-                    usage.prompt_tokens = event.message.usage.input_tokens || 0;
-                }
-                // Collect usage info from message_delta event
-                if (event.type === 'message_delta' && event.usage) {
-                    usage.completion_tokens = event.usage.output_tokens || 0;
-                    usage.total_tokens = usage.prompt_tokens + usage.completion_tokens;
-                }
-            }
-            catch (e) {
-                console.error('Failed to parse Anthropic SSE data:', data);
-            }
-        }
+    // Use the existing streaming API from anthropic.ts (includes proxy support)
+    for await (const chunk of createStreamingAnthropicCompletion({
+        model: modelName,
+        messages,
+        max_tokens: 4096,
+    })) {
+        // Collect content
+        if (chunk.type === 'content' && chunk.content) {
+            summary += chunk.content;
+        }
+        // Collect usage info
+        if (chunk.type === 'usage' && chunk.usage) {
+            usage = {
+                prompt_tokens: chunk.usage.prompt_tokens || 0,
+                completion_tokens: chunk.usage.completion_tokens || 0,
+                total_tokens: chunk.usage.total_tokens || 0,
+            };
        }
    }
     if (!summary) {
         throw new Error('Failed to generate summary from Anthropic model');
     }
-    return {
-        summary,
-        usage,
-    };
+    return { summary, usage };
 }
 /**
  * Compress conversation history using the compact model
@@ -452,30 +189,25 @@ export async function compressContext(messages) {
     if (!config.compactModel || !config.compactModel.modelName) {
         throw new Error('Compact model not configured. Please configure it in API & Model Settings.');
     }
-    // Use shared API credentials
-    const baseUrl = config.baseUrl;
-    const apiKey = config.apiKey;
     const modelName = config.compactModel.modelName;
     const requestMethod = config.requestMethod;
-    if (!baseUrl || !apiKey) {
-        throw new Error('API configuration incomplete. Please configure Base URL and API Key.');
-    }
     // Get custom system prompt if configured
     const customSystemPrompt = getCustomSystemPrompt();
     try {
         // Choose compression method based on request method
+        // All methods now reuse existing API modules which include proxy support
         switch (requestMethod) {
            case 'gemini':
-                return await compressWithGemini(
+                return await compressWithGemini(modelName, messages, customSystemPrompt || null);
            case 'anthropic':
-                return await compressWithAnthropic(
+                return await compressWithAnthropic(modelName, messages, customSystemPrompt || null);
            case 'responses':
                // OpenAI Responses API
-                return await compressWithResponses(
+                return await compressWithResponses(modelName, messages, customSystemPrompt || null);
            case 'chat':
            default:
                // OpenAI Chat Completions API
-                return await compressWithChatCompletions(
+                return await compressWithChatCompletions(modelName, messages, customSystemPrompt || null);
         }
     }
     catch (error) {
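
All four providers in the rewritten contextCompressor now consume an identical chunk stream instead of hand-rolling fetch calls and SSE parsing, so cross-cutting concerns such as proxy support (called out in the diff comments) live once in the shared api/ modules. A minimal TypeScript sketch of that shared consumption pattern, assuming the { type: 'content' | 'usage' } chunk shape visible in the diff; the collectSummary helper and its types are illustrative, not part of snow-ai:

interface TokenUsage {
    prompt_tokens: number;
    completion_tokens: number;
    total_tokens: number;
}

interface StreamChunk {
    type: 'content' | 'usage';
    content?: string;
    usage?: Partial<TokenUsage>;
}

// Fold a provider-agnostic chunk stream into the { summary, usage } result
// shape that each compressWith* function in the diff returns.
async function collectSummary(stream: AsyncIterable<StreamChunk>): Promise<{ summary: string; usage: TokenUsage }> {
    let summary = '';
    let usage: TokenUsage = { prompt_tokens: 0, completion_tokens: 0, total_tokens: 0 };
    for await (const chunk of stream) {
        if (chunk.type === 'content' && chunk.content) {
            summary += chunk.content; // accumulate streamed text
        }
        if (chunk.type === 'usage' && chunk.usage) {
            usage = {
                prompt_tokens: chunk.usage.prompt_tokens ?? 0,
                completion_tokens: chunk.usage.completion_tokens ?? 0,
                total_tokens: chunk.usage.total_tokens ?? 0,
            };
        }
    }
    if (!summary) {
        // Mirrors the empty-summary guard each provider branch keeps in the diff
        throw new Error('Stream produced no content');
    }
    return { summary, usage };
}
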
package/dist/utils/fileDialog.d.ts
@@ -0,0 +1,9 @@
+/**
+ * Cross-platform file save dialog
+ * Opens a native file save dialog and returns the selected path
+ */
+export declare function showSaveDialog(defaultFilename?: string, title?: string): Promise<string | null>;
+/**
+ * Check if native file dialogs are available on this platform
+ */
+export declare function isFileDialogSupported(): boolean;
package/dist/utils/fileDialog.js
@@ -0,0 +1,74 @@
+import { exec } from 'child_process';
+import { promisify } from 'util';
+import * as path from 'path';
+import * as os from 'os';
+const execAsync = promisify(exec);
+/**
+ * Cross-platform file save dialog
+ * Opens a native file save dialog and returns the selected path
+ */
+export async function showSaveDialog(defaultFilename = 'export.txt', title = 'Save File') {
+    const platform = os.platform();
+    try {
+        if (platform === 'darwin') {
+            // macOS - use osascript (AppleScript)
+            const defaultPath = path.join(os.homedir(), 'Downloads', defaultFilename);
+            const script = `
+                set defaultPath to POSIX file "${defaultPath}"
+                set saveFile to choose file name with prompt "${title}" default location (POSIX file "${os.homedir()}/Downloads") default name "${defaultFilename}"
+                return POSIX path of saveFile
+            `;
+            const { stdout } = await execAsync(`osascript -e '${script.replace(/'/g, "'\\''")}'`);
+            return stdout.trim();
+        }
+        else if (platform === 'win32') {
+            // Windows - use PowerShell
+            const script = `
+                Add-Type -AssemblyName System.Windows.Forms
+                $dialog = New-Object System.Windows.Forms.SaveFileDialog
+                $dialog.Title = "${title}"
+                $dialog.Filter = "Text files (*.txt)|*.txt|Markdown files (*.md)|*.md|All files (*.*)|*.*"
+                $dialog.FileName = "${defaultFilename}"
+                $dialog.InitialDirectory = "${path.join(os.homedir(), 'Downloads').replace(/\\/g, '\\\\')}"
+                $result = $dialog.ShowDialog()
+                if ($result -eq 'OK') {
+                    Write-Output $dialog.FileName
+                }
+            `;
+            const { stdout } = await execAsync(`powershell -NoProfile -Command "${script.replace(/"/g, '\\"')}"`);
+            const result = stdout.trim();
+            return result || null;
+        }
+        else {
+            // Linux - use zenity (most common) or kdialog as fallback
+            try {
+                const defaultPath = path.join(os.homedir(), 'Downloads', defaultFilename);
+                const { stdout } = await execAsync(`zenity --file-selection --save --title="${title}" --filename="${defaultPath}" --confirm-overwrite`);
+                return stdout.trim();
+            }
+            catch (error) {
+                // Try kdialog as fallback for KDE systems
+                try {
+                    const defaultPath = path.join(os.homedir(), 'Downloads', defaultFilename);
+                    const { stdout } = await execAsync(`kdialog --getsavefilename "${defaultPath}" "*.*|All Files" --title "${title}"`);
+                    return stdout.trim();
+                }
+                catch {
+                    // If both fail, return null
+                    return null;
+                }
+            }
+        }
+    }
+    catch (error) {
+        // User cancelled or error occurred
+        return null;
+    }
+}
+/**
+ * Check if native file dialogs are available on this platform
+ */
+export function isFileDialogSupported() {
+    const platform = os.platform();
+    return platform === 'darwin' || platform === 'win32' || platform === 'linux';
+}
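
The fileDialog utilities presumably back the new export command (see utils/commands/export.js and chatExporter.js in the file list). A hypothetical caller for illustration only; showSaveDialog and isFileDialogSupported come from the diff, while the cwd fallback is an assumption:

import * as path from 'path';
import { showSaveDialog, isFileDialogSupported } from './fileDialog.js';

// Ask the user where to save an export; fall back to the current working
// directory when no native dialog is available or the user cancels.
async function pickExportPath(defaultName = 'chat-export.md'): Promise<string> {
    if (isFileDialogSupported()) {
        const chosen = await showSaveDialog(defaultName, 'Export Chat');
        if (chosen) {
            return chosen; // a null result means the user cancelled or the dialog failed
        }
    }
    return path.join(process.cwd(), defaultName);
}
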
package/dist/utils/incrementalSnapshot.d.ts
@@ -75,6 +75,13 @@ declare class IncrementalSnapshotManager {
      * @returns Number of files rolled back
      */
     rollbackToMessageIndex(sessionId: string, targetMessageIndex: number): Promise<number>;
+    /**
+     * Delete all snapshots >= targetMessageIndex
+     * This is used when user rolls back conversation to clean up snapshot files
+     * @param sessionId Session ID
+     * @param targetMessageIndex The message index to delete from (inclusive)
+     */
+    deleteSnapshotsFromIndex(sessionId: string, targetMessageIndex: number): Promise<number>;
     /**
      * Clear all snapshots for a session
      */
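
A hedged sketch of how the new method pairs with the existing rollback during a conversation rewind; only the two method signatures come from the diff above, while the wrapper and its call order are assumptions:

// Structural type carrying just the two signatures from the .d.ts diff.
interface SnapshotRollback {
    rollbackToMessageIndex(sessionId: string, targetMessageIndex: number): Promise<number>;
    deleteSnapshotsFromIndex(sessionId: string, targetMessageIndex: number): Promise<number>;
}

// Restore file contents recorded at or after the target message, then delete
// the now-stale snapshot files (>= targetMessageIndex, inclusive).
async function rewindTo(manager: SnapshotRollback, sessionId: string, targetMessageIndex: number) {
    const restored = await manager.rollbackToMessageIndex(sessionId, targetMessageIndex);
    const deleted = await manager.deleteSnapshotsFromIndex(sessionId, targetMessageIndex);
    return { restored, deleted };
}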