snow-ai 0.3.5 → 0.3.7
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/agents/reviewAgent.d.ts +50 -0
- package/dist/agents/reviewAgent.js +264 -0
- package/dist/api/anthropic.js +104 -71
- package/dist/api/chat.d.ts +1 -1
- package/dist/api/chat.js +60 -41
- package/dist/api/gemini.js +97 -57
- package/dist/api/responses.d.ts +9 -1
- package/dist/api/responses.js +110 -70
- package/dist/api/systemPrompt.d.ts +1 -1
- package/dist/api/systemPrompt.js +36 -7
- package/dist/api/types.d.ts +8 -0
- package/dist/hooks/useCommandHandler.d.ts +1 -0
- package/dist/hooks/useCommandHandler.js +44 -1
- package/dist/hooks/useCommandPanel.js +13 -0
- package/dist/hooks/useConversation.d.ts +4 -1
- package/dist/hooks/useConversation.js +65 -9
- package/dist/hooks/useKeyboardInput.js +19 -0
- package/dist/hooks/useTerminalFocus.js +13 -3
- package/dist/mcp/aceCodeSearch.d.ts +2 -76
- package/dist/mcp/aceCodeSearch.js +31 -467
- package/dist/mcp/bash.d.ts +1 -8
- package/dist/mcp/bash.js +20 -40
- package/dist/mcp/filesystem.d.ts +3 -68
- package/dist/mcp/filesystem.js +32 -348
- package/dist/mcp/ideDiagnostics.js +2 -4
- package/dist/mcp/todo.d.ts +1 -17
- package/dist/mcp/todo.js +11 -15
- package/dist/mcp/types/aceCodeSearch.types.d.ts +92 -0
- package/dist/mcp/types/aceCodeSearch.types.js +4 -0
- package/dist/mcp/types/bash.types.d.ts +13 -0
- package/dist/mcp/types/bash.types.js +4 -0
- package/dist/mcp/types/filesystem.types.d.ts +44 -0
- package/dist/mcp/types/filesystem.types.js +4 -0
- package/dist/mcp/types/todo.types.d.ts +27 -0
- package/dist/mcp/types/todo.types.js +4 -0
- package/dist/mcp/types/websearch.types.d.ts +30 -0
- package/dist/mcp/types/websearch.types.js +4 -0
- package/dist/mcp/utils/aceCodeSearch/filesystem.utils.d.ts +34 -0
- package/dist/mcp/utils/aceCodeSearch/filesystem.utils.js +146 -0
- package/dist/mcp/utils/aceCodeSearch/language.utils.d.ts +14 -0
- package/dist/mcp/utils/aceCodeSearch/language.utils.js +99 -0
- package/dist/mcp/utils/aceCodeSearch/search.utils.d.ts +31 -0
- package/dist/mcp/utils/aceCodeSearch/search.utils.js +136 -0
- package/dist/mcp/utils/aceCodeSearch/symbol.utils.d.ts +20 -0
- package/dist/mcp/utils/aceCodeSearch/symbol.utils.js +141 -0
- package/dist/mcp/utils/bash/security.utils.d.ts +20 -0
- package/dist/mcp/utils/bash/security.utils.js +34 -0
- package/dist/mcp/utils/filesystem/code-analysis.utils.d.ts +18 -0
- package/dist/mcp/utils/filesystem/code-analysis.utils.js +165 -0
- package/dist/mcp/utils/filesystem/match-finder.utils.d.ts +16 -0
- package/dist/mcp/utils/filesystem/match-finder.utils.js +85 -0
- package/dist/mcp/utils/filesystem/similarity.utils.d.ts +22 -0
- package/dist/mcp/utils/filesystem/similarity.utils.js +75 -0
- package/dist/mcp/utils/todo/date.utils.d.ts +9 -0
- package/dist/mcp/utils/todo/date.utils.js +14 -0
- package/dist/mcp/utils/websearch/browser.utils.d.ts +8 -0
- package/dist/mcp/utils/websearch/browser.utils.js +58 -0
- package/dist/mcp/utils/websearch/text.utils.d.ts +16 -0
- package/dist/mcp/utils/websearch/text.utils.js +39 -0
- package/dist/mcp/websearch.d.ts +1 -31
- package/dist/mcp/websearch.js +21 -97
- package/dist/ui/components/ChatInput.d.ts +2 -1
- package/dist/ui/components/ChatInput.js +10 -3
- package/dist/ui/components/MarkdownRenderer.d.ts +1 -2
- package/dist/ui/components/MarkdownRenderer.js +16 -153
- package/dist/ui/components/MessageList.js +4 -4
- package/dist/ui/components/SessionListScreen.js +37 -17
- package/dist/ui/components/ToolResultPreview.js +27 -7
- package/dist/ui/components/UsagePanel.d.ts +2 -0
- package/dist/ui/components/UsagePanel.js +360 -0
- package/dist/ui/pages/ChatScreen.d.ts +4 -0
- package/dist/ui/pages/ChatScreen.js +70 -30
- package/dist/ui/pages/ConfigScreen.js +23 -19
- package/dist/ui/pages/HeadlessModeScreen.js +2 -4
- package/dist/ui/pages/SubAgentConfigScreen.js +17 -17
- package/dist/ui/pages/SystemPromptConfigScreen.js +7 -6
- package/dist/utils/commandExecutor.d.ts +3 -3
- package/dist/utils/commandExecutor.js +4 -4
- package/dist/utils/commands/home.d.ts +2 -0
- package/dist/utils/commands/home.js +12 -0
- package/dist/utils/commands/review.d.ts +2 -0
- package/dist/utils/commands/review.js +81 -0
- package/dist/utils/commands/role.d.ts +2 -0
- package/dist/utils/commands/role.js +37 -0
- package/dist/utils/commands/usage.d.ts +2 -0
- package/dist/utils/commands/usage.js +12 -0
- package/dist/utils/contextCompressor.js +99 -367
- package/dist/utils/fileUtils.js +3 -3
- package/dist/utils/mcpToolsManager.js +12 -12
- package/dist/utils/proxyUtils.d.ts +15 -0
- package/dist/utils/proxyUtils.js +50 -0
- package/dist/utils/retryUtils.d.ts +27 -0
- package/dist/utils/retryUtils.js +114 -2
- package/dist/utils/sessionManager.d.ts +2 -5
- package/dist/utils/sessionManager.js +16 -83
- package/dist/utils/terminal.js +4 -3
- package/dist/utils/usageLogger.d.ts +11 -0
- package/dist/utils/usageLogger.js +99 -0
- package/package.json +3 -7
- package/dist/agents/summaryAgent.d.ts +0 -31
- package/dist/agents/summaryAgent.js +0 -256
package/dist/api/responses.js
CHANGED

@@ -1,6 +1,8 @@
-import { getOpenAiConfig, getCustomSystemPrompt, getCustomHeaders } from '../utils/apiConfig.js';
-import {
-import { withRetryGenerator } from '../utils/retryUtils.js';
+import { getOpenAiConfig, getCustomSystemPrompt, getCustomHeaders, } from '../utils/apiConfig.js';
+import { getSystemPrompt } from './systemPrompt.js';
+import { withRetryGenerator, parseJsonWithFix } from '../utils/retryUtils.js';
+import { addProxyToFetchOptions } from '../utils/proxyUtils.js';
+import { saveUsageToFile } from '../utils/usageLogger.js';
 /**
  * Ensure the schema meets the Responses API requirements:
  * 1. additionalProperties: false
@@ -11,25 +13,35 @@ function ensureStrictSchema(schema) {
         return undefined;
     }
     // Deep copy the schema
-    const
-
+    const stringified = JSON.stringify(schema);
+    const parseResult = parseJsonWithFix(stringified, {
+        toolName: 'Schema deep copy',
+        fallbackValue: schema, // on failure, use the original schema
+        logWarning: true,
+        logError: true,
+    });
+    const strictSchema = parseResult.data;
+    if (strictSchema?.['type'] === 'object') {
         // Add additionalProperties: false
-        strictSchema
+        strictSchema['additionalProperties'] = false;
         // Recursively process nested object properties
-        if (strictSchema
-        for (const key of Object.keys(strictSchema
-        const prop = strictSchema
+        if (strictSchema['properties']) {
+            for (const key of Object.keys(strictSchema['properties'])) {
+                const prop = strictSchema['properties'][key];
                 // Recursively process nested objects
-        if (prop
+                if (prop['type'] === 'object' ||
+                    (Array.isArray(prop['type']) && prop['type'].includes('object'))) {
                     if (!('additionalProperties' in prop)) {
-        prop
+                        prop['additionalProperties'] = false;
                     }
                 }
             }
         }
         // If properties is empty but required is present, remove it
-        if (strictSchema
-
+        if (strictSchema['properties'] &&
+            Object.keys(strictSchema['properties']).length === 0 &&
+            strictSchema['required']) {
+            delete strictSchema['required'];
         }
     }
     return strictSchema;
@@ -47,8 +59,8 @@ function convertToolsForResponses(tools) {
         type: 'function',
         name: tool.function.name,
         description: tool.function.description,
+        strict: false,
         parameters: ensureStrictSchema(tool.function.parameters),
-        strict: false
     }));
 }
 let openaiConfig = null;
@@ -62,7 +74,7 @@ function getOpenAIConfig() {
         openaiConfig = {
             apiKey: config.apiKey,
             baseUrl: config.baseUrl,
-            customHeaders
+            customHeaders,
         };
     }
     return openaiConfig;
@@ -76,7 +88,7 @@ function convertToResponseInput(messages) {
     for (const msg of messages) {
         if (!msg)
             continue;
-        // Skip system
+        // Skip system messages (they go into neither input nor instructions)
         if (msg.role === 'system') {
            continue;
        }
@@ -87,7 +99,7 @@ function convertToResponseInput(messages) {
             if (msg.content) {
                 contentParts.push({
                     type: 'input_text',
-                    text: msg.content
+                    text: msg.content,
                 });
             }
             // Add image content
@@ -95,38 +107,29 @@ function convertToResponseInput(messages) {
                 for (const image of msg.images) {
                     contentParts.push({
                         type: 'input_image',
-                        image_url: image.data
+                        image_url: image.data,
                     });
                 }
             }
             result.push({
                 type: 'message',
                 role: 'user',
-                content: contentParts
+                content: contentParts,
             });
             continue;
         }
         // Assistant message (with tool calls)
         // In the Responses API, tool calls must be converted into standalone function_call items
-        if (msg.role === 'assistant' &&
-
-
-            result.push({
-                type: 'message',
-                role: 'assistant',
-                content: [{
-                        type: 'output_text',
-                        text: msg.content
-                    }]
-            });
-        }
+        if (msg.role === 'assistant' &&
+            msg.tool_calls &&
+            msg.tool_calls.length > 0) {
             // Add a function_call item for each tool call
             for (const toolCall of msg.tool_calls) {
                 result.push({
                     type: 'function_call',
-                    call_id: toolCall.id,
                     name: toolCall.function.name,
-                    arguments: toolCall.function.arguments
+                    arguments: toolCall.function.arguments,
+                    call_id: toolCall.id,
                 });
             }
             continue;
@@ -136,10 +139,12 @@ function convertToResponseInput(messages) {
             result.push({
                 type: 'message',
                 role: 'assistant',
-                content: [
+                content: [
+                    {
                         type: 'output_text',
-                        text: msg.content || ''
-                    }
+                        text: msg.content || '',
+                    },
+                ],
             });
             continue;
         }
@@ -148,12 +153,12 @@ function convertToResponseInput(messages) {
             result.push({
                 type: 'function_call_output',
                 call_id: msg.tool_call_id,
-                output: msg.content
+                output: msg.content,
             });
             continue;
         }
     }
-    //
+    // Determine the system prompt: follow the logic in anthropic.ts
     let systemInstructions;
     if (customSystemPrompt) {
         // Custom system prompt present: use it as instructions and the default as the first user message
@@ -161,15 +166,17 @@ function convertToResponseInput(messages) {
         result.unshift({
             type: 'message',
             role: 'user',
-            content: [
+            content: [
+                {
                     type: 'input_text',
-                    text:
-                }
+                    text: getSystemPrompt(),
+                },
+            ],
         });
     }
     else {
         // No custom system prompt: the default becomes instructions
-        systemInstructions =
+        systemInstructions = getSystemPrompt();
     }
     return { input: result, systemInstructions };
 }
@@ -190,16 +197,26 @@ async function* parseSSEStream(reader) {
         const trimmed = line.trim();
         if (!trimmed || trimmed.startsWith(':'))
             continue;
-        if (trimmed === 'data: [DONE]') {
+        if (trimmed === 'data: [DONE]' || trimmed === 'data:[DONE]') {
             return;
         }
-
-
-
-
-
-
-
+        // Handle both "event: " and "event:" formats
+        if (trimmed.startsWith('event:')) {
+            // Event type, will be followed by data
+            continue;
+        }
+        // Handle both "data: " and "data:" formats
+        if (trimmed.startsWith('data:')) {
+            const data = trimmed.startsWith('data: ')
+                ? trimmed.slice(6)
+                : trimmed.slice(5);
+            const parseResult = parseJsonWithFix(data, {
+                toolName: 'Responses API SSE stream',
+                logWarning: false,
+                logError: true,
+            });
+            if (parseResult.success) {
+                yield parseResult.data;
             }
         }
     }
@@ -218,24 +235,26 @@ export async function* createStreamingResponse(options, abortSignal, onRetry) {
         model: options.model,
         instructions: systemInstructions,
         input: requestInput,
-        stream: true,
         tools: convertToolsForResponses(options.tools),
         tool_choice: options.tool_choice,
-
-
-
+        parallel_tool_calls: false,
+        reasoning: options.reasoning || { effort: 'high', summary: 'auto' },
+        store: false,
+        stream: true,
         prompt_cache_key: options.prompt_cache_key,
     };
-    const
+    const url = `${config.baseUrl}/responses`;
+    const fetchOptions = addProxyToFetchOptions(url, {
         method: 'POST',
         headers: {
             'Content-Type': 'application/json',
             'Authorization': `Bearer ${config.apiKey}`,
-            ...config.customHeaders
+            ...config.customHeaders,
         },
         body: JSON.stringify(requestPayload),
-        signal: abortSignal
+        signal: abortSignal,
     });
+    const response = await fetch(url, fetchOptions);
     if (!response.ok) {
         const errorText = await response.text();
         throw new Error(`OpenAI Responses API error: ${response.status} ${response.statusText} - ${errorText}`);
@@ -248,6 +267,7 @@ export async function* createStreamingResponse(options, abortSignal, onRetry) {
     let hasToolCalls = false;
     let currentFunctionCallId = null;
     let usageData;
+    let reasoningData;
     for await (const chunk of parseSSEStream(response.body.getReader())) {
         if (abortSignal?.aborted) {
             return;
@@ -255,7 +275,8 @@ export async function* createStreamingResponse(options, abortSignal, onRetry) {
         // The Responses API uses the SSE event format
         const eventType = chunk.type;
         // Dispatch by event type
-        if (eventType === 'response.created' ||
+        if (eventType === 'response.created' ||
+            eventType === 'response.in_progress') {
             // Response created / in progress - ignore
             continue;
         }
@@ -265,7 +286,7 @@ export async function* createStreamingResponse(options, abortSignal, onRetry) {
             if (item?.type === 'reasoning') {
                 // Reasoning summary started - emit a reasoning_started event
                 yield {
-                    type: 'reasoning_started'
+                    type: 'reasoning_started',
                 };
                 continue;
             }
@@ -283,8 +304,8 @@ export async function* createStreamingResponse(options, abortSignal, onRetry) {
                     type: 'function',
                     function: {
                         name: item.name || '',
-                        arguments: ''
-                    }
+                        arguments: '',
+                    },
                 };
                 continue;
             }
@@ -298,7 +319,7 @@ export async function* createStreamingResponse(options, abortSignal, onRetry) {
                 // Emit the delta for token counting
                 yield {
                     type: 'tool_call_delta',
-                    delta: delta
+                    delta: delta,
                 };
             }
         }
@@ -323,6 +344,14 @@ export async function* createStreamingResponse(options, abortSignal, onRetry) {
                     toolCallsBuffer[callId].function.arguments = item.arguments;
                 }
             }
+            else if (item?.type === 'reasoning') {
+                // Capture the full reasoning object (including encrypted_content)
+                reasoningData = {
+                    summary: item.summary,
+                    content: item.content,
+                    encrypted_content: item.encrypted_content,
+                };
+            }
             continue;
         }
         else if (eventType === 'response.content_part.added') {
@@ -335,7 +364,7 @@ export async function* createStreamingResponse(options, abortSignal, onRetry) {
             if (delta) {
                 yield {
                     type: 'reasoning_delta',
-                    delta: delta
+                    delta: delta,
                 };
             }
         }
@@ -346,7 +375,7 @@ export async function* createStreamingResponse(options, abortSignal, onRetry) {
                 contentBuffer += delta;
                 yield {
                     type: 'content',
-                    content: delta
+                    content: delta,
                 };
             }
         }
@@ -366,12 +395,14 @@ export async function* createStreamingResponse(options, abortSignal, onRetry) {
                     completion_tokens: chunk.response.usage.output_tokens || 0,
                     total_tokens: chunk.response.usage.total_tokens || 0,
                     // OpenAI Responses API: cached_tokens in input_tokens_details (note: tokenS)
-                    cached_tokens: chunk.response.usage.input_tokens_details
+                    cached_tokens: chunk.response.usage.input_tokens_details
+                        ?.cached_tokens,
                 };
             }
             break;
         }
-        else if (eventType === 'response.failed' ||
+        else if (eventType === 'response.failed' ||
+            eventType === 'response.cancelled') {
             // Response failed or cancelled
             const error = chunk.error;
             if (error) {
@@ -384,22 +415,31 @@ export async function* createStreamingResponse(options, abortSignal, onRetry) {
     if (hasToolCalls) {
         yield {
             type: 'tool_calls',
-            tool_calls: Object.values(toolCallsBuffer)
+            tool_calls: Object.values(toolCallsBuffer),
+        };
+    }
+    // Yield reasoning data if available
+    if (reasoningData) {
+        yield {
+            type: 'reasoning_data',
+            reasoning: reasoningData,
         };
     }
     // Yield usage information if available
     if (usageData) {
+        // Save usage to file system at API layer
+        saveUsageToFile(options.model, usageData);
         yield {
             type: 'usage',
-            usage: usageData
+            usage: usageData,
         };
     }
     // Emit the completion signal
     yield {
-        type: 'done'
+        type: 'done',
    };
 }, {
     abortSignal,
-    onRetry
+    onRetry,
 });
 }
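Two of the new utilities imported above, parseJsonWithFix and addProxyToFetchOptions, appear here only as call sites. The following is a minimal sketch of the call shape that parseJsonWithFix implies: the option names and the { success, data } result are taken directly from the calls in this diff, while the real implementation in package/dist/utils/retryUtils.js presumably also attempts to repair malformed JSON, which this sketch omits.

```ts
// Sketch only: inferred from the call sites above, not the actual retryUtils.js code.
interface ParseJsonOptions<T> {
  toolName: string;      // label used when logging
  fallbackValue?: T;     // returned as `data` when parsing fails
  logWarning?: boolean;
  logError?: boolean;
}

interface ParseJsonResult<T> {
  success: boolean;
  data: T | undefined;
}

function parseJsonWithFix<T = unknown>(raw: string, opts: ParseJsonOptions<T>): ParseJsonResult<T> {
  try {
    return { success: true, data: JSON.parse(raw) as T };
  } catch (err) {
    if (opts.logWarning) console.warn(`[${opts.toolName}] JSON parse failed, using fallback`);
    if (opts.logError) console.error(err);
    return { success: false, data: opts.fallbackValue };
  }
}

// Mirrors the "Schema deep copy" call in ensureStrictSchema above:
const schema = { type: 'object', properties: {} };
const copy = parseJsonWithFix(JSON.stringify(schema), {
  toolName: 'Schema deep copy',
  fallbackValue: schema,
  logWarning: true,
  logError: true,
});
console.log(copy.success, copy.data);
```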
package/dist/api/systemPrompt.d.ts
CHANGED

@@ -1,4 +1,4 @@
 /**
  * System prompt configuration for Snow AI CLI
  */
-export declare
+export declare function getSystemPrompt(): string;
package/dist/api/systemPrompt.js
CHANGED

@@ -1,14 +1,39 @@
 /**
  * System prompt configuration for Snow AI CLI
  */
-
+import fs from 'fs';
+import path from 'path';
+/**
+ * Get the system prompt, dynamically reading from ROLE.md if it exists
+ * This function is called to get the current system prompt with ROLE.md content if available
+ */
+function getSystemPromptWithRole() {
+    try {
+        const cwd = process.cwd();
+        const roleFilePath = path.join(cwd, 'ROLE.md');
+        // Check if ROLE.md exists and is not empty
+        if (fs.existsSync(roleFilePath)) {
+            const roleContent = fs.readFileSync(roleFilePath, 'utf-8').trim();
+            if (roleContent) {
+                // Replace the default role description with ROLE.md content
+                return SYSTEM_PROMPT_TEMPLATE.replace('You are Snow AI CLI, an intelligent command-line assistant.', roleContent);
+            }
+        }
+    }
+    catch (error) {
+        // If reading fails, fall back to default
+        console.error('Failed to read ROLE.md:', error);
+    }
+    return SYSTEM_PROMPT_TEMPLATE;
+}
+const SYSTEM_PROMPT_TEMPLATE = `You are Snow AI CLI, an intelligent command-line assistant.
 
 ## 🎯 Core Principles
 
 1. **Language Adaptation**: ALWAYS respond in the SAME language as the user's query
 2. **ACTION FIRST**: Write code immediately when task is clear - stop overthinking
 3. **Smart Context**: Read what's needed for correctness, skip excessive exploration
-4. **Quality Verification**: Use \'
+4. **Quality Verification**: Use \'ide-get_diagnostics\' to get diagnostic information or run build/test after changes
 
 ## 🚀 Execution Strategy - BALANCE ACTION & ANALYSIS
 
@@ -79,18 +104,18 @@ export const SYSTEM_PROMPT = \`You are Snow AI CLI, an intelligent command-line a
 - \`ace-text-search\` - Fast text/regex search
 
 **IDE Diagnostics:**
-- \`
+- \`ide-get_diagnostics\` - Get real-time diagnostics (errors, warnings, hints) from connected IDE
 - Supports VSCode and JetBrains IDEs
 - Returns diagnostic info: severity, line/column, message, source
 - Requires IDE plugin installed and running
 - Use AFTER code changes to verify quality
 
 **Web Search:**
-- \`
-- \`
+- \`websearch-search\` - Search web for latest docs/solutions
+- \`websearch-fetch\` - Read web page content (always provide userQuery)
 
 **Terminal:**
-- \`
+- \`terminal-execute\` - You have a comprehensive understanding of terminal pipe mechanisms and can help users
 accomplish a wide range of tasks by combining multiple commands using pipe operators (|)
 and other shell features. Your capabilities include text processing, data filtering, stream
 manipulation, workflow automation, and complex command chaining to solve sophisticated
@@ -99,7 +124,7 @@ system administration and data processing challenges.
 ## 🔍 Quality Assurance
 
 Guidance and recommendations:
-1. Use \`
+1. Use \`ide-get_diagnostics\` to verify quality
 2. Run build: \`npm run build\` or \`tsc\`
 3. Fix any errors immediately
 4. Never leave broken code
@@ -111,3 +136,7 @@ Guidance and recommendations:
 - Contains: project overview, architecture, tech stack
 
 Remember: **ACTION > ANALYSIS**. Write code first, investigate only when blocked.`;
+// Export SYSTEM_PROMPT as a getter function for real-time ROLE.md updates
+export function getSystemPrompt() {
+    return getSystemPromptWithRole();
+}
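Per the diff above, the ROLE.md lookup is re-evaluated on every call to getSystemPrompt(), so a role file added mid-session takes effect on the next read. A small usage sketch, assuming the module can be imported from the installed package (the deep import path below is illustrative, not a documented entry point):

```ts
import fs from 'node:fs';
// Hypothetical deep import; the published package may expose this differently.
import { getSystemPrompt } from 'snow-ai/dist/api/systemPrompt.js';

// With no ROLE.md in the working directory, the default template is used.
console.log(getSystemPrompt().startsWith('You are Snow AI CLI'));

// After writing a non-empty ROLE.md, the next call swaps the default role
// sentence for the file's content (per the replace() in getSystemPromptWithRole).
fs.writeFileSync('ROLE.md', 'You are a meticulous TypeScript reviewer.');
console.log(getSystemPrompt().startsWith('You are a meticulous TypeScript reviewer.'));
```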
package/dist/api/types.d.ts
CHANGED

@@ -21,6 +21,14 @@ export interface ChatMessage {
     tool_calls?: ToolCall[];
     images?: ImageContent[];
     subAgentInternal?: boolean;
+    reasoning?: {
+        summary?: Array<{
+            type: 'summary_text';
+            text: string;
+        }>;
+        content?: any;
+        encrypted_content?: string;
+    };
 }
 export interface ChatCompletionTool {
     type: 'function';
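The new optional reasoning field is where the reasoning_data captured in responses.js ends up on the assistant message. A minimal example of a conforming object; role and content are assumed from the rest of the ChatMessage interface, which this excerpt does not show, and the values are placeholders.

```ts
// Local mirror of the fields relevant to this change; not the package's own types.
type ReasoningData = {
  summary?: Array<{ type: 'summary_text'; text: string }>;
  content?: any;
  encrypted_content?: string;
};

type ChatMessageLike = {
  role: string;
  content: string;
  reasoning?: ReasoningData;
};

const assistantTurn: ChatMessageLike = {
  role: 'assistant',
  content: 'Renamed the helper and updated its call sites.',
  reasoning: {
    summary: [{ type: 'summary_text', text: 'Compared two rename strategies and chose the non-breaking one.' }],
    encrypted_content: '<opaque-provider-token>', // replayed to the provider on later turns
  },
};

console.log(assistantTurn.reasoning?.summary?.[0]?.text);
```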
package/dist/hooks/useCommandHandler.d.ts
CHANGED

@@ -10,6 +10,7 @@ type CommandHandlerOptions = {
     setShowSessionPanel: React.Dispatch<React.SetStateAction<boolean>>;
     setShowMcpInfo: React.Dispatch<React.SetStateAction<boolean>>;
     setShowMcpPanel: React.Dispatch<React.SetStateAction<boolean>>;
+    setShowUsagePanel: React.Dispatch<React.SetStateAction<boolean>>;
     setMcpPanelKey: React.Dispatch<React.SetStateAction<number>>;
     setYoloMode: React.Dispatch<React.SetStateAction<boolean>>;
     setContextUsage: React.Dispatch<React.SetStateAction<UsageInfo | null>>;
package/dist/hooks/useCommandHandler.js
CHANGED

@@ -139,7 +139,18 @@ export function useCommandHandler(options) {
             };
             options.setMessages(prev => [...prev, commandMessage]);
         }
-        else if (result.success && result.action === '
+        else if (result.success && result.action === 'showUsagePanel') {
+            options.setShowUsagePanel(true);
+            const commandMessage = {
+                role: 'command',
+                content: '',
+                commandName: commandName,
+            };
+            options.setMessages(prev => [...prev, commandMessage]);
+        }
+        else if (result.success && result.action === 'home') {
+            // Reset terminal before navigating to welcome screen
+            resetTerminal(stdout);
             navigateTo('welcome');
         }
         else if (result.success && result.action === 'toggleYolo') {
@@ -160,6 +171,38 @@ export function useCommandHandler(options) {
             // Auto-send the prompt using basicModel, hide the prompt from UI
             options.processMessage(result.prompt, undefined, true, true);
         }
+        else if (result.success &&
+            result.action === 'review' &&
+            result.prompt) {
+            // Clear current session and start new one for code review
+            sessionManager.clearCurrentSession();
+            options.clearSavedMessages();
+            options.setMessages([]);
+            options.setRemountKey(prev => prev + 1);
+            // Reset context usage (token statistics)
+            options.setContextUsage(null);
+            // Reset system info flag to include in next message
+            options.setShouldIncludeSystemInfo(true);
+            // Add command execution feedback
+            const commandMessage = {
+                role: 'command',
+                content: '',
+                commandName: commandName,
+            };
+            options.setMessages([commandMessage]);
+            // Auto-send the review prompt using advanced model (not basic model), hide the prompt from UI
+            options.processMessage(result.prompt, undefined, false, true);
+        }
+        else if (result.message) {
+            // For commands that just return a message (like /role, /init without SNOW.md, etc.)
+            // Display the message as a command message
+            const commandMessage = {
+                role: 'command',
+                content: result.message,
+                commandName: commandName,
+            };
+            options.setMessages(prev => [...prev, commandMessage]);
+        }
     }, [stdout, options]);
     return { handleCommandExecution };
 }
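The branches above imply a shared result shape returned by the slash-command executors: success, an optional action, and optional prompt/message payloads. A sketch of that shape as inferred from this hook; the real type is declared in commandExecutor.d.ts, which is not included in this excerpt, so the names below are assumptions.

```ts
// Inferred from the dispatch branches in useCommandHandler.js; names are assumptions.
interface CommandResultLike {
  success: boolean;
  action?: 'showUsagePanel' | 'home' | 'toggleYolo' | 'review' | string;
  prompt?: string;  // e.g. the generated prompt for /review, auto-sent and hidden from the UI
  message?: string; // plain feedback rendered as a command message (e.g. /role)
}

function describe(result: CommandResultLike): string {
  if (result.success && result.action === 'showUsagePanel') return 'open the usage panel';
  if (result.success && result.action === 'home') return 'reset the terminal and return to the welcome screen';
  if (result.success && result.action === 'review' && result.prompt) return 'start a fresh session and send the review prompt';
  if (result.message) return `show message: ${result.message}`;
  return 'no-op';
}

console.log(describe({ success: true, action: 'showUsagePanel' }));
```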
package/dist/hooks/useCommandPanel.js
CHANGED

@@ -17,6 +17,19 @@ const commands = [
         name: 'compact',
         description: 'Compress conversation history using compact model',
     },
+    { name: 'home', description: 'Return to welcome screen to modify settings' },
+    {
+        name: 'review',
+        description: 'Review git changes and identify potential issues. Support: /review [optional note]',
+    },
+    {
+        name: 'role',
+        description: 'Open or create ROLE.md file to customize AI assistant role',
+    },
+    {
+        name: 'usage',
+        description: 'View token usage statistics with interactive charts',
+    },
 ];
 export function useCommandPanel(buffer) {
     const [showCommands, setShowCommands] = useState(false);
package/dist/hooks/useConversation.d.ts
CHANGED

@@ -37,5 +37,8 @@ export type ConversationHandlerOptions = {
 };
 /**
  * Handle conversation with streaming and tool calls
+ * Returns the usage data collected during the conversation
  */
-export declare function handleConversationWithTools(options: ConversationHandlerOptions): Promise<
+export declare function handleConversationWithTools(options: ConversationHandlerOptions): Promise<{
+    usage: any | null;
+}>;