snow-ai 0.3.12 → 0.3.13
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/agents/compactAgent.js +6 -5
- package/dist/agents/reviewAgent.js +1 -0
- package/dist/agents/summaryAgent.js +1 -0
- package/dist/api/anthropic.d.ts +7 -1
- package/dist/api/anthropic.js +74 -7
- package/dist/api/chat.js +1 -1
- package/dist/api/responses.js +2 -2
- package/dist/api/systemPrompt.js +4 -5
- package/dist/api/types.d.ts +5 -0
- package/dist/hooks/useConversation.js +13 -3
- package/dist/ui/pages/ChatScreen.js +6 -2
- package/dist/ui/pages/ConfigScreen.js +245 -110
- package/dist/ui/pages/HeadlessModeScreen.js +3 -1
- package/dist/utils/apiConfig.d.ts +5 -0
- package/dist/utils/apiConfig.js +9 -3
- package/dist/utils/contextCompressor.js +7 -2
- package/dist/utils/subAgentExecutor.js +2 -1
- package/package.json +1 -1
package/dist/agents/compactAgent.js
CHANGED

@@ -102,6 +102,7 @@ export class CompactAgent {
     messages,
     max_tokens: 4096,
     includeBuiltinSystemPrompt: false, // built-in system prompt not needed
+    disableThinking: true, // agents do not use Extended Thinking
 }, abortSignal);
 break;
 case 'gemini':

@@ -192,14 +193,14 @@ export class CompactAgent {
     stack: streamError.stack,
     name: streamError.name,
     chunkCount,
-    contentLength: completeContent.length
+    contentLength: completeContent.length,
 });
 }
 else {
 logger.error('Compact agent: Unknown streaming error:', {
     error: streamError,
     chunkCount,
-    contentLength: completeContent.length
+    contentLength: completeContent.length,
 });
 }
 throw streamError;

@@ -220,14 +221,14 @@ export class CompactAgent {
     stack: error.stack,
     name: error.name,
     requestMethod: this.requestMethod,
-    modelName: this.modelName
+    modelName: this.modelName,
 });
 }
 else {
 logger.error('Compact agent: Unknown API error:', {
     error,
     requestMethod: this.requestMethod,
-    modelName: this.modelName
+    modelName: this.modelName,
 });
 }
 throw error;

@@ -291,7 +292,7 @@ Provide the extracted content below:`;
 logger.warn('Compact agent extraction failed, using original content:', {
     error: error.message,
     stack: error.stack,
-    name: error.name
+    name: error.name,
 });
 }
 else {
package/dist/api/anthropic.d.ts
CHANGED
@@ -7,9 +7,10 @@ export interface AnthropicOptions {
     tools?: ChatCompletionTool[];
     sessionId?: string;
     includeBuiltinSystemPrompt?: boolean;
+    disableThinking?: boolean;
 }
 export interface AnthropicStreamChunk {
-    type: 'content' | 'tool_calls' | 'tool_call_delta' | 'done' | 'usage';
+    type: 'content' | 'tool_calls' | 'tool_call_delta' | 'done' | 'usage' | 'reasoning_started' | 'reasoning_delta';
     content?: string;
     tool_calls?: Array<{
         id: string;

@@ -21,6 +22,11 @@ export interface AnthropicStreamChunk {
     }>;
     delta?: string;
     usage?: UsageInfo;
+    thinking?: {
+        type: 'thinking';
+        thinking: string;
+        signature?: string;
+    };
 }
 export interface AnthropicTool {
     name: string;
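
The widened chunk union adds two streaming events (reasoning_started, reasoning_delta) plus an optional thinking block that rides on the final chunk. A minimal consumer sketch, assuming only the declaration above; the handling and logging side is illustrative, not code from the package:

// Hedged sketch of a consumer for the widened AnthropicStreamChunk union.
interface ThinkingBlock { type: 'thinking'; thinking: string; signature?: string; }
interface StreamChunk {
    type: 'content' | 'tool_calls' | 'tool_call_delta' | 'done' | 'usage' | 'reasoning_started' | 'reasoning_delta';
    content?: string;
    delta?: string;
    thinking?: ThinkingBlock;
}
async function consumeAnthropicStream(stream: AsyncIterable<StreamChunk>): Promise<ThinkingBlock | undefined> {
    let thinking: ThinkingBlock | undefined;
    for await (const chunk of stream) {
        if (chunk.type === 'reasoning_started') console.error('[thinking started]');
        else if (chunk.type === 'reasoning_delta' && chunk.delta) process.stderr.write(chunk.delta); // thinking text
        else if (chunk.type === 'content' && chunk.content) process.stdout.write(chunk.content);     // answer text
        else if (chunk.type === 'done') thinking = chunk.thinking; // complete block (with signature) arrives here
    }
    return thinking; // callers can attach this to the assistant message for replay
}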
package/dist/api/anthropic.js
CHANGED
@@ -20,6 +20,7 @@ function getAnthropicConfig() {
         : 'https://api.anthropic.com/v1',
     customHeaders,
     anthropicBeta: config.anthropicBeta,
+    thinking: config.thinking,
 };
 }
 return anthropicConfig;

@@ -124,6 +125,11 @@ function convertToAnthropicMessages(messages, includeBuiltinSystemPrompt = true)
     msg.tool_calls &&
     msg.tool_calls.length > 0) {
     const content = [];
+    // When thinking is enabled, thinking block must come first
+    if (msg.thinking) {
+        // Use the complete thinking block object (includes signature)
+        content.push(msg.thinking);
+    }
     if (msg.content) {
         content.push({
             type: 'text',

@@ -145,10 +151,29 @@ function convertToAnthropicMessages(messages, includeBuiltinSystemPrompt = true)
     continue;
 }
 if (msg.role === 'user' || msg.role === 'assistant') {
-    anthropicMessages.push({
-        role: msg.role,
-        content: msg.content,
-    });
+    // For assistant messages with thinking, convert to structured format
+    if (msg.role === 'assistant' && msg.thinking) {
+        const content = [];
+        // Thinking block must come first - use complete block object (includes signature)
+        content.push(msg.thinking);
+        // Then text content
+        if (msg.content) {
+            content.push({
+                type: 'text',
+                text: msg.content,
+            });
+        }
+        anthropicMessages.push({
+            role: 'assistant',
+            content,
+        });
+    }
+    else {
+        anthropicMessages.push({
+            role: msg.role,
+            content: msg.content,
+        });
+    }
 }
 }
 // If a custom system prompt is configured (highest priority, always added)

@@ -266,7 +291,6 @@ export async function* createStreamingAnthropicCompletion(options, abortSignal,
 const requestBody = {
     model: options.model,
     max_tokens: options.max_tokens || 4096,
-    temperature: options.temperature ?? 0.7,
     system,
     messages,
     tools: convertToolsToAnthropic(options.tools),

@@ -275,11 +299,18 @@ export async function* createStreamingAnthropicCompletion(options, abortSignal,
     },
     stream: true,
 };
+// Add thinking configuration if enabled and not explicitly disabled
+// When thinking is enabled, temperature must be 1
+// Note: agents and other internal tools should set disableThinking=true
+if (config.thinking && !options.disableThinking) {
+    requestBody.thinking = config.thinking;
+    requestBody.temperature = 1;
+}
 // Prepare headers
 const headers = {
     'Content-Type': 'application/json',
     'x-api-key': config.apiKey,
-
+    Authorization: `Bearer ${config.apiKey}`,
     'anthropic-version': '2023-06-01',
     ...config.customHeaders,
 };

@@ -305,10 +336,13 @@ export async function* createStreamingAnthropicCompletion(options, abortSignal,
     throw new Error('No response body from Anthropic API');
 }
 let contentBuffer = '';
+let thinkingTextBuffer = ''; // Accumulate thinking text content
+let thinkingSignature = ''; // Accumulate thinking signature
 let toolCallsBuffer = new Map();
 let hasToolCalls = false;
 let usageData;
 let blockIndexToId = new Map();
+let blockIndexToType = new Map(); // Track block types (text, thinking, tool_use)
 let completedToolBlocks = new Set(); // Track which tool blocks have finished streaming
 for await (const event of parseSSEStream(response.body.getReader())) {
     if (abortSignal?.aborted) {

@@ -316,9 +350,11 @@ export async function* createStreamingAnthropicCompletion(options, abortSignal,
 }
 if (event.type === 'content_block_start') {
     const block = event.content_block;
+    const blockIndex = event.index;
+    // Track block type for later reference
+    blockIndexToType.set(blockIndex, block.type);
     if (block.type === 'tool_use') {
         hasToolCalls = true;
-        const blockIndex = event.index;
         blockIndexToId.set(blockIndex, block.id);
         toolCallsBuffer.set(block.id, {
             id: block.id,

@@ -333,6 +369,13 @@ export async function* createStreamingAnthropicCompletion(options, abortSignal,
         delta: block.name,
     };
 }
+// Handle thinking block start (Extended Thinking feature)
+else if (block.type === 'thinking') {
+    // Thinking block started - emit reasoning_started event
+    yield {
+        type: 'reasoning_started',
+    };
+}
 }
 else if (event.type === 'content_block_delta') {
     const delta = event.delta;

@@ -344,6 +387,21 @@ export async function* createStreamingAnthropicCompletion(options, abortSignal,
     content: text,
 };
 }
+// Handle thinking_delta (Extended Thinking feature)
+// Emit reasoning_delta event for thinking content
+if (delta.type === 'thinking_delta') {
+    const thinkingText = delta.thinking;
+    thinkingTextBuffer += thinkingText; // Accumulate thinking text
+    yield {
+        type: 'reasoning_delta',
+        delta: thinkingText,
+    };
+}
+// Handle signature_delta (Extended Thinking feature)
+// Signature is required for thinking blocks
+if (delta.type === 'signature_delta') {
+    thinkingSignature += delta.signature; // Accumulate signature
+}
 if (delta.type === 'input_json_delta') {
     const jsonDelta = delta.partial_json;
     const blockIndex = event.index;

@@ -457,8 +515,17 @@ export async function* createStreamingAnthropicCompletion(options, abortSignal,
     usage: usageData,
 };
 }
+// Return complete thinking block with signature if thinking content exists
+const thinkingBlock = thinkingTextBuffer
+    ? {
+        type: 'thinking',
+        thinking: thinkingTextBuffer,
+        signature: thinkingSignature || undefined,
+    }
+    : undefined;
 yield {
     type: 'done',
+    thinking: thinkingBlock,
 };
 }, {
     abortSignal,
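
With these changes, a request built while config.thinking is set (and the caller has not passed disableThinking) drops the old default temperature and instead pins temperature to 1 next to the thinking budget, as the inline comments note. A rough sketch of the resulting request body, assuming a 10000-token budget; the model name, system text and message are placeholders, not values from the package:

// Illustrative request body after the thinking branch has run; field names follow the diff.
const requestBody: Record<string, unknown> = {
    model: 'claude-sonnet-4-20250514',            // placeholder
    max_tokens: 4096,
    system: 'You are Snow AI CLI, ...',           // placeholder
    messages: [{ role: 'user', content: 'Refactor this function' }],
    tools: [],                                    // converted via convertToolsToAnthropic in the package
    stream: true,
    thinking: { type: 'enabled', budget_tokens: 10000 }, // copied from config.thinking
    temperature: 1,                               // must be 1 when extended thinking is enabled
};
console.log(JSON.stringify(requestBody, null, 2));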
package/dist/api/chat.js
CHANGED
@@ -185,7 +185,7 @@ export async function* createStreamingChatCompletion(options, abortSignal, onRet
     method: 'POST',
     headers: {
         'Content-Type': 'application/json',
-
+        Authorization: `Bearer ${config.apiKey}`,
         ...config.customHeaders,
     },
     body: JSON.stringify(requestBody),
package/dist/api/responses.js
CHANGED
@@ -256,7 +256,7 @@ export async function* createStreamingResponse(options, abortSignal, onRetry) {
     method: 'POST',
     headers: {
         'Content-Type': 'application/json',
-
+        Authorization: `Bearer ${config.apiKey}`,
         ...config.customHeaders,
     },
     body: JSON.stringify(requestPayload),

@@ -442,7 +442,7 @@ export async function* createStreamingResponse(options, abortSignal, onRetry) {
     usage: usageData,
 };
 }
-// Send the completion signal
+// Send the completion signal - For Responses API, thinking content is in reasoning object, not separate thinking field
 yield {
     type: 'done',
 };
package/dist/api/systemPrompt.js
CHANGED
@@ -33,7 +33,7 @@ const SYSTEM_PROMPT_TEMPLATE = `You are Snow AI CLI, an intelligent command-line
 1. **Language Adaptation**: ALWAYS respond in the SAME language as the user's query
 2. **ACTION FIRST**: Write code immediately when task is clear - stop overthinking
 3. **Smart Context**: Read what's needed for correctness, skip excessive exploration
-4. **Quality Verification**:
+4. **Quality Verification**: run build/test after changes

 ## 🚀 Execution Strategy - BALANCE ACTION & ANALYSIS


@@ -124,10 +124,9 @@ system administration and data processing challenges.
 ## 🔍 Quality Assurance

 Guidance and recommendations:
-1.
-2.
-3.
-4. Never leave broken code
+1. Run build: \`npm run build\` or \`tsc\`
+2. Fix any errors immediately
+3. Never leave broken code

 ## 📚 Project Context (SNOW.md)

package/dist/api/types.d.ts
CHANGED
|
package/dist/hooks/useConversation.js
CHANGED

@@ -114,6 +114,7 @@ export async function handleConversationWithTools(options) {
 let streamedContent = '';
 let receivedToolCalls;
 let receivedReasoning;
+let receivedThinking; // Accumulate thinking content from all platforms
 // Stream AI response - choose API based on config
 let toolCallAccumulator = ''; // Accumulate tool call deltas for token counting
 let reasoningAccumulator = ''; // Accumulate reasoning summary deltas for token counting (Responses API only)

@@ -194,6 +195,8 @@ export async function handleConversationWithTools(options) {
 }
 else if (chunk.type === 'tool_call_delta' && chunk.delta) {
     // Accumulate tool call deltas and update token count in real-time
+    // When tool calls start, reasoning is done (OpenAI generally doesn't output text content during tool calls)
+    setIsReasoning?.(false);
     toolCallAccumulator += chunk.delta;
     try {
         const tokens = encoder.encode(streamedContent + toolCallAccumulator + reasoningAccumulator);

@@ -222,6 +225,10 @@ export async function handleConversationWithTools(options) {
     // Capture reasoning data from Responses API
     receivedReasoning = chunk.reasoning;
 }
+else if (chunk.type === 'done' && chunk.thinking) {
+    // Capture thinking content from Anthropic only (includes signature)
+    receivedThinking = chunk.thinking;
+}
 else if (chunk.type === 'usage' && chunk.usage) {
     // Capture usage information both in state and locally
     setContextUsage(chunk.usage);

@@ -256,7 +263,8 @@ export async function handleConversationWithTools(options) {
 }
 if (chunk.usage.cached_tokens !== undefined) {
     accumulatedUsage.cached_tokens =
-        (accumulatedUsage.cached_tokens || 0) +
+        (accumulatedUsage.cached_tokens || 0) +
+            chunk.usage.cached_tokens;
 }
 }
 }

@@ -283,7 +291,8 @@ export async function handleConversationWithTools(options) {
         arguments: tc.function.arguments,
     },
 })),
-reasoning: receivedReasoning, // Include reasoning data for caching
+reasoning: receivedReasoning, // Include reasoning data for caching (Responses API)
+thinking: receivedThinking, // Include thinking content (Anthropic/OpenAI)
 };
 conversationMessages.push(assistantMessage);
 // Save assistant message with tool calls

@@ -819,7 +828,8 @@ export async function handleConversationWithTools(options) {
 const assistantMessage = {
     role: 'assistant',
     content: streamedContent.trim(),
-    reasoning: receivedReasoning, // Include reasoning data for caching
+    reasoning: receivedReasoning, // Include reasoning data for caching (Responses API)
+    thinking: receivedThinking, // Include thinking content (Anthropic/OpenAI)
 };
 conversationMessages.push(assistantMessage);
 saveMessage(assistantMessage).catch(error => {
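
After the stream ends, the assistant message pushed into conversationMessages now carries the captured thinking block next to the Responses-API reasoning field, so the next turn can replay it verbatim (the thinking block is re-emitted first when the message is converted back for Anthropic). A sketch of the stored shape; the content, thinking text and signature strings are placeholders:

// Hedged sketch of an assistant message after a turn with extended thinking.
const assistantMessage = {
    role: 'assistant',
    content: 'Here is the refactored function...',          // placeholder answer text
    reasoning: undefined,                                    // populated by the Responses API path instead
    thinking: {
        type: 'thinking',
        thinking: 'The user wants a smaller, pure function...', // placeholder thinking text
        signature: 'EqQBCk...',                              // placeholder signature value
    },
};
console.log(assistantMessage.thinking.signature);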
package/dist/ui/pages/ChatScreen.js
CHANGED

@@ -302,7 +302,9 @@ export default function ChatScreen({ skipWelcome }) {
 // Check if all tool results exist after this assistant message
 for (let j = i + 1; j < messages.length; j++) {
     const followMsg = messages[j];
-    if (followMsg &&
+    if (followMsg &&
+        followMsg.role === 'tool' &&
+        followMsg.tool_call_id) {
         toolCallIds.delete(followMsg.tool_call_id);
     }
 }

@@ -908,7 +910,9 @@ export default function ChatScreen({ skipWelcome }) {
 React.createElement(Text, { color: "gray", dimColor: true },
     React.createElement(ShimmerText, { text: streamingState.isReasoning
         ? 'Deep thinking...'
-        :
+        : streamingState.streamTokenCount > 0
+            ? 'Writing...'
+            : 'Thinking...' }),
     ' ',
     "(",
     formatElapsedTime(streamingState.elapsedSeconds),
package/dist/ui/pages/ConfigScreen.js
CHANGED

@@ -49,6 +49,8 @@ export default function ConfigScreen({ onBack, onSave, inlineMode = false, }) {
 const [apiKey, setApiKey] = useState('');
 const [requestMethod, setRequestMethod] = useState('chat');
 const [anthropicBeta, setAnthropicBeta] = useState(false);
+const [thinkingEnabled, setThinkingEnabled] = useState(false);
+const [thinkingBudgetTokens, setThinkingBudgetTokens] = useState(10000);
 // Model settings
 const [advancedModel, setAdvancedModel] = useState('');
 const [basicModel, setBasicModel] = useState('');

@@ -66,6 +68,8 @@ export default function ConfigScreen({ onBack, onSave, inlineMode = false, }) {
 const [manualInputMode, setManualInputMode] = useState(false);
 const [manualInputValue, setManualInputValue] = useState('');
 const [, forceUpdate] = useState(0);
+// Scrolling configuration
+const MAX_VISIBLE_FIELDS = 8;
 const requestMethodOptions = [
     {
         label: 'Chat Completions - Modern chat API (GPT-4, GPT-3.5-turbo)',

@@ -84,9 +88,45 @@ export default function ConfigScreen({ onBack, onSave, inlineMode = false, }) {
         value: 'anthropic',
     },
 ];
+// Get all available fields based on current request method
+const getAllFields = () => {
+    return [
+        'profile',
+        'baseUrl',
+        'apiKey',
+        'requestMethod',
+        ...(requestMethod === 'anthropic'
+            ? [
+                'anthropicBeta',
+                'thinkingEnabled',
+                'thinkingBudgetTokens',
+            ]
+            : []),
+        'advancedModel',
+        'basicModel',
+        'compactModelName',
+        'maxContextTokens',
+        'maxTokens',
+    ];
+};
+// Get current field index and total count
+const allFields = getAllFields();
+const currentFieldIndex = allFields.indexOf(currentField);
+const totalFields = allFields.length;
 useEffect(() => {
     loadProfilesAndConfig();
 }, []);
+// Auto-adjust currentField when requestMethod changes
+useEffect(() => {
+    // If requestMethod is not 'anthropic' and currentField is on Anthropic-specific fields,
+    // move to the next available field
+    if (requestMethod !== 'anthropic' &&
+        (currentField === 'anthropicBeta' ||
+            currentField === 'thinkingEnabled' ||
+            currentField === 'thinkingBudgetTokens')) {
+        setCurrentField('advancedModel');
+    }
+}, [requestMethod, currentField]);
 const loadProfilesAndConfig = () => {
     // Load profiles
     const loadedProfiles = getAllProfiles();

@@ -97,6 +137,8 @@ export default function ConfigScreen({ onBack, onSave, inlineMode = false, }) {
 setApiKey(config.apiKey);
 setRequestMethod(config.requestMethod || 'chat');
 setAnthropicBeta(config.anthropicBeta || false);
+setThinkingEnabled(config.thinking?.type === 'enabled' || false);
+setThinkingBudgetTokens(config.thinking?.budget_tokens || 10000);
 setAdvancedModel(config.advancedModel || '');
 setBasicModel(config.basicModel || '');
 setMaxContextTokens(config.maxContextTokens || 4000);

@@ -153,6 +195,8 @@ export default function ConfigScreen({ onBack, onSave, inlineMode = false, }) {
     return maxContextTokens.toString();
 if (currentField === 'maxTokens')
     return maxTokens.toString();
+if (currentField === 'thinkingBudgetTokens')
+    return thinkingBudgetTokens.toString();
 if (currentField === 'compactModelName')
     return compactModelName;
 return '';

@@ -171,6 +215,9 @@ export default function ConfigScreen({ onBack, onSave, inlineMode = false, }) {
 apiKey,
 requestMethod,
 anthropicBeta,
+thinking: thinkingEnabled
+    ? { type: 'enabled', budget_tokens: thinkingBudgetTokens }
+    : undefined,
 advancedModel,
 basicModel,
 maxContextTokens,

@@ -248,6 +295,17 @@ export default function ConfigScreen({ onBack, onSave, inlineMode = false, }) {
     maxContextTokens,
     maxTokens,
 };
+// Save thinking configuration (always save to preserve settings)
+if (thinkingEnabled) {
+    config.thinking = {
+        type: 'enabled',
+        budget_tokens: thinkingBudgetTokens,
+    };
+}
+else {
+    // Explicitly set to undefined to clear it when disabled
+    config.thinking = undefined;
+}
 // Only save compactModel if modelName is provided (uses same baseUrl/apiKey)
 if (compactModelName) {
     config.compactModel = {

@@ -264,6 +322,9 @@ export default function ConfigScreen({ onBack, onSave, inlineMode = false, }) {
 apiKey,
 requestMethod,
 anthropicBeta,
+thinking: thinkingEnabled
+    ? { type: 'enabled', budget_tokens: thinkingBudgetTokens }
+    : undefined,
 advancedModel,
 basicModel,
 maxContextTokens,

@@ -286,6 +347,121 @@ export default function ConfigScreen({ onBack, onSave, inlineMode = false, }) {
     return false;
 }
 };
+// Helper function to render a single field
+const renderField = (field) => {
+    const isActive = field === currentField;
+    const isCurrentlyEditing = isEditing && isActive;
+    switch (field) {
+        case 'profile':
+            return (React.createElement(Box, { key: field, flexDirection: "column" },
+                React.createElement(Text, { color: isActive ? 'green' : 'white' },
+                    isActive ? '❯ ' : ' ',
+                    "Profile:"),
+                !isCurrentlyEditing && (React.createElement(Box, { marginLeft: 3 },
+                    React.createElement(Text, { color: "gray" }, profiles.find(p => p.name === activeProfile)?.displayName ||
+                        activeProfile)))));
+        case 'baseUrl':
+            return (React.createElement(Box, { key: field, flexDirection: "column" },
+                React.createElement(Text, { color: isActive ? 'green' : 'white' },
+                    isActive ? '❯ ' : ' ',
+                    "Base URL:"),
+                isCurrentlyEditing && (React.createElement(Box, { marginLeft: 3 },
+                    React.createElement(TextInput, { value: baseUrl, onChange: value => setBaseUrl(stripFocusArtifacts(value)), placeholder: "https://api.openai.com/v1" }))),
+                !isCurrentlyEditing && (React.createElement(Box, { marginLeft: 3 },
+                    React.createElement(Text, { color: "gray" }, baseUrl || 'Not set')))));
+        case 'apiKey':
+            return (React.createElement(Box, { key: field, flexDirection: "column" },
+                React.createElement(Text, { color: isActive ? 'green' : 'white' },
+                    isActive ? '❯ ' : ' ',
+                    "API Key:"),
+                isCurrentlyEditing && (React.createElement(Box, { marginLeft: 3 },
+                    React.createElement(TextInput, { value: apiKey, onChange: value => setApiKey(stripFocusArtifacts(value)), placeholder: "sk-...", mask: "*" }))),
+                !isCurrentlyEditing && (React.createElement(Box, { marginLeft: 3 },
+                    React.createElement(Text, { color: "gray" }, apiKey ? '*'.repeat(Math.min(apiKey.length, 20)) : 'Not set')))));
+        case 'requestMethod':
+            return (React.createElement(Box, { key: field, flexDirection: "column" },
+                React.createElement(Text, { color: isActive ? 'green' : 'white' },
+                    isActive ? '❯ ' : ' ',
+                    "Request Method:"),
+                !isCurrentlyEditing && (React.createElement(Box, { marginLeft: 3 },
+                    React.createElement(Text, { color: "gray" }, requestMethodOptions.find(opt => opt.value === requestMethod)
+                        ?.label || 'Not set')))));
+        case 'anthropicBeta':
+            return (React.createElement(Box, { key: field, flexDirection: "column" },
+                React.createElement(Text, { color: isActive ? 'green' : 'white' },
+                    isActive ? '❯ ' : ' ',
+                    "Anthropic Beta:"),
+                React.createElement(Box, { marginLeft: 3 },
+                    React.createElement(Text, { color: "gray" },
+                        anthropicBeta ? '☒ Enabled' : '☐ Disabled',
+                        " (Press Enter to toggle)"))));
+        case 'thinkingEnabled':
+            return (React.createElement(Box, { key: field, flexDirection: "column" },
+                React.createElement(Text, { color: isActive ? 'green' : 'white' },
+                    isActive ? '❯ ' : ' ',
+                    "Thinking Enabled:"),
+                React.createElement(Box, { marginLeft: 3 },
+                    React.createElement(Text, { color: "gray" },
+                        thinkingEnabled ? '☒ Enabled' : '☐ Disabled',
+                        " (Press Enter to toggle)"))));
+        case 'thinkingBudgetTokens':
+            return (React.createElement(Box, { key: field, flexDirection: "column" },
+                React.createElement(Text, { color: isActive ? 'green' : 'white' },
+                    isActive ? '❯ ' : ' ',
+                    "Thinking Budget Tokens:"),
+                isCurrentlyEditing && (React.createElement(Box, { marginLeft: 3 },
+                    React.createElement(Text, { color: "cyan" },
+                        "Enter value: ",
+                        thinkingBudgetTokens))),
+                !isCurrentlyEditing && (React.createElement(Box, { marginLeft: 3 },
+                    React.createElement(Text, { color: "gray" }, thinkingBudgetTokens)))));
+        case 'advancedModel':
+            return (React.createElement(Box, { key: field, flexDirection: "column" },
+                React.createElement(Text, { color: isActive ? 'green' : 'white' },
+                    isActive ? '❯ ' : ' ',
+                    "Advanced Model:"),
+                !isCurrentlyEditing && (React.createElement(Box, { marginLeft: 3 },
+                    React.createElement(Text, { color: "gray" }, advancedModel || 'Not set')))));
+        case 'basicModel':
+            return (React.createElement(Box, { key: field, flexDirection: "column" },
+                React.createElement(Text, { color: isActive ? 'green' : 'white' },
+                    isActive ? '❯ ' : ' ',
+                    "Basic Model:"),
+                !isCurrentlyEditing && (React.createElement(Box, { marginLeft: 3 },
+                    React.createElement(Text, { color: "gray" }, basicModel || 'Not set')))));
+        case 'compactModelName':
+            return (React.createElement(Box, { key: field, flexDirection: "column" },
+                React.createElement(Text, { color: isActive ? 'green' : 'white' },
+                    isActive ? '❯ ' : ' ',
+                    "Compact Model:"),
+                !isCurrentlyEditing && (React.createElement(Box, { marginLeft: 3 },
+                    React.createElement(Text, { color: "gray" }, compactModelName || 'Not set')))));
+        case 'maxContextTokens':
+            return (React.createElement(Box, { key: field, flexDirection: "column" },
+                React.createElement(Text, { color: isActive ? 'green' : 'white' },
+                    isActive ? '❯ ' : ' ',
+                    "Max Context Tokens:"),
+                isCurrentlyEditing && (React.createElement(Box, { marginLeft: 3 },
+                    React.createElement(Text, { color: "cyan" },
+                        "Enter value: ",
+                        maxContextTokens))),
+                !isCurrentlyEditing && (React.createElement(Box, { marginLeft: 3 },
+                    React.createElement(Text, { color: "gray" }, maxContextTokens)))));
+        case 'maxTokens':
+            return (React.createElement(Box, { key: field, flexDirection: "column" },
+                React.createElement(Text, { color: isActive ? 'green' : 'white' },
+                    isActive ? '❯ ' : ' ',
+                    "Max Tokens:"),
+                isCurrentlyEditing && (React.createElement(Box, { marginLeft: 3 },
+                    React.createElement(Text, { color: "cyan" },
+                        "Enter value: ",
+                        maxTokens))),
+                !isCurrentlyEditing && (React.createElement(Box, { marginLeft: 3 },
+                    React.createElement(Text, { color: "gray" }, maxTokens)))));
+        default:
+            return null;
+    }
+};
 useInput((rawInput, key) => {
     const input = stripFocusArtifacts(rawInput);
     if (!input && isFocusEventInput(rawInput)) {

@@ -403,41 +579,68 @@ export default function ConfigScreen({ onBack, onSave, inlineMode = false, }) {
     return;
 }
 // Handle numeric input for token fields
-if (currentField === 'maxContextTokens' ||
+if (currentField === 'maxContextTokens' ||
+    currentField === 'maxTokens' ||
+    currentField === 'thinkingBudgetTokens') {
     if (input && input.match(/[0-9]/)) {
-        const currentValue = currentField === 'maxContextTokens'
+        const currentValue = currentField === 'maxContextTokens'
+            ? maxContextTokens
+            : currentField === 'maxTokens'
+                ? maxTokens
+                : thinkingBudgetTokens;
         const newValue = parseInt(currentValue.toString() + input, 10);
         if (!isNaN(newValue)) {
            if (currentField === 'maxContextTokens') {
                setMaxContextTokens(newValue);
            }
-           else {
+           else if (currentField === 'maxTokens') {
               setMaxTokens(newValue);
           }
+           else {
+               setThinkingBudgetTokens(newValue);
+           }
        }
    }
    else if (key.backspace || key.delete) {
-        const currentValue = currentField === 'maxContextTokens'
+        const currentValue = currentField === 'maxContextTokens'
+            ? maxContextTokens
+            : currentField === 'maxTokens'
+                ? maxTokens
+                : thinkingBudgetTokens;
        const currentStr = currentValue.toString();
        const newStr = currentStr.slice(0, -1);
        const newValue = parseInt(newStr, 10);
        if (currentField === 'maxContextTokens') {
            setMaxContextTokens(!isNaN(newValue) ? newValue : 0);
        }
-       else {
+       else if (currentField === 'maxTokens') {
           setMaxTokens(!isNaN(newValue) ? newValue : 0);
       }
+       else {
+           setThinkingBudgetTokens(!isNaN(newValue) ? newValue : 0);
+       }
    }
    else if (key.return) {
-        const minValue = currentField === 'maxContextTokens'
-
+        const minValue = currentField === 'maxContextTokens'
+            ? 4000
+            : currentField === 'maxTokens'
+                ? 100
+                : 1000;
+        const currentValue = currentField === 'maxContextTokens'
+            ? maxContextTokens
+            : currentField === 'maxTokens'
+                ? maxTokens
+                : thinkingBudgetTokens;
        const finalValue = currentValue < minValue ? minValue : currentValue;
        if (currentField === 'maxContextTokens') {
            setMaxContextTokens(finalValue);
        }
-       else {
+       else if (currentField === 'maxTokens') {
           setMaxTokens(finalValue);
       }
+       else {
+           setThinkingBudgetTokens(finalValue);
+       }
        setIsEditing(false);
    }
    return;

@@ -470,8 +673,12 @@ export default function ConfigScreen({ onBack, onSave, inlineMode = false, }) {
 if (currentField === 'anthropicBeta') {
     setAnthropicBeta(!anthropicBeta);
 }
+else if (currentField === 'thinkingEnabled') {
+    setThinkingEnabled(!thinkingEnabled);
+}
 else if (currentField === 'maxContextTokens' ||
-    currentField === 'maxTokens'
+    currentField === 'maxTokens' ||
+    currentField === 'thinkingBudgetTokens') {
     setIsEditing(true);
 }
 else if (currentField === 'advancedModel' ||

@@ -504,36 +711,14 @@ export default function ConfigScreen({ onBack, onSave, inlineMode = false, }) {
 }
 }
 else if (!isEditing && key.upArrow) {
-    const fields =
-        'profile',
-        'baseUrl',
-        'apiKey',
-        'requestMethod',
-        'anthropicBeta',
-        'advancedModel',
-        'basicModel',
-        'compactModelName',
-        'maxContextTokens',
-        'maxTokens',
-    ];
+    const fields = getAllFields();
     const currentIndex = fields.indexOf(currentField);
     if (currentIndex > 0) {
         setCurrentField(fields[currentIndex - 1]);
     }
 }
 else if (!isEditing && key.downArrow) {
-    const fields =
-        'profile',
-        'baseUrl',
-        'apiKey',
-        'requestMethod',
-        'anthropicBeta',
-        'advancedModel',
-        'basicModel',
-        'compactModelName',
-        'maxContextTokens',
-        'maxTokens',
-    ];
+    const fields = getAllFields();
     const currentIndex = fields.indexOf(currentField);
     if (currentIndex < fields.length - 1) {
         setCurrentField(fields[currentIndex + 1]);

@@ -619,6 +804,16 @@ export default function ConfigScreen({ onBack, onSave, inlineMode = false, }) {
 activeProfile && (React.createElement(Text, { color: "cyan", dimColor: true },
     "Active Profile: ",
     activeProfile))))),
+React.createElement(Box, { marginBottom: 1 },
+    React.createElement(Text, { color: "yellow", bold: true },
+        "Settings (",
+        currentFieldIndex + 1,
+        "/",
+        totalFields,
+        ")"),
+    totalFields > MAX_VISIBLE_FIELDS && (React.createElement(Text, { color: "gray", dimColor: true },
+        ' ',
+        "\u00B7 \u2191\u2193 to scroll"))),
 isEditing &&
     (currentField === 'profile' ||
         currentField === 'requestMethod' ||

@@ -672,83 +867,23 @@ export default function ConfigScreen({ onBack, onSave, inlineMode = false, }) {
 currentField === 'profile' &&
     '↑↓ to select profile, N to create new, D to delete, Enter to confirm, Esc to cancel',
 currentField === 'requestMethod' &&
-    '↑↓ to select, Enter to confirm, Esc to cancel')))) : (React.createElement(Box, { flexDirection: "column" },
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-    React.createElement(Text, { color: currentField === 'apiKey' ? 'green' : 'white' },
-        currentField === 'apiKey' ? '❯ ' : ' ',
-        "API Key:"),
-    currentField === 'apiKey' && isEditing && (React.createElement(Box, { marginLeft: 3 },
-        React.createElement(TextInput, { value: apiKey, onChange: value => setApiKey(stripFocusArtifacts(value)), placeholder: "sk-...", mask: "*" }))),
-    (!isEditing || currentField !== 'apiKey') && (React.createElement(Box, { marginLeft: 3 },
-        React.createElement(Text, { color: "gray" }, apiKey ? '*'.repeat(Math.min(apiKey.length, 20)) : 'Not set')))),
-    React.createElement(Box, { flexDirection: "column" },
-        React.createElement(Text, { color: currentField === 'requestMethod' ? 'green' : 'white' },
-            currentField === 'requestMethod' ? '❯ ' : ' ',
-            "Request Method:"),
-        (!isEditing || currentField !== 'requestMethod') && (React.createElement(Box, { marginLeft: 3 },
-            React.createElement(Text, { color: "gray" }, requestMethodOptions.find(opt => opt.value === requestMethod)
-                ?.label || 'Not set')))),
-    React.createElement(Box, { flexDirection: "column" },
-        React.createElement(Text, { color: currentField === 'anthropicBeta' ? 'green' : 'white' },
-            currentField === 'anthropicBeta' ? '❯ ' : ' ',
-            "Anthropic Beta:"),
-        React.createElement(Box, { marginLeft: 3 },
-            React.createElement(Text, { color: "gray" },
-                anthropicBeta ? '☒ Enabled' : '☐ Disabled',
-                " (Press Enter to toggle)"))),
-    React.createElement(Box, { flexDirection: "column" },
-        React.createElement(Text, { color: currentField === 'advancedModel' ? 'green' : 'white' },
-            currentField === 'advancedModel' ? '❯ ' : ' ',
-            "Advanced Model:"),
-        (!isEditing || currentField !== 'advancedModel') && (React.createElement(Box, { marginLeft: 3 },
-            React.createElement(Text, { color: "gray" }, advancedModel || 'Not set')))),
-    React.createElement(Box, { flexDirection: "column" },
-        React.createElement(Text, { color: currentField === 'basicModel' ? 'green' : 'white' },
-            currentField === 'basicModel' ? '❯ ' : ' ',
-            "Basic Model:"),
-        (!isEditing || currentField !== 'basicModel') && (React.createElement(Box, { marginLeft: 3 },
-            React.createElement(Text, { color: "gray" }, basicModel || 'Not set')))),
-    React.createElement(Box, { flexDirection: "column" },
-        React.createElement(Text, { color: currentField === 'compactModelName' ? 'green' : 'white' },
-            currentField === 'compactModelName' ? '❯ ' : ' ',
-            "Compact Model:"),
-        (!isEditing || currentField !== 'compactModelName') && (React.createElement(Box, { marginLeft: 3 },
-            React.createElement(Text, { color: "gray" }, compactModelName || 'Not set')))),
-    React.createElement(Box, { flexDirection: "column" },
-        React.createElement(Text, { color: currentField === 'maxContextTokens' ? 'green' : 'white' },
-            currentField === 'maxContextTokens' ? '❯ ' : ' ',
-            "Max Context Tokens:"),
-        currentField === 'maxContextTokens' && isEditing && (React.createElement(Box, { marginLeft: 3 },
-            React.createElement(Text, { color: "cyan" },
-                "Enter value: ",
-                maxContextTokens))),
-        (!isEditing || currentField !== 'maxContextTokens') && (React.createElement(Box, { marginLeft: 3 },
-            React.createElement(Text, { color: "gray" }, maxContextTokens)))),
-    React.createElement(Box, { flexDirection: "column" },
-        React.createElement(Text, { color: currentField === 'maxTokens' ? 'green' : 'white' },
-            currentField === 'maxTokens' ? '❯ ' : ' ',
-            "Max Tokens:"),
-        currentField === 'maxTokens' && isEditing && (React.createElement(Box, { marginLeft: 3 },
-            React.createElement(Text, { color: "cyan" },
-                "Enter value: ",
-                maxTokens))),
-        (!isEditing || currentField !== 'maxTokens') && (React.createElement(Box, { marginLeft: 3 },
-            React.createElement(Text, { color: "gray" }, maxTokens)))))),
+    '↑↓ to select, Enter to confirm, Esc to cancel')))) : (React.createElement(Box, { flexDirection: "column" }, (() => {
+    // Calculate visible window
+    if (allFields.length <= MAX_VISIBLE_FIELDS) {
+        // Show all fields if less than max
+        return allFields.map(field => renderField(field));
+    }
+    // Calculate scroll window
+    const halfWindow = Math.floor(MAX_VISIBLE_FIELDS / 2);
+    let startIndex = Math.max(0, currentFieldIndex - halfWindow);
+    let endIndex = Math.min(allFields.length, startIndex + MAX_VISIBLE_FIELDS);
+    // Adjust if we're near the end
+    if (endIndex - startIndex < MAX_VISIBLE_FIELDS) {
+        startIndex = Math.max(0, endIndex - MAX_VISIBLE_FIELDS);
+    }
+    const visibleFields = allFields.slice(startIndex, endIndex);
+    return visibleFields.map(field => renderField(field));
+})())),
 errors.length > 0 && (React.createElement(Box, { flexDirection: "column", marginTop: 1 },
     React.createElement(Text, { color: "red", bold: true }, "Errors:"),
     errors.map((error, index) => (React.createElement(Text, { key: index, color: "red" },
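
The new field window keeps the cursor roughly centred: it starts half a window above the current index and re-anchors when the window would run past the end of the list. The same calculation as a standalone sketch (field names and counts are illustrative):

// Standalone version of the ConfigScreen scroll-window calculation.
function visibleWindow(allFields: string[], currentFieldIndex: number, maxVisible = 8): string[] {
    if (allFields.length <= maxVisible) {
        return allFields; // everything fits, no scrolling
    }
    const halfWindow = Math.floor(maxVisible / 2);
    let startIndex = Math.max(0, currentFieldIndex - halfWindow);
    const endIndex = Math.min(allFields.length, startIndex + maxVisible);
    if (endIndex - startIndex < maxVisible) {
        startIndex = Math.max(0, endIndex - maxVisible); // re-anchor near the end
    }
    return allFields.slice(startIndex, endIndex);
}
// With 12 fields and the cursor on index 10, the window shows indexes 4..11.
console.log(visibleWindow(Array.from({ length: 12 }, (_, i) => `field${i}`), 10));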
package/dist/ui/pages/HeadlessModeScreen.js
CHANGED

@@ -279,7 +279,9 @@ export default function HeadlessModeScreen({ prompt, onComplete }) {
 // Show normal thinking status with colors
 const thinkingText = streamingState.isReasoning
     ? 'Deep thinking...'
-    :
+    : streamingState.streamTokenCount > 0
+        ? 'Writing...'
+        : 'Thinking...';
 process.stdout.write(`\r\x1b[96m❆\x1b[90m ${thinkingText} \x1b[37m(\x1b[33m${streamingState.elapsedSeconds}s\x1b[37m · \x1b[32m↓ ${streamingState.streamTokenCount} tokens\x1b[37m)\x1b[0m`);
 }
 }
package/dist/utils/apiConfig.d.ts
CHANGED

@@ -2,6 +2,10 @@ export type RequestMethod = 'chat' | 'responses' | 'gemini' | 'anthropic';
 export interface CompactModelConfig {
     modelName: string;
 }
+export interface ThinkingConfig {
+    type: 'enabled';
+    budget_tokens: number;
+}
 export interface ApiConfig {
     baseUrl: string;
     apiKey: string;

@@ -12,6 +16,7 @@ export interface ApiConfig {
     maxTokens?: number;
     compactModel?: CompactModelConfig;
     anthropicBeta?: boolean;
+    thinking?: ThinkingConfig;
 }
 export interface MCPServer {
     url?: string;
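
ThinkingConfig is persisted alongside the other optional ApiConfig fields, so enabling it in the Config screen amounts to writing one extra object into the profile. A hedged example of an entry with thinking turned on; the URL, key and flag values are placeholders and any other required fields are omitted:

// Illustrative config fragment; only the thinking field's shape comes from the declaration above.
const exampleConfig = {
    baseUrl: 'https://api.anthropic.com/v1',  // placeholder
    apiKey: 'sk-ant-...',                      // placeholder
    requestMethod: 'anthropic',
    anthropicBeta: false,
    thinking: { type: 'enabled', budget_tokens: 10000 },
};
console.log(exampleConfig.thinking.budget_tokens); // 10000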
package/dist/utils/apiConfig.js
CHANGED
@@ -24,7 +24,10 @@ const CONFIG_DIR = join(homedir(), '.snow');
 const SYSTEM_PROMPT_FILE = join(CONFIG_DIR, 'system-prompt.txt');
 const CUSTOM_HEADERS_FILE = join(CONFIG_DIR, 'custom-headers.json');
 function normalizeRequestMethod(method) {
-    if (method === 'chat' ||
+    if (method === 'chat' ||
+        method === 'responses' ||
+        method === 'gemini' ||
+        method === 'anthropic') {
         return method;
     }
     if (method === 'completions') {

@@ -84,7 +87,8 @@ export function loadConfig() {
     snowcfg: apiConfig,
 };
 // If this config was migrated from an old version, save the new config
-if (legacyMcp !== undefined ||
+if (legacyMcp !== undefined ||
+    (configWithoutMcp.openai && !configWithoutMcp.snowcfg)) {
     saveConfig(mergedConfig);
 }
 return mergedConfig;

@@ -264,7 +268,9 @@ export function getCustomHeaders() {
 const content = readFileSync(CUSTOM_HEADERS_FILE, 'utf8');
 const headers = JSON.parse(content);
 // Validate the format: must be an object and all values must be strings
-if (typeof headers !== 'object' ||
+if (typeof headers !== 'object' ||
+    headers === null ||
+    Array.isArray(headers)) {
     return {};
 }
 // Filter out non-string values
package/dist/utils/contextCompressor.js
CHANGED

@@ -73,7 +73,9 @@ function findPreserveStartIndex(messages) {
 // Search backwards for the corresponding assistant with tool_calls
 for (let i = messages.length - 2; i >= 0; i--) {
     const msg = messages[i];
-    if (msg?.role === 'assistant' &&
+    if (msg?.role === 'assistant' &&
+        msg.tool_calls &&
+        msg.tool_calls.length > 0) {
         // Found it; preserve the messages starting from this assistant
         return i;
     }

@@ -82,7 +84,9 @@ function findPreserveStartIndex(messages) {
     return messages.length - 1;
 }
 // Case 2: the last message is an assistant with tool_calls → preserve the assistant (tool_calls)
-if (lastMsg?.role === 'assistant' &&
+if (lastMsg?.role === 'assistant' &&
+    lastMsg.tool_calls &&
+    lastMsg.tool_calls.length > 0) {
     // Preserve the pending tool_calls
     return messages.length - 1;
 }

@@ -244,6 +248,7 @@ async function compressWithAnthropic(modelName, conversationMessages, customSyst
     model: modelName,
     messages,
     max_tokens: 4096,
+    disableThinking: true, // Context compression does not use Extended Thinking
 })) {
     // Collect content
     if (chunk.type === 'content' && chunk.content) {
package/dist/utils/subAgentExecutor.js
CHANGED

@@ -75,7 +75,7 @@ export async function executeSubAgent(agentId, prompt, onMessage, abortSignal, r
 // Get API configuration
 const config = getOpenAiConfig();
 const currentSession = sessionManager.getCurrentSession();
-const model = config.advancedModel || '
+const model = config.advancedModel || 'gpt-5';
 // Call API with sub-agent's tools - choose API based on config
 const stream = config.requestMethod === 'anthropic'
     ? createStreamingAnthropicCompletion({

@@ -85,6 +85,7 @@ export async function executeSubAgent(agentId, prompt, onMessage, abortSignal, r
     max_tokens: config.maxTokens || 4096,
     tools: allowedTools,
     sessionId: currentSession?.id,
+    disableThinking: true, // Sub-agents do not use Extended Thinking
 }, abortSignal)
 : config.requestMethod === 'gemini'
     ? createStreamingGeminiCompletion({
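
All three internal call sites (compact agent, context compression, sub-agents) opt out of extended thinking the same way: by passing disableThinking when they build the streaming request, so the user-facing chat is the only place that spends the thinking budget. A condensed sketch of that call pattern; the import path and model name are assumptions, only disableThinking mirrors the diff:

// Hedged sketch of the opt-out used by internal agents.
import { createStreamingAnthropicCompletion } from '../api/anthropic.js'; // assumed relative path

async function runInternalAgent(messages: unknown[], abortSignal?: AbortSignal): Promise<string> {
    let output = '';
    const stream = createStreamingAnthropicCompletion({
        model: 'claude-sonnet-4-20250514', // placeholder model name
        messages,
        max_tokens: 4096,
        disableThinking: true, // internal tooling never pays the thinking budget
    }, abortSignal);
    for await (const chunk of stream) {
        if (chunk.type === 'content' && chunk.content) output += chunk.content;
    }
    return output;
}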