snow-ai 0.2.15 → 0.2.17

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (67)
  1. package/dist/api/anthropic.d.ts +1 -1
  2. package/dist/api/anthropic.js +52 -76
  3. package/dist/api/chat.d.ts +4 -4
  4. package/dist/api/chat.js +32 -17
  5. package/dist/api/gemini.d.ts +1 -1
  6. package/dist/api/gemini.js +20 -13
  7. package/dist/api/models.d.ts +3 -0
  8. package/dist/api/models.js +101 -17
  9. package/dist/api/responses.d.ts +5 -5
  10. package/dist/api/responses.js +29 -27
  11. package/dist/app.js +4 -1
  12. package/dist/hooks/useClipboard.d.ts +4 -0
  13. package/dist/hooks/useClipboard.js +120 -0
  14. package/dist/hooks/useCommandHandler.d.ts +26 -0
  15. package/dist/hooks/useCommandHandler.js +158 -0
  16. package/dist/hooks/useCommandPanel.d.ts +16 -0
  17. package/dist/hooks/useCommandPanel.js +53 -0
  18. package/dist/hooks/useConversation.d.ts +9 -1
  19. package/dist/hooks/useConversation.js +152 -58
  20. package/dist/hooks/useFilePicker.d.ts +17 -0
  21. package/dist/hooks/useFilePicker.js +91 -0
  22. package/dist/hooks/useHistoryNavigation.d.ts +21 -0
  23. package/dist/hooks/useHistoryNavigation.js +50 -0
  24. package/dist/hooks/useInputBuffer.d.ts +6 -0
  25. package/dist/hooks/useInputBuffer.js +29 -0
  26. package/dist/hooks/useKeyboardInput.d.ts +51 -0
  27. package/dist/hooks/useKeyboardInput.js +272 -0
  28. package/dist/hooks/useSnapshotState.d.ts +12 -0
  29. package/dist/hooks/useSnapshotState.js +28 -0
  30. package/dist/hooks/useStreamingState.d.ts +24 -0
  31. package/dist/hooks/useStreamingState.js +96 -0
  32. package/dist/hooks/useVSCodeState.d.ts +8 -0
  33. package/dist/hooks/useVSCodeState.js +63 -0
  34. package/dist/mcp/filesystem.d.ts +24 -5
  35. package/dist/mcp/filesystem.js +52 -17
  36. package/dist/mcp/todo.js +4 -8
  37. package/dist/ui/components/ChatInput.js +71 -560
  38. package/dist/ui/components/DiffViewer.js +57 -30
  39. package/dist/ui/components/FileList.js +70 -26
  40. package/dist/ui/components/MessageList.d.ts +6 -0
  41. package/dist/ui/components/MessageList.js +47 -15
  42. package/dist/ui/components/ShimmerText.d.ts +9 -0
  43. package/dist/ui/components/ShimmerText.js +30 -0
  44. package/dist/ui/components/TodoTree.d.ts +1 -1
  45. package/dist/ui/components/TodoTree.js +0 -4
  46. package/dist/ui/components/ToolConfirmation.js +14 -6
  47. package/dist/ui/pages/ChatScreen.js +174 -373
  48. package/dist/ui/pages/CustomHeadersScreen.d.ts +6 -0
  49. package/dist/ui/pages/CustomHeadersScreen.js +104 -0
  50. package/dist/ui/pages/WelcomeScreen.js +5 -0
  51. package/dist/utils/apiConfig.d.ts +10 -0
  52. package/dist/utils/apiConfig.js +51 -0
  53. package/dist/utils/incrementalSnapshot.d.ts +8 -0
  54. package/dist/utils/incrementalSnapshot.js +63 -0
  55. package/dist/utils/mcpToolsManager.js +6 -1
  56. package/dist/utils/retryUtils.d.ts +22 -0
  57. package/dist/utils/retryUtils.js +180 -0
  58. package/dist/utils/sessionConverter.js +80 -17
  59. package/dist/utils/sessionManager.js +35 -4
  60. package/dist/utils/textUtils.d.ts +4 -0
  61. package/dist/utils/textUtils.js +19 -0
  62. package/dist/utils/todoPreprocessor.d.ts +1 -1
  63. package/dist/utils/todoPreprocessor.js +0 -1
  64. package/dist/utils/vscodeConnection.d.ts +8 -0
  65. package/dist/utils/vscodeConnection.js +44 -0
  66. package/package.json +1 -1
  67. package/readme.md +3 -1
package/dist/api/models.js CHANGED
@@ -1,28 +1,112 @@
- import { getOpenAiConfig } from '../utils/apiConfig.js';
- export async function fetchAvailableModels() {
- const config = getOpenAiConfig();
- if (!config.baseUrl) {
- throw new Error('Base URL not configured. Please configure API settings first.');
+ import { getOpenAiConfig, getCustomHeaders } from '../utils/apiConfig.js';
+ /**
+ * Fetch models from OpenAI-compatible API
+ */
+ async function fetchOpenAIModels(baseUrl, apiKey, customHeaders) {
+ const url = `${baseUrl.replace(/\/$/, '')}/models`;
+ const headers = {
+ 'Content-Type': 'application/json',
+ ...customHeaders,
+ };
+ if (apiKey) {
+ headers['Authorization'] = `Bearer ${apiKey}`;
+ }
+ const response = await fetch(url, {
+ method: 'GET',
+ headers,
+ });
+ if (!response.ok) {
+ throw new Error(`Failed to fetch models: ${response.status} ${response.statusText}`);
+ }
+ const data = await response.json();
+ return data.data || [];
+ }
+ /**
+ * Fetch models from Gemini API
+ */
+ async function fetchGeminiModels(baseUrl, apiKey) {
+ // Gemini uses API key as query parameter
+ const url = `${baseUrl.replace(/\/$/, '')}/models?key=${apiKey}`;
+ const response = await fetch(url, {
+ method: 'GET',
+ headers: {
+ 'Content-Type': 'application/json',
+ },
+ });
+ if (!response.ok) {
+ throw new Error(`Failed to fetch models: ${response.status} ${response.statusText}`);
  }
- const url = `${config.baseUrl.replace(/\/$/, '')}/models`;
+ const data = await response.json();
+ // Convert Gemini format to standard Model format
+ return (data.models || []).map(model => ({
+ id: model.name.replace('models/', ''), // Remove "models/" prefix
+ object: 'model',
+ created: 0,
+ owned_by: 'google',
+ }));
+ }
+ /**
+ * Fetch models from Anthropic API
+ */
+ async function fetchAnthropicModels(baseUrl, apiKey, customHeaders) {
+ const url = `${baseUrl.replace(/\/$/, '')}/models`;
  const headers = {
  'Content-Type': 'application/json',
+ 'anthropic-version': '2023-06-01',
+ ...customHeaders,
  };
- // Add Authorization header only if API key is provided
- if (config.apiKey) {
- headers['Authorization'] = `Bearer ${config.apiKey}`;
+ if (apiKey) {
+ headers['x-api-key'] = apiKey;
+ }
+ const response = await fetch(url, {
+ method: 'GET',
+ headers,
+ });
+ if (!response.ok) {
+ throw new Error(`Failed to fetch models: ${response.status} ${response.statusText}`);
+ }
+ const data = await response.json();
+ // Convert Anthropic format to standard Model format
+ return (data.data || []).map(model => ({
+ id: model.id,
+ object: 'model',
+ created: new Date(model.created_at).getTime() / 1000, // Convert to Unix timestamp
+ owned_by: 'anthropic',
+ }));
+ }
+ /**
+ * Fetch available models based on configured request method
+ */
+ export async function fetchAvailableModels() {
+ const config = getOpenAiConfig();
+ if (!config.baseUrl) {
+ throw new Error('Base URL not configured. Please configure API settings first.');
  }
+ const customHeaders = getCustomHeaders();
  try {
- const response = await fetch(url, {
- method: 'GET',
- headers,
- });
- if (!response.ok) {
- throw new Error(`Failed to fetch models: ${response.status} ${response.statusText}`);
+ let models;
+ switch (config.requestMethod) {
+ case 'gemini':
+ if (!config.apiKey) {
+ throw new Error('API key is required for Gemini API');
+ }
+ models = await fetchGeminiModels(config.baseUrl.replace(/\/$/, '') + '/v1beta', config.apiKey);
+ break;
+ case 'anthropic':
+ if (!config.apiKey) {
+ throw new Error('API key is required for Anthropic API');
+ }
+ models = await fetchAnthropicModels(config.baseUrl.replace(/\/$/, '') + '/v1', config.apiKey, customHeaders);
+ break;
+ case 'chat':
+ case 'responses':
+ default:
+ // OpenAI-compatible API
+ models = await fetchOpenAIModels(config.baseUrl, config.apiKey, customHeaders);
+ break;
  }
- const data = await response.json();
  // Sort models alphabetically by id for better UX
- return (data.data || []).sort((a, b) => a.id.localeCompare(b.id));
+ return models.sort((a, b) => a.id.localeCompare(b.id));
  }
  catch (error) {
  if (error instanceof Error) {
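
For orientation, a minimal sketch of how the reworked listing might be consumed. The relative import path and the helper name are illustrative assumptions; only fetchAvailableModels and the sorted, normalized result shape come from the diff above.

    // Hypothetical consumer of the provider-aware model listing (import path assumed).
    import { fetchAvailableModels } from './api/models.js';

    async function listModelIds(): Promise<string[]> {
        // Dispatches on config.requestMethod ('gemini', 'anthropic', otherwise OpenAI-compatible)
        // and returns an alphabetically sorted array of { id, object, created, owned_by }.
        const models = await fetchAvailableModels();
        return models.map((m: { id: string }) => m.id);
    }
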
package/dist/api/responses.d.ts CHANGED
@@ -31,7 +31,7 @@ export interface UsageInfo {
  cached_tokens?: number;
  }
  export interface ResponseStreamChunk {
- type: 'content' | 'tool_calls' | 'tool_call_delta' | 'reasoning_delta' | 'done' | 'usage';
+ type: 'content' | 'tool_calls' | 'tool_call_delta' | 'reasoning_delta' | 'reasoning_started' | 'done' | 'usage';
  content?: string;
  tool_calls?: ToolCall[];
  delta?: string;
@@ -39,17 +39,17 @@ export interface ResponseStreamChunk {
  }
  export declare function resetOpenAIClient(): void;
  /**
- * 使用 Responses API 创建响应(非流式,带自动工具调用)
+ * 使用 Responses API 创建响应(非流式,带自动工具调用)
  */
- export declare function createResponse(options: ResponseOptions): Promise<string>;
+ export declare function createResponse(options: ResponseOptions, abortSignal?: AbortSignal, onRetry?: (error: Error, attempt: number, nextDelay: number) => void): Promise<string>;
  /**
  * 使用 Responses API 创建流式响应(带自动工具调用)
  */
- export declare function createStreamingResponse(options: ResponseOptions, abortSignal?: AbortSignal): AsyncGenerator<ResponseStreamChunk, void, unknown>;
+ export declare function createStreamingResponse(options: ResponseOptions, abortSignal?: AbortSignal, onRetry?: (error: Error, attempt: number, nextDelay: number) => void): AsyncGenerator<ResponseStreamChunk, void, unknown>;
  /**
  * 使用 Responses API 创建响应(限制工具调用轮数)
  */
- export declare function createResponseWithTools(options: ResponseOptions, maxToolRounds?: number): Promise<{
+ export declare function createResponseWithTools(options: ResponseOptions, maxToolRounds?: number, abortSignal?: AbortSignal, onRetry?: (error: Error, attempt: number, nextDelay: number) => void): Promise<{
  content: string;
  toolCalls: ToolCall[];
  }>;
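
The widened signatures mean callers can now pass an AbortSignal plus an onRetry callback through to the new retry layer. A usage sketch based only on the declarations above; the options literal and the import path are placeholders.

    import { createStreamingResponse } from './api/responses.js';

    async function runStream(): Promise<void> {
        const controller = new AbortController();
        const stream = createStreamingResponse(
            { model: 'gpt-4o-mini', messages: [{ role: 'user', content: 'hello' }] }, // placeholder ResponseOptions
            controller.signal,
            (error, attempt, nextDelay) =>
                console.error(`attempt ${attempt} failed (${error.message}); retrying in ${nextDelay}ms`),
        );
        for await (const chunk of stream) {
            if (chunk.type === 'reasoning_started') console.error('[reasoning started]');
            if (chunk.type === 'content' && chunk.content) process.stdout.write(chunk.content);
        }
    }
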
package/dist/api/responses.js CHANGED
@@ -1,7 +1,8 @@
  import OpenAI from 'openai';
- import { getOpenAiConfig, getCustomSystemPrompt } from '../utils/apiConfig.js';
+ import { getOpenAiConfig, getCustomSystemPrompt, getCustomHeaders } from '../utils/apiConfig.js';
  import { executeMCPTool } from '../utils/mcpToolsManager.js';
  import { SYSTEM_PROMPT } from './systemPrompt.js';
+ import { withRetry, withRetryGenerator } from '../utils/retryUtils.js';
  /**
  * 确保 schema 符合 Responses API 的要求:
  * 1. additionalProperties: false
@@ -59,9 +60,14 @@ function getOpenAIClient() {
  if (!config.apiKey || !config.baseUrl) {
  throw new Error('OpenAI API configuration is incomplete. Please configure API settings first.');
  }
+ // Get custom headers
+ const customHeaders = getCustomHeaders();
  openaiClient = new OpenAI({
  apiKey: config.apiKey,
  baseURL: config.baseUrl,
+ defaultHeaders: {
+ ...customHeaders
+ }
  });
  }
  return openaiClient;
@@ -186,9 +192,9 @@ function convertToResponseInput(messages) {
  return { input: result, systemInstructions };
  }
  /**
- * 使用 Responses API 创建响应(非流式,带自动工具调用)
+ * 使用 Responses API 创建响应(非流式,带自动工具调用)
  */
- export async function createResponse(options) {
+ export async function createResponse(options, abortSignal, onRetry) {
  const client = getOpenAIClient();
  let messages = [...options.messages];
  // 提取系统提示词和转换后的消息
@@ -207,7 +213,10 @@ export async function createResponse(options) {
  include: options.include || ['reasoning.encrypted_content'],
  prompt_cache_key: options.prompt_cache_key,
  };
- const response = await client.responses.create(requestPayload);
+ const response = await withRetry(() => client.responses.create(requestPayload), {
+ abortSignal,
+ onRetry
+ });
  // 提取响应 - Responses API 返回 output 数组
  const output = response.output;
  if (!output || output.length === 0) {
@@ -271,11 +280,12 @@ export async function createResponse(options) {
  /**
  * 使用 Responses API 创建流式响应(带自动工具调用)
  */
- export async function* createStreamingResponse(options, abortSignal) {
+ export async function* createStreamingResponse(options, abortSignal, onRetry) {
  const client = getOpenAIClient();
  // 提取系统提示词和转换后的消息
  const { input: requestInput, systemInstructions } = convertToResponseInput(options.messages);
- try {
+ // 使用重试包装生成器
+ yield* withRetryGenerator(async function* () {
  const requestPayload = {
  model: options.model,
  instructions: systemInstructions,
@@ -311,7 +321,10 @@ export async function* createStreamingResponse(options, abortSignal) {
  // 新输出项添加
  const item = chunk.item;
  if (item?.type === 'reasoning') {
- // 推理摘要开始 - 忽略
+ // 推理摘要开始 - 发送 reasoning_started 事件
+ yield {
+ type: 'reasoning_started'
+ };
  continue;
  }
  else if (item?.type === 'message') {
@@ -443,29 +456,15 @@ export async function* createStreamingResponse(options, abortSignal) {
  yield {
  type: 'done'
  };
- }
- catch (error) {
- if (error instanceof Error && error.name === 'AbortError') {
- return;
- }
- if (error instanceof Error) {
- // 检查是否是 API 网关不支持 Responses API
- if (error.message.includes('Panic detected') ||
- error.message.includes('nil pointer') ||
- error.message.includes('404') ||
- error.message.includes('not found')) {
- throw new Error('Streaming response creation failed: Your API endpoint does not support the Responses API. ' +
- 'Please switch to "Chat Completions" method in API settings, or use an OpenAI-compatible endpoint that supports Responses API (OpenAI official API, or compatible providers).');
- }
- throw new Error(`Streaming response creation failed: ${error.message}`);
- }
- throw new Error('Streaming response creation failed: Unknown error');
- }
+ }, {
+ abortSignal,
+ onRetry
+ });
  }
  /**
  * 使用 Responses API 创建响应(限制工具调用轮数)
  */
- export async function createResponseWithTools(options, maxToolRounds = 5) {
+ export async function createResponseWithTools(options, maxToolRounds = 5, abortSignal, onRetry) {
  const client = getOpenAIClient();
  let messages = [...options.messages];
  let allToolCalls = [];
@@ -485,7 +484,10 @@ export async function createResponseWithTools(options, maxToolRounds = 5) {
  include: options.include || ['reasoning.encrypted_content'],
  prompt_cache_key: options.prompt_cache_key,
  };
- const response = await client.responses.create(requestPayload);
+ const response = await withRetry(() => client.responses.create(requestPayload), {
+ abortSignal,
+ onRetry
+ });
  const output = response.output;
  if (!output || output.length === 0) {
  throw new Error('No output from AI');
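
package/dist/utils/retryUtils.js (+180 lines) is not reproduced in this excerpt. From the call sites above, withRetry wraps a promise-returning factory and withRetryGenerator wraps an async-generator factory, both accepting { abortSignal, onRetry }. A rough sketch of that contract, assuming exponential backoff; this is an inference from the call sites, not the package's actual implementation.

    // Assumed shape of utils/retryUtils.js, inferred only from how it is called above.
    interface RetryOptions {
        abortSignal?: AbortSignal;
        onRetry?: (error: Error, attempt: number, nextDelay: number) => void;
        maxAttempts?: number; // hypothetical knob
    }

    export async function withRetry<T>(factory: () => Promise<T>, opts: RetryOptions = {}): Promise<T> {
        const { abortSignal, onRetry, maxAttempts = 3 } = opts;
        let delay = 1000;
        for (let attempt = 1; ; attempt++) {
            try {
                return await factory();
            } catch (err) {
                const error = err instanceof Error ? err : new Error(String(err));
                // Do not retry user-initiated aborts, and stop once the attempt budget is spent.
                if (abortSignal?.aborted || error.name === 'AbortError' || attempt >= maxAttempts) throw error;
                onRetry?.(error, attempt, delay);
                await new Promise((resolve) => setTimeout(resolve, delay));
                delay *= 2; // exponential backoff
            }
        }
    }

    // withRetryGenerator presumably applies the same policy to an async-generator factory,
    // re-invoking it and re-yielding its chunks when an attempt fails before completion.
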
package/dist/app.js CHANGED
@@ -6,6 +6,7 @@ import ApiConfigScreen from './ui/pages/ApiConfigScreen.js';
  import ModelConfigScreen from './ui/pages/ModelConfigScreen.js';
  import MCPConfigScreen from './ui/pages/MCPConfigScreen.js';
  import SystemPromptConfigScreen from './ui/pages/SystemPromptConfigScreen.js';
+ import CustomHeadersScreen from './ui/pages/CustomHeadersScreen.js';
  import ChatScreen from './ui/pages/ChatScreen.js';
  import { useGlobalExit } from './hooks/useGlobalExit.js';
  import { onNavigate } from './hooks/useGlobalNavigation.js';
@@ -25,7 +26,7 @@ export default function App({ version }) {
  return unsubscribe;
  }, []);
  const handleMenuSelect = (value) => {
- if (value === 'chat' || value === 'settings' || value === 'config' || value === 'models' || value === 'mcp' || value === 'systemprompt') {
+ if (value === 'chat' || value === 'settings' || value === 'config' || value === 'models' || value === 'mcp' || value === 'systemprompt' || value === 'customheaders') {
  setCurrentView(value);
  }
  else if (value === 'exit') {
@@ -50,6 +51,8 @@ export default function App({ version }) {
  return (React.createElement(MCPConfigScreen, { onBack: () => setCurrentView('welcome'), onSave: () => setCurrentView('welcome') }));
  case 'systemprompt':
  return (React.createElement(SystemPromptConfigScreen, { onBack: () => setCurrentView('welcome'), onSave: () => setCurrentView('welcome') }));
+ case 'customheaders':
+ return (React.createElement(CustomHeadersScreen, { onBack: () => setCurrentView('welcome'), onSave: () => setCurrentView('welcome') }));
  default:
  return (React.createElement(WelcomeScreen, { version: version, onMenuSelect: handleMenuSelect }));
  }
package/dist/hooks/useClipboard.d.ts ADDED
@@ -0,0 +1,4 @@
+ import { TextBuffer } from '../utils/textBuffer.js';
+ export declare function useClipboard(buffer: TextBuffer, updateCommandPanelState: (text: string) => void, updateFilePickerState: (text: string, cursorPos: number) => void, triggerUpdate: () => void): {
+ pasteFromClipboard: () => Promise<void>;
+ };
package/dist/hooks/useClipboard.js ADDED
@@ -0,0 +1,120 @@
+ import { useCallback } from 'react';
+ import { execSync } from 'child_process';
+ export function useClipboard(buffer, updateCommandPanelState, updateFilePickerState, triggerUpdate) {
+ const pasteFromClipboard = useCallback(async () => {
+ try {
+ // Try to read image from clipboard
+ if (process.platform === 'win32') {
+ // Windows: Use PowerShell to read image from clipboard
+ try {
+ const psScript = `Add-Type -AssemblyName System.Windows.Forms; Add-Type -AssemblyName System.Drawing; $clipboard = [System.Windows.Forms.Clipboard]::GetImage(); if ($clipboard -ne $null) { $ms = New-Object System.IO.MemoryStream; $clipboard.Save($ms, [System.Drawing.Imaging.ImageFormat]::Png); $bytes = $ms.ToArray(); $ms.Close(); [Convert]::ToBase64String($bytes) }`;
+ const base64 = execSync(`powershell -Command "${psScript}"`, {
+ encoding: 'utf-8',
+ timeout: 5000,
+ }).trim();
+ if (base64 && base64.length > 100) {
+ const dataUrl = `data:image/png;base64,${base64}`;
+ buffer.insertImage(dataUrl, 'image/png');
+ const text = buffer.getFullText();
+ const cursorPos = buffer.getCursorPosition();
+ updateCommandPanelState(text);
+ updateFilePickerState(text, cursorPos);
+ triggerUpdate();
+ return;
+ }
+ }
+ catch (imgError) {
+ // No image in clipboard or error, fall through to text
+ }
+ }
+ else if (process.platform === 'darwin') {
+ // macOS: Use osascript to read image from clipboard
+ try {
+ // First check if there's an image in clipboard
+ const checkScript = `osascript -e 'try
+ set imgData to the clipboard as «class PNGf»
+ return "hasImage"
+ on error
+ return "noImage"
+ end try'`;
+ const hasImage = execSync(checkScript, {
+ encoding: 'utf-8',
+ timeout: 2000,
+ }).trim();
+ if (hasImage === 'hasImage') {
+ // Save clipboard image to temporary file and read it
+ const tmpFile = `/tmp/snow_clipboard_${Date.now()}.png`;
+ const saveScript = `osascript -e 'set imgData to the clipboard as «class PNGf»' -e 'set fileRef to open for access POSIX file "${tmpFile}" with write permission' -e 'write imgData to fileRef' -e 'close access fileRef'`;
+ execSync(saveScript, {
+ encoding: 'utf-8',
+ timeout: 3000,
+ });
+ // Read the file as base64
+ const base64 = execSync(`base64 -i "${tmpFile}"`, {
+ encoding: 'utf-8',
+ timeout: 2000,
+ }).trim();
+ // Clean up temp file
+ try {
+ execSync(`rm "${tmpFile}"`, { timeout: 1000 });
+ }
+ catch (e) {
+ // Ignore cleanup errors
+ }
+ if (base64 && base64.length > 100) {
+ const dataUrl = `data:image/png;base64,${base64}`;
+ buffer.insertImage(dataUrl, 'image/png');
+ const text = buffer.getFullText();
+ const cursorPos = buffer.getCursorPosition();
+ updateCommandPanelState(text);
+ updateFilePickerState(text, cursorPos);
+ triggerUpdate();
+ return;
+ }
+ }
+ }
+ catch (imgError) {
+ // No image in clipboard or error, fall through to text
+ console.error('Failed to read image from macOS clipboard:', imgError);
+ }
+ }
+ // If no image, try to read text from clipboard
+ try {
+ let clipboardText = '';
+ if (process.platform === 'win32') {
+ clipboardText = execSync('powershell -Command "Get-Clipboard"', {
+ encoding: 'utf-8',
+ timeout: 2000,
+ }).trim();
+ }
+ else if (process.platform === 'darwin') {
+ clipboardText = execSync('pbpaste', {
+ encoding: 'utf-8',
+ timeout: 2000,
+ }).trim();
+ }
+ else {
+ clipboardText = execSync('xclip -selection clipboard -o', {
+ encoding: 'utf-8',
+ timeout: 2000,
+ }).trim();
+ }
+ if (clipboardText) {
+ buffer.insert(clipboardText);
+ const fullText = buffer.getFullText();
+ const cursorPos = buffer.getCursorPosition();
+ updateCommandPanelState(fullText);
+ updateFilePickerState(fullText, cursorPos);
+ triggerUpdate();
+ }
+ }
+ catch (textError) {
+ console.error('Failed to read text from clipboard:', textError);
+ }
+ }
+ catch (error) {
+ console.error('Failed to read from clipboard:', error);
+ }
+ }, [buffer, updateCommandPanelState, updateFilePickerState, triggerUpdate]);
+ return { pasteFromClipboard };
+ }
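
How the hook is presumably wired into the input layer; only the hook's signature is taken from the files above, the host component and the Ctrl+V binding are assumptions.

    import { useInput } from 'ink';
    import { useClipboard } from './hooks/useClipboard.js';
    import { TextBuffer } from './utils/textBuffer.js';

    // Hypothetical host component; the prop names mirror the hook's parameters.
    function PasteAwareInput(props: {
        buffer: TextBuffer;
        updateCommandPanelState: (text: string) => void;
        updateFilePickerState: (text: string, cursorPos: number) => void;
        triggerUpdate: () => void;
    }) {
        const { pasteFromClipboard } = useClipboard(
            props.buffer, props.updateCommandPanelState, props.updateFilePickerState, props.triggerUpdate);
        useInput((input, key) => {
            if (key.ctrl && input === 'v') {
                void pasteFromClipboard(); // falls back to plain text when no image is on the clipboard
            }
        });
        return null; // rendering elided
    }
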
package/dist/hooks/useCommandHandler.d.ts ADDED
@@ -0,0 +1,26 @@
+ import type { Message } from '../ui/components/MessageList.js';
+ import type { UsageInfo } from '../api/chat.js';
+ type CommandHandlerOptions = {
+ messages: Message[];
+ setMessages: React.Dispatch<React.SetStateAction<Message[]>>;
+ setRemountKey: React.Dispatch<React.SetStateAction<number>>;
+ clearSavedMessages: () => void;
+ setIsCompressing: React.Dispatch<React.SetStateAction<boolean>>;
+ setCompressionError: React.Dispatch<React.SetStateAction<string | null>>;
+ setShowSessionPanel: React.Dispatch<React.SetStateAction<boolean>>;
+ setShowMcpInfo: React.Dispatch<React.SetStateAction<boolean>>;
+ setShowMcpPanel: React.Dispatch<React.SetStateAction<boolean>>;
+ setMcpPanelKey: React.Dispatch<React.SetStateAction<number>>;
+ setYoloMode: React.Dispatch<React.SetStateAction<boolean>>;
+ setContextUsage: React.Dispatch<React.SetStateAction<UsageInfo | null>>;
+ setShouldIncludeSystemInfo: React.Dispatch<React.SetStateAction<boolean>>;
+ setVscodeConnectionStatus: React.Dispatch<React.SetStateAction<'disconnected' | 'connecting' | 'connected' | 'error'>>;
+ processMessage: (message: string, images?: Array<{
+ data: string;
+ mimeType: string;
+ }>, useBasicModel?: boolean, hideUserMessage?: boolean) => Promise<void>;
+ };
+ export declare function useCommandHandler(options: CommandHandlerOptions): {
+ handleCommandExecution: (commandName: string, result: any) => Promise<void>;
+ };
+ export {};
package/dist/hooks/useCommandHandler.js ADDED
@@ -0,0 +1,158 @@
+ import { useStdout } from 'ink';
+ import { useCallback } from 'react';
+ import { sessionManager } from '../utils/sessionManager.js';
+ import { compressContext } from '../utils/contextCompressor.js';
+ import { navigateTo } from './useGlobalNavigation.js';
+ export function useCommandHandler(options) {
+ const { stdout } = useStdout();
+ const handleCommandExecution = useCallback(async (commandName, result) => {
+ // Handle /compact command
+ if (commandName === 'compact' &&
+ result.success &&
+ result.action === 'compact') {
+ // Set compressing state (不添加命令面板消息)
+ options.setIsCompressing(true);
+ options.setCompressionError(null);
+ try {
+ // Convert messages to ChatMessage format for compression
+ const chatMessages = options.messages
+ .filter(msg => msg.role !== 'command')
+ .map(msg => ({
+ role: msg.role,
+ content: msg.content,
+ tool_call_id: msg.toolCallId,
+ }));
+ // Compress the context
+ const result = await compressContext(chatMessages);
+ // Replace all messages with a summary message (不包含 "Context Compressed" 标题)
+ const summaryMessage = {
+ role: 'assistant',
+ content: result.summary,
+ streaming: false,
+ };
+ // Clear session and set new compressed state
+ sessionManager.clearCurrentSession();
+ options.clearSavedMessages();
+ options.setMessages([summaryMessage]);
+ options.setRemountKey(prev => prev + 1);
+ // Reset system info flag to include in next message
+ options.setShouldIncludeSystemInfo(true);
+ // Update token usage with compression result
+ options.setContextUsage({
+ prompt_tokens: result.usage.prompt_tokens,
+ completion_tokens: result.usage.completion_tokens,
+ total_tokens: result.usage.total_tokens,
+ });
+ }
+ catch (error) {
+ // Show error message
+ const errorMsg = error instanceof Error ? error.message : 'Unknown compression error';
+ options.setCompressionError(errorMsg);
+ const errorMessage = {
+ role: 'assistant',
+ content: `**Compression Failed**\n\n${errorMsg}`,
+ streaming: false,
+ };
+ options.setMessages(prev => [...prev, errorMessage]);
+ }
+ finally {
+ options.setIsCompressing(false);
+ }
+ return;
+ }
+ // Handle /ide command
+ if (commandName === 'ide') {
+ if (result.success) {
+ options.setVscodeConnectionStatus('connecting');
+ // Add command execution feedback
+ const commandMessage = {
+ role: 'command',
+ content: '',
+ commandName: commandName,
+ };
+ options.setMessages(prev => [...prev, commandMessage]);
+ }
+ else {
+ options.setVscodeConnectionStatus('error');
+ }
+ return;
+ }
+ if (result.success && result.action === 'clear') {
+ if (stdout && typeof stdout.write === 'function') {
+ stdout.write('\x1B[3J\x1B[2J\x1B[H');
+ }
+ // Clear current session and start new one
+ sessionManager.clearCurrentSession();
+ options.clearSavedMessages();
+ options.setMessages([]);
+ options.setRemountKey(prev => prev + 1);
+ // Reset context usage (token statistics)
+ options.setContextUsage(null);
+ // Reset system info flag to include in next message
+ options.setShouldIncludeSystemInfo(true);
+ // Note: yoloMode is preserved via localStorage (lines 68-76, 104-111)
+ // Note: VSCode connection is preserved and managed by vscodeConnection utility
+ // Add command execution feedback
+ const commandMessage = {
+ role: 'command',
+ content: '',
+ commandName: commandName,
+ };
+ options.setMessages([commandMessage]);
+ }
+ else if (result.success && result.action === 'showSessionPanel') {
+ options.setShowSessionPanel(true);
+ const commandMessage = {
+ role: 'command',
+ content: '',
+ commandName: commandName,
+ };
+ options.setMessages(prev => [...prev, commandMessage]);
+ }
+ else if (result.success && result.action === 'showMcpInfo') {
+ options.setShowMcpInfo(true);
+ options.setMcpPanelKey(prev => prev + 1);
+ const commandMessage = {
+ role: 'command',
+ content: '',
+ commandName: commandName,
+ };
+ options.setMessages(prev => [...prev, commandMessage]);
+ }
+ else if (result.success && result.action === 'showMcpPanel') {
+ options.setShowMcpPanel(true);
+ const commandMessage = {
+ role: 'command',
+ content: '',
+ commandName: commandName,
+ };
+ options.setMessages(prev => [...prev, commandMessage]);
+ }
+ else if (result.success && result.action === 'goHome') {
+ navigateTo('welcome');
+ }
+ else if (result.success && result.action === 'toggleYolo') {
+ options.setYoloMode(prev => !prev);
+ const commandMessage = {
+ role: 'command',
+ content: '',
+ commandName: commandName,
+ };
+ options.setMessages(prev => [...prev, commandMessage]);
+ }
+ else if (result.success &&
+ result.action === 'initProject' &&
+ result.prompt) {
+ // Add command execution feedback
+ const commandMessage = {
+ role: 'command',
+ content: '',
+ commandName: commandName,
+ };
+ options.setMessages(prev => [...prev, commandMessage]);
+ // Auto-send the prompt using basicModel, hide the prompt from UI
+ options.processMessage(result.prompt, undefined, true, true);
+ }
+ }, [stdout, options]);
+ return { handleCommandExecution };
+ }
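
A sketch of how handleCommandExecution is presumably invoked once a slash command has been parsed; the result objects below are illustrative, only the (commandName, result) shape and the action strings come from the hook above.

    // Hypothetical call sites inside ChatScreen.
    declare const handleCommandExecution: (commandName: string, result: any) => Promise<void>;

    await handleCommandExecution('clear', { success: true, action: 'clear' });     // wipe screen and session
    await handleCommandExecution('compact', { success: true, action: 'compact' }); // summarize context
    await handleCommandExecution('yolo', { success: true, action: 'toggleYolo' }); // toggle confirmation-free mode
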
package/dist/hooks/useCommandPanel.d.ts ADDED
@@ -0,0 +1,16 @@
+ import { TextBuffer } from '../utils/textBuffer.js';
+ export declare function useCommandPanel(buffer: TextBuffer): {
+ showCommands: boolean;
+ setShowCommands: import("react").Dispatch<import("react").SetStateAction<boolean>>;
+ commandSelectedIndex: number;
+ setCommandSelectedIndex: import("react").Dispatch<import("react").SetStateAction<number>>;
+ getFilteredCommands: () => {
+ name: string;
+ description: string;
+ }[];
+ updateCommandPanelState: (text: string) => void;
+ commands: {
+ name: string;
+ description: string;
+ }[];
+ };
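
And a guess at how the panel hook is driven from the input layer; only the returned API is taken from the declaration above, the trigger-on-"/" behaviour and the rendering are assumptions.

    // Hypothetical wiring: refresh panel state on every keystroke, render the filtered list while visible.
    // `buffer` (a TextBuffer) is assumed to be in scope inside the input component.
    const { showCommands, commandSelectedIndex, getFilteredCommands, updateCommandPanelState } =
        useCommandPanel(buffer);

    function onBufferChanged(): void {
        updateCommandPanelState(buffer.getFullText()); // presumably opens the panel when the line starts with '/'
    }

    function renderPanel(): string[] {
        if (!showCommands) return [];
        return getFilteredCommands().map((cmd, i) =>
            `${i === commandSelectedIndex ? '>' : ' '} /${cmd.name}  ${cmd.description}`);
    }
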