snow-ai 0.2.23 → 0.2.25

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (43)
  1. package/dist/agents/compactAgent.d.ts +55 -0
  2. package/dist/agents/compactAgent.js +301 -0
  3. package/dist/api/chat.d.ts +0 -8
  4. package/dist/api/chat.js +1 -144
  5. package/dist/api/responses.d.ts +0 -11
  6. package/dist/api/responses.js +1 -189
  7. package/dist/api/systemPrompt.d.ts +1 -1
  8. package/dist/api/systemPrompt.js +80 -206
  9. package/dist/app.d.ts +2 -1
  10. package/dist/app.js +11 -13
  11. package/dist/cli.js +23 -3
  12. package/dist/hooks/useConversation.js +51 -7
  13. package/dist/hooks/useGlobalNavigation.d.ts +1 -1
  14. package/dist/hooks/useKeyboardInput.js +14 -8
  15. package/dist/mcp/filesystem.d.ts +49 -6
  16. package/dist/mcp/filesystem.js +243 -86
  17. package/dist/mcp/websearch.d.ts +118 -0
  18. package/dist/mcp/websearch.js +451 -0
  19. package/dist/ui/components/ToolResultPreview.js +60 -1
  20. package/dist/ui/pages/ChatScreen.d.ts +4 -2
  21. package/dist/ui/pages/ChatScreen.js +62 -14
  22. package/dist/ui/pages/{ApiConfigScreen.d.ts → ConfigScreen.d.ts} +1 -1
  23. package/dist/ui/pages/ConfigScreen.js +549 -0
  24. package/dist/ui/pages/{ModelConfigScreen.d.ts → ProxyConfigScreen.d.ts} +1 -1
  25. package/dist/ui/pages/ProxyConfigScreen.js +143 -0
  26. package/dist/ui/pages/WelcomeScreen.js +15 -15
  27. package/dist/utils/apiConfig.d.ts +8 -2
  28. package/dist/utils/apiConfig.js +21 -0
  29. package/dist/utils/commandExecutor.d.ts +1 -1
  30. package/dist/utils/contextCompressor.js +363 -49
  31. package/dist/utils/mcpToolsManager.d.ts +1 -1
  32. package/dist/utils/mcpToolsManager.js +106 -6
  33. package/dist/utils/resourceMonitor.d.ts +65 -0
  34. package/dist/utils/resourceMonitor.js +175 -0
  35. package/dist/utils/retryUtils.js +6 -0
  36. package/dist/utils/sessionManager.d.ts +1 -0
  37. package/dist/utils/sessionManager.js +10 -0
  38. package/dist/utils/textBuffer.js +7 -2
  39. package/dist/utils/toolExecutor.d.ts +2 -2
  40. package/dist/utils/toolExecutor.js +4 -4
  41. package/package.json +5 -1
  42. package/dist/ui/pages/ApiConfigScreen.js +0 -161
  43. package/dist/ui/pages/ModelConfigScreen.js +0 -467
package/dist/agents/compactAgent.d.ts ADDED
@@ -0,0 +1,55 @@
+/**
+ * Compact Agent Service
+ *
+ * Provides lightweight AI agent capabilities using the basic model.
+ * This service operates independently from the main conversation flow
+ * but follows the EXACT same configuration and routing as the main flow:
+ * - API endpoint (baseUrl)
+ * - Authentication (apiKey)
+ * - Custom headers
+ * - Request method (chat, responses, gemini, anthropic)
+ * - Uses basicModel instead of advancedModel
+ *
+ * All requests go through streaming APIs and are intercepted to assemble
+ * the complete response, ensuring complete consistency with main flow.
+ *
+ * Use cases:
+ * - Content preprocessing for web pages
+ * - Information extraction from large documents
+ * - Quick analysis tasks that don't require the main model
+ */
+export declare class CompactAgent {
+    private modelName;
+    private requestMethod;
+    private initialized;
+    /**
+     * Initialize the compact agent with current configuration
+     * @returns true if initialized successfully, false otherwise
+     */
+    private initialize;
+    /**
+     * Check if compact agent is available
+     */
+    isAvailable(): Promise<boolean>;
+    /**
+     * Call the compact model with the same routing as main flow
+     * Uses streaming APIs and intercepts to assemble complete response
+     * This ensures 100% consistency with main flow routing
+     * @param messages - Chat messages
+     * @param abortSignal - Optional abort signal to cancel the request
+     * @param onTokenUpdate - Optional callback to update token count during streaming
+     */
+    private callCompactModel;
+    /**
+     * Extract key information from web page content based on user query
+     *
+     * @param content - Full web page content
+     * @param userQuery - User's original question/query
+     * @param url - URL of the web page (for context)
+     * @param abortSignal - Optional abort signal to cancel extraction
+     * @param onTokenUpdate - Optional callback to update token count during streaming
+     * @returns Extracted key information relevant to the query
+     */
+    extractWebPageContent(content: string, userQuery: string, url: string, abortSignal?: AbortSignal, onTokenUpdate?: (tokenCount: number) => void): Promise<string>;
+}
+export declare const compactAgent: CompactAgent;
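Usage note: the declaration file boils down to one public entry point, `extractWebPageContent`, on a shared singleton. A minimal caller sketch against these declarations — the deep-import path, the `summarizeForQuery` wrapper, and the logging callback are illustrative assumptions, not part of the package:

```ts
import { compactAgent } from 'snow-ai/dist/agents/compactAgent.js'; // assumed deep-import path

// Hypothetical helper: reduce a fetched page to what the user actually asked about.
async function summarizeForQuery(pageText: string, query: string, url: string): Promise<string> {
    // Falls back to the raw page when no basicModel is configured.
    if (!(await compactAgent.isAvailable())) {
        return pageText;
    }
    const controller = new AbortController();
    return compactAgent.extractWebPageContent(
        pageText,
        query,
        url,
        controller.signal,
        (tokenCount) => console.error(`~${tokenCount} tokens streamed so far`),
    );
}
```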
package/dist/agents/compactAgent.js ADDED
@@ -0,0 +1,301 @@
+import { getOpenAiConfig } from '../utils/apiConfig.js';
+import { logger } from '../utils/logger.js';
+import { createStreamingChatCompletion } from '../api/chat.js';
+import { createStreamingResponse } from '../api/responses.js';
+import { createStreamingGeminiCompletion } from '../api/gemini.js';
+import { createStreamingAnthropicCompletion } from '../api/anthropic.js';
+/**
+ * Compact Agent Service
+ *
+ * Provides lightweight AI agent capabilities using the basic model.
+ * This service operates independently from the main conversation flow
+ * but follows the EXACT same configuration and routing as the main flow:
+ * - API endpoint (baseUrl)
+ * - Authentication (apiKey)
+ * - Custom headers
+ * - Request method (chat, responses, gemini, anthropic)
+ * - Uses basicModel instead of advancedModel
+ *
+ * All requests go through streaming APIs and are intercepted to assemble
+ * the complete response, ensuring complete consistency with main flow.
+ *
+ * Use cases:
+ * - Content preprocessing for web pages
+ * - Information extraction from large documents
+ * - Quick analysis tasks that don't require the main model
+ */
+export class CompactAgent {
+    constructor() {
+        Object.defineProperty(this, "modelName", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: ''
+        });
+        Object.defineProperty(this, "requestMethod", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: 'chat'
+        });
+        Object.defineProperty(this, "initialized", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: false
+        });
+    }
+    /**
+     * Initialize the compact agent with current configuration
+     * @returns true if initialized successfully, false otherwise
+     */
+    async initialize() {
+        try {
+            const config = getOpenAiConfig();
+            // Check if basic model is configured
+            if (!config.basicModel) {
+                return false;
+            }
+            this.modelName = config.basicModel;
+            this.requestMethod = config.requestMethod; // Follow main flow's request method
+            this.initialized = true;
+            return true;
+        }
+        catch (error) {
+            logger.warn('Failed to initialize compact agent:', error);
+            return false;
+        }
+    }
+    /**
+     * Check if compact agent is available
+     */
+    async isAvailable() {
+        if (!this.initialized) {
+            return await this.initialize();
+        }
+        return true;
+    }
+    /**
+     * Call the compact model with the same routing as main flow
+     * Uses streaming APIs and intercepts to assemble complete response
+     * This ensures 100% consistency with main flow routing
+     * @param messages - Chat messages
+     * @param abortSignal - Optional abort signal to cancel the request
+     * @param onTokenUpdate - Optional callback to update token count during streaming
+     */
+    async callCompactModel(messages, abortSignal, onTokenUpdate) {
+        const config = getOpenAiConfig();
+        if (!config.basicModel) {
+            throw new Error('Basic model not configured');
+        }
+        // Temporarily override advancedModel with basicModel
+        const originalAdvancedModel = config.advancedModel;
+        try {
+            // Override config to use basicModel
+            config.advancedModel = config.basicModel;
+            let streamGenerator;
+            // Route to appropriate streaming API based on request method (follows main flow exactly)
+            switch (this.requestMethod) {
+                case 'anthropic':
+                    streamGenerator = createStreamingAnthropicCompletion({
+                        model: this.modelName,
+                        messages,
+                        max_tokens: 4096,
+                    }, abortSignal);
+                    break;
+                case 'gemini':
+                    streamGenerator = createStreamingGeminiCompletion({
+                        model: this.modelName,
+                        messages
+                    }, abortSignal);
+                    break;
+                case 'responses':
+                    streamGenerator = createStreamingResponse({
+                        model: this.modelName,
+                        messages,
+                        stream: true
+                    }, abortSignal);
+                    break;
+                case 'chat':
+                default:
+                    streamGenerator = createStreamingChatCompletion({
+                        model: this.modelName,
+                        messages,
+                        stream: true
+                    }, abortSignal);
+                    break;
+            }
+            // Intercept streaming response and assemble complete content
+            let completeContent = '';
+            let chunkCount = 0;
+            // Initialize token encoder for token counting
+            let encoder;
+            try {
+                const { encoding_for_model } = await import('tiktoken');
+                encoder = encoding_for_model('gpt-5');
+            }
+            catch (e) {
+                const { encoding_for_model } = await import('tiktoken');
+                encoder = encoding_for_model('gpt-5');
+            }
+            try {
+                for await (const chunk of streamGenerator) {
+                    chunkCount++;
+                    // Check abort signal
+                    if (abortSignal?.aborted) {
+                        throw new Error('Request aborted');
+                    }
+                    // Handle different chunk formats based on request method
+                    if (this.requestMethod === 'chat') {
+                        // Chat API uses standard OpenAI format: {choices: [{delta: {content}}]}
+                        if (chunk.choices && chunk.choices[0]?.delta?.content) {
+                            completeContent += chunk.choices[0].delta.content;
+                            // Update token count if callback provided
+                            if (onTokenUpdate && encoder) {
+                                try {
+                                    const tokens = encoder.encode(completeContent);
+                                    onTokenUpdate(tokens.length);
+                                }
+                                catch (e) {
+                                    // Ignore encoding errors
+                                }
+                            }
+                        }
+                    }
+                    else {
+                        // Responses, Gemini, and Anthropic APIs all use: {type: 'content', content: string}
+                        if (chunk.type === 'content' && chunk.content) {
+                            completeContent += chunk.content;
+                            // Update token count if callback provided
+                            if (onTokenUpdate && encoder) {
+                                try {
+                                    const tokens = encoder.encode(completeContent);
+                                    onTokenUpdate(tokens.length);
+                                }
+                                catch (e) {
+                                    // Ignore encoding errors
+                                }
+                            }
+                        }
+                    }
+                }
+            }
+            catch (streamError) {
+                // Log streaming error with details
+                if (streamError instanceof Error) {
+                    logger.error('Compact agent: Streaming error:', {
+                        error: streamError.message,
+                        stack: streamError.stack,
+                        name: streamError.name,
+                        chunkCount,
+                        contentLength: completeContent.length
+                    });
+                }
+                else {
+                    logger.error('Compact agent: Unknown streaming error:', {
+                        error: streamError,
+                        chunkCount,
+                        contentLength: completeContent.length
+                    });
+                }
+                throw streamError;
+            }
+            finally {
+                // Free encoder
+                if (encoder) {
+                    encoder.free();
+                }
+            }
+            return completeContent;
+        }
+        catch (error) {
+            // Log detailed error from API call setup or streaming
+            if (error instanceof Error) {
+                logger.error('Compact agent: API call failed:', {
+                    error: error.message,
+                    stack: error.stack,
+                    name: error.name,
+                    requestMethod: this.requestMethod,
+                    modelName: this.modelName
+                });
+            }
+            else {
+                logger.error('Compact agent: Unknown API error:', {
+                    error,
+                    requestMethod: this.requestMethod,
+                    modelName: this.modelName
+                });
+            }
+            throw error;
+        }
+        finally {
+            // Restore original config
+            config.advancedModel = originalAdvancedModel;
+        }
+    }
+    /**
+     * Extract key information from web page content based on user query
+     *
+     * @param content - Full web page content
+     * @param userQuery - User's original question/query
+     * @param url - URL of the web page (for context)
+     * @param abortSignal - Optional abort signal to cancel extraction
+     * @param onTokenUpdate - Optional callback to update token count during streaming
+     * @returns Extracted key information relevant to the query
+     */
+    async extractWebPageContent(content, userQuery, url, abortSignal, onTokenUpdate) {
+        const available = await this.isAvailable();
+        if (!available) {
+            // If compact agent is not available, return original content
+            return content;
+        }
+        try {
+            const extractionPrompt = `You are a content extraction assistant. Your task is to extract and summarize the most relevant information from a web page based on the user's query.
+
+User's Query: ${userQuery}
+
+Web Page URL: ${url}
+
+Web Page Content:
+${content}
+
+Instructions:
+1. Extract ONLY the information that is directly relevant to the user's query
+2. Preserve important details, facts, code examples, and key points
+3. Remove navigation, ads, irrelevant sections, and boilerplate text
+4. Organize the information in a clear, structured format
+5. If there are multiple relevant sections, separate them clearly
+6. Keep technical terms and specific details intact
+
+Provide the extracted content below:`;
+            const messages = [
+                {
+                    role: 'user',
+                    content: extractionPrompt,
+                },
+            ];
+            const extractedContent = await this.callCompactModel(messages, abortSignal, onTokenUpdate);
+            if (!extractedContent || extractedContent.trim().length === 0) {
+                logger.warn('Compact agent returned empty response, using original content');
+                return content;
+            }
+            return extractedContent;
+        }
+        catch (error) {
+            // Log detailed error information
+            if (error instanceof Error) {
+                logger.warn('Compact agent extraction failed, using original content:', {
+                    error: error.message,
+                    stack: error.stack,
+                    name: error.name
+                });
+            }
+            else {
+                logger.warn('Compact agent extraction failed with unknown error:', error);
+            }
+            return content;
+        }
+    }
+}
+// Export singleton instance
+export const compactAgent = new CompactAgent();
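Editor's note: stripped of the configuration juggling and logging, the interception pattern in `callCompactModel` reduces to draining a streaming generator into one string. A generic sketch of just that core (the `AnyChunk` shape and the `assemble` name are stand-ins inferred from the branches above, not package exports):

```ts
// Chunk shapes observed above: OpenAI-style for 'chat', {type, content} for the rest.
interface AnyChunk {
    choices?: Array<{ delta?: { content?: string } }>;
    type?: string;
    content?: string;
}

// Drain a streaming generator into a complete string, honoring an abort signal.
async function assemble(stream: AsyncIterable<AnyChunk>, signal?: AbortSignal): Promise<string> {
    let full = '';
    for await (const chunk of stream) {
        if (signal?.aborted) {
            throw new Error('Request aborted');
        }
        full += chunk.choices?.[0]?.delta?.content
            ?? (chunk.type === 'content' ? chunk.content ?? '' : '');
    }
    return full;
}
```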
package/dist/api/chat.d.ts CHANGED
@@ -57,14 +57,6 @@ export interface ChatCompletionChunk {
     }>;
 }
 export declare function resetOpenAIClient(): void;
-/**
- * Create chat completion with automatic function calling support
- */
-export declare function createChatCompletionWithTools(options: ChatCompletionOptions, maxToolRounds?: number, abortSignal?: AbortSignal, onRetry?: (error: Error, attempt: number, nextDelay: number) => void): Promise<{
-    content: string;
-    toolCalls: ToolCall[];
-}>;
-export declare function createChatCompletion(options: ChatCompletionOptions, abortSignal?: AbortSignal, onRetry?: (error: Error, attempt: number, nextDelay: number) => void): Promise<string>;
 export interface UsageInfo {
     prompt_tokens: number;
     completion_tokens: number;
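With the non-streaming `createChatCompletion` and `createChatCompletionWithTools` gone from this surface, a caller that still wants a complete string goes through `createStreamingChatCompletion` and joins the deltas itself — the same move the new compact agent makes. A hedged migration sketch (the import path, `ChatMessage` shape, and `completeOnce` wrapper are assumptions; the streaming call and chunk shape come from this release's own code):

```ts
import { createStreamingChatCompletion } from 'snow-ai/dist/api/chat.js'; // assumed deep-import path

type ChatMessage = { role: 'system' | 'user' | 'assistant'; content: string }; // assumed shape

// Drop-in replacement for the removed non-streaming call: stream, then join.
async function completeOnce(model: string, messages: ChatMessage[], signal?: AbortSignal): Promise<string> {
    let text = '';
    for await (const chunk of createStreamingChatCompletion({ model, messages, stream: true }, signal)) {
        text += chunk.choices?.[0]?.delta?.content ?? '';
    }
    return text;
}
```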
package/dist/api/chat.js CHANGED
@@ -1,8 +1,7 @@
 import OpenAI from 'openai';
 import { getOpenAiConfig, getCustomSystemPrompt, getCustomHeaders } from '../utils/apiConfig.js';
-import { executeMCPTool } from '../utils/mcpToolsManager.js';
 import { SYSTEM_PROMPT } from './systemPrompt.js';
-import { withRetry, withRetryGenerator } from '../utils/retryUtils.js';
+import { withRetryGenerator } from '../utils/retryUtils.js';
 /**
  * Convert our ChatMessage format to OpenAI's ChatCompletionMessageParam format
  * Automatically prepends system prompt if not present
@@ -112,148 +111,6 @@ function getOpenAIClient() {
 export function resetOpenAIClient() {
     openaiClient = null;
 }
-/**
- * Create chat completion with automatic function calling support
- */
-export async function createChatCompletionWithTools(options, maxToolRounds = 5, abortSignal, onRetry) {
-    const client = getOpenAIClient();
-    let messages = [...options.messages];
-    let allToolCalls = [];
-    let rounds = 0;
-    try {
-        while (rounds < maxToolRounds) {
-            const response = await withRetry(() => client.chat.completions.create({
-                model: options.model,
-                messages: convertToOpenAIMessages(messages),
-                stream: false,
-                temperature: options.temperature || 0.7,
-                max_tokens: options.max_tokens,
-                tools: options.tools,
-                tool_choice: options.tool_choice,
-            }), {
-                abortSignal,
-                onRetry
-            });
-            const message = response.choices[0]?.message;
-            if (!message) {
-                throw new Error('No response from AI');
-            }
-            // Add assistant message to conversation
-            messages.push({
-                role: 'assistant',
-                content: message.content || '',
-                tool_calls: message.tool_calls
-            });
-            // Check if AI wants to call tools
-            if (message.tool_calls && message.tool_calls.length > 0) {
-                allToolCalls.push(...message.tool_calls);
-                // Execute each tool call
-                for (const toolCall of message.tool_calls) {
-                    if (toolCall.type === 'function') {
-                        try {
-                            const args = JSON.parse(toolCall.function.arguments);
-                            const result = await executeMCPTool(toolCall.function.name, args);
-                            // Add tool result to conversation
-                            messages.push({
-                                role: 'tool',
-                                content: JSON.stringify(result),
-                                tool_call_id: toolCall.id
-                            });
-                        }
-                        catch (error) {
-                            // Add error result to conversation
-                            messages.push({
-                                role: 'tool',
-                                content: `Error: ${error instanceof Error ? error.message : 'Tool execution failed'}`,
-                                tool_call_id: toolCall.id
-                            });
-                        }
-                    }
-                }
-                rounds++;
-                continue;
-            }
-            // No tool calls, return the content
-            return {
-                content: message.content || '',
-                toolCalls: allToolCalls
-            };
-        }
-        throw new Error(`Maximum tool calling rounds (${maxToolRounds}) exceeded`);
-    }
-    catch (error) {
-        if (error instanceof Error) {
-            throw new Error(`Chat completion with tools failed: ${error.message}`);
-        }
-        throw new Error('Chat completion with tools failed: Unknown error');
-    }
-}
-export async function createChatCompletion(options, abortSignal, onRetry) {
-    const client = getOpenAIClient();
-    let messages = [...options.messages];
-    try {
-        while (true) {
-            const response = await withRetry(() => client.chat.completions.create({
-                model: options.model,
-                messages: convertToOpenAIMessages(messages),
-                stream: false,
-                temperature: options.temperature || 0.7,
-                max_tokens: options.max_tokens,
-                tools: options.tools,
-                tool_choice: options.tool_choice,
-            }), {
-                abortSignal,
-                onRetry
-            });
-            const message = response.choices[0]?.message;
-            if (!message) {
-                throw new Error('No response from AI');
-            }
-            // Add assistant message to conversation
-            messages.push({
-                role: 'assistant',
-                content: message.content || '',
-                tool_calls: message.tool_calls
-            });
-            // Check if AI wants to call tools
-            if (message.tool_calls && message.tool_calls.length > 0) {
-                // Execute each tool call
-                for (const toolCall of message.tool_calls) {
-                    if (toolCall.type === 'function') {
-                        try {
-                            const args = JSON.parse(toolCall.function.arguments);
-                            const result = await executeMCPTool(toolCall.function.name, args);
-                            // Add tool result to conversation
-                            messages.push({
-                                role: 'tool',
-                                content: JSON.stringify(result),
-                                tool_call_id: toolCall.id
-                            });
-                        }
-                        catch (error) {
-                            // Add error result to conversation
-                            messages.push({
-                                role: 'tool',
-                                content: `Error: ${error instanceof Error ? error.message : 'Tool execution failed'}`,
-                                tool_call_id: toolCall.id
-                            });
-                        }
-                    }
-                }
-                // Continue the conversation with tool results
-                continue;
-            }
-            // No tool calls, return the content
-            return message.content || '';
-        }
-    }
-    catch (error) {
-        if (error instanceof Error) {
-            throw new Error(`Chat completion failed: ${error.message}`);
-        }
-        throw new Error('Chat completion failed: Unknown error');
-    }
-}
 /**
  * Simple streaming chat completion - only handles OpenAI interaction
  * Tool execution should be handled by the caller
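The two deleted functions were the inline tool loop: call the model, run each requested `executeMCPTool`, push a `role: 'tool'` result, and go around again. Per the surviving comment, that responsibility now sits with the caller. A compressed sketch of one caller-side round, lifted from the removed code (the `runToolCalls` wrapper, the loose `any[]` types, and the import path are illustrative):

```ts
import { executeMCPTool } from 'snow-ai/dist/utils/mcpToolsManager.js'; // assumed deep-import path

// Execute one batch of tool calls and append their results to the conversation,
// mirroring the loop that used to live inside createChatCompletionWithTools.
async function runToolCalls(toolCalls: any[], messages: any[]): Promise<void> {
    for (const toolCall of toolCalls) {
        if (toolCall.type !== 'function') continue;
        try {
            const args = JSON.parse(toolCall.function.arguments);
            const result = await executeMCPTool(toolCall.function.name, args);
            messages.push({ role: 'tool', content: JSON.stringify(result), tool_call_id: toolCall.id });
        } catch (error) {
            messages.push({
                role: 'tool',
                content: `Error: ${error instanceof Error ? error.message : 'Tool execution failed'}`,
                tool_call_id: toolCall.id,
            });
        }
    }
}
```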
package/dist/api/responses.d.ts CHANGED
@@ -38,18 +38,7 @@ export interface ResponseStreamChunk {
     usage?: UsageInfo;
 }
 export declare function resetOpenAIClient(): void;
-/**
- * Create a response via the Responses API (non-streaming, with automatic tool calling)
- */
-export declare function createResponse(options: ResponseOptions, abortSignal?: AbortSignal, onRetry?: (error: Error, attempt: number, nextDelay: number) => void): Promise<string>;
 /**
  * Create a streaming response via the Responses API (with automatic tool calling)
  */
 export declare function createStreamingResponse(options: ResponseOptions, abortSignal?: AbortSignal, onRetry?: (error: Error, attempt: number, nextDelay: number) => void): AsyncGenerator<ResponseStreamChunk, void, unknown>;
-/**
- * Create a response via the Responses API (with a limit on tool-calling rounds)
- */
-export declare function createResponseWithTools(options: ResponseOptions, maxToolRounds?: number, abortSignal?: AbortSignal, onRetry?: (error: Error, attempt: number, nextDelay: number) => void): Promise<{
-    content: string;
-    toolCalls: ToolCall[];
-}>;
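Here too only the streaming variant survives. Given the `{ type: 'content', content }` chunk shape this release relies on in `compactAgent.js`, a consumer sketch (the import path and the `respondOnce` wrapper are assumptions):

```ts
import { createStreamingResponse } from 'snow-ai/dist/api/responses.js'; // assumed deep-import path

// Collect a Responses-API stream into a single string.
async function respondOnce(model: string, messages: Array<{ role: string; content: string }>): Promise<string> {
    let text = '';
    for await (const chunk of createStreamingResponse({ model, messages, stream: true })) {
        if (chunk.type === 'content' && chunk.content) {
            text += chunk.content;
        }
    }
    return text;
}
```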