snow-ai 0.3.6 → 0.3.7

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (101)
  1. package/dist/agents/reviewAgent.d.ts +50 -0
  2. package/dist/agents/reviewAgent.js +264 -0
  3. package/dist/api/anthropic.js +104 -71
  4. package/dist/api/chat.d.ts +1 -1
  5. package/dist/api/chat.js +60 -41
  6. package/dist/api/gemini.js +97 -57
  7. package/dist/api/responses.d.ts +9 -1
  8. package/dist/api/responses.js +110 -70
  9. package/dist/api/systemPrompt.d.ts +1 -1
  10. package/dist/api/systemPrompt.js +36 -7
  11. package/dist/api/types.d.ts +8 -0
  12. package/dist/hooks/useCommandHandler.d.ts +1 -0
  13. package/dist/hooks/useCommandHandler.js +44 -1
  14. package/dist/hooks/useCommandPanel.js +13 -0
  15. package/dist/hooks/useConversation.d.ts +4 -1
  16. package/dist/hooks/useConversation.js +48 -6
  17. package/dist/hooks/useKeyboardInput.js +19 -0
  18. package/dist/hooks/useTerminalFocus.js +13 -3
  19. package/dist/mcp/aceCodeSearch.d.ts +2 -76
  20. package/dist/mcp/aceCodeSearch.js +31 -467
  21. package/dist/mcp/bash.d.ts +1 -8
  22. package/dist/mcp/bash.js +20 -40
  23. package/dist/mcp/filesystem.d.ts +3 -68
  24. package/dist/mcp/filesystem.js +32 -348
  25. package/dist/mcp/ideDiagnostics.js +2 -4
  26. package/dist/mcp/todo.d.ts +1 -17
  27. package/dist/mcp/todo.js +11 -15
  28. package/dist/mcp/types/aceCodeSearch.types.d.ts +92 -0
  29. package/dist/mcp/types/aceCodeSearch.types.js +4 -0
  30. package/dist/mcp/types/bash.types.d.ts +13 -0
  31. package/dist/mcp/types/bash.types.js +4 -0
  32. package/dist/mcp/types/filesystem.types.d.ts +44 -0
  33. package/dist/mcp/types/filesystem.types.js +4 -0
  34. package/dist/mcp/types/todo.types.d.ts +27 -0
  35. package/dist/mcp/types/todo.types.js +4 -0
  36. package/dist/mcp/types/websearch.types.d.ts +30 -0
  37. package/dist/mcp/types/websearch.types.js +4 -0
  38. package/dist/mcp/utils/aceCodeSearch/filesystem.utils.d.ts +34 -0
  39. package/dist/mcp/utils/aceCodeSearch/filesystem.utils.js +146 -0
  40. package/dist/mcp/utils/aceCodeSearch/language.utils.d.ts +14 -0
  41. package/dist/mcp/utils/aceCodeSearch/language.utils.js +99 -0
  42. package/dist/mcp/utils/aceCodeSearch/search.utils.d.ts +31 -0
  43. package/dist/mcp/utils/aceCodeSearch/search.utils.js +136 -0
  44. package/dist/mcp/utils/aceCodeSearch/symbol.utils.d.ts +20 -0
  45. package/dist/mcp/utils/aceCodeSearch/symbol.utils.js +141 -0
  46. package/dist/mcp/utils/bash/security.utils.d.ts +20 -0
  47. package/dist/mcp/utils/bash/security.utils.js +34 -0
  48. package/dist/mcp/utils/filesystem/code-analysis.utils.d.ts +18 -0
  49. package/dist/mcp/utils/filesystem/code-analysis.utils.js +165 -0
  50. package/dist/mcp/utils/filesystem/match-finder.utils.d.ts +16 -0
  51. package/dist/mcp/utils/filesystem/match-finder.utils.js +85 -0
  52. package/dist/mcp/utils/filesystem/similarity.utils.d.ts +22 -0
  53. package/dist/mcp/utils/filesystem/similarity.utils.js +75 -0
  54. package/dist/mcp/utils/todo/date.utils.d.ts +9 -0
  55. package/dist/mcp/utils/todo/date.utils.js +14 -0
  56. package/dist/mcp/utils/websearch/browser.utils.d.ts +8 -0
  57. package/dist/mcp/utils/websearch/browser.utils.js +58 -0
  58. package/dist/mcp/utils/websearch/text.utils.d.ts +16 -0
  59. package/dist/mcp/utils/websearch/text.utils.js +39 -0
  60. package/dist/mcp/websearch.d.ts +1 -31
  61. package/dist/mcp/websearch.js +21 -97
  62. package/dist/ui/components/ChatInput.d.ts +2 -1
  63. package/dist/ui/components/ChatInput.js +10 -3
  64. package/dist/ui/components/MarkdownRenderer.d.ts +1 -2
  65. package/dist/ui/components/MarkdownRenderer.js +16 -153
  66. package/dist/ui/components/MessageList.js +4 -4
  67. package/dist/ui/components/SessionListScreen.js +37 -17
  68. package/dist/ui/components/ToolResultPreview.js +6 -6
  69. package/dist/ui/components/UsagePanel.d.ts +2 -0
  70. package/dist/ui/components/UsagePanel.js +360 -0
  71. package/dist/ui/pages/ChatScreen.d.ts +4 -0
  72. package/dist/ui/pages/ChatScreen.js +70 -30
  73. package/dist/ui/pages/ConfigScreen.js +23 -19
  74. package/dist/ui/pages/HeadlessModeScreen.js +2 -4
  75. package/dist/ui/pages/SubAgentConfigScreen.js +17 -17
  76. package/dist/ui/pages/SystemPromptConfigScreen.js +7 -6
  77. package/dist/utils/commandExecutor.d.ts +3 -3
  78. package/dist/utils/commandExecutor.js +4 -4
  79. package/dist/utils/commands/home.d.ts +2 -0
  80. package/dist/utils/commands/home.js +12 -0
  81. package/dist/utils/commands/review.d.ts +2 -0
  82. package/dist/utils/commands/review.js +81 -0
  83. package/dist/utils/commands/role.d.ts +2 -0
  84. package/dist/utils/commands/role.js +37 -0
  85. package/dist/utils/commands/usage.d.ts +2 -0
  86. package/dist/utils/commands/usage.js +12 -0
  87. package/dist/utils/contextCompressor.js +99 -367
  88. package/dist/utils/fileUtils.js +3 -3
  89. package/dist/utils/mcpToolsManager.js +12 -12
  90. package/dist/utils/proxyUtils.d.ts +15 -0
  91. package/dist/utils/proxyUtils.js +50 -0
  92. package/dist/utils/retryUtils.d.ts +27 -0
  93. package/dist/utils/retryUtils.js +114 -2
  94. package/dist/utils/sessionManager.d.ts +2 -5
  95. package/dist/utils/sessionManager.js +16 -83
  96. package/dist/utils/terminal.js +4 -3
  97. package/dist/utils/usageLogger.d.ts +11 -0
  98. package/dist/utils/usageLogger.js +99 -0
  99. package/package.json +3 -7
  100. package/dist/agents/summaryAgent.d.ts +0 -31
  101. package/dist/agents/summaryAgent.js +0 -256
package/dist/api/chat.d.ts CHANGED
@@ -1,5 +1,5 @@
  import type { ChatMessage, ChatCompletionTool, ToolCall, UsageInfo, ImageContent } from './types.js';
- export type { ChatMessage, ChatCompletionTool, ToolCall, UsageInfo, ImageContent };
+ export type { ChatMessage, ChatCompletionTool, ToolCall, UsageInfo, ImageContent, };
  export interface ChatCompletionOptions {
  model: string;
  messages: ChatMessage[];
package/dist/api/chat.js CHANGED
@@ -1,6 +1,8 @@
- import { getOpenAiConfig, getCustomSystemPrompt, getCustomHeaders } from '../utils/apiConfig.js';
- import { SYSTEM_PROMPT } from './systemPrompt.js';
- import { withRetryGenerator } from '../utils/retryUtils.js';
+ import { getOpenAiConfig, getCustomSystemPrompt, getCustomHeaders, } from '../utils/apiConfig.js';
+ import { getSystemPrompt } from './systemPrompt.js';
+ import { withRetryGenerator, parseJsonWithFix } from '../utils/retryUtils.js';
+ import { addProxyToFetchOptions } from '../utils/proxyUtils.js';
+ import { saveUsageToFile } from '../utils/usageLogger.js';
  /**
  * Convert our ChatMessage format to OpenAI's ChatCompletionMessageParam format
  * Automatically prepends system prompt if not present
@@ -18,7 +20,7 @@ function convertToOpenAIMessages(messages, includeSystemPrompt = true) {
  if (msg.content) {
  contentParts.push({
  type: 'text',
- text: msg.content
+ text: msg.content,
  });
  }
  // Add image content
@@ -26,30 +28,30 @@ function convertToOpenAIMessages(messages, includeSystemPrompt = true) {
  contentParts.push({
  type: 'image_url',
  image_url: {
- url: image.data // Base64 data URL
- }
+ url: image.data, // Base64 data URL
+ },
  });
  }
  return {
  role: 'user',
- content: contentParts
+ content: contentParts,
  };
  }
  const baseMessage = {
  role: msg.role,
- content: msg.content
+ content: msg.content,
  };
  if (msg.role === 'assistant' && msg.tool_calls) {
  return {
  ...baseMessage,
- tool_calls: msg.tool_calls
+ tool_calls: msg.tool_calls,
  };
  }
  if (msg.role === 'tool' && msg.tool_call_id) {
  return {
  role: 'tool',
  content: msg.content,
- tool_call_id: msg.tool_call_id
+ tool_call_id: msg.tool_call_id,
  };
  }
  return baseMessage;
@@ -66,13 +68,13 @@ function convertToOpenAIMessages(messages, includeSystemPrompt = true) {
  result = [
  {
  role: 'system',
- content: customSystemPrompt
+ content: customSystemPrompt,
  },
  {
  role: 'user',
- content: SYSTEM_PROMPT
+ content: getSystemPrompt(),
  },
- ...result
+ ...result,
  ];
  }
  else {
@@ -80,9 +82,9 @@ function convertToOpenAIMessages(messages, includeSystemPrompt = true) {
  result = [
  {
  role: 'system',
- content: SYSTEM_PROMPT
+ content: getSystemPrompt(),
  },
- ...result
+ ...result,
  ];
  }
  }
@@ -99,7 +101,7 @@ function getOpenAIConfig() {
  openaiConfig = {
  apiKey: config.apiKey,
  baseUrl: config.baseUrl,
- customHeaders
+ customHeaders,
  };
  }
  return openaiConfig;
@@ -124,16 +126,26 @@ async function* parseSSEStream(reader) {
  const trimmed = line.trim();
  if (!trimmed || trimmed.startsWith(':'))
  continue;
- if (trimmed === 'data: [DONE]') {
+ if (trimmed === 'data: [DONE]' || trimmed === 'data:[DONE]') {
  return;
  }
- if (trimmed.startsWith('data: ')) {
- const data = trimmed.slice(6);
- try {
- yield JSON.parse(data);
- }
- catch (e) {
- console.error('Failed to parse SSE data:', data);
+ // Handle both "event: " and "event:" formats
+ if (trimmed.startsWith('event:')) {
+ // Event type, will be followed by data
+ continue;
+ }
+ // Handle both "data: " and "data:" formats
+ if (trimmed.startsWith('data:')) {
+ const data = trimmed.startsWith('data: ')
+ ? trimmed.slice(6)
+ : trimmed.slice(5);
+ const parseResult = parseJsonWithFix(data, {
+ toolName: 'SSE stream',
+ logWarning: false,
+ logError: true,
+ });
+ if (parseResult.success) {
+ yield parseResult.data;
  }
  }
  }
@@ -157,16 +169,18 @@ export async function* createStreamingChatCompletion(options, abortSignal, onRet
  tools: options.tools,
  tool_choice: options.tool_choice,
  };
- const response = await fetch(`${config.baseUrl}/chat/completions`, {
+ const url = `${config.baseUrl}/chat/completions`;
+ const fetchOptions = addProxyToFetchOptions(url, {
  method: 'POST',
  headers: {
  'Content-Type': 'application/json',
  'Authorization': `Bearer ${config.apiKey}`,
- ...config.customHeaders
+ ...config.customHeaders,
  },
  body: JSON.stringify(requestBody),
- signal: abortSignal
+ signal: abortSignal,
  });
+ const response = await fetch(url, fetchOptions);
  if (!response.ok) {
  const errorText = await response.text();
  throw new Error(`OpenAI API error: ${response.status} ${response.statusText} - ${errorText}`);
@@ -191,7 +205,7 @@ export async function* createStreamingChatCompletion(options, abortSignal, onRet
  completion_tokens: usageValue.completion_tokens || 0,
  total_tokens: usageValue.total_tokens || 0,
  // OpenAI Chat API: cached_tokens in prompt_tokens_details
- cached_tokens: usageValue.prompt_tokens_details?.cached_tokens
+ cached_tokens: usageValue.prompt_tokens_details?.cached_tokens,
  };
  }
  // Skip content processing if no choices (but usage is already captured above)
@@ -205,7 +219,7 @@ export async function* createStreamingChatCompletion(options, abortSignal, onRet
  contentBuffer += content;
  yield {
  type: 'content',
- content
+ content,
  };
  }
  // Stream reasoning content (for o1 models, etc.)
@@ -216,12 +230,12 @@ export async function* createStreamingChatCompletion(options, abortSignal, onRet
  if (!reasoningStarted) {
  reasoningStarted = true;
  yield {
- type: 'reasoning_started'
+ type: 'reasoning_started',
  };
  }
  yield {
  type: 'reasoning_delta',
- delta: reasoningContent
+ delta: reasoningContent,
  };
  }
  // Accumulate tool calls and stream deltas
@@ -236,8 +250,8 @@ export async function* createStreamingChatCompletion(options, abortSignal, onRet
  type: 'function',
  function: {
  name: '',
- arguments: ''
- }
+ arguments: '',
+ },
  };
  }
  if (deltaCall.id) {
@@ -250,14 +264,15 @@ export async function* createStreamingChatCompletion(options, abortSignal, onRet
  deltaText += deltaCall.function.name;
  }
  if (deltaCall.function?.arguments) {
- toolCallsBuffer[index].function.arguments += deltaCall.function.arguments;
+ toolCallsBuffer[index].function.arguments +=
+ deltaCall.function.arguments;
  deltaText += deltaCall.function.arguments;
  }
  // Stream the delta to frontend for real-time token counting
  if (deltaText) {
  yield {
  type: 'tool_call_delta',
- delta: deltaText
+ delta: deltaText,
  };
  }
  }
@@ -270,23 +285,25 @@ export async function* createStreamingChatCompletion(options, abortSignal, onRet
  if (hasToolCalls) {
  yield {
  type: 'tool_calls',
- tool_calls: Object.values(toolCallsBuffer)
+ tool_calls: Object.values(toolCallsBuffer),
  };
  }
  // Yield usage information if available
  if (usageData) {
+ // Save usage to file system at API layer
+ saveUsageToFile(options.model, usageData);
  yield {
  type: 'usage',
- usage: usageData
+ usage: usageData,
  };
  }
  // Signal completion
  yield {
- type: 'done'
+ type: 'done',
  };
  }, {
  abortSignal,
- onRetry
+ onRetry,
  });
  }
  export function validateChatOptions(options) {
@@ -298,7 +315,8 @@ export function validateChatOptions(options) {
  errors.push('At least one message is required');
  }
  for (const message of options.messages || []) {
- if (!message.role || !['system', 'user', 'assistant', 'tool'].includes(message.role)) {
+ if (!message.role ||
+ !['system', 'user', 'assistant', 'tool'].includes(message.role)) {
  errors.push('Invalid message role');
  }
  // Tool messages must have tool_call_id
@@ -306,7 +324,8 @@ export function validateChatOptions(options) {
  errors.push('Tool messages must have tool_call_id');
  }
  // Content can be empty for tool calls
- if (message.role !== 'tool' && (!message.content || message.content.trim().length === 0)) {
+ if (message.role !== 'tool' &&
+ (!message.content || message.content.trim().length === 0)) {
  errors.push('Message content cannot be empty (except for tool messages)');
  }
  }
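
Aside: the chat.js hunks above (and the gemini.js hunks below) switch to a more lenient SSE line parser that accepts "data:" with or without a trailing space, recognizes "data:[DONE]" as a terminator, and skips "event:" lines. A minimal standalone TypeScript sketch of that parsing rule follows; plain JSON.parse stands in for the package's parseJsonWithFix helper, whose { success, data } result shape is taken from the diff, and the function name is hypothetical.

// Sketch only: mirrors the SSE line handling shown in the diffs above.
function parseSseLine(line: string): unknown | undefined {
    const trimmed = line.trim();
    if (!trimmed || trimmed.startsWith(':'))
        return undefined; // keep-alive / comment line
    if (trimmed === 'data: [DONE]' || trimmed === 'data:[DONE]')
        return undefined; // stream terminator
    if (trimmed.startsWith('event:'))
        return undefined; // event name; the payload follows on a data line
    if (!trimmed.startsWith('data:'))
        return undefined;
    const payload = trimmed.startsWith('data: ') ? trimmed.slice(6) : trimmed.slice(5);
    try {
        return JSON.parse(payload); // stand-in for parseJsonWithFix
    }
    catch {
        return undefined; // malformed JSON chunks are dropped
    }
}
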
package/dist/api/gemini.js CHANGED
@@ -1,6 +1,8 @@
- import { getOpenAiConfig, getCustomSystemPrompt, getCustomHeaders } from '../utils/apiConfig.js';
- import { SYSTEM_PROMPT } from './systemPrompt.js';
- import { withRetryGenerator } from '../utils/retryUtils.js';
+ import { getOpenAiConfig, getCustomSystemPrompt, getCustomHeaders, } from '../utils/apiConfig.js';
+ import { getSystemPrompt } from './systemPrompt.js';
+ import { withRetryGenerator, parseJsonWithFix } from '../utils/retryUtils.js';
+ import { addProxyToFetchOptions } from '../utils/proxyUtils.js';
+ import { saveUsageToFile } from '../utils/usageLogger.js';
  let geminiConfig = null;
  function getGeminiConfig() {
  if (!geminiConfig) {
@@ -14,7 +16,7 @@ function getGeminiConfig() {
  baseUrl: config.baseUrl && config.baseUrl !== 'https://api.openai.com/v1'
  ? config.baseUrl
  : 'https://generativelanguage.googleapis.com/v1beta',
- customHeaders
+ customHeaders,
  };
  }
  return geminiConfig;
@@ -41,8 +43,8 @@ function convertToolsToGemini(tools) {
  parametersJsonSchema: {
  type: 'object',
  properties: params.properties || {},
- required: params.required || []
- }
+ required: params.required || [],
+ },
  };
  }
  throw new Error('Invalid tool format');
@@ -93,21 +95,28 @@ function convertToGeminiMessages(messages) {
  let contentToParse = msg.content;
  // Sometimes the content is double-encoded as JSON
  // First, try to parse it once
- try {
- const firstParse = JSON.parse(contentToParse);
+ const firstParseResult = parseJsonWithFix(contentToParse, {
+ toolName: 'Gemini tool response (first parse)',
+ logWarning: false,
+ logError: false,
+ });
+ if (firstParseResult.success &&
+ typeof firstParseResult.data === 'string') {
  // If it's a string, it might be double-encoded, try parsing again
- if (typeof firstParse === 'string') {
- contentToParse = firstParse;
- }
- }
- catch {
- // Not JSON, use as-is
+ contentToParse = firstParseResult.data;
  }
  // Now parse or wrap the final content
- try {
- const parsed = JSON.parse(contentToParse);
+ const finalParseResult = parseJsonWithFix(contentToParse, {
+ toolName: 'Gemini tool response (final parse)',
+ logWarning: false,
+ logError: false,
+ });
+ if (finalParseResult.success) {
+ const parsed = finalParseResult.data;
  // If parsed result is an object (not array, not null), use it directly
- if (typeof parsed === 'object' && parsed !== null && !Array.isArray(parsed)) {
+ if (typeof parsed === 'object' &&
+ parsed !== null &&
+ !Array.isArray(parsed)) {
  responseData = parsed;
  }
  else {
@@ -115,24 +124,28 @@ function convertToGeminiMessages(messages) {
  responseData = { content: parsed };
  }
  }
- catch {
+ else {
  // Not valid JSON, wrap the raw string
  responseData = { content: contentToParse };
  }
  }
  contents.push({
  role: 'user',
- parts: [{
+ parts: [
+ {
  functionResponse: {
  name: functionName,
- response: responseData
- }
- }]
+ response: responseData,
+ },
+ },
+ ],
  });
  continue;
  }
  // Handle tool calls in assistant messages
- if (msg.role === 'assistant' && msg.tool_calls && msg.tool_calls.length > 0) {
+ if (msg.role === 'assistant' &&
+ msg.tool_calls &&
+ msg.tool_calls.length > 0) {
  const parts = [];
  // Add text content if exists
  if (msg.content) {
@@ -140,16 +153,22 @@ function convertToGeminiMessages(messages) {
  }
  // Add function calls
  for (const toolCall of msg.tool_calls) {
+ const argsParseResult = parseJsonWithFix(toolCall.function.arguments, {
+ toolName: `Gemini function call: ${toolCall.function.name}`,
+ fallbackValue: {},
+ logWarning: true,
+ logError: true,
+ });
  parts.push({
  functionCall: {
  name: toolCall.function.name,
- args: JSON.parse(toolCall.function.arguments)
- }
+ args: argsParseResult.data,
+ },
  });
  }
  contents.push({
  role: 'model',
- parts
+ parts,
  });
  continue;
  }
@@ -167,8 +186,8 @@ function convertToGeminiMessages(messages) {
  parts.push({
  inlineData: {
  mimeType: base64Match[1] || image.mimeType,
- data: base64Match[2] || ''
- }
+ data: base64Match[2] || '',
+ },
  });
  }
  }
@@ -183,11 +202,11 @@ function convertToGeminiMessages(messages) {
  // Prepend default system prompt as first user message
  contents.unshift({
  role: 'user',
- parts: [{ text: SYSTEM_PROMPT }]
+ parts: [{ text: getSystemPrompt() }],
  });
  }
  else if (!systemInstruction) {
- systemInstruction = SYSTEM_PROMPT;
+ systemInstruction = getSystemPrompt();
  }
  return { systemInstruction, contents };
  }
@@ -202,10 +221,12 @@ export async function* createStreamingGeminiCompletion(options, abortSignal, onR
  // Build request payload
  const requestBody = {
  contents,
- systemInstruction: systemInstruction ? { parts: [{ text: systemInstruction }] } : undefined,
+ systemInstruction: systemInstruction
+ ? { parts: [{ text: systemInstruction }] }
+ : undefined,
  generationConfig: {
  temperature: options.temperature ?? 0.7,
- }
+ },
  };
  // Add tools if provided
  const geminiTools = convertToolsToGemini(options.tools);
@@ -213,18 +234,21 @@ export async function* createStreamingGeminiCompletion(options, abortSignal, onR
  requestBody.tools = geminiTools;
  }
  // Extract model name from options.model (e.g., "gemini-pro" or "models/gemini-pro")
- const modelName = options.model.startsWith('models/') ? options.model : `models/${options.model}`;
+ const modelName = options.model.startsWith('models/')
+ ? options.model
+ : `models/${options.model}`;
  const url = `${config.baseUrl}/${modelName}:streamGenerateContent?key=${config.apiKey}&alt=sse`;
- const response = await fetch(url, {
+ const fetchOptions = addProxyToFetchOptions(url, {
  method: 'POST',
  headers: {
  'Content-Type': 'application/json',
- 'authorization': `Bearer ${config.apiKey}`,
- ...config.customHeaders
+ 'Authorization': `Bearer ${config.apiKey}`,
+ ...config.customHeaders,
  },
  body: JSON.stringify(requestBody),
- signal: abortSignal
+ signal: abortSignal,
  });
+ const response = await fetch(url, fetchOptions);
  if (!response.ok) {
  const errorText = await response.text();
  throw new Error(`Gemini API error: ${response.status} ${response.statusText} - ${errorText}`);
@@ -255,10 +279,26 @@ export async function* createStreamingGeminiCompletion(options, abortSignal, onR
  const trimmed = line.trim();
  if (!trimmed || trimmed.startsWith(':'))
  continue;
- if (trimmed.startsWith('data: ')) {
- const data = trimmed.slice(6);
- try {
- const chunk = JSON.parse(data);
+ if (trimmed === 'data: [DONE]' || trimmed === 'data:[DONE]') {
+ break;
+ }
+ // Handle both "event: " and "event:" formats
+ if (trimmed.startsWith('event:')) {
+ // Event type, will be followed by data
+ continue;
+ }
+ // Handle both "data: " and "data:" formats
+ if (trimmed.startsWith('data:')) {
+ const data = trimmed.startsWith('data: ')
+ ? trimmed.slice(6)
+ : trimmed.slice(5);
+ const parseResult = parseJsonWithFix(data, {
+ toolName: 'Gemini SSE stream',
+ logWarning: false,
+ logError: true,
+ });
+ if (parseResult.success) {
+ const chunk = parseResult.data;
  // Process candidates
  if (chunk.candidates && chunk.candidates.length > 0) {
  const candidate = chunk.candidates[0];
@@ -269,7 +309,7 @@ export async function* createStreamingGeminiCompletion(options, abortSignal, onR
  contentBuffer += part.text;
  yield {
  type: 'content',
- content: part.text
+ content: part.text,
  };
  }
  // Process function calls
@@ -281,15 +321,15 @@ export async function* createStreamingGeminiCompletion(options, abortSignal, onR
  type: 'function',
  function: {
  name: fc.name,
- arguments: JSON.stringify(fc.args || {})
- }
+ arguments: JSON.stringify(fc.args || {}),
+ },
  };
  toolCallsBuffer.push(toolCall);
  // Yield delta for token counting
  const deltaText = fc.name + JSON.stringify(fc.args || {});
  yield {
  type: 'tool_call_delta',
- delta: deltaText
+ delta: deltaText,
  };
  }
  }
@@ -300,13 +340,10 @@ export async function* createStreamingGeminiCompletion(options, abortSignal, onR
  totalTokens = {
  prompt: chunk.usageMetadata.promptTokenCount || 0,
  completion: chunk.usageMetadata.candidatesTokenCount || 0,
- total: chunk.usageMetadata.totalTokenCount || 0
+ total: chunk.usageMetadata.totalTokenCount || 0,
  };
  }
  }
- catch (e) {
- console.error('Failed to parse Gemini SSE data:', data);
- }
  }
  }
  }
@@ -314,26 +351,29 @@ export async function* createStreamingGeminiCompletion(options, abortSignal, onR
  if (hasToolCalls && toolCallsBuffer.length > 0) {
  yield {
  type: 'tool_calls',
- tool_calls: toolCallsBuffer
+ tool_calls: toolCallsBuffer,
  };
  }
  // Yield usage info
  if (totalTokens.total > 0) {
+ const usageData = {
+ prompt_tokens: totalTokens.prompt,
+ completion_tokens: totalTokens.completion,
+ total_tokens: totalTokens.total,
+ };
+ // Save usage to file system at API layer
+ saveUsageToFile(options.model, usageData);
  yield {
  type: 'usage',
- usage: {
- prompt_tokens: totalTokens.prompt,
- completion_tokens: totalTokens.completion,
- total_tokens: totalTokens.total
- }
+ usage: usageData,
  };
  }
  // Signal completion
  yield {
- type: 'done'
+ type: 'done',
  };
  }, {
  abortSignal,
- onRetry
+ onRetry,
  });
  }
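
Aside: both providers now build their request options through addProxyToFetchOptions before calling fetch. A minimal sketch of that call pattern, based only on the usage visible in the diffs above; the helper's actual behavior lives in package/dist/utils/proxyUtils.js, which is not shown in this diff, and the postJson wrapper below is hypothetical.

import { addProxyToFetchOptions } from '../utils/proxyUtils.js';

// Hypothetical wrapper illustrating the pattern: pass the target URL and the
// base RequestInit to the proxy helper, then use its result with fetch.
async function postJson(url: string, apiKey: string, body: unknown, abortSignal?: AbortSignal): Promise<Response> {
    const fetchOptions = addProxyToFetchOptions(url, {
        method: 'POST',
        headers: {
            'Content-Type': 'application/json',
            'Authorization': `Bearer ${apiKey}`,
        },
        body: JSON.stringify(body),
        signal: abortSignal,
    });
    return fetch(url, fetchOptions);
}
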
package/dist/api/responses.d.ts CHANGED
@@ -16,11 +16,19 @@ export interface ResponseOptions {
  include?: string[];
  }
  export interface ResponseStreamChunk {
- type: 'content' | 'tool_calls' | 'tool_call_delta' | 'reasoning_delta' | 'reasoning_started' | 'done' | 'usage';
+ type: 'content' | 'tool_calls' | 'tool_call_delta' | 'reasoning_delta' | 'reasoning_started' | 'reasoning_data' | 'done' | 'usage';
  content?: string;
  tool_calls?: ToolCall[];
  delta?: string;
  usage?: UsageInfo;
+ reasoning?: {
+ summary?: Array<{
+ type: 'summary_text';
+ text: string;
+ }>;
+ content?: any;
+ encrypted_content?: string;
+ };
  }
  export declare function resetOpenAIClient(): void;
  /**
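
Aside: the widened ResponseStreamChunk union adds a 'reasoning_data' variant plus an optional reasoning payload. A minimal consumer sketch against the declaration above; the handleChunk function and the import path are hypothetical, and only the chunk shape comes from the diff.

import type { ResponseStreamChunk } from './responses.js';

// Hypothetical consumer: accumulate text and reasoning summaries from a stream of chunks.
function handleChunk(chunk: ResponseStreamChunk, out: { text: string; reasoningSummary: string }): void {
    switch (chunk.type) {
        case 'content':
            out.text += chunk.content ?? '';
            break;
        case 'reasoning_data':
            for (const item of chunk.reasoning?.summary ?? []) {
                out.reasoningSummary += item.text; // summary_text entries
            }
            break;
        // 'tool_calls', 'usage', 'done', etc. would be handled elsewhere
    }
}
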