codeep 1.2.17 → 1.2.18

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (62):
  1. package/README.md +20 -7
  2. package/dist/api/index.d.ts +7 -0
  3. package/dist/api/index.js +21 -17
  4. package/dist/renderer/App.d.ts +1 -5
  5. package/dist/renderer/App.js +106 -486
  6. package/dist/renderer/agentExecution.d.ts +36 -0
  7. package/dist/renderer/agentExecution.js +394 -0
  8. package/dist/renderer/commands.d.ts +16 -0
  9. package/dist/renderer/commands.js +838 -0
  10. package/dist/renderer/handlers.d.ts +87 -0
  11. package/dist/renderer/handlers.js +260 -0
  12. package/dist/renderer/highlight.d.ts +18 -0
  13. package/dist/renderer/highlight.js +130 -0
  14. package/dist/renderer/main.d.ts +4 -2
  15. package/dist/renderer/main.js +103 -1550
  16. package/dist/utils/agent.d.ts +5 -15
  17. package/dist/utils/agent.js +9 -693
  18. package/dist/utils/agentChat.d.ts +46 -0
  19. package/dist/utils/agentChat.js +343 -0
  20. package/dist/utils/agentStream.d.ts +23 -0
  21. package/dist/utils/agentStream.js +216 -0
  22. package/dist/utils/keychain.js +3 -2
  23. package/dist/utils/learning.js +9 -3
  24. package/dist/utils/mcpIntegration.d.ts +61 -0
  25. package/dist/utils/mcpIntegration.js +154 -0
  26. package/dist/utils/project.js +8 -3
  27. package/dist/utils/skills.js +21 -11
  28. package/dist/utils/smartContext.d.ts +4 -0
  29. package/dist/utils/smartContext.js +51 -14
  30. package/dist/utils/toolExecution.d.ts +27 -0
  31. package/dist/utils/toolExecution.js +525 -0
  32. package/dist/utils/toolParsing.d.ts +18 -0
  33. package/dist/utils/toolParsing.js +302 -0
  34. package/dist/utils/tools.d.ts +11 -24
  35. package/dist/utils/tools.js +22 -1187
  36. package/package.json +3 -1
  37. package/dist/config/config.test.d.ts +0 -1
  38. package/dist/config/config.test.js +0 -157
  39. package/dist/config/providers.test.d.ts +0 -1
  40. package/dist/config/providers.test.js +0 -187
  41. package/dist/hooks/index.d.ts +0 -4
  42. package/dist/hooks/index.js +0 -4
  43. package/dist/hooks/useAgent.d.ts +0 -29
  44. package/dist/hooks/useAgent.js +0 -148
  45. package/dist/utils/agent.test.d.ts +0 -1
  46. package/dist/utils/agent.test.js +0 -315
  47. package/dist/utils/git.test.d.ts +0 -1
  48. package/dist/utils/git.test.js +0 -193
  49. package/dist/utils/gitignore.test.d.ts +0 -1
  50. package/dist/utils/gitignore.test.js +0 -167
  51. package/dist/utils/project.test.d.ts +0 -1
  52. package/dist/utils/project.test.js +0 -212
  53. package/dist/utils/ratelimit.test.d.ts +0 -1
  54. package/dist/utils/ratelimit.test.js +0 -131
  55. package/dist/utils/retry.test.d.ts +0 -1
  56. package/dist/utils/retry.test.js +0 -163
  57. package/dist/utils/smartContext.test.d.ts +0 -1
  58. package/dist/utils/smartContext.test.js +0 -382
  59. package/dist/utils/tools.test.d.ts +0 -1
  60. package/dist/utils/tools.test.js +0 -681
  61. package/dist/utils/validation.test.d.ts +0 -1
  62. package/dist/utils/validation.test.js +0 -164
@@ -1,24 +1,17 @@
1
1
  /**
2
- * Agent loop - autonomous task execution
2
+ * Agent loop - autonomous task execution.
3
+ *
4
+ * Private chat/stream logic lives in agentChat.ts and agentStream.ts.
3
5
  */
4
- import { existsSync, readFileSync } from 'fs';
5
- import { join } from 'path';
6
- import { recordTokenUsage, extractOpenAIUsage, extractAnthropicUsage } from './tokenTracker.js';
7
6
  // Debug logging helper - only logs when CODEEP_DEBUG=1
8
7
  const debug = (...args) => {
9
8
  if (process.env.CODEEP_DEBUG === '1') {
10
9
  console.error('[DEBUG]', ...args);
11
10
  }
12
11
  };
13
- /**
14
- * Custom error class for timeout - allows distinguishing from user abort
15
- */
16
- class TimeoutError extends Error {
17
- constructor(message = 'Request timed out') {
18
- super(message);
19
- this.name = 'TimeoutError';
20
- }
21
- }
12
+ // Import chat layer (prompt building + API calls)
13
+ import { agentChat, getAgentSystemPrompt, getFallbackSystemPrompt, loadProjectRules, formatChatHistoryForAgent, } from './agentChat.js';
14
+ export { loadProjectRules, formatChatHistoryForAgent };
22
15
  /**
23
16
  * Calculate dynamic timeout based on task complexity
24
17
  * Complex tasks (creating pages, multiple files) need more time
@@ -38,9 +31,9 @@ function calculateDynamicTimeout(prompt, iteration, baseTimeout) {
38
31
  const calculatedTimeout = baseTimeout * multiplier;
39
32
  return Math.min(Math.max(calculatedTimeout, 120000), 300000);
40
33
  }
41
- import { parseToolCalls, executeTool, createActionLog, formatToolDefinitions, getOpenAITools, getAnthropicTools, parseOpenAIToolCalls, parseAnthropicToolCalls } from './tools.js';
42
- import { config, getApiKey } from '../config/index.js';
43
- import { getProviderBaseUrl, getProviderAuthHeader, supportsNativeTools } from '../config/providers.js';
34
+ import { parseToolCalls, executeTool, createActionLog } from './tools.js';
35
+ import { config } from '../config/index.js';
36
+ import { supportsNativeTools } from '../config/providers.js';
44
37
  import { startSession, endSession, undoLastAction, undoAllActions, getCurrentSession, getRecentSessions, formatSession } from './history.js';
45
38
  import { runAllVerifications, formatErrorsForAgent, hasVerificationErrors, getVerificationSummary } from './verify.js';
46
39
  import { gatherSmartContext, formatSmartContext, extractTargetFile } from './smartContext.js';
@@ -50,683 +43,6 @@ const DEFAULT_OPTIONS = {
50
43
  maxDuration: 20 * 60 * 1000, // 20 minutes
51
44
  usePlanning: false, // Disable task planning - causes more problems than it solves
52
45
  };
53
- /**
54
- * Load project rules from .codeep/rules.md or CODEEP.md
55
- * Returns the rules content formatted for system prompt, or empty string if no rules found
56
- */
57
- export function loadProjectRules(projectRoot) {
58
- const candidates = [
59
- join(projectRoot, '.codeep', 'rules.md'),
60
- join(projectRoot, 'CODEEP.md'),
61
- ];
62
- for (const filePath of candidates) {
63
- if (existsSync(filePath)) {
64
- try {
65
- const content = readFileSync(filePath, 'utf-8').trim();
66
- if (content) {
67
- debug('Loaded project rules from', filePath);
68
- return `\n\n## Project Rules\nThe following rules are defined by the project owner. You MUST follow these rules:\n\n${content}`;
69
- }
70
- }
71
- catch (err) {
72
- debug('Failed to read project rules from', filePath, err);
73
- }
74
- }
75
- }
76
- return '';
77
- }
78
- /**
79
- * Format chat session history for inclusion in agent system prompt.
80
- * Keeps the most recent messages within a character budget so the agent
81
- * has conversational context without overwhelming the context window.
82
- */
83
- export function formatChatHistoryForAgent(history, maxChars = 16000) {
84
- if (!history || history.length === 0)
85
- return '';
86
- // Filter out agent execution messages
87
- const filtered = history.filter(m => {
88
- const content = m.content.trimStart();
89
- if (content.startsWith('[AGENT]') || content.startsWith('[DRY RUN]'))
90
- return false;
91
- if (content.startsWith('Agent completed') || content.startsWith('Agent failed') || content.startsWith('Agent stopped'))
92
- return false;
93
- return true;
94
- });
95
- if (filtered.length === 0)
96
- return '';
97
- // Walk backward (newest first) and accumulate within budget
98
- const selected = [];
99
- let totalChars = 0;
100
- for (let i = filtered.length - 1; i >= 0; i--) {
101
- const msg = filtered[i];
102
- const entry = `${msg.role === 'user' ? 'User' : 'Assistant'}: ${msg.content}`;
103
- if (totalChars + entry.length > maxChars && selected.length > 0)
104
- break;
105
- // If single message exceeds budget, truncate it
106
- if (entry.length > maxChars) {
107
- selected.unshift({ role: msg.role, content: msg.content.slice(0, maxChars - 100) + '\n[truncated]' });
108
- break;
109
- }
110
- selected.unshift(msg);
111
- totalChars += entry.length;
112
- }
113
- if (selected.length === 0)
114
- return '';
115
- const lines = selected.map(m => `**${m.role === 'user' ? 'User' : 'Assistant'}:** ${m.content}`).join('\n\n');
116
- return `\n\n## Prior Conversation Context\nThe following is the recent chat history from this session. Use it as background context to understand the user's intent, but focus on completing the current task.\n\n${lines}`;
117
- }
118
- /**
119
- * Generate system prompt for agent mode (used with native tool calling)
120
- */
121
- function getAgentSystemPrompt(projectContext) {
122
- return `You are an AI coding agent with FULL autonomous access to this project.
123
-
124
- ## Your Capabilities
125
- - Read, write, edit, and delete files and directories
126
- - Create directories with create_directory tool
127
- - Execute shell commands (npm, git, build tools, etc.)
128
- - Search code in the project
129
- - List directory contents
130
-
131
- ## IMPORTANT: Follow User Instructions Exactly
132
- - Do EXACTLY what the user asks
133
- - If user says "create a website" -> create ALL necessary files (HTML, CSS, JS, images, etc.)
134
- - If user says "create folder X" -> use create_directory tool to create folder X
135
- - If user says "delete file X" -> use delete_file tool to delete file X
136
- - The user may write in any language - understand their request and execute it
137
- - Tool names and parameters must ALWAYS be in English (e.g., "create_directory", not "kreiraj_direktorij")
138
- - KEEP WORKING until the ENTIRE task is finished - do NOT stop after creating just directories or partial files
139
- - Only stop when you have created ALL files needed for a complete, working solution
140
-
141
- ## Rules
142
- 1. Always read files before editing them to understand the current content
143
- 2. Use edit_file for modifications to existing files (preserves other content)
144
- 3. Use write_file only for creating new files or complete overwrites
145
- 4. Use create_directory to create new folders/directories
146
- 5. Use list_files to see directory contents
147
- 6. Use search_code to find files or search patterns
148
- 7. NEVER use execute_command for: ls, find, cat, grep, mkdir, rm, cp, mv, touch
149
- 8. Use execute_command ONLY for: npm, git, composer, pip, cargo (build/package managers)
150
- 9. When the task is complete, respond with a summary WITHOUT any tool calls
151
- 10. IMPORTANT: After finishing, your response must NOT include any tool calls - just provide a summary
152
- 11. IGNORE the .codeep folder - it contains internal configuration, do NOT read or modify it
153
-
154
- ## Self-Verification
155
- After you make changes, the system will automatically run build and tests.
156
- If there are errors, you will receive them and must fix them.
157
- - Read error messages carefully
158
- - Fix the specific files and lines mentioned
159
- - Keep trying until verification passes
160
-
161
- ## Project Context
162
- **Name:** ${projectContext.name}
163
- **Type:** ${projectContext.type}
164
-
165
- **Structure:**
166
- \`\`\`
167
- ${projectContext.structure}
168
- \`\`\`
169
-
170
- **Key Files:** ${projectContext.keyFiles.join(', ')}
171
-
172
- You have FULL READ AND WRITE access. Use the tools to complete tasks autonomously.`;
173
- }
174
- /**
175
- * Generate fallback system prompt (text-based tool calling)
176
- */
177
- function getFallbackSystemPrompt(projectContext) {
178
- return `You are an AI coding agent with FULL autonomous access to this project.
179
-
180
- ## IMPORTANT: Follow User Instructions Exactly
181
- - Do EXACTLY what the user asks
182
- - If user says "create a website" -> create ALL necessary files (HTML, CSS, JS, images, etc.)
183
- - If user says "create folder X" -> use create_directory tool
184
- - If user says "delete file X" -> use delete_file tool
185
- - The user may write in any language - understand and execute
186
- - Tool names and parameters must ALWAYS be in English
187
- - KEEP WORKING until the ENTIRE task is finished - do NOT stop after creating just directories or partial files
188
- - Only stop when you have created ALL files needed for a complete, working solution
189
-
190
- ## Available Tools
191
- ${formatToolDefinitions()}
192
-
193
- ## Tool Call Format
194
- When you need to use a tool, respond with:
195
- <tool_call>
196
- {"tool": "tool_name", "parameters": {"param1": "value1"}}
197
- </tool_call>
198
-
199
- ## Examples
200
- <tool_call>
201
- {"tool": "create_directory", "parameters": {"path": "my-folder"}}
202
- </tool_call>
203
-
204
- <tool_call>
205
- {"tool": "list_files", "parameters": {"path": "."}}
206
- </tool_call>
207
-
208
- <tool_call>
209
- {"tool": "write_file", "parameters": {"path": "test/index.html", "content": "<!DOCTYPE html>..."}}
210
- </tool_call>
211
-
212
- ## Rules
213
- 1. Use the exact format shown above
214
- 2. Use list_files to see directory contents
215
- 3. Use search_code to find files or search patterns
216
- 4. NEVER use execute_command for: ls, find, cat, grep, mkdir, rm, cp, mv, touch
217
- 5. Use execute_command ONLY for: npm, git, composer, pip, cargo (build/package managers)
218
- 6. Always read files before editing
219
- 7. When done, respond WITHOUT tool calls
220
- 8. IGNORE the .codeep folder - it contains internal configuration, do NOT read or modify it
221
-
222
- ## Project: ${projectContext.name} (${projectContext.type})
223
- ${projectContext.structure}
224
-
225
- You have FULL access. Execute tasks autonomously.`;
226
- }
227
- /**
228
- * Make a chat API call for agent mode with native tool support
229
- */
230
- async function agentChat(messages, systemPrompt, onChunk, abortSignal, dynamicTimeout) {
231
- const protocol = config.get('protocol');
232
- const model = config.get('model');
233
- const apiKey = getApiKey();
234
- const providerId = config.get('provider');
235
- const baseUrl = getProviderBaseUrl(providerId, protocol);
236
- const authHeader = getProviderAuthHeader(providerId, protocol);
237
- if (!baseUrl) {
238
- throw new Error(`Provider ${providerId} does not support ${protocol} protocol`);
239
- }
240
- // Check if provider supports native tools - if not, use text-based fallback directly
241
- if (!supportsNativeTools(providerId, protocol)) {
242
- // Provider doesn't support native tools, use text-based fallback
243
- return await agentChatFallback(messages, systemPrompt, onChunk, abortSignal);
244
- }
245
- const controller = new AbortController();
246
- const timeoutMs = dynamicTimeout || config.get('apiTimeout');
247
- let isTimeout = false;
248
- const timeout = setTimeout(() => {
249
- isTimeout = true;
250
- controller.abort();
251
- }, timeoutMs);
252
- if (abortSignal) {
253
- abortSignal.addEventListener('abort', () => {
254
- isTimeout = false; // User abort, not timeout
255
- controller.abort();
256
- });
257
- }
258
- const headers = {
259
- 'Content-Type': 'application/json',
260
- };
261
- if (authHeader === 'Bearer') {
262
- headers['Authorization'] = `Bearer ${apiKey}`;
263
- }
264
- else {
265
- headers['x-api-key'] = apiKey;
266
- }
267
- if (protocol === 'anthropic') {
268
- headers['anthropic-version'] = '2023-06-01';
269
- }
270
- try {
271
- let endpoint;
272
- let body;
273
- const useStreaming = Boolean(onChunk);
274
- if (protocol === 'openai') {
275
- endpoint = `${baseUrl}/chat/completions`;
276
- body = {
277
- model,
278
- messages: [
279
- { role: 'system', content: systemPrompt },
280
- ...messages,
281
- ],
282
- tools: getOpenAITools(),
283
- tool_choice: 'auto',
284
- stream: useStreaming,
285
- temperature: config.get('temperature'),
286
- max_tokens: Math.max(config.get('maxTokens'), 16384),
287
- };
288
- }
289
- else {
290
- endpoint = `${baseUrl}/v1/messages`;
291
- body = {
292
- model,
293
- system: systemPrompt,
294
- messages: messages,
295
- tools: getAnthropicTools(),
296
- stream: useStreaming,
297
- temperature: config.get('temperature'),
298
- max_tokens: Math.max(config.get('maxTokens'), 16384),
299
- };
300
- }
301
- const response = await fetch(endpoint, {
302
- method: 'POST',
303
- headers,
304
- body: JSON.stringify(body),
305
- signal: controller.signal,
306
- });
307
- if (!response.ok) {
308
- const errorText = await response.text();
309
- // Check if error is due to tools not being supported - fallback to text mode
310
- if (errorText.includes('tools') || errorText.includes('function') || response.status === 400) {
311
- return await agentChatFallback(messages, systemPrompt, onChunk, abortSignal);
312
- }
313
- throw new Error(`API error: ${response.status} - ${errorText}`);
314
- }
315
- // Streaming path — parse tool calls from SSE deltas
316
- if (useStreaming && response.body) {
317
- if (protocol === 'openai') {
318
- return await handleOpenAIAgentStream(response.body, onChunk, model, providerId);
319
- }
320
- else {
321
- return await handleAnthropicAgentStream(response.body, onChunk, model, providerId);
322
- }
323
- }
324
- // Non-streaming path (fallback if no body)
325
- const data = await response.json();
326
- // Track token usage
327
- const usageExtractor = protocol === 'openai' ? extractOpenAIUsage : extractAnthropicUsage;
328
- const usage = usageExtractor(data);
329
- if (usage)
330
- recordTokenUsage(usage, model, providerId);
331
- debug('Raw API response:', JSON.stringify(data, null, 2).substring(0, 1500));
332
- if (protocol === 'openai') {
333
- const message = data.choices?.[0]?.message;
334
- const content = message?.content || '';
335
- const rawToolCalls = message?.tool_calls || [];
336
- debug('Raw tool_calls:', JSON.stringify(rawToolCalls, null, 2));
337
- const toolCalls = parseOpenAIToolCalls(rawToolCalls);
338
- debug('Parsed tool calls:', toolCalls.length, toolCalls.map(t => t.tool));
339
- // If no native tool calls, try parsing from content (some models return text-based)
340
- if (toolCalls.length === 0 && content) {
341
- debug('No native tool calls, checking content for text-based calls...');
342
- debug('Content preview:', content.substring(0, 300));
343
- const textToolCalls = parseToolCalls(content);
344
- if (textToolCalls.length > 0) {
345
- debug('Found text-based tool calls:', textToolCalls.length);
346
- return { content, toolCalls: textToolCalls, usedNativeTools: false };
347
- }
348
- }
349
- if (onChunk && content) {
350
- onChunk(content);
351
- }
352
- return { content, toolCalls, usedNativeTools: true };
353
- }
354
- else {
355
- // Anthropic format
356
- const contentBlocks = data.content || [];
357
- let textContent = '';
358
- for (const block of contentBlocks) {
359
- if (block.type === 'text') {
360
- textContent += block.text;
361
- if (onChunk)
362
- onChunk(block.text);
363
- }
364
- }
365
- const toolCalls = parseAnthropicToolCalls(contentBlocks);
366
- return { content: textContent, toolCalls, usedNativeTools: true };
367
- }
368
- }
369
- catch (error) {
370
- const err = error;
371
- // Check if this was a timeout vs user abort
372
- if (err.name === 'AbortError') {
373
- if (isTimeout) {
374
- throw new TimeoutError(`API request timed out after ${timeoutMs}ms`);
375
- }
376
- // User abort - rethrow as-is
377
- throw error;
378
- }
379
- // If native tools failed, try fallback
380
- if (err.message.includes('tools') || err.message.includes('function')) {
381
- return await agentChatFallback(messages, systemPrompt, onChunk, abortSignal);
382
- }
383
- throw error;
384
- }
385
- finally {
386
- clearTimeout(timeout);
387
- }
388
- }
389
- /**
390
- * Fallback chat without native tools (text-based parsing)
391
- */
392
- async function agentChatFallback(messages, systemPrompt, onChunk, abortSignal, dynamicTimeout) {
393
- const protocol = config.get('protocol');
394
- const model = config.get('model');
395
- const apiKey = getApiKey();
396
- const providerId = config.get('provider');
397
- const baseUrl = getProviderBaseUrl(providerId, protocol);
398
- const authHeader = getProviderAuthHeader(providerId, protocol);
399
- if (!baseUrl) {
400
- throw new Error(`Provider ${providerId} does not support ${protocol} protocol`);
401
- }
402
- const controller = new AbortController();
403
- const timeoutMs = dynamicTimeout || config.get('apiTimeout');
404
- let isTimeout = false;
405
- const timeout = setTimeout(() => {
406
- isTimeout = true;
407
- controller.abort();
408
- }, timeoutMs);
409
- if (abortSignal) {
410
- abortSignal.addEventListener('abort', () => {
411
- isTimeout = false; // User abort, not timeout
412
- controller.abort();
413
- });
414
- }
415
- const headers = {
416
- 'Content-Type': 'application/json',
417
- };
418
- if (authHeader === 'Bearer') {
419
- headers['Authorization'] = `Bearer ${apiKey}`;
420
- }
421
- else {
422
- headers['x-api-key'] = apiKey;
423
- }
424
- if (protocol === 'anthropic') {
425
- headers['anthropic-version'] = '2023-06-01';
426
- }
427
- // Use fallback system prompt with text-based tool format
428
- const fallbackPrompt = systemPrompt.includes('## Available Tools')
429
- ? systemPrompt
430
- : systemPrompt + '\n\n' + formatToolDefinitions();
431
- try {
432
- let endpoint;
433
- let body;
434
- if (protocol === 'openai') {
435
- endpoint = `${baseUrl}/chat/completions`;
436
- body = {
437
- model,
438
- messages: [
439
- { role: 'system', content: fallbackPrompt },
440
- ...messages,
441
- ],
442
- stream: Boolean(onChunk),
443
- temperature: config.get('temperature'),
444
- max_tokens: Math.max(config.get('maxTokens'), 16384), // Ensure enough tokens for large file generation
445
- };
446
- }
447
- else {
448
- endpoint = `${baseUrl}/v1/messages`;
449
- body = {
450
- model,
451
- messages: [
452
- { role: 'user', content: fallbackPrompt },
453
- { role: 'assistant', content: 'Understood. I will use the tools as specified.' },
454
- ...messages,
455
- ],
456
- stream: Boolean(onChunk),
457
- temperature: config.get('temperature'),
458
- max_tokens: Math.max(config.get('maxTokens'), 16384), // Ensure enough tokens for large file generation
459
- };
460
- }
461
- const response = await fetch(endpoint, {
462
- method: 'POST',
463
- headers,
464
- body: JSON.stringify(body),
465
- signal: controller.signal,
466
- });
467
- if (!response.ok) {
468
- const error = await response.text();
469
- throw new Error(`API error: ${response.status} - ${error}`);
470
- }
471
- let content;
472
- if (onChunk && response.body) {
473
- content = await handleStream(response.body, protocol, onChunk);
474
- }
475
- else {
476
- const data = await response.json();
477
- // Track token usage
478
- const fallbackUsageExtractor = protocol === 'openai' ? extractOpenAIUsage : extractAnthropicUsage;
479
- const fallbackUsage = fallbackUsageExtractor(data);
480
- if (fallbackUsage)
481
- recordTokenUsage(fallbackUsage, model, providerId);
482
- if (protocol === 'openai') {
483
- content = data.choices?.[0]?.message?.content || '';
484
- }
485
- else {
486
- content = data.content?.[0]?.text || '';
487
- }
488
- }
489
- // Parse tool calls from text response
490
- const toolCalls = parseToolCalls(content);
491
- return { content, toolCalls, usedNativeTools: false };
492
- }
493
- catch (error) {
494
- const err = error;
495
- // Check if this was a timeout vs user abort
496
- if (err.name === 'AbortError') {
497
- if (isTimeout) {
498
- throw new TimeoutError(`API request timed out after ${timeoutMs}ms`);
499
- }
500
- // User abort - rethrow as-is
501
- throw error;
502
- }
503
- throw error;
504
- }
505
- finally {
506
- clearTimeout(timeout);
507
- }
508
- }
509
- /**
510
- * Handle OpenAI streaming response with tool call accumulation
511
- */
512
- async function handleOpenAIAgentStream(body, onChunk, model, providerId) {
513
- const reader = body.getReader();
514
- const decoder = new TextDecoder();
515
- let buffer = '';
516
- let content = '';
517
- // Accumulate tool calls from deltas
518
- const toolCallMap = new Map();
519
- let usageData = null;
520
- while (true) {
521
- const { done, value } = await reader.read();
522
- if (done)
523
- break;
524
- buffer += decoder.decode(value, { stream: true });
525
- const lines = buffer.split('\n');
526
- buffer = lines.pop() || '';
527
- for (const line of lines) {
528
- if (!line.startsWith('data: '))
529
- continue;
530
- const data = line.slice(6);
531
- if (data === '[DONE]')
532
- continue;
533
- try {
534
- const parsed = JSON.parse(data);
535
- // Track usage from final chunk
536
- if (parsed.usage) {
537
- usageData = parsed;
538
- }
539
- const delta = parsed.choices?.[0]?.delta;
540
- if (!delta)
541
- continue;
542
- // Accumulate text content
543
- if (delta.content) {
544
- content += delta.content;
545
- onChunk(delta.content);
546
- }
547
- // Accumulate tool calls
548
- if (delta.tool_calls) {
549
- for (const tc of delta.tool_calls) {
550
- const idx = tc.index ?? 0;
551
- if (!toolCallMap.has(idx)) {
552
- toolCallMap.set(idx, {
553
- id: tc.id || '',
554
- name: tc.function?.name || '',
555
- arguments: '',
556
- });
557
- }
558
- const entry = toolCallMap.get(idx);
559
- if (tc.id)
560
- entry.id = tc.id;
561
- if (tc.function?.name)
562
- entry.name = tc.function.name;
563
- if (tc.function?.arguments)
564
- entry.arguments += tc.function.arguments;
565
- }
566
- }
567
- }
568
- catch {
569
- // Ignore parse errors
570
- }
571
- }
572
- }
573
- // Track token usage if available
574
- if (usageData) {
575
- const usage = extractOpenAIUsage(usageData);
576
- if (usage)
577
- recordTokenUsage(usage, model, providerId);
578
- }
579
- // Convert accumulated tool calls
580
- const rawToolCalls = Array.from(toolCallMap.values()).map(tc => ({
581
- id: tc.id,
582
- type: 'function',
583
- function: { name: tc.name, arguments: tc.arguments },
584
- }));
585
- const toolCalls = parseOpenAIToolCalls(rawToolCalls);
586
- debug('Stream parsed tool calls:', toolCalls.length, toolCalls.map(t => t.tool));
587
- // If no native tool calls, try text-based parsing
588
- if (toolCalls.length === 0 && content) {
589
- const textToolCalls = parseToolCalls(content);
590
- if (textToolCalls.length > 0) {
591
- return { content, toolCalls: textToolCalls, usedNativeTools: false };
592
- }
593
- }
594
- return { content, toolCalls, usedNativeTools: true };
595
- }
596
- /**
597
- * Handle Anthropic streaming response with tool call accumulation
598
- */
599
- async function handleAnthropicAgentStream(body, onChunk, model, providerId) {
600
- const reader = body.getReader();
601
- const decoder = new TextDecoder();
602
- let buffer = '';
603
- let content = '';
604
- // Accumulate content blocks for tool use
605
- const contentBlocks = [];
606
- let currentBlockIndex = -1;
607
- let currentBlockType = '';
608
- let currentToolName = '';
609
- let currentToolId = '';
610
- let currentToolInput = '';
611
- let usageData = null;
612
- while (true) {
613
- const { done, value } = await reader.read();
614
- if (done)
615
- break;
616
- buffer += decoder.decode(value, { stream: true });
617
- const lines = buffer.split('\n');
618
- buffer = lines.pop() || '';
619
- for (const line of lines) {
620
- if (!line.startsWith('data: '))
621
- continue;
622
- const data = line.slice(6);
623
- try {
624
- const parsed = JSON.parse(data);
625
- // Track usage
626
- if (parsed.usage) {
627
- usageData = parsed;
628
- }
629
- if (parsed.type === 'message_delta' && parsed.usage) {
630
- usageData = parsed;
631
- }
632
- if (parsed.type === 'content_block_start') {
633
- currentBlockIndex = parsed.index;
634
- const block = parsed.content_block;
635
- if (block.type === 'text') {
636
- currentBlockType = 'text';
637
- }
638
- else if (block.type === 'tool_use') {
639
- currentBlockType = 'tool_use';
640
- currentToolName = block.name || '';
641
- currentToolId = block.id || '';
642
- currentToolInput = '';
643
- }
644
- }
645
- else if (parsed.type === 'content_block_delta') {
646
- if (currentBlockType === 'text' && parsed.delta?.text) {
647
- content += parsed.delta.text;
648
- onChunk(parsed.delta.text);
649
- }
650
- else if (currentBlockType === 'tool_use' && parsed.delta?.partial_json) {
651
- currentToolInput += parsed.delta.partial_json;
652
- }
653
- }
654
- else if (parsed.type === 'content_block_stop') {
655
- if (currentBlockType === 'tool_use') {
656
- contentBlocks.push({
657
- type: 'tool_use',
658
- id: currentToolId,
659
- name: currentToolName,
660
- input: tryParseJSON(currentToolInput),
661
- });
662
- }
663
- currentBlockType = '';
664
- }
665
- }
666
- catch {
667
- // Ignore parse errors
668
- }
669
- }
670
- }
671
- // Track token usage
672
- if (usageData) {
673
- const usage = extractAnthropicUsage(usageData);
674
- if (usage)
675
- recordTokenUsage(usage, model, providerId);
676
- }
677
- const toolCalls = parseAnthropicToolCalls(contentBlocks);
678
- return { content, toolCalls, usedNativeTools: true };
679
- }
680
- function tryParseJSON(str) {
681
- try {
682
- return JSON.parse(str);
683
- }
684
- catch {
685
- return {};
686
- }
687
- }
688
- /**
689
- * Handle streaming response (text-based fallback)
690
- */
691
- async function handleStream(body, protocol, onChunk) {
692
- const reader = body.getReader();
693
- const decoder = new TextDecoder();
694
- const chunks = [];
695
- let buffer = '';
696
- while (true) {
697
- const { done, value } = await reader.read();
698
- if (done)
699
- break;
700
- buffer += decoder.decode(value, { stream: true });
701
- const lines = buffer.split('\n');
702
- buffer = lines.pop() || '';
703
- for (const line of lines) {
704
- if (line.startsWith('data: ')) {
705
- const data = line.slice(6);
706
- if (data === '[DONE]')
707
- continue;
708
- try {
709
- const parsed = JSON.parse(data);
710
- let content;
711
- if (protocol === 'openai') {
712
- content = parsed.choices?.[0]?.delta?.content;
713
- }
714
- else if (parsed.type === 'content_block_delta') {
715
- content = parsed.delta?.text;
716
- }
717
- if (content) {
718
- chunks.push(content);
719
- onChunk(content);
720
- }
721
- }
722
- catch {
723
- // Skip parse errors
724
- }
725
- }
726
- }
727
- }
728
- return chunks.join('');
729
- }
730
46
  /**
731
47
  * Run the agent loop
732
48
  */