claude-code-workflow 6.3.13 → 6.3.15

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (69)
  1. package/.claude/agents/issue-plan-agent.md +57 -103
  2. package/.claude/agents/issue-queue-agent.md +69 -120
  3. package/.claude/commands/issue/new.md +217 -473
  4. package/.claude/commands/issue/plan.md +76 -154
  5. package/.claude/commands/issue/queue.md +208 -259
  6. package/.claude/skills/issue-manage/SKILL.md +63 -22
  7. package/.claude/workflows/cli-templates/schemas/discovery-finding-schema.json +3 -3
  8. package/.claude/workflows/cli-templates/schemas/issues-jsonl-schema.json +3 -3
  9. package/.claude/workflows/cli-templates/schemas/queue-schema.json +0 -5
  10. package/.codex/prompts/issue-plan.md +16 -19
  11. package/.codex/prompts/issue-queue.md +0 -1
  12. package/README.md +1 -0
  13. package/ccw/dist/cli.d.ts.map +1 -1
  14. package/ccw/dist/cli.js +3 -1
  15. package/ccw/dist/cli.js.map +1 -1
  16. package/ccw/dist/commands/cli.d.ts.map +1 -1
  17. package/ccw/dist/commands/cli.js +45 -3
  18. package/ccw/dist/commands/cli.js.map +1 -1
  19. package/ccw/dist/commands/issue.d.ts +3 -1
  20. package/ccw/dist/commands/issue.d.ts.map +1 -1
  21. package/ccw/dist/commands/issue.js +383 -30
  22. package/ccw/dist/commands/issue.js.map +1 -1
  23. package/ccw/dist/core/routes/issue-routes.d.ts.map +1 -1
  24. package/ccw/dist/core/routes/issue-routes.js +77 -16
  25. package/ccw/dist/core/routes/issue-routes.js.map +1 -1
  26. package/ccw/dist/tools/cli-executor.d.ts.map +1 -1
  27. package/ccw/dist/tools/cli-executor.js +117 -4
  28. package/ccw/dist/tools/cli-executor.js.map +1 -1
  29. package/ccw/dist/tools/litellm-executor.d.ts +4 -0
  30. package/ccw/dist/tools/litellm-executor.d.ts.map +1 -1
  31. package/ccw/dist/tools/litellm-executor.js +54 -1
  32. package/ccw/dist/tools/litellm-executor.js.map +1 -1
  33. package/ccw/dist/tools/ui-generate-preview.d.ts +18 -0
  34. package/ccw/dist/tools/ui-generate-preview.d.ts.map +1 -1
  35. package/ccw/dist/tools/ui-generate-preview.js +26 -10
  36. package/ccw/dist/tools/ui-generate-preview.js.map +1 -1
  37. package/ccw/src/cli.ts +3 -1
  38. package/ccw/src/commands/cli.ts +47 -3
  39. package/ccw/src/commands/issue.ts +442 -34
  40. package/ccw/src/core/routes/issue-routes.ts +82 -16
  41. package/ccw/src/tools/cli-executor.ts +125 -4
  42. package/ccw/src/tools/litellm-executor.ts +107 -24
  43. package/ccw/src/tools/ui-generate-preview.js +60 -37
  44. package/codex-lens/src/codexlens/__pycache__/config.cpython-313.pyc +0 -0
  45. package/codex-lens/src/codexlens/__pycache__/entities.cpython-313.pyc +0 -0
  46. package/codex-lens/src/codexlens/config.py +25 -2
  47. package/codex-lens/src/codexlens/entities.py +5 -1
  48. package/codex-lens/src/codexlens/indexing/__pycache__/symbol_extractor.cpython-313.pyc +0 -0
  49. package/codex-lens/src/codexlens/indexing/symbol_extractor.py +243 -243
  50. package/codex-lens/src/codexlens/parsers/__pycache__/factory.cpython-313.pyc +0 -0
  51. package/codex-lens/src/codexlens/parsers/__pycache__/treesitter_parser.cpython-313.pyc +0 -0
  52. package/codex-lens/src/codexlens/parsers/factory.py +256 -256
  53. package/codex-lens/src/codexlens/parsers/treesitter_parser.py +335 -335
  54. package/codex-lens/src/codexlens/search/__pycache__/chain_search.cpython-313.pyc +0 -0
  55. package/codex-lens/src/codexlens/search/__pycache__/hybrid_search.cpython-313.pyc +0 -0
  56. package/codex-lens/src/codexlens/search/__pycache__/ranking.cpython-313.pyc +0 -0
  57. package/codex-lens/src/codexlens/search/chain_search.py +30 -1
  58. package/codex-lens/src/codexlens/semantic/__pycache__/__init__.cpython-313.pyc +0 -0
  59. package/codex-lens/src/codexlens/semantic/__pycache__/embedder.cpython-313.pyc +0 -0
  60. package/codex-lens/src/codexlens/semantic/__pycache__/reranker.cpython-313.pyc +0 -0
  61. package/codex-lens/src/codexlens/semantic/__pycache__/vector_store.cpython-313.pyc +0 -0
  62. package/codex-lens/src/codexlens/semantic/embedder.py +6 -9
  63. package/codex-lens/src/codexlens/semantic/vector_store.py +271 -200
  64. package/codex-lens/src/codexlens/storage/__pycache__/dir_index.cpython-313.pyc +0 -0
  65. package/codex-lens/src/codexlens/storage/__pycache__/index_tree.cpython-313.pyc +0 -0
  66. package/codex-lens/src/codexlens/storage/__pycache__/sqlite_store.cpython-313.pyc +0 -0
  67. package/codex-lens/src/codexlens/storage/sqlite_store.py +184 -108
  68. package/package.json +6 -1
  69. package/.claude/commands/issue/manage.md +0 -113
@@ -67,6 +67,17 @@ function readSolutionsJsonl(issuesDir: string, issueId: string): any[] {
67
67
  }
68
68
  }
69
69
 
70
+ function readIssueHistoryJsonl(issuesDir: string): any[] {
71
+ const historyPath = join(issuesDir, 'issue-history.jsonl');
72
+ if (!existsSync(historyPath)) return [];
73
+ try {
74
+ const content = readFileSync(historyPath, 'utf8');
75
+ return content.split('\n').filter(line => line.trim()).map(line => JSON.parse(line));
76
+ } catch {
77
+ return [];
78
+ }
79
+ }
80
+
70
81
  function writeSolutionsJsonl(issuesDir: string, issueId: string, solutions: any[]) {
71
82
  const solutionsDir = join(issuesDir, 'solutions');
72
83
  if (!existsSync(solutionsDir)) mkdirSync(solutionsDir, { recursive: true });
@@ -109,7 +120,18 @@ function readQueue(issuesDir: string) {
109
120
 
110
121
  function writeQueue(issuesDir: string, queue: any) {
111
122
  if (!existsSync(issuesDir)) mkdirSync(issuesDir, { recursive: true });
112
- queue._metadata = { ...queue._metadata, updated_at: new Date().toISOString(), total_tasks: queue.tasks?.length || 0 };
123
+
124
+ // Support both solution-based and task-based queues
125
+ const items = queue.solutions || queue.tasks || [];
126
+ const isSolutionBased = Array.isArray(queue.solutions) && queue.solutions.length > 0;
127
+
128
+ queue._metadata = {
129
+ ...queue._metadata,
130
+ updated_at: new Date().toISOString(),
131
+ ...(isSolutionBased
132
+ ? { total_solutions: items.length }
133
+ : { total_tasks: items.length })
134
+ };
113
135
 
114
136
  // Check if using new multi-queue structure
115
137
  const queuesDir = join(issuesDir, 'queues');
@@ -125,8 +147,13 @@ function writeQueue(issuesDir: string, queue: any) {
125
147
  const index = JSON.parse(readFileSync(indexPath, 'utf8'));
126
148
  const queueEntry = index.queues?.find((q: any) => q.id === queue.id);
127
149
  if (queueEntry) {
128
- queueEntry.total_tasks = queue.tasks?.length || 0;
129
- queueEntry.completed_tasks = queue.tasks?.filter((i: any) => i.status === 'completed').length || 0;
150
+ if (isSolutionBased) {
151
+ queueEntry.total_solutions = items.length;
152
+ queueEntry.completed_solutions = items.filter((i: any) => i.status === 'completed').length;
153
+ } else {
154
+ queueEntry.total_tasks = items.length;
155
+ queueEntry.completed_tasks = items.filter((i: any) => i.status === 'completed').length;
156
+ }
130
157
  writeFileSync(indexPath, JSON.stringify(index, null, 2));
131
158
  }
132
159
  } catch {
@@ -173,9 +200,26 @@ function enrichIssues(issues: any[], issuesDir: string) {
173
200
  });
174
201
  }
175
202
 
203
+ /**
204
+ * Get queue items (supports both solution-based and task-based queues)
205
+ */
206
+ function getQueueItems(queue: any): any[] {
207
+ return queue.solutions || queue.tasks || [];
208
+ }
209
+
210
+ /**
211
+ * Check if queue is solution-based
212
+ */
213
+ function isSolutionBasedQueue(queue: any): boolean {
214
+ return Array.isArray(queue.solutions) && queue.solutions.length > 0;
215
+ }
216
+
176
217
  function groupQueueByExecutionGroup(queue: any) {
177
218
  const groups: { [key: string]: any[] } = {};
178
- for (const item of queue.tasks || []) {
219
+ const items = getQueueItems(queue);
220
+ const isSolutionBased = isSolutionBasedQueue(queue);
221
+
222
+ for (const item of items) {
179
223
  const groupId = item.execution_group || 'ungrouped';
180
224
  if (!groups[groupId]) groups[groupId] = [];
181
225
  groups[groupId].push(item);
@@ -183,11 +227,13 @@ function groupQueueByExecutionGroup(queue: any) {
183
227
  for (const groupId of Object.keys(groups)) {
184
228
  groups[groupId].sort((a, b) => (a.execution_order || 0) - (b.execution_order || 0));
185
229
  }
186
- const executionGroups = Object.entries(groups).map(([id, items]) => ({
230
+ const executionGroups = Object.entries(groups).map(([id, groupItems]) => ({
187
231
  id,
188
232
  type: id.startsWith('P') ? 'parallel' : id.startsWith('S') ? 'sequential' : 'unknown',
189
- task_count: items.length,
190
- tasks: items.map(i => i.item_id)
233
+ // Use appropriate count field based on queue type
234
+ ...(isSolutionBased
235
+ ? { solution_count: groupItems.length, solutions: groupItems.map(i => i.item_id) }
236
+ : { task_count: groupItems.length, tasks: groupItems.map(i => i.item_id) })
191
237
  })).sort((a, b) => {
192
238
  const aFirst = groups[a.id]?.[0]?.execution_order || 0;
193
239
  const bFirst = groups[b.id]?.[0]?.execution_order || 0;
@@ -312,7 +358,7 @@ export async function handleIssueRoutes(ctx: RouteContext): Promise<boolean> {
312
358
  return true;
313
359
  }
314
360
 
315
- // POST /api/queue/reorder - Reorder queue items
361
+ // POST /api/queue/reorder - Reorder queue items (supports both solutions and tasks)
316
362
  if (pathname === '/api/queue/reorder' && req.method === 'POST') {
317
363
  handlePostRequest(req, res, async (body: any) => {
318
364
  const { groupId, newOrder } = body;
@@ -321,8 +367,11 @@ export async function handleIssueRoutes(ctx: RouteContext): Promise<boolean> {
321
367
  }
322
368
 
323
369
  const queue = readQueue(issuesDir);
324
- const groupItems = queue.tasks.filter((item: any) => item.execution_group === groupId);
325
- const otherItems = queue.tasks.filter((item: any) => item.execution_group !== groupId);
370
+ const items = getQueueItems(queue);
371
+ const isSolutionBased = isSolutionBasedQueue(queue);
372
+
373
+ const groupItems = items.filter((item: any) => item.execution_group === groupId);
374
+ const otherItems = items.filter((item: any) => item.execution_group !== groupId);
326
375
 
327
376
  if (groupItems.length === 0) return { error: `No items in group ${groupId}` };
328
377
 
@@ -336,7 +385,7 @@ export async function handleIssueRoutes(ctx: RouteContext): Promise<boolean> {
336
385
 
337
386
  const itemMap = new Map(groupItems.map((i: any) => [i.item_id, i]));
338
387
  const reorderedItems = newOrder.map((qid: string, idx: number) => ({ ...itemMap.get(qid), _idx: idx }));
339
- const newQueue = [...otherItems, ...reorderedItems].sort((a, b) => {
388
+ const newQueueItems = [...otherItems, ...reorderedItems].sort((a, b) => {
340
389
  const aGroup = parseInt(a.execution_group?.match(/\d+/)?.[0] || '999');
341
390
  const bGroup = parseInt(b.execution_group?.match(/\d+/)?.[0] || '999');
342
391
  if (aGroup !== bGroup) return aGroup - bGroup;
@@ -346,8 +395,14 @@ export async function handleIssueRoutes(ctx: RouteContext): Promise<boolean> {
346
395
  return (a.execution_order || 0) - (b.execution_order || 0);
347
396
  });
348
397
 
349
- newQueue.forEach((item, idx) => { item.execution_order = idx + 1; delete item._idx; });
350
- queue.tasks = newQueue;
398
+ newQueueItems.forEach((item, idx) => { item.execution_order = idx + 1; delete item._idx; });
399
+
400
+ // Write back to appropriate array based on queue type
401
+ if (isSolutionBased) {
402
+ queue.solutions = newQueueItems;
403
+ } else {
404
+ queue.tasks = newQueueItems;
405
+ }
351
406
  writeQueue(issuesDir, queue);
352
407
 
353
408
  return { success: true, groupId, reordered: newOrder.length };
@@ -376,6 +431,17 @@ export async function handleIssueRoutes(ctx: RouteContext): Promise<boolean> {
376
431
  return true;
377
432
  }
378
433
 
434
+ // GET /api/issues/history - List completed issues from history
435
+ if (pathname === '/api/issues/history' && req.method === 'GET') {
436
+ const history = readIssueHistoryJsonl(issuesDir);
437
+ res.writeHead(200, { 'Content-Type': 'application/json' });
438
+ res.end(JSON.stringify({
439
+ issues: history,
440
+ _metadata: { version: '1.0', storage: 'jsonl', total_issues: history.length, last_updated: new Date().toISOString() }
441
+ }));
442
+ return true;
443
+ }
444
+
379
445
  // POST /api/issues - Create issue
380
446
  if (pathname === '/api/issues' && req.method === 'POST') {
381
447
  handlePostRequest(req, res, async (body: any) => {
@@ -392,7 +458,7 @@ export async function handleIssueRoutes(ctx: RouteContext): Promise<boolean> {
392
458
  context: body.context || '',
393
459
  source: body.source || 'text',
394
460
  source_url: body.source_url || null,
395
- labels: body.labels || [],
461
+ tags: body.tags || [],
396
462
  created_at: new Date().toISOString(),
397
463
  updated_at: new Date().toISOString()
398
464
  };
@@ -451,7 +517,7 @@ export async function handleIssueRoutes(ctx: RouteContext): Promise<boolean> {
451
517
  }
452
518
 
453
519
  // Update other fields
454
- for (const field of ['title', 'context', 'status', 'priority', 'labels']) {
520
+ for (const field of ['title', 'context', 'status', 'priority', 'tags']) {
455
521
  if (body[field] !== undefined) {
456
522
  issues[issueIndex][field] = body[field];
457
523
  updates.push(field);
@@ -633,7 +699,7 @@ export async function handleIssueRoutes(ctx: RouteContext): Promise<boolean> {
633
699
  if (issueIndex === -1) return { error: 'Issue not found' };
634
700
 
635
701
  const updates: string[] = [];
636
- for (const field of ['title', 'context', 'status', 'priority', 'bound_solution_id', 'labels']) {
702
+ for (const field of ['title', 'context', 'status', 'priority', 'bound_solution_id', 'tags']) {
637
703
  if (body[field] !== undefined) {
638
704
  issues[issueIndex][field] = body[field];
639
705
  updates.push(field);
@@ -10,6 +10,37 @@ import { spawn, ChildProcess } from 'child_process';
10
10
  import { existsSync, mkdirSync, readFileSync, writeFileSync, unlinkSync, readdirSync, statSync } from 'fs';
11
11
  import { join, relative } from 'path';
12
12
 
13
+ // Debug logging utility
14
+ const DEBUG = process.env.DEBUG === 'true' || process.env.DEBUG === '1' || process.env.CCW_DEBUG === 'true';
15
+
16
+ function debugLog(category: string, message: string, data?: Record<string, unknown>): void {
17
+ if (!DEBUG) return;
18
+ const timestamp = new Date().toISOString();
19
+ const prefix = `[${timestamp}] [CLI-DEBUG] [${category}]`;
20
+ if (data) {
21
+ console.error(`${prefix} ${message}`, JSON.stringify(data, null, 2));
22
+ } else {
23
+ console.error(`${prefix} ${message}`);
24
+ }
25
+ }
26
+
27
+ function errorLog(category: string, message: string, error?: Error | unknown, context?: Record<string, unknown>): void {
28
+ const timestamp = new Date().toISOString();
29
+ const prefix = `[${timestamp}] [CLI-ERROR] [${category}]`;
30
+ console.error(`${prefix} ${message}`);
31
+ if (error instanceof Error) {
32
+ console.error(`${prefix} Error: ${error.message}`);
33
+ if (DEBUG && error.stack) {
34
+ console.error(`${prefix} Stack: ${error.stack}`);
35
+ }
36
+ } else if (error) {
37
+ console.error(`${prefix} Error: ${String(error)}`);
38
+ }
39
+ if (context) {
40
+ console.error(`${prefix} Context:`, JSON.stringify(context, null, 2));
41
+ }
42
+ }
43
+
13
44
  // LiteLLM integration
14
45
  import { executeLiteLLMEndpoint } from './litellm-executor.js';
15
46
  import { findEndpointById } from '../config/litellm-api-config-manager.js';
@@ -205,9 +236,12 @@ interface ExecutionOutput {
205
236
  * Check if a CLI tool is available (with caching)
206
237
  */
207
238
  async function checkToolAvailability(tool: string): Promise<ToolAvailability> {
239
+ debugLog('TOOL_CHECK', `Checking availability for tool: ${tool}`);
240
+
208
241
  // Check cache first
209
242
  const cached = toolAvailabilityCache.get(tool);
210
243
  if (cached && isCacheValid(cached)) {
244
+ debugLog('TOOL_CHECK', `Cache hit for ${tool}`, { available: cached.result.available, path: cached.result.path });
211
245
  return cached.result;
212
246
  }
213
247
 
@@ -219,6 +253,8 @@ async function checkToolAvailability(tool: string): Promise<ToolAvailability> {
219
253
  const isWindows = process.platform === 'win32';
220
254
  const command = isWindows ? 'where' : 'which';
221
255
 
256
+ debugLog('TOOL_CHECK', `Running ${command} ${tool}`, { platform: process.platform });
257
+
222
258
  // Direct spawn - where/which are system commands that don't need shell wrapper
223
259
  const child = spawn(command, [tool], {
224
260
  shell: false,
@@ -226,25 +262,31 @@ async function checkToolAvailability(tool: string): Promise<ToolAvailability> {
226
262
  });
227
263
 
228
264
  let stdout = '';
265
+ let stderr = '';
229
266
  child.stdout!.on('data', (data) => { stdout += data.toString(); });
267
+ child.stderr?.on('data', (data) => { stderr += data.toString(); });
230
268
 
231
269
  child.on('close', (code) => {
232
270
  const result: ToolAvailability = code === 0 && stdout.trim()
233
271
  ? { available: true, path: stdout.trim().split('\n')[0] }
234
272
  : { available: false, path: null };
235
273
 
236
- // Only cache positive results to avoid caching transient failures
237
274
  if (result.available) {
275
+ debugLog('TOOL_CHECK', `Tool ${tool} found`, { path: result.path });
276
+ // Only cache positive results to avoid caching transient failures
238
277
  toolAvailabilityCache.set(tool, {
239
278
  result,
240
279
  timestamp: Date.now()
241
280
  });
281
+ } else {
282
+ debugLog('TOOL_CHECK', `Tool ${tool} not found`, { exitCode: code, stderr: stderr.trim() || '(empty)' });
242
283
  }
243
284
 
244
285
  resolve(result);
245
286
  });
246
287
 
247
- child.on('error', () => {
288
+ child.on('error', (error) => {
289
+ errorLog('TOOL_CHECK', `Failed to check tool availability: ${tool}`, error, { command, tool });
248
290
  // Don't cache errors - they may be transient
249
291
  resolve({ available: false, path: null });
250
292
  });
@@ -252,6 +294,7 @@ async function checkToolAvailability(tool: string): Promise<ToolAvailability> {
252
294
  // Timeout after 5 seconds
253
295
  setTimeout(() => {
254
296
  child.kill();
297
+ debugLog('TOOL_CHECK', `Timeout checking tool ${tool} (5s)`);
255
298
  // Don't cache timeouts - they may be transient
256
299
  resolve({ available: false, path: null });
257
300
  }, 5000);
@@ -279,6 +322,15 @@ function buildCommand(params: {
279
322
  }): { command: string; args: string[]; useStdin: boolean } {
280
323
  const { tool, prompt, mode = 'analysis', model, dir, include, nativeResume } = params;
281
324
 
325
+ debugLog('BUILD_CMD', `Building command for tool: ${tool}`, {
326
+ mode,
327
+ model: model || '(default)',
328
+ dir: dir || '(cwd)',
329
+ include: include || '(none)',
330
+ nativeResume: nativeResume ? { enabled: nativeResume.enabled, isLatest: nativeResume.isLatest, sessionId: nativeResume.sessionId } : '(none)',
331
+ promptLength: prompt.length
332
+ });
333
+
282
334
  let command = tool;
283
335
  let args: string[] = [];
284
336
  // Default to stdin for all tools to avoid escaping issues on Windows
@@ -418,9 +470,17 @@ function buildCommand(params: {
418
470
  break;
419
471
 
420
472
  default:
473
+ errorLog('BUILD_CMD', `Unknown CLI tool: ${tool}`);
421
474
  throw new Error(`Unknown CLI tool: ${tool}`);
422
475
  }
423
476
 
477
+ debugLog('BUILD_CMD', `Command built successfully`, {
478
+ command,
479
+ args,
480
+ useStdin,
481
+ fullCommand: `${command} ${args.join(' ')}${useStdin ? ' (stdin)' : ''}`
482
+ });
483
+
424
484
  return { command, args, useStdin };
425
485
  }
426
486
 
@@ -596,7 +656,7 @@ async function executeCliTool(
596
656
  ensureHistoryDir(workingDir); // Ensure history directory exists
597
657
 
598
658
  // NEW: Check if model is a custom LiteLLM endpoint ID
599
- if (model && !['gemini', 'qwen', 'codex'].includes(tool)) {
659
+ if (model) {
600
660
  const endpoint = findEndpointById(workingDir, model);
601
661
  if (endpoint) {
602
662
  // Route to LiteLLM executor
@@ -821,18 +881,42 @@ async function executeCliTool(
821
881
 
822
882
  const startTime = Date.now();
823
883
 
884
+ debugLog('EXEC', `Starting CLI execution`, {
885
+ tool,
886
+ mode,
887
+ workingDir,
888
+ conversationId,
889
+ promptLength: finalPrompt.length,
890
+ hasResume: !!resume,
891
+ hasCustomId: !!customId
892
+ });
893
+
824
894
  return new Promise((resolve, reject) => {
825
895
  // Windows requires shell: true for npm global commands (.cmd files)
826
896
  // Unix-like systems can use shell: false for direct execution
827
897
  const isWindows = process.platform === 'win32';
898
+
899
+ debugLog('SPAWN', `Spawning process`, {
900
+ command,
901
+ args,
902
+ cwd: workingDir,
903
+ shell: isWindows,
904
+ useStdin,
905
+ platform: process.platform,
906
+ fullCommand: `${command} ${args.join(' ')}`
907
+ });
908
+
828
909
  const child = spawn(command, args, {
829
910
  cwd: workingDir,
830
911
  shell: isWindows, // Enable shell on Windows for .cmd files
831
912
  stdio: [useStdin ? 'pipe' : 'ignore', 'pipe', 'pipe']
832
913
  });
833
914
 
915
+ debugLog('SPAWN', `Process spawned`, { pid: child.pid });
916
+
834
917
  // Write prompt to stdin if using stdin mode (for gemini/qwen)
835
918
  if (useStdin && child.stdin) {
919
+ debugLog('STDIN', `Writing prompt to stdin (${finalPrompt.length} bytes)`);
836
920
  child.stdin.write(finalPrompt);
837
921
  child.stdin.end();
838
922
  }
@@ -864,10 +948,19 @@ async function executeCliTool(
864
948
  const endTime = Date.now();
865
949
  const duration = endTime - startTime;
866
950
 
951
+ debugLog('CLOSE', `Process closed`, {
952
+ exitCode: code,
953
+ duration: `${duration}ms`,
954
+ timedOut,
955
+ stdoutLength: stdout.length,
956
+ stderrLength: stderr.length
957
+ });
958
+
867
959
  // Determine status - prioritize output content over exit code
868
960
  let status: 'success' | 'error' | 'timeout' = 'success';
869
961
  if (timedOut) {
870
962
  status = 'timeout';
963
+ debugLog('STATUS', `Execution timed out after ${duration}ms`);
871
964
  } else if (code !== 0) {
872
965
  // Non-zero exit code doesn't always mean failure
873
966
  // Check if there's valid output (AI response) - treat as success
@@ -877,12 +970,31 @@ async function executeCliTool(
877
970
  stderr.includes('API key') ||
878
971
  stderr.includes('rate limit exceeded');
879
972
 
973
+ debugLog('STATUS', `Non-zero exit code analysis`, {
974
+ exitCode: code,
975
+ hasValidOutput,
976
+ hasFatalError,
977
+ stderrPreview: stderr.substring(0, 500)
978
+ });
979
+
880
980
  if (hasValidOutput && !hasFatalError) {
881
981
  // Has output and no fatal errors - treat as success despite exit code
882
982
  status = 'success';
983
+ debugLog('STATUS', `Treating as success (has valid output, no fatal errors)`);
883
984
  } else {
884
985
  status = 'error';
986
+ errorLog('EXEC', `CLI execution failed`, undefined, {
987
+ exitCode: code,
988
+ tool,
989
+ command,
990
+ args,
991
+ workingDir,
992
+ stderrFull: stderr,
993
+ stdoutPreview: stdout.substring(0, 200)
994
+ });
885
995
  }
996
+ } else {
997
+ debugLog('STATUS', `Execution successful (exit code 0)`);
886
998
  }
887
999
 
888
1000
  // Create new turn - cache full output when not streaming (default)
@@ -1066,7 +1178,16 @@ async function executeCliTool(
1066
1178
 
1067
1179
  // Handle errors
1068
1180
  child.on('error', (error) => {
1069
- reject(new Error(`Failed to spawn ${tool}: ${error.message}`));
1181
+ errorLog('SPAWN', `Failed to spawn process`, error, {
1182
+ tool,
1183
+ command,
1184
+ args,
1185
+ workingDir,
1186
+ fullCommand: `${command} ${args.join(' ')}`,
1187
+ platform: process.platform,
1188
+ path: process.env.PATH?.split(process.platform === 'win32' ? ';' : ':').slice(0, 10).join('\n ') + '...'
1189
+ });
1190
+ reject(new Error(`Failed to spawn ${tool}: ${error.message}\n Command: ${command} ${args.join(' ')}\n Working Dir: ${workingDir}`));
1070
1191
  });
1071
1192
 
1072
1193
  // Timeout handling (timeout=0 disables internal timeout, controlled by external caller)
@@ -11,15 +11,19 @@ import {
11
11
  } from '../config/litellm-api-config-manager.js';
12
12
  import type { CustomEndpoint, ProviderCredential } from '../types/litellm-api-config.js';
13
13
 
14
- export interface LiteLLMExecutionOptions {
15
- prompt: string;
16
- endpointId: string; // Custom endpoint ID (e.g., "my-gpt4o")
17
- baseDir: string; // Project base directory
18
- cwd?: string; // Working directory for file resolution
19
- includeDirs?: string[]; // Additional directories for @patterns
20
- enableCache?: boolean; // Override endpoint cache setting
21
- onOutput?: (data: { type: string; data: string }) => void;
22
- }
14
/** Options accepted by executeLiteLLMEndpoint. */
export interface LiteLLMExecutionOptions {
  /** Prompt text to send to the endpoint. */
  prompt: string;
  /** Custom endpoint ID (e.g., "my-gpt4o") */
  endpointId: string;
  /** Project base directory */
  baseDir: string;
  /** Working directory for file resolution */
  cwd?: string;
  /** Additional directories for @patterns */
  includeDirs?: string[];
  /** Override endpoint cache setting */
  enableCache?: boolean;
  /** Streaming callback receiving stdout/stderr-tagged output chunks. */
  onOutput?: (data: { type: string; data: string }) => void;
  /** Number of retries after the initial attempt (default: 0) */
  maxRetries?: number;
  /** Base delay for exponential backoff in milliseconds (default: 1000) */
  retryBaseDelayMs?: number;
}
23
27
 
24
28
  export interface LiteLLMExecutionResult {
25
29
  success: boolean;
@@ -48,10 +52,10 @@ export function extractPatterns(prompt: string): string[] {
48
52
  /**
49
53
  * Execute LiteLLM endpoint with optional context caching
50
54
  */
51
- export async function executeLiteLLMEndpoint(
52
- options: LiteLLMExecutionOptions
53
- ): Promise<LiteLLMExecutionResult> {
54
- const { prompt, endpointId, baseDir, cwd, includeDirs, enableCache, onOutput } = options;
55
+ export async function executeLiteLLMEndpoint(
56
+ options: LiteLLMExecutionOptions
57
+ ): Promise<LiteLLMExecutionResult> {
58
+ const { prompt, endpointId, baseDir, cwd, includeDirs, enableCache, onOutput } = options;
55
59
 
56
60
  // 1. Find endpoint configuration
57
61
  const endpoint = findEndpointById(baseDir, endpointId);
@@ -179,8 +183,16 @@ export async function executeLiteLLMEndpoint(
179
183
  }
180
184
  }
181
185
 
182
- // Use litellm-client to call chat
183
- const response = await client.chat(finalPrompt, endpoint.model);
186
+ // Use litellm-client to call chat
187
+ const response = await callWithRetries(
188
+ () => client.chat(finalPrompt, endpoint.model),
189
+ {
190
+ maxRetries: options.maxRetries ?? 0,
191
+ baseDelayMs: options.retryBaseDelayMs ?? 1000,
192
+ onOutput,
193
+ rateLimitKey: `${provider.type}:${endpoint.model}`,
194
+ },
195
+ );
184
196
 
185
197
  if (onOutput) {
186
198
  onOutput({ type: 'stdout', data: response });
@@ -230,12 +242,83 @@ function getProviderEnvVarName(providerType: string): string | null {
230
242
  /**
231
243
  * Get environment variable name for provider base URL
232
244
  */
233
- function getProviderBaseUrlEnvVarName(providerType: string): string | null {
234
- const envVarMap: Record<string, string> = {
235
- openai: 'OPENAI_API_BASE',
236
- anthropic: 'ANTHROPIC_API_BASE',
237
- azure: 'AZURE_API_BASE',
238
- };
239
-
240
- return envVarMap[providerType] || null;
241
- }
245
+ function getProviderBaseUrlEnvVarName(providerType: string): string | null {
246
+ const envVarMap: Record<string, string> = {
247
+ openai: 'OPENAI_API_BASE',
248
+ anthropic: 'ANTHROPIC_API_BASE',
249
+ azure: 'AZURE_API_BASE',
250
+ };
251
+
252
+ return envVarMap[providerType] || null;
253
+ }
254
+
255
+ const rateLimitRetryQueueNextAt = new Map<string, number>();
256
+
257
+ function sleep(ms: number): Promise<void> {
258
+ return new Promise((resolve) => setTimeout(resolve, ms));
259
+ }
260
+
261
+ function isRateLimitError(errorMessage: string): boolean {
262
+ return /429|rate limit|too many requests/i.test(errorMessage);
263
+ }
264
+
265
+ function isRetryableError(errorMessage: string): boolean {
266
+ // Never retry auth/config errors
267
+ if (/401|403|unauthorized|forbidden/i.test(errorMessage)) {
268
+ return false;
269
+ }
270
+
271
+ // Retry rate limits, transient server errors, and network timeouts
272
+ return /(429|500|502|503|504|timeout|timed out|econnreset|enotfound|econnrefused|socket hang up)/i.test(
273
+ errorMessage,
274
+ );
275
+ }
276
+
277
+ async function callWithRetries(
278
+ call: () => Promise<string>,
279
+ options: {
280
+ maxRetries: number;
281
+ baseDelayMs: number;
282
+ onOutput?: (data: { type: string; data: string }) => void;
283
+ rateLimitKey: string;
284
+ },
285
+ ): Promise<string> {
286
+ const { maxRetries, baseDelayMs, onOutput, rateLimitKey } = options;
287
+ let attempt = 0;
288
+
289
+ while (true) {
290
+ try {
291
+ return await call();
292
+ } catch (err) {
293
+ const errorMessage = err instanceof Error ? err.message : String(err);
294
+
295
+ if (attempt >= maxRetries || !isRetryableError(errorMessage)) {
296
+ throw err;
297
+ }
298
+
299
+ const delayMs = baseDelayMs * 2 ** attempt;
300
+
301
+ if (onOutput) {
302
+ onOutput({
303
+ type: 'stderr',
304
+ data: `[LiteLLM retry ${attempt + 1}/${maxRetries}: waiting ${delayMs}ms] ${errorMessage}\n`,
305
+ });
306
+ }
307
+
308
+ attempt += 1;
309
+
310
+ if (isRateLimitError(errorMessage)) {
311
+ const now = Date.now();
312
+ const earliestAt = now + delayMs;
313
+ const queuedAt = rateLimitRetryQueueNextAt.get(rateLimitKey) ?? 0;
314
+ const scheduledAt = Math.max(queuedAt, earliestAt);
315
+ rateLimitRetryQueueNextAt.set(rateLimitKey, scheduledAt + delayMs);
316
+
317
+ await sleep(scheduledAt - now);
318
+ continue;
319
+ }
320
+
321
+ await sleep(delayMs);
322
+ }
323
+ }
324
+ }