@claude-flow/cli 3.0.0-alpha.29 → 3.0.0-alpha.31
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/src/commands/daemon.d.ts.map +1 -1
- package/dist/src/commands/daemon.js +33 -9
- package/dist/src/commands/daemon.js.map +1 -1
- package/dist/src/init/settings-generator.d.ts.map +1 -1
- package/dist/src/init/settings-generator.js +9 -6
- package/dist/src/init/settings-generator.js.map +1 -1
- package/dist/src/services/container-worker-pool.d.ts +197 -0
- package/dist/src/services/container-worker-pool.d.ts.map +1 -0
- package/dist/src/services/container-worker-pool.js +581 -0
- package/dist/src/services/container-worker-pool.js.map +1 -0
- package/dist/src/services/headless-worker-executor.d.ts +304 -0
- package/dist/src/services/headless-worker-executor.d.ts.map +1 -0
- package/dist/src/services/headless-worker-executor.js +997 -0
- package/dist/src/services/headless-worker-executor.js.map +1 -0
- package/dist/src/services/index.d.ts +6 -0
- package/dist/src/services/index.d.ts.map +1 -1
- package/dist/src/services/index.js +5 -0
- package/dist/src/services/index.js.map +1 -1
- package/dist/src/services/worker-daemon.d.ts +55 -5
- package/dist/src/services/worker-daemon.d.ts.map +1 -1
- package/dist/src/services/worker-daemon.js +191 -13
- package/dist/src/services/worker-daemon.js.map +1 -1
- package/dist/src/services/worker-queue.d.ts +194 -0
- package/dist/src/services/worker-queue.d.ts.map +1 -0
- package/dist/src/services/worker-queue.js +511 -0
- package/dist/src/services/worker-queue.js.map +1 -0
- package/dist/tsconfig.tsbuildinfo +1 -1
- package/package.json +1 -1
|
@@ -0,0 +1,997 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Headless Worker Executor
|
|
3
|
+
* Enables workers to invoke Claude Code in headless mode with configurable sandbox profiles.
|
|
4
|
+
*
|
|
5
|
+
* ADR-020: Headless Worker Integration Architecture
|
|
6
|
+
* - Integrates with CLAUDE_CODE_HEADLESS and CLAUDE_CODE_SANDBOX_MODE environment variables
|
|
7
|
+
* - Provides process pool for concurrent execution
|
|
8
|
+
* - Builds context from file glob patterns
|
|
9
|
+
* - Supports prompt templates and output parsing
|
|
10
|
+
* - Implements timeout and graceful error handling
|
|
11
|
+
*
|
|
12
|
+
* Key Features:
|
|
13
|
+
* - Process pool with configurable maxConcurrent
|
|
14
|
+
* - Context building from file glob patterns with caching
|
|
15
|
+
* - Prompt template system with context injection
|
|
16
|
+
* - Output parsing (text, json, markdown)
|
|
17
|
+
* - Timeout handling with graceful termination
|
|
18
|
+
* - Execution logging for debugging
|
|
19
|
+
* - Event emission for monitoring
|
|
20
|
+
*/
|
|
21
|
+
import { spawn, execSync } from 'child_process';
|
|
22
|
+
import { EventEmitter } from 'events';
|
|
23
|
+
import { existsSync, readFileSync, readdirSync, mkdirSync, writeFileSync } from 'fs';
|
|
24
|
+
import { join } from 'path';
|
|
25
|
+
// ============================================
|
|
26
|
+
// Constants
|
|
27
|
+
// ============================================
|
|
28
|
+
/**
|
|
29
|
+
* Array of headless worker types for runtime checking
|
|
30
|
+
*/
|
|
31
|
+
/**
 * Worker types that are executed through Claude Code headless mode.
 * Each entry has a matching config in HEADLESS_WORKER_CONFIGS.
 */
export const HEADLESS_WORKER_TYPES = [
    'audit', 'optimize', 'testgaps', 'document',
    'ultralearn', 'refactor', 'deepdive', 'predict',
];
|
|
41
|
+
/**
|
|
42
|
+
* Array of local worker types
|
|
43
|
+
*/
|
|
44
|
+
/**
 * Worker types that run locally (no Claude Code invocation).
 * Each entry has a matching config in LOCAL_WORKER_CONFIGS.
 */
export const LOCAL_WORKER_TYPES = ['map', 'consolidate', 'benchmark', 'preload'];
|
|
50
|
+
/**
|
|
51
|
+
* Model ID mapping
|
|
52
|
+
*/
|
|
53
|
+
/**
 * Maps the short model alias used in worker configs to the full
 * Anthropic model identifier passed via ANTHROPIC_MODEL.
 */
const MODEL_IDS = {
    sonnet: 'claude-sonnet-4-20250514',
    opus: 'claude-opus-4-20250514',
    haiku: 'claude-haiku-4-20250514',
};
|
|
58
|
+
/**
|
|
59
|
+
* Default headless worker configurations based on ADR-020
|
|
60
|
+
*/
|
|
61
|
+
export const HEADLESS_WORKER_CONFIGS = {
    // Security audit: highest priority, runs every 30 min, strict sandbox,
    // cheap model (haiku) since the task is pattern-scanning.
    audit: {
        type: 'audit',
        mode: 'headless',
        intervalMs: 30 * 60 * 1000,
        priority: 'critical',
        description: 'AI-powered security analysis',
        enabled: true,
        headless: {
            promptTemplate: `Analyze this codebase for security vulnerabilities:
- Check for hardcoded secrets (API keys, passwords)
- Identify SQL injection risks
- Find XSS vulnerabilities
- Check for insecure dependencies
- Identify authentication/authorization issues

Provide a JSON report with:
{
"vulnerabilities": [{ "severity": "high|medium|low", "file": "...", "line": N, "description": "..." }],
"riskScore": 0-100,
"recommendations": ["..."]
}`,
            sandbox: 'strict',
            model: 'haiku',
            outputFormat: 'json',
            contextPatterns: ['**/*.ts', '**/*.js', '**/.env*', '**/package.json'],
            timeoutMs: 5 * 60 * 1000,
        },
    },
    // Performance review: hourly, permissive sandbox, mid-tier model.
    optimize: {
        type: 'optimize',
        mode: 'headless',
        intervalMs: 60 * 60 * 1000,
        priority: 'high',
        description: 'AI optimization suggestions',
        enabled: true,
        headless: {
            promptTemplate: `Analyze this codebase for performance optimizations:
- Identify N+1 query patterns
- Find unnecessary re-renders in React
- Suggest caching opportunities
- Identify memory leaks
- Find redundant computations

Provide actionable suggestions with code examples.`,
            sandbox: 'permissive',
            model: 'sonnet',
            outputFormat: 'markdown',
            contextPatterns: ['src/**/*.ts', 'src/**/*.tsx'],
            timeoutMs: 10 * 60 * 1000,
        },
    },
    // Test-coverage gap analysis: hourly, includes test dirs in context.
    testgaps: {
        type: 'testgaps',
        mode: 'headless',
        intervalMs: 60 * 60 * 1000,
        priority: 'normal',
        description: 'AI test gap analysis',
        enabled: true,
        headless: {
            promptTemplate: `Analyze test coverage and identify gaps:
- Find untested functions and classes
- Identify edge cases not covered
- Suggest new test scenarios
- Check for missing error handling tests
- Identify integration test gaps

For each gap, provide a test skeleton.`,
            sandbox: 'permissive',
            model: 'sonnet',
            outputFormat: 'markdown',
            contextPatterns: ['src/**/*.ts', 'tests/**/*.ts', '__tests__/**/*.ts'],
            timeoutMs: 10 * 60 * 1000,
        },
    },
    // Documentation generation: disabled by default (opt-in), every 2 hours.
    document: {
        type: 'document',
        mode: 'headless',
        intervalMs: 120 * 60 * 1000,
        priority: 'low',
        description: 'AI documentation generation',
        enabled: false,
        headless: {
            promptTemplate: `Generate documentation for undocumented code:
- Add JSDoc comments to functions
- Create README sections for modules
- Document API endpoints
- Add inline comments for complex logic
- Generate usage examples

Focus on public APIs and exported functions.`,
            sandbox: 'permissive',
            model: 'haiku',
            outputFormat: 'markdown',
            contextPatterns: ['src/**/*.ts'],
            timeoutMs: 10 * 60 * 1000,
        },
    },
    // Deep learning pass: intervalMs 0 means manual trigger only;
    // uses the most capable model (opus).
    ultralearn: {
        type: 'ultralearn',
        mode: 'headless',
        intervalMs: 0, // Manual trigger only
        priority: 'normal',
        description: 'Deep knowledge acquisition',
        enabled: false,
        headless: {
            promptTemplate: `Deeply analyze this codebase to learn:
- Architectural patterns used
- Coding conventions
- Domain-specific terminology
- Common patterns and idioms
- Team preferences

Provide insights as JSON:
{
"architecture": { "patterns": [...], "style": "..." },
"conventions": { "naming": "...", "formatting": "..." },
"domains": ["..."],
"insights": ["..."]
}`,
            sandbox: 'strict',
            model: 'opus',
            outputFormat: 'json',
            contextPatterns: ['**/*.ts', '**/CLAUDE.md', '**/README.md'],
            timeoutMs: 15 * 60 * 1000,
        },
    },
    // Refactoring suggestions: manual trigger only.
    refactor: {
        type: 'refactor',
        mode: 'headless',
        intervalMs: 0, // Manual trigger only
        priority: 'normal',
        description: 'AI refactoring suggestions',
        enabled: false,
        headless: {
            promptTemplate: `Suggest refactoring opportunities:
- Identify code duplication
- Suggest better abstractions
- Find opportunities for design patterns
- Identify overly complex functions
- Suggest module reorganization

Provide before/after code examples.`,
            sandbox: 'permissive',
            model: 'sonnet',
            outputFormat: 'markdown',
            contextPatterns: ['src/**/*.ts'],
            timeoutMs: 10 * 60 * 1000,
        },
    },
    // Whole-codebase deep analysis: manual trigger only, opus model.
    deepdive: {
        type: 'deepdive',
        mode: 'headless',
        intervalMs: 0, // Manual trigger only
        priority: 'normal',
        description: 'Deep code analysis',
        enabled: false,
        headless: {
            promptTemplate: `Perform deep analysis of this codebase:
- Understand data flow
- Map dependencies
- Identify architectural issues
- Find potential bugs
- Analyze error handling

Provide comprehensive report.`,
            sandbox: 'strict',
            model: 'opus',
            outputFormat: 'markdown',
            contextPatterns: ['src/**/*.ts'],
            timeoutMs: 15 * 60 * 1000,
        },
    },
    // Predictive preloading: every 10 min, reads metrics JSON (not source),
    // short timeout since input is small.
    predict: {
        type: 'predict',
        mode: 'headless',
        intervalMs: 10 * 60 * 1000,
        priority: 'low',
        description: 'Predictive preloading',
        enabled: false,
        headless: {
            promptTemplate: `Based on recent activity, predict what the developer needs:
- Files likely to be edited next
- Tests that should be run
- Documentation to reference
- Dependencies to check

Provide preload suggestions as JSON:
{
"filesToPreload": ["..."],
"testsToRun": ["..."],
"docsToReference": ["..."],
"confidence": 0.0-1.0
}`,
            sandbox: 'strict',
            model: 'haiku',
            outputFormat: 'json',
            contextPatterns: ['.claude-flow/metrics/*.json'],
            timeoutMs: 2 * 60 * 1000,
        },
    },
};
|
|
263
|
+
/**
|
|
264
|
+
* Local worker configurations
|
|
265
|
+
*/
|
|
266
|
+
export const LOCAL_WORKER_CONFIGS = {
    // Codebase structure mapping every 15 minutes.
    map: {
        type: 'map',
        mode: 'local',
        intervalMs: 15 * 60 * 1000,
        priority: 'normal',
        description: 'Codebase mapping',
        enabled: true,
    },
    // Memory consolidation every 30 minutes.
    consolidate: {
        type: 'consolidate',
        mode: 'local',
        intervalMs: 30 * 60 * 1000,
        priority: 'low',
        description: 'Memory consolidation',
        enabled: true,
    },
    // Hourly performance benchmarking; disabled by default.
    benchmark: {
        type: 'benchmark',
        mode: 'local',
        intervalMs: 60 * 60 * 1000,
        priority: 'low',
        description: 'Performance benchmarking',
        enabled: false,
    },
    // Resource preloading every 5 minutes; disabled by default.
    preload: {
        type: 'preload',
        mode: 'local',
        intervalMs: 5 * 60 * 1000,
        priority: 'low',
        description: 'Resource preloading',
        enabled: false,
    },
};
|
|
300
|
+
/**
|
|
301
|
+
* Combined worker configurations
|
|
302
|
+
*/
|
|
303
|
+
/**
 * All worker configurations (headless first, then local),
 * flattened into a single array for iteration.
 */
export const ALL_WORKER_CONFIGS = Object.values(HEADLESS_WORKER_CONFIGS).concat(Object.values(LOCAL_WORKER_CONFIGS));
|
|
307
|
+
// ============================================
|
|
308
|
+
// Utility Functions
|
|
309
|
+
// ============================================
|
|
310
|
+
/**
|
|
311
|
+
* Check if a worker type is a headless worker
|
|
312
|
+
*/
|
|
313
|
+
/**
 * True when the given worker type runs in Claude Code headless mode.
 * @param {string} type - Worker type identifier.
 * @returns {boolean}
 */
export function isHeadlessWorker(type) {
    return HEADLESS_WORKER_TYPES.indexOf(type) !== -1;
}
|
|
316
|
+
/**
|
|
317
|
+
* Check if a worker type is a local worker
|
|
318
|
+
*/
|
|
319
|
+
/**
 * True when the given worker type runs locally (no Claude invocation).
 * @param {string} type - Worker type identifier.
 * @returns {boolean}
 */
export function isLocalWorker(type) {
    return LOCAL_WORKER_TYPES.indexOf(type) !== -1;
}
|
|
322
|
+
/**
|
|
323
|
+
* Get model ID from model type
|
|
324
|
+
*/
|
|
325
|
+
/**
 * Resolve a short model alias ('sonnet' | 'opus' | 'haiku') to its full
 * Anthropic model ID; undefined for unknown aliases.
 * @param {string} model - Model alias.
 * @returns {string|undefined}
 */
export function getModelId(model) {
    const id = MODEL_IDS[model];
    return id;
}
|
|
328
|
+
/**
|
|
329
|
+
* Get worker configuration by type
|
|
330
|
+
*/
|
|
331
|
+
/**
 * Look up a worker config by type, checking headless workers first,
 * then local workers.
 * @param {string} type - Worker type identifier.
 * @returns {object|undefined} The config, or undefined for unknown types.
 */
export function getWorkerConfig(type) {
    if (isHeadlessWorker(type)) {
        return HEADLESS_WORKER_CONFIGS[type];
    }
    return isLocalWorker(type) ? LOCAL_WORKER_CONFIGS[type] : undefined;
}
|
|
340
|
+
// ============================================
|
|
341
|
+
// HeadlessWorkerExecutor Class
|
|
342
|
+
// ============================================
|
|
343
|
+
/**
|
|
344
|
+
* HeadlessWorkerExecutor - Executes workers using Claude Code in headless mode
|
|
345
|
+
*
|
|
346
|
+
* Features:
|
|
347
|
+
* - Process pool with configurable concurrency limit
|
|
348
|
+
* - Pending queue for overflow requests
|
|
349
|
+
* - Context caching with configurable TTL
|
|
350
|
+
* - Execution logging for debugging
|
|
351
|
+
* - Event emission for monitoring
|
|
352
|
+
* - Graceful termination
|
|
353
|
+
*/
|
|
354
|
+
export class HeadlessWorkerExecutor extends EventEmitter {
    // Absolute project root; context file paths are resolved against it.
    projectRoot;
    // Merged executor configuration (constructor options over defaults).
    config;
    // Active executions: executionId -> { process, executionId, workerType, startTime, timeout }.
    processPool = new Map();
    // Overflow requests waiting for a pool slot: { workerType, config, resolve, reject, queuedAt }.
    pendingQueue = [];
    // Built-context cache: sorted-pattern key -> { content, timestamp, patterns }.
    contextCache = new Map();
    // Lazily-probed `claude` CLI availability; null = not checked yet.
    claudeCodeAvailable = null;
    // Output of `claude --version` once probed, else null.
    claudeCodeVersion = null;
|
|
362
|
+
constructor(projectRoot, options) {
|
|
363
|
+
super();
|
|
364
|
+
this.projectRoot = projectRoot;
|
|
365
|
+
// Merge with defaults
|
|
366
|
+
this.config = {
|
|
367
|
+
maxConcurrent: options?.maxConcurrent ?? 2,
|
|
368
|
+
defaultTimeoutMs: options?.defaultTimeoutMs ?? 5 * 60 * 1000,
|
|
369
|
+
maxContextFiles: options?.maxContextFiles ?? 20,
|
|
370
|
+
maxCharsPerFile: options?.maxCharsPerFile ?? 5000,
|
|
371
|
+
logDir: options?.logDir ?? join(projectRoot, '.claude-flow', 'logs', 'headless'),
|
|
372
|
+
cacheContext: options?.cacheContext ?? true,
|
|
373
|
+
cacheTtlMs: options?.cacheTtlMs ?? 60000, // 1 minute default
|
|
374
|
+
};
|
|
375
|
+
// Ensure log directory exists
|
|
376
|
+
this.ensureLogDir();
|
|
377
|
+
}
|
|
378
|
+
// ============================================
|
|
379
|
+
// Public API
|
|
380
|
+
// ============================================
|
|
381
|
+
/**
|
|
382
|
+
* Check if Claude Code CLI is available
|
|
383
|
+
*/
|
|
384
|
+
async isAvailable() {
|
|
385
|
+
if (this.claudeCodeAvailable !== null) {
|
|
386
|
+
return this.claudeCodeAvailable;
|
|
387
|
+
}
|
|
388
|
+
try {
|
|
389
|
+
const output = execSync('claude --version', {
|
|
390
|
+
encoding: 'utf-8',
|
|
391
|
+
stdio: 'pipe',
|
|
392
|
+
timeout: 5000,
|
|
393
|
+
});
|
|
394
|
+
this.claudeCodeAvailable = true;
|
|
395
|
+
this.claudeCodeVersion = output.trim();
|
|
396
|
+
this.emit('status', { available: true, version: this.claudeCodeVersion });
|
|
397
|
+
return true;
|
|
398
|
+
}
|
|
399
|
+
catch {
|
|
400
|
+
this.claudeCodeAvailable = false;
|
|
401
|
+
this.emit('status', { available: false });
|
|
402
|
+
return false;
|
|
403
|
+
}
|
|
404
|
+
}
|
|
405
|
+
/**
|
|
406
|
+
* Get Claude Code version
|
|
407
|
+
*/
|
|
408
|
+
async getVersion() {
    // The availability probe populates claudeCodeVersion as a side effect;
    // returns null when the CLI is not installed.
    await this.isAvailable();
    return this.claudeCodeVersion;
}
|
|
412
|
+
/**
|
|
413
|
+
* Execute a headless worker
|
|
414
|
+
*/
|
|
415
|
+
async execute(workerType, configOverrides) {
    const baseConfig = HEADLESS_WORKER_CONFIGS[workerType];
    if (!baseConfig) {
        throw new Error(`Unknown headless worker type: ${workerType}`);
    }
    // Check availability — without the CLI we return (not throw) an error
    // result so callers can surface it uniformly.
    const available = await this.isAvailable();
    if (!available) {
        const result = this.createErrorResult(workerType, 'Claude Code CLI not available. Install with: npm install -g @anthropic-ai/claude-code');
        this.emit('error', result);
        return result;
    }
    // Check concurrent limit
    if (this.processPool.size >= this.config.maxConcurrent) {
        // Queue the request; the returned promise settles when processQueue()
        // later picks this entry up and runs executeInternal for it.
        return new Promise((resolve, reject) => {
            const entry = {
                workerType,
                config: configOverrides,
                resolve,
                reject,
                queuedAt: new Date(),
            };
            this.pendingQueue.push(entry);
            this.emit('queued', {
                workerType,
                queuePosition: this.pendingQueue.length,
            });
        });
    }
    // Execute immediately
    return this.executeInternal(workerType, configOverrides);
}
|
|
448
|
+
/**
|
|
449
|
+
* Get pool status
|
|
450
|
+
*/
|
|
451
|
+
getPoolStatus() {
|
|
452
|
+
const now = Date.now();
|
|
453
|
+
return {
|
|
454
|
+
activeCount: this.processPool.size,
|
|
455
|
+
queueLength: this.pendingQueue.length,
|
|
456
|
+
maxConcurrent: this.config.maxConcurrent,
|
|
457
|
+
activeWorkers: Array.from(this.processPool.values()).map((entry) => ({
|
|
458
|
+
executionId: entry.executionId,
|
|
459
|
+
workerType: entry.workerType,
|
|
460
|
+
startTime: entry.startTime,
|
|
461
|
+
elapsedMs: now - entry.startTime.getTime(),
|
|
462
|
+
})),
|
|
463
|
+
queuedWorkers: this.pendingQueue.map((entry) => ({
|
|
464
|
+
workerType: entry.workerType,
|
|
465
|
+
queuedAt: entry.queuedAt,
|
|
466
|
+
waitingMs: now - entry.queuedAt.getTime(),
|
|
467
|
+
})),
|
|
468
|
+
};
|
|
469
|
+
}
|
|
470
|
+
/**
|
|
471
|
+
* Get number of active executions
|
|
472
|
+
*/
|
|
473
|
+
getActiveCount() {
    // Executions currently holding a pool slot (queued requests excluded).
    return this.processPool.size;
}
|
|
476
|
+
/**
|
|
477
|
+
* Cancel a running execution
|
|
478
|
+
*/
|
|
479
|
+
cancel(executionId) {
|
|
480
|
+
const entry = this.processPool.get(executionId);
|
|
481
|
+
if (!entry) {
|
|
482
|
+
return false;
|
|
483
|
+
}
|
|
484
|
+
clearTimeout(entry.timeout);
|
|
485
|
+
entry.process.kill('SIGTERM');
|
|
486
|
+
this.processPool.delete(executionId);
|
|
487
|
+
this.emit('cancelled', { executionId });
|
|
488
|
+
// Process next in queue
|
|
489
|
+
this.processQueue();
|
|
490
|
+
return true;
|
|
491
|
+
}
|
|
492
|
+
/**
|
|
493
|
+
* Cancel all running executions
|
|
494
|
+
*/
|
|
495
|
+
cancelAll() {
|
|
496
|
+
let cancelled = 0;
|
|
497
|
+
// Cancel active processes (convert to array to avoid iterator issues)
|
|
498
|
+
const entries = Array.from(this.processPool.entries());
|
|
499
|
+
for (const [executionId, entry] of entries) {
|
|
500
|
+
clearTimeout(entry.timeout);
|
|
501
|
+
entry.process.kill('SIGTERM');
|
|
502
|
+
this.emit('cancelled', { executionId });
|
|
503
|
+
cancelled++;
|
|
504
|
+
}
|
|
505
|
+
this.processPool.clear();
|
|
506
|
+
// Reject pending queue
|
|
507
|
+
for (const entry of this.pendingQueue) {
|
|
508
|
+
entry.reject(new Error('Executor cancelled all executions'));
|
|
509
|
+
}
|
|
510
|
+
this.pendingQueue = [];
|
|
511
|
+
this.emit('allCancelled', { count: cancelled });
|
|
512
|
+
return cancelled;
|
|
513
|
+
}
|
|
514
|
+
/**
|
|
515
|
+
* Clear context cache
|
|
516
|
+
*/
|
|
517
|
+
clearContextCache() {
    // Drop all cached context strings; the next buildContext() re-reads files.
    this.contextCache.clear();
    this.emit('cacheClear', {});
}
|
|
521
|
+
/**
|
|
522
|
+
* Get worker configuration
|
|
523
|
+
*/
|
|
524
|
+
getConfig(workerType) {
    // Undefined for unknown or local-only worker types.
    return HEADLESS_WORKER_CONFIGS[workerType];
}
|
|
527
|
+
/**
|
|
528
|
+
* Get all headless worker types
|
|
529
|
+
*/
|
|
530
|
+
getHeadlessWorkerTypes() {
|
|
531
|
+
return [...HEADLESS_WORKER_TYPES];
|
|
532
|
+
}
|
|
533
|
+
/**
|
|
534
|
+
* Get all local worker types
|
|
535
|
+
*/
|
|
536
|
+
getLocalWorkerTypes() {
|
|
537
|
+
return [...LOCAL_WORKER_TYPES];
|
|
538
|
+
}
|
|
539
|
+
// ============================================
|
|
540
|
+
// Private Methods
|
|
541
|
+
// ============================================
|
|
542
|
+
/**
|
|
543
|
+
* Ensure log directory exists
|
|
544
|
+
*/
|
|
545
|
+
ensureLogDir() {
|
|
546
|
+
try {
|
|
547
|
+
if (!existsSync(this.config.logDir)) {
|
|
548
|
+
mkdirSync(this.config.logDir, { recursive: true });
|
|
549
|
+
}
|
|
550
|
+
}
|
|
551
|
+
catch (error) {
|
|
552
|
+
this.emit('warning', { message: 'Failed to create log directory', error });
|
|
553
|
+
}
|
|
554
|
+
}
|
|
555
|
+
/**
|
|
556
|
+
* Internal execution logic
|
|
557
|
+
*/
|
|
558
|
+
async executeInternal(workerType, configOverrides) {
    const baseConfig = HEADLESS_WORKER_CONFIGS[workerType];
    // Per-call overrides win over the worker's default headless settings.
    const headless = { ...baseConfig.headless, ...configOverrides };
    const startTime = Date.now();
    // Unique id: type + start timestamp + 6 random base36 chars.
    const executionId = `${workerType}_${startTime}_${Math.random().toString(36).slice(2, 8)}`;
    this.emit('start', { executionId, workerType, config: headless });
    try {
        // Build context from file patterns
        const context = await this.buildContext(headless.contextPatterns || []);
        // Build the full prompt
        const fullPrompt = this.buildPrompt(headless.promptTemplate, context);
        // Log prompt for debugging
        this.logExecution(executionId, 'prompt', fullPrompt);
        // Execute Claude Code headlessly
        const result = await this.executeClaudeCode(fullPrompt, {
            sandbox: headless.sandbox,
            model: headless.model || 'sonnet',
            timeoutMs: headless.timeoutMs || this.config.defaultTimeoutMs,
            executionId,
            workerType,
        });
        // Parse output based on format (text output is passed through raw).
        let parsedOutput;
        if (headless.outputFormat === 'json' && result.output) {
            parsedOutput = this.parseJsonOutput(result.output);
        }
        else if (headless.outputFormat === 'markdown' && result.output) {
            parsedOutput = this.parseMarkdownOutput(result.output);
        }
        const executionResult = {
            success: result.success,
            output: result.output,
            parsedOutput,
            durationMs: Date.now() - startTime,
            tokensUsed: result.tokensUsed,
            model: headless.model || 'sonnet',
            sandboxMode: headless.sandbox,
            workerType,
            timestamp: new Date(),
            executionId,
            error: result.error,
        };
        // Log result
        this.logExecution(executionId, 'result', JSON.stringify(executionResult, null, 2));
        this.emit('complete', executionResult);
        return executionResult;
    }
    catch (error) {
        // Unexpected failures become an error result rather than a rejection,
        // so callers always receive a uniform result shape.
        const errorMessage = error instanceof Error ? error.message : String(error);
        const executionResult = this.createErrorResult(workerType, errorMessage);
        executionResult.executionId = executionId;
        executionResult.durationMs = Date.now() - startTime;
        this.logExecution(executionId, 'error', errorMessage);
        this.emit('error', executionResult);
        return executionResult;
    }
    finally {
        // Process next in queue — runs on success and failure alike.
        this.processQueue();
    }
}
|
|
619
|
+
/**
|
|
620
|
+
* Process the pending queue
|
|
621
|
+
*/
|
|
622
|
+
processQueue() {
|
|
623
|
+
while (this.pendingQueue.length > 0 &&
|
|
624
|
+
this.processPool.size < this.config.maxConcurrent) {
|
|
625
|
+
const next = this.pendingQueue.shift();
|
|
626
|
+
if (!next)
|
|
627
|
+
break;
|
|
628
|
+
this.executeInternal(next.workerType, next.config)
|
|
629
|
+
.then(next.resolve)
|
|
630
|
+
.catch(next.reject);
|
|
631
|
+
}
|
|
632
|
+
}
|
|
633
|
+
/**
|
|
634
|
+
* Build context from file patterns
|
|
635
|
+
*/
|
|
636
|
+
async buildContext(patterns) {
|
|
637
|
+
if (patterns.length === 0)
|
|
638
|
+
return '';
|
|
639
|
+
// Check cache
|
|
640
|
+
const cacheKey = patterns.sort().join('|');
|
|
641
|
+
if (this.config.cacheContext) {
|
|
642
|
+
const cached = this.contextCache.get(cacheKey);
|
|
643
|
+
if (cached && Date.now() - cached.timestamp < this.config.cacheTtlMs) {
|
|
644
|
+
return cached.content;
|
|
645
|
+
}
|
|
646
|
+
}
|
|
647
|
+
// Collect files matching patterns
|
|
648
|
+
const files = [];
|
|
649
|
+
for (const pattern of patterns) {
|
|
650
|
+
const matches = this.simpleGlob(pattern);
|
|
651
|
+
files.push(...matches);
|
|
652
|
+
}
|
|
653
|
+
// Deduplicate and limit
|
|
654
|
+
const uniqueFiles = Array.from(new Set(files)).slice(0, this.config.maxContextFiles);
|
|
655
|
+
// Build context
|
|
656
|
+
const contextParts = [];
|
|
657
|
+
for (const file of uniqueFiles) {
|
|
658
|
+
try {
|
|
659
|
+
const fullPath = join(this.projectRoot, file);
|
|
660
|
+
if (!existsSync(fullPath))
|
|
661
|
+
continue;
|
|
662
|
+
const content = readFileSync(fullPath, 'utf-8');
|
|
663
|
+
const truncated = content.slice(0, this.config.maxCharsPerFile);
|
|
664
|
+
const wasTruncated = content.length > this.config.maxCharsPerFile;
|
|
665
|
+
contextParts.push(`--- ${file}${wasTruncated ? ' (truncated)' : ''} ---\n${truncated}`);
|
|
666
|
+
}
|
|
667
|
+
catch {
|
|
668
|
+
// Skip unreadable files
|
|
669
|
+
}
|
|
670
|
+
}
|
|
671
|
+
const contextContent = contextParts.join('\n\n');
|
|
672
|
+
// Cache the result
|
|
673
|
+
if (this.config.cacheContext) {
|
|
674
|
+
this.contextCache.set(cacheKey, {
|
|
675
|
+
content: contextContent,
|
|
676
|
+
timestamp: Date.now(),
|
|
677
|
+
patterns,
|
|
678
|
+
});
|
|
679
|
+
}
|
|
680
|
+
return contextContent;
|
|
681
|
+
}
|
|
682
|
+
/**
|
|
683
|
+
* Simple glob implementation for file matching
|
|
684
|
+
*/
|
|
685
|
+
simpleGlob(pattern) {
    const results = [];
    // Handle simple patterns (no wildcards): just an existence check.
    if (!pattern.includes('*')) {
        const fullPath = join(this.projectRoot, pattern);
        if (existsSync(fullPath)) {
            results.push(pattern);
        }
        return results;
    }
    // Parse pattern parts (split on '/'; each part matches one path segment,
    // except '**' which matches zero or more directory levels).
    const parts = pattern.split('/');
    // Recursive walk: `dir` is the project-relative directory scanned so far,
    // `remainingParts` is the unmatched tail of the pattern.
    const scanDir = (dir, remainingParts) => {
        if (remainingParts.length === 0)
            return;
        if (results.length >= 100)
            return; // Limit results
        try {
            const fullDir = join(this.projectRoot, dir);
            if (!existsSync(fullDir))
                return;
            const entries = readdirSync(fullDir, { withFileTypes: true });
            const currentPart = remainingParts[0];
            const isLastPart = remainingParts.length === 1;
            for (const entry of entries) {
                // Skip common non-code directories
                if (entry.name === 'node_modules' ||
                    entry.name === '.git' ||
                    entry.name === 'dist' ||
                    entry.name === 'build' ||
                    entry.name === 'coverage' ||
                    entry.name === '.next' ||
                    entry.name === '.cache') {
                    continue;
                }
                const entryPath = dir ? `${dir}/${entry.name}` : entry.name;
                if (currentPart === '**') {
                    // Recursive glob: a directory can either stay "inside" the
                    // '**' or advance to the next pattern part — try both.
                    if (entry.isDirectory()) {
                        scanDir(entryPath, remainingParts); // Continue with **
                        scanDir(entryPath, remainingParts.slice(1)); // Try next part
                    }
                    else if (entry.isFile() && remainingParts.length > 1) {
                        // Check if file matches next pattern part
                        const nextPart = remainingParts[1];
                        if (this.matchesPattern(entry.name, nextPart)) {
                            results.push(entryPath);
                        }
                    }
                }
                else if (this.matchesPattern(entry.name, currentPart)) {
                    if (isLastPart && entry.isFile()) {
                        results.push(entryPath);
                    }
                    else if (!isLastPart && entry.isDirectory()) {
                        scanDir(entryPath, remainingParts.slice(1));
                    }
                }
            }
        }
        catch {
            // Skip unreadable directories
        }
    };
    scanDir('', parts);
    return results;
}
|
|
752
|
+
/**
|
|
753
|
+
* Match filename against a simple pattern
|
|
754
|
+
*/
|
|
755
|
+
matchesPattern(name, pattern) {
|
|
756
|
+
if (pattern === '*')
|
|
757
|
+
return true;
|
|
758
|
+
if (pattern === '**')
|
|
759
|
+
return true;
|
|
760
|
+
// Handle *.ext patterns
|
|
761
|
+
if (pattern.startsWith('*.')) {
|
|
762
|
+
return name.endsWith(pattern.slice(1));
|
|
763
|
+
}
|
|
764
|
+
// Handle prefix* patterns
|
|
765
|
+
if (pattern.endsWith('*')) {
|
|
766
|
+
return name.startsWith(pattern.slice(0, -1));
|
|
767
|
+
}
|
|
768
|
+
// Handle *suffix patterns
|
|
769
|
+
if (pattern.startsWith('*')) {
|
|
770
|
+
return name.endsWith(pattern.slice(1));
|
|
771
|
+
}
|
|
772
|
+
// Exact match
|
|
773
|
+
return name === pattern;
|
|
774
|
+
}
|
|
775
|
+
/**
|
|
776
|
+
* Build full prompt with context
|
|
777
|
+
*/
|
|
778
|
+
buildPrompt(template, context) {
|
|
779
|
+
if (!context) {
|
|
780
|
+
return `${template}
|
|
781
|
+
|
|
782
|
+
## Instructions
|
|
783
|
+
|
|
784
|
+
Analyze the codebase and provide your response following the format specified in the task.`;
|
|
785
|
+
}
|
|
786
|
+
return `${template}
|
|
787
|
+
|
|
788
|
+
## Codebase Context
|
|
789
|
+
|
|
790
|
+
${context}
|
|
791
|
+
|
|
792
|
+
## Instructions
|
|
793
|
+
|
|
794
|
+
Analyze the above codebase context and provide your response following the format specified in the task.`;
|
|
795
|
+
}
|
|
796
|
+
/**
|
|
797
|
+
* Execute Claude Code in headless mode
|
|
798
|
+
*/
|
|
799
|
+
executeClaudeCode(prompt, options) {
|
|
800
|
+
return new Promise((resolve) => {
|
|
801
|
+
const env = {
|
|
802
|
+
...process.env,
|
|
803
|
+
CLAUDE_CODE_HEADLESS: 'true',
|
|
804
|
+
CLAUDE_CODE_SANDBOX_MODE: options.sandbox,
|
|
805
|
+
};
|
|
806
|
+
// Set model
|
|
807
|
+
env.ANTHROPIC_MODEL = MODEL_IDS[options.model];
|
|
808
|
+
// Spawn claude CLI process
|
|
809
|
+
const child = spawn('claude', ['--print', prompt], {
|
|
810
|
+
cwd: this.projectRoot,
|
|
811
|
+
env,
|
|
812
|
+
stdio: ['pipe', 'pipe', 'pipe'],
|
|
813
|
+
});
|
|
814
|
+
// Setup timeout
|
|
815
|
+
const timeoutHandle = setTimeout(() => {
|
|
816
|
+
if (this.processPool.has(options.executionId)) {
|
|
817
|
+
child.kill('SIGTERM');
|
|
818
|
+
// Give it a moment to terminate gracefully
|
|
819
|
+
setTimeout(() => {
|
|
820
|
+
if (!child.killed) {
|
|
821
|
+
child.kill('SIGKILL');
|
|
822
|
+
}
|
|
823
|
+
}, 5000);
|
|
824
|
+
}
|
|
825
|
+
}, options.timeoutMs);
|
|
826
|
+
// Track in process pool
|
|
827
|
+
const poolEntry = {
|
|
828
|
+
process: child,
|
|
829
|
+
executionId: options.executionId,
|
|
830
|
+
workerType: options.workerType,
|
|
831
|
+
startTime: new Date(),
|
|
832
|
+
timeout: timeoutHandle,
|
|
833
|
+
};
|
|
834
|
+
this.processPool.set(options.executionId, poolEntry);
|
|
835
|
+
let stdout = '';
|
|
836
|
+
let stderr = '';
|
|
837
|
+
let resolved = false;
|
|
838
|
+
const cleanup = () => {
|
|
839
|
+
clearTimeout(timeoutHandle);
|
|
840
|
+
this.processPool.delete(options.executionId);
|
|
841
|
+
};
|
|
842
|
+
child.stdout?.on('data', (data) => {
|
|
843
|
+
const chunk = data.toString();
|
|
844
|
+
stdout += chunk;
|
|
845
|
+
this.emit('output', {
|
|
846
|
+
executionId: options.executionId,
|
|
847
|
+
type: 'stdout',
|
|
848
|
+
data: chunk,
|
|
849
|
+
});
|
|
850
|
+
});
|
|
851
|
+
child.stderr?.on('data', (data) => {
|
|
852
|
+
const chunk = data.toString();
|
|
853
|
+
stderr += chunk;
|
|
854
|
+
this.emit('output', {
|
|
855
|
+
executionId: options.executionId,
|
|
856
|
+
type: 'stderr',
|
|
857
|
+
data: chunk,
|
|
858
|
+
});
|
|
859
|
+
});
|
|
860
|
+
child.on('close', (code) => {
|
|
861
|
+
if (resolved)
|
|
862
|
+
return;
|
|
863
|
+
resolved = true;
|
|
864
|
+
cleanup();
|
|
865
|
+
resolve({
|
|
866
|
+
success: code === 0,
|
|
867
|
+
output: stdout || stderr,
|
|
868
|
+
error: code !== 0 ? stderr || `Process exited with code ${code}` : undefined,
|
|
869
|
+
});
|
|
870
|
+
});
|
|
871
|
+
child.on('error', (error) => {
|
|
872
|
+
if (resolved)
|
|
873
|
+
return;
|
|
874
|
+
resolved = true;
|
|
875
|
+
cleanup();
|
|
876
|
+
resolve({
|
|
877
|
+
success: false,
|
|
878
|
+
output: '',
|
|
879
|
+
error: error.message,
|
|
880
|
+
});
|
|
881
|
+
});
|
|
882
|
+
// Handle timeout
|
|
883
|
+
setTimeout(() => {
|
|
884
|
+
if (resolved)
|
|
885
|
+
return;
|
|
886
|
+
if (!this.processPool.has(options.executionId))
|
|
887
|
+
return;
|
|
888
|
+
resolved = true;
|
|
889
|
+
child.kill('SIGTERM');
|
|
890
|
+
cleanup();
|
|
891
|
+
resolve({
|
|
892
|
+
success: false,
|
|
893
|
+
output: stdout || stderr,
|
|
894
|
+
error: `Execution timed out after ${options.timeoutMs}ms`,
|
|
895
|
+
});
|
|
896
|
+
}, options.timeoutMs + 100); // Slightly after the kill timeout
|
|
897
|
+
});
|
|
898
|
+
}
|
|
899
|
+
/**
|
|
900
|
+
* Parse JSON output from Claude Code
|
|
901
|
+
*/
|
|
902
|
+
parseJsonOutput(output) {
|
|
903
|
+
try {
|
|
904
|
+
// Try to find JSON in code blocks first
|
|
905
|
+
const codeBlockMatch = output.match(/```(?:json)?\s*([\s\S]*?)```/);
|
|
906
|
+
if (codeBlockMatch) {
|
|
907
|
+
return JSON.parse(codeBlockMatch[1].trim());
|
|
908
|
+
}
|
|
909
|
+
// Try to find any JSON object
|
|
910
|
+
const jsonMatch = output.match(/\{[\s\S]*\}/);
|
|
911
|
+
if (jsonMatch) {
|
|
912
|
+
return JSON.parse(jsonMatch[0]);
|
|
913
|
+
}
|
|
914
|
+
// Try direct parse
|
|
915
|
+
return JSON.parse(output.trim());
|
|
916
|
+
}
|
|
917
|
+
catch {
|
|
918
|
+
return {
|
|
919
|
+
parseError: true,
|
|
920
|
+
rawOutput: output,
|
|
921
|
+
};
|
|
922
|
+
}
|
|
923
|
+
}
|
|
924
|
+
/**
|
|
925
|
+
* Parse markdown output into sections
|
|
926
|
+
*/
|
|
927
|
+
parseMarkdownOutput(output) {
|
|
928
|
+
const sections = [];
|
|
929
|
+
const codeBlocks = [];
|
|
930
|
+
// Extract code blocks first
|
|
931
|
+
const codeBlockRegex = /```(\w*)\n([\s\S]*?)```/g;
|
|
932
|
+
let codeMatch;
|
|
933
|
+
while ((codeMatch = codeBlockRegex.exec(output)) !== null) {
|
|
934
|
+
codeBlocks.push({
|
|
935
|
+
language: codeMatch[1] || 'text',
|
|
936
|
+
code: codeMatch[2].trim(),
|
|
937
|
+
});
|
|
938
|
+
}
|
|
939
|
+
// Parse sections
|
|
940
|
+
const lines = output.split('\n');
|
|
941
|
+
let currentSection = null;
|
|
942
|
+
for (const line of lines) {
|
|
943
|
+
const headerMatch = line.match(/^(#{1,6})\s+(.+)$/);
|
|
944
|
+
if (headerMatch) {
|
|
945
|
+
if (currentSection) {
|
|
946
|
+
sections.push(currentSection);
|
|
947
|
+
}
|
|
948
|
+
currentSection = {
|
|
949
|
+
title: headerMatch[2].trim(),
|
|
950
|
+
content: '',
|
|
951
|
+
level: headerMatch[1].length,
|
|
952
|
+
};
|
|
953
|
+
}
|
|
954
|
+
else if (currentSection) {
|
|
955
|
+
currentSection.content += line + '\n';
|
|
956
|
+
}
|
|
957
|
+
}
|
|
958
|
+
if (currentSection) {
|
|
959
|
+
currentSection.content = currentSection.content.trim();
|
|
960
|
+
sections.push(currentSection);
|
|
961
|
+
}
|
|
962
|
+
return { sections, codeBlocks };
|
|
963
|
+
}
|
|
964
|
+
/**
|
|
965
|
+
* Create an error result
|
|
966
|
+
*/
|
|
967
|
+
createErrorResult(workerType, error) {
|
|
968
|
+
return {
|
|
969
|
+
success: false,
|
|
970
|
+
output: '',
|
|
971
|
+
durationMs: 0,
|
|
972
|
+
model: 'unknown',
|
|
973
|
+
sandboxMode: 'strict',
|
|
974
|
+
workerType,
|
|
975
|
+
timestamp: new Date(),
|
|
976
|
+
executionId: `error_${Date.now()}`,
|
|
977
|
+
error,
|
|
978
|
+
};
|
|
979
|
+
}
|
|
980
|
+
/**
|
|
981
|
+
* Log execution details for debugging
|
|
982
|
+
*/
|
|
983
|
+
logExecution(executionId, type, content) {
|
|
984
|
+
try {
|
|
985
|
+
const timestamp = new Date().toISOString();
|
|
986
|
+
const logFile = join(this.config.logDir, `${executionId}_${type}.log`);
|
|
987
|
+
const logContent = `[${timestamp}] ${type.toUpperCase()}\n${'='.repeat(60)}\n${content}\n`;
|
|
988
|
+
writeFileSync(logFile, logContent);
|
|
989
|
+
}
|
|
990
|
+
catch {
|
|
991
|
+
// Ignore log write errors
|
|
992
|
+
}
|
|
993
|
+
}
|
|
994
|
+
}
|
|
995
|
+
// Export default
|
|
996
|
+
export default HeadlessWorkerExecutor;
|
|
997
|
+
//# sourceMappingURL=headless-worker-executor.js.map
|