crewswarm-cli 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,3381 @@
1
// Bundler (esbuild-style) runtime helpers for lazy module initialization and
// live export bindings.
var __defProp = Object.defineProperty;
var __getOwnPropNames = Object.getOwnPropertyNames;
// Wraps a module body so it runs at most once: the body holder `fn` is
// cleared after the first call and the cached result is returned thereafter.
var __esm = (fn, res) => function __init() {
  if (fn) {
    const body = fn[__getOwnPropNames(fn)[0]];
    fn = 0;
    res = body(0);
  }
  return res;
};
// Defines each export as an enumerable getter on `target`, so re-exported
// bindings stay "live" (reads always see the current value).
var __export = (target, exportGetters) => {
  for (var exportName in exportGetters) {
    __defProp(target, exportName, { get: exportGetters[exportName], enumerable: true });
  }
};
10
+
11
// src/worker/autonomous-loop.ts
/**
 * Runs an agent task as an autonomous LLM turn loop: each turn asks the LLM
 * for the next step, executes any requested tool calls, and feeds results
 * back via `history`. Terminates on explicit completion, stale output,
 * detected action loops, or when the turn budget is exhausted.
 *
 * @param task        Task description passed to the LLM every turn.
 * @param executeLLM  async (task, tools, history) => { status, response, toolCalls }.
 * @param executeTool async (toolName, params) => result; may throw.
 * @param config      { maxTurns?, repeatThreshold?, tools?, onProgress? }.
 * @returns { success, turns, history, finalResponse?, reason? }
 */
async function executeAutonomous(task, executeLLM, executeTool, config) {
  // NOTE(review): `||` means an explicit 0 falls back to the defaults.
  const maxTurns = config.maxTurns || DEFAULT_MAX_TURNS;
  const repeatThreshold = config.repeatThreshold || DEFAULT_REPEAT_THRESHOLD;
  const history = [];
  let lastResponseText = "";
  let staleCount = 0;
  for (let turn = 0; turn < maxTurns; turn++) {
    config.onProgress?.(turn + 1, "THINKING");
    const response = await executeLLM(task, config.tools, history);
    // Success: the model declared COMPLETE or stopped requesting tool calls.
    if (response.status === "COMPLETE" || !response.toolCalls || response.toolCalls.length === 0) {
      return {
        success: true,
        turns: turn + 1,
        history,
        finalResponse: response.response
      };
    }
    // Stale-output guard: the same non-trivial (>20 char) text repeated after
    // its first occurrence is treated as implicit completion.
    if (response.response && response.response.length > 20) {
      if (response.response === lastResponseText) {
        staleCount++;
        if (staleCount >= 2) {
          return {
            success: true,
            turns: turn + 1,
            history,
            finalResponse: response.response,
            reason: "Detected stale response (same output repeated), treating as complete"
          };
        }
      } else {
        staleCount = 0;
      }
      lastResponseText = response.response;
    }
    if (response.toolCalls.length === 1) {
      // Single tool call: execute inline. Failures are recorded in history
      // (so the LLM can react next turn) rather than aborting the loop.
      const call = response.toolCalls[0];
      config.onProgress?.(turn + 1, `EXECUTING: ${call.tool}`);
      try {
        const result = await executeTool(call.tool, call.params);
        history.push({ turn: turn + 1, tool: call.tool, params: call.params, result });
      } catch (error) {
        history.push({ turn: turn + 1, tool: call.tool, params: call.params, result: null, error: error.message });
      }
    } else {
      // Multiple tool calls: run concurrently. allSettled preserves input
      // order, so results[i] always corresponds to toolCalls[i].
      config.onProgress?.(turn + 1, `EXECUTING ${response.toolCalls.length} tools in parallel`);
      const results = await Promise.allSettled(
        response.toolCalls.map(async (call) => {
          config.onProgress?.(turn + 1, `EXECUTING: ${call.tool}`);
          return { call, result: await executeTool(call.tool, call.params) };
        })
      );
      for (let i = 0; i < results.length; i++) {
        const r = results[i];
        const call = response.toolCalls[i];
        if (r.status === "fulfilled") {
          history.push({ turn: turn + 1, tool: r.value.call.tool, params: r.value.call.params, result: r.value.result });
        } else {
          history.push({ turn: turn + 1, tool: call.tool, params: call.params, result: null, error: r.reason?.message || "parallel execution failed" });
        }
      }
    }
    // Loop guard: after `repeatThreshold` turns, abort if the last 3 actions
    // exactly repeat the 3 before them (see isRepeating).
    if (turn > repeatThreshold && isRepeating(history, 3)) {
      return {
        success: false,
        turns: turn + 1,
        history,
        reason: "Detected repeated actions, stopping to prevent infinite loop"
      };
    }
  }
  return {
    success: false,
    turns: maxTurns,
    history,
    reason: "Maximum turns exceeded without completing task"
  };
}
89
/**
 * Detects an action loop: true when the most recent `windowSize` history
 * entries are an exact repeat (same tool + JSON-identical params) of the
 * `windowSize` entries immediately before them.
 */
function isRepeating(history, windowSize = 3) {
  const needed = windowSize * 2;
  if (history.length < needed) {
    return false;
  }
  const fingerprint = (entry) => `${entry.tool}:${JSON.stringify(entry.params)}`;
  const latest = history.slice(-windowSize).map(fingerprint);
  const prior = history.slice(-needed, -windowSize).map(fingerprint);
  return latest.every((signature, i) => signature === prior[i]);
}
95
// Deferred module initializer (esbuild __esm pattern): the constants are
// assigned only when init_autonomous_loop() first runs.
var DEFAULT_MAX_TURNS, DEFAULT_REPEAT_THRESHOLD;
var init_autonomous_loop = __esm({
  "src/worker/autonomous-loop.ts"() {
    "use strict";
    DEFAULT_MAX_TURNS = 25; // hard cap on agent turns per task
    DEFAULT_REPEAT_THRESHOLD = 10; // turns before loop detection kicks in
  }
});
103
+
104
// src/tools/gemini/definitions/base-declarations.ts
// Canonical tool-name string constants used to build tool declarations and
// route tool calls in the Gemini adapter. Assigned lazily by
// init_base_declarations() (esbuild __esm pattern).
var GLOB_TOOL_NAME, GREP_TOOL_NAME, LS_TOOL_NAME, READ_FILE_TOOL_NAME, SHELL_TOOL_NAME, WRITE_FILE_TOOL_NAME, EDIT_TOOL_NAME, WEB_SEARCH_TOOL_NAME, WRITE_TODOS_TOOL_NAME, WEB_FETCH_TOOL_NAME, READ_MANY_FILES_TOOL_NAME, MEMORY_TOOL_NAME, GET_INTERNAL_DOCS_TOOL_NAME, ACTIVATE_SKILL_TOOL_NAME, ASK_USER_TOOL_NAME, EXIT_PLAN_MODE_TOOL_NAME, ENTER_PLAN_MODE_TOOL_NAME;
var init_base_declarations = __esm({
  "src/tools/gemini/definitions/base-declarations.ts"() {
    "use strict";
    GLOB_TOOL_NAME = "glob";
    GREP_TOOL_NAME = "grep_search";
    LS_TOOL_NAME = "list_directory";
    READ_FILE_TOOL_NAME = "read_file";
    SHELL_TOOL_NAME = "run_shell_command";
    WRITE_FILE_TOOL_NAME = "write_file";
    EDIT_TOOL_NAME = "replace";
    WEB_SEARCH_TOOL_NAME = "google_web_search";
    WRITE_TODOS_TOOL_NAME = "write_todos";
    WEB_FETCH_TOOL_NAME = "web_fetch";
    READ_MANY_FILES_TOOL_NAME = "read_many_files";
    MEMORY_TOOL_NAME = "save_memory";
    GET_INTERNAL_DOCS_TOOL_NAME = "get_internal_docs";
    ACTIVATE_SKILL_TOOL_NAME = "activate_skill";
    ASK_USER_TOOL_NAME = "ask_user";
    EXIT_PLAN_MODE_TOOL_NAME = "exit_plan_mode";
    ENTER_PLAN_MODE_TOOL_NAME = "enter_plan_mode";
  }
});
128
+
129
// src/lsp/index.ts
// Public surface of the lazily-loaded TypeScript language-service module.
var lsp_exports = {};
__export(lsp_exports, {
  getCompletions: () => getCompletions,
  getDefinitions: () => getDefinitions,
  getDocumentSymbols: () => getDocumentSymbols,
  getReferences: () => getReferences,
  typeCheckProject: () => typeCheckProject
});
import { existsSync } from "node:fs";
import { dirname, resolve } from "node:path";
140
/**
 * Lazily loads the TypeScript compiler API and caches it in the module-level
 * `_ts` binding. Handles both CJS (`default` export) and ESM interop shapes.
 */
async function ensureTs() {
  if (_ts) {
    return _ts;
  }
  const mod = await import("typescript");
  _ts = mod.default ?? mod;
  return _ts;
}
146
/**
 * Maps a ts.DiagnosticCategory enum value to a human-readable label.
 * (Warning = 0, Error = 1, Suggestion = 2; anything else reads as "message".)
 */
function categoryToText(cat) {
  switch (cat) {
    case 1:
      return "error";
    case 0:
      return "warning";
    case 2:
      return "suggestion";
    default:
      return "message";
  }
}
152
/**
 * Locates and parses the nearest tsconfig.json at or above `projectDir`.
 * @param projectDir directory to start the search from
 * @param ts the TypeScript compiler API module
 * @returns { root, options, fileNames }
 * @throws Error when no tsconfig is found or when it fails to read/parse.
 */
function loadProject(projectDir, ts) {
  const root = resolve(projectDir);
  const configPath = ts.findConfigFile(root, ts.sys.fileExists, "tsconfig.json");
  if (!configPath) {
    throw new Error(`No tsconfig.json found at or above ${root}`);
  }
  const { config, error } = ts.readConfigFile(configPath, ts.sys.readFile);
  if (error) {
    throw new Error(ts.flattenDiagnosticMessageText(error.messageText, "\n"));
  }
  const parsed = ts.parseJsonConfigFileContent(config, ts.sys, dirname(configPath));
  const [firstError] = parsed.errors;
  if (firstError) {
    throw new Error(ts.flattenDiagnosticMessageText(firstError.messageText, "\n"));
  }
  return {
    root,
    options: parsed.options,
    fileNames: parsed.fileNames
  };
}
177
/**
 * Runs a full pre-emit type check over the project.
 * @param projectDir directory containing (or below) a tsconfig.json
 * @param includeFiles optional relative paths; when non-empty, diagnostics
 *   are restricted to those files
 * @returns array of { file, line, column, code, category, message } with
 *   1-based line/column positions
 */
async function typeCheckProject(projectDir, includeFiles = []) {
  const ts = await ensureTs();
  const project = loadProject(projectDir, ts);
  const wanted = new Set(includeFiles.map((rel) => resolve(project.root, rel)));
  const host = ts.createCompilerHost(project.options, true);
  const program = ts.createProgram(project.fileNames, project.options, host);
  const results = [];
  for (const diag of ts.getPreEmitDiagnostics(program)) {
    // Diagnostics without a source file (e.g. config-level) are skipped.
    if (!diag.file) continue;
    const absFile = resolve(diag.file.fileName);
    if (wanted.size > 0 && !wanted.has(absFile)) continue;
    const pos = diag.file.getLineAndCharacterOfPosition(diag.start ?? 0);
    results.push({
      file: absFile,
      line: pos.line + 1,
      column: pos.character + 1,
      code: diag.code,
      category: categoryToText(diag.category),
      message: ts.flattenDiagnosticMessageText(diag.messageText, "\n")
    });
  }
  return results;
}
203
/** Normalizes a completion-entry kind to a non-empty string label. */
function kindToText(kind) {
  return kind ? String(kind) : "unknown";
}
206
/**
 * Builds an in-memory TypeScript LanguageService over the project's files.
 * The caller owns the returned service and must call service.dispose().
 * @returns { service, project, sourceTexts } where sourceTexts maps absolute
 *   file path -> { version, text }
 */
function createLanguageService(projectDir, ts) {
  const project = loadProject(projectDir, ts);
  const sourceTexts = new Map();
  for (const fileName of project.fileNames) {
    sourceTexts.set(resolve(fileName), { version: 1, text: ts.sys.readFile(fileName) || "" });
  }
  const serviceHost = {
    getCompilationSettings: () => project.options,
    getScriptFileNames: () => [...sourceTexts.keys()],
    getScriptVersion: (fileName) => String(sourceTexts.get(resolve(fileName))?.version || 1),
    getScriptSnapshot: (fileName) => {
      const entry = sourceTexts.get(resolve(fileName));
      return entry ? ts.ScriptSnapshot.fromString(entry.text) : void 0;
    },
    getCurrentDirectory: () => project.root,
    getDefaultLibFileName: (options) => ts.getDefaultLibFilePath(options),
    // Delegate plain filesystem queries straight to the compiler's sys layer.
    fileExists: ts.sys.fileExists,
    readFile: ts.sys.readFile,
    readDirectory: ts.sys.readDirectory,
    directoryExists: ts.sys.directoryExists,
    getDirectories: ts.sys.getDirectories
  };
  return { service: ts.createLanguageService(serviceHost), project, sourceTexts };
}
234
/**
 * Converts a 1-based line/column pair into a character offset into `text`.
 * Out-of-range values are clamped rather than throwing.
 */
function lineColToPosition(text, line, column) {
  const lineIndex = Math.max(0, line - 1);
  let offset = Math.max(0, column - 1);
  const rows = text.split("\n");
  for (let i = 0; i < lineIndex && i < rows.length; i++) {
    offset += rows[i].length + 1; // +1 for the newline separator
  }
  return Math.min(offset, text.length);
}
240
/**
 * Returns up to `limit` code completions at a 1-based line/column position.
 * @param prefix optional case-insensitive name filter
 * @returns array of { name, kind, sortText }
 * @throws Error when the file does not exist on disk.
 */
async function getCompletions(projectDir, filePath, line, column, limit = 50, prefix = "") {
  const ts = await ensureTs();
  const { service, project, sourceTexts } = createLanguageService(projectDir, ts);
  try {
    const absFile = resolve(project.root, filePath);
    if (!existsSync(absFile)) {
      throw new Error(`File not found: ${absFile}`);
    }
    // The file may live outside the tsconfig file set; load it on demand.
    if (!sourceTexts.has(absFile)) {
      sourceTexts.set(absFile, { version: 1, text: ts.sys.readFile(absFile) || "" });
    }
    const fileText = sourceTexts.get(absFile)?.text || "";
    // FIX: reuse the shared lineColToPosition helper instead of a duplicated
    // inline offset computation, and drop the unused `safeLine` local.
    const position = lineColToPosition(fileText, line, column);
    const completions = service.getCompletionsAtPosition(absFile, position, {
      includeCompletionsWithInsertText: true,
      includeCompletionsForModuleExports: true
    });
    const items = (completions?.entries || []).filter((entry) => {
      if (!prefix) return true;
      return entry.name.toLowerCase().startsWith(prefix.toLowerCase());
    });
    return items.slice(0, Math.max(1, limit)).map((entry) => ({
      name: entry.name,
      kind: kindToText(entry.kind),
      sortText: entry.sortText
    }));
  } finally {
    // Always release the language service, even on error paths.
    service.dispose();
  }
}
274
/**
 * "Go to definition" at a 1-based line/column position.
 * @returns array of { file, line, column } (1-based) definition locations.
 */
async function getDefinitions(projectDir, filePath, line, column) {
  const ts = await ensureTs();
  const { service, project, sourceTexts } = createLanguageService(projectDir, ts);
  try {
    const absFile = resolve(project.root, filePath);
    const fileText = sourceTexts.get(absFile)?.text || ts.sys.readFile(absFile) || "";
    const position = lineColToPosition(fileText, line, column);
    const definitions = service.getDefinitionAtPosition(absFile, position) || [];
    return definitions.map((definition) => {
      const sourceFile = service.getProgram()?.getSourceFile(definition.fileName);
      const location = sourceFile?.getLineAndCharacterOfPosition(definition.textSpan.start) ?? { line: 0, character: 0 };
      return {
        file: resolve(definition.fileName),
        line: location.line + 1,
        column: location.character + 1
      };
    });
  } finally {
    service.dispose();
  }
}
295
/**
 * "Find all references" at a 1-based line/column position.
 * @returns array of { file, line, column } (1-based) reference locations.
 */
async function getReferences(projectDir, filePath, line, column) {
  const ts = await ensureTs();
  const { service, project, sourceTexts } = createLanguageService(projectDir, ts);
  try {
    const absFile = resolve(project.root, filePath);
    const fileText = sourceTexts.get(absFile)?.text || ts.sys.readFile(absFile) || "";
    const position = lineColToPosition(fileText, line, column);
    const references = service.getReferencesAtPosition(absFile, position) || [];
    return references.map((reference) => {
      const sourceFile = service.getProgram()?.getSourceFile(reference.fileName);
      const location = sourceFile?.getLineAndCharacterOfPosition(reference.textSpan.start) ?? { line: 0, character: 0 };
      return {
        file: resolve(reference.fileName),
        line: location.line + 1,
        column: location.character + 1
      };
    });
  } finally {
    service.dispose();
  }
}
316
/**
 * Lists document symbols (functions, classes, methods, ...) for a file by
 * flattening the language service's navigation tree.
 * @returns array of { name, kind, line, column } with 1-based positions.
 */
async function getDocumentSymbols(projectDir, filePath) {
  const ts = await ensureTs();
  const { service, project } = createLanguageService(projectDir, ts);
  try {
    const absFile = resolve(project.root, filePath);
    const nav = service.getNavigationTree(absFile);
    // FIX: resolve the source file once instead of re-querying the program
    // for every span of every node (the lookup is loop-invariant).
    const sourceFile = service.getProgram()?.getSourceFile(absFile);
    const out = [];
    const walk = (node) => {
      for (const span of node.spans || []) {
        const lc = sourceFile?.getLineAndCharacterOfPosition(span.start) || { line: 0, character: 0 };
        // Skip the synthetic root node the tree always carries.
        if (node.text && node.text !== "<global>") {
          out.push({
            name: node.text,
            kind: String(node.kind || "unknown"),
            line: lc.line + 1,
            column: lc.character + 1
          });
        }
      }
      for (const child of node.childItems || []) walk(child);
    };
    walk(nav);
    return out;
  } finally {
    service.dispose();
  }
}
344
// Cached TypeScript compiler module, populated lazily by ensureTs().
var _ts;
// Module initializer for src/lsp/index.ts (no eager state to set up).
var init_lsp = __esm({
  "src/lsp/index.ts"() {
    "use strict";
  }
});
350
+
351
// src/tools/docker-sandbox.ts
// Runs shell commands inside disposable Docker containers seeded with the
// crew sandbox's staged (not-yet-committed) file edits.
var docker_sandbox_exports = {};
__export(docker_sandbox_exports, {
  DockerSandbox: () => DockerSandbox
});
import { execSync } from "child_process";
import fs from "fs";
import path from "path";
import { randomUUID } from "crypto";
var DockerSandbox;
var init_docker_sandbox = __esm({
  "src/tools/docker-sandbox.ts"() {
    "use strict";
    DockerSandbox = class {
      constructor() {
        this.defaultImage = "node:20-slim";
        this.defaultTimeout = 3e4; // 30 seconds
      }
      /**
       * Check if Docker is available and running (fast `docker info` probe).
       * @returns true when the daemon responds within 5s.
       */
      async isDockerAvailable() {
        try {
          execSync("docker info", {
            stdio: "ignore",
            timeout: 5e3
          });
          return true;
        } catch {
          return false;
        }
      }
      /**
       * Copy staged files from sandbox to temp directory.
       * Only entries with a `modified` payload on the active branch are written.
       * @returns number of files materialized into tempDir.
       */
      async prepareTempDir(sandbox, tempDir) {
        const pendingPaths = sandbox.getPendingPaths();
        // Staged content lives under state.branches[<active branch>][<path>].modified.
        const branch = sandbox.state?.branches?.[sandbox.getActiveBranch()];
        if (!branch) return 0;
        let fileCount = 0;
        for (const filePath of pendingPaths) {
          const fileData = branch[filePath];
          if (!fileData?.modified) continue;
          const fullPath = path.join(tempDir, filePath);
          const dir = path.dirname(fullPath);
          if (!fs.existsSync(dir)) {
            fs.mkdirSync(dir, { recursive: true });
          }
          fs.writeFileSync(fullPath, fileData.modified, "utf8");
          fileCount++;
        }
        return fileCount;
      }
      /**
       * Run command in Docker container with staged files mounted at /work.
       * The temp dir is always removed in `finally`, success or failure.
       * @returns { success, output, exitCode, duration } — never throws for
       *   command failures; execSync errors are converted into the result.
       */
      async runCommand(command, sandbox, options = {}) {
        const startTime = Date.now();
        const tempDir = path.join("/tmp", `crew-sandbox-${randomUUID()}`);
        const image = options.image || this.defaultImage;
        const timeout = options.timeout || this.defaultTimeout;
        const workDir = options.workDir || process.cwd();
        try {
          fs.mkdirSync(tempDir, { recursive: true });
          console.log(`[Docker] Created temp dir: ${tempDir}`);
          const fileCount = await this.prepareTempDir(sandbox, tempDir);
          console.log(`[Docker] Copied ${fileCount} staged file(s) to sandbox`);
          const pkgPath = path.join(workDir, "package.json");
          if (fs.existsSync(pkgPath)) {
            fs.copyFileSync(pkgPath, path.join(tempDir, "package.json"));
            console.log(`[Docker] Copied package.json`);
          }
          // Heuristic: only pay for the node_modules copy when the command
          // looks like it needs Node tooling.
          const needsNodeModules = /\b(npm|node|npx)\b/.test(command);
          if (needsNodeModules) {
            const nodeModulesPath = path.join(workDir, "node_modules");
            if (fs.existsSync(nodeModulesPath)) {
              console.log(`[Docker] Copying node_modules (this may take a few seconds)...`);
              execSync(`cp -r "${nodeModulesPath}" "${tempDir}/"`, {
                stdio: "ignore",
                timeout: 1e4
              });
            }
          }
          // NOTE(review): env values and the command are interpolated into a
          // shell string with only double-quote escaping — values containing
          // `$`, backticks, or backslashes can still break out. Confirm that
          // callers only pass trusted commands/env before widening exposure.
          const envFlags = options.env ? Object.entries(options.env).map(([k, v]) => `-e ${k}="${v}"`).join(" ") : "";
          console.log(`[Docker] Running: ${command}`);
          const dockerCmd = `docker run --rm -v "${tempDir}":/work -w /work ${envFlags} ${image} sh -c "${command.replace(/"/g, '\\"')}"`;
          const output = execSync(dockerCmd, {
            encoding: "utf8",
            timeout,
            stdio: ["ignore", "pipe", "pipe"]
          });
          const duration = Date.now() - startTime;
          console.log(`[Docker] \u2713 Command completed in ${duration}ms`);
          return {
            success: true,
            output,
            exitCode: 0,
            duration
          };
        } catch (err) {
          // execSync throws on non-zero exit or timeout; surface whatever
          // output the process produced instead of rethrowing.
          const duration = Date.now() - startTime;
          console.log(`[Docker] \u2717 Command failed after ${duration}ms`);
          return {
            success: false,
            output: err.stdout || err.stderr || err.message,
            exitCode: err.status || 1,
            duration
          };
        } finally {
          try {
            if (fs.existsSync(tempDir)) {
              fs.rmSync(tempDir, { recursive: true, force: true });
              console.log(`[Docker] Cleaned up temp dir`);
            }
          } catch (cleanupErr) {
            // Best-effort cleanup: never let it mask the command result.
            console.warn(`[Docker] Failed to cleanup ${tempDir}:`, cleanupErr);
          }
        }
      }
      /**
       * Pull Docker image if not present (with progress).
       * @returns true when the image is available locally afterwards.
       */
      async ensureImage(image = this.defaultImage) {
        try {
          execSync(`docker image inspect ${image}`, {
            stdio: "ignore",
            timeout: 5e3
          });
          return true;
        } catch {
          console.log(`[Docker] Pulling image ${image}...`);
          try {
            execSync(`docker pull ${image}`, {
              stdio: "inherit",
              // Show progress
              timeout: 12e4
              // 2 minutes for image pull
            });
            console.log(`[Docker] \u2713 Image pulled successfully`);
            return true;
          } catch (pullErr) {
            console.error(`[Docker] Failed to pull image:`, pullErr);
            return false;
          }
        }
      }
    };
  }
});
501
+
502
+ // src/tools/gemini/crew-adapter.ts
503
+ import { execSync as execSync2 } from "node:child_process";
504
+ import { mkdir, readFile, readdir, writeFile } from "node:fs/promises";
505
+ import { join, resolve as resolve2 } from "node:path";
506
/**
 * Shell command timeout in milliseconds. Reads CREW_SHELL_TIMEOUT (seconds)
 * from the environment; missing or invalid values fall back to 120s, and the
 * result is capped at 10 minutes.
 */
function getShellTimeout() {
  const seconds = Number.parseInt(process.env.CREW_SHELL_TIMEOUT || "", 10);
  if (seconds > 0) {
    return Math.min(seconds * 1000, 600000);
  }
  return 120000;
}
511
// Module-scoped bindings assigned when init_crew_adapter() runs (esbuild __esm pattern).
var CrewConfig, CrewMessageBus, DANGEROUS_SHELL_PATTERNS, _backgroundProcesses, GeminiToolAdapter;
512
+ var init_crew_adapter = __esm({
513
+ "src/tools/gemini/crew-adapter.ts"() {
514
+ "use strict";
515
+ init_base_declarations();
516
+ CrewConfig = class {
517
+ constructor(workspaceRoot) {
518
+ this.workspaceRoot = workspaceRoot;
519
+ }
520
+ getWorkspaceRoot() {
521
+ return this.workspaceRoot;
522
+ }
523
+ getTargetDir() {
524
+ return this.workspaceRoot;
525
+ }
526
+ };
527
+ CrewMessageBus = class {
528
+ async requestConfirmation() {
529
+ return { status: "approved" };
530
+ }
531
+ };
532
+ DANGEROUS_SHELL_PATTERNS = [
533
+ /\brm\s+-rf?\s/,
534
+ // rm -r / rm -rf
535
+ /\bgit\s+push\s+.*--force/,
536
+ // force push
537
+ /\bgit\s+reset\s+--hard/,
538
+ // hard reset
539
+ /\bgit\s+clean\s+-f/,
540
+ // clean untracked
541
+ /\bdrop\s+table\b/i,
542
+ // SQL drop
543
+ /\bdrop\s+database\b/i,
544
+ // SQL drop database
545
+ /\bkill\s+-9\b/,
546
+ // kill -9
547
+ /\bmkfs\b/,
548
+ // format filesystem
549
+ /\bdd\s+if=/
550
+ // dd (disk destroyer)
551
+ ];
552
+ _backgroundProcesses = /* @__PURE__ */ new Map();
553
+ GeminiToolAdapter = class _GeminiToolAdapter {
554
+ // Track reads for read-before-edit guard
555
+ constructor(sandbox) {
556
+ this.sandbox = sandbox;
557
+ this._filesRead = /* @__PURE__ */ new Set();
558
+ const workspaceRoot = sandbox.baseDir || process.cwd();
559
+ this.config = new CrewConfig(workspaceRoot);
560
+ this.messageBus = new CrewMessageBus();
561
+ }
562
+ buildDynamicDeclarations() {
563
+ const staticDecls = this.getStaticToolDeclarations();
564
+ const staticByName = new Map(staticDecls.map((d) => [d.name, d]));
565
+ const canonicalNames = [
566
+ READ_FILE_TOOL_NAME,
567
+ WRITE_FILE_TOOL_NAME,
568
+ EDIT_TOOL_NAME,
569
+ GLOB_TOOL_NAME,
570
+ GREP_TOOL_NAME,
571
+ LS_TOOL_NAME,
572
+ SHELL_TOOL_NAME,
573
+ WEB_SEARCH_TOOL_NAME,
574
+ WEB_FETCH_TOOL_NAME,
575
+ READ_MANY_FILES_TOOL_NAME,
576
+ MEMORY_TOOL_NAME,
577
+ WRITE_TODOS_TOOL_NAME,
578
+ GET_INTERNAL_DOCS_TOOL_NAME,
579
+ ACTIVATE_SKILL_TOOL_NAME,
580
+ ASK_USER_TOOL_NAME,
581
+ ENTER_PLAN_MODE_TOOL_NAME,
582
+ EXIT_PLAN_MODE_TOOL_NAME,
583
+ "grep_search_ripgrep",
584
+ "tracker_create_task",
585
+ "tracker_update_task",
586
+ "tracker_get_task",
587
+ "tracker_list_tasks",
588
+ "tracker_add_dependency",
589
+ "tracker_visualize",
590
+ "spawn_agent",
591
+ "check_background_task"
592
+ ];
593
+ const canonical = canonicalNames.map((name) => {
594
+ const found = staticByName.get(name);
595
+ if (found) return found;
596
+ return {
597
+ name,
598
+ description: `${name} tool`,
599
+ parameters: { type: "object", properties: {} }
600
+ };
601
+ });
602
+ const aliases = [
603
+ { alias: "read_file", target: "read_file" },
604
+ { alias: "write_file", target: "write_file" },
605
+ { alias: "append_file", target: "write_file" },
606
+ { alias: "edit", target: "replace" },
607
+ { alias: "replace", target: "replace" },
608
+ { alias: "glob", target: "glob" },
609
+ { alias: "grep", target: "grep_search" },
610
+ { alias: "grep_search", target: "grep_search" },
611
+ { alias: "grep_search_ripgrep", target: "grep_search_ripgrep" },
612
+ { alias: "list", target: "list_directory" },
613
+ { alias: "list_directory", target: "list_directory" },
614
+ { alias: "shell", target: "run_shell_command" },
615
+ { alias: "run_cmd", target: "run_shell_command" },
616
+ { alias: "run_shell_command", target: "run_shell_command" },
617
+ { alias: "web_search", target: "google_web_search" },
618
+ { alias: "google_web_search", target: "google_web_search" },
619
+ { alias: "web_fetch", target: "web_fetch" },
620
+ { alias: "save_memory", target: "save_memory" },
621
+ { alias: "write_todos", target: "write_todos" },
622
+ { alias: "get_internal_docs", target: "get_internal_docs" },
623
+ { alias: "ask_user", target: "ask_user" },
624
+ { alias: "enter_plan_mode", target: "enter_plan_mode" },
625
+ { alias: "exit_plan_mode", target: "exit_plan_mode" },
626
+ { alias: "activate_skill", target: "activate_skill" },
627
+ { alias: "tracker_create_task", target: "tracker_create_task" },
628
+ { alias: "tracker_update_task", target: "tracker_update_task" },
629
+ { alias: "tracker_get_task", target: "tracker_get_task" },
630
+ { alias: "tracker_list_tasks", target: "tracker_list_tasks" },
631
+ { alias: "tracker_add_dependency", target: "tracker_add_dependency" },
632
+ { alias: "tracker_visualize", target: "tracker_visualize" },
633
+ { alias: "mkdir", target: "write_file" },
634
+ { alias: "git", target: "run_shell_command" }
635
+ // LSP is not yet implemented — don't alias to read_file (misleads the model)
636
+ // { alias: 'lsp', target: 'read_file' }
637
+ ];
638
+ const byName = /* @__PURE__ */ new Map();
639
+ for (const decl of canonical) byName.set(decl.name, decl);
640
+ for (const a of aliases) {
641
+ const target = byName.get(a.target);
642
+ if (!target) continue;
643
+ if (!byName.has(a.alias)) {
644
+ byName.set(a.alias, { ...target, name: a.alias });
645
+ }
646
+ }
647
+ byName.set("mkdir", {
648
+ name: "mkdir",
649
+ description: "Create a directory path (staged via sandbox).",
650
+ parameters: {
651
+ type: "object",
652
+ properties: {
653
+ path: { type: "string", description: "Directory path to create" },
654
+ dir_path: { type: "string", description: "Alternative directory path field" }
655
+ }
656
+ }
657
+ });
658
+ byName.set("git", {
659
+ name: "git",
660
+ description: "Run limited git subcommands (status/diff/log/add/commit/show/branch).",
661
+ parameters: {
662
+ type: "object",
663
+ properties: {
664
+ command: { type: "string", description: "Git subcommand and args" }
665
+ },
666
+ required: ["command"]
667
+ }
668
+ });
669
+ byName.set("lsp", {
670
+ name: "lsp",
671
+ description: "Run code-intel queries (symbols/refs/goto/diagnostics/complete).",
672
+ parameters: {
673
+ type: "object",
674
+ properties: {
675
+ query: { type: "string", description: "LSP query string" }
676
+ },
677
+ required: ["query"]
678
+ }
679
+ });
680
+ return Array.from(byName.values());
681
+ }
682
+ getStaticToolDeclarations() {
683
+ return [
684
+ { name: "read_file", description: "Read file", parameters: { type: "object", properties: { file_path: { type: "string" } }, required: ["file_path"] } },
685
+ { name: "write_file", description: "Write file", parameters: { type: "object", properties: { file_path: { type: "string" }, content: { type: "string" } }, required: ["file_path", "content"] } },
686
+ { name: "replace", description: "Replace text in file. old_string must uniquely match one location (use replace_all:true for all occurrences). You MUST read_file before editing.", parameters: { type: "object", properties: { file_path: { type: "string" }, old_string: { type: "string" }, new_string: { type: "string" }, replace_all: { type: "boolean", description: "Replace ALL occurrences (useful for renames). Default: false (unique match required)" } }, required: ["file_path", "old_string", "new_string"] } },
687
+ { name: "glob", description: "Glob search", parameters: { type: "object", properties: { pattern: { type: "string" } }, required: ["pattern"] } },
688
+ { name: "grep_search", description: "Search for regex/text in files. Supports output modes (content/files/count), context lines, case insensitivity, file type filters.", parameters: { type: "object", properties: { pattern: { type: "string" }, path: { type: "string" }, dir_path: { type: "string" }, output_mode: { type: "string", description: "content (matching lines), files (file paths only), count (match counts)" }, context: { type: "number", description: "Lines of context around matches" }, before: { type: "number" }, after: { type: "number" }, case_insensitive: { type: "boolean" }, type: { type: "string", description: "File type filter (js, py, ts, go, etc.)" }, max_results: { type: "number" } }, required: ["pattern"] } },
689
+ { name: "grep_search_ripgrep", description: "Alias for grep_search with same capabilities", parameters: { type: "object", properties: { pattern: { type: "string" }, path: { type: "string" }, dir_path: { type: "string" }, output_mode: { type: "string" }, context: { type: "number" }, case_insensitive: { type: "boolean" }, type: { type: "string" }, max_results: { type: "number" } }, required: ["pattern"] } },
690
+ { name: "list_directory", description: "List directory", parameters: { type: "object", properties: { dir_path: { type: "string" }, path: { type: "string" } } } },
691
+ { name: "run_shell_command", description: "Run shell command (configurable timeout, Docker isolation when staged files exist). Use run_in_background:true for long-running commands.", parameters: { type: "object", properties: { command: { type: "string" }, run_in_background: { type: "boolean", description: "Run in background and return task ID. Use check_background_task to get result." }, description: { type: "string", description: "Brief description of what the command does" } }, required: ["command"] } },
692
+ { name: "google_web_search", description: "Web search", parameters: { type: "object", properties: { query: { type: "string" } }, required: ["query"] } },
693
+ { name: "web_fetch", description: "Fetch URL", parameters: { type: "object", properties: { url: { type: "string" }, prompt: { type: "string" } } } },
694
+ { name: "read_many_files", description: "Read many files", parameters: { type: "object", properties: { include: { type: "string" }, exclude: { type: "string" }, recursive: { type: "boolean" } } } },
695
+ { name: "save_memory", description: "Save memory fact", parameters: { type: "object", properties: { fact: { type: "string" } }, required: ["fact"] } },
696
+ { name: "write_todos", description: "Write todos", parameters: { type: "object", properties: { todos: { type: "array", items: { type: "object", properties: { text: { type: "string" }, done: { type: "boolean" } } } } }, required: ["todos"] } },
697
+ { name: "get_internal_docs", description: "Read internal docs", parameters: { type: "object", properties: { path: { type: "string" } } } },
698
+ { name: "ask_user", description: "Ask user placeholder", parameters: { type: "object", properties: { questions: { type: "array", items: { type: "object", properties: { question: { type: "string" } } } } } } },
699
+ { name: "enter_plan_mode", description: "Enter plan mode", parameters: { type: "object", properties: { reason: { type: "string" } } } },
700
+ { name: "exit_plan_mode", description: "Exit plan mode", parameters: { type: "object", properties: { plan_path: { type: "string" } } } },
701
+ { name: "activate_skill", description: "Activate skill", parameters: { type: "object", properties: { name: { type: "string" } }, required: ["name"] } },
702
+ { name: "tracker_create_task", description: "Create tracker task", parameters: { type: "object", properties: { title: { type: "string" }, description: { type: "string" }, type: { type: "string" }, parentId: { type: "string" }, dependencies: { type: "array", items: { type: "string" } } }, required: ["title", "description", "type"] } },
703
+ { name: "tracker_update_task", description: "Update tracker task", parameters: { type: "object", properties: { id: { type: "string" } }, required: ["id"] } },
704
+ { name: "tracker_get_task", description: "Get tracker task", parameters: { type: "object", properties: { id: { type: "string" } }, required: ["id"] } },
705
+ { name: "tracker_list_tasks", description: "List tracker tasks", parameters: { type: "object", properties: { status: { type: "string" }, type: { type: "string" }, parentId: { type: "string" } } } },
706
+ { name: "tracker_add_dependency", description: "Add tracker dependency", parameters: { type: "object", properties: { taskId: { type: "string" }, dependencyId: { type: "string" } }, required: ["taskId", "dependencyId"] } },
707
+ { name: "tracker_visualize", description: "Visualize tracker graph", parameters: { type: "object", properties: {} } },
708
+ { name: "spawn_agent", description: "Spawn a sub-agent to handle a task autonomously in parallel. Use for independent research, file analysis, or coding subtasks. Returns the sub-agent result when complete.", parameters: { type: "object", properties: { task: { type: "string", description: "Clear task description for the sub-agent" }, model: { type: "string", description: "Optional model override (default: use cheap model for workers)" }, max_turns: { type: "number", description: "Max turns for sub-agent (default: 15)" } }, required: ["task"] } },
709
+ { name: "check_background_task", description: "Check the status/result of a background shell command. Returns result if done, or elapsed time if still running.", parameters: { type: "object", properties: { task_id: { type: "string", description: "Task ID returned by run_shell_command with run_in_background:true" } }, required: ["task_id"] } }
710
+ ];
711
+ }
712
/**
 * Execute a tool call from LLM.
 *
 * Dispatches `toolName` to the matching handler method. Both the canonical
 * Gemini tool names and local aliases are accepted (e.g. "replace" -> editFile,
 * "run_shell_command"/"run_cmd" -> shellTool, "google_web_search" -> webSearchTool).
 * Unknown tools and any exception thrown by a handler are converted into a
 * { success: false, error } result, so the agent loop never sees a raw throw.
 */
async executeTool(toolName, params) {
  try {
    switch (toolName) {
      // Canonical Gemini names + local aliases
      case "write_file":
        return await this.writeFile(params);
      case "replace":
        // Alias of editFile; map parameters explicitly.
        return await this.editFile({
          file_path: params.file_path,
          old_string: params.old_string,
          new_string: params.new_string,
          replace_all: params.replace_all
        });
      case "append_file":
        return await this.appendFile(params);
      case "read_file":
        return await this.readFile(params);
      case "edit":
        return await this.editFile(params);
      case "read_many_files":
        return await this.readManyFilesTool(params);
      case "save_memory":
        return await this.saveMemoryTool(params);
      case "write_todos":
        return await this.writeTodosTool(params);
      case "get_internal_docs":
        return await this.getInternalDocsTool(params);
      case "ask_user":
        return await this.askUserTool(params);
      case "enter_plan_mode":
        return await this.enterPlanModeTool(params);
      case "exit_plan_mode":
        return await this.exitPlanModeTool(params);
      case "activate_skill":
        return await this.activateSkillTool(params);
      case "mkdir":
        return await this.mkdirTool(params);
      case "list":
        return await this.listTool(params);
      case "list_directory":
        // list_directory callers may pass either dir_path or path.
        return await this.listTool({ dir_path: params.dir_path || params.path });
      case "glob":
        return await this.globTool(params);
      case "grep":
        return await this.grepTool(params);
      case "grep_search":
      case "grep_search_ripgrep":
        // Alias of grepTool; normalize the parameter names.
        return await this.grepTool({
          pattern: params.pattern,
          path: params.dir_path || params.path,
          output_mode: params.output_mode,
          context: params.context,
          before: params.before,
          after: params.after,
          case_insensitive: params.case_insensitive,
          type: params.type,
          max_results: params.max_results
        });
      case "git":
        return await this.gitTool(params);
      case "shell":
      case "run_cmd":
      case "run_shell_command":
        return await this.shellTool(params);
      case "lsp":
        return await this.lspTool(params);
      case "web_search":
      case "google_web_search":
        return await this.webSearchTool(params);
      case "web_fetch":
        return await this.webFetchTool(params);
      case "tracker_create_task":
        return await this.trackerCreateTaskTool(params);
      case "tracker_update_task":
        return await this.trackerUpdateTaskTool(params);
      case "tracker_get_task":
        return await this.trackerGetTaskTool(params);
      case "tracker_list_tasks":
        return await this.trackerListTasksTool(params);
      case "tracker_add_dependency":
        return await this.trackerAddDependencyTool(params);
      case "tracker_visualize":
        return await this.trackerVisualizeTool();
      case "spawn_agent":
        return await this.spawnAgentTool(params);
      case "check_background_task":
        return await this.checkBackgroundTask(params);
      default:
        return {
          success: false,
          error: `Unknown tool: ${toolName}`
        };
    }
  } catch (err) {
    // Handler exceptions become tool errors rather than crashing the loop.
    return {
      success: false,
      error: err.message
    };
  }
}
815
+ async writeFile(params) {
816
+ const fullPath = resolve2(this.config.getWorkspaceRoot(), params.file_path);
817
+ const wsRoot = resolve2(this.config.getWorkspaceRoot());
818
+ if (!fullPath.startsWith(wsRoot + "/") && fullPath !== wsRoot) {
819
+ return { success: false, error: `Access denied: path "${params.file_path}" resolves outside workspace root.` };
820
+ }
821
+ await this.sandbox.addChange(params.file_path, params.content);
822
+ return {
823
+ success: true,
824
+ output: `Staged ${params.file_path} (${params.content.length} bytes)`
825
+ };
826
+ }
827
+ async appendFile(params) {
828
+ const filePath = resolve2(this.config.getWorkspaceRoot(), params.file_path);
829
+ let existing = "";
830
+ try {
831
+ const stagedContent = this.sandbox.getStagedContent?.(params.file_path) || this.sandbox.getStagedContent?.(filePath);
832
+ existing = stagedContent ?? await readFile(filePath, "utf8");
833
+ } catch {
834
+ existing = "";
835
+ }
836
+ const combined = `${existing}${params.content || ""}`;
837
+ await this.sandbox.addChange(params.file_path, combined);
838
+ return {
839
+ success: true,
840
+ output: `Appended ${params.file_path} (${(params.content || "").length} bytes)`
841
+ };
842
+ }
843
+ async readFile(params) {
844
+ const filePath = resolve2(this.config.getWorkspaceRoot(), params.file_path);
845
+ const wsRoot = resolve2(this.config.getWorkspaceRoot());
846
+ if (!filePath.startsWith(wsRoot + "/") && filePath !== wsRoot) {
847
+ return { success: false, error: `Access denied: path "${params.file_path}" resolves outside workspace root.` };
848
+ }
849
+ this._filesRead.add(params.file_path);
850
+ this._filesRead.add(filePath);
851
+ const stagedContent = this.sandbox.getStagedContent?.(params.file_path) || this.sandbox.getStagedContent?.(filePath);
852
+ const content = stagedContent ?? await readFile(filePath, "utf8");
853
+ if (params.start_line || params.end_line) {
854
+ const lines = content.split("\n");
855
+ const start = (params.start_line || 1) - 1;
856
+ const end = params.end_line || lines.length;
857
+ const slice = lines.slice(start, end).join("\n");
858
+ return { success: true, output: slice };
859
+ }
860
+ return { success: true, output: content };
861
+ }
862
+ async editFile(params) {
863
+ const filePath = resolve2(this.config.getWorkspaceRoot(), params.file_path);
864
+ const wsRoot = resolve2(this.config.getWorkspaceRoot());
865
+ if (!filePath.startsWith(wsRoot + "/") && filePath !== wsRoot) {
866
+ return { success: false, error: `Access denied: path "${params.file_path}" resolves outside workspace root.` };
867
+ }
868
+ if (!this._filesRead.has(params.file_path) && !this._filesRead.has(filePath)) {
869
+ return {
870
+ success: false,
871
+ error: `You must read_file "${params.file_path}" before editing it. Never guess at file contents.`
872
+ };
873
+ }
874
+ const stagedContent = this.sandbox.getStagedContent?.(params.file_path) || this.sandbox.getStagedContent?.(filePath);
875
+ const content = stagedContent ?? await readFile(filePath, "utf8");
876
+ if (!content.includes(params.old_string)) {
877
+ return {
878
+ success: false,
879
+ error: `String not found in ${params.file_path}`
880
+ };
881
+ }
882
+ const occurrences = content.split(params.old_string).length - 1;
883
+ if (params.replace_all) {
884
+ const updated2 = content.split(params.old_string).join(params.new_string);
885
+ await this.sandbox.addChange(params.file_path, updated2);
886
+ const diagnostics2 = await this.shadowValidate(params.file_path);
887
+ return {
888
+ success: true,
889
+ output: `Edited ${params.file_path} (${occurrences} replacements)${diagnostics2}`
890
+ };
891
+ }
892
+ if (occurrences > 1) {
893
+ return {
894
+ success: false,
895
+ error: `old_string matches ${occurrences} locations in ${params.file_path}. Provide more context to make it unique, or use replace_all:true to replace all occurrences.`
896
+ };
897
+ }
898
+ const updated = content.replace(params.old_string, params.new_string);
899
+ await this.sandbox.addChange(params.file_path, updated);
900
+ const diagnostics = await this.shadowValidate(params.file_path);
901
+ return {
902
+ success: true,
903
+ output: `Edited ${params.file_path}${diagnostics}`
904
+ };
905
+ }
906
/**
 * Shadow validation: after an edit, check for type/lint errors using LSP.
 * Returns empty string if clean, or diagnostic summary if errors found.
 * Non-fatal — silently returns empty on any failure.
 */
async shadowValidate(filePath) {
  // Only the JS/TS family can be type-checked by the bundled LSP helper.
  if (!/\.(ts|tsx|js|jsx|mjs|mts)$/.test(filePath)) return "";
  try {
    // Lazy-load the bundled LSP module (esbuild lazy-init pattern).
    const lsp = await Promise.resolve().then(() => (init_lsp(), lsp_exports));
    const diags = await lsp.typeCheckProject(this.config.getWorkspaceRoot(), [filePath]);
    // Keep only hard errors reported against the edited file itself.
    const fileErrors = diags.filter(
      (d) => d.category === "error" && d.file?.endsWith(filePath)
    );
    if (fileErrors.length === 0) return "";
    // Show at most 5 diagnostics; summarize the remainder.
    const errorLines = fileErrors.slice(0, 5).map(
      (d) => ` ${d.file}:${d.line} \u2014 ${d.message}`
    );
    return `

\u26A0\uFE0F Shadow validation found ${fileErrors.length} error(s) after edit:
${errorLines.join("\n")}${fileErrors.length > 5 ? `
... and ${fileErrors.length - 5} more` : ""}
Fix these before moving on.`;
  } catch {
    // Best-effort check: any LSP failure is swallowed deliberately.
    return "";
  }
}
933
+ async mkdirTool(params) {
934
+ const dir = (params.path || params.dir_path || "").trim();
935
+ if (!dir) return { success: false, error: "mkdir requires path" };
936
+ const keep = join(dir, ".gitkeep");
937
+ await this.sandbox.addChange(keep, "");
938
+ return { success: true, output: `Staged directory ${dir}` };
939
+ }
940
+ async listTool(params) {
941
+ const target = (params.path || params.dir_path || ".").trim();
942
+ const abs = resolve2(process.cwd(), target);
943
+ const items = await readdir(abs, { withFileTypes: true });
944
+ const lines = items.map((i) => `${i.isDirectory() ? "d" : "f"} ${i.name}`);
945
+ return { success: true, output: lines.join("\n") };
946
+ }
947
+ async globTool(params) {
948
+ const pattern = String(params.pattern || "").trim();
949
+ if (!pattern) return { success: false, error: "glob requires pattern" };
950
+ try {
951
+ const out = execSync2(`rg --files -g ${JSON.stringify(pattern)}`, { cwd: process.cwd(), stdio: "pipe", encoding: "utf8" });
952
+ return { success: true, output: out.trim() };
953
+ } catch (err) {
954
+ return { success: false, error: err?.stderr?.toString?.() || err?.message || "glob failed" };
955
+ }
956
+ }
957
+ async grepTool(params) {
958
+ const pattern = String(params.pattern || "").trim();
959
+ const searchPath = String(params.path || ".").trim();
960
+ if (!pattern) return { success: false, error: "grep requires pattern" };
961
+ const args = ["rg"];
962
+ const mode = params.output_mode || "content";
963
+ if (mode === "files") {
964
+ args.push("-l");
965
+ } else if (mode === "count") {
966
+ args.push("-c");
967
+ } else {
968
+ args.push("-n");
969
+ }
970
+ if (params.context) args.push(`-C${params.context}`);
971
+ else {
972
+ if (params.before) args.push(`-B${params.before}`);
973
+ if (params.after) args.push(`-A${params.after}`);
974
+ }
975
+ if (params.case_insensitive) args.push("-i");
976
+ if (params.type) args.push(`--type=${params.type}`);
977
+ if (params.max_results) args.push(`-m${params.max_results}`);
978
+ args.push(JSON.stringify(pattern), JSON.stringify(searchPath));
979
+ try {
980
+ const out = execSync2(args.join(" "), {
981
+ cwd: process.cwd(),
982
+ stdio: "pipe",
983
+ encoding: "utf8"
984
+ });
985
+ return { success: true, output: out.trim() };
986
+ } catch (err) {
987
+ const text = `${err?.stdout?.toString?.() || ""}
988
+ ${err?.stderr?.toString?.() || ""}`.trim();
989
+ if (err?.status === 1 && !text) return { success: true, output: "(no matches)" };
990
+ return { success: false, error: text || err?.message || "grep failed" };
991
+ }
992
+ }
993
+ async gitTool(params) {
994
+ const command = String(params.command || "").trim();
995
+ if (!command) return { success: false, error: "git requires command" };
996
+ const allowed = ["status", "diff", "log", "add", "commit", "show", "branch", "stash", "tag", "blame", "checkout", "switch", "restore", "rev-parse", "remote", "fetch", "pull", "push", "merge", "rebase", "reset", "cherry-pick", "worktree"];
997
+ const verb = command.split(/\s+/)[0];
998
+ if (!allowed.includes(verb)) {
999
+ return { success: false, error: `git subcommand not allowed: ${verb}. Allowed: ${allowed.join(", ")}` };
1000
+ }
1001
+ if (/--force|--force-with-lease/.test(command) && verb === "push") {
1002
+ return { success: false, error: "Force push is not allowed. Use a regular push or create a new branch." };
1003
+ }
1004
+ if (/--no-verify/.test(command)) {
1005
+ return { success: false, error: "Skipping hooks (--no-verify) is not allowed. Fix the hook issue instead." };
1006
+ }
1007
+ if (verb === "reset" && /--hard/.test(command)) {
1008
+ return { success: false, error: "git reset --hard is destructive. Use git stash or git checkout <file> instead." };
1009
+ }
1010
+ if (/[;&|`$(){}\\!<>]/.test(command)) {
1011
+ return { success: false, error: "git command contains disallowed shell characters. Use only git arguments." };
1012
+ }
1013
+ try {
1014
+ const args = command.split(/\s+/).filter(Boolean);
1015
+ const { execFileSync } = await import("node:child_process");
1016
+ const out = execFileSync("git", args, {
1017
+ cwd: this.config.getWorkspaceRoot(),
1018
+ stdio: "pipe",
1019
+ encoding: "utf8",
1020
+ timeout: 3e4
1021
+ });
1022
+ return { success: true, output: out.trim() };
1023
+ } catch (err) {
1024
+ const text = `${err?.stdout?.toString?.() || ""}
1025
+ ${err?.stderr?.toString?.() || ""}`.trim();
1026
+ return { success: false, error: text || err?.message || "git failed" };
1027
+ }
1028
+ }
1029
/**
 * Run a shell command for the agent.
 *
 * Safety/behavior:
 * - Commands matching DANGEROUS_SHELL_PATTERNS are refused outright.
 * - run_in_background: spawns `sh -c <command>` detached from this turn,
 *   registers the promise in _backgroundProcesses under a generated task id,
 *   and returns immediately; the result is retrieved later via
 *   check_background_task. The background promise never rejects — failures
 *   are folded into a { success: false, error } result.
 * - Foreground with staged sandbox files: prefers running inside Docker so
 *   the command sees the staged (not-yet-committed) file contents; falls back
 *   to native execution with a warning when Docker is unavailable.
 * - Foreground native: execSync in the workspace root with getShellTimeout().
 */
async shellTool(params) {
  const command = String(params.command || "").trim();
  if (!command) return { success: false, error: "shell requires command" };
  for (const pat of DANGEROUS_SHELL_PATTERNS) {
    if (pat.test(command)) {
      return { success: false, error: `Blocked: destructive command detected (${command.slice(0, 60)}). Use a safer alternative.` };
    }
  }
  if (params.run_in_background) {
    const taskId = `bg_${Date.now()}_${Math.random().toString(36).slice(2, 6)}`;
    const bgPromise = (async () => {
      try {
        const { spawn } = await import("node:child_process");
        return new Promise((resolve4) => {
          const proc = spawn("sh", ["-c", command], {
            cwd: this.config.getWorkspaceRoot(),
            stdio: "pipe"
          });
          let stdout = "", stderr = "";
          proc.stdout?.on("data", (d) => {
            stdout += d.toString();
          });
          proc.stderr?.on("data", (d) => {
            stderr += d.toString();
          });
          // Kill the process if it outlives the shell timeout.
          const timeout = setTimeout(() => {
            proc.kill("SIGTERM");
            resolve4({ success: false, error: "Background task timed out" });
          }, getShellTimeout());
          proc.on("close", (code) => {
            clearTimeout(timeout);
            resolve4(code === 0 ? { success: true, output: stdout.trim() } : { success: false, error: (stderr || stdout).trim() || `exit code ${code}` });
          });
        });
      } catch (err) {
        return { success: false, error: err.message };
      }
    })();
    _backgroundProcesses.set(taskId, { promise: bgPromise, startedAt: Date.now() });
    return { success: true, output: `Background task started: ${taskId}
Use check_background_task with this ID to get the result.` };
  }
  try {
    const hasStagedFiles = this.sandbox.getPendingPaths().length > 0;
    if (hasStagedFiles) {
      // Staged edits are not on disk yet — run inside Docker so the command
      // sees the sandbox's view of the workspace.
      const { DockerSandbox: DockerSandbox2 } = await Promise.resolve().then(() => (init_docker_sandbox(), docker_sandbox_exports));
      const docker = new DockerSandbox2();
      const dockerAvailable = await docker.isDockerAvailable();
      if (dockerAvailable) {
        console.log(`[GeminiAdapter] Running command in Docker with ${this.sandbox.getPendingPaths().length} staged file(s)`);
        const result = await docker.runCommand(command, this.sandbox, {
          workDir: this.config.getWorkspaceRoot(),
          timeout: getShellTimeout()
        });
        return {
          success: result.success,
          output: result.output,
          error: result.success ? void 0 : result.output
        };
      } else {
        console.warn("[GeminiAdapter] Docker unavailable - running natively (staged files not available to command)");
      }
    }
    const out = execSync2(command, {
      cwd: this.config.getWorkspaceRoot(),
      stdio: "pipe",
      encoding: "utf8",
      timeout: getShellTimeout()
    });
    return { success: true, output: out.trim() };
  } catch (err) {
    // Surface whatever the process wrote before failing.
    const text = `${err?.stdout?.toString?.() || ""}
${err?.stderr?.toString?.() || ""}`.trim();
    return { success: false, error: text || err?.message || "shell failed" };
  }
}
1105
/**
 * Web search via the Brave Search API (top 5 results).
 * Requires BRAVE_API_KEY or BRAVE_SEARCH_API_KEY in the environment;
 * the request times out after 10s via AbortSignal.
 */
async webSearchTool(params) {
  const query = String(params.query || "").trim();
  if (!query) return { success: false, error: "web_search requires query" };
  const braveKey = process.env.BRAVE_API_KEY || process.env.BRAVE_SEARCH_API_KEY;
  if (!braveKey) return { success: false, error: "web_search unavailable (missing BRAVE_API_KEY)" };
  try {
    const res = await fetch(
      `https://api.search.brave.com/res/v1/web/search?q=${encodeURIComponent(query)}&count=5`,
      {
        headers: {
          "Accept": "application/json",
          "X-Subscription-Token": braveKey
        },
        signal: AbortSignal.timeout(1e4)
      }
    );
    if (!res.ok) return { success: false, error: `web_search failed: HTTP ${res.status}` };
    const data = await res.json();
    const hits = (data?.web?.results || []).slice(0, 5);
    // One "N. title / url / description" block per hit, separated by blank lines.
    const formatted = hits.map(
      (r, i) => `${i + 1}. ${r.title || "(untitled)"}
${r.url || ""}
${r.description || ""}`
    ).join("\n\n");
    return { success: true, output: formatted || "No results" };
  } catch (err) {
    return { success: false, error: err?.message || "web_search failed" };
  }
}
1134
+ async webFetchTool(params) {
1135
+ const url = String(params.url || "").trim();
1136
+ if (!url || !/^https?:\/\//i.test(url)) {
1137
+ return { success: false, error: "web_fetch requires valid http(s) url" };
1138
+ }
1139
+ try {
1140
+ const res = await fetch(url, {
1141
+ headers: { "User-Agent": "CrewSwarm-CLI/1.0" },
1142
+ signal: AbortSignal.timeout(12e3)
1143
+ });
1144
+ if (!res.ok) return { success: false, error: `web_fetch failed: HTTP ${res.status}` };
1145
+ const ct = String(res.headers.get("content-type") || "");
1146
+ let text = await res.text();
1147
+ if (ct.includes("html")) {
1148
+ text = text.replace(/<script[\s\S]*?<\/script>/gi, "").replace(/<style[\s\S]*?<\/style>/gi, "").replace(/<[^>]+>/g, " ").replace(/\s{2,}/g, " ").trim();
1149
+ }
1150
+ return { success: true, output: text.slice(0, 12e3) };
1151
+ } catch (err) {
1152
+ return { success: false, error: err?.message || "web_fetch failed" };
1153
+ }
1154
+ }
1155
+ async readManyFilesTool(params) {
1156
+ const include = String(params.include || "**/*").trim();
1157
+ try {
1158
+ const out = execSync2(`rg --files -g ${JSON.stringify(include)}`, {
1159
+ cwd: this.config.getWorkspaceRoot(),
1160
+ stdio: "pipe",
1161
+ encoding: "utf8"
1162
+ });
1163
+ const files = out.split("\n").filter(Boolean).slice(0, 20);
1164
+ const chunks = [];
1165
+ for (const rel of files) {
1166
+ const full = resolve2(this.config.getWorkspaceRoot(), rel);
1167
+ try {
1168
+ const content = await readFile(full, "utf8");
1169
+ chunks.push(`--- ${rel} ---
1170
+ ${content.slice(0, 2e3)}`);
1171
+ } catch {
1172
+ }
1173
+ }
1174
+ return { success: true, output: chunks.join("\n\n") || "No readable files matched" };
1175
+ } catch (err) {
1176
+ return { success: false, error: err?.message || "read_many_files failed" };
1177
+ }
1178
+ }
1179
+ async saveMemoryTool(params) {
1180
+ const fact = String(params.fact || "").trim();
1181
+ if (!fact) return { success: false, error: "save_memory requires fact" };
1182
+ const memDir = resolve2(this.config.getWorkspaceRoot(), ".crew");
1183
+ await mkdir(memDir, { recursive: true });
1184
+ const memFile = resolve2(memDir, "memory-facts.log");
1185
+ let prior = "";
1186
+ try {
1187
+ prior = await readFile(memFile, "utf8");
1188
+ } catch {
1189
+ }
1190
+ await writeFile(memFile, `${prior}${(/* @__PURE__ */ new Date()).toISOString()} ${fact}
1191
+ `, "utf8");
1192
+ return { success: true, output: "Memory saved" };
1193
+ }
1194
+ async writeTodosTool(params) {
1195
+ const todos = Array.isArray(params.todos) ? params.todos : [];
1196
+ const memDir = resolve2(this.config.getWorkspaceRoot(), ".crew");
1197
+ await mkdir(memDir, { recursive: true });
1198
+ const todoFile = resolve2(memDir, "todos.json");
1199
+ await writeFile(todoFile, JSON.stringify(todos, null, 2), "utf8");
1200
+ return { success: true, output: `Saved ${todos.length} todos` };
1201
+ }
1202
+ async getInternalDocsTool(params) {
1203
+ const target = String(params.path || "AGENTS.md").trim();
1204
+ const abs = resolve2(this.config.getWorkspaceRoot(), target);
1205
+ try {
1206
+ const content = await readFile(abs, "utf8");
1207
+ return { success: true, output: content.slice(0, 12e3) };
1208
+ } catch (err) {
1209
+ return { success: false, error: `get_internal_docs failed: ${err?.message || target}` };
1210
+ }
1211
+ }
1212
/**
 * Queue questions for the user in a non-interactive runtime.
 * The request is appended to .crew/ask-user-requests.jsonl and mirrored to
 * .crew/ask-user-latest.json (for whatever UI polls for pending questions);
 * the tool result tells the model where the request was saved.
 */
async askUserTool(params) {
  const qs = Array.isArray(params.questions) ? params.questions : [];
  if (qs.length === 0) {
    return { success: false, error: "ask_user requires at least one question" };
  }
  const now = (/* @__PURE__ */ new Date()).toISOString();
  const request = {
    id: `ask-${Date.now()}-${Math.random().toString(16).slice(2, 8)}`,
    ts: now,
    status: "pending",
    questions: qs
  };
  const crewDir = this.crewDirPath();
  await mkdir(crewDir, { recursive: true });
  // Durable log first, then the "latest" pointer.
  await this.appendJsonLine(this.askUserRequestsPath(), request);
  await writeFile(this.askUserLatestPath(), JSON.stringify(request, null, 2), "utf8");
  const summary = qs.map((q, i) => `${i + 1}. ${q?.question || "question"}`).join("\n");
  return {
    success: true,
    output: `User input required (non-interactive runtime).
Saved request: ${this.relativeCrewPath(this.askUserLatestPath())}
Questions:
${summary}`
  };
}
1237
+ async enterPlanModeTool(params) {
1238
+ const crewDir = this.crewDirPath();
1239
+ await mkdir(crewDir, { recursive: true });
1240
+ const state = {
1241
+ active: true,
1242
+ enteredAt: (/* @__PURE__ */ new Date()).toISOString(),
1243
+ exitedAt: null,
1244
+ reason: String(params?.reason || "").trim() || null,
1245
+ planPath: null
1246
+ };
1247
+ await writeFile(this.planModeStatePath(), JSON.stringify(state, null, 2), "utf8");
1248
+ return {
1249
+ success: true,
1250
+ output: `Plan mode entered${state.reason ? `: ${state.reason}` : ""} (${this.relativeCrewPath(this.planModeStatePath())})`
1251
+ };
1252
+ }
1253
+ async exitPlanModeTool(params) {
1254
+ const crewDir = this.crewDirPath();
1255
+ await mkdir(crewDir, { recursive: true });
1256
+ let prior = {};
1257
+ try {
1258
+ prior = JSON.parse(await readFile(this.planModeStatePath(), "utf8"));
1259
+ } catch {
1260
+ prior = {};
1261
+ }
1262
+ const state = {
1263
+ ...prior,
1264
+ active: false,
1265
+ exitedAt: (/* @__PURE__ */ new Date()).toISOString(),
1266
+ planPath: String(params?.plan_path || "").trim() || prior?.planPath || null
1267
+ };
1268
+ await writeFile(this.planModeStatePath(), JSON.stringify(state, null, 2), "utf8");
1269
+ return {
1270
+ success: true,
1271
+ output: `Plan mode exited${state.planPath ? `: ${state.planPath}` : ""} (${this.relativeCrewPath(this.planModeStatePath())})`
1272
+ };
1273
+ }
1274
+ async activateSkillTool(params) {
1275
+ const name = String(params?.name || "").trim();
1276
+ if (!name) return { success: false, error: "activate_skill requires name" };
1277
+ const crewDir = this.crewDirPath();
1278
+ await mkdir(crewDir, { recursive: true });
1279
+ let state = { active: [] };
1280
+ try {
1281
+ state = JSON.parse(await readFile(this.activeSkillsPath(), "utf8"));
1282
+ } catch {
1283
+ state = { active: [] };
1284
+ }
1285
+ const active = new Set(Array.isArray(state?.active) ? state.active : []);
1286
+ active.add(name);
1287
+ const next = {
1288
+ active: Array.from(active).sort(),
1289
+ updatedAt: (/* @__PURE__ */ new Date()).toISOString()
1290
+ };
1291
+ await writeFile(this.activeSkillsPath(), JSON.stringify(next, null, 2), "utf8");
1292
+ return { success: true, output: `Skill activated: ${name} (${this.relativeCrewPath(this.activeSkillsPath())})` };
1293
+ }
1294
// --- Workspace-relative state paths (.crew/ directory) ---

// Absolute path of the .crew state directory inside the workspace.
crewDirPath() {
  return resolve2(this.config.getWorkspaceRoot(), ".crew");
}
// JSONL log of every ask_user request raised (see askUserTool/appendJsonLine).
askUserRequestsPath() {
  return resolve2(this.crewDirPath(), "ask-user-requests.jsonl");
}
// Most recent ask_user request, as a single JSON document.
askUserLatestPath() {
  return resolve2(this.crewDirPath(), "ask-user-latest.json");
}
// Plan-mode state written by enterPlanModeTool/exitPlanModeTool.
planModeStatePath() {
  return resolve2(this.crewDirPath(), "plan-mode.json");
}
// Persisted set of activated skills (activateSkillTool).
activeSkillsPath() {
  return resolve2(this.crewDirPath(), "active-skills.json");
}
// Rewrite an absolute path under the workspace root as "./...".
// NOTE(review): String.replace swaps only the first occurrence and assumes
// absPath begins with the workspace root — confirm callers guarantee this.
relativeCrewPath(absPath) {
  return absPath.replace(this.config.getWorkspaceRoot(), ".");
}
1312
/**
 * Append one JSON document as a line to a .jsonl file.
 * Implemented as read-whole-file + rewrite, so it is not atomic and costs
 * O(file size) per append — acceptable for the small .crew logs it serves.
 */
async appendJsonLine(filePath, data) {
  let prior = "";
  try {
    prior = await readFile(filePath, "utf8");
  } catch {
    // Missing file: start a fresh log.
    prior = "";
  }
  const line = `${JSON.stringify(data)}
`;
  await writeFile(filePath, `${prior}${line}`, "utf8");
}
1323
// --- Tracker persistence (.crew/tracker.json, flat task array) ---

// Location of the tracker's task-list JSON file.
trackerFilePath() {
  return resolve2(this.config.getWorkspaceRoot(), ".crew", "tracker.json");
}
// Load the task list; any read/parse failure or non-array content yields [].
async readTracker() {
  try {
    const raw = await readFile(this.trackerFilePath(), "utf8");
    const parsed = JSON.parse(raw);
    return Array.isArray(parsed) ? parsed : [];
  } catch {
    return [];
  }
}
// Persist the full task list (pretty-printed), creating .crew if needed.
async writeTracker(tasks) {
  const dir = resolve2(this.config.getWorkspaceRoot(), ".crew");
  await mkdir(dir, { recursive: true });
  await writeFile(this.trackerFilePath(), JSON.stringify(tasks, null, 2), "utf8");
}
// Short hex task id derived from Math.random.
// NOTE(review): not collision-proof (and occasionally shorter than 6 chars);
// consider crypto-based ids if uniqueness ever matters.
mkTrackerId() {
  return Math.random().toString(16).slice(2, 8);
}
1343
+ async trackerCreateTaskTool(params) {
1344
+ const tasks = await this.readTracker();
1345
+ const task = {
1346
+ id: this.mkTrackerId(),
1347
+ title: String(params?.title || "Untitled"),
1348
+ description: String(params?.description || ""),
1349
+ type: String(params?.type || "task"),
1350
+ status: "open",
1351
+ parentId: params?.parentId || null,
1352
+ dependencies: Array.isArray(params?.dependencies) ? params.dependencies : []
1353
+ };
1354
+ tasks.push(task);
1355
+ await this.writeTracker(tasks);
1356
+ return { success: true, output: JSON.stringify(task, null, 2) };
1357
+ }
1358
+ async trackerUpdateTaskTool(params) {
1359
+ const tasks = await this.readTracker();
1360
+ const id = String(params?.id || "");
1361
+ const idx = tasks.findIndex((t) => t.id === id);
1362
+ if (idx < 0) return { success: false, error: `Task not found: ${id}` };
1363
+ tasks[idx] = { ...tasks[idx], ...params };
1364
+ await this.writeTracker(tasks);
1365
+ return { success: true, output: JSON.stringify(tasks[idx], null, 2) };
1366
+ }
1367
+ async trackerGetTaskTool(params) {
1368
+ const tasks = await this.readTracker();
1369
+ const id = String(params?.id || "");
1370
+ const task = tasks.find((t) => t.id === id);
1371
+ if (!task) return { success: false, error: `Task not found: ${id}` };
1372
+ return { success: true, output: JSON.stringify(task, null, 2) };
1373
+ }
1374
+ async trackerListTasksTool(params) {
1375
+ const tasks = await this.readTracker();
1376
+ const filtered = tasks.filter((t) => {
1377
+ if (params?.status && t.status !== params.status) return false;
1378
+ if (params?.type && t.type !== params.type) return false;
1379
+ if (params?.parentId && t.parentId !== params.parentId) return false;
1380
+ return true;
1381
+ });
1382
+ return { success: true, output: JSON.stringify(filtered, null, 2) };
1383
+ }
1384
+ async trackerAddDependencyTool(params) {
1385
+ const tasks = await this.readTracker();
1386
+ const taskId = String(params?.taskId || "");
1387
+ const depId = String(params?.dependencyId || "");
1388
+ const idx = tasks.findIndex((t) => t.id === taskId);
1389
+ if (idx < 0) return { success: false, error: `Task not found: ${taskId}` };
1390
+ const deps = new Set(Array.isArray(tasks[idx].dependencies) ? tasks[idx].dependencies : []);
1391
+ deps.add(depId);
1392
+ tasks[idx].dependencies = Array.from(deps);
1393
+ await this.writeTracker(tasks);
1394
+ return { success: true, output: JSON.stringify(tasks[idx], null, 2) };
1395
+ }
1396
+ async trackerVisualizeTool() {
1397
+ const tasks = await this.readTracker();
1398
+ const lines = tasks.map((t) => {
1399
+ const deps = Array.isArray(t.dependencies) && t.dependencies.length ? ` -> [${t.dependencies.join(", ")}]` : "";
1400
+ return `${t.id} [${t.status}] ${t.title}${deps}`;
1401
+ });
1402
+ return { success: true, output: lines.join("\n") || "(no tasks)" };
1403
+ }
1404
/**
 * Text-command front-end over the bundled LSP helper module.
 * Supported queries:
 *   "symbols <file>"                 — document symbols
 *   "refs <file:line[:col]>"         — references at a position
 *   "refs <symbol>"                  — falls back to a word-boundary grep
 *   "goto <file:line[:col]>"         — definitions at a position
 *   "diagnostics" / "check"          — project-wide type check
 *   "complete <file:line:col [pfx]>" — completions (max 50)
 */
async lspTool(params) {
  const query = String(params.query || "").trim();
  if (!query) return { success: false, error: "lsp requires query" };
  const lower = query.toLowerCase();
  // Lazy-load the bundled LSP module (esbuild lazy-init pattern).
  const lsp = await Promise.resolve().then(() => (init_lsp(), lsp_exports));
  if (lower.startsWith("symbols")) {
    const file = query.slice("symbols".length).trim();
    if (!file) return { success: false, error: "lsp symbols requires file path" };
    const symbols = await lsp.getDocumentSymbols(process.cwd(), file);
    return { success: true, output: symbols.map((s) => `${file}:${s.line}:${s.column} ${s.kind} ${s.name}`).join("\n") };
  }
  if (lower.startsWith("refs")) {
    const target = query.slice("refs".length).trim();
    // file:line[:col] form; column defaults to 1.
    const match = target.match(/^(.+):(\d+)(?::(\d+))?$/);
    if (match) {
      const refs = await lsp.getReferences(process.cwd(), match[1], Number(match[2]), Number(match[3] || "1"));
      return { success: true, output: refs.map((r) => `${r.file}:${r.line}:${r.column}`).join("\n") };
    }
    // Bare symbol name: approximate references with a word-boundary grep.
    if (target) return this.grepTool({ pattern: `\\b${target}\\b`, path: "." });
    return { success: false, error: "lsp refs requires symbol or file:line[:col]" };
  }
  if (lower.startsWith("goto")) {
    const target = query.slice("goto".length).trim();
    const match = target.match(/^(.+):(\d+)(?::(\d+))?$/);
    if (!match) return { success: false, error: "lsp goto format: file:line[:col]" };
    const defs = await lsp.getDefinitions(process.cwd(), match[1], Number(match[2]), Number(match[3] || "1"));
    return { success: true, output: defs.map((d) => `${d.file}:${d.line}:${d.column}`).join("\n") };
  }
  if (lower.startsWith("diagnostics") || lower === "check") {
    // Empty file list = check the whole project.
    const diags = await lsp.typeCheckProject(process.cwd(), []);
    return { success: true, output: diags.map((d) => `${d.file}:${d.line}:${d.column} [${d.category}] ${d.message}`).join("\n") };
  }
  if (lower.startsWith("complete")) {
    const target = query.slice("complete".length).trim();
    // file:line:col with an optional trailing completion prefix.
    const match = target.match(/^(.+):(\d+):(\d+)(?:\s+(.+))?$/);
    if (!match) return { success: false, error: "lsp complete format: file:line:col [prefix]" };
    const items = await lsp.getCompletions(process.cwd(), match[1], Number(match[2]), Number(match[3]), 50, match[4] || "");
    return { success: true, output: items.map((i) => `${i.name} (${i.kind})`).join("\n") };
  }
  return { success: false, error: `Unsupported lsp query: ${query}` };
}
1445
+ async checkBackgroundTask(params) {
1446
+ const taskId = String(params.task_id || "").trim();
1447
+ if (!taskId) return { success: false, error: "check_background_task requires task_id" };
1448
+ const bg = _backgroundProcesses.get(taskId);
1449
+ if (!bg) return { success: false, error: `No background task found with ID: ${taskId}` };
1450
+ const done = await Promise.race([
1451
+ bg.promise.then((r) => ({ done: true, result: r })),
1452
+ new Promise((resolve4) => setTimeout(() => resolve4({ done: false }), 50))
1453
+ ]);
1454
+ if (!done.done) {
1455
+ const elapsed = Math.round((Date.now() - bg.startedAt) / 1e3);
1456
+ return { success: true, output: `Task ${taskId} still running (${elapsed}s elapsed). Check again later.` };
1457
+ }
1458
+ _backgroundProcesses.delete(taskId);
1459
+ return done.result;
1460
+ }
1461
  static {
    // Track sub-agent depth to prevent infinite recursion: incremented and
    // decremented around each spawnAgentTool invocation.
    this._spawnDepth = 0;
  }
  static {
    // Hard ceiling on nested sub-agent spawning (checked in spawnAgentTool).
    this.MAX_SPAWN_DEPTH = 3;
  }
1468
+ async spawnAgentTool(params) {
1469
+ const task = String(params.task || "").trim();
1470
+ if (!task) return { success: false, error: "spawn_agent requires task" };
1471
+ if (_GeminiToolAdapter._spawnDepth >= _GeminiToolAdapter.MAX_SPAWN_DEPTH) {
1472
+ return { success: false, error: `Sub-agent depth limit reached (max ${_GeminiToolAdapter.MAX_SPAWN_DEPTH}). Complete this task directly instead.` };
1473
+ }
1474
+ const maxTurns = Math.min(params.max_turns || 15, 25);
1475
+ const model = params.model || process.env.CREW_WORKER_MODEL || process.env.CREW_EXECUTION_MODEL || "";
1476
+ const branchName = `sub-agent-${Date.now()}-${Math.random().toString(36).slice(2, 6)}`;
1477
+ try {
1478
+ await this.sandbox.createBranch(branchName);
1479
+ _GeminiToolAdapter._spawnDepth++;
1480
+ const { runAgenticWorker: runAgenticWorker2 } = await Promise.resolve().then(() => (init_agentic_executor(), agentic_executor_exports));
1481
+ const result = await runAgenticWorker2(task, this.sandbox, {
1482
+ model,
1483
+ maxTurns,
1484
+ stream: false,
1485
+ // Sub-agents don't stream to stdout
1486
+ verbose: Boolean(process.env.CREW_DEBUG),
1487
+ tier: "fast"
1488
+ // Default to cheap model for sub-agents
1489
+ });
1490
+ _GeminiToolAdapter._spawnDepth--;
1491
+ const parentBranch = this.sandbox.getActiveBranch();
1492
+ if (parentBranch !== branchName) {
1493
+ await this.sandbox.mergeBranch(branchName, parentBranch);
1494
+ } else {
1495
+ const branches = this.sandbox.getBranches();
1496
+ const parent = branches.find((b) => b !== branchName) || "main";
1497
+ await this.sandbox.switchBranch(parent);
1498
+ await this.sandbox.mergeBranch(branchName, parent);
1499
+ }
1500
+ try {
1501
+ await this.sandbox.deleteBranch(branchName);
1502
+ } catch {
1503
+ }
1504
+ const output = [
1505
+ `Sub-agent completed in ${result.turns || 0} turns (${result.modelUsed || "unknown"})`,
1506
+ result.cost ? `Cost: $${result.cost.toFixed(4)}` : "",
1507
+ `Status: ${result.success ? "SUCCESS" : "FAILED"}`,
1508
+ "",
1509
+ result.output?.slice(0, 3e3) || "(no output)"
1510
+ ].filter(Boolean).join("\n");
1511
+ return { success: result.success, output };
1512
+ } catch (err) {
1513
+ _GeminiToolAdapter._spawnDepth = Math.max(0, _GeminiToolAdapter._spawnDepth - 1);
1514
+ try {
1515
+ await this.sandbox.switchBranch("main");
1516
+ } catch {
1517
+ }
1518
+ try {
1519
+ await this.sandbox.deleteBranch(branchName);
1520
+ } catch {
1521
+ }
1522
+ return { success: false, error: `Sub-agent failed: ${err.message}` };
1523
+ }
1524
+ }
1525
  /**
   * Get tool declarations for LLM function calling.
   *
   * Prefers dynamically built declarations (opt out by setting
   * CREW_GEMINI_DYNAMIC_DECLARATIONS="false"); falls back to the static
   * list below if dynamic building is disabled, throws, or yields nothing.
   * Several entries are deliberate aliases (grep_search, replace, run_cmd,
   * run_shell_command, google_web_search, list_directory) kept for
   * compatibility with prompts written against other tool vocabularies.
   */
  getToolDeclarations() {
    const dynamicEnabled = process.env.CREW_GEMINI_DYNAMIC_DECLARATIONS !== "false";
    if (dynamicEnabled) {
      try {
        const decls = this.buildDynamicDeclarations();
        if (decls.length > 0) return decls;
      } catch {
        // Fall through to the static declaration list below.
      }
    }
    return [
      // --- File reading & discovery ---
      {
        name: "read_file",
        description: "Read the contents of a file. ALWAYS read files before editing them. Use start_line/end_line for large files.",
        parameters: {
          type: "object",
          properties: {
            file_path: { type: "string", description: "Relative path from project root" },
            start_line: { type: "number", description: "Start line number (1-based, optional)" },
            end_line: { type: "number", description: "End line number (inclusive, optional)" }
          },
          required: ["file_path"]
        }
      },
      {
        name: "glob",
        description: 'Find files matching a glob pattern. Use this to discover file structure. Examples: "**/*.ts", "src/**/*.tsx", "*.json"',
        parameters: {
          type: "object",
          properties: {
            pattern: { type: "string", description: 'Glob pattern (e.g. "src/**/*.ts")' }
          },
          required: ["pattern"]
        }
      },
      // --- Search (grep plus compatibility aliases) ---
      {
        name: "grep",
        description: "Search for text/regex patterns in files. Returns matching lines with file paths and line numbers.",
        parameters: {
          type: "object",
          properties: {
            pattern: { type: "string", description: "Regex or text pattern to search for" },
            path: { type: "string", description: 'Directory or file to search in (default: ".")' }
          },
          required: ["pattern"]
        }
      },
      {
        name: "grep_search",
        description: "Canonical alias for grep. Search for regex/text in files.",
        parameters: {
          type: "object",
          properties: {
            pattern: { type: "string", description: "Regex/text pattern" },
            dir_path: { type: "string", description: "Path to search (default: .)" }
          },
          required: ["pattern"]
        }
      },
      {
        name: "grep_search_ripgrep",
        description: "Ripgrep-optimized canonical name. Routed to grep tool in this adapter.",
        parameters: {
          type: "object",
          properties: {
            pattern: { type: "string", description: "Regex/text pattern" },
            dir_path: { type: "string", description: "Path to search (default: .)" },
            path: { type: "string", description: "Alternative path field" }
          },
          required: ["pattern"]
        }
      },
      // --- File mutation ---
      {
        name: "write_file",
        description: "Write content to a file (creates or overwrites). Changes are staged in sandbox. Use for new files or full rewrites.",
        parameters: {
          type: "object",
          properties: {
            file_path: { type: "string", description: "Relative path from project root" },
            content: { type: "string", description: "Complete file content" }
          },
          required: ["file_path", "content"]
        }
      },
      {
        name: "append_file",
        description: "Append content to an existing file. Creates file if it does not exist. Changes are staged in sandbox.",
        parameters: {
          type: "object",
          properties: {
            file_path: { type: "string", description: "Relative path from project root" },
            content: { type: "string", description: "Content to append" }
          },
          required: ["file_path", "content"]
        }
      },
      {
        name: "edit",
        description: "Edit a file by replacing an exact string match. ALWAYS read the file first to get the exact string. Use for targeted changes.",
        parameters: {
          type: "object",
          properties: {
            file_path: { type: "string", description: "Relative path from project root" },
            old_string: { type: "string", description: "Exact string to find (must match precisely)" },
            new_string: { type: "string", description: "Replacement string" }
          },
          required: ["file_path", "old_string", "new_string"]
        }
      },
      {
        name: "replace",
        description: "Canonical alias for edit. Replace exact old_string with new_string.",
        parameters: {
          type: "object",
          properties: {
            file_path: { type: "string", description: "Relative path from project root" },
            old_string: { type: "string", description: "Exact string to replace" },
            new_string: { type: "string", description: "Replacement string" }
          },
          required: ["file_path", "old_string", "new_string"]
        }
      },
      // --- Shell execution (three names, one behavior) ---
      {
        name: "shell",
        description: "Run a shell command (e.g. npm test, node script.js, cat, ls). Use for build verification, running tests, or commands not covered by other tools.",
        parameters: {
          type: "object",
          properties: {
            command: { type: "string", description: "Shell command to execute" }
          },
          required: ["command"]
        }
      },
      {
        name: "run_cmd",
        description: "Alias for shell. Run a shell command. Prefer this for compatibility with existing prompts.",
        parameters: {
          type: "object",
          properties: {
            command: { type: "string", description: "Shell command to execute" }
          },
          required: ["command"]
        }
      },
      {
        name: "run_shell_command",
        description: "Canonical alias for shell/run_cmd.",
        parameters: {
          type: "object",
          properties: {
            command: { type: "string", description: "Shell command to execute" }
          },
          required: ["command"]
        }
      },
      // --- Filesystem listing ---
      {
        name: "mkdir",
        description: "Create a directory (staged via .gitkeep in sandbox).",
        parameters: {
          type: "object",
          properties: {
            path: { type: "string", description: "Directory path to create" },
            dir_path: { type: "string", description: "Alternate directory path field" }
          },
          required: []
        }
      },
      {
        name: "list",
        description: "List files and directories for a path.",
        parameters: {
          type: "object",
          properties: {
            path: { type: "string", description: "Path to list (default: .)" },
            dir_path: { type: "string", description: "Alternate path field" }
          },
          required: []
        }
      },
      {
        name: "list_directory",
        description: "Canonical alias for list.",
        parameters: {
          type: "object",
          properties: {
            dir_path: { type: "string", description: "Directory path to list (default: .)" }
          },
          required: []
        }
      },
      // --- Repo state & code intelligence ---
      {
        name: "git",
        description: "Run git subcommands (status, diff, log, show, branch). Use to understand repo state and recent changes.",
        parameters: {
          type: "object",
          properties: {
            command: { type: "string", description: 'Git subcommand (e.g. "diff HEAD~3", "log --oneline -10")' }
          },
          required: ["command"]
        }
      },
      {
        name: "lsp",
        description: 'Code intelligence: "symbols <file>" for outline, "refs <file:line:col>" for references, "goto <file:line:col>" for definition, "diagnostics" for type errors.',
        parameters: {
          type: "object",
          properties: {
            query: { type: "string", description: 'LSP query (e.g. "symbols src/app.ts", "goto src/app.ts:42:5")' }
          },
          required: ["query"]
        }
      },
      // --- Web access ---
      {
        name: "web_search",
        description: "Search the web via Brave Search API (requires BRAVE_API_KEY).",
        parameters: {
          type: "object",
          properties: {
            query: { type: "string", description: "Search query" }
          },
          required: ["query"]
        }
      },
      {
        name: "google_web_search",
        description: "Canonical alias for web_search.",
        parameters: {
          type: "object",
          properties: {
            query: { type: "string", description: "Search query" }
          },
          required: ["query"]
        }
      },
      {
        name: "web_fetch",
        description: "Fetch content from a URL and return cleaned text for analysis.",
        parameters: {
          type: "object",
          properties: {
            url: { type: "string", description: "http(s) URL to fetch" }
          },
          required: ["url"]
        }
      },
      {
        name: "read_many_files",
        description: "Read multiple files by include glob and return concatenated excerpts.",
        parameters: {
          type: "object",
          properties: {
            include: { type: "string", description: "Glob include pattern (default: **/*)" },
            exclude: { type: "string", description: "Optional exclude glob" },
            recursive: { type: "boolean", description: "Recursive search (optional)" }
          },
          required: []
        }
      },
      // --- Memory, planning, docs ---
      {
        name: "save_memory",
        description: "Save a memory fact to local project memory log.",
        parameters: {
          type: "object",
          properties: {
            fact: { type: "string", description: "Memory fact to persist" }
          },
          required: ["fact"]
        }
      },
      {
        name: "write_todos",
        description: "Persist todo items for the current project.",
        parameters: {
          type: "object",
          properties: {
            todos: { type: "array", description: "Todo items array" }
          },
          required: ["todos"]
        }
      },
      {
        name: "get_internal_docs",
        description: "Read internal docs by relative path (default AGENTS.md).",
        parameters: {
          type: "object",
          properties: {
            path: { type: "string", description: "Relative doc path" }
          },
          required: []
        }
      },
      {
        name: "ask_user",
        description: "Non-interactive placeholder for ask-user; returns summarized questions.",
        parameters: {
          type: "object",
          properties: {
            questions: { type: "array", description: "Question descriptors" }
          },
          required: []
        }
      },
      {
        name: "enter_plan_mode",
        description: "Enter plan mode (no-op marker in CLI adapter).",
        parameters: {
          type: "object",
          properties: {
            reason: { type: "string", description: "Plan mode reason" }
          },
          required: []
        }
      },
      {
        name: "exit_plan_mode",
        description: "Exit plan mode (no-op marker in CLI adapter).",
        parameters: {
          type: "object",
          properties: {
            plan_path: { type: "string", description: "Optional plan file path" }
          },
          required: []
        }
      },
      {
        name: "activate_skill",
        description: "Activate a named skill (adapter acknowledgment).",
        parameters: {
          type: "object",
          properties: {
            name: { type: "string", description: "Skill name" }
          },
          required: ["name"]
        }
      },
      // --- Task tracker (.crew/tracker.json) ---
      {
        name: "tracker_create_task",
        description: "Create tracker task in local .crew/tracker.json.",
        parameters: {
          type: "object",
          properties: {
            title: { type: "string" },
            description: { type: "string" },
            type: { type: "string" },
            parentId: { type: "string" },
            dependencies: { type: "array" }
          },
          required: ["title", "description", "type"]
        }
      },
      {
        name: "tracker_update_task",
        description: "Update tracker task by id.",
        parameters: {
          type: "object",
          properties: {
            id: { type: "string" },
            title: { type: "string" },
            description: { type: "string" },
            status: { type: "string" },
            dependencies: { type: "array" }
          },
          required: ["id"]
        }
      },
      {
        name: "tracker_get_task",
        description: "Get tracker task by id.",
        parameters: {
          type: "object",
          properties: {
            id: { type: "string" }
          },
          required: ["id"]
        }
      },
      {
        name: "tracker_list_tasks",
        description: "List tracker tasks with optional filters.",
        parameters: {
          type: "object",
          properties: {
            status: { type: "string" },
            type: { type: "string" },
            parentId: { type: "string" }
          },
          required: []
        }
      },
      {
        name: "tracker_add_dependency",
        description: "Add dependency between tracker tasks.",
        parameters: {
          type: "object",
          properties: {
            taskId: { type: "string" },
            dependencyId: { type: "string" }
          },
          required: ["taskId", "dependencyId"]
        }
      },
      {
        name: "tracker_visualize",
        description: "Visualize tracker tasks as ASCII list.",
        parameters: {
          type: "object",
          properties: {},
          required: []
        }
      }
    ];
  }
1939
+ };
1940
+ }
1941
+ });
1942
+
1943
+ // src/learning/corrections.ts
1944
+ import { access, copyFile, mkdir as mkdir2, readFile as readFile2, writeFile as writeFile2 } from "node:fs/promises";
1945
+ import { constants } from "node:fs";
1946
+ import { join as join2 } from "node:path";
1947
// Current timestamp as an ISO-8601 UTC string (e.g. "2024-01-01T00:00:00.000Z").
function nowIso() {
  const now = new Date();
  return now.toISOString();
}
1950
// True when `path2` exists on disk (any file type); never throws.
async function pathExists(path2) {
  let exists = true;
  try {
    await access(path2, constants.F_OK);
  } catch {
    exists = false;
  }
  return exists;
}
1958
var CorrectionStore;
var init_corrections = __esm({
  "src/learning/corrections.ts"() {
    "use strict";
    // Append-only JSONL store of correction/training entries under
    // <baseDir>/.crew/training-data.jsonl.
    CorrectionStore = class {
      constructor(baseDir = process.cwd()) {
        this.baseDir = baseDir;
        this.stateDir = join2(baseDir, ".crew");
        this.dataPath = join2(this.stateDir, "training-data.jsonl");
      }
      // Create the state directory and an empty data file on first use.
      async ensureReady() {
        if (!await pathExists(this.stateDir)) {
          await mkdir2(this.stateDir, { recursive: true });
        }
        if (!await pathExists(this.dataPath)) {
          await writeFile2(this.dataPath, "", "utf8");
        }
      }
      // Append one entry (stamped with the current ISO timestamp) as a JSON
      // line; returns the stamped payload. An entry's own `timestamp` field,
      // if present, overrides the generated one (spread order).
      async record(entry) {
        await this.ensureReady();
        const payload = {
          timestamp: nowIso(),
          ...entry
        };
        // flag "a" appends rather than overwriting.
        await writeFile2(this.dataPath, `${JSON.stringify(payload)}
`, {
          encoding: "utf8",
          flag: "a"
        });
        return payload;
      }
      // Parse every non-blank JSONL line into an object, in file order.
      // NOTE(review): a corrupted line makes JSON.parse throw for the whole
      // load — callers appear to rely on try/catch upstream.
      async loadAll() {
        await this.ensureReady();
        const raw = await readFile2(this.dataPath, "utf8");
        const lines = raw.split("\n").map((line) => line.trim()).filter(Boolean);
        return lines.map((line) => JSON.parse(line));
      }
      // Cheap overview: total entry count plus the most recent entry.
      async summary() {
        const all = await this.loadAll();
        return {
          count: all.length,
          latest: all.length > 0 ? all[all.length - 1] : void 0
        };
      }
      // Copy the raw JSONL file to `path2`.
      async exportTo(path2) {
        await this.ensureReady();
        await copyFile(this.dataPath, path2);
      }
    };
  }
});
2009
+
2010
+ // src/collections/index.ts
2011
// Lazy module namespace for src/collections/index.ts; __export wires live
// getters so the bindings resolve after init_collections runs.
var collections_exports = {};
__export(collections_exports, {
  buildCollectionIndex: () => buildCollectionIndex,
  searchCollection: () => searchCollection
});
2016
+ import { readdir as readdir2, readFile as readFile3, stat as stat2 } from "node:fs/promises";
2017
+ import { extname, join as join3, relative, resolve as resolve3 } from "node:path";
2018
// Lowercase `text` and split it into search tokens: runs of [a-z0-9_-]
// at least two characters long; every other character acts as whitespace.
function tokenize(text) {
  const cleaned = text.toLowerCase().replace(/[^a-z0-9\s_-]/g, " ");
  const tokens = [];
  for (const tok of cleaned.split(/\s+/)) {
    if (tok.length > 1) tokens.push(tok);
  }
  return tokens;
}
2021
// 32-bit FNV-1a-style hash of `token`, folded into a bucket index in [0, dim).
function hashToken(token, dim) {
  let acc = 2166136261;
  for (let i = 0; i < token.length; i += 1) {
    // XOR the UTF-16 code unit in, then multiply by the FNV prime (mod 2^32).
    acc = Math.imul(acc ^ token.charCodeAt(i), 16777619);
  }
  return Math.abs(acc) % dim;
}
2029
// L2-normalized hashed bag-of-words vector for `text` (feature hashing:
// each token increments the bucket chosen by hashToken).
function toHashedVector(text, dim = 256) {
  const vec = new Float64Array(dim);
  for (const token of tokenize(text)) {
    vec[hashToken(token, dim)] += 1;
  }
  let sumSquares = 0;
  for (const v of vec) sumSquares += v * v;
  const norm = Math.sqrt(sumSquares);
  if (norm > 0) {
    // Normalize in place so dot products become cosine similarities.
    for (let i = 0; i < dim; i++) vec[i] = vec[i] / norm;
  }
  return vec;
}
2043
// Dot product over the shared prefix of two vectors; equals cosine
// similarity when both inputs are already L2-normalized.
function cosineSimilarity(a, b) {
  const overlap = Math.min(a.length, b.length);
  let dot = 0;
  let i = 0;
  while (i < overlap) {
    dot += a[i] * b[i];
    i += 1;
  }
  return dot;
}
2051
// Split file content into retrieval chunks: a new chunk starts at every
// markdown heading (levels 1-4) and whenever a chunk reaches 40 lines
// (unless that line is itself a heading). Blank chunks are dropped.
// startLine is 1-based; score is initialized to 0 for later ranking.
function chunkFile(content, source) {
  const allLines = content.split("\n");
  const out = [];
  let buffer = [];
  let chunkStart = 1;
  const emit = () => {
    const body = buffer.join("\n").trim();
    if (body.length > 0) {
      out.push({ source, startLine: chunkStart, text: body, score: 0 });
    }
    buffer = [];
  };
  allLines.forEach((line, idx) => {
    const isHeading = /^#{1,4}\s/.test(line);
    if (isHeading && buffer.length > 0) {
      emit();
      chunkStart = idx + 1;
    }
    buffer.push(line);
    if (buffer.length >= 40 && !isHeading) {
      emit();
      chunkStart = idx + 2;
    }
  });
  emit();
  return out;
}
2078
// Recursively collect documentation files (and, when includeCode is true,
// source files) under rootDir, skipping well-known build/VCS directories.
// Unreadable directories and unstat-able entries are silently skipped.
async function walkDocs(rootDir, includeCode = false) {
  const found = [];
  const visit = async (dir) => {
    let names;
    try {
      names = await readdir2(dir);
    } catch {
      return;
    }
    for (const name of names) {
      if (IGNORED_DIRS.has(name)) continue;
      const childPath = join3(dir, name);
      let info;
      try {
        info = await stat2(childPath);
      } catch {
        continue;
      }
      if (info.isDirectory()) {
        await visit(childPath);
        continue;
      }
      const ext = extname(name).toLowerCase();
      if (DOC_EXTENSIONS.has(ext) || includeCode && CODE_EXTENSIONS.has(ext)) {
        found.push(childPath);
      }
    }
  };
  await visit(rootDir);
  return found;
}
2109
// Build an in-memory inverted index over the documentation chunks found
// under `paths` (files or directories). Returns { root, fileCount,
// chunkCount, terms, chunks } where `terms` maps token -> Set of chunk
// indices. Unreadable roots/files are skipped silently.
async function buildCollectionIndex(paths, options = {}) {
  const allChunks = [];
  const roots = paths.map((p) => resolve3(p));
  let fileCount = 0;
  for (const rootPath of roots) {
    let st;
    try {
      st = await stat2(rootPath);
    } catch {
      continue;
    }
    // Directories are walked recursively; a single file is indexed directly.
    const files = st.isDirectory() ? await walkDocs(rootPath, Boolean(options.includeCode)) : [rootPath];
    for (const file of files) {
      let content;
      try {
        content = await readFile3(file, "utf8");
      } catch {
        continue;
      }
      fileCount++;
      // Chunk sources are stored relative to the root (or the file's parent
      // directory when the root itself is a single file).
      const rel = relative(resolve3(rootPath, st.isDirectory() ? "." : ".."), file);
      const chunks = chunkFile(content, rel);
      allChunks.push(...chunks);
    }
  }
  // Inverted index: token -> set of indices into allChunks.
  const terms = /* @__PURE__ */ new Map();
  for (let i = 0; i < allChunks.length; i++) {
    const tokens = tokenize(allChunks[i].text);
    for (const token of tokens) {
      if (!terms.has(token)) terms.set(token, /* @__PURE__ */ new Set());
      terms.get(token).add(i);
    }
  }
  return {
    root: roots[0] || ".",
    fileCount,
    chunkCount: allChunks.length,
    terms,
    chunks: allChunks
  };
}
2150
// Rank index chunks against `query` with a hybrid score: a TF-IDF pass over
// the inverted index selects candidates, which are then re-scored as
// 0.7 * (TF-IDF normalized by the best candidate) + 0.3 * cosine similarity
// of hashed bag-of-words vectors. Returns the top `maxResults` hits with
// scores rounded to three decimals.
function searchCollection(index, query, maxResults = 10) {
  const queryTokens = tokenize(query);
  if (queryTokens.length === 0) {
    return { query, hits: [], totalChunks: index.chunkCount };
  }
  // Pass 1: accumulate IDF-weighted scores for every chunk touching a token.
  const scores = new Float64Array(index.chunkCount);
  for (const token of queryTokens) {
    const postings = index.terms.get(token);
    if (!postings) continue;
    const idf = Math.log(1 + index.chunkCount / postings.size);
    for (const chunkIdx of postings) {
      scores[chunkIdx] += idf;
    }
  }
  const candidates = [];
  for (let i = 0; i < scores.length; i++) {
    if (scores[i] > 0) candidates.push({ idx: i, score: scores[i] });
  }
  candidates.sort((x, y) => y.score - x.score);
  // Pass 2: blend normalized TF-IDF with cosine similarity.
  const queryVector = toHashedVector(query);
  const bestTfidf = candidates.length > 0 ? candidates[0].score : 1;
  const rescored = candidates.map(({ idx, score }) => {
    const chunk = index.chunks[idx];
    const cosine = Math.max(0, cosineSimilarity(queryVector, toHashedVector(chunk.text)));
    const tfidfNorm = bestTfidf > 0 ? score / bestTfidf : 0;
    return { idx, score: tfidfNorm * 0.7 + cosine * 0.3 };
  });
  rescored.sort((x, y) => y.score - x.score);
  const hits = rescored.slice(0, maxResults).map(({ idx, score }) => ({
    ...index.chunks[idx],
    score: Math.round(score * 1e3) / 1e3
  }));
  return { query, hits, totalChunks: index.chunkCount };
}
2190
// Module-scope lookup sets, assigned when init_collections runs.
var DOC_EXTENSIONS, CODE_EXTENSIONS, IGNORED_DIRS;
var init_collections = __esm({
  "src/collections/index.ts"() {
    "use strict";
    // Extensions always treated as documentation when indexing.
    DOC_EXTENSIONS = /* @__PURE__ */ new Set([".md", ".mdx", ".txt", ".rst", ".adoc"]);
    // Source-code extensions, only indexed when includeCode is requested.
    CODE_EXTENSIONS = /* @__PURE__ */ new Set([
      ".ts",
      ".tsx",
      ".js",
      ".jsx",
      ".mjs",
      ".cjs",
      ".json",
      ".py",
      ".go",
      ".rs",
      ".java",
      ".kt",
      ".swift",
      ".sh",
      ".bash",
      ".zsh",
      ".yaml",
      ".yml",
      ".toml"
    ]);
    // Directory names skipped entirely during traversal (build output,
    // VCS metadata, caches).
    IGNORED_DIRS = /* @__PURE__ */ new Set([
      "node_modules",
      ".git",
      "dist",
      "build",
      ".crew",
      ".next",
      ".turbo",
      "coverage",
      "__pycache__"
    ]);
  }
});
2229
+
2230
+ // src/executor/agentic-executor.ts
2231
// Lazy module namespace for src/executor/agentic-executor.ts; used as the
// dynamic-import target by spawnAgentTool.
var agentic_executor_exports = {};
__export(agentic_executor_exports, {
  runAgenticWorker: () => runAgenticWorker
});
2235
// Best-effort repair of almost-JSON emitted by an LLM: strips trailing
// commas, converts single quotes to double quotes (only when no double
// quote appears anywhere), quotes bare object keys, and appends missing
// closing braces/brackets. Purely heuristic — it can mangle text inside
// string values, so use it only after JSON.parse has failed.
function repairJson(raw) {
  if (!raw || raw.trim() === "") return "{}";
  let text = raw.trim();
  // Drop trailing commas before a closer (runs before balancing, so a
  // truncated input may still end with a comma after closers are appended).
  text = text.replace(/,\s*([}\]])/g, "$1");
  if (!text.includes('"') && text.includes("'")) {
    text = text.replace(/'/g, '"');
  }
  // Quote bare identifiers used as object keys.
  text = text.replace(/([{,])\s*([a-zA-Z_][a-zA-Z0-9_]*)\s*:/g, '$1"$2":');
  const count = (re) => (text.match(re) || []).length;
  const missingBraces = count(/{/g) - count(/}/g);
  if (missingBraces > 0) text += "}".repeat(missingBraces);
  const missingBrackets = count(/\[/g) - count(/]/g);
  if (missingBrackets > 0) text += "]".repeat(missingBrackets);
  return text;
}
2251
// Render the 10 most recent correction entries from the project's
// CorrectionStore as a markdown section for prompt injection. Any failure
// (missing/corrupt store) yields an empty string.
async function loadCorrectionsContext(projectDir) {
  try {
    const store = new CorrectionStore(projectDir);
    const entries = await store.loadAll();
    if (entries.length === 0) return "";
    const bullets = entries.slice(-10).map((entry) => {
      const tagSuffix = entry.tags?.length ? ` [${entry.tags.join(", ")}]` : "";
      return `- ${entry.prompt.slice(0, 100)}${tagSuffix}: ${entry.corrected.slice(0, 200)}`;
    });
    return "\n\n## Past Corrections (avoid repeating these mistakes)\n" + bullets.join("\n");
  } catch {
    return "";
  }
}
2269
/**
 * Compress tool-call history entries into compact one-line summaries
 * ({ turn, topic, action, outcome }) for context-window-friendly replay.
 * Removed the unused `paramStr` local (a per-entry JSON.stringify whose
 * result was discarded — dead work).
 *
 * @param history - entries shaped { turn, tool, params, result?, error? }
 * @returns summaries; `outcome` is "OK: ..." or "FAIL: ..." truncated to 120 chars
 */
function compressTurnHistory(history) {
  return history.map((h) => {
    // Most informative single parameter for the action label, 80 chars max.
    const keyParam = h.params.file_path || h.params.pattern || h.params.command || h.params.query || h.params.path || h.params.dir_path || "";
    const action = `${h.tool}(${String(keyParam).slice(0, 80)})`;
    const isError = Boolean(h.error);
    const resultText = isError ? h.error : typeof h.result === "object" && h.result && "output" in h.result ? String(h.result.output ?? "") : String(h.result ?? "");
    const outcome = isError ? `FAIL: ${resultText.slice(0, 120)}` : `OK: ${resultText.slice(0, 120)}`;
    // Topic is the edited file's basename when available, else the tool name.
    const topic = h.params.file_path ? String(h.params.file_path).split("/").pop() || "file" : h.tool;
    return { turn: h.turn, topic, action, outcome };
  });
}
2281
// Render one history entry's outcome as plain text truncated to maxLen:
// errors become "ERROR: ...", results with an `output` field use that
// field, anything else is stringified (null/undefined -> "").
function formatToolResult(h, maxLen = 1500) {
  let text;
  if (h.error) {
    text = `ERROR: ${h.error}`;
  } else if (typeof h.result === "object" && h.result && "output" in h.result) {
    text = String(h.result.output ?? "");
  } else {
    text = String(h.result ?? "");
  }
  return text.slice(0, maxLen);
}
2285
// Convert tool-call history into Gemini `contents` turns. To bound context
// size, only the first 3 and last 5 turns are replayed verbatim as
// functionCall/functionResponse pairs; the middle is collapsed into a
// single text summary exchange. The exact payload shape is dictated by the
// Gemini function-calling API — do not restructure.
function historyToGeminiContents(history) {
  if (history.length === 0) return [];
  const contents = [];
  const firstN = 3;
  const lastN = 5;
  const needsCompression = history.length > firstN + lastN;
  const headDetailed = needsCompression ? history.slice(0, firstN) : [];
  const middleTurns = needsCompression ? history.slice(firstN, -lastN) : [];
  const structuredTurns = needsCompression ? history.slice(-lastN) : history;
  // Earliest turns, replayed in full.
  for (const h of headDetailed) {
    contents.push({
      role: "model",
      parts: [{ functionCall: { name: h.tool, args: h.params } }]
    });
    const resultObj = h.error ? { error: h.error } : typeof h.result === "object" && h.result ? h.result : { output: formatToolResult(h) };
    contents.push({
      role: "user",
      parts: [{ functionResponse: { name: h.tool, response: resultObj } }]
    });
  }
  // Middle turns collapsed to one summary + acknowledgment exchange.
  if (middleTurns.length > 0) {
    const compressed = compressTurnHistory(middleTurns);
    const summary = compressed.map((c) => `[${c.turn}] ${c.action} \u2192 ${c.outcome}`).join("\n");
    contents.push(
      { role: "model", parts: [{ text: `[Earlier execution summary]
${summary}` }] },
      { role: "user", parts: [{ text: "Acknowledged. Continue with the task." }] }
    );
  }
  // Most recent turns, replayed in full.
  for (const h of structuredTurns) {
    contents.push({
      role: "model",
      parts: [{ functionCall: { name: h.tool, args: h.params } }]
    });
    const resultObj = h.error ? { error: h.error } : typeof h.result === "object" && h.result ? h.result : { output: formatToolResult(h) };
    contents.push({
      role: "user",
      parts: [{ functionResponse: { name: h.tool, response: resultObj } }]
    });
  }
  return contents;
}
2327
// Convert tool-call history into OpenAI chat messages. Mirrors
// historyToGeminiContents: first 3 and last 5 turns replayed verbatim as
// assistant tool_calls + tool responses, middle collapsed into a summary
// exchange. Deterministic call ids (`call_<turn>_<tool>`) pair each tool
// message with its assistant tool_call. Shape is dictated by the OpenAI
// chat-completions tool-calling API — do not restructure.
function historyToOpenAIMessages(history) {
  if (history.length === 0) return [];
  const messages = [];
  const firstN = 3;
  const lastN = 5;
  const needsCompression = history.length > firstN + lastN;
  const headDetailed = needsCompression ? history.slice(0, firstN) : [];
  const middleTurns = needsCompression ? history.slice(firstN, -lastN) : [];
  const structuredTurns = needsCompression ? history.slice(-lastN) : history;
  // Earliest turns, replayed in full.
  for (const h of headDetailed) {
    const callId = `call_${h.turn}_${h.tool}`;
    messages.push({
      role: "assistant",
      tool_calls: [{
        id: callId,
        type: "function",
        function: { name: h.tool, arguments: JSON.stringify(h.params) }
      }]
    });
    messages.push({
      role: "tool",
      tool_call_id: callId,
      content: formatToolResult(h)
    });
  }
  // Middle turns collapsed to one summary + acknowledgment exchange.
  if (middleTurns.length > 0) {
    const compressed = compressTurnHistory(middleTurns);
    const summary = compressed.map((c) => `[${c.turn}] ${c.action} \u2192 ${c.outcome}`).join("\n");
    messages.push(
      { role: "assistant", content: `[Earlier execution summary]
${summary}` },
      { role: "user", content: "Acknowledged. Continue with the task." }
    );
  }
  // Most recent turns, replayed in full.
  for (const h of structuredTurns) {
    const callId = `call_${h.turn}_${h.tool}`;
    messages.push({
      role: "assistant",
      tool_calls: [{
        id: callId,
        type: "function",
        function: { name: h.tool, arguments: JSON.stringify(h.params) }
      }]
    });
    messages.push({
      role: "tool",
      tool_call_id: callId,
      content: formatToolResult(h)
    });
  }
  return messages;
}
2379
// Convert tool-call history into Anthropic Messages-API turns. Mirrors the
// Gemini/OpenAI converters: first 3 and last 5 turns replayed verbatim as
// assistant tool_use + user tool_result blocks, middle collapsed into a
// summary exchange. Deterministic ids (`tu_<turn>_<tool>`) pair each
// tool_result with its tool_use. Shape is dictated by the Anthropic
// tool-use API — do not restructure.
function historyToAnthropicMessages(history) {
  if (history.length === 0) return [];
  const messages = [];
  const firstN = 3;
  const lastN = 5;
  const needsCompression = history.length > firstN + lastN;
  const headDetailed = needsCompression ? history.slice(0, firstN) : [];
  const middleTurns = needsCompression ? history.slice(firstN, -lastN) : [];
  const structuredTurns = needsCompression ? history.slice(-lastN) : history;
  // Earliest turns, replayed in full.
  for (const h of headDetailed) {
    const useId = `tu_${h.turn}_${h.tool}`;
    messages.push({
      role: "assistant",
      content: [{
        type: "tool_use",
        id: useId,
        name: h.tool,
        input: h.params
      }]
    });
    messages.push({
      role: "user",
      content: [{
        type: "tool_result",
        tool_use_id: useId,
        content: formatToolResult(h)
      }]
    });
  }
  // Middle turns collapsed to one summary + acknowledgment exchange.
  if (middleTurns.length > 0) {
    const compressed = compressTurnHistory(middleTurns);
    const summary = compressed.map((c) => `[${c.turn}] ${c.action} \u2192 ${c.outcome}`).join("\n");
    messages.push(
      { role: "assistant", content: `[Earlier execution summary]
${summary}` },
      { role: "user", content: "Acknowledged. Continue with the task." }
    );
  }
  // Most recent turns, replayed in full.
  for (const h of structuredTurns) {
    const useId = `tu_${h.turn}_${h.tool}`;
    messages.push({
      role: "assistant",
      content: [{
        type: "tool_use",
        id: useId,
        name: h.tool,
        input: h.params
      }]
    });
    messages.push({
      role: "user",
      content: [{
        type: "tool_result",
        tool_use_id: useId,
        content: formatToolResult(h)
      }]
    });
  }
  return messages;
}
2439
/**
 * Picks an LLM provider based on available API keys.
 *
 * Resolution order:
 *  1. If a model is requested (argument or CREW_EXECUTION_MODEL), scan
 *     PROVIDER_ORDER for a provider whose modelPrefix matches the requested
 *     model and whose key is set — the requested model name wins.
 *  2. Otherwise fall back to tier preference: providers of the preferred tier
 *     (default "standard") first, then the rest, taking the first with a key.
 *
 * @param modelOverride Optional explicit model name.
 * @param preferTier    Optional tier ("heavy" | "standard" | "fast").
 * @returns {key, model, driver, apiUrl, id} or null when no key is configured.
 */
function resolveProvider(modelOverride, preferTier) {
  // A key is usable if present, plausibly long enough, and not shadowed
  // (GOOGLE_API_KEY is skipped when GEMINI_API_KEY is also set, so the
  // duplicate gemini entry doesn't double-match).
  const usableKey = (p) => {
    const key = process.env[p.envKey];
    if (!key || key.length < 5) return void 0;
    if (p.envKey === "GOOGLE_API_KEY" && process.env.GEMINI_API_KEY) return void 0;
    return key;
  };
  const requested = (modelOverride || process.env.CREW_EXECUTION_MODEL || "").trim().toLowerCase();
  if (requested) {
    for (const p of PROVIDER_ORDER) {
      const key = usableKey(p);
      if (key === void 0) continue;
      if (p.modelPrefix && requested.includes(p.modelPrefix)) {
        return { key, model: modelOverride || process.env.CREW_EXECUTION_MODEL || p.model, driver: p.driver, apiUrl: p.apiUrl, id: p.id };
      }
    }
  }
  const tier = preferTier || "standard";
  const ordered = [
    ...PROVIDER_ORDER.filter((p) => p.tier === tier),
    ...PROVIDER_ORDER.filter((p) => p.tier !== tier)
  ];
  for (const p of ordered) {
    const key = usableKey(p);
    if (key !== void 0) {
      return { key, model: p.model, driver: p.driver, apiUrl: p.apiUrl, id: p.id };
    }
  }
  return null;
}
2464
/**
 * Executes one LLM turn against the Google Gemini generateContent API,
 * optionally streaming the response over SSE and echoing text to stdout.
 *
 * The system prompt is prepended to the task inside the first user message
 * (no dedicated system role is used). Prior tool-call history is spliced in
 * as additional `contents`, followed by a continuation prompt.
 *
 * @param fullTask        Task text for this turn.
 * @param tools           Tool declarations ({name, description, parameters}).
 * @param key             API key, passed as a URL query parameter.
 * @param model           Model id interpolated into the endpoint path.
 * @param systemPrompt    Instruction text prepended to the task.
 * @param stream          True → :streamGenerateContent with alt=sse.
 * @param images          Optional inline images ({mimeType, data base64}).
 * @param historyMessages Prior turns already in Gemini `contents` shape.
 * @returns {toolCalls, response, cost} when the model requested tools,
 *          otherwise {response, status: "COMPLETE", cost}.
 * @throws Error on non-2xx HTTP responses (status + first 300 chars of body).
 */
async function executeStreamingGeminiTurn(fullTask, tools, key, model, systemPrompt, stream, images, historyMessages) {
  // Gemini expects tools as functionDeclarations (same JSON-schema params).
  const functionDeclarations = tools.map((t) => ({
    name: t.name,
    description: t.description,
    parameters: t.parameters
  }));
  const userParts = [{ text: `${systemPrompt}

Task:
${fullTask}` }];
  if (images?.length) {
    for (const img of images) {
      userParts.push({ inlineData: { mimeType: img.mimeType, data: img.data } });
    }
  }
  const contents = [
    { role: "user", parts: userParts },
    // Insert structured history (tool call/result pairs)
    ...historyMessages || [],
    // Continuation prompt if we have history
    ...historyMessages?.length ? [{ role: "user", parts: [{ text: "Continue executing the task based on the results above." }] }] : []
  ];
  const endpoint = stream ? "streamGenerateContent" : "generateContent";
  const url = `https://generativelanguage.googleapis.com/v1beta/models/${model}:${endpoint}?key=${encodeURIComponent(key)}${stream ? "&alt=sse" : ""}`;
  const res = await fetch(url, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    // 120s hard timeout on the whole request/stream.
    signal: AbortSignal.timeout(12e4),
    body: JSON.stringify({
      contents,
      tools: [{ functionDeclarations }],
      generationConfig: { temperature: 0.3, maxOutputTokens: 8192 }
    })
  });
  if (!res.ok) {
    const err = await res.text();
    throw new Error(`Gemini API ${res.status}: ${err.slice(0, 300)}`);
  }
  if (stream && res.body) {
    // SSE path: accumulate text (echoing to stdout), tool calls, and usage.
    let fullText = "";
    const toolCalls2 = [];
    let totalCost = 0;
    const reader = res.body.getReader();
    const decoder = new TextDecoder();
    let buffer = "";
    try {
      while (true) {
        const { done, value } = await reader.read();
        if (done) break;
        buffer += decoder.decode(value, { stream: true });
        const lines = buffer.split("\n");
        // Keep the trailing partial line in the buffer for the next chunk.
        buffer = lines.pop() || "";
        for (const line of lines) {
          if (!line.startsWith("data: ")) continue;
          const jsonStr = line.slice(6).trim();
          if (!jsonStr || jsonStr === "[DONE]") continue;
          try {
            const chunk = JSON.parse(jsonStr);
            const parts2 = chunk?.candidates?.[0]?.content?.parts ?? [];
            for (const part of parts2) {
              if (part.text) {
                process.stdout.write(part.text);
                fullText += part.text;
              }
              if (part.functionCall) {
                toolCalls2.push({
                  tool: part.functionCall.name || "",
                  params: part.functionCall.args || {}
                });
              }
            }
            const usage2 = chunk?.usageMetadata;
            if (usage2) {
              // USD estimate; rates look like gemini-flash pricing per 1M
              // tokens ($0.075 in / $0.30 out) — presumably stale if the
              // model changes. TODO(review): confirm pricing source.
              totalCost = (usage2.promptTokenCount || 0) * 0.075 / 1e6 + (usage2.candidatesTokenCount || 0) * 0.3 / 1e6;
            }
          } catch {
            // Malformed SSE chunk — skip it; later chunks may still parse.
          }
        }
      }
    } finally {
      reader.releaseLock();
    }
    if (fullText) process.stdout.write("\n");
    if (toolCalls2.length > 0) {
      return { toolCalls: toolCalls2, response: fullText, cost: totalCost };
    }
    return { response: fullText, status: "COMPLETE", cost: totalCost };
  }
  // Non-streaming path: single JSON body.
  const data = await res.json();
  const parts = data?.candidates?.[0]?.content?.parts ?? [];
  const usage = data?.usageMetadata ?? {};
  const cost = (usage.promptTokenCount || 0) * 0.075 / 1e6 + (usage.candidatesTokenCount || 0) * 0.3 / 1e6;
  const toolCalls = [];
  for (const part of parts) {
    if (part.functionCall) {
      toolCalls.push({ tool: part.functionCall.name || "", params: part.functionCall.args || {} });
    }
  }
  if (toolCalls.length > 0) return { toolCalls, response: "", cost };
  const textPart = parts.find((p) => p.text);
  return { response: textPart?.text ?? "", status: "COMPLETE", cost };
}
2566
/**
 * Executes one LLM turn against an OpenAI-compatible chat-completions
 * endpoint (OpenAI, Groq, DeepSeek, xAI, OpenRouter, ...), optionally
 * streaming via SSE and echoing text deltas to stdout.
 *
 * @param fullTask        Task text for this turn.
 * @param tools           Tool declarations ({name, description, parameters}).
 * @param apiUrl          Full chat-completions endpoint URL.
 * @param apiKey          Bearer token.
 * @param model           Model name sent in the request body.
 * @param systemPrompt    Sent as the system message.
 * @param stream          True → SSE streaming response.
 * @param images          Optional images, sent as data-URL image_url parts.
 * @param historyMessages Prior turns in OpenAI message shape
 *                        (assistant tool_calls + tool results).
 * @returns {toolCalls, response, cost} when tools were requested, otherwise
 *          {response, status: "COMPLETE", cost}. cost is always 0 here —
 *          usage is not parsed on this path.
 * @throws Error on non-2xx HTTP responses (status + first 300 chars of body).
 */
async function executeStreamingOpenAITurn(fullTask, tools, apiUrl, apiKey, model, systemPrompt, stream, images, historyMessages) {
  let userContent = fullTask;
  if (images?.length) {
    // Vision input: switch user content to the multi-part array form.
    const parts = [{ type: "text", text: fullTask }];
    for (const img of images) {
      parts.push({
        type: "image_url",
        image_url: { url: `data:${img.mimeType};base64,${img.data}` }
      });
    }
    userContent = parts;
  }
  const messages = [
    { role: "system", content: systemPrompt },
    { role: "user", content: userContent },
    // Insert structured history (assistant tool_calls + tool results)
    ...historyMessages || []
  ];
  const openaiTools = tools.map((t) => ({
    type: "function",
    function: { name: t.name, description: t.description, parameters: t.parameters }
  }));
  // gpt-5/gpt-6 families only accept the default temperature (1).
  const temp = model?.startsWith?.("gpt-5") || model?.startsWith?.("gpt-6") ? 1 : 0.3;
  const body = {
    model,
    messages,
    tools: openaiTools,
    temperature: temp,
    max_tokens: 8192,
    stream
  };
  const res = await fetch(apiUrl, {
    method: "POST",
    headers: {
      "Content-Type": "application/json",
      "Authorization": `Bearer ${apiKey}`
    },
    // 120s hard timeout on the whole request/stream.
    signal: AbortSignal.timeout(12e4),
    body: JSON.stringify(body)
  });
  if (!res.ok) {
    const err = await res.text();
    throw new Error(`OpenAI API ${res.status}: ${err.slice(0, 300)}`);
  }
  if (stream && res.body) {
    let fullText = "";
    // Tool-call fragments arrive as deltas keyed by index; accumulate the
    // name and argument-JSON pieces per index until the stream ends.
    const toolCallAccumulator = /* @__PURE__ */ new Map();
    const reader = res.body.getReader();
    const decoder = new TextDecoder();
    let buffer = "";
    try {
      while (true) {
        const { done, value } = await reader.read();
        if (done) break;
        buffer += decoder.decode(value, { stream: true });
        const lines = buffer.split("\n");
        // Keep the trailing partial line in the buffer for the next chunk.
        buffer = lines.pop() || "";
        for (const line of lines) {
          if (!line.startsWith("data: ")) continue;
          const jsonStr = line.slice(6).trim();
          if (!jsonStr || jsonStr === "[DONE]") continue;
          try {
            const chunk = JSON.parse(jsonStr);
            const delta = chunk?.choices?.[0]?.delta;
            if (!delta) continue;
            if (delta.content) {
              process.stdout.write(delta.content);
              fullText += delta.content;
            }
            if (delta.tool_calls) {
              for (const tc of delta.tool_calls) {
                const idx = tc.index ?? 0;
                if (!toolCallAccumulator.has(idx)) {
                  toolCallAccumulator.set(idx, { name: "", args: "" });
                }
                const acc = toolCallAccumulator.get(idx);
                if (tc.function?.name) acc.name += tc.function.name;
                if (tc.function?.arguments) acc.args += tc.function.arguments;
              }
            }
          } catch {
            // Malformed SSE chunk — skip it.
          }
        }
      }
    } finally {
      reader.releaseLock();
    }
    if (fullText) process.stdout.write("\n");
    const toolCalls = [];
    for (const [, tc] of toolCallAccumulator) {
      if (tc.name) {
        let params = {};
        try {
          // repairJson attempts to fix truncated/malformed argument JSON;
          // on failure the tool is still emitted with empty params.
          params = JSON.parse(repairJson(tc.args));
        } catch {
        }
        toolCalls.push({ tool: tc.name, params });
      }
    }
    if (toolCalls.length > 0) return { toolCalls, response: fullText, cost: 0 };
    return { response: fullText, status: "COMPLETE", cost: 0 };
  }
  // Non-streaming path: single JSON body.
  const data = await res.json();
  const choice = data?.choices?.[0];
  const msg = choice?.message;
  if (msg?.tool_calls?.length > 0) {
    const toolCalls = msg.tool_calls.map((tc) => {
      let params = {};
      try {
        params = JSON.parse(repairJson(tc.function?.arguments || "{}"));
      } catch {
      }
      return { tool: tc.function?.name || "", params };
    });
    return { toolCalls, response: msg?.content || "", cost: 0 };
  }
  return { response: msg?.content || "", status: "COMPLETE", cost: 0 };
}
2684
/**
 * Executes one LLM turn against the Anthropic Messages API, optionally
 * streaming via SSE and echoing text deltas to stdout.
 *
 * @param fullTask        Task text for this turn.
 * @param tools           Tool declarations; `parameters` maps to input_schema.
 * @param apiKey          Sent as the x-api-key header.
 * @param model           Model name sent in the request body.
 * @param systemPrompt    Sent as the top-level `system` field.
 * @param stream          True → SSE streaming response.
 * @param images          Optional images, sent as base64 image source blocks.
 * @param historyMessages Prior turns in Anthropic shape
 *                        (assistant tool_use + user tool_result).
 * @returns {toolCalls, response, cost} when tools were requested, otherwise
 *          {response, status: "COMPLETE", cost}.
 * @throws Error on non-2xx HTTP responses (status + first 300 chars of body).
 */
async function executeStreamingAnthropicTurn(fullTask, tools, apiKey, model, systemPrompt, stream, images, historyMessages) {
  let userContent = fullTask;
  if (images?.length) {
    // Vision input: switch user content to the multi-part array form.
    const parts = [{ type: "text", text: fullTask }];
    for (const img of images) {
      parts.push({
        type: "image",
        source: { type: "base64", media_type: img.mimeType, data: img.data }
      });
    }
    userContent = parts;
  }
  const anthropicTools = tools.map((t) => ({
    name: t.name,
    description: t.description,
    input_schema: t.parameters
  }));
  const body = {
    model,
    max_tokens: 8192,
    system: systemPrompt,
    messages: [
      { role: "user", content: userContent },
      // Insert structured history (assistant tool_use + user tool_result)
      ...historyMessages || []
    ],
    temperature: 0.3,
    tools: anthropicTools,
    stream
  };
  const res = await fetch("https://api.anthropic.com/v1/messages", {
    method: "POST",
    headers: {
      "Content-Type": "application/json",
      "x-api-key": apiKey,
      "anthropic-version": "2023-06-01"
    },
    // 120s hard timeout on the whole request/stream.
    signal: AbortSignal.timeout(12e4),
    body: JSON.stringify(body)
  });
  if (!res.ok) {
    const err = await res.text();
    throw new Error(`Anthropic API ${res.status}: ${err.slice(0, 300)}`);
  }
  if (stream && res.body) {
    let fullText = "";
    // Tool-use blocks are announced by content_block_start and filled by
    // input_json_delta events, keyed by content-block index.
    const toolBlocks = /* @__PURE__ */ new Map();
    let totalCost = 0;
    const reader = res.body.getReader();
    const decoder = new TextDecoder();
    let buffer = "";
    try {
      while (true) {
        const { done, value } = await reader.read();
        if (done) break;
        buffer += decoder.decode(value, { stream: true });
        const lines = buffer.split("\n");
        // Keep the trailing partial line in the buffer for the next chunk.
        buffer = lines.pop() || "";
        for (const line of lines) {
          if (!line.startsWith("data: ")) continue;
          const jsonStr = line.slice(6).trim();
          if (!jsonStr) continue;
          try {
            const event = JSON.parse(jsonStr);
            if (event.type === "content_block_start") {
              if (event.content_block?.type === "tool_use") {
                toolBlocks.set(event.index, {
                  name: event.content_block.name || "",
                  inputJson: ""
                });
              }
            }
            if (event.type === "content_block_delta") {
              if (event.delta?.type === "text_delta" && event.delta.text) {
                process.stdout.write(event.delta.text);
                fullText += event.delta.text;
              }
              if (event.delta?.type === "input_json_delta" && event.delta.partial_json) {
                const block = toolBlocks.get(event.index);
                if (block) block.inputJson += event.delta.partial_json;
              }
            }
            if (event.type === "message_delta" && event.usage) {
              // USD estimate; rates look like Sonnet-class pricing per 1M
              // tokens ($3 in / $15 out) — TODO(review): confirm against
              // current price list.
              totalCost = (event.usage.input_tokens || 0) * 3 / 1e6 + (event.usage.output_tokens || 0) * 15 / 1e6;
            }
          } catch {
            // Malformed SSE chunk — skip it.
          }
        }
      }
    } finally {
      reader.releaseLock();
    }
    if (fullText) process.stdout.write("\n");
    const toolCalls = [];
    for (const [, block] of toolBlocks) {
      if (block.name) {
        let params = {};
        try {
          // repairJson attempts to fix truncated/malformed input JSON;
          // on failure the tool is still emitted with empty params.
          params = JSON.parse(repairJson(block.inputJson));
        } catch {
        }
        toolCalls.push({ tool: block.name, params });
      }
    }
    if (toolCalls.length > 0) return { toolCalls, response: fullText, cost: totalCost };
    return { response: fullText, status: "COMPLETE", cost: totalCost };
  }
  // Non-streaming path: single JSON body with content blocks.
  const data = await res.json();
  const usage = data?.usage || {};
  const cost = (usage.input_tokens || 0) * 3 / 1e6 + (usage.output_tokens || 0) * 15 / 1e6;
  const content = data?.content || [];
  const toolUseBlocks = content.filter((b) => b.type === "tool_use");
  const textBlocks = content.filter((b) => b.type === "text");
  const textResponse = textBlocks.map((b) => b.text).join("\n");
  if (toolUseBlocks.length > 0) {
    const toolCalls = toolUseBlocks.map((b) => ({ tool: b.name, params: b.input || {} }));
    return { toolCalls, response: textResponse, cost };
  }
  return { response: textResponse, status: "COMPLETE", cost };
}
2804
/**
 * Resolves a provider for the requested model and dispatches one LLM turn
 * to the matching driver, converting the turn history into that driver's
 * native message format first.
 *
 * @param task         Task text for this turn.
 * @param tools        Tool declarations passed through to the driver.
 * @param history      Internal turn history (converted per driver).
 * @param model        Optional model override for provider resolution.
 * @param systemPrompt System prompt passed through to the driver.
 * @param stream       Whether to stream the response.
 * @param images       Optional images for the first turn.
 * @throws Error when no provider has an API key, or the driver is unknown.
 */
async function executeLLMTurn(task, tools, history, model, systemPrompt, stream, images) {
  const provider = resolveProvider(model);
  if (!provider) {
    throw new Error(
      'No LLM providers available. Set at least one API key:\n \u2192 GEMINI_API_KEY (free tier \u2014 https://aistudio.google.com/apikey)\n \u2192 GROQ_API_KEY (free \u2014 https://console.groq.com/keys)\n \u2192 XAI_API_KEY ($5/mo free credits \u2014 https://console.x.ai)\nOr any of: OPENAI_API_KEY, ANTHROPIC_API_KEY, DEEPSEEK_API_KEY, OPENROUTER_API_KEY\nRun "crew doctor" to check your setup.'
    );
  }
  const { key, model: effectiveModel, driver, apiUrl } = provider;
  switch (driver) {
    case "gemini":
      return executeStreamingGeminiTurn(task, tools, key, effectiveModel, systemPrompt, stream, images, historyToGeminiContents(history));
    case "anthropic":
      return executeStreamingAnthropicTurn(task, tools, key, effectiveModel, systemPrompt, stream, images, historyToAnthropicMessages(history));
    case "openai":
    case "openrouter":
      return executeStreamingOpenAITurn(task, tools, apiUrl, key, effectiveModel, systemPrompt, stream, images, historyToOpenAIMessages(history));
    default:
      throw new Error(`Unsupported driver: ${driver}`);
  }
}
2826
/**
 * Builds a task-relevant codebase context snippet by indexing the project
 * directory and retrieving the top-5 matching chunks for the task text.
 * Best-effort: any failure (or an empty index / no hits) yields "".
 *
 * @param task       Query text used to rank chunks.
 * @param projectDir Directory to index (code included).
 * @returns Context string prefixed with two newlines, or "".
 */
async function buildRepoMapContext(task, projectDir) {
  try {
    // Lazy-load the collections module so indexing cost is only paid here.
    const mod = await Promise.resolve().then(() => (init_collections(), collections_exports));
    const index = await mod.buildCollectionIndex([projectDir], { includeCode: true });
    if (index.chunkCount === 0) return "";
    const { hits } = mod.searchCollection(index, task, 5);
    if (hits.length === 0) return "";
    const sections = [];
    for (const h of hits) {
      sections.push(`--- ${h.source}:${h.startLine} (score: ${h.score}) ---\n${h.text.slice(0, 600)}`);
    }
    return `\n\nRelevant codebase context (${index.fileCount} files indexed, ${index.chunkCount} chunks):\n${sections.join("\n\n")}`;
  } catch {
    // Context enrichment is optional — never let it break execution.
    return "";
  }
}
2845
/**
 * Runs a tool through the adapter with up to MAX_RETRIES attempts, applying
 * targeted recovery between attempts:
 *  - "String not found" + old_string: re-read the file and return its current
 *    content so the LLM can correct the edit; otherwise trim old_string and retry.
 *  - "No such file" + file_path: strip a leading "./" and retry.
 *  - Any other error: fail immediately (no blind retries).
 * Note: recovery mutates `params` in place so the retry sees the fixed values.
 *
 * @param adapter Tool adapter exposing executeTool(name, params).
 * @param name    Tool name.
 * @param params  Tool parameters (may be mutated, see above).
 * @param verbose When true, logs each retry to the console.
 * @returns {output, success, error?}
 */
async function executeToolWithRetry(adapter, name, params, verbose) {
  for (let attempt = 1; attempt <= MAX_RETRIES; attempt++) {
    const result = await adapter.executeTool(name, params);
    if (result.success) {
      return { output: result.output ?? "", success: true };
    }
    // Last attempt: no recovery left, surface the failure.
    if (attempt >= MAX_RETRIES) {
      return { output: result.output ?? "", success: false, error: result.error };
    }
    if (verbose) {
      console.log(` \u27F3 Retry ${attempt}/${MAX_RETRIES - 1} for ${name}: ${(result.error || "").slice(0, 80)}`);
    }
    const errText = result.error || "";
    if (errText.includes("String not found") && params.old_string) {
      if (params.file_path) {
        try {
          const freshRead = await adapter.executeTool("read_file", { file_path: params.file_path });
          if (freshRead.success && freshRead.output) {
            // Hand the current file content back to the caller so the next
            // LLM turn can produce a matching old_string.
            return {
              output: `File content has changed. Current content of ${params.file_path}:\n${freshRead.output.slice(0, 3e3)}`,
              success: false,
              error: `String not found in ${params.file_path}. File was re-read \u2014 content returned above for correction.`
            };
          }
        } catch {
          // Re-read is best-effort; fall through to the trim retry.
        }
      }
      params.old_string = params.old_string.trim();
    } else if (errText.includes("No such file") && params.file_path) {
      params.file_path = params.file_path.replace(/^\.\//, "");
    } else {
      return { output: result.output ?? "", success: false, error: result.error };
    }
  }
  return { output: "", success: false, error: "Max retries exceeded" };
}
2882
/**
 * Top-level agentic execution entry point: wires tools, context enrichment,
 * and the autonomous turn loop together for a single task.
 *
 * Flow: build tool adapter → enrich the task with repo-map and corrections
 * context (both best-effort) → run executeAutonomous, where each LLM turn
 * goes through executeLLMTurn and each tool call goes through
 * executeToolWithRetry → aggregate cost/usage into a result summary.
 *
 * @param task    Task text to execute.
 * @param sandbox Sandbox passed to GeminiToolAdapter; baseDir used as
 *                projectDir fallback.
 * @param options {systemPrompt?, model?, maxTurns?, projectDir?, verbose?,
 *                stream?, tier?, images?, priorDiscoveredFiles?, onToolCall?}
 * @returns {success, output, cost, turns, toolsUsed, providerId, modelUsed,
 *          filesDiscovered, discoveredFiles, history, stopReason}
 */
async function runAgenticWorker(task, sandbox, options = {}) {
  const adapter = new GeminiToolAdapter(sandbox);
  const allTools = adapter.getToolDeclarations();
  const systemPrompt = options.systemPrompt || L3_SYSTEM_PROMPT;
  const model = options.model || process.env.CREW_EXECUTION_MODEL || "";
  const maxTurns = options.maxTurns ?? 25;
  const projectDir = options.projectDir || sandbox.baseDir || process.cwd();
  const verbose = options.verbose ?? Boolean(process.env.CREW_DEBUG);
  // Streaming is on unless explicitly disabled via option or CREW_NO_STREAM.
  const stream = options.stream ?? !process.env.CREW_NO_STREAM;
  // Rehydrate file-discovery state from a prior session when available.
  const jit = options.priorDiscoveredFiles?.length ? JITContextTracker.fromPrior(options.priorDiscoveredFiles) : new JITContextTracker();
  // Resolved here only for reporting (providerId/modelUsed); executeLLMTurn
  // re-resolves per turn.
  const resolvedProvider = resolveProvider(model, options.tier);
  if (verbose) {
    const prov = resolvedProvider ? `${resolvedProvider.id}/${resolvedProvider.model}` : "none";
    console.log(`[AgenticExecutor] Provider: ${prov} | Stream: ${stream} | Tools: ${allTools.length}`);
  }
  let enrichedTask = task;
  // Best-effort task enrichment: repo-map retrieval context...
  try {
    const repoContext = await buildRepoMapContext(task, projectDir);
    if (repoContext) {
      enrichedTask = `${task}${repoContext}`;
      if (verbose) {
        console.log(`[AgenticExecutor] Repo-map: ${repoContext.length} chars injected`);
      }
    }
  } catch {
  }
  // ...and persisted user corrections, both silently skipped on failure.
  try {
    const correctionsContext = await loadCorrectionsContext(projectDir);
    if (correctionsContext) {
      enrichedTask = `${enrichedTask}${correctionsContext}`;
      if (verbose) {
        console.log(`[AgenticExecutor] Corrections context injected`);
      }
    }
  } catch {
  }
  if (verbose) {
    console.log(`[AgenticExecutor] ${allTools.length} tools: ${allTools.map((t) => t.name).join(", ")}`);
  }
  let totalCost = 0;
  const toolsUsed = /* @__PURE__ */ new Set();
  // Tool executor handed to the autonomous loop: tracks usage, retries,
  // records discovered files, and optionally logs each call.
  const executeTool = async (name, params) => {
    toolsUsed.add(name);
    options.onToolCall?.(name, params);
    if (verbose) {
      const paramStr = JSON.stringify(params).slice(0, 120);
      process.stdout.write(` \u{1F527} ${name}(${paramStr})...`);
    }
    const result2 = await executeToolWithRetry(adapter, name, params, verbose);
    if (name === "read_file" && result2.success && result2.output) {
      const outputLen = result2.output.length;
      // Nudge the LLM toward ranged reads when a large read was truncated.
      if (outputLen > 8e3 && (result2.output.includes("... (truncated)") || result2.output.includes("content truncated"))) {
        result2.output += "\n\n[NOTE: File output was truncated. Use line_start and line_end parameters to read specific sections.]";
      }
    }
    jit.trackFromToolResult(name, params, result2);
    if (verbose) {
      const status = result2.success ? "\u2713" : "\u2717";
      const preview = (result2.output || result2.error || "").slice(0, 80).replace(/\n/g, " ");
      console.log(` ${status} ${preview}`);
    }
    return result2;
  };
  let turnCount = 0;
  const result = await executeAutonomous(
    enrichedTask,
    // LLM turn callback: periodically (every 3rd turn after the first)
    // re-injects JIT context built from files discovered so far.
    async (prompt, tools, history) => {
      turnCount++;
      let taskWithJIT = enrichedTask;
      if (turnCount > 1 && turnCount % 3 === 0 && jit.fileCount > 0) {
        try {
          const jitContext = await jit.buildJITContext(projectDir);
          if (jitContext) {
            taskWithJIT = `${enrichedTask}${jitContext}`;
            if (verbose) {
              console.log(` [JIT] Injected context from ${jit.fileCount} discovered files`);
            }
          }
        } catch {
        }
      }
      // Images are only attached on the first turn.
      const turnImages = turnCount === 1 ? options.images : void 0;
      const turnResult = await executeLLMTurn(taskWithJIT, allTools, history, model, systemPrompt, stream, turnImages);
      totalCost += turnResult.cost || 0;
      return {
        toolCalls: turnResult.toolCalls,
        response: turnResult.response,
        status: turnResult.status
      };
    },
    async (name, params) => {
      return await executeTool(name, params);
    },
    {
      maxTurns,
      tools: allTools,
      onProgress: verbose ? (turn, action) => {
        console.log(` [Turn ${turn}] ${action}`);
      } : void 0
    }
  );
  return {
    success: result.success ?? false,
    // Prefer the final LLM response; fall back to the raw tool-result log.
    output: result.finalResponse ?? result.history?.map((h) => String(h.result)).join("\n") ?? "",
    cost: totalCost,
    turns: result.turns,
    toolsUsed: Array.from(toolsUsed),
    providerId: resolvedProvider?.id,
    modelUsed: resolvedProvider?.model,
    filesDiscovered: jit.fileCount,
    discoveredFiles: jit.toFileList(),
    history: result.history,
    stopReason: result.reason
  };
}
2997
// Module-scope bindings assigned inside init_agentic_executor() below
// (lazy __esm initialization pattern produced by the bundler).
var L3_SYSTEM_PROMPT, PROVIDER_ORDER, JITContextTracker, MAX_RETRIES;
2998
+ var init_agentic_executor = __esm({
2999
+ "src/executor/agentic-executor.ts"() {
3000
+ "use strict";
3001
+ init_autonomous_loop();
3002
+ init_crew_adapter();
3003
+ init_corrections();
3004
+ L3_SYSTEM_PROMPT = `You are a senior AI engineer executing coding tasks autonomously.
3005
+
3006
+ ## Cognitive Loop: THINK \u2192 ACT \u2192 OBSERVE
3007
+
3008
+ Every turn, follow this exact pattern:
3009
+
3010
+ **THINK** (internal reasoning, 1-3 sentences):
3011
+ - What is the current state? What do I know from previous tool results?
3012
+ - What is the minimal next action to make progress?
3013
+ - Am I done? If so, summarize and stop.
3014
+
3015
+ **ACT** (one or more tool calls):
3016
+ - Choose the most targeted tool for the job.
3017
+ - Prefer small, verifiable steps over large changes.
3018
+ - When multiple independent lookups are needed, call multiple tools in parallel.
3019
+
3020
+ **OBSERVE** (after tools return):
3021
+ - Did the tool succeed or fail? What does the output tell me?
3022
+ - Do I need to adjust my approach?
3023
+
3024
+ ## Operating Principles
3025
+
3026
+ - Match the request. Do what was asked \u2014 nothing more. A bug fix is just a bug fix. Don't refactor adjacent code, add docstrings to unchanged functions, or suggest rewrites beyond the task scope.
3027
+ - Simplest approach first. Don't over-engineer. Three similar lines are better than a premature abstraction. Only add error handling, validation, or fallbacks at system boundaries (user input, external APIs), not for internal guarantees.
3028
+ - Own mistakes. If a tool call fails or your approach is wrong, say so briefly and try a different approach. Don't repeat the same failing action. If the same failure pattern repeats twice, switch strategy.
3029
+ - Be security-conscious. Don't introduce injection, XSS, or hardcoded secrets. Validate at trust boundaries.
3030
+
3031
+ ## Available Tools
3032
+
3033
+ **Files**: read_file, write_file, replace (edit with replace_all flag), read_many_files, glob, grep_search (output_mode: content/files/count, context, type filter), list_directory, mkdir
3034
+ **Shell**: run_shell_command (Docker isolation when staged files exist; run_in_background for long commands; configurable timeout via CREW_SHELL_TIMEOUT, default 120s, max 600s), check_background_task
3035
+ **Git**: git (status, diff, log, add, commit, show, branch, stash, tag, blame, checkout, fetch, pull, merge, rebase, cherry-pick, worktree \u2014 force-push and --no-verify blocked)
3036
+ **Web**: google_web_search, web_fetch
3037
+ **Memory**: save_memory (persist facts across sessions), write_todos
3038
+ **Docs**: get_internal_docs (read project documentation)
3039
+ **Agents**: spawn_agent (spawn autonomous sub-agent for independent subtasks \u2014 isolated sandbox branch, cheap model by default, merges changes on completion)
3040
+
3041
+ ## File Reading Strategy
3042
+
3043
+ 1. ALWAYS read a file before editing it. Never guess at file contents.
3044
+ 2. For large files (500+ lines): read specific line ranges instead of the whole file.
3045
+ 3. If a read_file result looks truncated, re-read with a narrower range around the area of interest.
3046
+ 4. Use grep_search to locate exact strings before attempting replace/edit.
3047
+
3048
+ ## Edit Strategy
3049
+
3050
+ 1. ALWAYS read_file before editing. Edits on unread files will be rejected.
3051
+ 2. Use replace (edit) for surgical changes \u2014 provide exact old_string that uniquely matches.
3052
+ 3. Use replace_all:true when renaming a variable/function across the file.
3053
+ 4. For new files, use write_file.
3054
+ 5. Never rewrite an entire existing file \u2014 always use targeted edits.
3055
+ 6. If an edit fails with "not unique", provide more surrounding context or use replace_all:true.
3056
+ 7. If an edit fails with "String not found", re-read the file and retry with current content.
3057
+
3058
+ ## Shell Strategy
3059
+
3060
+ 1. For long-running commands (builds, tests, installs), use run_in_background:true.
3061
+ 2. Use check_background_task to poll for results.
3062
+ 3. Prefer dedicated tools over shell: use read_file not cat, grep_search not rg, glob not find.
3063
+ 4. Never use destructive commands (rm -rf, git reset --hard) without explicit task instruction.
3064
+
3065
+ ## Verification
3066
+
3067
+ 1. After code changes: run the build command (usually "npm run build" or "tsc --noEmit").
3068
+ 2. After logic changes: run relevant tests ("npm test", or specific test file).
3069
+ 3. Check git diff to confirm only intended changes were made.
3070
+
3071
+ ## Output
3072
+
3073
+ - Lead with what you did, not how you thought about it. Skip preamble.
3074
+ - For informational queries (listing files, reading code, checking status): include the actual tool output in your response so the user sees the data.
3075
+ - For code changes: concise summary of files modified, what changed, verification result.
3076
+ - When you used a tool to answer a question, show the results \u2014 don't just say "I listed the files" without showing them.
3077
+
3078
+ ## Stop Conditions \u2014 When to Finish
3079
+
3080
+ - The task is fully complete and verified.
3081
+ - You have confirmed the changes work (via build, test, or diagnostic check).
3082
+ - Do NOT keep reading files or running tools after the work is done.
3083
+ - Do NOT repeat yourself or restate your work \u2014 just give a concise summary.
3084
+
3085
+ ## Anti-Patterns to Avoid
3086
+
3087
+ - Do NOT read every file in the project to "understand context" \u2014 read only what's needed.
3088
+ - Do NOT make speculative changes to files you haven't read.
3089
+ - Do NOT run the same command twice if it already succeeded.
3090
+ - Do NOT apologize or explain failures at length \u2014 just fix them and move on.
3091
+ - Do NOT add features, refactor, or "improve" code beyond what the task asks.
3092
+ - Do NOT add comments, docstrings, or type annotations to code you didn't change.`;
3093
+ PROVIDER_ORDER = [
3094
+ // Heavy tier — L2 brain (complex multi-file tasks, planning)
3095
+ { id: "openai", envKey: "OPENAI_API_KEY", model: "gpt-4.1", driver: "openai", apiUrl: "https://api.openai.com/v1/chat/completions", modelPrefix: "gpt", tier: "heavy" },
3096
+ { id: "anthropic", envKey: "ANTHROPIC_API_KEY", model: "claude-sonnet-4-20250514", driver: "anthropic", modelPrefix: "claude", tier: "heavy" },
3097
+ { id: "grok", envKey: "XAI_API_KEY", model: "grok-3-mini-beta", driver: "openai", apiUrl: "https://api.x.ai/v1/chat/completions", modelPrefix: "grok", tier: "heavy" },
3098
+ // Standard tier — L3 workers (execution, parallel tasks)
3099
+ { id: "gemini", envKey: "GEMINI_API_KEY", model: "gemini-2.5-flash", driver: "gemini", modelPrefix: "gemini", tier: "standard" },
3100
+ { id: "gemini", envKey: "GOOGLE_API_KEY", model: "gemini-2.5-flash", driver: "gemini", modelPrefix: "gemini", tier: "standard" },
3101
+ { id: "deepseek", envKey: "DEEPSEEK_API_KEY", model: "deepseek-chat", driver: "openai", apiUrl: "https://api.deepseek.com/v1/chat/completions", modelPrefix: "deepseek", tier: "standard" },
3102
+ { id: "kimi", envKey: "MOONSHOT_API_KEY", model: "moonshot-v1-128k", driver: "openai", apiUrl: "https://api.moonshot.cn/v1/chat/completions", modelPrefix: "kimi", tier: "standard" },
3103
+ // Fast tier — L1 routing (classification, cheap)
3104
+ { id: "groq", envKey: "GROQ_API_KEY", model: "llama-3.3-70b-versatile", driver: "openai", apiUrl: "https://api.groq.com/openai/v1/chat/completions", modelPrefix: "llama", tier: "fast" },
3105
+ // Fallback — free tier
3106
+ { id: "openrouter", envKey: "OPENROUTER_API_KEY", model: "google/gemini-2.0-flash-exp:free", driver: "openrouter", apiUrl: "https://openrouter.ai/api/v1/chat/completions", modelPrefix: "openrouter", tier: "standard" },
3107
+ // Additional providers (OpenAI-compatible, cheap workers)
3108
+ { id: "together", envKey: "TOGETHER_API_KEY", model: "Qwen/Qwen3.5-397B-A17B", driver: "openai", apiUrl: "https://api.together.xyz/v1/chat/completions", modelPrefix: "qwen", tier: "standard" },
3109
+ { id: "fireworks", envKey: "FIREWORKS_API_KEY", model: "accounts/fireworks/models/qwen3.5-397b-a17b", driver: "openai", apiUrl: "https://api.fireworks.ai/inference/v1/chat/completions", modelPrefix: "fireworks", tier: "standard" }
3110
+ ];
3111
JITContextTracker = class _JITContextTracker {
  /**
   * Tracks file paths discovered while tools run so that later turns can be
   * enriched with just-in-time context drawn from those files' directories.
   */
  constructor() {
    this.discoveredFiles = new Set();
    this.contextCache = "";
  }
  /** Hydrate from a prior session's discovered files */
  static fromPrior(files) {
    const revived = new _JITContextTracker();
    for (const entry of files) revived.discoveredFiles.add(entry);
    return revived;
  }
  /** Serialize discovered files for session persistence */
  toFileList() {
    return [...this.discoveredFiles];
  }
  /** Track a file that was read/written/grepped during tool execution */
  trackFile(filePath) {
    if (!filePath) return;
    // Set.add is idempotent, so re-discoveries are harmless.
    this.discoveredFiles.add(filePath);
  }
  /** Extract file paths from tool calls and results */
  trackFromToolResult(toolName, params, result) {
    // Path-like parameters are tracked directly.
    ["file_path", "path", "dir_path"]
      .filter((key) => params[key])
      .forEach((key) => this.trackFile(String(params[key])));
    // Search-style tools emit one "path[:line]" hit per output row.
    const searchTools = ["glob", "grep_search", "grep_search_ripgrep"];
    if (searchTools.includes(toolName) && result?.output) {
      for (const row of String(result.output).split("\n")) {
        const hit = row.match(/^([^\s:]+\.[a-zA-Z]+)/);
        if (hit) this.trackFile(hit[1]);
      }
    }
    // Directory listings use a "f name" / "d name" row format; only rows whose
    // name contains a dot (file-like entries) are tracked, joined to the dir.
    if (toolName === "list_directory" && result?.output) {
      const base = params.dir_path || params.path || ".";
      for (const row of String(result.output).split("\n")) {
        const hit = row.match(/^[fd]\s+(.+)/);
        if (hit && hit[1].includes(".")) this.trackFile(`${base}/${hit[1]}`);
      }
    }
  }
  /** Build enriched context from discovered files for next turn */
  async buildJITContext(projectDir) {
    if (this.discoveredFiles.size === 0) return "";
    try {
      // Lazy-load the collections chunk of this bundle only when needed.
      const mod = await Promise.resolve().then(() => (init_collections(), collections_exports));
      // Index at most 5 distinct parent directories of the discovered files.
      const parentDirs = new Set();
      for (const file of this.discoveredFiles) {
        const segments = file.split("/");
        if (segments.length > 1) parentDirs.add(segments.slice(0, -1).join("/"));
      }
      const dirsToIndex = [...parentDirs].slice(0, 5);
      if (dirsToIndex.length === 0) return "";
      const { resolve: resolvePath } = await import("node:path");
      const absoluteDirs = dirsToIndex.map((dir) => resolvePath(projectDir, dir));
      const index = await mod.buildCollectionIndex(absoluteDirs, { includeCode: true });
      if (index.chunkCount === 0) return "";
      // Query with up to 10 discovered paths; keep the top 3 hits, 400 chars each.
      const query = [...this.discoveredFiles].slice(0, 10).join(" ");
      const found = mod.searchCollection(index, query, 3);
      if (found.hits.length === 0) return "";
      const newContext = found.hits
        .map((hit) => `--- JIT: ${hit.source}:${hit.startLine} ---\n${hit.text.slice(0, 400)}`)
        .join("\n\n");
      this.contextCache = newContext;
      return `\n\nJIT-discovered context:\n${newContext}`;
    } catch {
      // Context enrichment is best-effort; any failure degrades to no context.
      return "";
    }
  }
  /** Number of distinct files discovered so far. */
  get fileCount() {
    return this.discoveredFiles.size;
  }
};
3193
+ MAX_RETRIES = 3;
3194
+ }
3195
+ });
3196
+
3197
+ // src/engine-api.ts
3198
+ init_agentic_executor();
3199
+
3200
+ // src/sandbox/index.ts
3201
+ import { createTwoFilesPatch } from "diff";
3202
+ import { readFile as readFile4, writeFile as writeFile3, mkdir as mkdir3, access as access2 } from "node:fs/promises";
3203
+ import { constants as constants2 } from "node:fs";
3204
+ import { join as join4, dirname as dirname3 } from "node:path";
3205
var Sandbox = class {
  /**
   * A branch-aware staging area for file edits. Pending changes live under
   * named branches and are persisted to `<baseDir>/.crew/sandbox.json` until
   * they are applied to disk or rolled back.
   */
  constructor(baseDir = process.cwd()) {
    this.state = {
      updatedAt: new Date().toISOString(),
      activeBranch: "main",
      branches: { main: {} }
    };
    this.baseDir = baseDir;
    this.stateFilePath = join4(baseDir, ".crew", "sandbox.json");
  }
  /** Resolves true when the given path is accessible on disk. */
  async exists(path2) {
    return access2(path2, constants2.F_OK).then(() => true, () => false);
  }
  /** Load persisted state, tolerating a missing or corrupt state file. */
  async load() {
    const present = await this.exists(this.stateFilePath);
    if (!present) return;
    try {
      const raw = await readFile4(this.stateFilePath, "utf8");
      const parsed = JSON.parse(raw);
      this.state = {
        ...this.state,
        ...parsed,
        // Guard against older/partial state files.
        branches: parsed.branches || { main: {} },
        activeBranch: parsed.activeBranch || "main"
      };
    } catch (err) {
      console.error(`Failed to load sandbox state: ${err.message}`);
    }
  }
  /** Write current state to the state file, creating its directory if needed. */
  async persist() {
    this.state.updatedAt = new Date().toISOString();
    const parent = dirname3(this.stateFilePath);
    if (!(await this.exists(parent))) {
      await mkdir3(parent, { recursive: true });
    }
    await writeFile3(this.stateFilePath, JSON.stringify(this.state, null, 2), "utf8");
  }
  /** Alias for persist() to match external API expectations */
  async save() {
    await this.persist();
  }
  /**
   * Stage a modified version of `filePath` on the active branch. The original
   * content is captured once (from a prior staged change, or from disk).
   */
  async addChange(filePath, modifiedContent) {
    const active = this.state.activeBranch;
    // Repair a missing or legacy array-shaped branch record.
    if (!this.state.branches[active] || Array.isArray(this.state.branches[active])) {
      this.state.branches[active] = {};
    }
    const changes = this.state.branches[active];
    let original;
    if (changes[filePath]) {
      original = changes[filePath].original;
    } else {
      const onDisk = join4(this.baseDir, filePath);
      original = (await this.exists(onDisk)) ? await readFile4(onDisk, "utf8") : "";
    }
    changes[filePath] = {
      path: filePath,
      original,
      modified: modifiedContent,
      timestamp: new Date().toISOString()
    };
    await this.persist();
  }
  /** Render a unified diff of all pending changes on the given branch. */
  preview(branchName = this.state.activeBranch) {
    const branch = this.state.branches[branchName];
    if (!branch) return `Branch "${branchName}" not found.`;
    const patches = Object.entries(branch).map(([relPath, entry]) =>
      createTwoFilesPatch(
        `a/${relPath}`,
        `b/${relPath}`,
        entry.original,
        entry.modified,
        void 0,
        void 0,
        { context: 3 }
      )
    );
    return patches.join("") || "No pending changes.";
  }
  /** Write every staged change to disk, then clear the branch. */
  async apply(branchName = this.state.activeBranch) {
    const branch = this.state.branches[branchName];
    if (!branch) throw new Error(`Branch "${branchName}" not found.`);
    for (const [relPath, entry] of Object.entries(branch)) {
      const destination = join4(this.baseDir, relPath);
      const parent = dirname3(destination);
      if (!(await this.exists(parent))) {
        await mkdir3(parent, { recursive: true });
      }
      await writeFile3(destination, entry.modified, "utf8");
    }
    await this.rollback(branchName);
  }
  /** Discard all staged changes on the given branch. */
  async rollback(branchName = this.state.activeBranch) {
    if (!this.state.branches[branchName]) return;
    this.state.branches[branchName] = {};
    await this.persist();
  }
  /** Create a branch as a deep copy of `fromBranch` and make it active. */
  async createBranch(name, fromBranch = this.state.activeBranch) {
    if (this.state.branches[name]) {
      throw new Error(`Branch "${name}" already exists.`);
    }
    const sourceChanges = this.state.branches[fromBranch] || {};
    this.state.branches[name] = JSON.parse(JSON.stringify(sourceChanges));
    this.state.activeBranch = name;
    await this.persist();
  }
  /** Make an existing branch the active one. */
  async switchBranch(name) {
    if (!this.state.branches[name]) {
      throw new Error(`Branch "${name}" does not exist.`);
    }
    this.state.activeBranch = name;
    await this.persist();
  }
  /** Delete a branch (never "main"); falls back to "main" if it was active. */
  async deleteBranch(name) {
    if (name === "main") throw new Error("Cannot delete main branch.");
    if (this.state.activeBranch === name) this.state.activeBranch = "main";
    delete this.state.branches[name];
    await this.persist();
  }
  /** Copy every staged change from `source` into `target` (source wins). */
  async mergeBranch(source, target = this.state.activeBranch) {
    if (!this.state.branches[source]) throw new Error(`Source branch "${source}" not found.`);
    if (!this.state.branches[target]) throw new Error(`Target branch "${target}" not found.`);
    const incoming = this.state.branches[source];
    const receiving = this.state.branches[target];
    for (const [relPath, entry] of Object.entries(incoming)) {
      receiving[relPath] = JSON.parse(JSON.stringify(entry));
    }
    await this.persist();
  }
  getActiveBranch() {
    return this.state.activeBranch;
  }
  getBranches() {
    return Object.keys(this.state.branches);
  }
  getPendingPaths(branchName = this.state.activeBranch) {
    return Object.keys(this.state.branches[branchName] || {});
  }
  hasChanges(branchName = this.state.activeBranch) {
    return Object.keys(this.state.branches[branchName] || {}).length > 0;
  }
  /**
   * Get staged content for a file path (returns undefined if not staged).
   * Used by tool adapter so agentic workers can read their own staged files.
   */
  getStagedContent(filePath, branchName = this.state.activeBranch) {
    const branch = this.state.branches[branchName];
    if (!branch) return void 0;
    const entry = branch[filePath];
    return entry ? entry.modified : void 0;
  }
};
3365
// Public exports of this bundled engine module.
export {
  Sandbox,
  runAgenticWorker
};
3369
+ /**
3370
+ * Autonomous Worker Loop
3371
+ * Implements OpenOrca-style THINK → ACT → OBSERVE pattern
3372
+ *
3373
+ * @license
3374
+ * Copyright 2026 CrewSwarm
3375
+ */
3376
+ /**
3377
+ * @license
3378
+ * Copyright 2025 Google LLC
3379
+ * SPDX-License-Identifier: Apache-2.0
3380
+ */
3381
+ //# sourceMappingURL=engine.mjs.map