@defai.digital/automatosx 12.5.4 → 12.5.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (4) hide show
  1. package/README.md +1 -1
  2. package/dist/index.js +27048 -26324
  3. package/dist/mcp/index.js +2373 -260
  4. package/package.json +1 -1
package/dist/mcp/index.js CHANGED
@@ -8390,12 +8390,11 @@ function calculateMaxConcurrentAgents(staticLimit) {
8390
8390
  init_esm_shims();
8391
8391
  var PRECOMPILED_CONFIG = {
8392
8392
  "providers": {
8393
- "gemini-cli": {
8393
+ "claude-code": {
8394
8394
  "enabled": true,
8395
- "priority": 1,
8395
+ "priority": 3,
8396
8396
  "timeout": 27e5,
8397
- "command": "gemini",
8398
- "description": "#1 Frontend/Design, multimodal, free tier, WebDev Arena leader",
8397
+ "command": "claude",
8399
8398
  "healthCheck": {
8400
8399
  "enabled": true,
8401
8400
  "interval": 3e5,
@@ -8417,16 +8416,15 @@ var PRECOMPILED_CONFIG = {
8417
8416
  },
8418
8417
  "limitTracking": {
8419
8418
  "enabled": true,
8420
- "window": "daily",
8419
+ "window": "weekly",
8421
8420
  "resetHourUtc": 0
8422
8421
  }
8423
8422
  },
8424
- "openai": {
8423
+ "gemini-cli": {
8425
8424
  "enabled": true,
8426
- "priority": 3,
8425
+ "priority": 2,
8427
8426
  "timeout": 27e5,
8428
- "command": "codex",
8429
- "description": "Best reasoning (o3), strategy, 75% accuracy, 192k context",
8427
+ "command": "gemini",
8430
8428
  "healthCheck": {
8431
8429
  "enabled": true,
8432
8430
  "interval": 3e5,
@@ -8452,12 +8450,11 @@ var PRECOMPILED_CONFIG = {
8452
8450
  "resetHourUtc": 0
8453
8451
  }
8454
8452
  },
8455
- "claude-code": {
8453
+ "openai": {
8456
8454
  "enabled": true,
8457
- "priority": 2,
8455
+ "priority": 1,
8458
8456
  "timeout": 27e5,
8459
- "command": "claude",
8460
- "description": "#1 Coding model, agentic workflows, security, 0% edit error rate",
8457
+ "command": "codex",
8461
8458
  "healthCheck": {
8462
8459
  "enabled": true,
8463
8460
  "interval": 3e5,
@@ -8477,50 +8474,6 @@ var PRECOMPILED_CONFIG = {
8477
8474
  "forceKillDelay": 1e3,
8478
8475
  "cacheEnabled": true
8479
8476
  },
8480
- "limitTracking": {
8481
- "enabled": true,
8482
- "window": "weekly",
8483
- "resetHourUtc": 0
8484
- }
8485
- },
8486
- "glm": {
8487
- "enabled": true,
8488
- "priority": 4,
8489
- "timeout": 27e5,
8490
- "type": "sdk",
8491
- "description": "Near Claude coding (48.6%), low cost $3/mo, 200k context, agentic",
8492
- "healthCheck": {
8493
- "enabled": true,
8494
- "interval": 3e5,
8495
- "timeout": 5e3
8496
- },
8497
- "circuitBreaker": {
8498
- "enabled": true,
8499
- "failureThreshold": 3,
8500
- "recoveryTimeout": 6e4
8501
- },
8502
- "limitTracking": {
8503
- "enabled": true,
8504
- "window": "daily",
8505
- "resetHourUtc": 0
8506
- }
8507
- },
8508
- "grok": {
8509
- "enabled": true,
8510
- "priority": 5,
8511
- "timeout": 27e5,
8512
- "type": "sdk",
8513
- "description": "Fastest (67ms), reasoning (93% AIME), 1M context, DeepSearch",
8514
- "healthCheck": {
8515
- "enabled": true,
8516
- "interval": 3e5,
8517
- "timeout": 5e3
8518
- },
8519
- "circuitBreaker": {
8520
- "enabled": true,
8521
- "failureThreshold": 3,
8522
- "recoveryTimeout": 6e4
8523
- },
8524
8477
  "limitTracking": {
8525
8478
  "enabled": true,
8526
8479
  "window": "daily",
@@ -8737,7 +8690,7 @@ var PRECOMPILED_CONFIG = {
8737
8690
  "enableFreeTierPrioritization": true,
8738
8691
  "enableWorkloadAwareRouting": true
8739
8692
  },
8740
- "version": "12.5.4"
8693
+ "version": "12.5.5"
8741
8694
  };
8742
8695
 
8743
8696
  // src/core/config/schemas.ts
@@ -10403,6 +10356,12 @@ var ProviderLimitManager = class _ProviderLimitManager extends DisposableEventEm
10403
10356
  }
10404
10357
  });
10405
10358
  }
10359
+ /**
10360
+ * Clean up resources and remove all event listeners.
10361
+ */
10362
+ async destroy() {
10363
+ this.removeAllListeners();
10364
+ }
10406
10365
  };
10407
10366
  async function getProviderLimitManager(stateDirectory) {
10408
10367
  if (!stateDirectory) {
@@ -11098,6 +11057,12 @@ var ProviderMetricsTracker = class extends DisposableEventEmitter {
11098
11057
  const records = this.metrics.get(provider);
11099
11058
  return records ? [...records] : null;
11100
11059
  }
11060
+ /**
11061
+ * Clean up resources and remove all event listeners.
11062
+ */
11063
+ async destroy() {
11064
+ this.removeAllListeners();
11065
+ }
11101
11066
  };
11102
11067
  var globalMetricsTracker = null;
11103
11068
  function getProviderMetricsTracker() {
@@ -11381,6 +11346,12 @@ var RoutingStrategyManager = class extends DisposableEventEmitter {
11381
11346
  exportHistory() {
11382
11347
  return [...this.decisionHistory];
11383
11348
  }
11349
+ /**
11350
+ * Clean up resources and remove all event listeners.
11351
+ */
11352
+ async destroy() {
11353
+ this.removeAllListeners();
11354
+ }
11384
11355
  };
11385
11356
  var globalRoutingStrategy = null;
11386
11357
  function getRoutingStrategyManager(config) {
@@ -13996,9 +13967,9 @@ var MemoryManager = class _MemoryManager {
13996
13967
  throw new MemoryError("Memory manager not initialized", "DATABASE_ERROR");
13997
13968
  }
13998
13969
  try {
13999
- const { mkdir: mkdir4 } = await import('fs/promises');
13970
+ const { mkdir: mkdir5 } = await import('fs/promises');
14000
13971
  const destDir = dirname3(destPath);
14001
- await mkdir4(destDir, { recursive: true });
13972
+ await mkdir5(destDir, { recursive: true });
14002
13973
  await this.db.backup(destPath);
14003
13974
  logger.info("Database backup created", { destPath: normalizePath(destPath) });
14004
13975
  } catch (error) {
@@ -14033,8 +14004,8 @@ var MemoryManager = class _MemoryManager {
14033
14004
  tempDb.prepare("SELECT COUNT(*) FROM memory_entries").get();
14034
14005
  tempDb.close();
14035
14006
  } catch (verifyError) {
14036
- const { unlink: unlink3 } = await import('fs/promises');
14037
- await unlink3(tempPath).catch(() => {
14007
+ const { unlink: unlink4 } = await import('fs/promises');
14008
+ await unlink4(tempPath).catch(() => {
14038
14009
  });
14039
14010
  throw new MemoryError(
14040
14011
  `Backup file verification failed: ${verifyError.message}`,
@@ -14154,9 +14125,9 @@ var MemoryManager = class _MemoryManager {
14154
14125
  },
14155
14126
  entries
14156
14127
  };
14157
- const { writeFile: writeFile6 } = await import('fs/promises');
14128
+ const { writeFile: writeFile7 } = await import('fs/promises');
14158
14129
  const json = pretty ? JSON.stringify(exportData, null, 2) : JSON.stringify(exportData);
14159
- await writeFile6(filePath, json, "utf-8");
14130
+ await writeFile7(filePath, json, "utf-8");
14160
14131
  const sizeBytes = Buffer.byteLength(json, "utf-8");
14161
14132
  logger.info("Memory exported to JSON", {
14162
14133
  filePath: normalizePath(filePath),
@@ -14205,8 +14176,8 @@ var MemoryManager = class _MemoryManager {
14205
14176
  { filePath }
14206
14177
  );
14207
14178
  }
14208
- const { readFile: readFile11 } = await import('fs/promises');
14209
- const content = await readFile11(filePath, "utf-8");
14179
+ const { readFile: readFile13 } = await import('fs/promises');
14180
+ const content = await readFile13(filePath, "utf-8");
14210
14181
  const importData = JSON.parse(content);
14211
14182
  const SUPPORTED_VERSIONS = ["1.0", "4.0.0", "4.11.0"];
14212
14183
  if (!importData.version || !SUPPORTED_VERSIONS.includes(importData.version)) {
@@ -18423,12 +18394,7 @@ var WarningEmitter = class extends EventEmitter {
18423
18394
  this.emit("timeout-warning", event);
18424
18395
  }
18425
18396
  /**
18426
- * Destroy warning emitter
18427
- *
18428
- * CRITICAL FIX (v5.6.18): Remove all event listeners to prevent memory leak.
18429
- * Must be called when WarningEmitter instance is no longer needed.
18430
- *
18431
- * @since 5.6.18
18397
+ * Clean up resources and remove all event listeners.
18432
18398
  */
18433
18399
  destroy() {
18434
18400
  this.removeAllListeners("timeout-warning");
@@ -22789,149 +22755,1796 @@ Task: ${task}`;
22789
22755
  };
22790
22756
  }
22791
22757
 
22792
- // src/mcp/tools/get-capabilities.ts
22758
+ // src/mcp/tools/bugfix-scan.ts
22793
22759
  init_esm_shims();
22794
22760
  init_logger();
22795
- function categorizeTools(name) {
22796
- if (name.startsWith("memory_") || name === "search_memory") return "memory";
22797
- if (name.startsWith("session_")) return "session";
22798
- if (name.startsWith("create_task") || name.startsWith("run_task") || name.startsWith("get_task") || name.startsWith("list_task") || name.startsWith("delete_task")) return "task";
22799
- if (name === "get_capabilities" || name === "list_agents" || name === "get_status" || name === "get_agent_context") return "discovery";
22800
- if (name.includes("context")) return "context";
22801
- return "execution";
22802
- }
22803
- function getExecutionMode(providerName, _providerConfig) {
22804
- if (providerName === "glm" || providerName === "grok") {
22805
- return "sdk";
22761
+
22762
+ // src/core/bugfix/bug-detector.ts
22763
+ init_esm_shims();
22764
+ init_logger();
22765
+ var DEFAULT_DETECTION_RULES = [
22766
+ // Timer leak: setInterval without .unref()
22767
+ // v12.5.5: Increased withinLines from 5 to 50 to handle multi-line callbacks
22768
+ // where .unref() is called after the closing brace (some callbacks are 35+ lines)
22769
+ // NOTE: This is a workaround - proper fix would use AST-based detection
22770
+ {
22771
+ id: "timer-leak-interval",
22772
+ type: "timer_leak",
22773
+ name: "setInterval without unref",
22774
+ description: "setInterval() without .unref() blocks process exit",
22775
+ pattern: "setInterval\\s*\\(",
22776
+ negativePattern: "\\.unref\\s*\\(\\)",
22777
+ withinLines: 50,
22778
+ confidence: 0.9,
22779
+ severity: "high",
22780
+ autoFixable: true,
22781
+ fixTemplate: "add_unref",
22782
+ fileExtensions: [".ts", ".js", ".mts", ".mjs"]
22783
+ },
22784
+ // Timer leak: setTimeout in promise without cleanup
22785
+ {
22786
+ id: "timer-leak-timeout-promise",
22787
+ type: "promise_timeout_leak",
22788
+ name: "setTimeout in Promise without cleanup",
22789
+ description: "setTimeout in Promise should be cleared in finally block",
22790
+ pattern: "new\\s+Promise[^}]*setTimeout\\s*\\(",
22791
+ negativePattern: "finally|clearTimeout",
22792
+ withinLines: 20,
22793
+ confidence: 0.7,
22794
+ severity: "medium",
22795
+ autoFixable: false,
22796
+ // Complex, needs manual review
22797
+ fileExtensions: [".ts", ".js", ".mts", ".mjs"]
22798
+ },
22799
+ // Missing destroy: EventEmitter without destroy method
22800
+ // v12.5.5: Increased withinLines from 100 to 800 to scan entire class
22801
+ // Classes can be large - need to check the whole class for destroy() method
22802
+ {
22803
+ id: "missing-destroy-eventemitter",
22804
+ type: "missing_destroy",
22805
+ name: "EventEmitter without destroy",
22806
+ description: "Classes extending EventEmitter should have destroy() method",
22807
+ pattern: "class\\s+\\w+\\s+extends\\s+(?:EventEmitter|DisposableEventEmitter)",
22808
+ negativePattern: "destroy\\s*\\(\\s*\\)",
22809
+ withinLines: 800,
22810
+ confidence: 0.85,
22811
+ severity: "high",
22812
+ autoFixable: true,
22813
+ fixTemplate: "add_destroy_method",
22814
+ fileExtensions: [".ts", ".js", ".mts", ".mjs"]
22815
+ },
22816
+ // Event leak: .on() without corresponding cleanup
22817
+ {
22818
+ id: "event-leak-on",
22819
+ type: "event_leak",
22820
+ name: "Event listener without cleanup",
22821
+ description: ".on() or .addListener() without corresponding .off() or .removeListener()",
22822
+ pattern: "\\.(on|addListener)\\s*\\(['\"`]\\w+['\"`]",
22823
+ negativePattern: "\\.(off|removeListener|removeAllListeners)\\s*\\(",
22824
+ withinLines: 50,
22825
+ confidence: 0.6,
22826
+ // Lower confidence - may have false positives
22827
+ severity: "medium",
22828
+ autoFixable: false,
22829
+ fileExtensions: [".ts", ".js", ".mts", ".mjs"]
22830
+ },
22831
+ // Uncaught promise: Promise without catch
22832
+ {
22833
+ id: "uncaught-promise",
22834
+ type: "uncaught_promise",
22835
+ name: "Promise without error handling",
22836
+ description: "Promise should have .catch() or be awaited in try/catch",
22837
+ pattern: "new\\s+Promise\\s*\\([^)]+\\)",
22838
+ negativePattern: "\\.catch\\s*\\(|try\\s*\\{",
22839
+ withinLines: 10,
22840
+ confidence: 0.5,
22841
+ // Low confidence - many false positives
22842
+ severity: "low",
22843
+ autoFixable: false,
22844
+ fileExtensions: [".ts", ".js", ".mts", ".mjs"]
22806
22845
  }
22807
- if (providerName === "claude-code" || providerName === "gemini-cli") {
22808
- return "cli";
22846
+ ];
22847
+ var BugDetector = class {
22848
+ rules;
22849
+ config;
22850
+ constructor(config, customRules) {
22851
+ this.config = config;
22852
+ this.rules = customRules || DEFAULT_DETECTION_RULES;
22853
+ if (config.bugTypes && config.bugTypes.length > 0) {
22854
+ this.rules = this.rules.filter(
22855
+ (rule) => config.bugTypes.includes(rule.type)
22856
+ );
22857
+ }
22858
+ logger.debug("BugDetector initialized", {
22859
+ ruleCount: this.rules.length,
22860
+ bugTypes: config.bugTypes,
22861
+ scope: config.scope
22862
+ });
22809
22863
  }
22810
- if (providerName === "openai") {
22811
- return "hybrid";
22864
+ /**
22865
+ * Scan codebase for bugs
22866
+ *
22867
+ * @param rootDir - Root directory to scan
22868
+ * @returns Array of bug findings
22869
+ */
22870
+ async scan(rootDir) {
22871
+ const startTime = Date.now();
22872
+ const findings = [];
22873
+ logger.info("Starting bug scan", {
22874
+ rootDir,
22875
+ scope: this.config.scope,
22876
+ ruleCount: this.rules.length
22877
+ });
22878
+ const scanDir = this.config.scope ? join(rootDir, this.config.scope) : rootDir;
22879
+ const files = await this.getFilesToScan(scanDir, rootDir);
22880
+ logger.debug("Files to scan", { count: files.length });
22881
+ for (const file of files) {
22882
+ try {
22883
+ const fileFindings = await this.scanFile(file, rootDir);
22884
+ findings.push(...fileFindings);
22885
+ } catch (error) {
22886
+ logger.warn("Error scanning file", {
22887
+ file,
22888
+ error: error.message
22889
+ });
22890
+ }
22891
+ }
22892
+ const filteredFindings = this.filterBySeverity(findings);
22893
+ filteredFindings.sort((a, b) => {
22894
+ const severityOrder = {
22895
+ critical: 4,
22896
+ high: 3,
22897
+ medium: 2,
22898
+ low: 1
22899
+ };
22900
+ const severityDiff = severityOrder[b.severity] - severityOrder[a.severity];
22901
+ if (severityDiff !== 0) return severityDiff;
22902
+ return b.confidence - a.confidence;
22903
+ });
22904
+ const duration = Date.now() - startTime;
22905
+ logger.info("Bug scan complete", {
22906
+ totalFindings: filteredFindings.length,
22907
+ filesScanned: files.length,
22908
+ durationMs: duration
22909
+ });
22910
+ return filteredFindings;
22812
22911
  }
22813
- return "cli";
22814
- }
22815
- function getProviderType(providerName) {
22816
- if (providerName === "glm" || providerName === "grok") return "sdk";
22817
- if (providerName === "openai") return "hybrid";
22818
- return "cli";
22819
- }
22820
- function createGetCapabilitiesHandler(deps) {
22821
- return async () => {
22822
- logger.info("[MCP] get_capabilities called");
22823
- try {
22824
- const projectDir = process.cwd();
22825
- const config = await loadConfig(projectDir);
22826
- const version = getVersion();
22827
- const providers = [];
22828
- const providerConfigs = config.providers || {};
22829
- for (const [name, providerConfig] of Object.entries(providerConfigs)) {
22830
- const cfg = providerConfig;
22831
- const enabled = cfg.enabled === true;
22832
- let available = false;
22833
- try {
22834
- const availableProviders = await deps.router.getAvailableProviders();
22835
- available = availableProviders.some((p) => p.name === name);
22836
- } catch {
22837
- available = enabled;
22912
+ /**
22913
+ * Scan a single file for bugs
22914
+ */
22915
+ async scanFile(filePath, rootDir) {
22916
+ const findings = [];
22917
+ const content = await readFile(filePath, "utf-8");
22918
+ const lines = content.split("\n");
22919
+ const relativePath = relative(rootDir, filePath);
22920
+ for (const rule of this.rules) {
22921
+ if (rule.fileExtensions && rule.fileExtensions.length > 0) {
22922
+ const ext = extname$1(filePath);
22923
+ if (!rule.fileExtensions.includes(ext)) {
22924
+ continue;
22838
22925
  }
22839
- providers.push({
22840
- name,
22841
- enabled,
22842
- available,
22843
- type: getProviderType(name),
22844
- executionMode: getExecutionMode(name, cfg),
22845
- priority: cfg.priority || 0,
22846
- model: cfg.model
22847
- });
22848
22926
  }
22849
- providers.sort((a, b) => b.priority - a.priority);
22850
- const agentNames = await deps.profileLoader.listProfiles();
22851
- const agentResults = await Promise.all(
22852
- agentNames.map(async (agentName) => {
22853
- try {
22854
- const profile = await deps.profileLoader.loadProfile(agentName);
22855
- return {
22856
- name: profile.name,
22857
- displayName: profile.displayName,
22858
- role: profile.role,
22859
- description: profile.systemPrompt?.substring(0, 200),
22860
- team: profile.team,
22861
- abilities: profile.abilities || []
22862
- };
22863
- } catch (error) {
22864
- logger.warn(`Failed to load profile for ${agentName}`, { error });
22865
- return null;
22927
+ const ruleFindings = this.applyRule(rule, content, lines, relativePath);
22928
+ findings.push(...ruleFindings);
22929
+ }
22930
+ return findings;
22931
+ }
22932
+ /**
22933
+ * Apply a detection rule to file content
22934
+ */
22935
+ applyRule(rule, content, lines, filePath) {
22936
+ const findings = [];
22937
+ if (!rule.pattern) {
22938
+ return findings;
22939
+ }
22940
+ try {
22941
+ const regex = new RegExp(rule.pattern, "g");
22942
+ let match;
22943
+ while ((match = regex.exec(content)) !== null) {
22944
+ const beforeMatch = content.substring(0, match.index);
22945
+ const lineNumber = beforeMatch.split("\n").length;
22946
+ if (rule.negativePattern) {
22947
+ const withinLines = rule.withinLines || 5;
22948
+ const startLine = Math.max(0, lineNumber - 1);
22949
+ const endLine = Math.min(lines.length, lineNumber + withinLines);
22950
+ const contextLines = lines.slice(startLine, endLine).join("\n");
22951
+ const negativeRegex = new RegExp(rule.negativePattern);
22952
+ if (negativeRegex.test(contextLines)) {
22953
+ continue;
22866
22954
  }
22867
- })
22868
- );
22869
- const agents = agentResults.filter((a) => a !== null);
22870
- const tools = deps.toolSchemas.map((schema) => ({
22871
- name: schema.name,
22872
- description: schema.description,
22873
- category: categorizeTools(schema.name)
22874
- }));
22875
- const [memoryStats, activeSessions] = await Promise.all([
22876
- deps.memoryManager.getStats(),
22877
- deps.sessionManager.getActiveSessions()
22878
- ]);
22879
- const result = {
22880
- version,
22881
- providers,
22882
- agents,
22883
- tools,
22884
- memory: {
22885
- enabled: true,
22886
- entryCount: memoryStats.totalEntries,
22887
- maxEntries: config.memory?.maxEntries || 1e4
22888
- },
22889
- sessions: {
22890
- enabled: true,
22891
- activeCount: activeSessions.length,
22892
- maxSessions: config.orchestration?.session?.maxSessions || 100
22893
- },
22894
- features: {
22895
- smartRouting: true,
22896
- // Always enabled in v13.0.0
22897
- memorySearch: true,
22898
- multiAgentSessions: true,
22899
- streamingNotifications: false
22900
- // Configured via MCP server options
22901
22955
  }
22902
- };
22903
- logger.info("[MCP] get_capabilities completed", {
22904
- version,
22905
- providersCount: providers.length,
22906
- agentsCount: agents.length,
22907
- toolsCount: tools.length
22908
- });
22909
- return result;
22956
+ const contextStart = Math.max(0, lineNumber - 3);
22957
+ const contextEnd = Math.min(lines.length, lineNumber + 3);
22958
+ const context = lines.slice(contextStart, contextEnd).join("\n");
22959
+ const finding = {
22960
+ id: randomUUID(),
22961
+ file: filePath,
22962
+ lineStart: lineNumber,
22963
+ lineEnd: lineNumber + (rule.withinLines ? Math.min(rule.withinLines, 5) : 1),
22964
+ type: rule.type,
22965
+ severity: rule.severity,
22966
+ message: rule.description,
22967
+ context,
22968
+ fixStrategy: rule.autoFixable ? rule.fixTemplate : void 0,
22969
+ confidence: rule.confidence,
22970
+ detectionMethod: "regex",
22971
+ metadata: {
22972
+ ruleId: rule.id,
22973
+ ruleName: rule.name,
22974
+ matchedText: match[0].substring(0, 100)
22975
+ },
22976
+ detectedAt: (/* @__PURE__ */ new Date()).toISOString()
22977
+ };
22978
+ findings.push(finding);
22979
+ logger.debug("Bug detected", {
22980
+ file: filePath,
22981
+ line: lineNumber,
22982
+ type: rule.type,
22983
+ rule: rule.id
22984
+ });
22985
+ }
22910
22986
  } catch (error) {
22911
- logger.error("[MCP] get_capabilities failed", { error });
22912
- throw new Error(`Capabilities check failed: ${error.message}`);
22987
+ logger.warn("Rule application failed", {
22988
+ ruleId: rule.id,
22989
+ file: filePath,
22990
+ error: error.message
22991
+ });
22913
22992
  }
22914
- };
22915
- }
22916
-
22917
- // src/mcp/tools/task/index.ts
22918
- init_esm_shims();
22919
-
22920
- // src/mcp/tools/task/create-task.ts
22921
- init_esm_shims();
22922
-
22923
- // src/core/task-engine/index.ts
22924
- init_esm_shims();
22925
-
22926
- // src/core/task-engine/types.ts
22927
- init_esm_shims();
22928
- var TaskEngineError = class _TaskEngineError extends Error {
22929
- constructor(message, code, details) {
22930
- super(message);
22931
- this.code = code;
22932
- this.details = details;
22933
- this.name = "TaskEngineError";
22934
- Error.captureStackTrace?.(this, _TaskEngineError);
22993
+ return findings;
22994
+ }
22995
+ /**
22996
+ * Get all files to scan
22997
+ */
22998
+ async getFilesToScan(scanDir, rootDir) {
22999
+ const files = [];
23000
+ const scanDirectory = async (dir) => {
23001
+ try {
23002
+ const entries = await readdir(dir);
23003
+ for (const entry of entries) {
23004
+ const fullPath = join(dir, entry);
23005
+ const relativePath = relative(rootDir, fullPath);
23006
+ if (this.isExcluded(relativePath)) {
23007
+ continue;
23008
+ }
23009
+ const stats = await stat(fullPath);
23010
+ if (stats.isDirectory()) {
23011
+ await scanDirectory(fullPath);
23012
+ } else if (stats.isFile()) {
23013
+ const ext = extname$1(fullPath);
23014
+ if ([".ts", ".js", ".mts", ".mjs", ".tsx", ".jsx"].includes(ext)) {
23015
+ files.push(fullPath);
23016
+ }
23017
+ }
23018
+ }
23019
+ } catch (error) {
23020
+ logger.warn("Error reading directory", {
23021
+ dir,
23022
+ error: error.message
23023
+ });
23024
+ }
23025
+ };
23026
+ await scanDirectory(scanDir);
23027
+ return files;
23028
+ }
23029
+ /**
23030
+ * Check if a path should be excluded
23031
+ */
23032
+ isExcluded(relativePath) {
23033
+ const defaultExclusions = [
23034
+ "node_modules",
23035
+ "dist",
23036
+ "build",
23037
+ ".git",
23038
+ "coverage",
23039
+ ".nyc_output",
23040
+ "*.test.ts",
23041
+ "*.spec.ts",
23042
+ "__tests__",
23043
+ "__mocks__"
23044
+ ];
23045
+ const exclusions = [...defaultExclusions, ...this.config.excludePatterns || []];
23046
+ for (const pattern of exclusions) {
23047
+ if (pattern.includes("*")) {
23048
+ const regexPattern = pattern.replace(/\./g, "\\.").replace(/\*/g, ".*");
23049
+ const regex = new RegExp(regexPattern);
23050
+ if (regex.test(relativePath)) {
23051
+ return true;
23052
+ }
23053
+ } else {
23054
+ if (relativePath.includes(pattern)) {
23055
+ return true;
23056
+ }
23057
+ }
23058
+ }
23059
+ return false;
23060
+ }
23061
+ /**
23062
+ * Filter findings by severity threshold
23063
+ */
23064
+ filterBySeverity(findings) {
23065
+ const severityOrder = {
23066
+ low: 1,
23067
+ medium: 2,
23068
+ high: 3,
23069
+ critical: 4
23070
+ };
23071
+ const threshold = severityOrder[this.config.severityThreshold];
23072
+ return findings.filter(
23073
+ (finding) => severityOrder[finding.severity] >= threshold
23074
+ );
23075
+ }
23076
+ /**
23077
+ * Get detection rules
23078
+ */
23079
+ getRules() {
23080
+ return [...this.rules];
23081
+ }
23082
+ /**
23083
+ * Add a custom detection rule
23084
+ */
23085
+ addRule(rule) {
23086
+ this.rules.push(rule);
23087
+ logger.debug("Detection rule added", { ruleId: rule.id });
23088
+ }
23089
+ /**
23090
+ * Load rules from YAML file
23091
+ */
23092
+ async loadRulesFromFile(filePath) {
23093
+ try {
23094
+ const content = await readFile(filePath, "utf-8");
23095
+ const yaml2 = await import('js-yaml');
23096
+ const parsed = yaml2.load(content);
23097
+ if (parsed.rules && Array.isArray(parsed.rules)) {
23098
+ for (const rule of parsed.rules) {
23099
+ this.addRule(rule);
23100
+ }
23101
+ logger.info("Detection rules loaded from file", {
23102
+ filePath,
23103
+ ruleCount: parsed.rules.length
23104
+ });
23105
+ }
23106
+ } catch (error) {
23107
+ logger.warn("Failed to load detection rules", {
23108
+ filePath,
23109
+ error: error.message
23110
+ });
23111
+ }
23112
+ }
23113
+ };
23114
+ function createDefaultBugfixConfig(overrides) {
23115
+ return {
23116
+ maxBugs: 10,
23117
+ maxDurationMinutes: 45,
23118
+ maxTokens: 5e5,
23119
+ maxRetriesPerBug: 3,
23120
+ minConfidence: 0.7,
23121
+ bugTypes: ["timer_leak", "missing_destroy", "promise_timeout_leak"],
23122
+ severityThreshold: "medium",
23123
+ excludePatterns: [],
23124
+ dryRun: false,
23125
+ requireTests: true,
23126
+ requireTypecheck: true,
23127
+ generateTests: false,
23128
+ verbose: false,
23129
+ ...overrides
23130
+ };
23131
+ }
23132
+
23133
+ // src/mcp/tools/bugfix-scan.ts
23134
+ function createBugfixScanHandler() {
23135
+ return async (input) => {
23136
+ const startTime = Date.now();
23137
+ logger.info("[MCP] bugfix_scan called", { path: input.path, types: input.types });
23138
+ const progressToken = sendMcpProgressBegin("Bug Scan", "Scanning codebase...");
23139
+ try {
23140
+ const rootDir = input.path || process.cwd();
23141
+ const limit = input.limit || 50;
23142
+ const configOverrides = {};
23143
+ if (input.types) configOverrides.bugTypes = input.types;
23144
+ if (input.excludePatterns) configOverrides.excludePatterns = input.excludePatterns;
23145
+ const config = createDefaultBugfixConfig(configOverrides);
23146
+ sendMcpProgress("Initializing bug detector...", progressToken);
23147
+ const detector = new BugDetector(config);
23148
+ let findings = await detector.scan(rootDir);
23149
+ sendMcpProgress(`Found ${findings.length} potential bugs`, progressToken);
23150
+ if (input.minSeverity) {
23151
+ const severityOrder = ["low", "medium", "high", "critical"];
23152
+ const minIndex = severityOrder.indexOf(input.minSeverity);
23153
+ findings = findings.filter((f) => severityOrder.indexOf(f.severity) >= minIndex);
23154
+ }
23155
+ const bySeverity = {
23156
+ low: 0,
23157
+ medium: 0,
23158
+ high: 0,
23159
+ critical: 0
23160
+ };
23161
+ const byType = {};
23162
+ for (const finding of findings) {
23163
+ bySeverity[finding.severity]++;
23164
+ byType[finding.type] = (byType[finding.type] || 0) + 1;
23165
+ }
23166
+ const limitedFindings = findings.slice(0, limit).map((f) => ({
23167
+ id: f.id,
23168
+ file: f.file,
23169
+ line: f.lineStart,
23170
+ type: f.type,
23171
+ severity: f.severity,
23172
+ message: f.message,
23173
+ confidence: f.confidence,
23174
+ hasAutoFix: !!f.fixStrategy
23175
+ }));
23176
+ const result = {
23177
+ total: findings.length,
23178
+ bySeverity,
23179
+ byType,
23180
+ findings: limitedFindings,
23181
+ durationMs: Date.now() - startTime
23182
+ };
23183
+ logger.info("[MCP] bugfix_scan completed", {
23184
+ total: result.total,
23185
+ durationMs: result.durationMs
23186
+ });
23187
+ sendMcpProgressEnd(progressToken, `Completed: ${result.total} bugs found`);
23188
+ return result;
23189
+ } catch (error) {
23190
+ logger.error("[MCP] bugfix_scan failed", { error });
23191
+ sendMcpProgressEnd(progressToken, `Error: ${error.message}`);
23192
+ throw new Error(`Bug scan failed: ${error.message}`);
23193
+ }
23194
+ };
23195
+ }
23196
+ var bugfixScanSchema = {
23197
+ name: "bugfix_scan",
23198
+ description: `Scan codebase for bugs without applying fixes.
23199
+
23200
+ **Time**: Usually 10-60 seconds. You can stop anytime.
23201
+ **Progress**: Streamed notifications show scanning status.
23202
+
23203
+ Detects: Timer leaks, missing destroy(), promise leaks, resource issues.
23204
+ Returns: Severity-sorted findings with auto-fix availability.`,
23205
+ inputSchema: {
23206
+ type: "object",
23207
+ properties: {
23208
+ path: {
23209
+ type: "string",
23210
+ description: "Directory to scan (default: current directory)"
23211
+ },
23212
+ types: {
23213
+ type: "array",
23214
+ items: {
23215
+ type: "string",
23216
+ enum: ["timer_leak", "missing_destroy", "promise_timeout_leak", "event_leak", "resource_leak", "race_condition", "memory_leak", "uncaught_promise", "deprecated_api", "security_issue", "type_error", "test_failure", "custom"]
23217
+ },
23218
+ description: "Bug types to scan for (default: all)"
23219
+ },
23220
+ minSeverity: {
23221
+ type: "string",
23222
+ enum: ["low", "medium", "high", "critical"],
23223
+ description: "Minimum severity to report (default: low)"
23224
+ },
23225
+ includePatterns: {
23226
+ type: "array",
23227
+ items: { type: "string" },
23228
+ description: 'File patterns to include (default: ["**/*.ts", "**/*.js"])'
23229
+ },
23230
+ excludePatterns: {
23231
+ type: "array",
23232
+ items: { type: "string" },
23233
+ description: 'File patterns to exclude (default: ["node_modules", "dist"])'
23234
+ },
23235
+ limit: {
23236
+ type: "integer",
23237
+ minimum: 1,
23238
+ maximum: 100,
23239
+ description: "Maximum number of bugs to return (default: 50)"
23240
+ }
23241
+ }
23242
+ }
23243
+ };
23244
+
23245
+ // src/mcp/tools/bugfix-run.ts
23246
+ init_esm_shims();
23247
+ init_logger();
23248
+
23249
+ // src/core/bugfix/bugfix-controller.ts
23250
+ init_esm_shims();
23251
+ init_logger();
23252
+
23253
+ // src/core/bugfix/bug-fixer.ts
23254
+ init_esm_shims();
23255
+ init_logger();
23256
+ var DEFAULT_FIX_TEMPLATES = [
23257
+ {
23258
+ id: "add_unref",
23259
+ name: "Add .unref() to interval",
23260
+ description: "Add .unref() call after setInterval to prevent blocking process exit",
23261
+ bugType: "timer_leak",
23262
+ template: `
23263
+ // Replace setInterval with createSafeInterval for automatic cleanup
23264
+ import { createSafeInterval } from '@/shared/utils';
23265
+
23266
+ // Or add .unref() manually:
23267
+ // const interval = setInterval(callback, ms);
23268
+ // if (interval.unref) interval.unref();
23269
+ `,
23270
+ imports: ['createSafeInterval from "@/shared/utils"'],
23271
+ confidence: 0.9
23272
+ },
23273
+ {
23274
+ id: "add_destroy_method",
23275
+ name: "Add destroy() method",
23276
+ description: "Add destroy() method that calls removeAllListeners()",
23277
+ bugType: "missing_destroy",
23278
+ template: `
23279
+ /**
23280
+ * Clean up resources and remove all event listeners.
23281
+ */
23282
+ destroy(): void {
23283
+ this.removeAllListeners();
23284
+ }
23285
+ `,
23286
+ confidence: 0.85
23287
+ },
23288
+ {
23289
+ id: "use_disposable_eventemitter",
23290
+ name: "Extend DisposableEventEmitter",
23291
+ description: "Replace EventEmitter with DisposableEventEmitter for automatic cleanup",
23292
+ bugType: "missing_destroy",
23293
+ template: `
23294
+ // Change: extends EventEmitter
23295
+ // To: extends DisposableEventEmitter
23296
+
23297
+ import { DisposableEventEmitter } from '@/shared/utils';
23298
+
23299
+ // Then implement onDestroy() hook:
23300
+ protected onDestroy(): void {
23301
+ // Custom cleanup logic
23302
+ }
23303
+ `,
23304
+ imports: ['DisposableEventEmitter from "@/shared/utils"'],
23305
+ confidence: 0.9
23306
+ },
23307
+ {
23308
+ id: "wrap_with_timeout",
23309
+ name: "Wrap with withTimeout",
23310
+ description: "Use withTimeout() utility for automatic cleanup",
23311
+ bugType: "promise_timeout_leak",
23312
+ template: `
23313
+ import { withTimeout } from '@/shared/utils';
23314
+
23315
+ // Replace manual timeout handling with:
23316
+ const result = await withTimeout(promise, timeoutMs, {
23317
+ message: 'Operation timed out'
23318
+ });
23319
+ `,
23320
+ imports: ['withTimeout from "@/shared/utils"'],
23321
+ confidence: 0.85
23322
+ }
23323
+ ];
23324
+ var BugFixer = class {
23325
+ templates;
23326
+ backupDir;
23327
+ backups;
23328
+ // filePath -> backupPath
23329
+ constructor(backupDir) {
23330
+ this.templates = /* @__PURE__ */ new Map();
23331
+ this.backupDir = backupDir || join(process.cwd(), ".automatosx", "backups");
23332
+ this.backups = /* @__PURE__ */ new Map();
23333
+ for (const template of DEFAULT_FIX_TEMPLATES) {
23334
+ this.templates.set(template.id, template);
23335
+ }
23336
+ logger.debug("BugFixer initialized", {
23337
+ templateCount: this.templates.size,
23338
+ backupDir: this.backupDir
23339
+ });
23340
+ }
23341
+ /**
23342
+ * Apply a fix for a bug finding
23343
+ *
23344
+ * @param finding - Bug finding to fix
23345
+ * @param rootDir - Root directory of the project
23346
+ * @param dryRun - If true, don't actually modify files
23347
+ * @returns Fix attempt result
23348
+ */
23349
+ async applyFix(finding, rootDir, dryRun = false) {
23350
+ const startTime = Date.now();
23351
+ const attemptId = randomUUID();
23352
+ const filePath = join(rootDir, finding.file);
23353
+ logger.info("Applying fix", {
23354
+ bugId: finding.id,
23355
+ file: finding.file,
23356
+ type: finding.type,
23357
+ dryRun
23358
+ });
23359
+ try {
23360
+ const originalContent = await readFile(filePath, "utf-8");
23361
+ const lines = originalContent.split("\n");
23362
+ const strategy = this.determineStrategy(finding);
23363
+ if (!strategy) {
23364
+ return this.createAttempt(attemptId, finding.id, 1, "manual_review", "", "skipped", startTime, "No automatic fix available");
23365
+ }
23366
+ const { fixedContent, diff } = await this.generateFix(finding, originalContent, lines, strategy);
23367
+ if (!fixedContent || fixedContent === originalContent) {
23368
+ return this.createAttempt(attemptId, finding.id, 1, strategy, "", "skipped", startTime, "No changes needed");
23369
+ }
23370
+ if (dryRun) {
23371
+ logger.info("Dry run - fix not applied", {
23372
+ bugId: finding.id,
23373
+ strategy,
23374
+ diffLength: diff.length
23375
+ });
23376
+ return this.createAttempt(attemptId, finding.id, 1, strategy, diff, "applied", startTime);
23377
+ }
23378
+ await this.createBackup(filePath);
23379
+ await writeFile(filePath, fixedContent, "utf-8");
23380
+ logger.info("Fix applied", {
23381
+ bugId: finding.id,
23382
+ file: finding.file,
23383
+ strategy
23384
+ });
23385
+ return this.createAttempt(attemptId, finding.id, 1, strategy, diff, "applied", startTime);
23386
+ } catch (error) {
23387
+ logger.error("Fix application failed", {
23388
+ bugId: finding.id,
23389
+ file: finding.file,
23390
+ error: error.message
23391
+ });
23392
+ return this.createAttempt(attemptId, finding.id, 1, "unknown", "", "failed", startTime, error.message);
23393
+ }
23394
+ }
23395
+ /**
23396
+ * Rollback a fix
23397
+ *
23398
+ * @param filePath - File to rollback
23399
+ * @returns True if rollback successful
23400
+ */
23401
+ async rollback(filePath) {
23402
+ const backupPath = this.backups.get(filePath);
23403
+ if (!backupPath || !existsSync(backupPath)) {
23404
+ logger.warn("No backup found for rollback", { filePath });
23405
+ return false;
23406
+ }
23407
+ try {
23408
+ await copyFile(backupPath, filePath);
23409
+ await unlink(backupPath);
23410
+ this.backups.delete(filePath);
23411
+ logger.info("Fix rolled back", { filePath });
23412
+ return true;
23413
+ } catch (error) {
23414
+ logger.error("Rollback failed", {
23415
+ filePath,
23416
+ error: error.message
23417
+ });
23418
+ return false;
23419
+ }
23420
+ }
23421
+ /**
23422
+ * Rollback all fixes in this session
23423
+ */
23424
+ async rollbackAll() {
23425
+ let rolledBack = 0;
23426
+ for (const filePath of this.backups.keys()) {
23427
+ if (await this.rollback(filePath)) {
23428
+ rolledBack++;
23429
+ }
23430
+ }
23431
+ logger.info("All fixes rolled back", { count: rolledBack });
23432
+ return rolledBack;
23433
+ }
23434
+ /**
23435
+ * Clean up backups (call after successful verification)
23436
+ */
23437
+ async cleanupBackups() {
23438
+ for (const [filePath, backupPath] of this.backups.entries()) {
23439
+ try {
23440
+ if (existsSync(backupPath)) {
23441
+ await unlink(backupPath);
23442
+ }
23443
+ this.backups.delete(filePath);
23444
+ } catch (error) {
23445
+ logger.warn("Failed to cleanup backup", {
23446
+ filePath,
23447
+ backupPath,
23448
+ error: error.message
23449
+ });
23450
+ }
23451
+ }
23452
+ logger.debug("Backups cleaned up");
23453
+ }
23454
+ /**
23455
+ * Determine fix strategy for a finding
23456
+ */
23457
+ determineStrategy(finding) {
23458
+ if (finding.fixStrategy) {
23459
+ return finding.fixStrategy;
23460
+ }
23461
+ for (const template of this.templates.values()) {
23462
+ if (template.bugType === finding.type) {
23463
+ return template.id;
23464
+ }
23465
+ }
23466
+ const autoFixableTypes = ["timer_leak", "missing_destroy"];
23467
+ if (autoFixableTypes.includes(finding.type)) {
23468
+ return `auto_fix_${finding.type}`;
23469
+ }
23470
+ return null;
23471
+ }
23472
+ /**
23473
+ * Generate fix for a finding
23474
+ */
23475
+ async generateFix(finding, originalContent, lines, strategy) {
23476
+ let fixedContent = originalContent;
23477
+ let diff = "";
23478
+ switch (strategy) {
23479
+ case "add_unref":
23480
+ ({ fixedContent, diff } = this.applyAddUnrefFix(finding, originalContent, lines));
23481
+ break;
23482
+ case "add_destroy_method":
23483
+ ({ fixedContent, diff } = this.applyAddDestroyMethodFix(finding, originalContent, lines));
23484
+ break;
23485
+ case "use_disposable_eventemitter":
23486
+ ({ fixedContent, diff } = this.applyUseDisposableEventEmitterFix(finding, originalContent, lines));
23487
+ break;
23488
+ case "auto_fix_timer_leak":
23489
+ ({ fixedContent, diff } = this.applyAddUnrefFix(finding, originalContent, lines));
23490
+ break;
23491
+ case "auto_fix_missing_destroy":
23492
+ ({ fixedContent, diff } = this.applyAddDestroyMethodFix(finding, originalContent, lines));
23493
+ break;
23494
+ default:
23495
+ logger.warn("Unknown fix strategy", { strategy });
23496
+ }
23497
+ return { fixedContent, diff };
23498
+ }
23499
+ /**
23500
+ * Apply add .unref() fix
23501
+ */
23502
+ applyAddUnrefFix(finding, originalContent, lines) {
23503
+ const lineIndex = finding.lineStart - 1;
23504
+ const line = lines[lineIndex];
23505
+ if (!line) {
23506
+ return { fixedContent: originalContent, diff: "" };
23507
+ }
23508
+ const setIntervalPattern = /(\w+)\s*=\s*setInterval\s*\([^)]+\)\s*;?/;
23509
+ const match = line.match(setIntervalPattern);
23510
+ if (match) {
23511
+ const varName = match[1];
23512
+ const nextLines = lines.slice(lineIndex + 1, lineIndex + 5).join("\n");
23513
+ if (nextLines.includes(`${varName}.unref`) || nextLines.includes(`${varName}?.unref`)) {
23514
+ return { fixedContent: originalContent, diff: "" };
23515
+ }
23516
+ const indent = line.match(/^(\s*)/)?.[1] || "";
23517
+ const unrefLine = `${indent}if (${varName}.unref) ${varName}.unref();`;
23518
+ const newLines = [...lines];
23519
+ newLines.splice(lineIndex + 1, 0, unrefLine);
23520
+ const fixedContent = newLines.join("\n");
23521
+ const diff = `@@ -${finding.lineStart},1 +${finding.lineStart},2 @@
23522
+ ${line}
23523
+ +${unrefLine}`;
23524
+ return { fixedContent, diff };
23525
+ }
23526
+ const directSetIntervalPattern = /setInterval\s*\(/;
23527
+ if (directSetIntervalPattern.test(line)) {
23528
+ line.match(/^(\s*)/)?.[1] || "";
23529
+ const newLine = line.replace(
23530
+ /(setInterval\s*\([^)]+\))/,
23531
+ "const _interval = $1; if (_interval.unref) _interval.unref()"
23532
+ );
23533
+ const newLines = [...lines];
23534
+ newLines[lineIndex] = newLine;
23535
+ const fixedContent = newLines.join("\n");
23536
+ const diff = `@@ -${finding.lineStart},1 +${finding.lineStart},1 @@
23537
+ -${line}
23538
+ +${newLine}`;
23539
+ return { fixedContent, diff };
23540
+ }
23541
+ return { fixedContent: originalContent, diff: "" };
23542
+ }
23543
+ /**
23544
+ * Apply add destroy() method fix
23545
+ */
23546
+ applyAddDestroyMethodFix(finding, originalContent, lines) {
23547
+ const classPattern = /class\s+(\w+)\s+extends\s+(?:EventEmitter|DisposableEventEmitter)/;
23548
+ let classStartLine = -1;
23549
+ for (let i = 0; i < lines.length; i++) {
23550
+ const currentLine = lines[i];
23551
+ if (!currentLine) continue;
23552
+ const match = currentLine.match(classPattern);
23553
+ if (match && match[1]) {
23554
+ classStartLine = i;
23555
+ match[1];
23556
+ break;
23557
+ }
23558
+ }
23559
+ if (classStartLine === -1) {
23560
+ return { fixedContent: originalContent, diff: "" };
23561
+ }
23562
+ let braceCount = 0;
23563
+ let classEndLine = -1;
23564
+ for (let i = classStartLine; i < lines.length; i++) {
23565
+ const line = lines[i];
23566
+ if (!line) continue;
23567
+ braceCount += (line.match(/{/g) || []).length;
23568
+ braceCount -= (line.match(/}/g) || []).length;
23569
+ if (braceCount === 0 && i > classStartLine) {
23570
+ classEndLine = i;
23571
+ break;
23572
+ }
23573
+ }
23574
+ if (classEndLine === -1) {
23575
+ return { fixedContent: originalContent, diff: "" };
23576
+ }
23577
+ let indent = " ";
23578
+ for (let i = classStartLine + 1; i < classEndLine; i++) {
23579
+ const indentLine = lines[i];
23580
+ if (!indentLine) continue;
23581
+ const indentMatch = indentLine.match(/^(\s+)\S/);
23582
+ if (indentMatch && indentMatch[1]) {
23583
+ indent = indentMatch[1];
23584
+ break;
23585
+ }
23586
+ }
23587
+ const destroyMethod = [
23588
+ "",
23589
+ `${indent}/**`,
23590
+ `${indent} * Clean up resources and remove all event listeners.`,
23591
+ `${indent} */`,
23592
+ `${indent}destroy(): void {`,
23593
+ `${indent} this.removeAllListeners();`,
23594
+ `${indent}}`
23595
+ ].join("\n");
23596
+ const newLines = [...lines];
23597
+ newLines.splice(classEndLine, 0, destroyMethod);
23598
+ const fixedContent = newLines.join("\n");
23599
+ const diff = `@@ -${classEndLine + 1},1 +${classEndLine + 1},8 @@
23600
+ +${destroyMethod}
23601
+ ${lines[classEndLine]}`;
23602
+ return { fixedContent, diff };
23603
+ }
23604
+ /**
23605
+ * Apply use DisposableEventEmitter fix
23606
+ */
23607
+ applyUseDisposableEventEmitterFix(finding, originalContent, lines) {
23608
+ let fixedContent = originalContent.replace(
23609
+ /extends\s+EventEmitter\b/g,
23610
+ "extends DisposableEventEmitter"
23611
+ );
23612
+ if (!originalContent.includes("DisposableEventEmitter")) {
23613
+ const importPattern = /^import\s+.*from\s+['"][^'"]+['"];?\s*$/m;
23614
+ const lastImportMatch = originalContent.match(new RegExp(importPattern.source + "(?!.*" + importPattern.source + ")", "s"));
23615
+ if (lastImportMatch) {
23616
+ const importStatement = `import { DisposableEventEmitter } from '@/shared/utils';`;
23617
+ const insertPos = lastImportMatch.index + lastImportMatch[0].length;
23618
+ fixedContent = fixedContent.slice(0, insertPos) + "\n" + importStatement + fixedContent.slice(insertPos);
23619
+ }
23620
+ }
23621
+ const diff = '--- EventEmitter\n+++ DisposableEventEmitter\n+ import { DisposableEventEmitter } from "@/shared/utils";';
23622
+ return { fixedContent, diff };
23623
+ }
23624
+ /**
23625
+ * Create backup of a file
23626
+ */
23627
+ async createBackup(filePath) {
23628
+ if (!existsSync(this.backupDir)) {
23629
+ await mkdir(this.backupDir, { recursive: true });
23630
+ }
23631
+ const backupName = `${basename(filePath)}.${Date.now()}.bak`;
23632
+ const backupPath = join(this.backupDir, backupName);
23633
+ await copyFile(filePath, backupPath);
23634
+ this.backups.set(filePath, backupPath);
23635
+ logger.debug("Backup created", { filePath, backupPath });
23636
+ return backupPath;
23637
+ }
23638
+ /**
23639
+ * Create a fix attempt result
23640
+ */
23641
+ createAttempt(id, bugId, attemptNumber, strategy, diff, status, startTime, error) {
23642
+ return {
23643
+ id,
23644
+ bugId,
23645
+ attemptNumber,
23646
+ strategy,
23647
+ diff,
23648
+ status,
23649
+ error,
23650
+ attemptedAt: (/* @__PURE__ */ new Date()).toISOString(),
23651
+ durationMs: Date.now() - startTime
23652
+ };
23653
+ }
23654
+ /**
23655
+ * Add a custom fix template
23656
+ */
23657
+ addTemplate(template) {
23658
+ this.templates.set(template.id, template);
23659
+ logger.debug("Fix template added", { templateId: template.id });
23660
+ }
23661
+ /**
23662
+ * Get all fix templates
23663
+ */
23664
+ getTemplates() {
23665
+ return Array.from(this.templates.values());
23666
+ }
23667
+ };
23668
+
23669
+ // src/core/bugfix/verification-gate.ts
23670
+ init_esm_shims();
23671
+ init_logger();
23672
+ var DEFAULT_OPTIONS3 = {
23673
+ typecheck: true,
23674
+ tests: true,
23675
+ checkNewErrors: true,
23676
+ checkCoverage: false,
23677
+ timeout: 12e4,
23678
+ // 2 minutes
23679
+ testCommand: "npm test",
23680
+ typecheckCommand: "npm run typecheck",
23681
+ cwd: process.cwd()
23682
+ };
23683
+ var VerificationGate = class {
23684
+ options;
23685
+ constructor(options) {
23686
+ this.options = { ...DEFAULT_OPTIONS3, ...options };
23687
+ logger.debug("VerificationGate initialized", {
23688
+ typecheck: this.options.typecheck,
23689
+ tests: this.options.tests,
23690
+ timeout: this.options.timeout
23691
+ });
23692
+ }
23693
+ /**
23694
+ * Verify a fix passes all gates
23695
+ *
23696
+ * @param finding - Bug finding that was fixed
23697
+ * @param affectedFiles - Files affected by the fix
23698
+ * @returns Verification result
23699
+ */
23700
+ async verify(finding, affectedFiles) {
23701
+ const startTime = Date.now();
23702
+ logger.info("Starting verification", {
23703
+ bugId: finding.id,
23704
+ file: finding.file,
23705
+ affectedFiles
23706
+ });
23707
+ const result = {
23708
+ success: true,
23709
+ typecheckPassed: true,
23710
+ testsPassed: true,
23711
+ noNewErrors: true,
23712
+ affectedTests: [],
23713
+ failedTests: [],
23714
+ newErrors: [],
23715
+ durationMs: 0
23716
+ };
23717
+ try {
23718
+ if (this.options.typecheck) {
23719
+ logger.debug("Running typecheck gate");
23720
+ const typecheckResult = await this.runTypecheck();
23721
+ result.typecheckPassed = typecheckResult.success;
23722
+ if (!typecheckResult.success) {
23723
+ result.success = false;
23724
+ result.newErrors = typecheckResult.errors;
23725
+ logger.warn("Typecheck failed", {
23726
+ bugId: finding.id,
23727
+ errors: typecheckResult.errors.slice(0, 5)
23728
+ });
23729
+ } else {
23730
+ logger.debug("Typecheck passed");
23731
+ }
23732
+ }
23733
+ if (this.options.tests && result.typecheckPassed) {
23734
+ logger.debug("Running test gate");
23735
+ const testResult = await this.runTests(affectedFiles);
23736
+ result.testsPassed = testResult.success;
23737
+ result.affectedTests = testResult.affectedTests;
23738
+ result.failedTests = testResult.failedTests;
23739
+ if (!testResult.success) {
23740
+ result.success = false;
23741
+ logger.warn("Tests failed", {
23742
+ bugId: finding.id,
23743
+ failed: testResult.failedTests.slice(0, 5)
23744
+ });
23745
+ } else {
23746
+ logger.debug("Tests passed", { count: testResult.affectedTests.length });
23747
+ }
23748
+ }
23749
+ if (this.options.checkNewErrors && result.typecheckPassed && result.testsPassed) {
23750
+ result.noNewErrors = true;
23751
+ }
23752
+ if (this.options.checkCoverage && result.success) {
23753
+ result.coverageMaintained = true;
23754
+ }
23755
+ } catch (error) {
23756
+ result.success = false;
23757
+ result.newErrors = [error.message];
23758
+ logger.error("Verification error", {
23759
+ bugId: finding.id,
23760
+ error: error.message
23761
+ });
23762
+ }
23763
+ result.durationMs = Date.now() - startTime;
23764
+ logger.info("Verification complete", {
23765
+ bugId: finding.id,
23766
+ success: result.success,
23767
+ typecheckPassed: result.typecheckPassed,
23768
+ testsPassed: result.testsPassed,
23769
+ durationMs: result.durationMs
23770
+ });
23771
+ return result;
23772
+ }
23773
+ /**
23774
+ * Run TypeScript typecheck
23775
+ */
23776
+ async runTypecheck() {
23777
+ return this.runCommand(this.options.typecheckCommand, "typecheck");
23778
+ }
23779
+ /**
23780
+ * Run tests for affected files
23781
+ */
23782
+ async runTests(affectedFiles) {
23783
+ const result = await this.runCommand(this.options.testCommand, "test");
23784
+ return {
23785
+ success: result.success,
23786
+ affectedTests: affectedFiles.map((f) => `${f} tests`),
23787
+ failedTests: result.success ? [] : result.errors
23788
+ };
23789
+ }
23790
+ /**
23791
+ * Run a shell command
23792
+ */
23793
+ async runCommand(command, name) {
23794
+ return new Promise((resolve5) => {
23795
+ const parts = command.split(" ");
23796
+ const cmd = parts[0];
23797
+ const args2 = parts.slice(1);
23798
+ const errors = [];
23799
+ let stderr = "";
23800
+ if (!cmd) {
23801
+ resolve5({ success: false, errors: ["Empty command"] });
23802
+ return;
23803
+ }
23804
+ logger.debug(`Running ${name}`, { command });
23805
+ const proc = spawn(cmd, args2, {
23806
+ cwd: this.options.cwd,
23807
+ shell: true,
23808
+ stdio: ["ignore", "pipe", "pipe"]
23809
+ });
23810
+ const timeoutId = setTimeout(() => {
23811
+ proc.kill("SIGTERM");
23812
+ errors.push(`${name} timed out after ${this.options.timeout}ms`);
23813
+ }, this.options.timeout);
23814
+ if (timeoutId.unref) {
23815
+ timeoutId.unref();
23816
+ }
23817
+ proc.stderr?.on("data", (data) => {
23818
+ stderr += data.toString();
23819
+ });
23820
+ proc.on("close", (code) => {
23821
+ clearTimeout(timeoutId);
23822
+ if (code === 0) {
23823
+ resolve5({ success: true, errors: [] });
23824
+ } else {
23825
+ const errorLines = stderr.split("\n").filter((line) => line.includes("error") || line.includes("Error") || line.includes("FAIL")).slice(0, 10);
23826
+ resolve5({
23827
+ success: false,
23828
+ errors: errorLines.length > 0 ? errorLines : [`${name} failed with exit code ${code}`]
23829
+ });
23830
+ }
23831
+ });
23832
+ proc.on("error", (err) => {
23833
+ clearTimeout(timeoutId);
23834
+ resolve5({
23835
+ success: false,
23836
+ errors: [err.message]
23837
+ });
23838
+ });
23839
+ });
23840
+ }
23841
+ /**
23842
+ * Quick verification (typecheck only)
23843
+ */
23844
+ async quickVerify(finding) {
23845
+ const result = await this.runTypecheck();
23846
+ return result.success;
23847
+ }
23848
+ /**
23849
+ * Full verification (all gates)
23850
+ */
23851
+ async fullVerify(finding, affectedFiles) {
23852
+ return this.verify(finding, affectedFiles);
23853
+ }
23854
+ };
23855
+
23856
+ // src/core/bugfix/bugfix-controller.ts
23857
+ var BugfixController = class {
23858
+ config;
23859
+ rootDir;
23860
+ state = "IDLE";
23861
+ sessionId;
23862
+ startTime = 0;
23863
+ totalTokens = 0;
23864
+ // Components
23865
+ detector;
23866
+ fixer;
23867
+ verifier;
23868
+ // Session data
23869
+ findings = [];
23870
+ attempts = [];
23871
+ currentBugIndex = 0;
23872
+ retryCount = /* @__PURE__ */ new Map();
23873
+ // Callbacks
23874
+ onProgress;
23875
+ onBugFound;
23876
+ onFixApplied;
23877
+ onVerification;
23878
+ constructor(options = {}) {
23879
+ this.config = createDefaultBugfixConfig(options.config);
23880
+ this.rootDir = options.rootDir || process.cwd();
23881
+ this.sessionId = randomUUID();
23882
+ this.detector = new BugDetector(this.config);
23883
+ this.fixer = new BugFixer();
23884
+ this.verifier = new VerificationGate({
23885
+ typecheck: this.config.requireTypecheck,
23886
+ tests: this.config.requireTests,
23887
+ cwd: this.rootDir
23888
+ });
23889
+ this.onProgress = options.onProgress;
23890
+ this.onBugFound = options.onBugFound;
23891
+ this.onFixApplied = options.onFixApplied;
23892
+ this.onVerification = options.onVerification;
23893
+ logger.debug("BugfixController initialized", {
23894
+ sessionId: this.sessionId,
23895
+ rootDir: this.rootDir,
23896
+ config: this.config
23897
+ });
23898
+ }
23899
+ /**
23900
+ * Execute autonomous bugfix workflow
23901
+ *
23902
+ * @returns Bugfix session result
23903
+ */
23904
+ async execute() {
23905
+ this.startTime = Date.now();
23906
+ this.state = "SCANNING";
23907
+ logger.info("Starting bugfix session", {
23908
+ sessionId: this.sessionId,
23909
+ rootDir: this.rootDir,
23910
+ maxBugs: this.config.maxBugs,
23911
+ dryRun: this.config.dryRun
23912
+ });
23913
+ this.emitProgress("Starting bug scan...");
23914
+ try {
23915
+ while (this.shouldContinue()) {
23916
+ const currentState = this.state;
23917
+ if (currentState === "IDLE") {
23918
+ this.state = "SCANNING";
23919
+ } else if (currentState === "SCANNING") {
23920
+ await this.handleScanning();
23921
+ } else if (currentState === "ANALYZING") {
23922
+ await this.handleAnalyzing();
23923
+ } else if (currentState === "PLANNING") {
23924
+ await this.handlePlanning();
23925
+ } else if (currentState === "FIXING") {
23926
+ await this.handleFixing();
23927
+ } else if (currentState === "VERIFYING") {
23928
+ await this.handleVerifying();
23929
+ } else if (currentState === "LEARNING") {
23930
+ await this.handleLearning();
23931
+ } else if (currentState === "ITERATING") {
23932
+ await this.handleIterating();
23933
+ } else if (currentState === "COMPLETE" || currentState === "FAILED") {
23934
+ break;
23935
+ }
23936
+ }
23937
+ return this.buildResult();
23938
+ } catch (error) {
23939
+ this.state = "FAILED";
23940
+ logger.error("Bugfix session failed", {
23941
+ sessionId: this.sessionId,
23942
+ error: error.message,
23943
+ state: this.state
23944
+ });
23945
+ return this.buildResult(error.message);
23946
+ }
23947
+ }
23948
+ /**
23949
+ * Check if execution should continue
23950
+ */
23951
+ shouldContinue() {
23952
+ if (this.state === "COMPLETE" || this.state === "FAILED") {
23953
+ return false;
23954
+ }
23955
+ const elapsedMinutes = (Date.now() - this.startTime) / 1e3 / 60;
23956
+ if (elapsedMinutes >= this.config.maxDurationMinutes) {
23957
+ logger.warn("Time limit exceeded", {
23958
+ elapsed: elapsedMinutes,
23959
+ limit: this.config.maxDurationMinutes
23960
+ });
23961
+ this.state = "COMPLETE";
23962
+ return false;
23963
+ }
23964
+ if (this.totalTokens >= this.config.maxTokens) {
23965
+ logger.warn("Token limit exceeded", {
23966
+ tokens: this.totalTokens,
23967
+ limit: this.config.maxTokens
23968
+ });
23969
+ this.state = "COMPLETE";
23970
+ return false;
23971
+ }
23972
+ const fixedCount = this.attempts.filter((a) => a.status === "verified").length;
23973
+ if (fixedCount >= this.config.maxBugs) {
23974
+ logger.info("Max bugs fixed", {
23975
+ fixed: fixedCount,
23976
+ limit: this.config.maxBugs
23977
+ });
23978
+ this.state = "COMPLETE";
23979
+ return false;
23980
+ }
23981
+ return true;
23982
+ }
23983
+ /**
23984
+ * Handle SCANNING state
23985
+ */
23986
+ async handleScanning() {
23987
+ this.emitProgress("Scanning for bugs...");
23988
+ this.findings = await this.detector.scan(this.rootDir);
23989
+ if (this.findings.length === 0) {
23990
+ this.emitProgress("No bugs found!");
23991
+ this.state = "COMPLETE";
23992
+ return;
23993
+ }
23994
+ this.emitProgress(`Found ${this.findings.length} bugs`);
23995
+ for (const finding of this.findings) {
23996
+ this.onBugFound?.(finding);
23997
+ }
23998
+ this.state = "ANALYZING";
23999
+ }
24000
+ /**
24001
+ * Handle ANALYZING state
24002
+ */
24003
+ async handleAnalyzing() {
24004
+ this.emitProgress("Analyzing bugs...");
24005
+ this.findings = this.findings.filter((f) => f.confidence >= this.config.minConfidence);
24006
+ if (this.findings.length === 0) {
24007
+ this.emitProgress("No bugs above confidence threshold");
24008
+ this.state = "COMPLETE";
24009
+ return;
24010
+ }
24011
+ this.emitProgress(`${this.findings.length} bugs to fix`);
24012
+ this.currentBugIndex = 0;
24013
+ this.state = "PLANNING";
24014
+ }
24015
+ /**
24016
+ * Handle PLANNING state
24017
+ */
24018
+ async handlePlanning() {
24019
+ const finding = this.findings[this.currentBugIndex];
24020
+ if (!finding) {
24021
+ this.state = "COMPLETE";
24022
+ return;
24023
+ }
24024
+ this.emitProgress(`Planning fix for bug ${this.currentBugIndex + 1}/${this.findings.length}`, {
24025
+ file: finding.file,
24026
+ type: finding.type,
24027
+ severity: finding.severity
24028
+ });
24029
+ if (!finding.fixStrategy) {
24030
+ logger.info("Bug requires manual review", {
24031
+ bugId: finding.id,
24032
+ type: finding.type
24033
+ });
24034
+ const skippedAttempt = {
24035
+ id: randomUUID(),
24036
+ bugId: finding.id,
24037
+ attemptNumber: 1,
24038
+ strategy: "manual_review",
24039
+ diff: "",
24040
+ status: "skipped",
24041
+ error: "No automatic fix available",
24042
+ attemptedAt: (/* @__PURE__ */ new Date()).toISOString(),
24043
+ durationMs: 0
24044
+ };
24045
+ this.attempts.push(skippedAttempt);
24046
+ this.currentBugIndex++;
24047
+ if (this.currentBugIndex >= this.findings.length) {
24048
+ this.state = "COMPLETE";
24049
+ }
24050
+ return;
24051
+ }
24052
+ this.state = "FIXING";
24053
+ }
24054
+ /**
24055
+ * Handle FIXING state
24056
+ */
24057
+ async handleFixing() {
24058
+ const finding = this.findings[this.currentBugIndex];
24059
+ if (!finding) {
24060
+ this.state = "COMPLETE";
24061
+ return;
24062
+ }
24063
+ this.emitProgress(`Fixing: ${finding.file}:${finding.lineStart}`, {
24064
+ type: finding.type,
24065
+ strategy: finding.fixStrategy
24066
+ });
24067
+ const attempt = await this.fixer.applyFix(finding, this.rootDir, this.config.dryRun);
24068
+ this.attempts.push(attempt);
24069
+ this.onFixApplied?.(finding, attempt);
24070
+ if (attempt.status === "applied") {
24071
+ this.state = "VERIFYING";
24072
+ } else if (attempt.status === "skipped") {
24073
+ this.emitProgress(`Skipped: ${attempt.error || "No changes needed"}`);
24074
+ this.currentBugIndex++;
24075
+ this.state = this.currentBugIndex >= this.findings.length ? "COMPLETE" : "PLANNING";
24076
+ } else {
24077
+ this.state = "ITERATING";
24078
+ }
24079
+ }
24080
+ /**
24081
+ * Handle VERIFYING state
24082
+ */
24083
+ async handleVerifying() {
24084
+ const finding = this.findings[this.currentBugIndex];
24085
+ const lastAttempt = this.attempts[this.attempts.length - 1];
24086
+ if (!finding || !lastAttempt) {
24087
+ this.state = "COMPLETE";
24088
+ return;
24089
+ }
24090
+ this.emitProgress(`Verifying fix for ${finding.file}...`);
24091
+ if (!this.config.dryRun) {
24092
+ const result = await this.verifier.verify(finding, [finding.file]);
24093
+ lastAttempt.verificationResult = result;
24094
+ this.onVerification?.(finding, result.success);
24095
+ if (result.success) {
24096
+ lastAttempt.status = "verified";
24097
+ this.emitProgress("Fix verified!", {
24098
+ typecheck: result.typecheckPassed,
24099
+ tests: result.testsPassed
24100
+ });
24101
+ this.state = "LEARNING";
24102
+ } else {
24103
+ lastAttempt.status = "failed";
24104
+ lastAttempt.error = result.newErrors.join("; ") || "Verification failed";
24105
+ this.emitProgress("Verification failed, rolling back...", {
24106
+ errors: result.newErrors
24107
+ });
24108
+ await this.fixer.rollback(finding.file);
24109
+ this.state = "ITERATING";
24110
+ }
24111
+ } else {
24112
+ lastAttempt.status = "verified";
24113
+ this.emitProgress("Dry run - skipping verification");
24114
+ this.state = "LEARNING";
24115
+ }
24116
+ }
24117
+ /**
24118
+ * Handle LEARNING state
24119
+ */
24120
+ async handleLearning() {
24121
+ const finding = this.findings[this.currentBugIndex];
24122
+ if (!finding) {
24123
+ this.state = "COMPLETE";
24124
+ return;
24125
+ }
24126
+ this.emitProgress("Storing pattern to knowledge base...");
24127
+ logger.info("Pattern learned", {
24128
+ bugId: finding.id,
24129
+ type: finding.type,
24130
+ file: finding.file
24131
+ });
24132
+ this.currentBugIndex++;
24133
+ if (this.currentBugIndex >= this.findings.length) {
24134
+ this.state = "COMPLETE";
24135
+ } else {
24136
+ this.state = "PLANNING";
24137
+ }
24138
+ }
24139
+ /**
24140
+ * Handle ITERATING state (retry logic)
24141
+ */
24142
+ async handleIterating() {
24143
+ const finding = this.findings[this.currentBugIndex];
24144
+ if (!finding) {
24145
+ this.state = "COMPLETE";
24146
+ return;
24147
+ }
24148
+ const currentRetries = this.retryCount.get(finding.id) || 0;
24149
+ if (currentRetries >= this.config.maxRetriesPerBug) {
24150
+ this.emitProgress(`Max retries reached for ${finding.file}`, {
24151
+ retries: currentRetries
24152
+ });
24153
+ this.currentBugIndex++;
24154
+ if (this.currentBugIndex >= this.findings.length) {
24155
+ this.state = "COMPLETE";
24156
+ } else {
24157
+ this.state = "PLANNING";
24158
+ }
24159
+ return;
24160
+ }
24161
+ this.retryCount.set(finding.id, currentRetries + 1);
24162
+ this.emitProgress(`Retrying fix (attempt ${currentRetries + 2})...`);
24163
+ this.state = "FIXING";
24164
+ }
24165
+ /**
24166
+ * Build final result
24167
+ */
24168
+ buildResult(error) {
24169
+ const stats = this.calculateStats();
24170
+ return {
24171
+ sessionId: this.sessionId,
24172
+ startedAt: new Date(this.startTime).toISOString(),
24173
+ endedAt: (/* @__PURE__ */ new Date()).toISOString(),
24174
+ config: this.config,
24175
+ findings: this.findings,
24176
+ attempts: this.attempts,
24177
+ stats,
24178
+ finalState: this.state,
24179
+ error
24180
+ };
24181
+ }
24182
+ /**
24183
+ * Calculate session statistics
24184
+ */
24185
+ calculateStats() {
24186
+ const verified = this.attempts.filter((a) => a.status === "verified").length;
24187
+ const failed = this.attempts.filter((a) => a.status === "failed").length;
24188
+ const skipped = this.attempts.filter((a) => a.status === "skipped").length;
24189
+ const bugsByType = {
24190
+ timer_leak: 0,
24191
+ missing_destroy: 0,
24192
+ promise_timeout_leak: 0,
24193
+ event_leak: 0,
24194
+ resource_leak: 0,
24195
+ race_condition: 0,
24196
+ memory_leak: 0,
24197
+ uncaught_promise: 0,
24198
+ deprecated_api: 0,
24199
+ security_issue: 0,
24200
+ type_error: 0,
24201
+ test_failure: 0,
24202
+ custom: 0
24203
+ };
24204
+ for (const finding of this.findings) {
24205
+ bugsByType[finding.type] = (bugsByType[finding.type] || 0) + 1;
24206
+ }
24207
+ const bugsBySeverity = {
24208
+ low: 0,
24209
+ medium: 0,
24210
+ high: 0,
24211
+ critical: 0
24212
+ };
24213
+ for (const finding of this.findings) {
24214
+ bugsBySeverity[finding.severity] = (bugsBySeverity[finding.severity] || 0) + 1;
24215
+ }
24216
+ let stopReason = "complete";
24217
+ const elapsedMinutes = (Date.now() - this.startTime) / 1e3 / 60;
24218
+ if (this.state === "FAILED") {
24219
+ stopReason = "error";
24220
+ } else if (verified >= this.config.maxBugs) {
24221
+ stopReason = "max_bugs";
24222
+ } else if (elapsedMinutes >= this.config.maxDurationMinutes) {
24223
+ stopReason = "max_time";
24224
+ } else if (this.totalTokens >= this.config.maxTokens) {
24225
+ stopReason = "max_tokens";
24226
+ }
24227
+ return {
24228
+ bugsFound: this.findings.length,
24229
+ bugsFixed: verified,
24230
+ bugsFailed: failed,
24231
+ bugsSkipped: skipped,
24232
+ totalAttempts: this.attempts.length,
24233
+ successRate: this.attempts.length > 0 ? verified / this.attempts.length : 0,
24234
+ totalDurationMs: Date.now() - this.startTime,
24235
+ totalTokens: this.totalTokens,
24236
+ patternsLearned: verified,
24237
+ // Each verified fix is a learned pattern
24238
+ regressions: 0,
24239
+ // Should always be 0 with verification
24240
+ stopReason,
24241
+ bugsByType,
24242
+ bugsBySeverity
24243
+ };
24244
+ }
24245
+ /**
24246
+ * Emit progress update
24247
+ */
24248
+ emitProgress(message, data) {
24249
+ logger.info(message, { sessionId: this.sessionId, ...data });
24250
+ this.onProgress?.(message, data);
24251
+ }
24252
+ /**
24253
+ * Get current state
24254
+ */
24255
+ getState() {
24256
+ return this.state;
24257
+ }
24258
+ /**
24259
+ * Get current statistics
24260
+ */
24261
+ getStats() {
24262
+ return this.calculateStats();
24263
+ }
24264
+ /**
24265
+ * Stop execution
24266
+ */
24267
+ async stop() {
24268
+ logger.info("Stopping bugfix session", { sessionId: this.sessionId });
24269
+ this.state = "COMPLETE";
24270
+ await this.fixer.cleanupBackups();
24271
+ }
24272
+ };
24273
+
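The BugfixController added above is a self-contained state machine; a minimal usage sketch follows, assuming the same constructor options that createBugfixRunHandler passes further down in this diff. The import path is an assumption, since only the bundled dist output is shown here.

// Hedged sketch (not part of the package): driving a BugfixController directly.
// Option names mirror createBugfixRunHandler below; the import path is assumed.
import { BugfixController } from "@defai.digital/automatosx";

const controller = new BugfixController({
  rootDir: process.cwd(),
  config: { maxBugs: 5, dryRun: true, requireTypecheck: true },
  onProgress: (message) => console.log("[bugfix]", message),
  onBugFound: (finding) => console.log("found:", finding.type, finding.file)
});

const result = await controller.execute(); // top-level await (ESM)
console.log(`${result.stats.bugsFixed}/${result.stats.bugsFound} fixed`, result.finalState);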
24274
+ // src/mcp/tools/bugfix-run.ts
24275
+ function createBugfixRunHandler() {
24276
+ return async (input) => {
24277
+ logger.info("[MCP] bugfix_run called", {
24278
+ path: input.path,
24279
+ maxBugs: input.maxBugs,
24280
+ dryRun: input.dryRun
24281
+ });
24282
+ const progressToken = sendMcpProgressBegin("Bugfix", "Starting bug scan...");
24283
+ try {
24284
+ const controller = new BugfixController({
24285
+ rootDir: input.path || process.cwd(),
24286
+ config: {
24287
+ bugTypes: input.types,
24288
+ maxBugs: input.maxBugs || 10,
24289
+ dryRun: input.dryRun || false,
24290
+ excludePatterns: input.excludePatterns,
24291
+ requireTypecheck: input.requireTypecheck ?? true,
24292
+ requireTests: input.requireTests ?? false
24293
+ },
24294
+ // v12.5.5: Stream brief progress updates to MCP client
24295
+ onProgress: (message) => {
24296
+ sendMcpProgress(message, progressToken);
24297
+ },
24298
+ onBugFound: (finding) => {
24299
+ sendMcpProgress(`Found: ${finding.type} in ${finding.file}`, progressToken);
24300
+ },
24301
+ onFixApplied: (finding, attempt) => {
24302
+ const status = attempt.status === "applied" ? "\u2713" : "\u25CB";
24303
+ sendMcpProgress(`${status} Fix applied: ${finding.file}:${finding.lineStart}`, progressToken);
24304
+ }
24305
+ });
24306
+ const result = await controller.execute();
24307
+ const fixed = result.attempts.filter((a) => a.status === "verified").map((a) => {
24308
+ const finding = result.findings.find((f) => f.id === a.bugId);
24309
+ return {
24310
+ file: finding?.file || "unknown",
24311
+ line: finding?.lineStart || 0,
24312
+ type: finding?.type || "custom",
24313
+ message: finding?.message || "Fixed"
24314
+ };
24315
+ });
24316
+ const output = {
24317
+ sessionId: result.sessionId,
24318
+ bugsFound: result.stats.bugsFound,
24319
+ bugsFixed: result.stats.bugsFixed,
24320
+ bugsFailed: result.stats.bugsFailed,
24321
+ bugsSkipped: result.stats.bugsSkipped,
24322
+ successRate: result.stats.successRate,
24323
+ durationMs: result.stats.totalDurationMs,
24324
+ finalState: result.finalState,
24325
+ bySeverity: result.stats.bugsBySeverity,
24326
+ fixed,
24327
+ error: result.error
24328
+ };
24329
+ logger.info("[MCP] bugfix_run completed", {
24330
+ sessionId: output.sessionId,
24331
+ bugsFixed: output.bugsFixed,
24332
+ durationMs: output.durationMs
24333
+ });
24334
+ sendMcpProgressEnd(progressToken, `Completed: ${output.bugsFixed} bugs fixed`);
24335
+ return output;
24336
+ } catch (error) {
24337
+ logger.error("[MCP] bugfix_run failed", { error });
24338
+ sendMcpProgressEnd(progressToken, `Error: ${error.message}`);
24339
+ throw new Error(`Bugfix run failed: ${error.message}`);
24340
+ }
24341
+ };
24342
+ }
24343
+ var bugfixRunSchema = {
24344
+ name: "bugfix_run",
24345
+ description: `Run autonomous bug-fixing workflow.
24346
+
24347
+ **Time limit**: Max 45 minutes. You can stop anytime.
24348
+ **Progress**: Streamed notifications show what's happening.
24349
+
24350
+ Workflow:
24351
+ 1. Scans codebase for bugs
24352
+ 2. Prioritizes by severity
24353
+ 3. Applies fixes with backups
24354
+ 4. Verifies via typecheck/tests
24355
+ 5. Rolls back failed fixes
24356
+
24357
+ Safety: Backups created, dry-run available, stops at limits.`,
24358
+ inputSchema: {
24359
+ type: "object",
24360
+ properties: {
24361
+ path: {
24362
+ type: "string",
24363
+ description: "Directory to scan (default: current directory)"
24364
+ },
24365
+ types: {
24366
+ type: "array",
24367
+ items: {
24368
+ type: "string",
24369
+ enum: ["timer_leak", "missing_destroy", "promise_timeout_leak", "event_leak", "resource_leak", "race_condition", "memory_leak", "uncaught_promise", "deprecated_api", "security_issue", "type_error", "test_failure", "custom"]
24370
+ },
24371
+ description: "Bug types to fix (default: all)"
24372
+ },
24373
+ maxBugs: {
24374
+ type: "integer",
24375
+ minimum: 1,
24376
+ maximum: 100,
24377
+ description: "Maximum bugs to fix (default: 10)"
24378
+ },
24379
+ dryRun: {
24380
+ type: "boolean",
24381
+ description: "Preview fixes without applying (default: false)"
24382
+ },
24383
+ includePatterns: {
24384
+ type: "array",
24385
+ items: { type: "string" },
24386
+ description: "File patterns to include"
24387
+ },
24388
+ excludePatterns: {
24389
+ type: "array",
24390
+ items: { type: "string" },
24391
+ description: "File patterns to exclude"
24392
+ },
24393
+ requireTypecheck: {
24394
+ type: "boolean",
24395
+ description: "Require typecheck after each fix (default: true)"
24396
+ },
24397
+ requireTests: {
24398
+ type: "boolean",
24399
+ description: "Require tests to pass after fixes (default: false)"
24400
+ }
24401
+ }
24402
+ }
24403
+ };
24404
+
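Because bugfix_run is now registered as an MCP tool (see the registration near the end of this diff), clients invoke it over JSON-RPC. A hedged sketch of such a request is shown below: the argument names come from the inputSchema above, the tools/call framing follows the standard MCP shape, and all values are illustrative.

// Hedged sketch of an MCP client request for the tool defined above.
// Argument names come from bugfixRunSchema.inputSchema; values are illustrative.
const bugfixRunRequest = {
  jsonrpc: "2.0",
  id: 1,
  method: "tools/call",
  params: {
    name: "bugfix_run",
    arguments: {
      path: "./src",
      types: ["timer_leak", "event_leak"],
      maxBugs: 5,
      dryRun: true,
      requireTypecheck: true
    }
  }
};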
24405
+ // src/mcp/tools/get-capabilities.ts
24406
+ init_esm_shims();
24407
+ init_logger();
24408
+ function categorizeTools(name) {
24409
+ if (name.startsWith("memory_") || name === "search_memory") return "memory";
24410
+ if (name.startsWith("session_")) return "session";
24411
+ if (name.startsWith("create_task") || name.startsWith("run_task") || name.startsWith("get_task") || name.startsWith("list_task") || name.startsWith("delete_task")) return "task";
24412
+ if (name === "get_capabilities" || name === "list_agents" || name === "get_status" || name === "get_agent_context") return "discovery";
24413
+ if (name.includes("context")) return "context";
24414
+ return "execution";
24415
+ }
24416
+ function getExecutionMode(providerName, _providerConfig) {
24417
+ if (providerName === "glm" || providerName === "grok") {
24418
+ return "sdk";
24419
+ }
24420
+ if (providerName === "claude-code" || providerName === "gemini-cli") {
24421
+ return "cli";
24422
+ }
24423
+ if (providerName === "openai") {
24424
+ return "hybrid";
24425
+ }
24426
+ return "cli";
24427
+ }
24428
+ function getProviderType(providerName) {
24429
+ if (providerName === "glm" || providerName === "grok") return "sdk";
24430
+ if (providerName === "openai") return "hybrid";
24431
+ return "cli";
24432
+ }
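The three helpers above encode fixed name-to-category mappings; assuming they are in scope, the expected results are:

// Expected results of the helper functions defined directly above.
categorizeTools("memory_add");       // "memory"
categorizeTools("session_create");   // "session"
categorizeTools("get_capabilities"); // "discovery"
getExecutionMode("gemini-cli");      // "cli"
getExecutionMode("openai");          // "hybrid"
getProviderType("glm");              // "sdk"
getProviderType("claude-code");      // "cli"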
24433
+ function createGetCapabilitiesHandler(deps) {
24434
+ return async () => {
24435
+ logger.info("[MCP] get_capabilities called");
24436
+ try {
24437
+ const projectDir = process.cwd();
24438
+ const config = await loadConfig(projectDir);
24439
+ const version = getVersion();
24440
+ const providers = [];
24441
+ const providerConfigs = config.providers || {};
24442
+ for (const [name, providerConfig] of Object.entries(providerConfigs)) {
24443
+ const cfg = providerConfig;
24444
+ const enabled = cfg.enabled === true;
24445
+ let available = false;
24446
+ try {
24447
+ const availableProviders = await deps.router.getAvailableProviders();
24448
+ available = availableProviders.some((p) => p.name === name);
24449
+ } catch {
24450
+ available = enabled;
24451
+ }
24452
+ providers.push({
24453
+ name,
24454
+ enabled,
24455
+ available,
24456
+ type: getProviderType(name),
24457
+ executionMode: getExecutionMode(name, cfg),
24458
+ priority: cfg.priority || 0,
24459
+ model: cfg.model
24460
+ });
24461
+ }
24462
+ providers.sort((a, b) => b.priority - a.priority);
24463
+ const agentNames = await deps.profileLoader.listProfiles();
24464
+ const agentResults = await Promise.all(
24465
+ agentNames.map(async (agentName) => {
24466
+ try {
24467
+ const profile = await deps.profileLoader.loadProfile(agentName);
24468
+ return {
24469
+ name: profile.name,
24470
+ displayName: profile.displayName,
24471
+ role: profile.role,
24472
+ description: profile.systemPrompt?.substring(0, 200),
24473
+ team: profile.team,
24474
+ abilities: profile.abilities || []
24475
+ };
24476
+ } catch (error) {
24477
+ logger.warn(`Failed to load profile for ${agentName}`, { error });
24478
+ return null;
24479
+ }
24480
+ })
24481
+ );
24482
+ const agents = agentResults.filter((a) => a !== null);
24483
+ const tools = deps.toolSchemas.map((schema) => ({
24484
+ name: schema.name,
24485
+ description: schema.description,
24486
+ category: categorizeTools(schema.name)
24487
+ }));
24488
+ const [memoryStats, activeSessions] = await Promise.all([
24489
+ deps.memoryManager.getStats(),
24490
+ deps.sessionManager.getActiveSessions()
24491
+ ]);
24492
+ const result = {
24493
+ version,
24494
+ providers,
24495
+ agents,
24496
+ tools,
24497
+ memory: {
24498
+ enabled: true,
24499
+ entryCount: memoryStats.totalEntries,
24500
+ maxEntries: config.memory?.maxEntries || 1e4
24501
+ },
24502
+ sessions: {
24503
+ enabled: true,
24504
+ activeCount: activeSessions.length,
24505
+ maxSessions: config.orchestration?.session?.maxSessions || 100
24506
+ },
24507
+ features: {
24508
+ smartRouting: true,
24509
+ // Always enabled in v13.0.0
24510
+ memorySearch: true,
24511
+ multiAgentSessions: true,
24512
+ streamingNotifications: false
24513
+ // Configured via MCP server options
24514
+ }
24515
+ };
24516
+ logger.info("[MCP] get_capabilities completed", {
24517
+ version,
24518
+ providersCount: providers.length,
24519
+ agentsCount: agents.length,
24520
+ toolsCount: tools.length
24521
+ });
24522
+ return result;
24523
+ } catch (error) {
24524
+ logger.error("[MCP] get_capabilities failed", { error });
24525
+ throw new Error(`Capabilities check failed: ${error.message}`);
24526
+ }
24527
+ };
24528
+ }
24529
+
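For orientation, here is a hedged sketch of the object shape that createGetCapabilitiesHandler assembles above; the field names are taken from the handler code, while every value is illustrative.

// Illustrative capabilities result matching the fields built above (values made up).
const exampleCapabilities = {
  version: "12.5.5",
  providers: [
    { name: "openai", enabled: true, available: true, type: "hybrid", executionMode: "hybrid", priority: 1 }
  ],
  agents: [
    { name: "backend", displayName: "Benny", role: "Backend Developer", description: "...", team: "core", abilities: [] }
  ],
  tools: [{ name: "run_agent", description: "...", category: "execution" }],
  memory: { enabled: true, entryCount: 0, maxEntries: 10000 },
  sessions: { enabled: true, activeCount: 0, maxSessions: 100 },
  features: { smartRouting: true, memorySearch: true, multiAgentSessions: true, streamingNotifications: false }
};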
24530
+ // src/mcp/tools/task/index.ts
24531
+ init_esm_shims();
24532
+
24533
+ // src/mcp/tools/task/create-task.ts
24534
+ init_esm_shims();
24535
+
24536
+ // src/core/task-engine/index.ts
24537
+ init_esm_shims();
24538
+
24539
+ // src/core/task-engine/types.ts
24540
+ init_esm_shims();
24541
+ var TaskEngineError = class _TaskEngineError extends Error {
24542
+ constructor(message, code, details) {
24543
+ super(message);
24544
+ this.code = code;
24545
+ this.details = details;
24546
+ this.name = "TaskEngineError";
24547
+ Error.captureStackTrace?.(this, _TaskEngineError);
22935
24548
  }
22936
24549
  };
22937
24550
  var LoopPreventionError = class extends TaskEngineError {
@@ -24639,6 +26252,12 @@ var TaskEngine = class extends EventEmitter {
24639
26252
  signal?.addEventListener("abort", abortHandler, { once: true });
24640
26253
  });
24641
26254
  }
26255
+ /**
26256
+ * Clean up resources and remove all event listeners.
26257
+ */
26258
+ destroy() {
26259
+ this.removeAllListeners();
26260
+ }
24642
26261
  };
24643
26262
  var defaultTaskEngine = null;
24644
26263
  function getTaskEngine(config) {
@@ -24743,14 +26362,33 @@ function mapNormalizedProviderToOriginClient(provider) {
24743
26362
  }
24744
26363
  var createTaskSchema = {
24745
26364
  name: "create_task",
24746
- description: "Create a new task with payload for deferred execution. Returns task_id for later execution via run_task.",
26365
+ description: `Create a new task with payload for deferred execution. Returns task_id for later execution via run_task.
26366
+
26367
+ **When to use**: For tasks that benefit from decoupled creation and execution:
26368
+ - Queue work for later processing
26369
+ - Create multiple tasks in batch, then execute selectively
26370
+ - Store task definitions for retries on failure
26371
+
26372
+ **Task types** optimize routing:
26373
+ - web_search: Internet searches, fact-checking (uses Gemini for free tier)
26374
+ - code_review: Code analysis, style checks (uses Claude for accuracy)
26375
+ - code_generation: Write new code (uses best available coding model)
26376
+ - analysis: Data analysis, summarization (general purpose)
26377
+ - custom: User-defined tasks
26378
+
26379
+ **Workflow**:
26380
+ 1. create_task({ type: "code_review", payload: { files: ["src/api.ts"] } })
26381
+ 2. ... later ...
26382
+ 3. run_task({ task_id: "abc-123" })
26383
+
26384
+ **Returns**: task_id, estimated_engine, expires_at, compression info`,
24747
26385
  inputSchema: {
24748
26386
  type: "object",
24749
26387
  properties: {
24750
26388
  type: {
24751
26389
  type: "string",
24752
26390
  enum: ["web_search", "code_review", "code_generation", "analysis", "custom"],
24753
- description: "Task type for routing optimization"
26391
+ description: "Task type: web_search, code_review, code_generation, analysis, or custom"
24754
26392
  },
24755
26393
  payload: {
24756
26394
  type: "object",
@@ -24842,7 +26480,23 @@ function createRunTaskHandler(deps) {
24842
26480
  }
24843
26481
  var runTaskSchema = {
24844
26482
  name: "run_task",
24845
- description: "Execute a previously created task and return results. Blocks until completion or timeout.",
26483
+ description: `Execute a previously created task and return results. Blocks until completion or timeout.
26484
+
26485
+ **When to use**: Execute a task created with create_task.
26486
+
26487
+ **Features**:
26488
+ - Caching: Identical tasks return cached results (use skip_cache to bypass)
26489
+ - Engine override: Force execution on specific provider
26490
+ - Timeout control: Adjust for long-running tasks
26491
+
26492
+ **Returns**:
26493
+ - status: "completed" or "failed"
26494
+ - result: Task output (null if failed)
26495
+ - engine: Which AI provider executed the task
26496
+ - metrics: Duration, token counts
26497
+ - cache_hit: Whether result came from cache
26498
+
26499
+ **Example**: run_task({ task_id: "abc-123", timeout_ms: 60000 })`,
24846
26500
  inputSchema: {
24847
26501
  type: "object",
24848
26502
  properties: {
@@ -24860,7 +26514,7 @@ var runTaskSchema = {
24860
26514
  minimum: 1e3,
24861
26515
  maximum: 3e5,
24862
26516
  default: 3e4,
24863
- description: "Custom timeout in milliseconds"
26517
+ description: "Custom timeout in milliseconds (default: 30000)"
24864
26518
  },
24865
26519
  skip_cache: {
24866
26520
  type: "boolean",
@@ -24922,7 +26576,18 @@ function createGetTaskResultHandler() {
24922
26576
  }
24923
26577
  var getTaskResultSchema = {
24924
26578
  name: "get_task_result",
24925
- description: "Retrieve the result of a task. Does not execute - use run_task for execution.",
26579
+ description: `Retrieve the result of a task. Does not execute - use run_task for execution.
26580
+
26581
+ **When to use**: Check status or get results of a previously created/executed task.
26582
+
26583
+ **Returns**:
26584
+ - status: pending, running, completed, failed, or expired
26585
+ - result: Output data (null if not completed)
26586
+ - engine: Which provider executed the task
26587
+ - created_at, completed_at, expires_at: Timestamps
26588
+ - error: Details if failed
26589
+
26590
+ **Example**: get_task_result({ task_id: "abc-123", include_payload: true })`,
24926
26591
  inputSchema: {
24927
26592
  type: "object",
24928
26593
  properties: {
@@ -24997,7 +26662,24 @@ function createListTasksHandler() {
24997
26662
  }
24998
26663
  var listTasksSchema = {
24999
26664
  name: "list_tasks",
25000
- description: "List tasks with optional filtering. Supports pagination.",
26665
+ description: `List tasks with optional filtering. Supports pagination.
26666
+
26667
+ **When to use**: Browse task queue, find tasks to execute, or monitor task status.
26668
+
26669
+ **Filters**:
26670
+ - status: pending, running, completed, failed, expired
26671
+ - type: web_search, code_review, code_generation, analysis, custom
26672
+ - engine: gemini, claude, codex, glm, grok
26673
+
26674
+ **Returns**: Array of task summaries with pagination info:
26675
+ - tasks: [{task_id, type, status, engine, priority, created_at, expires_at, has_result}]
26676
+ - total: Total matching tasks
26677
+ - has_more: Whether more pages exist
26678
+
26679
+ **Examples**:
26680
+ - list_tasks({}) - List all tasks
26681
+ - list_tasks({ status: "pending" }) - Find pending tasks
26682
+ - list_tasks({ type: "code_review", limit: 50 }) - List code reviews`,
25001
26683
  inputSchema: {
25002
26684
  type: "object",
25003
26685
  properties: {
@@ -25086,7 +26768,20 @@ function createDeleteTaskHandler() {
25086
26768
  }
25087
26769
  var deleteTaskSchema = {
25088
26770
  name: "delete_task",
25089
- description: "Delete a task and its associated data. Cannot delete running tasks unless force=true.",
26771
+ description: `Delete a task and its associated data. Cannot delete running tasks unless force=true.
26772
+
26773
+ **When to use**: Clean up completed tasks, remove failed tasks, or cancel pending tasks.
26774
+
26775
+ **Safety**: Running tasks are protected - use force=true to delete anyway.
26776
+
26777
+ **Returns**:
26778
+ - deleted: Whether deletion succeeded
26779
+ - previous_status: Status before deletion
26780
+ - message: Human-readable result
26781
+
26782
+ **Examples**:
26783
+ - delete_task({ task_id: "abc-123" }) - Delete completed task
26784
+ - delete_task({ task_id: "abc-123", force: true }) - Force delete running task`,
25090
26785
  inputSchema: {
25091
26786
  type: "object",
25092
26787
  properties: {
@@ -25476,6 +27171,12 @@ var McpClient = class _McpClient extends EventEmitter {
25476
27171
  this.process = null;
25477
27172
  }
25478
27173
  }
27174
+ /**
27175
+ * Clean up resources and remove all event listeners.
27176
+ */
27177
+ destroy() {
27178
+ this.removeAllListeners();
27179
+ }
25479
27180
  };
25480
27181
  var MCP_DEFAULTS = {
25481
27182
  timeout: 6e4,
@@ -25910,6 +27611,12 @@ var McpClientPool = class extends EventEmitter {
25910
27611
  this.emit(type, event);
25911
27612
  this.emit("event", event);
25912
27613
  }
27614
+ /**
27615
+ * Clean up resources and remove all event listeners.
27616
+ */
27617
+ destroy() {
27618
+ this.removeAllListeners();
27619
+ }
25913
27620
  };
25914
27621
  var globalPool = null;
25915
27622
  function getGlobalPool(config) {
@@ -26626,6 +28333,9 @@ var McpServer = class _McpServer {
26626
28333
  * Get static tool schemas (no initialization required)
26627
28334
  * Returns tool schemas that can be provided during MCP handshake
26628
28335
  * before services are initialized.
28336
+ *
28337
+ * v12.5.5: Enhanced descriptions with examples, return formats, and use cases
28338
+ * to improve AI client understanding and tool selection.
26629
28339
  */
26630
28340
  static getStaticToolSchemas() {
26631
28341
  return [
@@ -26633,37 +28343,110 @@ var McpServer = class _McpServer {
26633
28343
  name: "run_agent",
26634
28344
  description: `Execute an AutomatosX agent with a specific task.
26635
28345
 
26636
- v12.5.1: Agent auto-selection - if agent is omitted, system automatically selects the best agent based on task keywords.
26637
- Uses Smart Routing: returns context for same-provider calls, spawns cross-provider execution.
28346
+ **When to use**: Delegate specialized tasks to expert agents. Each agent has domain expertise:
28347
+ - "backend": API development, database design, server-side logic
28348
+ - "frontend": UI/UX, React components, styling, accessibility
28349
+ - "quality": Testing, debugging, bug detection, code review
28350
+ - "security": Security audits, vulnerability scanning, auth systems
28351
+ - "architecture": System design, tech stack decisions, scalability
28352
+ - "devops": CI/CD, Docker, deployment, infrastructure
28353
+
28354
+ **Auto-selection**: If agent is omitted, system analyzes task keywords and selects the best agent.
26638
28355
 
26639
- Examples:
26640
- - With agent: run_agent({ agent: "backend", task: "implement API" })
26641
- - Auto-select: run_agent({ task: "fix bugs in the codebase" }) \u2192 selects "quality" agent`,
28356
+ **Returns**: JSON with execution result, output from the agent, and metadata.
28357
+
28358
+ **Examples**:
28359
+ - run_agent({ agent: "backend", task: "Create a REST API endpoint for user authentication with JWT" })
28360
+ - run_agent({ agent: "quality", task: "Review the auth module for security issues" })
28361
+ - run_agent({ task: "Fix the memory leak in the connection pool" }) \u2192 auto-selects "quality"
28362
+ - run_agent({ task: "Design a microservices architecture for the payment system" }) \u2192 auto-selects "architecture"`,
26642
28363
  inputSchema: {
26643
28364
  type: "object",
26644
28365
  properties: {
26645
- agent: { type: "string", description: "Optional: Agent name (e.g., backend, quality). If omitted, best agent is auto-selected based on task." },
26646
- task: { type: "string", description: "The task for the agent to perform" },
26647
- provider: { type: "string", description: "Optional: Override the AI provider", enum: ["claude", "gemini", "openai"] },
26648
- no_memory: { type: "boolean", description: "Optional: Skip memory injection", default: false },
26649
- mode: { type: "string", description: "Optional: Execution mode - auto (default), context (always return context), execute (always spawn)", enum: ["auto", "context", "execute"], default: "auto" }
28366
+ agent: { type: "string", description: "Agent name: backend, frontend, quality, security, architecture, devops. If omitted, auto-selected based on task." },
28367
+ task: { type: "string", description: "Detailed task description. Be specific about requirements and expected outcomes." },
28368
+ provider: { type: "string", description: "AI provider override: claude (best coding), gemini (free tier), openai (balanced)", enum: ["claude", "gemini", "openai"] },
28369
+ no_memory: { type: "boolean", description: "Skip injecting relevant past context into agent prompt", default: false },
28370
+ mode: { type: "string", description: "auto: smart routing, context: return prompt without executing, execute: always spawn process", enum: ["auto", "context", "execute"], default: "auto" }
26650
28371
  },
26651
28372
  required: ["task"]
26652
28373
  }
26653
28374
  },
26654
28375
  {
26655
28376
  name: "list_agents",
26656
- description: "List all available AutomatosX agents",
28377
+ description: `List all available AutomatosX agents with their profiles.
28378
+
28379
+ **When to use**: Discover available agents before delegating tasks, or to understand agent capabilities.
28380
+
28381
+ **Returns**: Array of agent profiles, each containing:
28382
+ - name: Agent identifier (e.g., "backend", "quality")
28383
+ - displayName: Human-readable name (e.g., "Queenie")
28384
+ - role: Job title (e.g., "QA Engineer", "Backend Developer")
28385
+ - description: What the agent specializes in
28386
+ - abilities: List of capabilities (e.g., ["testing", "debugging"])
28387
+ - provider: Preferred AI provider
28388
+ - team: Team membership (e.g., "core", "platform")
28389
+
28390
+ **Example response**:
28391
+ [
28392
+ { "name": "backend", "displayName": "Benny", "role": "Backend Developer", "abilities": ["api-design", "database"] },
28393
+ { "name": "quality", "displayName": "Queenie", "role": "QA Engineer", "abilities": ["testing", "debugging"] }
28394
+ ]`,
26657
28395
  inputSchema: { type: "object", properties: {} }
26658
28396
  },
26659
28397
  {
26660
28398
  name: "search_memory",
26661
- description: "Search AutomatosX memory for relevant information",
26662
- inputSchema: { type: "object", properties: { query: { type: "string", description: "Search query" }, limit: { type: "number", description: "Maximum number of results", default: 10 } }, required: ["query"] }
28399
+ description: `Search AutomatosX persistent memory using full-text search.
28400
+
28401
+ **What memory contains**: Past task executions, code snippets, architectural decisions, debugging sessions, and context from previous conversations. Memory persists across sessions.
28402
+
28403
+ **When to use**:
28404
+ - Find previous work on a similar task
28405
+ - Retrieve past decisions or implementations
28406
+ - Check if a bug was fixed before
28407
+ - Get context before starting new work
28408
+
28409
+ **Search tips**:
28410
+ - Use specific keywords: "authentication JWT" rather than "auth"
28411
+ - Include file names: "router.ts error handling"
28412
+ - Search by agent: Results include which agent created the entry
28413
+
28414
+ **Returns**: Array of memory entries with content, timestamp, agent, and relevance score.
28415
+
28416
+ **Examples**:
28417
+ - search_memory({ query: "database migration", limit: 5 })
28418
+ - search_memory({ query: "authentication security audit" })
28419
+ - search_memory({ query: "React component performance optimization" })`,
28420
+ inputSchema: {
28421
+ type: "object",
28422
+ properties: {
28423
+ query: { type: "string", description: "Search terms. Use specific keywords for better results." },
28424
+ limit: { type: "number", description: "Max results to return (default: 10, max: 100)", default: 10 }
28425
+ },
28426
+ required: ["query"]
28427
+ }
26663
28428
  },
26664
28429
  {
26665
28430
  name: "get_status",
26666
- description: "Get AutomatosX system status and configuration",
28431
+ description: `Get AutomatosX system status and health information.
28432
+
28433
+ **When to use**: Check system health, verify configuration, or diagnose issues.
28434
+
28435
+ **Returns**:
28436
+ - version: AutomatosX version
28437
+ - providers: List of configured AI providers and their availability
28438
+ - memory: Stats (entry count, database size, last access)
28439
+ - sessions: Active session count and recent activity
28440
+ - router: Current routing configuration and provider health
28441
+ - uptime: Server uptime and initialization state
28442
+
28443
+ **Example response**:
28444
+ {
28445
+ "version": "12.5.4",
28446
+ "providers": { "claude": "available", "gemini": "available", "openai": "unavailable" },
28447
+ "memory": { "entries": 1523, "sizeMB": 12.5 },
28448
+ "sessions": { "active": 2, "total": 45 }
28449
+ }`,
26667
28450
  inputSchema: { type: "object", properties: {} }
26668
28451
  },
26669
28452
  // v13.0.0: Enhanced Service Discovery
@@ -26684,92 +28467,419 @@ Use this tool first to understand what AutomatosX offers.`,
26684
28467
  },
26685
28468
  {
26686
28469
  name: "session_create",
26687
- description: "Create a new multi-agent session",
26688
- inputSchema: { type: "object", properties: { name: { type: "string", description: "Session name/task description" }, agent: { type: "string", description: "Initiating agent name" } }, required: ["name", "agent"] }
28470
+ description: `Create a new multi-agent collaborative session.
28471
+
28472
+ **What sessions are for**: Sessions track complex tasks that involve multiple agents working together. They provide:
28473
+ - Task state persistence across agent invocations
28474
+ - History of which agents contributed what
28475
+ - Ability to resume interrupted work
28476
+ - Coordination between agents (e.g., backend implements, quality reviews)
28477
+
28478
+ **When to use**:
28479
+ - Multi-step tasks requiring multiple agents
28480
+ - Long-running work that may span multiple conversations
28481
+ - Collaborative workflows (design \u2192 implement \u2192 review \u2192 deploy)
28482
+
28483
+ **Returns**: Session object with unique ID, state, timestamps, and task list.
28484
+
28485
+ **Example workflow**:
28486
+ 1. session_create({ name: "Implement auth system", agent: "architecture" })
28487
+ 2. run_agent({ agent: "backend", task: "Implement JWT auth" })
28488
+ 3. run_agent({ agent: "quality", task: "Review auth implementation" })
28489
+ 4. session_complete({ id: "session-uuid" })`,
28490
+ inputSchema: {
28491
+ type: "object",
28492
+ properties: {
28493
+ name: { type: "string", description: 'Descriptive session name (e.g., "Implement user authentication")' },
28494
+ agent: { type: "string", description: 'Initial agent to assign (e.g., "backend", "architecture")' }
28495
+ },
28496
+ required: ["name", "agent"]
28497
+ }
26689
28498
  },
26690
28499
  {
26691
28500
  name: "session_list",
26692
- description: "List all active sessions",
28501
+ description: `List all active and recent sessions.
28502
+
28503
+ **When to use**: Find existing sessions to resume, check what work is in progress, or audit past work.
28504
+
28505
+ **Returns**: Array of session summaries with:
28506
+ - id: Unique session identifier
28507
+ - name: Session description
28508
+ - state: "active", "completed", or "failed"
28509
+ - createdAt: Session start time
28510
+ - updatedAt: Last activity time
28511
+ - agents: List of agents that participated
28512
+
28513
+ **Example response**:
28514
+ [
28515
+ { "id": "abc-123", "name": "Auth system", "state": "active", "agents": ["backend", "quality"] },
28516
+ { "id": "def-456", "name": "API refactor", "state": "completed", "agents": ["architecture", "backend"] }
28517
+ ]`,
26693
28518
  inputSchema: { type: "object", properties: {} }
26694
28519
  },
26695
28520
  {
26696
28521
  name: "session_status",
26697
- description: "Get detailed status of a specific session",
26698
- inputSchema: { type: "object", properties: { id: { type: "string", description: "Session ID" } }, required: ["id"] }
28522
+ description: `Get detailed status of a specific session including task history.
28523
+
28524
+ **When to use**: Check progress of a session, see what agents have done, or decide next steps.
28525
+
28526
+ **Returns**: Full session details with:
28527
+ - id, name, state, timestamps
28528
+ - tasks: Array of all tasks executed with results
28529
+ - currentAgent: Which agent is active (if any)
28530
+ - metadata: Additional context stored with session
28531
+
28532
+ **Example**: session_status({ id: "abc-123" })`,
28533
+ inputSchema: {
28534
+ type: "object",
28535
+ properties: {
28536
+ id: { type: "string", description: "Session ID from session_create or session_list" }
28537
+ },
28538
+ required: ["id"]
28539
+ }
26699
28540
  },
26700
28541
  {
26701
28542
  name: "session_complete",
26702
- description: "Mark a session as completed",
26703
- inputSchema: { type: "object", properties: { id: { type: "string", description: "Session ID" } }, required: ["id"] }
28543
+ description: `Mark a session as successfully completed.
28544
+
28545
+ **When to use**: After all tasks in a session are done and verified.
28546
+
28547
+ **What it does**:
28548
+ - Updates session state to "completed"
28549
+ - Records completion timestamp
28550
+ - Persists session to history for future reference
28551
+ - Releases any held resources
28552
+
28553
+ **Example**: session_complete({ id: "abc-123" })`,
28554
+ inputSchema: {
28555
+ type: "object",
28556
+ properties: {
28557
+ id: { type: "string", description: "Session ID to mark as completed" }
28558
+ },
28559
+ required: ["id"]
28560
+ }
26704
28561
  },
26705
28562
  {
26706
28563
  name: "session_fail",
26707
- description: "Mark a session as failed with an error reason",
26708
- inputSchema: { type: "object", properties: { id: { type: "string", description: "Session ID" }, reason: { type: "string", description: "Failure reason" } }, required: ["id", "reason"] }
28564
+ description: `Mark a session as failed with an error reason.
28565
+
28566
+ **When to use**: When a session cannot be completed due to errors, blockers, or abandonment.
28567
+
28568
+ **What it does**:
28569
+ - Updates session state to "failed"
28570
+ - Records failure reason for debugging
28571
+ - Preserves partial work for potential recovery
28572
+
28573
+ **Example**: session_fail({ id: "abc-123", reason: "API rate limit exceeded, cannot complete integration" })`,
28574
+ inputSchema: {
28575
+ type: "object",
28576
+ properties: {
28577
+ id: { type: "string", description: "Session ID to mark as failed" },
28578
+ reason: { type: "string", description: "Detailed explanation of why the session failed" }
28579
+ },
28580
+ required: ["id", "reason"]
28581
+ }
26709
28582
  },
26710
28583
  {
26711
28584
  name: "memory_add",
26712
- description: "Add a new memory entry to the system",
26713
- inputSchema: { type: "object", properties: { content: { type: "string", description: "Memory content" }, metadata: { type: "object", description: "Optional metadata (agent, timestamp, etc.)", properties: { agent: { type: "string" }, timestamp: { type: "string" } } } }, required: ["content"] }
28585
+ description: `Add a new entry to AutomatosX persistent memory.
28586
+
28587
+ **When to use**:
28588
+ - Save important decisions or implementations for future reference
28589
+ - Store code snippets that worked well
28590
+ - Record architectural decisions and their rationale
28591
+ - Preserve debugging insights
28592
+
28593
+ **Memory is searchable**: Entries are indexed for full-text search via search_memory.
28594
+
28595
+ **Best practices**:
28596
+ - Include context: what problem was solved, what approach was used
28597
+ - Tag with relevant keywords for easier retrieval
28598
+ - Include agent name if storing agent output
28599
+
28600
+ **Example**: memory_add({ content: "Implemented rate limiting using token bucket algorithm. Config: 100 req/min burst, 10 req/s sustained.", metadata: { agent: "backend", tags: ["rate-limiting", "performance"] } })`,
28601
+ inputSchema: {
28602
+ type: "object",
28603
+ properties: {
28604
+ content: { type: "string", description: "Content to store. Be descriptive for better searchability." },
28605
+ metadata: {
28606
+ type: "object",
28607
+ description: "Optional metadata for categorization and filtering",
28608
+ properties: {
28609
+ agent: { type: "string", description: "Agent that created this entry" },
28610
+ timestamp: { type: "string", description: "Custom timestamp (ISO format)" },
28611
+ tags: { type: "array", items: { type: "string" }, description: "Tags for categorization" }
28612
+ }
28613
+ }
28614
+ },
28615
+ required: ["content"]
28616
+ }
26714
28617
  },
26715
28618
  {
26716
28619
  name: "memory_list",
26717
- description: "List memory entries with optional filtering",
26718
- inputSchema: { type: "object", properties: { agent: { type: "string", description: "Filter by agent name" }, limit: { type: "number", description: "Maximum number of entries", default: 50 } } }
28620
+ description: `List memory entries with optional filtering.
28621
+
28622
+ **When to use**: Browse memory contents, audit what's stored, or filter by agent.
28623
+
28624
+ **Returns**: Array of memory entries sorted by recency, with:
28625
+ - id: Unique identifier (use for memory_delete)
28626
+ - content: Stored text
28627
+ - agent: Source agent (if specified)
28628
+ - createdAt: When entry was added
28629
+ - metadata: Additional tags/info
28630
+
28631
+ **Example**: memory_list({ agent: "backend", limit: 20 })`,
28632
+ inputSchema: {
28633
+ type: "object",
28634
+ properties: {
28635
+ agent: { type: "string", description: "Filter to entries from specific agent" },
28636
+ limit: { type: "number", description: "Max entries to return (default: 50)", default: 50 }
28637
+ }
28638
+ }
26719
28639
  },
26720
28640
  {
26721
28641
  name: "memory_delete",
26722
- description: "Delete a specific memory entry by ID",
26723
- inputSchema: { type: "object", properties: { id: { type: "number", description: "Memory entry ID" } }, required: ["id"] }
28642
+ description: `Delete a specific memory entry by ID.
28643
+
28644
+ **When to use**: Remove outdated, incorrect, or sensitive information from memory.
28645
+
28646
+ **Note**: Deletion is permanent. Use memory_list to find the ID first.
28647
+
28648
+ **Example**: memory_delete({ id: 42 })`,
28649
+ inputSchema: {
28650
+ type: "object",
28651
+ properties: {
28652
+ id: { type: "number", description: "Memory entry ID from memory_list" }
28653
+ },
28654
+ required: ["id"]
28655
+ }
26724
28656
  },
26725
28657
  {
26726
28658
  name: "memory_export",
26727
- description: "Export all memory entries to a JSON file",
26728
- inputSchema: { type: "object", properties: { path: { type: "string", description: "Export file path" } }, required: ["path"] }
28659
+ description: `Export all memory entries to a JSON file.
28660
+
28661
+ **When to use**:
28662
+ - Backup memory before major changes
28663
+ - Transfer memory to another system
28664
+ - Archive project knowledge
28665
+ - Audit stored information
28666
+
28667
+ **Returns**: Confirmation with entry count and file path.
28668
+
28669
+ **Example**: memory_export({ path: "./backup/memory-2024-01-15.json" })`,
28670
+ inputSchema: {
28671
+ type: "object",
28672
+ properties: {
28673
+ path: { type: "string", description: "File path for export (JSON format)" }
28674
+ },
28675
+ required: ["path"]
28676
+ }
26729
28677
  },
26730
28678
  {
26731
28679
  name: "memory_import",
26732
- description: "Import memory entries from a JSON file",
26733
- inputSchema: { type: "object", properties: { path: { type: "string", description: "Import file path" } }, required: ["path"] }
28680
+ description: `Import memory entries from a JSON file.
28681
+
28682
+ **When to use**:
28683
+ - Restore from backup
28684
+ - Load memory from another project
28685
+ - Seed new project with existing knowledge
28686
+
28687
+ **Format**: JSON array of memory entries (same format as memory_export).
28688
+
28689
+ **Note**: Imported entries are merged with existing memory.
28690
+
28691
+ **Example**: memory_import({ path: "./backup/memory-2024-01-15.json" })`,
28692
+ inputSchema: {
28693
+ type: "object",
28694
+ properties: {
28695
+ path: { type: "string", description: "File path to import from (JSON format)" }
28696
+ },
28697
+ required: ["path"]
28698
+ }
26734
28699
  },
26735
28700
  {
26736
28701
  name: "memory_stats",
26737
- description: "Get detailed memory statistics",
28702
+ description: `Get detailed memory statistics and health information.
28703
+
28704
+ **When to use**: Check memory usage, diagnose performance issues, or monitor growth.
28705
+
28706
+ **Returns**:
28707
+ - totalEntries: Number of memory entries
28708
+ - totalSizeBytes: Database size
28709
+ - entriesByAgent: Breakdown by agent
28710
+ - oldestEntry: Timestamp of oldest entry
28711
+ - newestEntry: Timestamp of newest entry
28712
+ - averageEntrySize: Bytes per entry
28713
+
28714
+ **Example response**:
28715
+ {
28716
+ "totalEntries": 1523,
28717
+ "totalSizeBytes": 1245678,
28718
+ "entriesByAgent": { "backend": 456, "quality": 312, "architecture": 89 }
28719
+ }`,
26738
28720
  inputSchema: { type: "object", properties: {} }
26739
28721
  },
26740
28722
  {
26741
28723
  name: "memory_clear",
26742
- description: "Clear all memory entries from the database",
28724
+ description: `Clear ALL memory entries from the database.
28725
+
28726
+ **WARNING**: This permanently deletes all stored memory. Use with caution.
28727
+
28728
+ **When to use**:
28729
+ - Starting fresh on a new project
28730
+ - Removing sensitive data
28731
+ - Resetting after major changes
28732
+
28733
+ **Recommendation**: Use memory_export first to create a backup.
28734
+
28735
+ **Returns**: Confirmation with count of deleted entries.`,
26743
28736
  inputSchema: { type: "object", properties: {} }
26744
28737
  },
26745
28738
  {
26746
28739
  name: "get_conversation_context",
26747
- description: "Retrieve conversation context from the shared context store",
26748
- inputSchema: { type: "object", properties: { id: { type: "string", description: "Optional: Context ID to retrieve" }, source: { type: "string", description: "Optional: Filter by source (e.g., gemini-cli)" }, limit: { type: "number", description: "Optional: Max results (default: 10)", default: 10 } } }
28740
+ description: `Retrieve conversation context from the shared cross-assistant context store.
28741
+
28742
+ **What this is for**: AutomatosX allows different AI assistants (Claude, Gemini, etc.) to share context. This enables workflows where one assistant's work informs another's.
28743
+
28744
+ **When to use**:
28745
+ - Resume work started by another assistant
28746
+ - Get context from a different AI provider's session
28747
+ - Check what information was shared across assistants
28748
+
28749
+ **Returns**: Array of context entries with:
28750
+ - id: Context entry identifier
28751
+ - source: Which assistant created it (e.g., "gemini-cli", "claude-code")
28752
+ - content: The shared context/information
28753
+ - metadata: Topic, participants, tags
28754
+ - createdAt: When context was stored
28755
+
28756
+ **Example**: get_conversation_context({ source: "gemini-cli", limit: 5 })`,
28757
+ inputSchema: {
28758
+ type: "object",
28759
+ properties: {
28760
+ id: { type: "string", description: "Specific context ID to retrieve" },
28761
+ source: { type: "string", description: 'Filter by source assistant (e.g., "gemini-cli", "claude-code")' },
28762
+ limit: { type: "number", description: "Max entries to return (default: 10)", default: 10 }
28763
+ }
28764
+ }
26749
28765
  },
26750
28766
  {
26751
28767
  name: "inject_conversation_context",
26752
- description: "Inject conversation context into the shared context store",
26753
- inputSchema: { type: "object", properties: { source: { type: "string", description: "Source assistant (e.g., gemini-cli, claude-code)" }, content: { type: "string", description: "Context content" }, metadata: { type: "object", description: "Optional metadata", properties: { topic: { type: "string" }, participants: { type: "array", items: { type: "string" } }, tags: { type: "array", items: { type: "string" } } } } }, required: ["source", "content"] }
28768
+ description: `Share conversation context with other AI assistants via the context store.
28769
+
28770
+ **What this is for**: Enables cross-assistant collaboration by sharing context between different AI providers.
28771
+
28772
+ **When to use**:
28773
+ - Hand off work to another AI assistant
28774
+ - Share discoveries or decisions across assistants
28775
+ - Create checkpoints in multi-assistant workflows
28776
+
28777
+ **Best practices**:
28778
+ - Include enough context for the other assistant to continue
28779
+ - Use descriptive topics and tags
28780
+ - List relevant participants (agents involved)
28781
+
28782
+ **Example**: inject_conversation_context({
28783
+ source: "claude-code",
28784
+ content: "Completed auth module implementation. JWT tokens work, need security review.",
28785
+ metadata: { topic: "authentication", tags: ["security", "backend"], participants: ["backend", "security"] }
28786
+ })`,
28787
+ inputSchema: {
28788
+ type: "object",
28789
+ properties: {
28790
+ source: { type: "string", description: 'Your assistant identifier (e.g., "claude-code", "gemini-cli")' },
28791
+ content: { type: "string", description: "Context to share - be descriptive for handoffs" },
28792
+ metadata: {
28793
+ type: "object",
28794
+ description: "Optional categorization",
28795
+ properties: {
28796
+ topic: { type: "string", description: 'Main topic (e.g., "authentication")' },
28797
+ participants: { type: "array", items: { type: "string" }, description: "Agents involved" },
28798
+ tags: { type: "array", items: { type: "string" }, description: "Searchable tags" }
28799
+ }
28800
+ }
28801
+ },
28802
+ required: ["source", "content"]
28803
+ }
26754
28804
  },
26755
28805
  {
26756
28806
  name: "implement_and_document",
26757
- description: "Implement code and generate documentation atomically to prevent documentation drift",
26758
- inputSchema: { type: "object", properties: { task: { type: "string", description: "Task description" }, agent: { type: "string", description: "Optional: Agent to use (default: backend)" }, documentation: { type: "object", description: "Documentation options", properties: { format: { type: "string", enum: ["markdown", "jsdoc"], description: "Doc format (default: markdown)" }, outputPath: { type: "string", description: "Optional: Custom doc output path" }, updateChangelog: { type: "boolean", description: "Update CHANGELOG.md (default: true)", default: true } } }, provider: { type: "string", enum: ["claude", "gemini", "openai"], description: "Optional: AI provider override" } }, required: ["task"] }
28807
+ description: `Implement code AND generate documentation in one atomic operation.
28808
+
28809
+ **Why this exists**: Prevents "documentation drift" where code changes but docs don't. Both are generated together from the same understanding.
28810
+
28811
+ **When to use**:
28812
+ - New feature implementations that need docs
28813
+ - API changes that require documentation updates
28814
+ - Any code change that should be documented
28815
+
28816
+ **What it does**:
28817
+ 1. Runs agent to implement the task
28818
+ 2. Generates documentation from the implementation
28819
+ 3. Optionally updates CHANGELOG.md
28820
+ 4. Returns both code and docs together
28821
+
28822
+ **Example**: implement_and_document({
28823
+ task: "Add rate limiting middleware with configurable limits",
28824
+ agent: "backend",
28825
+ documentation: { format: "markdown", updateChangelog: true }
28826
+ })`,
28827
+ inputSchema: {
28828
+ type: "object",
28829
+ properties: {
28830
+ task: { type: "string", description: "Implementation task - be specific about requirements" },
28831
+ agent: { type: "string", description: "Agent to use (default: backend)" },
28832
+ documentation: {
28833
+ type: "object",
28834
+ description: "Documentation generation options",
28835
+ properties: {
28836
+ format: { type: "string", enum: ["markdown", "jsdoc"], description: "Doc format (default: markdown)" },
28837
+ outputPath: { type: "string", description: "Custom output path for docs" },
28838
+ updateChangelog: { type: "boolean", description: "Update CHANGELOG.md (default: true)", default: true }
28839
+ }
28840
+ },
28841
+ provider: { type: "string", enum: ["claude", "gemini", "openai"], description: "AI provider override" }
28842
+ },
28843
+ required: ["task"]
28844
+ }
26759
28845
  },
26760
28846
  // v10.5.0: Smart Routing - Explicit context retrieval
26761
28847
  {
26762
28848
  name: "get_agent_context",
26763
- description: `Get agent context without executing. Returns profile, relevant memory, and enhanced prompt for AI assistant to execute directly.
28849
+ description: `Get agent profile and context WITHOUT executing. Returns everything needed for YOU to execute the task directly.
28850
+
28851
+ **When to use**:
28852
+ - You want to understand an agent's expertise before deciding to delegate
28853
+ - You prefer to execute the task yourself with agent guidance
28854
+ - You need the agent's system prompt and relevant memory
28855
+
28856
+ **What it returns**:
28857
+ - profile: Full agent profile (role, abilities, system prompt)
28858
+ - memory: Relevant past context from search_memory
28859
+ - enhancedPrompt: Ready-to-use prompt incorporating agent expertise
28860
+
28861
+ **Auto-selection**: If agent is omitted, system analyzes task and selects the best-matching agent.
28862
+
28863
+ **Example**: get_agent_context({
28864
+ task: "Implement caching layer for database queries",
28865
+ includeMemory: true,
28866
+ maxMemoryResults: 5
28867
+ })
26764
28868
 
26765
- v12.5.1: Agent auto-selection - if agent is omitted, system automatically selects the best agent based on task keywords.`,
28869
+ **Returns example**:
28870
+ {
28871
+ "selectedAgent": "backend",
28872
+ "profile": { "role": "Backend Developer", "abilities": [...], "systemPrompt": "..." },
28873
+ "memory": [{ "content": "Previous caching implementation used Redis...", ... }],
28874
+ "enhancedPrompt": "You are a Backend Developer. Your task: Implement caching..."
28875
+ }`,
26766
28876
  inputSchema: {
26767
28877
  type: "object",
26768
28878
  properties: {
26769
- agent: { type: "string", description: "Optional: Agent name (e.g., backend, quality). If omitted, best agent is auto-selected based on task." },
26770
- task: { type: "string", description: "The task description for context building" },
28879
+ agent: { type: "string", description: "Agent name, or omit for auto-selection based on task" },
28880
+ task: { type: "string", description: "Task description - used for auto-selection and memory search" },
26771
28881
  includeMemory: { type: "boolean", description: "Include relevant memory entries (default: true)", default: true },
26772
- maxMemoryResults: { type: "number", description: "Maximum memory entries to return (default: 5)", default: 5 }
28882
+ maxMemoryResults: { type: "number", description: "Max memory entries to include (default: 5)", default: 5 }
26773
28883
  },
26774
28884
  required: ["task"]
26775
28885
  }
@@ -26779,9 +28889,10 @@ v12.5.1: Agent auto-selection - if agent is omitted, system automatically select
26779
28889
  runTaskSchema,
26780
28890
  getTaskResultSchema,
26781
28891
  listTasksSchema,
26782
- deleteTaskSchema
26783
- // v12.4.0: Bugfix tools intentionally NOT exposed via MCP
26784
- // Access via: run_agent({ agent: "quality", task: "scan for bugs" })
28892
+ deleteTaskSchema,
28893
+ // v12.5.5: Bugfix tools now exposed directly via MCP
28894
+ bugfixScanSchema,
28895
+ bugfixRunSchema
26785
28896
  ];
26786
28897
  }
26787
28898
  /**
@@ -26998,6 +29109,8 @@ v12.5.1: Agent auto-selection - if agent is omitted, system automatically select
26998
29109
  register("get_task_result", createGetTaskResultHandler());
26999
29110
  register("list_tasks", createListTasksHandler());
27000
29111
  register("delete_task", createDeleteTaskHandler());
29112
+ register("bugfix_scan", createBugfixScanHandler());
29113
+ register("bugfix_run", createBugfixRunHandler());
27001
29114
  logger.info("[MCP Server] Registered tools", {
27002
29115
  count: this.tools.size,
27003
29116
  tools: Array.from(this.tools.keys())