@treedy/lsp-mcp 0.2.6 → 0.2.7

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.js CHANGED
@@ -22148,6 +22148,43 @@ var singletonRpcStarting = false;
22148
22148
  var REGISTRY_LOOKUP_TTL_MS = Number.parseInt(process.env.LSP_MCP_REGISTRY_LOOKUP_TTL_MS || "300000", 10);
22149
22149
  var registryLatestCache = new Map;
22150
22150
  var registryLatestInflight = new Map;
22151
+ var CAPABILITY_SNAPSHOT_TTL_MS = Number.parseInt(process.env.LSP_MCP_CAPABILITY_SNAPSHOT_TTL_MS || "600000", 10);
22152
+ var capabilitySnapshotStore = new Map;
22153
+ var diagnosticsDeltaStore = new Map;
22154
// Best-effort resolution of a bundled backend artifact that ships next to the
// CLI entry script. Returns the first candidate that exists on disk, falls
// back to the primary candidate as an optimistic guess when none exist, and
// returns null when the process has no entry script (e.g. embedded usage).
function resolveLikelyBundledBackendPath(name) {
  const entryScript = process.argv[1];
  if (!entryScript) {
    return null;
  }
  const entryDir = path2.dirname(path2.resolve(entryScript));
  // Probe both supported layouts: a "bundled/" dir beside the entry script,
  // and the sibling "dist/bundled/" layout used by the published package.
  const searchPaths = [
    path2.resolve(entryDir, "bundled", name),
    path2.resolve(entryDir, "..", "dist", "bundled", name)
  ];
  const existing = searchPaths.find((candidate) => fs3.existsSync(candidate));
  return existing ?? (searchPaths[0] || null);
}
22169
// Reads and validates a benchmark report JSON file. Never throws: missing
// files, unreadable files, malformed JSON, and schema mismatches all yield a
// { found: false } result that carries the attempted path (and an error
// description where applicable).
function loadBenchmarkReport(reportPath) {
  const notFound = { found: false, path: reportPath, report: null };
  if (!fs3.existsSync(reportPath)) {
    return notFound;
  }
  let parsed;
  try {
    parsed = JSON.parse(fs3.readFileSync(reportPath, "utf8"));
  } catch (error2) {
    return { ...notFound, error: String(error2) };
  }
  // A usable report must at minimum carry a `cases` array.
  if (!parsed || !Array.isArray(parsed.cases)) {
    return { ...notFound, error: "invalid benchmark report schema" };
  }
  return { found: true, path: reportPath, report: parsed };
}
22183
// Locates the most recent benchmark report, honoring the
// LSP_MCP_BENCHMARK_REPORT_PATH override and resolving relative paths against
// the current working directory.
function loadLatestBenchmarkReport() {
  const configured = process.env.LSP_MCP_BENCHMARK_REPORT_PATH || ".tmp/benchmark-latest.json";
  if (path2.isAbsolute(configured)) {
    return loadBenchmarkReport(configured);
  }
  return loadBenchmarkReport(path2.resolve(process.cwd(), configured));
}
22151
22188
  function resolveBackendRuntimeMode3() {
22152
22189
  const requireBundled = (process.env.LSP_MCP_REQUIRE_BUNDLED_BACKENDS ?? "false").toLowerCase() === "true";
22153
22190
  if (requireBundled)
@@ -22188,6 +22225,52 @@ command timeout`.trim() });
22188
22225
  });
22189
22226
  });
22190
22227
  }
22228
// Builds a unique capability-snapshot id embedding a base36 timestamp, the
// enabled-language list, and a short random hex suffix to avoid collisions.
function makeCapabilitySnapshotId(enabledLanguages) {
  const stamp = Date.now().toString(36);
  const languages = enabledLanguages.join("-");
  const nonce = randomBytes(4).toString("hex");
  return `cap_${stamp}_${languages}_${nonce}`;
}
22231
// Looks up a capability snapshot by id, lazily evicting it when its TTL has
// elapsed. Returns null for falsy ids, unknown ids, and expired entries.
function readCapabilitySnapshot(id) {
  if (!id) {
    return null;
  }
  const snapshot = capabilitySnapshotStore.get(id);
  if (!snapshot) {
    return null;
  }
  const expired = snapshot.expiresAt <= Date.now();
  if (expired) {
    // Lazy cleanup: drop the stale entry on read rather than on a timer.
    capabilitySnapshotStore.delete(id);
    return null;
  }
  return snapshot;
}
22243
// Sweeps the capability-snapshot store, deleting every entry whose TTL has
// elapsed at the time of the sweep.
function cleanupCapabilitySnapshots() {
  const cutoff = Date.now();
  for (const [snapshotId, snapshot] of capabilitySnapshotStore.entries()) {
    if (snapshot.expiresAt <= cutoff) {
      capabilitySnapshotStore.delete(snapshotId);
    }
  }
}
22250
// Annotates a tool payload with heuristic `confidence` / `confidence_reason`
// fields unless the payload already carries both. Heuristics, highest
// precedence first: strict error (0.2) -> fallback/approximate path (0.55) ->
// zero results (0.65) -> clean result (0.9).
function withConfidenceFields(payload) {
  const alreadyScored = typeof payload.confidence === "number" && typeof payload.confidence_reason === "string";
  if (alreadyScored) {
    return payload;
  }
  const errorCode = typeof payload.error_code === "string" ? payload.error_code : null;
  const hasError = typeof payload.error === "string" && payload.error.length > 0;
  let score;
  let explanation;
  if (hasError) {
    score = 0.2;
    explanation = `Strict error returned${errorCode ? ` (${errorCode})` : ""}; follow recovery plan before trusting result.`;
  } else if (payload.fallback_used === true || payload.approximate === true) {
    score = 0.55;
    explanation = "Fallback/approximate path used; validate with follow-up semantic call.";
  } else if (typeof payload.count === "number" && payload.count === 0) {
    score = 0.65;
    explanation = "No results found; this may be valid but should be double-checked.";
  } else {
    score = 0.9;
    explanation = "Backend response appears complete and non-fallback.";
  }
  return {
    ...payload,
    confidence: score,
    confidence_reason: explanation
  };
}
22191
22274
  function getWorkspaceForLanguage(language) {
22192
22275
  return activeWorkspaceByLanguage.get(language) || null;
22193
22276
  }
@@ -22214,10 +22297,12 @@ var SEMANTIC_TOOL_NAMES = new Set([
22214
22297
  "symbols",
22215
22298
  "completions",
22216
22299
  "diagnostics",
22300
+ "diagnostics_delta",
22217
22301
  "rename",
22218
22302
  "prepare_rename",
22219
22303
  "signature_help",
22220
22304
  "read_file_with_hints",
22305
+ "semantic_navigate",
22221
22306
  "peek_definition",
22222
22307
  "code_action",
22223
22308
  "update_document",
@@ -22231,6 +22316,15 @@ function isSemanticTool(toolName) {
22231
22316
  }
22232
22317
  function semanticWorkspaceRequiredResponse(language, toolName) {
22233
22318
  const setupCommand = `switch_workspace_for_language(language='${language}', path='/abs/project/root')`;
22319
+ const recoveryPlan = [{
22320
+ step: 1,
22321
+ action: "set_language_workspace",
22322
+ type: "tool_call",
22323
+ tool: "switch_workspace_for_language",
22324
+ args: { language, path: "/abs/project/root" },
22325
+ command: setupCommand,
22326
+ reason: "Semantic tools require an explicit per-language workspace mapping."
22327
+ }];
22234
22328
  return {
22235
22329
  content: [{
22236
22330
  type: "text",
@@ -22246,13 +22340,155 @@ function semanticWorkspaceRequiredResponse(language, toolName) {
22246
22340
  required_workspace_scope: "language",
22247
22341
  next_step: `Call ${setupCommand} before using semantic tools.`,
22248
22342
  install_commands: [setupCommand],
22343
+ recovery_plan: recoveryPlan,
22249
22344
  missing_packages: [],
22250
22345
  resolved_workspace: null,
22251
- backend_instance_id: null
22346
+ backend_instance_id: null,
22347
+ result_size: 0,
22348
+ cursor_available: false,
22349
+ truncated: false,
22350
+ latency_ms: null,
22351
+ confidence: 0.25,
22352
+ confidence_reason: "Strict workspace precondition failed; no semantic result available."
22252
22353
  })
22253
22354
  }]
22254
22355
  };
22255
22356
  }
22357
// Parses a pseudo tool invocation such as "tool(a=1, b='x')" into
// { tool, args }. Values are coerced: quoted -> string, true/false -> boolean,
// integer/float literals -> number; anything else stays a raw string.
// Returns null when the text is not call-shaped or any argument segment lacks
// a "key=" prefix. Quoting has no escape mechanism; commas inside quotes or
// nested parentheses do not split arguments.
function parseToolLikeCommand(command) {
  const callShape = /^([a-z_][a-z0-9_]*)\((.*)\)$/i.exec(command.trim());
  if (!callShape) {
    return null;
  }
  const [, tool, rawArgList] = callShape;
  const inner = rawArgList.trim();
  if (inner.length === 0) {
    return { tool, args: {} };
  }
  // Split on commas that sit outside quotes and outside nested parentheses.
  const segments = [];
  let buffer = "";
  let parenDepth = 0;
  let activeQuote = null;
  for (const ch of inner) {
    if (activeQuote) {
      if (ch === activeQuote) {
        activeQuote = null;
      }
      buffer += ch;
    } else if (ch === "'" || ch === '"') {
      activeQuote = ch;
      buffer += ch;
    } else if (ch === "(") {
      parenDepth += 1;
      buffer += ch;
    } else if (ch === ")") {
      parenDepth = Math.max(0, parenDepth - 1);
      buffer += ch;
    } else if (ch === "," && parenDepth === 0) {
      if (buffer.trim().length > 0) {
        segments.push(buffer.trim());
      }
      buffer = "";
    } else {
      buffer += ch;
    }
  }
  if (buffer.trim().length > 0) {
    segments.push(buffer.trim());
  }
  // Coerce a literal value the same way the CLI example commands are written.
  const coerce = (raw) => {
    const singleQuoted = raw.startsWith("'") && raw.endsWith("'");
    const doubleQuoted = raw.startsWith('"') && raw.endsWith('"');
    if (singleQuoted || doubleQuoted) {
      return raw.slice(1, -1);
    }
    if (raw === "true" || raw === "false") {
      return raw === "true";
    }
    if (/^-?\d+$/.test(raw)) {
      return Number.parseInt(raw, 10);
    }
    if (/^-?\d+\.\d+$/.test(raw)) {
      return Number.parseFloat(raw);
    }
    return raw;
  };
  const args = {};
  for (const segment of segments) {
    const eq = segment.indexOf("=");
    if (eq <= 0) {
      return null;
    }
    args[segment.slice(0, eq).trim()] = coerce(segment.slice(eq + 1).trim());
  }
  return { tool, args };
}
22423
// Converts an install-command list (with `nextStep` as a fallback when the
// list is empty) into an ordered recovery plan. Commands that parse as tool
// invocations become typed tool_call steps; everything else is a
// shell_command step. Blank commands are dropped and duplicates collapsed,
// preserving first-seen order.
function buildRecoveryPlan(installCommands, nextStep) {
  const seen = new Set();
  for (const raw of installCommands) {
    const trimmedCmd = String(raw || "").trim();
    if (trimmedCmd.length > 0) {
      seen.add(trimmedCmd);
    }
  }
  const steps = [...seen];
  if (steps.length === 0 && nextStep.trim().length > 0) {
    steps.push(nextStep.trim());
  }
  return steps.map((cmd, index) => {
    const asTool = parseToolLikeCommand(cmd);
    return {
      step: index + 1,
      action: index === 0 ? "run_next_step" : "retry_with_followup",
      type: asTool ? "tool_call" : "shell_command",
      tool: asTool?.tool ?? null,
      args: asTool?.args ?? null,
      command: cmd,
      reason: "Follow this command sequence to recover from strict semantic errors."
    };
  });
}
22441
// Validates a backend-provided recovery_plan, normalizing every usable entry
// to the canonical { step, action, type, tool, args, command, reason } shape.
// Non-object entries are skipped (their index still counts toward the default
// step/action numbering, matching backend expectations). Falls back to
// buildRecoveryPlan(...) when the input is not an array or normalizes empty.
function normalizeRecoveryPlan(value, installCommands, nextStep) {
  if (!Array.isArray(value)) {
    return buildRecoveryPlan(installCommands, nextStep);
  }
  const fallbackReason = "Follow this command sequence to recover from strict semantic errors.";
  const normalized = [];
  value.forEach((raw, index) => {
    if (!raw || typeof raw !== "object") {
      return;
    }
    const rec = raw;
    const command = String(rec.command || "").trim();
    const parsed = command.length > 0 ? parseToolLikeCommand(command) : null;
    let type;
    if (rec.type === "tool_call" || rec.type === "shell_command") {
      type = rec.type;
    } else {
      type = parsed ? "tool_call" : "shell_command";
    }
    const hasOwnStep = typeof rec.step === "number" && Number.isFinite(rec.step);
    const hasOwnAction = typeof rec.action === "string" && rec.action.length > 0;
    const hasObjectArgs = rec.args && typeof rec.args === "object" && !Array.isArray(rec.args);
    normalized.push({
      step: hasOwnStep ? rec.step : index + 1,
      action: hasOwnAction ? rec.action : index === 0 ? "run_next_step" : "retry_with_followup",
      type,
      tool: typeof rec.tool === "string" ? rec.tool : type === "tool_call" ? parsed?.tool ?? null : null,
      args: hasObjectArgs ? rec.args : type === "tool_call" ? parsed?.args ?? null : null,
      command: command.length > 0 ? command : typeof rec.tool === "string" ? `${String(rec.tool)}(...)` : "",
      reason: typeof rec.reason === "string" && rec.reason.length > 0 ? rec.reason : fallbackReason
    });
  });
  return normalized.length > 0 ? normalized : buildRecoveryPlan(installCommands, nextStep);
}
22467
// Estimates a payload's result size: an explicit finite `count` wins, then
// the length of the first well-known array field that is present, then the
// length of a string `result`, and finally 0.
function inferResponseResultSize(payload) {
  if (typeof payload.count === "number" && Number.isFinite(payload.count)) {
    return payload.count;
  }
  const sizedKey = ["matches", "references", "diagnostics", "lines", "symbols", "hints"]
    .find((key) => Array.isArray(payload[key]));
  if (sizedKey) {
    return payload[sizedKey].length;
  }
  return typeof payload.result === "string" ? payload.result.length : 0;
}
22479
// Ensures the standard cost/size telemetry fields (result_size,
// cursor_available, truncated, latency_ms) exist on a payload, inferring them
// from pagination/preview hints when absent. Fields the payload already
// provides with the right type are never overwritten.
function withStandardCostFields(payload) {
  const asObject = (candidate) => candidate && typeof candidate === "object" ? candidate : null;
  const page = asObject(payload.page);
  const preview = asObject(payload.preview);
  const pageHasMore = page !== null && page.has_more === true;
  // A cursor exists when the payload advertises a `next` call with a cursor
  // argument, or when the page reports more data.
  const cursorAvailable = asObject(payload.next) !== null && !!payload.next?.arguments?.cursor || pageHasMore;
  const looksTruncated = preview !== null && preview.truncated === true || pageHasMore;
  return {
    ...payload,
    result_size: typeof payload.result_size === "number" ? payload.result_size : inferResponseResultSize(payload),
    cursor_available: typeof payload.cursor_available === "boolean" ? payload.cursor_available : !!cursorAvailable,
    truncated: typeof payload.truncated === "boolean" ? payload.truncated : !!looksTruncated,
    latency_ms: typeof payload.latency_ms === "number" ? payload.latency_ms : null
  };
}
22256
22492
  function normalizeSemanticErrorPayload(payload, toolName, resolvedLanguage, resolvedWorkspace) {
22257
22493
  const rawError = payload.error;
22258
22494
  const hasError = typeof rawError === "string" && rawError.length > 0;
@@ -22263,17 +22499,19 @@ function normalizeSemanticErrorPayload(payload, toolName, resolvedLanguage, reso
22263
22499
  const nextStep = typeof payload.next_step === "string" && payload.next_step.length > 0 ? payload.next_step : resolvedLanguage && resolvedLanguage !== "multi" ? `Call switch_workspace_for_language(language='${resolvedLanguage}', path='/abs/project/root') then retry ${toolName}.` : `Retry ${toolName} after setting per-language workspace with switch_workspace_for_language(...).`;
22264
22500
  const installCommands = Array.isArray(payload.install_commands) ? payload.install_commands : [nextStep];
22265
22501
  const missingPackages = Array.isArray(payload.missing_packages) ? payload.missing_packages : [];
22266
- return {
22502
+ const recoveryPlan = normalizeRecoveryPlan(payload.recovery_plan, installCommands.map((cmd) => String(cmd)), nextStep);
22503
+ return withStandardCostFields({
22267
22504
  ...payload,
22268
22505
  error_code: normalizedCode,
22269
22506
  strict_mode: payload.strict_mode ?? true,
22270
22507
  next_step: nextStep,
22271
22508
  install_commands: installCommands,
22509
+ recovery_plan: recoveryPlan,
22272
22510
  missing_packages: missingPackages,
22273
22511
  tool: payload.tool ?? toolName,
22274
22512
  resolved_language: payload.resolved_language ?? resolvedLanguage ?? null,
22275
22513
  resolved_workspace: payload.resolved_workspace ?? resolvedWorkspace ?? null
22276
- };
22514
+ });
22277
22515
  }
22278
22516
  function withSemanticContext(response, toolName, resolvedWorkspace, backendInstanceId, resolvedLanguage) {
22279
22517
  if (!isSemanticTool(toolName))
@@ -22303,11 +22541,13 @@ function withSemanticContext(response, toolName, resolvedWorkspace, backendInsta
22303
22541
  }]
22304
22542
  };
22305
22543
  }
22544
+ const parsedRecord = parsed;
22545
+ const parsedResolvedLanguage = (typeof parsedRecord.resolved_language === "string" ? parsedRecord.resolved_language : null) || (typeof parsedRecord.language === "string" ? parsedRecord.language : null) || effectiveLanguage;
22306
22546
  return {
22307
22547
  content: [{
22308
22548
  type: "text",
22309
22549
  text: JSON.stringify({
22310
- ...normalizeSemanticErrorPayload(parsed, toolName, parsed.resolved_language ?? parsed.language ?? effectiveLanguage, resolvedWorkspace),
22550
+ ...withConfidenceFields(withStandardCostFields(normalizeSemanticErrorPayload(parsedRecord, toolName, parsedResolvedLanguage, resolvedWorkspace))),
22311
22551
  resolved_language: parsed.resolved_language ?? parsed.language ?? effectiveLanguage,
22312
22552
  resolved_workspace: resolvedWorkspace,
22313
22553
  backend_instance_id: backendInstanceId
@@ -22322,7 +22562,13 @@ function withSemanticContext(response, toolName, resolvedWorkspace, backendInsta
22322
22562
  result: first.text,
22323
22563
  resolved_language: effectiveLanguage,
22324
22564
  resolved_workspace: resolvedWorkspace,
22325
- backend_instance_id: backendInstanceId
22565
+ backend_instance_id: backendInstanceId,
22566
+ result_size: typeof first.text === "string" ? first.text.length : 0,
22567
+ cursor_available: false,
22568
+ truncated: false,
22569
+ latency_ms: null,
22570
+ confidence: 0.6,
22571
+ confidence_reason: "Non-JSON response; semantic confidence is reduced."
22326
22572
  })
22327
22573
  }]
22328
22574
  };
@@ -22364,6 +22610,10 @@ var LLM_FEATURE_PROBE_METADATA = {
22364
22610
  call_hierarchy: {
22365
22611
  expected_latency_ms: { p50: 280, p95: 2200 },
22366
22612
  failure_signatures: ["NOT_IMPLEMENTED", "No call hierarchy available", "LANGUAGE_WORKSPACE_REQUIRED"]
22613
+ },
22614
+ type_hierarchy: {
22615
+ expected_latency_ms: { p50: 320, p95: 2600 },
22616
+ failure_signatures: ["NOT_IMPLEMENTED", "NO_SYMBOL_AT_POSITION", "TYPE_HIERARCHY_FALLBACK_ERROR"]
22367
22617
  }
22368
22618
  };
22369
22619
  var LLM_FEATURE_TARGETS = [
@@ -22372,7 +22622,8 @@ var LLM_FEATURE_TARGETS = [
22372
22622
  "moniker",
22373
22623
  "inlay_hint_resolve",
22374
22624
  "read_file_with_hints",
22375
- "call_hierarchy"
22625
+ "call_hierarchy",
22626
+ "type_hierarchy"
22376
22627
  ];
22377
22628
  function fileExistsSafe(p) {
22378
22629
  try {
@@ -22803,37 +23054,42 @@ server.registerTool("semantic_session_start", {
22803
23054
  resolvedLanguage = inferLanguageFromPath(abs, config2);
22804
23055
  }
22805
23056
  if (!resolvedLanguage) {
23057
+ const nextStep2 = "Call semantic_session_start(language='typescript'|'python'|'vue', workspace='/abs/project/root').";
23058
+ const installCommands2 = ["semantic_session_start(language='typescript', workspace='/abs/project/root')"];
22806
23059
  return {
22807
23060
  content: [{
22808
23061
  type: "text",
22809
- text: JSON.stringify({
23062
+ text: JSON.stringify(withConfidenceFields(withStandardCostFields({
22810
23063
  success: false,
22811
23064
  error: "SEMANTIC_SESSION_LANGUAGE_REQUIRED",
22812
23065
  error_code: "SEMANTIC_SESSION_LANGUAGE_REQUIRED",
22813
23066
  message: "Unable to infer language. Provide language or a file/workspace path.",
22814
- next_step: "Call semantic_session_start(language='typescript'|'python'|'vue', workspace='/abs/project/root').",
22815
- install_commands: ["semantic_session_start(language='typescript', workspace='/abs/project/root')"],
23067
+ next_step: nextStep2,
23068
+ install_commands: installCommands2,
23069
+ recovery_plan: buildRecoveryPlan(installCommands2, nextStep2),
22816
23070
  missing_packages: [],
22817
23071
  strict_mode: true
22818
- })
23072
+ })))
22819
23073
  }]
22820
23074
  };
22821
23075
  }
22822
23076
  if (!config2.languages[resolvedLanguage]?.enabled) {
23077
+ const nextStep2 = `Set LSP_MCP_${resolvedLanguage.toUpperCase()}_ENABLED=true and restart server.`;
22823
23078
  return {
22824
23079
  content: [{
22825
23080
  type: "text",
22826
- text: JSON.stringify({
23081
+ text: JSON.stringify(withConfidenceFields(withStandardCostFields({
22827
23082
  success: false,
22828
23083
  error: "LANGUAGE_DISABLED",
22829
23084
  error_code: "LANGUAGE_DISABLED",
22830
23085
  language: resolvedLanguage,
22831
23086
  message: `Language '${resolvedLanguage}' is disabled in current config.`,
22832
- next_step: `Set LSP_MCP_${resolvedLanguage.toUpperCase()}_ENABLED=true and restart server.`,
23087
+ next_step: nextStep2,
22833
23088
  install_commands: [],
23089
+ recovery_plan: buildRecoveryPlan([], nextStep2),
22834
23090
  missing_packages: [],
22835
23091
  strict_mode: true
22836
- })
23092
+ })))
22837
23093
  }]
22838
23094
  };
22839
23095
  }
@@ -22982,10 +23238,11 @@ server.registerTool("semantic_session_start", {
22982
23238
  fallback_command: sampleHover
22983
23239
  });
22984
23240
  const success = !!resolvedWorkspace && dependencyStatus === "ok" && (!shouldStartBackend || backendStarted);
23241
+ const nextStep = success ? commands[commands.length - 1] : installCommands[0] || commands[0];
22985
23242
  return {
22986
23243
  content: [{
22987
23244
  type: "text",
22988
- text: JSON.stringify({
23245
+ text: JSON.stringify(withConfidenceFields(withStandardCostFields({
22989
23246
  success,
22990
23247
  language: resolvedLanguage,
22991
23248
  resolved_language: resolvedLanguage,
@@ -22999,21 +23256,522 @@ server.registerTool("semantic_session_start", {
22999
23256
  strict_mode: true,
23000
23257
  commands,
23001
23258
  feature_probe_sequence: probeSteps,
23002
- next_step: success ? commands[commands.length - 1] : installCommands[0] || commands[0]
23003
- })
23259
+ next_step: nextStep,
23260
+ recovery_plan: success ? [] : buildRecoveryPlan(installCommands, nextStep)
23261
+ })))
23004
23262
  }]
23005
23263
  };
23006
23264
  });
23265
// Registers the `semantic_navigate` MCP tool: a composite workflow that runs
// (optionally) search, then definition + references in a configurable order,
// then read_file_with_hints, and returns one aggregated JSON result with
// per-step status/latency plus confidence and cost telemetry.
server.registerTool("semantic_navigate", {
  description: "Run an LLM-oriented semantic workflow in one call: optional search -> definition -> references -> read_file_with_hints.",
  inputSchema: {
    file: exports_external.string(),
    line: exports_external.number().int().positive(),
    column: exports_external.number().int().positive(),
    // "fast" skips the hints step; "deep" (default) includes it.
    mode: exports_external.enum(["fast", "deep"]).default("deep").optional(),
    strategy: exports_external.enum(["balanced", "definition_first", "references_first"]).default("balanced").optional(),
    query: exports_external.string().optional(),
    page_size: exports_external.number().int().positive().max(200).default(20).optional(),
    hint_start_line: exports_external.number().int().positive().default(1).optional(),
    hint_max_lines: exports_external.number().int().positive().max(400).default(120).optional(),
    reference_preview: exports_external.number().int().positive().max(200).default(20).optional()
  }
}, async ({ file, line, column, mode, strategy, query, page_size, hint_start_line, hint_max_lines, reference_preview }) => {
  const startedAt = Date.now();
  // Normalize enum-ish inputs defensively even though the schema has defaults.
  const navigateMode = mode === "fast" ? "fast" : "deep";
  const navigateStrategy = strategy === "definition_first" || strategy === "references_first" ? strategy : "balanced";
  // Relative paths resolve against the active workspace when one is set,
  // otherwise against the process CWD.
  const absFile = path2.isAbsolute(file) ? file : activeWorkspacePath ? path2.join(activeWorkspacePath, file) : path2.resolve(file);
  const language = inferLanguageFromPath(absFile, config2);
  if (!language) {
    // Strict failure: no language could be inferred from the file extension.
    return {
      content: [{
        type: "text",
        text: JSON.stringify(withConfidenceFields(withStandardCostFields({
          error: "UNSUPPORTED_FILE_TYPE",
          error_code: "UNSUPPORTED_FILE_TYPE",
          message: `Cannot infer language for '${file}'.`,
          next_step: "Provide a file path with a supported extension.",
          recovery_plan: [{
            step: 1,
            action: "provide_supported_file",
            type: "tool_call",
            tool: "semantic_navigate",
            args: { file: "/abs/path/to/file.ts|.py|.vue", line: 1, column: 1 },
            command: "semantic_navigate(file='/abs/path/to/file.ts|.py|.vue', line=1, column=1)",
            reason: "semantic_navigate requires a resolvable source file."
          }],
          strict_mode: true
        })))
      }]
    };
  }
  const workspace = getWorkspaceForLanguage(language);
  if (!workspace) {
    // Semantic tools require an explicit per-language workspace mapping.
    return semanticWorkspaceRequiredResponse(language, "semantic_navigate");
  }
  const singletonLock = await ensureBackendSingleton(language, workspace);
  if (!singletonLock.ok) {
    return withSemanticContext(singletonLock.response, "semantic_navigate", workspace, null, language);
  }
  const proxyHost = singletonLock.proxyHost;
  const proxyPort = singletonLock.proxyPort;
  // Identify the serving backend: a remote proxy when host/port are present,
  // otherwise the locally managed backend instance.
  const backendInstanceId = proxyHost && proxyPort ? `proxy:${language}@${proxyHost}:${proxyPort}` : backendManager.getBackendIdentity(language)?.instanceId ?? null;
  const callBackendTool = (toolName, backendArgs) => {
    if (proxyHost && proxyPort) {
      return callRemoteBackendTool(proxyHost, proxyPort, language, toolName, backendArgs, workspace);
    }
    return backendManager.callTool(language, toolName, backendArgs);
  };
  // Lazily start the local backend once per language and point it at the
  // resolved workspace before issuing tool calls.
  if (!proxyHost && !proxyPort && !startedBackends.has(language)) {
    await backendManager.getBackend(language);
    startedBackends.add(language);
    await backendManager.callTool(language, "switch_workspace", { path: workspace });
  }
  // Backend tool responses are JSON-in-text; fall back to a raw-text wrapper
  // when parsing fails.
  const parseToolPayload = (response) => {
    const text = response.content?.[0]?.text ?? "{}";
    try {
      return JSON.parse(text);
    } catch {
      return { result: text };
    }
  };
  const workflowSteps = {};
  // Runs one backend tool, recording status/latency (and error details) under
  // workflowSteps[stepName]; never throws.
  const runStep = async (stepName, toolName, toolArgs, fallbackResult = {}) => {
    const stepStarted = Date.now();
    try {
      const payload = parseToolPayload(await callBackendTool(toolName, toolArgs));
      if (payload && typeof payload === "object" && payload.error) {
        workflowSteps[stepName] = {
          status: "error",
          tool: toolName,
          latency_ms: Date.now() - stepStarted,
          error: payload.error,
          error_code: payload.error_code || "STEP_ERROR",
          next_step: payload.next_step || `Retry ${toolName}.`
        };
        return { ok: false, payload };
      }
      workflowSteps[stepName] = {
        status: "ok",
        tool: toolName,
        latency_ms: Date.now() - stepStarted,
        ...payload || fallbackResult
      };
      return { ok: true, payload };
    } catch (error2) {
      workflowSteps[stepName] = {
        status: "error",
        tool: toolName,
        latency_ms: Date.now() - stepStarted,
        error: String(error2),
        error_code: "STEP_EXCEPTION"
      };
      return { ok: false, payload: { error: String(error2), error_code: "STEP_EXCEPTION" } };
    }
  };
  const stepOrder = [];
  // Optional first step: workspace-wide search when a non-blank query was given.
  if (typeof query === "string" && query.trim().length > 0) {
    stepOrder.push("search");
    const searchRes = await runStep("search", "search", {
      pattern: query.trim(),
      path: workspace,
      page_size: typeof page_size === "number" ? page_size : 20
    });
    if (searchRes.ok && searchRes.payload && typeof searchRes.payload === "object") {
      const items = extractSearchLikeItems(searchRes.payload);
      // Enrich the recorded search step with a bounded preview and paging hints.
      workflowSteps.search = {
        ...workflowSteps.search,
        count: extractSearchLikeCount(searchRes.payload, items),
        preview: items.slice(0, typeof page_size === "number" ? page_size : 20),
        cursor_available: !!searchRes.payload?.next?.arguments?.cursor,
        truncated: !!searchRes.payload?.page?.has_more
      };
    }
  }
  let definitionRes = { ok: false, payload: {} };
  let referencesRes = { ok: false, payload: {} };
  const runDefinition = async () => {
    stepOrder.push("definition");
    definitionRes = await runStep("definition", "definition", { file: absFile, line, column });
  };
  const runReferences = async () => {
    stepOrder.push("references");
    referencesRes = await runStep("references", "references", {
      file: absFile,
      line,
      column,
      // fast mode trims the reference page to 10 unless caller overrode it
      page_size: typeof reference_preview === "number" ? reference_preview : navigateMode === "fast" ? 10 : 20
    });
  };
  // Strategy only changes ordering; "balanced" currently behaves like
  // definition-first.
  if (navigateStrategy === "references_first") {
    await runReferences();
    await runDefinition();
  } else if (navigateStrategy === "definition_first") {
    await runDefinition();
    await runReferences();
  } else {
    await runDefinition();
    await runReferences();
  }
  let hintsRes = { ok: false, payload: {} };
  // Hints run in deep mode, or whenever the caller explicitly passed hint
  // bounds even in fast mode.
  const runHints = navigateMode === "deep" || typeof hint_start_line === "number" || typeof hint_max_lines === "number";
  if (runHints) {
    hintsRes = await runStep("read_file_with_hints", "read_file_with_hints", {
      file: absFile,
      start_line: typeof hint_start_line === "number" ? hint_start_line : 1,
      max_lines: typeof hint_max_lines === "number" ? hint_max_lines : 120
    });
  } else {
    workflowSteps.read_file_with_hints = {
      status: "skipped",
      tool: "read_file_with_hints",
      reason: "Skipped in fast mode to reduce payload and latency."
    };
  }
  const referencesPayload = referencesRes.payload && typeof referencesRes.payload === "object" ? referencesRes.payload : {};
  const referenceItems = extractReferencesItems(referencesPayload);
  const referenceCount = extractReferencesCount(referencesPayload, referenceItems);
  // The workflow counts as ok when any of the three core steps succeeded.
  const ok = Boolean(definitionRes.ok || referencesRes.ok || hintsRes.ok);
  const nextStep = ok ? navigateMode === "fast" ? "For richer context, rerun semantic_navigate(mode='deep', ...) before refactors." : "Continue with code_action/rename based on references and hints." : `Retry semantic_navigate after running doctor(probe_backends=true) and checking workspace/dependencies for '${language}'.`;
  const recoveryPlan = ok ? [] : buildRecoveryPlan([`doctor(probe_backends=true, check_latest_versions=true)`, `switch_workspace_for_language(language='${language}', path='${workspace}')`], nextStep);
  // Aggregate result, then layer on cost telemetry and confidence heuristics.
  const result = withConfidenceFields(withStandardCostFields({
    ok,
    tool: "semantic_navigate",
    strict_mode: true,
    mode: navigateMode,
    strategy: navigateStrategy,
    file: absFile,
    position: { line, column },
    resolved_language: language,
    resolved_workspace: workspace,
    backend_instance_id: backendInstanceId,
    steps: workflowSteps,
    summary: {
      mode: navigateMode,
      strategy: navigateStrategy,
      step_order: stepOrder,
      references_count: referenceCount,
      definition_ok: definitionRes.ok,
      references_ok: referencesRes.ok,
      hints_ok: hintsRes.ok
    },
    next_step: nextStep,
    recovery_plan: recoveryPlan,
    latency_ms: Date.now() - startedAt,
    result_size: referenceCount,
    cursor_available: !!referencesPayload?.next?.arguments?.cursor,
    truncated: !!referencesPayload?.page?.has_more
  }));
  return {
    content: [{ type: "text", text: JSON.stringify(result) }]
  };
});
23469
+ server.registerTool("diagnostics_delta", {
23470
+ description: "Run diagnostics and return delta against previous diagnostics snapshot for the same language/workspace/path.",
23471
+ inputSchema: {
23472
+ path: exports_external.string(),
23473
+ summary_only: exports_external.boolean().default(false).optional(),
23474
+ preview_limit: exports_external.number().int().positive().default(100).optional(),
23475
+ hotspot_limit: exports_external.number().int().positive().max(50).default(5).optional(),
23476
+ severity: exports_external.enum(["error", "warning", "information", "hint"]).optional(),
23477
+ source: exports_external.string().optional(),
23478
+ page_size: exports_external.number().int().positive().max(500).default(100).optional(),
23479
+ cursor: exports_external.string().optional()
23480
+ }
23481
+ }, async ({ path: targetPath, summary_only, preview_limit, hotspot_limit, severity, source, page_size, cursor }) => {
23482
+ const pageSize = typeof page_size === "number" ? page_size : 100;
23483
+ const hotspotLimit = typeof hotspot_limit === "number" ? hotspot_limit : 5;
23484
+ if (typeof cursor === "string") {
23485
+ const page = readCursorPage("diagnostics_delta", cursor, pageSize);
23486
+ if (!page.ok) {
23487
+ return {
23488
+ content: [{
23489
+ type: "text",
23490
+ text: JSON.stringify(withConfidenceFields(withStandardCostFields({
23491
+ ok: false,
23492
+ tool: "diagnostics_delta",
23493
+ strict_mode: true,
23494
+ error: page.data.error || "INVALID_CURSOR",
23495
+ error_code: "INVALID_CURSOR",
23496
+ next_step: "Call diagnostics_delta(path=...) again without cursor to create a new baseline page.",
23497
+ cursor_available: false,
23498
+ truncated: false,
23499
+ result_size: 0
23500
+ })))
23501
+ }]
23502
+ };
23503
+ }
23504
+ const items = Array.isArray(page.data.items) ? page.data.items : [];
23505
+ const summary = page.data.summary || {};
23506
+ return {
23507
+ content: [{
23508
+ type: "text",
23509
+ text: JSON.stringify(withConfidenceFields(withStandardCostFields({
23510
+ ok: true,
23511
+ tool: "diagnostics_delta",
23512
+ strict_mode: true,
23513
+ page: page.data.page,
23514
+ count: page.data.count ?? items.length,
23515
+ delta: {
23516
+ ...summary.delta || {},
23517
+ changes_page: items
23518
+ },
23519
+ next_step: page.data.page?.has_more ? "Use expand_result(cursor=...) or diagnostics_delta(cursor=...) to continue paged diagnostics changes." : "Call diagnostics_delta again after edits to get new incremental diagnostics changes."
23520
+ })))
23521
+ }]
23522
+ };
23523
+ }
23524
+ const absPath = path2.isAbsolute(targetPath) ? targetPath : activeWorkspacePath ? path2.join(activeWorkspacePath, targetPath) : path2.resolve(targetPath);
23525
+ const language = inferLanguageFromPath(absPath, config2);
23526
+ if (!language) {
23527
+ return {
23528
+ content: [{
23529
+ type: "text",
23530
+ text: JSON.stringify(withConfidenceFields(withStandardCostFields({
23531
+ error: "UNSUPPORTED_FILE_TYPE",
23532
+ error_code: "UNSUPPORTED_FILE_TYPE",
23533
+ message: `Cannot infer language for '${targetPath}'.`,
23534
+ strict_mode: true,
23535
+ next_step: "Provide a file/path with a supported extension for diagnostics_delta.",
23536
+ recovery_plan: [{
23537
+ step: 1,
23538
+ action: "provide_supported_path",
23539
+ type: "tool_call",
23540
+ tool: "diagnostics_delta",
23541
+ args: { path: "/abs/path/to/file.ts|.py|.vue" },
23542
+ command: "diagnostics_delta(path='/abs/path/to/file.ts|.py|.vue')",
23543
+ reason: "diagnostics_delta needs a language-resolvable file or directory."
23544
+ }],
23545
+ ...withStandardCostFields({ result_size: 0, cursor_available: false, truncated: false, latency_ms: null })
23546
+ })))
23547
+ }]
23548
+ };
23549
+ }
23550
+ const workspace = getWorkspaceForLanguage(language);
23551
+ if (!workspace) {
23552
+ return semanticWorkspaceRequiredResponse(language, "diagnostics_delta");
23553
+ }
23554
+ const startedAt = Date.now();
23555
+ const singletonLock = await ensureBackendSingleton(language, workspace);
23556
+ if (!singletonLock.ok) {
23557
+ return withSemanticContext(singletonLock.response, "diagnostics_delta", workspace, null, language);
23558
+ }
23559
+ const proxyHost = singletonLock.proxyHost;
23560
+ const proxyPort = singletonLock.proxyPort;
23561
+ const backendInstanceId = proxyHost && proxyPort ? `proxy:${language}@${proxyHost}:${proxyPort}` : backendManager.getBackendIdentity(language)?.instanceId ?? null;
23562
+ const callBackendTool = (toolName, backendArgs) => {
23563
+ if (proxyHost && proxyPort) {
23564
+ return callRemoteBackendTool(proxyHost, proxyPort, language, toolName, backendArgs, workspace);
23565
+ }
23566
+ return backendManager.callTool(language, toolName, backendArgs);
23567
+ };
23568
+ if (!proxyHost && !proxyPort && !startedBackends.has(language)) {
23569
+ await backendManager.getBackend(language);
23570
+ startedBackends.add(language);
23571
+ await backendManager.callTool(language, "switch_workspace", { path: workspace });
23572
+ }
23573
+ try {
23574
+ const diagnosticsRes = await callBackendTool("diagnostics", {
23575
+ path: absPath,
23576
+ summary_only: !!summary_only
23577
+ });
23578
+ const parsed = JSON.parse(diagnosticsRes.content?.[0]?.text || "{}");
23579
+ const diagnostics = extractDiagnosticsItems(parsed);
23580
+ const currentByFingerprint = new Map;
23581
+ for (const diag of diagnostics) {
23582
+ currentByFingerprint.set(fingerprintDiagnostic(diag), diag);
23583
+ }
23584
+ const cacheKey = `${language}:${workspace}:${absPath}`;
23585
+ const previous = diagnosticsDeltaStore.get(cacheKey);
23586
+ const previousByFingerprint = new Map;
23587
+ for (const diag of previous?.diagnostics || []) {
23588
+ previousByFingerprint.set(fingerprintDiagnostic(diag), diag);
23589
+ }
23590
+ const added = [];
23591
+ const removed = [];
23592
+ for (const [fp, diag] of currentByFingerprint.entries()) {
23593
+ if (!previousByFingerprint.has(fp))
23594
+ added.push(diag);
23595
+ }
23596
+ for (const [fp, diag] of previousByFingerprint.entries()) {
23597
+ if (!currentByFingerprint.has(fp))
23598
+ removed.push(diag);
23599
+ }
23600
+ const levelMap = {
23601
+ error: 1,
23602
+ warning: 2,
23603
+ information: 3,
23604
+ hint: 4
23605
+ };
23606
+ const severityCode = severity ? levelMap[severity] : null;
23607
+ const applyFilters = (diag) => {
23608
+ if (typeof severityCode === "number" && Number(diag.severity ?? 0) !== severityCode)
23609
+ return false;
23610
+ if (typeof source === "string" && source.trim().length > 0) {
23611
+ const src = String(diag.source ?? "").toLowerCase();
23612
+ if (!src.includes(source.trim().toLowerCase()))
23613
+ return false;
23614
+ }
23615
+ return true;
23616
+ };
23617
+ const filteredAdded = added.filter(applyFilters);
23618
+ const filteredRemoved = removed.filter(applyFilters);
23619
+ const deltaChanges = [
23620
+ ...filteredAdded.map((diag) => ({ kind: "added", diagnostic: diag })),
23621
+ ...filteredRemoved.map((diag) => ({ kind: "removed", diagnostic: diag }))
23622
+ ];
23623
+ const resolveDiagFile = (diag) => {
23624
+ const file = String(diag.file || diag.path || "").trim();
23625
+ if (file.length > 0)
23626
+ return file;
23627
+ const uri = String(diag.uri || "").trim();
23628
+ if (uri.startsWith("file://")) {
23629
+ try {
23630
+ return decodeURIComponent(uri.replace("file://", ""));
23631
+ } catch {
23632
+ return uri;
23633
+ }
23634
+ }
23635
+ return absPath;
23636
+ };
23637
+ const resolveDiagSeverity = (diag) => {
23638
+ const code = Number(diag.severity ?? 0);
23639
+ if (code === 1)
23640
+ return "error";
23641
+ if (code === 2)
23642
+ return "warning";
23643
+ if (code === 3)
23644
+ return "information";
23645
+ if (code === 4)
23646
+ return "hint";
23647
+ return "unknown";
23648
+ };
23649
+ const fileSummaryMap = new Map;
23650
+ for (const diag of diagnostics) {
23651
+ const file = resolveDiagFile(diag);
23652
+ const sev = resolveDiagSeverity(diag);
23653
+ const row = fileSummaryMap.get(file) || {
23654
+ file,
23655
+ current_count: 0,
23656
+ added_count: 0,
23657
+ removed_count: 0,
23658
+ by_severity: {}
23659
+ };
23660
+ row.current_count += 1;
23661
+ row.by_severity[sev] = (row.by_severity[sev] || 0) + 1;
23662
+ fileSummaryMap.set(file, row);
23663
+ }
23664
+ for (const diag of filteredAdded) {
23665
+ const file = resolveDiagFile(diag);
23666
+ const row = fileSummaryMap.get(file) || {
23667
+ file,
23668
+ current_count: 0,
23669
+ added_count: 0,
23670
+ removed_count: 0,
23671
+ by_severity: {}
23672
+ };
23673
+ row.added_count += 1;
23674
+ fileSummaryMap.set(file, row);
23675
+ }
23676
+ for (const diag of filteredRemoved) {
23677
+ const file = resolveDiagFile(diag);
23678
+ const row = fileSummaryMap.get(file) || {
23679
+ file,
23680
+ current_count: 0,
23681
+ added_count: 0,
23682
+ removed_count: 0,
23683
+ by_severity: {}
23684
+ };
23685
+ row.removed_count += 1;
23686
+ fileSummaryMap.set(file, row);
23687
+ }
23688
+ const file_summary = Array.from(fileSummaryMap.values()).sort((a, b) => b.current_count + b.added_count - (a.current_count + a.added_count));
23689
+ const top_hotspots = file_summary.slice(0, hotspotLimit);
23690
+ diagnosticsDeltaStore.set(cacheKey, {
23691
+ updatedAt: Date.now(),
23692
+ diagnostics
23693
+ });
23694
+ const limit = typeof preview_limit === "number" ? preview_limit : 100;
23695
+ const deltaCursor = deltaChanges.length > limit ? makeCursor("diagnostics_delta", deltaChanges, deltaChanges.length, {
23696
+ delta: {
23697
+ previous_count: previous?.diagnostics.length ?? 0,
23698
+ current_count: diagnostics.length,
23699
+ added_count: filteredAdded.length,
23700
+ removed_count: filteredRemoved.length,
23701
+ baseline_created: !previous,
23702
+ baseline_updated_at: previous?.updatedAt ?? null,
23703
+ file_summary,
23704
+ top_hotspots,
23705
+ filters: {
23706
+ severity: severity ?? null,
23707
+ source: source ?? null
23708
+ }
23709
+ }
23710
+ }) : null;
23711
+ const deltaPayload = withConfidenceFields(withStandardCostFields({
23712
+ ok: true,
23713
+ tool: "diagnostics_delta",
23714
+ strict_mode: true,
23715
+ path: absPath,
23716
+ resolved_language: language,
23717
+ resolved_workspace: workspace,
23718
+ backend_instance_id: backendInstanceId,
23719
+ delta: {
23720
+ previous_count: previous?.diagnostics.length ?? 0,
23721
+ current_count: diagnostics.length,
23722
+ added_count: filteredAdded.length,
23723
+ removed_count: filteredRemoved.length,
23724
+ added_preview: filteredAdded.slice(0, limit),
23725
+ removed_preview: filteredRemoved.slice(0, limit),
23726
+ changes_page: deltaChanges.slice(0, limit),
23727
+ baseline_created: !previous,
23728
+ baseline_updated_at: previous?.updatedAt ?? null,
23729
+ file_summary,
23730
+ top_hotspots,
23731
+ filters: {
23732
+ hotspot_limit: hotspotLimit,
23733
+ severity: severity ?? null,
23734
+ source: source ?? null
23735
+ }
23736
+ },
23737
+ next_step: "Call diagnostics_delta again after edits to get incremental diagnostics changes.",
23738
+ fallback_used: false,
23739
+ approximate: false,
23740
+ latency_ms: Date.now() - startedAt,
23741
+ result_size: diagnostics.length,
23742
+ cursor_available: !!deltaCursor,
23743
+ truncated: deltaChanges.length > limit,
23744
+ next: deltaCursor ? { tool: "diagnostics_delta", arguments: { cursor: deltaCursor, page_size: pageSize } } : null
23745
+ }));
23746
+ return { content: [{ type: "text", text: JSON.stringify(deltaPayload) }] };
23747
+ } catch (error2) {
23748
+ return withSemanticContext({
23749
+ content: [{
23750
+ type: "text",
23751
+ text: JSON.stringify({
23752
+ error: String(error2),
23753
+ error_code: "DIAGNOSTICS_DELTA_ERROR",
23754
+ strict_mode: true,
23755
+ next_step: "Retry diagnostics_delta or run diagnostics(path=...) directly.",
23756
+ install_commands: [],
23757
+ missing_packages: []
23758
+ })
23759
+ }]
23760
+ }, "diagnostics_delta", workspace, backendInstanceId, language);
23761
+ }
23762
+ });
23007
23763
  server.registerTool("doctor", {
23008
23764
  description: "Run environment and backend readiness checks for out-of-box troubleshooting.",
23009
23765
  inputSchema: {
23010
23766
  probe_backends: exports_external.boolean().default(false).optional(),
23011
23767
  check_latest_versions: exports_external.boolean().default(false).optional(),
23768
+ capability_snapshot_id: exports_external.string().optional(),
23012
23769
  page_size: exports_external.number().int().positive().default(50).optional(),
23013
23770
  cursor: exports_external.string().optional()
23014
23771
  }
23015
- }, async ({ probe_backends, check_latest_versions, page_size, cursor }) => {
23772
+ }, async ({ probe_backends, check_latest_versions, capability_snapshot_id, page_size, cursor }) => {
23016
23773
  const pageSize = typeof page_size === "number" ? page_size : 50;
23774
+ cleanupCapabilitySnapshots();
23017
23775
  if (typeof cursor === "string") {
23018
23776
  const page = readCursorPage("doctor", cursor, pageSize);
23019
23777
  if (!page.ok) {
@@ -23062,6 +23820,115 @@ server.registerTool("doctor", {
23062
23820
  uv: checkCommand("uv"),
23063
23821
  bun: checkCommand("bun")
23064
23822
  };
23823
+ const latestBenchmark = loadLatestBenchmarkReport();
23824
+ const benchmarkInsights = (() => {
23825
+ if (!latestBenchmark.found || !latestBenchmark.report) {
23826
+ return {
23827
+ found: false,
23828
+ path: latestBenchmark.path,
23829
+ error: latestBenchmark.error || null,
23830
+ next_step: `Run \`bun run benchmark:report\` to generate ${latestBenchmark.path}.`
23831
+ };
23832
+ }
23833
+ const report = latestBenchmark.report;
23834
+ const cases = Array.isArray(report.cases) ? report.cases : [];
23835
+ const baselineConfigured = process.env.LSP_MCP_BENCHMARK_BASELINE_PATH || ".tmp/benchmark-baseline.json";
23836
+ const baselinePath = path2.isAbsolute(baselineConfigured) ? baselineConfigured : path2.resolve(process.cwd(), baselineConfigured);
23837
+ const baseline = loadBenchmarkReport(baselinePath);
23838
+ const baselineCases = baseline.report?.cases || [];
23839
+ const baselineMap = new Map(baselineCases.map((c) => [c.id, c]));
23840
+ const trendPairs = cases.map((curr) => {
23841
+ const prev = baselineMap.get(curr.id);
23842
+ if (!prev || prev.latency_ms <= 0)
23843
+ return null;
23844
+ const deltaMs = curr.latency_ms - prev.latency_ms;
23845
+ const deltaPct = deltaMs / prev.latency_ms * 100;
23846
+ return {
23847
+ id: curr.id,
23848
+ tool: curr.tool,
23849
+ current_latency_ms: curr.latency_ms,
23850
+ baseline_latency_ms: prev.latency_ms,
23851
+ delta_ms: deltaMs,
23852
+ delta_pct: Math.round(deltaPct * 10) / 10
23853
+ };
23854
+ }).filter((v) => !!v);
23855
+ const regressions = trendPairs.filter((p) => p.delta_pct > 20 && p.delta_ms > 50).sort((a, b) => b.delta_pct - a.delta_pct);
23856
+ const improvements = trendPairs.filter((p) => p.delta_pct < -20 && p.delta_ms < -50).sort((a, b) => a.delta_pct - b.delta_pct);
23857
+ const slowCases = [...cases].filter((c) => typeof c.latency_ms === "number" && c.latency_ms >= 1200).sort((a, b) => b.latency_ms - a.latency_ms).slice(0, 5);
23858
+ const errorCases = cases.filter((c) => !c.ok);
23859
+ const tokenHeavyCases = cases.filter((c) => c.truncated || c.cursor_available || c.result_size > 400);
23860
+ const totalLatency = report.summary?.total_latency_ms ?? cases.reduce((sum, c) => sum + (Number(c.latency_ms) || 0), 0);
23861
+ const budgetStatus = regressions.length > 0 ? "regressed" : errorCases.length > 0 ? "degraded" : totalLatency > 6000 ? "high_latency" : "healthy";
23862
+ return {
23863
+ found: true,
23864
+ path: latestBenchmark.path,
23865
+ generated_at: report.generated_at,
23866
+ total_cases: report.summary?.total_cases ?? cases.length,
23867
+ ok_cases: report.summary?.ok_cases ?? cases.filter((c) => c.ok).length,
23868
+ error_cases: report.summary?.error_cases ?? errorCases.length,
23869
+ total_latency_ms: totalLatency,
23870
+ budget_status: budgetStatus,
23871
+ trend: {
23872
+ baseline_found: baseline.found,
23873
+ baseline_path: baseline.path,
23874
+ compared_cases: trendPairs.length,
23875
+ regressions_count: regressions.length,
23876
+ improvements_count: improvements.length,
23877
+ regressions: regressions.slice(0, 5),
23878
+ improvements: improvements.slice(0, 5)
23879
+ },
23880
+ slow_cases: slowCases,
23881
+ token_heavy_cases: tokenHeavyCases.slice(0, 5),
23882
+ recommended_mode: totalLatency > 6000 ? "semantic_navigate(mode='fast')" : "semantic_navigate(mode='deep')",
23883
+ next_step: regressions.length > 0 ? "Benchmark regressed vs baseline; review regressions before changing LLM defaults." : errorCases.length > 0 ? "Investigate failed benchmark cases before trusting semantic automation." : "Use slow_cases and token_heavy_cases to set default mode/strategy for LLM workflows."
23884
+ };
23885
+ })();
23886
+ const llmSemanticDefaults = (() => {
23887
+ const found = !!benchmarkInsights.found;
23888
+ const budget = String(benchmarkInsights.budget_status || "unknown");
23889
+ const tokenHeavyCount = Array.isArray(benchmarkInsights.token_heavy_cases) ? benchmarkInsights.token_heavy_cases.length : 0;
23890
+ const slowCaseIds = new Set(Array.isArray(benchmarkInsights.slow_cases) ? benchmarkInsights.slow_cases.map((c) => String(c.id || "")) : []);
23891
+ const mode = budget === "high_latency" || budget === "degraded" || budget === "regressed" ? "fast" : "deep";
23892
+ const strategy = slowCaseIds.has("semantic_navigate_references_first_fast") ? "definition_first" : slowCaseIds.has("semantic_navigate_definition_first_fast") ? "references_first" : "balanced";
23893
+ const pageSize2 = mode === "fast" || tokenHeavyCount > 0 ? 20 : 50;
23894
+ const referencePreview = mode === "fast" ? 10 : 20;
23895
+ const hintMaxLines = mode === "fast" ? 60 : 120;
23896
+ const diagnosticsPageSize = mode === "fast" ? 50 : 100;
23897
+ const diagnosticsPreviewLimit = mode === "fast" ? 50 : 100;
23898
+ const diagnosticsHotspotLimit = mode === "fast" ? 5 : 10;
23899
+ const rationale = [];
23900
+ if (!found)
23901
+ rationale.push("No benchmark report found; using conservative defaults.");
23902
+ if (budget === "regressed")
23903
+ rationale.push("Regression vs baseline detected; prioritize faster/safer navigation settings.");
23904
+ if (budget === "degraded")
23905
+ rationale.push("Benchmark has failing cases; reduce semantic payload until errors are resolved.");
23906
+ if (budget === "high_latency")
23907
+ rationale.push("High latency detected; prefer fast mode and smaller pages.");
23908
+ if (tokenHeavyCount > 0)
23909
+ rationale.push("Token-heavy benchmark cases detected; cap page sizes and preview windows.");
23910
+ if (rationale.length === 0)
23911
+ rationale.push("Benchmark health is acceptable; use balanced deep defaults.");
23912
+ return {
23913
+ version: 1,
23914
+ source: found ? "doctor.benchmarkInsights" : "default_policy",
23915
+ budget_status: budget,
23916
+ semantic_navigate: {
23917
+ mode,
23918
+ strategy,
23919
+ page_size: pageSize2,
23920
+ reference_preview: referencePreview,
23921
+ hint_start_line: 1,
23922
+ hint_max_lines: hintMaxLines
23923
+ },
23924
+ diagnostics_delta: {
23925
+ page_size: diagnosticsPageSize,
23926
+ preview_limit: diagnosticsPreviewLimit,
23927
+ hotspot_limit: diagnosticsHotspotLimit
23928
+ },
23929
+ rationale
23930
+ };
23931
+ })();
23065
23932
  const backendPackages = getBackendPackages(config2).filter((pkg) => config2.languages[pkg.language]?.enabled);
23066
23933
  const backendRuntimeMode = resolveBackendRuntimeMode3();
23067
23934
  const versionByLanguage = new Map(backendManager.getVersions().map((version2) => [version2.language, version2]));
@@ -23119,7 +23986,9 @@ server.registerTool("doctor", {
23119
23986
  }
23120
23987
  };
23121
23988
  const latestLookupStats = {
23989
+ schema_version: 1,
23122
23990
  enabled: !!check_latest_versions,
23991
+ cache_ttl_ms: REGISTRY_LOOKUP_TTL_MS,
23123
23992
  requested: 0,
23124
23993
  cache_hits: 0,
23125
23994
  inflight_hits: 0,
@@ -23233,6 +24102,7 @@ server.registerTool("doctor", {
23233
24102
  }];
23234
24103
  }));
23235
24104
  const backendVersionSummary = {
24105
+ schema_version: 1,
23236
24106
  check_latest_versions: !!check_latest_versions,
23237
24107
  lookup_stats: latestLookupStats,
23238
24108
  counts: backendVersionCounts,
@@ -23263,6 +24133,41 @@ server.registerTool("doctor", {
23263
24133
  uv_cache_dir: uvCacheDir,
23264
24134
  uv_cache_writable: uvCacheWritable
23265
24135
  };
24136
+ if (config2.languages.python?.enabled && backendRuntimeMode === "bundled") {
24137
+ const pythonBundledDir = resolveLikelyBundledBackendPath("python");
24138
+ const bundledExists = !!pythonBundledDir && fs3.existsSync(pythonBundledDir);
24139
+ const bundledRuntimeCheck = {
24140
+ runtime_mode: backendRuntimeMode,
24141
+ bundled_dir: pythonBundledDir,
24142
+ bundled_dir_exists: bundledExists,
24143
+ uv_available: checks4.uv.available,
24144
+ probe_executed: false
24145
+ };
24146
+ if (!bundledExists) {
24147
+ bundledRuntimeCheck.status = "missing_bundle";
24148
+ bundledRuntimeCheck.next_step = "Run `bun run build:bundled` to produce `dist/bundled/python`.";
24149
+ } else if (!checks4.uv.available) {
24150
+ bundledRuntimeCheck.status = "missing_uv";
24151
+ bundledRuntimeCheck.next_step = "Install uv and ensure `uv` is available in PATH.";
24152
+ } else if (probe_backends) {
24153
+ const probe = await runCommandCapture("uv", ["run", "--quiet", "--directory", pythonBundledDir, "python-lsp-mcp", "--help"], 8000);
24154
+ bundledRuntimeCheck.probe_executed = true;
24155
+ bundledRuntimeCheck.probe_command = `uv run --quiet --directory ${pythonBundledDir} python-lsp-mcp --help`;
24156
+ bundledRuntimeCheck.probe_exit_code = probe.code;
24157
+ bundledRuntimeCheck.probe_output = (probe.stdout || probe.stderr || "").trim().slice(0, 500);
24158
+ if (probe.code === 0) {
24159
+ bundledRuntimeCheck.status = "ok";
24160
+ bundledRuntimeCheck.next_step = "Bundled python runtime probe succeeded.";
24161
+ } else {
24162
+ bundledRuntimeCheck.status = "probe_failed";
24163
+ bundledRuntimeCheck.next_step = "Run the probe command manually to inspect full error and ensure UV cache/network access.";
24164
+ }
24165
+ } else {
24166
+ bundledRuntimeCheck.status = "probe_skipped";
24167
+ bundledRuntimeCheck.next_step = "Run doctor(probe_backends=true) to execute bundled python runtime probe.";
24168
+ }
24169
+ workspaceDependencyChecks.python_bundled_runtime = bundledRuntimeCheck;
24170
+ }
23266
24171
  const discoveryRoot = activeWorkspacePath;
23267
24172
  if (discoveryRoot && fileExistsSafe(discoveryRoot) && fs3.statSync(discoveryRoot).isDirectory()) {
23268
24173
  const candidates = discoverWorkspaceCandidates(discoveryRoot, 2);
@@ -23297,6 +24202,12 @@ server.registerTool("doctor", {
23297
24202
  };
23298
24203
  }
23299
24204
  const enabledLanguages = Object.keys(config2.languages).filter((lang) => config2.languages[lang]?.enabled);
24205
+ const inputSnapshot = readCapabilitySnapshot(capability_snapshot_id || null);
24206
+ let capabilitySnapshotStatus = "none";
24207
+ if (capability_snapshot_id && !inputSnapshot) {
24208
+ capabilitySnapshotStatus = "invalid_or_expired";
24209
+ }
24210
+ let outputCapabilitySnapshotId = inputSnapshot?.id || null;
23300
24211
  const workspaceDiscovery = workspaceDependencyChecks.language_workspace_discovery;
23301
24212
  const vueChecks = workspaceDependencyChecks.vue;
23302
24213
  const languageCommandChains = Object.fromEntries(enabledLanguages.map((lang) => {
@@ -23366,75 +24277,99 @@ server.registerTool("doctor", {
23366
24277
  return `read_file_with_hints(file='${sampleFile}', start_line=1, max_lines=80)`;
23367
24278
  if (feature === "call_hierarchy")
23368
24279
  return `call_hierarchy(file='${sampleFile}', line=1, column=1, direction='both')`;
24280
+ if (feature === "type_hierarchy")
24281
+ return `type_hierarchy(file='${sampleFile}', line=1, column=1, direction='both')`;
23369
24282
  return `hover(file='${sampleFile}', line=1, column=1)`;
23370
24283
  };
23371
24284
  const featureCapabilityMatrix = {};
23372
- for (const lang of enabledLanguages) {
23373
- const language = lang;
23374
- const chainWorkspace = languageCommandChains[language]?.workspace || null;
23375
- if (!probe_backends) {
23376
- const featureNextSteps = Object.fromEntries(LLM_FEATURE_TARGETS.map((feature) => {
23377
- const meta = LLM_FEATURE_PROBE_METADATA[feature];
23378
- return [
23379
- feature,
23380
- {
23381
- status: "unknown",
23382
- command: featureCommandTemplate(language, feature, chainWorkspace),
23383
- note: "Run doctor(probe_backends=true) for backend capability verification.",
23384
- expected_latency_ms: meta.expected_latency_ms,
23385
- failure_signatures: meta.failure_signatures
23386
- }
23387
- ];
23388
- }));
23389
- featureCapabilityMatrix[lang] = {
23390
- probe_required: true,
24285
+ const canReuseSnapshot = !!inputSnapshot && !probe_backends && JSON.stringify([...inputSnapshot.enabledLanguages].sort()) === JSON.stringify([...enabledLanguages].sort());
24286
+ if (canReuseSnapshot && inputSnapshot) {
24287
+ capabilitySnapshotStatus = "reused";
24288
+ for (const lang of enabledLanguages) {
24289
+ featureCapabilityMatrix[lang] = inputSnapshot.featureCapabilityMatrix[lang] || {
23391
24290
  status: "unknown",
23392
- next_step: "Call doctor(probe_backends=true) to fetch per-language feature capabilities.",
23393
- feature_next_steps: featureNextSteps
24291
+ note: "Language missing in capability snapshot; rerun doctor(probe_backends=true)."
23394
24292
  };
23395
- continue;
23396
24293
  }
23397
- try {
23398
- const tools = await backendManager.getTools(language);
23399
- const toolSet = new Set(tools.map((t) => t.name));
23400
- const features = Object.fromEntries(LLM_FEATURE_TARGETS.map((name) => [
23401
- name,
23402
- toolSet.has(name) ? "supported" : "not_supported"
23403
- ]));
23404
- const featureNextSteps = Object.fromEntries(LLM_FEATURE_TARGETS.map((feature) => {
23405
- const supported = toolSet.has(feature);
23406
- const command = featureCommandTemplate(language, feature, chainWorkspace);
23407
- const meta = LLM_FEATURE_PROBE_METADATA[feature];
23408
- return [
23409
- feature,
23410
- supported ? {
23411
- status: "supported",
23412
- command,
23413
- note: `Run ${feature} directly after workspace setup.`,
23414
- expected_latency_ms: meta.expected_latency_ms,
23415
- failure_signatures: meta.failure_signatures
23416
- } : {
23417
- status: "not_supported",
23418
- command,
23419
- fallback_command: "hover(file='/abs/path/to/file', line=1, column=1)",
23420
- note: "Feature missing in backend; expect strict NOT_IMPLEMENTED.",
23421
- expected_latency_ms: meta.expected_latency_ms,
23422
- failure_signatures: meta.failure_signatures
23423
- }
23424
- ];
23425
- }));
23426
- featureCapabilityMatrix[lang] = {
23427
- status: "ok",
23428
- tool_count: tools.length,
23429
- features,
23430
- feature_next_steps: featureNextSteps
23431
- };
23432
- } catch (error2) {
23433
- featureCapabilityMatrix[lang] = {
23434
- status: "error",
23435
- error: String(error2)
23436
- };
24294
+ } else
24295
+ for (const lang of enabledLanguages) {
24296
+ const language = lang;
24297
+ const chainWorkspace = languageCommandChains[language]?.workspace || null;
24298
+ if (!probe_backends) {
24299
+ const featureNextSteps = Object.fromEntries(LLM_FEATURE_TARGETS.map((feature) => {
24300
+ const meta = LLM_FEATURE_PROBE_METADATA[feature];
24301
+ return [
24302
+ feature,
24303
+ {
24304
+ status: "unknown",
24305
+ command: featureCommandTemplate(language, feature, chainWorkspace),
24306
+ note: "Run doctor(probe_backends=true) for backend capability verification.",
24307
+ expected_latency_ms: meta.expected_latency_ms,
24308
+ failure_signatures: meta.failure_signatures
24309
+ }
24310
+ ];
24311
+ }));
24312
+ featureCapabilityMatrix[lang] = {
24313
+ probe_required: true,
24314
+ status: "unknown",
24315
+ next_step: "Call doctor(probe_backends=true) to fetch per-language feature capabilities.",
24316
+ feature_next_steps: featureNextSteps
24317
+ };
24318
+ continue;
24319
+ }
24320
+ try {
24321
+ const tools = await backendManager.getTools(language);
24322
+ const toolSet = new Set(tools.map((t) => t.name));
24323
+ const features = Object.fromEntries(LLM_FEATURE_TARGETS.map((name) => [
24324
+ name,
24325
+ toolSet.has(name) ? "supported" : "not_supported"
24326
+ ]));
24327
+ const featureNextSteps = Object.fromEntries(LLM_FEATURE_TARGETS.map((feature) => {
24328
+ const supported = toolSet.has(feature);
24329
+ const command = featureCommandTemplate(language, feature, chainWorkspace);
24330
+ const meta = LLM_FEATURE_PROBE_METADATA[feature];
24331
+ return [
24332
+ feature,
24333
+ supported ? {
24334
+ status: "supported",
24335
+ command,
24336
+ note: `Run ${feature} directly after workspace setup.`,
24337
+ expected_latency_ms: meta.expected_latency_ms,
24338
+ failure_signatures: meta.failure_signatures
24339
+ } : {
24340
+ status: "not_supported",
24341
+ command,
24342
+ fallback_command: "hover(file='/abs/path/to/file', line=1, column=1)",
24343
+ note: "Feature missing in backend; expect strict NOT_IMPLEMENTED.",
24344
+ expected_latency_ms: meta.expected_latency_ms,
24345
+ failure_signatures: meta.failure_signatures
24346
+ }
24347
+ ];
24348
+ }));
24349
+ featureCapabilityMatrix[lang] = {
24350
+ status: "ok",
24351
+ tool_count: tools.length,
24352
+ features,
24353
+ feature_next_steps: featureNextSteps
24354
+ };
24355
+ } catch (error2) {
24356
+ featureCapabilityMatrix[lang] = {
24357
+ status: "error",
24358
+ error: String(error2)
24359
+ };
24360
+ }
23437
24361
  }
24362
+ if (probe_backends) {
24363
+ const snapshotId = makeCapabilitySnapshotId(enabledLanguages);
24364
+ capabilitySnapshotStore.set(snapshotId, {
24365
+ id: snapshotId,
24366
+ createdAt: Date.now(),
24367
+ expiresAt: Date.now() + CAPABILITY_SNAPSHOT_TTL_MS,
24368
+ enabledLanguages: [...enabledLanguages],
24369
+ featureCapabilityMatrix
24370
+ });
24371
+ outputCapabilitySnapshotId = snapshotId;
24372
+ capabilitySnapshotStatus = "created";
23438
24373
  }
23439
24374
  const probeResults = {};
23440
24375
  if (probe_backends) {
@@ -23457,6 +24392,17 @@ server.registerTool("doctor", {
23457
24392
  recommendations.push("Install uv for Python backend support.");
23458
24393
  if (!checks4.bun.available)
23459
24394
  recommendations.push("Install Bun if you run this server from source.");
24395
+ if (!benchmarkInsights.found) {
24396
+ recommendations.push(`Benchmark report not found. ${benchmarkInsights.next_step}`);
24397
+ } else {
24398
+ if (benchmarkInsights.budget_status === "regressed") {
24399
+ recommendations.push("Benchmark trend regressed vs baseline. Review benchmarkInsights.trend.regressions before rollout.");
24400
+ } else if (benchmarkInsights.budget_status === "degraded") {
24401
+ recommendations.push("Latest benchmark has failing cases; prioritize fixing those before enabling aggressive semantic automation.");
24402
+ } else if (benchmarkInsights.budget_status === "high_latency") {
24403
+ recommendations.push("Latest benchmark latency is high; prefer semantic_navigate(mode='fast') and narrower page_size defaults.");
24404
+ }
24405
+ }
23460
24406
  if (config2.languages.vue?.enabled && activeWorkspacePath) {
23461
24407
  const missing = (vueChecks?.projects || []).filter((p) => !p.ok);
23462
24408
  if (missing.length > 0) {
@@ -23486,6 +24432,22 @@ server.registerTool("doctor", {
23486
24432
  recommendations.push("Python backend failed before handshake. Run `uv run --directory dist/bundled/python python-lsp-mcp --help` to preinstall runtime dependencies.");
23487
24433
  }
23488
24434
  }
24435
+ const pythonBundledRuntimeCheck = workspaceDependencyChecks.python_bundled_runtime;
24436
+ if (pythonBundledRuntimeCheck) {
24437
+ if (pythonBundledRuntimeCheck.status === "missing_bundle") {
24438
+ recommendations.push(String(pythonBundledRuntimeCheck.next_step || "Bundled python runtime missing."));
24439
+ } else if (pythonBundledRuntimeCheck.status === "missing_uv") {
24440
+ recommendations.push(String(pythonBundledRuntimeCheck.next_step || "uv is required for bundled python runtime."));
24441
+ } else if (pythonBundledRuntimeCheck.status === "probe_failed") {
24442
+ recommendations.push("Bundled python runtime probe failed. Check uv cache/network, then retry doctor(probe_backends=true).");
24443
+ if (pythonBundledRuntimeCheck.probe_command) {
24444
+ recommendations.push(`Probe command: ${pythonBundledRuntimeCheck.probe_command}`);
24445
+ }
24446
+ }
24447
+ }
24448
+ if (capabilitySnapshotStatus === "invalid_or_expired") {
24449
+ recommendations.push("Provided capability_snapshot_id is invalid or expired. Run doctor(probe_backends=true) to refresh.");
24450
+ }
23489
24451
  for (const [lang, drift] of Object.entries(backendPackageDrift)) {
23490
24452
  if (drift.drift_status === "policy_drift") {
23491
24453
  recommendations.push(`${lang} backend is not using latest update policy. ${drift.next_step}`);
@@ -23504,9 +24466,13 @@ server.registerTool("doctor", {
23504
24466
  checks: checks4,
23505
24467
  activeWorkspacePath,
23506
24468
  backendRuntimeMode,
24469
+ capability_snapshot_id: outputCapabilitySnapshotId,
24470
+ capability_snapshot_status: capabilitySnapshotStatus,
23507
24471
  enabledLanguages,
23508
24472
  backendPackageDrift,
23509
24473
  backendVersionSummary,
24474
+ benchmarkInsights,
24475
+ llmSemanticDefaults,
23510
24476
  workspaceDependencyChecks,
23511
24477
  languageCommandChains,
23512
24478
  backendCommands,
@@ -23520,6 +24486,8 @@ server.registerTool("doctor", {
23520
24486
  for (const [name, check2] of Object.entries(checks4)) {
23521
24487
  items.push({ kind: "runtime_check", key: name, value: check2 });
23522
24488
  }
24489
+ items.push({ kind: "benchmark_insight", key: "latest", value: benchmarkInsights });
24490
+ items.push({ kind: "llm_default", key: "semantic_defaults", value: llmSemanticDefaults });
23523
24491
  for (const [name, depCheck] of Object.entries(workspaceDependencyChecks)) {
23524
24492
  items.push({ kind: "workspace_dependency", key: name, value: depCheck });
23525
24493
  }
@@ -23547,7 +24515,13 @@ server.registerTool("doctor", {
23547
24515
  enabledLanguages,
23548
24516
  recommendations_count: recommendations.length,
23549
24517
  command_chains_count: Object.keys(languageCommandChains).length,
24518
+ capability_snapshot_id: outputCapabilitySnapshotId,
24519
+ capability_snapshot_status: capabilitySnapshotStatus,
24520
+ backend_version_schema_version: backendVersionSummary.schema_version,
23550
24521
  backend_version_counts: backendVersionSummary.counts,
24522
+ benchmark_found: !!benchmarkInsights.found,
24523
+ benchmark_budget_status: benchmarkInsights.budget_status || "unknown",
24524
+ llm_defaults_version: llmSemanticDefaults.version,
23551
24525
  item_count: items.length
23552
24526
  };
23553
24527
  const doctorCursor = makeCursor("doctor", items, items.length, doctorSummary);
@@ -23585,7 +24559,8 @@ server.registerTool("lsp_probe_profile", {
23585
24559
  moniker: "p2_advanced",
23586
24560
  linked_editing_range: "p2_advanced",
23587
24561
  inlay_hint_resolve: "p2_advanced",
23588
- call_hierarchy: "p2_advanced"
24562
+ call_hierarchy: "p2_advanced",
24563
+ type_hierarchy: "p2_advanced"
23589
24564
  };
23590
24565
  const selectedFeatures = feature ? [feature] : [...LLM_FEATURE_TARGETS, "hover", "definition", "references"];
23591
24566
  const uniqueFeatures = Array.from(new Set(selectedFeatures)).filter((name) => LLM_FEATURE_PROBE_METADATA[name]);
@@ -23793,6 +24768,22 @@ function extractReferencesItems(parsed) {
23793
24768
  function extractReferencesCount(parsed, items) {
23794
24769
  return typeof parsed?.count === "number" ? parsed.count : items.length;
23795
24770
  }
24771
function extractDiagnosticsItems(parsed) {
  // Accept either a bare array of diagnostics or an object wrapping a
  // `diagnostics` array; anything else yields an empty list.
  if (Array.isArray(parsed)) {
    return parsed;
  }
  const nested = parsed?.diagnostics;
  return Array.isArray(nested) ? nested : [];
}
24778
function fingerprintDiagnostic(diag) {
  // Build a stable identity string for a diagnostic so runs can be diffed.
  // `file`/`path` use `||` deliberately: an empty-string file should fall through.
  const file = String(diag.file || diag.path || "");
  // Prefer flat 1-based line/column fields; fall back to LSP-style range start.
  const line = Number(diag.line ?? diag?.range?.start?.line ?? -1);
  const column = Number(diag.column ?? diag?.range?.start?.character ?? -1);
  // `??` (not `||`) so a numeric severity or code of 0 is kept as "0" instead of
  // collapsing to "" and colliding with diagnostics that have no code at all.
  const severity = String(diag.severity ?? "");
  const code = String(diag.code ?? "");
  const message = String(diag.message ?? "");
  return `${file}|${line}|${column}|${severity}|${code}|${message}`;
}
23796
24787
  function isInlayHintUnsupportedError(errorText) {
23797
24788
  const text = errorText.toLowerCase();
23798
24789
  return text.includes("textdocument/inlayhint") || text.includes("unhandled method") || text.includes("method not found") || text.includes("not implemented") || text.includes("unknown tool") || text.includes("tool not found") || text.includes("inlay_hints") || text.includes("-32601");
@@ -23924,6 +24915,169 @@ function findWorkspaceIdentifierHits(identifier, workspacePath) {
23924
24915
  }
23925
24916
  return hits;
23926
24917
  }
24918
function searchWorkspacePatternHits(workspacePath, pattern, globs, maxHits = 200) {
  // Best-effort ripgrep search over the workspace. Returns up to `maxHits`
  // {file, line, column, text} records; degrades to [] when rg is missing,
  // the search fails, or there is nothing to search for.
  if (!workspacePath || !pattern.trim())
    return [];
  const args = [
    "--no-ignore-vcs",
    "--line-number",
    "--column",
    "--no-heading",
    "--color",
    "never",
    ...globs.flatMap((g) => ["-g", g]),
    // `-e` marks the pattern explicitly so regexes that start with "-"
    // (common for generated patterns) are not misparsed by rg as flags.
    "-e",
    pattern,
    "."
  ];
  const result = spawnSync("rg", args, { cwd: workspacePath, encoding: "utf-8" });
  // `result.error` covers ENOENT (rg not installed); empty stdout covers no matches.
  if (result.error || typeof result.stdout !== "string" || result.stdout.trim().length === 0)
    return [];
  const hits = [];
  for (const line of result.stdout.trim().split("\n")) {
    // rg --no-heading output: <relative path>:<line>:<column>:<text>
    const match = /^(.*?):(\d+):(\d+):(.*)$/.exec(line);
    if (!match)
      continue;
    hits.push({
      file: path2.resolve(workspacePath, match[1]),
      line: Number.parseInt(match[2], 10),
      column: Number.parseInt(match[3], 10),
      text: match[4]
    });
    if (hits.length >= maxHits)
      break;
  }
  return hits;
}
24952
function parseTypeSupertypesFromLine(language, lineText, symbol) {
  // Approximate extraction of direct supertype names from a single
  // class/interface declaration line. Returns [] when nothing matches.
  const escaped = symbol.replace(/[.*+?^${}()|[\]\\]/g, "\\$&");
  if (language === "python") {
    const m = new RegExp(`^\\s*class\\s+${escaped}\\s*\\(([^)]*)\\)`).exec(lineText);
    if (!m)
      return [];
    // Drop keyword arguments such as `metaclass=Meta`; they are not base classes.
    return m[1].split(",").map((s) => s.trim()).filter((s) => s && !s.includes("="));
  }
  const classMatch = new RegExp(`\\bclass\\s+${escaped}\\b([^\\{]*)`).exec(lineText);
  const interfaceMatch = new RegExp(`\\binterface\\s+${escaped}\\b([^\\{]*)`).exec(lineText);
  let tail = (classMatch?.[1] || interfaceMatch?.[1] || "").trim();
  if (!tail)
    return [];
  // Strip generic argument lists BEFORE splitting on commas, otherwise
  // `extends Map<string, number>` would be split inside the type arguments.
  // Loop so nested generics like Foo<Bar<Baz>> are fully removed.
  let previous;
  do {
    previous = tail;
    tail = tail.replace(/<[^<>]*>/g, "");
  } while (tail !== previous);
  const supers = [];
  // Separate the clauses so the `extends` capture cannot swallow `implements`.
  const [extendsSide, implementsSide = ""] = tail.split(/\bimplements\b/);
  const extendsMatch = /\bextends\s+([A-Za-z0-9_$.,\s]+)/.exec(extendsSide);
  if (extendsMatch?.[1]) {
    supers.push(...extendsMatch[1].split(",").map((s) => s.trim()).filter(Boolean));
  }
  supers.push(...implementsSide.split(",").map((s) => s.trim()).filter(Boolean));
  return Array.from(new Set(supers));
}
24976
function toHintPosition1Based(hint, language) {
  // Normalize an inlay hint's position to 1-based {line, column}; null if unknown.
  const pos = hint.position;
  if (!pos || typeof pos !== "object" || typeof pos.line !== "number") {
    return null;
  }
  if (typeof pos.column === "number") {
    // Already carries an explicit column; pass through as-is.
    return { line: pos.line, column: pos.column };
  }
  if (typeof pos.character === "number") {
    // LSP-style zero-based line/character: shift both to 1-based.
    return { line: pos.line + 1, column: pos.character + 1 };
  }
  // For typescript, a bare numeric line is treated as 1-based with column 1;
  // other languages report no usable position.
  return language === "typescript" ? { line: pos.line, column: 1 } : null;
}
24993
function buildApproximateSemanticTokens(content, language) {
  // Lexical (regex-based) token classification used as an approximate stand-in
  // when a backend does not support semantic tokens. Capped at 5000 tokens.
  const KEYWORDS = new Set([
    "class", "interface", "type", "extends", "implements", "function", "return",
    "const", "let", "var", "if", "else", "for", "while", "import", "export",
    "from", "def", "async", "await", "try", "except"
  ]);
  const TOKEN_CAP = 5000;
  // Classification mirrors a precedence chain: string, number, keyword,
  // Capitalized identifier (class/type), call-site, member access, variable.
  const classify = (text, lineText, startIndex) => {
    if (text.startsWith("'") || text.startsWith('"'))
      return "string";
    if (/^\d/.test(text))
      return "number";
    if (KEYWORDS.has(text))
      return "keyword";
    if (/[A-Z]/.test(text[0]))
      return language === "python" ? "class" : "type";
    if (lineText.slice(startIndex + text.length).trimStart().startsWith("("))
      return "function";
    if (startIndex > 0 && lineText[startIndex - 1] === ".")
      return "property";
    return "variable";
  };
  const tokens = [];
  const lines = content.split("\n");
  for (let lineIdx = 0; lineIdx < lines.length; lineIdx++) {
    const lineText = lines[lineIdx];
    if (!lineText)
      continue;
    // Fresh /g regex per line: matches strings, numbers, and identifiers.
    const matcher = /"([^"\\]|\\.)*"|'([^'\\]|\\.)*'|\b\d+(?:\.\d+)?\b|[A-Za-z_$][A-Za-z0-9_$]*/g;
    let found;
    while ((found = matcher.exec(lineText)) !== null) {
      const text = found[0];
      tokens.push({
        line: lineIdx + 1,
        column: found.index + 1,
        length: text.length,
        token_type: classify(text, lineText, found.index),
        token_modifiers: [],
        text
      });
      if (tokens.length >= TOKEN_CAP)
        return tokens;
    }
  }
  return tokens;
}
25057
function buildApproximateLinkedEditingRanges(content, line, column) {
  // Word-boundary scan for every occurrence of the identifier under the cursor;
  // approximate stand-in for textDocument/linkedEditingRange. Capped at 5000 ranges.
  const ident = extractIdentifierAtPosition(content, line, column);
  if (!ident) {
    return { identifier: null, ranges: [] };
  }
  const RANGE_CAP = 5000;
  const escaped = ident.replace(/[.*+?^${}()|[\]\\]/g, "\\$&");
  const wordRegex = new RegExp(`\\b${escaped}\\b`, "g");
  const ranges = [];
  const lines = content.split("\n");
  for (let rowIdx = 0; rowIdx < lines.length; rowIdx++) {
    // /g regexes are stateful; rewind before scanning each new line.
    wordRegex.lastIndex = 0;
    let hit;
    while ((hit = wordRegex.exec(lines[rowIdx])) !== null) {
      const startColumn = hit.index + 1;
      ranges.push({
        start: { line: rowIdx + 1, column: startColumn },
        end: { line: rowIdx + 1, column: startColumn + ident.length }
      });
      if (ranges.length >= RANGE_CAP) {
        return { identifier: ident, ranges };
      }
    }
  }
  return { identifier: ident, ranges };
}
23927
25081
  function findDeclarationInFile(content, identifier) {
23928
25082
  const lines = content.split(`
23929
25083
  `);
@@ -24081,6 +25235,7 @@ function buildVueMissingDepsErrorResponse(toolName, workspacePath) {
24081
25235
  `cd ${installRoot} && yarn add -D typescript @vue/language-server`,
24082
25236
  `cd ${installRoot} && bun add -d typescript @vue/language-server`
24083
25237
  ];
25238
+ const recoveryPlan = buildRecoveryPlan(installCommands, installCommands[0]);
24084
25239
  return {
24085
25240
  content: [{
24086
25241
  type: "text",
@@ -24094,6 +25249,7 @@ function buildVueMissingDepsErrorResponse(toolName, workspacePath) {
24094
25249
  strict_mode: VUE_STRICT_SEMANTIC,
24095
25250
  missing_packages: ["typescript", "@vue/language-server"],
24096
25251
  install_commands: installCommands,
25252
+ recovery_plan: recoveryPlan,
24097
25253
  next_step: installCommands[0],
24098
25254
  required_packages: ["typescript", "@vue/language-server"],
24099
25255
  install_example: installCommands[0],
@@ -25366,6 +26522,259 @@ ${results.join(`
25366
26522
  const availableTools = await backendManager.getTools(language);
25367
26523
  const supportsTool = availableTools.some((t) => t.name === tool.name);
25368
26524
  if (!supportsTool) {
26525
+ if (tool.name === "inlay_hint_resolve" && availableTools.some((t) => t.name === "inlay_hints")) {
26526
+ try {
26527
+ const hintsResponse = await callBackendTool("inlay_hints", { file: filePath });
26528
+ const parsedHints = JSON.parse(hintsResponse.content[0]?.text || "{}");
26529
+ const hints = Array.isArray(parsedHints?.hints) ? parsedHints.hints : [];
26530
+ const targetLine = Number(args.line);
26531
+ const targetColumn = Number(args.column);
26532
+ const targetLabel = typeof args.label === "string" ? String(args.label) : null;
26533
+ const withDistance = hints.map((hint) => {
26534
+ const pos = toHintPosition1Based(hint, language);
26535
+ if (!pos)
26536
+ return null;
26537
+ const labelRaw = hint.label;
26538
+ const label = typeof labelRaw === "string" ? labelRaw : Array.isArray(labelRaw) ? labelRaw.map((p) => String(p.value || "")).join("") : "";
26539
+ if (targetLabel && label.trim() !== targetLabel.trim())
26540
+ return null;
26541
+ const distance = Math.abs(pos.line - targetLine) * 1000 + Math.abs(pos.column - targetColumn);
26542
+ return { hint, label, pos, distance };
26543
+ }).filter((item) => !!item).sort((a, b) => a.distance - b.distance);
26544
+ const best = withDistance[0];
26545
+ if (!best) {
26546
+ return withSemanticContext({
26547
+ content: [{
26548
+ type: "text",
26549
+ text: JSON.stringify(withStandardCostFields(withConfidenceFields({
26550
+ error: "No inlay hint found at this position",
26551
+ error_code: "NO_INLAY_HINT_FOUND",
26552
+ strict_mode: true,
26553
+ fallback_used: true,
26554
+ approximate: true,
26555
+ available_hints: hints.length,
26556
+ next_step: "Call inlay_hints(file=...) to inspect available hint positions, then retry with exact line/column.",
26557
+ recovery_plan: buildRecoveryPlan([`inlay_hints(file='${filePath}')`, `inlay_hint_resolve(file='${filePath}', line=${targetLine}, column=${targetColumn})`], "Inspect hint positions and retry inlay_hint_resolve with exact coordinates.")
26558
+ })))
26559
+ }]
26560
+ }, tool.name, resolvedWorkspace, backendInstanceId(), language);
26561
+ }
26562
+ return withSemanticContext({
26563
+ content: [{
26564
+ type: "text",
26565
+ text: JSON.stringify(withStandardCostFields(withConfidenceFields({
26566
+ ok: true,
26567
+ tool: "inlay_hint_resolve",
26568
+ strict_mode: true,
26569
+ fallback_used: true,
26570
+ approximate: true,
26571
+ position: best.pos,
26572
+ label: best.label,
26573
+ hint: best.hint,
26574
+ available_hints: hints.length,
26575
+ next_step: "Use resolved hint label/context for downstream explanation or refactor planning."
26576
+ })))
26577
+ }]
26578
+ }, tool.name, resolvedWorkspace, backendInstanceId(), language);
26579
+ } catch (error2) {
26580
+ return withSemanticContext({
26581
+ content: [{
26582
+ type: "text",
26583
+ text: JSON.stringify(withStandardCostFields(withConfidenceFields({
26584
+ error: String(error2),
26585
+ error_code: "INLAY_HINT_RESOLVE_FALLBACK_ERROR",
26586
+ strict_mode: true,
26587
+ fallback_used: true,
26588
+ approximate: true,
26589
+ next_step: "Retry inlay_hint_resolve or call inlay_hints(file=...) directly."
26590
+ })))
26591
+ }]
26592
+ }, tool.name, resolvedWorkspace, backendInstanceId(), language);
26593
+ }
26594
+ }
26595
+ if (tool.name === "type_hierarchy") {
26596
+ try {
26597
+ const direction = String(args.direction || "both");
26598
+ const sourceText = fs3.existsSync(absPath) ? fs3.readFileSync(absPath, "utf-8") : "";
26599
+ const symbol = extractIdentifierAtPosition(sourceText, Number(args.line), Number(args.column));
26600
+ if (!symbol) {
26601
+ return withSemanticContext({
26602
+ content: [{
26603
+ type: "text",
26604
+ text: JSON.stringify(withStandardCostFields(withConfidenceFields({
26605
+ error: "Unable to infer symbol for type hierarchy fallback",
26606
+ error_code: "NO_SYMBOL_AT_POSITION",
26607
+ strict_mode: true,
26608
+ fallback_used: true,
26609
+ approximate: true,
26610
+ next_step: "Move cursor onto a class/interface symbol and retry type_hierarchy."
26611
+ })))
26612
+ }]
26613
+ }, tool.name, resolvedWorkspace, backendInstanceId(), language);
26614
+ }
26615
+ const lines = sourceText.split(`
26616
+ `);
26617
+ const targetLine = Number(args.line);
26618
+ const lineText = lines[Math.max(0, targetLine - 1)] || "";
26619
+ const supertypesRaw = parseTypeSupertypesFromLine(language, lineText, symbol);
26620
+ const supertypes = supertypesRaw.map((name) => ({ name, relation: "supertype" }));
26621
+ const escaped = symbol.replace(/[.*+?^${}()|[\]\\]/g, "\\$&");
26622
+ const subtypePatterns = language === "python" ? [`^\\s*class\\s+[A-Za-z_][A-Za-z0-9_]*\\s*\\([^)]*\\b${escaped}\\b[^)]*\\)`] : [
26623
+ `\\bclass\\s+[A-Za-z_$][A-Za-z0-9_$]*\\s+extends\\s+${escaped}\\b`,
26624
+ `\\b(class|interface)\\s+[A-Za-z_$][A-Za-z0-9_$]*[^\\n\\{]*\\bimplements\\b[^\\n\\{]*\\b${escaped}\\b`
26625
+ ];
26626
+ const subtypeGlobs = language === "python" ? ["*.py"] : ["*.ts", "*.tsx", "*.js", "*.jsx", "*.vue", "*.d.ts"];
26627
+ const subtypeHits = resolvedWorkspace ? subtypePatterns.flatMap((p) => searchWorkspacePatternHits(resolvedWorkspace, p, subtypeGlobs, 120)) : [];
26628
+ const seenSubtype = new Set;
26629
+ const subtypes = subtypeHits.filter((hit) => {
26630
+ const key = `${hit.file}:${hit.line}:${hit.column}`;
26631
+ if (seenSubtype.has(key))
26632
+ return false;
26633
+ seenSubtype.add(key);
26634
+ return true;
26635
+ }).map((hit) => ({
26636
+ file: hit.file,
26637
+ line: hit.line,
26638
+ column: hit.column,
26639
+ preview: hit.text.trim(),
26640
+ relation: "subtype"
26641
+ })).slice(0, 120);
26642
+ const includeSuper = direction === "both" || direction === "supertypes";
26643
+ const includeSub = direction === "both" || direction === "subtypes";
26644
+ const filteredSupers = includeSuper ? supertypes : [];
26645
+ const filteredSubs = includeSub ? subtypes : [];
26646
+ return withSemanticContext({
26647
+ content: [{
26648
+ type: "text",
26649
+ text: JSON.stringify(withStandardCostFields(withConfidenceFields({
26650
+ ok: true,
26651
+ tool: "type_hierarchy",
26652
+ strict_mode: true,
26653
+ fallback_used: true,
26654
+ approximate: true,
26655
+ symbol,
26656
+ direction,
26657
+ hierarchy: {
26658
+ supertypes: filteredSupers,
26659
+ subtypes: filteredSubs
26660
+ },
26661
+ next_step: "Verify approximate hierarchy edges with definition/references before edits.",
26662
+ result_size: filteredSupers.length + filteredSubs.length,
26663
+ truncated: subtypes.length > filteredSubs.length,
26664
+ cursor_available: false
26665
+ })))
26666
+ }]
26667
+ }, tool.name, resolvedWorkspace, backendInstanceId(), language);
26668
+ } catch (error2) {
26669
+ return withSemanticContext({
26670
+ content: [{
26671
+ type: "text",
26672
+ text: JSON.stringify(withStandardCostFields(withConfidenceFields({
26673
+ error: String(error2),
26674
+ error_code: "TYPE_HIERARCHY_FALLBACK_ERROR",
26675
+ strict_mode: true,
26676
+ fallback_used: true,
26677
+ approximate: true,
26678
+ next_step: "Retry type_hierarchy or use definition/references manually."
26679
+ })))
26680
+ }]
26681
+ }, tool.name, resolvedWorkspace, backendInstanceId(), language);
26682
+ }
26683
+ }
26684
+ if (tool.name === "semantic_tokens") {
26685
+ try {
26686
+ const sourceText = fs3.existsSync(absPath) ? fs3.readFileSync(absPath, "utf-8") : "";
26687
+ const approxTokens = buildApproximateSemanticTokens(sourceText, language);
26688
+ return withSemanticContext({
26689
+ content: [{
26690
+ type: "text",
26691
+ text: JSON.stringify(withStandardCostFields(withConfidenceFields({
26692
+ ok: true,
26693
+ tool: "semantic_tokens",
26694
+ strict_mode: true,
26695
+ fallback_used: true,
26696
+ approximate: true,
26697
+ file: absPath,
26698
+ tokens: approxTokens.slice(0, 1000),
26699
+ count: approxTokens.length,
26700
+ next_step: "Use approximate tokens for structural reading; validate with hover/definition before edits.",
26701
+ truncated: approxTokens.length > 1000,
26702
+ cursor_available: false
26703
+ })))
26704
+ }]
26705
+ }, tool.name, resolvedWorkspace, backendInstanceId(), language);
26706
+ } catch (error2) {
26707
+ return withSemanticContext({
26708
+ content: [{
26709
+ type: "text",
26710
+ text: JSON.stringify(withStandardCostFields(withConfidenceFields({
26711
+ error: String(error2),
26712
+ error_code: "SEMANTIC_TOKENS_FALLBACK_ERROR",
26713
+ strict_mode: true,
26714
+ fallback_used: true,
26715
+ approximate: true,
26716
+ next_step: "Retry semantic_tokens or use summarize_file/read_file_with_hints."
26717
+ })))
26718
+ }]
26719
+ }, tool.name, resolvedWorkspace, backendInstanceId(), language);
26720
+ }
26721
+ }
26722
+ if (tool.name === "linked_editing_range") {
26723
+ try {
26724
+ const sourceText = fs3.existsSync(absPath) ? fs3.readFileSync(absPath, "utf-8") : "";
26725
+ const line = Number(args.line);
26726
+ const column = Number(args.column);
26727
+ const approx = buildApproximateLinkedEditingRanges(sourceText, line, column);
26728
+ if (!approx.identifier) {
26729
+ return withSemanticContext({
26730
+ content: [{
26731
+ type: "text",
26732
+ text: JSON.stringify(withStandardCostFields(withConfidenceFields({
26733
+ error: "Unable to infer symbol for linked editing fallback",
26734
+ error_code: "NO_SYMBOL_AT_POSITION",
26735
+ strict_mode: true,
26736
+ fallback_used: true,
26737
+ approximate: true,
26738
+ next_step: "Move cursor onto an identifier and retry linked_editing_range."
26739
+ })))
26740
+ }]
26741
+ }, tool.name, resolvedWorkspace, backendInstanceId(), language);
26742
+ }
26743
+ return withSemanticContext({
26744
+ content: [{
26745
+ type: "text",
26746
+ text: JSON.stringify(withStandardCostFields(withConfidenceFields({
26747
+ ok: true,
26748
+ tool: "linked_editing_range",
26749
+ strict_mode: true,
26750
+ fallback_used: true,
26751
+ approximate: true,
26752
+ file: absPath,
26753
+ identifier: approx.identifier,
26754
+ ranges: approx.ranges.slice(0, 1000),
26755
+ count: approx.ranges.length,
26756
+ next_step: "Use linked ranges for synchronized edits; validate with references before large refactors.",
26757
+ truncated: approx.ranges.length > 1000,
26758
+ cursor_available: false
26759
+ })))
26760
+ }]
26761
+ }, tool.name, resolvedWorkspace, backendInstanceId(), language);
26762
+ } catch (error2) {
26763
+ return withSemanticContext({
26764
+ content: [{
26765
+ type: "text",
26766
+ text: JSON.stringify(withStandardCostFields(withConfidenceFields({
26767
+ error: String(error2),
26768
+ error_code: "LINKED_EDITING_RANGE_FALLBACK_ERROR",
26769
+ strict_mode: true,
26770
+ fallback_used: true,
26771
+ approximate: true,
26772
+ next_step: "Retry linked_editing_range or use references for safer edit scope."
26773
+ })))
26774
+ }]
26775
+ }, tool.name, resolvedWorkspace, backendInstanceId(), language);
26776
+ }
26777
+ }
25369
26778
  return withSemanticContext({
25370
26779
  content: [
25371
26780
  {
@@ -25961,4 +27370,4 @@ main().catch((error2) => {
25961
27370
  process.exit(1);
25962
27371
  });
25963
27372
 
25964
- //# debugId=BA652C589634FFA364756E2164756E21
27373
+ //# debugId=15CB193D47835A7764756E2164756E21